ixgbe: support TCP segmentation offload
diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
index f130080..937fc3c 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
 #define IXGBE_MMW_SIZE_DEFAULT        0x4
 #define IXGBE_MMW_SIZE_JUMBO_FRAME    0x14
 
+/*
+ *  Default values for RX/TX configuration
+ */
+#define IXGBE_DEFAULT_RX_FREE_THRESH  32
+#define IXGBE_DEFAULT_RX_PTHRESH      8
+#define IXGBE_DEFAULT_RX_HTHRESH      8
+#define IXGBE_DEFAULT_RX_WTHRESH      0
+
+#define IXGBE_DEFAULT_TX_FREE_THRESH  32
+#define IXGBE_DEFAULT_TX_PTHRESH      32
+#define IXGBE_DEFAULT_TX_HTHRESH      0
+#define IXGBE_DEFAULT_TX_WTHRESH      0
+#define IXGBE_DEFAULT_TX_RSBIT_THRESH 32
+
+/* Bit shift and mask */
+#define IXGBE_4_BIT_WIDTH  (CHAR_BIT / 2)
+#define IXGBE_4_BIT_MASK   RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t)
+#define IXGBE_8_BIT_WIDTH  CHAR_BIT
+#define IXGBE_8_BIT_MASK   UINT8_MAX
+
 #define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */
 
 #define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))
@@ -118,7 +138,11 @@ static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
                                             uint8_t stat_idx,
                                             uint8_t is_rx);
 static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
-                               struct rte_eth_dev_info *dev_info);
+                              struct rte_eth_dev_info *dev_info);
+static void ixgbevf_dev_info_get(struct rte_eth_dev *dev,
+                                struct rte_eth_dev_info *dev_info);
+static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+
 static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
                uint16_t vlan_id, int on);
 static void ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id);
@@ -141,9 +165,11 @@ static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
 static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
                struct rte_eth_pfc_conf *pfc_conf);
 static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
-               struct rte_eth_rss_reta *reta_conf);
+                       struct rte_eth_rss_reta_entry64 *reta_conf,
+                       uint16_t reta_size);
 static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
-               struct rte_eth_rss_reta *reta_conf);
+                       struct rte_eth_rss_reta_entry64 *reta_conf,
+                       uint16_t reta_size);
 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
 static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
@@ -217,6 +243,8 @@ static int ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
 static int ixgbe_get_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
                        struct rte_5tuple_filter *filter, uint16_t *rx_queue);
 
+static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
+
 /*
  * Define VF Stats MACRO for Non "cleared on read" register
  */
@@ -293,6 +321,7 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = {
        .stats_reset          = ixgbe_dev_stats_reset,
        .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
        .dev_infos_get        = ixgbe_dev_info_get,
+       .mtu_set              = ixgbe_dev_mtu_set,
        .vlan_filter_set      = ixgbe_vlan_filter_set,
        .vlan_tpid_set        = ixgbe_vlan_tpid_set,
        .vlan_offload_set     = ixgbe_vlan_offload_set,
@@ -371,7 +400,8 @@ static struct eth_dev_ops ixgbevf_eth_dev_ops = {
        .stats_get            = ixgbevf_dev_stats_get,
        .stats_reset          = ixgbevf_dev_stats_reset,
        .dev_close            = ixgbevf_dev_close,
-       .dev_infos_get        = ixgbe_dev_info_get,
+       .dev_infos_get        = ixgbevf_dev_info_get,
+       .mtu_set              = ixgbevf_dev_set_mtu,
        .vlan_filter_set      = ixgbevf_vlan_filter_set,
        .vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
        .vlan_offload_set     = ixgbevf_vlan_offload_set,
@@ -537,15 +567,19 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
        uint32_t q_map;
        uint8_t n, offset;
 
-       if ((hw->mac.type != ixgbe_mac_82599EB) && (hw->mac.type != ixgbe_mac_X540))
+       if ((hw->mac.type != ixgbe_mac_82599EB) &&
+               (hw->mac.type != ixgbe_mac_X540) &&
+               (hw->mac.type != ixgbe_mac_X550) &&
+               (hw->mac.type != ixgbe_mac_X550EM_x))
                return -ENOSYS;
 
-       PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d\n",
-                    (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", queue_id, stat_idx);
+       PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d",
+                    (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
+                    queue_id, stat_idx);
 
        n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
        if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
-               PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded\n");
+               PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
                return -EIO;
        }
        offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
@@ -565,19 +599,20 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
        else
                stat_mappings->rqsmr[n] |= qsmr_mask;
 
-       PMD_INIT_LOG(INFO, "Set port %d, %s queue_id %d to stat index %d\n"
-                    "%s[%d] = 0x%08x\n",
-                    (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", queue_id, stat_idx,
-                    is_rx ? "RQSMR" : "TQSM",n, is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);
+       PMD_INIT_LOG(INFO, "Set port %d, %s queue_id %d to stat index %d",
+                    (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
+                    queue_id, stat_idx);
+       PMD_INIT_LOG(INFO, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
+                    is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);
 
        /* Now write the mapping in the appropriate register */
        if (is_rx) {
-               PMD_INIT_LOG(INFO, "Write 0x%x to RX IXGBE stat mapping reg:%d\n",
+               PMD_INIT_LOG(INFO, "Write 0x%x to RX IXGBE stat mapping reg:%d",
                             stat_mappings->rqsmr[n], n);
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
        }
        else {
-               PMD_INIT_LOG(INFO, "Write 0x%x to TX IXGBE stat mapping reg:%d\n",
+               PMD_INIT_LOG(INFO, "Write 0x%x to TX IXGBE stat mapping reg:%d",
                             stat_mappings->tqsm[n], n);
                IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
        }
@@ -637,8 +672,10 @@ ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config)
        /* support all DCB capabilities in 82599 */
        dcb_config->support.capabilities = 0xFF;
 
-       /*we only support 4 Tcs for X540*/
-       if (hw->mac.type == ixgbe_mac_X540) {
+       /*we only support 4 Tcs for X540, X550 */
+       if (hw->mac.type == ixgbe_mac_X540 ||
+               hw->mac.type == ixgbe_mac_X550 ||
+               hw->mac.type == ixgbe_mac_X550EM_x) {
                dcb_config->num_tcs.pg_tcs = 4;
                dcb_config->num_tcs.pfc_tcs = 4;
        }
@@ -660,7 +697,7 @@ ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
         */
        mask = IXGBE_GSSR_PHY0_SM << hw->bus.func;
        if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
-                  DEBUGOUT1("SWFW phy%d lock released", hw->bus.func);
+               PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func);
        }
        ixgbe_release_swfw_semaphore(hw, mask);
 
@@ -672,7 +709,7 @@ ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
         */
        mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM;
        if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
-                  DEBUGOUT("SWFW common locks released");
+               PMD_DRV_LOG(DEBUG, "SWFW common locks released");
        }
        ixgbe_release_swfw_semaphore(hw, mask);
 }
@@ -718,11 +755,9 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
        hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
-#ifdef RTE_LIBRTE_IXGBE_ALLOW_UNSUPPORTED_SFP
        hw->allow_unsupported_sfp = 1;
-#endif
 
-       /* Initialize the shared code */
+       /* Initialize the shared code (base driver) */
 #ifdef RTE_NIC_BYPASS
        diag = ixgbe_bypass_init_shared_code(hw);
 #else
@@ -784,11 +819,12 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
        if (diag == IXGBE_ERR_EEPROM_VERSION) {
                PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
                    "LOM.  Please be aware there may be issues associated "
-                   "with your hardware.\n If you are experiencing problems "
+                   "with your hardware.");
+               PMD_INIT_LOG(ERR, "If you are experiencing problems "
                    "please contact your Intel or hardware representative "
-                   "who provided you with this hardware.\n");
+                   "who provided you with this hardware.");
        } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
-               PMD_INIT_LOG(ERR, "Unsupported SFP+ Module\n");
+               PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
        if (diag) {
                PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
                return -EIO;
@@ -842,12 +878,11 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
        IXGBE_WRITE_FLUSH(hw);
 
        if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
-               PMD_INIT_LOG(DEBUG,
-                            "MAC: %d, PHY: %d, SFP+: %d<n",
+               PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
                             (int) hw->mac.type, (int) hw->phy.type,
                             (int) hw->phy.sfp_type);
        else
-               PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d\n",
+               PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
                             (int) hw->mac.type, (int) hw->phy.type);
 
        PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
@@ -926,7 +961,7 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
                IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
        struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
 
-       PMD_INIT_LOG(DEBUG, "eth_ixgbevf_dev_init");
+       PMD_INIT_FUNC_TRACE();
 
        eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
        eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
@@ -953,7 +988,7 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
        /* initialize the hw strip bitmap*/
        memset(hwstrip, 0, sizeof(*hwstrip));
 
-       /* Initialize the shared code */
+       /* Initialize the shared code (base driver) */
        diag = ixgbe_init_shared_code(hw);
        if (diag != IXGBE_SUCCESS) {
                PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag);
@@ -1005,16 +1040,15 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
                        eth_dev->data->mac_addrs = NULL;
                        return diag;
                }
-               RTE_LOG(INFO, PMD,
-                       "\tVF MAC address not assigned by Host PF\n"
-                       "\tAssign randomly generated MAC address "
-                       "%02x:%02x:%02x:%02x:%02x:%02x\n",
-                       perm_addr->addr_bytes[0],
-                       perm_addr->addr_bytes[1],
-                       perm_addr->addr_bytes[2],
-                       perm_addr->addr_bytes[3],
-                       perm_addr->addr_bytes[4],
-                       perm_addr->addr_bytes[5]);
+               PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
+               PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
+                            "%02x:%02x:%02x:%02x:%02x:%02x",
+                            perm_addr->addr_bytes[0],
+                            perm_addr->addr_bytes[1],
+                            perm_addr->addr_bytes[2],
+                            perm_addr->addr_bytes[3],
+                            perm_addr->addr_bytes[4],
+                            perm_addr->addr_bytes[5]);
        }
 
        /* Copy the permanent MAC address */
@@ -1031,9 +1065,9 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
                        return (-EIO);
        }
 
-       PMD_INIT_LOG(DEBUG, "\nport %d vendorID=0x%x deviceID=0x%x mac.type=%s\n",
-                        eth_dev->data->port_id, pci_dev->id.vendor_id, pci_dev->id.device_id,
-                        "ixgbe_mac_82599_vf");
+       PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
+                    eth_dev->data->port_id, pci_dev->id.vendor_id,
+                    pci_dev->id.device_id, "ixgbe_mac_82599_vf");
 
        return 0;
 }
@@ -1042,7 +1076,7 @@ static struct eth_driver rte_ixgbe_pmd = {
        {
                .name = "rte_ixgbe_pmd",
                .id_table = pci_id_ixgbe_map,
-               .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+               .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
        },
        .eth_dev_init = eth_ixgbe_dev_init,
        .dev_private_size = sizeof(struct ixgbe_adapter),
@@ -1083,7 +1117,7 @@ rte_ixgbe_pmd_init(const char *name __rte_unused, const char *params __rte_unuse
 static int
 rte_ixgbevf_pmd_init(const char *name __rte_unused, const char *param __rte_unused)
 {
-       DEBUGFUNC("rte_ixgbevf_pmd_init");
+       PMD_INIT_FUNC_TRACE();
 
        rte_eth_driver_register(&rte_ixgbevf_pmd);
        return (0);
@@ -1411,9 +1445,9 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
        /* IXGBE devices don't support half duplex */
        if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
                        (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
-               PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu\n",
-                               dev->data->dev_conf.link_duplex,
-                               dev->data->port_id);
+               PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu",
+                            dev->data->dev_conf.link_duplex,
+                            dev->data->port_id);
                return -EINVAL;
        }
 
@@ -1437,7 +1471,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
        /* This can fail when allocating mbufs for descriptor rings */
        err = ixgbe_dev_rx_init(dev);
        if (err) {
-               PMD_INIT_LOG(ERR, "Unable to initialize RX hardware\n");
+               PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
                goto error;
        }
 
@@ -1484,13 +1518,13 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
                speed = IXGBE_LINK_SPEED_10GB_FULL;
                break;
        default:
-               PMD_INIT_LOG(ERR, "Invalid link_speed (%hu) for port %hhu\n",
-                               dev->data->dev_conf.link_speed,
-                               dev->data->port_id);
+               PMD_INIT_LOG(ERR, "Invalid link_speed (%hu) for port %hhu",
+                            dev->data->dev_conf.link_speed,
+                            dev->data->port_id);
                goto error;
        }
 
-       err = ixgbe_setup_link(hw, speed, negotiate, link_up);
+       err = ixgbe_setup_link(hw, speed, link_up);
        if (err)
                goto error;
 
@@ -1575,6 +1609,9 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
 
        ixgbe_dev_clear_queues(dev);
 
+       /* Clear stored conf */
+       dev->data->scattered_rx = 0;
+
        /* Clear recorded link status */
        memset(&link, 0, sizeof(link));
        rte_ixgbe_dev_atomic_write_link_status(dev, &link);
@@ -1592,10 +1629,8 @@ ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
 #ifdef RTE_NIC_BYPASS
                if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
                        /* Not suported in bypass mode */
-                       PMD_INIT_LOG(ERR,
-                               "\nSet link up is not supported "
-                               "by device id 0x%x\n",
-                               hw->device_id);
+                       PMD_INIT_LOG(ERR, "Set link up is not supported "
+                                    "by device id 0x%x", hw->device_id);
                        return -ENOTSUP;
                }
 #endif
@@ -1604,8 +1639,8 @@ ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
                return 0;
        }
 
-       PMD_INIT_LOG(ERR, "\nSet link up is not supported by device id 0x%x\n",
-               hw->device_id);
+       PMD_INIT_LOG(ERR, "Set link up is not supported by device id 0x%x",
+                    hw->device_id);
        return -ENOTSUP;
 }
 
@@ -1621,10 +1656,8 @@ ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
 #ifdef RTE_NIC_BYPASS
                if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
                        /* Not suported in bypass mode */
-                       PMD_INIT_LOG(ERR,
-                               "\nSet link down is not supported "
-                               "by device id 0x%x\n",
-                                hw->device_id);
+                       PMD_INIT_LOG(ERR, "Set link down is not supported "
+                                    "by device id 0x%x", hw->device_id);
                        return -ENOTSUP;
                }
 #endif
@@ -1633,9 +1666,8 @@ ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
                return 0;
        }
 
-       PMD_INIT_LOG(ERR,
-               "\nSet link down is not supported by device id 0x%x\n",
-                hw->device_id);
+       PMD_INIT_LOG(ERR, "Set link down is not supported by device id 0x%x",
+                    hw->device_id);
        return -ENOTSUP;
 }
 
@@ -1930,6 +1962,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                dev_info->max_vmdq_pools = ETH_16_POOLS;
        else
                dev_info->max_vmdq_pools = ETH_64_POOLS;
+       dev_info->vmdq_queue_num = dev_info->max_rx_queues;
        dev_info->rx_offload_capa =
                DEV_RX_OFFLOAD_VLAN_STRIP |
                DEV_RX_OFFLOAD_IPV4_CKSUM |
@@ -1940,7 +1973,81 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                DEV_TX_OFFLOAD_IPV4_CKSUM  |
                DEV_TX_OFFLOAD_UDP_CKSUM   |
                DEV_TX_OFFLOAD_TCP_CKSUM   |
-               DEV_TX_OFFLOAD_SCTP_CKSUM;
+               DEV_TX_OFFLOAD_SCTP_CKSUM  |
+               DEV_TX_OFFLOAD_TCP_TSO;
+
+       dev_info->default_rxconf = (struct rte_eth_rxconf) {
+               .rx_thresh = {
+                       .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
+                       .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
+                       .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
+               },
+               .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
+               .rx_drop_en = 0,
+       };
+
+       dev_info->default_txconf = (struct rte_eth_txconf) {
+               .tx_thresh = {
+                       .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
+                       .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
+                       .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
+               },
+               .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
+               .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
+               .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
+                               ETH_TXQ_FLAGS_NOOFFLOADS,
+       };
+       dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+}
+
+static void
+ixgbevf_dev_info_get(struct rte_eth_dev *dev,
+                    struct rte_eth_dev_info *dev_info)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
+       dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
+       dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
+       dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS reg */
+       dev_info->max_mac_addrs = hw->mac.num_rar_entries;
+       dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
+       dev_info->max_vfs = dev->pci_dev->max_vfs;
+       if (hw->mac.type == ixgbe_mac_82598EB)
+               dev_info->max_vmdq_pools = ETH_16_POOLS;
+       else
+               dev_info->max_vmdq_pools = ETH_64_POOLS;
+       dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
+                               DEV_RX_OFFLOAD_IPV4_CKSUM |
+                               DEV_RX_OFFLOAD_UDP_CKSUM  |
+                               DEV_RX_OFFLOAD_TCP_CKSUM;
+       dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
+                               DEV_TX_OFFLOAD_IPV4_CKSUM  |
+                               DEV_TX_OFFLOAD_UDP_CKSUM   |
+                               DEV_TX_OFFLOAD_TCP_CKSUM   |
+                               DEV_TX_OFFLOAD_SCTP_CKSUM;
+
+       dev_info->default_rxconf = (struct rte_eth_rxconf) {
+               .rx_thresh = {
+                       .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
+                       .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
+                       .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
+               },
+               .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
+               .rx_drop_en = 0,
+       };
+
+       dev_info->default_txconf = (struct rte_eth_txconf) {
+               .tx_thresh = {
+                       .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
+                       .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
+                       .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
+               },
+               .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
+               .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
+               .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
+                               ETH_TXQ_FLAGS_NOOFFLOADS,
+       };
 }
 
 /* return 0 means link status changed, -1 means not changed */
@@ -2173,7 +2280,7 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
        struct rte_eth_link link;
        int intr_enable_delay = false;
 
-       PMD_DRV_LOG(DEBUG, "intr action type %d\n", intr->flags);
+       PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
 
        if (intr->flags & IXGBE_FLAG_MAILBOX) {
                ixgbe_pf_mbx_process(dev);
@@ -2250,7 +2357,7 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
                _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
        }
 
-       PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]\n", eicr);
+       PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
        ixgbe_enable_intr(dev);
        rte_intr_enable(&(dev->pci_dev->intr_handle));
 }
@@ -2309,6 +2416,7 @@ ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
        fc_conf->high_water = hw->fc.high_water[0];
        fc_conf->low_water = hw->fc.low_water[0];
        fc_conf->send_xon = hw->fc.send_xon;
+       fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
 
        /*
         * Return rx_pause status according to actual setting of
@@ -2360,8 +2468,10 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
        PMD_INIT_FUNC_TRACE();
 
        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       if (fc_conf->autoneg != !hw->fc.disable_fc_autoneg)
+               return -ENOTSUP;
        rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
-       PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x \n", rx_buf_size);
+       PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
 
        /*
         * At least reserve one Ethernet frame for watermark
@@ -2370,8 +2480,8 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
        max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
        if ((fc_conf->high_water > max_high_water) ||
                (fc_conf->high_water < fc_conf->low_water)) {
-               PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB\n");
-               PMD_INIT_LOG(ERR, "High_water must <=  0x%x\n", max_high_water);
+               PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
+               PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
                return (-EINVAL);
        }
 
@@ -2403,7 +2513,7 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
                return 0;
        }
 
-       PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x \n", err);
+       PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x", err);
        return -EIO;
 }
 
@@ -2433,13 +2543,13 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
        if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
                 /* High/Low water can not be 0 */
                if( (!hw->fc.high_water[tc_num])|| (!hw->fc.low_water[tc_num])) {
-                       PMD_INIT_LOG(ERR,"Invalid water mark configuration\n");
+                       PMD_INIT_LOG(ERR, "Invalid water mark configuration");
                        ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
                        goto out;
                }
 
                if(hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
-                       PMD_INIT_LOG(ERR,"Invalid water mark configuration\n");
+                       PMD_INIT_LOG(ERR, "Invalid water mark configuration");
                        ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
                        goto out;
                }
@@ -2505,7 +2615,7 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
                fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
                break;
        default:
-               DEBUGOUT("Flow control param set incorrectly\n");
+               PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly");
                ret_val = IXGBE_ERR_CONFIG;
                goto out;
                break;
@@ -2583,16 +2693,16 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p
        ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
        tc_num = map[pfc_conf->priority];
        rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num));
-       PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x \n", rx_buf_size);
+       PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
        /*
         * At least reserve one Ethernet frame for watermark
         * high_water/low_water in kilo bytes for ixgbe
         */
        max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
        if ((pfc_conf->fc.high_water > max_high_water) ||
-               (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
-               PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB\n");
-               PMD_INIT_LOG(ERR, "High_water must <=  0x%x\n", max_high_water);
+           (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
+               PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
+               PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
                return (-EINVAL);
        }
 
@@ -2608,44 +2718,48 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p
        if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
                return 0;
 
-       PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x \n", err);
+       PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err);
        return -EIO;
 }
 
 static int
 ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
-                               struct rte_eth_rss_reta *reta_conf)
+                         struct rte_eth_rss_reta_entry64 *reta_conf,
+                         uint16_t reta_size)
 {
-       uint8_t i,j,mask;
-       uint32_t reta;
-       struct ixgbe_hw *hw =
-                       IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint8_t i, j, mask;
+       uint32_t reta, r;
+       uint16_t idx, shift;
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        PMD_INIT_FUNC_TRACE();
-       /*
-       * Update Redirection Table RETA[n],n=0...31,The redirection table has
-       * 128-entries in 32 registers
-        */
-       for(i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
-               if (i < ETH_RSS_RETA_NUM_ENTRIES/2)
-                       mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
+       if (reta_size != ETH_RSS_RETA_SIZE_128) {
+               PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+                       "(%d) doesn't match the number hardware can support "
+                       "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
+               return -EINVAL;
+       }
+
+       for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
+               idx = i / RTE_RETA_GROUP_SIZE;
+               shift = i % RTE_RETA_GROUP_SIZE;
+               mask = (uint8_t)((reta_conf[idx].mask >> shift) &
+                                               IXGBE_4_BIT_MASK);
+               if (!mask)
+                       continue;
+               if (mask == IXGBE_4_BIT_MASK)
+                       r = 0;
                else
-                       mask = (uint8_t)((reta_conf->mask_hi >>
-                               (i - ETH_RSS_RETA_NUM_ENTRIES/2)) & 0xF);
-               if (mask != 0) {
-                       reta = 0;
-                       if (mask != 0xF)
-                               reta = IXGBE_READ_REG(hw,IXGBE_RETA(i >> 2));
-
-                       for (j = 0; j < 4; j++) {
-                               if (mask & (0x1 << j)) {
-                                       if (mask != 0xF)
-                                               reta &= ~(0xFF << 8 * j);
-                                       reta |= reta_conf->reta[i + j] << 8*j;
-                               }
-                       }
-                       IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2),reta);
+                       r = IXGBE_READ_REG(hw, IXGBE_RETA(i >> 2));
+               for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) {
+                       if (mask & (0x1 << j))
+                               reta |= reta_conf[idx].reta[shift + j] <<
+                                                       (CHAR_BIT * j);
+                       else
+                               reta |= r & (IXGBE_8_BIT_MASK <<
+                                               (CHAR_BIT * j));
                }
+               IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
        }
 
        return 0;
@@ -2653,32 +2767,36 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
 
 static int
 ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
-                               struct rte_eth_rss_reta *reta_conf)
+                        struct rte_eth_rss_reta_entry64 *reta_conf,
+                        uint16_t reta_size)
 {
-       uint8_t i,j,mask;
+       uint8_t i, j, mask;
        uint32_t reta;
-       struct ixgbe_hw *hw =
-                       IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint16_t idx, shift;
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        PMD_INIT_FUNC_TRACE();
-       /*
-        * Read Redirection Table RETA[n],n=0...31,The redirection table has
-        * 128-entries in 32 registers
-        */
-       for(i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
-               if (i < ETH_RSS_RETA_NUM_ENTRIES/2)
-                       mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
-               else
-                       mask = (uint8_t)((reta_conf->mask_hi >>
-                               (i - ETH_RSS_RETA_NUM_ENTRIES/2)) & 0xF);
-
-               if (mask != 0) {
-                       reta = IXGBE_READ_REG(hw,IXGBE_RETA(i >> 2));
-                       for (j = 0; j < 4; j++) {
-                               if (mask & (0x1 << j))
-                                       reta_conf->reta[i + j] =
-                                               (uint8_t)((reta >> 8 * j) & 0xFF);
-                       }
+       if (reta_size != ETH_RSS_RETA_SIZE_128) {
+               PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+                       "(%d) doesn't match the number hardware can support "
+                               "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
+               return -EINVAL;
+       }
+
+       for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IXGBE_4_BIT_WIDTH) {
+               idx = i / RTE_RETA_GROUP_SIZE;
+               shift = i % RTE_RETA_GROUP_SIZE;
+               mask = (uint8_t)((reta_conf[idx].mask >> shift) &
+                                               IXGBE_4_BIT_MASK);
+               if (!mask)
+                       continue;
+
+               reta = IXGBE_READ_REG(hw, IXGBE_RETA(i >> 2));
+               for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) {
+                       if (mask & (0x1 << j))
+                               reta_conf[idx].reta[shift + j] =
+                                       ((reta >> (CHAR_BIT * j)) &
+                                               IXGBE_8_BIT_MASK);
                }
        }
 
@@ -2703,13 +2821,59 @@ ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
        ixgbe_clear_rar(hw, index);
 }
 
+static int
+ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+       uint32_t hlreg0;
+       uint32_t maxfrs;
+       struct ixgbe_hw *hw;
+       struct rte_eth_dev_info dev_info;
+       uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+
+       ixgbe_dev_info_get(dev, &dev_info);
+
+       /* check that mtu is within the allowed range */
+       if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
+               return -EINVAL;
+
+       /* refuse mtu that requires the support of scattered packets when this
+        * feature has not been enabled before. */
+       if (!dev->data->scattered_rx &&
+           (frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
+            dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
+               return -EINVAL;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+
+       /* switch to jumbo mode if needed */
+       if (frame_size > ETHER_MAX_LEN) {
+               dev->data->dev_conf.rxmode.jumbo_frame = 1;
+               hlreg0 |= IXGBE_HLREG0_JUMBOEN;
+       } else {
+               dev->data->dev_conf.rxmode.jumbo_frame = 0;
+               hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
+       }
+       IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+
+       /* update max frame size */
+       dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+       maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
+       maxfrs &= 0x0000FFFF;
+       maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
+       IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
+
+       return 0;
+}
+
 /*
  * Virtual Function operations
  */
 static void
 ixgbevf_intr_disable(struct ixgbe_hw *hw)
 {
-       PMD_INIT_LOG(DEBUG, "ixgbevf_intr_disable");
+       PMD_INIT_FUNC_TRACE();
 
        /* Clear interrupt mask to stop from interrupts being generated */
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
@@ -2722,8 +2886,8 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 {
        struct rte_eth_conf* conf = &dev->data->dev_conf;
 
-       PMD_INIT_LOG(DEBUG, "\nConfigured Virtual Function port id: %d\n",
-               dev->data->port_id);
+       PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
+                    dev->data->port_id);
 
        /*
         * VF has no ability to enable/disable HW CRC
@@ -2731,12 +2895,12 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
         */
 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
        if (!conf->rxmode.hw_strip_crc) {
-               PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip\n");
+               PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip");
                conf->rxmode.hw_strip_crc = 1;
        }
 #else
        if (conf->rxmode.hw_strip_crc) {
-               PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip\n");
+               PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip");
                conf->rxmode.hw_strip_crc = 0;
        }
 #endif
@@ -2751,7 +2915,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int err, mask = 0;
 
-       PMD_INIT_LOG(DEBUG, "ixgbevf_dev_start");
+       PMD_INIT_FUNC_TRACE();
 
        hw->mac.ops.reset_hw(hw);
 
@@ -2763,7 +2927,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
        /* This can fail when allocating mbufs for descriptor rings */
        err = ixgbevf_dev_rx_init(dev);
        if (err) {
-               PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)\n", err);
+               PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err);
                ixgbe_dev_clear_queues(dev);
                return err;
        }
@@ -2786,7 +2950,7 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-       PMD_INIT_LOG(DEBUG, "ixgbevf_dev_stop");
+       PMD_INIT_FUNC_TRACE();
 
        hw->adapter_stopped = TRUE;
        ixgbe_stop_adapter(hw);
@@ -2797,6 +2961,9 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev)
          */
        ixgbevf_set_vfta_all(dev,0);
 
+       /* Clear stored conf */
+       dev->data->scattered_rx = 0;
+
        ixgbe_dev_clear_queues(dev);
 }
 
@@ -2805,7 +2972,7 @@ ixgbevf_dev_close(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-       PMD_INIT_LOG(DEBUG, "ixgbevf_dev_close");
+       PMD_INIT_FUNC_TRACE();
 
        ixgbe_reset_hw(hw);
 
@@ -2914,7 +3081,7 @@ ixgbe_vmdq_mode_check(struct ixgbe_hw *hw)
        /* we only need to do this if VMDq is enabled */
        reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
        if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
-               PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting\n");
+               PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting");
                return (-1);
        }
 
@@ -3031,6 +3198,26 @@ ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
        return 0;
 
 }
+
+uint32_t
+ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
+{
+       uint32_t new_val = orig_val;
+
+       if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
+               new_val |= IXGBE_VMOLR_AUPE;
+       if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
+               new_val |= IXGBE_VMOLR_ROMPE;
+       if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+               new_val |= IXGBE_VMOLR_ROPE;
+       if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+               new_val |= IXGBE_VMOLR_BAM;
+       if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+               new_val |= IXGBE_VMOLR_MPE;
+
+       return new_val;
+}
+
 static int
 ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
                               uint16_t rx_mask, uint8_t on)
@@ -3043,22 +3230,13 @@ ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
 
        if (hw->mac.type == ixgbe_mac_82598EB) {
                PMD_INIT_LOG(ERR, "setting VF receive mode set should be done"
-                       " on 82599 hardware and newer\n");
+                            " on 82599 hardware and newer");
                return (-ENOTSUP);
        }
        if (ixgbe_vmdq_mode_check(hw) < 0)
                return (-ENOTSUP);
 
-       if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG )
-               val |= IXGBE_VMOLR_AUPE;
-       if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC )
-               val |= IXGBE_VMOLR_ROMPE;
-       if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
-               val |= IXGBE_VMOLR_ROPE;
-       if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
-               val |= IXGBE_VMOLR_BAM;
-       if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
-               val |= IXGBE_VMOLR_MPE;
+       val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);
 
        if (on)
                vmolr |= val;
@@ -3884,6 +4062,40 @@ ixgbe_get_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
        return -ENOENT;
 }
 
+static int
+ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+       struct ixgbe_hw *hw;
+       uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
+               return -EINVAL;
+
+       /* refuse mtu that requires the support of scattered packets when this
+        * feature has not been enabled before. */
+       if (!dev->data->scattered_rx &&
+           (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
+            dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
+               return -EINVAL;
+
+       /*
+        * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU
+        * request of the version 2.0 of the mailbox API.
+        * For now, use the IXGBE_VF_SET_LPE request of the version 1.0
+        * of the mailbox API.
+        * This call to IXGBE_SET_LPE action won't work with ixgbe pf drivers
+        * prior to 3.11.33 which contains the following change:
+        * "ixgbe: Enable jumbo frames support w/ SR-IOV"
+        */
+       ixgbevf_rlpml_set_vf(hw, max_frame);
+
+       /* update max frame size */
+       dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
+       return 0;
+}
+
 static struct rte_driver rte_ixgbe_driver = {
        .type = PMD_PDEV,
        .init = rte_ixgbe_pmd_init,
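
For reference, an application consuming the DEV_TX_OFFLOAD_TCP_TSO capability advertised
above describes each TSO packet through the generic mbuf TX-offload fields before calling
rte_eth_tx_burst(). The snippet below is only an illustrative sketch of that contract
(PKT_TX_TCP_SEG, tso_segsz, l2_len/l3_len/l4_len); it is not part of this patch, and the
exact flag and field names should be checked against the rte_mbuf.h of the release in use.

#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>

/*
 * Illustrative only: mark one IPv4/TCP mbuf for TSO. Assumes the Ethernet,
 * IPv4 and TCP headers are already written at the start of the mbuf data and
 * that the port reported DEV_TX_OFFLOAD_TCP_TSO in its tx_offload_capa.
 */
static void
prepare_tso_mbuf(struct rte_mbuf *m, uint16_t mss)
{
        /* header sizes the PMD uses to build the TSO context descriptor */
        m->l2_len = sizeof(struct ether_hdr);
        m->l3_len = sizeof(struct ipv4_hdr);
        m->l4_len = sizeof(struct tcp_hdr);
        m->tso_segsz = mss;     /* TCP payload bytes per generated segment */

        /*
         * Request segmentation plus IP checksum offload, since each generated
         * segment gets a fresh IPv4 header. The TCP checksum field is expected
         * to already hold the pseudo-header checksum (e.g. computed with
         * rte_ipv4_phdr_cksum()) before the mbuf is handed to the driver.
         */
        m->ol_flags |= PKT_TX_TCP_SEG | PKT_TX_IP_CKSUM | PKT_TX_IPV4;
}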