ethdev: get DCB information
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 3687ebf..e9ce466 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -190,9 +190,7 @@ static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
                        uint16_t reta_size);
 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
 static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
-#ifdef RTE_NEXT_ABI
 static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
-#endif
 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
 static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
@@ -227,14 +225,12 @@ static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
 static void ixgbevf_dev_interrupt_handler(struct rte_intr_handle *handle,
                                          void *param);
-#ifdef RTE_NEXT_ABI
 static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
                                            uint16_t queue_id);
 static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
                                             uint16_t queue_id);
 static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
                                 uint8_t queue, uint8_t msix_vector);
-#endif
 static void ixgbevf_configure_msix(struct rte_eth_dev *dev);
 
 /* For Eth VMDQ APIs support */
@@ -252,14 +248,12 @@ static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
                uint8_t rule_id, uint8_t on);
 static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
                uint8_t rule_id);
-#ifdef RTE_NEXT_ABI
 static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
                                          uint16_t queue_id);
 static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
                                           uint16_t queue_id);
 static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
                               uint8_t queue, uint8_t msix_vector);
-#endif
 static void ixgbe_configure_msix(struct rte_eth_dev *dev);
 
 static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
@@ -310,6 +304,8 @@ static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
 static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
                                      struct ether_addr *mc_addr_set,
                                      uint32_t nb_mc_addr);
+static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
+                                  struct rte_eth_dcb_info *dcb_info);
 
 static int ixgbe_get_reg_length(struct rte_eth_dev *dev);
 static int ixgbe_get_regs(struct rte_eth_dev *dev,
@@ -335,10 +331,10 @@ static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
 /*
  * Define VF Stats MACRO for Non "cleared on read" register
  */
-#define UPDATE_VF_STAT(reg, last, cur)                         \
+#define UPDATE_VF_STAT(reg, last, cur)                          \
 {                                                               \
        uint32_t latest = IXGBE_READ_REG(hw, reg);              \
-       cur += latest - last;                                   \
+       cur += (latest - last) & UINT_MAX;                      \
        last = latest;                                          \
 }
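
The "& UINT_MAX" mask added to UPDATE_VF_STAT pins the delta to 32 bits, so
the accumulation stays correct across counter wraparound: unsigned
subtraction is modulo 2^32 even when the register rolled over between
polls. A minimal standalone sketch of the same idiom (read_reg32 is a
hypothetical stand-in for IXGBE_READ_REG):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for IXGBE_READ_REG: the 32-bit counter has
     * wrapped past UINT32_MAX since the previous poll. */
    static uint32_t read_reg32(void)
    {
            return 5;
    }

    int main(void)
    {
            uint64_t cur = 0;
            uint32_t last = 4294967290u;    /* value from the previous poll */
            uint32_t latest = read_reg32();

            /* Unsigned subtraction wraps modulo 2^32: the delta is 11
             * even though latest < last. */
            cur += (uint32_t)(latest - last);
            last = latest;                  /* as the macro does */
            printf("accumulated delta: %llu\n", (unsigned long long)cur);
            return 0;
    }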
 
@@ -420,10 +416,8 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
        .tx_queue_start       = ixgbe_dev_tx_queue_start,
        .tx_queue_stop        = ixgbe_dev_tx_queue_stop,
        .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
-#ifdef RTE_NEXT_ABI
        .rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable,
        .rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable,
-#endif
        .rx_queue_release     = ixgbe_dev_rx_queue_release,
        .rx_queue_count       = ixgbe_dev_rx_queue_count,
        .rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
@@ -473,6 +467,7 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
        .get_eeprom_length    = ixgbe_get_eeprom_length,
        .get_eeprom           = ixgbe_get_eeprom,
        .set_eeprom           = ixgbe_set_eeprom,
+       .get_dcb_info         = ixgbe_dev_get_dcb_info,
 };
 
 /*
@@ -497,16 +492,18 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
        .rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
        .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
        .tx_queue_release     = ixgbe_dev_tx_queue_release,
-#ifdef RTE_NEXT_ABI
        .rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
        .rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable,
-#endif
        .mac_addr_add         = ixgbevf_add_mac_addr,
        .mac_addr_remove      = ixgbevf_remove_mac_addr,
        .set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
        .mac_addr_set         = ixgbevf_set_default_mac_addr,
        .get_reg_length       = ixgbevf_get_reg_length,
        .get_reg              = ixgbevf_get_regs,
+       .reta_update          = ixgbe_dev_rss_reta_update,
+       .reta_query           = ixgbe_dev_rss_reta_query,
+       .rss_hash_update      = ixgbe_dev_rss_hash_update,
+       .rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
 };
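
With reta_update/reta_query and the RSS hash ops now wired into
ixgbevf_eth_dev_ops, a VF port answers the generic ethdev RSS calls. A
hedged application-side sketch (print_rss_hf is a hypothetical helper and
port_id is assumed to be a bound ixgbe VF):

    #include <stdio.h>
    #include <inttypes.h>
    #include <rte_ethdev.h>

    /* Hypothetical helper: query and print the RSS hash functions
     * currently enabled on the port through the new VF ops. */
    static void print_rss_hf(uint8_t port_id)
    {
            struct rte_eth_rss_conf rss_conf = { .rss_key = NULL }; /* query only */

            if (rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf) == 0)
                    printf("port %u rss_hf: 0x%" PRIx64 "\n",
                           port_id, rss_conf.rss_hf);
    }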
 
 /* store statistics names and its offset in stats structure */
@@ -704,7 +701,7 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
                (hw->mac.type != ixgbe_mac_X550EM_x))
                return -ENOSYS;
 
-       PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d",
+       PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
                     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
                     queue_id, stat_idx);
 
@@ -730,20 +727,20 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
        else
                stat_mappings->rqsmr[n] |= qsmr_mask;
 
-       PMD_INIT_LOG(INFO, "Set port %d, %s queue_id %d to stat index %d",
+       PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
                     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
                     queue_id, stat_idx);
-       PMD_INIT_LOG(INFO, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
+       PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
                     is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);
 
        /* Now write the mapping in the appropriate register */
        if (is_rx) {
-               PMD_INIT_LOG(INFO, "Write 0x%x to RX IXGBE stat mapping reg:%d",
+               PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d",
                             stat_mappings->rqsmr[n], n);
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
        }
        else {
-               PMD_INIT_LOG(INFO, "Write 0x%x to TX IXGBE stat mapping reg:%d",
+               PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d",
                             stat_mappings->tqsm[n], n);
                IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
        }
@@ -887,8 +884,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
                        ixgbe_set_tx_function(eth_dev, txq);
                } else {
                        /* Use default TX function if we get here */
-                       PMD_INIT_LOG(INFO, "No TX queues configured yet. "
-                                          "Using default TX function.");
+                       PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
+                                            "Using default TX function.");
                }
 
                ixgbe_set_rx_function(eth_dev);
@@ -1470,7 +1467,7 @@ ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
 
        if (hw->mac.type == ixgbe_mac_82598EB) {
                /* No queue level support */
-               PMD_INIT_LOG(INFO, "82598EB not support queue level hw strip");
+               PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip");
                return;
        }
        else {
@@ -1494,7 +1491,7 @@ ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
 
        if (hw->mac.type == ixgbe_mac_82598EB) {
                /* No queue level supported */
-               PMD_INIT_LOG(INFO, "82598EB not support queue level hw strip");
+               PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip");
                return;
        }
        else {
@@ -1645,6 +1642,169 @@ ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
        IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
 }
 
+static int
+ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
+{
+       switch (nb_rx_q) {
+       case 1:
+       case 2:
+               RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
+               break;
+       case 4:
+               RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
+       RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = dev->pci_dev->max_vfs * nb_rx_q;
+
+       return 0;
+}
+
+static int
+ixgbe_check_mq_mode(struct rte_eth_dev *dev)
+{
+       struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+       uint16_t nb_rx_q = dev->data->nb_rx_queues;
+       uint16_t nb_tx_q = dev->data->nb_tx_queues;
+
+       if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
+               /* check multi-queue mode */
+               switch (dev_conf->rxmode.mq_mode) {
+               case ETH_MQ_RX_VMDQ_DCB:
+               case ETH_MQ_RX_VMDQ_DCB_RSS:
+                       /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
+                       PMD_INIT_LOG(ERR, "SRIOV active,"
+                                       " unsupported mq_mode rx %d.",
+                                       dev_conf->rxmode.mq_mode);
+                       return -EINVAL;
+               case ETH_MQ_RX_RSS:
+               case ETH_MQ_RX_VMDQ_RSS:
+                       dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
+                       if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
+                               if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
+                                       PMD_INIT_LOG(ERR, "SRIOV is active,"
+                                               " invalid queue number"
+                                               " for VMDQ RSS, allowed"
+                                               " values are 1, 2 or 4.");
+                                       return -EINVAL;
+                               }
+                       break;
+               case ETH_MQ_RX_VMDQ_ONLY:
+               case ETH_MQ_RX_NONE:
+                       /* if no mq mode is configured, use the default scheme */
+                       dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
+                       if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
+                               RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
+                       break;
+               default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB */
+                       /* SRIOV only works in VMDq enable mode */
+                       PMD_INIT_LOG(ERR, "SRIOV is active,"
+                                       " wrong mq_mode rx %d.",
+                                       dev_conf->rxmode.mq_mode);
+                       return -EINVAL;
+               }
+
+               switch (dev_conf->txmode.mq_mode) {
+               case ETH_MQ_TX_VMDQ_DCB:
+                       /* DCB VMDQ in SRIOV mode, not implemented yet */
+                       PMD_INIT_LOG(ERR, "SRIOV is active,"
+                                       " unsupported VMDQ mq_mode tx %d.",
+                                       dev_conf->txmode.mq_mode);
+                       return -EINVAL;
+               default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
+                       dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
+                       break;
+               }
+
+               /* check valid queue number */
+               if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
+                   (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
+                       PMD_INIT_LOG(ERR, "SRIOV is active,"
+                                       " queue number must be less than or equal to %d.",
+                                       RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
+                       return -EINVAL;
+               }
+       } else {
+               /* check configuration for vmdq+dcb mode */
+               if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
+                       const struct rte_eth_vmdq_dcb_conf *conf;
+
+                       if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
+                               PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
+                                               IXGBE_VMDQ_DCB_NB_QUEUES);
+                               return -EINVAL;
+                       }
+                       conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
+                       if (!(conf->nb_queue_pools == ETH_16_POOLS ||
+                              conf->nb_queue_pools == ETH_32_POOLS)) {
+                               PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
+                                               " nb_queue_pools must be %d or %d.",
+                                               ETH_16_POOLS, ETH_32_POOLS);
+                               return -EINVAL;
+                       }
+               }
+               if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+                       const struct rte_eth_vmdq_dcb_tx_conf *conf;
+
+                       if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
+                               PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
+                                                IXGBE_VMDQ_DCB_NB_QUEUES);
+                               return -EINVAL;
+                       }
+                       conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
+                       if (!(conf->nb_queue_pools == ETH_16_POOLS ||
+                              conf->nb_queue_pools == ETH_32_POOLS)) {
+                               PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
+                                               " nb_queue_pools must be"
+                                               " %d or %d.",
+                                               ETH_16_POOLS, ETH_32_POOLS);
+                               return -EINVAL;
+                       }
+               }
+
+               /* For DCB mode check our configuration before we go further */
+               if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
+                       const struct rte_eth_dcb_rx_conf *conf;
+
+                       if (nb_rx_q != IXGBE_DCB_NB_QUEUES) {
+                               PMD_INIT_LOG(ERR, "DCB selected, nb_rx_q != %d.",
+                                                IXGBE_DCB_NB_QUEUES);
+                               return -EINVAL;
+                       }
+                       conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
+                       if (!(conf->nb_tcs == ETH_4_TCS ||
+                              conf->nb_tcs == ETH_8_TCS)) {
+                               PMD_INIT_LOG(ERR, "DCB selected, nb_tcs must"
+                                               " be %d or %d.",
+                                               ETH_4_TCS, ETH_8_TCS);
+                               return -EINVAL;
+                       }
+               }
+
+               if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+                       const struct rte_eth_dcb_tx_conf *conf;
+
+                       if (nb_tx_q != IXGBE_DCB_NB_QUEUES) {
+                               PMD_INIT_LOG(ERR, "DCB, nb_tx_q != %d.",
+                                                IXGBE_DCB_NB_QUEUES);
+                               return -EINVAL;
+                       }
+                       conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
+                       if (!(conf->nb_tcs == ETH_4_TCS ||
+                              conf->nb_tcs == ETH_8_TCS)) {
+                               PMD_INIT_LOG(ERR, "DCB selected, nb_tcs must"
+                                               " be %d or %d.",
+                                               ETH_4_TCS, ETH_8_TCS);
+                               return -EINVAL;
+                       }
+               }
+       }
+       return 0;
+}
+
 static int
 ixgbe_dev_configure(struct rte_eth_dev *dev)
 {
@@ -1652,8 +1812,16 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
                IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
        struct ixgbe_adapter *adapter =
                (struct ixgbe_adapter *)dev->data->dev_private;
+       int ret;
 
        PMD_INIT_FUNC_TRACE();
+       /* multiple queue mode checking */
+       ret = ixgbe_check_mq_mode(dev);
+       if (ret != 0) {
+               PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.",
+                           ret);
+               return ret;
+       }
 
        /* set flag to update link status after init */
        intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
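
For reference, a PF configuration that takes the VF RSS branch of
ixgbe_check_mq_mode() could look like the hedged sketch below; two queues
per pool is one of the counts (1, 2 or 4) that ixgbe_check_vf_rss_rxq_num()
accepts, and configure_vmdq_rss is a hypothetical helper:

    #include <rte_ethdev.h>

    /* Hedged sketch: SRIOV-mode VMDQ+RSS with two RX/TX queues per pool. */
    static int configure_vmdq_rss(uint8_t port_id)
    {
            struct rte_eth_conf conf = {
                    .rxmode = { .mq_mode = ETH_MQ_RX_VMDQ_RSS },
                    .rx_adv_conf.rss_conf = { .rss_hf = ETH_RSS_IP },
            };

            /* ixgbe_check_mq_mode() runs from inside this call. */
            return rte_eth_dev_configure(port_id, 2, 2, &conf);
    }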
@@ -1680,9 +1848,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
        struct ixgbe_vf_info *vfinfo =
                *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
-#ifdef RTE_NEXT_ABI
        uint32_t intr_vector = 0;
-#endif
        int err, link_up = 0, negotiate = 0;
        uint32_t speed = 0;
        int mask = 0;
@@ -1715,7 +1881,6 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
        /* configure PF module if SRIOV enabled */
        ixgbe_pf_host_configure(dev);
 
-#ifdef RTE_NEXT_ABI
        /* check and configure queue intr-vector mapping */
        if (dev->data->dev_conf.intr_conf.rxq != 0)
                intr_vector = dev->data->nb_rx_queues;
@@ -1734,7 +1899,6 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
                        return -ENOMEM;
                }
        }
-#endif
 
        /* configure msix for sleep until rx interrupt */
        ixgbe_configure_msix(dev);
@@ -1827,11 +1991,9 @@ skip_link_setup:
                                     " no intr multiplex\n");
        }
 
-#ifdef RTE_NEXT_ABI
        /* check if rxq interrupt is enabled */
        if (dev->data->dev_conf.intr_conf.rxq != 0)
                ixgbe_dev_rxq_interrupt_setup(dev);
-#endif
 
        /* enable uio/vfio intr/eventfd mapping */
        rte_intr_enable(intr_handle);
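
With the RTE_NEXT_ABI guards dropped, the rxq interrupt path is always
compiled in. An application opts in by setting intr_conf.rxq before
configuring the port and then arming individual queues; a hedged sketch
(arm_rxq_intr is a hypothetical helper, queue setup elided):

    #include <rte_ethdev.h>

    /* Hedged sketch: request per-queue RX interrupts, then arm queue 0. */
    static int arm_rxq_intr(uint8_t port_id)
    {
            struct rte_eth_conf conf = {
                    .intr_conf = { .rxq = 1 }, /* drives the intr_vector setup above */
            };
            int ret = rte_eth_dev_configure(port_id, 1, 1, &conf);

            if (ret < 0)
                    return ret;
            /* ... rx/tx queue setup and rte_eth_dev_start() ... */
            return rte_eth_dev_rx_intr_enable(port_id, 0);
    }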
@@ -1942,14 +2104,12 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
        memset(filter_info->fivetuple_mask, 0,
                sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
 
-#ifdef RTE_NEXT_ABI
        /* Clean datapath event and queue/vec mapping */
        rte_intr_efd_disable(intr_handle);
        if (intr_handle->intr_vec != NULL) {
                rte_free(intr_handle->intr_vec);
                intr_handle->intr_vec = NULL;
        }
-#endif
 }
 
 /*
@@ -2057,19 +2217,25 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw, struct ixgbe_hw_stats
                hw_stats->mpc[i] += mp;
                /* Running comprehensive total for stats display */
                *total_missed_rx += hw_stats->mpc[i];
-               if (hw->mac.type == ixgbe_mac_82598EB)
+               if (hw->mac.type == ixgbe_mac_82598EB) {
                        hw_stats->rnbc[i] +=
                            IXGBE_READ_REG(hw, IXGBE_RNBC(i));
+                       hw_stats->pxonrxc[i] +=
+                               IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
+                       hw_stats->pxoffrxc[i] +=
+                               IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+               } else {
+                       hw_stats->pxonrxc[i] +=
+                               IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
+                       hw_stats->pxoffrxc[i] +=
+                               IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
+                       hw_stats->pxon2offc[i] +=
+                               IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
+               }
                hw_stats->pxontxc[i] +=
                    IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
-               hw_stats->pxonrxc[i] +=
-                   IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
                hw_stats->pxofftxc[i] +=
                    IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
-               hw_stats->pxoffrxc[i] +=
-                   IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
-               hw_stats->pxon2offc[i] +=
-                   IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
        }
        for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
                hw_stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
@@ -2214,6 +2380,7 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 
        /* Rx Errors */
        stats->ierrors  = hw_stats->crcerrs +
+                         hw_stats->mspdc +
                          hw_stats->rlec +
                          hw_stats->ruc +
                          hw_stats->roc +
@@ -2224,7 +2391,6 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
                          hw_stats->mlfc +
                          hw_stats->mrfc +
                          hw_stats->rfc +
-                         hw_stats->rjc +
                          hw_stats->fccrc +
                          hw_stats->fclast;
 
@@ -2415,7 +2581,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                                ETH_TXQ_FLAGS_NOOFFLOADS,
        };
        dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
-       dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+       dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
        dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
 }
 
@@ -2444,7 +2610,8 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
                                DEV_TX_OFFLOAD_IPV4_CKSUM  |
                                DEV_TX_OFFLOAD_UDP_CKSUM   |
                                DEV_TX_OFFLOAD_TCP_CKSUM   |
-                               DEV_TX_OFFLOAD_SCTP_CKSUM;
+                               DEV_TX_OFFLOAD_SCTP_CKSUM  |
+                               DEV_TX_OFFLOAD_TCP_TSO;
 
        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
@@ -2623,7 +2790,6 @@ ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev)
  *  - On success, zero.
  *  - On failure, a negative value.
  */
-#ifdef RTE_NEXT_ABI
 static int
 ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
 {
@@ -2634,7 +2800,6 @@ ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
 
        return 0;
 }
-#endif
 
 /*
  * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
@@ -2659,7 +2824,7 @@ ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
 
        /* read-on-clear nic registers here */
        eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
-       PMD_DRV_LOG(INFO, "eicr %x", eicr);
+       PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
 
        intr->flags = 0;
 
@@ -2724,7 +2889,7 @@ ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
                PMD_INIT_LOG(INFO, " Port %d: Link Down",
                                (int)(dev->data->port_id));
        }
-       PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
+       PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d",
                                dev->pci_dev->addr.domain,
                                dev->pci_dev->addr.bus,
                                dev->pci_dev->addr.devid,
@@ -3223,12 +3388,22 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
        uint32_t reta, r;
        uint16_t idx, shift;
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint16_t sp_reta_size;
+       uint32_t reta_reg;
 
        PMD_INIT_FUNC_TRACE();
-       if (reta_size != ETH_RSS_RETA_SIZE_128) {
+
+       if (!ixgbe_rss_update_sp(hw->mac.type)) {
+               PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
+                       "NIC.");
+               return -ENOTSUP;
+       }
+
+       sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
+       if (reta_size != sp_reta_size) {
                PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
                        "(%d) doesn't match the number hardware can supported "
-                       "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
+                       "(%d)\n", reta_size, sp_reta_size);
                return -EINVAL;
        }
 
@@ -3239,10 +3414,11 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
                                                IXGBE_4_BIT_MASK);
                if (!mask)
                        continue;
+               reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
                if (mask == IXGBE_4_BIT_MASK)
                        r = 0;
                else
-                       r = IXGBE_READ_REG(hw, IXGBE_RETA(i >> 2));
+                       r = IXGBE_READ_REG(hw, reta_reg);
                for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) {
                        if (mask & (0x1 << j))
                                reta |= reta_conf[idx].reta[shift + j] <<
@@ -3251,7 +3427,7 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
                                reta |= r & (IXGBE_8_BIT_MASK <<
                                                (CHAR_BIT * j));
                }
-               IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
+               IXGBE_WRITE_REG(hw, reta_reg, reta);
        }
 
        return 0;
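
Because reta_size now varies by MAC (128 entries on most parts, 512 on
X550, 64 on X550 VFs), callers should size their rte_eth_rss_reta_entry64
array from the reported value instead of hard-coding 128. A hedged sketch
(spread_reta is a hypothetical helper):

    #include <string.h>
    #include <rte_ethdev.h>

    /* Hedged sketch: spread the redirection table round-robin over
     * nb_queues, sized from dev_info.reta_size. */
    static int spread_reta(uint8_t port_id, uint16_t nb_queues)
    {
            struct rte_eth_dev_info dev_info;
            struct rte_eth_rss_reta_entry64
                    reta[ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE];
            uint16_t i;

            rte_eth_dev_info_get(port_id, &dev_info);
            memset(reta, 0, sizeof(reta));
            for (i = 0; i < dev_info.reta_size; i++) {
                    reta[i / RTE_RETA_GROUP_SIZE].mask |=
                            1ULL << (i % RTE_RETA_GROUP_SIZE);
                    reta[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
                            i % nb_queues;
            }
            return rte_eth_dev_rss_reta_update(port_id, reta,
                                               dev_info.reta_size);
    }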
@@ -3266,16 +3442,19 @@ ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
        uint32_t reta;
        uint16_t idx, shift;
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint16_t sp_reta_size;
+       uint32_t reta_reg;
 
        PMD_INIT_FUNC_TRACE();
-       if (reta_size != ETH_RSS_RETA_SIZE_128) {
+       sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
+       if (reta_size != sp_reta_size) {
                PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
                        "(%d) doesn't match the number hardware can supported "
-                               "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
+                       "(%d)\n", reta_size, sp_reta_size);
                return -EINVAL;
        }
 
-       for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IXGBE_4_BIT_WIDTH) {
+       for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
                mask = (uint8_t)((reta_conf[idx].mask >> shift) &
@@ -3283,7 +3462,8 @@ ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
                if (!mask)
                        continue;
 
-               reta = IXGBE_READ_REG(hw, IXGBE_RETA(i >> 2));
+               reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
+               reta = IXGBE_READ_REG(hw, reta_reg);
                for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) {
                        if (mask & (0x1 << j))
                                reta_conf[idx].reta[shift + j] =
@@ -3410,12 +3590,12 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
         */
 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
        if (!conf->rxmode.hw_strip_crc) {
-               PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip");
+               PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
                conf->rxmode.hw_strip_crc = 1;
        }
 #else
        if (conf->rxmode.hw_strip_crc) {
-               PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip");
+               PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
                conf->rxmode.hw_strip_crc = 0;
        }
 #endif
@@ -3435,9 +3615,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-#ifdef RTE_NEXT_ABI
        uint32_t intr_vector = 0;
-#endif
        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
 
        int err, mask = 0;
@@ -3470,7 +3648,6 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
 
        ixgbevf_dev_rxtx_start(dev);
 
-#ifdef RTE_NEXT_ABI
        /* check and configure queue intr-vector mapping */
        if (dev->data->dev_conf.intr_conf.rxq != 0)
                intr_vector = dev->data->nb_rx_queues;
@@ -3488,7 +3665,6 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
                        return -ENOMEM;
                }
        }
-#endif
        ixgbevf_configure_msix(dev);
 
        if (dev->data->dev_conf.intr_conf.lsc != 0) {
@@ -3534,23 +3710,19 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev)
        /* disable intr eventfd mapping */
        rte_intr_disable(intr_handle);
 
-#ifdef RTE_NEXT_ABI
        /* Clean datapath event and queue/vec mapping */
        rte_intr_efd_disable(intr_handle);
        if (intr_handle->intr_vec != NULL) {
                rte_free(intr_handle->intr_vec);
                intr_handle->intr_vec = NULL;
        }
-#endif
 }
 
 static void
 ixgbevf_dev_close(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-#ifdef RTE_NEXT_ABI
        struct rte_pci_device *pci_dev;
-#endif
 
        PMD_INIT_FUNC_TRACE();
 
@@ -3563,13 +3735,11 @@ ixgbevf_dev_close(struct rte_eth_dev *dev)
        /* reprogram the RAR[0] in case user changed it. */
        ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
 
-#ifdef RTE_NEXT_ABI
        pci_dev = dev->pci_dev;
        if (pci_dev->intr_handle.intr_vec) {
                rte_free(pci_dev->intr_handle.intr_vec);
                pci_dev->intr_handle.intr_vec = NULL;
        }
-#endif
 }
 
 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
@@ -4087,7 +4257,6 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
        return 0;
 }
 
-#ifdef RTE_NEXT_ABI
 static int
 ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
@@ -4240,18 +4409,15 @@ ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
                }
        }
 }
-#endif
 
 static void
 ixgbevf_configure_msix(struct rte_eth_dev *dev)
 {
        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
-#ifdef RTE_NEXT_ABI
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t q_idx;
        uint32_t vector_idx = 0;
-#endif
 
        /* won't configure msix register if no mapping is done
         * between intr vector and event fd.
@@ -4259,7 +4425,6 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
        if (!rte_intr_dp_is_en(intr_handle))
                return;
 
-#ifdef RTE_NEXT_ABI
        /* Configure all RX queues of VF */
        for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
                /* Force all queue use vector 0,
@@ -4271,7 +4436,6 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
 
        /* Configure VF Rx queue ivar */
        ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
-#endif
 }
 
 /**
@@ -4283,13 +4447,11 @@ static void
 ixgbe_configure_msix(struct rte_eth_dev *dev)
 {
        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
-#ifdef RTE_NEXT_ABI
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t queue_id, vec = 0;
        uint32_t mask;
        uint32_t gpie;
-#endif
 
        /* won't configure msix register if no mapping is done
         * between intr vector and event fd
@@ -4297,7 +4459,6 @@ ixgbe_configure_msix(struct rte_eth_dev *dev)
        if (!rte_intr_dp_is_en(intr_handle))
                return;
 
-#ifdef RTE_NEXT_ABI
        /* setup GPIE for MSI-x mode */
        gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
        gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
@@ -4347,7 +4508,6 @@ ixgbe_configure_msix(struct rte_eth_dev *dev)
                  IXGBE_EIMS_LSC);
 
        IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
-#endif
 }
 
 static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
@@ -5484,8 +5644,8 @@ ixgbe_get_eeprom(struct rte_eth_dev *dev,
 
        first = in_eeprom->offset >> 1;
        length = in_eeprom->length >> 1;
-       if ((first >= hw->eeprom.word_size) ||
-           ((first + length) >= hw->eeprom.word_size))
+       if ((first > hw->eeprom.word_size) ||
+           ((first + length) > hw->eeprom.word_size))
                return -EINVAL;
 
        in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
@@ -5504,8 +5664,8 @@ ixgbe_set_eeprom(struct rte_eth_dev *dev,
 
        first = in_eeprom->offset >> 1;
        length = in_eeprom->length >> 1;
-       if ((first >= hw->eeprom.word_size) ||
-           ((first + length) >= hw->eeprom.word_size))
+       if ((first > hw->eeprom.word_size) ||
+           ((first + length) > hw->eeprom.word_size))
                return -EINVAL;
 
        in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
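
offset and length arrive in bytes and are halved into 16-bit EEPROM words,
so the corrected '>' bound permits an access that ends exactly at
word_size, i.e. the last word is reachable again. A hedged sketch for a
256-word (512-byte) EEPROM (read_last_eeprom_word is a hypothetical
helper):

    #include <rte_ethdev.h>

    /* Hedged sketch: read the final 16-bit word of a 512-byte EEPROM.
     * first = 510 >> 1 = 255 and first + length = 256 == word_size,
     * which the old '>=' check wrongly rejected. */
    static int read_last_eeprom_word(uint8_t port_id, uint16_t *word)
    {
            struct rte_dev_eeprom_info info = {
                    .offset = 510,  /* byte offset of the final word */
                    .length = 2,    /* one 16-bit word */
                    .data = word,
            };

            return rte_eth_dev_get_eeprom(port_id, &info);
    }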
@@ -5513,6 +5673,147 @@ ixgbe_set_eeprom(struct rte_eth_dev *dev,
        return eeprom->ops.write_buffer(hw,  first, length, data);
 }
 
+uint16_t
+ixgbe_reta_size_get(enum ixgbe_mac_type mac_type)
+{
+       switch (mac_type) {
+       case ixgbe_mac_X550:
+       case ixgbe_mac_X550EM_x:
+               return ETH_RSS_RETA_SIZE_512;
+       case ixgbe_mac_X550_vf:
+       case ixgbe_mac_X550EM_x_vf:
+               return ETH_RSS_RETA_SIZE_64;
+       default:
+               return ETH_RSS_RETA_SIZE_128;
+       }
+}
+
+uint32_t
+ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx)
+{
+       switch (mac_type) {
+       case ixgbe_mac_X550:
+       case ixgbe_mac_X550EM_x:
+               if (reta_idx < ETH_RSS_RETA_SIZE_128)
+                       return IXGBE_RETA(reta_idx >> 2);
+               else
+                       return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2);
+       case ixgbe_mac_X550_vf:
+       case ixgbe_mac_X550EM_x_vf:
+               return IXGBE_VFRETA(reta_idx >> 2);
+       default:
+               return IXGBE_RETA(reta_idx >> 2);
+       }
+}
+
+uint32_t
+ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type)
+{
+       switch (mac_type) {
+       case ixgbe_mac_X550_vf:
+       case ixgbe_mac_X550EM_x_vf:
+               return IXGBE_VFMRQC;
+       default:
+               return IXGBE_MRQC;
+       }
+}
+
+uint32_t
+ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i)
+{
+       switch (mac_type) {
+       case ixgbe_mac_X550_vf:
+       case ixgbe_mac_X550EM_x_vf:
+               return IXGBE_VFRSSRK(i);
+       default:
+               return IXGBE_RSSRK(i);
+       }
+}
+
+bool
+ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type)
+{
+       switch (mac_type) {
+       case ixgbe_mac_82599_vf:
+       case ixgbe_mac_X540_vf:
+               return false;
+       default:
+               return true;
+       }
+}
+
+static int
+ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
+                       struct rte_eth_dcb_info *dcb_info)
+{
+       struct ixgbe_dcb_config *dcb_config =
+                       IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
+       struct ixgbe_dcb_tc_config *tc;
+       uint8_t i, j;
+
+       if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+               dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
+       else
+               dcb_info->nb_tcs = 1;
+
+       if (dcb_config->vt_mode) { /* vt is enabled */
+               struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
+                               &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
+               for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+                       dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
+               for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
+                       for (j = 0; j < dcb_info->nb_tcs; j++) {
+                               dcb_info->tc_queue.tc_rxq[i][j].base =
+                                               i * dcb_info->nb_tcs + j;
+                               dcb_info->tc_queue.tc_rxq[i][j].nb_queue = 1;
+                               dcb_info->tc_queue.tc_txq[i][j].base =
+                                               i * dcb_info->nb_tcs + j;
+                               dcb_info->tc_queue.tc_txq[i][j].nb_queue = 1;
+                       }
+               }
+       } else { /* vt is disabled */
+               struct rte_eth_dcb_rx_conf *rx_conf =
+                               &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
+               for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+                       dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
+               if (dcb_info->nb_tcs == ETH_4_TCS) {
+                       for (i = 0; i < dcb_info->nb_tcs; i++) {
+                               dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
+                               dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
+                       }
+                       dcb_info->tc_queue.tc_txq[0][0].base = 0;
+                       dcb_info->tc_queue.tc_txq[0][1].base = 64;
+                       dcb_info->tc_queue.tc_txq[0][2].base = 96;
+                       dcb_info->tc_queue.tc_txq[0][3].base = 112;
+                       dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
+                       dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
+                       dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
+                       dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
+               } else if (dcb_info->nb_tcs == ETH_8_TCS) {
+                       for (i = 0; i < dcb_info->nb_tcs; i++) {
+                               dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
+                               dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
+                       }
+                       dcb_info->tc_queue.tc_txq[0][0].base = 0;
+                       dcb_info->tc_queue.tc_txq[0][1].base = 32;
+                       dcb_info->tc_queue.tc_txq[0][2].base = 64;
+                       dcb_info->tc_queue.tc_txq[0][3].base = 80;
+                       dcb_info->tc_queue.tc_txq[0][4].base = 96;
+                       dcb_info->tc_queue.tc_txq[0][5].base = 104;
+                       dcb_info->tc_queue.tc_txq[0][6].base = 112;
+                       dcb_info->tc_queue.tc_txq[0][7].base = 120;
+                       dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
+                       dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
+                       dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
+                       dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
+                       dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
+                       dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
+                       dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
+                       dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
+               }
+       }
+       for (i = 0; i < dcb_info->nb_tcs; i++) {
+               tc = &dcb_config->tc_config[i];
+               dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent;
+       }
+       return 0;
+}
+
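
The new op is reached through rte_eth_dev_get_dcb_info(), added by this
same "ethdev: get DCB information" series. A hedged usage sketch
(dump_dcb_bw is a hypothetical helper):

    #include <stdio.h>
    #include <rte_ethdev.h>

    /* Hedged sketch: print each traffic class's bandwidth share as
     * filled in by ixgbe_dev_get_dcb_info(). */
    static void dump_dcb_bw(uint8_t port_id)
    {
            struct rte_eth_dcb_info dcb_info;
            uint8_t i;

            if (rte_eth_dev_get_dcb_info(port_id, &dcb_info) != 0)
                    return;
            for (i = 0; i < dcb_info.nb_tcs; i++)
                    printf("TC%u: %u%%\n", i, dcb_info.tc_bws[i]);
    }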
 static struct rte_driver rte_ixgbe_driver = {
        .type = PMD_PDEV,
        .init = rte_ixgbe_pmd_init,