ethdev: return diagnostic when setting MAC address
[dpdk.git] / drivers / net / ixgbe / ixgbe_ethdev.c
index a0ae089..a5e2fc0 100644 (file)
@@ -1,34 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
  */
 
 #include <sys/queue.h>
 #include <rte_log.h>
 #include <rte_debug.h>
 #include <rte_pci.h>
-#include <rte_atomic.h>
+#include <rte_bus_pci.h>
 #include <rte_branch_prediction.h>
 #include <rte_memory.h>
-#include <rte_memzone.h>
 #include <rte_eal.h>
 #include <rte_alarm.h>
 #include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
 #include <rte_ethdev_pci.h>
-#include <rte_atomic.h>
 #include <rte_malloc.h>
 #include <rte_random.h>
 #include <rte_dev.h>
 #include <rte_hash_crc.h>
+#ifdef RTE_LIBRTE_SECURITY
+#include <rte_security_driver.h>
+#endif
 
 #include "ixgbe_logs.h"
 #include "base/ixgbe_api.h"
@@ -93,6 +65,9 @@
 /* Timer value included in XOFF frames. */
 #define IXGBE_FC_PAUSE 0x680
 
+/* Default value of Max Rx Queue */
+#define IXGBE_MAX_RX_QUEUE_NUM 128
+
 #define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
 #define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
 #define IXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */
@@ -170,13 +145,14 @@ static void ixgbe_dev_stop(struct rte_eth_dev *dev);
 static int  ixgbe_dev_set_link_up(struct rte_eth_dev *dev);
 static int  ixgbe_dev_set_link_down(struct rte_eth_dev *dev);
 static void ixgbe_dev_close(struct rte_eth_dev *dev);
+static int  ixgbe_dev_reset(struct rte_eth_dev *dev);
 static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
 static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
 static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
 static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
 static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
                                int wait_to_complete);
-static void ixgbe_dev_stats_get(struct rte_eth_dev *dev,
+static int ixgbe_dev_stats_get(struct rte_eth_dev *dev,
                                struct rte_eth_stats *stats);
 static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
                                struct rte_eth_xstat *xstats, unsigned n);
@@ -219,7 +195,7 @@ static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
                uint16_t queue, bool on);
 static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
                int on);
-static void ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static int ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
 static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
 static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
 static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev);
@@ -240,7 +216,7 @@ static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
                        struct rte_eth_rss_reta_entry64 *reta_conf,
                        uint16_t reta_size);
 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
-static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
+static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
 static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
 static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
@@ -251,7 +227,7 @@ static void ixgbe_dev_interrupt_delayed_handler(void *param);
 static int ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
                         uint32_t index, uint32_t pool);
 static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
-static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
+static int ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
                                           struct ether_addr *mac_addr);
 static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
 static bool is_device_supported(struct rte_eth_dev *dev,
@@ -266,16 +242,17 @@ static int ixgbevf_dev_link_update(struct rte_eth_dev *dev,
                                   int wait_to_complete);
 static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
 static void ixgbevf_dev_close(struct rte_eth_dev *dev);
+static int  ixgbevf_dev_reset(struct rte_eth_dev *dev);
 static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
 static void ixgbevf_intr_enable(struct ixgbe_hw *hw);
-static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
+static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
                struct rte_eth_stats *stats);
 static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
 static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
                uint16_t vlan_id, int on);
 static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
                uint16_t queue, int on);
-static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static int ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
 static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
                                            uint16_t queue_id);
@@ -304,14 +281,11 @@ static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
                               uint8_t queue, uint8_t msix_vector);
 static void ixgbe_configure_msix(struct rte_eth_dev *dev);
 
-static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
-               uint16_t queue_idx, uint16_t tx_rate);
-
 static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
                                struct ether_addr *mac_addr,
                                uint32_t index, uint32_t pool);
 static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
-static void ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
+static int ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
                                             struct ether_addr *mac_addr);
 static int ixgbe_syn_filter_get(struct rte_eth_dev *dev,
                        struct rte_eth_syn_filter *filter);
@@ -426,6 +400,9 @@ static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
                (r) = (h)->bitmap[idx] >> bit & 1;\
        } while (0)
 
+int ixgbe_logtype_init;
+int ixgbe_logtype_driver;
+
 /*
  * The set of PCI devices this driver supports
  */
@@ -446,13 +423,8 @@ static const struct rte_pci_id pci_id_ixgbe_map[] = {
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) },
-       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) },
-       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_SFP) },
-       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_RNDC) },
-       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_560FLR) },
-       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_ECNA_DP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) },
@@ -527,6 +499,7 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
        .dev_set_link_up    = ixgbe_dev_set_link_up,
        .dev_set_link_down  = ixgbe_dev_set_link_down,
        .dev_close            = ixgbe_dev_close,
+       .dev_reset            = ixgbe_dev_reset,
        .promiscuous_enable   = ixgbe_dev_promiscuous_enable,
        .promiscuous_disable  = ixgbe_dev_promiscuous_disable,
        .allmulticast_enable  = ixgbe_dev_allmulticast_enable,
@@ -599,6 +572,7 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
        .l2_tunnel_offload_set   = ixgbe_dev_l2_tunnel_offload_set,
        .udp_tunnel_port_add  = ixgbe_dev_udp_tunnel_port_add,
        .udp_tunnel_port_del  = ixgbe_dev_udp_tunnel_port_del,
+       .tm_ops_get           = ixgbe_tm_ops_get,
 };
 
 /*
@@ -616,6 +590,7 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
        .xstats_reset         = ixgbevf_dev_stats_reset,
        .xstats_get_names     = ixgbevf_dev_xstats_get_names,
        .dev_close            = ixgbevf_dev_close,
+       .dev_reset            = ixgbevf_dev_reset,
        .allmulticast_enable  = ixgbevf_dev_allmulticast_enable,
        .allmulticast_disable = ixgbevf_dev_allmulticast_disable,
        .dev_infos_get        = ixgbevf_dev_info_get,
@@ -811,58 +786,6 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
 #define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) / \
                sizeof(rte_ixgbevf_stats_strings[0]))
 
-/**
- * Atomically reads the link status information from global
- * structure rte_eth_dev.
- *
- * @param dev
- *   - Pointer to the structure rte_eth_dev to read from.
- *   - Pointer to the buffer to be saved with the link status.
- *
- * @return
- *   - On success, zero.
- *   - On failure, negative value.
- */
-static inline int
-rte_ixgbe_dev_atomic_read_link_status(struct rte_eth_dev *dev,
-                               struct rte_eth_link *link)
-{
-       struct rte_eth_link *dst = link;
-       struct rte_eth_link *src = &(dev->data->dev_link);
-
-       if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
-                                       *(uint64_t *)src) == 0)
-               return -1;
-
-       return 0;
-}
-
-/**
- * Atomically writes the link status information into global
- * structure rte_eth_dev.
- *
- * @param dev
- *   - Pointer to the structure rte_eth_dev to read from.
- *   - Pointer to the buffer to be saved with the link status.
- *
- * @return
- *   - On success, zero.
- *   - On failure, negative value.
- */
-static inline int
-rte_ixgbe_dev_atomic_write_link_status(struct rte_eth_dev *dev,
-                               struct rte_eth_link *link)
-{
-       struct rte_eth_link *dst = &(dev->data->dev_link);
-       struct rte_eth_link *src = link;
-
-       if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
-                                       *(uint64_t *)src) == 0)
-               return -1;
-
-       return 0;
-}
-
 /*
  * This function is the same as ixgbe_is_sfp() in base/ixgbe.h.
  */
@@ -1172,7 +1095,6 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
        }
 
        rte_eth_copy_pci_info(eth_dev, pci_dev);
-       eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
 
        /* Vendor and Device ID need to be set before init of shared code */
        hw->device_id = pci_dev->id.device_id;
@@ -1198,6 +1120,12 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
        /* Unlock any pending hardware semaphore */
        ixgbe_swfw_lock_reset(hw);
 
+#ifdef RTE_LIBRTE_SECURITY
+       /* Initialize security_ctx only for primary process */
+       if (ixgbe_ipsec_ctx_create(eth_dev))
+               return -ENOMEM;
+#endif
+
        /* Initialize DCB configuration*/
        memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
        ixgbe_dcb_init(hw, dcb_config);
@@ -1340,16 +1268,15 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
        /* initialize l2 tunnel filter list & hash */
        ixgbe_l2_tn_filter_init(eth_dev);
 
-       TAILQ_INIT(&filter_ntuple_list);
-       TAILQ_INIT(&filter_ethertype_list);
-       TAILQ_INIT(&filter_syn_list);
-       TAILQ_INIT(&filter_fdir_list);
-       TAILQ_INIT(&filter_l2_tunnel_list);
-       TAILQ_INIT(&ixgbe_flow_list);
+       /* initialize flow filter lists */
+       ixgbe_filterlist_init();
 
        /* initialize bandwidth configuration info */
        memset(bw_conf, 0, sizeof(struct ixgbe_bw_conf));
 
+       /* initialize Traffic Manager configuration */
+       ixgbe_tm_conf_init(eth_dev);
+
        return 0;
 }
 
@@ -1359,6 +1286,8 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        struct ixgbe_hw *hw;
+       int retries = 0;
+       int ret;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -1379,8 +1308,20 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
 
        /* disable uio intr before callback unregister */
        rte_intr_disable(intr_handle);
-       rte_intr_callback_unregister(intr_handle,
-                                    ixgbe_dev_interrupt_handler, eth_dev);
+
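+       /* The handler may still be running on another core; keep retrying
+        * while the unregister reports -EAGAIN, with a bounded delay.
+        */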
+       do {
+               ret = rte_intr_callback_unregister(intr_handle,
+                               ixgbe_dev_interrupt_handler, eth_dev);
+               if (ret >= 0) {
+                       break;
+               } else if (ret != -EAGAIN) {
+                       PMD_INIT_LOG(ERR,
+                               "intr callback unregister failed: %d",
+                               ret);
+                       return ret;
+               }
+               rte_delay_ms(100);
+       } while (retries++ < (10 + IXGBE_LINK_UP_TIME));
 
        /* uninitialize PF if max_vfs not zero */
        ixgbe_pf_host_uninit(eth_dev);
@@ -1403,6 +1344,13 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
        /* clear all the filters list */
        ixgbe_filterlist_flush();
 
+       /* Remove all Traffic Manager configuration */
+       ixgbe_tm_conf_uninit(eth_dev);
+
+#ifdef RTE_LIBRTE_SECURITY
+       rte_free(eth_dev->security_ctx);
+#endif
+
        return 0;
 }
 
@@ -1629,7 +1577,6 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
        }
 
        rte_eth_copy_pci_info(eth_dev, pci_dev);
-       eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
 
        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
@@ -1783,7 +1730,8 @@ static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev)
 
 static struct rte_pci_driver rte_ixgbe_pmd = {
        .id_table = pci_id_ixgbe_map,
-       .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+       .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+                    RTE_PCI_DRV_IOVA_AS_VA,
        .probe = eth_ixgbe_pci_probe,
        .remove = eth_ixgbe_pci_remove,
 };
@@ -1805,7 +1753,7 @@ static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev)
  */
 static struct rte_pci_driver rte_ixgbevf_pmd = {
        .id_table = pci_id_ixgbevf_map,
-       .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+       .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
        .probe = eth_ixgbevf_pci_probe,
        .remove = eth_ixgbevf_pci_remove,
 };
@@ -1961,9 +1909,9 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
        rxq = dev->data->rx_queues[queue];
 
        if (on)
-               rxq->vlan_flags = PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
+               rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
        else
-               rxq->vlan_flags = PKT_RX_VLAN_PKT;
+               rxq->vlan_flags = PKT_RX_VLAN;
 }
 
 static void
@@ -2014,64 +1962,6 @@ ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
        ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
 }
 
-void
-ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
-{
-       struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       uint32_t ctrl;
-       uint16_t i;
-       struct ixgbe_rx_queue *rxq;
-
-       PMD_INIT_FUNC_TRACE();
-
-       if (hw->mac.type == ixgbe_mac_82598EB) {
-               ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-               ctrl &= ~IXGBE_VLNCTRL_VME;
-               IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
-       } else {
-               /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
-               for (i = 0; i < dev->data->nb_rx_queues; i++) {
-                       rxq = dev->data->rx_queues[i];
-                       ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
-                       ctrl &= ~IXGBE_RXDCTL_VME;
-                       IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
-
-                       /* record those setting for HW strip per queue */
-                       ixgbe_vlan_hw_strip_bitmap_set(dev, i, 0);
-               }
-       }
-}
-
-void
-ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
-{
-       struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       uint32_t ctrl;
-       uint16_t i;
-       struct ixgbe_rx_queue *rxq;
-
-       PMD_INIT_FUNC_TRACE();
-
-       if (hw->mac.type == ixgbe_mac_82598EB) {
-               ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-               ctrl |= IXGBE_VLNCTRL_VME;
-               IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
-       } else {
-               /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
-               for (i = 0; i < dev->data->nb_rx_queues; i++) {
-                       rxq = dev->data->rx_queues[i];
-                       ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
-                       ctrl |= IXGBE_RXDCTL_VME;
-                       IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
-
-                       /* record those setting for HW strip per queue */
-                       ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1);
-               }
-       }
-}
-
 static void
 ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
 {
@@ -2127,29 +2017,77 @@ ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
         */
 }
 
-static void
+void
+ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+       uint32_t ctrl;
+       uint16_t i;
+       struct ixgbe_rx_queue *rxq;
+       bool on;
+
+       PMD_INIT_FUNC_TRACE();
+
+       if (hw->mac.type == ixgbe_mac_82598EB) {
+               if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+                       ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+                       ctrl |= IXGBE_VLNCTRL_VME;
+                       IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
+               } else {
+                       ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+                       ctrl &= ~IXGBE_VLNCTRL_VME;
+                       IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
+               }
+       } else {
+               /*
+                * On other 10G NICs, VLAN stripping can be set up
+                * per queue in RXDCTL
+                */
+               for (i = 0; i < dev->data->nb_rx_queues; i++) {
+                       rxq = dev->data->rx_queues[i];
+                       ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+                       if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+                               ctrl |= IXGBE_RXDCTL_VME;
+                               on = TRUE;
+                       } else {
+                               ctrl &= ~IXGBE_RXDCTL_VME;
+                               on = FALSE;
+                       }
+                       IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
+
+                       /* record the per-queue HW strip setting */
+                       ixgbe_vlan_hw_strip_bitmap_set(dev, i, on);
+               }
+       }
+}
+
+static int
 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
+       struct rte_eth_rxmode *rxmode;
+       rxmode = &dev->data->dev_conf.rxmode;
+
        if (mask & ETH_VLAN_STRIP_MASK) {
-               if (dev->data->dev_conf.rxmode.hw_vlan_strip)
-                       ixgbe_vlan_hw_strip_enable_all(dev);
-               else
-                       ixgbe_vlan_hw_strip_disable_all(dev);
+               ixgbe_vlan_hw_strip_config(dev);
        }
 
        if (mask & ETH_VLAN_FILTER_MASK) {
-               if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+               if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
                        ixgbe_vlan_hw_filter_enable(dev);
                else
                        ixgbe_vlan_hw_filter_disable(dev);
        }
 
        if (mask & ETH_VLAN_EXTEND_MASK) {
-               if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+               if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
                        ixgbe_vlan_hw_extend_enable(dev);
                else
                        ixgbe_vlan_hw_extend_disable(dev);
        }
+
+       return 0;
 }
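
Since ixgbe_vlan_offload_set() now returns a status, a failure can reach the
application through the generic ethdev call. A minimal sketch of the caller
side, assuming a valid port_id (the helper name is illustrative):

    #include <stdio.h>
    #include <rte_ethdev.h>

    static int
    apply_vlan_offloads(uint16_t port_id)
    {
            /* request VLAN strip + filter reconfiguration */
            int mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK;
            int ret = rte_eth_dev_set_vlan_offload(port_id, mask);

            if (ret != 0)
                    printf("VLAN offload setup failed: %d\n", ret);
            return ret;
    }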
 
 static void
@@ -2181,9 +2119,10 @@ ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
                return -EINVAL;
        }
 
-       RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
-       RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = pci_dev->max_vfs * nb_rx_q;
-
+       RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
+               IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
+       RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
+               pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
        return 0;
 }
 
@@ -2223,8 +2162,6 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
                case ETH_MQ_RX_NONE:
                        /* if nothing mq mode configure, use default scheme */
                        dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
-                       if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
-                               RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
                        break;
                default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
                        /* SRIOV only works in VMDq enable mode */
@@ -2359,6 +2296,9 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
                IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
        struct ixgbe_adapter *adapter =
                (struct ixgbe_adapter *)dev->data->dev_private;
+       struct rte_eth_dev_info dev_info;
+       uint64_t rx_offloads;
+       uint64_t tx_offloads;
        int ret;
 
        PMD_INIT_FUNC_TRACE();
@@ -2370,6 +2310,22 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
                return ret;
        }
 
+       ixgbe_dev_info_get(dev, &dev_info);
+       rx_offloads = dev->data->dev_conf.rxmode.offloads;
+       if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
+               PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
+                           "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+                           rx_offloads, dev_info.rx_offload_capa);
+               return -ENOTSUP;
+       }
+       tx_offloads = dev->data->dev_conf.txmode.offloads;
+       if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
+               PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
+                           "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+                           tx_offloads, dev_info.tx_offload_capa);
+               return -ENOTSUP;
+       }
+
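
The capability check added above can also be mirrored on the application side
before calling rte_eth_dev_configure(); a hedged sketch (the helper name is
illustrative):

    #include <errno.h>
    #include <rte_ethdev.h>

    static int
    check_rx_offloads(uint16_t port_id, uint64_t wanted)
    {
            struct rte_eth_dev_info dev_info;

            rte_eth_dev_info_get(port_id, &dev_info);
            if ((wanted & dev_info.rx_offload_capa) != wanted)
                    return -ENOTSUP; /* some requested offloads unsupported */
            return 0;
    }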
        /* set flag to update link status after init */
        intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
 
@@ -2492,10 +2448,13 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
        uint32_t intr_vector = 0;
        int err, link_up = 0, negotiate = 0;
        uint32_t speed = 0;
+       uint32_t allowed_speeds = 0;
        int mask = 0;
        int status;
        uint16_t vf, idx;
        uint32_t *link_speeds;
+       struct ixgbe_tm_conf *tm_conf =
+               IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
 
        PMD_INIT_FUNC_TRACE();
 
@@ -2504,8 +2463,9 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
        *    - fixed speed: TODO implement
        */
        if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
-               PMD_INIT_LOG(ERR, "Invalid link_speeds for port %hhu; fix speed not supported",
-                            dev->data->port_id);
+               PMD_INIT_LOG(ERR,
+                       "Invalid link_speeds for port %u, fixed speed not supported",
+                       dev->data->port_id);
                return -EINVAL;
        }
 
@@ -2568,9 +2528,13 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
                goto error;
        }
 
-    mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
+       mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
                ETH_VLAN_EXTEND_MASK;
-       ixgbe_vlan_offload_set(dev, mask);
+       err = ixgbe_vlan_offload_set(dev, mask);
+       if (err) {
+               PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
+               goto error;
+       }
 
        if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
                /* Enable vlan filtering for VMDq */
@@ -2633,21 +2597,50 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
        if (err)
                goto error;
 
+       switch (hw->mac.type) {
+       case ixgbe_mac_X550:
+       case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_X550EM_a:
+               allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
+                       ETH_LINK_SPEED_2_5G |  ETH_LINK_SPEED_5G |
+                       ETH_LINK_SPEED_10G;
+               break;
+       default:
+               allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
+                       ETH_LINK_SPEED_10G;
+       }
+
        link_speeds = &dev->data->dev_conf.link_speeds;
-       if (*link_speeds & ~(ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
-                       ETH_LINK_SPEED_10G)) {
+       if (*link_speeds & ~allowed_speeds) {
                PMD_INIT_LOG(ERR, "Invalid link setting");
                goto error;
        }
 
        speed = 0x0;
        if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
-               speed = (hw->mac.type != ixgbe_mac_82598EB) ?
-                               IXGBE_LINK_SPEED_82599_AUTONEG :
-                               IXGBE_LINK_SPEED_82598_AUTONEG;
+               switch (hw->mac.type) {
+               case ixgbe_mac_82598EB:
+                       speed = IXGBE_LINK_SPEED_82598_AUTONEG;
+                       break;
+               case ixgbe_mac_82599EB:
+               case ixgbe_mac_X540:
+                       speed = IXGBE_LINK_SPEED_82599_AUTONEG;
+                       break;
+               case ixgbe_mac_X550:
+               case ixgbe_mac_X550EM_x:
+               case ixgbe_mac_X550EM_a:
+                       speed = IXGBE_LINK_SPEED_X550_AUTONEG;
+                       break;
+               default:
+                       speed = IXGBE_LINK_SPEED_82599_AUTONEG;
+               }
        } else {
                if (*link_speeds & ETH_LINK_SPEED_10G)
                        speed |= IXGBE_LINK_SPEED_10GB_FULL;
+               if (*link_speeds & ETH_LINK_SPEED_5G)
+                       speed |= IXGBE_LINK_SPEED_5GB_FULL;
+               if (*link_speeds & ETH_LINK_SPEED_2_5G)
+                       speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
                if (*link_speeds & ETH_LINK_SPEED_1G)
                        speed |= IXGBE_LINK_SPEED_1GB_FULL;
                if (*link_speeds & ETH_LINK_SPEED_100M)
@@ -2658,12 +2651,16 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
        if (err)
                goto error;
 
+       ixgbe_dev_link_update(dev, 0);
+
 skip_link_setup:
 
        if (rte_intr_allow_others(intr_handle)) {
                /* check if lsc interrupt is enabled */
                if (dev->data->dev_conf.intr_conf.lsc != 0)
-                       ixgbe_dev_lsc_interrupt_setup(dev);
+                       ixgbe_dev_lsc_interrupt_setup(dev, TRUE);
+               else
+                       ixgbe_dev_lsc_interrupt_setup(dev, FALSE);
                ixgbe_dev_macsec_interrupt_setup(dev);
        } else {
                rte_intr_callback_unregister(intr_handle,
@@ -2686,6 +2683,11 @@ skip_link_setup:
        ixgbe_l2_tunnel_conf(dev);
        ixgbe_filter_restore(dev);
 
+       if (tm_conf->root && !tm_conf->committed)
+               PMD_DRV_LOG(WARNING,
+                           "please call hierarchy_commit() "
+                           "before starting the port");
+
        return 0;
 
 error:
@@ -2708,6 +2710,8 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        int vf;
+       struct ixgbe_tm_conf *tm_conf =
+               IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
 
        PMD_INIT_FUNC_TRACE();
 
@@ -2740,7 +2744,7 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
 
        /* Clear recorded link status */
        memset(&link, 0, sizeof(link));
-       rte_ixgbe_dev_atomic_write_link_status(dev, &link);
+       rte_eth_linkstatus_set(dev, &link);
 
        if (!rte_intr_allow_others(intr_handle))
                /* resume to the default handler */
@@ -2754,6 +2758,9 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
                rte_free(intr_handle->intr_vec);
                intr_handle->intr_vec = NULL;
        }
+
+       /* reset hierarchy commit */
+       tm_conf->committed = false;
 }
 
 /*
@@ -2817,7 +2824,7 @@ ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
 }
 
 /*
- * Reest and stop device.
+ * Reset and stop device.
  */
 static void
 ixgbe_dev_close(struct rte_eth_dev *dev)
@@ -2840,6 +2847,32 @@ ixgbe_dev_close(struct rte_eth_dev *dev)
        ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
 }
 
+/*
+ * Reset PF device.
+ */
+static int
+ixgbe_dev_reset(struct rte_eth_dev *dev)
+{
+       int ret;
+
+       /* When a DPDK PMD PF begins to reset the PF port, it should notify
+        * all its VFs so that they stay aligned with it. The detailed
+        * notification mechanism is PMD specific, and for the ixgbe PF it is
+        * rather complex. To avoid unexpected behavior in the VFs, resetting
+        * a PF with SR-IOV active is currently not supported. It might be
+        * supported later.
+        */
+       if (dev->data->sriov.active)
+               return -ENOTSUP;
+
+       ret = eth_ixgbe_dev_uninit(dev);
+       if (ret)
+               return ret;
+
+       ret = eth_ixgbe_dev_init(dev);
+
+       return ret;
+}
+
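
The new dev_reset callback is exposed through rte_eth_dev_reset(), which
stops the port before invoking the PMD hook. A sketch of the expected caller
flow, assuming a valid port_id (the helper name is illustrative):

    #include <errno.h>
    #include <rte_ethdev.h>

    static int
    recover_port(uint16_t port_id)
    {
            int ret = rte_eth_dev_reset(port_id);

            if (ret == -ENOTSUP) /* e.g. ixgbe PF with SR-IOV active */
                    return ret;
            /* on success, reconfigure queues and restart the port */
            return ret;
    }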
 static void
 ixgbe_read_stats_registers(struct ixgbe_hw *hw,
                           struct ixgbe_hw_stats *hw_stats,
@@ -3052,7 +3085,7 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,
 /*
  * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c
  */
-static void
+static int
 ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
        struct ixgbe_hw *hw =
@@ -3074,7 +3107,7 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
                        &total_qbrc, &total_qprc, &total_qprdc);
 
        if (stats == NULL)
-               return;
+               return -EINVAL;
 
        /* Fill out the rte_eth_stats statistics structure */
        stats->ipackets = total_qprc;
@@ -3105,6 +3138,7 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 
        /* Tx Errors */
        stats->oerrors  = 0;
+       return 0;
 }
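
With stats_get returning int, a NULL stats pointer now yields -EINVAL instead
of a silent return; an illustrative caller through the generic API:

    #include <stdio.h>
    #include <inttypes.h>
    #include <rte_ethdev.h>

    static void
    print_basic_stats(uint16_t port_id)
    {
            struct rte_eth_stats stats;

            if (rte_eth_stats_get(port_id, &stats) != 0) {
                    printf("stats unavailable on port %u\n", port_id);
                    return;
            }
            printf("rx=%" PRIu64 " tx=%" PRIu64 "\n",
                   stats.ipackets, stats.opackets);
    }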
 
 static void
@@ -3516,7 +3550,7 @@ ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
        return IXGBEVF_NB_XSTATS;
 }
 
-static void
+static int
 ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
        struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
@@ -3525,12 +3559,13 @@ ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
        ixgbevf_update_stats(dev);
 
        if (stats == NULL)
-               return;
+               return -EINVAL;
 
        stats->ipackets = hw_stats->vfgprc;
        stats->ibytes = hw_stats->vfgorc;
        stats->opackets = hw_stats->vfgptc;
        stats->obytes = hw_stats->vfgotc;
+       return 0;
 }
 
 static void
@@ -3577,7 +3612,6 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
 
-       dev_info->pci_dev = pci_dev;
        dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
        dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
        if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
@@ -3599,46 +3633,11 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        else
                dev_info->max_vmdq_pools = ETH_64_POOLS;
        dev_info->vmdq_queue_num = dev_info->max_rx_queues;
-       dev_info->rx_offload_capa =
-               DEV_RX_OFFLOAD_VLAN_STRIP |
-               DEV_RX_OFFLOAD_IPV4_CKSUM |
-               DEV_RX_OFFLOAD_UDP_CKSUM  |
-               DEV_RX_OFFLOAD_TCP_CKSUM;
-
-       /*
-        * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
-        * mode.
-        */
-       if ((hw->mac.type == ixgbe_mac_82599EB ||
-            hw->mac.type == ixgbe_mac_X540) &&
-           !RTE_ETH_DEV_SRIOV(dev).active)
-               dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
-
-       if (hw->mac.type == ixgbe_mac_82599EB ||
-           hw->mac.type == ixgbe_mac_X540)
-               dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_MACSEC_STRIP;
-
-       if (hw->mac.type == ixgbe_mac_X550 ||
-           hw->mac.type == ixgbe_mac_X550EM_x ||
-           hw->mac.type == ixgbe_mac_X550EM_a)
-               dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
-
-       dev_info->tx_offload_capa =
-               DEV_TX_OFFLOAD_VLAN_INSERT |
-               DEV_TX_OFFLOAD_IPV4_CKSUM  |
-               DEV_TX_OFFLOAD_UDP_CKSUM   |
-               DEV_TX_OFFLOAD_TCP_CKSUM   |
-               DEV_TX_OFFLOAD_SCTP_CKSUM  |
-               DEV_TX_OFFLOAD_TCP_TSO;
-
-       if (hw->mac.type == ixgbe_mac_82599EB ||
-           hw->mac.type == ixgbe_mac_X540)
-               dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
-
-       if (hw->mac.type == ixgbe_mac_X550 ||
-           hw->mac.type == ixgbe_mac_X550EM_x ||
-           hw->mac.type == ixgbe_mac_X550EM_a)
-               dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+       dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
+       dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
+                                    dev_info->rx_queue_offload_capa);
+       dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev);
+       dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);
 
        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
@@ -3648,6 +3647,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                },
                .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
                .rx_drop_en = 0,
+               .offloads = 0,
        };
 
        dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -3659,7 +3659,9 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
                .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
                .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
-                               ETH_TXQ_FLAGS_NOOFFLOADS,
+                            ETH_TXQ_FLAGS_NOOFFLOADS |
+                            ETH_TXQ_FLAGS_IGNORE,
+               .offloads = 0,
        };
 
        dev_info->rx_desc_lim = rx_desc_lim;
@@ -3676,6 +3678,10 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
            hw->mac.type == ixgbe_mac_X550_vf) {
                dev_info->speed_capa |= ETH_LINK_SPEED_100M;
        }
+       if (hw->mac.type == ixgbe_mac_X550) {
+               dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
+               dev_info->speed_capa |= ETH_LINK_SPEED_5G;
+       }
 }
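
Applications can discover the 2.5G/5G rates added here before requesting them
in link_speeds; a short sketch, assuming a valid port_id and an rte_eth_conf
named conf:

    struct rte_eth_dev_info dev_info;

    rte_eth_dev_info_get(port_id, &dev_info);
    if (dev_info.speed_capa & ETH_LINK_SPEED_2_5G)
            conf.link_speeds |= ETH_LINK_SPEED_2_5G;
    if (dev_info.speed_capa & ETH_LINK_SPEED_5G)
            conf.link_speeds |= ETH_LINK_SPEED_5G;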
 
 static const uint32_t *
@@ -3724,7 +3730,6 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-       dev_info->pci_dev = pci_dev;
        dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
        dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
        dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
@@ -3736,16 +3741,11 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
                dev_info->max_vmdq_pools = ETH_16_POOLS;
        else
                dev_info->max_vmdq_pools = ETH_64_POOLS;
-       dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
-                               DEV_RX_OFFLOAD_IPV4_CKSUM |
-                               DEV_RX_OFFLOAD_UDP_CKSUM  |
-                               DEV_RX_OFFLOAD_TCP_CKSUM;
-       dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
-                               DEV_TX_OFFLOAD_IPV4_CKSUM  |
-                               DEV_TX_OFFLOAD_UDP_CKSUM   |
-                               DEV_TX_OFFLOAD_TCP_CKSUM   |
-                               DEV_TX_OFFLOAD_SCTP_CKSUM  |
-                               DEV_TX_OFFLOAD_TCP_TSO;
+       dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
+       dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
+                                    dev_info->rx_queue_offload_capa);
+       dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev);
+       dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);
 
        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
@@ -3755,6 +3755,7 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
                },
                .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
                .rx_drop_en = 0,
+               .offloads = 0,
        };
 
        dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -3766,7 +3767,9 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
                .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
                .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
                .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
-                               ETH_TXQ_FLAGS_NOOFFLOADS,
+                            ETH_TXQ_FLAGS_NOOFFLOADS |
+                            ETH_TXQ_FLAGS_IGNORE,
+               .offloads = 0,
        };
 
        dev_info->rx_desc_lim = rx_desc_lim;
@@ -3802,7 +3805,7 @@ ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
        /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
         * before the link status is correct
         */
-       if (mac->type == ixgbe_mac_82599_vf) {
+       if (mac->type == ixgbe_mac_82599_vf && wait_to_complete) {
                int i;
 
                for (i = 0; i < 5; i++) {
@@ -3885,7 +3888,7 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
                            int wait_to_complete, int vf)
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct rte_eth_link link, old;
+       struct rte_eth_link link;
        ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
        struct ixgbe_interrupt *intr =
                IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
@@ -3895,11 +3898,11 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
        int wait = 1;
        bool autoneg = false;
 
+       memset(&link, 0, sizeof(link));
        link.link_status = ETH_LINK_DOWN;
        link.link_speed = 0;
        link.link_duplex = ETH_LINK_HALF_DUPLEX;
-       memset(&old, 0, sizeof(old));
-       rte_ixgbe_dev_atomic_read_link_status(dev, &old);
+       link.link_autoneg = ETH_LINK_AUTONEG;
 
        hw->mac.get_link_status = true;
 
@@ -3923,19 +3926,14 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
        if (diag != 0) {
                link.link_speed = ETH_SPEED_NUM_100M;
                link.link_duplex = ETH_LINK_FULL_DUPLEX;
-               rte_ixgbe_dev_atomic_write_link_status(dev, &link);
-               if (link.link_status == old.link_status)
-                       return -1;
-               return 0;
+               return rte_eth_linkstatus_set(dev, &link);
        }
 
        if (link_up == 0) {
-               rte_ixgbe_dev_atomic_write_link_status(dev, &link);
                intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
-               if (link.link_status == old.link_status)
-                       return -1;
-               return 0;
+               return rte_eth_linkstatus_set(dev, &link);
        }
+
        intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
        link.link_status = ETH_LINK_UP;
        link.link_duplex = ETH_LINK_FULL_DUPLEX;
@@ -3955,16 +3953,20 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
                link.link_speed = ETH_SPEED_NUM_1G;
                break;
 
+       case IXGBE_LINK_SPEED_2_5GB_FULL:
+               link.link_speed = ETH_SPEED_NUM_2_5G;
+               break;
+
+       case IXGBE_LINK_SPEED_5GB_FULL:
+               link.link_speed = ETH_SPEED_NUM_5G;
+               break;
+
        case IXGBE_LINK_SPEED_10GB_FULL:
                link.link_speed = ETH_SPEED_NUM_10G;
                break;
        }
-       rte_ixgbe_dev_atomic_write_link_status(dev, &link);
 
-       if (link.link_status == old.link_status)
-               return -1;
-
-       return 0;
+       return rte_eth_linkstatus_set(dev, &link);
 }
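
rte_eth_linkstatus_set() (from rte_ethdev_driver.h) replaces the per-PMD
atomic helpers removed earlier in this patch. Roughly, it swaps the 64-bit
link word atomically and reports whether the status changed; a simplified
model, not the exact library code:

    /* returns 0 if link_status changed, -1 if it stayed the same */
    static int
    linkstatus_set_model(struct rte_eth_dev *dev,
                         const struct rte_eth_link *new_link)
    {
            volatile uint64_t *dst = (volatile uint64_t *)&dev->data->dev_link;
            union { uint64_t v; struct rte_eth_link l; } old;

            old.v = rte_atomic64_exchange((uint64_t *)dst,
                                          *(const uint64_t *)new_link);
            return (old.l.link_status == new_link->link_status) ? -1 : 0;
    }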
 
 static int
@@ -4036,19 +4038,24 @@ ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
  *
  * @param dev
  *  Pointer to struct rte_eth_dev.
+ * @param on
+ *  Enable or Disable.
  *
  * @return
  *  - On success, zero.
  *  - On failure, a negative value.
  */
 static int
-ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev)
+ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
 {
        struct ixgbe_interrupt *intr =
                IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
 
        ixgbe_dev_link_status_print(dev);
-       intr->mask |= IXGBE_EICR_LSC;
+       if (on)
+               intr->mask |= IXGBE_EICR_LSC;
+       else
+               intr->mask &= ~IXGBE_EICR_LSC;
 
        return 0;
 }
@@ -4158,8 +4165,8 @@ ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_eth_link link;
 
-       memset(&link, 0, sizeof(link));
-       rte_ixgbe_dev_atomic_read_link_status(dev, &link);
+       rte_eth_linkstatus_get(dev, &link);
+
        if (link.link_status) {
                PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
                                        (int)(dev->data->port_id),
@@ -4194,7 +4201,6 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
        struct ixgbe_interrupt *intr =
                IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
        int64_t timeout;
-       struct rte_eth_link link;
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -4211,9 +4217,10 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
        }
 
        if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
+               struct rte_eth_link link;
+
                /* get the link status before link update, for predicting later */
-               memset(&link, 0, sizeof(link));
-               rte_ixgbe_dev_atomic_read_link_status(dev, &link);
+               rte_eth_linkstatus_get(dev, &link);
 
                ixgbe_dev_link_update(dev, 0);
 
@@ -4287,12 +4294,12 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
                intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
                ixgbe_dev_link_status_print(dev);
                _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
-                                             NULL, NULL);
+                                             NULL);
        }
 
        if (intr->flags & IXGBE_FLAG_MACSEC) {
                _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
-                                             NULL, NULL);
+                                             NULL);
                intr->flags &= ~IXGBE_FLAG_MACSEC;
        }
 
@@ -4778,14 +4785,15 @@ ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
        ixgbe_clear_rar(hw, index);
 }
 
-static void
+static int
 ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
 {
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 
        ixgbe_remove_rar(dev, 0);
-
        ixgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
+
+       return 0;
 }
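
This is the change the commit title refers to: the set_default_mac_addr hooks
now return a diagnostic that rte_eth_dev_default_mac_addr_set() can hand back
to the caller. Illustrative application side (the helper name is an
assumption):

    #include <stdio.h>
    #include <rte_ethdev.h>
    #include <rte_ether.h>

    static int
    set_port_mac(uint16_t port_id, struct ether_addr *mac)
    {
            int ret = rte_eth_dev_default_mac_addr_set(port_id, mac);

            if (ret != 0)
                    printf("cannot set MAC on port %u: %d\n", port_id, ret);
            return ret;
    }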
 
 static bool
@@ -4834,10 +4842,12 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
        /* switch to jumbo mode if needed */
        if (frame_size > ETHER_MAX_LEN) {
-               dev->data->dev_conf.rxmode.jumbo_frame = 1;
+               dev->data->dev_conf.rxmode.offloads |=
+                       DEV_RX_OFFLOAD_JUMBO_FRAME;
                hlreg0 |= IXGBE_HLREG0_JUMBOEN;
        } else {
-               dev->data->dev_conf.rxmode.jumbo_frame = 0;
+               dev->data->dev_conf.rxmode.offloads &=
+                       ~DEV_RX_OFFLOAD_JUMBO_FRAME;
                hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
        }
        IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
@@ -4886,23 +4896,42 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
        struct rte_eth_conf *conf = &dev->data->dev_conf;
        struct ixgbe_adapter *adapter =
                        (struct ixgbe_adapter *)dev->data->dev_private;
+       struct rte_eth_dev_info dev_info;
+       uint64_t rx_offloads;
+       uint64_t tx_offloads;
 
        PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
                     dev->data->port_id);
 
+       ixgbevf_dev_info_get(dev, &dev_info);
+       rx_offloads = dev->data->dev_conf.rxmode.offloads;
+       if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
+               PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
+                           "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+                           rx_offloads, dev_info.rx_offload_capa);
+               return -ENOTSUP;
+       }
+       tx_offloads = dev->data->dev_conf.txmode.offloads;
+       if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
+               PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
+                           "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+                           tx_offloads, dev_info.tx_offload_capa);
+               return -ENOTSUP;
+       }
+
        /*
         * VF has no ability to enable/disable HW CRC
         * Keep the persistent behavior the same as Host PF
         */
 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
-       if (!conf->rxmode.hw_strip_crc) {
+       if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
                PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
-               conf->rxmode.hw_strip_crc = 1;
+               conf->rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
        }
 #else
-       if (conf->rxmode.hw_strip_crc) {
+       if (conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
                PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
-               conf->rxmode.hw_strip_crc = 0;
+               conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
        }
 #endif
 
@@ -4929,7 +4958,11 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
 
        PMD_INIT_FUNC_TRACE();
 
-       hw->mac.ops.reset_hw(hw);
+       err = hw->mac.ops.reset_hw(hw);
+       if (err) {
+               PMD_INIT_LOG(ERR, "Unable to reset vf hardware (%d)", err);
+               return err;
+       }
        hw->mac.get_link_status = true;
 
        /* negotiate mailbox API version to use with the PF. */
@@ -4951,13 +4984,24 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
        /* Set HW strip */
        mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
                ETH_VLAN_EXTEND_MASK;
-       ixgbevf_vlan_offload_set(dev, mask);
+       err = ixgbevf_vlan_offload_set(dev, mask);
+       if (err) {
+               PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
+               ixgbe_dev_clear_queues(dev);
+               return err;
+       }
 
        ixgbevf_dev_rxtx_start(dev);
 
+       ixgbevf_dev_link_update(dev, 0);
+
        /* check and configure queue intr-vector mapping */
-       if (dev->data->dev_conf.intr_conf.rxq != 0) {
-               intr_vector = dev->data->nb_rx_queues;
+       if (rte_intr_cap_multiple(intr_handle) &&
+           dev->data->dev_conf.intr_conf.rxq) {
+               /* According to the datasheet, only vectors 0/1/2 can be
+                * used; for now only one vector is used for the Rx queue.
+                */
+               intr_vector = 1;
                if (rte_intr_efd_enable(intr_handle, intr_vector))
                        return -1;
        }
@@ -4974,6 +5018,15 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
        }
        ixgbevf_configure_msix(dev);
 
+       /* When a VF port is bound to VFIO-PCI, only the miscellaneous
+        * interrupt is mapped to VFIO vector 0 in eth_ixgbevf_dev_init().
+        * If the previous VFIO interrupt mapping set in eth_ixgbevf_dev_init()
+        * is not cleared, it will fail when the following rte_intr_enable()
+        * tries to map Rx queue interrupts to other VFIO vectors.
+        * So clear the uio/vfio intr/eventfd first to avoid failure.
+        */
+       rte_intr_disable(intr_handle);
+
        rte_intr_enable(intr_handle);
 
        /* Re-enable interrupt for VF */
@@ -5036,6 +5089,23 @@ ixgbevf_dev_close(struct rte_eth_dev *dev)
        ixgbevf_remove_mac_addr(dev, 0);
 }
 
+/*
+ * Reset VF device
+ */
+static int
+ixgbevf_dev_reset(struct rte_eth_dev *dev)
+{
+       int ret;
+
+       ret = eth_ixgbevf_dev_uninit(dev);
+       if (ret)
+               return ret;
+
+       ret = eth_ixgbevf_dev_init(dev);
+
+       return ret;
+}
+
 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -5111,21 +5181,25 @@ ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
        ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on);
 }
 
-static void
+static int
 ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_rx_queue *rxq;
        uint16_t i;
        int on = 0;
 
        /* VF function only supports hw strip feature, others are not supported */
        if (mask & ETH_VLAN_STRIP_MASK) {
-               on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
-
-               for (i = 0; i < hw->mac.max_rx_queues; i++)
+               for (i = 0; i < hw->mac.max_rx_queues; i++) {
+                       rxq = dev->data->rx_queues[i];
+                       on = !!(rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
                        ixgbevf_vlan_strip_queue_set(dev, i, on);
+               }
        }
+
+       return 0;
 }
 
 int
@@ -5408,13 +5482,13 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
        IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
 
        /* write pool mirror control register */
-       if (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) {
+       if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
                IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
                IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
                                mp_msb);
        }
        /* write VLAN mirror control register */
-       if (mirror_conf->rule_type == ETH_MIRROR_VLAN) {
+       if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
                IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
                IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
                                mv_msb);
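
The switch from == to & matters because rule_type is a bitmask, so a single
rule may combine mirror types. A hedged configuration sketch through the
generic API (field usage per rte_ethdev.h of this period; port_id assumed
valid):

    struct rte_eth_mirror_conf mc = { 0 };

    mc.rule_type = ETH_MIRROR_VIRTUAL_POOL_UP | ETH_MIRROR_VLAN;
    mc.dst_pool = 1;
    /* pool_mask and vlan members would be filled in as needed */
    rte_eth_mirror_rule_set(port_id, &mc, 0 /* rule_id */, 1 /* on */);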
@@ -5439,6 +5513,9 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
        if (ixgbe_vt_check(hw) < 0)
                return -ENOTSUP;
 
+       if (rule_id >= IXGBE_MAX_MIRROR_RULES)
+               return -EINVAL;
+
        memset(&mr_info->mr_conf[rule_id], 0,
               sizeof(struct rte_eth_mirror_conf));
 
@@ -5464,9 +5541,12 @@ ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
        uint32_t mask;
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t vec = IXGBE_MISC_VEC_ID;
 
        mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
-       mask |= (1 << IXGBE_MISC_VEC_ID);
+       if (rte_intr_allow_others(intr_handle))
+               vec = IXGBE_RX_VEC_START;
+       mask |= (1 << vec);
        RTE_SET_USED(queue_id);
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
 
@@ -5481,9 +5561,14 @@ ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
        uint32_t mask;
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       uint32_t vec = IXGBE_MISC_VEC_ID;
 
        mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
-       mask &= ~(1 << IXGBE_MISC_VEC_ID);
+       if (rte_intr_allow_others(intr_handle))
+               vec = IXGBE_RX_VEC_START;
+       mask &= ~(1 << vec);
        RTE_SET_USED(queue_id);
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
 
@@ -5596,7 +5681,8 @@ ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
                tmp |= (msix_vector << (8 * (queue & 0x3)));
                IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp);
        } else if ((hw->mac.type == ixgbe_mac_82599EB) ||
-                       (hw->mac.type == ixgbe_mac_X540)) {
+                       (hw->mac.type == ixgbe_mac_X540) ||
+                       (hw->mac.type == ixgbe_mac_X550)) {
                if (direction == -1) {
                        /* other causes */
                        idx = ((queue & 1) * 8);
@@ -5624,6 +5710,7 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t q_idx;
        uint32_t vector_idx = IXGBE_MISC_VEC_ID;
+       uint32_t base = IXGBE_MISC_VEC_ID;
 
        /* Configure VF other cause ivar */
        ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
@@ -5634,6 +5721,11 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
        if (!rte_intr_dp_is_en(intr_handle))
                return;
 
+       if (rte_intr_allow_others(intr_handle)) {
+               base = IXGBE_RX_VEC_START;
+               vector_idx = IXGBE_RX_VEC_START;
+       }
+
        /* Configure all RX queues of VF */
        for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
                /* Force all queues to use vector 0,
@@ -5641,6 +5733,8 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
                 */
                ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
                intr_handle->intr_vec[q_idx] = vector_idx;
+               if (vector_idx < base + intr_handle->nb_efd - 1)
+                       vector_idx++;
        }
 }
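
The loop above advances through vectors base .. base + nb_efd - 1 and parks any surplus queues on the last one. A standalone sketch of the same assignment (queue and event-fd counts are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the mapping loop above: the vector advances until the
     * granted event fds run out, then the last vector absorbs the
     * remaining queues.
     */
    int
    main(void)
    {
            uint32_t base = 1;      /* IXGBE_RX_VEC_START, assumed 1 */
            uint32_t nb_efd = 4;    /* event fds granted to the VF */
            uint32_t vec = base;
            uint32_t q;

            for (q = 0; q < 6; q++) {
                    printf("rxq %u -> vector %u\n", q, vec);
                    if (vec < base + nb_efd - 1)
                            vec++;
            }
            return 0;
    }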
 
@@ -5704,6 +5798,7 @@ ixgbe_configure_msix(struct rte_eth_dev *dev)
                break;
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
+       case ixgbe_mac_X550:
                ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
                break;
        default:
@@ -5721,10 +5816,12 @@ ixgbe_configure_msix(struct rte_eth_dev *dev)
        IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
 }
 
-static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
-       uint16_t queue_idx, uint16_t tx_rate)
+int
+ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
+                          uint16_t queue_idx, uint16_t tx_rate)
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct rte_eth_rxmode *rxmode;
        uint32_t rf_dec, rf_int;
        uint32_t bcnrc_val;
        uint16_t link_speed = dev->data->dev_link.link_speed;
@@ -5746,14 +5843,14 @@ static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
                bcnrc_val = 0;
        }
 
+       rxmode = &dev->data->dev_conf.rxmode;
        /*
         * Set global transmit compensation time to the MMW_SIZE in the
         * RTTBCNRM register. MMW_SIZE=0x014 if 9728-byte jumbo frames are
         * supported, otherwise set it to 0x4.
         */
-       if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
-               (dev->data->dev_conf.rxmode.max_rx_pkt_len >=
-                               IXGBE_MAX_JUMBO_FRAME_SIZE))
+       if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) &&
+           (rxmode->max_rx_pkt_len >= IXGBE_MAX_JUMBO_FRAME_SIZE))
                IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
                        IXGBE_MMW_SIZE_JUMBO_FRAME);
        else
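
With the per-field rxmode toggles folded into rxmode->offloads, the jumbo test above reduces to a flag-and-length check. A minimal sketch of the predicate, assuming IXGBE_MAX_JUMBO_FRAME_SIZE is 9728 bytes as defined elsewhere in this driver:

    #include <stdbool.h>
    #include <stdint.h>

    /* True when the jumbo MMW_SIZE (0x014) should be programmed: the
     * jumbo-frame offload bit is set and the configured maximum packet
     * length reaches the 9728-byte jumbo limit.
     */
    static bool
    use_jumbo_mmw(uint64_t rx_offloads, uint64_t jumbo_flag,
                  uint32_t max_rx_pkt_len)
    {
            return (rx_offloads & jumbo_flag) && max_rx_pkt_len >= 9728;
    }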
@@ -5845,12 +5942,14 @@ ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
        }
 }
 
-static void
+static int
 ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0);
+
+       return 0;
 }
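
Returning int here is what lets the ethdev layer hand a diagnostic back to the application, per this patch's subject; the VF handler happens to always succeed today. A hedged sketch of an application-side check, assuming rte_eth_dev_default_mac_addr_set() forwards the driver's return code:

    #include <stdio.h>
    #include <string.h>
    #include <rte_ethdev.h>

    /* Illustrative caller only; the error handling is a sketch, not a
     * prescribed pattern.  A locally administered MAC is used here.
     */
    static void
    set_port_mac(uint16_t port_id)
    {
            struct ether_addr mac = {
                    .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
            };
            int ret = rte_eth_dev_default_mac_addr_set(port_id, &mac);

            if (ret != 0)
                    printf("port %u: cannot set MAC: %s\n",
                           port_id, strerror(-ret));
    }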
 
 int
@@ -6100,7 +6199,7 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
        /* Refuse an MTU that requires scattered-packet support when that
         * feature has not been enabled beforehand.
         */
-       if (!rx_conf->enable_scatter &&
+       if (!(rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) &&
            (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
             dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
                return -EINVAL;
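
The rewritten condition expresses the same rule through the offload flag: a frame that cannot fit into one Rx buffer needs DEV_RX_OFFLOAD_SCATTER enabled beforehand. A small sketch of the size test, with IXGBE_VLAN_TAG_SIZE assumed to be 4 bytes and the headroom taken from RTE_PKTMBUF_HEADROOM:

    #include <stdbool.h>
    #include <stdint.h>

    #define VLAN_TAG_SIZE 4u    /* IXGBE_VLAN_TAG_SIZE, assumed */

    /* True when max_frame exceeds a single Rx buffer (minus headroom,
     * with room for two stacked VLAN tags) and scatter is required.
     */
    static bool
    mtu_needs_scatter(uint32_t max_frame, uint32_t min_rx_buf_size,
                      uint32_t headroom)
    {
            return max_frame + 2 * VLAN_TAG_SIZE >
                   min_rx_buf_size - headroom;
    }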
@@ -6265,7 +6364,7 @@ ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
                                sizeof(struct ixgbe_5tuple_filter), 0);
                if (filter == NULL)
                        return -ENOMEM;
-               (void)rte_memcpy(&filter->filter_info,
+               rte_memcpy(&filter->filter_info,
                                 &filter_5tuple,
                                 sizeof(struct ixgbe_5tuple_filter_info));
                filter->queue = ntuple_filter->queue;
@@ -6674,9 +6773,8 @@ ixgbe_start_timecounters(struct rte_eth_dev *dev)
        uint32_t shift = 0;
 
        /* Get current link speed. */
-       memset(&link, 0, sizeof(link));
        ixgbe_dev_link_update(dev, 1);
-       rte_ixgbe_dev_atomic_read_link_status(dev, &link);
+       rte_eth_linkstatus_get(dev, &link);
 
        switch (link.link_speed) {
        case ETH_SPEED_NUM_100M:
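
rte_eth_linkstatus_get() copies the device's link struct atomically, which is why both the local memset and the driver-private atomic reader disappear above. A minimal usage sketch:

    #include <rte_ethdev_driver.h>

    /* Sketch: snapshot the link the way the timecounter setup now does;
     * no caller-side zeroing or locking is needed.
     */
    static uint32_t
    current_link_speed(struct rte_eth_dev *dev)
    {
            struct rte_eth_link link;

            rte_eth_linkstatus_get(dev, &link);
            return link.link_speed;
    }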
@@ -7105,6 +7203,8 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
        struct ixgbe_dcb_config *dcb_config =
                        IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
        struct ixgbe_dcb_tc_config *tc;
+       struct rte_eth_dcb_tc_queue_mapping *tc_queue;
+       uint8_t nb_tcs;
        uint8_t i, j;
 
        if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
@@ -7112,19 +7212,31 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
        else
                dcb_info->nb_tcs = 1;
 
+       tc_queue = &dcb_info->tc_queue;
+       nb_tcs = dcb_info->nb_tcs;
+
        if (dcb_config->vt_mode) { /* vt is enabled */
                struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
                                &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
                for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
                        dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
-               for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
-                       for (j = 0; j < dcb_info->nb_tcs; j++) {
-                               dcb_info->tc_queue.tc_rxq[i][j].base =
-                                               i * dcb_info->nb_tcs + j;
-                               dcb_info->tc_queue.tc_rxq[i][j].nb_queue = 1;
-                               dcb_info->tc_queue.tc_txq[i][j].base =
-                                               i * dcb_info->nb_tcs + j;
-                               dcb_info->tc_queue.tc_txq[i][j].nb_queue = 1;
+               if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
+                       for (j = 0; j < nb_tcs; j++) {
+                               tc_queue->tc_rxq[0][j].base = j;
+                               tc_queue->tc_rxq[0][j].nb_queue = 1;
+                               tc_queue->tc_txq[0][j].base = j;
+                               tc_queue->tc_txq[0][j].nb_queue = 1;
+                       }
+               } else {
+                       for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
+                               for (j = 0; j < nb_tcs; j++) {
+                                       tc_queue->tc_rxq[i][j].base =
+                                               i * nb_tcs + j;
+                                       tc_queue->tc_rxq[i][j].nb_queue = 1;
+                                       tc_queue->tc_txq[i][j].base =
+                                               i * nb_tcs + j;
+                                       tc_queue->tc_txq[i][j].nb_queue = 1;
+                               }
                        }
                }
        } else { /* vt is disabled */
@@ -7481,7 +7593,7 @@ ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
                if (!node)
                        return -ENOMEM;
 
-               (void)rte_memcpy(&node->key,
+               rte_memcpy(&node->key,
                                 &key,
                                 sizeof(struct ixgbe_l2_tn_key));
                node->pool = l2_tunnel->pool;
@@ -7997,13 +8109,17 @@ static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        u32 in_msg = 0;
 
-       if (ixgbe_read_mbx(hw, &in_msg, 1, 0))
-               return;
+       /* peek at the message first */
+       in_msg = IXGBE_READ_REG(hw, IXGBE_VFMBMEM);
 
        /* PF reset VF event */
-       if (in_msg == IXGBE_PF_CONTROL_MSG)
+       if (in_msg == IXGBE_PF_CONTROL_MSG) {
+               /* dummy mailbox read to ACK the PF */
+               if (ixgbe_read_mbx(hw, &in_msg, 1, 0))
+                       return;
                _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
-                                             NULL, NULL);
+                                             NULL);
+       }
 }
 
 static int
@@ -8171,6 +8287,18 @@ ixgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
        }
 }
 
+/* restore rss filter */
+static inline void
+ixgbe_rss_filter_restore(struct rte_eth_dev *dev)
+{
+       struct ixgbe_filter_info *filter_info =
+               IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+
+       if (filter_info->rss_info.num)
+               ixgbe_config_rss_filter(dev,
+                       &filter_info->rss_info, TRUE);
+}
+
 static int
 ixgbe_filter_restore(struct rte_eth_dev *dev)
 {
@@ -8179,6 +8307,7 @@ ixgbe_filter_restore(struct rte_eth_dev *dev)
        ixgbe_syn_filter_restore(dev);
        ixgbe_fdir_filter_restore(dev);
        ixgbe_l2_tn_filter_restore(dev);
+       ixgbe_rss_filter_restore(dev);
 
        return 0;
 }
@@ -8276,3 +8405,15 @@ RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio-pci");
 RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci");
+
+RTE_INIT(ixgbe_init_log);
+static void
+ixgbe_init_log(void)
+{
+       ixgbe_logtype_init = rte_log_register("pmd.net.ixgbe.init");
+       if (ixgbe_logtype_init >= 0)
+               rte_log_set_level(ixgbe_logtype_init, RTE_LOG_NOTICE);
+       ixgbe_logtype_driver = rte_log_register("pmd.net.ixgbe.driver");
+       if (ixgbe_logtype_driver >= 0)
+               rte_log_set_level(ixgbe_logtype_driver, RTE_LOG_NOTICE);
+}
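
The two log types registered here are what the driver's logging macros resolve against at run time. A minimal usage sketch with the standard rte_log() call; the wrapper macro name is hypothetical:

    #include <rte_log.h>

    extern int ixgbe_logtype_driver;

    /* Hypothetical wrapper for illustration; rte_log() and the
     * RTE_LOG_* levels are the real API.
     */
    #define DRV_LOG(level, fmt, args...) \
            rte_log(RTE_LOG_ ## level, ixgbe_logtype_driver, \
                    "ixgbe: " fmt "\n", ## args)

    /* Example: DRV_LOG(NOTICE, "VF reset event on port %u", port_id); */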