ethdev: move info filling of PCI into drivers
[dpdk.git] drivers/net/i40e/i40e_ethdev.c
index 66be9f8..8f63044 100644
                I40E_PFINT_ICR0_ENA_GRST_MASK | \
                I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
                I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
-               I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK | \
                I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
                I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
                I40E_PFINT_ICR0_ENA_VFLR_MASK | \
 #define I40E_REG_INSET_L3_SRC_IP4                0x0001800000000000ULL
 /* Destination IPv4 address */
 #define I40E_REG_INSET_L3_DST_IP4                0x0000001800000000ULL
+/* Source IPv4 address for X722 */
+#define I40E_X722_REG_INSET_L3_SRC_IP4           0x0006000000000000ULL
+/* Destination IPv4 address for X722 */
+#define I40E_X722_REG_INSET_L3_DST_IP4           0x0000060000000000ULL
+/* IPv4 Protocol for X722 */
+#define I40E_X722_REG_INSET_L3_IP4_PROTO         0x0010000000000000ULL
+/* IPv4 Time to Live for X722 */
+#define I40E_X722_REG_INSET_L3_IP4_TTL           0x0010000000000000ULL
 /* IPv4 Type of Service (TOS) */
 #define I40E_REG_INSET_L3_IP4_TOS                0x0040000000000000ULL
 /* IPv4 Protocol */
@@ -366,8 +373,8 @@ static void i40e_stat_update_48(struct i40e_hw *hw,
                               uint64_t *offset,
                               uint64_t *stat);
 static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
-static void i40e_dev_interrupt_handler(
-               __rte_unused struct rte_intr_handle *handle, void *param);
+static void i40e_dev_interrupt_handler(struct rte_intr_handle *handle,
+                                      void *param);
 static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
                                uint32_t base, uint32_t num);
 static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
@@ -702,8 +709,9 @@ rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
        return 0;
 }
 
-DRIVER_REGISTER_PCI(net_i40e, rte_i40e_pmd.pci_drv);
-DRIVER_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
+RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio");
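
The registration macros are renamed from DRIVER_REGISTER_* to RTE_PMD_REGISTER_*, and the new RTE_PMD_REGISTER_KMOD_DEP() records which kernel modules the device must be bound to (igb_uio, uio_pci_generic or vfio) before the PMD can drive it. A minimal sketch of how a PCI PMD of this era wires these up; the net_dummy name and the ID table entry are placeholders, while the structure layout follows the eth_driver/rte_pci_driver conventions of this DPDK generation:

#include <rte_ethdev.h>
#include <rte_pci.h>

/* Hypothetical ID table: one example Intel device ID plus the sentinel. */
static const struct rte_pci_id pci_id_dummy_map[] = {
	{ RTE_PCI_DEVICE(0x8086, 0x1572) },
	{ .vendor_id = 0 }, /* sentinel */
};

/* Skeletal eth_driver; the eth_dev_init/uninit hooks and private data
 * size are omitted to keep the sketch short. */
static struct eth_driver rte_dummy_pmd = {
	.pci_drv = {
		.id_table = pci_id_dummy_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
		.probe = rte_eth_dev_pci_probe,
		.remove = rte_eth_dev_pci_remove,
	},
};

RTE_PMD_REGISTER_PCI(net_dummy, rte_dummy_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_dummy, pci_id_dummy_map);
RTE_PMD_REGISTER_KMOD_DEP(net_dummy, "* igb_uio | uio_pci_generic | vfio");
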
 
 #ifndef I40E_GLQF_ORT
 #define I40E_GLQF_ORT(_i)    (0x00268900 + ((_i) * 4))
@@ -900,7 +908,7 @@ is_floating_veb_supported(struct rte_devargs *devargs)
 static void
 config_floating_veb(struct rte_eth_dev *dev)
 {
-       struct rte_pci_device *pci_dev = dev->pci_dev;
+       struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
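
Throughout the patch, direct dev->pci_dev dereferences are replaced by the I40E_DEV_TO_PCI() helper: as the generic ethdev is being decoupled from the PCI bus, each driver recovers its own bus-specific device instead of reading it from the ethdev structure. The macro is defined in i40e_ethdev.h rather than in this file; the definition below is only a plausible sketch of it, shown as an assumption:

#include <rte_pci.h>	/* struct rte_pci_device, RTE_DEV_TO_PCI() */

/* Assumed shape of the helper (the actual definition lives in
 * i40e_ethdev.h): map the ethdev's generic rte_device back to the
 * rte_pci_device that embeds it, container_of-style. */
#define I40E_DEV_TO_PCI(eth_dev) \
	RTE_DEV_TO_PCI((eth_dev)->device)
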
 
@@ -924,6 +932,7 @@ static int
 eth_i40e_dev_init(struct rte_eth_dev *dev)
 {
        struct rte_pci_device *pci_dev;
+       struct rte_intr_handle *intr_handle;
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_vsi *vsi;
@@ -945,7 +954,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
                i40e_set_tx_function(dev);
                return 0;
        }
-       pci_dev = dev->pci_dev;
+       pci_dev = I40E_DEV_TO_PCI(dev);
+       intr_handle = &pci_dev->intr_handle;
 
        rte_eth_copy_pci_info(dev, pci_dev);
 
@@ -1113,11 +1123,13 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
        /* Disable double vlan by default */
        i40e_vsi_config_double_vlan(vsi, FALSE);
 
-       /* Disable S-TAG identification by default */
-       ret = I40E_READ_REG(hw, I40E_PRT_L2TAGSEN);
-       if (ret & I40E_L2_TAGS_S_TAG_MASK) {
-               ret &= ~I40E_L2_TAGS_S_TAG_MASK;
-               I40E_WRITE_REG(hw, I40E_PRT_L2TAGSEN, ret);
+       /* Disable S-TAG identification when floating_veb is disabled */
+       if (!pf->floating_veb) {
+               ret = I40E_READ_REG(hw, I40E_PRT_L2TAGSEN);
+               if (ret & I40E_L2_TAGS_S_TAG_MASK) {
+                       ret &= ~I40E_L2_TAGS_S_TAG_MASK;
+                       I40E_WRITE_REG(hw, I40E_PRT_L2TAGSEN, ret);
+               }
        }
 
        if (!vsi->max_macaddrs)
@@ -1139,15 +1151,15 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
        i40e_pf_host_init(dev);
 
        /* register callback func to eal lib */
-       rte_intr_callback_register(&(pci_dev->intr_handle),
-               i40e_dev_interrupt_handler, (void *)dev);
+       rte_intr_callback_register(intr_handle,
+                                  i40e_dev_interrupt_handler, dev);
 
        /* configure and enable device interrupt */
        i40e_pf_config_irq0(hw, TRUE);
        i40e_pf_enable_irq0(hw);
 
        /* enable uio intr after callback register */
-       rte_intr_enable(&(pci_dev->intr_handle));
+       rte_intr_enable(intr_handle);
        /*
         * Add an ethertype filter to drop all flow control frames transmitted
         * from VSIs. By doing so, we stop VF from sending out PAUSE or PFC
@@ -1195,6 +1207,7 @@ static int
 eth_i40e_dev_uninit(struct rte_eth_dev *dev)
 {
        struct rte_pci_device *pci_dev;
+       struct rte_intr_handle *intr_handle;
        struct i40e_hw *hw;
        struct i40e_filter_control_settings settings;
        int ret;
@@ -1206,7 +1219,8 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
                return 0;
 
        hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       pci_dev = dev->pci_dev;
+       pci_dev = I40E_DEV_TO_PCI(dev);
+       intr_handle = &pci_dev->intr_handle;
 
        if (hw->adapter_stopped == 0)
                i40e_dev_close(dev);
@@ -1215,11 +1229,6 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
        dev->rx_pkt_burst = NULL;
        dev->tx_pkt_burst = NULL;
 
-       /* Disable LLDP */
-       ret = i40e_aq_stop_lldp(hw, true, NULL);
-       if (ret != I40E_SUCCESS) /* Its failure can be ignored */
-               PMD_INIT_LOG(INFO, "Failed to stop lldp");
-
        /* Clear PXE mode */
        i40e_clear_pxe_mode(hw);
 
@@ -1241,11 +1250,11 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
        dev->data->mac_addrs = NULL;
 
        /* disable uio intr before callback unregister */
-       rte_intr_disable(&(pci_dev->intr_handle));
+       rte_intr_disable(intr_handle);
 
        /* register callback func to eal lib */
-       rte_intr_callback_unregister(&(pci_dev->intr_handle),
-               i40e_dev_interrupt_handler, (void *)dev);
+       rte_intr_callback_unregister(intr_handle,
+                                    i40e_dev_interrupt_handler, dev);
 
        return 0;
 }
@@ -1331,7 +1340,8 @@ void
 i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
 {
        struct rte_eth_dev *dev = vsi->adapter->eth_dev;
-       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+       struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
        uint16_t msix_vect = vsi->msix_intr;
        uint16_t i;
@@ -1444,7 +1454,8 @@ void
 i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
 {
        struct rte_eth_dev *dev = vsi->adapter->eth_dev;
-       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+       struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
        uint16_t msix_vect = vsi->msix_intr;
        uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
@@ -1515,7 +1526,8 @@ static void
 i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
 {
        struct rte_eth_dev *dev = vsi->adapter->eth_dev;
-       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+       struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
        uint16_t interval = i40e_calc_itr_interval(\
                RTE_LIBRTE_I40E_ITR_INTERVAL);
@@ -1546,7 +1558,8 @@ static void
 i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
 {
        struct rte_eth_dev *dev = vsi->adapter->eth_dev;
-       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+       struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
        uint16_t msix_intr, i;
 
@@ -1569,6 +1582,8 @@ i40e_parse_link_speeds(uint16_t link_speeds)
 
        if (link_speeds & ETH_LINK_SPEED_40G)
                link_speed |= I40E_LINK_SPEED_40GB;
+       if (link_speeds & ETH_LINK_SPEED_25G)
+               link_speed |= I40E_LINK_SPEED_25GB;
        if (link_speeds & ETH_LINK_SPEED_20G)
                link_speed |= I40E_LINK_SPEED_20GB;
        if (link_speeds & ETH_LINK_SPEED_10G)
@@ -1594,6 +1609,7 @@ i40e_phy_conf_link(struct i40e_hw *hw,
                        I40E_AQ_PHY_FLAG_PAUSE_RX |
                        I40E_AQ_PHY_FLAG_LOW_POWER;
        const uint8_t advt = I40E_LINK_SPEED_40GB |
+                       I40E_LINK_SPEED_25GB |
                        I40E_LINK_SPEED_10GB |
                        I40E_LINK_SPEED_1GB |
                        I40E_LINK_SPEED_100MB;
@@ -1646,7 +1662,8 @@ i40e_apply_link_speed(struct rte_eth_dev *dev)
        struct rte_eth_conf *conf = &dev->data->dev_conf;
 
        speed = i40e_parse_link_speeds(conf->link_speeds);
-       abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+       if (!I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types))
+               abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
        if (!(conf->link_speeds & ETH_LINK_SPEED_FIXED))
                abilities |= I40E_AQ_PHY_AN_ENABLED;
        abilities |= I40E_AQ_PHY_LINK_ENABLED;
@@ -1667,7 +1684,8 @@ i40e_dev_start(struct rte_eth_dev *dev)
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_vsi *main_vsi = pf->main_vsi;
        int ret, i;
-       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+       struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        uint32_t intr_vector = 0;
 
        hw->adapter_stopped = 0;
@@ -1748,7 +1766,8 @@ i40e_dev_start(struct rte_eth_dev *dev)
        /* Apply link configure */
        if (dev->data->dev_conf.link_speeds & ~(ETH_LINK_SPEED_100M |
                                ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
-                               ETH_LINK_SPEED_20G | ETH_LINK_SPEED_40G)) {
+                               ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G |
+                               ETH_LINK_SPEED_40G)) {
                PMD_DRV_LOG(ERR, "Invalid link setting");
                goto err_up;
        }
@@ -1769,6 +1788,16 @@ i40e_dev_start(struct rte_eth_dev *dev)
                if (dev->data->dev_conf.intr_conf.lsc != 0)
                        PMD_INIT_LOG(INFO, "lsc won't enable because of"
                                     " no intr multiplex\n");
+       } else if (dev->data->dev_conf.intr_conf.lsc != 0) {
+               ret = i40e_aq_set_phy_int_mask(hw,
+                                              ~(I40E_AQ_EVENT_LINK_UPDOWN |
+                                              I40E_AQ_EVENT_MODULE_QUAL_FAIL |
+                                              I40E_AQ_EVENT_MEDIA_NA), NULL);
+               if (ret != I40E_SUCCESS)
+                       PMD_DRV_LOG(WARNING, "Failed to set phy mask");

+
+               /* Call get_link_info aq command to enable LSE */
+               i40e_dev_link_update(dev, 0);
        }
 
        /* enable uio intr after callback register */
@@ -1789,7 +1818,8 @@ i40e_dev_stop(struct rte_eth_dev *dev)
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_vsi *main_vsi = pf->main_vsi;
        struct i40e_mirror_rule *p_mirror;
-       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+       struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        int i;
 
        /* Disable all queues */
@@ -1840,6 +1870,8 @@ i40e_dev_close(struct rte_eth_dev *dev)
 {
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        uint32_t reg;
        int i;
 
@@ -1851,7 +1883,7 @@ i40e_dev_close(struct rte_eth_dev *dev)
 
        /* Disable interrupt */
        i40e_pf_disable_irq0(hw);
-       rte_intr_disable(&(dev->pci_dev->intr_handle));
+       rte_intr_disable(intr_handle);
 
        /* shutdown and destroy the HMC */
        i40e_shutdown_lan_hmc(hw);
@@ -1968,9 +2000,11 @@ static int
 i40e_dev_set_link_down(struct rte_eth_dev *dev)
 {
        uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
-       uint8_t abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+       uint8_t abilities = 0;
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+       if (!I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types))
+               abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
        return i40e_phy_conf_link(hw, abilities, speed);
 }
 
@@ -1985,6 +2019,7 @@ i40e_dev_link_update(struct rte_eth_dev *dev,
        struct rte_eth_link link, old;
        int status;
        unsigned rep_cnt = MAX_REPEAT_TIME;
+       bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
 
        memset(&link, 0, sizeof(link));
        memset(&old, 0, sizeof(old));
@@ -1993,7 +2028,8 @@ i40e_dev_link_update(struct rte_eth_dev *dev,
 
        do {
                /* Get link status information from hardware */
-               status = i40e_aq_get_link_info(hw, false, &link_status, NULL);
+               status = i40e_aq_get_link_info(hw, enable_lse,
+                                               &link_status, NULL);
                if (status != I40E_SUCCESS) {
                        link.link_speed = ETH_SPEED_NUM_100M;
                        link.link_duplex = ETH_LINK_FULL_DUPLEX;
@@ -2028,6 +2064,9 @@ i40e_dev_link_update(struct rte_eth_dev *dev,
        case I40E_LINK_SPEED_20GB:
                link.link_speed = ETH_SPEED_NUM_20G;
                break;
+       case I40E_LINK_SPEED_25GB:
+               link.link_speed = ETH_SPEED_NUM_25G;
+               break;
        case I40E_LINK_SPEED_40GB:
                link.link_speed = ETH_SPEED_NUM_40G;
                break;
@@ -2556,13 +2595,15 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_vsi *vsi = pf->main_vsi;
+       struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
 
+       dev_info->pci_dev = pci_dev;
        dev_info->max_rx_queues = vsi->nb_qps;
        dev_info->max_tx_queues = vsi->nb_qps;
        dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
        dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
        dev_info->max_mac_addrs = vsi->max_macaddrs;
-       dev_info->max_vfs = dev->pci_dev->max_vfs;
+       dev_info->max_vfs = pci_dev->max_vfs;
        dev_info->rx_offload_capa =
                DEV_RX_OFFLOAD_VLAN_STRIP |
                DEV_RX_OFFLOAD_QINQ_STRIP |
@@ -2577,7 +2618,11 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                DEV_TX_OFFLOAD_TCP_CKSUM |
                DEV_TX_OFFLOAD_SCTP_CKSUM |
                DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-               DEV_TX_OFFLOAD_TCP_TSO;
+               DEV_TX_OFFLOAD_TCP_TSO |
+               DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+               DEV_TX_OFFLOAD_GRE_TNL_TSO |
+               DEV_TX_OFFLOAD_IPIP_TNL_TSO |
+               DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
        dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
                                                sizeof(uint32_t);
        dev_info->reta_size = pf->hash_lut_size;
@@ -2630,6 +2675,9 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types))
                /* For XL710 */
                dev_info->speed_capa = ETH_LINK_SPEED_40G;
+       else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types))
+               /* For XXV710 */
+               dev_info->speed_capa = ETH_LINK_SPEED_25G;
        else
                /* For X710 */
                dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
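
These i40e_dev_info_get() hunks are where the patch title lands for this driver: dev_info->pci_dev is now filled by the PMD itself rather than by the generic ethdev layer, and the advertised capabilities grow tunnel TSO Tx offloads plus a 25G speed capability for XXV710 parts. Applications query this exactly as before; a minimal usage sketch, assuming the rte_eth_dev_info_get() prototype and uint8_t port ids of this DPDK era:

#include <stdio.h>
#include <string.h>
#include <rte_ethdev.h>

/* Sketch: after rte_eal_init(), dump the fields this patch starts
 * filling for an i40e port. */
static void
dump_port_caps(uint8_t port_id)
{
	struct rte_eth_dev_info info;

	memset(&info, 0, sizeof(info));
	rte_eth_dev_info_get(port_id, &info);

	if (info.pci_dev != NULL)
		printf("port %u: PCI %04x:%04x, max_vfs %u\n", port_id,
		       info.pci_dev->id.vendor_id,
		       info.pci_dev->id.device_id,
		       info.pci_dev->max_vfs);
	if (info.speed_capa & ETH_LINK_SPEED_25G)
		printf("port %u: 25G capable (XXV710)\n", port_id);
	if (info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO)
		printf("port %u: VXLAN tunnel TSO offload\n", port_id);
}
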
@@ -3457,9 +3505,10 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
 {
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+       struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
        uint16_t qp_count = 0, vsi_count = 0;
 
-       if (dev->pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
+       if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
                PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
                return -EINVAL;
        }
@@ -3500,10 +3549,10 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
 
        /* VF queue/VSI allocation */
        pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
-       if (hw->func_caps.sr_iov_1_1 && dev->pci_dev->max_vfs) {
+       if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) {
                pf->flags |= I40E_FLAG_SRIOV;
                pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
-               pf->vf_num = dev->pci_dev->max_vfs;
+               pf->vf_num = pci_dev->max_vfs;
                PMD_DRV_LOG(DEBUG, "%u VF VSIs, %u queues per VF VSI, "
                            "in total %u queues", pf->vf_num, pf->vf_nb_qps,
                            pf->vf_nb_qps * pf->vf_num);
@@ -4098,11 +4147,13 @@ i40e_vsi_release(struct i40e_vsi *vsi)
        void *temp;
        int ret;
        struct i40e_mac_filter *f;
-       uint16_t user_param = vsi->user_param;
+       uint16_t user_param;
 
        if (!vsi)
                return I40E_SUCCESS;
 
+       user_param = vsi->user_param;
+
        pf = I40E_VSI_TO_PF(vsi);
        hw = I40E_VSI_TO_HW(vsi);
 
@@ -5407,6 +5458,24 @@ i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
        }
 }
 
+static void
+i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev)
+{
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct i40e_virtchnl_pf_event event;
+       int i;
+
+       event.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
+       event.event_data.link_event.link_status =
+               dev->data->dev_link.link_status;
+       event.event_data.link_event.link_speed =
+               (enum i40e_aq_link_speed)dev->data->dev_link.link_speed;
+
+       for (i = 0; i < pf->vf_num; i++)
+               i40e_pf_host_send_msg_to_vf(&pf->vfs[i], I40E_VIRTCHNL_OP_EVENT,
+                               I40E_SUCCESS, (uint8_t *)&event, sizeof(event));
+}
+
 static void
 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
 {
@@ -5443,6 +5512,14 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
                                        info.msg_buf,
                                        info.msg_len);
                        break;
+               case i40e_aqc_opc_get_link_status:
+                       ret = i40e_dev_link_update(dev, 0);
+                       if (!ret) {
+                               i40e_notify_all_vfs_link_status(dev);
+                               _rte_eth_dev_callback_process(dev,
+                                       RTE_ETH_EVENT_INTR_LSC, NULL);
+                       }
+                       break;
                default:
                        PMD_DRV_LOG(ERR, "Request %u is not supported yet",
                                    opcode);
@@ -5452,57 +5529,6 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
        rte_free(info.msg_buf);
 }
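
Link status change reporting now rides on the admin queue instead of the ICR0 LSC bit (removed further down): when dev_conf.intr_conf.lsc is set, i40e_dev_link_update() queries the link with enable_lse, which arms firmware Link Status Events; firmware then posts i40e_aqc_opc_get_link_status messages that the new case above turns into a link refresh, a broadcast to the VFs via i40e_notify_all_vfs_link_status(), and an RTE_ETH_EVENT_INTR_LSC callback. A toy, self-contained model of that arm/report/re-arm loop; the demo_* names and the opcode value are invented for illustration:

#include <stdbool.h>
#include <stdio.h>

#define DEMO_OPC_GET_LINK_STATUS 0x0607	/* illustrative value only */

static bool demo_link_up;

/* Querying link state with LSE enabled also re-arms firmware events,
 * mirroring what i40e_dev_link_update(dev, 0) does when lsc is set. */
static void
demo_link_update(bool enable_lse)
{
	printf("link %s (LSE %s)\n", demo_link_up ? "up" : "down",
	       enable_lse ? "re-armed" : "off");
}

static void
demo_handle_aq_event(unsigned int opcode)
{
	if (opcode != DEMO_OPC_GET_LINK_STATUS)
		return;
	demo_link_update(true);
	/* ...then notify the VFs and raise the LSC callback... */
}

int main(void)
{
	demo_link_update(true);				/* dev_start: arm LSE */
	demo_link_up = true;				/* link comes up */
	demo_handle_aq_event(DEMO_OPC_GET_LINK_STATUS);	/* adminq event */
	return 0;
}
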
 
-/*
- * Interrupt handler is registered as the alarm callback for handling LSC
- * interrupt in a definite of time, in order to wait the NIC into a stable
- * state. Currently it waits 1 sec in i40e for the link up interrupt, and
- * no need for link down interrupt.
- */
-static void
-i40e_dev_interrupt_delayed_handler(void *param)
-{
-       struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       uint32_t icr0;
-
-       /* read interrupt causes again */
-       icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
-
-#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
-       if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
-               PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error\n");
-       if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
-               PMD_DRV_LOG(ERR, "ICR0: malicious programming detected\n");
-       if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
-               PMD_DRV_LOG(INFO, "ICR0: global reset requested\n");
-       if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
-               PMD_DRV_LOG(INFO, "ICR0: PCI exception\n activated\n");
-       if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
-               PMD_DRV_LOG(INFO, "ICR0: a change in the storm control "
-                                                               "state\n");
-       if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
-               PMD_DRV_LOG(ERR, "ICR0: HMC error\n");
-       if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
-               PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error\n");
-#endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
-
-       if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
-               PMD_DRV_LOG(INFO, "INT:VF reset detected\n");
-               i40e_dev_handle_vfr_event(dev);
-       }
-       if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
-               PMD_DRV_LOG(INFO, "INT:ADMINQ event\n");
-               i40e_dev_handle_aq_msg(dev);
-       }
-
-       /* handle the link up interrupt in an alarm callback */
-       i40e_dev_link_update(dev, 0);
-       _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
-
-       i40e_pf_enable_irq0(hw);
-       rte_intr_enable(&(dev->pci_dev->intr_handle));
-}
-
 /**
  * Interrupt handler triggered by NIC  for handling
  * specific interrupt.
@@ -5516,7 +5542,7 @@ i40e_dev_interrupt_delayed_handler(void *param)
  *  void
  */
 static void
-i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
+i40e_dev_interrupt_handler(struct rte_intr_handle *intr_handle,
                           void *param)
 {
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
@@ -5560,34 +5586,10 @@ i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
                i40e_dev_handle_aq_msg(dev);
        }
 
-       /* Link Status Change interrupt */
-       if (icr0 & I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK) {
-#define I40E_US_PER_SECOND 1000000
-               struct rte_eth_link link;
-
-               PMD_DRV_LOG(INFO, "ICR0: link status changed\n");
-               memset(&link, 0, sizeof(link));
-               rte_i40e_dev_atomic_read_link_status(dev, &link);
-               i40e_dev_link_update(dev, 0);
-
-               /*
-                * For link up interrupt, it needs to wait 1 second to let the
-                * hardware be a stable state. Otherwise several consecutive
-                * interrupts can be observed.
-                * For link down interrupt, no need to wait.
-                */
-               if (!link.link_status && rte_eal_alarm_set(I40E_US_PER_SECOND,
-                       i40e_dev_interrupt_delayed_handler, (void *)dev) >= 0)
-                       return;
-               else
-                       _rte_eth_dev_callback_process(dev,
-                               RTE_ETH_EVENT_INTR_LSC);
-       }
-
 done:
        /* Enable interrupt */
        i40e_pf_enable_irq0(hw);
-       rte_intr_enable(&(dev->pci_dev->intr_handle));
+       rte_intr_enable(intr_handle);
 }
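
With the ICR0 LSC branch and the one-second alarm-based delayed handler removed, a link change reaches the application only through the _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL) call made from the adminq path above, and only when dev_conf.intr_conf.lsc is enabled. On the application side the event is consumed as before; a minimal sketch, assuming the callback prototype of this DPDK era (void return, uint8_t port id):

#include <stdio.h>
#include <rte_common.h>
#include <rte_ethdev.h>

/* Sketch: subscribe to link-status-change events for one port.
 * The rte_eth_dev_cb_fn prototype changes in later DPDK releases;
 * this matches the era of the patch. */
static void
lsc_event_cb(uint8_t port_id, enum rte_eth_event_type type,
	     void *param __rte_unused)
{
	struct rte_eth_link link;

	if (type != RTE_ETH_EVENT_INTR_LSC)
		return;
	rte_eth_link_get_nowait(port_id, &link);
	printf("port %u link %s\n", port_id,
	       link.link_status ? "up" : "down");
}

/* Call after rte_eal_init(), with dev_conf.intr_conf.lsc = 1. */
void
register_lsc_cb(uint8_t port_id)
{
	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
				      lsc_event_cb, NULL);
}
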
 
 static int
@@ -6145,7 +6147,7 @@ DONE:
 
 /* Configure hash enable flags for RSS */
 uint64_t
-i40e_config_hena(uint64_t flags)
+i40e_config_hena(uint64_t flags, enum i40e_mac_type type)
 {
        uint64_t hena = 0;
 
@@ -6154,42 +6156,42 @@ i40e_config_hena(uint64_t flags)
 
        if (flags & ETH_RSS_FRAG_IPV4)
                hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4;
-       if (flags & ETH_RSS_NONFRAG_IPV4_TCP)
-#ifdef X722_SUPPORT
-               hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
-                       (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
-#else
-               hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
-#endif
-       if (flags & ETH_RSS_NONFRAG_IPV4_UDP)
-#ifdef X722_SUPPORT
-               hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
-                       (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
-                       (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
-#else
-               hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
-#endif
+       if (flags & ETH_RSS_NONFRAG_IPV4_TCP) {
+               if (type == I40E_MAC_X722) {
+                       hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
+                        (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
+               } else
+                       hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+       }
+       if (flags & ETH_RSS_NONFRAG_IPV4_UDP) {
+               if (type == I40E_MAC_X722) {
+                       hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+                        (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
+                        (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
+               } else
+                       hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+       }
        if (flags & ETH_RSS_NONFRAG_IPV4_SCTP)
                hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
        if (flags & ETH_RSS_NONFRAG_IPV4_OTHER)
                hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
        if (flags & ETH_RSS_FRAG_IPV6)
                hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6;
-       if (flags & ETH_RSS_NONFRAG_IPV6_TCP)
-#ifdef X722_SUPPORT
-               hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
-                       (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
-#else
-               hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
-#endif
-       if (flags & ETH_RSS_NONFRAG_IPV6_UDP)
-#ifdef X722_SUPPORT
-               hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
-                       (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
-                       (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
-#else
-               hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
-#endif
+       if (flags & ETH_RSS_NONFRAG_IPV6_TCP) {
+               if (type == I40E_MAC_X722) {
+                       hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
+                        (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
+               } else
+                       hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
+       }
+       if (flags & ETH_RSS_NONFRAG_IPV6_UDP) {
+               if (type == I40E_MAC_X722) {
+                       hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+                        (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
+                        (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
+               } else
+                       hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+       }
        if (flags & ETH_RSS_NONFRAG_IPV6_SCTP)
                hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
        if (flags & ETH_RSS_NONFRAG_IPV6_OTHER)
@@ -6263,7 +6265,10 @@ i40e_pf_disable_rss(struct i40e_pf *pf)
 
        hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
        hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
-       hena &= ~I40E_RSS_HENA_ALL;
+       if (hw->mac.type == I40E_MAC_X722)
+               hena &= ~I40E_RSS_HENA_ALL_X722;
+       else
+               hena &= ~I40E_RSS_HENA_ALL;
        i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
        i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
        I40E_WRITE_FLUSH(hw);
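
X722 support moves from the X722_SUPPORT compile-time switch to a run-time check on hw->mac.type: i40e_config_hena() now takes the MAC type and adds the extra X722-only packet classifier types (TCP SYN-no-ACK, unicast/multicast UDP), and the disable/update paths clear I40E_RSS_HENA_ALL_X722 rather than I40E_RSS_HENA_ALL on that MAC. The pattern is simply "base bit, plus extra bits when the MAC is X722"; a self-contained sketch with invented bit positions (the real I40E_FILTER_PCTYPE_* numbers differ):

#include <stdint.h>
#include <stdio.h>

/* Invented stand-ins for the real pctype bit positions. */
enum demo_mac_type { DEMO_MAC_XL710, DEMO_MAC_X722 };
#define DEMO_PCTYPE_IPV4_TCP             31
#define DEMO_PCTYPE_IPV4_TCP_SYN_NO_ACK  28	/* extra pctype on X722 */

static uint64_t
demo_config_hena(int want_ipv4_tcp, enum demo_mac_type type)
{
	uint64_t hena = 0;

	if (want_ipv4_tcp) {
		hena |= 1ULL << DEMO_PCTYPE_IPV4_TCP;
		if (type == DEMO_MAC_X722)
			hena |= 1ULL << DEMO_PCTYPE_IPV4_TCP_SYN_NO_ACK;
	}
	return hena;
}

int main(void)
{
	printf("X710: 0x%016llx\n",
	       (unsigned long long)demo_config_hena(1, DEMO_MAC_XL710));
	printf("X722: 0x%016llx\n",
	       (unsigned long long)demo_config_hena(1, DEMO_MAC_X722));
	return 0;
}
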
@@ -6350,8 +6355,11 @@ i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
        rss_hf = rss_conf->rss_hf;
        hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
        hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
-       hena &= ~I40E_RSS_HENA_ALL;
-       hena |= i40e_config_hena(rss_hf);
+       if (hw->mac.type == I40E_MAC_X722)
+               hena &= ~I40E_RSS_HENA_ALL_X722;
+       else
+               hena &= ~I40E_RSS_HENA_ALL;
+       hena |= i40e_config_hena(rss_hf, hw->mac.type);
        i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
        i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
        I40E_WRITE_FLUSH(hw);
@@ -6370,7 +6378,9 @@ i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
 
        hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
        hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
-       if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */
+       if (!(hena & ((hw->mac.type == I40E_MAC_X722)
+                ? I40E_RSS_HENA_ALL_X722
+                : I40E_RSS_HENA_ALL))) { /* RSS disabled */
                if (rss_hf != 0) /* Enable RSS */
                        return -EINVAL;
                return 0; /* Nothing to do */
@@ -7541,25 +7551,23 @@ i40e_parse_input_set(uint64_t *inset,
  * and vice versa
  */
 static uint64_t
-i40e_translate_input_set_reg(uint64_t input)
+i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
 {
        uint64_t val = 0;
        uint16_t i;
 
-       static const struct {
+       struct inset_map {
                uint64_t inset;
                uint64_t inset_reg;
-       } inset_map[] = {
+       };
+
+       static const struct inset_map inset_map_common[] = {
                {I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC},
                {I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC},
                {I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN},
                {I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN},
                {I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE},
-               {I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
-               {I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
                {I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS},
-               {I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
-               {I40E_INSET_IPV4_TTL, I40E_REG_INSET_L3_IP4_TTL},
                {I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6},
                {I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6},
                {I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC},
@@ -7588,13 +7596,40 @@ i40e_translate_input_set_reg(uint64_t input)
                {I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8},
        };
 
+       /* Some registers are mapped differently on X722 */
+       static const struct inset_map inset_map_diff_x722[] = {
+               {I40E_INSET_IPV4_SRC, I40E_X722_REG_INSET_L3_SRC_IP4},
+               {I40E_INSET_IPV4_DST, I40E_X722_REG_INSET_L3_DST_IP4},
+               {I40E_INSET_IPV4_PROTO, I40E_X722_REG_INSET_L3_IP4_PROTO},
+               {I40E_INSET_IPV4_TTL, I40E_X722_REG_INSET_L3_IP4_TTL},
+       };
+
+       static const struct inset_map inset_map_diff_not_x722[] = {
+               {I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
+               {I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
+               {I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
+               {I40E_INSET_IPV4_TTL, I40E_REG_INSET_L3_IP4_TTL},
+       };
+
        if (input == 0)
                return val;
 
        /* Translate input set to register aware inset */
-       for (i = 0; i < RTE_DIM(inset_map); i++) {
-               if (input & inset_map[i].inset)
-                       val |= inset_map[i].inset_reg;
+       if (type == I40E_MAC_X722) {
+               for (i = 0; i < RTE_DIM(inset_map_diff_x722); i++) {
+                       if (input & inset_map_diff_x722[i].inset)
+                               val |= inset_map_diff_x722[i].inset_reg;
+               }
+       } else {
+               for (i = 0; i < RTE_DIM(inset_map_diff_not_x722); i++) {
+                       if (input & inset_map_diff_not_x722[i].inset)
+                               val |= inset_map_diff_not_x722[i].inset_reg;
+               }
+       }
+
+       for (i = 0; i < RTE_DIM(inset_map_common); i++) {
+               if (input & inset_map_common[i].inset)
+                       val |= inset_map_common[i].inset_reg;
        }
 
        return val;
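
i40e_translate_input_set_reg() now takes the MAC type because the register encodings of the IPv4 source/destination address, protocol and TTL input-set fields differ on X722, so the single lookup table is split into a common part plus two small device-specific parts. A trimmed, self-contained sketch of the same table-driven translation; the register values are the #defines added at the top of this patch, while the abstract DEMO_INSET_* flags are invented for the sketch:

#include <stdint.h>
#include <stdio.h>

#define DEMO_DIM(a) (sizeof(a) / sizeof((a)[0]))

/* Abstract input-set flags: invented values, for the sketch only. */
#define DEMO_INSET_IPV4_SRC  0x1ULL
#define DEMO_INSET_IPV4_DST  0x2ULL

/* Register-level encodings, taken from the #defines in this patch. */
#define REG_INSET_L3_SRC_IP4       0x0001800000000000ULL
#define REG_INSET_L3_DST_IP4       0x0000001800000000ULL
#define X722_REG_INSET_L3_SRC_IP4  0x0006000000000000ULL
#define X722_REG_INSET_L3_DST_IP4  0x0000060000000000ULL

struct demo_inset_map { uint64_t inset; uint64_t inset_reg; };

static const struct demo_inset_map map_x722[] = {
	{ DEMO_INSET_IPV4_SRC, X722_REG_INSET_L3_SRC_IP4 },
	{ DEMO_INSET_IPV4_DST, X722_REG_INSET_L3_DST_IP4 },
};
static const struct demo_inset_map map_default[] = {
	{ DEMO_INSET_IPV4_SRC, REG_INSET_L3_SRC_IP4 },
	{ DEMO_INSET_IPV4_DST, REG_INSET_L3_DST_IP4 },
};

static uint64_t
demo_translate(uint64_t input, int is_x722)
{
	const struct demo_inset_map *m = is_x722 ? map_x722 : map_default;
	size_t i, n = is_x722 ? DEMO_DIM(map_x722) : DEMO_DIM(map_default);
	uint64_t val = 0;

	for (i = 0; i < n; i++)
		if (input & m[i].inset)
			val |= m[i].inset_reg;
	/* ...the real function then ORs in a common table as well... */
	return val;
}

int main(void)
{
	uint64_t in = DEMO_INSET_IPV4_SRC | DEMO_INSET_IPV4_DST;

	printf("default: 0x%016llx\n", (unsigned long long)demo_translate(in, 0));
	printf("x722:    0x%016llx\n", (unsigned long long)demo_translate(in, 1));
	return 0;
}
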
@@ -7671,15 +7706,22 @@ i40e_filter_input_set_init(struct i40e_pf *pf)
 
        for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
             pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
-               if (!I40E_VALID_PCTYPE(pctype))
-                       continue;
+               if (hw->mac.type == I40E_MAC_X722) {
+                       if (!I40E_VALID_PCTYPE_X722(pctype))
+                               continue;
+               } else {
+                       if (!I40E_VALID_PCTYPE(pctype))
+                               continue;
+               }
+
                input_set = i40e_get_default_input_set(pctype);
 
                num = i40e_generate_inset_mask_reg(input_set, mask_reg,
                                                   I40E_INSET_MASK_NUM_REG);
                if (num < 0)
                        return;
-               inset_reg = i40e_translate_input_set_reg(input_set);
+               inset_reg = i40e_translate_input_set_reg(hw->mac.type,
+                                       input_set);
 
                i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
                                      (uint32_t)(inset_reg & UINT32_MAX));
@@ -7738,14 +7780,13 @@ i40e_hash_filter_inset_select(struct i40e_hw *hw,
                return -EINVAL;
        }
 
-#ifdef X722_SUPPORT
-       /* get translated pctype value in fd pctype register */
-       pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(hw,
-               I40E_GLQF_FD_PCTYPES((int)i40e_flowtype_to_pctype(
-               conf->flow_type)));
-#else
-       pctype = i40e_flowtype_to_pctype(conf->flow_type);
-#endif
+       if (hw->mac.type == I40E_MAC_X722) {
+               /* get translated pctype value in fd pctype register */
+               pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(hw,
+                       I40E_GLQF_FD_PCTYPES((int)i40e_flowtype_to_pctype(
+                       conf->flow_type)));
+       } else
+               pctype = i40e_flowtype_to_pctype(conf->flow_type);
 
        ret = i40e_parse_input_set(&input_set, pctype, conf->field,
                                   conf->inset_size);
@@ -7770,7 +7811,7 @@ i40e_hash_filter_inset_select(struct i40e_hw *hw,
        if (num < 0)
                return -EINVAL;
 
-       inset_reg |= i40e_translate_input_set_reg(input_set);
+       inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
 
        i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
                              (uint32_t)(inset_reg & UINT32_MAX));
@@ -7848,7 +7889,7 @@ i40e_fdir_filter_inset_select(struct i40e_pf *pf,
        if (num < 0)
                return -EINVAL;
 
-       inset_reg |= i40e_translate_input_set_reg(input_set);
+       inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
 
        i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
                              (uint32_t)(inset_reg & UINT32_MAX));
@@ -8099,10 +8140,11 @@ i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
 static void
 i40e_enable_extended_tag(struct rte_eth_dev *dev)
 {
+       struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
        uint32_t buf = 0;
        int ret;
 
-       ret = rte_eal_pci_read_config(dev->pci_dev, &buf, sizeof(buf),
+       ret = rte_eal_pci_read_config(pci_dev, &buf, sizeof(buf),
                                      PCI_DEV_CAP_REG);
        if (ret < 0) {
                PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
@@ -8115,7 +8157,7 @@ i40e_enable_extended_tag(struct rte_eth_dev *dev)
        }
 
        buf = 0;
-       ret = rte_eal_pci_read_config(dev->pci_dev, &buf, sizeof(buf),
+       ret = rte_eal_pci_read_config(pci_dev, &buf, sizeof(buf),
                                      PCI_DEV_CTRL_REG);
        if (ret < 0) {
                PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
@@ -8127,7 +8169,7 @@ i40e_enable_extended_tag(struct rte_eth_dev *dev)
                return;
        }
        buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
-       ret = rte_eal_pci_write_config(dev->pci_dev, &buf, sizeof(buf),
+       ret = rte_eal_pci_write_config(pci_dev, &buf, sizeof(buf),
                                       PCI_DEV_CTRL_REG);
        if (ret < 0) {
                PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
@@ -8290,7 +8332,8 @@ i40e_configure_registers(struct i40e_hw *hw)
 
        for (i = 0; i < RTE_DIM(reg_table); i++) {
                if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
-                       if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) /* For XL710 */
+                       if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types) || /* For XL710 */
+                           I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) /* For XXV710 */
                                reg_table[i].val =
                                        I40E_GL_SWR_PM_UP_THR_SF_VALUE;
                        else /* For X710 */
@@ -9365,17 +9408,13 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
         * LLDP MIB change event.
         */
        if (sw_dcb == TRUE) {
-               ret = i40e_aq_stop_lldp(hw, TRUE, NULL);
-               if (ret != I40E_SUCCESS)
-                       PMD_INIT_LOG(DEBUG, "Failed to stop lldp");
-
                ret = i40e_init_dcb(hw);
-               /* if sw_dcb, lldp agent is stopped, the return from
+               /* If lldp agent is stopped, the return value from
                 * i40e_init_dcb we expect is failure with I40E_AQ_RC_EPERM
-                * adminq status.
+                * adminq status. Otherwise, it should return success.
                 */
-               if (ret != I40E_SUCCESS &&
-                   hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
+               if ((ret == I40E_SUCCESS) || (ret != I40E_SUCCESS &&
+                   hw->aq.asq_last_status == I40E_AQ_RC_EPERM)) {
                        memset(&hw->local_dcbx_config, 0,
                                sizeof(struct i40e_dcbx_config));
                        /* set dcb default configuration */
@@ -9404,8 +9443,8 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
                                return -ENOSYS;
                        }
                } else {
-                       PMD_INIT_LOG(ERR, "DCBX configuration failed, err = %d,"
-                                         " aq_err = %d.", ret,
+                       PMD_INIT_LOG(ERR, "DCB initialization in FW fails,"
+                                         " err = %d, aq_err = %d.", ret,
                                          hw->aq.asq_last_status);
                        return -ENOTSUP;
                }
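
Because the driver no longer stops the LLDP agent at init (the i40e_aq_stop_lldp() calls are dropped both here and in dev_uninit above), i40e_init_dcb() may now legitimately succeed, so the accepted outcomes become "success, or failure with I40E_AQ_RC_EPERM from the adminq". Note that the "ret != I40E_SUCCESS" term in the new condition is redundant: (a) || (!a && b) reduces to (a) || (b). A throwaway check of that equivalence:

#include <assert.h>

int main(void)
{
	int ok, eperm;

	/* (ok) || (!ok && eperm) is the same predicate as (ok) || (eperm) */
	for (ok = 0; ok <= 1; ok++)
		for (eperm = 0; eperm <= 1; eperm++)
			assert((ok || (!ok && eperm)) == (ok || eperm));
	return 0;
}
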
@@ -9533,7 +9572,8 @@ i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
 static int
 i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
-       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+       struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint16_t interval =
                i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
@@ -9558,7 +9598,7 @@ i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
                                I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
 
        I40E_WRITE_FLUSH(hw);
-       rte_intr_enable(&dev->pci_dev->intr_handle);
+       rte_intr_enable(intr_handle);
 
        return 0;
 }
@@ -9566,7 +9606,8 @@ i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 static int
 i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
-       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+       struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint16_t msix_intr;