net: add rte prefix to ether structures
[dpdk.git] / drivers / net / i40e / i40e_ethdev_vf.c
index b9dea2e..0b05ddb 100644
@@ -44,6 +44,8 @@
 #define I40EVF_BUSY_WAIT_COUNT 50
 #define MAX_RESET_WAIT_CNT     20
 
+#define I40EVF_ALARM_INTERVAL 50000 /* us */
+
 struct i40evf_arq_msg_info {
        enum virtchnl_ops ops;
        enum i40e_status_code result;
@@ -104,7 +106,7 @@ static int i40evf_dev_tx_queue_start(struct rte_eth_dev *dev,
 static int i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev,
                                    uint16_t tx_queue_id);
 static int i40evf_add_mac_addr(struct rte_eth_dev *dev,
-                              struct ether_addr *addr,
+                              struct rte_ether_addr *addr,
                               uint32_t index,
                               uint32_t pool);
 static void i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index);
@@ -121,7 +123,7 @@ static int i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
                                        struct rte_eth_rss_conf *rss_conf);
 static int i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
 static int i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
-                                       struct ether_addr *mac_addr);
+                                       struct rte_ether_addr *mac_addr);
 static int
 i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
 static int
@@ -132,10 +134,11 @@ static void i40evf_handle_pf_event(struct rte_eth_dev *dev,
 
 static int
 i40evf_add_del_mc_addr_list(struct rte_eth_dev *dev,
-                       struct ether_addr *mc_addr_set,
+                       struct rte_ether_addr *mc_addr_set,
                        uint32_t nb_mc_addr, bool add);
 static int
-i40evf_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addr_set,
+i40evf_set_mc_addr_list(struct rte_eth_dev *dev,
+                       struct rte_ether_addr *mc_addr_set,
                        uint32_t nb_mc_addr);
 
 /* Default hash key buffer for RSS */
@@ -357,6 +360,28 @@ i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
                } while (i++ < MAX_TRY_TIMES);
                _clear_cmd(vf);
                break;
+       case VIRTCHNL_OP_REQUEST_QUEUES:
+               /**
+                * Ignore the async reply and wait only for the system event:
+                * vf_reset is set true on VIRTCHNL_EVENT_RESET_IMPENDING;
+                * otherwise the queue request has failed.
+                */
+               err = -1;
+               do {
+                       ret = i40evf_read_pfmsg(dev, &info);
+                       vf->cmd_retval = info.result;
+                       if (ret == I40EVF_MSG_SYS && vf->vf_reset) {
+                               err = 0;
+                               break;
+                       } else if (ret == I40EVF_MSG_ERR ||
+                                          ret == I40EVF_MSG_CMD) {
+                               break;
+                       }
+                       rte_delay_ms(ASQ_DELAY_MS);
+                       /* No msg, or sys event without reset: keep polling */
+               } while (i++ < MAX_TRY_TIMES);
+               _clear_cmd(vf);
+               break;
 
        default:
                /* for other adminq in running time, waiting the cmd done flag */
@@ -752,7 +777,7 @@ i40evf_stop_queues(struct rte_eth_dev *dev)
 
 static int
 i40evf_add_mac_addr(struct rte_eth_dev *dev,
-                   struct ether_addr *addr,
+                   struct rte_ether_addr *addr,
                    __rte_unused uint32_t index,
                    __rte_unused uint32_t pool)
 {
@@ -794,7 +819,7 @@ i40evf_add_mac_addr(struct rte_eth_dev *dev,
 
 static void
 i40evf_del_mac_addr_by_addr(struct rte_eth_dev *dev,
-                           struct ether_addr *addr)
+                           struct rte_ether_addr *addr)
 {
        struct virtchnl_ether_addr_list *list;
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -835,7 +860,7 @@ static void
 i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
 {
        struct rte_eth_dev_data *data = dev->data;
-       struct ether_addr *addr;
+       struct rte_ether_addr *addr;
 
        addr = &data->mac_addrs[index];
 
@@ -1010,6 +1035,28 @@ i40evf_add_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
        return err;
 }
 
+static int
+i40evf_request_queues(struct rte_eth_dev *dev, uint16_t num)
+{
+       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+       struct virtchnl_vf_res_request vfres;
+       struct vf_cmd_info args;
+       int err;
+
+       vfres.num_queue_pairs = num;
+
+       args.ops = VIRTCHNL_OP_REQUEST_QUEUES;
+       args.in_args = (u8 *)&vfres;
+       args.in_args_size = sizeof(vfres);
+       args.out_buffer = vf->aq_resp;
+       args.out_size = I40E_AQ_BUF_SZ;
+       err = i40evf_execute_vf_cmd(dev, &args);
+       if (err)
+               PMD_DRV_LOG(ERR, "fail to execute command OP_REQUEST_QUEUES");
+
+       return err;
+}
+
 static int
 i40evf_del_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
 {
@@ -1078,9 +1125,11 @@ i40evf_enable_irq0(struct i40e_hw *hw)
 }
 
 static int
-i40evf_check_vf_reset_done(struct i40e_hw *hw)
+i40evf_check_vf_reset_done(struct rte_eth_dev *dev)
 {
        int i, reset;
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 
        for (i = 0; i < MAX_RESET_WAIT_CNT; i++) {
                reset = I40E_READ_REG(hw, I40E_VFGEN_RSTAT) &
@@ -1095,12 +1144,16 @@ i40evf_check_vf_reset_done(struct i40e_hw *hw)
        if (i >= MAX_RESET_WAIT_CNT)
                return -1;
 
+       vf->vf_reset = false;
+       vf->pend_msg &= ~PFMSG_RESET_IMPENDING;
+
        return 0;
 }
 static int
-i40evf_reset_vf(struct i40e_hw *hw)
+i40evf_reset_vf(struct rte_eth_dev *dev)
 {
        int ret;
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        if (i40e_vf_reset(hw) != I40E_SUCCESS) {
                PMD_INIT_LOG(ERR, "Reset VF NIC failed");
@@ -1117,7 +1170,7 @@ i40evf_reset_vf(struct i40e_hw *hw)
          */
        rte_delay_ms(200);
 
-       ret = i40evf_check_vf_reset_done(hw);
+       ret = i40evf_check_vf_reset_done(dev);
        if (ret) {
                PMD_INIT_LOG(ERR, "VF is still resetting");
                return ret;
@@ -1133,7 +1186,7 @@ i40evf_init_vf(struct rte_eth_dev *dev)
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        uint16_t interval =
-               i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 0, 0);
+               i40e_calc_itr_interval(0, 0);
 
        vf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        vf->dev_data = dev->data;
@@ -1143,7 +1196,7 @@ i40evf_init_vf(struct rte_eth_dev *dev)
                goto err;
        }
 
-       err = i40evf_check_vf_reset_done(hw);
+       err = i40evf_check_vf_reset_done(dev);
        if (err)
                goto err;
 
@@ -1155,7 +1208,7 @@ i40evf_init_vf(struct rte_eth_dev *dev)
        }
 
        /* Reset VF and wait until it's complete */
-       if (i40evf_reset_vf(hw)) {
+       if (i40evf_reset_vf(dev)) {
                PMD_INIT_LOG(ERR, "reset NIC failed");
                goto err_aq;
        }
@@ -1221,7 +1274,7 @@ i40evf_init_vf(struct rte_eth_dev *dev)
        vf->vsi.adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 
        /* Store the MAC address configured by host, or generate random one */
-       if (is_valid_assigned_ether_addr((struct ether_addr *)hw->mac.addr))
+       if (is_valid_assigned_ether_addr((struct rte_ether_addr *)hw->mac.addr))
                vf->flags |= I40E_FLAG_VF_MAC_BY_PF;
        else
                eth_random_addr(hw->mac.addr); /* Generate a random one */
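
Note on the rename this series applies (struct ether_addr becoming struct rte_ether_addr): application code that inspects a VF's MAC address follows the same spelling. A minimal, hypothetical sketch using only public ethdev APIs; print_port_mac and port_id are illustrative names, not part of this patch:

    #include <stdio.h>
    #include <rte_ethdev.h>
    #include <rte_ether.h>

    /* Hypothetical helper: read a port's MAC using the renamed structure. */
    static void
    print_port_mac(uint16_t port_id)
    {
            struct rte_ether_addr mac; /* formerly struct ether_addr */

            rte_eth_macaddr_get(port_id, &mac);
            printf("port %u MAC: %02x:%02x:%02x:%02x:%02x:%02x\n", port_id,
                   mac.addr_bytes[0], mac.addr_bytes[1], mac.addr_bytes[2],
                   mac.addr_bytes[3], mac.addr_bytes[4], mac.addr_bytes[5]);
    }
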
@@ -1254,7 +1307,7 @@ i40evf_uninit_vf(struct rte_eth_dev *dev)
 
        PMD_INIT_FUNC_TRACE();
 
-       if (hw->adapter_stopped == 0)
+       if (hw->adapter_closed == 0)
                i40evf_dev_close(dev);
        rte_free(vf->vf_res);
        vf->vf_res = NULL;
@@ -1370,7 +1423,7 @@ i40evf_handle_aq_msg(struct rte_eth_dev *dev)
  *  void
  */
 static void
-i40evf_dev_interrupt_handler(void *param)
+i40evf_dev_alarm_handler(void *param)
 {
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -1382,10 +1435,8 @@ i40evf_dev_interrupt_handler(void *param)
        icr0 = I40E_READ_REG(hw, I40E_VFINT_ICR01);
 
        /* No interrupt event indicated */
-       if (!(icr0 & I40E_VFINT_ICR01_INTEVENT_MASK)) {
-               PMD_DRV_LOG(DEBUG, "No interrupt event, nothing to do");
+       if (!(icr0 & I40E_VFINT_ICR01_INTEVENT_MASK))
                goto done;
-       }
 
        if (icr0 & I40E_VFINT_ICR01_ADMINQ_MASK) {
                PMD_DRV_LOG(DEBUG, "ICR01_ADMINQ is reported");
@@ -1399,6 +1450,8 @@ i40evf_dev_interrupt_handler(void *param)
 
 done:
        i40evf_enable_irq0(hw);
+       rte_eal_alarm_set(I40EVF_ALARM_INTERVAL,
+                         i40evf_dev_alarm_handler, dev);
 }
 
 static int
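
The VF no longer relies on the PCI misc interrupt for admin-queue processing: the handler above re-arms itself as a periodic EAL alarm every I40EVF_ALARM_INTERVAL microseconds and is cancelled again in i40evf_dev_close(). A self-contained sketch of that self-rearming pattern with the public alarm API; poll_cb, POLL_INTERVAL_US and the 500 ms run time are illustrative assumptions, not code from this patch:

    #include <stdio.h>
    #include <rte_eal.h>
    #include <rte_alarm.h>
    #include <rte_cycles.h>

    #define POLL_INTERVAL_US 50000 /* 50 ms, mirroring I40EVF_ALARM_INTERVAL */

    /* Illustrative callback: do the periodic work, then re-arm, just as
     * i40evf_dev_alarm_handler() re-arms itself at its "done:" label. */
    static void
    poll_cb(void *arg)
    {
            printf("polling context %p\n", arg);
            rte_eal_alarm_set(POLL_INTERVAL_US, poll_cb, arg);
    }

    int
    main(int argc, char **argv)
    {
            static int ctx; /* stand-in for the ethdev pointer */

            if (rte_eal_init(argc, argv) < 0)
                    return -1;

            rte_eal_alarm_set(POLL_INTERVAL_US, poll_cb, &ctx); /* start polling */
            rte_delay_ms(500);                                  /* let it fire a few times */
            rte_eal_alarm_cancel(poll_cb, &ctx);                /* stop, as dev_close() does */
            return rte_eal_cleanup();
    }
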
@@ -1436,18 +1489,15 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)
        hw->bus.func = pci_dev->addr.function;
        hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
        hw->adapter_stopped = 0;
+       hw->adapter_closed = 0;
 
        if(i40evf_init_vf(eth_dev) != 0) {
                PMD_INIT_LOG(ERR, "Init vf failed");
                return -1;
        }
 
-       /* register callback func to eal lib */
-       rte_intr_callback_register(&pci_dev->intr_handle,
-               i40evf_dev_interrupt_handler, (void *)eth_dev);
-
-       /* enable uio intr after callback register */
-       rte_intr_enable(&pci_dev->intr_handle);
+       rte_eal_alarm_set(I40EVF_ALARM_INTERVAL,
+                         i40evf_dev_alarm_handler, eth_dev);
 
        /* configure and enable device interrupt */
        i40evf_enable_irq0(hw);
@@ -1462,7 +1512,7 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)
                                ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX);
                return -ENOMEM;
        }
-       ether_addr_copy((struct ether_addr *)hw->mac.addr,
+       ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
                        &eth_dev->data->mac_addrs[0]);
 
        return 0;
@@ -1485,9 +1535,6 @@ i40evf_dev_uninit(struct rte_eth_dev *eth_dev)
                return -1;
        }
 
-       rte_free(eth_dev->data->mac_addrs);
-       eth_dev->data->mac_addrs = NULL;
-
        return 0;
 }
 
@@ -1520,10 +1567,11 @@ RTE_PMD_REGISTER_KMOD_DEP(net_i40e_vf, "* igb_uio | vfio-pci");
 static int
 i40evf_dev_configure(struct rte_eth_dev *dev)
 {
+       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct i40e_adapter *ad =
                I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
-       struct rte_eth_conf *conf = &dev->data->dev_conf;
-       struct i40e_vf *vf;
+       uint16_t num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
+                               dev->data->nb_tx_queues);
 
        /* Initialize to TRUE. If any of Rx queues doesn't meet the bulk
         * allocation or vector Rx preconditions we will reset it.
@@ -1533,17 +1581,18 @@ i40evf_dev_configure(struct rte_eth_dev *dev)
        ad->tx_simple_allowed = true;
        ad->tx_vec_allowed = true;
 
-       /* For non-DPDK PF drivers, VF has no ability to disable HW
-        * CRC strip, and is implicitly enabled by the PF.
-        */
-       if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
-               vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
-               if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) &&
-                   (vf->version_minor <= VIRTCHNL_VERSION_MINOR)) {
-                       /* Peer is running non-DPDK PF driver. */
-                       PMD_INIT_LOG(ERR, "VF can't disable HW CRC Strip");
-                       return -EINVAL;
-               }
+       if (num_queue_pairs > vf->vsi_res->num_queue_pairs) {
+               int ret = 0;
+
+               PMD_DRV_LOG(INFO, "change queue pairs from %u to %u",
+                           vf->vsi_res->num_queue_pairs, num_queue_pairs);
+               ret = i40evf_request_queues(dev, num_queue_pairs);
+               if (ret != 0)
+                       return ret;
+
+               ret = i40evf_dev_reset(dev);
+               if (ret != 0)
+                       return ret;
        }
 
        return i40evf_init_vlan(dev);
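
From the application side, this new path is exercised simply by configuring the VF port with more queue pairs than the PF originally granted; the PMD then sends VIRTCHNL_OP_REQUEST_QUEUES and performs the VF reset internally. A hedged application-level sketch; configure_vf_port, NB_QUEUES and the zeroed rte_eth_conf are assumptions for illustration, not taken from this patch:

    #include <string.h>
    #include <errno.h>
    #include <rte_ethdev.h>

    #define NB_QUEUES 8 /* may exceed the VF's initial vsi_res->num_queue_pairs */

    /* Illustrative: ask for NB_QUEUES Rx/Tx queues on an i40e VF port. If
     * fewer queue pairs were granted, i40evf_dev_configure() now requests
     * more from the PF and resets the VF before returning. */
    static int
    configure_vf_port(uint16_t port_id)
    {
            struct rte_eth_conf conf;
            struct rte_eth_dev_info info;

            memset(&conf, 0, sizeof(conf));
            rte_eth_dev_info_get(port_id, &info);
            if (NB_QUEUES > info.max_rx_queues || NB_QUEUES > info.max_tx_queues)
                    return -EINVAL; /* capped at I40E_MAX_QP_NUM_PER_VF */

            return rte_eth_dev_configure(port_id, NB_QUEUES, NB_QUEUES, &conf);
    }
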
@@ -1583,37 +1632,35 @@ static int
 i40evf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
        struct i40e_rx_queue *rxq;
-       int err = 0;
+       int err;
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        PMD_INIT_FUNC_TRACE();
 
-       if (rx_queue_id < dev->data->nb_rx_queues) {
-               rxq = dev->data->rx_queues[rx_queue_id];
-
-               err = i40e_alloc_rx_queue_mbufs(rxq);
-               if (err) {
-                       PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
-                       return err;
-               }
+       rxq = dev->data->rx_queues[rx_queue_id];
 
-               rte_wmb();
+       err = i40e_alloc_rx_queue_mbufs(rxq);
+       if (err) {
+               PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
+               return err;
+       }
 
-               /* Init the RX tail register. */
-               I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
-               I40EVF_WRITE_FLUSH(hw);
+       rte_wmb();
 
-               /* Ready to switch the queue on */
-               err = i40evf_switch_queue(dev, TRUE, rx_queue_id, TRUE);
+       /* Init the RX tail register. */
+       I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+       I40EVF_WRITE_FLUSH(hw);
 
-               if (err)
-                       PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
-                                   rx_queue_id);
-               else
-                       dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+       /* Ready to switch the queue on */
+       err = i40evf_switch_queue(dev, TRUE, rx_queue_id, TRUE);
+       if (err) {
+               PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
+                           rx_queue_id);
+               return err;
        }
+       dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
 
-       return err;
+       return 0;
 }
 
 static int
@@ -1622,45 +1669,39 @@ i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
        struct i40e_rx_queue *rxq;
        int err;
 
-       if (rx_queue_id < dev->data->nb_rx_queues) {
-               rxq = dev->data->rx_queues[rx_queue_id];
-
-               err = i40evf_switch_queue(dev, TRUE, rx_queue_id, FALSE);
-
-               if (err) {
-                       PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
-                                   rx_queue_id);
-                       return err;
-               }
+       rxq = dev->data->rx_queues[rx_queue_id];
 
-               i40e_rx_queue_release_mbufs(rxq);
-               i40e_reset_rx_queue(rxq);
-               dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+       err = i40evf_switch_queue(dev, TRUE, rx_queue_id, FALSE);
+       if (err) {
+               PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
+                           rx_queue_id);
+               return err;
        }
 
+       i40e_rx_queue_release_mbufs(rxq);
+       i40e_reset_rx_queue(rxq);
+       dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
        return 0;
 }
 
 static int
 i40evf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
-       int err = 0;
+       int err;
 
        PMD_INIT_FUNC_TRACE();
 
-       if (tx_queue_id < dev->data->nb_tx_queues) {
-
-               /* Ready to switch the queue on */
-               err = i40evf_switch_queue(dev, FALSE, tx_queue_id, TRUE);
-
-               if (err)
-                       PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
-                                   tx_queue_id);
-               else
-                       dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+       /* Ready to switch the queue on */
+       err = i40evf_switch_queue(dev, FALSE, tx_queue_id, TRUE);
+       if (err) {
+               PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
+                           tx_queue_id);
+               return err;
        }
+       dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
 
-       return err;
+       return 0;
 }
 
 static int
@@ -1669,22 +1710,19 @@ i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
        struct i40e_tx_queue *txq;
        int err;
 
-       if (tx_queue_id < dev->data->nb_tx_queues) {
-               txq = dev->data->tx_queues[tx_queue_id];
-
-               err = i40evf_switch_queue(dev, FALSE, tx_queue_id, FALSE);
-
-               if (err) {
-                       PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
-                                   tx_queue_id);
-                       return err;
-               }
+       txq = dev->data->tx_queues[tx_queue_id];
 
-               i40e_tx_queue_release_mbufs(txq);
-               i40e_reset_tx_queue(txq);
-               dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+       err = i40evf_switch_queue(dev, FALSE, tx_queue_id, FALSE);
+       if (err) {
+               PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
+                           tx_queue_id);
+               return err;
        }
 
+       i40e_tx_queue_release_mbufs(txq);
+       i40e_reset_tx_queue(txq);
+       dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
        return 0;
 }
 
@@ -1748,9 +1786,8 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
        }
 
        if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
-           (rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
+           rxq->max_pkt_len > buf_size)
                dev_data->scattered_rx = 1;
-       }
 
        return 0;
 }
@@ -1836,7 +1873,7 @@ i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint16_t interval =
-               i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 0, 0);
+               i40e_calc_itr_interval(0, 0);
        uint16_t msix_intr;
 
        msix_intr = intr_handle->intr_vec[queue_id];
@@ -1859,8 +1896,6 @@ i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 
        I40EVF_WRITE_FLUSH(hw);
 
-       rte_intr_enable(&pci_dev->intr_handle);
-
        return 0;
 }
 
@@ -1895,7 +1930,7 @@ i40evf_add_del_all_mac_addr(struct rte_eth_dev *dev, bool add)
        int next_begin = 0;
        int begin = 0;
        uint32_t len;
-       struct ether_addr *addr;
+       struct rte_ether_addr *addr;
        struct vf_cmd_info args;
 
        do {
@@ -2016,17 +2051,9 @@ i40evf_dev_start(struct rte_eth_dev *dev)
                goto err_mac;
        }
 
-       /* When a VF port is bound to VFIO-PCI, only miscellaneous interrupt
-        * is mapped to VFIO vector 0 in i40evf_dev_init( ).
-        * If previous VFIO interrupt mapping set in i40evf_dev_init( ) is
-        * not cleared, it will fail when rte_intr_enable( ) tries to map Rx
-        * queue interrupt to other VFIO vectors.
-        * So clear uio/vfio intr/evevnfd first to avoid failure.
-        */
-       if (dev->data->dev_conf.intr_conf.rxq != 0) {
-               rte_intr_disable(intr_handle);
+       /* Enable the Rx queue interrupt only in Rx interrupt mode */
+       if (dev->data->dev_conf.intr_conf.rxq != 0)
                rte_intr_enable(intr_handle);
-       }
 
        i40evf_enable_queues_intr(dev);
 
@@ -2050,6 +2077,9 @@ i40evf_dev_stop(struct rte_eth_dev *dev)
 
        PMD_INIT_FUNC_TRACE();
 
+       if (dev->data->dev_conf.intr_conf.rxq != 0)
+               rte_intr_disable(intr_handle);
+
        if (hw->adapter_stopped == 1)
                return;
        i40evf_stop_queues(dev);
@@ -2182,11 +2212,12 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 
-       memset(dev_info, 0, sizeof(*dev_info));
-       dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
-       dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
+       dev_info->max_rx_queues = I40E_MAX_QP_NUM_PER_VF;
+       dev_info->max_tx_queues = I40E_MAX_QP_NUM_PER_VF;
        dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
        dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
+       dev_info->max_mtu = dev_info->max_rx_pktlen - I40E_ETH_OVERHEAD;
+       dev_info->min_mtu = ETHER_MIN_MTU;
        dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
        dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
        dev_info->flow_type_rss_offloads = vf->adapter->flow_types_mask;
@@ -2199,8 +2230,9 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                DEV_RX_OFFLOAD_UDP_CKSUM |
                DEV_RX_OFFLOAD_TCP_CKSUM |
                DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-               DEV_RX_OFFLOAD_CRC_STRIP |
-               DEV_RX_OFFLOAD_SCATTER;
+               DEV_RX_OFFLOAD_SCATTER |
+               DEV_RX_OFFLOAD_JUMBO_FRAME |
+               DEV_RX_OFFLOAD_VLAN_FILTER;
 
        dev_info->tx_queue_offload_capa = 0;
        dev_info->tx_offload_capa =
@@ -2215,7 +2247,8 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
                DEV_TX_OFFLOAD_GRE_TNL_TSO |
                DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-               DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+               DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+               DEV_TX_OFFLOAD_MULTI_SEGS;
 
        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
@@ -2282,20 +2315,22 @@ static void
 i40evf_dev_close(struct rte_eth_dev *dev)
 {
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 
        i40evf_dev_stop(dev);
        i40e_dev_free_queues(dev);
-       i40evf_reset_vf(hw);
-       i40e_shutdown_adminq(hw);
-       /* disable uio intr before callback unregister */
-       rte_intr_disable(intr_handle);
+       /*
+        * Disable promiscuous mode before resetting the VF. This is a
+        * workaround for operation with the kernel PF driver and is not
+        * the normal teardown path.
+        */
+       i40evf_dev_promiscuous_disable(dev);
+       i40evf_dev_allmulticast_disable(dev);
+       rte_eal_alarm_cancel(i40evf_dev_alarm_handler, dev);
 
-       /* unregister callback func from eal lib */
-       rte_intr_callback_unregister(intr_handle,
-                                    i40evf_dev_interrupt_handler, dev);
+       i40evf_reset_vf(dev);
+       i40e_shutdown_adminq(hw);
        i40evf_disable_irq0(hw);
+       hw->adapter_closed = 1;
 }
 
 /*
@@ -2667,7 +2702,7 @@ i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
 static int
 i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
-                           struct ether_addr *mac_addr)
+                           struct rte_ether_addr *mac_addr)
 {
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -2680,18 +2715,18 @@ i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
        if (vf->flags & I40E_FLAG_VF_MAC_BY_PF)
                return -EPERM;
 
-       i40evf_del_mac_addr_by_addr(dev, (struct ether_addr *)hw->mac.addr);
+       i40evf_del_mac_addr_by_addr(dev, (struct rte_ether_addr *)hw->mac.addr);
 
        if (i40evf_add_mac_addr(dev, mac_addr, 0, 0) != 0)
                return -EIO;
 
-       ether_addr_copy(mac_addr, (struct ether_addr *)hw->mac.addr);
+       ether_addr_copy(mac_addr, (struct rte_ether_addr *)hw->mac.addr);
        return 0;
 }
 
 static int
 i40evf_add_del_mc_addr_list(struct rte_eth_dev *dev,
-                       struct ether_addr *mc_addrs,
+                       struct rte_ether_addr *mc_addrs,
                        uint32_t mc_addrs_num, bool add)
 {
        struct virtchnl_ether_addr_list *list;
@@ -2745,7 +2780,8 @@ i40evf_add_del_mc_addr_list(struct rte_eth_dev *dev,
 }
 
 static int
-i40evf_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addrs,
+i40evf_set_mc_addr_list(struct rte_eth_dev *dev,
+                       struct rte_ether_addr *mc_addrs,
                        uint32_t mc_addrs_num)
 {
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);