net/hns3: remove restriction on setting VF MTU
[dpdk.git] / drivers / net / hns3 / hns3_ethdev_vf.c
index b1736e7..0c79202 100644 (file)
@@ -131,7 +131,7 @@ hns3vf_enable_msix(const struct rte_pci_device *device, bool op)
                                     (pos + PCI_MSIX_FLAGS));
                return 0;
        }
-       return -1;
+       return -ENXIO;
 }
 
 static int
@@ -208,12 +208,27 @@ hns3vf_set_default_mac_addr(struct rte_eth_dev *dev,
 
        ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
                                HNS3_MBX_MAC_VLAN_UC_MODIFY, addr_bytes,
-                               HNS3_TWO_ETHER_ADDR_LEN, false, NULL, 0);
+                               HNS3_TWO_ETHER_ADDR_LEN, true, NULL, 0);
        if (ret) {
-               rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
-                                     mac_addr);
-               hns3_err(hw, "Failed to set mac addr(%s) for vf: %d", mac_str,
-                        ret);
+               /*
+                * The hns3 VF PMD driver depends on the hns3 PF kernel ethdev
+                * driver. When user has configured a MAC address for VF device
+                * by "ip link set ..." command based on the PF device, the hns3
+                * PF kernel ethdev driver does not allow VF driver to request
+                * reconfiguring a different default MAC address, and returns
+                * -EPERM to VF driver through mailbox.
+                */
+               if (ret == -EPERM) {
+                       rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+                                             old_addr);
+                       hns3_warn(hw, "Has permanent mac addr(%s) for vf",
+                                 mac_str);
+               } else {
+                       rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+                                             mac_addr);
+                       hns3_err(hw, "Failed to set mac addr(%s) for vf: %d",
+                                mac_str, ret);
+               }
        }
 
        rte_ether_addr_copy(mac_addr,
@@ -414,6 +429,96 @@ hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc)
        return ret;
 }
 
+static int
+hns3vf_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id,
+                            bool mmap, enum hns3_ring_type queue_type,
+                            uint16_t queue_id)
+{
+       struct hns3_vf_bind_vector_msg bind_msg;
+       const char *op_str;
+       uint16_t code;
+       int ret;
+
+       memset(&bind_msg, 0, sizeof(bind_msg));
+       code = mmap ? HNS3_MBX_MAP_RING_TO_VECTOR :
+               HNS3_MBX_UNMAP_RING_TO_VECTOR;
+       bind_msg.vector_id = vector_id;
+
+       if (queue_type == HNS3_RING_TYPE_RX)
+               bind_msg.param[0].int_gl_index = HNS3_RING_GL_RX;
+       else
+               bind_msg.param[0].int_gl_index = HNS3_RING_GL_TX;
+
+       bind_msg.param[0].ring_type = queue_type;
+       bind_msg.ring_num = 1;
+       bind_msg.param[0].tqp_index = queue_id;
+       op_str = mmap ? "Map" : "Unmap";
+       ret = hns3_send_mbx_msg(hw, code, 0, (uint8_t *)&bind_msg,
+                               sizeof(bind_msg), false, NULL, 0);
+       if (ret) {
+               hns3_err(hw, "%s TQP %d fail, vector_id is %d, ret is %d.",
+                        op_str, queue_id, bind_msg.vector_id, ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int
+hns3vf_init_ring_with_vector(struct hns3_hw *hw)
+{
+       uint8_t vec;
+       int ret;
+       int i;
+
+       /*
+        * In hns3 network engine, vector 0 is always the misc interrupt of this
+        * function, vector 1~N can be used respectively for the queues of the
+        * function. Tx and Rx queues with the same number share the interrupt
+        * vector. In the initialization clearing the all hardware mapping
+        * relationship configurations between queues and interrupt vectors is
+        * needed, so some error caused by the residual configurations, such as
+        * the unexpected Tx interrupt, can be avoided. Because of the hardware
+        * constraints in hns3 hardware engine, we have to implement clearing
+        * the mapping relationship configurations by binding all queues to the
+        * last interrupt vector and reserving the last interrupt vector. This
+        * method results in a decrease of the maximum queues when upper
+        * applications call the rte_eth_dev_configure API function to enable
+        * Rx interrupt.
+        */
+       vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */
+       hw->intr_tqps_num = vec - 1; /* the last interrupt is reserved */
+       for (i = 0; i < hw->intr_tqps_num; i++) {
+               /*
+                * Set gap limiter and rate limiter configuration of queue's
+                * interrupt.
+                */
+               hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
+                                      HNS3_TQP_INTR_GL_DEFAULT);
+               hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX,
+                                      HNS3_TQP_INTR_GL_DEFAULT);
+               hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT);
+
+               ret = hns3vf_bind_ring_with_vector(hw, vec, false,
+                                                  HNS3_RING_TYPE_TX, i);
+               if (ret) {
+                       PMD_INIT_LOG(ERR, "VF fail to unbind TX ring(%d) with "
+                                         "vector: %d, ret=%d", i, vec, ret);
+                       return ret;
+               }
+
+               ret = hns3vf_bind_ring_with_vector(hw, vec, false,
+                                                  HNS3_RING_TYPE_RX, i);
+               if (ret) {
+                       PMD_INIT_LOG(ERR, "VF fail to unbind RX ring(%d) with "
+                                         "vector: %d, ret=%d", i, vec, ret);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
 static int
 hns3vf_dev_configure(struct rte_eth_dev *dev)
 {
@@ -428,24 +533,28 @@ hns3vf_dev_configure(struct rte_eth_dev *dev)
        int ret;
 
        /*
-        * Hardware does not support where the number of rx and tx queues is
-        * not equal in hip08.
+        * Hardware does not support individually enable/disable/reset the Tx or
+        * Rx queue in hns3 network engine. Driver must enable/disable/reset Tx
+        * and Rx queues at the same time. When the numbers of Tx queues
+        * allocated by upper applications are not equal to the numbers of Rx
+        * queues, driver needs to setup fake Tx or Rx queues to adjust numbers
+        * of Tx/Rx queues. Otherwise, network engine can not work as usual. But
+        * these fake queues are imperceptible, and can not be used by upper
+        * applications.
         */
-       if (nb_rx_q != nb_tx_q) {
-               hns3_err(hw,
-                        "nb_rx_queues(%u) not equal with nb_tx_queues(%u)! "
-                        "Hardware does not support this configuration!",
-                        nb_rx_q, nb_tx_q);
-               return -EINVAL;
+       ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);
+       if (ret) {
+               hns3_err(hw, "Failed to set rx/tx fake queues: %d", ret);
+               return ret;
        }
 
+       hw->adapter_state = HNS3_NIC_CONFIGURING;
        if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
                hns3_err(hw, "setting link speed/duplex not supported");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto cfg_err;
        }
 
-       hw->adapter_state = HNS3_NIC_CONFIGURING;
-
        /* When RSS is not configured, redirect the packet queue 0 */
        if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {
                rss_conf = conf->rx_adv_conf.rss_conf;
@@ -484,7 +593,9 @@ hns3vf_dev_configure(struct rte_eth_dev *dev)
        return 0;
 
 cfg_err:
+       (void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0);
        hw->adapter_state = HNS3_NIC_INITIALIZED;
+
        return ret;
 }
 
@@ -508,12 +619,14 @@ hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
        uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD;
        int ret;
 
-       if (dev->data->dev_started) {
-               hns3_err(hw, "Failed to set mtu, port %u must be stopped "
-                        "before configuration", dev->data->port_id);
-               return -EBUSY;
-       }
-
+       /*
+        * The hns3 PF/VF devices on the same port share the hardware MTU
+        * configuration. Currently, we send mailbox to inform hns3 PF kernel
+        * ethdev driver to finish hardware MTU configuration in hns3 VF PMD
+        * driver, there is no need to stop the port for hns3 VF device, and the
+        * MTU value issued by hns3 VF PMD driver must be less than or equal to
+        * PF's MTU.
+        */
        if (rte_atomic16_read(&hw->reset.resetting)) {
                hns3_err(hw, "Failed to set mtu during resetting");
                return -EIO;
@@ -542,8 +655,16 @@ hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 {
        struct hns3_adapter *hns = eth_dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
+       uint16_t q_num = hw->tqps_num;
+
+       /*
+        * In interrupt mode, 'max_rx_queues' is set based on the number of
+        * MSI-X interrupt resources of the hardware.
+        */
+       if (hw->data->dev_conf.intr_conf.rxq == 1)
+               q_num = hw->intr_tqps_num;
 
-       info->max_rx_queues = hw->tqps_num;
+       info->max_rx_queues = q_num;
        info->max_tx_queues = hw->tqps_num;
        info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */
        info->min_rx_bufsize = hw->rx_buf_len;
@@ -571,6 +692,10 @@ hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
                                 DEV_TX_OFFLOAD_VLAN_INSERT |
                                 DEV_TX_OFFLOAD_QINQ_INSERT |
                                 DEV_TX_OFFLOAD_MULTI_SEGS |
+                                DEV_TX_OFFLOAD_TCP_TSO |
+                                DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+                                DEV_TX_OFFLOAD_GRE_TNL_TSO |
+                                DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
                                 info->tx_queue_offload_capa);
 
        info->rx_desc_lim = (struct rte_eth_desc_lim) {
@@ -778,6 +903,24 @@ hns3vf_get_tc_info(struct hns3_hw *hw)
        return 0;
 }
 
+static int
+hns3vf_get_host_mac_addr(struct hns3_hw *hw)
+{
+       uint8_t host_mac[RTE_ETHER_ADDR_LEN];
+       int ret;
+
+       ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_MAC_ADDR, 0, NULL, 0,
+                               true, host_mac, RTE_ETHER_ADDR_LEN);
+       if (ret) {
+               hns3_err(hw, "Failed to get mac addr from PF: %d", ret);
+               return ret;
+       }
+
+       memcpy(hw->mac.mac_addr, host_mac, RTE_ETHER_ADDR_LEN);
+
+       return 0;
+}
+
 static int
 hns3vf_get_configuration(struct hns3_hw *hw)
 {
@@ -795,16 +938,21 @@ hns3vf_get_configuration(struct hns3_hw *hw)
        if (ret)
                return ret;
 
+       /* Get user defined VF MAC addr from PF */
+       ret = hns3vf_get_host_mac_addr(hw);
+       if (ret)
+               return ret;
+
        /* Get tc configuration from PF */
        return hns3vf_get_tc_info(hw);
 }
 
-static void
+static int
 hns3vf_set_tc_info(struct hns3_adapter *hns)
 {
        struct hns3_hw *hw = &hns->hw;
        uint16_t nb_rx_q = hw->data->nb_rx_queues;
-       uint16_t new_tqps;
+       uint16_t nb_tx_q = hw->data->nb_tx_queues;
        uint8_t i;
 
        hw->num_tc = 0;
@@ -812,11 +960,22 @@ hns3vf_set_tc_info(struct hns3_adapter *hns)
                if (hw->hw_tc_map & BIT(i))
                        hw->num_tc++;
 
-       new_tqps = RTE_MIN(hw->tqps_num, nb_rx_q);
-       hw->alloc_rss_size = RTE_MIN(hw->rss_size_max, new_tqps / hw->num_tc);
-       hw->alloc_tqps = hw->alloc_rss_size * hw->num_tc;
+       if (nb_rx_q < hw->num_tc) {
+               hns3_err(hw, "number of Rx queues(%d) is less than tcs(%d).",
+                        nb_rx_q, hw->num_tc);
+               return -EINVAL;
+       }
+
+       if (nb_tx_q < hw->num_tc) {
+               hns3_err(hw, "number of Tx queues(%d) is less than tcs(%d).",
+                        nb_tx_q, hw->num_tc);
+               return -EINVAL;
+       }
+
+       hns3_set_rss_size(hw, nb_rx_q);
+       hns3_tc_queue_mapping_cfg(hw, nb_tx_q);
 
-       hns3_tc_queue_mapping_cfg(hw);
+       return 0;
 }
 
 static void
@@ -1055,6 +1214,35 @@ hns3vf_service_handler(void *param)
                          eth_dev);
 }
 
+static int
+hns3_query_vf_resource(struct hns3_hw *hw)
+{
+       struct hns3_vf_res_cmd *req;
+       struct hns3_cmd_desc desc;
+       uint16_t num_msi;
+       int ret;
+
+       hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_VF_RSRC, true);
+       ret = hns3_cmd_send(hw, &desc, 1);
+       if (ret) {
+               hns3_err(hw, "query vf resource failed, ret = %d", ret);
+               return ret;
+       }
+
+       req = (struct hns3_vf_res_cmd *)desc.data;
+       num_msi = hns3_get_field(rte_le_to_cpu_16(req->vf_intr_vector_number),
+                                HNS3_VEC_NUM_M, HNS3_VEC_NUM_S);
+       if (num_msi < HNS3_MIN_VECTOR_NUM) {
+               hns3_err(hw, "Just %u msi resources, not enough for vf(min:%d)",
+                        num_msi, HNS3_MIN_VECTOR_NUM);
+               return -EINVAL;
+       }
+
+       hw->num_msi = (num_msi > hw->tqps_num + 1) ? hw->tqps_num + 1 : num_msi;
+
+       return 0;
+}
+
 static int
 hns3vf_init_hardware(struct hns3_adapter *hns)
 {
@@ -1131,6 +1319,11 @@ hns3vf_init_vf(struct rte_eth_dev *eth_dev)
                goto err_cmd_init;
        }
 
+       /* Get VF resource */
+       ret = hns3_query_vf_resource(hw);
+       if (ret)
+               goto err_cmd_init;
+
        rte_spinlock_init(&hw->mbx_resp.lock);
 
        hns3vf_clear_event_cause(hw, 0);
@@ -1153,7 +1346,20 @@ hns3vf_init_vf(struct rte_eth_dev *eth_dev)
                goto err_get_config;
        }
 
-       rte_eth_random_addr(hw->mac.mac_addr); /* Generate a random mac addr */
+       /*
+        * The hns3 PF ethdev driver in kernel support setting VF MAC address
+        * on the host by "ip link set ..." command. To avoid some incorrect
+        * scenes, for example, hns3 VF PMD driver fails to receive and send
+        * packets after user configure the MAC address by using the
+        * "ip link set ..." command, hns3 VF PMD driver keep the same MAC
+        * address strategy as the hns3 kernel ethdev driver in the
+        * initialization. If user configure a MAC address by the ip command
+        * for VF device, then hns3 VF PMD driver will start with it, otherwise
+        * start with a random MAC address in the initialization.
+        */
+       ret = rte_is_zero_ether_addr((struct rte_ether_addr *)hw->mac.mac_addr);
+       if (ret)
+               rte_eth_random_addr(hw->mac.mac_addr);
 
        ret = hns3vf_clear_vport_list(hw);
        if (ret) {
@@ -1167,7 +1373,16 @@ hns3vf_init_vf(struct rte_eth_dev *eth_dev)
 
        hns3_set_default_rss_args(hw);
 
-       (void)hns3_stats_reset(eth_dev);
+       /*
+        * In the initialization clearing the all hardware mapping relationship
+        * configurations between queues and interrupt vectors is needed, so
+        * some error caused by the residual configurations, such as the
+        * unexpected interrupt, can be avoided.
+        */
+       ret = hns3vf_init_ring_with_vector(hw);
+       if (ret)
+               goto err_get_config;
+
        return 0;
 
 err_get_config:
@@ -1225,18 +1440,53 @@ hns3vf_do_stop(struct hns3_adapter *hns)
 }
 
 static void
-hns3vf_dev_stop(struct rte_eth_dev *eth_dev)
+hns3vf_unmap_rx_interrupt(struct rte_eth_dev *dev)
 {
-       struct hns3_adapter *hns = eth_dev->data->dev_private;
+       struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       uint8_t base = 0;
+       uint8_t vec = 0;
+       uint16_t q_id;
+
+       if (dev->data->dev_conf.intr_conf.rxq == 0)
+               return;
+
+       /* unmap the ring with vector */
+       if (rte_intr_allow_others(intr_handle)) {
+               vec = RTE_INTR_VEC_RXTX_OFFSET;
+               base = RTE_INTR_VEC_RXTX_OFFSET;
+       }
+       if (rte_intr_dp_is_en(intr_handle)) {
+               for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
+                       (void)hns3vf_bind_ring_with_vector(hw, vec, false,
+                                                          HNS3_RING_TYPE_RX,
+                                                          q_id);
+                       if (vec < base + intr_handle->nb_efd - 1)
+                               vec++;
+               }
+       }
+       /* Clean datapath event and queue/vec mapping */
+       rte_intr_efd_disable(intr_handle);
+       if (intr_handle->intr_vec) {
+               rte_free(intr_handle->intr_vec);
+               intr_handle->intr_vec = NULL;
+       }
+}
+
+static void
+hns3vf_dev_stop(struct rte_eth_dev *dev)
+{
+       struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
 
        PMD_INIT_FUNC_TRACE();
 
        hw->adapter_state = HNS3_NIC_STOPPING;
-       hns3_set_rxtx_function(eth_dev);
+       hns3_set_rxtx_function(dev);
        rte_wmb();
        /* Disable datapath on secondary process. */
-       hns3_mp_req_stop_rxtx(eth_dev);
+       hns3_mp_req_stop_rxtx(dev);
        /* Prevent crashes when queues are still in use. */
        rte_delay_ms(hw->tqps_num);
 
@@ -1246,8 +1496,10 @@ hns3vf_dev_stop(struct rte_eth_dev *eth_dev)
                hns3_dev_release_mbufs(hns);
                hw->adapter_state = HNS3_NIC_CONFIGURED;
        }
-       rte_eal_alarm_cancel(hns3vf_service_handler, eth_dev);
+       rte_eal_alarm_cancel(hns3vf_service_handler, dev);
        rte_spinlock_unlock(&hw->lock);
+
+       hns3vf_unmap_rx_interrupt(dev);
 }
 
 static void
@@ -1317,7 +1569,9 @@ hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue)
        struct hns3_hw *hw = &hns->hw;
        int ret;
 
-       hns3vf_set_tc_info(hns);
+       ret = hns3vf_set_tc_info(hns);
+       if (ret)
+               return ret;
 
        ret = hns3_start_queues(hns, reset_queue);
        if (ret) {
@@ -1329,15 +1583,85 @@ hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue)
 }
 
 static int
-hns3vf_dev_start(struct rte_eth_dev *eth_dev)
+hns3vf_map_rx_interrupt(struct rte_eth_dev *dev)
 {
-       struct hns3_adapter *hns = eth_dev->data->dev_private;
+       struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t intr_vector;
+       uint8_t base = 0;
+       uint8_t vec = 0;
+       uint16_t q_id;
+       int ret;
+
+       if (dev->data->dev_conf.intr_conf.rxq == 0)
+               return 0;
+
+       /* disable uio/vfio intr/eventfd mapping */
+       rte_intr_disable(intr_handle);
+
+       /* check and configure queue intr-vector mapping */
+       if (rte_intr_cap_multiple(intr_handle) ||
+           !RTE_ETH_DEV_SRIOV(dev).active) {
+               intr_vector = hw->used_rx_queues;
+               /* It creates event fd for each intr vector when MSIX is used */
+               if (rte_intr_efd_enable(intr_handle, intr_vector))
+                       return -EINVAL;
+       }
+       if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+               intr_handle->intr_vec =
+                       rte_zmalloc("intr_vec",
+                                   hw->used_rx_queues * sizeof(int), 0);
+               if (intr_handle->intr_vec == NULL) {
+                       hns3_err(hw, "Failed to allocate %d rx_queues"
+                                    " intr_vec", hw->used_rx_queues);
+                       ret = -ENOMEM;
+                       goto vf_alloc_intr_vec_error;
+               }
+       }
+
+       if (rte_intr_allow_others(intr_handle)) {
+               vec = RTE_INTR_VEC_RXTX_OFFSET;
+               base = RTE_INTR_VEC_RXTX_OFFSET;
+       }
+       if (rte_intr_dp_is_en(intr_handle)) {
+               for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
+                       ret = hns3vf_bind_ring_with_vector(hw, vec, true,
+                                                          HNS3_RING_TYPE_RX,
+                                                          q_id);
+                       if (ret)
+                               goto vf_bind_vector_error;
+                       intr_handle->intr_vec[q_id] = vec;
+                       if (vec < base + intr_handle->nb_efd - 1)
+                               vec++;
+               }
+       }
+       rte_intr_enable(intr_handle);
+       return 0;
+
+vf_bind_vector_error:
+       rte_intr_efd_disable(intr_handle);
+       if (intr_handle->intr_vec) {
+               rte_free(intr_handle->intr_vec);
+               intr_handle->intr_vec = NULL;
+       }
+       return ret;
+vf_alloc_intr_vec_error:
+       rte_intr_efd_disable(intr_handle);
+       return ret;
+}
+
+static int
+hns3vf_dev_start(struct rte_eth_dev *dev)
+{
+       struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
        int ret;
 
        PMD_INIT_FUNC_TRACE();
        if (rte_atomic16_read(&hw->reset.resetting))
                return -EBUSY;
+
        rte_spinlock_lock(&hw->lock);
        hw->adapter_state = HNS3_NIC_STARTING;
        ret = hns3vf_do_start(hns, true);
@@ -1348,11 +1672,15 @@ hns3vf_dev_start(struct rte_eth_dev *eth_dev)
        }
        hw->adapter_state = HNS3_NIC_STARTED;
        rte_spinlock_unlock(&hw->lock);
-       hns3_set_rxtx_function(eth_dev);
-       hns3_mp_req_start_rxtx(eth_dev);
-       rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler,
-                         eth_dev);
-       return 0;
+
+       ret = hns3vf_map_rx_interrupt(dev);
+       if (ret)
+               return ret;
+       hns3_set_rxtx_function(dev);
+       hns3_mp_req_start_rxtx(dev);
+       rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler, dev);
+
+       return ret;
 }
 
 static bool
@@ -1464,7 +1792,8 @@ hns3vf_stop_service(struct hns3_adapter *hns)
        struct rte_eth_dev *eth_dev;
 
        eth_dev = &rte_eth_devices[hw->data->port_id];
-       rte_eal_alarm_cancel(hns3vf_service_handler, eth_dev);
+       if (hw->adapter_state == HNS3_NIC_STARTED)
+               rte_eal_alarm_cancel(hns3vf_service_handler, eth_dev);
        hw->mac.link_status = ETH_LINK_DOWN;
 
        hns3_set_rxtx_function(eth_dev);
@@ -1502,8 +1831,50 @@ hns3vf_start_service(struct hns3_adapter *hns)
        eth_dev = &rte_eth_devices[hw->data->port_id];
        hns3_set_rxtx_function(eth_dev);
        hns3_mp_req_start_rxtx(eth_dev);
+       if (hw->adapter_state == HNS3_NIC_STARTED)
+               hns3vf_service_handler(eth_dev);
+
+       return 0;
+}
+
+static int
+hns3vf_check_default_mac_change(struct hns3_hw *hw)
+{
+       char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
+       struct rte_ether_addr *hw_mac;
+       int ret;
+
+       /*
+        * The hns3 PF ethdev driver in kernel support setting VF MAC address
+        * on the host by "ip link set ..." command. If the hns3 PF kernel
+        * ethdev driver sets the MAC address for VF device after the
+        * initialization of the related VF device, the PF driver will notify
+        * VF driver to reset VF device to make the new MAC address effective
+        * immediately. The hns3 VF PMD driver should check whether the MAC
+        * address has been changed by the PF kernel ethdev driver, if changed
+        * VF driver should configure hardware using the new MAC address in the
+        * recovering hardware configuration stage of the reset process.
+        */
+       ret = hns3vf_get_host_mac_addr(hw);
+       if (ret)
+               return ret;
+
+       hw_mac = (struct rte_ether_addr *)hw->mac.mac_addr;
+       ret = rte_is_zero_ether_addr(hw_mac);
+       if (ret) {
+               rte_ether_addr_copy(&hw->data->mac_addrs[0], hw_mac);
+       } else {
+               ret = rte_is_same_ether_addr(&hw->data->mac_addrs[0], hw_mac);
+               if (!ret) {
+                       rte_ether_addr_copy(hw_mac, &hw->data->mac_addrs[0]);
+                       rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+                                             &hw->data->mac_addrs[0]);
+                       hns3_warn(hw, "Default MAC address has been changed to:"
+                                 " %s by the host PF kernel ethdev driver",
+                                 mac_str);
+               }
+       }
 
-       hns3vf_service_handler(eth_dev);
        return 0;
 }
 
@@ -1513,6 +1884,10 @@ hns3vf_restore_conf(struct hns3_adapter *hns)
        struct hns3_hw *hw = &hns->hw;
        int ret;
 
+       ret = hns3vf_check_default_mac_change(hw);
+       if (ret)
+               return ret;
+
        ret = hns3vf_configure_mac_addr(hns, false);
        if (ret)
                return ret;
@@ -1685,6 +2060,8 @@ static const struct eth_dev_ops hns3vf_eth_dev_ops = {
        .tx_queue_setup     = hns3_tx_queue_setup,
        .rx_queue_release   = hns3_dev_rx_queue_release,
        .tx_queue_release   = hns3_dev_tx_queue_release,
+       .rx_queue_intr_enable   = hns3_dev_rx_queue_intr_enable,
+       .rx_queue_intr_disable  = hns3_dev_rx_queue_intr_disable,
        .dev_configure      = hns3vf_dev_configure,
        .mac_addr_add       = hns3vf_add_mac_addr,
        .mac_addr_remove    = hns3vf_remove_mac_addr,