net/hns3: fix data type to store queue number
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 68d7a6a..3386453 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -23,6 +23,9 @@
 #include <rte_net.h>
 #include <rte_malloc.h>
 #include <rte_pci.h>
+#if defined(RTE_ARCH_ARM64) && defined(CC_SVE_SUPPORT)
+#include <rte_cpuflags.h>
+#endif
 
 #include "hns3_ethdev.h"
 #include "hns3_rxtx.h"
@@ -74,7 +77,7 @@ hns3_tx_queue_release_mbufs(struct hns3_tx_queue *txq)
 {
        uint16_t i;
 
-       /* Note: Fake rx queue will not enter here */
+       /* Note: Fake tx queue will not enter here */
        if (txq->sw_ring) {
                for (i = 0; i < txq->nb_tx_desc; i++) {
                        if (txq->sw_ring[i].mbuf) {
@@ -339,26 +342,26 @@ hns3_init_tx_queue_hw(struct hns3_tx_queue *txq)
 }
 
 void
-hns3_update_all_queues_pvid_state(struct hns3_hw *hw)
+hns3_update_all_queues_pvid_proc_en(struct hns3_hw *hw)
 {
        uint16_t nb_rx_q = hw->data->nb_rx_queues;
        uint16_t nb_tx_q = hw->data->nb_tx_queues;
        struct hns3_rx_queue *rxq;
        struct hns3_tx_queue *txq;
-       int pvid_state;
+       bool pvid_en;
        int i;
 
-       pvid_state = hw->port_base_vlan_cfg.state;
+       pvid_en = hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE;
        for (i = 0; i < hw->cfg_max_queues; i++) {
                if (i < nb_rx_q) {
                        rxq = hw->data->rx_queues[i];
                        if (rxq != NULL)
-                               rxq->pvid_state = pvid_state;
+                               rxq->pvid_sw_discard_en = pvid_en;
                }
                if (i < nb_tx_q) {
                        txq = hw->data->tx_queues[i];
                        if (txq != NULL)
-                               txq->pvid_state = pvid_state;
+                               txq->pvid_sw_shift_en = pvid_en;
                }
        }
 }
@@ -371,27 +374,159 @@ hns3_enable_all_queues(struct hns3_hw *hw, bool en)
        struct hns3_rx_queue *rxq;
        struct hns3_tx_queue *txq;
        uint32_t rcb_reg;
+       void *tqp_base;
        int i;
 
        for (i = 0; i < hw->cfg_max_queues; i++) {
-               if (i < nb_rx_q)
-                       rxq = hw->data->rx_queues[i];
-               else
-                       rxq = hw->fkq_data.rx_queues[i - nb_rx_q];
-               if (i < nb_tx_q)
-                       txq = hw->data->tx_queues[i];
-               else
-                       txq = hw->fkq_data.tx_queues[i - nb_tx_q];
-               if (rxq == NULL || txq == NULL ||
-                   (en && (rxq->rx_deferred_start || txq->tx_deferred_start)))
-                       continue;
+               if (hns3_dev_indep_txrx_supported(hw)) {
+                       rxq = i < nb_rx_q ? hw->data->rx_queues[i] : NULL;
+                       txq = i < nb_tx_q ? hw->data->tx_queues[i] : NULL;
+                       /*
+                        * After initialization, rxq and txq won't be NULL at
+                        * the same time.
+                        */
+                       if (rxq != NULL)
+                               tqp_base = rxq->io_base;
+                       else if (txq != NULL)
+                               tqp_base = txq->io_base;
+                       else
+                               return;
+               } else {
+                       rxq = i < nb_rx_q ? hw->data->rx_queues[i] :
+                             hw->fkq_data.rx_queues[i - nb_rx_q];
 
-               rcb_reg = hns3_read_dev(rxq, HNS3_RING_EN_REG);
+                       tqp_base = rxq->io_base;
+               }
+               /*
+                * This is the master switch used to control the enabling of a
+                * pair of Tx and Rx queues. Both the Rx and Tx queues of a
+                * pair point to the same register.
+                */
+               rcb_reg = hns3_read_reg(tqp_base, HNS3_RING_EN_REG);
                if (en)
                        rcb_reg |= BIT(HNS3_RING_EN_B);
                else
                        rcb_reg &= ~BIT(HNS3_RING_EN_B);
-               hns3_write_dev(rxq, HNS3_RING_EN_REG, rcb_reg);
+               hns3_write_reg(tqp_base, HNS3_RING_EN_REG, rcb_reg);
+       }
+}
+
+static void
+hns3_enable_txq(struct hns3_tx_queue *txq, bool en)
+{
+       struct hns3_hw *hw = &txq->hns->hw;
+       uint32_t reg;
+
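+       /*
+        * When independent Tx/Rx queue control is not supported, only record
+        * the requested state here; the ring itself is controlled by the
+        * per-pair master switch written in hns3_enable_all_queues().
+        */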
+       if (hns3_dev_indep_txrx_supported(hw)) {
+               reg = hns3_read_dev(txq, HNS3_RING_TX_EN_REG);
+               if (en)
+                       reg |= BIT(HNS3_RING_EN_B);
+               else
+                       reg &= ~BIT(HNS3_RING_EN_B);
+               hns3_write_dev(txq, HNS3_RING_TX_EN_REG, reg);
+       }
+       txq->enabled = en;
+}
+
+static void
+hns3_enable_rxq(struct hns3_rx_queue *rxq, bool en)
+{
+       struct hns3_hw *hw = &rxq->hns->hw;
+       uint32_t reg;
+
+       if (hns3_dev_indep_txrx_supported(hw)) {
+               reg = hns3_read_dev(rxq, HNS3_RING_RX_EN_REG);
+               if (en)
+                       reg |= BIT(HNS3_RING_EN_B);
+               else
+                       reg &= ~BIT(HNS3_RING_EN_B);
+               hns3_write_dev(rxq, HNS3_RING_RX_EN_REG, reg);
+       }
+       rxq->enabled = en;
+}
+
+int
+hns3_start_all_txqs(struct rte_eth_dev *dev)
+{
+       struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct hns3_tx_queue *txq;
+       uint16_t i, j;
+
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               txq = hw->data->tx_queues[i];
+               if (!txq) {
+                       hns3_err(hw, "Tx queue %u not available or not set up.", i);
+                       goto start_txqs_fail;
+               }
+               /*
+                * Tx queues are enabled by default. Therefore, a Tx queue
+                * needs to be disabled when deferred_start is set. In
+                * addition, a master switch controls the enabling of each
+                * pair of Tx and Rx queues, and that master switch is
+                * disabled by default.
+                */
+               if (txq->tx_deferred_start)
+                       hns3_enable_txq(txq, false);
+               else
+                       hns3_enable_txq(txq, true);
+       }
+       return 0;
+
+start_txqs_fail:
+       for (j = 0; j < i; j++) {
+               txq = hw->data->tx_queues[j];
+               hns3_enable_txq(txq, false);
+       }
+       return -EINVAL;
+}
+
+int
+hns3_start_all_rxqs(struct rte_eth_dev *dev)
+{
+       struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct hns3_rx_queue *rxq;
+       uint16_t i, j;
+
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               rxq = hw->data->rx_queues[i];
+               if (!rxq) {
+                       hns3_err(hw, "Rx queue %u not available or not set up.", i);
+                       goto start_rxqs_fail;
+               }
+               /*
+                * Rx queues are enabled by default. Therefore, an Rx queue
+                * needs to be disabled when deferred_start is set. In
+                * addition, a master switch controls the enabling of each
+                * pair of Tx and Rx queues, and that master switch is
+                * disabled by default.
+                */
+               if (rxq->rx_deferred_start)
+                       hns3_enable_rxq(rxq, false);
+               else
+                       hns3_enable_rxq(rxq, true);
+       }
+       return 0;
+
+start_rxqs_fail:
+       for (j = 0; j < i; j++) {
+               rxq = hw->data->rx_queues[j];
+               hns3_enable_rxq(rxq, false);
+       }
+       return -EINVAL;
+}
+
+void
+hns3_stop_all_txqs(struct rte_eth_dev *dev)
+{
+       struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct hns3_tx_queue *txq;
+       uint16_t i;
+
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               txq = hw->data->tx_queues[i];
+               if (!txq)
+                       continue;
+               hns3_enable_txq(txq, false);
        }
 }
 
@@ -405,7 +540,7 @@ hns3_tqp_enable(struct hns3_hw *hw, uint16_t queue_id, bool enable)
        req = (struct hns3_cfg_com_tqp_queue_cmd *)desc.data;
 
        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_COM_TQP_QUEUE, false);
-       req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
+       req->tqp_id = rte_cpu_to_le_16(queue_id);
        req->stream_id = 0;
        hns3_set_bit(req->enable, HNS3_TQP_ENABLE_B, enable ? 1 : 0);
 
@@ -426,18 +561,19 @@ hns3_send_reset_tqp_cmd(struct hns3_hw *hw, uint16_t queue_id, bool enable)
        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, false);
 
        req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
-       req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
+       req->tqp_id = rte_cpu_to_le_16(queue_id);
        hns3_set_bit(req->reset_req, HNS3_TQP_RESET_B, enable ? 1 : 0);
-
        ret = hns3_cmd_send(hw, &desc, 1);
        if (ret)
-               hns3_err(hw, "Send tqp reset cmd error, ret = %d", ret);
+               hns3_err(hw, "send tqp reset cmd error, queue_id = %u, "
+                            "ret = %d", queue_id, ret);
 
        return ret;
 }
 
 static int
-hns3_get_reset_status(struct hns3_hw *hw, uint16_t queue_id)
+hns3_get_tqp_reset_status(struct hns3_hw *hw, uint16_t queue_id,
+                         uint8_t *reset_status)
 {
        struct hns3_reset_tqp_queue_cmd *req;
        struct hns3_cmd_desc desc;
@@ -446,23 +582,24 @@ hns3_get_reset_status(struct hns3_hw *hw, uint16_t queue_id)
        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, true);
 
        req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
-       req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
+       req->tqp_id = rte_cpu_to_le_16(queue_id);
 
        ret = hns3_cmd_send(hw, &desc, 1);
        if (ret) {
-               hns3_err(hw, "Get reset status error, ret =%d", ret);
+               hns3_err(hw, "get tqp reset status error, queue_id = %u, "
+                            "ret = %d.", queue_id, ret);
                return ret;
        }
-
-       return hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B);
+       *reset_status = hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B);
+       return ret;
 }
 
 static int
-hns3_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
+hns3pf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
 {
 #define HNS3_TQP_RESET_TRY_MS  200
+       uint8_t reset_status;
        uint64_t end;
-       int reset_status;
        int ret;
 
        ret = hns3_tqp_enable(hw, queue_id, false);
@@ -479,21 +616,23 @@ hns3_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
                hns3_err(hw, "Send reset tqp cmd fail, ret = %d", ret);
                return ret;
        }
-       ret = -ETIMEDOUT;
        end = get_timeofday_ms() + HNS3_TQP_RESET_TRY_MS;
        do {
                /* Wait for tqp hw reset */
                rte_delay_ms(HNS3_POLL_RESPONE_MS);
-               reset_status = hns3_get_reset_status(hw, queue_id);
-               if (reset_status) {
-                       ret = 0;
+               ret = hns3_get_tqp_reset_status(hw, queue_id, &reset_status);
+               if (ret)
+                       goto tqp_reset_fail;
+
+               if (reset_status)
                        break;
-               }
        } while (get_timeofday_ms() < end);
 
-       if (ret) {
-               hns3_err(hw, "Reset TQP fail, ret = %d", ret);
-               return ret;
+       if (!reset_status) {
+               ret = -ETIMEDOUT;
+               hns3_err(hw, "reset tqp timeout, queue_id = %u, ret = %d",
+                            queue_id, ret);
+               goto tqp_reset_fail;
        }
 
        ret = hns3_send_reset_tqp_cmd(hw, queue_id, false);
@@ -501,6 +640,10 @@ hns3_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
                hns3_err(hw, "Deassert the soft reset fail, ret = %d", ret);
 
        return ret;
+
+tqp_reset_fail:
+       hns3_send_reset_tqp_cmd(hw, queue_id, false);
+       return ret;
 }
 
 static int
@@ -516,28 +659,33 @@ hns3vf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
 
        memcpy(msg_data, &queue_id, sizeof(uint16_t));
 
-       return hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data,
+       ret = hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data,
                                 sizeof(msg_data), true, NULL, 0);
+       if (ret)
+               hns3_err(hw, "fail to reset tqp, queue_id = %u, ret = %d.",
+                        queue_id, ret);
+       return ret;
 }
 
 static int
-hns3_reset_queue(struct hns3_adapter *hns, uint16_t queue_id)
+hns3_reset_tqp(struct hns3_adapter *hns, uint16_t queue_id)
 {
        struct hns3_hw *hw = &hns->hw;
+
        if (hns->is_vf)
                return hns3vf_reset_tqp(hw, queue_id);
        else
-               return hns3_reset_tqp(hw, queue_id);
+               return hns3pf_reset_tqp(hw, queue_id);
 }
 
 int
-hns3_reset_all_queues(struct hns3_adapter *hns)
+hns3_reset_all_tqps(struct hns3_adapter *hns)
 {
        struct hns3_hw *hw = &hns->hw;
        int ret, i;
 
        for (i = 0; i < hw->cfg_max_queues; i++) {
-               ret = hns3_reset_queue(hns, i);
+               ret = hns3_reset_tqp(hns, i);
                if (ret) {
                        hns3_err(hw, "Failed to reset No.%d queue: %d", i, ret);
                        return ret;
@@ -546,6 +694,121 @@ hns3_reset_all_queues(struct hns3_adapter *hns)
        return 0;
 }
 
+static int
+hns3_send_reset_queue_cmd(struct hns3_hw *hw, uint16_t queue_id,
+                         enum hns3_ring_type queue_type, bool enable)
+{
+       struct hns3_reset_tqp_queue_cmd *req;
+       struct hns3_cmd_desc desc;
+       int queue_direction;
+       int ret;
+
+       hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE_INDEP, false);
+
+       req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
+       req->tqp_id = rte_cpu_to_le_16(queue_id);
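+       /* queue_direction: 0 selects the Tx ring, 1 selects the Rx ring. */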
+       queue_direction = queue_type == HNS3_RING_TYPE_TX ? 0 : 1;
+       req->queue_direction = rte_cpu_to_le_16(queue_direction);
+       hns3_set_bit(req->reset_req, HNS3_TQP_RESET_B, enable ? 1 : 0);
+
+       ret = hns3_cmd_send(hw, &desc, 1);
+       if (ret)
+               hns3_err(hw, "send queue reset cmd error, queue_id = %u, "
+                        "queue_type = %s, ret = %d.", queue_id,
+                        queue_type == HNS3_RING_TYPE_TX ? "Tx" : "Rx", ret);
+       return ret;
+}
+
+static int
+hns3_get_queue_reset_status(struct hns3_hw *hw, uint16_t queue_id,
+                           enum hns3_ring_type queue_type,
+                           uint8_t *reset_status)
+{
+       struct hns3_reset_tqp_queue_cmd *req;
+       struct hns3_cmd_desc desc;
+       int queue_direction;
+       int ret;
+
+       hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE_INDEP, true);
+
+       req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
+       req->tqp_id = rte_cpu_to_le_16(queue_id);
+       queue_direction = queue_type == HNS3_RING_TYPE_TX ? 0 : 1;
+       req->queue_direction = rte_cpu_to_le_16(queue_direction);
+
+       ret = hns3_cmd_send(hw, &desc, 1);
+       if (ret) {
+               hns3_err(hw, "get queue reset status error, queue_id = %u, "
+                        "queue_type = %s, ret = %d.", queue_id,
+                        queue_type == HNS3_RING_TYPE_TX ? "Tx" : "Rx", ret);
+               return ret;
+       }
+
+       *reset_status = hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B);
+       return ret;
+}
+
+static int
+hns3_reset_queue(struct hns3_hw *hw, uint16_t queue_id,
+                enum hns3_ring_type queue_type)
+{
+#define HNS3_QUEUE_RESET_TRY_MS        200
+       struct hns3_tx_queue *txq;
+       struct hns3_rx_queue *rxq;
+       uint32_t reset_wait_times;
+       uint32_t max_wait_times;
+       uint8_t reset_status;
+       int ret;
+
+       if (queue_type == HNS3_RING_TYPE_TX) {
+               txq = hw->data->tx_queues[queue_id];
+               hns3_enable_txq(txq, false);
+       } else {
+               rxq = hw->data->rx_queues[queue_id];
+               hns3_enable_rxq(rxq, false);
+       }
+
+       ret = hns3_send_reset_queue_cmd(hw, queue_id, queue_type, true);
+       if (ret) {
+               hns3_err(hw, "send reset queue cmd fail, ret = %d.", ret);
+               return ret;
+       }
+
+       reset_wait_times = 0;
+       max_wait_times = HNS3_QUEUE_RESET_TRY_MS / HNS3_POLL_RESPONE_MS;
+       while (reset_wait_times < max_wait_times) {
+               /* Wait for queue hw reset */
+               rte_delay_ms(HNS3_POLL_RESPONE_MS);
+               ret = hns3_get_queue_reset_status(hw, queue_id,
+                                               queue_type, &reset_status);
+               if (ret)
+                       goto queue_reset_fail;
+
+               if (reset_status)
+                       break;
+               reset_wait_times++;
+       }
+
+       if (!reset_status) {
+               hns3_err(hw, "reset queue timeout, queue_id = %u, "
+                            "queue_type = %s", queue_id,
+                            queue_type == HNS3_RING_TYPE_TX ? "Tx" : "Rx");
+               ret = -ETIMEDOUT;
+               goto queue_reset_fail;
+       }
+
+       ret = hns3_send_reset_queue_cmd(hw, queue_id, queue_type, false);
+       if (ret)
+               hns3_err(hw, "deassert queue reset fail, ret = %d.", ret);
+
+       return ret;
+
+queue_reset_fail:
+       hns3_send_reset_queue_cmd(hw, queue_id, queue_type, false);
+       return ret;
+}
+
 void
 hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
                       uint8_t gl_idx, uint16_t gl_value)
@@ -658,7 +921,7 @@ hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
 }
 
 static int
-hns3_dev_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
+hns3_init_rxq(struct hns3_adapter *hns, uint16_t idx)
 {
        struct hns3_hw *hw = &hns->hw;
        struct hns3_rx_queue *rxq;
@@ -669,7 +932,7 @@ hns3_dev_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
        rxq = (struct hns3_rx_queue *)hw->data->rx_queues[idx];
        ret = hns3_alloc_rx_queue_mbufs(hw, rxq);
        if (ret) {
-               hns3_err(hw, "Failed to alloc mbuf for No.%d rx queue: %d",
+               hns3_err(hw, "fail to alloc mbuf for Rx queue %u, ret = %d.",
                         idx, ret);
                return ret;
        }
@@ -687,7 +950,7 @@ hns3_dev_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
 }
 
 static void
-hns3_fake_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
+hns3_init_fake_rxq(struct hns3_adapter *hns, uint16_t idx)
 {
        struct hns3_hw *hw = &hns->hw;
        struct hns3_rx_queue *rxq;
@@ -701,9 +964,8 @@ hns3_fake_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
 }
 
 static void
-hns3_init_tx_queue(struct hns3_tx_queue *queue)
+hns3_init_txq(struct hns3_tx_queue *txq)
 {
-       struct hns3_tx_queue *txq = queue;
        struct hns3_desc *desc;
        int i;
 
@@ -720,26 +982,6 @@ hns3_init_tx_queue(struct hns3_tx_queue *queue)
        hns3_init_tx_queue_hw(txq);
 }
 
-static void
-hns3_dev_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)
-{
-       struct hns3_hw *hw = &hns->hw;
-       struct hns3_tx_queue *txq;
-
-       txq = (struct hns3_tx_queue *)hw->data->tx_queues[idx];
-       hns3_init_tx_queue(txq);
-}
-
-static void
-hns3_fake_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)
-{
-       struct hns3_hw *hw = &hns->hw;
-       struct hns3_tx_queue *txq;
-
-       txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[idx];
-       hns3_init_tx_queue(txq);
-}
-
 static void
 hns3_init_tx_ring_tc(struct hns3_adapter *hns)
 {
@@ -766,38 +1008,41 @@ hns3_init_tx_ring_tc(struct hns3_adapter *hns)
 }
 
 static int
-hns3_start_rx_queues(struct hns3_adapter *hns)
+hns3_init_rx_queues(struct hns3_adapter *hns)
 {
        struct hns3_hw *hw = &hns->hw;
        struct hns3_rx_queue *rxq;
-       int i, j;
+       uint16_t i, j;
        int ret;
 
        /* Initialize RSS for queues */
        ret = hns3_config_rss(hns);
        if (ret) {
-               hns3_err(hw, "Failed to configure rss %d", ret);
+               hns3_err(hw, "failed to configure rss, ret = %d.", ret);
                return ret;
        }
 
        for (i = 0; i < hw->data->nb_rx_queues; i++) {
                rxq = (struct hns3_rx_queue *)hw->data->rx_queues[i];
-               if (rxq == NULL || rxq->rx_deferred_start)
+               if (!rxq) {
+                       hns3_err(hw, "Rx queue %u not available or not set up.", i);
+                       goto out;
+               }
+
+               if (rxq->rx_deferred_start)
                        continue;
-               ret = hns3_dev_rx_queue_start(hns, i);
+
+               ret = hns3_init_rxq(hns, i);
                if (ret) {
-                       hns3_err(hw, "Failed to start No.%d rx queue: %d", i,
+                       hns3_err(hw, "failed to init Rx queue %u, ret = %d.", i,
                                 ret);
                        goto out;
                }
        }
 
-       for (i = 0; i < hw->fkq_data.nb_fake_rx_queues; i++) {
-               rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[i];
-               if (rxq == NULL || rxq->rx_deferred_start)
-                       continue;
-               hns3_fake_rx_queue_start(hns, i);
-       }
+       for (i = 0; i < hw->fkq_data.nb_fake_rx_queues; i++)
+               hns3_init_fake_rxq(hns, i);
+
        return 0;
 
 out:
@@ -809,74 +1054,104 @@ out:
        return ret;
 }
 
-static void
-hns3_start_tx_queues(struct hns3_adapter *hns)
+static int
+hns3_init_tx_queues(struct hns3_adapter *hns)
 {
        struct hns3_hw *hw = &hns->hw;
        struct hns3_tx_queue *txq;
-       int i;
+       uint16_t i;
 
        for (i = 0; i < hw->data->nb_tx_queues; i++) {
                txq = (struct hns3_tx_queue *)hw->data->tx_queues[i];
-               if (txq == NULL || txq->tx_deferred_start)
+               if (!txq) {
+                       hns3_err(hw, "Tx queue %u not available or not set up.", i);
+                       return -EINVAL;
+               }
+
+               if (txq->tx_deferred_start)
                        continue;
-               hns3_dev_tx_queue_start(hns, i);
+               hns3_init_txq(txq);
        }
 
        for (i = 0; i < hw->fkq_data.nb_fake_tx_queues; i++) {
                txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[i];
-               if (txq == NULL || txq->tx_deferred_start)
-                       continue;
-               hns3_fake_tx_queue_start(hns, i);
+               hns3_init_txq(txq);
        }
-
        hns3_init_tx_ring_tc(hns);
+
+       return 0;
 }
 
 /*
- * Start all queues.
- * Note: just init and setup queues, and don't enable queue rx&tx.
+ * Init all queues.
+ * Note: just init and set up the queues, and don't enable the tqps.
  */
 int
-hns3_start_queues(struct hns3_adapter *hns, bool reset_queue)
+hns3_init_queues(struct hns3_adapter *hns, bool reset_queue)
 {
        struct hns3_hw *hw = &hns->hw;
        int ret;
 
        if (reset_queue) {
-               ret = hns3_reset_all_queues(hns);
+               ret = hns3_reset_all_tqps(hns);
                if (ret) {
-                       hns3_err(hw, "Failed to reset all queues %d", ret);
+                       hns3_err(hw, "failed to reset all queues, ret = %d.",
+                                ret);
                        return ret;
                }
        }
 
-       ret = hns3_start_rx_queues(hns);
+       ret = hns3_init_rx_queues(hns);
        if (ret) {
-               hns3_err(hw, "Failed to start rx queues: %d", ret);
+               hns3_err(hw, "failed to init rx queues, ret = %d.", ret);
                return ret;
        }
 
-       hns3_start_tx_queues(hns);
+       ret = hns3_init_tx_queues(hns);
+       if (ret) {
+               hns3_dev_release_mbufs(hns);
+               hns3_err(hw, "failed to init tx queues, ret = %d.", ret);
+       }
 
-       return 0;
+       return ret;
 }
 
-int
-hns3_stop_queues(struct hns3_adapter *hns, bool reset_queue)
+void
+hns3_start_tqps(struct hns3_hw *hw)
 {
-       struct hns3_hw *hw = &hns->hw;
-       int ret;
+       struct hns3_tx_queue *txq;
+       struct hns3_rx_queue *rxq;
+       uint16_t i;
 
-       hns3_enable_all_queues(hw, false);
-       if (reset_queue) {
-               ret = hns3_reset_all_queues(hns);
-               if (ret) {
-                       hns3_err(hw, "Failed to reset all queues %d", ret);
-                       return ret;
-               }
+       hns3_enable_all_queues(hw, true);
+
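+       /*
+        * Only the queues that were left enabled (i.e. not deferred) are
+        * reported to the ethdev layer as started.
+        */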
+       for (i = 0; i < hw->data->nb_tx_queues; i++) {
+               txq = hw->data->tx_queues[i];
+               if (txq->enabled)
+                       hw->data->tx_queue_state[i] =
+                               RTE_ETH_QUEUE_STATE_STARTED;
+       }
+
+       for (i = 0; i < hw->data->nb_rx_queues; i++) {
+               rxq = hw->data->rx_queues[i];
+               if (rxq->enabled)
+                       hw->data->rx_queue_state[i] =
+                               RTE_ETH_QUEUE_STATE_STARTED;
        }
-       return 0;
+}
+
+void
+hns3_stop_tqps(struct hns3_hw *hw)
+{
+       uint16_t i;
+
+       hns3_enable_all_queues(hw, false);
+
+       for (i = 0; i < hw->data->nb_tx_queues; i++)
+               hw->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+       for (i = 0; i < hw->data->nb_rx_queues; i++)
+               hw->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 }
 
 /*
@@ -1103,7 +1378,7 @@ hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
 {
        uint16_t old_nb_queues = hw->fkq_data.nb_fake_rx_queues;
        void **rxq;
-       uint8_t i;
+       uint16_t i;
 
        if (hw->fkq_data.rx_queues == NULL && nb_queues != 0) {
                /* first time configuration */
@@ -1150,7 +1425,7 @@ hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
 {
        uint16_t old_nb_queues = hw->fkq_data.nb_fake_tx_queues;
        void **txq;
-       uint8_t i;
+       uint16_t i;
 
        if (hw->fkq_data.tx_queues == NULL && nb_queues != 0) {
                /* first time configuration */
@@ -1202,13 +1477,12 @@ hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
        int ret;
 
        /* Setup new number of fake RX/TX queues and reconfigure device. */
-       hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q);
        rx_need_add_nb_q = hw->cfg_max_queues - nb_rx_q;
        tx_need_add_nb_q = hw->cfg_max_queues - nb_tx_q;
        ret = hns3_fake_rx_queue_config(hw, rx_need_add_nb_q);
        if (ret) {
                hns3_err(hw, "Fail to configure fake rx queues: %d", ret);
-               goto cfg_fake_rx_q_fail;
+               return ret;
        }
 
        ret = hns3_fake_tx_queue_config(hw, tx_need_add_nb_q);
@@ -1241,8 +1515,6 @@ setup_fake_rx_q_fail:
        (void)hns3_fake_tx_queue_config(hw, 0);
 cfg_fake_tx_q_fail:
        (void)hns3_fake_rx_queue_config(hw, 0);
-cfg_fake_rx_q_fail:
-       hw->cfg_max_queues = 0;
 
        return ret;
 }
@@ -1258,7 +1530,7 @@ hns3_dev_release_mbufs(struct hns3_adapter *hns)
        if (dev_data->rx_queues)
                for (i = 0; i < dev_data->nb_rx_queues; i++) {
                        rxq = dev_data->rx_queues[i];
-                       if (rxq == NULL || rxq->rx_deferred_start)
+                       if (rxq == NULL)
                                continue;
                        hns3_rx_queue_release_mbufs(rxq);
                }
@@ -1266,7 +1538,7 @@ hns3_dev_release_mbufs(struct hns3_adapter *hns)
        if (dev_data->tx_queues)
                for (i = 0; i < dev_data->nb_tx_queues; i++) {
                        txq = dev_data->tx_queues[i];
-                       if (txq == NULL || txq->tx_deferred_start)
+                       if (txq == NULL)
                                continue;
                        hns3_tx_queue_release_mbufs(txq);
                }
@@ -1314,11 +1586,50 @@ hns3_rx_buf_len_calc(struct rte_mempool *mp, uint16_t *rx_buf_len)
        return 0;
 }
 
+static int
+hns3_rxq_conf_runtime_check(struct hns3_hw *hw, uint16_t buf_size,
+                               uint16_t nb_desc)
+{
+       struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
+       struct rte_eth_rxmode *rxmode = &hw->data->dev_conf.rxmode;
+       eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
+       uint16_t min_vec_bds;
+
+       /*
+        * The HNS3 hardware network engine uses scattered reception by
+        * default. If the driver does not work in scattered mode, packets
+        * greater than buf_size but smaller than max_rx_pkt_len will be
+        * distributed to multiple BDs, which the driver cannot handle.
+        */
+       if (!hw->data->scattered_rx && rxmode->max_rx_pkt_len > buf_size) {
+               hns3_err(hw, "max_rx_pkt_len is not allowed to be set greater "
+                            "than rx_buf_len if scattered is off.");
+               return -EINVAL;
+       }
+
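+       /*
+        * The vector Rx path rearms descriptors in batches of
+        * HNS3_DEFAULT_RXQ_REARM_THRESH, so the ring must hold at least one
+        * rearm batch plus one Rx burst and its size must be a multiple of
+        * the rearm threshold.
+        */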
+       if (pkt_burst == hns3_recv_pkts_vec) {
+               min_vec_bds = HNS3_DEFAULT_RXQ_REARM_THRESH +
+                             HNS3_DEFAULT_RX_BURST;
+               if (nb_desc < min_vec_bds ||
+                   nb_desc % HNS3_DEFAULT_RXQ_REARM_THRESH) {
+                       hns3_err(hw, "if Rx burst mode is vector, "
+                                "the number of descriptors must be no less "
+                                "than the min vector bds:%u and must be "
+                                "divisible by the rxq rearm thresh:%u.",
+                                min_vec_bds, HNS3_DEFAULT_RXQ_REARM_THRESH);
+                       return -EINVAL;
+               }
+       }
+       return 0;
+}
+
 static int
 hns3_rx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_rxconf *conf,
                         struct rte_mempool *mp, uint16_t nb_desc,
                         uint16_t *buf_size)
 {
+       int ret;
+
        if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||
            nb_desc % HNS3_ALIGN_RING_DESC) {
                hns3_err(hw, "Number (%u) of rx descriptors is invalid",
@@ -1338,9 +1649,33 @@ hns3_rx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_rxconf *conf,
                return -EINVAL;
        }
 
+       if (hw->data->dev_started) {
+               ret = hns3_rxq_conf_runtime_check(hw, *buf_size, nb_desc);
+               if (ret) {
+                       hns3_err(hw, "Rx queue runtime setup failed.");
+                       return ret;
+               }
+       }
+
        return 0;
 }
 
+uint32_t
+hns3_get_tqp_reg_offset(uint16_t queue_id)
+{
+       uint32_t reg_offset;
+
+       /* Queues with id >= HNS3_MIN_EXTEND_QUEUE_ID need an extended offset. */
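+       /*
+        * For example, assuming HNS3_MIN_EXTEND_QUEUE_ID is 1024, queue 0 is
+        * addressed at HNS3_TQP_REG_OFFSET while queue 1024 starts from
+        * HNS3_TQP_REG_OFFSET + HNS3_TQP_EXT_REG_OFFSET.
+        */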
+       if (queue_id < HNS3_MIN_EXTEND_QUEUE_ID)
+               reg_offset = HNS3_TQP_REG_OFFSET + queue_id * HNS3_TQP_REG_SIZE;
+       else
+               reg_offset = HNS3_TQP_REG_OFFSET + HNS3_TQP_EXT_REG_OFFSET +
+                            (queue_id - HNS3_MIN_EXTEND_QUEUE_ID) *
+                            HNS3_TQP_REG_SIZE;
+
+       return reg_offset;
+}
+
 int
 hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
                    unsigned int socket_id, const struct rte_eth_rxconf *conf,
@@ -1354,11 +1689,6 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
        int rx_entry_len;
        int ret;
 
-       if (dev->data->dev_started) {
-               hns3_err(hw, "rx_queue_setup after dev_start no supported");
-               return -EINVAL;
-       }
-
        ret = hns3_rx_queue_conf_check(hw, conf, mp, nb_desc, &rx_buf_size);
        if (ret)
                return ret;
@@ -1386,7 +1716,12 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
        rxq->mb_pool = mp;
        rxq->rx_free_thresh = (conf->rx_free_thresh > 0) ?
                conf->rx_free_thresh : HNS3_DEFAULT_RX_FREE_THRESH;
+
        rxq->rx_deferred_start = conf->rx_deferred_start;
+       if (rxq->rx_deferred_start && !hns3_dev_indep_txrx_supported(hw)) {
+               hns3_warn(hw, "deferred start is not supported.");
+               rxq->rx_deferred_start = false;
+       }
 
        rx_entry_len = (rxq->nb_rx_desc + HNS3_DEFAULT_RX_BURST) *
                        sizeof(struct hns3_entry);
@@ -1405,10 +1740,25 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
        rxq->pkt_first_seg = NULL;
        rxq->pkt_last_seg = NULL;
        rxq->port_id = dev->data->port_id;
-       rxq->pvid_state = hw->port_base_vlan_cfg.state;
+       /*
+        * For the hns3 PF device, if the VLAN mode is HW_SHIFT_AND_DISCARD_MODE,
+        * pvid_sw_discard_en in the queue struct should not be changed, because
+        * PVID-related operations do not need to be processed by the PMD
+        * driver. For the hns3 VF device, whether PVID needs to be processed
+        * depends on the configuration of the PF kernel mode netdevice driver.
+        * The related PF configuration is delivered through the mailbox and
+        * finally reflected in port_base_vlan_cfg.
+        */
+       if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
+               rxq->pvid_sw_discard_en = hw->port_base_vlan_cfg.state ==
+                                      HNS3_PORT_BASE_VLAN_ENABLE;
+       else
+               rxq->pvid_sw_discard_en = false;
        rxq->configured = true;
-       rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
-                               idx * HNS3_TQP_REG_SIZE);
+       rxq->io_base = (void *)((char *)hw->io_base +
+                                       hns3_get_tqp_reg_offset(idx));
        rxq->io_head_reg = (volatile void *)((char *)rxq->io_base +
                           HNS3_RING_RX_HEAD_REG);
        rxq->rx_buf_len = rx_buf_size;
@@ -1489,37 +1839,60 @@ hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
                RTE_PTYPE_L4_TCP,
                RTE_PTYPE_L4_UDP,
                RTE_PTYPE_TUNNEL_GRE,
+               RTE_PTYPE_INNER_L2_ETHER,
+               RTE_PTYPE_INNER_L2_ETHER_VLAN,
+               RTE_PTYPE_INNER_L2_ETHER_QINQ,
+               RTE_PTYPE_INNER_L3_IPV4,
+               RTE_PTYPE_INNER_L3_IPV6,
+               RTE_PTYPE_INNER_L3_IPV4_EXT,
+               RTE_PTYPE_INNER_L3_IPV6_EXT,
+               RTE_PTYPE_INNER_L4_UDP,
+               RTE_PTYPE_INNER_L4_TCP,
+               RTE_PTYPE_INNER_L4_SCTP,
+               RTE_PTYPE_INNER_L4_ICMP,
+               RTE_PTYPE_TUNNEL_VXLAN,
+               RTE_PTYPE_TUNNEL_NVGRE,
                RTE_PTYPE_UNKNOWN
        };
 
        if (dev->rx_pkt_burst == hns3_recv_pkts ||
            dev->rx_pkt_burst == hns3_recv_scattered_pkts ||
-           dev->rx_pkt_burst == hns3_recv_pkts_vec)
+           dev->rx_pkt_burst == hns3_recv_pkts_vec ||
+           dev->rx_pkt_burst == hns3_recv_pkts_vec_sve)
                return ptypes;
 
        return NULL;
 }
 
-void
-hns3_init_rx_ptype_tble(struct rte_eth_dev *dev)
-{
-       struct hns3_adapter *hns = dev->data->dev_private;
-       struct hns3_ptype_table *tbl = &hns->ptype_tbl;
-
-       memset(tbl, 0, sizeof(*tbl));
-
-       tbl->l2table[0] = RTE_PTYPE_L2_ETHER;
-       tbl->l2table[1] = RTE_PTYPE_L2_ETHER_QINQ;
-       tbl->l2table[2] = RTE_PTYPE_L2_ETHER_VLAN;
-       tbl->l2table[3] = RTE_PTYPE_L2_ETHER_VLAN;
-
-       tbl->l3table[0] = RTE_PTYPE_L3_IPV4;
-       tbl->l3table[1] = RTE_PTYPE_L3_IPV6;
-       tbl->l3table[2] = RTE_PTYPE_L2_ETHER_ARP;
-       tbl->l3table[3] = RTE_PTYPE_L2_ETHER;
-       tbl->l3table[4] = RTE_PTYPE_L3_IPV4_EXT;
-       tbl->l3table[5] = RTE_PTYPE_L3_IPV6_EXT;
-       tbl->l3table[6] = RTE_PTYPE_L2_ETHER_LLDP;
+static void
+hns3_init_non_tunnel_ptype_tbl(struct hns3_ptype_table *tbl)
+{
+       tbl->l2l3table[0][0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
+       tbl->l2l3table[0][1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
+       tbl->l2l3table[0][2] = RTE_PTYPE_L2_ETHER_ARP;
+       tbl->l2l3table[0][3] = RTE_PTYPE_L2_ETHER;
+       tbl->l2l3table[0][4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT;
+       tbl->l2l3table[0][5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT;
+       tbl->l2l3table[0][6] = RTE_PTYPE_L2_ETHER_LLDP;
+       tbl->l2l3table[0][15] = RTE_PTYPE_L2_ETHER;
+
+       tbl->l2l3table[1][0] = RTE_PTYPE_L2_ETHER_VLAN | RTE_PTYPE_L3_IPV4;
+       tbl->l2l3table[1][1] = RTE_PTYPE_L2_ETHER_VLAN | RTE_PTYPE_L3_IPV6;
+       tbl->l2l3table[1][2] = RTE_PTYPE_L2_ETHER_ARP;
+       tbl->l2l3table[1][3] = RTE_PTYPE_L2_ETHER_VLAN;
+       tbl->l2l3table[1][4] = RTE_PTYPE_L2_ETHER_VLAN | RTE_PTYPE_L3_IPV4_EXT;
+       tbl->l2l3table[1][5] = RTE_PTYPE_L2_ETHER_VLAN | RTE_PTYPE_L3_IPV6_EXT;
+       tbl->l2l3table[1][6] = RTE_PTYPE_L2_ETHER_LLDP;
+       tbl->l2l3table[1][15] = RTE_PTYPE_L2_ETHER_VLAN;
+
+       tbl->l2l3table[2][0] = RTE_PTYPE_L2_ETHER_QINQ | RTE_PTYPE_L3_IPV4;
+       tbl->l2l3table[2][1] = RTE_PTYPE_L2_ETHER_QINQ | RTE_PTYPE_L3_IPV6;
+       tbl->l2l3table[2][2] = RTE_PTYPE_L2_ETHER_ARP;
+       tbl->l2l3table[2][3] = RTE_PTYPE_L2_ETHER_QINQ;
+       tbl->l2l3table[2][4] = RTE_PTYPE_L2_ETHER_QINQ | RTE_PTYPE_L3_IPV4_EXT;
+       tbl->l2l3table[2][5] = RTE_PTYPE_L2_ETHER_QINQ | RTE_PTYPE_L3_IPV6_EXT;
+       tbl->l2l3table[2][6] = RTE_PTYPE_L2_ETHER_LLDP;
+       tbl->l2l3table[2][15] = RTE_PTYPE_L2_ETHER_QINQ;
 
        tbl->l4table[0] = RTE_PTYPE_L4_UDP;
        tbl->l4table[1] = RTE_PTYPE_L4_TCP;
@@ -1527,37 +1900,60 @@ hns3_init_rx_ptype_tble(struct rte_eth_dev *dev)
        tbl->l4table[3] = RTE_PTYPE_L4_SCTP;
        tbl->l4table[4] = RTE_PTYPE_L4_IGMP;
        tbl->l4table[5] = RTE_PTYPE_L4_ICMP;
+}
 
+static void
+hns3_init_tunnel_ptype_tbl(struct hns3_ptype_table *tbl)
+{
        tbl->inner_l2table[0] = RTE_PTYPE_INNER_L2_ETHER;
        tbl->inner_l2table[1] = RTE_PTYPE_INNER_L2_ETHER_VLAN;
        tbl->inner_l2table[2] = RTE_PTYPE_INNER_L2_ETHER_QINQ;
 
        tbl->inner_l3table[0] = RTE_PTYPE_INNER_L3_IPV4;
        tbl->inner_l3table[1] = RTE_PTYPE_INNER_L3_IPV6;
-       tbl->inner_l3table[2] = 0;
-       tbl->inner_l3table[3] = RTE_PTYPE_INNER_L2_ETHER;
+       /* There is no ptype for inner ARP/RARP */
+       tbl->inner_l3table[2] = RTE_PTYPE_UNKNOWN;
+       tbl->inner_l3table[3] = RTE_PTYPE_UNKNOWN;
        tbl->inner_l3table[4] = RTE_PTYPE_INNER_L3_IPV4_EXT;
        tbl->inner_l3table[5] = RTE_PTYPE_INNER_L3_IPV6_EXT;
 
        tbl->inner_l4table[0] = RTE_PTYPE_INNER_L4_UDP;
        tbl->inner_l4table[1] = RTE_PTYPE_INNER_L4_TCP;
-       tbl->inner_l4table[2] = RTE_PTYPE_TUNNEL_GRE;
+       /* There is no ptype for inner GRE */
+       tbl->inner_l4table[2] = RTE_PTYPE_UNKNOWN;
        tbl->inner_l4table[3] = RTE_PTYPE_INNER_L4_SCTP;
-       tbl->inner_l4table[4] = RTE_PTYPE_L4_IGMP;
+       /* There is no ptype for inner IGMP */
+       tbl->inner_l4table[4] = RTE_PTYPE_UNKNOWN;
        tbl->inner_l4table[5] = RTE_PTYPE_INNER_L4_ICMP;
 
+       tbl->ol2table[0] = RTE_PTYPE_L2_ETHER;
+       tbl->ol2table[1] = RTE_PTYPE_L2_ETHER_VLAN;
+       tbl->ol2table[2] = RTE_PTYPE_L2_ETHER_QINQ;
+
        tbl->ol3table[0] = RTE_PTYPE_L3_IPV4;
        tbl->ol3table[1] = RTE_PTYPE_L3_IPV6;
-       tbl->ol3table[2] = 0;
-       tbl->ol3table[3] = 0;
+       tbl->ol3table[2] = RTE_PTYPE_UNKNOWN;
+       tbl->ol3table[3] = RTE_PTYPE_UNKNOWN;
        tbl->ol3table[4] = RTE_PTYPE_L3_IPV4_EXT;
        tbl->ol3table[5] = RTE_PTYPE_L3_IPV6_EXT;
 
-       tbl->ol4table[0] = 0;
+       tbl->ol4table[0] = RTE_PTYPE_UNKNOWN;
        tbl->ol4table[1] = RTE_PTYPE_TUNNEL_VXLAN;
        tbl->ol4table[2] = RTE_PTYPE_TUNNEL_NVGRE;
 }
 
+void
+hns3_init_rx_ptype_tble(struct rte_eth_dev *dev)
+{
+       struct hns3_adapter *hns = dev->data->dev_private;
+       struct hns3_ptype_table *tbl = &hns->ptype_tbl;
+
+       memset(tbl, 0, sizeof(*tbl));
+
+       hns3_init_non_tunnel_ptype_tbl(tbl);
+       hns3_init_tunnel_ptype_tbl(tbl);
+}
+
 static inline void
 hns3_rxd_to_vlan_tci(struct hns3_rx_queue *rxq, struct rte_mbuf *mb,
                     uint32_t l234_info, const struct hns3_desc *rxd)
@@ -1592,7 +1988,7 @@ hns3_rxd_to_vlan_tci(struct hns3_rx_queue *rxq, struct rte_mbuf *mb,
        };
        strip_status = hns3_get_field(l234_info, HNS3_RXD_STRP_TAGP_M,
                                      HNS3_RXD_STRP_TAGP_S);
-       report_mode = report_type[rxq->pvid_state][strip_status];
+       report_mode = report_type[rxq->pvid_sw_discard_en][strip_status];
        switch (report_mode) {
        case HNS3_NO_STRP_VLAN_VLD:
                mb->vlan_tci = 0;
@@ -1605,6 +2001,9 @@ hns3_rxd_to_vlan_tci(struct hns3_rx_queue *rxq, struct rte_mbuf *mb,
                mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
                mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.ot_vlan_tag);
                return;
+       default:
+               mb->vlan_tci = 0;
+               return;
        }
 }
 
@@ -1991,12 +2390,20 @@ hns3_rx_check_vec_support(__rte_unused struct rte_eth_dev *dev)
 
 uint16_t __rte_weak
 hns3_recv_pkts_vec(__rte_unused void *tx_queue,
-                  __rte_unused struct rte_mbuf **tx_pkts,
+                  __rte_unused struct rte_mbuf **rx_pkts,
                   __rte_unused uint16_t nb_pkts)
 {
        return 0;
 }
 
+uint16_t __rte_weak
+hns3_recv_pkts_vec_sve(__rte_unused void *tx_queue,
+                      __rte_unused struct rte_mbuf **rx_pkts,
+                      __rte_unused uint16_t nb_pkts)
+{
+       return 0;
+}
+
 int
 hns3_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
                       struct rte_eth_burst_mode *mode)
@@ -2008,6 +2415,7 @@ hns3_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
                { hns3_recv_pkts,               "Scalar" },
                { hns3_recv_scattered_pkts,     "Scalar Scattered" },
                { hns3_recv_pkts_vec,           "Vector Neon" },
+               { hns3_recv_pkts_vec_sve,       "Vector Sve" },
        };
 
        eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
@@ -2026,6 +2434,16 @@ hns3_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
        return ret;
 }
 
+static bool
+hns3_check_sve_support(void)
+{
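+       /*
+        * Use the SVE data path only when it was built in (ARM64 toolchain
+        * with CC_SVE_SUPPORT) and the running CPU reports the SVE flag.
+        */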
+#if defined(RTE_ARCH_ARM64) && defined(CC_SVE_SUPPORT)
+       if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SVE))
+               return true;
+#endif
+       return false;
+}
+
 static eth_rx_burst_t
 hns3_get_rx_function(struct rte_eth_dev *dev)
 {
@@ -2033,7 +2451,8 @@ hns3_get_rx_function(struct rte_eth_dev *dev)
        uint64_t offloads = dev->data->dev_conf.rxmode.offloads;
 
        if (hns->rx_vec_allowed && hns3_rx_check_vec_support(dev) == 0)
-               return hns3_recv_pkts_vec;
+               return hns3_check_sve_support() ? hns3_recv_pkts_vec_sve :
+                      hns3_recv_pkts_vec;
 
        if (hns->rx_simple_allowed && !dev->data->scattered_rx &&
            (offloads & DEV_RX_OFFLOAD_TCP_LRO) == 0)
@@ -2098,11 +2517,6 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
        int tx_entry_len;
        int ret;
 
-       if (dev->data->dev_started) {
-               hns3_err(hw, "tx_queue_setup after dev_start no supported");
-               return -EINVAL;
-       }
-
        ret = hns3_tx_queue_conf_check(hw, conf, nb_desc,
                                       &tx_rs_thresh, &tx_free_thresh, idx);
        if (ret)
@@ -2126,6 +2540,11 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
        }
 
        txq->tx_deferred_start = conf->tx_deferred_start;
+       if (txq->tx_deferred_start && !hns3_dev_indep_txrx_supported(hw)) {
+               hns3_warn(hw, "deferred start is not supported.");
+               txq->tx_deferred_start = false;
+       }
+
        tx_entry_len = sizeof(struct hns3_entry) * txq->nb_tx_desc;
        txq->sw_ring = rte_zmalloc_socket("hns3 TX sw ring", tx_entry_len,
                                          RTE_CACHE_LINE_SIZE, socket_id);
@@ -2151,13 +2570,28 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
        }
 
        txq->port_id = dev->data->port_id;
-       txq->pvid_state = hw->port_base_vlan_cfg.state;
+       /*
+        * For the hns3 PF device, if the VLAN mode is HW_SHIFT_AND_DISCARD_MODE,
+        * pvid_sw_shift_en in the queue struct should not be changed, because
+        * PVID-related operations do not need to be processed by the PMD
+        * driver. For the hns3 VF device, whether PVID needs to be processed
+        * depends on the configuration of the PF kernel mode netdev driver.
+        * The related PF configuration is delivered through the mailbox and
+        * finally reflected in port_base_vlan_cfg.
+        */
+       if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
+               txq->pvid_sw_shift_en = hw->port_base_vlan_cfg.state ==
+                                       HNS3_PORT_BASE_VLAN_ENABLE;
+       else
+               txq->pvid_sw_shift_en = false;
+       txq->max_non_tso_bd_num = hw->max_non_tso_bd_num;
        txq->configured = true;
-       txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
-                               idx * HNS3_TQP_REG_SIZE);
+       txq->io_base = (void *)((char *)hw->io_base +
+                                               hns3_get_tqp_reg_offset(idx));
        txq->io_tail_reg = (volatile void *)((char *)txq->io_base +
                                             HNS3_RING_TX_TAIL_REG);
        txq->min_tx_pkt_len = hw->min_tx_pkt_len;
+       txq->tso_mode = hw->tso_mode;
        txq->over_length_pkt_cnt = 0;
        txq->exceed_limit_bd_pkt_cnt = 0;
        txq->exceed_limit_bd_reassem_fail = 0;
@@ -2352,7 +2786,7 @@ hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc,
         * To avoid the VLAN of Tx descriptor is overwritten by PVID, it should
         * be added to the position close to the IP header when PVID is enabled.
         */
-       if (!txq->pvid_state && ol_flags & (PKT_TX_VLAN_PKT |
+       if (!txq->pvid_sw_shift_en && ol_flags & (PKT_TX_VLAN_PKT |
                                PKT_TX_QINQ_PKT)) {
                desc->tx.ol_type_vlan_len_msec |=
                                rte_cpu_to_le_32(BIT(HNS3_TXD_OVLAN_B));
@@ -2365,7 +2799,7 @@ hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc,
        }
 
        if (ol_flags & PKT_TX_QINQ_PKT ||
-           ((ol_flags & PKT_TX_VLAN_PKT) && txq->pvid_state)) {
+           ((ol_flags & PKT_TX_VLAN_PKT) && txq->pvid_sw_shift_en)) {
                desc->tx.type_cs_vlan_tso_len |=
                                        rte_cpu_to_le_32(BIT(HNS3_TXD_VLAN_B));
                desc->tx.vlan_tag = rte_cpu_to_le_16(rxm->vlan_tci);
@@ -2409,7 +2843,8 @@ hns3_pktmbuf_copy_hdr(struct rte_mbuf *new_pkt, struct rte_mbuf *old_pkt)
 }
 
 static int
-hns3_reassemble_tx_pkts(struct rte_mbuf *tx_pkt, struct rte_mbuf **new_pkt)
+hns3_reassemble_tx_pkts(struct rte_mbuf *tx_pkt, struct rte_mbuf **new_pkt,
+                                 uint8_t max_non_tso_bd_num)
 {
        struct rte_mempool *mb_pool;
        struct rte_mbuf *new_mbuf;
@@ -2429,7 +2864,7 @@ hns3_reassemble_tx_pkts(struct rte_mbuf *tx_pkt, struct rte_mbuf **new_pkt)
        mb_pool = tx_pkt->pool;
        buf_size = tx_pkt->buf_len - RTE_PKTMBUF_HEADROOM;
        nb_new_buf = (rte_pktmbuf_pkt_len(tx_pkt) - 1) / buf_size + 1;
-       if (nb_new_buf > HNS3_MAX_NON_TSO_BD_PER_PKT)
+       if (nb_new_buf > max_non_tso_bd_num)
                return -EINVAL;
 
        last_buf_len = rte_pktmbuf_pkt_len(tx_pkt) % buf_size;
@@ -2661,7 +3096,8 @@ hns3_txd_enable_checksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
 }
 
 static bool
-hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num)
+hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num,
+                                uint32_t max_non_tso_bd_num)
 {
        struct rte_mbuf *m_first = tx_pkts;
        struct rte_mbuf *m_last = tx_pkts;
@@ -2676,10 +3112,10 @@ hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num)
         * frags greater than gso header len + mss, and the remaining 7
         * consecutive frags greater than MSS except the last 7 frags.
         */
-       if (bd_num <= HNS3_MAX_NON_TSO_BD_PER_PKT)
+       if (bd_num <= max_non_tso_bd_num)
                return false;
 
-       for (i = 0; m_last && i < HNS3_MAX_NON_TSO_BD_PER_PKT - 1;
+       for (i = 0; m_last && i < max_non_tso_bd_num - 1;
             i++, m_last = m_last->next)
                tot_len += m_last->data_len;
 
@@ -2697,7 +3133,7 @@ hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num)
         * ensure the sum of the data length of every 7 consecutive buffer
         * is greater than mss except the last one.
         */
-       for (i = 0; m_last && i < bd_num - HNS3_MAX_NON_TSO_BD_PER_PKT; i++) {
+       for (i = 0; m_last && i < bd_num - max_non_tso_bd_num; i++) {
                tot_len -= m_first->data_len;
                tot_len += m_last->data_len;
 
@@ -2791,7 +3227,7 @@ hns3_vld_vlan_chk(struct hns3_tx_queue *txq, struct rte_mbuf *m)
        struct rte_ether_hdr *eh;
        struct rte_vlan_hdr *vh;
 
-       if (!txq->pvid_state)
+       if (!txq->pvid_sw_shift_en)
                return 0;
 
        /*
@@ -2826,43 +3262,66 @@ hns3_vld_vlan_chk(struct hns3_tx_queue *txq, struct rte_mbuf *m)
 }
 #endif
 
-uint16_t
-hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
-              uint16_t nb_pkts)
+static int
+hns3_prep_pkt_proc(struct hns3_tx_queue *tx_queue, struct rte_mbuf *m)
 {
-       struct rte_mbuf *m;
-       uint16_t i;
        int ret;
 
-       for (i = 0; i < nb_pkts; i++) {
-               m = tx_pkts[i];
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+       ret = rte_validate_tx_offload(m);
+       if (ret != 0) {
+               rte_errno = -ret;
+               return ret;
+       }
 
-               if (hns3_pkt_is_tso(m) &&
-                   (hns3_pkt_need_linearized(m, m->nb_segs) ||
-                    hns3_check_tso_pkt_valid(m))) {
+       ret = hns3_vld_vlan_chk(tx_queue, m);
+       if (ret != 0) {
+               rte_errno = EINVAL;
+               return ret;
+       }
+#endif
+       if (hns3_pkt_is_tso(m)) {
+               if (hns3_pkt_need_linearized(m, m->nb_segs,
+                                            tx_queue->max_non_tso_bd_num) ||
+                   hns3_check_tso_pkt_valid(m)) {
                        rte_errno = EINVAL;
-                       return i;
+                       return -EINVAL;
                }
 
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
-               ret = rte_validate_tx_offload(m);
-               if (ret != 0) {
-                       rte_errno = -ret;
-                       return i;
+               if (tx_queue->tso_mode != HNS3_TSO_SW_CAL_PSEUDO_H_CSUM) {
+                       /*
+                        * (tso_mode != HNS3_TSO_SW_CAL_PSEUDO_H_CSUM) means the
+                        * hardware can recalculate the TCP pseudo header checksum
+                        * of packets that need TSO, so the network driver software
+                        * does not need to recalculate it.
+                        */
+                       hns3_outer_header_cksum_prepare(m);
+                       return 0;
                }
+       }
 
-               if (hns3_vld_vlan_chk(tx_queue, m)) {
-                       rte_errno = EINVAL;
-                       return i;
-               }
-#endif
-               ret = rte_net_intel_cksum_prepare(m);
-               if (ret != 0) {
-                       rte_errno = -ret;
-                       return i;
-               }
+       ret = rte_net_intel_cksum_prepare(m);
+       if (ret != 0) {
+               rte_errno = -ret;
+               return ret;
+       }
+
+       hns3_outer_header_cksum_prepare(m);
+
+       return 0;
+}
+
+uint16_t
+hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+              uint16_t nb_pkts)
+{
+       struct rte_mbuf *m;
+       uint16_t i;
 
-               hns3_outer_header_cksum_prepare(m);
+       for (i = 0; i < nb_pkts; i++) {
+               m = tx_pkts[i];
+               if (hns3_prep_pkt_proc(tx_queue, m))
+                       return i;
        }
 
        return i;
@@ -2892,6 +3351,7 @@ static int
 hns3_check_non_tso_pkt(uint16_t nb_buf, struct rte_mbuf **m_seg,
                      struct rte_mbuf *tx_pkt, struct hns3_tx_queue *txq)
 {
+       uint8_t max_non_tso_bd_num;
        struct rte_mbuf *new_pkt;
        int ret;
 
@@ -2907,9 +3367,11 @@ hns3_check_non_tso_pkt(uint16_t nb_buf, struct rte_mbuf **m_seg,
                return -EINVAL;
        }
 
-       if (unlikely(nb_buf > HNS3_MAX_NON_TSO_BD_PER_PKT)) {
+       max_non_tso_bd_num = txq->max_non_tso_bd_num;
+       if (unlikely(nb_buf > max_non_tso_bd_num)) {
                txq->exceed_limit_bd_pkt_cnt++;
-               ret = hns3_reassemble_tx_pkts(tx_pkt, &new_pkt);
+               ret = hns3_reassemble_tx_pkts(tx_pkt, &new_pkt,
+                                             max_non_tso_bd_num);
                if (ret) {
                        txq->exceed_limit_bd_reassem_fail++;
                        return ret;
@@ -3198,6 +3660,14 @@ hns3_xmit_pkts_vec(__rte_unused void *tx_queue,
        return 0;
 }
 
+uint16_t __rte_weak
+hns3_xmit_pkts_vec_sve(void __rte_unused * tx_queue,
+                      struct rte_mbuf __rte_unused **tx_pkts,
+                      uint16_t __rte_unused nb_pkts)
+{
+       return 0;
+}
+
 int
 hns3_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
                       struct rte_eth_burst_mode *mode)
@@ -3211,6 +3681,8 @@ hns3_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
                info = "Scalar";
        else if (pkt_burst == hns3_xmit_pkts_vec)
                info = "Vector Neon";
+       else if (pkt_burst == hns3_xmit_pkts_vec_sve)
+               info = "Vector Sve";
 
        if (info == NULL)
                return -EINVAL;
@@ -3228,7 +3700,8 @@ hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep)
 
        if (hns->tx_vec_allowed && hns3_tx_check_vec_support(dev) == 0) {
                *prep = NULL;
-               return hns3_xmit_pkts_vec;
+               return hns3_check_sve_support() ? hns3_xmit_pkts_vec_sve :
+                       hns3_xmit_pkts_vec;
        }
 
        if (hns->tx_simple_allowed &&
@@ -3275,6 +3748,8 @@ hns3_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
        qinfo->mp = rxq->mb_pool;
        qinfo->nb_desc = rxq->nb_rx_desc;
        qinfo->scattered_rx = dev->data->scattered_rx;
+       /* Report the HW Rx buffer length to user */
+       qinfo->rx_buf_size = rxq->rx_buf_len;
 
        /*
         * If there are no available Rx buffer descriptors, incoming packets
@@ -3298,3 +3773,123 @@ hns3_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
        qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
        qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
 }
+
+int
+hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+       struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct hns3_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
+       struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
+       int ret;
+
+       if (!hns3_dev_indep_txrx_supported(hw))
+               return -ENOTSUP;
+
+       ret = hns3_reset_queue(hw, rx_queue_id, HNS3_RING_TYPE_RX);
+       if (ret) {
+               hns3_err(hw, "fail to reset Rx queue %u, ret = %d.",
+                        rx_queue_id, ret);
+               return ret;
+       }
+
+       ret = hns3_init_rxq(hns, rx_queue_id);
+       if (ret) {
+               hns3_err(hw, "fail to init Rx queue %u, ret = %d.",
+                        rx_queue_id, ret);
+               return ret;
+       }
+
+       hns3_enable_rxq(rxq, true);
+       dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+       return ret;
+}
+
+int
+hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+       struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct hns3_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
+
+       if (!hns3_dev_indep_txrx_supported(hw))
+               return -ENOTSUP;
+
+       hns3_enable_rxq(rxq, false);
+       hns3_rx_queue_release_mbufs(rxq);
+       dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+       return 0;
+}
+
+int
+hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+       struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
+       int ret;
+
+       if (!hns3_dev_indep_txrx_supported(hw))
+               return -ENOTSUP;
+
+       ret = hns3_reset_queue(hw, tx_queue_id, HNS3_RING_TYPE_TX);
+       if (ret) {
+               hns3_err(hw, "fail to reset Tx queue %u, ret = %d.",
+                        tx_queue_id, ret);
+               return ret;
+       }
+
+       hns3_init_txq(txq);
+       hns3_enable_txq(txq, true);
+       dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+       return ret;
+}
+
+int
+hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+       struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
+
+       if (!hns3_dev_indep_txrx_supported(hw))
+               return -ENOTSUP;
+
+       hns3_enable_txq(txq, false);
+       hns3_tx_queue_release_mbufs(txq);
+       /*
+        * All the mbufs in sw_ring are released and all the pointers in sw_ring
+        * are set to NULL. If this queue is still called by the upper layer,
+        * residual SW state of this txq may cause these NULL pointers in
+        * sw_ring to be released again. To avoid it, reinitialize the txq.
+        */
+       hns3_init_txq(txq);
+       dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+       return 0;
+}
+
+uint32_t
+hns3_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+       /*
+        * Number of BDs that have been processed by the driver
+        * but have not been notified to the hardware.
+        */
+       uint32_t driver_hold_bd_num;
+       struct hns3_rx_queue *rxq;
+       uint32_t fbd_num;
+
+       rxq = dev->data->rx_queues[rx_queue_id];
+       fbd_num = hns3_read_dev(rxq, HNS3_RING_RX_FBDNUM_REG);
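+       /*
+        * The vector Rx paths track these driver-held BDs in rx_rearm_nb,
+        * while the scalar paths track them in rx_free_hold.
+        */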
+       if (dev->rx_pkt_burst == hns3_recv_pkts_vec ||
+           dev->rx_pkt_burst == hns3_recv_pkts_vec_sve)
+               driver_hold_bd_num = rxq->rx_rearm_nb;
+       else
+               driver_hold_bd_num = rxq->rx_free_hold;
+
+       if (fbd_num <= driver_hold_bd_num)
+               return 0;
+       else
+               return fbd_num - driver_hold_bd_num;
+}