1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2019 Hisilicon Limited.
5 #include <rte_bus_pci.h>
6 #include <rte_common.h>
7 #include <rte_cycles.h>
8 #include <rte_geneve.h>
10 #include <ethdev_driver.h>
13 #include <rte_malloc.h>
14 #if defined(RTE_ARCH_ARM64) && defined(__ARM_FEATURE_SVE)
15 #include <rte_cpuflags.h>
18 #include "hns3_ethdev.h"
19 #include "hns3_rxtx.h"
20 #include "hns3_regs.h"
21 #include "hns3_logs.h"
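/*
 * HNS3_CFG_DESC_NUM() encodes a descriptor ring size as the value written to
 * the ring BD number registers: (num / 8) - 1, i.e. the descriptor count in
 * units of 8, minus one.
 */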
23 #define HNS3_CFG_DESC_NUM(num) ((num) / 8 - 1)
24 #define HNS3_RX_RING_PREFETCTH_MASK 3
27 hns3_rx_queue_release_mbufs(struct hns3_rx_queue *rxq)
31	/* Note: fake Rx queues never take this path */
32 if (rxq->sw_ring == NULL)
35 if (rxq->rx_rearm_nb == 0) {
36 for (i = 0; i < rxq->nb_rx_desc; i++) {
37 if (rxq->sw_ring[i].mbuf != NULL) {
38 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
39 rxq->sw_ring[i].mbuf = NULL;
43 for (i = rxq->next_to_use;
44 i != rxq->rx_rearm_start;
45 i = (i + 1) % rxq->nb_rx_desc) {
46 if (rxq->sw_ring[i].mbuf != NULL) {
47 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
48 rxq->sw_ring[i].mbuf = NULL;
53 for (i = 0; i < rxq->bulk_mbuf_num; i++)
54 rte_pktmbuf_free_seg(rxq->bulk_mbuf[i]);
55 rxq->bulk_mbuf_num = 0;
57 if (rxq->pkt_first_seg) {
58 rte_pktmbuf_free(rxq->pkt_first_seg);
59 rxq->pkt_first_seg = NULL;
64 hns3_tx_queue_release_mbufs(struct hns3_tx_queue *txq)
68	/* Note: fake Tx queues never take this path */
70 for (i = 0; i < txq->nb_tx_desc; i++) {
71 if (txq->sw_ring[i].mbuf) {
72 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
73 txq->sw_ring[i].mbuf = NULL;
80 hns3_rx_queue_release(void *queue)
82 struct hns3_rx_queue *rxq = queue;
84 hns3_rx_queue_release_mbufs(rxq);
86 rte_memzone_free(rxq->mz);
88 rte_free(rxq->sw_ring);
94 hns3_tx_queue_release(void *queue)
96 struct hns3_tx_queue *txq = queue;
98 hns3_tx_queue_release_mbufs(txq);
100 rte_memzone_free(txq->mz);
102 rte_free(txq->sw_ring);
110 hns3_dev_rx_queue_release(void *queue)
112 struct hns3_rx_queue *rxq = queue;
113 struct hns3_adapter *hns;
119 rte_spinlock_lock(&hns->hw.lock);
120 hns3_rx_queue_release(queue);
121 rte_spinlock_unlock(&hns->hw.lock);
125 hns3_dev_tx_queue_release(void *queue)
127 struct hns3_tx_queue *txq = queue;
128 struct hns3_adapter *hns;
134 rte_spinlock_lock(&hns->hw.lock);
135 hns3_tx_queue_release(queue);
136 rte_spinlock_unlock(&hns->hw.lock);
140 hns3_fake_rx_queue_release(struct hns3_rx_queue *queue)
142 struct hns3_rx_queue *rxq = queue;
143 struct hns3_adapter *hns;
153 if (hw->fkq_data.rx_queues[idx]) {
154 hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);
155 hw->fkq_data.rx_queues[idx] = NULL;
158 /* free fake rx queue arrays */
159 if (idx == (hw->fkq_data.nb_fake_rx_queues - 1)) {
160 hw->fkq_data.nb_fake_rx_queues = 0;
161 rte_free(hw->fkq_data.rx_queues);
162 hw->fkq_data.rx_queues = NULL;
167 hns3_fake_tx_queue_release(struct hns3_tx_queue *queue)
169 struct hns3_tx_queue *txq = queue;
170 struct hns3_adapter *hns;
180 if (hw->fkq_data.tx_queues[idx]) {
181 hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);
182 hw->fkq_data.tx_queues[idx] = NULL;
185 /* free fake tx queue arrays */
186 if (idx == (hw->fkq_data.nb_fake_tx_queues - 1)) {
187 hw->fkq_data.nb_fake_tx_queues = 0;
188 rte_free(hw->fkq_data.tx_queues);
189 hw->fkq_data.tx_queues = NULL;
194 hns3_free_rx_queues(struct rte_eth_dev *dev)
196 struct hns3_adapter *hns = dev->data->dev_private;
197 struct hns3_fake_queue_data *fkq_data;
198 struct hns3_hw *hw = &hns->hw;
202 nb_rx_q = hw->data->nb_rx_queues;
203 for (i = 0; i < nb_rx_q; i++) {
204 if (dev->data->rx_queues[i]) {
205 hns3_rx_queue_release(dev->data->rx_queues[i]);
206 dev->data->rx_queues[i] = NULL;
210 /* Free fake Rx queues */
211 fkq_data = &hw->fkq_data;
212 for (i = 0; i < fkq_data->nb_fake_rx_queues; i++) {
213 if (fkq_data->rx_queues[i])
214 hns3_fake_rx_queue_release(fkq_data->rx_queues[i]);
219 hns3_free_tx_queues(struct rte_eth_dev *dev)
221 struct hns3_adapter *hns = dev->data->dev_private;
222 struct hns3_fake_queue_data *fkq_data;
223 struct hns3_hw *hw = &hns->hw;
227 nb_tx_q = hw->data->nb_tx_queues;
228 for (i = 0; i < nb_tx_q; i++) {
229 if (dev->data->tx_queues[i]) {
230 hns3_tx_queue_release(dev->data->tx_queues[i]);
231 dev->data->tx_queues[i] = NULL;
235 /* Free fake Tx queues */
236 fkq_data = &hw->fkq_data;
237 for (i = 0; i < fkq_data->nb_fake_tx_queues; i++) {
238 if (fkq_data->tx_queues[i])
239 hns3_fake_tx_queue_release(fkq_data->tx_queues[i]);
244 hns3_free_all_queues(struct rte_eth_dev *dev)
246 hns3_free_rx_queues(dev);
247 hns3_free_tx_queues(dev);
251 hns3_alloc_rx_queue_mbufs(struct hns3_hw *hw, struct hns3_rx_queue *rxq)
253 struct rte_mbuf *mbuf;
257 for (i = 0; i < rxq->nb_rx_desc; i++) {
258 mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
259 if (unlikely(mbuf == NULL)) {
260 hns3_err(hw, "Failed to allocate RXD[%u] for rx queue!",
262 hns3_rx_queue_release_mbufs(rxq);
266 rte_mbuf_refcnt_set(mbuf, 1);
268 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
270 mbuf->port = rxq->port_id;
272 rxq->sw_ring[i].mbuf = mbuf;
273 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
274 rxq->rx_ring[i].addr = dma_addr;
275 rxq->rx_ring[i].rx.bd_base_info = 0;
282 hns3_buf_size2type(uint32_t buf_size)
288 bd_size_type = HNS3_BD_SIZE_512_TYPE;
291 bd_size_type = HNS3_BD_SIZE_1024_TYPE;
294 bd_size_type = HNS3_BD_SIZE_4096_TYPE;
297 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
304 hns3_init_rx_queue_hw(struct hns3_rx_queue *rxq)
306 uint32_t rx_buf_len = rxq->rx_buf_len;
307 uint64_t dma_addr = rxq->rx_ring_phys_addr;
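	/*
	 * The 64-bit descriptor ring base address is programmed as two 32-bit
	 * halves; (dma_addr >> 31) >> 1 yields the upper 32 bits for the high
	 * register.
	 */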
309 hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_L_REG, (uint32_t)dma_addr);
310 hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_H_REG,
311 (uint32_t)((dma_addr >> 31) >> 1));
313 hns3_write_dev(rxq, HNS3_RING_RX_BD_LEN_REG,
314 hns3_buf_size2type(rx_buf_len));
315 hns3_write_dev(rxq, HNS3_RING_RX_BD_NUM_REG,
316 HNS3_CFG_DESC_NUM(rxq->nb_rx_desc));
320 hns3_init_tx_queue_hw(struct hns3_tx_queue *txq)
322 uint64_t dma_addr = txq->tx_ring_phys_addr;
324 hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_L_REG, (uint32_t)dma_addr);
325 hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_H_REG,
326 (uint32_t)((dma_addr >> 31) >> 1));
328 hns3_write_dev(txq, HNS3_RING_TX_BD_NUM_REG,
329 HNS3_CFG_DESC_NUM(txq->nb_tx_desc));
333 hns3_update_all_queues_pvid_proc_en(struct hns3_hw *hw)
335 uint16_t nb_rx_q = hw->data->nb_rx_queues;
336 uint16_t nb_tx_q = hw->data->nb_tx_queues;
337 struct hns3_rx_queue *rxq;
338 struct hns3_tx_queue *txq;
342 pvid_en = hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE;
343 for (i = 0; i < hw->cfg_max_queues; i++) {
345 rxq = hw->data->rx_queues[i];
347 rxq->pvid_sw_discard_en = pvid_en;
350 txq = hw->data->tx_queues[i];
352 txq->pvid_sw_shift_en = pvid_en;
358 hns3_stop_unused_queue(void *tqp_base, enum hns3_ring_type queue_type)
363 reg_offset = queue_type == HNS3_RING_TYPE_TX ?
364 HNS3_RING_TX_EN_REG : HNS3_RING_RX_EN_REG;
365 reg = hns3_read_reg(tqp_base, reg_offset);
366 reg &= ~BIT(HNS3_RING_EN_B);
367 hns3_write_reg(tqp_base, reg_offset, reg);
371 hns3_enable_all_queues(struct hns3_hw *hw, bool en)
373 uint16_t nb_rx_q = hw->data->nb_rx_queues;
374 uint16_t nb_tx_q = hw->data->nb_tx_queues;
375 struct hns3_rx_queue *rxq;
376 struct hns3_tx_queue *txq;
381 for (i = 0; i < hw->cfg_max_queues; i++) {
382 if (hns3_dev_indep_txrx_supported(hw)) {
383 rxq = i < nb_rx_q ? hw->data->rx_queues[i] : NULL;
384 txq = i < nb_tx_q ? hw->data->tx_queues[i] : NULL;
386 tqp_base = (void *)((char *)hw->io_base +
387 hns3_get_tqp_reg_offset(i));
389			 * If the queue struct is not initialized, the related
390			 * HW ring has not been initialized yet either. Such
391			 * queues should be disabled before enabling the tqps
392			 * to avoid a HW exception, since queues are enabled
393			 * by default.
396 hns3_stop_unused_queue(tqp_base,
399 hns3_stop_unused_queue(tqp_base,
402 rxq = i < nb_rx_q ? hw->data->rx_queues[i] :
403 hw->fkq_data.rx_queues[i - nb_rx_q];
405 tqp_base = rxq->io_base;
408		 * This is the master switch used to control the enabling of a
409		 * pair of Tx and Rx queues. Both the Rx and Tx point to the same register.
412 rcb_reg = hns3_read_reg(tqp_base, HNS3_RING_EN_REG);
414 rcb_reg |= BIT(HNS3_RING_EN_B);
416 rcb_reg &= ~BIT(HNS3_RING_EN_B);
417 hns3_write_reg(tqp_base, HNS3_RING_EN_REG, rcb_reg);
422 hns3_enable_txq(struct hns3_tx_queue *txq, bool en)
424 struct hns3_hw *hw = &txq->hns->hw;
427 if (hns3_dev_indep_txrx_supported(hw)) {
428 reg = hns3_read_dev(txq, HNS3_RING_TX_EN_REG);
430 reg |= BIT(HNS3_RING_EN_B);
432 reg &= ~BIT(HNS3_RING_EN_B);
433 hns3_write_dev(txq, HNS3_RING_TX_EN_REG, reg);
439 hns3_enable_rxq(struct hns3_rx_queue *rxq, bool en)
441 struct hns3_hw *hw = &rxq->hns->hw;
444 if (hns3_dev_indep_txrx_supported(hw)) {
445 reg = hns3_read_dev(rxq, HNS3_RING_RX_EN_REG);
447 reg |= BIT(HNS3_RING_EN_B);
449 reg &= ~BIT(HNS3_RING_EN_B);
450 hns3_write_dev(rxq, HNS3_RING_RX_EN_REG, reg);
456 hns3_start_all_txqs(struct rte_eth_dev *dev)
458 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
459 struct hns3_tx_queue *txq;
462 for (i = 0; i < dev->data->nb_tx_queues; i++) {
463 txq = hw->data->tx_queues[i];
465 hns3_err(hw, "Tx queue %u not available or setup.", i);
466 goto start_txqs_fail;
469		 * A Tx queue is enabled by default. Therefore, the Tx queue
470		 * needs to be disabled when deferred_start is set. There is
471		 * another master switch used to control the enabling of a pair
472		 * of Tx and Rx queues, and that master switch is disabled by default.
475 if (txq->tx_deferred_start)
476 hns3_enable_txq(txq, false);
478 hns3_enable_txq(txq, true);
483 for (j = 0; j < i; j++) {
484 txq = hw->data->tx_queues[j];
485 hns3_enable_txq(txq, false);
491 hns3_start_all_rxqs(struct rte_eth_dev *dev)
493 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
494 struct hns3_rx_queue *rxq;
497 for (i = 0; i < dev->data->nb_rx_queues; i++) {
498 rxq = hw->data->rx_queues[i];
500 hns3_err(hw, "Rx queue %u not available or setup.", i);
501 goto start_rxqs_fail;
504		 * An Rx queue is enabled by default. Therefore, the Rx queue
505		 * needs to be disabled when deferred_start is set. There is
506		 * another master switch used to control the enabling of a pair
507		 * of Tx and Rx queues, and that master switch is disabled by default.
510 if (rxq->rx_deferred_start)
511 hns3_enable_rxq(rxq, false);
513 hns3_enable_rxq(rxq, true);
518 for (j = 0; j < i; j++) {
519 rxq = hw->data->rx_queues[j];
520 hns3_enable_rxq(rxq, false);
526 hns3_restore_tqp_enable_state(struct hns3_hw *hw)
528 struct hns3_rx_queue *rxq;
529 struct hns3_tx_queue *txq;
532 for (i = 0; i < hw->data->nb_rx_queues; i++) {
533 rxq = hw->data->rx_queues[i];
535 hns3_enable_rxq(rxq, rxq->enabled);
538 for (i = 0; i < hw->data->nb_tx_queues; i++) {
539 txq = hw->data->tx_queues[i];
541 hns3_enable_txq(txq, txq->enabled);
546 hns3_stop_all_txqs(struct rte_eth_dev *dev)
548 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
549 struct hns3_tx_queue *txq;
552 for (i = 0; i < dev->data->nb_tx_queues; i++) {
553 txq = hw->data->tx_queues[i];
556 hns3_enable_txq(txq, false);
561 hns3_tqp_enable(struct hns3_hw *hw, uint16_t queue_id, bool enable)
563 struct hns3_cfg_com_tqp_queue_cmd *req;
564 struct hns3_cmd_desc desc;
567 req = (struct hns3_cfg_com_tqp_queue_cmd *)desc.data;
569 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_COM_TQP_QUEUE, false);
570 req->tqp_id = rte_cpu_to_le_16(queue_id);
572 hns3_set_bit(req->enable, HNS3_TQP_ENABLE_B, enable ? 1 : 0);
574 ret = hns3_cmd_send(hw, &desc, 1);
576 hns3_err(hw, "TQP enable fail, ret = %d", ret);
582 hns3_send_reset_tqp_cmd(struct hns3_hw *hw, uint16_t queue_id, bool enable)
584 struct hns3_reset_tqp_queue_cmd *req;
585 struct hns3_cmd_desc desc;
588 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, false);
590 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
591 req->tqp_id = rte_cpu_to_le_16(queue_id);
592 hns3_set_bit(req->reset_req, HNS3_TQP_RESET_B, enable ? 1 : 0);
593 ret = hns3_cmd_send(hw, &desc, 1);
595 hns3_err(hw, "send tqp reset cmd error, queue_id = %u, "
596 "ret = %d", queue_id, ret);
602 hns3_get_tqp_reset_status(struct hns3_hw *hw, uint16_t queue_id,
603 uint8_t *reset_status)
605 struct hns3_reset_tqp_queue_cmd *req;
606 struct hns3_cmd_desc desc;
609 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, true);
611 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
612 req->tqp_id = rte_cpu_to_le_16(queue_id);
614 ret = hns3_cmd_send(hw, &desc, 1);
616 hns3_err(hw, "get tqp reset status error, queue_id = %u, "
617 "ret = %d.", queue_id, ret);
620 *reset_status = hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B);
625 hns3pf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
627 #define HNS3_TQP_RESET_TRY_MS 200
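	/*
	 * The reset status is polled below in HNS3_POLL_RESPONE_MS steps for at
	 * most HNS3_TQP_RESET_TRY_MS before the reset is treated as timed out.
	 */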
628 uint8_t reset_status;
633	 * In the current version, VFs are not supported when the PF is driven by
634	 * the DPDK driver; all task queue pairs are mapped to the PF function, so
635	 * the PF's queue id is equal to the global queue id within the PF's range.
637 ret = hns3_send_reset_tqp_cmd(hw, queue_id, true);
639 hns3_err(hw, "Send reset tqp cmd fail, ret = %d", ret);
642 end = get_timeofday_ms() + HNS3_TQP_RESET_TRY_MS;
644 /* Wait for tqp hw reset */
645 rte_delay_ms(HNS3_POLL_RESPONE_MS);
646 ret = hns3_get_tqp_reset_status(hw, queue_id, &reset_status);
652 } while (get_timeofday_ms() < end);
656 hns3_err(hw, "reset tqp timeout, queue_id = %u, ret = %d",
661 ret = hns3_send_reset_tqp_cmd(hw, queue_id, false);
663 hns3_err(hw, "Deassert the soft reset fail, ret = %d", ret);
668 hns3_send_reset_tqp_cmd(hw, queue_id, false);
673 hns3vf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
678 memcpy(msg_data, &queue_id, sizeof(uint16_t));
680 ret = hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data,
681 sizeof(msg_data), true, NULL, 0);
683 hns3_err(hw, "fail to reset tqp, queue_id = %u, ret = %d.",
689 hns3_reset_rcb_cmd(struct hns3_hw *hw, uint8_t *reset_status)
691 struct hns3_reset_cmd *req;
692 struct hns3_cmd_desc desc;
695 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_RST_TRIGGER, false);
696 req = (struct hns3_reset_cmd *)desc.data;
697 hns3_set_bit(req->mac_func_reset, HNS3_CFG_RESET_RCB_B, 1);
700	 * The start qid should be the global qid of the first tqp of the
701	 * function to be reset on this port. Since our PF does not support
702	 * taking over VFs, only function 0 needs to be reset, and its start
703	 * qid is always 0.
705 req->fun_reset_rcb_vqid_start = rte_cpu_to_le_16(0);
706 req->fun_reset_rcb_vqid_num = rte_cpu_to_le_16(hw->cfg_max_queues);
708 ret = hns3_cmd_send(hw, &desc, 1);
710 hns3_err(hw, "fail to send rcb reset cmd, ret = %d.", ret);
714 *reset_status = req->fun_reset_rcb_return_status;
719 hns3pf_reset_all_tqps(struct hns3_hw *hw)
721 #define HNS3_RESET_RCB_NOT_SUPPORT 0U
722 #define HNS3_RESET_ALL_TQP_SUCCESS 1U
723 uint8_t reset_status;
727 ret = hns3_reset_rcb_cmd(hw, &reset_status);
732	 * If the firmware version is old, it may not support the rcb reset,
733	 * which resets all the tqps at a time. In this case, the tqps should
734	 * be reset one by one.
736 if (reset_status == HNS3_RESET_RCB_NOT_SUPPORT) {
737 for (i = 0; i < hw->cfg_max_queues; i++) {
738 ret = hns3pf_reset_tqp(hw, i);
741 "fail to reset tqp, queue_id = %d, ret = %d.",
746 } else if (reset_status != HNS3_RESET_ALL_TQP_SUCCESS) {
747 hns3_err(hw, "fail to reset all tqps, reset_status = %u.",
756 hns3vf_reset_all_tqps(struct hns3_hw *hw)
758 #define HNS3VF_RESET_ALL_TQP_DONE 1U
759 uint8_t reset_status;
764 memset(msg_data, 0, sizeof(uint16_t));
765 ret = hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data,
766 sizeof(msg_data), true, &reset_status,
767 sizeof(reset_status));
769 hns3_err(hw, "fail to send rcb reset mbx, ret = %d.", ret);
773 if (reset_status == HNS3VF_RESET_ALL_TQP_DONE)
777	 * If the firmware version or kernel PF version is old, it may not
778	 * support the rcb reset, which resets all the tqps at a time. In this
779	 * case, the tqps should be reset one by one.
781 for (i = 1; i < hw->cfg_max_queues; i++) {
782 ret = hns3vf_reset_tqp(hw, i);
791 hns3_reset_all_tqps(struct hns3_adapter *hns)
793 struct hns3_hw *hw = &hns->hw;
796	/* Disable all queues before resetting them */
797 for (i = 0; i < hw->cfg_max_queues; i++) {
798 ret = hns3_tqp_enable(hw, i, false);
801 "fail to disable tqps before tqps reset, ret = %d.",
808 return hns3vf_reset_all_tqps(hw);
810 return hns3pf_reset_all_tqps(hw);
814 hns3_send_reset_queue_cmd(struct hns3_hw *hw, uint16_t queue_id,
815 enum hns3_ring_type queue_type, bool enable)
817 struct hns3_reset_tqp_queue_cmd *req;
818 struct hns3_cmd_desc desc;
822 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE_INDEP, false);
824 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
825 req->tqp_id = rte_cpu_to_le_16(queue_id);
826 queue_direction = queue_type == HNS3_RING_TYPE_TX ? 0 : 1;
827 req->queue_direction = rte_cpu_to_le_16(queue_direction);
828 hns3_set_bit(req->reset_req, HNS3_TQP_RESET_B, enable ? 1 : 0);
830 ret = hns3_cmd_send(hw, &desc, 1);
832 hns3_err(hw, "send queue reset cmd error, queue_id = %u, "
833 "queue_type = %s, ret = %d.", queue_id,
834 queue_type == HNS3_RING_TYPE_TX ? "Tx" : "Rx", ret);
839 hns3_get_queue_reset_status(struct hns3_hw *hw, uint16_t queue_id,
840 enum hns3_ring_type queue_type,
841 uint8_t *reset_status)
843 struct hns3_reset_tqp_queue_cmd *req;
844 struct hns3_cmd_desc desc;
848 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE_INDEP, true);
850 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
851 req->tqp_id = rte_cpu_to_le_16(queue_id);
852 queue_direction = queue_type == HNS3_RING_TYPE_TX ? 0 : 1;
853 req->queue_direction = rte_cpu_to_le_16(queue_direction);
855 ret = hns3_cmd_send(hw, &desc, 1);
857 hns3_err(hw, "get queue reset status error, queue_id = %u "
858 "queue_type = %s, ret = %d.", queue_id,
859 queue_type == HNS3_RING_TYPE_TX ? "Tx" : "Rx", ret);
863 *reset_status = hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B);
868 hns3_reset_queue(struct hns3_hw *hw, uint16_t queue_id,
869 enum hns3_ring_type queue_type)
871 #define HNS3_QUEUE_RESET_TRY_MS 200
872 struct hns3_tx_queue *txq;
873 struct hns3_rx_queue *rxq;
874 uint32_t reset_wait_times;
875 uint32_t max_wait_times;
876 uint8_t reset_status;
879 if (queue_type == HNS3_RING_TYPE_TX) {
880 txq = hw->data->tx_queues[queue_id];
881 hns3_enable_txq(txq, false);
883 rxq = hw->data->rx_queues[queue_id];
884 hns3_enable_rxq(rxq, false);
887 ret = hns3_send_reset_queue_cmd(hw, queue_id, queue_type, true);
889 hns3_err(hw, "send reset queue cmd fail, ret = %d.", ret);
893 reset_wait_times = 0;
894 max_wait_times = HNS3_QUEUE_RESET_TRY_MS / HNS3_POLL_RESPONE_MS;
895 while (reset_wait_times < max_wait_times) {
896 /* Wait for queue hw reset */
897 rte_delay_ms(HNS3_POLL_RESPONE_MS);
898 ret = hns3_get_queue_reset_status(hw, queue_id,
899 queue_type, &reset_status);
901 goto queue_reset_fail;
909 hns3_err(hw, "reset queue timeout, queue_id = %u, "
910 "queue_type = %s", queue_id,
911 queue_type == HNS3_RING_TYPE_TX ? "Tx" : "Rx");
913 goto queue_reset_fail;
916 ret = hns3_send_reset_queue_cmd(hw, queue_id, queue_type, false);
918 hns3_err(hw, "deassert queue reset fail, ret = %d.", ret);
923 hns3_send_reset_queue_cmd(hw, queue_id, queue_type, false);
928 hns3_get_tqp_intr_reg_offset(uint16_t tqp_intr_id)
932	/* An extended offset is needed to configure more than 64 queues */
933 if (tqp_intr_id < HNS3_MIN_EXT_TQP_INTR_ID)
934 reg_offset = HNS3_TQP_INTR_REG_BASE +
935 tqp_intr_id * HNS3_TQP_INTR_LOW_ORDER_OFFSET;
937 reg_offset = HNS3_TQP_INTR_EXT_REG_BASE +
938 tqp_intr_id / HNS3_MIN_EXT_TQP_INTR_ID *
939 HNS3_TQP_INTR_HIGH_ORDER_OFFSET +
940 tqp_intr_id % HNS3_MIN_EXT_TQP_INTR_ID *
941 HNS3_TQP_INTR_LOW_ORDER_OFFSET;
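	/*
	 * For example, assuming HNS3_MIN_EXT_TQP_INTR_ID is 64 (consistent with
	 * the comment above), interrupt id 70 maps to HNS3_TQP_INTR_EXT_REG_BASE +
	 * 1 * HNS3_TQP_INTR_HIGH_ORDER_OFFSET + 6 * HNS3_TQP_INTR_LOW_ORDER_OFFSET.
	 */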
947 hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
948 uint8_t gl_idx, uint16_t gl_value)
950 uint32_t offset[] = {HNS3_TQP_INTR_GL0_REG,
951 HNS3_TQP_INTR_GL1_REG,
952 HNS3_TQP_INTR_GL2_REG};
953 uint32_t addr, value;
955 if (gl_idx >= RTE_DIM(offset) || gl_value > HNS3_TQP_INTR_GL_MAX)
958 addr = offset[gl_idx] + hns3_get_tqp_intr_reg_offset(queue_id);
959 if (hw->intr.gl_unit == HNS3_INTR_COALESCE_GL_UINT_1US)
960 value = gl_value | HNS3_TQP_INTR_GL_UNIT_1US;
962 value = HNS3_GL_USEC_TO_REG(gl_value);
964 hns3_write_dev(hw, addr, value);
968 hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id, uint16_t rl_value)
970 uint32_t addr, value;
972 if (rl_value > HNS3_TQP_INTR_RL_MAX)
975 addr = HNS3_TQP_INTR_RL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
976 value = HNS3_RL_USEC_TO_REG(rl_value);
978 value |= HNS3_TQP_INTR_RL_ENABLE_MASK;
980 hns3_write_dev(hw, addr, value);
984 hns3_set_queue_intr_ql(struct hns3_hw *hw, uint16_t queue_id, uint16_t ql_value)
989	 * int_ql_max == 0 means the hardware does not support QL; configuring
990	 * the QL registers is not permitted when QL is not supported.
993 if (hw->intr.int_ql_max == HNS3_INTR_QL_NONE)
996 addr = HNS3_TQP_INTR_TX_QL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
997 hns3_write_dev(hw, addr, ql_value);
999 addr = HNS3_TQP_INTR_RX_QL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
1000 hns3_write_dev(hw, addr, ql_value);
1004 hns3_queue_intr_enable(struct hns3_hw *hw, uint16_t queue_id, bool en)
1006 uint32_t addr, value;
1008 addr = HNS3_TQP_INTR_CTRL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
1011 hns3_write_dev(hw, addr, value);
1015	 * Enable all Rx queue interrupts when in interrupt Rx mode.
1016	 * This API is called before enabling the Rx/Tx queues (in normal start or
1017	 * reset recovery scenarios) to fix cases where the hardware Rx queue interrupt enable bit was cleared.
1021 hns3_dev_all_rx_queue_intr_enable(struct hns3_hw *hw, bool en)
1023 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
1024 uint16_t nb_rx_q = hw->data->nb_rx_queues;
1027 if (dev->data->dev_conf.intr_conf.rxq == 0)
1030 for (i = 0; i < nb_rx_q; i++)
1031 hns3_queue_intr_enable(hw, i, en);
1035 hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
1037 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1038 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1039 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1041 if (dev->data->dev_conf.intr_conf.rxq == 0)
1044 hns3_queue_intr_enable(hw, queue_id, true);
1046 return rte_intr_ack(intr_handle);
1050 hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
1052 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1054 if (dev->data->dev_conf.intr_conf.rxq == 0)
1057 hns3_queue_intr_enable(hw, queue_id, false);
1063 hns3_init_rxq(struct hns3_adapter *hns, uint16_t idx)
1065 struct hns3_hw *hw = &hns->hw;
1066 struct hns3_rx_queue *rxq;
1069 PMD_INIT_FUNC_TRACE();
1071 rxq = (struct hns3_rx_queue *)hw->data->rx_queues[idx];
1072 ret = hns3_alloc_rx_queue_mbufs(hw, rxq);
1074 hns3_err(hw, "fail to alloc mbuf for Rx queue %u, ret = %d.",
1079 rxq->next_to_use = 0;
1080 rxq->rx_rearm_start = 0;
1081 rxq->rx_free_hold = 0;
1082 rxq->rx_rearm_nb = 0;
1083 rxq->pkt_first_seg = NULL;
1084 rxq->pkt_last_seg = NULL;
1085 hns3_init_rx_queue_hw(rxq);
1086 hns3_rxq_vec_setup(rxq);
1092 hns3_init_fake_rxq(struct hns3_adapter *hns, uint16_t idx)
1094 struct hns3_hw *hw = &hns->hw;
1095 struct hns3_rx_queue *rxq;
1097 rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[idx];
1098 rxq->next_to_use = 0;
1099 rxq->rx_free_hold = 0;
1100 rxq->rx_rearm_start = 0;
1101 rxq->rx_rearm_nb = 0;
1102 hns3_init_rx_queue_hw(rxq);
1106 hns3_init_txq(struct hns3_tx_queue *txq)
1108 struct hns3_desc *desc;
1112 desc = txq->tx_ring;
1113 for (i = 0; i < txq->nb_tx_desc; i++) {
1114 desc->tx.tp_fe_sc_vld_ra_ri = 0;
1118 txq->next_to_use = 0;
1119 txq->next_to_clean = 0;
1120 txq->tx_bd_ready = txq->nb_tx_desc - 1;
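	/*
	 * tx_bd_ready tracks the number of available Tx BDs; it starts at
	 * nb_tx_desc - 1, presumably so a completely full ring can be told
	 * apart from an empty one.
	 */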
1121 hns3_init_tx_queue_hw(txq);
1125 hns3_init_tx_ring_tc(struct hns3_adapter *hns)
1127 struct hns3_hw *hw = &hns->hw;
1128 struct hns3_tx_queue *txq;
1131 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
1132 struct hns3_tc_queue_info *tc_queue = &hw->tc_queue[i];
1135 if (!tc_queue->enable)
1138 for (j = 0; j < tc_queue->tqp_count; j++) {
1139 num = tc_queue->tqp_offset + j;
1140 txq = (struct hns3_tx_queue *)hw->data->tx_queues[num];
1144 hns3_write_dev(txq, HNS3_RING_TX_TC_REG, tc_queue->tc);
1150 hns3_init_rx_queues(struct hns3_adapter *hns)
1152 struct hns3_hw *hw = &hns->hw;
1153 struct hns3_rx_queue *rxq;
1157 /* Initialize RSS for queues */
1158 ret = hns3_config_rss(hns);
1160 hns3_err(hw, "failed to configure rss, ret = %d.", ret);
1164 for (i = 0; i < hw->data->nb_rx_queues; i++) {
1165 rxq = (struct hns3_rx_queue *)hw->data->rx_queues[i];
1167 hns3_err(hw, "Rx queue %u not available or setup.", i);
1171 if (rxq->rx_deferred_start)
1174 ret = hns3_init_rxq(hns, i);
1176 hns3_err(hw, "failed to init Rx queue %u, ret = %d.", i,
1182 for (i = 0; i < hw->fkq_data.nb_fake_rx_queues; i++)
1183 hns3_init_fake_rxq(hns, i);
1188 for (j = 0; j < i; j++) {
1189 rxq = (struct hns3_rx_queue *)hw->data->rx_queues[j];
1190 hns3_rx_queue_release_mbufs(rxq);
1197 hns3_init_tx_queues(struct hns3_adapter *hns)
1199 struct hns3_hw *hw = &hns->hw;
1200 struct hns3_tx_queue *txq;
1203 for (i = 0; i < hw->data->nb_tx_queues; i++) {
1204 txq = (struct hns3_tx_queue *)hw->data->tx_queues[i];
1206 hns3_err(hw, "Tx queue %u not available or setup.", i);
1210 if (txq->tx_deferred_start)
1215 for (i = 0; i < hw->fkq_data.nb_fake_tx_queues; i++) {
1216 txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[i];
1219 hns3_init_tx_ring_tc(hns);
1226 * Note: this only initializes and sets up the queues; the tqps are not enabled here.
1229 hns3_init_queues(struct hns3_adapter *hns, bool reset_queue)
1231 struct hns3_hw *hw = &hns->hw;
1235 ret = hns3_reset_all_tqps(hns);
1237 hns3_err(hw, "failed to reset all queues, ret = %d.",
1243 ret = hns3_init_rx_queues(hns);
1245 hns3_err(hw, "failed to init rx queues, ret = %d.", ret);
1249 ret = hns3_init_tx_queues(hns);
1251 hns3_dev_release_mbufs(hns);
1252 hns3_err(hw, "failed to init tx queues, ret = %d.", ret);
1259 hns3_start_tqps(struct hns3_hw *hw)
1261 struct hns3_tx_queue *txq;
1262 struct hns3_rx_queue *rxq;
1265 hns3_enable_all_queues(hw, true);
1267 for (i = 0; i < hw->data->nb_tx_queues; i++) {
1268 txq = hw->data->tx_queues[i];
1270 hw->data->tx_queue_state[i] =
1271 RTE_ETH_QUEUE_STATE_STARTED;
1274 for (i = 0; i < hw->data->nb_rx_queues; i++) {
1275 rxq = hw->data->rx_queues[i];
1277 hw->data->rx_queue_state[i] =
1278 RTE_ETH_QUEUE_STATE_STARTED;
1283 hns3_stop_tqps(struct hns3_hw *hw)
1287 hns3_enable_all_queues(hw, false);
1289 for (i = 0; i < hw->data->nb_tx_queues; i++)
1290 hw->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
1292 for (i = 0; i < hw->data->nb_rx_queues; i++)
1293 hw->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
1297 * Iterate over all Rx queues and call the callback() function for each one.
1301 * The target eth dev.
1302 * @param[in] callback
1303 * The function to call for each queue.
1304 *   If the callback returns nonzero, iteration stops and that value is returned.
1306 * The arguments to provide the callback function with.
1309 * 0 on success, otherwise with errno set.
1312 hns3_rxq_iterate(struct rte_eth_dev *dev,
1313 int (*callback)(struct hns3_rx_queue *, void *), void *arg)
1318 if (dev->data->rx_queues == NULL)
1321 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1322 ret = callback(dev->data->rx_queues[i], arg);
1331 hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev,
1332 struct hns3_queue_info *q_info)
1334 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1335 const struct rte_memzone *rx_mz;
1336 struct hns3_rx_queue *rxq;
1337 unsigned int rx_desc;
1339 rxq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_rx_queue),
1340 RTE_CACHE_LINE_SIZE, q_info->socket_id);
1342 hns3_err(hw, "Failed to allocate memory for No.%u rx ring!",
1347 /* Allocate rx ring hardware descriptors. */
1348 rxq->queue_id = q_info->idx;
1349 rxq->nb_rx_desc = q_info->nb_desc;
1352	 * Allocate a little more memory because the Rx vector functions
1353	 * don't check boundaries on every access.
1355 rx_desc = (rxq->nb_rx_desc + HNS3_DEFAULT_RX_BURST) *
1356 sizeof(struct hns3_desc);
1357 rx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
1358 rx_desc, HNS3_RING_BASE_ALIGN,
1360 if (rx_mz == NULL) {
1361 hns3_err(hw, "Failed to reserve DMA memory for No.%u rx ring!",
1363 hns3_rx_queue_release(rxq);
1367 rxq->rx_ring = (struct hns3_desc *)rx_mz->addr;
1368 rxq->rx_ring_phys_addr = rx_mz->iova;
1370 hns3_dbg(hw, "No.%u rx descriptors iova 0x%" PRIx64, q_info->idx,
1371 rxq->rx_ring_phys_addr);
1377 hns3_fake_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
1378 uint16_t nb_desc, unsigned int socket_id)
1380 struct hns3_adapter *hns = dev->data->dev_private;
1381 struct hns3_hw *hw = &hns->hw;
1382 struct hns3_queue_info q_info;
1383 struct hns3_rx_queue *rxq;
1386 if (hw->fkq_data.rx_queues[idx]) {
1387 hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);
1388 hw->fkq_data.rx_queues[idx] = NULL;
1392 q_info.socket_id = socket_id;
1393 q_info.nb_desc = nb_desc;
1394 q_info.type = "hns3 fake RX queue";
1395 q_info.ring_name = "rx_fake_ring";
1396 rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
1398 hns3_err(hw, "Failed to setup No.%u fake rx ring.", idx);
1402	/* No need to allocate sw_ring, because upper-layer applications don't use it */
1403 rxq->sw_ring = NULL;
1406 rxq->rx_deferred_start = false;
1407 rxq->port_id = dev->data->port_id;
1408 rxq->configured = true;
1409 nb_rx_q = dev->data->nb_rx_queues;
1410 rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
1411 (nb_rx_q + idx) * HNS3_TQP_REG_SIZE);
1412 rxq->rx_buf_len = HNS3_MIN_BD_BUF_SIZE;
1414 rte_spinlock_lock(&hw->lock);
1415 hw->fkq_data.rx_queues[idx] = rxq;
1416 rte_spinlock_unlock(&hw->lock);
1422 hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev,
1423 struct hns3_queue_info *q_info)
1425 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1426 const struct rte_memzone *tx_mz;
1427 struct hns3_tx_queue *txq;
1428 struct hns3_desc *desc;
1429 unsigned int tx_desc;
1432 txq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_tx_queue),
1433 RTE_CACHE_LINE_SIZE, q_info->socket_id);
1435 hns3_err(hw, "Failed to allocate memory for No.%u tx ring!",
1440 /* Allocate tx ring hardware descriptors. */
1441 txq->queue_id = q_info->idx;
1442 txq->nb_tx_desc = q_info->nb_desc;
1443 tx_desc = txq->nb_tx_desc * sizeof(struct hns3_desc);
1444 tx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
1445 tx_desc, HNS3_RING_BASE_ALIGN,
1447 if (tx_mz == NULL) {
1448 hns3_err(hw, "Failed to reserve DMA memory for No.%u tx ring!",
1450 hns3_tx_queue_release(txq);
1454 txq->tx_ring = (struct hns3_desc *)tx_mz->addr;
1455 txq->tx_ring_phys_addr = tx_mz->iova;
1457 hns3_dbg(hw, "No.%u tx descriptors iova 0x%" PRIx64, q_info->idx,
1458 txq->tx_ring_phys_addr);
1461 desc = txq->tx_ring;
1462 for (i = 0; i < txq->nb_tx_desc; i++) {
1463 desc->tx.tp_fe_sc_vld_ra_ri = 0;
1471 hns3_fake_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
1472 uint16_t nb_desc, unsigned int socket_id)
1474 struct hns3_adapter *hns = dev->data->dev_private;
1475 struct hns3_hw *hw = &hns->hw;
1476 struct hns3_queue_info q_info;
1477 struct hns3_tx_queue *txq;
1480 if (hw->fkq_data.tx_queues[idx] != NULL) {
1481 hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);
1482 hw->fkq_data.tx_queues[idx] = NULL;
1486 q_info.socket_id = socket_id;
1487 q_info.nb_desc = nb_desc;
1488 q_info.type = "hns3 fake TX queue";
1489 q_info.ring_name = "tx_fake_ring";
1490 txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
1492 hns3_err(hw, "Failed to setup No.%u fake tx ring.", idx);
1496	/* No need to allocate sw_ring, because upper-layer applications don't use it */
1497 txq->sw_ring = NULL;
1501 txq->tx_deferred_start = false;
1502 txq->port_id = dev->data->port_id;
1503 txq->configured = true;
1504 nb_tx_q = dev->data->nb_tx_queues;
1505 txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
1506 (nb_tx_q + idx) * HNS3_TQP_REG_SIZE);
1508 rte_spinlock_lock(&hw->lock);
1509 hw->fkq_data.tx_queues[idx] = txq;
1510 rte_spinlock_unlock(&hw->lock);
1516 hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
1518 uint16_t old_nb_queues = hw->fkq_data.nb_fake_rx_queues;
1522 if (hw->fkq_data.rx_queues == NULL && nb_queues != 0) {
1523 /* first time configuration */
1525 size = sizeof(hw->fkq_data.rx_queues[0]) * nb_queues;
1526 hw->fkq_data.rx_queues = rte_zmalloc("fake_rx_queues", size,
1527 RTE_CACHE_LINE_SIZE);
1528 if (hw->fkq_data.rx_queues == NULL) {
1529 hw->fkq_data.nb_fake_rx_queues = 0;
1532 } else if (hw->fkq_data.rx_queues != NULL && nb_queues != 0) {
1534 rxq = hw->fkq_data.rx_queues;
1535 for (i = nb_queues; i < old_nb_queues; i++)
1536 hns3_dev_rx_queue_release(rxq[i]);
1538 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
1539 RTE_CACHE_LINE_SIZE);
1542 if (nb_queues > old_nb_queues) {
1543 uint16_t new_qs = nb_queues - old_nb_queues;
1544 memset(rxq + old_nb_queues, 0, sizeof(rxq[0]) * new_qs);
1547 hw->fkq_data.rx_queues = rxq;
1548 } else if (hw->fkq_data.rx_queues != NULL && nb_queues == 0) {
1549 rxq = hw->fkq_data.rx_queues;
1550 for (i = nb_queues; i < old_nb_queues; i++)
1551 hns3_dev_rx_queue_release(rxq[i]);
1553 rte_free(hw->fkq_data.rx_queues);
1554 hw->fkq_data.rx_queues = NULL;
1557 hw->fkq_data.nb_fake_rx_queues = nb_queues;
1563 hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
1565 uint16_t old_nb_queues = hw->fkq_data.nb_fake_tx_queues;
1569 if (hw->fkq_data.tx_queues == NULL && nb_queues != 0) {
1570 /* first time configuration */
1572 size = sizeof(hw->fkq_data.tx_queues[0]) * nb_queues;
1573 hw->fkq_data.tx_queues = rte_zmalloc("fake_tx_queues", size,
1574 RTE_CACHE_LINE_SIZE);
1575 if (hw->fkq_data.tx_queues == NULL) {
1576 hw->fkq_data.nb_fake_tx_queues = 0;
1579 } else if (hw->fkq_data.tx_queues != NULL && nb_queues != 0) {
1581 txq = hw->fkq_data.tx_queues;
1582 for (i = nb_queues; i < old_nb_queues; i++)
1583 hns3_dev_tx_queue_release(txq[i]);
1584 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
1585 RTE_CACHE_LINE_SIZE);
1588 if (nb_queues > old_nb_queues) {
1589 uint16_t new_qs = nb_queues - old_nb_queues;
1590 memset(txq + old_nb_queues, 0, sizeof(txq[0]) * new_qs);
1593 hw->fkq_data.tx_queues = txq;
1594 } else if (hw->fkq_data.tx_queues != NULL && nb_queues == 0) {
1595 txq = hw->fkq_data.tx_queues;
1596 for (i = nb_queues; i < old_nb_queues; i++)
1597 hns3_dev_tx_queue_release(txq[i]);
1599 rte_free(hw->fkq_data.tx_queues);
1600 hw->fkq_data.tx_queues = NULL;
1602 hw->fkq_data.nb_fake_tx_queues = nb_queues;
1608 hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
1611 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1612 uint16_t rx_need_add_nb_q;
1613 uint16_t tx_need_add_nb_q;
1618 /* Setup new number of fake RX/TX queues and reconfigure device. */
1619 rx_need_add_nb_q = hw->cfg_max_queues - nb_rx_q;
1620 tx_need_add_nb_q = hw->cfg_max_queues - nb_tx_q;
1621 ret = hns3_fake_rx_queue_config(hw, rx_need_add_nb_q);
1623		hns3_err(hw, "Failed to configure fake Rx queues: %d", ret);
1627 ret = hns3_fake_tx_queue_config(hw, tx_need_add_nb_q);
1629		hns3_err(hw, "Failed to configure fake Tx queues: %d", ret);
1630 goto cfg_fake_tx_q_fail;
1633 /* Allocate and set up fake RX queue per Ethernet port. */
1634 port_id = hw->data->port_id;
1635 for (q = 0; q < rx_need_add_nb_q; q++) {
1636 ret = hns3_fake_rx_queue_setup(dev, q, HNS3_MIN_RING_DESC,
1637 rte_eth_dev_socket_id(port_id));
1639 goto setup_fake_rx_q_fail;
1642 /* Allocate and set up fake TX queue per Ethernet port. */
1643 for (q = 0; q < tx_need_add_nb_q; q++) {
1644 ret = hns3_fake_tx_queue_setup(dev, q, HNS3_MIN_RING_DESC,
1645 rte_eth_dev_socket_id(port_id));
1647 goto setup_fake_tx_q_fail;
1652 setup_fake_tx_q_fail:
1653 setup_fake_rx_q_fail:
1654 (void)hns3_fake_tx_queue_config(hw, 0);
1656 (void)hns3_fake_rx_queue_config(hw, 0);
1662 hns3_dev_release_mbufs(struct hns3_adapter *hns)
1664 struct rte_eth_dev_data *dev_data = hns->hw.data;
1665 struct hns3_rx_queue *rxq;
1666 struct hns3_tx_queue *txq;
1669 if (dev_data->rx_queues)
1670 for (i = 0; i < dev_data->nb_rx_queues; i++) {
1671 rxq = dev_data->rx_queues[i];
1674 hns3_rx_queue_release_mbufs(rxq);
1677 if (dev_data->tx_queues)
1678 for (i = 0; i < dev_data->nb_tx_queues; i++) {
1679 txq = dev_data->tx_queues[i];
1682 hns3_tx_queue_release_mbufs(txq);
1687 hns3_rx_buf_len_calc(struct rte_mempool *mp, uint16_t *rx_buf_len)
1689 uint16_t vld_buf_size;
1690 uint16_t num_hw_specs;
1694	 * The hns3 network engine only supports setting 4 typical buffer size
1695	 * specifications, and different buffer sizes affect the max packet length
1696	 * and the max number of segments when HW GRO is enabled on the receive
1697	 * side. The relationship between them is as follows:
1698 * rx_buf_size | max_gro_pkt_len | max_gro_nb_seg
1699 * ---------------------|-------------------|----------------
1700 * HNS3_4K_BD_BUF_SIZE | 60KB | 15
1701 * HNS3_2K_BD_BUF_SIZE | 62KB | 31
1702 * HNS3_1K_BD_BUF_SIZE | 63KB | 63
1703 * HNS3_512_BD_BUF_SIZE | 31.5KB | 63
1705 static const uint16_t hw_rx_buf_size[] = {
1706 HNS3_4K_BD_BUF_SIZE,
1707 HNS3_2K_BD_BUF_SIZE,
1708 HNS3_1K_BD_BUF_SIZE,
1709 HNS3_512_BD_BUF_SIZE
1712 vld_buf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
1713 RTE_PKTMBUF_HEADROOM);
1714 if (vld_buf_size < HNS3_MIN_BD_BUF_SIZE)
1717 num_hw_specs = RTE_DIM(hw_rx_buf_size);
1718 for (i = 0; i < num_hw_specs; i++) {
1719 if (vld_buf_size >= hw_rx_buf_size[i]) {
1720 *rx_buf_len = hw_rx_buf_size[i];
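	/*
	 * For example, a mempool whose data room is 2048 + RTE_PKTMBUF_HEADROOM
	 * bytes gives vld_buf_size == 2048, so the loop selects
	 * HNS3_2K_BD_BUF_SIZE as the Rx buffer length.
	 */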
1728 hns3_rxq_conf_runtime_check(struct hns3_hw *hw, uint16_t buf_size,
1731 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
1732 struct rte_eth_rxmode *rxmode = &hw->data->dev_conf.rxmode;
1733 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
1734 uint16_t min_vec_bds;
1737	 * The HNS3 hardware network engine enables scattered reception by default.
1738	 * If the driver does not work in scattered mode, packets larger than
1739	 * buf_size but smaller than max_rx_pkt_len will still be spread across
1740	 * multiple BDs, a situation the driver cannot handle.
1742 if (!hw->data->scattered_rx && rxmode->max_rx_pkt_len > buf_size) {
1743 hns3_err(hw, "max_rx_pkt_len is not allowed to be set greater "
1744 "than rx_buf_len if scattered is off.");
1748 if (pkt_burst == hns3_recv_pkts_vec) {
1749 min_vec_bds = HNS3_DEFAULT_RXQ_REARM_THRESH +
1750 HNS3_DEFAULT_RX_BURST;
1751 if (nb_desc < min_vec_bds ||
1752 nb_desc % HNS3_DEFAULT_RXQ_REARM_THRESH) {
1753			hns3_err(hw, "in vector Rx burst mode, the number of "
1754				 "descriptors must be greater than the min "
1755				 "vector bds: %u, and be divisible by the "
1756				 "rxq rearm thresh: %u.",
1757				 min_vec_bds, HNS3_DEFAULT_RXQ_REARM_THRESH);
1765 hns3_rx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_rxconf *conf,
1766 struct rte_mempool *mp, uint16_t nb_desc,
1771 if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||
1772 nb_desc % HNS3_ALIGN_RING_DESC) {
1773 hns3_err(hw, "Number (%u) of rx descriptors is invalid",
1778 if (conf->rx_drop_en == 0)
1779		hns3_warn(hw, "if no descriptors are available, packets are always "
1780			  "dropped and rx_drop_en (1) is forced on");
1782 if (hns3_rx_buf_len_calc(mp, buf_size)) {
1783 hns3_err(hw, "rxq mbufs' data room size (%u) is not enough! "
1784 "minimal data room size (%u).",
1785 rte_pktmbuf_data_room_size(mp),
1786 HNS3_MIN_BD_BUF_SIZE + RTE_PKTMBUF_HEADROOM);
1790 if (hw->data->dev_started) {
1791 ret = hns3_rxq_conf_runtime_check(hw, *buf_size, nb_desc);
1793 hns3_err(hw, "Rx queue runtime setup fail.");
1802 hns3_get_tqp_reg_offset(uint16_t queue_id)
1804 uint32_t reg_offset;
1806	/* An extended offset is needed to configure more than 1024 queues */
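	/*
	 * e.g. queue 2 maps to HNS3_TQP_REG_OFFSET + 2 * HNS3_TQP_REG_SIZE, while
	 * queues at or above HNS3_MIN_EXTEND_QUEUE_ID fall into the extended
	 * register region handled below.
	 */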
1807 if (queue_id < HNS3_MIN_EXTEND_QUEUE_ID)
1808 reg_offset = HNS3_TQP_REG_OFFSET + queue_id * HNS3_TQP_REG_SIZE;
1810 reg_offset = HNS3_TQP_REG_OFFSET + HNS3_TQP_EXT_REG_OFFSET +
1811 (queue_id - HNS3_MIN_EXTEND_QUEUE_ID) *
1818 hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
1819 unsigned int socket_id, const struct rte_eth_rxconf *conf,
1820 struct rte_mempool *mp)
1822 struct hns3_adapter *hns = dev->data->dev_private;
1823 struct hns3_hw *hw = &hns->hw;
1824 struct hns3_queue_info q_info;
1825 struct hns3_rx_queue *rxq;
1826 uint16_t rx_buf_size;
1830 ret = hns3_rx_queue_conf_check(hw, conf, mp, nb_desc, &rx_buf_size);
1834 if (dev->data->rx_queues[idx]) {
1835 hns3_rx_queue_release(dev->data->rx_queues[idx]);
1836 dev->data->rx_queues[idx] = NULL;
1840 q_info.socket_id = socket_id;
1841 q_info.nb_desc = nb_desc;
1842 q_info.type = "hns3 RX queue";
1843 q_info.ring_name = "rx_ring";
1845 rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
1848 "Failed to alloc mem and reserve DMA mem for rx ring!");
1853 rxq->ptype_tbl = &hns->ptype_tbl;
1855 rxq->rx_free_thresh = (conf->rx_free_thresh > 0) ?
1856 conf->rx_free_thresh : HNS3_DEFAULT_RX_FREE_THRESH;
1858 rxq->rx_deferred_start = conf->rx_deferred_start;
1859 if (rxq->rx_deferred_start && !hns3_dev_indep_txrx_supported(hw)) {
1860 hns3_warn(hw, "deferred start is not supported.");
1861 rxq->rx_deferred_start = false;
1864 rx_entry_len = (rxq->nb_rx_desc + HNS3_DEFAULT_RX_BURST) *
1865 sizeof(struct hns3_entry);
1866 rxq->sw_ring = rte_zmalloc_socket("hns3 RX sw ring", rx_entry_len,
1867 RTE_CACHE_LINE_SIZE, socket_id);
1868 if (rxq->sw_ring == NULL) {
1869 hns3_err(hw, "Failed to allocate memory for rx sw ring!");
1870 hns3_rx_queue_release(rxq);
1874 rxq->next_to_use = 0;
1875 rxq->rx_free_hold = 0;
1876 rxq->rx_rearm_start = 0;
1877 rxq->rx_rearm_nb = 0;
1878 rxq->pkt_first_seg = NULL;
1879 rxq->pkt_last_seg = NULL;
1880 rxq->port_id = dev->data->port_id;
1882	 * For an hns3 PF device, if the VLAN mode is HW_SHIFT_AND_DISCARD_MODE,
1883	 * the pvid_sw_discard_en in the queue struct should not be changed,
1884	 * because PVID-related operations do not need to be processed by the
1885	 * PMD. For an hns3 VF device, whether PVID needs to be processed depends
1886	 * on the configuration of the PF kernel-mode netdevice driver. The
1887	 * related PF configuration is delivered through the mailbox and finally
1888	 * reflected in port_base_vlan_cfg.
1890 if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
1891 rxq->pvid_sw_discard_en = hw->port_base_vlan_cfg.state ==
1892 HNS3_PORT_BASE_VLAN_ENABLE;
1894 rxq->pvid_sw_discard_en = false;
1895 rxq->ptype_en = hns3_dev_rxd_adv_layout_supported(hw) ? true : false;
1896 rxq->configured = true;
1899	rxq->io_base = (void *)((char *)hw->io_base +
1900				hns3_get_tqp_reg_offset(idx));
1901 rxq->io_head_reg = (volatile void *)((char *)rxq->io_base +
1902 HNS3_RING_RX_HEAD_REG);
1903 rxq->rx_buf_len = rx_buf_size;
1904 memset(&rxq->basic_stats, 0, sizeof(struct hns3_rx_basic_stats));
1905 memset(&rxq->err_stats, 0, sizeof(struct hns3_rx_bd_errors_stats));
1906 memset(&rxq->dfx_stats, 0, sizeof(struct hns3_rx_dfx_stats));
1908	/* The CRC length set here is used to adjust the packet length */
1909 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
1910 rxq->crc_len = RTE_ETHER_CRC_LEN;
1914 rxq->bulk_mbuf_num = 0;
1916 rte_spinlock_lock(&hw->lock);
1917 dev->data->rx_queues[idx] = rxq;
1918 rte_spinlock_unlock(&hw->lock);
1924 hns3_rx_scattered_reset(struct rte_eth_dev *dev)
1926 struct hns3_adapter *hns = dev->data->dev_private;
1927 struct hns3_hw *hw = &hns->hw;
1930 dev->data->scattered_rx = false;
1934 hns3_rx_scattered_calc(struct rte_eth_dev *dev)
1936 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1937 struct hns3_adapter *hns = dev->data->dev_private;
1938 struct hns3_hw *hw = &hns->hw;
1939 struct hns3_rx_queue *rxq;
1942 if (dev->data->rx_queues == NULL)
1945 for (queue_id = 0; queue_id < dev->data->nb_rx_queues; queue_id++) {
1946 rxq = dev->data->rx_queues[queue_id];
1947 if (hw->rx_buf_len == 0)
1948 hw->rx_buf_len = rxq->rx_buf_len;
1950 hw->rx_buf_len = RTE_MIN(hw->rx_buf_len,
1954 if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SCATTER ||
1955 dev_conf->rxmode.max_rx_pkt_len > hw->rx_buf_len)
1956 dev->data->scattered_rx = true;
1960 hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1962 static const uint32_t ptypes[] = {
1964 RTE_PTYPE_L2_ETHER_VLAN,
1965 RTE_PTYPE_L2_ETHER_QINQ,
1966 RTE_PTYPE_L2_ETHER_LLDP,
1967 RTE_PTYPE_L2_ETHER_ARP,
1969 RTE_PTYPE_L3_IPV4_EXT,
1971 RTE_PTYPE_L3_IPV6_EXT,
1977 RTE_PTYPE_TUNNEL_GRE,
1978 RTE_PTYPE_INNER_L2_ETHER,
1979 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1980 RTE_PTYPE_INNER_L2_ETHER_QINQ,
1981 RTE_PTYPE_INNER_L3_IPV4,
1982 RTE_PTYPE_INNER_L3_IPV6,
1983 RTE_PTYPE_INNER_L3_IPV4_EXT,
1984 RTE_PTYPE_INNER_L3_IPV6_EXT,
1985 RTE_PTYPE_INNER_L4_UDP,
1986 RTE_PTYPE_INNER_L4_TCP,
1987 RTE_PTYPE_INNER_L4_SCTP,
1988 RTE_PTYPE_INNER_L4_ICMP,
1989 RTE_PTYPE_TUNNEL_VXLAN,
1990 RTE_PTYPE_TUNNEL_NVGRE,
1994 if (dev->rx_pkt_burst == hns3_recv_pkts ||
1995 dev->rx_pkt_burst == hns3_recv_scattered_pkts ||
1996 dev->rx_pkt_burst == hns3_recv_pkts_vec ||
1997 dev->rx_pkt_burst == hns3_recv_pkts_vec_sve)
2004 hns3_init_non_tunnel_ptype_tbl(struct hns3_ptype_table *tbl)
2006 tbl->l2l3table[0][0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
2007 tbl->l2l3table[0][1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
2008 tbl->l2l3table[0][2] = RTE_PTYPE_L2_ETHER_ARP;
2009 tbl->l2l3table[0][3] = RTE_PTYPE_L2_ETHER;
2010 tbl->l2l3table[0][4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT;
2011 tbl->l2l3table[0][5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT;
2012 tbl->l2l3table[0][6] = RTE_PTYPE_L2_ETHER_LLDP;
2013 tbl->l2l3table[0][15] = RTE_PTYPE_L2_ETHER;
2015 tbl->l2l3table[1][0] = RTE_PTYPE_L2_ETHER_VLAN | RTE_PTYPE_L3_IPV4;
2016 tbl->l2l3table[1][1] = RTE_PTYPE_L2_ETHER_VLAN | RTE_PTYPE_L3_IPV6;
2017 tbl->l2l3table[1][2] = RTE_PTYPE_L2_ETHER_ARP;
2018 tbl->l2l3table[1][3] = RTE_PTYPE_L2_ETHER_VLAN;
2019 tbl->l2l3table[1][4] = RTE_PTYPE_L2_ETHER_VLAN | RTE_PTYPE_L3_IPV4_EXT;
2020 tbl->l2l3table[1][5] = RTE_PTYPE_L2_ETHER_VLAN | RTE_PTYPE_L3_IPV6_EXT;
2021 tbl->l2l3table[1][6] = RTE_PTYPE_L2_ETHER_LLDP;
2022 tbl->l2l3table[1][15] = RTE_PTYPE_L2_ETHER_VLAN;
2024 tbl->l2l3table[2][0] = RTE_PTYPE_L2_ETHER_QINQ | RTE_PTYPE_L3_IPV4;
2025 tbl->l2l3table[2][1] = RTE_PTYPE_L2_ETHER_QINQ | RTE_PTYPE_L3_IPV6;
2026 tbl->l2l3table[2][2] = RTE_PTYPE_L2_ETHER_ARP;
2027 tbl->l2l3table[2][3] = RTE_PTYPE_L2_ETHER_QINQ;
2028 tbl->l2l3table[2][4] = RTE_PTYPE_L2_ETHER_QINQ | RTE_PTYPE_L3_IPV4_EXT;
2029 tbl->l2l3table[2][5] = RTE_PTYPE_L2_ETHER_QINQ | RTE_PTYPE_L3_IPV6_EXT;
2030 tbl->l2l3table[2][6] = RTE_PTYPE_L2_ETHER_LLDP;
2031 tbl->l2l3table[2][15] = RTE_PTYPE_L2_ETHER_QINQ;
2033 tbl->l4table[0] = RTE_PTYPE_L4_UDP;
2034 tbl->l4table[1] = RTE_PTYPE_L4_TCP;
2035 tbl->l4table[2] = RTE_PTYPE_TUNNEL_GRE;
2036 tbl->l4table[3] = RTE_PTYPE_L4_SCTP;
2037 tbl->l4table[4] = RTE_PTYPE_L4_IGMP;
2038 tbl->l4table[5] = RTE_PTYPE_L4_ICMP;
2042 hns3_init_tunnel_ptype_tbl(struct hns3_ptype_table *tbl)
2044 tbl->inner_l2table[0] = RTE_PTYPE_INNER_L2_ETHER;
2045 tbl->inner_l2table[1] = RTE_PTYPE_INNER_L2_ETHER_VLAN;
2046 tbl->inner_l2table[2] = RTE_PTYPE_INNER_L2_ETHER_QINQ;
2048 tbl->inner_l3table[0] = RTE_PTYPE_INNER_L3_IPV4;
2049 tbl->inner_l3table[1] = RTE_PTYPE_INNER_L3_IPV6;
2050	/* There is no ptype for inner ARP/RARP */
2051 tbl->inner_l3table[2] = RTE_PTYPE_UNKNOWN;
2052 tbl->inner_l3table[3] = RTE_PTYPE_UNKNOWN;
2053 tbl->inner_l3table[4] = RTE_PTYPE_INNER_L3_IPV4_EXT;
2054 tbl->inner_l3table[5] = RTE_PTYPE_INNER_L3_IPV6_EXT;
2056 tbl->inner_l4table[0] = RTE_PTYPE_INNER_L4_UDP;
2057 tbl->inner_l4table[1] = RTE_PTYPE_INNER_L4_TCP;
2058	/* There is no ptype for inner GRE */
2059 tbl->inner_l4table[2] = RTE_PTYPE_UNKNOWN;
2060 tbl->inner_l4table[3] = RTE_PTYPE_INNER_L4_SCTP;
2061	/* There is no ptype for inner IGMP */
2062 tbl->inner_l4table[4] = RTE_PTYPE_UNKNOWN;
2063 tbl->inner_l4table[5] = RTE_PTYPE_INNER_L4_ICMP;
2065 tbl->ol2table[0] = RTE_PTYPE_L2_ETHER;
2066 tbl->ol2table[1] = RTE_PTYPE_L2_ETHER_VLAN;
2067 tbl->ol2table[2] = RTE_PTYPE_L2_ETHER_QINQ;
2069 tbl->ol3table[0] = RTE_PTYPE_L3_IPV4;
2070 tbl->ol3table[1] = RTE_PTYPE_L3_IPV6;
2071 tbl->ol3table[2] = RTE_PTYPE_UNKNOWN;
2072 tbl->ol3table[3] = RTE_PTYPE_UNKNOWN;
2073 tbl->ol3table[4] = RTE_PTYPE_L3_IPV4_EXT;
2074 tbl->ol3table[5] = RTE_PTYPE_L3_IPV6_EXT;
2076 tbl->ol4table[0] = RTE_PTYPE_UNKNOWN;
2077 tbl->ol4table[1] = RTE_PTYPE_TUNNEL_VXLAN;
2078 tbl->ol4table[2] = RTE_PTYPE_TUNNEL_NVGRE;
2082 hns3_init_adv_layout_ptype(struct hns3_ptype_table *tbl)
2084 uint32_t *ptype = tbl->ptype;
2087 ptype[1] = RTE_PTYPE_L2_ETHER_ARP;
2088 ptype[3] = RTE_PTYPE_L2_ETHER_LLDP;
2089 ptype[8] = RTE_PTYPE_L2_ETHER_TIMESYNC;
2091 /* Non-tunnel IPv4 */
2092 ptype[17] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2094 ptype[18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2095 RTE_PTYPE_L4_NONFRAG;
2096 ptype[19] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2098 ptype[20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2100 /* The next ptype is GRE over IPv4 */
2101 ptype[21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
2102 ptype[22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2104 ptype[23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2106 ptype[24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2108 /* The next ptype is PTP over IPv4 + UDP */
2109 ptype[25] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2112 /* IPv4 --> GRE/Teredo/VXLAN */
2113 ptype[29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2114 RTE_PTYPE_TUNNEL_GRENAT;
2115 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
2116 ptype[30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2117 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER;
2119 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
2120 ptype[31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2121 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2122 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2123 RTE_PTYPE_INNER_L4_FRAG;
2124 ptype[32] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2125 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2126 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2127 RTE_PTYPE_INNER_L4_NONFRAG;
2128 ptype[33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2129 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2130 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2131 RTE_PTYPE_INNER_L4_UDP;
2132 ptype[34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2133 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2134 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2135 RTE_PTYPE_INNER_L4_TCP;
2136 ptype[35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2137 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2138 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2139 RTE_PTYPE_INNER_L4_SCTP;
2140 /* The next ptype's inner L4 is IGMP */
2141 ptype[36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2142 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2143 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
2144 ptype[37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2145 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2146 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2147 RTE_PTYPE_INNER_L4_ICMP;
2149 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
2150 ptype[39] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2151 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2152 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2153 RTE_PTYPE_INNER_L4_FRAG;
2154 ptype[40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2155 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2156 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2157 RTE_PTYPE_INNER_L4_NONFRAG;
2158 ptype[41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2159 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2160 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2161 RTE_PTYPE_INNER_L4_UDP;
2162 ptype[42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2163 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2164 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2165 RTE_PTYPE_INNER_L4_TCP;
2166 ptype[43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2167 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2168 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2169 RTE_PTYPE_INNER_L4_SCTP;
2170 /* The next ptype's inner L4 is IGMP */
2171 ptype[44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2172 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2173 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
2174 ptype[45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2175 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2176 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2177 RTE_PTYPE_INNER_L4_ICMP;
2179 /* Non-tunnel IPv6 */
2180 ptype[111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2182 ptype[112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2183 RTE_PTYPE_L4_NONFRAG;
2184 ptype[113] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2186 ptype[114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2188 /* The next ptype is GRE over IPv6 */
2189 ptype[115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
2190 ptype[116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2192 ptype[117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2194 ptype[118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2196 /* Special for PTP over IPv6 + UDP */
2197 ptype[119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2200 /* IPv6 --> GRE/Teredo/VXLAN */
2201 ptype[123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2202 RTE_PTYPE_TUNNEL_GRENAT;
2203 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
2204 ptype[124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2205 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER;
2207 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
2208 ptype[125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2209 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2210 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2211 RTE_PTYPE_INNER_L4_FRAG;
2212 ptype[126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2213 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2214 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2215 RTE_PTYPE_INNER_L4_NONFRAG;
2216 ptype[127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2217 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2218 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2219 RTE_PTYPE_INNER_L4_UDP;
2220 ptype[128] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2221 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2222 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2223 RTE_PTYPE_INNER_L4_TCP;
2224 ptype[129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2225 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2226 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2227 RTE_PTYPE_INNER_L4_SCTP;
2228 /* The next ptype's inner L4 is IGMP */
2229 ptype[130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2230 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2231 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
2232 ptype[131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2233 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2234 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2235 RTE_PTYPE_INNER_L4_ICMP;
2237 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
2238 ptype[133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2239 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2240 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2241 RTE_PTYPE_INNER_L4_FRAG;
2242 ptype[134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2243 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2244 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2245 RTE_PTYPE_INNER_L4_NONFRAG;
2246 ptype[135] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2247 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2248 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2249 RTE_PTYPE_INNER_L4_UDP;
2250 ptype[136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2251 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2252 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2253 RTE_PTYPE_INNER_L4_TCP;
2254 ptype[137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2255 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2256 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2257 RTE_PTYPE_INNER_L4_SCTP;
2258 /* The next ptype's inner L4 is IGMP */
2259 ptype[138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2260 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2261 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
2262 ptype[139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2263 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2264 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2265 RTE_PTYPE_INNER_L4_ICMP;
2269 hns3_init_rx_ptype_tble(struct rte_eth_dev *dev)
2271 struct hns3_adapter *hns = dev->data->dev_private;
2272 struct hns3_ptype_table *tbl = &hns->ptype_tbl;
2274 memset(tbl, 0, sizeof(*tbl));
2276 hns3_init_non_tunnel_ptype_tbl(tbl);
2277 hns3_init_tunnel_ptype_tbl(tbl);
2278 hns3_init_adv_layout_ptype(tbl);
2282 hns3_rxd_to_vlan_tci(struct hns3_rx_queue *rxq, struct rte_mbuf *mb,
2283 uint32_t l234_info, const struct hns3_desc *rxd)
2285 #define HNS3_STRP_STATUS_NUM 0x4
2287 #define HNS3_NO_STRP_VLAN_VLD 0x0
2288 #define HNS3_INNER_STRP_VLAN_VLD 0x1
2289 #define HNS3_OUTER_STRP_VLAN_VLD 0x2
2290 uint32_t strip_status;
2291 uint32_t report_mode;
2294	 * Due to a HW limitation, the VLAN tag is always written into the Rx
2295	 * descriptor when the tag is stripped from the packet, so the driver needs
2296	 * to determine which tag to report to the mbuf according to the PVID
2297	 * configuration and the VLAN stripped status.
2299 static const uint32_t report_type[][HNS3_STRP_STATUS_NUM] = {
2301 HNS3_NO_STRP_VLAN_VLD,
2302 HNS3_OUTER_STRP_VLAN_VLD,
2303 HNS3_INNER_STRP_VLAN_VLD,
2304 HNS3_OUTER_STRP_VLAN_VLD
2307 HNS3_NO_STRP_VLAN_VLD,
2308 HNS3_NO_STRP_VLAN_VLD,
2309 HNS3_NO_STRP_VLAN_VLD,
2310 HNS3_INNER_STRP_VLAN_VLD
2313 strip_status = hns3_get_field(l234_info, HNS3_RXD_STRP_TAGP_M,
2314 HNS3_RXD_STRP_TAGP_S);
2315 report_mode = report_type[rxq->pvid_sw_discard_en][strip_status];
2316 switch (report_mode) {
2317 case HNS3_NO_STRP_VLAN_VLD:
2320 case HNS3_INNER_STRP_VLAN_VLD:
2321 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
2322 mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.vlan_tag);
2324 case HNS3_OUTER_STRP_VLAN_VLD:
2325 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
2326 mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.ot_vlan_tag);
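/*
 * Trim the CRC from a scattered packet when the hardware leaves it in place.
 * If the last segment holds no more than the CRC itself, that segment is
 * freed and the remaining CRC bytes are trimmed from the previous segment;
 * otherwise only the CRC bytes are trimmed from the last segment.
 * Illustrative example: with crc_len = 4 and a 2-byte last segment, the last
 * segment is freed and 2 bytes are removed from the previous one.
 */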
2335 recalculate_data_len(struct rte_mbuf *first_seg, struct rte_mbuf *last_seg,
2336 struct rte_mbuf *rxm, struct hns3_rx_queue *rxq,
2339 uint8_t crc_len = rxq->crc_len;
2341 if (data_len <= crc_len) {
2342 rte_pktmbuf_free_seg(rxm);
2343 first_seg->nb_segs--;
2344 last_seg->data_len = (uint16_t)(last_seg->data_len -
2345 (crc_len - data_len));
2346 last_seg->next = NULL;
2348 rxm->data_len = (uint16_t)(data_len - crc_len);
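/*
 * Allocate one Rx mbuf, preferring the per-queue bulk cache: when the cache
 * is empty, refill it with a single rte_mempool_get_bulk() of
 * HNS3_BULK_ALLOC_MBUF_NUM mbufs; if the bulk get fails, fall back to a
 * plain rte_mbuf_raw_alloc() from the same mempool.
 */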
2351 static inline struct rte_mbuf *
2352 hns3_rx_alloc_buffer(struct hns3_rx_queue *rxq)
2356 if (likely(rxq->bulk_mbuf_num > 0))
2357 return rxq->bulk_mbuf[--rxq->bulk_mbuf_num];
2359 ret = rte_mempool_get_bulk(rxq->mb_pool, (void **)rxq->bulk_mbuf,
2360 HNS3_BULK_ALLOC_MBUF_NUM);
2361 if (likely(ret == 0)) {
2362 rxq->bulk_mbuf_num = HNS3_BULK_ALLOC_MBUF_NUM;
2363 return rxq->bulk_mbuf[--rxq->bulk_mbuf_num];
2365 return rte_mbuf_raw_alloc(rxq->mb_pool);
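/*
 * Scalar, non-scattered receive path: poll the valid bit of each Rx BD,
 * hand the filled mbuf to the application, replace it with a freshly
 * allocated one, and write the number of recycled BDs back to the head
 * register once rx_free_hold exceeds rx_free_thresh.
 */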
2369 hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
2371 volatile struct hns3_desc *rx_ring; /* RX ring (desc) */
2372 volatile struct hns3_desc *rxdp; /* pointer of the current desc */
2373 struct hns3_rx_queue *rxq; /* RX queue */
2374 struct hns3_entry *sw_ring;
2375 struct hns3_entry *rxe;
2376 struct hns3_desc rxd;
2377 struct rte_mbuf *nmb; /* pointer of the new mbuf */
2378 struct rte_mbuf *rxm;
2379 uint32_t bd_base_info;
2392 rx_ring = rxq->rx_ring;
2393 sw_ring = rxq->sw_ring;
2394 rx_id = rxq->next_to_use;
2396 while (nb_rx < nb_pkts) {
2397 rxdp = &rx_ring[rx_id];
2398 bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
2399 if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
2402 rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -
2403 (1u << HNS3_RXD_VLD_B)];
2405 nmb = hns3_rx_alloc_buffer(rxq);
2406 if (unlikely(nmb == NULL)) {
2409 port_id = rxq->port_id;
2410 rte_eth_devices[port_id].data->rx_mbuf_alloc_failed++;
2415 rxe = &sw_ring[rx_id];
2417 if (unlikely(rx_id == rxq->nb_rx_desc))
2420 rte_prefetch0(sw_ring[rx_id].mbuf);
2421 if ((rx_id & HNS3_RX_RING_PREFETCTH_MASK) == 0) {
2422 rte_prefetch0(&rx_ring[rx_id]);
2423 rte_prefetch0(&sw_ring[rx_id]);
2429 dma_addr = rte_mbuf_data_iova_default(nmb);
2430 rxdp->addr = rte_cpu_to_le_64(dma_addr);
2431 rxdp->rx.bd_base_info = 0;
2433 rxm->data_off = RTE_PKTMBUF_HEADROOM;
2434 rxm->pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.rx.pkt_len)) -
2436 rxm->data_len = rxm->pkt_len;
2437 rxm->port = rxq->port_id;
2438 rxm->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash);
2439 rxm->ol_flags = PKT_RX_RSS_HASH;
2440 if (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) {
2442 rte_le_to_cpu_16(rxd.rx.fd_id);
2443 rxm->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
2448	/* Load the remaining descriptor data and extract the necessary fields */
2449 l234_info = rte_le_to_cpu_32(rxd.rx.l234_info);
2450 ol_info = rte_le_to_cpu_32(rxd.rx.ol_info);
2451 ret = hns3_handle_bdinfo(rxq, rxm, bd_base_info,
2452 l234_info, &cksum_err);
2456 rxm->packet_type = hns3_rx_calc_ptype(rxq, l234_info, ol_info);
2458 if (likely(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
2459 hns3_rx_set_cksum_flag(rxm, rxm->packet_type,
2461 hns3_rxd_to_vlan_tci(rxq, rxm, l234_info, &rxd);
2463 /* Increment bytes counter */
2464 rxq->basic_stats.bytes += rxm->pkt_len;
2466 rx_pkts[nb_rx++] = rxm;
2469 rte_pktmbuf_free(rxm);
2472 rxq->next_to_use = rx_id;
2473 rxq->rx_free_hold += nb_rx_bd;
2474 if (rxq->rx_free_hold > rxq->rx_free_thresh) {
2475 hns3_write_reg_opt(rxq->io_head_reg, rxq->rx_free_hold);
2476 rxq->rx_free_hold = 0;
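/*
 * Scattered receive path: segments that belong to one packet are chained
 * through pkt_first_seg/pkt_last_seg across calls until a BD with the FE
 * (frame end) bit arrives, at which point the CRC is trimmed if needed and
 * the complete chain is returned to the application.
 */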
2483 hns3_recv_scattered_pkts(void *rx_queue,
2484 struct rte_mbuf **rx_pkts,
2487 volatile struct hns3_desc *rx_ring; /* RX ring (desc) */
2488 volatile struct hns3_desc *rxdp; /* pointer of the current desc */
2489 struct hns3_rx_queue *rxq; /* RX queue */
2490 struct hns3_entry *sw_ring;
2491 struct hns3_entry *rxe;
2492 struct rte_mbuf *first_seg;
2493 struct rte_mbuf *last_seg;
2494 struct hns3_desc rxd;
2495 struct rte_mbuf *nmb; /* pointer of the new mbuf */
2496 struct rte_mbuf *rxm;
2497 struct rte_eth_dev *dev;
2498 uint32_t bd_base_info;
2513 rx_id = rxq->next_to_use;
2514 rx_ring = rxq->rx_ring;
2515 sw_ring = rxq->sw_ring;
2516 first_seg = rxq->pkt_first_seg;
2517 last_seg = rxq->pkt_last_seg;
2519 while (nb_rx < nb_pkts) {
2520 rxdp = &rx_ring[rx_id];
2521 bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
2522 if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
2526 * The interactive process between software and hardware of
2527 * receiving a new packet in hns3 network engine:
2528 * 1. Hardware network engine firstly writes the packet content
2529 * to the memory pointed by the 'addr' field of the Rx Buffer
2530 * Descriptor, secondly fills the result of parsing the
2531	 * packet, including the valid field, into the Rx Buffer
2532 * Descriptor in one write operation.
2533	 * 2. The driver reads the Rx BD's valid field in a loop to check
2534	 *    whether it's valid; if it is, the driver assigns a new address to
2535	 *    the addr field, clears the valid field, gets the other
2536	 *    information of the packet by parsing the Rx BD's other fields, and
2537	 *    finally writes back the number of Rx BDs processed by the
2538	 *    driver to the HNS3_RING_RX_HEAD_REG register to inform
2540 * In the above process, the ordering is very important. We must
2541	 * make sure that the CPU reads the Rx BD's other fields only after the
2544	 * There are two types of re-ordering: compiler re-ordering and
2545	 * CPU re-ordering under the ARMv8 architecture.
2546	 * 1. we use volatile to deal with compiler re-ordering, which is
2547	 *    why rx_ring/rxdp are defined with volatile.
2548	 * 2. we commonly use a memory barrier to deal with CPU
2549	 *    re-ordering, but the cost is high.
2551	 * In order to avoid the high cost of a memory barrier, we
2552	 * use the data dependency order under the ARMv8 architecture,
2555 * instr02: load B <- A
2556 * the instr02 will always execute after instr01.
2558 * To construct the data dependency ordering, we use the
2559 * following assignment:
2560 * rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -
2561 * (1u<<HNS3_RXD_VLD_B)]
2562	 * Using the gcc compiler under the ARMv8 architecture, the related
2563	 * assembly code example is as follows:
2564	 * note: (1u << HNS3_RXD_VLD_B) equals 0x10
2565 * instr01: ldr w26, [x22, #28] --read bd_base_info
2566 * instr02: and w0, w26, #0x10 --calc bd_base_info & 0x10
2567 * instr03: sub w0, w0, #0x10 --calc (bd_base_info &
2569 * instr04: add x0, x22, x0, lsl #5 --calc copy source addr
2570 * instr05: ldp x2, x3, [x0]
2571 * instr06: stp x2, x3, [x29, #256] --copy BD's [0 ~ 15]B
2572 * instr07: ldp x4, x5, [x0, #16]
2573 * instr08: stp x4, x5, [x29, #272] --copy BD's [16 ~ 31]B
2574	 * the instr05~08 depend on x0's value, x0 depends on w26's
2575	 * value, and w26 is the bd_base_info; this forms the data
2576	 * dependency ordering.
2577	 * note: if the BD is valid, (bd_base_info & (1u<<HNS3_RXD_VLD_B)) -
2578	 *       (1u<<HNS3_RXD_VLD_B) will always be zero, so the
2579	 *       assignment is correct.
2581 * So we use the data dependency ordering instead of memory
2582 * barrier to improve receive performance.
2584 rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -
2585 (1u << HNS3_RXD_VLD_B)];
2587 nmb = hns3_rx_alloc_buffer(rxq);
2588 if (unlikely(nmb == NULL)) {
2589 dev = &rte_eth_devices[rxq->port_id];
2590 dev->data->rx_mbuf_alloc_failed++;
2595 rxe = &sw_ring[rx_id];
2597 if (unlikely(rx_id == rxq->nb_rx_desc))
2600 rte_prefetch0(sw_ring[rx_id].mbuf);
2601 if ((rx_id & HNS3_RX_RING_PREFETCTH_MASK) == 0) {
2602 rte_prefetch0(&rx_ring[rx_id]);
2603 rte_prefetch0(&sw_ring[rx_id]);
2609 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
2610 rxdp->rx.bd_base_info = 0;
2611 rxdp->addr = dma_addr;
2613 if (first_seg == NULL) {
2615 first_seg->nb_segs = 1;
2617 first_seg->nb_segs++;
2618 last_seg->next = rxm;
2621 rxm->data_off = RTE_PKTMBUF_HEADROOM;
2622 rxm->data_len = rte_le_to_cpu_16(rxd.rx.size);
2624 if (!(bd_base_info & BIT(HNS3_RXD_FE_B))) {
2631	 * This is the last buffer of the received packet. The packet length
2632	 * from the buffer descriptor may contain the CRC length, which should
2633	 * be subtracted from it, and likewise from the data length.
2635 first_seg->pkt_len = rte_le_to_cpu_16(rxd.rx.pkt_len);
2638 * This is the last buffer of the received packet. If the CRC
2639 * is not stripped by the hardware:
2640 * - Subtract the CRC length from the total packet length.
2641 * - If the last buffer only contains the whole CRC or a part
2642 * of it, free the mbuf associated to the last buffer. If part
2643 * of the CRC is also contained in the previous mbuf, subtract
2644 * the length of that CRC part from the data length of the
2648 if (unlikely(rxq->crc_len > 0)) {
2649 first_seg->pkt_len -= rxq->crc_len;
2650 recalculate_data_len(first_seg, last_seg, rxm, rxq,
2654 first_seg->port = rxq->port_id;
2655 first_seg->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash);
2656 first_seg->ol_flags = PKT_RX_RSS_HASH;
2657 if (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) {
2658 first_seg->hash.fdir.hi =
2659 rte_le_to_cpu_16(rxd.rx.fd_id);
2660 first_seg->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
2663 gro_size = hns3_get_field(bd_base_info, HNS3_RXD_GRO_SIZE_M,
2664 HNS3_RXD_GRO_SIZE_S);
2665 if (gro_size != 0) {
2666 first_seg->ol_flags |= PKT_RX_LRO;
2667 first_seg->tso_segsz = gro_size;
2670 l234_info = rte_le_to_cpu_32(rxd.rx.l234_info);
2671 ol_info = rte_le_to_cpu_32(rxd.rx.ol_info);
2672 ret = hns3_handle_bdinfo(rxq, first_seg, bd_base_info,
2673 l234_info, &cksum_err);
2677 first_seg->packet_type = hns3_rx_calc_ptype(rxq,
2678 l234_info, ol_info);
2680 if (bd_base_info & BIT(HNS3_RXD_L3L4P_B))
2681 hns3_rx_set_cksum_flag(first_seg,
2682 first_seg->packet_type,
2684 hns3_rxd_to_vlan_tci(rxq, first_seg, l234_info, &rxd);
2686 /* Increment bytes counter */
2687 rxq->basic_stats.bytes += first_seg->pkt_len;
2689 rx_pkts[nb_rx++] = first_seg;
2693 rte_pktmbuf_free(first_seg);
2697 rxq->next_to_use = rx_id;
2698 rxq->pkt_first_seg = first_seg;
2699 rxq->pkt_last_seg = last_seg;
2701 rxq->rx_free_hold += nb_rx_bd;
2702 if (rxq->rx_free_hold > rxq->rx_free_thresh) {
2703 hns3_write_reg_opt(rxq->io_head_reg, rxq->rx_free_hold);
2704 rxq->rx_free_hold = 0;
2711 hns3_rxq_vec_setup(__rte_unused struct hns3_rx_queue *rxq)
2716 hns3_rx_check_vec_support(__rte_unused struct rte_eth_dev *dev)
2722 hns3_recv_pkts_vec(__rte_unused void *tx_queue,
2723 __rte_unused struct rte_mbuf **rx_pkts,
2724 __rte_unused uint16_t nb_pkts)
2730 hns3_recv_pkts_vec_sve(__rte_unused void *tx_queue,
2731 __rte_unused struct rte_mbuf **rx_pkts,
2732 __rte_unused uint16_t nb_pkts)
2738 hns3_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
2739 struct rte_eth_burst_mode *mode)
2741 static const struct {
2742 eth_rx_burst_t pkt_burst;
2745 { hns3_recv_pkts, "Scalar" },
2746 { hns3_recv_scattered_pkts, "Scalar Scattered" },
2747 { hns3_recv_pkts_vec, "Vector Neon" },
2748 { hns3_recv_pkts_vec_sve, "Vector Sve" },
2751 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
2755 for (i = 0; i < RTE_DIM(burst_infos); i++) {
2756 if (pkt_burst == burst_infos[i].pkt_burst) {
2757 snprintf(mode->info, sizeof(mode->info), "%s",
2758 burst_infos[i].info);
2768 hns3_check_sve_support(void)
2770 #if defined(RTE_ARCH_ARM64) && defined(__ARM_FEATURE_SVE)
2771 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SVE))
2777 static eth_rx_burst_t
2778 hns3_get_rx_function(struct rte_eth_dev *dev)
2780 struct hns3_adapter *hns = dev->data->dev_private;
2781 uint64_t offloads = dev->data->dev_conf.rxmode.offloads;
2782 bool vec_allowed, sve_allowed, simple_allowed;
2784 vec_allowed = hns->rx_vec_allowed &&
2785 hns3_rx_check_vec_support(dev) == 0;
2786 sve_allowed = vec_allowed && hns3_check_sve_support();
2787 simple_allowed = hns->rx_simple_allowed && !dev->data->scattered_rx &&
2788 (offloads & DEV_RX_OFFLOAD_TCP_LRO) == 0;
2790 if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_VEC && vec_allowed)
2791 return hns3_recv_pkts_vec;
2792 if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_SVE && sve_allowed)
2793 return hns3_recv_pkts_vec_sve;
2794 if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_SIMPLE && simple_allowed)
2795 return hns3_recv_pkts;
2796 if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_COMMON)
2797 return hns3_recv_scattered_pkts;
2800 return hns3_recv_pkts_vec;
2802 return hns3_recv_pkts;
2804 return hns3_recv_scattered_pkts;
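/*
 * Validate the Tx descriptor count and the tx_rs_thresh/tx_free_thresh pair:
 * their sum must not exceed nb_desc, nb_desc must be a multiple of
 * tx_rs_thresh, and both thresholds must stay HNS3_TX_RS_FREE_THRESH_GAP
 * descriptors away from the ring size. Illustrative example: nb_desc = 512
 * with tx_rs_thresh = 32 and tx_free_thresh = 32 satisfies all the checks.
 */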
2808 hns3_tx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_txconf *conf,
2809 uint16_t nb_desc, uint16_t *tx_rs_thresh,
2810 uint16_t *tx_free_thresh, uint16_t idx)
2812 #define HNS3_TX_RS_FREE_THRESH_GAP 8
2813 uint16_t rs_thresh, free_thresh, fast_free_thresh;
2815 if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||
2816 nb_desc % HNS3_ALIGN_RING_DESC) {
2817 hns3_err(hw, "number (%u) of tx descriptors is invalid",
2822 rs_thresh = (conf->tx_rs_thresh > 0) ?
2823 conf->tx_rs_thresh : HNS3_DEFAULT_TX_RS_THRESH;
2824 free_thresh = (conf->tx_free_thresh > 0) ?
2825 conf->tx_free_thresh : HNS3_DEFAULT_TX_FREE_THRESH;
2826 if (rs_thresh + free_thresh > nb_desc || nb_desc % rs_thresh ||
2827 rs_thresh >= nb_desc - HNS3_TX_RS_FREE_THRESH_GAP ||
2828 free_thresh >= nb_desc - HNS3_TX_RS_FREE_THRESH_GAP) {
2829 hns3_err(hw, "tx_rs_thresh (%u) tx_free_thresh (%u) nb_desc "
2830 "(%u) of tx descriptors for port=%u queue=%u check "
2832 rs_thresh, free_thresh, nb_desc, hw->data->port_id,
2837 if (conf->tx_free_thresh == 0) {
2838 /* Fast free Tx memory buffer to improve cache hit rate */
2839 fast_free_thresh = nb_desc - rs_thresh;
2840 if (fast_free_thresh >=
2841 HNS3_TX_FAST_FREE_AHEAD + HNS3_DEFAULT_TX_FREE_THRESH)
2842 free_thresh = fast_free_thresh -
2843 HNS3_TX_FAST_FREE_AHEAD;
2846 *tx_rs_thresh = rs_thresh;
2847 *tx_free_thresh = free_thresh;
2852 hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
2853 unsigned int socket_id, const struct rte_eth_txconf *conf)
2855 struct hns3_adapter *hns = dev->data->dev_private;
2856 uint16_t tx_rs_thresh, tx_free_thresh;
2857 struct hns3_hw *hw = &hns->hw;
2858 struct hns3_queue_info q_info;
2859 struct hns3_tx_queue *txq;
2863 ret = hns3_tx_queue_conf_check(hw, conf, nb_desc,
2864 &tx_rs_thresh, &tx_free_thresh, idx);
2868 if (dev->data->tx_queues[idx] != NULL) {
2869 hns3_tx_queue_release(dev->data->tx_queues[idx]);
2870 dev->data->tx_queues[idx] = NULL;
2874 q_info.socket_id = socket_id;
2875 q_info.nb_desc = nb_desc;
2876 q_info.type = "hns3 TX queue";
2877 q_info.ring_name = "tx_ring";
2878 txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
2881 "Failed to alloc mem and reserve DMA mem for tx ring!");
2885 txq->tx_deferred_start = conf->tx_deferred_start;
2886 if (txq->tx_deferred_start && !hns3_dev_indep_txrx_supported(hw)) {
2887 hns3_warn(hw, "deferred start is not supported.");
2888 txq->tx_deferred_start = false;
2891 tx_entry_len = sizeof(struct hns3_entry) * txq->nb_tx_desc;
2892 txq->sw_ring = rte_zmalloc_socket("hns3 TX sw ring", tx_entry_len,
2893 RTE_CACHE_LINE_SIZE, socket_id);
2894 if (txq->sw_ring == NULL) {
2895 hns3_err(hw, "Failed to allocate memory for tx sw ring!");
2896 hns3_tx_queue_release(txq);
2901 txq->next_to_use = 0;
2902 txq->next_to_clean = 0;
2903 txq->tx_bd_ready = txq->nb_tx_desc - 1;
2904 txq->tx_free_thresh = tx_free_thresh;
2905 txq->tx_rs_thresh = tx_rs_thresh;
2906 txq->free = rte_zmalloc_socket("hns3 TX mbuf free array",
2907 sizeof(struct rte_mbuf *) * txq->tx_rs_thresh,
2908 RTE_CACHE_LINE_SIZE, socket_id);
2910 hns3_err(hw, "failed to allocate tx mbuf free array!");
2911 hns3_tx_queue_release(txq);
2915 txq->port_id = dev->data->port_id;
2917 * For hns3 PF device, if the VLAN mode is HW_SHIFT_AND_DISCARD_MODE,
2918 * the pvid_sw_shift_en in the queue struct should not be changed,
2919 * because PVID-related operations do not need to be processed by PMD
2920 * driver. For hns3 VF device, whether it needs to process PVID depends
2921 * on the configuration of PF kernel mode netdev driver. And the
2922 * related PF configuration is delivered through the mailbox and finally
2923	 * reflected in port_base_vlan_cfg.
2925 if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
2926 txq->pvid_sw_shift_en = hw->port_base_vlan_cfg.state ==
2927 HNS3_PORT_BASE_VLAN_ENABLE;
2929 txq->pvid_sw_shift_en = false;
2930 txq->max_non_tso_bd_num = hw->max_non_tso_bd_num;
2931 txq->configured = true;
2932 txq->io_base = (void *)((char *)hw->io_base +
2933 hns3_get_tqp_reg_offset(idx));
2934 txq->io_tail_reg = (volatile void *)((char *)txq->io_base +
2935 HNS3_RING_TX_TAIL_REG);
2936 txq->min_tx_pkt_len = hw->min_tx_pkt_len;
2937 txq->tso_mode = hw->tso_mode;
2938 txq->udp_cksum_mode = hw->udp_cksum_mode;
2939 memset(&txq->basic_stats, 0, sizeof(struct hns3_tx_basic_stats));
2940 memset(&txq->dfx_stats, 0, sizeof(struct hns3_tx_dfx_stats));
2942 rte_spinlock_lock(&hw->lock);
2943 dev->data->tx_queues[idx] = txq;
2944 rte_spinlock_unlock(&hw->lock);
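/*
 * Reclaim transmitted buffers: walk the ring from next_to_clean, freeing
 * the mbuf of every descriptor whose VLD bit the hardware has cleared,
 * stop when catching up with next_to_use, and then refresh next_to_clean
 * and tx_bd_ready accordingly.
 */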
2950 hns3_tx_free_useless_buffer(struct hns3_tx_queue *txq)
2952 uint16_t tx_next_clean = txq->next_to_clean;
2953 uint16_t tx_next_use = txq->next_to_use;
2954 uint16_t tx_bd_ready = txq->tx_bd_ready;
2955 uint16_t tx_bd_max = txq->nb_tx_desc;
2956 struct hns3_entry *tx_bak_pkt = &txq->sw_ring[tx_next_clean];
2957 struct hns3_desc *desc = &txq->tx_ring[tx_next_clean];
2958 struct rte_mbuf *mbuf;
2960 while ((!(desc->tx.tp_fe_sc_vld_ra_ri &
2961 rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B)))) &&
2962 tx_next_use != tx_next_clean) {
2963 mbuf = tx_bak_pkt->mbuf;
2965 rte_pktmbuf_free_seg(mbuf);
2966 tx_bak_pkt->mbuf = NULL;
2974 if (tx_next_clean >= tx_bd_max) {
2976 desc = txq->tx_ring;
2977 tx_bak_pkt = txq->sw_ring;
2981 txq->next_to_clean = tx_next_clean;
2982 txq->tx_bd_ready = tx_bd_ready;
2986 hns3_config_gro(struct hns3_hw *hw, bool en)
2988 struct hns3_cfg_gro_status_cmd *req;
2989 struct hns3_cmd_desc desc;
2992 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GRO_GENERIC_CONFIG, false);
2993 req = (struct hns3_cfg_gro_status_cmd *)desc.data;
2995 req->gro_en = rte_cpu_to_le_16(en ? 1 : 0);
2997 ret = hns3_cmd_send(hw, &desc, 1);
2999 hns3_err(hw, "%s hardware GRO failed, ret = %d",
3000 en ? "enable" : "disable", ret);
3006 hns3_restore_gro_conf(struct hns3_hw *hw)
3012 offloads = hw->data->dev_conf.rxmode.offloads;
3013 gro_en = offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false;
3014 ret = hns3_config_gro(hw, gro_en);
3016 hns3_err(hw, "restore hardware GRO to %s failed, ret = %d",
3017 gro_en ? "enabled" : "disabled", ret);
3023 hns3_pkt_is_tso(struct rte_mbuf *m)
3025 return (m->tso_segsz != 0 && m->ol_flags & PKT_TX_TCP_SEG);
3029 hns3_set_tso(struct hns3_desc *desc, uint32_t paylen, struct rte_mbuf *rxm)
3031 if (!hns3_pkt_is_tso(rxm))
3034 if (paylen <= rxm->tso_segsz)
3037 desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(BIT(HNS3_TXD_TSO_B));
3038 desc->tx.mss = rte_cpu_to_le_16(rxm->tso_segsz);
3042 hns3_fill_per_desc(struct hns3_desc *desc, struct rte_mbuf *rxm)
3044 desc->addr = rte_mbuf_data_iova(rxm);
3045 desc->tx.send_size = rte_cpu_to_le_16(rte_pktmbuf_data_len(rxm));
3046 desc->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B));
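/*
 * The first descriptor of a packet carries the payload length (packet length
 * minus the L2/L3/L4 headers and, for tunnel packets, the outer headers),
 * the TSO setup and the VLAN insertion fields. Illustrative example: a plain
 * TCP/IPv4 frame with pkt_len = 1514, l2_len = 14, l3_len = 20 and
 * l4_len = 20 yields paylen = 1460.
 */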
3050 hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc,
3051 struct rte_mbuf *rxm)
3053 uint64_t ol_flags = rxm->ol_flags;
3057 hdr_len = rxm->l2_len + rxm->l3_len + rxm->l4_len;
3058 hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ?
3059 rxm->outer_l2_len + rxm->outer_l3_len : 0;
3060 paylen = rxm->pkt_len - hdr_len;
3061 desc->tx.paylen_fd_dop_ol4cs |= rte_cpu_to_le_32(paylen);
3062 hns3_set_tso(desc, paylen, rxm);
3065	 * Currently, hardware doesn't support more than two layers of VLAN offload
3066	 * in the Tx direction based on the hns3 network engine. So when the number
3067	 * of VLANs in the packets represented by rxm plus the number of VLANs
3068	 * offloaded by hardware, such as PVID, exceeds two, the packets will
3069	 * be discarded or the original VLAN of the packets will be overwritten
3070	 * by hardware. When the PF PVID is enabled by calling the API function
3071	 * named rte_eth_dev_set_vlan_pvid or the VF PVID is enabled by the hns3
3072	 * PF kernel ether driver, the outer VLAN tag will always be the PVID.
3073	 * To avoid the VLAN in the Tx descriptor being overwritten by the PVID,
3074	 * it should be added to the position close to the IP header when PVID is enabled.
3076 if (!txq->pvid_sw_shift_en && ol_flags & (PKT_TX_VLAN_PKT |
3078 desc->tx.ol_type_vlan_len_msec |=
3079 rte_cpu_to_le_32(BIT(HNS3_TXD_OVLAN_B));
3080 if (ol_flags & PKT_TX_QINQ_PKT)
3081 desc->tx.outer_vlan_tag =
3082 rte_cpu_to_le_16(rxm->vlan_tci_outer);
3084 desc->tx.outer_vlan_tag =
3085 rte_cpu_to_le_16(rxm->vlan_tci);
3088 if (ol_flags & PKT_TX_QINQ_PKT ||
3089 ((ol_flags & PKT_TX_VLAN_PKT) && txq->pvid_sw_shift_en)) {
3090 desc->tx.type_cs_vlan_tso_len |=
3091 rte_cpu_to_le_32(BIT(HNS3_TXD_VLAN_B));
3092 desc->tx.vlan_tag = rte_cpu_to_le_16(rxm->vlan_tci);
3097 hns3_tx_alloc_mbufs(struct rte_mempool *mb_pool, uint16_t nb_new_buf,
3098 struct rte_mbuf **alloc_mbuf)
3100 #define MAX_NON_TSO_BD_PER_PKT 18
3101 struct rte_mbuf *pkt_segs[MAX_NON_TSO_BD_PER_PKT];
3104 /* Allocate enough mbufs */
3105 if (rte_mempool_get_bulk(mb_pool, (void **)pkt_segs, nb_new_buf))
3108 for (i = 0; i < nb_new_buf - 1; i++)
3109 pkt_segs[i]->next = pkt_segs[i + 1];
3111 pkt_segs[nb_new_buf - 1]->next = NULL;
3112 pkt_segs[0]->nb_segs = nb_new_buf;
3113 *alloc_mbuf = pkt_segs[0];
3119 hns3_pktmbuf_copy_hdr(struct rte_mbuf *new_pkt, struct rte_mbuf *old_pkt)
3121 new_pkt->ol_flags = old_pkt->ol_flags;
3122 new_pkt->pkt_len = rte_pktmbuf_pkt_len(old_pkt);
3123 new_pkt->outer_l2_len = old_pkt->outer_l2_len;
3124 new_pkt->outer_l3_len = old_pkt->outer_l3_len;
3125 new_pkt->l2_len = old_pkt->l2_len;
3126 new_pkt->l3_len = old_pkt->l3_len;
3127 new_pkt->l4_len = old_pkt->l4_len;
3128 new_pkt->vlan_tci_outer = old_pkt->vlan_tci_outer;
3129 new_pkt->vlan_tci = old_pkt->vlan_tci;
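/*
 * When a non-TSO packet spans more buffers than the hardware allows, copy
 * its payload into a freshly allocated chain of at most max_non_tso_bd_num
 * mbufs and free the original chain; fail if even the new chain would
 * exceed the limit. Illustrative example: pkt_len = 5000 with a 2048-byte
 * data room gives nb_new_buf = 3 and last_buf_len = 904.
 */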
3133 hns3_reassemble_tx_pkts(struct rte_mbuf *tx_pkt, struct rte_mbuf **new_pkt,
3134 uint8_t max_non_tso_bd_num)
3136 struct rte_mempool *mb_pool;
3137 struct rte_mbuf *new_mbuf;
3138 struct rte_mbuf *temp_new;
3139 struct rte_mbuf *temp;
3140 uint16_t last_buf_len;
3141 uint16_t nb_new_buf;
3151 mb_pool = tx_pkt->pool;
3152 buf_size = tx_pkt->buf_len - RTE_PKTMBUF_HEADROOM;
3153 nb_new_buf = (rte_pktmbuf_pkt_len(tx_pkt) - 1) / buf_size + 1;
3154 if (nb_new_buf > max_non_tso_bd_num)
3157 last_buf_len = rte_pktmbuf_pkt_len(tx_pkt) % buf_size;
3158 if (last_buf_len == 0)
3159 last_buf_len = buf_size;
3161 /* Allocate enough mbufs */
3162 ret = hns3_tx_alloc_mbufs(mb_pool, nb_new_buf, &new_mbuf);
3166 /* Copy the original packet content to the new mbufs */
3168 s = rte_pktmbuf_mtod(temp, char *);
3169 len_s = rte_pktmbuf_data_len(temp);
3170 temp_new = new_mbuf;
3171 while (temp != NULL && temp_new != NULL) {
3172 d = rte_pktmbuf_mtod(temp_new, char *);
3173 buf_len = temp_new->next == NULL ? last_buf_len : buf_size;
3177 len = RTE_MIN(len_s, len_d);
3181 len_d = len_d - len;
3182 len_s = len_s - len;
3188 s = rte_pktmbuf_mtod(temp, char *);
3189 len_s = rte_pktmbuf_data_len(temp);
3193 temp_new->data_len = buf_len;
3194 temp_new = temp_new->next;
3196 hns3_pktmbuf_copy_hdr(new_mbuf, tx_pkt);
3198 /* free original mbufs */
3199 rte_pktmbuf_free(tx_pkt);
3201 *new_pkt = new_mbuf;
3207 hns3_parse_outer_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec)
3209 uint32_t tmp = *ol_type_vlan_len_msec;
3210 uint64_t ol_flags = m->ol_flags;
3212 /* (outer) IP header type */
3213 if (ol_flags & PKT_TX_OUTER_IPV4) {
3214 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
3215 tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M,
3216 HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_CSUM);
3218 tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M,
3219 HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_NO_CSUM);
3220 } else if (ol_flags & PKT_TX_OUTER_IPV6) {
3221 tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
3224 /* OL3 header size, defined in 4 bytes */
3225 tmp |= hns3_gen_field_val(HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
3226 m->outer_l3_len >> HNS3_L3_LEN_UNIT);
3227 *ol_type_vlan_len_msec = tmp;
3231 hns3_parse_inner_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec,
3232 uint32_t *type_cs_vlan_tso_len)
3234 #define HNS3_NVGRE_HLEN 8
3235 uint32_t tmp_outer = *ol_type_vlan_len_msec;
3236 uint32_t tmp_inner = *type_cs_vlan_tso_len;
3237 uint64_t ol_flags = m->ol_flags;
3238 uint16_t inner_l2_len;
3240 switch (ol_flags & PKT_TX_TUNNEL_MASK) {
3241 case PKT_TX_TUNNEL_VXLAN_GPE:
3242 case PKT_TX_TUNNEL_GENEVE:
3243 case PKT_TX_TUNNEL_VXLAN:
3244	/* MAC in UDP tunnelling packet, including VxLAN and GENEVE */
3245 tmp_outer |= hns3_gen_field_val(HNS3_TXD_TUNTYPE_M,
3246 HNS3_TXD_TUNTYPE_S, HNS3_TUN_MAC_IN_UDP);
3248	 * The inner l2 length of the mbuf is the sum of the outer l4 length,
3249	 * the tunneling header length and the inner l2 length for a tunnel
3250	 * packet. But in the hns3 tx descriptor, the tunneling header
3251	 * length is contained in the field of the outer L4 length.
3252	 * Therefore, the driver needs to calculate the outer L4 length and
3255 tmp_outer |= hns3_gen_field_val(HNS3_TXD_L4LEN_M,
3257 (uint8_t)RTE_ETHER_VXLAN_HLEN >>
3260 inner_l2_len = m->l2_len - RTE_ETHER_VXLAN_HLEN;
3262 case PKT_TX_TUNNEL_GRE:
3263 tmp_outer |= hns3_gen_field_val(HNS3_TXD_TUNTYPE_M,
3264 HNS3_TXD_TUNTYPE_S, HNS3_TUN_NVGRE);
3266	 * For an NVGRE tunnel packet, the outer L4 is empty. So only
3267	 * fill the NVGRE header length into the outer L4 field.
3269 tmp_outer |= hns3_gen_field_val(HNS3_TXD_L4LEN_M,
3271 (uint8_t)HNS3_NVGRE_HLEN >> HNS3_L4_LEN_UNIT);
3273 inner_l2_len = m->l2_len - HNS3_NVGRE_HLEN;
3276 /* For non UDP / GRE tunneling, drop the tunnel packet */
3280 tmp_inner |= hns3_gen_field_val(HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
3281 inner_l2_len >> HNS3_L2_LEN_UNIT);
3282 /* OL2 header size, defined in 2 bytes */
3283 tmp_outer |= hns3_gen_field_val(HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
3284 m->outer_l2_len >> HNS3_L2_LEN_UNIT);
3286 *type_cs_vlan_tso_len = tmp_inner;
3287 *ol_type_vlan_len_msec = tmp_outer;
3293 hns3_parse_tunneling_params(struct hns3_tx_queue *txq, struct rte_mbuf *m,
3294 uint16_t tx_desc_id)
3296 struct hns3_desc *tx_ring = txq->tx_ring;
3297 struct hns3_desc *desc = &tx_ring[tx_desc_id];
3298 uint64_t ol_flags = m->ol_flags;
3299 uint32_t tmp_outer = 0;
3300 uint32_t tmp_inner = 0;
3305	 * The tunnel header is contained in the inner L2 header field of the
3306	 * mbuf, but for the hns3 descriptor, it is contained in the outer L4. So
3307	 * the two need to be switched. To avoid multiple
3308	 * calculations, the length of the L2 header, including the outer and
3309	 * inner parts, will be filled in during the parsing of tunnel packets.
3311 if (!(ol_flags & PKT_TX_TUNNEL_MASK)) {
3313 * For non tunnel type the tunnel type id is 0, so no need to
3314 * assign a value to it. Only the inner(normal) L2 header length
3317 tmp_inner |= hns3_gen_field_val(HNS3_TXD_L2LEN_M,
3318 HNS3_TXD_L2LEN_S, m->l2_len >> HNS3_L2_LEN_UNIT);
3321	 * If the outer checksum is not offloaded, the outer length may be filled
3322	 * with 0 and the length of the outer header added to the
3323	 * inner l2_len, which would lead to a checksum error. So the driver has to
3324	 * calculate the header length.
3326 if (unlikely(!(ol_flags &
3327 (PKT_TX_OUTER_IP_CKSUM | PKT_TX_OUTER_UDP_CKSUM)) &&
3328 m->outer_l2_len == 0)) {
3329 struct rte_net_hdr_lens hdr_len;
3330 (void)rte_net_get_ptype(m, &hdr_len,
3331 RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
3332 m->outer_l3_len = hdr_len.l3_len;
3333 m->outer_l2_len = hdr_len.l2_len;
3334 m->l2_len = m->l2_len - hdr_len.l2_len - hdr_len.l3_len;
3336 hns3_parse_outer_params(m, &tmp_outer);
3337 ret = hns3_parse_inner_params(m, &tmp_outer, &tmp_inner);
3342 desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(tmp_outer);
3343 desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp_inner);
3344 tmp_ol4cs = ol_flags & PKT_TX_OUTER_UDP_CKSUM ?
3345 BIT(HNS3_TXD_OL4CS_B) : 0;
3346 desc->tx.paylen_fd_dop_ol4cs = rte_cpu_to_le_32(tmp_ol4cs);
3352 hns3_parse_l3_cksum_params(struct rte_mbuf *m, uint32_t *type_cs_vlan_tso_len)
3354 uint64_t ol_flags = m->ol_flags;
3358 tmp = *type_cs_vlan_tso_len;
3359 if (ol_flags & PKT_TX_IPV4)
3360 l3_type = HNS3_L3T_IPV4;
3361 else if (ol_flags & PKT_TX_IPV6)
3362 l3_type = HNS3_L3T_IPV6;
3364 l3_type = HNS3_L3T_NONE;
3366 /* inner(/normal) L3 header size, defined in 4 bytes */
3367 tmp |= hns3_gen_field_val(HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
3368 m->l3_len >> HNS3_L3_LEN_UNIT);
3370 tmp |= hns3_gen_field_val(HNS3_TXD_L3T_M, HNS3_TXD_L3T_S, l3_type);
3372 /* Enable L3 checksum offloads */
3373 if (ol_flags & PKT_TX_IP_CKSUM)
3374 tmp |= BIT(HNS3_TXD_L3CS_B);
3375 *type_cs_vlan_tso_len = tmp;
3379 hns3_parse_l4_cksum_params(struct rte_mbuf *m, uint32_t *type_cs_vlan_tso_len)
3381 uint64_t ol_flags = m->ol_flags;
3383 /* Enable L4 checksum offloads */
3384 switch (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG)) {
3385 case PKT_TX_TCP_CKSUM | PKT_TX_TCP_SEG:
3386 case PKT_TX_TCP_CKSUM:
3387 case PKT_TX_TCP_SEG:
3388 tmp = *type_cs_vlan_tso_len;
3389 tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
3392 case PKT_TX_UDP_CKSUM:
3393 tmp = *type_cs_vlan_tso_len;
3394 tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
3397 case PKT_TX_SCTP_CKSUM:
3398 tmp = *type_cs_vlan_tso_len;
3399 tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
3405 tmp |= BIT(HNS3_TXD_L4CS_B);
3406 tmp |= hns3_gen_field_val(HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
3407 m->l4_len >> HNS3_L4_LEN_UNIT);
3408 *type_cs_vlan_tso_len = tmp;
3412 hns3_txd_enable_checksum(struct hns3_tx_queue *txq, struct rte_mbuf *m,
3413 uint16_t tx_desc_id)
3415 struct hns3_desc *tx_ring = txq->tx_ring;
3416 struct hns3_desc *desc = &tx_ring[tx_desc_id];
3419 hns3_parse_l3_cksum_params(m, &value);
3420 hns3_parse_l4_cksum_params(m, &value);
3422 desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(value);
3426 hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num,
3427 uint32_t max_non_tso_bd_num)
3429 struct rte_mbuf *m_first = tx_pkts;
3430 struct rte_mbuf *m_last = tx_pkts;
3431 uint32_t tot_len = 0;
3436	 * Hardware requires that the sum of the data length of every 8
3437	 * consecutive buffers is greater than the MSS in the hns3 network engine.
3438	 * We simplify it by ensuring that pkt_headlen + the first 8 consecutive
3439	 * frags is greater than the gso header len + mss, and that every later
3440	 * window of 7 consecutive frags is greater than the MSS, except the last 7 frags.
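 * Illustrative example: with max_non_tso_bd_num = 8, tso_segsz = 1400 and
 * a 100-byte header, the first 8 frags must reach at least 1500 bytes and
 * every later window of 7 frags must reach at least 1400 bytes.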
3442 if (bd_num <= max_non_tso_bd_num)
3445 for (i = 0; m_last && i < max_non_tso_bd_num - 1;
3446 i++, m_last = m_last->next)
3447 tot_len += m_last->data_len;
3452	/* ensure the sum of the first 8 frags is greater than mss + header */
3453 hdr_len = tx_pkts->l2_len + tx_pkts->l3_len + tx_pkts->l4_len;
3454 hdr_len += (tx_pkts->ol_flags & PKT_TX_TUNNEL_MASK) ?
3455 tx_pkts->outer_l2_len + tx_pkts->outer_l3_len : 0;
3456 if (tot_len + m_last->data_len < tx_pkts->tso_segsz + hdr_len)
3460	 * ensure the sum of the data length of every 7 consecutive buffers
3461	 * is greater than the mss, except for the last one.
3463 for (i = 0; m_last && i < bd_num - max_non_tso_bd_num; i++) {
3464 tot_len -= m_first->data_len;
3465 tot_len += m_last->data_len;
3467 if (tot_len < tx_pkts->tso_segsz)
3470 m_first = m_first->next;
3471 m_last = m_last->next;
3478 hns3_outer_ipv4_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags,
3481 struct rte_ipv4_hdr *ipv4_hdr;
3482 ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
3484 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
3485 ipv4_hdr->hdr_checksum = 0;
3486 if (ol_flags & PKT_TX_OUTER_UDP_CKSUM) {
3487 struct rte_udp_hdr *udp_hdr;
3489	 * If OUTER_UDP_CKSUM is supported, HW can calculate the pseudo
3490	 * header for TSO packets
3492 if (ol_flags & PKT_TX_TCP_SEG)
3494 udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
3495 m->outer_l2_len + m->outer_l3_len);
3496 udp_hdr->dgram_cksum = rte_ipv4_phdr_cksum(ipv4_hdr, ol_flags);
3500 *l4_proto = ipv4_hdr->next_proto_id;
3505 hns3_outer_ipv6_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags,
3508 struct rte_ipv6_hdr *ipv6_hdr;
3509 ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
3511 if (ol_flags & PKT_TX_OUTER_UDP_CKSUM) {
3512 struct rte_udp_hdr *udp_hdr;
3514	 * If OUTER_UDP_CKSUM is supported, HW can calculate the pseudo
3515	 * header for TSO packets
3517 if (ol_flags & PKT_TX_TCP_SEG)
3519 udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
3520 m->outer_l2_len + m->outer_l3_len);
3521 udp_hdr->dgram_cksum = rte_ipv6_phdr_cksum(ipv6_hdr, ol_flags);
3525 *l4_proto = ipv6_hdr->proto;
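/*
 * For tunnel packets the outer IPv4/IPv6 header is prepared first; then,
 * for UDP-encapsulated TSO packets whose payload exceeds the TSO segment
 * size, the outer UDP checksum is cleared so the hardware produces
 * consistent segments.
 */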
3530 hns3_outer_header_cksum_prepare(struct rte_mbuf *m)
3532 uint64_t ol_flags = m->ol_flags;
3533 uint32_t paylen, hdr_len, l4_proto;
3534 struct rte_udp_hdr *udp_hdr;
3536 if (!(ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6)))
3539 if (ol_flags & PKT_TX_OUTER_IPV4) {
3540 if (hns3_outer_ipv4_cksum_prepared(m, ol_flags, &l4_proto))
3543 if (hns3_outer_ipv6_cksum_prepared(m, ol_flags, &l4_proto))
3547	/* the driver should ensure the outer UDP cksum is 0 for TUNNEL TSO */
3548 if (l4_proto == IPPROTO_UDP && (ol_flags & PKT_TX_TCP_SEG)) {
3549 hdr_len = m->l2_len + m->l3_len + m->l4_len;
3550 hdr_len += m->outer_l2_len + m->outer_l3_len;
3551 paylen = m->pkt_len - hdr_len;
3552 if (paylen <= m->tso_segsz)
3554 udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
3557 udp_hdr->dgram_cksum = 0;
3562 hns3_check_tso_pkt_valid(struct rte_mbuf *m)
3564 uint32_t tmp_data_len_sum = 0;
3565 uint16_t nb_buf = m->nb_segs;
3566 uint32_t paylen, hdr_len;
3567 struct rte_mbuf *m_seg;
3570 if (nb_buf > HNS3_MAX_TSO_BD_PER_PKT)
3573 hdr_len = m->l2_len + m->l3_len + m->l4_len;
3574 hdr_len += (m->ol_flags & PKT_TX_TUNNEL_MASK) ?
3575 m->outer_l2_len + m->outer_l3_len : 0;
3576 if (hdr_len > HNS3_MAX_TSO_HDR_SIZE)
3579 paylen = m->pkt_len - hdr_len;
3580 if (paylen > HNS3_MAX_BD_PAYLEN)
3584	 * The TSO header (including the outer and inner L2, L3 and L4 headers)
3585	 * should be provided by at most three descriptors in the hns3 network
3589 for (i = 0; m_seg != NULL && i < HNS3_MAX_TSO_HDR_BD_NUM && i < nb_buf;
3590 i++, m_seg = m_seg->next) {
3591 tmp_data_len_sum += m_seg->data_len;
3594 if (hdr_len > tmp_data_len_sum)
3600 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3602 hns3_vld_vlan_chk(struct hns3_tx_queue *txq, struct rte_mbuf *m)
3604 struct rte_ether_hdr *eh;
3605 struct rte_vlan_hdr *vh;
3607 if (!txq->pvid_sw_shift_en)
3611 * Due to hardware limitations, we only support two-layer VLAN hardware
3612 * offload in Tx direction based on hns3 network engine, so when PVID is
3613 * enabled, QinQ insert is no longer supported.
3614 * And when PVID is enabled, in the following two cases:
3615 * i) packets with more than two VLAN tags.
3616 * ii) packets with one VLAN tag while the hardware VLAN insert is
3618 * The packets will be regarded as abnormal packets and discarded by
3619 * hardware in Tx direction. For debugging purposes, a validation check
3620 * for these types of packets is added to the '.tx_pkt_prepare' ops
3621 * implementation function named hns3_prep_pkts to inform users that
3622 * these packets will be discarded.
3624 if (m->ol_flags & PKT_TX_QINQ_PKT)
3627 eh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
3628 if (eh->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
3629 if (m->ol_flags & PKT_TX_VLAN_PKT)
3632 /* Ensure the incoming packet is not a QinQ packet */
3633 vh = (struct rte_vlan_hdr *)(eh + 1);
3634 if (vh->eth_proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN))
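/*
 * Software fallback that computes the UDP checksum for cases the hardware
 * cannot handle: the L4 length is derived from the IPv4 total length or the
 * IPv6 payload length and the datagram is checksummed with
 * rte_raw_cksum_mbuf() starting after the L2/L3 headers.
 */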
3643 hns3_udp_cksum_help(struct rte_mbuf *m)
3645 uint64_t ol_flags = m->ol_flags;
3649 if (ol_flags & PKT_TX_IPV4) {
3650 struct rte_ipv4_hdr *ipv4_hdr = rte_pktmbuf_mtod_offset(m,
3651 struct rte_ipv4_hdr *, m->l2_len);
3652 l4_len = rte_be_to_cpu_16(ipv4_hdr->total_length) - m->l3_len;
3654 struct rte_ipv6_hdr *ipv6_hdr = rte_pktmbuf_mtod_offset(m,
3655 struct rte_ipv6_hdr *, m->l2_len);
3656 l4_len = rte_be_to_cpu_16(ipv6_hdr->payload_len);
3659 rte_raw_cksum_mbuf(m, m->l2_len + m->l3_len, l4_len, &cksum);
3663	 * RFC 768: If the computed checksum is zero for UDP, it is transmitted
3669 return (uint16_t)cksum;
3673 hns3_validate_tunnel_cksum(struct hns3_tx_queue *tx_queue, struct rte_mbuf *m)
3675 uint64_t ol_flags = m->ol_flags;
3676 struct rte_udp_hdr *udp_hdr;
3679 if (tx_queue->udp_cksum_mode == HNS3_SPECIAL_PORT_HW_CKSUM_MODE ||
3680 ol_flags & PKT_TX_TUNNEL_MASK ||
3681 (ol_flags & PKT_TX_L4_MASK) != PKT_TX_UDP_CKSUM)
3684	 * A UDP packet with the same dst_port as VXLAN/VXLAN_GPE/GENEVE will
3685	 * be recognized as a tunnel packet in HW. In this case, if UDP CKSUM
3686	 * offload is set and the tunnel mask has not been set, the CKSUM will
3687	 * be wrong since the header length is wrong, so the driver should complete
3688	 * the CKSUM to avoid a CKSUM error.
3690 udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
3691 m->l2_len + m->l3_len);
3692 dst_port = rte_be_to_cpu_16(udp_hdr->dst_port);
3694 case RTE_VXLAN_DEFAULT_PORT:
3695 case RTE_VXLAN_GPE_DEFAULT_PORT:
3696 case RTE_GENEVE_DEFAULT_PORT:
3697 udp_hdr->dgram_cksum = hns3_udp_cksum_help(m);
3698 m->ol_flags = ol_flags & ~PKT_TX_L4_MASK;
3706 hns3_prep_pkt_proc(struct hns3_tx_queue *tx_queue, struct rte_mbuf *m)
3710 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3711 ret = rte_validate_tx_offload(m);
3717 ret = hns3_vld_vlan_chk(tx_queue, m);
3723 if (hns3_pkt_is_tso(m)) {
3724 if (hns3_pkt_need_linearized(m, m->nb_segs,
3725 tx_queue->max_non_tso_bd_num) ||
3726 hns3_check_tso_pkt_valid(m)) {
3731 if (tx_queue->tso_mode != HNS3_TSO_SW_CAL_PSEUDO_H_CSUM) {
3733	 * (tso mode != HNS3_TSO_SW_CAL_PSEUDO_H_CSUM) means the
3734	 * hardware supports recalculating the TCP pseudo header
3735	 * checksum of packets that need TSO, so the network driver
3736	 * software does not need to recalculate it.
3738 hns3_outer_header_cksum_prepare(m);
3743 ret = rte_net_intel_cksum_prepare(m);
3749 if (!hns3_validate_tunnel_cksum(tx_queue, m))
3752 hns3_outer_header_cksum_prepare(m);
3758 hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
3764 for (i = 0; i < nb_pkts; i++) {
3766 if (hns3_prep_pkt_proc(tx_queue, m))
3774 hns3_parse_cksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
3777 struct hns3_desc *tx_ring = txq->tx_ring;
3778 struct hns3_desc *desc = &tx_ring[tx_desc_id];
3780 /* Enable checksum offloading */
3781 if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK) {
3782 /* Fill in tunneling parameters if necessary */
3783 if (hns3_parse_tunneling_params(txq, m, tx_desc_id)) {
3784 txq->dfx_stats.unsupported_tunnel_pkt_cnt++;
3788 hns3_txd_enable_checksum(txq, m, tx_desc_id);
3790 /* clear the control bit */
3791 desc->tx.type_cs_vlan_tso_len = 0;
3792 desc->tx.ol_type_vlan_len_msec = 0;
3799 hns3_check_non_tso_pkt(uint16_t nb_buf, struct rte_mbuf **m_seg,
3800 struct rte_mbuf *tx_pkt, struct hns3_tx_queue *txq)
3802 uint8_t max_non_tso_bd_num;
3803 struct rte_mbuf *new_pkt;
3806 if (hns3_pkt_is_tso(*m_seg))
3810	 * If the packet length is greater than the HNS3_MAX_FRAME_LEN
3811	 * that the driver supports, the packet will be ignored.
3813 if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) > HNS3_MAX_FRAME_LEN)) {
3814 txq->dfx_stats.over_length_pkt_cnt++;
3818 max_non_tso_bd_num = txq->max_non_tso_bd_num;
3819 if (unlikely(nb_buf > max_non_tso_bd_num)) {
3820 txq->dfx_stats.exceed_limit_bd_pkt_cnt++;
3821 ret = hns3_reassemble_tx_pkts(tx_pkt, &new_pkt,
3822 max_non_tso_bd_num);
3824 txq->dfx_stats.exceed_limit_bd_reassem_fail++;
3834 hns3_tx_free_buffer_simple(struct hns3_tx_queue *txq)
3836 struct hns3_entry *tx_entry;
3837 struct hns3_desc *desc;
3838 uint16_t tx_next_clean;
3842 if (HNS3_GET_TX_QUEUE_PEND_BD_NUM(txq) < txq->tx_rs_thresh)
3846 * All mbufs can be released only when the VLD bits of all
3847 * descriptors in a batch are cleared.
3849 tx_next_clean = (txq->next_to_clean + txq->tx_rs_thresh - 1) %
3851 desc = &txq->tx_ring[tx_next_clean];
3852 for (i = 0; i < txq->tx_rs_thresh; i++) {
3853 if (rte_le_to_cpu_16(desc->tx.tp_fe_sc_vld_ra_ri) &
3854 BIT(HNS3_TXD_VLD_B))
3859 tx_entry = &txq->sw_ring[txq->next_to_clean];
3861 for (i = 0; i < txq->tx_rs_thresh; i++)
3862 rte_prefetch0((tx_entry + i)->mbuf);
3863 for (i = 0; i < txq->tx_rs_thresh; i++, tx_entry++) {
3864 rte_mempool_put(tx_entry->mbuf->pool, tx_entry->mbuf);
3865 tx_entry->mbuf = NULL;
3868 txq->next_to_clean = (tx_next_clean + 1) % txq->nb_tx_desc;
3869 txq->tx_bd_ready += txq->tx_rs_thresh;
3874 hns3_tx_backup_1mbuf(struct hns3_entry *tx_entry, struct rte_mbuf **pkts)
3876 tx_entry->mbuf = pkts[0];
3880 hns3_tx_backup_4mbuf(struct hns3_entry *tx_entry, struct rte_mbuf **pkts)
3882 hns3_tx_backup_1mbuf(&tx_entry[0], &pkts[0]);
3883 hns3_tx_backup_1mbuf(&tx_entry[1], &pkts[1]);
3884 hns3_tx_backup_1mbuf(&tx_entry[2], &pkts[2]);
3885 hns3_tx_backup_1mbuf(&tx_entry[3], &pkts[3]);
3889 hns3_tx_setup_4bd(struct hns3_desc *txdp, struct rte_mbuf **pkts)
3891 #define PER_LOOP_NUM 4
3892 const uint16_t bd_flag = BIT(HNS3_TXD_VLD_B) | BIT(HNS3_TXD_FE_B);
3896 for (i = 0; i < PER_LOOP_NUM; i++, txdp++, pkts++) {
3897 dma_addr = rte_mbuf_data_iova(*pkts);
3898 txdp->addr = rte_cpu_to_le_64(dma_addr);
3899 txdp->tx.send_size = rte_cpu_to_le_16((*pkts)->data_len);
3900 txdp->tx.paylen_fd_dop_ol4cs = 0;
3901 txdp->tx.type_cs_vlan_tso_len = 0;
3902 txdp->tx.ol_type_vlan_len_msec = 0;
3903 txdp->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(bd_flag);
3908 hns3_tx_setup_1bd(struct hns3_desc *txdp, struct rte_mbuf **pkts)
3910 const uint16_t bd_flag = BIT(HNS3_TXD_VLD_B) | BIT(HNS3_TXD_FE_B);
3913 dma_addr = rte_mbuf_data_iova(*pkts);
3914 txdp->addr = rte_cpu_to_le_64(dma_addr);
3915 txdp->tx.send_size = rte_cpu_to_le_16((*pkts)->data_len);
3916 txdp->tx.paylen_fd_dop_ol4cs = 0;
3917 txdp->tx.type_cs_vlan_tso_len = 0;
3918 txdp->tx.ol_type_vlan_len_msec = 0;
3919 txdp->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(bd_flag);
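/*
 * Fill the hardware ring for the simple Tx path, which assumes one BD per
 * packet: descriptors are written in unrolled batches of four and any
 * leftover packets are written one at a time.
 */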
3923 hns3_tx_fill_hw_ring(struct hns3_tx_queue *txq,
3924 struct rte_mbuf **pkts,
3927 #define PER_LOOP_NUM 4
3928 #define PER_LOOP_MASK (PER_LOOP_NUM - 1)
3929 struct hns3_desc *txdp = &txq->tx_ring[txq->next_to_use];
3930 struct hns3_entry *tx_entry = &txq->sw_ring[txq->next_to_use];
3931 const uint32_t mainpart = (nb_pkts & ((uint32_t)~PER_LOOP_MASK));
3932 const uint32_t leftover = (nb_pkts & ((uint32_t)PER_LOOP_MASK));
3935 for (i = 0; i < mainpart; i += PER_LOOP_NUM) {
3936 hns3_tx_backup_4mbuf(tx_entry + i, pkts + i);
3937 hns3_tx_setup_4bd(txdp + i, pkts + i);
3939 /* Increment bytes counter */
3941 for (j = 0; j < PER_LOOP_NUM; j++)
3942 txq->basic_stats.bytes += pkts[i + j]->pkt_len;
3944 if (unlikely(leftover > 0)) {
3945 for (i = 0; i < leftover; i++) {
3946 hns3_tx_backup_1mbuf(tx_entry + mainpart + i,
3947 pkts + mainpart + i);
3948 hns3_tx_setup_1bd(txdp + mainpart + i,
3949 pkts + mainpart + i);
3951 /* Increment bytes counter */
3952 txq->basic_stats.bytes += pkts[mainpart + i]->pkt_len;
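/*
 * Simple transmit burst: reclaim completed buffers in batches of
 * tx_rs_thresh, clamp nb_pkts to the number of ready BDs, split the fill
 * at the ring wrap point if necessary, and ring the doorbell register
 * (io_tail_reg) once at the end.
 */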
3958 hns3_xmit_pkts_simple(void *tx_queue,
3959 struct rte_mbuf **tx_pkts,
3962 struct hns3_tx_queue *txq = tx_queue;
3965 hns3_tx_free_buffer_simple(txq);
3967 nb_pkts = RTE_MIN(txq->tx_bd_ready, nb_pkts);
3968 if (unlikely(nb_pkts == 0)) {
3969 if (txq->tx_bd_ready == 0)
3970 txq->dfx_stats.queue_full_cnt++;
3974 txq->tx_bd_ready -= nb_pkts;
3975 if (txq->next_to_use + nb_pkts > txq->nb_tx_desc) {
3976 nb_tx = txq->nb_tx_desc - txq->next_to_use;
3977 hns3_tx_fill_hw_ring(txq, tx_pkts, nb_tx);
3978 txq->next_to_use = 0;
3981 hns3_tx_fill_hw_ring(txq, tx_pkts + nb_tx, nb_pkts - nb_tx);
3982 txq->next_to_use += nb_pkts - nb_tx;
3984 hns3_write_reg_opt(txq->io_tail_reg, nb_pkts);
3990 hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
3992 struct hns3_tx_queue *txq = tx_queue;
3993 struct hns3_entry *tx_bak_pkt;
3994 struct hns3_desc *tx_ring;
3995 struct rte_mbuf *tx_pkt;
3996 struct rte_mbuf *m_seg;
3997 struct hns3_desc *desc;
3998 uint32_t nb_hold = 0;
3999 uint16_t tx_next_use;
4000 uint16_t tx_pkt_num;
4006 /* free useless buffer */
4007 hns3_tx_free_useless_buffer(txq);
4009 tx_next_use = txq->next_to_use;
4010 tx_bd_max = txq->nb_tx_desc;
4011 tx_pkt_num = nb_pkts;
4012 tx_ring = txq->tx_ring;
4015 tx_bak_pkt = &txq->sw_ring[tx_next_use];
4016 for (nb_tx = 0; nb_tx < tx_pkt_num; nb_tx++) {
4017 tx_pkt = *tx_pkts++;
4019 nb_buf = tx_pkt->nb_segs;
4021 if (nb_buf > txq->tx_bd_ready) {
4022 txq->dfx_stats.queue_full_cnt++;
4030	 * If the packet length is less than the minimum packet length supported
4031	 * by hardware in the Tx direction, the driver needs to pad it to avoid
4034 if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) <
4035 txq->min_tx_pkt_len)) {
4039 add_len = txq->min_tx_pkt_len -
4040 rte_pktmbuf_pkt_len(tx_pkt);
4041 appended = rte_pktmbuf_append(tx_pkt, add_len);
4042 if (appended == NULL) {
4043 txq->dfx_stats.pkt_padding_fail_cnt++;
4047 memset(appended, 0, add_len);
4052 if (hns3_check_non_tso_pkt(nb_buf, &m_seg, tx_pkt, txq))
4055 if (hns3_parse_cksum(txq, tx_next_use, m_seg))
4059 desc = &tx_ring[tx_next_use];
4062 * If the packet is divided into multiple Tx Buffer Descriptors,
4063 * only need to fill vlan, paylen and tso into the first Tx
4064 * Buffer Descriptor.
4066 hns3_fill_first_desc(txq, desc, m_seg);
4069 desc = &tx_ring[tx_next_use];
4071 * Fill valid bits, DMA address and data length for each
4072 * Tx Buffer Descriptor.
4074 hns3_fill_per_desc(desc, m_seg);
4075 tx_bak_pkt->mbuf = m_seg;
4076 m_seg = m_seg->next;
4079 if (tx_next_use >= tx_bd_max) {
4081 tx_bak_pkt = txq->sw_ring;
4085 } while (m_seg != NULL);
4087 /* Add end flag for the last Tx Buffer Descriptor */
4088 desc->tx.tp_fe_sc_vld_ra_ri |=
4089 rte_cpu_to_le_16(BIT(HNS3_TXD_FE_B));
4091 /* Increment bytes counter */
4092 txq->basic_stats.bytes += tx_pkt->pkt_len;
4094 txq->next_to_use = tx_next_use;
4095 txq->tx_bd_ready -= i;
4101 hns3_write_reg_opt(txq->io_tail_reg, nb_hold);
4107 hns3_tx_check_vec_support(__rte_unused struct rte_eth_dev *dev)
4113 hns3_xmit_pkts_vec(__rte_unused void *tx_queue,
4114 __rte_unused struct rte_mbuf **tx_pkts,
4115 __rte_unused uint16_t nb_pkts)
4121 hns3_xmit_pkts_vec_sve(void __rte_unused * tx_queue,
4122 struct rte_mbuf __rte_unused **tx_pkts,
4123 uint16_t __rte_unused nb_pkts)
4129 hns3_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
4130 struct rte_eth_burst_mode *mode)
4132 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
4133 const char *info = NULL;
4135 if (pkt_burst == hns3_xmit_pkts_simple)
4136 info = "Scalar Simple";
4137 else if (pkt_burst == hns3_xmit_pkts)
4139 else if (pkt_burst == hns3_xmit_pkts_vec)
4140 info = "Vector Neon";
4141 else if (pkt_burst == hns3_xmit_pkts_vec_sve)
4142 info = "Vector Sve";
4147 snprintf(mode->info, sizeof(mode->info), "%s", info);
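/*
 * Select the Tx burst function from the user hint and the device state:
 * the vector (NEON/SVE) paths when allowed, the simple path when no Tx
 * offload other than MBUF_FAST_FREE is requested, otherwise the common
 * path together with hns3_prep_pkts as the Tx prepare callback.
 */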
4152 static eth_tx_burst_t
4153 hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep)
4155 uint64_t offloads = dev->data->dev_conf.txmode.offloads;
4156 struct hns3_adapter *hns = dev->data->dev_private;
4157 bool vec_allowed, sve_allowed, simple_allowed;
4159 vec_allowed = hns->tx_vec_allowed &&
4160 hns3_tx_check_vec_support(dev) == 0;
4161 sve_allowed = vec_allowed && hns3_check_sve_support();
4162 simple_allowed = hns->tx_simple_allowed &&
4163 offloads == (offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
4167 if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_VEC && vec_allowed)
4168 return hns3_xmit_pkts_vec;
4169 if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_SVE && sve_allowed)
4170 return hns3_xmit_pkts_vec_sve;
4171 if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_SIMPLE && simple_allowed)
4172 return hns3_xmit_pkts_simple;
4173 if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_COMMON) {
4174 *prep = hns3_prep_pkts;
4175 return hns3_xmit_pkts;
4179 return hns3_xmit_pkts_vec;
4181 return hns3_xmit_pkts_simple;
4183 *prep = hns3_prep_pkts;
4184 return hns3_xmit_pkts;
4188 hns3_dummy_rxtx_burst(void *dpdk_txq __rte_unused,
4189 struct rte_mbuf **pkts __rte_unused,
4190 uint16_t pkts_n __rte_unused)
4195 void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
4197 struct hns3_adapter *hns = eth_dev->data->dev_private;
4198 eth_tx_prep_t prep = NULL;
4200 if (hns->hw.adapter_state == HNS3_NIC_STARTED &&
4201 __atomic_load_n(&hns->hw.reset.resetting, __ATOMIC_RELAXED) == 0) {
4202 eth_dev->rx_pkt_burst = hns3_get_rx_function(eth_dev);
4203 eth_dev->rx_descriptor_status = hns3_dev_rx_descriptor_status;
4204 eth_dev->tx_pkt_burst = hns3_get_tx_function(eth_dev, &prep);
4205 eth_dev->tx_pkt_prepare = prep;
4206 eth_dev->tx_descriptor_status = hns3_dev_tx_descriptor_status;
4208 eth_dev->rx_pkt_burst = hns3_dummy_rxtx_burst;
4209 eth_dev->tx_pkt_burst = hns3_dummy_rxtx_burst;
4210 eth_dev->tx_pkt_prepare = hns3_dummy_rxtx_burst;
4215 hns3_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
4216 struct rte_eth_rxq_info *qinfo)
4218 struct hns3_rx_queue *rxq = dev->data->rx_queues[queue_id];
4220 qinfo->mp = rxq->mb_pool;
4221 qinfo->nb_desc = rxq->nb_rx_desc;
4222 qinfo->scattered_rx = dev->data->scattered_rx;
4223 /* Report the HW Rx buffer length to user */
4224 qinfo->rx_buf_size = rxq->rx_buf_len;
4227 * If there are no available Rx buffer descriptors, incoming packets
4228 * are always dropped by hardware based on hns3 network engine.
4230 qinfo->conf.rx_drop_en = 1;
4231 qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
4232 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
4233 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
4237 hns3_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
4238 struct rte_eth_txq_info *qinfo)
4240 struct hns3_tx_queue *txq = dev->data->tx_queues[queue_id];
4242 qinfo->nb_desc = txq->nb_tx_desc;
4243 qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
4244 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
4245 qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
4246 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
4250 hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
4252 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4253 struct hns3_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
4254 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
4257 if (!hns3_dev_indep_txrx_supported(hw))
4260 ret = hns3_reset_queue(hw, rx_queue_id, HNS3_RING_TYPE_RX);
4262 hns3_err(hw, "fail to reset Rx queue %u, ret = %d.",
4267 ret = hns3_init_rxq(hns, rx_queue_id);
4269 hns3_err(hw, "fail to init Rx queue %u, ret = %d.",
4274 hns3_enable_rxq(rxq, true);
4275 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
4281 hns3_reset_sw_rxq(struct hns3_rx_queue *rxq)
4283 rxq->next_to_use = 0;
4284 rxq->rx_rearm_start = 0;
4285 rxq->rx_free_hold = 0;
4286 rxq->rx_rearm_nb = 0;
4287 rxq->pkt_first_seg = NULL;
4288 rxq->pkt_last_seg = NULL;
4289 memset(&rxq->rx_ring[0], 0, rxq->nb_rx_desc * sizeof(struct hns3_desc));
4290 hns3_rxq_vec_setup(rxq);
4294 hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
4296 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4297 struct hns3_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
4299 if (!hns3_dev_indep_txrx_supported(hw))
4302 hns3_enable_rxq(rxq, false);
4304 hns3_rx_queue_release_mbufs(rxq);
4306 hns3_reset_sw_rxq(rxq);
4307 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
4313 hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
4315 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4316 struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
4319 if (!hns3_dev_indep_txrx_supported(hw))
4322 ret = hns3_reset_queue(hw, tx_queue_id, HNS3_RING_TYPE_TX);
4324 hns3_err(hw, "fail to reset Tx queue %u, ret = %d.",
4330 hns3_enable_txq(txq, true);
4331 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
4337 hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
4339 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4340 struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
4342 if (!hns3_dev_indep_txrx_supported(hw))
4345 hns3_enable_txq(txq, false);
4346 hns3_tx_queue_release_mbufs(txq);
4348 * All the mbufs in sw_ring are released and all the pointers in sw_ring
4349	 * are set to NULL. If this queue is still used by the upper layer,
4350	 * residual SW state of this txq may cause the pointers in sw_ring
4351	 * that have already been set to NULL to be released again. To avoid it,
4355 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
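/*
 * Used by hns3_tx_done_cleanup() on the full Tx path: free up to free_cnt
 * descriptors starting at next_to_clean, stopping at next_to_use or at the
 * first descriptor whose VLD bit is still set, and update next_to_clean
 * and tx_bd_ready with the amount reclaimed.
 */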
4361 hns3_tx_done_cleanup_full(struct hns3_tx_queue *txq, uint32_t free_cnt)
4363 uint16_t next_to_clean = txq->next_to_clean;
4364 uint16_t next_to_use = txq->next_to_use;
4365 uint16_t tx_bd_ready = txq->tx_bd_ready;
4366 struct hns3_entry *tx_pkt = &txq->sw_ring[next_to_clean];
4367 struct hns3_desc *desc = &txq->tx_ring[next_to_clean];
4370 if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
4371 free_cnt = txq->nb_tx_desc;
4373 for (idx = 0; idx < free_cnt; idx++) {
4374 if (next_to_clean == next_to_use)
4377 if (desc->tx.tp_fe_sc_vld_ra_ri &
4378 rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B)))
4381 if (tx_pkt->mbuf != NULL) {
4382 rte_pktmbuf_free_seg(tx_pkt->mbuf);
4383 tx_pkt->mbuf = NULL;
4390 if (next_to_clean == txq->nb_tx_desc) {
4391 tx_pkt = txq->sw_ring;
4392 desc = txq->tx_ring;
4398 txq->next_to_clean = next_to_clean;
4399 txq->tx_bd_ready = tx_bd_ready;
4406 hns3_tx_done_cleanup(void *txq, uint32_t free_cnt)
4408 struct hns3_tx_queue *q = (struct hns3_tx_queue *)txq;
4409 struct rte_eth_dev *dev = &rte_eth_devices[q->port_id];
4411 if (dev->tx_pkt_burst == hns3_xmit_pkts)
4412 return hns3_tx_done_cleanup_full(q, free_cnt);
4413 else if (dev->tx_pkt_burst == hns3_dummy_rxtx_burst)
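/*
 * Report the state of the Rx descriptor at 'offset' from next_to_use:
 * offsets that fall within the BDs the driver still holds (rx_free_hold for
 * the scalar paths, rx_rearm_nb for the vector paths) are UNAVAIL;
 * otherwise the descriptor is DONE when its VLD bit is set and AVAIL when
 * it is not.
 */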
4420 hns3_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
4422 volatile struct hns3_desc *rxdp;
4423 struct hns3_rx_queue *rxq;
4424 struct rte_eth_dev *dev;
4425 uint32_t bd_base_info;
4428 rxq = (struct hns3_rx_queue *)rx_queue;
4429 if (offset >= rxq->nb_rx_desc)
4432 desc_id = (rxq->next_to_use + offset) % rxq->nb_rx_desc;
4433 rxdp = &rxq->rx_ring[desc_id];
4434 bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
4435 dev = &rte_eth_devices[rxq->port_id];
4436 if (dev->rx_pkt_burst == hns3_recv_pkts ||
4437 dev->rx_pkt_burst == hns3_recv_scattered_pkts) {
4438 if (offset >= rxq->nb_rx_desc - rxq->rx_free_hold)
4439 return RTE_ETH_RX_DESC_UNAVAIL;
4440 } else if (dev->rx_pkt_burst == hns3_recv_pkts_vec ||
4441 dev->rx_pkt_burst == hns3_recv_pkts_vec_sve){
4442 if (offset >= rxq->nb_rx_desc - rxq->rx_rearm_nb)
4443 return RTE_ETH_RX_DESC_UNAVAIL;
4445 return RTE_ETH_RX_DESC_UNAVAIL;
4448 if (!(bd_base_info & BIT(HNS3_RXD_VLD_B)))
4449 return RTE_ETH_RX_DESC_AVAIL;
4451 return RTE_ETH_RX_DESC_DONE;
4455 hns3_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
4457 volatile struct hns3_desc *txdp;
4458 struct hns3_tx_queue *txq;
4459 struct rte_eth_dev *dev;
4462 txq = (struct hns3_tx_queue *)tx_queue;
4463 if (offset >= txq->nb_tx_desc)
4466 dev = &rte_eth_devices[txq->port_id];
4467 if (dev->tx_pkt_burst != hns3_xmit_pkts_simple &&
4468 dev->tx_pkt_burst != hns3_xmit_pkts &&
4469 dev->tx_pkt_burst != hns3_xmit_pkts_vec_sve &&
4470 dev->tx_pkt_burst != hns3_xmit_pkts_vec)
4471 return RTE_ETH_TX_DESC_UNAVAIL;
4473 desc_id = (txq->next_to_use + offset) % txq->nb_tx_desc;
4474 txdp = &txq->tx_ring[desc_id];
4475 if (txdp->tx.tp_fe_sc_vld_ra_ri & rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B)))
4476 return RTE_ETH_TX_DESC_FULL;
4478 return RTE_ETH_TX_DESC_DONE;
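/*
 * Count the used Rx descriptors of a queue: read the filled BD number from
 * the HNS3_RING_RX_FBDNUM_REG register and subtract the BDs the driver has
 * already processed but not yet handed back to the hardware.
 */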
4482 hns3_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
4485 * Number of BDs that have been processed by the driver
4486 * but have not been notified to the hardware.
4488 uint32_t driver_hold_bd_num;
4489 struct hns3_rx_queue *rxq;
4492 rxq = dev->data->rx_queues[rx_queue_id];
4493 fbd_num = hns3_read_dev(rxq, HNS3_RING_RX_FBDNUM_REG);
4494 if (dev->rx_pkt_burst == hns3_recv_pkts_vec ||
4495 dev->rx_pkt_burst == hns3_recv_pkts_vec_sve)
4496 driver_hold_bd_num = rxq->rx_rearm_nb;
4498 driver_hold_bd_num = rxq->rx_free_hold;
4500 if (fbd_num <= driver_hold_bd_num)
4503 return fbd_num - driver_hold_bd_num;
4507 hns3_enable_rxd_adv_layout(struct hns3_hw *hw)
4510	 * If the hardware supports the rxd advanced layout, the driver enables it
4513 if (hns3_dev_rxd_adv_layout_supported(hw))
4514 hns3_write_dev(hw, HNS3_RXD_ADV_LAYOUT_EN_REG, 1);