1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2019 Hisilicon Limited.
11 #include <rte_bus_pci.h>
12 #include <rte_byteorder.h>
13 #include <rte_common.h>
14 #include <rte_cycles.h>
17 #include <rte_ether.h>
18 #include <rte_vxlan.h>
19 #include <rte_ethdev_driver.h>
24 #include <rte_malloc.h>
26 #if defined(RTE_ARCH_ARM64) && defined(CC_SVE_SUPPORT)
27 #include <rte_cpuflags.h>
30 #include "hns3_ethdev.h"
31 #include "hns3_rxtx.h"
32 #include "hns3_regs.h"
33 #include "hns3_logs.h"
35 #define HNS3_CFG_DESC_NUM(num) ((num) / 8 - 1)
36 #define HNS3_RX_RING_PREFETCTH_MASK 3
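/*
 * Example: HNS3_CFG_DESC_NUM(1024) yields (1024 / 8 - 1) = 127, which is
 * the value programmed into the BD number registers below. The prefetch
 * mask of 3 means the descriptor rings are prefetched once every four
 * entries.
 */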
39 hns3_rx_queue_release_mbufs(struct hns3_rx_queue *rxq)
43 /* Note: Fake rx queue will not enter here */
44 if (rxq->sw_ring == NULL)
47 if (rxq->rx_rearm_nb == 0) {
48 for (i = 0; i < rxq->nb_rx_desc; i++) {
49 if (rxq->sw_ring[i].mbuf != NULL) {
50 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
51 rxq->sw_ring[i].mbuf = NULL;
55 for (i = rxq->next_to_use;
56 i != rxq->rx_rearm_start;
57 i = (i + 1) % rxq->nb_rx_desc) {
58 if (rxq->sw_ring[i].mbuf != NULL) {
59 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
60 rxq->sw_ring[i].mbuf = NULL;
65 for (i = 0; i < rxq->bulk_mbuf_num; i++)
66 rte_pktmbuf_free_seg(rxq->bulk_mbuf[i]);
67 rxq->bulk_mbuf_num = 0;
69 if (rxq->pkt_first_seg) {
70 rte_pktmbuf_free(rxq->pkt_first_seg);
71 rxq->pkt_first_seg = NULL;
76 hns3_tx_queue_release_mbufs(struct hns3_tx_queue *txq)
80 /* Note: Fake tx queue will not enter here */
82 for (i = 0; i < txq->nb_tx_desc; i++) {
83 if (txq->sw_ring[i].mbuf) {
84 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
85 txq->sw_ring[i].mbuf = NULL;
92 hns3_rx_queue_release(void *queue)
94 struct hns3_rx_queue *rxq = queue;
96 hns3_rx_queue_release_mbufs(rxq);
98 rte_memzone_free(rxq->mz);
100 rte_free(rxq->sw_ring);
106 hns3_tx_queue_release(void *queue)
108 struct hns3_tx_queue *txq = queue;
110 hns3_tx_queue_release_mbufs(txq);
112 rte_memzone_free(txq->mz);
114 rte_free(txq->sw_ring);
122 hns3_dev_rx_queue_release(void *queue)
124 struct hns3_rx_queue *rxq = queue;
125 struct hns3_adapter *hns;
131 rte_spinlock_lock(&hns->hw.lock);
132 hns3_rx_queue_release(queue);
133 rte_spinlock_unlock(&hns->hw.lock);
137 hns3_dev_tx_queue_release(void *queue)
139 struct hns3_tx_queue *txq = queue;
140 struct hns3_adapter *hns;
146 rte_spinlock_lock(&hns->hw.lock);
147 hns3_tx_queue_release(queue);
148 rte_spinlock_unlock(&hns->hw.lock);
152 hns3_fake_rx_queue_release(struct hns3_rx_queue *queue)
154 struct hns3_rx_queue *rxq = queue;
155 struct hns3_adapter *hns;
165 if (hw->fkq_data.rx_queues[idx]) {
166 hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);
167 hw->fkq_data.rx_queues[idx] = NULL;
170 /* free fake rx queue arrays */
171 if (idx == (hw->fkq_data.nb_fake_rx_queues - 1)) {
172 hw->fkq_data.nb_fake_rx_queues = 0;
173 rte_free(hw->fkq_data.rx_queues);
174 hw->fkq_data.rx_queues = NULL;
179 hns3_fake_tx_queue_release(struct hns3_tx_queue *queue)
181 struct hns3_tx_queue *txq = queue;
182 struct hns3_adapter *hns;
192 if (hw->fkq_data.tx_queues[idx]) {
193 hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);
194 hw->fkq_data.tx_queues[idx] = NULL;
197 /* free fake tx queue arrays */
198 if (idx == (hw->fkq_data.nb_fake_tx_queues - 1)) {
199 hw->fkq_data.nb_fake_tx_queues = 0;
200 rte_free(hw->fkq_data.tx_queues);
201 hw->fkq_data.tx_queues = NULL;
206 hns3_free_rx_queues(struct rte_eth_dev *dev)
208 struct hns3_adapter *hns = dev->data->dev_private;
209 struct hns3_fake_queue_data *fkq_data;
210 struct hns3_hw *hw = &hns->hw;
214 nb_rx_q = hw->data->nb_rx_queues;
215 for (i = 0; i < nb_rx_q; i++) {
216 if (dev->data->rx_queues[i]) {
217 hns3_rx_queue_release(dev->data->rx_queues[i]);
218 dev->data->rx_queues[i] = NULL;
222 /* Free fake Rx queues */
223 fkq_data = &hw->fkq_data;
224 for (i = 0; i < fkq_data->nb_fake_rx_queues; i++) {
225 if (fkq_data->rx_queues[i])
226 hns3_fake_rx_queue_release(fkq_data->rx_queues[i]);
231 hns3_free_tx_queues(struct rte_eth_dev *dev)
233 struct hns3_adapter *hns = dev->data->dev_private;
234 struct hns3_fake_queue_data *fkq_data;
235 struct hns3_hw *hw = &hns->hw;
239 nb_tx_q = hw->data->nb_tx_queues;
240 for (i = 0; i < nb_tx_q; i++) {
241 if (dev->data->tx_queues[i]) {
242 hns3_tx_queue_release(dev->data->tx_queues[i]);
243 dev->data->tx_queues[i] = NULL;
247 /* Free fake Tx queues */
248 fkq_data = &hw->fkq_data;
249 for (i = 0; i < fkq_data->nb_fake_tx_queues; i++) {
250 if (fkq_data->tx_queues[i])
251 hns3_fake_tx_queue_release(fkq_data->tx_queues[i]);
256 hns3_free_all_queues(struct rte_eth_dev *dev)
258 hns3_free_rx_queues(dev);
259 hns3_free_tx_queues(dev);
263 hns3_alloc_rx_queue_mbufs(struct hns3_hw *hw, struct hns3_rx_queue *rxq)
265 struct rte_mbuf *mbuf;
269 for (i = 0; i < rxq->nb_rx_desc; i++) {
270 mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
271 if (unlikely(mbuf == NULL)) {
272 hns3_err(hw, "Failed to allocate RXD[%d] for rx queue!",
274 hns3_rx_queue_release_mbufs(rxq);
278 rte_mbuf_refcnt_set(mbuf, 1);
280 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
282 mbuf->port = rxq->port_id;
284 rxq->sw_ring[i].mbuf = mbuf;
285 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
286 rxq->rx_ring[i].addr = dma_addr;
287 rxq->rx_ring[i].rx.bd_base_info = 0;
294 hns3_buf_size2type(uint32_t buf_size)
300 bd_size_type = HNS3_BD_SIZE_512_TYPE;
303 bd_size_type = HNS3_BD_SIZE_1024_TYPE;
306 bd_size_type = HNS3_BD_SIZE_4096_TYPE;
309 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
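	/*
	 * The switch above maps the four supported BD buffer sizes (512,
	 * 1024, 2048 and 4096 bytes) to the hardware BD size type encoding,
	 * with the 2048-byte type as the default.
	 */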
316 hns3_init_rx_queue_hw(struct hns3_rx_queue *rxq)
318 uint32_t rx_buf_len = rxq->rx_buf_len;
319 uint64_t dma_addr = rxq->rx_ring_phys_addr;
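	/*
	 * The 64-bit ring base address is split across two 32-bit registers;
	 * ((dma_addr >> 31) >> 1) is equivalent to (dma_addr >> 32) and
	 * extracts the upper 32 bits.
	 */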
321 hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_L_REG, (uint32_t)dma_addr);
322 hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_H_REG,
323 (uint32_t)((dma_addr >> 31) >> 1));
325 hns3_write_dev(rxq, HNS3_RING_RX_BD_LEN_REG,
326 hns3_buf_size2type(rx_buf_len));
327 hns3_write_dev(rxq, HNS3_RING_RX_BD_NUM_REG,
328 HNS3_CFG_DESC_NUM(rxq->nb_rx_desc));
332 hns3_init_tx_queue_hw(struct hns3_tx_queue *txq)
334 uint64_t dma_addr = txq->tx_ring_phys_addr;
336 hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_L_REG, (uint32_t)dma_addr);
337 hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_H_REG,
338 (uint32_t)((dma_addr >> 31) >> 1));
340 hns3_write_dev(txq, HNS3_RING_TX_BD_NUM_REG,
341 HNS3_CFG_DESC_NUM(txq->nb_tx_desc));
345 hns3_update_all_queues_pvid_proc_en(struct hns3_hw *hw)
347 uint16_t nb_rx_q = hw->data->nb_rx_queues;
348 uint16_t nb_tx_q = hw->data->nb_tx_queues;
349 struct hns3_rx_queue *rxq;
350 struct hns3_tx_queue *txq;
354 pvid_en = hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE;
355 for (i = 0; i < hw->cfg_max_queues; i++) {
357 rxq = hw->data->rx_queues[i];
359 rxq->pvid_sw_discard_en = pvid_en;
362 txq = hw->data->tx_queues[i];
364 txq->pvid_sw_shift_en = pvid_en;
370 hns3_enable_all_queues(struct hns3_hw *hw, bool en)
372 uint16_t nb_rx_q = hw->data->nb_rx_queues;
373 uint16_t nb_tx_q = hw->data->nb_tx_queues;
374 struct hns3_rx_queue *rxq;
375 struct hns3_tx_queue *txq;
380 for (i = 0; i < hw->cfg_max_queues; i++) {
381 if (hns3_dev_indep_txrx_supported(hw)) {
382 rxq = i < nb_rx_q ? hw->data->rx_queues[i] : NULL;
383 txq = i < nb_tx_q ? hw->data->tx_queues[i] : NULL;
			 * After initialization, rxq and txq won't be NULL at
			 * the same time.
389 tqp_base = rxq->io_base;
390 else if (txq != NULL)
391 tqp_base = txq->io_base;
395 rxq = i < nb_rx_q ? hw->data->rx_queues[i] :
396 hw->fkq_data.rx_queues[i - nb_rx_q];
398 tqp_base = rxq->io_base;
		 * This is the master switch used to control the enabling of
		 * a pair of Tx and Rx queues. Both the Rx and Tx queues of a
		 * pair point to the same register.
405 rcb_reg = hns3_read_reg(tqp_base, HNS3_RING_EN_REG);
407 rcb_reg |= BIT(HNS3_RING_EN_B);
409 rcb_reg &= ~BIT(HNS3_RING_EN_B);
410 hns3_write_reg(tqp_base, HNS3_RING_EN_REG, rcb_reg);
415 hns3_enable_txq(struct hns3_tx_queue *txq, bool en)
417 struct hns3_hw *hw = &txq->hns->hw;
420 if (hns3_dev_indep_txrx_supported(hw)) {
421 reg = hns3_read_dev(txq, HNS3_RING_TX_EN_REG);
423 reg |= BIT(HNS3_RING_EN_B);
425 reg &= ~BIT(HNS3_RING_EN_B);
426 hns3_write_dev(txq, HNS3_RING_TX_EN_REG, reg);
432 hns3_enable_rxq(struct hns3_rx_queue *rxq, bool en)
434 struct hns3_hw *hw = &rxq->hns->hw;
437 if (hns3_dev_indep_txrx_supported(hw)) {
438 reg = hns3_read_dev(rxq, HNS3_RING_RX_EN_REG);
440 reg |= BIT(HNS3_RING_EN_B);
442 reg &= ~BIT(HNS3_RING_EN_B);
443 hns3_write_dev(rxq, HNS3_RING_RX_EN_REG, reg);
449 hns3_start_all_txqs(struct rte_eth_dev *dev)
451 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
452 struct hns3_tx_queue *txq;
455 for (i = 0; i < dev->data->nb_tx_queues; i++) {
456 txq = hw->data->tx_queues[i];
			hns3_err(hw, "Tx queue %u not available or not set up.", i);
459 goto start_txqs_fail;
		 * A Tx queue is enabled by default, so it needs to be
		 * disabled when deferred_start is set. There is another
		 * master switch used to control the enabling of a pair of
		 * Tx and Rx queues, and that master switch is disabled by
		 * default.
469 hns3_enable_txq(txq, false);
471 hns3_enable_txq(txq, true);
476 for (j = 0; j < i; j++) {
477 txq = hw->data->tx_queues[j];
478 hns3_enable_txq(txq, false);
484 hns3_start_all_rxqs(struct rte_eth_dev *dev)
486 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
487 struct hns3_rx_queue *rxq;
490 for (i = 0; i < dev->data->nb_rx_queues; i++) {
491 rxq = hw->data->rx_queues[i];
			hns3_err(hw, "Rx queue %u not available or not set up.", i);
494 goto start_rxqs_fail;
		 * An Rx queue is enabled by default, so it needs to be
		 * disabled when deferred_start is set. There is another
		 * master switch used to control the enabling of a pair of
		 * Tx and Rx queues, and that master switch is disabled by
		 * default.
503 if (rxq->rx_deferred_start)
504 hns3_enable_rxq(rxq, false);
506 hns3_enable_rxq(rxq, true);
511 for (j = 0; j < i; j++) {
512 rxq = hw->data->rx_queues[j];
513 hns3_enable_rxq(rxq, false);
519 hns3_stop_all_txqs(struct rte_eth_dev *dev)
521 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
522 struct hns3_tx_queue *txq;
525 for (i = 0; i < dev->data->nb_tx_queues; i++) {
526 txq = hw->data->tx_queues[i];
529 hns3_enable_txq(txq, false);
534 hns3_tqp_enable(struct hns3_hw *hw, uint16_t queue_id, bool enable)
536 struct hns3_cfg_com_tqp_queue_cmd *req;
537 struct hns3_cmd_desc desc;
540 req = (struct hns3_cfg_com_tqp_queue_cmd *)desc.data;
542 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_COM_TQP_QUEUE, false);
543 req->tqp_id = rte_cpu_to_le_16(queue_id);
545 hns3_set_bit(req->enable, HNS3_TQP_ENABLE_B, enable ? 1 : 0);
547 ret = hns3_cmd_send(hw, &desc, 1);
549 hns3_err(hw, "TQP enable fail, ret = %d", ret);
555 hns3_send_reset_tqp_cmd(struct hns3_hw *hw, uint16_t queue_id, bool enable)
557 struct hns3_reset_tqp_queue_cmd *req;
558 struct hns3_cmd_desc desc;
561 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, false);
563 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
564 req->tqp_id = rte_cpu_to_le_16(queue_id);
565 hns3_set_bit(req->reset_req, HNS3_TQP_RESET_B, enable ? 1 : 0);
566 ret = hns3_cmd_send(hw, &desc, 1);
568 hns3_err(hw, "send tqp reset cmd error, queue_id = %u, "
569 "ret = %d", queue_id, ret);
575 hns3_get_tqp_reset_status(struct hns3_hw *hw, uint16_t queue_id,
576 uint8_t *reset_status)
578 struct hns3_reset_tqp_queue_cmd *req;
579 struct hns3_cmd_desc desc;
582 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, true);
584 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
585 req->tqp_id = rte_cpu_to_le_16(queue_id);
587 ret = hns3_cmd_send(hw, &desc, 1);
589 hns3_err(hw, "get tqp reset status error, queue_id = %u, "
590 "ret = %d.", queue_id, ret);
593 *reset_status = hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B);
598 hns3pf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
600 #define HNS3_TQP_RESET_TRY_MS 200
601 uint8_t reset_status;
605 ret = hns3_tqp_enable(hw, queue_id, false);
	 * In the current version, VF is not supported when the PF is driven
	 * by the DPDK driver. All task queue pairs are mapped to the PF
	 * function, so the PF's queue id equals the global queue id in the
	 * PF range.
614 ret = hns3_send_reset_tqp_cmd(hw, queue_id, true);
616 hns3_err(hw, "Send reset tqp cmd fail, ret = %d", ret);
619 end = get_timeofday_ms() + HNS3_TQP_RESET_TRY_MS;
621 /* Wait for tqp hw reset */
622 rte_delay_ms(HNS3_POLL_RESPONE_MS);
623 ret = hns3_get_tqp_reset_status(hw, queue_id, &reset_status);
629 } while (get_timeofday_ms() < end);
633 hns3_err(hw, "reset tqp timeout, queue_id = %u, ret = %d",
638 ret = hns3_send_reset_tqp_cmd(hw, queue_id, false);
640 hns3_err(hw, "Deassert the soft reset fail, ret = %d", ret);
645 hns3_send_reset_tqp_cmd(hw, queue_id, false);
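	/*
	 * Note: the reset request is de-asserted on both the success and the
	 * failure path above, so the queue is never left with the soft reset
	 * asserted.
	 */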
650 hns3vf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
	/* Disable the VF's queue before sending the queue reset msg to PF */
656 ret = hns3_tqp_enable(hw, queue_id, false);
660 memcpy(msg_data, &queue_id, sizeof(uint16_t));
662 ret = hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data,
663 sizeof(msg_data), true, NULL, 0);
665 hns3_err(hw, "fail to reset tqp, queue_id = %u, ret = %d.",
671 hns3_reset_tqp(struct hns3_adapter *hns, uint16_t queue_id)
673 struct hns3_hw *hw = &hns->hw;
676 return hns3vf_reset_tqp(hw, queue_id);
678 return hns3pf_reset_tqp(hw, queue_id);
682 hns3_reset_all_tqps(struct hns3_adapter *hns)
684 struct hns3_hw *hw = &hns->hw;
687 for (i = 0; i < hw->cfg_max_queues; i++) {
688 ret = hns3_reset_tqp(hns, i);
690 hns3_err(hw, "Failed to reset No.%d queue: %d", i, ret);
698 hns3_send_reset_queue_cmd(struct hns3_hw *hw, uint16_t queue_id,
699 enum hns3_ring_type queue_type, bool enable)
701 struct hns3_reset_tqp_queue_cmd *req;
702 struct hns3_cmd_desc desc;
706 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE_INDEP, false);
708 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
709 req->tqp_id = rte_cpu_to_le_16(queue_id);
710 queue_direction = queue_type == HNS3_RING_TYPE_TX ? 0 : 1;
711 req->queue_direction = rte_cpu_to_le_16(queue_direction);
712 hns3_set_bit(req->reset_req, HNS3_TQP_RESET_B, enable ? 1 : 0);
714 ret = hns3_cmd_send(hw, &desc, 1);
716 hns3_err(hw, "send queue reset cmd error, queue_id = %u, "
717 "queue_type = %s, ret = %d.", queue_id,
718 queue_type == HNS3_RING_TYPE_TX ? "Tx" : "Rx", ret);
723 hns3_get_queue_reset_status(struct hns3_hw *hw, uint16_t queue_id,
724 enum hns3_ring_type queue_type,
725 uint8_t *reset_status)
727 struct hns3_reset_tqp_queue_cmd *req;
728 struct hns3_cmd_desc desc;
732 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE_INDEP, true);
734 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
735 req->tqp_id = rte_cpu_to_le_16(queue_id);
736 queue_direction = queue_type == HNS3_RING_TYPE_TX ? 0 : 1;
737 req->queue_direction = rte_cpu_to_le_16(queue_direction);
739 ret = hns3_cmd_send(hw, &desc, 1);
741 hns3_err(hw, "get queue reset status error, queue_id = %u "
742 "queue_type = %s, ret = %d.", queue_id,
743 queue_type == HNS3_RING_TYPE_TX ? "Tx" : "Rx", ret);
747 *reset_status = hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B);
752 hns3_reset_queue(struct hns3_hw *hw, uint16_t queue_id,
753 enum hns3_ring_type queue_type)
755 #define HNS3_QUEUE_RESET_TRY_MS 200
756 struct hns3_tx_queue *txq;
757 struct hns3_rx_queue *rxq;
758 uint32_t reset_wait_times;
759 uint32_t max_wait_times;
760 uint8_t reset_status;
763 if (queue_type == HNS3_RING_TYPE_TX) {
764 txq = hw->data->tx_queues[queue_id];
765 hns3_enable_txq(txq, false);
767 rxq = hw->data->rx_queues[queue_id];
768 hns3_enable_rxq(rxq, false);
771 ret = hns3_send_reset_queue_cmd(hw, queue_id, queue_type, true);
773 hns3_err(hw, "send reset queue cmd fail, ret = %d.", ret);
777 reset_wait_times = 0;
778 max_wait_times = HNS3_QUEUE_RESET_TRY_MS / HNS3_POLL_RESPONE_MS;
779 while (reset_wait_times < max_wait_times) {
780 /* Wait for queue hw reset */
781 rte_delay_ms(HNS3_POLL_RESPONE_MS);
782 ret = hns3_get_queue_reset_status(hw, queue_id,
783 queue_type, &reset_status);
785 goto queue_reset_fail;
793 hns3_err(hw, "reset queue timeout, queue_id = %u, "
794 "queue_type = %s", queue_id,
795 queue_type == HNS3_RING_TYPE_TX ? "Tx" : "Rx");
797 goto queue_reset_fail;
800 ret = hns3_send_reset_queue_cmd(hw, queue_id, queue_type, false);
802 hns3_err(hw, "deassert queue reset fail, ret = %d.", ret);
807 hns3_send_reset_queue_cmd(hw, queue_id, queue_type, false);
813 hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
814 uint8_t gl_idx, uint16_t gl_value)
816 uint32_t offset[] = {HNS3_TQP_INTR_GL0_REG,
817 HNS3_TQP_INTR_GL1_REG,
818 HNS3_TQP_INTR_GL2_REG};
819 uint32_t addr, value;
821 if (gl_idx >= RTE_DIM(offset) || gl_value > HNS3_TQP_INTR_GL_MAX)
824 addr = offset[gl_idx] + queue_id * HNS3_TQP_INTR_REG_SIZE;
825 if (hw->intr.gl_unit == HNS3_INTR_COALESCE_GL_UINT_1US)
826 value = gl_value | HNS3_TQP_INTR_GL_UNIT_1US;
828 value = HNS3_GL_USEC_TO_REG(gl_value);
830 hns3_write_dev(hw, addr, value);
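/*
 * Note: when the interrupt coalescing GL unit is not 1us,
 * HNS3_GL_USEC_TO_REG() converts the microsecond value into the register's
 * native granularity before it is programmed.
 */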
834 hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id, uint16_t rl_value)
836 uint32_t addr, value;
838 if (rl_value > HNS3_TQP_INTR_RL_MAX)
841 addr = HNS3_TQP_INTR_RL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
842 value = HNS3_RL_USEC_TO_REG(rl_value);
844 value |= HNS3_TQP_INTR_RL_ENABLE_MASK;
846 hns3_write_dev(hw, addr, value);
850 hns3_set_queue_intr_ql(struct hns3_hw *hw, uint16_t queue_id, uint16_t ql_value)
854 if (hw->intr.coalesce_mode == HNS3_INTR_COALESCE_NON_QL)
857 addr = HNS3_TQP_INTR_TX_QL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
858 hns3_write_dev(hw, addr, ql_value);
860 addr = HNS3_TQP_INTR_RX_QL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
861 hns3_write_dev(hw, addr, ql_value);
865 hns3_queue_intr_enable(struct hns3_hw *hw, uint16_t queue_id, bool en)
867 uint32_t addr, value;
869 addr = HNS3_TQP_INTR_CTRL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
872 hns3_write_dev(hw, addr, value);
 * Enable all Rx queue interrupts when in interrupt Rx mode.
 * This API is called before enabling queue Rx & Tx (in the normal start or
 * reset recovery scenarios); it is used to restore the hardware Rx queue
 * interrupt enable state that may have been cleared by FLR.
882 hns3_dev_all_rx_queue_intr_enable(struct hns3_hw *hw, bool en)
884 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
885 uint16_t nb_rx_q = hw->data->nb_rx_queues;
888 if (dev->data->dev_conf.intr_conf.rxq == 0)
891 for (i = 0; i < nb_rx_q; i++)
892 hns3_queue_intr_enable(hw, i, en);
896 hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
898 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
899 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
900 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
902 if (dev->data->dev_conf.intr_conf.rxq == 0)
905 hns3_queue_intr_enable(hw, queue_id, true);
907 return rte_intr_ack(intr_handle);
911 hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
913 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
915 if (dev->data->dev_conf.intr_conf.rxq == 0)
918 hns3_queue_intr_enable(hw, queue_id, false);
924 hns3_init_rxq(struct hns3_adapter *hns, uint16_t idx)
926 struct hns3_hw *hw = &hns->hw;
927 struct hns3_rx_queue *rxq;
930 PMD_INIT_FUNC_TRACE();
932 rxq = (struct hns3_rx_queue *)hw->data->rx_queues[idx];
933 ret = hns3_alloc_rx_queue_mbufs(hw, rxq);
935 hns3_err(hw, "fail to alloc mbuf for Rx queue %u, ret = %d.",
940 rxq->next_to_use = 0;
941 rxq->rx_rearm_start = 0;
942 rxq->rx_free_hold = 0;
943 rxq->rx_rearm_nb = 0;
944 rxq->pkt_first_seg = NULL;
945 rxq->pkt_last_seg = NULL;
946 hns3_init_rx_queue_hw(rxq);
947 hns3_rxq_vec_setup(rxq);
953 hns3_init_fake_rxq(struct hns3_adapter *hns, uint16_t idx)
955 struct hns3_hw *hw = &hns->hw;
956 struct hns3_rx_queue *rxq;
958 rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[idx];
959 rxq->next_to_use = 0;
960 rxq->rx_free_hold = 0;
961 rxq->rx_rearm_start = 0;
962 rxq->rx_rearm_nb = 0;
963 hns3_init_rx_queue_hw(rxq);
967 hns3_init_txq(struct hns3_tx_queue *txq)
969 struct hns3_desc *desc;
974 for (i = 0; i < txq->nb_tx_desc; i++) {
975 desc->tx.tp_fe_sc_vld_ra_ri = 0;
979 txq->next_to_use = 0;
980 txq->next_to_clean = 0;
981 txq->tx_bd_ready = txq->nb_tx_desc - 1;
982 hns3_init_tx_queue_hw(txq);
986 hns3_init_tx_ring_tc(struct hns3_adapter *hns)
988 struct hns3_hw *hw = &hns->hw;
989 struct hns3_tx_queue *txq;
992 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
993 struct hns3_tc_queue_info *tc_queue = &hw->tc_queue[i];
996 if (!tc_queue->enable)
999 for (j = 0; j < tc_queue->tqp_count; j++) {
1000 num = tc_queue->tqp_offset + j;
1001 txq = (struct hns3_tx_queue *)hw->data->tx_queues[num];
1005 hns3_write_dev(txq, HNS3_RING_TX_TC_REG, tc_queue->tc);
1011 hns3_init_rx_queues(struct hns3_adapter *hns)
1013 struct hns3_hw *hw = &hns->hw;
1014 struct hns3_rx_queue *rxq;
1018 /* Initialize RSS for queues */
1019 ret = hns3_config_rss(hns);
1021 hns3_err(hw, "failed to configure rss, ret = %d.", ret);
1025 for (i = 0; i < hw->data->nb_rx_queues; i++) {
1026 rxq = (struct hns3_rx_queue *)hw->data->rx_queues[i];
			hns3_err(hw, "Rx queue %u not available or not set up.", i);
1032 if (rxq->rx_deferred_start)
1035 ret = hns3_init_rxq(hns, i);
1037 hns3_err(hw, "failed to init Rx queue %u, ret = %d.", i,
1043 for (i = 0; i < hw->fkq_data.nb_fake_rx_queues; i++)
1044 hns3_init_fake_rxq(hns, i);
1049 for (j = 0; j < i; j++) {
1050 rxq = (struct hns3_rx_queue *)hw->data->rx_queues[j];
1051 hns3_rx_queue_release_mbufs(rxq);
1058 hns3_init_tx_queues(struct hns3_adapter *hns)
1060 struct hns3_hw *hw = &hns->hw;
1061 struct hns3_tx_queue *txq;
1064 for (i = 0; i < hw->data->nb_tx_queues; i++) {
1065 txq = (struct hns3_tx_queue *)hw->data->tx_queues[i];
			hns3_err(hw, "Tx queue %u not available or not set up.", i);
1071 if (txq->tx_deferred_start)
1076 for (i = 0; i < hw->fkq_data.nb_fake_tx_queues; i++) {
1077 txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[i];
1080 hns3_init_tx_ring_tc(hns);
 * Note: this just initializes and sets up the queues; it does not enable
 * the tqps.
1090 hns3_init_queues(struct hns3_adapter *hns, bool reset_queue)
1092 struct hns3_hw *hw = &hns->hw;
1096 ret = hns3_reset_all_tqps(hns);
1098 hns3_err(hw, "failed to reset all queues, ret = %d.",
1104 ret = hns3_init_rx_queues(hns);
1106 hns3_err(hw, "failed to init rx queues, ret = %d.", ret);
1110 ret = hns3_init_tx_queues(hns);
1112 hns3_dev_release_mbufs(hns);
1113 hns3_err(hw, "failed to init tx queues, ret = %d.", ret);
1120 hns3_start_tqps(struct hns3_hw *hw)
1122 struct hns3_tx_queue *txq;
1123 struct hns3_rx_queue *rxq;
1126 hns3_enable_all_queues(hw, true);
1128 for (i = 0; i < hw->data->nb_tx_queues; i++) {
1129 txq = hw->data->tx_queues[i];
1131 hw->data->tx_queue_state[i] =
1132 RTE_ETH_QUEUE_STATE_STARTED;
1135 for (i = 0; i < hw->data->nb_rx_queues; i++) {
1136 rxq = hw->data->rx_queues[i];
1138 hw->data->rx_queue_state[i] =
1139 RTE_ETH_QUEUE_STATE_STARTED;
1144 hns3_stop_tqps(struct hns3_hw *hw)
1148 hns3_enable_all_queues(hw, false);
1150 for (i = 0; i < hw->data->nb_tx_queues; i++)
1151 hw->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
1153 for (i = 0; i < hw->data->nb_rx_queues; i++)
1154 hw->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 * Iterate over all Rx queues and call the callback() function for each
 * one.
1162 * The target eth dev.
1163 * @param[in] callback
1164 * The function to call for each queue.
 * If the callback function returns a nonzero value, the iteration stops
 * and that value is returned.
1167 * The arguments to provide the callback function with.
1170 * 0 on success, otherwise with errno set.
1173 hns3_rxq_iterate(struct rte_eth_dev *dev,
1174 int (*callback)(struct hns3_rx_queue *, void *), void *arg)
1179 if (dev->data->rx_queues == NULL)
1182 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1183 ret = callback(dev->data->rx_queues[i], arg);
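/*
 * Usage sketch for hns3_rxq_iterate() (hypothetical callback, not part of
 * the driver): count the configured Rx queues of a port.
 *
 *	static int
 *	count_rxq_cb(struct hns3_rx_queue *rxq, void *arg)
 *	{
 *		uint16_t *cnt = arg;
 *
 *		if (rxq != NULL)
 *			(*cnt)++;
 *		return 0;
 *	}
 *
 *	uint16_t cnt = 0;
 *	(void)hns3_rxq_iterate(dev, count_rxq_cb, &cnt);
 */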
1192 hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev,
1193 struct hns3_queue_info *q_info)
1195 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1196 const struct rte_memzone *rx_mz;
1197 struct hns3_rx_queue *rxq;
1198 unsigned int rx_desc;
1200 rxq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_rx_queue),
1201 RTE_CACHE_LINE_SIZE, q_info->socket_id);
1203 hns3_err(hw, "Failed to allocate memory for No.%d rx ring!",
1208 /* Allocate rx ring hardware descriptors. */
1209 rxq->queue_id = q_info->idx;
1210 rxq->nb_rx_desc = q_info->nb_desc;
	 * Allocate a little more memory because the Rx vector functions
	 * don't check boundaries on every access.
1216 rx_desc = (rxq->nb_rx_desc + HNS3_DEFAULT_RX_BURST) *
1217 sizeof(struct hns3_desc);
1218 rx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
1219 rx_desc, HNS3_RING_BASE_ALIGN,
1221 if (rx_mz == NULL) {
1222 hns3_err(hw, "Failed to reserve DMA memory for No.%d rx ring!",
1224 hns3_rx_queue_release(rxq);
1228 rxq->rx_ring = (struct hns3_desc *)rx_mz->addr;
1229 rxq->rx_ring_phys_addr = rx_mz->iova;
1231 hns3_dbg(hw, "No.%d rx descriptors iova 0x%" PRIx64, q_info->idx,
1232 rxq->rx_ring_phys_addr);
1238 hns3_fake_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
1239 uint16_t nb_desc, unsigned int socket_id)
1241 struct hns3_adapter *hns = dev->data->dev_private;
1242 struct hns3_hw *hw = &hns->hw;
1243 struct hns3_queue_info q_info;
1244 struct hns3_rx_queue *rxq;
1247 if (hw->fkq_data.rx_queues[idx]) {
1248 hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);
1249 hw->fkq_data.rx_queues[idx] = NULL;
1253 q_info.socket_id = socket_id;
1254 q_info.nb_desc = nb_desc;
1255 q_info.type = "hns3 fake RX queue";
1256 q_info.ring_name = "rx_fake_ring";
1257 rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
1259 hns3_err(hw, "Failed to setup No.%d fake rx ring.", idx);
	/* No need to allocate sw_ring: upper-layer applications don't use it */
1264 rxq->sw_ring = NULL;
1267 rxq->rx_deferred_start = false;
1268 rxq->port_id = dev->data->port_id;
1269 rxq->configured = true;
1270 nb_rx_q = dev->data->nb_rx_queues;
1271 rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
1272 (nb_rx_q + idx) * HNS3_TQP_REG_SIZE);
1273 rxq->rx_buf_len = HNS3_MIN_BD_BUF_SIZE;
1275 rte_spinlock_lock(&hw->lock);
1276 hw->fkq_data.rx_queues[idx] = rxq;
1277 rte_spinlock_unlock(&hw->lock);
1283 hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev,
1284 struct hns3_queue_info *q_info)
1286 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1287 const struct rte_memzone *tx_mz;
1288 struct hns3_tx_queue *txq;
1289 struct hns3_desc *desc;
1290 unsigned int tx_desc;
1293 txq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_tx_queue),
1294 RTE_CACHE_LINE_SIZE, q_info->socket_id);
1296 hns3_err(hw, "Failed to allocate memory for No.%d tx ring!",
1301 /* Allocate tx ring hardware descriptors. */
1302 txq->queue_id = q_info->idx;
1303 txq->nb_tx_desc = q_info->nb_desc;
1304 tx_desc = txq->nb_tx_desc * sizeof(struct hns3_desc);
1305 tx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
1306 tx_desc, HNS3_RING_BASE_ALIGN,
1308 if (tx_mz == NULL) {
1309 hns3_err(hw, "Failed to reserve DMA memory for No.%d tx ring!",
1311 hns3_tx_queue_release(txq);
1315 txq->tx_ring = (struct hns3_desc *)tx_mz->addr;
1316 txq->tx_ring_phys_addr = tx_mz->iova;
1318 hns3_dbg(hw, "No.%d tx descriptors iova 0x%" PRIx64, q_info->idx,
1319 txq->tx_ring_phys_addr);
1322 desc = txq->tx_ring;
1323 for (i = 0; i < txq->nb_tx_desc; i++) {
1324 desc->tx.tp_fe_sc_vld_ra_ri = 0;
1332 hns3_fake_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
1333 uint16_t nb_desc, unsigned int socket_id)
1335 struct hns3_adapter *hns = dev->data->dev_private;
1336 struct hns3_hw *hw = &hns->hw;
1337 struct hns3_queue_info q_info;
1338 struct hns3_tx_queue *txq;
1341 if (hw->fkq_data.tx_queues[idx] != NULL) {
1342 hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);
1343 hw->fkq_data.tx_queues[idx] = NULL;
1347 q_info.socket_id = socket_id;
1348 q_info.nb_desc = nb_desc;
1349 q_info.type = "hns3 fake TX queue";
1350 q_info.ring_name = "tx_fake_ring";
1351 txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
1353 hns3_err(hw, "Failed to setup No.%d fake tx ring.", idx);
	/* No need to allocate sw_ring: upper-layer applications don't use it */
1358 txq->sw_ring = NULL;
1362 txq->tx_deferred_start = false;
1363 txq->port_id = dev->data->port_id;
1364 txq->configured = true;
1365 nb_tx_q = dev->data->nb_tx_queues;
1366 txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
1367 (nb_tx_q + idx) * HNS3_TQP_REG_SIZE);
1369 rte_spinlock_lock(&hw->lock);
1370 hw->fkq_data.tx_queues[idx] = txq;
1371 rte_spinlock_unlock(&hw->lock);
1377 hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
1379 uint16_t old_nb_queues = hw->fkq_data.nb_fake_rx_queues;
1383 if (hw->fkq_data.rx_queues == NULL && nb_queues != 0) {
1384 /* first time configuration */
1386 size = sizeof(hw->fkq_data.rx_queues[0]) * nb_queues;
1387 hw->fkq_data.rx_queues = rte_zmalloc("fake_rx_queues", size,
1388 RTE_CACHE_LINE_SIZE);
1389 if (hw->fkq_data.rx_queues == NULL) {
1390 hw->fkq_data.nb_fake_rx_queues = 0;
1393 } else if (hw->fkq_data.rx_queues != NULL && nb_queues != 0) {
1395 rxq = hw->fkq_data.rx_queues;
1396 for (i = nb_queues; i < old_nb_queues; i++)
1397 hns3_dev_rx_queue_release(rxq[i]);
1399 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
1400 RTE_CACHE_LINE_SIZE);
1403 if (nb_queues > old_nb_queues) {
1404 uint16_t new_qs = nb_queues - old_nb_queues;
1405 memset(rxq + old_nb_queues, 0, sizeof(rxq[0]) * new_qs);
1408 hw->fkq_data.rx_queues = rxq;
1409 } else if (hw->fkq_data.rx_queues != NULL && nb_queues == 0) {
1410 rxq = hw->fkq_data.rx_queues;
1411 for (i = nb_queues; i < old_nb_queues; i++)
1412 hns3_dev_rx_queue_release(rxq[i]);
1414 rte_free(hw->fkq_data.rx_queues);
1415 hw->fkq_data.rx_queues = NULL;
1418 hw->fkq_data.nb_fake_rx_queues = nb_queues;
1424 hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
1426 uint16_t old_nb_queues = hw->fkq_data.nb_fake_tx_queues;
1430 if (hw->fkq_data.tx_queues == NULL && nb_queues != 0) {
1431 /* first time configuration */
1433 size = sizeof(hw->fkq_data.tx_queues[0]) * nb_queues;
1434 hw->fkq_data.tx_queues = rte_zmalloc("fake_tx_queues", size,
1435 RTE_CACHE_LINE_SIZE);
1436 if (hw->fkq_data.tx_queues == NULL) {
1437 hw->fkq_data.nb_fake_tx_queues = 0;
1440 } else if (hw->fkq_data.tx_queues != NULL && nb_queues != 0) {
1442 txq = hw->fkq_data.tx_queues;
1443 for (i = nb_queues; i < old_nb_queues; i++)
1444 hns3_dev_tx_queue_release(txq[i]);
1445 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
1446 RTE_CACHE_LINE_SIZE);
1449 if (nb_queues > old_nb_queues) {
1450 uint16_t new_qs = nb_queues - old_nb_queues;
1451 memset(txq + old_nb_queues, 0, sizeof(txq[0]) * new_qs);
1454 hw->fkq_data.tx_queues = txq;
1455 } else if (hw->fkq_data.tx_queues != NULL && nb_queues == 0) {
1456 txq = hw->fkq_data.tx_queues;
1457 for (i = nb_queues; i < old_nb_queues; i++)
1458 hns3_dev_tx_queue_release(txq[i]);
1460 rte_free(hw->fkq_data.tx_queues);
1461 hw->fkq_data.tx_queues = NULL;
1463 hw->fkq_data.nb_fake_tx_queues = nb_queues;
1469 hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
1472 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1473 uint16_t rx_need_add_nb_q;
1474 uint16_t tx_need_add_nb_q;
	/* Set up the new number of fake RX/TX queues and reconfigure the device. */
1480 rx_need_add_nb_q = hw->cfg_max_queues - nb_rx_q;
1481 tx_need_add_nb_q = hw->cfg_max_queues - nb_tx_q;
1482 ret = hns3_fake_rx_queue_config(hw, rx_need_add_nb_q);
		hns3_err(hw, "Failed to configure fake rx queues: %d", ret);
1488 ret = hns3_fake_tx_queue_config(hw, tx_need_add_nb_q);
		hns3_err(hw, "Failed to configure fake tx queues: %d", ret);
1491 goto cfg_fake_tx_q_fail;
1494 /* Allocate and set up fake RX queue per Ethernet port. */
1495 port_id = hw->data->port_id;
1496 for (q = 0; q < rx_need_add_nb_q; q++) {
1497 ret = hns3_fake_rx_queue_setup(dev, q, HNS3_MIN_RING_DESC,
1498 rte_eth_dev_socket_id(port_id));
1500 goto setup_fake_rx_q_fail;
1503 /* Allocate and set up fake TX queue per Ethernet port. */
1504 for (q = 0; q < tx_need_add_nb_q; q++) {
1505 ret = hns3_fake_tx_queue_setup(dev, q, HNS3_MIN_RING_DESC,
1506 rte_eth_dev_socket_id(port_id));
1508 goto setup_fake_tx_q_fail;
1513 setup_fake_tx_q_fail:
1514 setup_fake_rx_q_fail:
1515 (void)hns3_fake_tx_queue_config(hw, 0);
1517 (void)hns3_fake_rx_queue_config(hw, 0);
1523 hns3_dev_release_mbufs(struct hns3_adapter *hns)
1525 struct rte_eth_dev_data *dev_data = hns->hw.data;
1526 struct hns3_rx_queue *rxq;
1527 struct hns3_tx_queue *txq;
1530 if (dev_data->rx_queues)
1531 for (i = 0; i < dev_data->nb_rx_queues; i++) {
1532 rxq = dev_data->rx_queues[i];
1535 hns3_rx_queue_release_mbufs(rxq);
1538 if (dev_data->tx_queues)
1539 for (i = 0; i < dev_data->nb_tx_queues; i++) {
1540 txq = dev_data->tx_queues[i];
1543 hns3_tx_queue_release_mbufs(txq);
1548 hns3_rx_buf_len_calc(struct rte_mempool *mp, uint16_t *rx_buf_len)
1550 uint16_t vld_buf_size;
1551 uint16_t num_hw_specs;
	 * The hns3 network engine supports only 4 typical buffer size
	 * specifications; the buffer size affects the max packet_len and the
	 * max number of segments when HW GRO is enabled on the receive side.
	 * The relationship between them is as follows:
1559 * rx_buf_size | max_gro_pkt_len | max_gro_nb_seg
1560 * ---------------------|-------------------|----------------
1561 * HNS3_4K_BD_BUF_SIZE | 60KB | 15
1562 * HNS3_2K_BD_BUF_SIZE | 62KB | 31
1563 * HNS3_1K_BD_BUF_SIZE | 63KB | 63
1564 * HNS3_512_BD_BUF_SIZE | 31.5KB | 63
1566 static const uint16_t hw_rx_buf_size[] = {
1567 HNS3_4K_BD_BUF_SIZE,
1568 HNS3_2K_BD_BUF_SIZE,
1569 HNS3_1K_BD_BUF_SIZE,
1570 HNS3_512_BD_BUF_SIZE
1573 vld_buf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
1574 RTE_PKTMBUF_HEADROOM);
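	/*
	 * Example: a mempool whose data room is 2048 + RTE_PKTMBUF_HEADROOM
	 * bytes leaves vld_buf_size = 2048, so the loop below selects
	 * HNS3_2K_BD_BUF_SIZE.
	 */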
1576 if (vld_buf_size < HNS3_MIN_BD_BUF_SIZE)
1579 num_hw_specs = RTE_DIM(hw_rx_buf_size);
1580 for (i = 0; i < num_hw_specs; i++) {
1581 if (vld_buf_size >= hw_rx_buf_size[i]) {
1582 *rx_buf_len = hw_rx_buf_size[i];
1590 hns3_rxq_conf_runtime_check(struct hns3_hw *hw, uint16_t buf_size,
1593 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
1594 struct rte_eth_rxmode *rxmode = &hw->data->dev_conf.rxmode;
1595 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
1596 uint16_t min_vec_bds;
	 * The HNS3 hardware network engine uses scattered Rx by default. If
	 * the driver does not work in scattered mode, packets larger than
	 * buf_size but smaller than max_rx_pkt_len are distributed across
	 * multiple BDs, a situation the driver cannot handle.
1604 if (!hw->data->scattered_rx && rxmode->max_rx_pkt_len > buf_size) {
1605 hns3_err(hw, "max_rx_pkt_len is not allowed to be set greater "
1606 "than rx_buf_len if scattered is off.");
1610 if (pkt_burst == hns3_recv_pkts_vec) {
1611 min_vec_bds = HNS3_DEFAULT_RXQ_REARM_THRESH +
1612 HNS3_DEFAULT_RX_BURST;
1613 if (nb_desc < min_vec_bds ||
1614 nb_desc % HNS3_DEFAULT_RXQ_REARM_THRESH) {
			hns3_err(hw, "if Rx burst mode is vector, the number "
				 "of descriptors must be no less than the "
				 "min vector bds:%u, and must be divisible "
				 "by the rxq rearm thresh:%u.",
				 min_vec_bds, HNS3_DEFAULT_RXQ_REARM_THRESH);
1627 hns3_rx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_rxconf *conf,
1628 struct rte_mempool *mp, uint16_t nb_desc,
1633 if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||
1634 nb_desc % HNS3_ALIGN_RING_DESC) {
1635 hns3_err(hw, "Number (%u) of rx descriptors is invalid",
1640 if (conf->rx_drop_en == 0)
		hns3_warn(hw, "if no descriptors are available, packets are "
			  "always dropped; rx_drop_en is fixed to 1");
1644 if (hns3_rx_buf_len_calc(mp, buf_size)) {
1645 hns3_err(hw, "rxq mbufs' data room size (%u) is not enough! "
1646 "minimal data room size (%u).",
1647 rte_pktmbuf_data_room_size(mp),
1648 HNS3_MIN_BD_BUF_SIZE + RTE_PKTMBUF_HEADROOM);
1652 if (hw->data->dev_started) {
1653 ret = hns3_rxq_conf_runtime_check(hw, *buf_size, nb_desc);
1655 hns3_err(hw, "Rx queue runtime setup fail.");
1664 hns3_get_tqp_reg_offset(uint16_t queue_id)
1666 uint32_t reg_offset;
	/* Queues with id >= HNS3_MIN_EXTEND_QUEUE_ID need an extended offset. */
1669 if (queue_id < HNS3_MIN_EXTEND_QUEUE_ID)
1670 reg_offset = HNS3_TQP_REG_OFFSET + queue_id * HNS3_TQP_REG_SIZE;
1672 reg_offset = HNS3_TQP_REG_OFFSET + HNS3_TQP_EXT_REG_OFFSET +
1673 (queue_id - HNS3_MIN_EXTEND_QUEUE_ID) *
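	/*
	 * For example, queue 0 is addressed at HNS3_TQP_REG_OFFSET, while
	 * the first queue at or above HNS3_MIN_EXTEND_QUEUE_ID starts at
	 * HNS3_TQP_REG_OFFSET + HNS3_TQP_EXT_REG_OFFSET.
	 */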
1680 hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
1681 unsigned int socket_id, const struct rte_eth_rxconf *conf,
1682 struct rte_mempool *mp)
1684 struct hns3_adapter *hns = dev->data->dev_private;
1685 struct hns3_hw *hw = &hns->hw;
1686 struct hns3_queue_info q_info;
1687 struct hns3_rx_queue *rxq;
1688 uint16_t rx_buf_size;
1692 ret = hns3_rx_queue_conf_check(hw, conf, mp, nb_desc, &rx_buf_size);
1696 if (dev->data->rx_queues[idx]) {
1697 hns3_rx_queue_release(dev->data->rx_queues[idx]);
1698 dev->data->rx_queues[idx] = NULL;
1702 q_info.socket_id = socket_id;
1703 q_info.nb_desc = nb_desc;
1704 q_info.type = "hns3 RX queue";
1705 q_info.ring_name = "rx_ring";
1707 rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
1710 "Failed to alloc mem and reserve DMA mem for rx ring!");
1715 rxq->ptype_tbl = &hns->ptype_tbl;
1717 rxq->rx_free_thresh = (conf->rx_free_thresh > 0) ?
1718 conf->rx_free_thresh : HNS3_DEFAULT_RX_FREE_THRESH;
1720 rxq->rx_deferred_start = conf->rx_deferred_start;
1721 if (rxq->rx_deferred_start && !hns3_dev_indep_txrx_supported(hw)) {
1722 hns3_warn(hw, "deferred start is not supported.");
1723 rxq->rx_deferred_start = false;
1726 rx_entry_len = (rxq->nb_rx_desc + HNS3_DEFAULT_RX_BURST) *
1727 sizeof(struct hns3_entry);
1728 rxq->sw_ring = rte_zmalloc_socket("hns3 RX sw ring", rx_entry_len,
1729 RTE_CACHE_LINE_SIZE, socket_id);
1730 if (rxq->sw_ring == NULL) {
1731 hns3_err(hw, "Failed to allocate memory for rx sw ring!");
1732 hns3_rx_queue_release(rxq);
1736 rxq->next_to_use = 0;
1737 rxq->rx_free_hold = 0;
1738 rxq->rx_rearm_start = 0;
1739 rxq->rx_rearm_nb = 0;
1740 rxq->pkt_first_seg = NULL;
1741 rxq->pkt_last_seg = NULL;
1742 rxq->port_id = dev->data->port_id;
	 * For hns3 PF device, if the VLAN mode is HW_SHIFT_AND_DISCARD_MODE,
	 * the pvid_sw_discard_en in the queue struct should not be changed,
	 * because PVID-related operations do not need to be processed by the
	 * PMD. For hns3 VF device, whether it needs to process PVID depends
	 * on the configuration of the PF kernel mode netdev driver. The
	 * related PF configuration is delivered through the mailbox and is
	 * finally reflected in port_base_vlan_cfg.
1752 if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
1753 rxq->pvid_sw_discard_en = hw->port_base_vlan_cfg.state ==
1754 HNS3_PORT_BASE_VLAN_ENABLE;
1756 rxq->pvid_sw_discard_en = false;
1757 rxq->configured = true;
	rxq->io_base = (void *)((char *)hw->io_base +
				hns3_get_tqp_reg_offset(idx));
1762 rxq->io_head_reg = (volatile void *)((char *)rxq->io_base +
1763 HNS3_RING_RX_HEAD_REG);
1764 rxq->rx_buf_len = rx_buf_size;
1766 rxq->pkt_len_errors = 0;
1767 rxq->l3_csum_errors = 0;
1768 rxq->l4_csum_errors = 0;
1769 rxq->ol3_csum_errors = 0;
1770 rxq->ol4_csum_errors = 0;
	/* The CRC length set here is used to amend the packet length */
1773 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
1774 rxq->crc_len = RTE_ETHER_CRC_LEN;
1778 rxq->bulk_mbuf_num = 0;
1780 rte_spinlock_lock(&hw->lock);
1781 dev->data->rx_queues[idx] = rxq;
1782 rte_spinlock_unlock(&hw->lock);
1788 hns3_rx_scattered_reset(struct rte_eth_dev *dev)
1790 struct hns3_adapter *hns = dev->data->dev_private;
1791 struct hns3_hw *hw = &hns->hw;
1794 dev->data->scattered_rx = false;
1798 hns3_rx_scattered_calc(struct rte_eth_dev *dev)
1800 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1801 struct hns3_adapter *hns = dev->data->dev_private;
1802 struct hns3_hw *hw = &hns->hw;
1803 struct hns3_rx_queue *rxq;
1806 if (dev->data->rx_queues == NULL)
1809 for (queue_id = 0; queue_id < dev->data->nb_rx_queues; queue_id++) {
1810 rxq = dev->data->rx_queues[queue_id];
1811 if (hw->rx_buf_len == 0)
1812 hw->rx_buf_len = rxq->rx_buf_len;
1814 hw->rx_buf_len = RTE_MIN(hw->rx_buf_len,
1818 if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SCATTER ||
1819 dev_conf->rxmode.max_rx_pkt_len > hw->rx_buf_len)
1820 dev->data->scattered_rx = true;
1824 hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1826 static const uint32_t ptypes[] = {
1828 RTE_PTYPE_L2_ETHER_VLAN,
1829 RTE_PTYPE_L2_ETHER_QINQ,
1830 RTE_PTYPE_L2_ETHER_LLDP,
1831 RTE_PTYPE_L2_ETHER_ARP,
1833 RTE_PTYPE_L3_IPV4_EXT,
1835 RTE_PTYPE_L3_IPV6_EXT,
1841 RTE_PTYPE_TUNNEL_GRE,
1842 RTE_PTYPE_INNER_L2_ETHER,
1843 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1844 RTE_PTYPE_INNER_L2_ETHER_QINQ,
1845 RTE_PTYPE_INNER_L3_IPV4,
1846 RTE_PTYPE_INNER_L3_IPV6,
1847 RTE_PTYPE_INNER_L3_IPV4_EXT,
1848 RTE_PTYPE_INNER_L3_IPV6_EXT,
1849 RTE_PTYPE_INNER_L4_UDP,
1850 RTE_PTYPE_INNER_L4_TCP,
1851 RTE_PTYPE_INNER_L4_SCTP,
1852 RTE_PTYPE_INNER_L4_ICMP,
1853 RTE_PTYPE_TUNNEL_VXLAN,
1854 RTE_PTYPE_TUNNEL_NVGRE,
1858 if (dev->rx_pkt_burst == hns3_recv_pkts ||
1859 dev->rx_pkt_burst == hns3_recv_scattered_pkts ||
1860 dev->rx_pkt_burst == hns3_recv_pkts_vec ||
1861 dev->rx_pkt_burst == hns3_recv_pkts_vec_sve)
1868 hns3_init_non_tunnel_ptype_tbl(struct hns3_ptype_table *tbl)
1870 tbl->l2l3table[0][0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
1871 tbl->l2l3table[0][1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
1872 tbl->l2l3table[0][2] = RTE_PTYPE_L2_ETHER_ARP;
1873 tbl->l2l3table[0][3] = RTE_PTYPE_L2_ETHER;
1874 tbl->l2l3table[0][4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT;
1875 tbl->l2l3table[0][5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT;
1876 tbl->l2l3table[0][6] = RTE_PTYPE_L2_ETHER_LLDP;
1877 tbl->l2l3table[0][15] = RTE_PTYPE_L2_ETHER;
1879 tbl->l2l3table[1][0] = RTE_PTYPE_L2_ETHER_VLAN | RTE_PTYPE_L3_IPV4;
1880 tbl->l2l3table[1][1] = RTE_PTYPE_L2_ETHER_VLAN | RTE_PTYPE_L3_IPV6;
1881 tbl->l2l3table[1][2] = RTE_PTYPE_L2_ETHER_ARP;
1882 tbl->l2l3table[1][3] = RTE_PTYPE_L2_ETHER_VLAN;
1883 tbl->l2l3table[1][4] = RTE_PTYPE_L2_ETHER_VLAN | RTE_PTYPE_L3_IPV4_EXT;
1884 tbl->l2l3table[1][5] = RTE_PTYPE_L2_ETHER_VLAN | RTE_PTYPE_L3_IPV6_EXT;
1885 tbl->l2l3table[1][6] = RTE_PTYPE_L2_ETHER_LLDP;
1886 tbl->l2l3table[1][15] = RTE_PTYPE_L2_ETHER_VLAN;
1888 tbl->l2l3table[2][0] = RTE_PTYPE_L2_ETHER_QINQ | RTE_PTYPE_L3_IPV4;
1889 tbl->l2l3table[2][1] = RTE_PTYPE_L2_ETHER_QINQ | RTE_PTYPE_L3_IPV6;
1890 tbl->l2l3table[2][2] = RTE_PTYPE_L2_ETHER_ARP;
1891 tbl->l2l3table[2][3] = RTE_PTYPE_L2_ETHER_QINQ;
1892 tbl->l2l3table[2][4] = RTE_PTYPE_L2_ETHER_QINQ | RTE_PTYPE_L3_IPV4_EXT;
1893 tbl->l2l3table[2][5] = RTE_PTYPE_L2_ETHER_QINQ | RTE_PTYPE_L3_IPV6_EXT;
1894 tbl->l2l3table[2][6] = RTE_PTYPE_L2_ETHER_LLDP;
1895 tbl->l2l3table[2][15] = RTE_PTYPE_L2_ETHER_QINQ;
1897 tbl->l4table[0] = RTE_PTYPE_L4_UDP;
1898 tbl->l4table[1] = RTE_PTYPE_L4_TCP;
1899 tbl->l4table[2] = RTE_PTYPE_TUNNEL_GRE;
1900 tbl->l4table[3] = RTE_PTYPE_L4_SCTP;
1901 tbl->l4table[4] = RTE_PTYPE_L4_IGMP;
1902 tbl->l4table[5] = RTE_PTYPE_L4_ICMP;
1906 hns3_init_tunnel_ptype_tbl(struct hns3_ptype_table *tbl)
1908 tbl->inner_l2table[0] = RTE_PTYPE_INNER_L2_ETHER;
1909 tbl->inner_l2table[1] = RTE_PTYPE_INNER_L2_ETHER_VLAN;
1910 tbl->inner_l2table[2] = RTE_PTYPE_INNER_L2_ETHER_QINQ;
1912 tbl->inner_l3table[0] = RTE_PTYPE_INNER_L3_IPV4;
1913 tbl->inner_l3table[1] = RTE_PTYPE_INNER_L3_IPV6;
	/* There is no ptype for inner ARP/RARP */
1915 tbl->inner_l3table[2] = RTE_PTYPE_UNKNOWN;
1916 tbl->inner_l3table[3] = RTE_PTYPE_UNKNOWN;
1917 tbl->inner_l3table[4] = RTE_PTYPE_INNER_L3_IPV4_EXT;
1918 tbl->inner_l3table[5] = RTE_PTYPE_INNER_L3_IPV6_EXT;
1920 tbl->inner_l4table[0] = RTE_PTYPE_INNER_L4_UDP;
1921 tbl->inner_l4table[1] = RTE_PTYPE_INNER_L4_TCP;
	/* There is no ptype for inner GRE */
1923 tbl->inner_l4table[2] = RTE_PTYPE_UNKNOWN;
1924 tbl->inner_l4table[3] = RTE_PTYPE_INNER_L4_SCTP;
	/* There is no ptype for inner IGMP */
1926 tbl->inner_l4table[4] = RTE_PTYPE_UNKNOWN;
1927 tbl->inner_l4table[5] = RTE_PTYPE_INNER_L4_ICMP;
1929 tbl->ol2table[0] = RTE_PTYPE_L2_ETHER;
1930 tbl->ol2table[1] = RTE_PTYPE_L2_ETHER_VLAN;
1931 tbl->ol2table[2] = RTE_PTYPE_L2_ETHER_QINQ;
1933 tbl->ol3table[0] = RTE_PTYPE_L3_IPV4;
1934 tbl->ol3table[1] = RTE_PTYPE_L3_IPV6;
1935 tbl->ol3table[2] = RTE_PTYPE_UNKNOWN;
1936 tbl->ol3table[3] = RTE_PTYPE_UNKNOWN;
1937 tbl->ol3table[4] = RTE_PTYPE_L3_IPV4_EXT;
1938 tbl->ol3table[5] = RTE_PTYPE_L3_IPV6_EXT;
1940 tbl->ol4table[0] = RTE_PTYPE_UNKNOWN;
1941 tbl->ol4table[1] = RTE_PTYPE_TUNNEL_VXLAN;
1942 tbl->ol4table[2] = RTE_PTYPE_TUNNEL_NVGRE;
1946 hns3_init_rx_ptype_tble(struct rte_eth_dev *dev)
1948 struct hns3_adapter *hns = dev->data->dev_private;
1949 struct hns3_ptype_table *tbl = &hns->ptype_tbl;
1951 memset(tbl, 0, sizeof(*tbl));
1953 hns3_init_non_tunnel_ptype_tbl(tbl);
1954 hns3_init_tunnel_ptype_tbl(tbl);
1958 hns3_rxd_to_vlan_tci(struct hns3_rx_queue *rxq, struct rte_mbuf *mb,
1959 uint32_t l234_info, const struct hns3_desc *rxd)
1961 #define HNS3_STRP_STATUS_NUM 0x4
1963 #define HNS3_NO_STRP_VLAN_VLD 0x0
1964 #define HNS3_INNER_STRP_VLAN_VLD 0x1
1965 #define HNS3_OUTER_STRP_VLAN_VLD 0x2
1966 uint32_t strip_status;
1967 uint32_t report_mode;
	 * Due to a HW limitation, the vlan tag is always inserted into the RX
	 * descriptor when the tag is stripped from the packet. The driver
	 * needs to determine which tag to report to the mbuf according to
	 * the PVID configuration and the vlan stripped status.
1975 static const uint32_t report_type[][HNS3_STRP_STATUS_NUM] = {
1977 HNS3_NO_STRP_VLAN_VLD,
1978 HNS3_OUTER_STRP_VLAN_VLD,
1979 HNS3_INNER_STRP_VLAN_VLD,
1980 HNS3_OUTER_STRP_VLAN_VLD
1983 HNS3_NO_STRP_VLAN_VLD,
1984 HNS3_NO_STRP_VLAN_VLD,
1985 HNS3_NO_STRP_VLAN_VLD,
1986 HNS3_INNER_STRP_VLAN_VLD
1989 strip_status = hns3_get_field(l234_info, HNS3_RXD_STRP_TAGP_M,
1990 HNS3_RXD_STRP_TAGP_S);
1991 report_mode = report_type[rxq->pvid_sw_discard_en][strip_status];
1992 switch (report_mode) {
1993 case HNS3_NO_STRP_VLAN_VLD:
1996 case HNS3_INNER_STRP_VLAN_VLD:
1997 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1998 mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.vlan_tag);
2000 case HNS3_OUTER_STRP_VLAN_VLD:
2001 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
2002 mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.ot_vlan_tag);
2011 recalculate_data_len(struct rte_mbuf *first_seg, struct rte_mbuf *last_seg,
2012 struct rte_mbuf *rxm, struct hns3_rx_queue *rxq,
2015 uint8_t crc_len = rxq->crc_len;
2017 if (data_len <= crc_len) {
2018 rte_pktmbuf_free_seg(rxm);
2019 first_seg->nb_segs--;
2020 last_seg->data_len = (uint16_t)(last_seg->data_len -
2021 (crc_len - data_len));
2022 last_seg->next = NULL;
2024 rxm->data_len = (uint16_t)(data_len - crc_len);
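/*
 * Example: with a 4 byte CRC (RTE_ETHER_CRC_LEN), if the last BD carries
 * only 2 bytes, both are CRC bytes: that segment is freed above and the
 * remaining 2 CRC bytes are trimmed from the previous segment's data_len.
 */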
2027 static inline struct rte_mbuf *
2028 hns3_rx_alloc_buffer(struct hns3_rx_queue *rxq)
2032 if (likely(rxq->bulk_mbuf_num > 0))
2033 return rxq->bulk_mbuf[--rxq->bulk_mbuf_num];
2035 ret = rte_mempool_get_bulk(rxq->mb_pool, (void **)rxq->bulk_mbuf,
2036 HNS3_BULK_ALLOC_MBUF_NUM);
2037 if (likely(ret == 0)) {
2038 rxq->bulk_mbuf_num = HNS3_BULK_ALLOC_MBUF_NUM;
2039 return rxq->bulk_mbuf[--rxq->bulk_mbuf_num];
2041 return rte_mbuf_raw_alloc(rxq->mb_pool);
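/*
 * Note: hns3_rx_alloc_buffer() serves mbufs from a small per-queue bulk
 * cache that is refilled HNS3_BULK_ALLOC_MBUF_NUM mbufs at a time; only
 * when the bulk get fails does it fall back to a single raw allocation.
 */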
2045 hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
2047 volatile struct hns3_desc *rx_ring; /* RX ring (desc) */
	volatile struct hns3_desc *rxdp; /* pointer to the current desc */
2049 struct hns3_rx_queue *rxq; /* RX queue */
2050 struct hns3_entry *sw_ring;
2051 struct hns3_entry *rxe;
2052 struct hns3_desc rxd;
	struct rte_mbuf *nmb; /* pointer to the new mbuf */
2054 struct rte_mbuf *rxm;
2055 uint32_t bd_base_info;
2068 rx_ring = rxq->rx_ring;
2069 sw_ring = rxq->sw_ring;
2070 rx_id = rxq->next_to_use;
2072 while (nb_rx < nb_pkts) {
2073 rxdp = &rx_ring[rx_id];
2074 bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
2075 if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
2078 rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -
2079 (1u << HNS3_RXD_VLD_B)];
2081 nmb = hns3_rx_alloc_buffer(rxq);
2082 if (unlikely(nmb == NULL)) {
2085 port_id = rxq->port_id;
2086 rte_eth_devices[port_id].data->rx_mbuf_alloc_failed++;
2091 rxe = &sw_ring[rx_id];
2093 if (unlikely(rx_id == rxq->nb_rx_desc))
2096 rte_prefetch0(sw_ring[rx_id].mbuf);
2097 if ((rx_id & HNS3_RX_RING_PREFETCTH_MASK) == 0) {
2098 rte_prefetch0(&rx_ring[rx_id]);
2099 rte_prefetch0(&sw_ring[rx_id]);
2105 dma_addr = rte_mbuf_data_iova_default(nmb);
2106 rxdp->addr = rte_cpu_to_le_64(dma_addr);
2107 rxdp->rx.bd_base_info = 0;
2109 rxm->data_off = RTE_PKTMBUF_HEADROOM;
2110 rxm->pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.rx.pkt_len)) -
2112 rxm->data_len = rxm->pkt_len;
2113 rxm->port = rxq->port_id;
2114 rxm->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash);
2115 rxm->ol_flags = PKT_RX_RSS_HASH;
2116 if (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) {
2118 rte_le_to_cpu_16(rxd.rx.fd_id);
2119 rxm->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
		/* Load the remaining descriptor data and extract the needed fields */
2125 l234_info = rte_le_to_cpu_32(rxd.rx.l234_info);
2126 ol_info = rte_le_to_cpu_32(rxd.rx.ol_info);
2127 ret = hns3_handle_bdinfo(rxq, rxm, bd_base_info,
2128 l234_info, &cksum_err);
2132 rxm->packet_type = hns3_rx_calc_ptype(rxq, l234_info, ol_info);
2134 if (likely(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
2135 hns3_rx_set_cksum_flag(rxm, rxm->packet_type,
2137 hns3_rxd_to_vlan_tci(rxq, rxm, l234_info, &rxd);
2139 rx_pkts[nb_rx++] = rxm;
2142 rte_pktmbuf_free(rxm);
2145 rxq->next_to_use = rx_id;
2146 rxq->rx_free_hold += nb_rx_bd;
2147 if (rxq->rx_free_hold > rxq->rx_free_thresh) {
2148 hns3_write_reg_opt(rxq->io_head_reg, rxq->rx_free_hold);
2149 rxq->rx_free_hold = 0;
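	/*
	 * The head register write above acts as a doorbell: it is issued
	 * only once more than rx_free_thresh BDs have been consumed, which
	 * batches the MMIO writes instead of doing one per received packet.
	 */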
2156 hns3_recv_scattered_pkts(void *rx_queue,
2157 struct rte_mbuf **rx_pkts,
2160 volatile struct hns3_desc *rx_ring; /* RX ring (desc) */
	volatile struct hns3_desc *rxdp; /* pointer to the current desc */
2162 struct hns3_rx_queue *rxq; /* RX queue */
2163 struct hns3_entry *sw_ring;
2164 struct hns3_entry *rxe;
2165 struct rte_mbuf *first_seg;
2166 struct rte_mbuf *last_seg;
2167 struct hns3_desc rxd;
	struct rte_mbuf *nmb; /* pointer to the new mbuf */
2169 struct rte_mbuf *rxm;
2170 struct rte_eth_dev *dev;
2171 uint32_t bd_base_info;
2186 rx_id = rxq->next_to_use;
2187 rx_ring = rxq->rx_ring;
2188 sw_ring = rxq->sw_ring;
2189 first_seg = rxq->pkt_first_seg;
2190 last_seg = rxq->pkt_last_seg;
2192 while (nb_rx < nb_pkts) {
2193 rxdp = &rx_ring[rx_id];
2194 bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
2195 if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
		 * The interactive process between software and hardware for
		 * receiving a new packet in the hns3 network engine:
		 * 1. The hardware network engine first writes the packet
		 *    content to the memory pointed to by the 'addr' field of
		 *    the Rx Buffer Descriptor, then fills the result of
		 *    parsing the packet, including the valid field, into the
		 *    Rx Buffer Descriptor in one write operation.
		 * 2. The driver reads the Rx BD's valid field in a loop to
		 *    check whether it is valid; if so, it assigns a new
		 *    address to the addr field, clears the valid field, gets
		 *    the other packet information by parsing the Rx BD's
		 *    other fields, and finally writes back the number of Rx
		 *    BDs processed to the HNS3_RING_RX_HEAD_REG register to
		 *    inform the hardware.
		 * In the above process, the ordering is very important: we
		 * must make sure that the CPU reads the Rx BD's other fields
		 * only after the valid bit has been observed.
		 *
		 * There are two types of re-ordering: compiler re-ordering
		 * and CPU re-ordering under the ARMv8 architecture.
		 * 1. We use volatile to deal with compiler re-ordering, which
		 *    is why rx_ring/rxdp are defined with volatile.
		 * 2. A memory barrier is commonly used to deal with CPU
		 *    re-ordering, but its cost is high.
2224 * In order to solve the high cost of using memory barrier, we
2225 * use the data dependency order under the ARMv8 architecture,
2228 * instr02: load B <- A
2229 * the instr02 will always execute after instr01.
2231 * To construct the data dependency ordering, we use the
2232 * following assignment:
2233 * rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -
2234 * (1u<<HNS3_RXD_VLD_B)]
2235 * Using gcc compiler under the ARMv8 architecture, the related
2236 * assembly code example as follows:
		 * note: (1u << HNS3_RXD_VLD_B) equals 0x10
2238 * instr01: ldr w26, [x22, #28] --read bd_base_info
2239 * instr02: and w0, w26, #0x10 --calc bd_base_info & 0x10
2240 * instr03: sub w0, w0, #0x10 --calc (bd_base_info &
2242 * instr04: add x0, x22, x0, lsl #5 --calc copy source addr
2243 * instr05: ldp x2, x3, [x0]
2244 * instr06: stp x2, x3, [x29, #256] --copy BD's [0 ~ 15]B
2245 * instr07: ldp x4, x5, [x0, #16]
2246 * instr08: stp x4, x5, [x29, #272] --copy BD's [16 ~ 31]B
		 * the instr05~08 depend on x0's value, x0 depends on w26's
		 * value, and w26 is the bd_base_info; this forms the data
		 * dependency ordering.
		 * note: if the BD is valid, (bd_base_info &
		 * (1u << HNS3_RXD_VLD_B)) - (1u << HNS3_RXD_VLD_B) will
		 * always be zero, so the assignment is correct.
2254 * So we use the data dependency ordering instead of memory
2255 * barrier to improve receive performance.
2257 rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -
2258 (1u << HNS3_RXD_VLD_B)];
2260 nmb = hns3_rx_alloc_buffer(rxq);
2261 if (unlikely(nmb == NULL)) {
2262 dev = &rte_eth_devices[rxq->port_id];
2263 dev->data->rx_mbuf_alloc_failed++;
2268 rxe = &sw_ring[rx_id];
2270 if (unlikely(rx_id == rxq->nb_rx_desc))
2273 rte_prefetch0(sw_ring[rx_id].mbuf);
2274 if ((rx_id & HNS3_RX_RING_PREFETCTH_MASK) == 0) {
2275 rte_prefetch0(&rx_ring[rx_id]);
2276 rte_prefetch0(&sw_ring[rx_id]);
2282 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
2283 rxdp->rx.bd_base_info = 0;
2284 rxdp->addr = dma_addr;
2286 if (first_seg == NULL) {
2288 first_seg->nb_segs = 1;
2290 first_seg->nb_segs++;
2291 last_seg->next = rxm;
2294 rxm->data_off = RTE_PKTMBUF_HEADROOM;
2295 rxm->data_len = rte_le_to_cpu_16(rxd.rx.size);
2297 if (!(bd_base_info & BIT(HNS3_RXD_FE_B))) {
		 * The last buffer of the received packet. The packet length
		 * read from the buffer descriptor may contain the CRC length,
		 * which must then be subtracted, and the same applies to the
		 * data length.
2308 first_seg->pkt_len = rte_le_to_cpu_16(rxd.rx.pkt_len);
2311 * This is the last buffer of the received packet. If the CRC
2312 * is not stripped by the hardware:
2313 * - Subtract the CRC length from the total packet length.
		 * - If the last buffer only contains the whole CRC or a part
		 *   of it, free the mbuf associated with the last buffer. If
		 *   part of the CRC is also contained in the previous mbuf,
		 *   subtract the length of that CRC part from the data
		 *   length of the previous mbuf.
2321 if (unlikely(rxq->crc_len > 0)) {
2322 first_seg->pkt_len -= rxq->crc_len;
2323 recalculate_data_len(first_seg, last_seg, rxm, rxq,
2327 first_seg->port = rxq->port_id;
2328 first_seg->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash);
2329 first_seg->ol_flags = PKT_RX_RSS_HASH;
2330 if (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) {
2331 first_seg->hash.fdir.hi =
2332 rte_le_to_cpu_16(rxd.rx.fd_id);
2333 first_seg->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
2336 gro_size = hns3_get_field(bd_base_info, HNS3_RXD_GRO_SIZE_M,
2337 HNS3_RXD_GRO_SIZE_S);
2338 if (gro_size != 0) {
2339 first_seg->ol_flags |= PKT_RX_LRO;
2340 first_seg->tso_segsz = gro_size;
2343 l234_info = rte_le_to_cpu_32(rxd.rx.l234_info);
2344 ol_info = rte_le_to_cpu_32(rxd.rx.ol_info);
2345 ret = hns3_handle_bdinfo(rxq, first_seg, bd_base_info,
2346 l234_info, &cksum_err);
2350 first_seg->packet_type = hns3_rx_calc_ptype(rxq,
2351 l234_info, ol_info);
2353 if (bd_base_info & BIT(HNS3_RXD_L3L4P_B))
2354 hns3_rx_set_cksum_flag(first_seg,
2355 first_seg->packet_type,
2357 hns3_rxd_to_vlan_tci(rxq, first_seg, l234_info, &rxd);
2359 rx_pkts[nb_rx++] = first_seg;
2363 rte_pktmbuf_free(first_seg);
2367 rxq->next_to_use = rx_id;
2368 rxq->pkt_first_seg = first_seg;
2369 rxq->pkt_last_seg = last_seg;
2371 rxq->rx_free_hold += nb_rx_bd;
2372 if (rxq->rx_free_hold > rxq->rx_free_thresh) {
2373 hns3_write_reg_opt(rxq->io_head_reg, rxq->rx_free_hold);
2374 rxq->rx_free_hold = 0;
2381 hns3_rxq_vec_setup(__rte_unused struct hns3_rx_queue *rxq)
2386 hns3_rx_check_vec_support(__rte_unused struct rte_eth_dev *dev)
hns3_recv_pkts_vec(__rte_unused void *rx_queue,
2393 __rte_unused struct rte_mbuf **rx_pkts,
2394 __rte_unused uint16_t nb_pkts)
hns3_recv_pkts_vec_sve(__rte_unused void *rx_queue,
2401 __rte_unused struct rte_mbuf **rx_pkts,
2402 __rte_unused uint16_t nb_pkts)
2408 hns3_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
2409 struct rte_eth_burst_mode *mode)
2411 static const struct {
2412 eth_rx_burst_t pkt_burst;
2415 { hns3_recv_pkts, "Scalar" },
2416 { hns3_recv_scattered_pkts, "Scalar Scattered" },
2417 { hns3_recv_pkts_vec, "Vector Neon" },
2418 { hns3_recv_pkts_vec_sve, "Vector Sve" },
2421 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
2425 for (i = 0; i < RTE_DIM(burst_infos); i++) {
2426 if (pkt_burst == burst_infos[i].pkt_burst) {
2427 snprintf(mode->info, sizeof(mode->info), "%s",
2428 burst_infos[i].info);
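/*
 * Hypothetical application-side usage of this callback through the
 * generic ethdev API (shown for illustration only):
 *
 *	struct rte_eth_burst_mode mode;
 *	if (rte_eth_rx_burst_mode_get(port_id, 0, &mode) == 0)
 *		printf("Rx burst mode: %s\n", mode.info);
 */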
2438 hns3_check_sve_support(void)
2440 #if defined(RTE_ARCH_ARM64) && defined(CC_SVE_SUPPORT)
2441 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SVE))
2447 static eth_rx_burst_t
2448 hns3_get_rx_function(struct rte_eth_dev *dev)
2450 struct hns3_adapter *hns = dev->data->dev_private;
2451 uint64_t offloads = dev->data->dev_conf.rxmode.offloads;
2453 if (hns->rx_vec_allowed && hns3_rx_check_vec_support(dev) == 0)
2454 return hns3_check_sve_support() ? hns3_recv_pkts_vec_sve :
2457 if (hns->rx_simple_allowed && !dev->data->scattered_rx &&
2458 (offloads & DEV_RX_OFFLOAD_TCP_LRO) == 0)
2459 return hns3_recv_pkts;
2461 return hns3_recv_scattered_pkts;
2465 hns3_tx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_txconf *conf,
2466 uint16_t nb_desc, uint16_t *tx_rs_thresh,
2467 uint16_t *tx_free_thresh, uint16_t idx)
2469 #define HNS3_TX_RS_FREE_THRESH_GAP 8
2470 uint16_t rs_thresh, free_thresh, fast_free_thresh;
2472 if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||
2473 nb_desc % HNS3_ALIGN_RING_DESC) {
2474 hns3_err(hw, "number (%u) of tx descriptors is invalid",
2479 rs_thresh = (conf->tx_rs_thresh > 0) ?
2480 conf->tx_rs_thresh : HNS3_DEFAULT_TX_RS_THRESH;
2481 free_thresh = (conf->tx_free_thresh > 0) ?
2482 conf->tx_free_thresh : HNS3_DEFAULT_TX_FREE_THRESH;
2483 if (rs_thresh + free_thresh > nb_desc || nb_desc % rs_thresh ||
2484 rs_thresh >= nb_desc - HNS3_TX_RS_FREE_THRESH_GAP ||
2485 free_thresh >= nb_desc - HNS3_TX_RS_FREE_THRESH_GAP) {
2486 hns3_err(hw, "tx_rs_thresh (%d) tx_free_thresh (%d) nb_desc "
2487 "(%d) of tx descriptors for port=%d queue=%d check "
2489 rs_thresh, free_thresh, nb_desc, hw->data->port_id,
2494 if (conf->tx_free_thresh == 0) {
2495 /* Fast-free Tx mbufs early to improve the cache hit rate */
2496 fast_free_thresh = nb_desc - rs_thresh;
2497 if (fast_free_thresh >=
2498 HNS3_TX_FAST_FREE_AHEAD + HNS3_DEFAULT_TX_FREE_THRESH)
2499 free_thresh = fast_free_thresh -
2500 HNS3_TX_FAST_FREE_AHEAD;
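/*
 * Worked example with illustrative values only: nb_desc = 1024,
 * rs_thresh = 32 and HNS3_TX_FAST_FREE_AHEAD = 64 would give
 * fast_free_thresh = 992 and raise free_thresh to 992 - 64 = 928.
 */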
2503 *tx_rs_thresh = rs_thresh;
2504 *tx_free_thresh = free_thresh;
2509 hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
2510 unsigned int socket_id, const struct rte_eth_txconf *conf)
2512 struct hns3_adapter *hns = dev->data->dev_private;
2513 uint16_t tx_rs_thresh, tx_free_thresh;
2514 struct hns3_hw *hw = &hns->hw;
2515 struct hns3_queue_info q_info;
2516 struct hns3_tx_queue *txq;
2520 ret = hns3_tx_queue_conf_check(hw, conf, nb_desc,
2521 &tx_rs_thresh, &tx_free_thresh, idx);
2525 if (dev->data->tx_queues[idx] != NULL) {
2526 hns3_tx_queue_release(dev->data->tx_queues[idx]);
2527 dev->data->tx_queues[idx] = NULL;
2531 q_info.socket_id = socket_id;
2532 q_info.nb_desc = nb_desc;
2533 q_info.type = "hns3 TX queue";
2534 q_info.ring_name = "tx_ring";
2535 txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
2538 "Failed to alloc mem and reserve DMA mem for tx ring!");
2542 txq->tx_deferred_start = conf->tx_deferred_start;
2543 if (txq->tx_deferred_start && !hns3_dev_indep_txrx_supported(hw)) {
2544 hns3_warn(hw, "deferred start is not supported.");
2545 txq->tx_deferred_start = false;
2548 tx_entry_len = sizeof(struct hns3_entry) * txq->nb_tx_desc;
2549 txq->sw_ring = rte_zmalloc_socket("hns3 TX sw ring", tx_entry_len,
2550 RTE_CACHE_LINE_SIZE, socket_id);
2551 if (txq->sw_ring == NULL) {
2552 hns3_err(hw, "Failed to allocate memory for tx sw ring!");
2553 hns3_tx_queue_release(txq);
2558 txq->next_to_use = 0;
2559 txq->next_to_clean = 0;
2560 txq->tx_bd_ready = txq->nb_tx_desc - 1;
2561 txq->tx_free_thresh = tx_free_thresh;
2562 txq->tx_rs_thresh = tx_rs_thresh;
2563 txq->free = rte_zmalloc_socket("hns3 TX mbuf free array",
2564 sizeof(struct rte_mbuf *) * txq->tx_rs_thresh,
2565 RTE_CACHE_LINE_SIZE, socket_id);
2567 hns3_err(hw, "failed to allocate tx mbuf free array!");
2568 hns3_tx_queue_release(txq);
2572 txq->port_id = dev->data->port_id;
2574 * For the hns3 PF device, if the VLAN mode is HW_SHIFT_AND_DISCARD_MODE,
2575 * the pvid_sw_shift_en in the queue struct should not be changed,
2576 * because PVID-related operations do not need to be processed by the
2577 * PMD. For the hns3 VF device, whether PVID needs to be processed
2578 * depends on the configuration of the PF kernel mode netdev driver,
2579 * which delivers the related configuration through the mailbox; it is
2580 * finally reflected in port_base_vlan_cfg.
2582 if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
2583 txq->pvid_sw_shift_en = hw->port_base_vlan_cfg.state ==
2584 HNS3_PORT_BASE_VLAN_ENABLE;
2586 txq->pvid_sw_shift_en = false;
2587 txq->max_non_tso_bd_num = hw->max_non_tso_bd_num;
2588 txq->configured = true;
2589 txq->io_base = (void *)((char *)hw->io_base +
2590 hns3_get_tqp_reg_offset(idx));
2591 txq->io_tail_reg = (volatile void *)((char *)txq->io_base +
2592 HNS3_RING_TX_TAIL_REG);
2593 txq->min_tx_pkt_len = hw->min_tx_pkt_len;
2594 txq->tso_mode = hw->tso_mode;
2595 txq->over_length_pkt_cnt = 0;
2596 txq->exceed_limit_bd_pkt_cnt = 0;
2597 txq->exceed_limit_bd_reassem_fail = 0;
2598 txq->unsupported_tunnel_pkt_cnt = 0;
2599 txq->queue_full_cnt = 0;
2600 txq->pkt_padding_fail_cnt = 0;
2601 rte_spinlock_lock(&hw->lock);
2602 dev->data->tx_queues[idx] = txq;
2603 rte_spinlock_unlock(&hw->lock);
2609 hns3_tx_free_useless_buffer(struct hns3_tx_queue *txq)
2611 uint16_t tx_next_clean = txq->next_to_clean;
2612 uint16_t tx_next_use = txq->next_to_use;
2613 uint16_t tx_bd_ready = txq->tx_bd_ready;
2614 uint16_t tx_bd_max = txq->nb_tx_desc;
2615 struct hns3_entry *tx_bak_pkt = &txq->sw_ring[tx_next_clean];
2616 struct hns3_desc *desc = &txq->tx_ring[tx_next_clean];
2617 struct rte_mbuf *mbuf;
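/*
 * The VLD bit of a Tx BD is cleared by hardware once the buffer has
 * been sent, so mbufs are released from next_to_clean up to the first
 * descriptor whose VLD bit is still set (or until next_to_use is
 * reached).
 */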
2619 while ((!(desc->tx.tp_fe_sc_vld_ra_ri &
2620 rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B)))) &&
2621 tx_next_use != tx_next_clean) {
2622 mbuf = tx_bak_pkt->mbuf;
2624 rte_pktmbuf_free_seg(mbuf);
2625 tx_bak_pkt->mbuf = NULL;
2633 if (tx_next_clean >= tx_bd_max) {
2635 desc = txq->tx_ring;
2636 tx_bak_pkt = txq->sw_ring;
2640 txq->next_to_clean = tx_next_clean;
2641 txq->tx_bd_ready = tx_bd_ready;
2645 hns3_tso_proc_tunnel(struct hns3_desc *desc, uint64_t ol_flags,
2646 struct rte_mbuf *rxm, uint8_t *l2_len)
2652 tun_flags = ol_flags & PKT_TX_TUNNEL_MASK;
2656 otmp = rte_le_to_cpu_32(desc->tx.ol_type_vlan_len_msec);
2657 switch (tun_flags) {
2658 case PKT_TX_TUNNEL_GENEVE:
2659 case PKT_TX_TUNNEL_VXLAN:
2660 *l2_len = rxm->l2_len - RTE_ETHER_VXLAN_HLEN;
2662 case PKT_TX_TUNNEL_GRE:
2664 * OL4 header size, in units of 4 bytes; it contains the outer
2665 * L4 (GRE) header length plus the tunneling header length.
2667 ol4_len = hns3_get_field(otmp, HNS3_TXD_L4LEN_M,
2669 *l2_len = rxm->l2_len - (ol4_len << HNS3_L4_LEN_UNIT);
2672 /* For non-UDP/GRE tunneling, drop the tunnel packet */
2675 hns3_set_field(otmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
2676 rxm->outer_l2_len >> HNS3_L2_LEN_UNIT);
2677 desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(otmp);
2683 hns3_config_gro(struct hns3_hw *hw, bool en)
2685 struct hns3_cfg_gro_status_cmd *req;
2686 struct hns3_cmd_desc desc;
2689 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GRO_GENERIC_CONFIG, false);
2690 req = (struct hns3_cfg_gro_status_cmd *)desc.data;
2692 req->gro_en = rte_cpu_to_le_16(en ? 1 : 0);
2694 ret = hns3_cmd_send(hw, &desc, 1);
2696 hns3_err(hw, "%s hardware GRO failed, ret = %d",
2697 en ? "enable" : "disable", ret);
2703 hns3_restore_gro_conf(struct hns3_hw *hw)
2709 offloads = hw->data->dev_conf.rxmode.offloads;
2710 gro_en = offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false;
2711 ret = hns3_config_gro(hw, gro_en);
2713 hns3_err(hw, "restore hardware GRO to %s failed, ret = %d",
2714 gro_en ? "enabled" : "disabled", ret);
2720 hns3_pkt_is_tso(struct rte_mbuf *m)
2722 return (m->tso_segsz != 0 && m->ol_flags & PKT_TX_TCP_SEG);
2726 hns3_set_tso(struct hns3_desc *desc, uint64_t ol_flags,
2727 uint32_t paylen, struct rte_mbuf *rxm)
2729 uint8_t l2_len = rxm->l2_len;
2732 if (!hns3_pkt_is_tso(rxm))
2735 if (hns3_tso_proc_tunnel(desc, ol_flags, rxm, &l2_len))
2738 if (paylen <= rxm->tso_segsz)
2741 tmp = rte_le_to_cpu_32(desc->tx.type_cs_vlan_tso_len);
2742 hns3_set_bit(tmp, HNS3_TXD_TSO_B, 1);
2743 hns3_set_bit(tmp, HNS3_TXD_L3CS_B, 1);
2744 hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, HNS3_L4T_TCP);
2745 hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
2746 hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
2747 sizeof(struct rte_tcp_hdr) >> HNS3_L4_LEN_UNIT);
2748 hns3_set_field(tmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
2749 l2_len >> HNS3_L2_LEN_UNIT);
2750 desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp);
2751 desc->tx.mss = rte_cpu_to_le_16(rxm->tso_segsz);
2755 hns3_fill_per_desc(struct hns3_desc *desc, struct rte_mbuf *rxm)
2757 desc->addr = rte_mbuf_data_iova(rxm);
2758 desc->tx.send_size = rte_cpu_to_le_16(rte_pktmbuf_data_len(rxm));
2759 desc->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B));
2763 hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc,
2764 struct rte_mbuf *rxm)
2766 uint64_t ol_flags = rxm->ol_flags;
2770 hdr_len = rxm->l2_len + rxm->l3_len + rxm->l4_len;
2771 hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ?
2772 rxm->outer_l2_len + rxm->outer_l3_len : 0;
2773 paylen = rxm->pkt_len - hdr_len;
2774 desc->tx.paylen = rte_cpu_to_le_32(paylen);
2775 hns3_set_tso(desc, ol_flags, paylen, rxm);
2778 * Currently, the hns3 network engine does not support more than two
2779 * layers of VLAN offload in the Tx direction. So when the number of
2780 * VLANs in the packet represented by rxm, plus the number of VLANs
2781 * offloaded by hardware (such as the PVID), exceeds two, the packet
2782 * will be discarded or its original VLAN will be overwritten by
2783 * hardware. When the PF PVID is enabled by calling the API function
2784 * rte_eth_dev_set_vlan_pvid, or the VF PVID is enabled by the hns3
2785 * PF kernel ether driver, the outer VLAN tag will always be the PVID.
2786 * To avoid the VLAN in the Tx descriptor being overwritten by the PVID,
2787 * it should be placed in the position closer to the IP header when PVID is enabled.
2789 if (!txq->pvid_sw_shift_en && ol_flags & (PKT_TX_VLAN_PKT |
2791 desc->tx.ol_type_vlan_len_msec |=
2792 rte_cpu_to_le_32(BIT(HNS3_TXD_OVLAN_B));
2793 if (ol_flags & PKT_TX_QINQ_PKT)
2794 desc->tx.outer_vlan_tag =
2795 rte_cpu_to_le_16(rxm->vlan_tci_outer);
2797 desc->tx.outer_vlan_tag =
2798 rte_cpu_to_le_16(rxm->vlan_tci);
2801 if (ol_flags & PKT_TX_QINQ_PKT ||
2802 ((ol_flags & PKT_TX_VLAN_PKT) && txq->pvid_sw_shift_en)) {
2803 desc->tx.type_cs_vlan_tso_len |=
2804 rte_cpu_to_le_32(BIT(HNS3_TXD_VLAN_B));
2805 desc->tx.vlan_tag = rte_cpu_to_le_16(rxm->vlan_tci);
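/*
 * Summary of the tag placement above: with PVID disabled, a single
 * VLAN goes to outer_vlan_tag and QinQ uses both outer_vlan_tag and
 * vlan_tag; with PVID software shift enabled, the packet VLAN goes to
 * vlan_tag, the position closer to the IP header.
 */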
2810 hns3_tx_alloc_mbufs(struct rte_mempool *mb_pool, uint16_t nb_new_buf,
2811 struct rte_mbuf **alloc_mbuf)
2813 #define MAX_NON_TSO_BD_PER_PKT 18
2814 struct rte_mbuf *pkt_segs[MAX_NON_TSO_BD_PER_PKT];
2817 /* Allocate enough mbufs */
2818 if (rte_mempool_get_bulk(mb_pool, (void **)pkt_segs, nb_new_buf))
2821 for (i = 0; i < nb_new_buf - 1; i++)
2822 pkt_segs[i]->next = pkt_segs[i + 1];
2824 pkt_segs[nb_new_buf - 1]->next = NULL;
2825 pkt_segs[0]->nb_segs = nb_new_buf;
2826 *alloc_mbuf = pkt_segs[0];
2832 hns3_pktmbuf_copy_hdr(struct rte_mbuf *new_pkt, struct rte_mbuf *old_pkt)
2834 new_pkt->ol_flags = old_pkt->ol_flags;
2835 new_pkt->pkt_len = rte_pktmbuf_pkt_len(old_pkt);
2836 new_pkt->outer_l2_len = old_pkt->outer_l2_len;
2837 new_pkt->outer_l3_len = old_pkt->outer_l3_len;
2838 new_pkt->l2_len = old_pkt->l2_len;
2839 new_pkt->l3_len = old_pkt->l3_len;
2840 new_pkt->l4_len = old_pkt->l4_len;
2841 new_pkt->vlan_tci_outer = old_pkt->vlan_tci_outer;
2842 new_pkt->vlan_tci = old_pkt->vlan_tci;
2846 hns3_reassemble_tx_pkts(struct rte_mbuf *tx_pkt, struct rte_mbuf **new_pkt,
2847 uint8_t max_non_tso_bd_num)
2849 struct rte_mempool *mb_pool;
2850 struct rte_mbuf *new_mbuf;
2851 struct rte_mbuf *temp_new;
2852 struct rte_mbuf *temp;
2853 uint16_t last_buf_len;
2854 uint16_t nb_new_buf;
2864 mb_pool = tx_pkt->pool;
2865 buf_size = tx_pkt->buf_len - RTE_PKTMBUF_HEADROOM;
2866 nb_new_buf = (rte_pktmbuf_pkt_len(tx_pkt) - 1) / buf_size + 1;
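/* i.e. nb_new_buf = ceil(pkt_len / buf_size) */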
2867 if (nb_new_buf > max_non_tso_bd_num)
2870 last_buf_len = rte_pktmbuf_pkt_len(tx_pkt) % buf_size;
2871 if (last_buf_len == 0)
2872 last_buf_len = buf_size;
2874 /* Allocate enough mbufs */
2875 ret = hns3_tx_alloc_mbufs(mb_pool, nb_new_buf, &new_mbuf);
2879 /* Copy the original packet content to the new mbufs */
2881 s = rte_pktmbuf_mtod(temp, char *);
2882 len_s = rte_pktmbuf_data_len(temp);
2883 temp_new = new_mbuf;
2884 while (temp != NULL && temp_new != NULL) {
2885 d = rte_pktmbuf_mtod(temp_new, char *);
2886 buf_len = temp_new->next == NULL ? last_buf_len : buf_size;
2890 len = RTE_MIN(len_s, len_d);
2894 len_d = len_d - len;
2895 len_s = len_s - len;
2901 s = rte_pktmbuf_mtod(temp, char *);
2902 len_s = rte_pktmbuf_data_len(temp);
2906 temp_new->data_len = buf_len;
2907 temp_new = temp_new->next;
2909 hns3_pktmbuf_copy_hdr(new_mbuf, tx_pkt);
2911 /* free original mbufs */
2912 rte_pktmbuf_free(tx_pkt);
2914 *new_pkt = new_mbuf;
2920 hns3_parse_outer_params(uint64_t ol_flags, uint32_t *ol_type_vlan_len_msec)
2922 uint32_t tmp = *ol_type_vlan_len_msec;
2924 /* (outer) IP header type */
2925 if (ol_flags & PKT_TX_OUTER_IPV4) {
2926 /* OL3 header size, in units of 4 bytes */
2927 hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
2928 sizeof(struct rte_ipv4_hdr) >> HNS3_L3_LEN_UNIT);
2929 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
2930 hns3_set_field(tmp, HNS3_TXD_OL3T_M,
2931 HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_CSUM);
2933 hns3_set_field(tmp, HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
2934 HNS3_OL3T_IPV4_NO_CSUM);
2935 } else if (ol_flags & PKT_TX_OUTER_IPV6) {
2936 hns3_set_field(tmp, HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
2938 /* OL3 header size, in units of 4 bytes */
2939 hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
2940 sizeof(struct rte_ipv6_hdr) >> HNS3_L3_LEN_UNIT);
2943 *ol_type_vlan_len_msec = tmp;
2947 hns3_parse_inner_params(uint64_t ol_flags, uint32_t *ol_type_vlan_len_msec,
2948 struct rte_net_hdr_lens *hdr_lens)
2950 uint32_t tmp = *ol_type_vlan_len_msec;
2953 /* OL2 header size, in units of 2 bytes */
2954 hns3_set_field(tmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
2955 sizeof(struct rte_ether_hdr) >> HNS3_L2_LEN_UNIT);
2957 /* L4TUNT: L4 Tunneling Type */
2958 switch (ol_flags & PKT_TX_TUNNEL_MASK) {
2959 case PKT_TX_TUNNEL_GENEVE:
2960 case PKT_TX_TUNNEL_VXLAN:
2961 /* MAC-in-UDP tunnelling packet, including VXLAN */
2962 hns3_set_field(tmp, HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S,
2963 HNS3_TUN_MAC_IN_UDP);
2965 * OL4 header size, in units of 4 bytes; it contains the outer
2966 * L4 (UDP) header length plus the tunneling header length.
2968 hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
2969 (uint8_t)RTE_ETHER_VXLAN_HLEN >>
2972 case PKT_TX_TUNNEL_GRE:
2973 hns3_set_field(tmp, HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S,
2976 * OL4 header size, in units of 4 bytes; it contains the outer
2977 * L4 (GRE) header length plus the tunneling header length.
2979 l4_len = hdr_lens->l4_len + hdr_lens->tunnel_len;
2980 hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
2981 l4_len >> HNS3_L4_LEN_UNIT);
2984 /* For non-UDP/GRE tunneling, drop the tunnel packet */
2988 *ol_type_vlan_len_msec = tmp;
2994 hns3_parse_tunneling_params(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
2996 struct rte_net_hdr_lens *hdr_lens)
2998 struct hns3_desc *tx_ring = txq->tx_ring;
2999 struct hns3_desc *desc = &tx_ring[tx_desc_id];
3003 hns3_parse_outer_params(ol_flags, &value);
3004 ret = hns3_parse_inner_params(ol_flags, &value, hdr_lens);
3008 desc->tx.ol_type_vlan_len_msec |= rte_cpu_to_le_32(value);
3014 hns3_parse_l3_cksum_params(uint64_t ol_flags, uint32_t *type_cs_vlan_tso_len)
3018 /* Enable L3 checksum offloads */
3019 if (ol_flags & PKT_TX_IPV4) {
3020 tmp = *type_cs_vlan_tso_len;
3021 hns3_set_field(tmp, HNS3_TXD_L3T_M, HNS3_TXD_L3T_S,
3023 /* inner(/normal) L3 header size, in units of 4 bytes */
3024 hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
3025 sizeof(struct rte_ipv4_hdr) >> HNS3_L3_LEN_UNIT);
3026 if (ol_flags & PKT_TX_IP_CKSUM)
3027 hns3_set_bit(tmp, HNS3_TXD_L3CS_B, 1);
3028 *type_cs_vlan_tso_len = tmp;
3029 } else if (ol_flags & PKT_TX_IPV6) {
3030 tmp = *type_cs_vlan_tso_len;
3031 /* L3T: IPv6 has no header checksum */
3032 hns3_set_field(tmp, HNS3_TXD_L3T_M, HNS3_TXD_L3T_S,
3034 /* inner(/normal) L3 header size, in units of 4 bytes */
3035 hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
3036 sizeof(struct rte_ipv6_hdr) >> HNS3_L3_LEN_UNIT);
3037 *type_cs_vlan_tso_len = tmp;
3042 hns3_parse_l4_cksum_params(uint64_t ol_flags, uint32_t *type_cs_vlan_tso_len)
3046 /* Enable L4 checksum offloads */
3047 switch (ol_flags & PKT_TX_L4_MASK) {
3048 case PKT_TX_TCP_CKSUM:
3049 tmp = *type_cs_vlan_tso_len;
3050 hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
3052 hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
3053 hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
3054 sizeof(struct rte_tcp_hdr) >> HNS3_L4_LEN_UNIT);
3055 *type_cs_vlan_tso_len = tmp;
3057 case PKT_TX_UDP_CKSUM:
3058 tmp = *type_cs_vlan_tso_len;
3059 hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
3061 hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
3062 hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
3063 sizeof(struct rte_udp_hdr) >> HNS3_L4_LEN_UNIT);
3064 *type_cs_vlan_tso_len = tmp;
3066 case PKT_TX_SCTP_CKSUM:
3067 tmp = *type_cs_vlan_tso_len;
3068 hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
3070 hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
3071 hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
3072 sizeof(struct rte_sctp_hdr) >> HNS3_L4_LEN_UNIT);
3073 *type_cs_vlan_tso_len = tmp;
3081 hns3_txd_enable_checksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
3084 struct hns3_desc *tx_ring = txq->tx_ring;
3085 struct hns3_desc *desc = &tx_ring[tx_desc_id];
3088 /* inner(/normal) L2 header size, in units of 2 bytes */
3089 hns3_set_field(value, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
3090 sizeof(struct rte_ether_hdr) >> HNS3_L2_LEN_UNIT);
3092 hns3_parse_l3_cksum_params(ol_flags, &value);
3093 hns3_parse_l4_cksum_params(ol_flags, &value);
3095 desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(value);
3099 hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num,
3100 uint32_t max_non_tso_bd_num)
3102 struct rte_mbuf *m_first = tx_pkts;
3103 struct rte_mbuf *m_last = tx_pkts;
3104 uint32_t tot_len = 0;
3109 * The hns3 network engine requires that the sum of the data lengths
3110 * of every 8 consecutive buffers be greater than the MSS. We simplify
3111 * this by ensuring that the first 8 consecutive frags cover the GSO
3112 * header length plus the MSS, and that every following window of 7
3113 * consecutive frags covers the MSS, except for the last 7 frags.
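*
* Illustrative example with max_non_tso_bd_num = 8: for a 10-frag TSO
* packet, frags 0-7 are checked against header length + MSS, then the
* 7-frag windows 1-7 and 2-8 are checked against the MSS alone; the
* last 7 frags (3-9) are not checked.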
3115 if (bd_num <= max_non_tso_bd_num)
3118 for (i = 0; m_last && i < max_non_tso_bd_num - 1;
3119 i++, m_last = m_last->next)
3120 tot_len += m_last->data_len;
3125 /* ensure the first 8 frags cover more than mss + header length */
3126 hdr_len = tx_pkts->l2_len + tx_pkts->l3_len + tx_pkts->l4_len;
3127 hdr_len += (tx_pkts->ol_flags & PKT_TX_TUNNEL_MASK) ?
3128 tx_pkts->outer_l2_len + tx_pkts->outer_l3_len : 0;
3129 if (tot_len + m_last->data_len < tx_pkts->tso_segsz + hdr_len)
3133 * ensure the sum of the data lengths of every 7 consecutive buffers
3134 * is greater than the mss, except for the last window.
3136 for (i = 0; m_last && i < bd_num - max_non_tso_bd_num; i++) {
3137 tot_len -= m_first->data_len;
3138 tot_len += m_last->data_len;
3140 if (tot_len < tx_pkts->tso_segsz)
3143 m_first = m_first->next;
3144 m_last = m_last->next;
3151 hns3_outer_header_cksum_prepare(struct rte_mbuf *m)
3153 uint64_t ol_flags = m->ol_flags;
3154 struct rte_ipv4_hdr *ipv4_hdr;
3155 struct rte_udp_hdr *udp_hdr;
3156 uint32_t paylen, hdr_len;
3158 if (!(ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6)))
3161 if (ol_flags & PKT_TX_IPV4) {
3162 ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
3165 if (ol_flags & PKT_TX_IP_CKSUM)
3166 ipv4_hdr->hdr_checksum = 0;
3169 if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM &&
3170 ol_flags & PKT_TX_TCP_SEG) {
3171 hdr_len = m->l2_len + m->l3_len + m->l4_len;
3172 hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ?
3173 m->outer_l2_len + m->outer_l3_len : 0;
3174 paylen = m->pkt_len - hdr_len;
3175 if (paylen <= m->tso_segsz)
3177 udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
3180 udp_hdr->dgram_cksum = 0;
3185 hns3_check_tso_pkt_valid(struct rte_mbuf *m)
3187 uint32_t tmp_data_len_sum = 0;
3188 uint16_t nb_buf = m->nb_segs;
3189 uint32_t paylen, hdr_len;
3190 struct rte_mbuf *m_seg;
3193 if (nb_buf > HNS3_MAX_TSO_BD_PER_PKT)
3196 hdr_len = m->l2_len + m->l3_len + m->l4_len;
3197 hdr_len += (m->ol_flags & PKT_TX_TUNNEL_MASK) ?
3198 m->outer_l2_len + m->outer_l3_len : 0;
3199 if (hdr_len > HNS3_MAX_TSO_HDR_SIZE)
3202 paylen = m->pkt_len - hdr_len;
3203 if (paylen > HNS3_MAX_BD_PAYLEN)
3207 * The TSO header (including outer and inner L2, L3 and L4 headers)
3208 * must be carried by at most three descriptors in the hns3 network engine.
3212 for (i = 0; m_seg != NULL && i < HNS3_MAX_TSO_HDR_BD_NUM && i < nb_buf;
3213 i++, m_seg = m_seg->next) {
3214 tmp_data_len_sum += m_seg->data_len;
3217 if (hdr_len > tmp_data_len_sum)
3223 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3225 hns3_vld_vlan_chk(struct hns3_tx_queue *txq, struct rte_mbuf *m)
3227 struct rte_ether_hdr *eh;
3228 struct rte_vlan_hdr *vh;
3230 if (!txq->pvid_sw_shift_en)
3234 * Due to hardware limitations, the hns3 network engine supports only
3235 * two-layer VLAN hardware offload in the Tx direction, so when PVID is
3236 * enabled, QinQ insertion is no longer supported.
3237 * Also, when PVID is enabled, in the following two cases:
3238 * i) packets with more than two VLAN tags,
3239 * ii) packets with one VLAN tag while hardware VLAN insertion is enabled,
3241 * the packets will be regarded as abnormal and discarded by the
3242 * hardware in the Tx direction. For debugging purposes, a validation
3243 * check for these types of packets is added to the '.tx_pkt_prepare'
3244 * ops implementation function named hns3_prep_pkts to inform users
3245 * that these packets will be discarded.
3247 if (m->ol_flags & PKT_TX_QINQ_PKT)
3250 eh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
3251 if (eh->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
3252 if (m->ol_flags & PKT_TX_VLAN_PKT)
3255 /* Ensure the incoming packet is not a QinQ packet */
3256 vh = (struct rte_vlan_hdr *)(eh + 1);
3257 if (vh->eth_proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN))
3266 hns3_prep_pkt_proc(struct hns3_tx_queue *tx_queue, struct rte_mbuf *m)
3270 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3271 ret = rte_validate_tx_offload(m);
3277 ret = hns3_vld_vlan_chk(tx_queue, m);
3283 if (hns3_pkt_is_tso(m)) {
3284 if (hns3_pkt_need_linearized(m, m->nb_segs,
3285 tx_queue->max_non_tso_bd_num) ||
3286 hns3_check_tso_pkt_valid(m)) {
3291 if (tx_queue->tso_mode != HNS3_TSO_SW_CAL_PSEUDO_H_CSUM) {
3293 * (tso mode != HNS3_TSO_SW_CAL_PSEUDO_H_CSUM) means that
3294 * hardware supports recalculating the TCP pseudo-header
3295 * checksum of packets that need TSO, so the driver
3296 * software does not need to recalculate it.
3298 hns3_outer_header_cksum_prepare(m);
3303 ret = rte_net_intel_cksum_prepare(m);
3309 hns3_outer_header_cksum_prepare(m);
3315 hns3_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
3321 for (i = 0; i < nb_pkts; i++) {
3323 if (hns3_prep_pkt_proc(tx_queue, m))
3331 hns3_parse_cksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
3332 const struct rte_mbuf *m, struct rte_net_hdr_lens *hdr_lens)
3334 /* Fill in tunneling parameters if necessary */
3335 if (m->ol_flags & PKT_TX_TUNNEL_MASK) {
3336 (void)rte_net_get_ptype(m, hdr_lens, RTE_PTYPE_ALL_MASK);
3337 if (hns3_parse_tunneling_params(txq, tx_desc_id, m->ol_flags,
3339 txq->unsupported_tunnel_pkt_cnt++;
3343 /* Enable checksum offloading */
3344 if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK)
3345 hns3_txd_enable_checksum(txq, tx_desc_id, m->ol_flags);
3351 hns3_check_non_tso_pkt(uint16_t nb_buf, struct rte_mbuf **m_seg,
3352 struct rte_mbuf *tx_pkt, struct hns3_tx_queue *txq)
3354 uint8_t max_non_tso_bd_num;
3355 struct rte_mbuf *new_pkt;
3358 if (hns3_pkt_is_tso(*m_seg))
3362 * If the packet length is greater than the HNS3_MAX_FRAME_LEN
3363 * supported by the driver, the packet will not be transmitted.
3365 if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) > HNS3_MAX_FRAME_LEN)) {
3366 txq->over_length_pkt_cnt++;
3370 max_non_tso_bd_num = txq->max_non_tso_bd_num;
3371 if (unlikely(nb_buf > max_non_tso_bd_num)) {
3372 txq->exceed_limit_bd_pkt_cnt++;
3373 ret = hns3_reassemble_tx_pkts(tx_pkt, &new_pkt,
3374 max_non_tso_bd_num);
3376 txq->exceed_limit_bd_reassem_fail++;
3386 hns3_tx_free_buffer_simple(struct hns3_tx_queue *txq)
3388 struct hns3_entry *tx_entry;
3389 struct hns3_desc *desc;
3390 uint16_t tx_next_clean;
3394 if (HNS3_GET_TX_QUEUE_PEND_BD_NUM(txq) < txq->tx_rs_thresh)
3398 * All mbufs can be released only when the VLD bits of all
3399 * descriptors in a batch are cleared.
3401 tx_next_clean = (txq->next_to_clean + txq->tx_rs_thresh - 1) %
3403 desc = &txq->tx_ring[tx_next_clean];
3404 for (i = 0; i < txq->tx_rs_thresh; i++) {
3405 if (rte_le_to_cpu_16(desc->tx.tp_fe_sc_vld_ra_ri) &
3406 BIT(HNS3_TXD_VLD_B))
3411 tx_entry = &txq->sw_ring[txq->next_to_clean];
3413 for (i = 0; i < txq->tx_rs_thresh; i++)
3414 rte_prefetch0((tx_entry + i)->mbuf);
3415 for (i = 0; i < txq->tx_rs_thresh; i++, tx_entry++) {
3416 rte_mempool_put(tx_entry->mbuf->pool, tx_entry->mbuf);
3417 tx_entry->mbuf = NULL;
3420 txq->next_to_clean = (tx_next_clean + 1) % txq->nb_tx_desc;
3421 txq->tx_bd_ready += txq->tx_rs_thresh;
3426 hns3_tx_backup_1mbuf(struct hns3_entry *tx_entry, struct rte_mbuf **pkts)
3428 tx_entry->mbuf = pkts[0];
3432 hns3_tx_backup_4mbuf(struct hns3_entry *tx_entry, struct rte_mbuf **pkts)
3434 hns3_tx_backup_1mbuf(&tx_entry[0], &pkts[0]);
3435 hns3_tx_backup_1mbuf(&tx_entry[1], &pkts[1]);
3436 hns3_tx_backup_1mbuf(&tx_entry[2], &pkts[2]);
3437 hns3_tx_backup_1mbuf(&tx_entry[3], &pkts[3]);
3441 hns3_tx_setup_4bd(struct hns3_desc *txdp, struct rte_mbuf **pkts)
3443 #define PER_LOOP_NUM 4
3444 const uint16_t bd_flag = BIT(HNS3_TXD_VLD_B) | BIT(HNS3_TXD_FE_B);
3448 for (i = 0; i < PER_LOOP_NUM; i++, txdp++, pkts++) {
3449 dma_addr = rte_mbuf_data_iova(*pkts);
3450 txdp->addr = rte_cpu_to_le_64(dma_addr);
3451 txdp->tx.send_size = rte_cpu_to_le_16((*pkts)->data_len);
3452 txdp->tx.paylen = 0;
3453 txdp->tx.type_cs_vlan_tso_len = 0;
3454 txdp->tx.ol_type_vlan_len_msec = 0;
3455 txdp->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(bd_flag);
3460 hns3_tx_setup_1bd(struct hns3_desc *txdp, struct rte_mbuf **pkts)
3462 const uint16_t bd_flag = BIT(HNS3_TXD_VLD_B) | BIT(HNS3_TXD_FE_B);
3465 dma_addr = rte_mbuf_data_iova(*pkts);
3466 txdp->addr = rte_cpu_to_le_64(dma_addr);
3467 txdp->tx.send_size = rte_cpu_to_le_16((*pkts)->data_len);
3468 txdp->tx.paylen = 0;
3469 txdp->tx.type_cs_vlan_tso_len = 0;
3470 txdp->tx.ol_type_vlan_len_msec = 0;
3471 txdp->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(bd_flag);
3475 hns3_tx_fill_hw_ring(struct hns3_tx_queue *txq,
3476 struct rte_mbuf **pkts,
3479 #define PER_LOOP_NUM 4
3480 #define PER_LOOP_MASK (PER_LOOP_NUM - 1)
3481 struct hns3_desc *txdp = &txq->tx_ring[txq->next_to_use];
3482 struct hns3_entry *tx_entry = &txq->sw_ring[txq->next_to_use];
3483 const uint32_t mainpart = (nb_pkts & ((uint32_t)~PER_LOOP_MASK));
3484 const uint32_t leftover = (nb_pkts & ((uint32_t)PER_LOOP_MASK));
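/*
 * mainpart is nb_pkts rounded down to a multiple of PER_LOOP_NUM (4);
 * leftover is the remainder and is filled one BD at a time below.
 */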
3487 for (i = 0; i < mainpart; i += PER_LOOP_NUM) {
3488 hns3_tx_backup_4mbuf(tx_entry + i, pkts + i);
3489 hns3_tx_setup_4bd(txdp + i, pkts + i);
3491 if (unlikely(leftover > 0)) {
3492 for (i = 0; i < leftover; i++) {
3493 hns3_tx_backup_1mbuf(tx_entry + mainpart + i,
3494 pkts + mainpart + i);
3495 hns3_tx_setup_1bd(txdp + mainpart + i,
3496 pkts + mainpart + i);
3502 hns3_xmit_pkts_simple(void *tx_queue,
3503 struct rte_mbuf **tx_pkts,
3506 struct hns3_tx_queue *txq = tx_queue;
3509 hns3_tx_free_buffer_simple(txq);
3511 nb_pkts = RTE_MIN(txq->tx_bd_ready, nb_pkts);
3512 if (unlikely(nb_pkts == 0)) {
3513 if (txq->tx_bd_ready == 0)
3514 txq->queue_full_cnt++;
3518 txq->tx_bd_ready -= nb_pkts;
3519 if (txq->next_to_use + nb_pkts > txq->nb_tx_desc) {
3520 nb_tx = txq->nb_tx_desc - txq->next_to_use;
3521 hns3_tx_fill_hw_ring(txq, tx_pkts, nb_tx);
3522 txq->next_to_use = 0;
3525 hns3_tx_fill_hw_ring(txq, tx_pkts + nb_tx, nb_pkts - nb_tx);
3526 txq->next_to_use += nb_pkts - nb_tx;
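/* Ring the Tx doorbell: the tail register takes the number of newly filled BDs. */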
3528 hns3_write_reg_opt(txq->io_tail_reg, nb_pkts);
3534 hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
3536 struct rte_net_hdr_lens hdr_lens = {0};
3537 struct hns3_tx_queue *txq = tx_queue;
3538 struct hns3_entry *tx_bak_pkt;
3539 struct hns3_desc *tx_ring;
3540 struct rte_mbuf *tx_pkt;
3541 struct rte_mbuf *m_seg;
3542 struct hns3_desc *desc;
3543 uint32_t nb_hold = 0;
3544 uint16_t tx_next_use;
3545 uint16_t tx_pkt_num;
3551 /* free useless buffer */
3552 hns3_tx_free_useless_buffer(txq);
3554 tx_next_use = txq->next_to_use;
3555 tx_bd_max = txq->nb_tx_desc;
3556 tx_pkt_num = nb_pkts;
3557 tx_ring = txq->tx_ring;
3560 tx_bak_pkt = &txq->sw_ring[tx_next_use];
3561 for (nb_tx = 0; nb_tx < tx_pkt_num; nb_tx++) {
3562 tx_pkt = *tx_pkts++;
3564 nb_buf = tx_pkt->nb_segs;
3566 if (nb_buf > txq->tx_bd_ready) {
3567 txq->queue_full_cnt++;
3575 * If the packet length is less than the minimum packet length
3576 * supported by hardware in the Tx direction, the driver pads it to avoid errors.
3579 if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) <
3580 txq->min_tx_pkt_len)) {
3584 add_len = txq->min_tx_pkt_len -
3585 rte_pktmbuf_pkt_len(tx_pkt);
3586 appended = rte_pktmbuf_append(tx_pkt, add_len);
3587 if (appended == NULL) {
3588 txq->pkt_padding_fail_cnt++;
3592 memset(appended, 0, add_len);
3597 if (hns3_check_non_tso_pkt(nb_buf, &m_seg, tx_pkt, txq))
3600 if (hns3_parse_cksum(txq, tx_next_use, m_seg, &hdr_lens))
3604 desc = &tx_ring[tx_next_use];
3607 * If the packet is divided into multiple Tx Buffer Descriptors,
3608 * only the first Tx Buffer Descriptor needs the VLAN, paylen and
3609 * TSO fields filled in.
3611 hns3_fill_first_desc(txq, desc, m_seg);
3614 desc = &tx_ring[tx_next_use];
3616 * Fill valid bits, DMA address and data length for each
3617 * Tx Buffer Descriptor.
3619 hns3_fill_per_desc(desc, m_seg);
3620 tx_bak_pkt->mbuf = m_seg;
3621 m_seg = m_seg->next;
3624 if (tx_next_use >= tx_bd_max) {
3626 tx_bak_pkt = txq->sw_ring;
3630 } while (m_seg != NULL);
3632 /* Add end flag for the last Tx Buffer Descriptor */
3633 desc->tx.tp_fe_sc_vld_ra_ri |=
3634 rte_cpu_to_le_16(BIT(HNS3_TXD_FE_B));
3637 txq->next_to_use = tx_next_use;
3638 txq->tx_bd_ready -= i;
3644 hns3_write_reg_opt(txq->io_tail_reg, nb_hold);
3650 hns3_tx_check_vec_support(__rte_unused struct rte_eth_dev *dev)
3656 hns3_xmit_pkts_vec(__rte_unused void *tx_queue,
3657 __rte_unused struct rte_mbuf **tx_pkts,
3658 __rte_unused uint16_t nb_pkts)
3664 hns3_xmit_pkts_vec_sve(__rte_unused void *tx_queue,
3665 __rte_unused struct rte_mbuf **tx_pkts,
3666 __rte_unused uint16_t nb_pkts)
3672 hns3_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
3673 struct rte_eth_burst_mode *mode)
3675 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
3676 const char *info = NULL;
3678 if (pkt_burst == hns3_xmit_pkts_simple)
3679 info = "Scalar Simple";
3680 else if (pkt_burst == hns3_xmit_pkts)
3682 else if (pkt_burst == hns3_xmit_pkts_vec)
3683 info = "Vector Neon";
3684 else if (pkt_burst == hns3_xmit_pkts_vec_sve)
3685 info = "Vector Sve";
3690 snprintf(mode->info, sizeof(mode->info), "%s", info);
3695 static eth_tx_burst_t
3696 hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep)
3698 uint64_t offloads = dev->data->dev_conf.txmode.offloads;
3699 struct hns3_adapter *hns = dev->data->dev_private;
3701 if (hns->tx_vec_allowed && hns3_tx_check_vec_support(dev) == 0) {
3703 return hns3_check_sve_support() ? hns3_xmit_pkts_vec_sve :
3707 if (hns->tx_simple_allowed &&
3708 offloads == (offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)) {
3710 return hns3_xmit_pkts_simple;
3713 *prep = hns3_prep_pkts;
3714 return hns3_xmit_pkts;
3718 hns3_dummy_rxtx_burst(void *dpdk_txq __rte_unused,
3719 struct rte_mbuf **pkts __rte_unused,
3720 uint16_t pkts_n __rte_unused)
3725 void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
3727 struct hns3_adapter *hns = eth_dev->data->dev_private;
3728 eth_tx_prep_t prep = NULL;
3730 if (hns->hw.adapter_state == HNS3_NIC_STARTED &&
3731 rte_atomic16_read(&hns->hw.reset.resetting) == 0) {
3732 eth_dev->rx_pkt_burst = hns3_get_rx_function(eth_dev);
3733 eth_dev->tx_pkt_burst = hns3_get_tx_function(eth_dev, &prep);
3734 eth_dev->tx_pkt_prepare = prep;
3736 eth_dev->rx_pkt_burst = hns3_dummy_rxtx_burst;
3737 eth_dev->tx_pkt_burst = hns3_dummy_rxtx_burst;
3738 eth_dev->tx_pkt_prepare = hns3_dummy_rxtx_burst;
3743 hns3_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
3744 struct rte_eth_rxq_info *qinfo)
3746 struct hns3_rx_queue *rxq = dev->data->rx_queues[queue_id];
3748 qinfo->mp = rxq->mb_pool;
3749 qinfo->nb_desc = rxq->nb_rx_desc;
3750 qinfo->scattered_rx = dev->data->scattered_rx;
3751 /* Report the HW Rx buffer length to user */
3752 qinfo->rx_buf_size = rxq->rx_buf_len;
3755 * If there are no available Rx buffer descriptors, incoming packets
3756 * are always dropped by the hardware in the hns3 network engine.
3758 qinfo->conf.rx_drop_en = 1;
3759 qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
3760 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
3761 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
3765 hns3_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
3766 struct rte_eth_txq_info *qinfo)
3768 struct hns3_tx_queue *txq = dev->data->tx_queues[queue_id];
3770 qinfo->nb_desc = txq->nb_tx_desc;
3771 qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
3772 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
3773 qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
3774 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
3778 hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
3780 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3781 struct hns3_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
3782 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3785 if (!hns3_dev_indep_txrx_supported(hw))
3788 ret = hns3_reset_queue(hw, rx_queue_id, HNS3_RING_TYPE_RX);
3790 hns3_err(hw, "fail to reset Rx queue %u, ret = %d.",
3795 ret = hns3_init_rxq(hns, rx_queue_id);
3797 hns3_err(hw, "fail to init Rx queue %u, ret = %d.",
3802 hns3_enable_rxq(rxq, true);
3803 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
3809 hns3_reset_sw_rxq(struct hns3_rx_queue *rxq)
3811 rxq->next_to_use = 0;
3812 rxq->rx_rearm_start = 0;
3813 rxq->rx_free_hold = 0;
3814 rxq->rx_rearm_nb = 0;
3815 rxq->pkt_first_seg = NULL;
3816 rxq->pkt_last_seg = NULL;
3817 memset(&rxq->rx_ring[0], 0, rxq->nb_rx_desc * sizeof(struct hns3_desc));
3818 hns3_rxq_vec_setup(rxq);
3822 hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
3824 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3825 struct hns3_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
3827 if (!hns3_dev_indep_txrx_supported(hw))
3830 hns3_enable_rxq(rxq, false);
3832 hns3_rx_queue_release_mbufs(rxq);
3834 hns3_reset_sw_rxq(rxq);
3835 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
3841 hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
3843 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3844 struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
3847 if (!hns3_dev_indep_txrx_supported(hw))
3850 ret = hns3_reset_queue(hw, tx_queue_id, HNS3_RING_TYPE_TX);
3852 hns3_err(hw, "fail to reset Tx queue %u, ret = %d.",
3858 hns3_enable_txq(txq, true);
3859 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
3865 hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
3867 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3868 struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
3870 if (!hns3_dev_indep_txrx_supported(hw))
3873 hns3_enable_txq(txq, false);
3874 hns3_tx_queue_release_mbufs(txq);
3876 * All the mbufs in sw_ring are released and all the pointers in
3877 * sw_ring are set to NULL. If this queue is still called by the upper
3878 * layer, residual SW state of this txq may cause these NULL pointers
3879 * in sw_ring to be released again. To avoid this, reinit the txq.
3883 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
3889 hns3_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
3892 * Number of BDs that have been processed by the driver
3893 * but have not yet been returned to the hardware.
3895 uint32_t driver_hold_bd_num;
3896 struct hns3_rx_queue *rxq;
3899 rxq = dev->data->rx_queues[rx_queue_id];
3900 fbd_num = hns3_read_dev(rxq, HNS3_RING_RX_FBDNUM_REG);
3901 if (dev->rx_pkt_burst == hns3_recv_pkts_vec ||
3902 dev->rx_pkt_burst == hns3_recv_pkts_vec_sve)
3903 driver_hold_bd_num = rxq->rx_rearm_nb;
3905 driver_hold_bd_num = rxq->rx_free_hold;
3907 if (fbd_num <= driver_hold_bd_num)
3910 return fbd_num - driver_hold_bd_num;