1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2021 HiSilicon Limited.
5 #include <rte_bus_pci.h>
6 #include <rte_common.h>
7 #include <rte_cycles.h>
8 #include <rte_geneve.h>
10 #include <ethdev_driver.h>
13 #include <rte_malloc.h>
14 #if defined(RTE_ARCH_ARM64)
15 #include <rte_cpuflags.h>
19 #include "hns3_ethdev.h"
20 #include "hns3_rxtx.h"
21 #include "hns3_regs.h"
22 #include "hns3_logs.h"
24 #define HNS3_CFG_DESC_NUM(num) ((num) / 8 - 1)
25 #define HNS3_RX_RING_PREFETCTH_MASK 3
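/*
 * Free every mbuf still owned by the Rx queue. When the vector path has not
 * rearmed any descriptors (rx_rearm_nb == 0) the whole sw_ring is scanned;
 * otherwise only the range [next_to_use, rx_rearm_start) holds valid mbufs.
 * The bulk-allocation cache and any half-assembled scattered packet
 * (pkt_first_seg) are released as well.
 */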
28 hns3_rx_queue_release_mbufs(struct hns3_rx_queue *rxq)
32 /* Note: Fake rx queue will not enter here */
33 if (rxq->sw_ring == NULL)
36 if (rxq->rx_rearm_nb == 0) {
37 for (i = 0; i < rxq->nb_rx_desc; i++) {
38 if (rxq->sw_ring[i].mbuf != NULL) {
39 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
40 rxq->sw_ring[i].mbuf = NULL;
44 for (i = rxq->next_to_use;
45 i != rxq->rx_rearm_start;
46 i = (i + 1) % rxq->nb_rx_desc) {
47 if (rxq->sw_ring[i].mbuf != NULL) {
48 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
49 rxq->sw_ring[i].mbuf = NULL;
54 for (i = 0; i < rxq->bulk_mbuf_num; i++)
55 rte_pktmbuf_free_seg(rxq->bulk_mbuf[i]);
56 rxq->bulk_mbuf_num = 0;
58 if (rxq->pkt_first_seg) {
59 rte_pktmbuf_free(rxq->pkt_first_seg);
60 rxq->pkt_first_seg = NULL;
65 hns3_tx_queue_release_mbufs(struct hns3_tx_queue *txq)
69 /* Note: Fake tx queue will not enter here */
71 for (i = 0; i < txq->nb_tx_desc; i++) {
72 if (txq->sw_ring[i].mbuf) {
73 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
74 txq->sw_ring[i].mbuf = NULL;
81 hns3_rx_queue_release(void *queue)
83 struct hns3_rx_queue *rxq = queue;
85 hns3_rx_queue_release_mbufs(rxq);
87 rte_memzone_free(rxq->mz);
89 rte_free(rxq->sw_ring);
95 hns3_tx_queue_release(void *queue)
97 struct hns3_tx_queue *txq = queue;
99 hns3_tx_queue_release_mbufs(txq);
101 rte_memzone_free(txq->mz);
103 rte_free(txq->sw_ring);
111 hns3_dev_rx_queue_release(void *queue)
113 struct hns3_rx_queue *rxq = queue;
114 struct hns3_adapter *hns;
120 rte_spinlock_lock(&hns->hw.lock);
121 hns3_rx_queue_release(queue);
122 rte_spinlock_unlock(&hns->hw.lock);
126 hns3_dev_tx_queue_release(void *queue)
128 struct hns3_tx_queue *txq = queue;
129 struct hns3_adapter *hns;
135 rte_spinlock_lock(&hns->hw.lock);
136 hns3_tx_queue_release(queue);
137 rte_spinlock_unlock(&hns->hw.lock);
141 hns3_fake_rx_queue_release(struct hns3_rx_queue *queue)
143 struct hns3_rx_queue *rxq = queue;
144 struct hns3_adapter *hns;
154 if (hw->fkq_data.rx_queues[idx]) {
155 hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);
156 hw->fkq_data.rx_queues[idx] = NULL;
159 /* free fake rx queue arrays */
160 if (idx == (hw->fkq_data.nb_fake_rx_queues - 1)) {
161 hw->fkq_data.nb_fake_rx_queues = 0;
162 rte_free(hw->fkq_data.rx_queues);
163 hw->fkq_data.rx_queues = NULL;
168 hns3_fake_tx_queue_release(struct hns3_tx_queue *queue)
170 struct hns3_tx_queue *txq = queue;
171 struct hns3_adapter *hns;
181 if (hw->fkq_data.tx_queues[idx]) {
182 hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);
183 hw->fkq_data.tx_queues[idx] = NULL;
186 /* free fake tx queue arrays */
187 if (idx == (hw->fkq_data.nb_fake_tx_queues - 1)) {
188 hw->fkq_data.nb_fake_tx_queues = 0;
189 rte_free(hw->fkq_data.tx_queues);
190 hw->fkq_data.tx_queues = NULL;
195 hns3_free_rx_queues(struct rte_eth_dev *dev)
197 struct hns3_adapter *hns = dev->data->dev_private;
198 struct hns3_fake_queue_data *fkq_data;
199 struct hns3_hw *hw = &hns->hw;
203 nb_rx_q = hw->data->nb_rx_queues;
204 for (i = 0; i < nb_rx_q; i++) {
205 if (dev->data->rx_queues[i]) {
206 hns3_rx_queue_release(dev->data->rx_queues[i]);
207 dev->data->rx_queues[i] = NULL;
211 /* Free fake Rx queues */
212 fkq_data = &hw->fkq_data;
213 for (i = 0; i < fkq_data->nb_fake_rx_queues; i++) {
214 if (fkq_data->rx_queues[i])
215 hns3_fake_rx_queue_release(fkq_data->rx_queues[i]);
220 hns3_free_tx_queues(struct rte_eth_dev *dev)
222 struct hns3_adapter *hns = dev->data->dev_private;
223 struct hns3_fake_queue_data *fkq_data;
224 struct hns3_hw *hw = &hns->hw;
228 nb_tx_q = hw->data->nb_tx_queues;
229 for (i = 0; i < nb_tx_q; i++) {
230 if (dev->data->tx_queues[i]) {
231 hns3_tx_queue_release(dev->data->tx_queues[i]);
232 dev->data->tx_queues[i] = NULL;
236 /* Free fake Tx queues */
237 fkq_data = &hw->fkq_data;
238 for (i = 0; i < fkq_data->nb_fake_tx_queues; i++) {
239 if (fkq_data->tx_queues[i])
240 hns3_fake_tx_queue_release(fkq_data->tx_queues[i]);
245 hns3_free_all_queues(struct rte_eth_dev *dev)
247 hns3_free_rx_queues(dev);
248 hns3_free_tx_queues(dev);
252 hns3_alloc_rx_queue_mbufs(struct hns3_hw *hw, struct hns3_rx_queue *rxq)
254 struct rte_mbuf *mbuf;
258 for (i = 0; i < rxq->nb_rx_desc; i++) {
259 mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
260 if (unlikely(mbuf == NULL)) {
261 hns3_err(hw, "Failed to allocate RXD[%u] for rx queue!",
263 hns3_rx_queue_release_mbufs(rxq);
267 rte_mbuf_refcnt_set(mbuf, 1);
269 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
271 mbuf->port = rxq->port_id;
273 rxq->sw_ring[i].mbuf = mbuf;
274 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
275 rxq->rx_ring[i].addr = dma_addr;
276 rxq->rx_ring[i].rx.bd_base_info = 0;
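/*
 * Map the Rx buffer length to the hardware BD size type field
 * (512/1024/2048/4096 byte buffers); sizes that do not match one of the
 * specific cases appear to fall back to the 2048-byte type.
 */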
283 hns3_buf_size2type(uint32_t buf_size)
289 bd_size_type = HNS3_BD_SIZE_512_TYPE;
292 bd_size_type = HNS3_BD_SIZE_1024_TYPE;
295 bd_size_type = HNS3_BD_SIZE_4096_TYPE;
298 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
305 hns3_init_rx_queue_hw(struct hns3_rx_queue *rxq)
307 uint32_t rx_buf_len = rxq->rx_buf_len;
308 uint64_t dma_addr = rxq->rx_ring_phys_addr;
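/*
 * The ring base address is a 64-bit IOVA split across two 32-bit registers.
 * The double shift ((dma_addr >> 31) >> 1) extracts the upper 32 bits;
 * presumably it is written this way rather than ">> 32" to stay well defined
 * if the expression were ever evaluated with a 32-bit type.
 */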
310 hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_L_REG, (uint32_t)dma_addr);
311 hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_H_REG,
312 (uint32_t)((dma_addr >> 31) >> 1));
314 hns3_write_dev(rxq, HNS3_RING_RX_BD_LEN_REG,
315 hns3_buf_size2type(rx_buf_len));
316 hns3_write_dev(rxq, HNS3_RING_RX_BD_NUM_REG,
317 HNS3_CFG_DESC_NUM(rxq->nb_rx_desc));
321 hns3_init_tx_queue_hw(struct hns3_tx_queue *txq)
323 uint64_t dma_addr = txq->tx_ring_phys_addr;
325 hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_L_REG, (uint32_t)dma_addr);
326 hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_H_REG,
327 (uint32_t)((dma_addr >> 31) >> 1));
329 hns3_write_dev(txq, HNS3_RING_TX_BD_NUM_REG,
330 HNS3_CFG_DESC_NUM(txq->nb_tx_desc));
334 hns3_update_all_queues_pvid_proc_en(struct hns3_hw *hw)
336 uint16_t nb_rx_q = hw->data->nb_rx_queues;
337 uint16_t nb_tx_q = hw->data->nb_tx_queues;
338 struct hns3_rx_queue *rxq;
339 struct hns3_tx_queue *txq;
343 pvid_en = hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE;
344 for (i = 0; i < hw->cfg_max_queues; i++) {
346 rxq = hw->data->rx_queues[i];
348 rxq->pvid_sw_discard_en = pvid_en;
351 txq = hw->data->tx_queues[i];
353 txq->pvid_sw_shift_en = pvid_en;
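/*
 * Clear the per-direction enable bit (Tx or Rx) of a ring whose software
 * queue structure was never configured, so that turning on the pair-level
 * RCB switch later does not enable an uninitialized hardware ring.
 */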
359 hns3_stop_unused_queue(void *tqp_base, enum hns3_ring_type queue_type)
364 reg_offset = queue_type == HNS3_RING_TYPE_TX ?
365 HNS3_RING_TX_EN_REG : HNS3_RING_RX_EN_REG;
366 reg = hns3_read_reg(tqp_base, reg_offset);
367 reg &= ~BIT(HNS3_RING_EN_B);
368 hns3_write_reg(tqp_base, reg_offset, reg);
372 hns3_enable_all_queues(struct hns3_hw *hw, bool en)
374 uint16_t nb_rx_q = hw->data->nb_rx_queues;
375 uint16_t nb_tx_q = hw->data->nb_tx_queues;
376 struct hns3_rx_queue *rxq;
377 struct hns3_tx_queue *txq;
382 for (i = 0; i < hw->cfg_max_queues; i++) {
383 if (hns3_dev_indep_txrx_supported(hw)) {
384 rxq = i < nb_rx_q ? hw->data->rx_queues[i] : NULL;
385 txq = i < nb_tx_q ? hw->data->tx_queues[i] : NULL;
387 tqp_base = (void *)((char *)hw->io_base +
388 hns3_get_tqp_reg_offset(i));
390 * If queue struct is not initialized, it means the
391 * related HW ring has not been initialized yet.
392			 * So, these queues should be disabled before enabling
393			 * the tqps to avoid a HW exception, since the queues
394 * are enabled by default.
397 hns3_stop_unused_queue(tqp_base,
400 hns3_stop_unused_queue(tqp_base,
403 rxq = i < nb_rx_q ? hw->data->rx_queues[i] :
404 hw->fkq_data.rx_queues[i - nb_rx_q];
406 tqp_base = rxq->io_base;
409		 * This is the master switch that is used to control the enabling
410 * of a pair of Tx and Rx queues. Both the Rx and Tx point to
413 rcb_reg = hns3_read_reg(tqp_base, HNS3_RING_EN_REG);
415 rcb_reg |= BIT(HNS3_RING_EN_B);
417 rcb_reg &= ~BIT(HNS3_RING_EN_B);
418 hns3_write_reg(tqp_base, HNS3_RING_EN_REG, rcb_reg);
423 hns3_enable_txq(struct hns3_tx_queue *txq, bool en)
425 struct hns3_hw *hw = &txq->hns->hw;
428 if (hns3_dev_indep_txrx_supported(hw)) {
429 reg = hns3_read_dev(txq, HNS3_RING_TX_EN_REG);
431 reg |= BIT(HNS3_RING_EN_B);
433 reg &= ~BIT(HNS3_RING_EN_B);
434 hns3_write_dev(txq, HNS3_RING_TX_EN_REG, reg);
440 hns3_enable_rxq(struct hns3_rx_queue *rxq, bool en)
442 struct hns3_hw *hw = &rxq->hns->hw;
445 if (hns3_dev_indep_txrx_supported(hw)) {
446 reg = hns3_read_dev(rxq, HNS3_RING_RX_EN_REG);
448 reg |= BIT(HNS3_RING_EN_B);
450 reg &= ~BIT(HNS3_RING_EN_B);
451 hns3_write_dev(rxq, HNS3_RING_RX_EN_REG, reg);
457 hns3_start_all_txqs(struct rte_eth_dev *dev)
459 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
460 struct hns3_tx_queue *txq;
463 for (i = 0; i < dev->data->nb_tx_queues; i++) {
464 txq = hw->data->tx_queues[i];
466 hns3_err(hw, "Tx queue %u not available or setup.", i);
467 goto start_txqs_fail;
470 * Tx queue is enabled by default. Therefore, the Tx queues
471		 * need to be disabled when deferred_start is set. There is
472 * another master switch used to control the enabling of a pair
473 * of Tx and Rx queues. And the master switch is disabled by
476 if (txq->tx_deferred_start)
477 hns3_enable_txq(txq, false);
479 hns3_enable_txq(txq, true);
484 for (j = 0; j < i; j++) {
485 txq = hw->data->tx_queues[j];
486 hns3_enable_txq(txq, false);
492 hns3_start_all_rxqs(struct rte_eth_dev *dev)
494 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
495 struct hns3_rx_queue *rxq;
498 for (i = 0; i < dev->data->nb_rx_queues; i++) {
499 rxq = hw->data->rx_queues[i];
501 hns3_err(hw, "Rx queue %u not available or setup.", i);
502 goto start_rxqs_fail;
505 * Rx queue is enabled by default. Therefore, the Rx queues
506		 * need to be disabled when deferred_start is set. There is
507 * another master switch used to control the enabling of a pair
508 * of Tx and Rx queues. And the master switch is disabled by
511 if (rxq->rx_deferred_start)
512 hns3_enable_rxq(rxq, false);
514 hns3_enable_rxq(rxq, true);
519 for (j = 0; j < i; j++) {
520 rxq = hw->data->rx_queues[j];
521 hns3_enable_rxq(rxq, false);
527 hns3_restore_tqp_enable_state(struct hns3_hw *hw)
529 struct hns3_rx_queue *rxq;
530 struct hns3_tx_queue *txq;
533 for (i = 0; i < hw->data->nb_rx_queues; i++) {
534 rxq = hw->data->rx_queues[i];
536 hns3_enable_rxq(rxq, rxq->enabled);
539 for (i = 0; i < hw->data->nb_tx_queues; i++) {
540 txq = hw->data->tx_queues[i];
542 hns3_enable_txq(txq, txq->enabled);
547 hns3_stop_all_txqs(struct rte_eth_dev *dev)
549 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
550 struct hns3_tx_queue *txq;
553 for (i = 0; i < dev->data->nb_tx_queues; i++) {
554 txq = hw->data->tx_queues[i];
557 hns3_enable_txq(txq, false);
562 hns3_tqp_enable(struct hns3_hw *hw, uint16_t queue_id, bool enable)
564 struct hns3_cfg_com_tqp_queue_cmd *req;
565 struct hns3_cmd_desc desc;
568 req = (struct hns3_cfg_com_tqp_queue_cmd *)desc.data;
570 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_COM_TQP_QUEUE, false);
571 req->tqp_id = rte_cpu_to_le_16(queue_id);
573 hns3_set_bit(req->enable, HNS3_TQP_ENABLE_B, enable ? 1 : 0);
575 ret = hns3_cmd_send(hw, &desc, 1);
577 hns3_err(hw, "TQP enable fail, ret = %d", ret);
583 hns3_send_reset_tqp_cmd(struct hns3_hw *hw, uint16_t queue_id, bool enable)
585 struct hns3_reset_tqp_queue_cmd *req;
586 struct hns3_cmd_desc desc;
589 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, false);
591 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
592 req->tqp_id = rte_cpu_to_le_16(queue_id);
593 hns3_set_bit(req->reset_req, HNS3_TQP_RESET_B, enable ? 1 : 0);
594 ret = hns3_cmd_send(hw, &desc, 1);
596 hns3_err(hw, "send tqp reset cmd error, queue_id = %u, "
597 "ret = %d", queue_id, ret);
603 hns3_get_tqp_reset_status(struct hns3_hw *hw, uint16_t queue_id,
604 uint8_t *reset_status)
606 struct hns3_reset_tqp_queue_cmd *req;
607 struct hns3_cmd_desc desc;
610 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, true);
612 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
613 req->tqp_id = rte_cpu_to_le_16(queue_id);
615 ret = hns3_cmd_send(hw, &desc, 1);
617 hns3_err(hw, "get tqp reset status error, queue_id = %u, "
618 "ret = %d.", queue_id, ret);
621 *reset_status = hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B);
626 hns3pf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
628 #define HNS3_TQP_RESET_TRY_MS 200
629 uint16_t wait_time = 0;
630 uint8_t reset_status;
634	 * In the current version, VF is not supported when the PF is driven by the
635	 * DPDK driver: all task queue pairs are mapped to the PF function, so a PF
636	 * queue id is equal to the global queue id within the PF's range.
638 ret = hns3_send_reset_tqp_cmd(hw, queue_id, true);
640 hns3_err(hw, "Send reset tqp cmd fail, ret = %d", ret);
645 /* Wait for tqp hw reset */
646 rte_delay_ms(HNS3_POLL_RESPONE_MS);
647 wait_time += HNS3_POLL_RESPONE_MS;
648 ret = hns3_get_tqp_reset_status(hw, queue_id, &reset_status);
654 } while (wait_time < HNS3_TQP_RESET_TRY_MS);
658 hns3_err(hw, "reset tqp timeout, queue_id = %u, ret = %d",
663 ret = hns3_send_reset_tqp_cmd(hw, queue_id, false);
665 hns3_err(hw, "Deassert the soft reset fail, ret = %d", ret);
670 hns3_send_reset_tqp_cmd(hw, queue_id, false);
675 hns3vf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
680 memcpy(msg_data, &queue_id, sizeof(uint16_t));
682 ret = hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data,
683 sizeof(msg_data), true, NULL, 0);
685 hns3_err(hw, "fail to reset tqp, queue_id = %u, ret = %d.",
691 hns3_reset_rcb_cmd(struct hns3_hw *hw, uint8_t *reset_status)
693 struct hns3_reset_cmd *req;
694 struct hns3_cmd_desc desc;
697 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_RST_TRIGGER, false);
698 req = (struct hns3_reset_cmd *)desc.data;
699 hns3_set_bit(req->mac_func_reset, HNS3_CFG_RESET_RCB_B, 1);
702 * The start qid should be the global qid of the first tqp of the
703	 * function which should be reset in this port. Since our PF does not
704	 * support taking over VFs, we only need to reset function 0,
705 * and its start qid is always 0.
707 req->fun_reset_rcb_vqid_start = rte_cpu_to_le_16(0);
708 req->fun_reset_rcb_vqid_num = rte_cpu_to_le_16(hw->cfg_max_queues);
710 ret = hns3_cmd_send(hw, &desc, 1);
712 hns3_err(hw, "fail to send rcb reset cmd, ret = %d.", ret);
716 *reset_status = req->fun_reset_rcb_return_status;
721 hns3pf_reset_all_tqps(struct hns3_hw *hw)
723 #define HNS3_RESET_RCB_NOT_SUPPORT 0U
724 #define HNS3_RESET_ALL_TQP_SUCCESS 1U
725 uint8_t reset_status;
729 ret = hns3_reset_rcb_cmd(hw, &reset_status);
734 * If the firmware version is low, it may not support the rcb reset
735	 * which resets all the tqps at one time. In this case, we should
736 * reset tqps one by one.
738 if (reset_status == HNS3_RESET_RCB_NOT_SUPPORT) {
739 for (i = 0; i < hw->cfg_max_queues; i++) {
740 ret = hns3pf_reset_tqp(hw, i);
743 "fail to reset tqp, queue_id = %d, ret = %d.",
748 } else if (reset_status != HNS3_RESET_ALL_TQP_SUCCESS) {
749 hns3_err(hw, "fail to reset all tqps, reset_status = %u.",
758 hns3vf_reset_all_tqps(struct hns3_hw *hw)
760 #define HNS3VF_RESET_ALL_TQP_DONE 1U
761 uint8_t reset_status;
766 memset(msg_data, 0, sizeof(uint16_t));
767 ret = hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data,
768 sizeof(msg_data), true, &reset_status,
769 sizeof(reset_status));
771 hns3_err(hw, "fail to send rcb reset mbx, ret = %d.", ret);
775 if (reset_status == HNS3VF_RESET_ALL_TQP_DONE)
779 * If the firmware version or kernel PF version is low, it may not
780	 * support the rcb reset, which resets all the tqps at one time.
781 * In this case, we should reset tqps one by one.
783 for (i = 1; i < hw->cfg_max_queues; i++) {
784 ret = hns3vf_reset_tqp(hw, i);
793 hns3_reset_all_tqps(struct hns3_adapter *hns)
795 struct hns3_hw *hw = &hns->hw;
798 /* Disable all queues before reset all queues */
799 for (i = 0; i < hw->cfg_max_queues; i++) {
800 ret = hns3_tqp_enable(hw, i, false);
803 "fail to disable tqps before tqps reset, ret = %d.",
810 return hns3vf_reset_all_tqps(hw);
812 return hns3pf_reset_all_tqps(hw);
816 hns3_send_reset_queue_cmd(struct hns3_hw *hw, uint16_t queue_id,
817 enum hns3_ring_type queue_type, bool enable)
819 struct hns3_reset_tqp_queue_cmd *req;
820 struct hns3_cmd_desc desc;
824 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE_INDEP, false);
826 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
827 req->tqp_id = rte_cpu_to_le_16(queue_id);
828 queue_direction = queue_type == HNS3_RING_TYPE_TX ? 0 : 1;
829 req->queue_direction = rte_cpu_to_le_16(queue_direction);
830 hns3_set_bit(req->reset_req, HNS3_TQP_RESET_B, enable ? 1 : 0);
832 ret = hns3_cmd_send(hw, &desc, 1);
834 hns3_err(hw, "send queue reset cmd error, queue_id = %u, "
835 "queue_type = %s, ret = %d.", queue_id,
836 queue_type == HNS3_RING_TYPE_TX ? "Tx" : "Rx", ret);
841 hns3_get_queue_reset_status(struct hns3_hw *hw, uint16_t queue_id,
842 enum hns3_ring_type queue_type,
843 uint8_t *reset_status)
845 struct hns3_reset_tqp_queue_cmd *req;
846 struct hns3_cmd_desc desc;
850 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE_INDEP, true);
852 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
853 req->tqp_id = rte_cpu_to_le_16(queue_id);
854 queue_direction = queue_type == HNS3_RING_TYPE_TX ? 0 : 1;
855 req->queue_direction = rte_cpu_to_le_16(queue_direction);
857 ret = hns3_cmd_send(hw, &desc, 1);
859 hns3_err(hw, "get queue reset status error, queue_id = %u "
860 "queue_type = %s, ret = %d.", queue_id,
861 queue_type == HNS3_RING_TYPE_TX ? "Tx" : "Rx", ret);
865 *reset_status = hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B);
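/*
 * Reset a single Tx or Rx ring through the independent per-queue reset
 * command: disable the ring, assert the reset, poll the ready status for up
 * to HNS3_QUEUE_RESET_TRY_MS, then deassert the reset. The deassert is also
 * attempted on the failure path.
 */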
870 hns3_reset_queue(struct hns3_hw *hw, uint16_t queue_id,
871 enum hns3_ring_type queue_type)
873 #define HNS3_QUEUE_RESET_TRY_MS 200
874 struct hns3_tx_queue *txq;
875 struct hns3_rx_queue *rxq;
876 uint32_t reset_wait_times;
877 uint32_t max_wait_times;
878 uint8_t reset_status;
881 if (queue_type == HNS3_RING_TYPE_TX) {
882 txq = hw->data->tx_queues[queue_id];
883 hns3_enable_txq(txq, false);
885 rxq = hw->data->rx_queues[queue_id];
886 hns3_enable_rxq(rxq, false);
889 ret = hns3_send_reset_queue_cmd(hw, queue_id, queue_type, true);
891 hns3_err(hw, "send reset queue cmd fail, ret = %d.", ret);
895 reset_wait_times = 0;
896 max_wait_times = HNS3_QUEUE_RESET_TRY_MS / HNS3_POLL_RESPONE_MS;
897 while (reset_wait_times < max_wait_times) {
898 /* Wait for queue hw reset */
899 rte_delay_ms(HNS3_POLL_RESPONE_MS);
900 ret = hns3_get_queue_reset_status(hw, queue_id,
901 queue_type, &reset_status);
903 goto queue_reset_fail;
911 hns3_err(hw, "reset queue timeout, queue_id = %u, "
912 "queue_type = %s", queue_id,
913 queue_type == HNS3_RING_TYPE_TX ? "Tx" : "Rx");
915 goto queue_reset_fail;
918 ret = hns3_send_reset_queue_cmd(hw, queue_id, queue_type, false);
920 hns3_err(hw, "deassert queue reset fail, ret = %d.", ret);
925 hns3_send_reset_queue_cmd(hw, queue_id, queue_type, false);
930 hns3_get_tqp_intr_reg_offset(uint16_t tqp_intr_id)
934	/* An extended register offset is needed for interrupt ids >= HNS3_MIN_EXT_TQP_INTR_ID */
935 if (tqp_intr_id < HNS3_MIN_EXT_TQP_INTR_ID)
936 reg_offset = HNS3_TQP_INTR_REG_BASE +
937 tqp_intr_id * HNS3_TQP_INTR_LOW_ORDER_OFFSET;
939 reg_offset = HNS3_TQP_INTR_EXT_REG_BASE +
940 tqp_intr_id / HNS3_MIN_EXT_TQP_INTR_ID *
941 HNS3_TQP_INTR_HIGH_ORDER_OFFSET +
942 tqp_intr_id % HNS3_MIN_EXT_TQP_INTR_ID *
943 HNS3_TQP_INTR_LOW_ORDER_OFFSET;
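/*
 * Per-queue interrupt coalescing controls. The GL registers hold a time-based
 * gap between interrupts (in usecs, or directly in 1us hardware units
 * depending on hw->intr.gl_unit), the RL register caps the overall interrupt
 * rate, and the QL registers hold a descriptor-quantity threshold (only
 * written on hardware reporting int_ql_max != 0). The expansions
 * "gap limiting" / "rate limiting" / "quantity limiting" are given here as a
 * gloss of the abbreviations, not as authoritative hardware terminology.
 */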
949 hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
950 uint8_t gl_idx, uint16_t gl_value)
952 uint32_t offset[] = {HNS3_TQP_INTR_GL0_REG,
953 HNS3_TQP_INTR_GL1_REG,
954 HNS3_TQP_INTR_GL2_REG};
955 uint32_t addr, value;
957 if (gl_idx >= RTE_DIM(offset) || gl_value > HNS3_TQP_INTR_GL_MAX)
960 addr = offset[gl_idx] + hns3_get_tqp_intr_reg_offset(queue_id);
961 if (hw->intr.gl_unit == HNS3_INTR_COALESCE_GL_UINT_1US)
962 value = gl_value | HNS3_TQP_INTR_GL_UNIT_1US;
964 value = HNS3_GL_USEC_TO_REG(gl_value);
966 hns3_write_dev(hw, addr, value);
970 hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id, uint16_t rl_value)
972 uint32_t addr, value;
974 if (rl_value > HNS3_TQP_INTR_RL_MAX)
977 addr = HNS3_TQP_INTR_RL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
978 value = HNS3_RL_USEC_TO_REG(rl_value);
980 value |= HNS3_TQP_INTR_RL_ENABLE_MASK;
982 hns3_write_dev(hw, addr, value);
986 hns3_set_queue_intr_ql(struct hns3_hw *hw, uint16_t queue_id, uint16_t ql_value)
991	 * int_ql_max == 0 means the hardware does not support QL, and
992	 * configuring the QL registers is not permitted if QL is not supported,
995 if (hw->intr.int_ql_max == HNS3_INTR_QL_NONE)
998 addr = HNS3_TQP_INTR_TX_QL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
999 hns3_write_dev(hw, addr, ql_value);
1001 addr = HNS3_TQP_INTR_RX_QL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
1002 hns3_write_dev(hw, addr, ql_value);
1006 hns3_queue_intr_enable(struct hns3_hw *hw, uint16_t queue_id, bool en)
1008 uint32_t addr, value;
1010 addr = HNS3_TQP_INTR_CTRL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
1013 hns3_write_dev(hw, addr, value);
1017 * Enable all Rx queue interrupts when in interrupt Rx mode.
1018 * This API is called before enabling queue Rx & Tx (in normal start or reset
1019 * recovery scenarios); it is used to fix the case where the hardware Rx queue interrupt enable was cleared
1023 hns3_dev_all_rx_queue_intr_enable(struct hns3_hw *hw, bool en)
1025 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
1026 uint16_t nb_rx_q = hw->data->nb_rx_queues;
1029 if (dev->data->dev_conf.intr_conf.rxq == 0)
1032 for (i = 0; i < nb_rx_q; i++)
1033 hns3_queue_intr_enable(hw, i, en);
1037 hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
1039 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1040 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1041 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1043 if (dev->data->dev_conf.intr_conf.rxq == 0)
1046 hns3_queue_intr_enable(hw, queue_id, true);
1048 return rte_intr_ack(intr_handle);
1052 hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
1054 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1056 if (dev->data->dev_conf.intr_conf.rxq == 0)
1059 hns3_queue_intr_enable(hw, queue_id, false);
1065 hns3_init_rxq(struct hns3_adapter *hns, uint16_t idx)
1067 struct hns3_hw *hw = &hns->hw;
1068 struct hns3_rx_queue *rxq;
1071 PMD_INIT_FUNC_TRACE();
1073 rxq = (struct hns3_rx_queue *)hw->data->rx_queues[idx];
1074 ret = hns3_alloc_rx_queue_mbufs(hw, rxq);
1076 hns3_err(hw, "fail to alloc mbuf for Rx queue %u, ret = %d.",
1081 rxq->next_to_use = 0;
1082 rxq->rx_rearm_start = 0;
1083 rxq->rx_free_hold = 0;
1084 rxq->rx_rearm_nb = 0;
1085 rxq->pkt_first_seg = NULL;
1086 rxq->pkt_last_seg = NULL;
1087 hns3_init_rx_queue_hw(rxq);
1088 hns3_rxq_vec_setup(rxq);
1094 hns3_init_fake_rxq(struct hns3_adapter *hns, uint16_t idx)
1096 struct hns3_hw *hw = &hns->hw;
1097 struct hns3_rx_queue *rxq;
1099 rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[idx];
1100 rxq->next_to_use = 0;
1101 rxq->rx_free_hold = 0;
1102 rxq->rx_rearm_start = 0;
1103 rxq->rx_rearm_nb = 0;
1104 hns3_init_rx_queue_hw(rxq);
1108 hns3_init_txq(struct hns3_tx_queue *txq)
1110 struct hns3_desc *desc;
1114 desc = txq->tx_ring;
1115 for (i = 0; i < txq->nb_tx_desc; i++) {
1116 desc->tx.tp_fe_sc_vld_ra_ri = 0;
1120 txq->next_to_use = 0;
1121 txq->next_to_clean = 0;
1122 txq->tx_bd_ready = txq->nb_tx_desc - 1;
1123 hns3_init_tx_queue_hw(txq);
1127 hns3_init_tx_ring_tc(struct hns3_adapter *hns)
1129 struct hns3_hw *hw = &hns->hw;
1130 struct hns3_tx_queue *txq;
1133 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
1134 struct hns3_tc_queue_info *tc_queue = &hw->tc_queue[i];
1137 if (!tc_queue->enable)
1140 for (j = 0; j < tc_queue->tqp_count; j++) {
1141 num = tc_queue->tqp_offset + j;
1142 txq = (struct hns3_tx_queue *)hw->data->tx_queues[num];
1146 hns3_write_dev(txq, HNS3_RING_TX_TC_REG, tc_queue->tc);
1152 hns3_init_rx_queues(struct hns3_adapter *hns)
1154 struct hns3_hw *hw = &hns->hw;
1155 struct hns3_rx_queue *rxq;
1159 /* Initialize RSS for queues */
1160 ret = hns3_config_rss(hns);
1162 hns3_err(hw, "failed to configure rss, ret = %d.", ret);
1166 for (i = 0; i < hw->data->nb_rx_queues; i++) {
1167 rxq = (struct hns3_rx_queue *)hw->data->rx_queues[i];
1169 hns3_err(hw, "Rx queue %u not available or setup.", i);
1173 if (rxq->rx_deferred_start)
1176 ret = hns3_init_rxq(hns, i);
1178 hns3_err(hw, "failed to init Rx queue %u, ret = %d.", i,
1184 for (i = 0; i < hw->fkq_data.nb_fake_rx_queues; i++)
1185 hns3_init_fake_rxq(hns, i);
1190 for (j = 0; j < i; j++) {
1191 rxq = (struct hns3_rx_queue *)hw->data->rx_queues[j];
1192 hns3_rx_queue_release_mbufs(rxq);
1199 hns3_init_tx_queues(struct hns3_adapter *hns)
1201 struct hns3_hw *hw = &hns->hw;
1202 struct hns3_tx_queue *txq;
1205 for (i = 0; i < hw->data->nb_tx_queues; i++) {
1206 txq = (struct hns3_tx_queue *)hw->data->tx_queues[i];
1208 hns3_err(hw, "Tx queue %u not available or setup.", i);
1212 if (txq->tx_deferred_start)
1217 for (i = 0; i < hw->fkq_data.nb_fake_tx_queues; i++) {
1218 txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[i];
1221 hns3_init_tx_ring_tc(hns);
1228 * Note: just init and set up queues; the tqps are not enabled here.
1231 hns3_init_queues(struct hns3_adapter *hns, bool reset_queue)
1233 struct hns3_hw *hw = &hns->hw;
1237 ret = hns3_reset_all_tqps(hns);
1239 hns3_err(hw, "failed to reset all queues, ret = %d.",
1245 ret = hns3_init_rx_queues(hns);
1247 hns3_err(hw, "failed to init rx queues, ret = %d.", ret);
1251 ret = hns3_init_tx_queues(hns);
1253 hns3_dev_release_mbufs(hns);
1254 hns3_err(hw, "failed to init tx queues, ret = %d.", ret);
1261 hns3_start_tqps(struct hns3_hw *hw)
1263 struct hns3_tx_queue *txq;
1264 struct hns3_rx_queue *rxq;
1267 hns3_enable_all_queues(hw, true);
1269 for (i = 0; i < hw->data->nb_tx_queues; i++) {
1270 txq = hw->data->tx_queues[i];
1272 hw->data->tx_queue_state[i] =
1273 RTE_ETH_QUEUE_STATE_STARTED;
1276 for (i = 0; i < hw->data->nb_rx_queues; i++) {
1277 rxq = hw->data->rx_queues[i];
1279 hw->data->rx_queue_state[i] =
1280 RTE_ETH_QUEUE_STATE_STARTED;
1285 hns3_stop_tqps(struct hns3_hw *hw)
1289 hns3_enable_all_queues(hw, false);
1291 for (i = 0; i < hw->data->nb_tx_queues; i++)
1292 hw->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
1294 for (i = 0; i < hw->data->nb_rx_queues; i++)
1295 hw->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
1299 * Iterate over all Rx queues, and call the callback() function for each Rx
1303 * The target eth dev.
1304 * @param[in] callback
1305 * The function to call for each queue.
1306 *   If the callback function returns nonzero, iteration stops and its value is returned.
1308 * The arguments to provide the callback function with.
1311 * 0 on success, otherwise with errno set.
1314 hns3_rxq_iterate(struct rte_eth_dev *dev,
1315 int (*callback)(struct hns3_rx_queue *, void *), void *arg)
1320 if (dev->data->rx_queues == NULL)
1323 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1324 ret = callback(dev->data->rx_queues[i], arg);
1333 hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev,
1334 struct hns3_queue_info *q_info)
1336 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1337 const struct rte_memzone *rx_mz;
1338 struct hns3_rx_queue *rxq;
1339 unsigned int rx_desc;
1341 rxq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_rx_queue),
1342 RTE_CACHE_LINE_SIZE, q_info->socket_id);
1344 hns3_err(hw, "Failed to allocate memory for No.%u rx ring!",
1349 /* Allocate rx ring hardware descriptors. */
1350 rxq->queue_id = q_info->idx;
1351 rxq->nb_rx_desc = q_info->nb_desc;
1354	 * Allocate a little more memory because rx vector functions
1355 * don't check boundaries each time.
1357 rx_desc = (rxq->nb_rx_desc + HNS3_DEFAULT_RX_BURST) *
1358 sizeof(struct hns3_desc);
1359 rx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
1360 rx_desc, HNS3_RING_BASE_ALIGN,
1362 if (rx_mz == NULL) {
1363 hns3_err(hw, "Failed to reserve DMA memory for No.%u rx ring!",
1365 hns3_rx_queue_release(rxq);
1369 rxq->rx_ring = (struct hns3_desc *)rx_mz->addr;
1370 rxq->rx_ring_phys_addr = rx_mz->iova;
1372 hns3_dbg(hw, "No.%u rx descriptors iova 0x%" PRIx64, q_info->idx,
1373 rxq->rx_ring_phys_addr);
1379 hns3_fake_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
1380 uint16_t nb_desc, unsigned int socket_id)
1382 struct hns3_adapter *hns = dev->data->dev_private;
1383 struct hns3_hw *hw = &hns->hw;
1384 struct hns3_queue_info q_info;
1385 struct hns3_rx_queue *rxq;
1388 if (hw->fkq_data.rx_queues[idx]) {
1389 hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);
1390 hw->fkq_data.rx_queues[idx] = NULL;
1394 q_info.socket_id = socket_id;
1395 q_info.nb_desc = nb_desc;
1396 q_info.type = "hns3 fake RX queue";
1397 q_info.ring_name = "rx_fake_ring";
1398 rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
1400 hns3_err(hw, "Failed to setup No.%u fake rx ring.", idx);
1404	/* No need to allocate sw_ring, because upper-layer applications do not use it */
1405 rxq->sw_ring = NULL;
1408 rxq->rx_deferred_start = false;
1409 rxq->port_id = dev->data->port_id;
1410 rxq->configured = true;
1411 nb_rx_q = dev->data->nb_rx_queues;
1412 rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
1413 (nb_rx_q + idx) * HNS3_TQP_REG_SIZE);
1414 rxq->rx_buf_len = HNS3_MIN_BD_BUF_SIZE;
1416 rte_spinlock_lock(&hw->lock);
1417 hw->fkq_data.rx_queues[idx] = rxq;
1418 rte_spinlock_unlock(&hw->lock);
1424 hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev,
1425 struct hns3_queue_info *q_info)
1427 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1428 const struct rte_memzone *tx_mz;
1429 struct hns3_tx_queue *txq;
1430 struct hns3_desc *desc;
1431 unsigned int tx_desc;
1434 txq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_tx_queue),
1435 RTE_CACHE_LINE_SIZE, q_info->socket_id);
1437 hns3_err(hw, "Failed to allocate memory for No.%u tx ring!",
1442 /* Allocate tx ring hardware descriptors. */
1443 txq->queue_id = q_info->idx;
1444 txq->nb_tx_desc = q_info->nb_desc;
1445 tx_desc = txq->nb_tx_desc * sizeof(struct hns3_desc);
1446 tx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
1447 tx_desc, HNS3_RING_BASE_ALIGN,
1449 if (tx_mz == NULL) {
1450 hns3_err(hw, "Failed to reserve DMA memory for No.%u tx ring!",
1452 hns3_tx_queue_release(txq);
1456 txq->tx_ring = (struct hns3_desc *)tx_mz->addr;
1457 txq->tx_ring_phys_addr = tx_mz->iova;
1459 hns3_dbg(hw, "No.%u tx descriptors iova 0x%" PRIx64, q_info->idx,
1460 txq->tx_ring_phys_addr);
1463 desc = txq->tx_ring;
1464 for (i = 0; i < txq->nb_tx_desc; i++) {
1465 desc->tx.tp_fe_sc_vld_ra_ri = 0;
1473 hns3_fake_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
1474 uint16_t nb_desc, unsigned int socket_id)
1476 struct hns3_adapter *hns = dev->data->dev_private;
1477 struct hns3_hw *hw = &hns->hw;
1478 struct hns3_queue_info q_info;
1479 struct hns3_tx_queue *txq;
1482 if (hw->fkq_data.tx_queues[idx] != NULL) {
1483 hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);
1484 hw->fkq_data.tx_queues[idx] = NULL;
1488 q_info.socket_id = socket_id;
1489 q_info.nb_desc = nb_desc;
1490 q_info.type = "hns3 fake TX queue";
1491 q_info.ring_name = "tx_fake_ring";
1492 txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
1494 hns3_err(hw, "Failed to setup No.%u fake tx ring.", idx);
1498	/* No need to allocate sw_ring, because upper-layer applications do not use it */
1499 txq->sw_ring = NULL;
1503 txq->tx_deferred_start = false;
1504 txq->port_id = dev->data->port_id;
1505 txq->configured = true;
1506 nb_tx_q = dev->data->nb_tx_queues;
1507 txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
1508 (nb_tx_q + idx) * HNS3_TQP_REG_SIZE);
1510 rte_spinlock_lock(&hw->lock);
1511 hw->fkq_data.tx_queues[idx] = txq;
1512 rte_spinlock_unlock(&hw->lock);
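/*
 * Resize the array of fake Rx queue pointers to nb_queues: allocate it on
 * first use, realloc (and zero the new tail) when the count changes, release
 * the queues that no longer fit, and free the array entirely when nb_queues
 * is zero.
 */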
1518 hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
1520 uint16_t old_nb_queues = hw->fkq_data.nb_fake_rx_queues;
1524 if (hw->fkq_data.rx_queues == NULL && nb_queues != 0) {
1525 /* first time configuration */
1527 size = sizeof(hw->fkq_data.rx_queues[0]) * nb_queues;
1528 hw->fkq_data.rx_queues = rte_zmalloc("fake_rx_queues", size,
1529 RTE_CACHE_LINE_SIZE);
1530 if (hw->fkq_data.rx_queues == NULL) {
1531 hw->fkq_data.nb_fake_rx_queues = 0;
1534 } else if (hw->fkq_data.rx_queues != NULL && nb_queues != 0) {
1536 rxq = hw->fkq_data.rx_queues;
1537 for (i = nb_queues; i < old_nb_queues; i++)
1538 hns3_dev_rx_queue_release(rxq[i]);
1540 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
1541 RTE_CACHE_LINE_SIZE);
1544 if (nb_queues > old_nb_queues) {
1545 uint16_t new_qs = nb_queues - old_nb_queues;
1546 memset(rxq + old_nb_queues, 0, sizeof(rxq[0]) * new_qs);
1549 hw->fkq_data.rx_queues = rxq;
1550 } else if (hw->fkq_data.rx_queues != NULL && nb_queues == 0) {
1551 rxq = hw->fkq_data.rx_queues;
1552 for (i = nb_queues; i < old_nb_queues; i++)
1553 hns3_dev_rx_queue_release(rxq[i]);
1555 rte_free(hw->fkq_data.rx_queues);
1556 hw->fkq_data.rx_queues = NULL;
1559 hw->fkq_data.nb_fake_rx_queues = nb_queues;
1565 hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
1567 uint16_t old_nb_queues = hw->fkq_data.nb_fake_tx_queues;
1571 if (hw->fkq_data.tx_queues == NULL && nb_queues != 0) {
1572 /* first time configuration */
1574 size = sizeof(hw->fkq_data.tx_queues[0]) * nb_queues;
1575 hw->fkq_data.tx_queues = rte_zmalloc("fake_tx_queues", size,
1576 RTE_CACHE_LINE_SIZE);
1577 if (hw->fkq_data.tx_queues == NULL) {
1578 hw->fkq_data.nb_fake_tx_queues = 0;
1581 } else if (hw->fkq_data.tx_queues != NULL && nb_queues != 0) {
1583 txq = hw->fkq_data.tx_queues;
1584 for (i = nb_queues; i < old_nb_queues; i++)
1585 hns3_dev_tx_queue_release(txq[i]);
1586 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
1587 RTE_CACHE_LINE_SIZE);
1590 if (nb_queues > old_nb_queues) {
1591 uint16_t new_qs = nb_queues - old_nb_queues;
1592 memset(txq + old_nb_queues, 0, sizeof(txq[0]) * new_qs);
1595 hw->fkq_data.tx_queues = txq;
1596 } else if (hw->fkq_data.tx_queues != NULL && nb_queues == 0) {
1597 txq = hw->fkq_data.tx_queues;
1598 for (i = nb_queues; i < old_nb_queues; i++)
1599 hns3_dev_tx_queue_release(txq[i]);
1601 rte_free(hw->fkq_data.tx_queues);
1602 hw->fkq_data.tx_queues = NULL;
1604 hw->fkq_data.nb_fake_tx_queues = nb_queues;
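/*
 * On hardware without independent Tx/Rx queue support, the numbers of Tx and
 * Rx rings handed to the hardware must both reach cfg_max_queues. The driver
 * therefore pads the shorter side with minimal "fake" queues that are never
 * exposed to the application; devices with independent Tx/Rx support skip
 * this entirely.
 */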
1610 hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
1613 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1614 uint16_t rx_need_add_nb_q;
1615 uint16_t tx_need_add_nb_q;
1620 if (hns3_dev_indep_txrx_supported(hw))
1623 /* Setup new number of fake RX/TX queues and reconfigure device. */
1624 rx_need_add_nb_q = hw->cfg_max_queues - nb_rx_q;
1625 tx_need_add_nb_q = hw->cfg_max_queues - nb_tx_q;
1626 ret = hns3_fake_rx_queue_config(hw, rx_need_add_nb_q);
1628 hns3_err(hw, "Fail to configure fake rx queues: %d", ret);
1632 ret = hns3_fake_tx_queue_config(hw, tx_need_add_nb_q);
1634 hns3_err(hw, "Fail to configure fake rx queues: %d", ret);
1635 goto cfg_fake_tx_q_fail;
1638 /* Allocate and set up fake RX queue per Ethernet port. */
1639 port_id = hw->data->port_id;
1640 for (q = 0; q < rx_need_add_nb_q; q++) {
1641 ret = hns3_fake_rx_queue_setup(dev, q, HNS3_MIN_RING_DESC,
1642 rte_eth_dev_socket_id(port_id));
1644 goto setup_fake_rx_q_fail;
1647 /* Allocate and set up fake TX queue per Ethernet port. */
1648 for (q = 0; q < tx_need_add_nb_q; q++) {
1649 ret = hns3_fake_tx_queue_setup(dev, q, HNS3_MIN_RING_DESC,
1650 rte_eth_dev_socket_id(port_id));
1652 goto setup_fake_tx_q_fail;
1657 setup_fake_tx_q_fail:
1658 setup_fake_rx_q_fail:
1659 (void)hns3_fake_tx_queue_config(hw, 0);
1661 (void)hns3_fake_rx_queue_config(hw, 0);
1667 hns3_dev_release_mbufs(struct hns3_adapter *hns)
1669 struct rte_eth_dev_data *dev_data = hns->hw.data;
1670 struct hns3_rx_queue *rxq;
1671 struct hns3_tx_queue *txq;
1674 if (dev_data->rx_queues)
1675 for (i = 0; i < dev_data->nb_rx_queues; i++) {
1676 rxq = dev_data->rx_queues[i];
1679 hns3_rx_queue_release_mbufs(rxq);
1682 if (dev_data->tx_queues)
1683 for (i = 0; i < dev_data->nb_tx_queues; i++) {
1684 txq = dev_data->tx_queues[i];
1687 hns3_tx_queue_release_mbufs(txq);
1692 hns3_rx_buf_len_calc(struct rte_mempool *mp, uint16_t *rx_buf_len)
1694 uint16_t vld_buf_size;
1695 uint16_t num_hw_specs;
1699	 * The hns3 network engine only supports 4 typical buffer size specifications,
1700	 * and different buffer sizes affect the max packet_len and the max
1701	 * number of segments when HW GRO is turned on in the receive side. The
1702 * relationship between them is as follows:
1703 * rx_buf_size | max_gro_pkt_len | max_gro_nb_seg
1704 * ---------------------|-------------------|----------------
1705 * HNS3_4K_BD_BUF_SIZE | 60KB | 15
1706 * HNS3_2K_BD_BUF_SIZE | 62KB | 31
1707 * HNS3_1K_BD_BUF_SIZE | 63KB | 63
1708 * HNS3_512_BD_BUF_SIZE | 31.5KB | 63
1710 static const uint16_t hw_rx_buf_size[] = {
1711 HNS3_4K_BD_BUF_SIZE,
1712 HNS3_2K_BD_BUF_SIZE,
1713 HNS3_1K_BD_BUF_SIZE,
1714 HNS3_512_BD_BUF_SIZE
1717 vld_buf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
1718 RTE_PKTMBUF_HEADROOM);
1719 if (vld_buf_size < HNS3_MIN_BD_BUF_SIZE)
1722 num_hw_specs = RTE_DIM(hw_rx_buf_size);
1723 for (i = 0; i < num_hw_specs; i++) {
1724 if (vld_buf_size >= hw_rx_buf_size[i]) {
1725 *rx_buf_len = hw_rx_buf_size[i];
1733 hns3_rxq_conf_runtime_check(struct hns3_hw *hw, uint16_t buf_size,
1736 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
1737 struct rte_eth_rxmode *rxmode = &hw->data->dev_conf.rxmode;
1738 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
1739 uint16_t min_vec_bds;
1742	 * The HNS3 hardware network engine enables scattered Rx by default. If the
1743	 * driver does not work in scattered mode, packets greater than buf_size
1744	 * but smaller than max_rx_pkt_len will still be distributed to multiple BDs,
1745	 * and the driver cannot handle this situation.
1747 if (!hw->data->scattered_rx && rxmode->max_rx_pkt_len > buf_size) {
1748 hns3_err(hw, "max_rx_pkt_len is not allowed to be set greater "
1749 "than rx_buf_len if scattered is off.");
1753 if (pkt_burst == hns3_recv_pkts_vec) {
1754 min_vec_bds = HNS3_DEFAULT_RXQ_REARM_THRESH +
1755 HNS3_DEFAULT_RX_BURST;
1756 if (nb_desc < min_vec_bds ||
1757 nb_desc % HNS3_DEFAULT_RXQ_REARM_THRESH) {
1758 hns3_err(hw, "if Rx burst mode is vector, "
1759 "number of descriptor is required to be "
1760 "bigger than min vector bds:%u, and could be "
1761 "divided by rxq rearm thresh:%u.",
1762 min_vec_bds, HNS3_DEFAULT_RXQ_REARM_THRESH);
1770 hns3_rx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_rxconf *conf,
1771 struct rte_mempool *mp, uint16_t nb_desc,
1776 if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||
1777 nb_desc % HNS3_ALIGN_RING_DESC) {
1778 hns3_err(hw, "Number (%u) of rx descriptors is invalid",
1783 if (conf->rx_drop_en == 0)
1784 hns3_warn(hw, "if no descriptors available, packets are always "
1785 "dropped and rx_drop_en (1) is fixed on");
1787 if (hns3_rx_buf_len_calc(mp, buf_size)) {
1788 hns3_err(hw, "rxq mbufs' data room size (%u) is not enough! "
1789 "minimal data room size (%u).",
1790 rte_pktmbuf_data_room_size(mp),
1791 HNS3_MIN_BD_BUF_SIZE + RTE_PKTMBUF_HEADROOM);
1795 if (hw->data->dev_started) {
1796 ret = hns3_rxq_conf_runtime_check(hw, *buf_size, nb_desc);
1798 hns3_err(hw, "Rx queue runtime setup fail.");
1807 hns3_get_tqp_reg_offset(uint16_t queue_id)
1809 uint32_t reg_offset;
1811	/* An extended register offset is needed for queue ids >= HNS3_MIN_EXTEND_QUEUE_ID */
1812 if (queue_id < HNS3_MIN_EXTEND_QUEUE_ID)
1813 reg_offset = HNS3_TQP_REG_OFFSET + queue_id * HNS3_TQP_REG_SIZE;
1815 reg_offset = HNS3_TQP_REG_OFFSET + HNS3_TQP_EXT_REG_OFFSET +
1816 (queue_id - HNS3_MIN_EXTEND_QUEUE_ID) *
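/*
 * This implements the ethdev .rx_queue_setup operation. Applications reach it
 * through the generic API; a minimal sketch (the descriptor count and
 * rx_free_thresh below are illustrative values, not driver requirements):
 *
 *	struct rte_eth_rxconf rx_conf = { .rx_free_thresh = 32 };
 *	ret = rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(),
 *				     &rx_conf, mbuf_pool);
 */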
1823 hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
1824 unsigned int socket_id, const struct rte_eth_rxconf *conf,
1825 struct rte_mempool *mp)
1827 struct hns3_adapter *hns = dev->data->dev_private;
1828 struct hns3_hw *hw = &hns->hw;
1829 struct hns3_queue_info q_info;
1830 struct hns3_rx_queue *rxq;
1831 uint16_t rx_buf_size;
1835 ret = hns3_rx_queue_conf_check(hw, conf, mp, nb_desc, &rx_buf_size);
1839 if (dev->data->rx_queues[idx]) {
1840 hns3_rx_queue_release(dev->data->rx_queues[idx]);
1841 dev->data->rx_queues[idx] = NULL;
1845 q_info.socket_id = socket_id;
1846 q_info.nb_desc = nb_desc;
1847 q_info.type = "hns3 RX queue";
1848 q_info.ring_name = "rx_ring";
1850 rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
1853 "Failed to alloc mem and reserve DMA mem for rx ring!");
1858 rxq->ptype_tbl = &hns->ptype_tbl;
1860 rxq->rx_free_thresh = (conf->rx_free_thresh > 0) ?
1861 conf->rx_free_thresh : HNS3_DEFAULT_RX_FREE_THRESH;
1863 rxq->rx_deferred_start = conf->rx_deferred_start;
1864 if (rxq->rx_deferred_start && !hns3_dev_indep_txrx_supported(hw)) {
1865 hns3_warn(hw, "deferred start is not supported.");
1866 rxq->rx_deferred_start = false;
1869 rx_entry_len = (rxq->nb_rx_desc + HNS3_DEFAULT_RX_BURST) *
1870 sizeof(struct hns3_entry);
1871 rxq->sw_ring = rte_zmalloc_socket("hns3 RX sw ring", rx_entry_len,
1872 RTE_CACHE_LINE_SIZE, socket_id);
1873 if (rxq->sw_ring == NULL) {
1874 hns3_err(hw, "Failed to allocate memory for rx sw ring!");
1875 hns3_rx_queue_release(rxq);
1879 rxq->next_to_use = 0;
1880 rxq->rx_free_hold = 0;
1881 rxq->rx_rearm_start = 0;
1882 rxq->rx_rearm_nb = 0;
1883 rxq->pkt_first_seg = NULL;
1884 rxq->pkt_last_seg = NULL;
1885 rxq->port_id = dev->data->port_id;
1887 * For hns3 PF device, if the VLAN mode is HW_SHIFT_AND_DISCARD_MODE,
1888 * the pvid_sw_discard_en in the queue struct should not be changed,
1889	 * because PVID-related operations do not need to be processed by the PMD
1890	 * driver. For hns3 VF devices, whether PVID needs to be processed depends
1891	 * on the configuration of the PF kernel-mode netdevice driver. The
1892	 * related PF configuration is delivered through the mailbox and finally
1893	 * reflected in port_base_vlan_cfg.
1895 if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
1896 rxq->pvid_sw_discard_en = hw->port_base_vlan_cfg.state ==
1897 HNS3_PORT_BASE_VLAN_ENABLE;
1899 rxq->pvid_sw_discard_en = false;
1900 rxq->ptype_en = hns3_dev_rxd_adv_layout_supported(hw) ? true : false;
1901 rxq->configured = true;
1904	rxq->io_base = (void *)((char *)hw->io_base +
1905				hns3_get_tqp_reg_offset(idx));
1906 rxq->io_head_reg = (volatile void *)((char *)rxq->io_base +
1907 HNS3_RING_RX_HEAD_REG);
1908 rxq->rx_buf_len = rx_buf_size;
1909 memset(&rxq->basic_stats, 0, sizeof(struct hns3_rx_basic_stats));
1910 memset(&rxq->err_stats, 0, sizeof(struct hns3_rx_bd_errors_stats));
1911 memset(&rxq->dfx_stats, 0, sizeof(struct hns3_rx_dfx_stats));
1913 /* CRC len set here is used for amending packet length */
1914 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
1915 rxq->crc_len = RTE_ETHER_CRC_LEN;
1919 rxq->bulk_mbuf_num = 0;
1921 rte_spinlock_lock(&hw->lock);
1922 dev->data->rx_queues[idx] = rxq;
1923 rte_spinlock_unlock(&hw->lock);
1929 hns3_rx_scattered_reset(struct rte_eth_dev *dev)
1931 struct hns3_adapter *hns = dev->data->dev_private;
1932 struct hns3_hw *hw = &hns->hw;
1935 dev->data->scattered_rx = false;
1939 hns3_rx_scattered_calc(struct rte_eth_dev *dev)
1941 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1942 struct hns3_adapter *hns = dev->data->dev_private;
1943 struct hns3_hw *hw = &hns->hw;
1944 struct hns3_rx_queue *rxq;
1947 if (dev->data->rx_queues == NULL)
1950 for (queue_id = 0; queue_id < dev->data->nb_rx_queues; queue_id++) {
1951 rxq = dev->data->rx_queues[queue_id];
1952 if (hw->rx_buf_len == 0)
1953 hw->rx_buf_len = rxq->rx_buf_len;
1955 hw->rx_buf_len = RTE_MIN(hw->rx_buf_len,
1959 if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SCATTER ||
1960 dev_conf->rxmode.max_rx_pkt_len > hw->rx_buf_len)
1961 dev->data->scattered_rx = true;
1965 hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1967 static const uint32_t ptypes[] = {
1969 RTE_PTYPE_L2_ETHER_LLDP,
1970 RTE_PTYPE_L2_ETHER_ARP,
1972 RTE_PTYPE_L3_IPV4_EXT,
1974 RTE_PTYPE_L3_IPV6_EXT,
1980 RTE_PTYPE_TUNNEL_GRE,
1981 RTE_PTYPE_INNER_L2_ETHER,
1982 RTE_PTYPE_INNER_L3_IPV4,
1983 RTE_PTYPE_INNER_L3_IPV6,
1984 RTE_PTYPE_INNER_L3_IPV4_EXT,
1985 RTE_PTYPE_INNER_L3_IPV6_EXT,
1986 RTE_PTYPE_INNER_L4_UDP,
1987 RTE_PTYPE_INNER_L4_TCP,
1988 RTE_PTYPE_INNER_L4_SCTP,
1989 RTE_PTYPE_INNER_L4_ICMP,
1990 RTE_PTYPE_TUNNEL_VXLAN,
1991 RTE_PTYPE_TUNNEL_NVGRE,
1994 static const uint32_t adv_layout_ptypes[] = {
1996 RTE_PTYPE_L2_ETHER_TIMESYNC,
1997 RTE_PTYPE_L2_ETHER_LLDP,
1998 RTE_PTYPE_L2_ETHER_ARP,
1999 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
2000 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
2002 RTE_PTYPE_L4_NONFRAG,
2008 RTE_PTYPE_TUNNEL_GRE,
2009 RTE_PTYPE_TUNNEL_GRENAT,
2010 RTE_PTYPE_INNER_L2_ETHER,
2011 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
2012 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
2013 RTE_PTYPE_INNER_L4_FRAG,
2014 RTE_PTYPE_INNER_L4_ICMP,
2015 RTE_PTYPE_INNER_L4_NONFRAG,
2016 RTE_PTYPE_INNER_L4_UDP,
2017 RTE_PTYPE_INNER_L4_TCP,
2018 RTE_PTYPE_INNER_L4_SCTP,
2019 RTE_PTYPE_INNER_L4_ICMP,
2022 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2024 if (dev->rx_pkt_burst == hns3_recv_pkts_simple ||
2025 dev->rx_pkt_burst == hns3_recv_scattered_pkts ||
2026 dev->rx_pkt_burst == hns3_recv_pkts_vec ||
2027 dev->rx_pkt_burst == hns3_recv_pkts_vec_sve) {
2028 if (hns3_dev_rxd_adv_layout_supported(hw))
2029 return adv_layout_ptypes;
2038 hns3_init_non_tunnel_ptype_tbl(struct hns3_ptype_table *tbl)
2040 tbl->l3table[0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
2041 tbl->l3table[1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
2042 tbl->l3table[2] = RTE_PTYPE_L2_ETHER_ARP;
2043 tbl->l3table[4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT;
2044 tbl->l3table[5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT;
2045 tbl->l3table[6] = RTE_PTYPE_L2_ETHER_LLDP;
2047 tbl->l4table[0] = RTE_PTYPE_L4_UDP;
2048 tbl->l4table[1] = RTE_PTYPE_L4_TCP;
2049 tbl->l4table[2] = RTE_PTYPE_TUNNEL_GRE;
2050 tbl->l4table[3] = RTE_PTYPE_L4_SCTP;
2051 tbl->l4table[4] = RTE_PTYPE_L4_IGMP;
2052 tbl->l4table[5] = RTE_PTYPE_L4_ICMP;
2056 hns3_init_tunnel_ptype_tbl(struct hns3_ptype_table *tbl)
2058 tbl->inner_l3table[0] = RTE_PTYPE_INNER_L2_ETHER |
2059 RTE_PTYPE_INNER_L3_IPV4;
2060 tbl->inner_l3table[1] = RTE_PTYPE_INNER_L2_ETHER |
2061 RTE_PTYPE_INNER_L3_IPV6;
2062	/* There is no ptype for inner ARP/RARP */
2063 tbl->inner_l3table[2] = RTE_PTYPE_UNKNOWN;
2064 tbl->inner_l3table[3] = RTE_PTYPE_UNKNOWN;
2065 tbl->inner_l3table[4] = RTE_PTYPE_INNER_L2_ETHER |
2066 RTE_PTYPE_INNER_L3_IPV4_EXT;
2067 tbl->inner_l3table[5] = RTE_PTYPE_INNER_L2_ETHER |
2068 RTE_PTYPE_INNER_L3_IPV6_EXT;
2070 tbl->inner_l4table[0] = RTE_PTYPE_INNER_L4_UDP;
2071 tbl->inner_l4table[1] = RTE_PTYPE_INNER_L4_TCP;
2072	/* There is no ptype for inner GRE */
2073 tbl->inner_l4table[2] = RTE_PTYPE_UNKNOWN;
2074 tbl->inner_l4table[3] = RTE_PTYPE_INNER_L4_SCTP;
2075	/* There is no ptype for inner IGMP */
2076 tbl->inner_l4table[4] = RTE_PTYPE_UNKNOWN;
2077 tbl->inner_l4table[5] = RTE_PTYPE_INNER_L4_ICMP;
2079 tbl->ol3table[0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
2080 tbl->ol3table[1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
2081 tbl->ol3table[2] = RTE_PTYPE_UNKNOWN;
2082 tbl->ol3table[3] = RTE_PTYPE_UNKNOWN;
2083 tbl->ol3table[4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT;
2084 tbl->ol3table[5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT;
2086 tbl->ol4table[0] = RTE_PTYPE_UNKNOWN;
2087 tbl->ol4table[1] = RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN;
2088 tbl->ol4table[2] = RTE_PTYPE_TUNNEL_NVGRE;
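/*
 * With the advanced RXD layout, the descriptor reports a single packet-type
 * index that is looked up directly in tbl->ptype[]. Only the indexes the
 * hardware can produce are filled in below; every other entry stays
 * RTE_PTYPE_UNKNOWN because hns3_init_rx_ptype_tble() zeroes the table first.
 */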
2092 hns3_init_adv_layout_ptype(struct hns3_ptype_table *tbl)
2094 uint32_t *ptype = tbl->ptype;
2097 ptype[1] = RTE_PTYPE_L2_ETHER_ARP;
2098 ptype[3] = RTE_PTYPE_L2_ETHER_LLDP;
2099 ptype[8] = RTE_PTYPE_L2_ETHER_TIMESYNC;
2101 /* Non-tunnel IPv4 */
2102 ptype[17] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2104 ptype[18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2105 RTE_PTYPE_L4_NONFRAG;
2106 ptype[19] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2108 ptype[20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2110 ptype[21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2111 RTE_PTYPE_TUNNEL_GRE;
2112 ptype[22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2114 ptype[23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2116 ptype[24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2118 /* The next ptype is PTP over IPv4 + UDP */
2119 ptype[25] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2122 /* IPv4 --> GRE/Teredo/VXLAN */
2123 ptype[29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2124 RTE_PTYPE_TUNNEL_GRENAT;
2125 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
2126 ptype[30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2127 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER;
2129 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
2130 ptype[31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2131 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2132 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2133 RTE_PTYPE_INNER_L4_FRAG;
2134 ptype[32] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2135 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2136 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2137 RTE_PTYPE_INNER_L4_NONFRAG;
2138 ptype[33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2139 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2140 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2141 RTE_PTYPE_INNER_L4_UDP;
2142 ptype[34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2143 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2144 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2145 RTE_PTYPE_INNER_L4_TCP;
2146 ptype[35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2147 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2148 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2149 RTE_PTYPE_INNER_L4_SCTP;
2150 /* The next ptype's inner L4 is IGMP */
2151 ptype[36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2152 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2153 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
2154 ptype[37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2155 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2156 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2157 RTE_PTYPE_INNER_L4_ICMP;
2159 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
2160 ptype[39] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2161 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2162 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2163 RTE_PTYPE_INNER_L4_FRAG;
2164 ptype[40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2165 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2166 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2167 RTE_PTYPE_INNER_L4_NONFRAG;
2168 ptype[41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2169 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2170 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2171 RTE_PTYPE_INNER_L4_UDP;
2172 ptype[42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2173 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2174 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2175 RTE_PTYPE_INNER_L4_TCP;
2176 ptype[43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2177 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2178 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2179 RTE_PTYPE_INNER_L4_SCTP;
2180 /* The next ptype's inner L4 is IGMP */
2181 ptype[44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2182 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2183 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
2184 ptype[45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2185 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2186 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2187 RTE_PTYPE_INNER_L4_ICMP;
2189 /* Non-tunnel IPv6 */
2190 ptype[111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2192 ptype[112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2193 RTE_PTYPE_L4_NONFRAG;
2194 ptype[113] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2196 ptype[114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2198 ptype[115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2199 RTE_PTYPE_TUNNEL_GRE;
2200 ptype[116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2202 ptype[117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2204 ptype[118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2206 /* Special for PTP over IPv6 + UDP */
2207 ptype[119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2210 /* IPv6 --> GRE/Teredo/VXLAN */
2211 ptype[123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2212 RTE_PTYPE_TUNNEL_GRENAT;
2213 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
2214 ptype[124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2215 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER;
2217 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
2218 ptype[125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2219 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2220 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2221 RTE_PTYPE_INNER_L4_FRAG;
2222 ptype[126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2223 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2224 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2225 RTE_PTYPE_INNER_L4_NONFRAG;
2226 ptype[127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2227 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2228 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2229 RTE_PTYPE_INNER_L4_UDP;
2230 ptype[128] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2231 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2232 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2233 RTE_PTYPE_INNER_L4_TCP;
2234 ptype[129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2235 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2236 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2237 RTE_PTYPE_INNER_L4_SCTP;
2238 /* The next ptype's inner L4 is IGMP */
2239 ptype[130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2240 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2241 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
2242 ptype[131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2243 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2244 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2245 RTE_PTYPE_INNER_L4_ICMP;
2247 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
2248 ptype[133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2249 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2250 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2251 RTE_PTYPE_INNER_L4_FRAG;
2252 ptype[134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2253 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2254 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2255 RTE_PTYPE_INNER_L4_NONFRAG;
2256 ptype[135] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2257 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2258 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2259 RTE_PTYPE_INNER_L4_UDP;
2260 ptype[136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2261 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2262 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2263 RTE_PTYPE_INNER_L4_TCP;
2264 ptype[137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2265 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2266 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2267 RTE_PTYPE_INNER_L4_SCTP;
2268 /* The next ptype's inner L4 is IGMP */
2269 ptype[138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2270 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2271 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
2272 ptype[139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2273 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2274 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2275 RTE_PTYPE_INNER_L4_ICMP;
2279 hns3_init_rx_ptype_tble(struct rte_eth_dev *dev)
2281 struct hns3_adapter *hns = dev->data->dev_private;
2282 struct hns3_ptype_table *tbl = &hns->ptype_tbl;
2284 memset(tbl, 0, sizeof(*tbl));
2286 hns3_init_non_tunnel_ptype_tbl(tbl);
2287 hns3_init_tunnel_ptype_tbl(tbl);
2288 hns3_init_adv_layout_ptype(tbl);
2292 hns3_rxd_to_vlan_tci(struct hns3_rx_queue *rxq, struct rte_mbuf *mb,
2293 uint32_t l234_info, const struct hns3_desc *rxd)
2295 #define HNS3_STRP_STATUS_NUM 0x4
2297 #define HNS3_NO_STRP_VLAN_VLD 0x0
2298 #define HNS3_INNER_STRP_VLAN_VLD 0x1
2299 #define HNS3_OUTER_STRP_VLAN_VLD 0x2
2300 uint32_t strip_status;
2301 uint32_t report_mode;
2304	 * Due to a HW limitation, the vlan tag will always be inserted into the RX
2305	 * descriptor when the tag is stripped from the packet, so the driver needs
2306	 * to determine which tag to report to the mbuf according to the PVID
2307	 * configuration and the vlan stripped status.
2309 static const uint32_t report_type[][HNS3_STRP_STATUS_NUM] = {
2311 HNS3_NO_STRP_VLAN_VLD,
2312 HNS3_OUTER_STRP_VLAN_VLD,
2313 HNS3_INNER_STRP_VLAN_VLD,
2314 HNS3_OUTER_STRP_VLAN_VLD
2317 HNS3_NO_STRP_VLAN_VLD,
2318 HNS3_NO_STRP_VLAN_VLD,
2319 HNS3_NO_STRP_VLAN_VLD,
2320 HNS3_INNER_STRP_VLAN_VLD
2323 strip_status = hns3_get_field(l234_info, HNS3_RXD_STRP_TAGP_M,
2324 HNS3_RXD_STRP_TAGP_S);
2325 report_mode = report_type[rxq->pvid_sw_discard_en][strip_status];
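/*
 * Illustrative lookup: with pvid_sw_discard_en == 0 and a strip status of
 * 1, report_type[0][1] is HNS3_OUTER_STRP_VLAN_VLD, so the switch below
 * reports the tag from rxd->rx.ot_vlan_tag; with pvid_sw_discard_en == 1
 * only a strip status of 3 results in a tag (from rxd->rx.vlan_tag) being
 * reported.
 */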
2326 switch (report_mode) {
2327 case HNS3_NO_STRP_VLAN_VLD:
2330 case HNS3_INNER_STRP_VLAN_VLD:
2331 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
2332 mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.vlan_tag);
2334 case HNS3_OUTER_STRP_VLAN_VLD:
2335 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
2336 mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.ot_vlan_tag);
2345 recalculate_data_len(struct rte_mbuf *first_seg, struct rte_mbuf *last_seg,
2346 struct rte_mbuf *rxm, struct hns3_rx_queue *rxq,
2349 uint8_t crc_len = rxq->crc_len;
2351 if (data_len <= crc_len) {
2352 rte_pktmbuf_free_seg(rxm);
2353 first_seg->nb_segs--;
2354 last_seg->data_len = (uint16_t)(last_seg->data_len -
2355 (crc_len - data_len));
2356 last_seg->next = NULL;
2358 rxm->data_len = (uint16_t)(data_len - crc_len);
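/*
 * Allocate one Rx mbuf, preferring the per-queue bulk cache: when the
 * cache is empty it is refilled with HNS3_BULK_ALLOC_MBUF_NUM mbufs in a
 * single mempool operation, and only if that fails does the driver fall
 * back to a single raw allocation.
 */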
2361 static inline struct rte_mbuf *
2362 hns3_rx_alloc_buffer(struct hns3_rx_queue *rxq)
2366 if (likely(rxq->bulk_mbuf_num > 0))
2367 return rxq->bulk_mbuf[--rxq->bulk_mbuf_num];
2369 ret = rte_mempool_get_bulk(rxq->mb_pool, (void **)rxq->bulk_mbuf,
2370 HNS3_BULK_ALLOC_MBUF_NUM);
2371 if (likely(ret == 0)) {
2372 rxq->bulk_mbuf_num = HNS3_BULK_ALLOC_MBUF_NUM;
2373 return rxq->bulk_mbuf[--rxq->bulk_mbuf_num];
2375 return rte_mbuf_raw_alloc(rxq->mb_pool);
2379 hns3_rx_ptp_timestamp_handle(struct hns3_rx_queue *rxq, struct rte_mbuf *mbuf,
2380 volatile struct hns3_desc *rxd)
2382 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(rxq->hns);
2383 uint64_t timestamp = rte_le_to_cpu_64(rxd->timestamp);
2385 mbuf->ol_flags |= PKT_RX_IEEE1588_PTP | PKT_RX_IEEE1588_TMST;
2386 if (hns3_timestamp_rx_dynflag > 0) {
2387 *RTE_MBUF_DYNFIELD(mbuf, hns3_timestamp_dynfield_offset,
2388 rte_mbuf_timestamp_t *) = timestamp;
2389 mbuf->ol_flags |= hns3_timestamp_rx_dynflag;
2392 pf->rx_timestamp = timestamp;
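/*
 * Simple Rx burst used when packets always fit in one BD: for each valid
 * descriptor the driver swaps in a newly allocated mbuf, rebuilds the
 * received mbuf from the saved descriptor copy (length, RSS hash, FDIR id,
 * checksum/ptype, VLAN), and writes the doorbell once rx_free_hold exceeds
 * rx_free_thresh.
 */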
2396 hns3_recv_pkts_simple(void *rx_queue,
2397 struct rte_mbuf **rx_pkts,
2400 volatile struct hns3_desc *rx_ring; /* RX ring (desc) */
2401 volatile struct hns3_desc *rxdp; /* pointer of the current desc */
2402 struct hns3_rx_queue *rxq; /* RX queue */
2403 struct hns3_entry *sw_ring;
2404 struct hns3_entry *rxe;
2405 struct hns3_desc rxd;
2406 struct rte_mbuf *nmb; /* pointer of the new mbuf */
2407 struct rte_mbuf *rxm;
2408 uint32_t bd_base_info;
2420 rx_ring = rxq->rx_ring;
2421 sw_ring = rxq->sw_ring;
2422 rx_id = rxq->next_to_use;
2424 while (nb_rx < nb_pkts) {
2425 rxdp = &rx_ring[rx_id];
2426 bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
2427 if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
2430 rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -
2431 (1u << HNS3_RXD_VLD_B)];
2433 nmb = hns3_rx_alloc_buffer(rxq);
2434 if (unlikely(nmb == NULL)) {
2437 port_id = rxq->port_id;
2438 rte_eth_devices[port_id].data->rx_mbuf_alloc_failed++;
2443 rxe = &sw_ring[rx_id];
2445 if (unlikely(rx_id == rxq->nb_rx_desc))
2448 rte_prefetch0(sw_ring[rx_id].mbuf);
2449 if ((rx_id & HNS3_RX_RING_PREFETCTH_MASK) == 0) {
2450 rte_prefetch0(&rx_ring[rx_id]);
2451 rte_prefetch0(&sw_ring[rx_id]);
2458 if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B)))
2459 hns3_rx_ptp_timestamp_handle(rxq, rxm, rxdp);
2461 dma_addr = rte_mbuf_data_iova_default(nmb);
2462 rxdp->addr = rte_cpu_to_le_64(dma_addr);
2463 rxdp->rx.bd_base_info = 0;
2465 rxm->data_off = RTE_PKTMBUF_HEADROOM;
2466 rxm->pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.rx.pkt_len)) -
2468 rxm->data_len = rxm->pkt_len;
2469 rxm->port = rxq->port_id;
2470 rxm->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash);
2471 rxm->ol_flags |= PKT_RX_RSS_HASH;
2472 if (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) {
2474 rte_le_to_cpu_16(rxd.rx.fd_id);
2475 rxm->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
2480 /* Load the remaining descriptor data and extract the necessary fields */
2481 l234_info = rte_le_to_cpu_32(rxd.rx.l234_info);
2482 ol_info = rte_le_to_cpu_32(rxd.rx.ol_info);
2483 ret = hns3_handle_bdinfo(rxq, rxm, bd_base_info, l234_info);
2487 rxm->packet_type = hns3_rx_calc_ptype(rxq, l234_info, ol_info);
2489 if (rxm->packet_type == RTE_PTYPE_L2_ETHER_TIMESYNC)
2490 rxm->ol_flags |= PKT_RX_IEEE1588_PTP;
2492 hns3_rxd_to_vlan_tci(rxq, rxm, l234_info, &rxd);
2494 /* Increment bytes counter */
2495 rxq->basic_stats.bytes += rxm->pkt_len;
2497 rx_pkts[nb_rx++] = rxm;
2500 rte_pktmbuf_free(rxm);
2503 rxq->next_to_use = rx_id;
2504 rxq->rx_free_hold += nb_rx_bd;
2505 if (rxq->rx_free_hold > rxq->rx_free_thresh) {
2506 hns3_write_reg_opt(rxq->io_head_reg, rxq->rx_free_hold);
2507 rxq->rx_free_hold = 0;
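/*
 * Scattered Rx burst: segments are chained onto pkt_first_seg until a BD
 * with the FE (frame end) bit arrives; the CRC is then trimmed if the
 * hardware did not strip it and the completed chain is delivered. A
 * partial chain is preserved in the queue state across calls.
 */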
2514 hns3_recv_scattered_pkts(void *rx_queue,
2515 struct rte_mbuf **rx_pkts,
2518 volatile struct hns3_desc *rx_ring; /* RX ring (desc) */
2519 volatile struct hns3_desc *rxdp; /* pointer of the current desc */
2520 struct hns3_rx_queue *rxq; /* RX queue */
2521 struct hns3_entry *sw_ring;
2522 struct hns3_entry *rxe;
2523 struct rte_mbuf *first_seg;
2524 struct rte_mbuf *last_seg;
2525 struct hns3_desc rxd;
2526 struct rte_mbuf *nmb; /* pointer of the new mbuf */
2527 struct rte_mbuf *rxm;
2528 struct rte_eth_dev *dev;
2529 uint32_t bd_base_info;
2543 rx_id = rxq->next_to_use;
2544 rx_ring = rxq->rx_ring;
2545 sw_ring = rxq->sw_ring;
2546 first_seg = rxq->pkt_first_seg;
2547 last_seg = rxq->pkt_last_seg;
2549 while (nb_rx < nb_pkts) {
2550 rxdp = &rx_ring[rx_id];
2551 bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
2552 if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
2556 * The interactive process between software and hardware of
2557 * receiving a new packet in hns3 network engine:
2558 * 1. The hardware network engine first writes the packet content
2559 * to the memory pointed to by the 'addr' field of the Rx Buffer
2560 * Descriptor, then fills the result of parsing the packet,
2561 * including the valid field, into the Rx Buffer
2562 * Descriptor in one write operation.
2563 * 2. The driver reads the Rx BD's valid field in a loop to check
2564 * whether it is valid; if so, it assigns a new address to
2565 * the addr field, clears the valid field, gets the other
2566 * information of the packet by parsing the Rx BD's other fields,
2567 * and finally writes back the number of Rx BDs processed by the
2568 * driver to the HNS3_RING_RX_HEAD_REG register to inform hardware.
2570 * In the above process, the ordering is very important. We must
2571 * make sure that the CPU reads the Rx BD's other fields only after the Rx BD is valid.
2574 * There are two types of re-ordering: compiler re-ordering and
2575 * CPU re-ordering under the ARMv8 architecture.
2576 * 1. we use volatile to deal with compiler re-ordering, so you
2577 * can see that rx_ring/rxdp are defined with volatile.
2578 * 2. we commonly use memory barrier to deal with CPU
2579 * re-ordering, but the cost is high.
2581 * In order to solve the high cost of using memory barrier, we
2582 * use the data dependency order under the ARMv8 architecture,
2585 * instr02: load B <- A
2586 * the instr02 will always execute after instr01.
2588 * To construct the data dependency ordering, we use the
2589 * following assignment:
2590 * rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -
2591 * (1u<<HNS3_RXD_VLD_B)]
2592 * Using gcc compiler under the ARMv8 architecture, the related
2593 * assembly code example as follows:
2594 * note: (1u << HNS3_RXD_VLD_B) equals 0x10
2595 * instr01: ldr w26, [x22, #28] --read bd_base_info
2596 * instr02: and w0, w26, #0x10 --calc bd_base_info & 0x10
2597 * instr03: sub w0, w0, #0x10 --calc (bd_base_info &
2599 * instr04: add x0, x22, x0, lsl #5 --calc copy source addr
2600 * instr05: ldp x2, x3, [x0]
2601 * instr06: stp x2, x3, [x29, #256] --copy BD's [0 ~ 15]B
2602 * instr07: ldp x4, x5, [x0, #16]
2603 * instr08: stp x4, x5, [x29, #272] --copy BD's [16 ~ 31]B
2604 * instr05~08 depend on x0's value, x0 depends on w26's
2605 * value, and w26 holds bd_base_info; this forms the data
2606 * dependency ordering.
2607 * note: if the BD is valid, (bd_base_info & (1u<<HNS3_RXD_VLD_B)) -
2608 * (1u<<HNS3_RXD_VLD_B) will always be zero, so the
2609 * assignment is correct.
2611 * So we use the data dependency ordering instead of memory
2612 * barrier to improve receive performance.
2614 rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -
2615 (1u << HNS3_RXD_VLD_B)];
2617 nmb = hns3_rx_alloc_buffer(rxq);
2618 if (unlikely(nmb == NULL)) {
2619 dev = &rte_eth_devices[rxq->port_id];
2620 dev->data->rx_mbuf_alloc_failed++;
2625 rxe = &sw_ring[rx_id];
2627 if (unlikely(rx_id == rxq->nb_rx_desc))
2630 rte_prefetch0(sw_ring[rx_id].mbuf);
2631 if ((rx_id & HNS3_RX_RING_PREFETCTH_MASK) == 0) {
2632 rte_prefetch0(&rx_ring[rx_id]);
2633 rte_prefetch0(&sw_ring[rx_id]);
2639 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
2640 rxdp->rx.bd_base_info = 0;
2641 rxdp->addr = dma_addr;
2643 if (first_seg == NULL) {
2645 first_seg->nb_segs = 1;
2647 first_seg->nb_segs++;
2648 last_seg->next = rxm;
2651 rxm->data_off = RTE_PKTMBUF_HEADROOM;
2652 rxm->data_len = rte_le_to_cpu_16(rxd.rx.size);
2654 if (!(bd_base_info & BIT(HNS3_RXD_FE_B))) {
2660 if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B)))
2661 hns3_rx_ptp_timestamp_handle(rxq, first_seg, rxdp);
2664 * This is the last buffer of the received packet. The packet length
2665 * from the buffer descriptor may contain the CRC length, which
2666 * should be subtracted from it, and likewise from the data length.
2668 first_seg->pkt_len = rte_le_to_cpu_16(rxd.rx.pkt_len);
2671 * This is the last buffer of the received packet. If the CRC
2672 * is not stripped by the hardware:
2673 * - Subtract the CRC length from the total packet length.
2674 * - If the last buffer only contains the whole CRC or a part
2675 * of it, free the mbuf associated with the last buffer. If part
2676 * of the CRC is also contained in the previous mbuf, subtract
2677 * the length of that CRC part from the data length of the previous mbuf.
2681 if (unlikely(rxq->crc_len > 0)) {
2682 first_seg->pkt_len -= rxq->crc_len;
2683 recalculate_data_len(first_seg, last_seg, rxm, rxq,
2687 first_seg->port = rxq->port_id;
2688 first_seg->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash);
2689 first_seg->ol_flags = PKT_RX_RSS_HASH;
2690 if (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) {
2691 first_seg->hash.fdir.hi =
2692 rte_le_to_cpu_16(rxd.rx.fd_id);
2693 first_seg->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
2696 gro_size = hns3_get_field(bd_base_info, HNS3_RXD_GRO_SIZE_M,
2697 HNS3_RXD_GRO_SIZE_S);
2698 if (gro_size != 0) {
2699 first_seg->ol_flags |= PKT_RX_LRO;
2700 first_seg->tso_segsz = gro_size;
2703 l234_info = rte_le_to_cpu_32(rxd.rx.l234_info);
2704 ol_info = rte_le_to_cpu_32(rxd.rx.ol_info);
2705 ret = hns3_handle_bdinfo(rxq, first_seg, bd_base_info,
2710 first_seg->packet_type = hns3_rx_calc_ptype(rxq,
2711 l234_info, ol_info);
2713 if (first_seg->packet_type == RTE_PTYPE_L2_ETHER_TIMESYNC)
2714 rxm->ol_flags |= PKT_RX_IEEE1588_PTP;
2716 hns3_rxd_to_vlan_tci(rxq, first_seg, l234_info, &rxd);
2718 /* Increment bytes counter */
2719 rxq->basic_stats.bytes += first_seg->pkt_len;
2721 rx_pkts[nb_rx++] = first_seg;
2725 rte_pktmbuf_free(first_seg);
2729 rxq->next_to_use = rx_id;
2730 rxq->pkt_first_seg = first_seg;
2731 rxq->pkt_last_seg = last_seg;
2733 rxq->rx_free_hold += nb_rx_bd;
2734 if (rxq->rx_free_hold > rxq->rx_free_thresh) {
2735 hns3_write_reg_opt(rxq->io_head_reg, rxq->rx_free_hold);
2736 rxq->rx_free_hold = 0;
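/*
 * Fallback stubs used when the build provides no NEON/SVE Rx
 * implementation: vector queue setup does nothing, the capability check
 * reports no support and the vector burst functions hand back no packets.
 */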
2743 hns3_rxq_vec_setup(__rte_unused struct hns3_rx_queue *rxq)
2748 hns3_rx_check_vec_support(__rte_unused struct rte_eth_dev *dev)
2754 hns3_recv_pkts_vec(__rte_unused void *rx_queue,
2755 __rte_unused struct rte_mbuf **rx_pkts,
2756 __rte_unused uint16_t nb_pkts)
2762 hns3_recv_pkts_vec_sve(__rte_unused void *rx_queue,
2763 __rte_unused struct rte_mbuf **rx_pkts,
2764 __rte_unused uint16_t nb_pkts)
2770 hns3_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
2771 struct rte_eth_burst_mode *mode)
2773 static const struct {
2774 eth_rx_burst_t pkt_burst;
2777 { hns3_recv_pkts_simple, "Scalar Simple" },
2778 { hns3_recv_scattered_pkts, "Scalar Scattered" },
2779 { hns3_recv_pkts_vec, "Vector Neon" },
2780 { hns3_recv_pkts_vec_sve, "Vector Sve" },
2783 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
2787 for (i = 0; i < RTE_DIM(burst_infos); i++) {
2788 if (pkt_burst == burst_infos[i].pkt_burst) {
2789 snprintf(mode->info, sizeof(mode->info), "%s",
2790 burst_infos[i].info);
2800 hns3_get_default_vec_support(void)
2802 #if defined(RTE_ARCH_ARM64)
2803 if (rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128)
2805 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON))
2812 hns3_get_sve_support(void)
2814 #if defined(RTE_ARCH_ARM64) && defined(__ARM_FEATURE_SVE)
2815 if (rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_256)
2817 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SVE))
2823 static eth_rx_burst_t
2824 hns3_get_rx_function(struct rte_eth_dev *dev)
2826 struct hns3_adapter *hns = dev->data->dev_private;
2827 uint64_t offloads = dev->data->dev_conf.rxmode.offloads;
2828 bool vec_allowed, sve_allowed, simple_allowed;
2831 vec_support = hns3_rx_check_vec_support(dev) == 0;
2832 vec_allowed = vec_support && hns3_get_default_vec_support();
2833 sve_allowed = vec_support && hns3_get_sve_support();
2834 simple_allowed = !dev->data->scattered_rx &&
2835 (offloads & DEV_RX_OFFLOAD_TCP_LRO) == 0;
2837 if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_VEC && vec_allowed)
2838 return hns3_recv_pkts_vec;
2839 if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_SVE && sve_allowed)
2840 return hns3_recv_pkts_vec_sve;
2841 if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_SIMPLE && simple_allowed)
2842 return hns3_recv_pkts_simple;
2843 if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_COMMON)
2844 return hns3_recv_scattered_pkts;
2847 return hns3_recv_pkts_vec;
2849 return hns3_recv_pkts_simple;
2851 return hns3_recv_scattered_pkts;
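/*
 * Validate the descriptor count and Tx thresholds before queue setup.
 * Illustrative example (values chosen here, not driver defaults):
 * nb_desc = 1024, tx_rs_thresh = 32 and tx_free_thresh = 64 pass all
 * checks because 32 + 64 <= 1024, 1024 % 32 == 0, and both thresholds
 * are smaller than 1024 - HNS3_TX_RS_FREE_THRESH_GAP.
 */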
2855 hns3_tx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_txconf *conf,
2856 uint16_t nb_desc, uint16_t *tx_rs_thresh,
2857 uint16_t *tx_free_thresh, uint16_t idx)
2859 #define HNS3_TX_RS_FREE_THRESH_GAP 8
2860 uint16_t rs_thresh, free_thresh, fast_free_thresh;
2862 if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||
2863 nb_desc % HNS3_ALIGN_RING_DESC) {
2864 hns3_err(hw, "number (%u) of tx descriptors is invalid",
2869 rs_thresh = (conf->tx_rs_thresh > 0) ?
2870 conf->tx_rs_thresh : HNS3_DEFAULT_TX_RS_THRESH;
2871 free_thresh = (conf->tx_free_thresh > 0) ?
2872 conf->tx_free_thresh : HNS3_DEFAULT_TX_FREE_THRESH;
2873 if (rs_thresh + free_thresh > nb_desc || nb_desc % rs_thresh ||
2874 rs_thresh >= nb_desc - HNS3_TX_RS_FREE_THRESH_GAP ||
2875 free_thresh >= nb_desc - HNS3_TX_RS_FREE_THRESH_GAP) {
2876 hns3_err(hw, "tx_rs_thresh (%u) tx_free_thresh (%u) nb_desc "
2877 "(%u) of tx descriptors for port=%u queue=%u check "
2879 rs_thresh, free_thresh, nb_desc, hw->data->port_id,
2884 if (conf->tx_free_thresh == 0) {
2885 /* Fast free Tx memory buffer to improve cache hit rate */
2886 fast_free_thresh = nb_desc - rs_thresh;
2887 if (fast_free_thresh >=
2888 HNS3_TX_FAST_FREE_AHEAD + HNS3_DEFAULT_TX_FREE_THRESH)
2889 free_thresh = fast_free_thresh -
2890 HNS3_TX_FAST_FREE_AHEAD;
2893 *tx_rs_thresh = rs_thresh;
2894 *tx_free_thresh = free_thresh;
2899 hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
2900 unsigned int socket_id, const struct rte_eth_txconf *conf)
2902 struct hns3_adapter *hns = dev->data->dev_private;
2903 uint16_t tx_rs_thresh, tx_free_thresh;
2904 struct hns3_hw *hw = &hns->hw;
2905 struct hns3_queue_info q_info;
2906 struct hns3_tx_queue *txq;
2910 ret = hns3_tx_queue_conf_check(hw, conf, nb_desc,
2911 &tx_rs_thresh, &tx_free_thresh, idx);
2915 if (dev->data->tx_queues[idx] != NULL) {
2916 hns3_tx_queue_release(dev->data->tx_queues[idx]);
2917 dev->data->tx_queues[idx] = NULL;
2921 q_info.socket_id = socket_id;
2922 q_info.nb_desc = nb_desc;
2923 q_info.type = "hns3 TX queue";
2924 q_info.ring_name = "tx_ring";
2925 txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
2928 "Failed to alloc mem and reserve DMA mem for tx ring!");
2932 txq->tx_deferred_start = conf->tx_deferred_start;
2933 if (txq->tx_deferred_start && !hns3_dev_indep_txrx_supported(hw)) {
2934 hns3_warn(hw, "deferred start is not supported.");
2935 txq->tx_deferred_start = false;
2938 tx_entry_len = sizeof(struct hns3_entry) * txq->nb_tx_desc;
2939 txq->sw_ring = rte_zmalloc_socket("hns3 TX sw ring", tx_entry_len,
2940 RTE_CACHE_LINE_SIZE, socket_id);
2941 if (txq->sw_ring == NULL) {
2942 hns3_err(hw, "Failed to allocate memory for tx sw ring!");
2943 hns3_tx_queue_release(txq);
2948 txq->next_to_use = 0;
2949 txq->next_to_clean = 0;
2950 txq->tx_bd_ready = txq->nb_tx_desc - 1;
2951 txq->tx_free_thresh = tx_free_thresh;
2952 txq->tx_rs_thresh = tx_rs_thresh;
2953 txq->free = rte_zmalloc_socket("hns3 TX mbuf free array",
2954 sizeof(struct rte_mbuf *) * txq->tx_rs_thresh,
2955 RTE_CACHE_LINE_SIZE, socket_id);
2957 hns3_err(hw, "failed to allocate tx mbuf free array!");
2958 hns3_tx_queue_release(txq);
2962 txq->port_id = dev->data->port_id;
2964 * For hns3 PF device, if the VLAN mode is HW_SHIFT_AND_DISCARD_MODE,
2965 * the pvid_sw_shift_en in the queue struct should not be changed,
2966 * because PVID-related operations do not need to be processed by PMD
2967 * driver. For hns3 VF device, whether it needs to process PVID depends
2968 * on the configuration of PF kernel mode netdev driver. And the
2969 * related PF configuration is delivered through the mailbox and finally
2970 * reflected in port_base_vlan_cfg.
2972 if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
2973 txq->pvid_sw_shift_en = hw->port_base_vlan_cfg.state ==
2974 HNS3_PORT_BASE_VLAN_ENABLE;
2976 txq->pvid_sw_shift_en = false;
2977 txq->max_non_tso_bd_num = hw->max_non_tso_bd_num;
2978 txq->configured = true;
2979 txq->io_base = (void *)((char *)hw->io_base +
2980 hns3_get_tqp_reg_offset(idx));
2981 txq->io_tail_reg = (volatile void *)((char *)txq->io_base +
2982 HNS3_RING_TX_TAIL_REG);
2983 txq->min_tx_pkt_len = hw->min_tx_pkt_len;
2984 txq->tso_mode = hw->tso_mode;
2985 txq->udp_cksum_mode = hw->udp_cksum_mode;
2986 memset(&txq->basic_stats, 0, sizeof(struct hns3_tx_basic_stats));
2987 memset(&txq->dfx_stats, 0, sizeof(struct hns3_tx_dfx_stats));
2989 rte_spinlock_lock(&hw->lock);
2990 dev->data->tx_queues[idx] = txq;
2991 rte_spinlock_unlock(&hw->lock);
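/*
 * Release mbufs of Tx descriptors already completed by hardware: walk
 * from next_to_clean towards next_to_use, stopping at the first BD whose
 * VLD bit is still set, then advance next_to_clean and tx_bd_ready.
 */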
2997 hns3_tx_free_useless_buffer(struct hns3_tx_queue *txq)
2999 uint16_t tx_next_clean = txq->next_to_clean;
3000 uint16_t tx_next_use = txq->next_to_use;
3001 uint16_t tx_bd_ready = txq->tx_bd_ready;
3002 uint16_t tx_bd_max = txq->nb_tx_desc;
3003 struct hns3_entry *tx_bak_pkt = &txq->sw_ring[tx_next_clean];
3004 struct hns3_desc *desc = &txq->tx_ring[tx_next_clean];
3005 struct rte_mbuf *mbuf;
3007 while ((!(desc->tx.tp_fe_sc_vld_ra_ri &
3008 rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B)))) &&
3009 tx_next_use != tx_next_clean) {
3010 mbuf = tx_bak_pkt->mbuf;
3012 rte_pktmbuf_free_seg(mbuf);
3013 tx_bak_pkt->mbuf = NULL;
3021 if (tx_next_clean >= tx_bd_max) {
3023 desc = txq->tx_ring;
3024 tx_bak_pkt = txq->sw_ring;
3028 txq->next_to_clean = tx_next_clean;
3029 txq->tx_bd_ready = tx_bd_ready;
3033 hns3_config_gro(struct hns3_hw *hw, bool en)
3035 struct hns3_cfg_gro_status_cmd *req;
3036 struct hns3_cmd_desc desc;
3039 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GRO_GENERIC_CONFIG, false);
3040 req = (struct hns3_cfg_gro_status_cmd *)desc.data;
3042 req->gro_en = rte_cpu_to_le_16(en ? 1 : 0);
3044 ret = hns3_cmd_send(hw, &desc, 1);
3046 hns3_err(hw, "%s hardware GRO failed, ret = %d",
3047 en ? "enable" : "disable", ret);
3053 hns3_restore_gro_conf(struct hns3_hw *hw)
3059 offloads = hw->data->dev_conf.rxmode.offloads;
3060 gro_en = offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false;
3061 ret = hns3_config_gro(hw, gro_en);
3063 hns3_err(hw, "restore hardware GRO to %s failed, ret = %d",
3064 gro_en ? "enabled" : "disabled", ret);
3070 hns3_pkt_is_tso(struct rte_mbuf *m)
3072 return (m->tso_segsz != 0 && m->ol_flags & PKT_TX_TCP_SEG);
3076 hns3_set_tso(struct hns3_desc *desc, uint32_t paylen, struct rte_mbuf *rxm)
3078 if (!hns3_pkt_is_tso(rxm))
3081 if (paylen <= rxm->tso_segsz)
3084 desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(BIT(HNS3_TXD_TSO_B));
3085 desc->tx.mss = rte_cpu_to_le_16(rxm->tso_segsz);
3089 hns3_fill_per_desc(struct hns3_desc *desc, struct rte_mbuf *rxm)
3091 desc->addr = rte_mbuf_data_iova(rxm);
3092 desc->tx.send_size = rte_cpu_to_le_16(rte_pktmbuf_data_len(rxm));
3093 desc->tx.tp_fe_sc_vld_ra_ri |= rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B));
3097 hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc,
3098 struct rte_mbuf *rxm)
3100 uint64_t ol_flags = rxm->ol_flags;
3104 hdr_len = rxm->l2_len + rxm->l3_len + rxm->l4_len;
3105 hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ?
3106 rxm->outer_l2_len + rxm->outer_l3_len : 0;
3107 paylen = rxm->pkt_len - hdr_len;
3108 desc->tx.paylen_fd_dop_ol4cs |= rte_cpu_to_le_32(paylen);
3109 hns3_set_tso(desc, paylen, rxm);
3112 * Currently, the hns3 network engine does not support more than two
3113 * layers of VLAN offload in the Tx direction. So when the number of
3114 * VLANs in the packet represented by rxm, plus the number of VLANs
3115 * offloaded by hardware (such as PVID), exceeds two, the packet will
3116 * be discarded or its original VLAN will be overwritten by hardware.
3117 * When the PF PVID is enabled by calling rte_eth_dev_set_vlan_pvid,
3118 * or the VF PVID is enabled by the hns3 PF kernel ether driver, the
3119 * outer VLAN tag will always be the PVID. To avoid the VLAN in the Tx
3120 * descriptor being overwritten by the PVID, the VLAN should be placed
3121 * in the position close to the IP header when PVID is enabled.
3123 if (!txq->pvid_sw_shift_en && ol_flags & (PKT_TX_VLAN_PKT |
3125 desc->tx.ol_type_vlan_len_msec |=
3126 rte_cpu_to_le_32(BIT(HNS3_TXD_OVLAN_B));
3127 if (ol_flags & PKT_TX_QINQ_PKT)
3128 desc->tx.outer_vlan_tag =
3129 rte_cpu_to_le_16(rxm->vlan_tci_outer);
3131 desc->tx.outer_vlan_tag =
3132 rte_cpu_to_le_16(rxm->vlan_tci);
3135 if (ol_flags & PKT_TX_QINQ_PKT ||
3136 ((ol_flags & PKT_TX_VLAN_PKT) && txq->pvid_sw_shift_en)) {
3137 desc->tx.type_cs_vlan_tso_len |=
3138 rte_cpu_to_le_32(BIT(HNS3_TXD_VLAN_B));
3139 desc->tx.vlan_tag = rte_cpu_to_le_16(rxm->vlan_tci);
3142 if (ol_flags & PKT_TX_IEEE1588_TMST)
3143 desc->tx.tp_fe_sc_vld_ra_ri |=
3144 rte_cpu_to_le_16(BIT(HNS3_TXD_TSYN_B));
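/*
 * Bulk-allocate nb_new_buf mbufs from the packet's mempool and pre-chain
 * them; the head of the chain is returned through *alloc_mbuf so the
 * caller can copy an over-fragmented packet into it.
 */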
3148 hns3_tx_alloc_mbufs(struct rte_mempool *mb_pool, uint16_t nb_new_buf,
3149 struct rte_mbuf **alloc_mbuf)
3151 #define MAX_NON_TSO_BD_PER_PKT 18
3152 struct rte_mbuf *pkt_segs[MAX_NON_TSO_BD_PER_PKT];
3155 /* Allocate enough mbufs */
3156 if (rte_mempool_get_bulk(mb_pool, (void **)pkt_segs, nb_new_buf))
3159 for (i = 0; i < nb_new_buf - 1; i++)
3160 pkt_segs[i]->next = pkt_segs[i + 1];
3162 pkt_segs[nb_new_buf - 1]->next = NULL;
3163 pkt_segs[0]->nb_segs = nb_new_buf;
3164 *alloc_mbuf = pkt_segs[0];
3170 hns3_pktmbuf_copy_hdr(struct rte_mbuf *new_pkt, struct rte_mbuf *old_pkt)
3172 new_pkt->ol_flags = old_pkt->ol_flags;
3173 new_pkt->pkt_len = rte_pktmbuf_pkt_len(old_pkt);
3174 new_pkt->outer_l2_len = old_pkt->outer_l2_len;
3175 new_pkt->outer_l3_len = old_pkt->outer_l3_len;
3176 new_pkt->l2_len = old_pkt->l2_len;
3177 new_pkt->l3_len = old_pkt->l3_len;
3178 new_pkt->l4_len = old_pkt->l4_len;
3179 new_pkt->vlan_tci_outer = old_pkt->vlan_tci_outer;
3180 new_pkt->vlan_tci = old_pkt->vlan_tci;
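/*
 * Linearize a packet that needs more BDs than the hardware allows for
 * non-TSO transmission: copy its data into a new chain of full-size
 * mbufs, duplicate the offload metadata with hns3_pktmbuf_copy_hdr(),
 * free the original packet and return the new chain via *new_pkt.
 */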
3184 hns3_reassemble_tx_pkts(struct rte_mbuf *tx_pkt, struct rte_mbuf **new_pkt,
3185 uint8_t max_non_tso_bd_num)
3187 struct rte_mempool *mb_pool;
3188 struct rte_mbuf *new_mbuf;
3189 struct rte_mbuf *temp_new;
3190 struct rte_mbuf *temp;
3191 uint16_t last_buf_len;
3192 uint16_t nb_new_buf;
3202 mb_pool = tx_pkt->pool;
3203 buf_size = tx_pkt->buf_len - RTE_PKTMBUF_HEADROOM;
3204 nb_new_buf = (rte_pktmbuf_pkt_len(tx_pkt) - 1) / buf_size + 1;
3205 if (nb_new_buf > max_non_tso_bd_num)
3208 last_buf_len = rte_pktmbuf_pkt_len(tx_pkt) % buf_size;
3209 if (last_buf_len == 0)
3210 last_buf_len = buf_size;
3212 /* Allocate enough mbufs */
3213 ret = hns3_tx_alloc_mbufs(mb_pool, nb_new_buf, &new_mbuf);
3217 /* Copy the original packet content to the new mbufs */
3219 s = rte_pktmbuf_mtod(temp, char *);
3220 len_s = rte_pktmbuf_data_len(temp);
3221 temp_new = new_mbuf;
3222 while (temp != NULL && temp_new != NULL) {
3223 d = rte_pktmbuf_mtod(temp_new, char *);
3224 buf_len = temp_new->next == NULL ? last_buf_len : buf_size;
3228 len = RTE_MIN(len_s, len_d);
3232 len_d = len_d - len;
3233 len_s = len_s - len;
3239 s = rte_pktmbuf_mtod(temp, char *);
3240 len_s = rte_pktmbuf_data_len(temp);
3244 temp_new->data_len = buf_len;
3245 temp_new = temp_new->next;
3247 hns3_pktmbuf_copy_hdr(new_mbuf, tx_pkt);
3249 /* free original mbufs */
3250 rte_pktmbuf_free(tx_pkt);
3252 *new_pkt = new_mbuf;
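/*
 * Encode the outer L3 type (IPv4 with or without checksum offload, or
 * IPv6) and the outer L3 header length into the descriptor's
 * ol_type_vlan_len_msec field.
 */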
3258 hns3_parse_outer_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec)
3260 uint32_t tmp = *ol_type_vlan_len_msec;
3261 uint64_t ol_flags = m->ol_flags;
3263 /* (outer) IP header type */
3264 if (ol_flags & PKT_TX_OUTER_IPV4) {
3265 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
3266 tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M,
3267 HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_CSUM);
3269 tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M,
3270 HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_NO_CSUM);
3271 } else if (ol_flags & PKT_TX_OUTER_IPV6) {
3272 tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
3275 /* OL3 header size, in units of 4 bytes */
3276 tmp |= hns3_gen_field_val(HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
3277 m->outer_l3_len >> HNS3_L3_LEN_UNIT);
3278 *ol_type_vlan_len_msec = tmp;
3282 hns3_parse_inner_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec,
3283 uint32_t *type_cs_vlan_tso_len)
3285 #define HNS3_NVGRE_HLEN 8
3286 uint32_t tmp_outer = *ol_type_vlan_len_msec;
3287 uint32_t tmp_inner = *type_cs_vlan_tso_len;
3288 uint64_t ol_flags = m->ol_flags;
3289 uint16_t inner_l2_len;
3291 switch (ol_flags & PKT_TX_TUNNEL_MASK) {
3292 case PKT_TX_TUNNEL_VXLAN_GPE:
3293 case PKT_TX_TUNNEL_GENEVE:
3294 case PKT_TX_TUNNEL_VXLAN:
3295 /* MAC in UDP tunnelling packet, include VxLAN and GENEVE */
3296 tmp_outer |= hns3_gen_field_val(HNS3_TXD_TUNTYPE_M,
3297 HNS3_TXD_TUNTYPE_S, HNS3_TUN_MAC_IN_UDP);
3299 * For a tunnel packet, the inner l2 length of the mbuf is the sum of
3300 * the outer l4 length, the tunneling header length and the inner l2
3301 * length. But in the hns3 tx descriptor, the tunneling header
3302 * length is contained in the outer L4 length field.
3303 * Therefore, the driver needs to calculate the outer L4 length and the inner l2 length.
3306 tmp_outer |= hns3_gen_field_val(HNS3_TXD_L4LEN_M,
3308 (uint8_t)RTE_ETHER_VXLAN_HLEN >>
3311 inner_l2_len = m->l2_len - RTE_ETHER_VXLAN_HLEN;
3313 case PKT_TX_TUNNEL_GRE:
3314 tmp_outer |= hns3_gen_field_val(HNS3_TXD_TUNTYPE_M,
3315 HNS3_TXD_TUNTYPE_S, HNS3_TUN_NVGRE);
3317 * For an NVGRE tunnel packet, the outer L4 is empty, so only
3318 * fill the NVGRE header length into the outer L4 field.
3320 tmp_outer |= hns3_gen_field_val(HNS3_TXD_L4LEN_M,
3322 (uint8_t)HNS3_NVGRE_HLEN >> HNS3_L4_LEN_UNIT);
3324 inner_l2_len = m->l2_len - HNS3_NVGRE_HLEN;
3327 /* For non UDP / GRE tunneling, drop the tunnel packet */
3331 tmp_inner |= hns3_gen_field_val(HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
3332 inner_l2_len >> HNS3_L2_LEN_UNIT);
3333 /* OL2 header size, in units of 2 bytes */
3334 tmp_outer |= hns3_gen_field_val(HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
3335 m->outer_l2_len >> HNS3_L2_LEN_UNIT);
3337 *type_cs_vlan_tso_len = tmp_inner;
3338 *ol_type_vlan_len_msec = tmp_outer;
3344 hns3_parse_tunneling_params(struct hns3_tx_queue *txq, struct rte_mbuf *m,
3345 uint16_t tx_desc_id)
3347 struct hns3_desc *tx_ring = txq->tx_ring;
3348 struct hns3_desc *desc = &tx_ring[tx_desc_id];
3349 uint64_t ol_flags = m->ol_flags;
3350 uint32_t tmp_outer = 0;
3351 uint32_t tmp_inner = 0;
3356 * In the mbuf, the tunnel header length is counted as part of the
3357 * inner L2 header, but in the hns3 descriptor it belongs to the outer
3358 * L4 field, so the driver has to switch between the two. To avoid
3359 * multiple calculations, the L2 header lengths, both outer and
3360 * inner, are filled in while parsing tunnel packets.
3362 if (!(ol_flags & PKT_TX_TUNNEL_MASK)) {
3364 * For non-tunnel packets the tunnel type id is 0, so there is no need
3365 * to assign a value to it. Only the inner (normal) L2 header length is set.
3368 tmp_inner |= hns3_gen_field_val(HNS3_TXD_L2LEN_M,
3369 HNS3_TXD_L2LEN_S, m->l2_len >> HNS3_L2_LEN_UNIT);
3372 * If the outer checksum is not offloaded, the outer lengths may be
3373 * filled with 0, and the length of the outer header then gets added
3374 * to the inner l2_len, which would lead to a cksum error. So the
3375 * driver has to calculate the header lengths itself.
3377 if (unlikely(!(ol_flags &
3378 (PKT_TX_OUTER_IP_CKSUM | PKT_TX_OUTER_UDP_CKSUM)) &&
3379 m->outer_l2_len == 0)) {
3380 struct rte_net_hdr_lens hdr_len;
3381 (void)rte_net_get_ptype(m, &hdr_len,
3382 RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
3383 m->outer_l3_len = hdr_len.l3_len;
3384 m->outer_l2_len = hdr_len.l2_len;
3385 m->l2_len = m->l2_len - hdr_len.l2_len - hdr_len.l3_len;
3387 hns3_parse_outer_params(m, &tmp_outer);
3388 ret = hns3_parse_inner_params(m, &tmp_outer, &tmp_inner);
3393 desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(tmp_outer);
3394 desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp_inner);
3395 tmp_ol4cs = ol_flags & PKT_TX_OUTER_UDP_CKSUM ?
3396 BIT(HNS3_TXD_OL4CS_B) : 0;
3397 desc->tx.paylen_fd_dop_ol4cs = rte_cpu_to_le_32(tmp_ol4cs);
3403 hns3_parse_l3_cksum_params(struct rte_mbuf *m, uint32_t *type_cs_vlan_tso_len)
3405 uint64_t ol_flags = m->ol_flags;
3409 tmp = *type_cs_vlan_tso_len;
3410 if (ol_flags & PKT_TX_IPV4)
3411 l3_type = HNS3_L3T_IPV4;
3412 else if (ol_flags & PKT_TX_IPV6)
3413 l3_type = HNS3_L3T_IPV6;
3415 l3_type = HNS3_L3T_NONE;
3417 /* inner(/normal) L3 header size, in units of 4 bytes */
3418 tmp |= hns3_gen_field_val(HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
3419 m->l3_len >> HNS3_L3_LEN_UNIT);
3421 tmp |= hns3_gen_field_val(HNS3_TXD_L3T_M, HNS3_TXD_L3T_S, l3_type);
3423 /* Enable L3 checksum offloads */
3424 if (ol_flags & PKT_TX_IP_CKSUM)
3425 tmp |= BIT(HNS3_TXD_L3CS_B);
3426 *type_cs_vlan_tso_len = tmp;
3430 hns3_parse_l4_cksum_params(struct rte_mbuf *m, uint32_t *type_cs_vlan_tso_len)
3432 uint64_t ol_flags = m->ol_flags;
3434 /* Enable L4 checksum offloads */
3435 switch (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG)) {
3436 case PKT_TX_TCP_CKSUM | PKT_TX_TCP_SEG:
3437 case PKT_TX_TCP_CKSUM:
3438 case PKT_TX_TCP_SEG:
3439 tmp = *type_cs_vlan_tso_len;
3440 tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
3443 case PKT_TX_UDP_CKSUM:
3444 tmp = *type_cs_vlan_tso_len;
3445 tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
3448 case PKT_TX_SCTP_CKSUM:
3449 tmp = *type_cs_vlan_tso_len;
3450 tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
3456 tmp |= BIT(HNS3_TXD_L4CS_B);
3457 tmp |= hns3_gen_field_val(HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
3458 m->l4_len >> HNS3_L4_LEN_UNIT);
3459 *type_cs_vlan_tso_len = tmp;
3463 hns3_txd_enable_checksum(struct hns3_tx_queue *txq, struct rte_mbuf *m,
3464 uint16_t tx_desc_id)
3466 struct hns3_desc *tx_ring = txq->tx_ring;
3467 struct hns3_desc *desc = &tx_ring[tx_desc_id];
3470 hns3_parse_l3_cksum_params(m, &value);
3471 hns3_parse_l4_cksum_params(m, &value);
3473 desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(value);
3477 hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num,
3478 uint32_t max_non_tso_bd_num)
3480 struct rte_mbuf *m_first = tx_pkts;
3481 struct rte_mbuf *m_last = tx_pkts;
3482 uint32_t tot_len = 0;
3487 * In the hns3 network engine, hardware requires that the sum of the
3488 * data lengths of every 8 consecutive buffers is greater than the MSS.
3489 * We simplify this by ensuring that pkt_headlen + the first 8
3490 * consecutive frags is greater than the gso header len + mss, and that
3491 * the remaining 7 consecutive frags are greater than the MSS, except for the last 7 frags.
3493 if (bd_num <= max_non_tso_bd_num)
3496 for (i = 0; m_last && i < max_non_tso_bd_num - 1;
3497 i++, m_last = m_last->next)
3498 tot_len += m_last->data_len;
3503 /* ensure the first 8 frags are greater than mss + header */
3504 hdr_len = tx_pkts->l2_len + tx_pkts->l3_len + tx_pkts->l4_len;
3505 hdr_len += (tx_pkts->ol_flags & PKT_TX_TUNNEL_MASK) ?
3506 tx_pkts->outer_l2_len + tx_pkts->outer_l3_len : 0;
3507 if (tot_len + m_last->data_len < tx_pkts->tso_segsz + hdr_len)
3511 * ensure the sum of the data lengths of every 7 consecutive buffers
3512 * is greater than the mss, except for the last one.
3514 for (i = 0; m_last && i < bd_num - max_non_tso_bd_num; i++) {
3515 tot_len -= m_first->data_len;
3516 tot_len += m_last->data_len;
3518 if (tot_len < tx_pkts->tso_segsz)
3521 m_first = m_first->next;
3522 m_last = m_last->next;
3529 hns3_outer_ipv4_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags,
3532 struct rte_ipv4_hdr *ipv4_hdr;
3533 ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
3535 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
3536 ipv4_hdr->hdr_checksum = 0;
3537 if (ol_flags & PKT_TX_OUTER_UDP_CKSUM) {
3538 struct rte_udp_hdr *udp_hdr;
3540 * If OUTER_UDP_CKSUM is supported, HW can calculate the pseudo
3541 * header checksum for TSO packets
3543 if (ol_flags & PKT_TX_TCP_SEG)
3545 udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
3546 m->outer_l2_len + m->outer_l3_len);
3547 udp_hdr->dgram_cksum = rte_ipv4_phdr_cksum(ipv4_hdr, ol_flags);
3551 *l4_proto = ipv4_hdr->next_proto_id;
3556 hns3_outer_ipv6_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags,
3559 struct rte_ipv6_hdr *ipv6_hdr;
3560 ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
3562 if (ol_flags & PKT_TX_OUTER_UDP_CKSUM) {
3563 struct rte_udp_hdr *udp_hdr;
3565 * If OUTER_UDP_CKSUM is supported, HW can calculate the pseudo
3566 * header checksum for TSO packets
3568 if (ol_flags & PKT_TX_TCP_SEG)
3570 udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
3571 m->outer_l2_len + m->outer_l3_len);
3572 udp_hdr->dgram_cksum = rte_ipv6_phdr_cksum(ipv6_hdr, ol_flags);
3576 *l4_proto = ipv6_hdr->proto;
3581 hns3_outer_header_cksum_prepare(struct rte_mbuf *m)
3583 uint64_t ol_flags = m->ol_flags;
3584 uint32_t paylen, hdr_len, l4_proto;
3585 struct rte_udp_hdr *udp_hdr;
3587 if (!(ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6)))
3590 if (ol_flags & PKT_TX_OUTER_IPV4) {
3591 if (hns3_outer_ipv4_cksum_prepared(m, ol_flags, &l4_proto))
3594 if (hns3_outer_ipv6_cksum_prepared(m, ol_flags, &l4_proto))
3598 /* driver should ensure the outer udp cksum is 0 for TUNNEL TSO */
3599 if (l4_proto == IPPROTO_UDP && (ol_flags & PKT_TX_TCP_SEG)) {
3600 hdr_len = m->l2_len + m->l3_len + m->l4_len;
3601 hdr_len += m->outer_l2_len + m->outer_l3_len;
3602 paylen = m->pkt_len - hdr_len;
3603 if (paylen <= m->tso_segsz)
3605 udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
3608 udp_hdr->dgram_cksum = 0;
3613 hns3_check_tso_pkt_valid(struct rte_mbuf *m)
3615 uint32_t tmp_data_len_sum = 0;
3616 uint16_t nb_buf = m->nb_segs;
3617 uint32_t paylen, hdr_len;
3618 struct rte_mbuf *m_seg;
3621 if (nb_buf > HNS3_MAX_TSO_BD_PER_PKT)
3624 hdr_len = m->l2_len + m->l3_len + m->l4_len;
3625 hdr_len += (m->ol_flags & PKT_TX_TUNNEL_MASK) ?
3626 m->outer_l2_len + m->outer_l3_len : 0;
3627 if (hdr_len > HNS3_MAX_TSO_HDR_SIZE)
3630 paylen = m->pkt_len - hdr_len;
3631 if (paylen > HNS3_MAX_BD_PAYLEN)
3635 * The TSO header (including the outer and inner L2, L3 and L4 headers)
3636 * should be provided by at most three descriptors in the hns3 network engine.
3640 for (i = 0; m_seg != NULL && i < HNS3_MAX_TSO_HDR_BD_NUM && i < nb_buf;
3641 i++, m_seg = m_seg->next) {
3642 tmp_data_len_sum += m_seg->data_len;
3645 if (hdr_len > tmp_data_len_sum)
3651 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3653 hns3_vld_vlan_chk(struct hns3_tx_queue *txq, struct rte_mbuf *m)
3655 struct rte_ether_hdr *eh;
3656 struct rte_vlan_hdr *vh;
3658 if (!txq->pvid_sw_shift_en)
3662 * Due to hardware limitations, the hns3 network engine only supports
3663 * two-layer VLAN hardware offload in the Tx direction, so when PVID is
3664 * enabled, QinQ insertion is no longer supported.
3665 * And when PVID is enabled, in the following two cases:
3666 * i) packets with more than two VLAN tags.
3667 * ii) packets with one VLAN tag while the hardware VLAN insert is
3669 * The packets will be regarded as abnormal packets and discarded by
3670 * hardware in Tx direction. For debugging purposes, a validation check
3671 * for these types of packets is added to the '.tx_pkt_prepare' ops
3672 * implementation function named hns3_prep_pkts to inform users that
3673 * these packets will be discarded.
3675 if (m->ol_flags & PKT_TX_QINQ_PKT)
3678 eh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
3679 if (eh->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
3680 if (m->ol_flags & PKT_TX_VLAN_PKT)
3683 /* Ensure the incoming packet is not a QinQ packet */
3684 vh = (struct rte_vlan_hdr *)(eh + 1);
3685 if (vh->eth_proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN))
3694 hns3_udp_cksum_help(struct rte_mbuf *m)
3696 uint64_t ol_flags = m->ol_flags;
3700 if (ol_flags & PKT_TX_IPV4) {
3701 struct rte_ipv4_hdr *ipv4_hdr = rte_pktmbuf_mtod_offset(m,
3702 struct rte_ipv4_hdr *, m->l2_len);
3703 l4_len = rte_be_to_cpu_16(ipv4_hdr->total_length) - m->l3_len;
3705 struct rte_ipv6_hdr *ipv6_hdr = rte_pktmbuf_mtod_offset(m,
3706 struct rte_ipv6_hdr *, m->l2_len);
3707 l4_len = rte_be_to_cpu_16(ipv6_hdr->payload_len);
3710 rte_raw_cksum_mbuf(m, m->l2_len + m->l3_len, l4_len, &cksum);
3714 * RFC 768: if the computed checksum is zero for UDP, it is transmitted as all ones (the equivalent in one's complement arithmetic).
3720 return (uint16_t)cksum;
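/*
 * Work around hardware tunnel auto-recognition: if a plain UDP packet
 * requesting UDP checksum offload is sent to a VXLAN/VXLAN-GPE/GENEVE
 * port while no tunnel flag is set, hardware would use a wrong header
 * length, so the driver fills the UDP checksum in software and clears
 * the L4 checksum request instead.
 */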
3724 hns3_validate_tunnel_cksum(struct hns3_tx_queue *tx_queue, struct rte_mbuf *m)
3726 uint64_t ol_flags = m->ol_flags;
3727 struct rte_udp_hdr *udp_hdr;
3730 if (tx_queue->udp_cksum_mode == HNS3_SPECIAL_PORT_HW_CKSUM_MODE ||
3731 ol_flags & PKT_TX_TUNNEL_MASK ||
3732 (ol_flags & PKT_TX_L4_MASK) != PKT_TX_UDP_CKSUM)
3735 * A UDP packet with the same dst_port as VXLAN/VXLAN_GPE/GENEVE will
3736 * be recognized as a tunnel packet by the HW. In this case, if UDP CKSUM
3737 * offload is set and the tunnel mask has not been set, the CKSUM will
3738 * be wrong since the header length is wrong, so the driver should
3739 * complete the CKSUM itself to avoid a CKSUM error.
3741 udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
3742 m->l2_len + m->l3_len);
3743 dst_port = rte_be_to_cpu_16(udp_hdr->dst_port);
3745 case RTE_VXLAN_DEFAULT_PORT:
3746 case RTE_VXLAN_GPE_DEFAULT_PORT:
3747 case RTE_GENEVE_DEFAULT_PORT:
3748 udp_hdr->dgram_cksum = hns3_udp_cksum_help(m);
3749 m->ol_flags = ol_flags & ~PKT_TX_L4_MASK;
3757 hns3_prep_pkt_proc(struct hns3_tx_queue *tx_queue, struct rte_mbuf *m)
3761 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3762 ret = rte_validate_tx_offload(m);
3768 ret = hns3_vld_vlan_chk(tx_queue, m);
3774 if (hns3_pkt_is_tso(m)) {
3775 if (hns3_pkt_need_linearized(m, m->nb_segs,
3776 tx_queue->max_non_tso_bd_num) ||
3777 hns3_check_tso_pkt_valid(m)) {
3782 if (tx_queue->tso_mode != HNS3_TSO_SW_CAL_PSEUDO_H_CSUM) {
3784 * (tso mode != HNS3_TSO_SW_CAL_PSEUDO_H_CSUM) means that
3785 * the hardware supports recalculating the TCP pseudo header
3786 * checksum of packets that need TSO, so the network driver
3787 * software does not need to recalculate it.
3789 hns3_outer_header_cksum_prepare(m);
3794 ret = rte_net_intel_cksum_prepare(m);
3800 if (!hns3_validate_tunnel_cksum(tx_queue, m))
3803 hns3_outer_header_cksum_prepare(m);
3809 hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
3815 for (i = 0; i < nb_pkts; i++) {
3817 if (hns3_prep_pkt_proc(tx_queue, m))
3825 hns3_parse_cksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
3828 struct hns3_desc *tx_ring = txq->tx_ring;
3829 struct hns3_desc *desc = &tx_ring[tx_desc_id];
3831 /* Enable checksum offloading */
3832 if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK) {
3833 /* Fill in tunneling parameters if necessary */
3834 if (hns3_parse_tunneling_params(txq, m, tx_desc_id)) {
3835 txq->dfx_stats.unsupported_tunnel_pkt_cnt++;
3839 hns3_txd_enable_checksum(txq, m, tx_desc_id);
3841 /* clear the control bit */
3842 desc->tx.type_cs_vlan_tso_len = 0;
3843 desc->tx.ol_type_vlan_len_msec = 0;
3850 hns3_check_non_tso_pkt(uint16_t nb_buf, struct rte_mbuf **m_seg,
3851 struct rte_mbuf *tx_pkt, struct hns3_tx_queue *txq)
3853 uint8_t max_non_tso_bd_num;
3854 struct rte_mbuf *new_pkt;
3857 if (hns3_pkt_is_tso(*m_seg))
3861 * If the packet length is greater than the HNS3_MAX_FRAME_LEN that
3862 * the driver supports, the packet will be ignored.
3864 if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) > HNS3_MAX_FRAME_LEN)) {
3865 txq->dfx_stats.over_length_pkt_cnt++;
3869 max_non_tso_bd_num = txq->max_non_tso_bd_num;
3870 if (unlikely(nb_buf > max_non_tso_bd_num)) {
3871 txq->dfx_stats.exceed_limit_bd_pkt_cnt++;
3872 ret = hns3_reassemble_tx_pkts(tx_pkt, &new_pkt,
3873 max_non_tso_bd_num);
3875 txq->dfx_stats.exceed_limit_bd_reassem_fail++;
3885 hns3_tx_free_buffer_simple(struct hns3_tx_queue *txq)
3887 struct hns3_entry *tx_entry;
3888 struct hns3_desc *desc;
3889 uint16_t tx_next_clean;
3893 if (HNS3_GET_TX_QUEUE_PEND_BD_NUM(txq) < txq->tx_rs_thresh)
3897 * All mbufs can be released only when the VLD bits of all
3898 * descriptors in a batch are cleared.
3900 tx_next_clean = (txq->next_to_clean + txq->tx_rs_thresh - 1) %
3902 desc = &txq->tx_ring[tx_next_clean];
3903 for (i = 0; i < txq->tx_rs_thresh; i++) {
3904 if (rte_le_to_cpu_16(desc->tx.tp_fe_sc_vld_ra_ri) &
3905 BIT(HNS3_TXD_VLD_B))
3910 tx_entry = &txq->sw_ring[txq->next_to_clean];
3912 for (i = 0; i < txq->tx_rs_thresh; i++)
3913 rte_prefetch0((tx_entry + i)->mbuf);
3914 for (i = 0; i < txq->tx_rs_thresh; i++, tx_entry++) {
3915 rte_mempool_put(tx_entry->mbuf->pool, tx_entry->mbuf);
3916 tx_entry->mbuf = NULL;
3919 txq->next_to_clean = (tx_next_clean + 1) % txq->nb_tx_desc;
3920 txq->tx_bd_ready += txq->tx_rs_thresh;
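/*
 * Helpers for the simple Tx path: back up transmitted mbufs into the
 * software ring and fill the matching descriptors with address, length
 * and the VLD/FE bits, unrolled four BDs at a time with a scalar
 * leftover loop.
 */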
3925 hns3_tx_backup_1mbuf(struct hns3_entry *tx_entry, struct rte_mbuf **pkts)
3927 tx_entry->mbuf = pkts[0];
3931 hns3_tx_backup_4mbuf(struct hns3_entry *tx_entry, struct rte_mbuf **pkts)
3933 hns3_tx_backup_1mbuf(&tx_entry[0], &pkts[0]);
3934 hns3_tx_backup_1mbuf(&tx_entry[1], &pkts[1]);
3935 hns3_tx_backup_1mbuf(&tx_entry[2], &pkts[2]);
3936 hns3_tx_backup_1mbuf(&tx_entry[3], &pkts[3]);
3940 hns3_tx_setup_4bd(struct hns3_desc *txdp, struct rte_mbuf **pkts)
3942 #define PER_LOOP_NUM 4
3943 const uint16_t bd_flag = BIT(HNS3_TXD_VLD_B) | BIT(HNS3_TXD_FE_B);
3947 for (i = 0; i < PER_LOOP_NUM; i++, txdp++, pkts++) {
3948 dma_addr = rte_mbuf_data_iova(*pkts);
3949 txdp->addr = rte_cpu_to_le_64(dma_addr);
3950 txdp->tx.send_size = rte_cpu_to_le_16((*pkts)->data_len);
3951 txdp->tx.paylen_fd_dop_ol4cs = 0;
3952 txdp->tx.type_cs_vlan_tso_len = 0;
3953 txdp->tx.ol_type_vlan_len_msec = 0;
3954 txdp->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(bd_flag);
3959 hns3_tx_setup_1bd(struct hns3_desc *txdp, struct rte_mbuf **pkts)
3961 const uint16_t bd_flag = BIT(HNS3_TXD_VLD_B) | BIT(HNS3_TXD_FE_B);
3964 dma_addr = rte_mbuf_data_iova(*pkts);
3965 txdp->addr = rte_cpu_to_le_64(dma_addr);
3966 txdp->tx.send_size = rte_cpu_to_le_16((*pkts)->data_len);
3967 txdp->tx.paylen_fd_dop_ol4cs = 0;
3968 txdp->tx.type_cs_vlan_tso_len = 0;
3969 txdp->tx.ol_type_vlan_len_msec = 0;
3970 txdp->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(bd_flag);
3974 hns3_tx_fill_hw_ring(struct hns3_tx_queue *txq,
3975 struct rte_mbuf **pkts,
3978 #define PER_LOOP_NUM 4
3979 #define PER_LOOP_MASK (PER_LOOP_NUM - 1)
3980 struct hns3_desc *txdp = &txq->tx_ring[txq->next_to_use];
3981 struct hns3_entry *tx_entry = &txq->sw_ring[txq->next_to_use];
3982 const uint32_t mainpart = (nb_pkts & ((uint32_t)~PER_LOOP_MASK));
3983 const uint32_t leftover = (nb_pkts & ((uint32_t)PER_LOOP_MASK));
3986 for (i = 0; i < mainpart; i += PER_LOOP_NUM) {
3987 hns3_tx_backup_4mbuf(tx_entry + i, pkts + i);
3988 hns3_tx_setup_4bd(txdp + i, pkts + i);
3990 /* Increment bytes counter */
3992 for (j = 0; j < PER_LOOP_NUM; j++)
3993 txq->basic_stats.bytes += pkts[i + j]->pkt_len;
3995 if (unlikely(leftover > 0)) {
3996 for (i = 0; i < leftover; i++) {
3997 hns3_tx_backup_1mbuf(tx_entry + mainpart + i,
3998 pkts + mainpart + i);
3999 hns3_tx_setup_1bd(txdp + mainpart + i,
4000 pkts + mainpart + i);
4002 /* Increment bytes counter */
4003 txq->basic_stats.bytes += pkts[mainpart + i]->pkt_len;
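/*
 * Simple Tx burst: assumes single-mbuf packets and a configuration with
 * no Tx offloads except mbuf fast free. The burst is clipped to the
 * number of ready BDs, split into two fills when it wraps the ring end,
 * and the doorbell is written once per burst.
 */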
4009 hns3_xmit_pkts_simple(void *tx_queue,
4010 struct rte_mbuf **tx_pkts,
4013 struct hns3_tx_queue *txq = tx_queue;
4016 hns3_tx_free_buffer_simple(txq);
4018 nb_pkts = RTE_MIN(txq->tx_bd_ready, nb_pkts);
4019 if (unlikely(nb_pkts == 0)) {
4020 if (txq->tx_bd_ready == 0)
4021 txq->dfx_stats.queue_full_cnt++;
4025 txq->tx_bd_ready -= nb_pkts;
4026 if (txq->next_to_use + nb_pkts > txq->nb_tx_desc) {
4027 nb_tx = txq->nb_tx_desc - txq->next_to_use;
4028 hns3_tx_fill_hw_ring(txq, tx_pkts, nb_tx);
4029 txq->next_to_use = 0;
4032 hns3_tx_fill_hw_ring(txq, tx_pkts + nb_tx, nb_pkts - nb_tx);
4033 txq->next_to_use += nb_pkts - nb_tx;
4035 hns3_write_reg_opt(txq->io_tail_reg, nb_pkts);
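/*
 * Full-featured Tx burst: reclaims completed BDs, pads packets shorter
 * than the hardware minimum, linearizes over-fragmented non-TSO packets,
 * programs checksum/TSO/VLAN fields in the first BD of each packet, fills
 * one BD per segment and finally rings the doorbell for the whole burst.
 */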
4041 hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
4043 struct hns3_tx_queue *txq = tx_queue;
4044 struct hns3_entry *tx_bak_pkt;
4045 struct hns3_desc *tx_ring;
4046 struct rte_mbuf *tx_pkt;
4047 struct rte_mbuf *m_seg;
4048 struct hns3_desc *desc;
4049 uint32_t nb_hold = 0;
4050 uint16_t tx_next_use;
4051 uint16_t tx_pkt_num;
4057 /* free useless buffer */
4058 hns3_tx_free_useless_buffer(txq);
4060 tx_next_use = txq->next_to_use;
4061 tx_bd_max = txq->nb_tx_desc;
4062 tx_pkt_num = nb_pkts;
4063 tx_ring = txq->tx_ring;
4066 tx_bak_pkt = &txq->sw_ring[tx_next_use];
4067 for (nb_tx = 0; nb_tx < tx_pkt_num; nb_tx++) {
4068 tx_pkt = *tx_pkts++;
4070 nb_buf = tx_pkt->nb_segs;
4072 if (nb_buf > txq->tx_bd_ready) {
4073 txq->dfx_stats.queue_full_cnt++;
4081 * If the packet length is less than the minimum packet length supported
4082 * by hardware in the Tx direction, the driver needs to pad it to avoid
4085 if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) <
4086 txq->min_tx_pkt_len)) {
4090 add_len = txq->min_tx_pkt_len -
4091 rte_pktmbuf_pkt_len(tx_pkt);
4092 appended = rte_pktmbuf_append(tx_pkt, add_len);
4093 if (appended == NULL) {
4094 txq->dfx_stats.pkt_padding_fail_cnt++;
4098 memset(appended, 0, add_len);
4103 if (hns3_check_non_tso_pkt(nb_buf, &m_seg, tx_pkt, txq))
4106 if (hns3_parse_cksum(txq, tx_next_use, m_seg))
4110 desc = &tx_ring[tx_next_use];
4113 * If the packet is divided into multiple Tx Buffer Descriptors,
4114 * only need to fill vlan, paylen and tso into the first Tx
4115 * Buffer Descriptor.
4117 hns3_fill_first_desc(txq, desc, m_seg);
4120 desc = &tx_ring[tx_next_use];
4122 * Fill valid bits, DMA address and data length for each
4123 * Tx Buffer Descriptor.
4125 hns3_fill_per_desc(desc, m_seg);
4126 tx_bak_pkt->mbuf = m_seg;
4127 m_seg = m_seg->next;
4130 if (tx_next_use >= tx_bd_max) {
4132 tx_bak_pkt = txq->sw_ring;
4136 } while (m_seg != NULL);
4138 /* Add end flag for the last Tx Buffer Descriptor */
4139 desc->tx.tp_fe_sc_vld_ra_ri |=
4140 rte_cpu_to_le_16(BIT(HNS3_TXD_FE_B));
4142 /* Increment bytes counter */
4143 txq->basic_stats.bytes += tx_pkt->pkt_len;
4145 txq->next_to_use = tx_next_use;
4146 txq->tx_bd_ready -= i;
4152 hns3_write_reg_opt(txq->io_tail_reg, nb_hold);
4158 hns3_tx_check_vec_support(__rte_unused struct rte_eth_dev *dev)
4164 hns3_xmit_pkts_vec(__rte_unused void *tx_queue,
4165 __rte_unused struct rte_mbuf **tx_pkts,
4166 __rte_unused uint16_t nb_pkts)
4172 hns3_xmit_pkts_vec_sve(__rte_unused void *tx_queue,
4173 __rte_unused struct rte_mbuf **tx_pkts,
4174 __rte_unused uint16_t nb_pkts)
4180 hns3_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
4181 struct rte_eth_burst_mode *mode)
4183 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
4184 const char *info = NULL;
4186 if (pkt_burst == hns3_xmit_pkts_simple)
4187 info = "Scalar Simple";
4188 else if (pkt_burst == hns3_xmit_pkts)
4190 else if (pkt_burst == hns3_xmit_pkts_vec)
4191 info = "Vector Neon";
4192 else if (pkt_burst == hns3_xmit_pkts_vec_sve)
4193 info = "Vector Sve";
4198 snprintf(mode->info, sizeof(mode->info), "%s", info);
4204 hns3_tx_check_simple_support(struct rte_eth_dev *dev)
4206 uint64_t offloads = dev->data->dev_conf.txmode.offloads;
4208 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4209 if (hns3_dev_ptp_supported(hw))
4212 return (offloads == (offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE));
4216 hns3_get_tx_prep_needed(struct rte_eth_dev *dev)
4218 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4220 /* always perform tx_prepare when debug */
4223 #define HNS3_DEV_TX_CSKUM_TSO_OFFLOAD_MASK (\
4224 DEV_TX_OFFLOAD_IPV4_CKSUM | \
4225 DEV_TX_OFFLOAD_TCP_CKSUM | \
4226 DEV_TX_OFFLOAD_UDP_CKSUM | \
4227 DEV_TX_OFFLOAD_SCTP_CKSUM | \
4228 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
4229 DEV_TX_OFFLOAD_OUTER_UDP_CKSUM | \
4230 DEV_TX_OFFLOAD_TCP_TSO | \
4231 DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
4232 DEV_TX_OFFLOAD_GRE_TNL_TSO | \
4233 DEV_TX_OFFLOAD_GENEVE_TNL_TSO)
4235 uint64_t tx_offload = dev->data->dev_conf.txmode.offloads;
4236 if (tx_offload & HNS3_DEV_TX_CSKUM_TSO_OFFLOAD_MASK)
4243 static eth_tx_burst_t
4244 hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep)
4246 struct hns3_adapter *hns = dev->data->dev_private;
4247 bool vec_allowed, sve_allowed, simple_allowed;
4248 bool vec_support, tx_prepare_needed;
4250 vec_support = hns3_tx_check_vec_support(dev) == 0;
4251 vec_allowed = vec_support && hns3_get_default_vec_support();
4252 sve_allowed = vec_support && hns3_get_sve_support();
4253 simple_allowed = hns3_tx_check_simple_support(dev);
4254 tx_prepare_needed = hns3_get_tx_prep_needed(dev);
4258 if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_VEC && vec_allowed)
4259 return hns3_xmit_pkts_vec;
4260 if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_SVE && sve_allowed)
4261 return hns3_xmit_pkts_vec_sve;
4262 if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_SIMPLE && simple_allowed)
4263 return hns3_xmit_pkts_simple;
4264 if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_COMMON) {
4265 if (tx_prepare_needed)
4266 *prep = hns3_prep_pkts;
4267 return hns3_xmit_pkts;
4271 return hns3_xmit_pkts_vec;
4273 return hns3_xmit_pkts_simple;
4275 if (tx_prepare_needed)
4276 *prep = hns3_prep_pkts;
4277 return hns3_xmit_pkts;
4281 hns3_dummy_rxtx_burst(void *dpdk_txq __rte_unused,
4282 struct rte_mbuf **pkts __rte_unused,
4283 uint16_t pkts_n __rte_unused)
4289 hns3_trace_rxtx_function(struct rte_eth_dev *dev)
4291 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4292 struct rte_eth_burst_mode rx_mode;
4293 struct rte_eth_burst_mode tx_mode;
4295 memset(&rx_mode, 0, sizeof(rx_mode));
4296 memset(&tx_mode, 0, sizeof(tx_mode));
4297 (void)hns3_rx_burst_mode_get(dev, 0, &rx_mode);
4298 (void)hns3_tx_burst_mode_get(dev, 0, &tx_mode);
4300 hns3_dbg(hw, "using rx_pkt_burst: %s, tx_pkt_burst: %s.",
4301 rx_mode.info, tx_mode.info);
4304 void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
4306 struct hns3_adapter *hns = eth_dev->data->dev_private;
4307 eth_tx_prep_t prep = NULL;
4309 if (hns->hw.adapter_state == HNS3_NIC_STARTED &&
4310 __atomic_load_n(&hns->hw.reset.resetting, __ATOMIC_RELAXED) == 0) {
4311 eth_dev->rx_pkt_burst = hns3_get_rx_function(eth_dev);
4312 eth_dev->rx_descriptor_status = hns3_dev_rx_descriptor_status;
4313 eth_dev->tx_pkt_burst = hns3_get_tx_function(eth_dev, &prep);
4314 eth_dev->tx_pkt_prepare = prep;
4315 eth_dev->tx_descriptor_status = hns3_dev_tx_descriptor_status;
4316 hns3_trace_rxtx_function(eth_dev);
4318 eth_dev->rx_pkt_burst = hns3_dummy_rxtx_burst;
4319 eth_dev->tx_pkt_burst = hns3_dummy_rxtx_burst;
4320 eth_dev->tx_pkt_prepare = hns3_dummy_rxtx_burst;
4325 hns3_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
4326 struct rte_eth_rxq_info *qinfo)
4328 struct hns3_rx_queue *rxq = dev->data->rx_queues[queue_id];
4330 qinfo->mp = rxq->mb_pool;
4331 qinfo->nb_desc = rxq->nb_rx_desc;
4332 qinfo->scattered_rx = dev->data->scattered_rx;
4333 /* Report the HW Rx buffer length to user */
4334 qinfo->rx_buf_size = rxq->rx_buf_len;
4337 * If there are no available Rx buffer descriptors, incoming packets
4338 * are always dropped by the hns3 hardware.
4340 qinfo->conf.rx_drop_en = 1;
4341 qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
4342 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
4343 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
4347 hns3_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
4348 struct rte_eth_txq_info *qinfo)
4350 struct hns3_tx_queue *txq = dev->data->tx_queues[queue_id];
4352 qinfo->nb_desc = txq->nb_tx_desc;
4353 qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
4354 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
4355 qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
4356 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
4360 hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
4362 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4363 struct hns3_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
4364 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
4367 if (!hns3_dev_indep_txrx_supported(hw))
4370 rte_spinlock_lock(&hw->lock);
4371 ret = hns3_reset_queue(hw, rx_queue_id, HNS3_RING_TYPE_RX);
4373 hns3_err(hw, "fail to reset Rx queue %u, ret = %d.",
4375 rte_spinlock_unlock(&hw->lock);
4379 ret = hns3_init_rxq(hns, rx_queue_id);
4381 hns3_err(hw, "fail to init Rx queue %u, ret = %d.",
4383 rte_spinlock_unlock(&hw->lock);
4387 hns3_enable_rxq(rxq, true);
4388 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
4389 rte_spinlock_unlock(&hw->lock);
4395 hns3_reset_sw_rxq(struct hns3_rx_queue *rxq)
4397 rxq->next_to_use = 0;
4398 rxq->rx_rearm_start = 0;
4399 rxq->rx_free_hold = 0;
4400 rxq->rx_rearm_nb = 0;
4401 rxq->pkt_first_seg = NULL;
4402 rxq->pkt_last_seg = NULL;
4403 memset(&rxq->rx_ring[0], 0, rxq->nb_rx_desc * sizeof(struct hns3_desc));
4404 hns3_rxq_vec_setup(rxq);
4408 hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
4410 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4411 struct hns3_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
4413 if (!hns3_dev_indep_txrx_supported(hw))
4416 rte_spinlock_lock(&hw->lock);
4417 hns3_enable_rxq(rxq, false);
4419 hns3_rx_queue_release_mbufs(rxq);
4421 hns3_reset_sw_rxq(rxq);
4422 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
4423 rte_spinlock_unlock(&hw->lock);
4429 hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
4431 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4432 struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
4435 if (!hns3_dev_indep_txrx_supported(hw))
4438 rte_spinlock_lock(&hw->lock);
4439 ret = hns3_reset_queue(hw, tx_queue_id, HNS3_RING_TYPE_TX);
4441 hns3_err(hw, "fail to reset Tx queue %u, ret = %d.",
4443 rte_spinlock_unlock(&hw->lock);
4448 hns3_enable_txq(txq, true);
4449 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
4450 rte_spinlock_unlock(&hw->lock);
4456 hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
4458 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4459 struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
4461 if (!hns3_dev_indep_txrx_supported(hw))
4464 rte_spinlock_lock(&hw->lock);
4465 hns3_enable_txq(txq, false);
4466 hns3_tx_queue_release_mbufs(txq);
4468 * All the mbufs in sw_ring are released and all the pointers in sw_ring
4469 * are set to NULL. If this queue is still used by the upper layer,
4470 * residual SW state of this txq may cause these pointers in sw_ring,
4471 * which have already been set to NULL, to be released again. To avoid it,
4475 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
4476 rte_spinlock_unlock(&hw->lock);
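/*
 * Free up to free_cnt transmitted mbufs starting at next_to_clean,
 * stopping at next_to_use or at the first descriptor hardware still owns
 * (VLD bit set); backs the rte_eth_tx_done_cleanup() API for the
 * full-featured Tx path.
 */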
4482 hns3_tx_done_cleanup_full(struct hns3_tx_queue *txq, uint32_t free_cnt)
4484 uint16_t next_to_clean = txq->next_to_clean;
4485 uint16_t next_to_use = txq->next_to_use;
4486 uint16_t tx_bd_ready = txq->tx_bd_ready;
4487 struct hns3_entry *tx_pkt = &txq->sw_ring[next_to_clean];
4488 struct hns3_desc *desc = &txq->tx_ring[next_to_clean];
4491 if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
4492 free_cnt = txq->nb_tx_desc;
4494 for (idx = 0; idx < free_cnt; idx++) {
4495 if (next_to_clean == next_to_use)
4498 if (desc->tx.tp_fe_sc_vld_ra_ri &
4499 rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B)))
4502 if (tx_pkt->mbuf != NULL) {
4503 rte_pktmbuf_free_seg(tx_pkt->mbuf);
4504 tx_pkt->mbuf = NULL;
4511 if (next_to_clean == txq->nb_tx_desc) {
4512 tx_pkt = txq->sw_ring;
4513 desc = txq->tx_ring;
4519 txq->next_to_clean = next_to_clean;
4520 txq->tx_bd_ready = tx_bd_ready;
4527 hns3_tx_done_cleanup(void *txq, uint32_t free_cnt)
4529 struct hns3_tx_queue *q = (struct hns3_tx_queue *)txq;
4530 struct rte_eth_dev *dev = &rte_eth_devices[q->port_id];
4532 if (dev->tx_pkt_burst == hns3_xmit_pkts)
4533 return hns3_tx_done_cleanup_full(q, free_cnt);
4534 else if (dev->tx_pkt_burst == hns3_dummy_rxtx_burst)
4541 hns3_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
4543 volatile struct hns3_desc *rxdp;
4544 struct hns3_rx_queue *rxq;
4545 struct rte_eth_dev *dev;
4546 uint32_t bd_base_info;
4549 rxq = (struct hns3_rx_queue *)rx_queue;
4550 if (offset >= rxq->nb_rx_desc)
4553 desc_id = (rxq->next_to_use + offset) % rxq->nb_rx_desc;
4554 rxdp = &rxq->rx_ring[desc_id];
4555 bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
4556 dev = &rte_eth_devices[rxq->port_id];
4557 if (dev->rx_pkt_burst == hns3_recv_pkts_simple ||
4558 dev->rx_pkt_burst == hns3_recv_scattered_pkts) {
4559 if (offset >= rxq->nb_rx_desc - rxq->rx_free_hold)
4560 return RTE_ETH_RX_DESC_UNAVAIL;
4561 } else if (dev->rx_pkt_burst == hns3_recv_pkts_vec ||
4562 dev->rx_pkt_burst == hns3_recv_pkts_vec_sve) {
4563 if (offset >= rxq->nb_rx_desc - rxq->rx_rearm_nb)
4564 return RTE_ETH_RX_DESC_UNAVAIL;
4566 return RTE_ETH_RX_DESC_UNAVAIL;
4569 if (!(bd_base_info & BIT(HNS3_RXD_VLD_B)))
4570 return RTE_ETH_RX_DESC_AVAIL;
4572 return RTE_ETH_RX_DESC_DONE;
4576 hns3_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
4578 volatile struct hns3_desc *txdp;
4579 struct hns3_tx_queue *txq;
4580 struct rte_eth_dev *dev;
4583 txq = (struct hns3_tx_queue *)tx_queue;
4584 if (offset >= txq->nb_tx_desc)
4587 dev = &rte_eth_devices[txq->port_id];
4588 if (dev->tx_pkt_burst != hns3_xmit_pkts_simple &&
4589 dev->tx_pkt_burst != hns3_xmit_pkts &&
4590 dev->tx_pkt_burst != hns3_xmit_pkts_vec_sve &&
4591 dev->tx_pkt_burst != hns3_xmit_pkts_vec)
4592 return RTE_ETH_TX_DESC_UNAVAIL;
4594 desc_id = (txq->next_to_use + offset) % txq->nb_tx_desc;
4595 txdp = &txq->tx_ring[desc_id];
4596 if (txdp->tx.tp_fe_sc_vld_ra_ri & rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B)))
4597 return RTE_ETH_TX_DESC_FULL;
4599 return RTE_ETH_TX_DESC_DONE;
4603 hns3_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
4606 * Number of BDs that have been processed by the driver
4607 * but have not been notified to the hardware.
4609 uint32_t driver_hold_bd_num;
4610 struct hns3_rx_queue *rxq;
4613 rxq = dev->data->rx_queues[rx_queue_id];
4614 fbd_num = hns3_read_dev(rxq, HNS3_RING_RX_FBDNUM_REG);
4615 if (dev->rx_pkt_burst == hns3_recv_pkts_vec ||
4616 dev->rx_pkt_burst == hns3_recv_pkts_vec_sve)
4617 driver_hold_bd_num = rxq->rx_rearm_nb;
4619 driver_hold_bd_num = rxq->rx_free_hold;
4621 if (fbd_num <= driver_hold_bd_num)
4624 return fbd_num - driver_hold_bd_num;
4628 hns3_enable_rxd_adv_layout(struct hns3_hw *hw)
4631 * If the hardware supports the rxd advanced layout, the driver enables it
4634 if (hns3_dev_rxd_adv_layout_supported(hw))
4635 hns3_write_dev(hw, HNS3_RXD_ADV_LAYOUT_EN_REG, 1);