1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2021 HiSilicon Limited.
5 #include <rte_bus_pci.h>
6 #include <rte_common.h>
7 #include <rte_cycles.h>
8 #include <rte_geneve.h>
10 #include <ethdev_driver.h>
13 #include <rte_malloc.h>
14 #if defined(RTE_ARCH_ARM64)
15 #include <rte_cpuflags.h>
19 #include "hns3_common.h"
20 #include "hns3_rxtx.h"
21 #include "hns3_regs.h"
22 #include "hns3_logs.h"
25 #define HNS3_CFG_DESC_NUM(num) ((num) / 8 - 1)
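/* HNS3_CFG_DESC_NUM(): the BD number registers take the ring size encoded as
 * (num / 8) - 1, e.g. a 1024-descriptor ring is programmed as 127.
 */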
26 #define HNS3_RX_RING_PREFETCTH_MASK 3
29 hns3_rx_queue_release_mbufs(struct hns3_rx_queue *rxq)
33 /* Note: Fake rx queue will not enter here */
34 if (rxq->sw_ring == NULL)
37 if (rxq->rx_rearm_nb == 0) {
38 for (i = 0; i < rxq->nb_rx_desc; i++) {
39 if (rxq->sw_ring[i].mbuf != NULL) {
40 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
41 rxq->sw_ring[i].mbuf = NULL;
45 for (i = rxq->next_to_use;
46 i != rxq->rx_rearm_start;
47 i = (i + 1) % rxq->nb_rx_desc) {
48 if (rxq->sw_ring[i].mbuf != NULL) {
49 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
50 rxq->sw_ring[i].mbuf = NULL;
55 for (i = 0; i < rxq->bulk_mbuf_num; i++)
56 rte_pktmbuf_free_seg(rxq->bulk_mbuf[i]);
57 rxq->bulk_mbuf_num = 0;
59 if (rxq->pkt_first_seg) {
60 rte_pktmbuf_free(rxq->pkt_first_seg);
61 rxq->pkt_first_seg = NULL;
66 hns3_tx_queue_release_mbufs(struct hns3_tx_queue *txq)
70 /* Note: Fake tx queue will not enter here */
72 for (i = 0; i < txq->nb_tx_desc; i++) {
73 if (txq->sw_ring[i].mbuf) {
74 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
75 txq->sw_ring[i].mbuf = NULL;
82 hns3_rx_queue_release(void *queue)
84 struct hns3_rx_queue *rxq = queue;
86 hns3_rx_queue_release_mbufs(rxq);
88 rte_memzone_free(rxq->mz);
90 rte_free(rxq->sw_ring);
96 hns3_tx_queue_release(void *queue)
98 struct hns3_tx_queue *txq = queue;
100 hns3_tx_queue_release_mbufs(txq);
102 rte_memzone_free(txq->mz);
104 rte_free(txq->sw_ring);
112 hns3_rx_queue_release_lock(void *queue)
114 struct hns3_rx_queue *rxq = queue;
115 struct hns3_adapter *hns;
121 rte_spinlock_lock(&hns->hw.lock);
122 hns3_rx_queue_release(queue);
123 rte_spinlock_unlock(&hns->hw.lock);
127 hns3_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
129 hns3_rx_queue_release_lock(dev->data->rx_queues[queue_id]);
133 hns3_tx_queue_release_lock(void *queue)
135 struct hns3_tx_queue *txq = queue;
136 struct hns3_adapter *hns;
142 rte_spinlock_lock(&hns->hw.lock);
143 hns3_tx_queue_release(queue);
144 rte_spinlock_unlock(&hns->hw.lock);
148 hns3_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
150 hns3_tx_queue_release_lock(dev->data->tx_queues[queue_id]);
154 hns3_fake_rx_queue_release(struct hns3_rx_queue *queue)
156 struct hns3_rx_queue *rxq = queue;
157 struct hns3_adapter *hns;
167 if (hw->fkq_data.rx_queues[idx]) {
168 hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);
169 hw->fkq_data.rx_queues[idx] = NULL;
172 /* free fake rx queue arrays */
173 if (idx == (hw->fkq_data.nb_fake_rx_queues - 1)) {
174 hw->fkq_data.nb_fake_rx_queues = 0;
175 rte_free(hw->fkq_data.rx_queues);
176 hw->fkq_data.rx_queues = NULL;
181 hns3_fake_tx_queue_release(struct hns3_tx_queue *queue)
183 struct hns3_tx_queue *txq = queue;
184 struct hns3_adapter *hns;
194 if (hw->fkq_data.tx_queues[idx]) {
195 hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);
196 hw->fkq_data.tx_queues[idx] = NULL;
199 /* free fake tx queue arrays */
200 if (idx == (hw->fkq_data.nb_fake_tx_queues - 1)) {
201 hw->fkq_data.nb_fake_tx_queues = 0;
202 rte_free(hw->fkq_data.tx_queues);
203 hw->fkq_data.tx_queues = NULL;
208 hns3_free_rx_queues(struct rte_eth_dev *dev)
210 struct hns3_adapter *hns = dev->data->dev_private;
211 struct hns3_fake_queue_data *fkq_data;
212 struct hns3_hw *hw = &hns->hw;
216 nb_rx_q = hw->data->nb_rx_queues;
217 for (i = 0; i < nb_rx_q; i++) {
218 if (dev->data->rx_queues[i]) {
219 hns3_rx_queue_release(dev->data->rx_queues[i]);
220 dev->data->rx_queues[i] = NULL;
224 /* Free fake Rx queues */
225 fkq_data = &hw->fkq_data;
226 for (i = 0; i < fkq_data->nb_fake_rx_queues; i++) {
227 if (fkq_data->rx_queues[i])
228 hns3_fake_rx_queue_release(fkq_data->rx_queues[i]);
233 hns3_free_tx_queues(struct rte_eth_dev *dev)
235 struct hns3_adapter *hns = dev->data->dev_private;
236 struct hns3_fake_queue_data *fkq_data;
237 struct hns3_hw *hw = &hns->hw;
241 nb_tx_q = hw->data->nb_tx_queues;
242 for (i = 0; i < nb_tx_q; i++) {
243 if (dev->data->tx_queues[i]) {
244 hns3_tx_queue_release(dev->data->tx_queues[i]);
245 dev->data->tx_queues[i] = NULL;
249 /* Free fake Tx queues */
250 fkq_data = &hw->fkq_data;
251 for (i = 0; i < fkq_data->nb_fake_tx_queues; i++) {
252 if (fkq_data->tx_queues[i])
253 hns3_fake_tx_queue_release(fkq_data->tx_queues[i]);
258 hns3_free_all_queues(struct rte_eth_dev *dev)
260 hns3_free_rx_queues(dev);
261 hns3_free_tx_queues(dev);
265 hns3_alloc_rx_queue_mbufs(struct hns3_hw *hw, struct hns3_rx_queue *rxq)
267 struct rte_mbuf *mbuf;
271 for (i = 0; i < rxq->nb_rx_desc; i++) {
272 mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
273 if (unlikely(mbuf == NULL)) {
274 hns3_err(hw, "Failed to allocate RXD[%u] for rx queue!",
276 hns3_rx_queue_release_mbufs(rxq);
280 rte_mbuf_refcnt_set(mbuf, 1);
282 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
284 mbuf->port = rxq->port_id;
286 rxq->sw_ring[i].mbuf = mbuf;
287 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
288 rxq->rx_ring[i].addr = dma_addr;
289 rxq->rx_ring[i].rx.bd_base_info = 0;
296 hns3_buf_size2type(uint32_t buf_size)
302 bd_size_type = HNS3_BD_SIZE_512_TYPE;
305 bd_size_type = HNS3_BD_SIZE_1024_TYPE;
308 bd_size_type = HNS3_BD_SIZE_4096_TYPE;
311 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
318 hns3_init_rx_queue_hw(struct hns3_rx_queue *rxq)
320 uint32_t rx_buf_len = rxq->rx_buf_len;
321 uint64_t dma_addr = rxq->rx_ring_phys_addr;
323 hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_L_REG, (uint32_t)dma_addr);
324 hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_H_REG,
325 (uint32_t)(dma_addr >> 32));
327 hns3_write_dev(rxq, HNS3_RING_RX_BD_LEN_REG,
328 hns3_buf_size2type(rx_buf_len));
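/* hns3_buf_size2type() maps the Rx buffer size to one of the four BD size
 * types (512/1K/2K/4K bytes) accepted by the BD length register. */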
329 hns3_write_dev(rxq, HNS3_RING_RX_BD_NUM_REG,
330 HNS3_CFG_DESC_NUM(rxq->nb_rx_desc));
334 hns3_init_tx_queue_hw(struct hns3_tx_queue *txq)
336 uint64_t dma_addr = txq->tx_ring_phys_addr;
338 hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_L_REG, (uint32_t)dma_addr);
339 hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_H_REG,
340 (uint32_t)(dma_addr >> 32));
342 hns3_write_dev(txq, HNS3_RING_TX_BD_NUM_REG,
343 HNS3_CFG_DESC_NUM(txq->nb_tx_desc));
347 hns3_update_all_queues_pvid_proc_en(struct hns3_hw *hw)
349 uint16_t nb_rx_q = hw->data->nb_rx_queues;
350 uint16_t nb_tx_q = hw->data->nb_tx_queues;
351 struct hns3_rx_queue *rxq;
352 struct hns3_tx_queue *txq;
356 pvid_en = hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE;
357 for (i = 0; i < hw->cfg_max_queues; i++) {
359 rxq = hw->data->rx_queues[i];
361 rxq->pvid_sw_discard_en = pvid_en;
364 txq = hw->data->tx_queues[i];
366 txq->pvid_sw_shift_en = pvid_en;
372 hns3_stop_unused_queue(void *tqp_base, enum hns3_ring_type queue_type)
377 reg_offset = queue_type == HNS3_RING_TYPE_TX ?
378 HNS3_RING_TX_EN_REG : HNS3_RING_RX_EN_REG;
379 reg = hns3_read_reg(tqp_base, reg_offset);
380 reg &= ~BIT(HNS3_RING_EN_B);
381 hns3_write_reg(tqp_base, reg_offset, reg);
385 hns3_enable_all_queues(struct hns3_hw *hw, bool en)
387 uint16_t nb_rx_q = hw->data->nb_rx_queues;
388 uint16_t nb_tx_q = hw->data->nb_tx_queues;
389 struct hns3_rx_queue *rxq;
390 struct hns3_tx_queue *txq;
395 for (i = 0; i < hw->cfg_max_queues; i++) {
396 if (hns3_dev_get_support(hw, INDEP_TXRX)) {
397 rxq = i < nb_rx_q ? hw->data->rx_queues[i] : NULL;
398 txq = i < nb_tx_q ? hw->data->tx_queues[i] : NULL;
400 tqp_base = (void *)((char *)hw->io_base +
401 hns3_get_tqp_reg_offset(i));
403 * If queue struct is not initialized, it means the
404 * related HW ring has not been initialized yet.
405 * So, these queues should be disabled before enabling
406 * the tqps to avoid a HW exception, since the queues
407 * are enabled by default.
410 hns3_stop_unused_queue(tqp_base,
413 hns3_stop_unused_queue(tqp_base,
416 rxq = i < nb_rx_q ? hw->data->rx_queues[i] :
417 hw->fkq_data.rx_queues[i - nb_rx_q];
419 tqp_base = rxq->io_base;
422 * This is the master switch that is used to control the enabling
423 * of a pair of Tx and Rx queues. Both the Rx and Tx point to
426 rcb_reg = hns3_read_reg(tqp_base, HNS3_RING_EN_REG);
428 rcb_reg |= BIT(HNS3_RING_EN_B);
430 rcb_reg &= ~BIT(HNS3_RING_EN_B);
431 hns3_write_reg(tqp_base, HNS3_RING_EN_REG, rcb_reg);
436 hns3_enable_txq(struct hns3_tx_queue *txq, bool en)
438 struct hns3_hw *hw = &txq->hns->hw;
441 if (hns3_dev_get_support(hw, INDEP_TXRX)) {
442 reg = hns3_read_dev(txq, HNS3_RING_TX_EN_REG);
444 reg |= BIT(HNS3_RING_EN_B);
446 reg &= ~BIT(HNS3_RING_EN_B);
447 hns3_write_dev(txq, HNS3_RING_TX_EN_REG, reg);
453 hns3_enable_rxq(struct hns3_rx_queue *rxq, bool en)
455 struct hns3_hw *hw = &rxq->hns->hw;
458 if (hns3_dev_get_support(hw, INDEP_TXRX)) {
459 reg = hns3_read_dev(rxq, HNS3_RING_RX_EN_REG);
461 reg |= BIT(HNS3_RING_EN_B);
463 reg &= ~BIT(HNS3_RING_EN_B);
464 hns3_write_dev(rxq, HNS3_RING_RX_EN_REG, reg);
470 hns3_start_all_txqs(struct rte_eth_dev *dev)
472 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
473 struct hns3_tx_queue *txq;
476 for (i = 0; i < dev->data->nb_tx_queues; i++) {
477 txq = hw->data->tx_queues[i];
479 hns3_err(hw, "Tx queue %u not available or setup.", i);
480 goto start_txqs_fail;
483 * Tx queue is enabled by default. Therefore, the Tx queues
484 * need to be disabled when deferred_start is set. There is
485 * another master switch used to control the enabling of a pair
486 * of Tx and Rx queues. And the master switch is disabled by
489 if (txq->tx_deferred_start)
490 hns3_enable_txq(txq, false);
492 hns3_enable_txq(txq, true);
497 for (j = 0; j < i; j++) {
498 txq = hw->data->tx_queues[j];
499 hns3_enable_txq(txq, false);
505 hns3_start_all_rxqs(struct rte_eth_dev *dev)
507 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
508 struct hns3_rx_queue *rxq;
511 for (i = 0; i < dev->data->nb_rx_queues; i++) {
512 rxq = hw->data->rx_queues[i];
514 hns3_err(hw, "Rx queue %u not available or setup.", i);
515 goto start_rxqs_fail;
518 * Rx queue is enabled by default. Therefore, the Rx queues
519 * need to be disabled when deferred_start is set. There is
520 * another master switch used to control the enabling of a pair
521 * of Tx and Rx queues. And the master switch is disabled by
524 if (rxq->rx_deferred_start)
525 hns3_enable_rxq(rxq, false);
527 hns3_enable_rxq(rxq, true);
532 for (j = 0; j < i; j++) {
533 rxq = hw->data->rx_queues[j];
534 hns3_enable_rxq(rxq, false);
540 hns3_restore_tqp_enable_state(struct hns3_hw *hw)
542 struct hns3_rx_queue *rxq;
543 struct hns3_tx_queue *txq;
546 for (i = 0; i < hw->data->nb_rx_queues; i++) {
547 rxq = hw->data->rx_queues[i];
549 hns3_enable_rxq(rxq, rxq->enabled);
552 for (i = 0; i < hw->data->nb_tx_queues; i++) {
553 txq = hw->data->tx_queues[i];
555 hns3_enable_txq(txq, txq->enabled);
560 hns3_stop_all_txqs(struct rte_eth_dev *dev)
562 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
563 struct hns3_tx_queue *txq;
566 for (i = 0; i < dev->data->nb_tx_queues; i++) {
567 txq = hw->data->tx_queues[i];
570 hns3_enable_txq(txq, false);
575 hns3_tqp_enable(struct hns3_hw *hw, uint16_t queue_id, bool enable)
577 struct hns3_cfg_com_tqp_queue_cmd *req;
578 struct hns3_cmd_desc desc;
581 req = (struct hns3_cfg_com_tqp_queue_cmd *)desc.data;
583 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_COM_TQP_QUEUE, false);
584 req->tqp_id = rte_cpu_to_le_16(queue_id);
586 hns3_set_bit(req->enable, HNS3_TQP_ENABLE_B, enable ? 1 : 0);
588 ret = hns3_cmd_send(hw, &desc, 1);
590 hns3_err(hw, "TQP enable fail, ret = %d", ret);
596 hns3_send_reset_tqp_cmd(struct hns3_hw *hw, uint16_t queue_id, bool enable)
598 struct hns3_reset_tqp_queue_cmd *req;
599 struct hns3_cmd_desc desc;
602 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, false);
604 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
605 req->tqp_id = rte_cpu_to_le_16(queue_id);
606 hns3_set_bit(req->reset_req, HNS3_TQP_RESET_B, enable ? 1 : 0);
607 ret = hns3_cmd_send(hw, &desc, 1);
609 hns3_err(hw, "send tqp reset cmd error, queue_id = %u, "
610 "ret = %d", queue_id, ret);
616 hns3_get_tqp_reset_status(struct hns3_hw *hw, uint16_t queue_id,
617 uint8_t *reset_status)
619 struct hns3_reset_tqp_queue_cmd *req;
620 struct hns3_cmd_desc desc;
623 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, true);
625 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
626 req->tqp_id = rte_cpu_to_le_16(queue_id);
628 ret = hns3_cmd_send(hw, &desc, 1);
630 hns3_err(hw, "get tqp reset status error, queue_id = %u, "
631 "ret = %d.", queue_id, ret);
634 *reset_status = hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B);
639 hns3pf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
641 #define HNS3_TQP_RESET_TRY_MS 200
642 uint16_t wait_time = 0;
643 uint8_t reset_status;
647 * In the current version, VF is not supported when the PF is driven by the
648 * DPDK driver: all task queue pairs are mapped to the PF function, so the
649 * PF's queue id is equal to the global queue id in the PF range.
651 ret = hns3_send_reset_tqp_cmd(hw, queue_id, true);
653 hns3_err(hw, "Send reset tqp cmd fail, ret = %d", ret);
658 /* Wait for tqp hw reset */
659 rte_delay_ms(HNS3_POLL_RESPONE_MS);
660 wait_time += HNS3_POLL_RESPONE_MS;
661 ret = hns3_get_tqp_reset_status(hw, queue_id, &reset_status);
667 } while (wait_time < HNS3_TQP_RESET_TRY_MS);
671 hns3_err(hw, "reset tqp timeout, queue_id = %u, ret = %d",
676 ret = hns3_send_reset_tqp_cmd(hw, queue_id, false);
678 hns3_err(hw, "Deassert the soft reset fail, ret = %d", ret);
683 hns3_send_reset_tqp_cmd(hw, queue_id, false);
688 hns3vf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
693 memcpy(msg_data, &queue_id, sizeof(uint16_t));
695 ret = hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data,
696 sizeof(msg_data), true, NULL, 0);
698 hns3_err(hw, "fail to reset tqp, queue_id = %u, ret = %d.",
704 hns3_reset_rcb_cmd(struct hns3_hw *hw, uint8_t *reset_status)
706 struct hns3_reset_cmd *req;
707 struct hns3_cmd_desc desc;
710 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_RST_TRIGGER, false);
711 req = (struct hns3_reset_cmd *)desc.data;
712 hns3_set_bit(req->fun_reset_rcb, HNS3_CFG_RESET_RCB_B, 1);
715 * The start qid should be the global qid of the first tqp of the
716 * function which should be reset in this port. Since our PF does not
717 * support taking over VFs, we only need to reset function 0,
718 * and its start qid is always 0.
720 req->fun_reset_rcb_vqid_start = rte_cpu_to_le_16(0);
721 req->fun_reset_rcb_vqid_num = rte_cpu_to_le_16(hw->cfg_max_queues);
723 ret = hns3_cmd_send(hw, &desc, 1);
725 hns3_err(hw, "fail to send rcb reset cmd, ret = %d.", ret);
729 *reset_status = req->fun_reset_rcb_return_status;
734 hns3pf_reset_all_tqps(struct hns3_hw *hw)
736 #define HNS3_RESET_RCB_NOT_SUPPORT 0U
737 #define HNS3_RESET_ALL_TQP_SUCCESS 1U
738 uint8_t reset_status;
742 ret = hns3_reset_rcb_cmd(hw, &reset_status);
747 * If the firmware version is low, it may not support the rcb reset,
748 * which resets all the tqps at a time. In this case, we should
749 * reset the tqps one by one.
751 if (reset_status == HNS3_RESET_RCB_NOT_SUPPORT) {
752 for (i = 0; i < hw->cfg_max_queues; i++) {
753 ret = hns3pf_reset_tqp(hw, i);
756 "fail to reset tqp, queue_id = %d, ret = %d.",
761 } else if (reset_status != HNS3_RESET_ALL_TQP_SUCCESS) {
762 hns3_err(hw, "fail to reset all tqps, reset_status = %u.",
771 hns3vf_reset_all_tqps(struct hns3_hw *hw)
773 #define HNS3VF_RESET_ALL_TQP_DONE 1U
774 uint8_t reset_status;
779 memset(msg_data, 0, sizeof(uint16_t));
780 ret = hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data,
781 sizeof(msg_data), true, &reset_status,
782 sizeof(reset_status));
784 hns3_err(hw, "fail to send rcb reset mbx, ret = %d.", ret);
788 if (reset_status == HNS3VF_RESET_ALL_TQP_DONE)
792 * If the firmware version or kernel PF version is low, it may not
793 * support the rcb reset, which resets all the tqps at a time.
794 * In this case, we should reset the tqps one by one.
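 * The fallback loop below starts from queue 1: the mailbox request above
 * already carried queue id 0, so queue 0 is expected to have been reset
 * even when the all-TQP reset is not supported.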
796 for (i = 1; i < hw->cfg_max_queues; i++) {
797 ret = hns3vf_reset_tqp(hw, i);
806 hns3_reset_all_tqps(struct hns3_adapter *hns)
808 struct hns3_hw *hw = &hns->hw;
811 /* Disable all queues before reset all queues */
812 for (i = 0; i < hw->cfg_max_queues; i++) {
813 ret = hns3_tqp_enable(hw, i, false);
816 "fail to disable tqps before tqps reset, ret = %d.",
823 return hns3vf_reset_all_tqps(hw);
825 return hns3pf_reset_all_tqps(hw);
829 hns3_send_reset_queue_cmd(struct hns3_hw *hw, uint16_t queue_id,
830 enum hns3_ring_type queue_type, bool enable)
832 struct hns3_reset_tqp_queue_cmd *req;
833 struct hns3_cmd_desc desc;
837 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE_INDEP, false);
839 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
840 req->tqp_id = rte_cpu_to_le_16(queue_id);
841 queue_direction = queue_type == HNS3_RING_TYPE_TX ? 0 : 1;
842 req->queue_direction = rte_cpu_to_le_16(queue_direction);
843 hns3_set_bit(req->reset_req, HNS3_TQP_RESET_B, enable ? 1 : 0);
845 ret = hns3_cmd_send(hw, &desc, 1);
847 hns3_err(hw, "send queue reset cmd error, queue_id = %u, "
848 "queue_type = %s, ret = %d.", queue_id,
849 queue_type == HNS3_RING_TYPE_TX ? "Tx" : "Rx", ret);
854 hns3_get_queue_reset_status(struct hns3_hw *hw, uint16_t queue_id,
855 enum hns3_ring_type queue_type,
856 uint8_t *reset_status)
858 struct hns3_reset_tqp_queue_cmd *req;
859 struct hns3_cmd_desc desc;
863 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE_INDEP, true);
865 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
866 req->tqp_id = rte_cpu_to_le_16(queue_id);
867 queue_direction = queue_type == HNS3_RING_TYPE_TX ? 0 : 1;
868 req->queue_direction = rte_cpu_to_le_16(queue_direction);
870 ret = hns3_cmd_send(hw, &desc, 1);
872 hns3_err(hw, "get queue reset status error, queue_id = %u "
873 "queue_type = %s, ret = %d.", queue_id,
874 queue_type == HNS3_RING_TYPE_TX ? "Tx" : "Rx", ret);
878 *reset_status = hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B);
883 hns3_reset_queue(struct hns3_hw *hw, uint16_t queue_id,
884 enum hns3_ring_type queue_type)
886 #define HNS3_QUEUE_RESET_TRY_MS 200
887 struct hns3_tx_queue *txq;
888 struct hns3_rx_queue *rxq;
889 uint32_t reset_wait_times;
890 uint32_t max_wait_times;
891 uint8_t reset_status;
894 if (queue_type == HNS3_RING_TYPE_TX) {
895 txq = hw->data->tx_queues[queue_id];
896 hns3_enable_txq(txq, false);
898 rxq = hw->data->rx_queues[queue_id];
899 hns3_enable_rxq(rxq, false);
902 ret = hns3_send_reset_queue_cmd(hw, queue_id, queue_type, true);
904 hns3_err(hw, "send reset queue cmd fail, ret = %d.", ret);
908 reset_wait_times = 0;
909 max_wait_times = HNS3_QUEUE_RESET_TRY_MS / HNS3_POLL_RESPONE_MS;
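/* Poll every HNS3_POLL_RESPONE_MS until the HNS3_QUEUE_RESET_TRY_MS (200 ms)
 * budget is exhausted. */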
910 while (reset_wait_times < max_wait_times) {
911 /* Wait for queue hw reset */
912 rte_delay_ms(HNS3_POLL_RESPONE_MS);
913 ret = hns3_get_queue_reset_status(hw, queue_id,
914 queue_type, &reset_status);
916 goto queue_reset_fail;
924 hns3_err(hw, "reset queue timeout, queue_id = %u, "
925 "queue_type = %s", queue_id,
926 queue_type == HNS3_RING_TYPE_TX ? "Tx" : "Rx");
928 goto queue_reset_fail;
931 ret = hns3_send_reset_queue_cmd(hw, queue_id, queue_type, false);
933 hns3_err(hw, "deassert queue reset fail, ret = %d.", ret);
938 hns3_send_reset_queue_cmd(hw, queue_id, queue_type, false);
943 hns3_get_tqp_intr_reg_offset(uint16_t tqp_intr_id)
947 /* Need an extended offset to configure interrupts for queues beyond the first 64 */
948 if (tqp_intr_id < HNS3_MIN_EXT_TQP_INTR_ID)
949 reg_offset = HNS3_TQP_INTR_REG_BASE +
950 tqp_intr_id * HNS3_TQP_INTR_LOW_ORDER_OFFSET;
952 reg_offset = HNS3_TQP_INTR_EXT_REG_BASE +
953 tqp_intr_id / HNS3_MIN_EXT_TQP_INTR_ID *
954 HNS3_TQP_INTR_HIGH_ORDER_OFFSET +
955 tqp_intr_id % HNS3_MIN_EXT_TQP_INTR_ID *
956 HNS3_TQP_INTR_LOW_ORDER_OFFSET;
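/* For example, taking HNS3_MIN_EXT_TQP_INTR_ID as 64 (as the comment above
 * implies), interrupt id 70 maps to HNS3_TQP_INTR_EXT_REG_BASE +
 * 1 * HNS3_TQP_INTR_HIGH_ORDER_OFFSET + 6 * HNS3_TQP_INTR_LOW_ORDER_OFFSET. */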
962 hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
963 uint8_t gl_idx, uint16_t gl_value)
965 uint32_t offset[] = {HNS3_TQP_INTR_GL0_REG,
966 HNS3_TQP_INTR_GL1_REG,
967 HNS3_TQP_INTR_GL2_REG};
968 uint32_t addr, value;
970 if (gl_idx >= RTE_DIM(offset) || gl_value > HNS3_TQP_INTR_GL_MAX)
973 addr = offset[gl_idx] + hns3_get_tqp_intr_reg_offset(queue_id);
974 if (hw->intr.gl_unit == HNS3_INTR_COALESCE_GL_UINT_1US)
975 value = gl_value | HNS3_TQP_INTR_GL_UNIT_1US;
977 value = HNS3_GL_USEC_TO_REG(gl_value);
979 hns3_write_dev(hw, addr, value);
983 hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id, uint16_t rl_value)
985 uint32_t addr, value;
987 if (rl_value > HNS3_TQP_INTR_RL_MAX)
990 addr = HNS3_TQP_INTR_RL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
991 value = HNS3_RL_USEC_TO_REG(rl_value);
993 value |= HNS3_TQP_INTR_RL_ENABLE_MASK;
995 hns3_write_dev(hw, addr, value);
999 hns3_set_queue_intr_ql(struct hns3_hw *hw, uint16_t queue_id, uint16_t ql_value)
1004 * int_ql_max == 0 means the hardware does not support QL;
1005 * configuring the QL registers is not permitted if QL is not supported,
1008 if (hw->intr.int_ql_max == HNS3_INTR_QL_NONE)
1011 addr = HNS3_TQP_INTR_TX_QL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
1012 hns3_write_dev(hw, addr, ql_value);
1014 addr = HNS3_TQP_INTR_RX_QL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
1015 hns3_write_dev(hw, addr, ql_value);
1019 hns3_queue_intr_enable(struct hns3_hw *hw, uint16_t queue_id, bool en)
1021 uint32_t addr, value;
1023 addr = HNS3_TQP_INTR_CTRL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
1026 hns3_write_dev(hw, addr, value);
1030 * Enable all Rx queue interrupts when in Rx interrupt mode.
1031 * This API is called before enabling the Rx and Tx queues (in normal start or
1032 * reset recovery scenarios) to restore the Rx queue interrupt enable that was cleared
1036 hns3_dev_all_rx_queue_intr_enable(struct hns3_hw *hw, bool en)
1038 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
1039 uint16_t nb_rx_q = hw->data->nb_rx_queues;
1042 if (dev->data->dev_conf.intr_conf.rxq == 0)
1045 for (i = 0; i < nb_rx_q; i++)
1046 hns3_queue_intr_enable(hw, i, en);
1050 hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
1052 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1053 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
1054 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1056 if (dev->data->dev_conf.intr_conf.rxq == 0)
1059 hns3_queue_intr_enable(hw, queue_id, true);
1061 return rte_intr_ack(intr_handle);
1065 hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
1067 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1069 if (dev->data->dev_conf.intr_conf.rxq == 0)
1072 hns3_queue_intr_enable(hw, queue_id, false);
1078 hns3_init_rxq(struct hns3_adapter *hns, uint16_t idx)
1080 struct hns3_hw *hw = &hns->hw;
1081 struct hns3_rx_queue *rxq;
1084 PMD_INIT_FUNC_TRACE();
1086 rxq = (struct hns3_rx_queue *)hw->data->rx_queues[idx];
1087 ret = hns3_alloc_rx_queue_mbufs(hw, rxq);
1089 hns3_err(hw, "fail to alloc mbuf for Rx queue %u, ret = %d.",
1094 rxq->next_to_use = 0;
1095 rxq->rx_rearm_start = 0;
1096 rxq->rx_free_hold = 0;
1097 rxq->rx_rearm_nb = 0;
1098 rxq->pkt_first_seg = NULL;
1099 rxq->pkt_last_seg = NULL;
1100 hns3_init_rx_queue_hw(rxq);
1101 hns3_rxq_vec_setup(rxq);
1107 hns3_init_fake_rxq(struct hns3_adapter *hns, uint16_t idx)
1109 struct hns3_hw *hw = &hns->hw;
1110 struct hns3_rx_queue *rxq;
1112 rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[idx];
1113 rxq->next_to_use = 0;
1114 rxq->rx_free_hold = 0;
1115 rxq->rx_rearm_start = 0;
1116 rxq->rx_rearm_nb = 0;
1117 hns3_init_rx_queue_hw(rxq);
1121 hns3_init_txq(struct hns3_tx_queue *txq)
1123 struct hns3_desc *desc;
1127 desc = txq->tx_ring;
1128 for (i = 0; i < txq->nb_tx_desc; i++) {
1129 desc->tx.tp_fe_sc_vld_ra_ri = 0;
1133 txq->next_to_use = 0;
1134 txq->next_to_clean = 0;
1135 txq->tx_bd_ready = txq->nb_tx_desc - 1;
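/* i.e. the whole ring minus one BD is reported as ready; keeping one BD in
 * reserve is the usual way to tell a full ring from an empty one. */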
1136 hns3_init_tx_queue_hw(txq);
1140 hns3_init_tx_ring_tc(struct hns3_adapter *hns)
1142 struct hns3_hw *hw = &hns->hw;
1143 struct hns3_tx_queue *txq;
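/* Bind each Tx queue to its TC: for every enabled TC, the queues in
 * [tqp_offset, tqp_offset + tqp_count) get the TC index written to their
 * HNS3_RING_TX_TC_REG. */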
1146 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
1147 struct hns3_tc_queue_info *tc_queue = &hw->tc_queue[i];
1150 if (!tc_queue->enable)
1153 for (j = 0; j < tc_queue->tqp_count; j++) {
1154 num = tc_queue->tqp_offset + j;
1155 txq = (struct hns3_tx_queue *)hw->data->tx_queues[num];
1159 hns3_write_dev(txq, HNS3_RING_TX_TC_REG, tc_queue->tc);
1165 hns3_init_rx_queues(struct hns3_adapter *hns)
1167 struct hns3_hw *hw = &hns->hw;
1168 struct hns3_rx_queue *rxq;
1172 /* Initialize RSS for queues */
1173 ret = hns3_config_rss(hns);
1175 hns3_err(hw, "failed to configure rss, ret = %d.", ret);
1179 for (i = 0; i < hw->data->nb_rx_queues; i++) {
1180 rxq = (struct hns3_rx_queue *)hw->data->rx_queues[i];
1182 hns3_err(hw, "Rx queue %u not available or setup.", i);
1186 if (rxq->rx_deferred_start)
1189 ret = hns3_init_rxq(hns, i);
1191 hns3_err(hw, "failed to init Rx queue %u, ret = %d.", i,
1197 for (i = 0; i < hw->fkq_data.nb_fake_rx_queues; i++)
1198 hns3_init_fake_rxq(hns, i);
1203 for (j = 0; j < i; j++) {
1204 rxq = (struct hns3_rx_queue *)hw->data->rx_queues[j];
1205 hns3_rx_queue_release_mbufs(rxq);
1212 hns3_init_tx_queues(struct hns3_adapter *hns)
1214 struct hns3_hw *hw = &hns->hw;
1215 struct hns3_tx_queue *txq;
1218 for (i = 0; i < hw->data->nb_tx_queues; i++) {
1219 txq = (struct hns3_tx_queue *)hw->data->tx_queues[i];
1221 hns3_err(hw, "Tx queue %u not available or setup.", i);
1225 if (txq->tx_deferred_start)
1230 for (i = 0; i < hw->fkq_data.nb_fake_tx_queues; i++) {
1231 txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[i];
1234 hns3_init_tx_ring_tc(hns);
1241 * Note: just init and setup queues, and don't enable tqps.
1244 hns3_init_queues(struct hns3_adapter *hns, bool reset_queue)
1246 struct hns3_hw *hw = &hns->hw;
1250 ret = hns3_reset_all_tqps(hns);
1252 hns3_err(hw, "failed to reset all queues, ret = %d.",
1258 ret = hns3_init_rx_queues(hns);
1260 hns3_err(hw, "failed to init rx queues, ret = %d.", ret);
1264 ret = hns3_init_tx_queues(hns);
1266 hns3_dev_release_mbufs(hns);
1267 hns3_err(hw, "failed to init tx queues, ret = %d.", ret);
1274 hns3_start_tqps(struct hns3_hw *hw)
1276 struct hns3_tx_queue *txq;
1277 struct hns3_rx_queue *rxq;
1280 hns3_enable_all_queues(hw, true);
1282 for (i = 0; i < hw->data->nb_tx_queues; i++) {
1283 txq = hw->data->tx_queues[i];
1285 hw->data->tx_queue_state[i] =
1286 RTE_ETH_QUEUE_STATE_STARTED;
1289 for (i = 0; i < hw->data->nb_rx_queues; i++) {
1290 rxq = hw->data->rx_queues[i];
1292 hw->data->rx_queue_state[i] =
1293 RTE_ETH_QUEUE_STATE_STARTED;
1298 hns3_stop_tqps(struct hns3_hw *hw)
1302 hns3_enable_all_queues(hw, false);
1304 for (i = 0; i < hw->data->nb_tx_queues; i++)
1305 hw->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
1307 for (i = 0; i < hw->data->nb_rx_queues; i++)
1308 hw->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
1312 * Iterate over all Rx queues and call the callback() function for each Rx
1316 * The target eth dev.
1317 * @param[in] callback
1318 * The function to call for each queue.
1319 * If the callback function returns a nonzero value, iteration stops and that value is returned.
1321 * The arguments to provide the callback function with.
1324 * 0 on success, otherwise with errno set.
1327 hns3_rxq_iterate(struct rte_eth_dev *dev,
1328 int (*callback)(struct hns3_rx_queue *, void *), void *arg)
1333 if (dev->data->rx_queues == NULL)
1336 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1337 ret = callback(dev->data->rx_queues[i], arg);
1346 hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev,
1347 struct hns3_queue_info *q_info)
1349 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1350 const struct rte_memzone *rx_mz;
1351 struct hns3_rx_queue *rxq;
1352 unsigned int rx_desc;
1354 rxq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_rx_queue),
1355 RTE_CACHE_LINE_SIZE, q_info->socket_id);
1357 hns3_err(hw, "Failed to allocate memory for No.%u rx ring!",
1362 /* Allocate rx ring hardware descriptors. */
1363 rxq->queue_id = q_info->idx;
1364 rxq->nb_rx_desc = q_info->nb_desc;
1367 * Allocate a little more memory because the Rx vector functions
1368 * don't check boundaries each time.
1370 rx_desc = (rxq->nb_rx_desc + HNS3_DEFAULT_RX_BURST) *
1371 sizeof(struct hns3_desc);
1372 rx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
1373 rx_desc, HNS3_RING_BASE_ALIGN,
1375 if (rx_mz == NULL) {
1376 hns3_err(hw, "Failed to reserve DMA memory for No.%u rx ring!",
1378 hns3_rx_queue_release(rxq);
1382 rxq->rx_ring = (struct hns3_desc *)rx_mz->addr;
1383 rxq->rx_ring_phys_addr = rx_mz->iova;
1389 hns3_fake_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
1390 uint16_t nb_desc, unsigned int socket_id)
1392 struct hns3_adapter *hns = dev->data->dev_private;
1393 struct hns3_hw *hw = &hns->hw;
1394 struct hns3_queue_info q_info;
1395 struct hns3_rx_queue *rxq;
1398 if (hw->fkq_data.rx_queues[idx]) {
1399 hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);
1400 hw->fkq_data.rx_queues[idx] = NULL;
1404 q_info.socket_id = socket_id;
1405 q_info.nb_desc = nb_desc;
1406 q_info.type = "hns3 fake RX queue";
1407 q_info.ring_name = "rx_fake_ring";
1408 rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
1410 hns3_err(hw, "Failed to setup No.%u fake rx ring.", idx);
1414 /* No need to allocate sw_ring, because upper applications don't use it */
1415 rxq->sw_ring = NULL;
1418 rxq->rx_deferred_start = false;
1419 rxq->port_id = dev->data->port_id;
1420 rxq->configured = true;
1421 nb_rx_q = dev->data->nb_rx_queues;
1422 rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
1423 (nb_rx_q + idx) * HNS3_TQP_REG_SIZE);
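/* Fake Rx queues sit in the register space right after the nb_rx_q real Rx
 * queues, hence the (nb_rx_q + idx) stride above. */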
1424 rxq->rx_buf_len = HNS3_MIN_BD_BUF_SIZE;
1426 rte_spinlock_lock(&hw->lock);
1427 hw->fkq_data.rx_queues[idx] = rxq;
1428 rte_spinlock_unlock(&hw->lock);
1434 hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev,
1435 struct hns3_queue_info *q_info)
1437 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1438 const struct rte_memzone *tx_mz;
1439 struct hns3_tx_queue *txq;
1440 struct hns3_desc *desc;
1441 unsigned int tx_desc;
1444 txq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_tx_queue),
1445 RTE_CACHE_LINE_SIZE, q_info->socket_id);
1447 hns3_err(hw, "Failed to allocate memory for No.%u tx ring!",
1452 /* Allocate tx ring hardware descriptors. */
1453 txq->queue_id = q_info->idx;
1454 txq->nb_tx_desc = q_info->nb_desc;
1455 tx_desc = txq->nb_tx_desc * sizeof(struct hns3_desc);
1456 tx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
1457 tx_desc, HNS3_RING_BASE_ALIGN,
1459 if (tx_mz == NULL) {
1460 hns3_err(hw, "Failed to reserve DMA memory for No.%u tx ring!",
1462 hns3_tx_queue_release(txq);
1466 txq->tx_ring = (struct hns3_desc *)tx_mz->addr;
1467 txq->tx_ring_phys_addr = tx_mz->iova;
1470 desc = txq->tx_ring;
1471 for (i = 0; i < txq->nb_tx_desc; i++) {
1472 desc->tx.tp_fe_sc_vld_ra_ri = 0;
1480 hns3_fake_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
1481 uint16_t nb_desc, unsigned int socket_id)
1483 struct hns3_adapter *hns = dev->data->dev_private;
1484 struct hns3_hw *hw = &hns->hw;
1485 struct hns3_queue_info q_info;
1486 struct hns3_tx_queue *txq;
1489 if (hw->fkq_data.tx_queues[idx] != NULL) {
1490 hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);
1491 hw->fkq_data.tx_queues[idx] = NULL;
1495 q_info.socket_id = socket_id;
1496 q_info.nb_desc = nb_desc;
1497 q_info.type = "hns3 fake TX queue";
1498 q_info.ring_name = "tx_fake_ring";
1499 txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
1501 hns3_err(hw, "Failed to setup No.%u fake tx ring.", idx);
1505 /* No need to allocate sw_ring, because upper applications don't use it */
1506 txq->sw_ring = NULL;
1510 txq->tx_deferred_start = false;
1511 txq->port_id = dev->data->port_id;
1512 txq->configured = true;
1513 nb_tx_q = dev->data->nb_tx_queues;
1514 txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
1515 (nb_tx_q + idx) * HNS3_TQP_REG_SIZE);
1517 rte_spinlock_lock(&hw->lock);
1518 hw->fkq_data.tx_queues[idx] = txq;
1519 rte_spinlock_unlock(&hw->lock);
1525 hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
1527 uint16_t old_nb_queues = hw->fkq_data.nb_fake_rx_queues;
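/* Three cases follow: first-time allocation of the fake queue array,
 * resizing an existing array (releasing any queues beyond the new count),
 * and freeing everything when nb_queues is 0. */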
1531 if (hw->fkq_data.rx_queues == NULL && nb_queues != 0) {
1532 /* first time configuration */
1534 size = sizeof(hw->fkq_data.rx_queues[0]) * nb_queues;
1535 hw->fkq_data.rx_queues = rte_zmalloc("fake_rx_queues", size,
1536 RTE_CACHE_LINE_SIZE);
1537 if (hw->fkq_data.rx_queues == NULL) {
1538 hw->fkq_data.nb_fake_rx_queues = 0;
1541 } else if (hw->fkq_data.rx_queues != NULL && nb_queues != 0) {
1543 rxq = hw->fkq_data.rx_queues;
1544 for (i = nb_queues; i < old_nb_queues; i++)
1545 hns3_rx_queue_release_lock(rxq[i]);
1547 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
1548 RTE_CACHE_LINE_SIZE);
1551 if (nb_queues > old_nb_queues) {
1552 uint16_t new_qs = nb_queues - old_nb_queues;
1553 memset(rxq + old_nb_queues, 0, sizeof(rxq[0]) * new_qs);
1556 hw->fkq_data.rx_queues = rxq;
1557 } else if (hw->fkq_data.rx_queues != NULL && nb_queues == 0) {
1558 rxq = hw->fkq_data.rx_queues;
1559 for (i = nb_queues; i < old_nb_queues; i++)
1560 hns3_rx_queue_release_lock(rxq[i]);
1562 rte_free(hw->fkq_data.rx_queues);
1563 hw->fkq_data.rx_queues = NULL;
1566 hw->fkq_data.nb_fake_rx_queues = nb_queues;
1572 hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
1574 uint16_t old_nb_queues = hw->fkq_data.nb_fake_tx_queues;
1578 if (hw->fkq_data.tx_queues == NULL && nb_queues != 0) {
1579 /* first time configuration */
1581 size = sizeof(hw->fkq_data.tx_queues[0]) * nb_queues;
1582 hw->fkq_data.tx_queues = rte_zmalloc("fake_tx_queues", size,
1583 RTE_CACHE_LINE_SIZE);
1584 if (hw->fkq_data.tx_queues == NULL) {
1585 hw->fkq_data.nb_fake_tx_queues = 0;
1588 } else if (hw->fkq_data.tx_queues != NULL && nb_queues != 0) {
1590 txq = hw->fkq_data.tx_queues;
1591 for (i = nb_queues; i < old_nb_queues; i++)
1592 hns3_tx_queue_release_lock(txq[i]);
1593 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
1594 RTE_CACHE_LINE_SIZE);
1597 if (nb_queues > old_nb_queues) {
1598 uint16_t new_qs = nb_queues - old_nb_queues;
1599 memset(txq + old_nb_queues, 0, sizeof(txq[0]) * new_qs);
1602 hw->fkq_data.tx_queues = txq;
1603 } else if (hw->fkq_data.tx_queues != NULL && nb_queues == 0) {
1604 txq = hw->fkq_data.tx_queues;
1605 for (i = nb_queues; i < old_nb_queues; i++)
1606 hns3_tx_queue_release_lock(txq[i]);
1608 rte_free(hw->fkq_data.tx_queues);
1609 hw->fkq_data.tx_queues = NULL;
1611 hw->fkq_data.nb_fake_tx_queues = nb_queues;
1617 hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
1620 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1621 uint16_t rx_need_add_nb_q;
1622 uint16_t tx_need_add_nb_q;
1627 if (hns3_dev_get_support(hw, INDEP_TXRX))
1630 /* Set up the new number of fake Rx/Tx queues and reconfigure the device. */
1631 rx_need_add_nb_q = hw->cfg_max_queues - nb_rx_q;
1632 tx_need_add_nb_q = hw->cfg_max_queues - nb_tx_q;
1633 ret = hns3_fake_rx_queue_config(hw, rx_need_add_nb_q);
1635 hns3_err(hw, "Fail to configure fake rx queues: %d", ret);
1639 ret = hns3_fake_tx_queue_config(hw, tx_need_add_nb_q);
1641 hns3_err(hw, "Fail to configure fake tx queues: %d", ret);
1642 goto cfg_fake_tx_q_fail;
1645 /* Allocate and set up fake RX queue per Ethernet port. */
1646 port_id = hw->data->port_id;
1647 for (q = 0; q < rx_need_add_nb_q; q++) {
1648 ret = hns3_fake_rx_queue_setup(dev, q, HNS3_MIN_RING_DESC,
1649 rte_eth_dev_socket_id(port_id));
1651 goto setup_fake_rx_q_fail;
1654 /* Allocate and set up fake TX queue per Ethernet port. */
1655 for (q = 0; q < tx_need_add_nb_q; q++) {
1656 ret = hns3_fake_tx_queue_setup(dev, q, HNS3_MIN_RING_DESC,
1657 rte_eth_dev_socket_id(port_id));
1659 goto setup_fake_tx_q_fail;
1664 setup_fake_tx_q_fail:
1665 setup_fake_rx_q_fail:
1666 (void)hns3_fake_tx_queue_config(hw, 0);
1668 (void)hns3_fake_rx_queue_config(hw, 0);
1674 hns3_dev_release_mbufs(struct hns3_adapter *hns)
1676 struct rte_eth_dev_data *dev_data = hns->hw.data;
1677 struct hns3_rx_queue *rxq;
1678 struct hns3_tx_queue *txq;
1681 if (dev_data->rx_queues)
1682 for (i = 0; i < dev_data->nb_rx_queues; i++) {
1683 rxq = dev_data->rx_queues[i];
1686 hns3_rx_queue_release_mbufs(rxq);
1689 if (dev_data->tx_queues)
1690 for (i = 0; i < dev_data->nb_tx_queues; i++) {
1691 txq = dev_data->tx_queues[i];
1694 hns3_tx_queue_release_mbufs(txq);
1699 hns3_rx_buf_len_calc(struct rte_mempool *mp, uint16_t *rx_buf_len)
1701 uint16_t vld_buf_size;
1702 uint16_t num_hw_specs;
1706 * The hns3 network engine only supports 4 typical buffer size specifications,
1707 * and the buffer size affects the max packet_len and the max
1708 * number of segments when HW GRO is turned on on the receive side. The
1709 * relationship between them is as follows:
1710 * rx_buf_size | max_gro_pkt_len | max_gro_nb_seg
1711 * ---------------------|-------------------|----------------
1712 * HNS3_4K_BD_BUF_SIZE | 60KB | 15
1713 * HNS3_2K_BD_BUF_SIZE | 62KB | 31
1714 * HNS3_1K_BD_BUF_SIZE | 63KB | 63
1715 * HNS3_512_BD_BUF_SIZE | 31.5KB | 63
1717 static const uint16_t hw_rx_buf_size[] = {
1718 HNS3_4K_BD_BUF_SIZE,
1719 HNS3_2K_BD_BUF_SIZE,
1720 HNS3_1K_BD_BUF_SIZE,
1721 HNS3_512_BD_BUF_SIZE
1724 vld_buf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
1725 RTE_PKTMBUF_HEADROOM);
1726 if (vld_buf_size < HNS3_MIN_BD_BUF_SIZE)
1729 num_hw_specs = RTE_DIM(hw_rx_buf_size);
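/* Pick the largest hardware buffer size that still fits in the mbuf data
 * room; hw_rx_buf_size[] is ordered from largest to smallest. */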
1730 for (i = 0; i < num_hw_specs; i++) {
1731 if (vld_buf_size >= hw_rx_buf_size[i]) {
1732 *rx_buf_len = hw_rx_buf_size[i];
1740 hns3_rxq_conf_runtime_check(struct hns3_hw *hw, uint16_t buf_size,
1743 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
1744 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
1745 uint32_t frame_size = dev->data->mtu + HNS3_ETH_OVERHEAD;
1746 uint16_t min_vec_bds;
1749 * The HNS3 hardware network engine enables scattered reception by default.
1750 * If the driver does not work in scattered mode, packets greater than buf_size
1751 * but smaller than the frame size will be distributed across multiple BDs,
1752 * a situation the driver cannot handle.
1754 if (!hw->data->scattered_rx && frame_size > buf_size) {
1755 hns3_err(hw, "frame size is not allowed to be set greater "
1756 "than rx_buf_len if scattered is off.");
1760 if (pkt_burst == hns3_recv_pkts_vec) {
1761 min_vec_bds = HNS3_DEFAULT_RXQ_REARM_THRESH +
1762 HNS3_DEFAULT_RX_BURST;
1763 if (nb_desc < min_vec_bds ||
1764 nb_desc % HNS3_DEFAULT_RXQ_REARM_THRESH) {
1765 hns3_err(hw, "if Rx burst mode is vector, "
1766 "number of descriptor is required to be "
1767 "bigger than min vector bds:%u, and could be "
1768 "divided by rxq rearm thresh:%u.",
1769 min_vec_bds, HNS3_DEFAULT_RXQ_REARM_THRESH);
1777 hns3_rx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_rxconf *conf,
1778 struct rte_mempool *mp, uint16_t nb_desc,
1783 if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||
1784 nb_desc % HNS3_ALIGN_RING_DESC) {
1785 hns3_err(hw, "Number (%u) of rx descriptors is invalid",
1790 if (conf->rx_drop_en == 0)
1791 hns3_warn(hw, "if no descriptors are available, packets are always "
1792 "dropped and rx_drop_en is fixed to on (1)");
1794 if (hns3_rx_buf_len_calc(mp, buf_size)) {
1795 hns3_err(hw, "rxq mbufs' data room size (%u) is not enough! "
1796 "minimal data room size (%u).",
1797 rte_pktmbuf_data_room_size(mp),
1798 HNS3_MIN_BD_BUF_SIZE + RTE_PKTMBUF_HEADROOM);
1802 if (hw->data->dev_started) {
1803 ret = hns3_rxq_conf_runtime_check(hw, *buf_size, nb_desc);
1805 hns3_err(hw, "Rx queue runtime setup fail.");
1814 hns3_get_tqp_reg_offset(uint16_t queue_id)
1816 uint32_t reg_offset;
1818 /* Need an extended offset to configure queues beyond the first 1024 */
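/* For example, assuming HNS3_MIN_EXTEND_QUEUE_ID is 1024 as the comment
 * above suggests, queue 1025 falls one extended-queue stride past
 * HNS3_TQP_REG_OFFSET + HNS3_TQP_EXT_REG_OFFSET. */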
1819 if (queue_id < HNS3_MIN_EXTEND_QUEUE_ID)
1820 reg_offset = HNS3_TQP_REG_OFFSET + queue_id * HNS3_TQP_REG_SIZE;
1822 reg_offset = HNS3_TQP_REG_OFFSET + HNS3_TQP_EXT_REG_OFFSET +
1823 (queue_id - HNS3_MIN_EXTEND_QUEUE_ID) *
1830 hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
1831 unsigned int socket_id, const struct rte_eth_rxconf *conf,
1832 struct rte_mempool *mp)
1834 struct hns3_adapter *hns = dev->data->dev_private;
1835 struct hns3_hw *hw = &hns->hw;
1836 struct hns3_queue_info q_info;
1837 struct hns3_rx_queue *rxq;
1838 uint16_t rx_buf_size;
1842 ret = hns3_rx_queue_conf_check(hw, conf, mp, nb_desc, &rx_buf_size);
1846 if (dev->data->rx_queues[idx]) {
1847 hns3_rx_queue_release(dev->data->rx_queues[idx]);
1848 dev->data->rx_queues[idx] = NULL;
1852 q_info.socket_id = socket_id;
1853 q_info.nb_desc = nb_desc;
1854 q_info.type = "hns3 RX queue";
1855 q_info.ring_name = "rx_ring";
1857 rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
1860 "Failed to alloc mem and reserve DMA mem for rx ring!");
1865 rxq->ptype_tbl = &hns->ptype_tbl;
1867 rxq->rx_free_thresh = (conf->rx_free_thresh > 0) ?
1868 conf->rx_free_thresh : HNS3_DEFAULT_RX_FREE_THRESH;
1870 rxq->rx_deferred_start = conf->rx_deferred_start;
1871 if (rxq->rx_deferred_start && !hns3_dev_get_support(hw, INDEP_TXRX)) {
1872 hns3_warn(hw, "deferred start is not supported.");
1873 rxq->rx_deferred_start = false;
1876 rx_entry_len = (rxq->nb_rx_desc + HNS3_DEFAULT_RX_BURST) *
1877 sizeof(struct hns3_entry);
1878 rxq->sw_ring = rte_zmalloc_socket("hns3 RX sw ring", rx_entry_len,
1879 RTE_CACHE_LINE_SIZE, socket_id);
1880 if (rxq->sw_ring == NULL) {
1881 hns3_err(hw, "Failed to allocate memory for rx sw ring!");
1882 hns3_rx_queue_release(rxq);
1886 rxq->next_to_use = 0;
1887 rxq->rx_free_hold = 0;
1888 rxq->rx_rearm_start = 0;
1889 rxq->rx_rearm_nb = 0;
1890 rxq->pkt_first_seg = NULL;
1891 rxq->pkt_last_seg = NULL;
1892 rxq->port_id = dev->data->port_id;
1894 * For hns3 PF device, if the VLAN mode is HW_SHIFT_AND_DISCARD_MODE,
1895 * the pvid_sw_discard_en in the queue struct should not be changed,
1896 * because PVID-related operations do not need to be processed by PMD.
1897 * For hns3 VF devices, whether PVID needs to be processed depends
1898 * on the configuration of the PF kernel-mode netdev driver. The
1899 * related PF configuration is delivered through the mailbox and finally
1900 * reflected in port_base_vlan_cfg.
1902 if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
1903 rxq->pvid_sw_discard_en = hw->port_base_vlan_cfg.state ==
1904 HNS3_PORT_BASE_VLAN_ENABLE;
1906 rxq->pvid_sw_discard_en = false;
1907 rxq->ptype_en = hns3_dev_get_support(hw, RXD_ADV_LAYOUT) ? true : false;
1908 rxq->configured = true;
1911 rxq->io_base = (void *)((char *)hw->io_base +
1912 hns3_get_tqp_reg_offset(idx));
1913 rxq->io_head_reg = (volatile void *)((char *)rxq->io_base +
1914 HNS3_RING_RX_HEAD_REG);
1915 rxq->rx_buf_len = rx_buf_size;
1916 memset(&rxq->basic_stats, 0, sizeof(struct hns3_rx_basic_stats));
1917 memset(&rxq->err_stats, 0, sizeof(struct hns3_rx_bd_errors_stats));
1918 memset(&rxq->dfx_stats, 0, sizeof(struct hns3_rx_dfx_stats));
1920 /* CRC len set here is used for amending packet length */
1921 if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
1922 rxq->crc_len = RTE_ETHER_CRC_LEN;
1926 rxq->bulk_mbuf_num = 0;
1928 rte_spinlock_lock(&hw->lock);
1929 dev->data->rx_queues[idx] = rxq;
1930 rte_spinlock_unlock(&hw->lock);
1936 hns3_rx_scattered_reset(struct rte_eth_dev *dev)
1938 struct hns3_adapter *hns = dev->data->dev_private;
1939 struct hns3_hw *hw = &hns->hw;
1942 dev->data->scattered_rx = false;
1946 hns3_rx_scattered_calc(struct rte_eth_dev *dev)
1948 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1949 struct hns3_adapter *hns = dev->data->dev_private;
1950 struct hns3_hw *hw = &hns->hw;
1951 struct hns3_rx_queue *rxq;
1954 if (dev->data->rx_queues == NULL)
1957 for (queue_id = 0; queue_id < dev->data->nb_rx_queues; queue_id++) {
1958 rxq = dev->data->rx_queues[queue_id];
1959 if (hw->rx_buf_len == 0)
1960 hw->rx_buf_len = rxq->rx_buf_len;
1962 hw->rx_buf_len = RTE_MIN(hw->rx_buf_len,
1966 if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER ||
1967 dev->data->mtu + HNS3_ETH_OVERHEAD > hw->rx_buf_len)
1968 dev->data->scattered_rx = true;
1972 hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1974 static const uint32_t ptypes[] = {
1976 RTE_PTYPE_L2_ETHER_LLDP,
1977 RTE_PTYPE_L2_ETHER_ARP,
1979 RTE_PTYPE_L3_IPV4_EXT,
1981 RTE_PTYPE_L3_IPV6_EXT,
1987 RTE_PTYPE_TUNNEL_GRE,
1988 RTE_PTYPE_INNER_L2_ETHER,
1989 RTE_PTYPE_INNER_L3_IPV4,
1990 RTE_PTYPE_INNER_L3_IPV6,
1991 RTE_PTYPE_INNER_L3_IPV4_EXT,
1992 RTE_PTYPE_INNER_L3_IPV6_EXT,
1993 RTE_PTYPE_INNER_L4_UDP,
1994 RTE_PTYPE_INNER_L4_TCP,
1995 RTE_PTYPE_INNER_L4_SCTP,
1996 RTE_PTYPE_INNER_L4_ICMP,
1997 RTE_PTYPE_TUNNEL_VXLAN,
1998 RTE_PTYPE_TUNNEL_NVGRE,
2001 static const uint32_t adv_layout_ptypes[] = {
2003 RTE_PTYPE_L2_ETHER_TIMESYNC,
2004 RTE_PTYPE_L2_ETHER_LLDP,
2005 RTE_PTYPE_L2_ETHER_ARP,
2006 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
2007 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
2009 RTE_PTYPE_L4_NONFRAG,
2015 RTE_PTYPE_TUNNEL_GRE,
2016 RTE_PTYPE_TUNNEL_GRENAT,
2017 RTE_PTYPE_INNER_L2_ETHER,
2018 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
2019 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
2020 RTE_PTYPE_INNER_L4_FRAG,
2021 RTE_PTYPE_INNER_L4_ICMP,
2022 RTE_PTYPE_INNER_L4_NONFRAG,
2023 RTE_PTYPE_INNER_L4_UDP,
2024 RTE_PTYPE_INNER_L4_TCP,
2025 RTE_PTYPE_INNER_L4_SCTP,
2026 RTE_PTYPE_INNER_L4_ICMP,
2029 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2031 if (dev->rx_pkt_burst == hns3_recv_pkts_simple ||
2032 dev->rx_pkt_burst == hns3_recv_scattered_pkts ||
2033 dev->rx_pkt_burst == hns3_recv_pkts_vec ||
2034 dev->rx_pkt_burst == hns3_recv_pkts_vec_sve) {
2035 if (hns3_dev_get_support(hw, RXD_ADV_LAYOUT))
2036 return adv_layout_ptypes;
2045 hns3_init_non_tunnel_ptype_tbl(struct hns3_ptype_table *tbl)
2047 tbl->l3table[0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
2048 tbl->l3table[1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
2049 tbl->l3table[2] = RTE_PTYPE_L2_ETHER_ARP;
2050 tbl->l3table[4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT;
2051 tbl->l3table[5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT;
2052 tbl->l3table[6] = RTE_PTYPE_L2_ETHER_LLDP;
2054 tbl->l4table[0] = RTE_PTYPE_L4_UDP;
2055 tbl->l4table[1] = RTE_PTYPE_L4_TCP;
2056 tbl->l4table[2] = RTE_PTYPE_TUNNEL_GRE;
2057 tbl->l4table[3] = RTE_PTYPE_L4_SCTP;
2058 tbl->l4table[4] = RTE_PTYPE_L4_IGMP;
2059 tbl->l4table[5] = RTE_PTYPE_L4_ICMP;
2063 hns3_init_tunnel_ptype_tbl(struct hns3_ptype_table *tbl)
2065 tbl->inner_l3table[0] = RTE_PTYPE_INNER_L2_ETHER |
2066 RTE_PTYPE_INNER_L3_IPV4;
2067 tbl->inner_l3table[1] = RTE_PTYPE_INNER_L2_ETHER |
2068 RTE_PTYPE_INNER_L3_IPV6;
2069 /* There is no ptype for inner ARP/RARP */
2070 tbl->inner_l3table[2] = RTE_PTYPE_UNKNOWN;
2071 tbl->inner_l3table[3] = RTE_PTYPE_UNKNOWN;
2072 tbl->inner_l3table[4] = RTE_PTYPE_INNER_L2_ETHER |
2073 RTE_PTYPE_INNER_L3_IPV4_EXT;
2074 tbl->inner_l3table[5] = RTE_PTYPE_INNER_L2_ETHER |
2075 RTE_PTYPE_INNER_L3_IPV6_EXT;
2077 tbl->inner_l4table[0] = RTE_PTYPE_INNER_L4_UDP;
2078 tbl->inner_l4table[1] = RTE_PTYPE_INNER_L4_TCP;
2079 /* There is no ptype for inner GRE */
2080 tbl->inner_l4table[2] = RTE_PTYPE_UNKNOWN;
2081 tbl->inner_l4table[3] = RTE_PTYPE_INNER_L4_SCTP;
2082 /* There is no ptype for inner IGMP */
2083 tbl->inner_l4table[4] = RTE_PTYPE_UNKNOWN;
2084 tbl->inner_l4table[5] = RTE_PTYPE_INNER_L4_ICMP;
2086 tbl->ol3table[0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
2087 tbl->ol3table[1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
2088 tbl->ol3table[2] = RTE_PTYPE_UNKNOWN;
2089 tbl->ol3table[3] = RTE_PTYPE_UNKNOWN;
2090 tbl->ol3table[4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT;
2091 tbl->ol3table[5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT;
2093 tbl->ol4table[0] = RTE_PTYPE_UNKNOWN;
2094 tbl->ol4table[1] = RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN;
2095 tbl->ol4table[2] = RTE_PTYPE_TUNNEL_NVGRE;
2099 hns3_init_adv_layout_ptype(struct hns3_ptype_table *tbl)
2101 uint32_t *ptype = tbl->ptype;
2104 ptype[1] = RTE_PTYPE_L2_ETHER_ARP;
2105 ptype[3] = RTE_PTYPE_L2_ETHER_LLDP;
2106 ptype[8] = RTE_PTYPE_L2_ETHER_TIMESYNC;
2108 /* Non-tunnel IPv4 */
2109 ptype[17] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2111 ptype[18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2112 RTE_PTYPE_L4_NONFRAG;
2113 ptype[19] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2115 ptype[20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2117 ptype[21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2118 RTE_PTYPE_TUNNEL_GRE;
2119 ptype[22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2121 ptype[23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2123 ptype[24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2125 /* The next ptype is PTP over IPv4 + UDP */
2126 ptype[25] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2129 /* IPv4 --> GRE/Teredo/VXLAN */
2130 ptype[29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2131 RTE_PTYPE_TUNNEL_GRENAT;
2132 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
2133 ptype[30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2134 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER;
2136 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
2137 ptype[31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2138 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2139 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2140 RTE_PTYPE_INNER_L4_FRAG;
2141 ptype[32] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2142 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2143 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2144 RTE_PTYPE_INNER_L4_NONFRAG;
2145 ptype[33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2146 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2147 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2148 RTE_PTYPE_INNER_L4_UDP;
2149 ptype[34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2150 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2151 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2152 RTE_PTYPE_INNER_L4_TCP;
2153 ptype[35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2154 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2155 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2156 RTE_PTYPE_INNER_L4_SCTP;
2157 /* The next ptype's inner L4 is IGMP */
2158 ptype[36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2159 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2160 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
2161 ptype[37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2162 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2163 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2164 RTE_PTYPE_INNER_L4_ICMP;
2166 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
2167 ptype[39] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2168 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2169 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2170 RTE_PTYPE_INNER_L4_FRAG;
2171 ptype[40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2172 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2173 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2174 RTE_PTYPE_INNER_L4_NONFRAG;
2175 ptype[41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2176 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2177 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2178 RTE_PTYPE_INNER_L4_UDP;
2179 ptype[42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2180 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2181 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2182 RTE_PTYPE_INNER_L4_TCP;
2183 ptype[43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2184 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2185 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2186 RTE_PTYPE_INNER_L4_SCTP;
2187 /* The next ptype's inner L4 is IGMP */
2188 ptype[44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2189 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2190 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
2191 ptype[45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2192 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2193 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2194 RTE_PTYPE_INNER_L4_ICMP;
2196 /* Non-tunnel IPv6 */
2197 ptype[111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2199 ptype[112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2200 RTE_PTYPE_L4_NONFRAG;
2201 ptype[113] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2203 ptype[114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2205 ptype[115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2206 RTE_PTYPE_TUNNEL_GRE;
2207 ptype[116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2209 ptype[117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2211 ptype[118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2213 /* Special for PTP over IPv6 + UDP */
2214 ptype[119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2217 /* IPv6 --> GRE/Teredo/VXLAN */
2218 ptype[123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2219 RTE_PTYPE_TUNNEL_GRENAT;
2220 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
2221 ptype[124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2222 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER;
2224 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
2225 ptype[125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2226 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2227 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2228 RTE_PTYPE_INNER_L4_FRAG;
2229 ptype[126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2230 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2231 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2232 RTE_PTYPE_INNER_L4_NONFRAG;
2233 ptype[127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2234 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2235 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2236 RTE_PTYPE_INNER_L4_UDP;
2237 ptype[128] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2238 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2239 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2240 RTE_PTYPE_INNER_L4_TCP;
2241 ptype[129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2242 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2243 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2244 RTE_PTYPE_INNER_L4_SCTP;
2245 /* The next ptype's inner L4 is IGMP */
2246 ptype[130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2247 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2248 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
2249 ptype[131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2250 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2251 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2252 RTE_PTYPE_INNER_L4_ICMP;
2254 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
2255 ptype[133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2256 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2257 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2258 RTE_PTYPE_INNER_L4_FRAG;
2259 ptype[134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2260 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2261 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2262 RTE_PTYPE_INNER_L4_NONFRAG;
2263 ptype[135] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2264 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2265 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2266 RTE_PTYPE_INNER_L4_UDP;
2267 ptype[136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2268 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2269 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2270 RTE_PTYPE_INNER_L4_TCP;
2271 ptype[137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2272 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2273 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2274 RTE_PTYPE_INNER_L4_SCTP;
2275 /* The next ptype's inner L4 is IGMP */
2276 ptype[138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2277 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2278 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
2279 ptype[139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2280 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2281 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2282 RTE_PTYPE_INNER_L4_ICMP;
2286 hns3_init_rx_ptype_tble(struct rte_eth_dev *dev)
2288 struct hns3_adapter *hns = dev->data->dev_private;
2289 struct hns3_ptype_table *tbl = &hns->ptype_tbl;
2291 memset(tbl, 0, sizeof(*tbl));
2293 hns3_init_non_tunnel_ptype_tbl(tbl);
2294 hns3_init_tunnel_ptype_tbl(tbl);
2295 hns3_init_adv_layout_ptype(tbl);
2299 hns3_rxd_to_vlan_tci(struct hns3_rx_queue *rxq, struct rte_mbuf *mb,
2300 uint32_t l234_info, const struct hns3_desc *rxd)
2302 #define HNS3_STRP_STATUS_NUM 0x4
2304 #define HNS3_NO_STRP_VLAN_VLD 0x0
2305 #define HNS3_INNER_STRP_VLAN_VLD 0x1
2306 #define HNS3_OUTER_STRP_VLAN_VLD 0x2
2307 uint32_t strip_status;
2308 uint32_t report_mode;
	 * Due to a hardware limitation, the VLAN tag is always written into the
	 * Rx descriptor when it is stripped from the packet, so the driver has
	 * to decide which tag to report to the mbuf according to the PVID
	 * configuration and the VLAN strip status.
2316 static const uint32_t report_type[][HNS3_STRP_STATUS_NUM] = {
2318 HNS3_NO_STRP_VLAN_VLD,
2319 HNS3_OUTER_STRP_VLAN_VLD,
2320 HNS3_INNER_STRP_VLAN_VLD,
2321 HNS3_OUTER_STRP_VLAN_VLD
2324 HNS3_NO_STRP_VLAN_VLD,
2325 HNS3_NO_STRP_VLAN_VLD,
2326 HNS3_NO_STRP_VLAN_VLD,
2327 HNS3_INNER_STRP_VLAN_VLD
2330 strip_status = hns3_get_field(l234_info, HNS3_RXD_STRP_TAGP_M,
2331 HNS3_RXD_STRP_TAGP_S);
2332 report_mode = report_type[rxq->pvid_sw_discard_en][strip_status];
2333 switch (report_mode) {
2334 case HNS3_NO_STRP_VLAN_VLD:
2337 case HNS3_INNER_STRP_VLAN_VLD:
2338 mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
2339 mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.vlan_tag);
2341 case HNS3_OUTER_STRP_VLAN_VLD:
2342 mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
2343 mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.ot_vlan_tag);
2352 recalculate_data_len(struct rte_mbuf *first_seg, struct rte_mbuf *last_seg,
2353 struct rte_mbuf *rxm, struct hns3_rx_queue *rxq,
2356 uint8_t crc_len = rxq->crc_len;
2358 if (data_len <= crc_len) {
2359 rte_pktmbuf_free_seg(rxm);
2360 first_seg->nb_segs--;
2361 last_seg->data_len = (uint16_t)(last_seg->data_len -
2362 (crc_len - data_len));
2363 last_seg->next = NULL;
2365 rxm->data_len = (uint16_t)(data_len - crc_len);
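	/*
	 * Illustrative example of the two branches above (values are made up,
	 * not driver defaults): with crc_len = 4 and a final buffer holding
	 * data_len = 2, the last buffer contains only part of the CRC, so its
	 * mbuf is freed and the previous segment's data_len is reduced by the
	 * remaining 4 - 2 = 2 CRC bytes. With data_len = 60, the else branch
	 * simply trims the CRC: data_len becomes 60 - 4 = 56.
	 */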
2368 static inline struct rte_mbuf *
2369 hns3_rx_alloc_buffer(struct hns3_rx_queue *rxq)
2373 if (likely(rxq->bulk_mbuf_num > 0))
2374 return rxq->bulk_mbuf[--rxq->bulk_mbuf_num];
2376 ret = rte_mempool_get_bulk(rxq->mb_pool, (void **)rxq->bulk_mbuf,
2377 HNS3_BULK_ALLOC_MBUF_NUM);
2378 if (likely(ret == 0)) {
2379 rxq->bulk_mbuf_num = HNS3_BULK_ALLOC_MBUF_NUM;
2380 return rxq->bulk_mbuf[--rxq->bulk_mbuf_num];
2382 return rte_mbuf_raw_alloc(rxq->mb_pool);
2386 hns3_rx_ptp_timestamp_handle(struct hns3_rx_queue *rxq, struct rte_mbuf *mbuf,
2389 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(rxq->hns);
2391 mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP |
2392 RTE_MBUF_F_RX_IEEE1588_TMST;
2393 if (hns3_timestamp_rx_dynflag > 0) {
2394 *RTE_MBUF_DYNFIELD(mbuf, hns3_timestamp_dynfield_offset,
2395 rte_mbuf_timestamp_t *) = timestamp;
2396 mbuf->ol_flags |= hns3_timestamp_rx_dynflag;
2399 pf->rx_timestamp = timestamp;
2403 hns3_recv_pkts_simple(void *rx_queue,
2404 struct rte_mbuf **rx_pkts,
2407 volatile struct hns3_desc *rx_ring; /* RX ring (desc) */
2408 volatile struct hns3_desc *rxdp; /* pointer of the current desc */
2409 struct hns3_rx_queue *rxq; /* RX queue */
2410 struct hns3_entry *sw_ring;
2411 struct hns3_entry *rxe;
2412 struct hns3_desc rxd;
2413 struct rte_mbuf *nmb; /* pointer of the new mbuf */
2414 struct rte_mbuf *rxm;
2415 uint32_t bd_base_info;
2427 rx_ring = rxq->rx_ring;
2428 sw_ring = rxq->sw_ring;
2429 rx_id = rxq->next_to_use;
2431 while (nb_rx < nb_pkts) {
2432 rxdp = &rx_ring[rx_id];
2433 bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
2434 if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
2437 rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -
2438 (1u << HNS3_RXD_VLD_B)];
2440 nmb = hns3_rx_alloc_buffer(rxq);
2441 if (unlikely(nmb == NULL)) {
2444 port_id = rxq->port_id;
2445 rte_eth_devices[port_id].data->rx_mbuf_alloc_failed++;
2450 rxe = &sw_ring[rx_id];
2452 if (unlikely(rx_id == rxq->nb_rx_desc))
2455 rte_prefetch0(sw_ring[rx_id].mbuf);
2456 if ((rx_id & HNS3_RX_RING_PREFETCTH_MASK) == 0) {
2457 rte_prefetch0(&rx_ring[rx_id]);
2458 rte_prefetch0(&sw_ring[rx_id]);
2465 if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B)))
2466 hns3_rx_ptp_timestamp_handle(rxq, rxm,
2467 rte_le_to_cpu_64(rxdp->timestamp));
2469 dma_addr = rte_mbuf_data_iova_default(nmb);
2470 rxdp->addr = rte_cpu_to_le_64(dma_addr);
2471 rxdp->rx.bd_base_info = 0;
2473 rxm->data_off = RTE_PKTMBUF_HEADROOM;
2474 rxm->pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.rx.pkt_len)) -
2476 rxm->data_len = rxm->pkt_len;
2477 rxm->port = rxq->port_id;
2478 rxm->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash);
2479 rxm->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
2480 if (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) {
2482 rte_le_to_cpu_16(rxd.rx.fd_id);
2483 rxm->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
		/* Load the remaining descriptor data and extract the necessary fields */
2489 l234_info = rte_le_to_cpu_32(rxd.rx.l234_info);
2490 ol_info = rte_le_to_cpu_32(rxd.rx.ol_info);
2491 ret = hns3_handle_bdinfo(rxq, rxm, bd_base_info, l234_info);
2495 rxm->packet_type = hns3_rx_calc_ptype(rxq, l234_info, ol_info);
2497 if (rxm->packet_type == RTE_PTYPE_L2_ETHER_TIMESYNC)
2498 rxm->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
2500 hns3_rxd_to_vlan_tci(rxq, rxm, l234_info, &rxd);
2502 /* Increment bytes counter */
2503 rxq->basic_stats.bytes += rxm->pkt_len;
2505 rx_pkts[nb_rx++] = rxm;
2508 rte_pktmbuf_free(rxm);
2511 rxq->next_to_use = rx_id;
2512 rxq->rx_free_hold += nb_rx_bd;
2513 if (rxq->rx_free_hold > rxq->rx_free_thresh) {
2514 hns3_write_reg_opt(rxq->io_head_reg, rxq->rx_free_hold);
2515 rxq->rx_free_hold = 0;
2522 hns3_recv_scattered_pkts(void *rx_queue,
2523 struct rte_mbuf **rx_pkts,
2526 volatile struct hns3_desc *rx_ring; /* RX ring (desc) */
2527 volatile struct hns3_desc *rxdp; /* pointer of the current desc */
2528 struct hns3_rx_queue *rxq; /* RX queue */
2529 struct hns3_entry *sw_ring;
2530 struct hns3_entry *rxe;
2531 struct rte_mbuf *first_seg;
2532 struct rte_mbuf *last_seg;
2533 struct hns3_desc rxd;
2534 struct rte_mbuf *nmb; /* pointer of the new mbuf */
2535 struct rte_mbuf *rxm;
2536 struct rte_eth_dev *dev;
2537 uint32_t bd_base_info;
2552 rx_id = rxq->next_to_use;
2553 rx_ring = rxq->rx_ring;
2554 sw_ring = rxq->sw_ring;
2555 first_seg = rxq->pkt_first_seg;
2556 last_seg = rxq->pkt_last_seg;
2558 while (nb_rx < nb_pkts) {
2559 rxdp = &rx_ring[rx_id];
2560 bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
2561 if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
2565 * The interactive process between software and hardware of
2566 * receiving a new packet in hns3 network engine:
	 * 1. The hardware network engine first writes the packet content to
	 *    the memory pointed to by the 'addr' field of the Rx Buffer
	 *    Descriptor, then fills the result of parsing the packet,
	 *    including the valid field, into the Rx Buffer Descriptor in one
	 *    write operation.
	 * 2. The driver reads the Rx BD's valid field in a loop to check
	 *    whether it is valid; if so, it assigns a new address to the addr
	 *    field, clears the valid field, extracts the other packet
	 *    information by parsing the remaining Rx BD fields, and finally
	 *    writes the number of Rx BDs processed by the driver back to the
	 *    HNS3_RING_RX_HEAD_REG register to inform the hardware.
	 * In the above process, the ordering is very important. We must make
	 * sure that the CPU reads the Rx BD's other fields only after the
	 * There are two types of re-ordering: compiler re-ordering and CPU
	 * re-ordering under the ARMv8 architecture.
	 * 1. We use volatile to deal with compiler re-ordering, which is why
	 *    rx_ring/rxdp are defined with volatile.
	 * 2. A memory barrier is commonly used to deal with CPU re-ordering,
	 *    but its cost is high.
	 * To avoid the high cost of a memory barrier, we rely on the data
	 * dependency ordering of the ARMv8 architecture, e.g.:
	 *   instr01: load A
	 *   instr02: load B <- A
	 * instr02 will always execute after instr01.
2597 * To construct the data dependency ordering, we use the
2598 * following assignment:
2599 * rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -
2600 * (1u<<HNS3_RXD_VLD_B)]
	 * Using the gcc compiler under the ARMv8 architecture, the related
	 * assembly code example is as follows:
	 * note: (1u << HNS3_RXD_VLD_B) equals 0x10
2604 * instr01: ldr w26, [x22, #28] --read bd_base_info
2605 * instr02: and w0, w26, #0x10 --calc bd_base_info & 0x10
2606 * instr03: sub w0, w0, #0x10 --calc (bd_base_info &
2608 * instr04: add x0, x22, x0, lsl #5 --calc copy source addr
2609 * instr05: ldp x2, x3, [x0]
2610 * instr06: stp x2, x3, [x29, #256] --copy BD's [0 ~ 15]B
2611 * instr07: ldp x4, x5, [x0, #16]
2612 * instr08: stp x4, x5, [x29, #272] --copy BD's [16 ~ 31]B
	 * instr05~08 depend on x0's value, and x0 depends on w26's value;
	 * w26 holds bd_base_info, which forms the data dependency ordering.
	 * note: if the BD is valid, (bd_base_info & (1u<<HNS3_RXD_VLD_B)) -
	 *       (1u<<HNS3_RXD_VLD_B) will always be zero, so the
	 *       assignment is correct.
2620 * So we use the data dependency ordering instead of memory
2621 * barrier to improve receive performance.
2623 rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -
2624 (1u << HNS3_RXD_VLD_B)];
2626 nmb = hns3_rx_alloc_buffer(rxq);
2627 if (unlikely(nmb == NULL)) {
2628 dev = &rte_eth_devices[rxq->port_id];
2629 dev->data->rx_mbuf_alloc_failed++;
2634 rxe = &sw_ring[rx_id];
2636 if (unlikely(rx_id == rxq->nb_rx_desc))
2639 rte_prefetch0(sw_ring[rx_id].mbuf);
2640 if ((rx_id & HNS3_RX_RING_PREFETCTH_MASK) == 0) {
2641 rte_prefetch0(&rx_ring[rx_id]);
2642 rte_prefetch0(&sw_ring[rx_id]);
2648 if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B)))
2649 timestamp = rte_le_to_cpu_64(rxdp->timestamp);
2651 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
2652 rxdp->rx.bd_base_info = 0;
2653 rxdp->addr = dma_addr;
2655 if (first_seg == NULL) {
2657 first_seg->nb_segs = 1;
2659 first_seg->nb_segs++;
2660 last_seg->next = rxm;
2663 rxm->data_off = RTE_PKTMBUF_HEADROOM;
2664 rxm->data_len = rte_le_to_cpu_16(rxd.rx.size);
2666 if (!(bd_base_info & BIT(HNS3_RXD_FE_B))) {
2672 if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B)))
2673 hns3_rx_ptp_timestamp_handle(rxq, first_seg, timestamp);
		 * This is the last buffer of the received packet. The packet
		 * length from the buffer descriptor may include the CRC length,
		 * so it must be subtracted, and the same applies to the data
		 * length.
2680 first_seg->pkt_len = rte_le_to_cpu_16(rxd.rx.pkt_len);
2683 * This is the last buffer of the received packet. If the CRC
2684 * is not stripped by the hardware:
2685 * - Subtract the CRC length from the total packet length.
2686 * - If the last buffer only contains the whole CRC or a part
		 *    of it, free the mbuf associated with the last buffer. If part
2688 * of the CRC is also contained in the previous mbuf, subtract
2689 * the length of that CRC part from the data length of the
2693 if (unlikely(rxq->crc_len > 0)) {
2694 first_seg->pkt_len -= rxq->crc_len;
2695 recalculate_data_len(first_seg, last_seg, rxm, rxq,
2699 first_seg->port = rxq->port_id;
2700 first_seg->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash);
2701 first_seg->ol_flags = RTE_MBUF_F_RX_RSS_HASH;
2702 if (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) {
2703 first_seg->hash.fdir.hi =
2704 rte_le_to_cpu_16(rxd.rx.fd_id);
2705 first_seg->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
2708 gro_size = hns3_get_field(bd_base_info, HNS3_RXD_GRO_SIZE_M,
2709 HNS3_RXD_GRO_SIZE_S);
2710 if (gro_size != 0) {
2711 first_seg->ol_flags |= RTE_MBUF_F_RX_LRO;
2712 first_seg->tso_segsz = gro_size;
2715 l234_info = rte_le_to_cpu_32(rxd.rx.l234_info);
2716 ol_info = rte_le_to_cpu_32(rxd.rx.ol_info);
2717 ret = hns3_handle_bdinfo(rxq, first_seg, bd_base_info,
2722 first_seg->packet_type = hns3_rx_calc_ptype(rxq,
2723 l234_info, ol_info);
2725 if (first_seg->packet_type == RTE_PTYPE_L2_ETHER_TIMESYNC)
2726 rxm->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
2728 hns3_rxd_to_vlan_tci(rxq, first_seg, l234_info, &rxd);
2730 /* Increment bytes counter */
2731 rxq->basic_stats.bytes += first_seg->pkt_len;
2733 rx_pkts[nb_rx++] = first_seg;
2737 rte_pktmbuf_free(first_seg);
2741 rxq->next_to_use = rx_id;
2742 rxq->pkt_first_seg = first_seg;
2743 rxq->pkt_last_seg = last_seg;
2745 rxq->rx_free_hold += nb_rx_bd;
2746 if (rxq->rx_free_hold > rxq->rx_free_thresh) {
2747 hns3_write_reg_opt(rxq->io_head_reg, rxq->rx_free_hold);
2748 rxq->rx_free_hold = 0;
2755 hns3_rxq_vec_setup(__rte_unused struct hns3_rx_queue *rxq)
2760 hns3_rx_check_vec_support(__rte_unused struct rte_eth_dev *dev)
2766 hns3_recv_pkts_vec(__rte_unused void *tx_queue,
2767 __rte_unused struct rte_mbuf **rx_pkts,
2768 __rte_unused uint16_t nb_pkts)
2774 hns3_recv_pkts_vec_sve(__rte_unused void *tx_queue,
2775 __rte_unused struct rte_mbuf **rx_pkts,
2776 __rte_unused uint16_t nb_pkts)
2782 hns3_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
2783 struct rte_eth_burst_mode *mode)
2785 static const struct {
2786 eth_rx_burst_t pkt_burst;
2789 { hns3_recv_pkts_simple, "Scalar Simple" },
2790 { hns3_recv_scattered_pkts, "Scalar Scattered" },
2791 { hns3_recv_pkts_vec, "Vector Neon" },
2792 { hns3_recv_pkts_vec_sve, "Vector Sve" },
2795 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
2799 for (i = 0; i < RTE_DIM(burst_infos); i++) {
2800 if (pkt_burst == burst_infos[i].pkt_burst) {
2801 snprintf(mode->info, sizeof(mode->info), "%s",
2802 burst_infos[i].info);
2812 hns3_get_default_vec_support(void)
2814 #if defined(RTE_ARCH_ARM64)
2815 if (rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128)
2817 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON))
2824 hns3_get_sve_support(void)
2826 #if defined(RTE_HAS_SVE_ACLE)
2827 if (rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_256)
2829 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SVE))
2835 static eth_rx_burst_t
2836 hns3_get_rx_function(struct rte_eth_dev *dev)
2838 struct hns3_adapter *hns = dev->data->dev_private;
2839 uint64_t offloads = dev->data->dev_conf.rxmode.offloads;
2840 bool vec_allowed, sve_allowed, simple_allowed;
2843 vec_support = hns3_rx_check_vec_support(dev) == 0;
2844 vec_allowed = vec_support && hns3_get_default_vec_support();
2845 sve_allowed = vec_support && hns3_get_sve_support();
2846 simple_allowed = !dev->data->scattered_rx &&
2847 (offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) == 0;
2849 if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_VEC && vec_allowed)
2850 return hns3_recv_pkts_vec;
2851 if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_SVE && sve_allowed)
2852 return hns3_recv_pkts_vec_sve;
2853 if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_SIMPLE && simple_allowed)
2854 return hns3_recv_pkts_simple;
2855 if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_COMMON)
2856 return hns3_recv_scattered_pkts;
2859 return hns3_recv_pkts_vec;
2861 return hns3_recv_pkts_simple;
2863 return hns3_recv_scattered_pkts;
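/*
 * Note on the selection above (a usage sketch, not asserting exact devarg
 * names): the rx_func_hint checked here is normally populated from a runtime
 * device argument, e.g. something like
 *     dpdk-testpmd -a <DBDF>,rx_func_hint=simple -- -i
 * assuming such a devarg is exposed by this driver. Without a hint, the
 * fallback order above applies: vector first when allowed, then the simple
 * (non-scattered, no LRO) path, and finally the scattered receive path.
 */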
2867 hns3_tx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_txconf *conf,
2868 uint16_t nb_desc, uint16_t *tx_rs_thresh,
2869 uint16_t *tx_free_thresh, uint16_t idx)
2871 #define HNS3_TX_RS_FREE_THRESH_GAP 8
2872 uint16_t rs_thresh, free_thresh, fast_free_thresh;
2874 if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||
2875 nb_desc % HNS3_ALIGN_RING_DESC) {
2876 hns3_err(hw, "number (%u) of tx descriptors is invalid",
2881 rs_thresh = (conf->tx_rs_thresh > 0) ?
2882 conf->tx_rs_thresh : HNS3_DEFAULT_TX_RS_THRESH;
2883 free_thresh = (conf->tx_free_thresh > 0) ?
2884 conf->tx_free_thresh : HNS3_DEFAULT_TX_FREE_THRESH;
2885 if (rs_thresh + free_thresh > nb_desc || nb_desc % rs_thresh ||
2886 rs_thresh >= nb_desc - HNS3_TX_RS_FREE_THRESH_GAP ||
2887 free_thresh >= nb_desc - HNS3_TX_RS_FREE_THRESH_GAP) {
2888 hns3_err(hw, "tx_rs_thresh (%u) tx_free_thresh (%u) nb_desc "
2889 "(%u) of tx descriptors for port=%u queue=%u check "
2891 rs_thresh, free_thresh, nb_desc, hw->data->port_id,
2896 if (conf->tx_free_thresh == 0) {
2897 /* Fast free Tx memory buffer to improve cache hit rate */
2898 fast_free_thresh = nb_desc - rs_thresh;
2899 if (fast_free_thresh >=
2900 HNS3_TX_FAST_FREE_AHEAD + HNS3_DEFAULT_TX_FREE_THRESH)
2901 free_thresh = fast_free_thresh -
2902 HNS3_TX_FAST_FREE_AHEAD;
2905 *tx_rs_thresh = rs_thresh;
2906 *tx_free_thresh = free_thresh;
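	/*
	 * A quick illustration of the checks above (example values only, not
	 * driver defaults): with nb_desc = 512, tx_rs_thresh = 32 and
	 * tx_free_thresh = 32 we have 32 + 32 <= 512, 512 % 32 == 0 and both
	 * thresholds below 512 - HNS3_TX_RS_FREE_THRESH_GAP, so the
	 * configuration is accepted.
	 */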
2911 hns3_tx_push_get_queue_tail_reg(struct rte_eth_dev *dev, uint16_t queue_id)
2913 #define HNS3_TX_PUSH_TQP_REGION_SIZE 0x10000
2914 #define HNS3_TX_PUSH_QUICK_DOORBELL_OFFSET 64
2915 #define HNS3_TX_PUSH_PCI_BAR_INDEX 4
2917 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
2918 uint8_t bar_id = HNS3_TX_PUSH_PCI_BAR_INDEX;
	 * If the device supports Tx push, its PCIe bar45 must exist, and the
	 * DPDK framework maps it by default during the PCI probe stage.
	 * Within this BAR, the first half is used by RoCE (RDMA over Converged
	 * Ethernet) and the second half by the NIC; every TQP occupies 64KB.
	 * The quick doorbell is located at a 64B offset within each TQP region.
2929 return (char *)pci_dev->mem_resource[bar_id].addr +
2930 (pci_dev->mem_resource[bar_id].len >> 1) +
2931 HNS3_TX_PUSH_TQP_REGION_SIZE * queue_id +
2932 HNS3_TX_PUSH_QUICK_DOORBELL_OFFSET;
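/*
 * A worked example of the address computed above (sizes are illustrative):
 * if bar45 is 8GB, the NIC half starts at base + 4GB, so the quick doorbell
 * of queue 2 is at base + 4GB + 2 * 0x10000 + 64.
 */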
2936 hns3_tx_push_init(struct rte_eth_dev *dev)
2938 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2939 volatile uint32_t *reg;
2942 if (!hns3_dev_get_support(hw, TX_PUSH))
2945 reg = (volatile uint32_t *)hns3_tx_push_get_queue_tail_reg(dev, 0);
	 * Because bar45 is about 8GB in size, taking the page fault in the Tx
	 * path when working with vfio-pci may take a long time, so perform one
	 * read here to make the kernel set up the page table mapping for bar45
	 * during the init stage.
	 * Note: bar45 is readable, but the value read back is all 1s.
2958 hns3_tx_push_queue_init(struct rte_eth_dev *dev,
2960 struct hns3_tx_queue *txq)
2962 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2963 if (!hns3_dev_get_support(hw, TX_PUSH)) {
2964 txq->tx_push_enable = false;
2968 txq->io_tail_reg = (volatile void *)hns3_tx_push_get_queue_tail_reg(dev,
2970 txq->tx_push_enable = true;
2974 hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
2975 unsigned int socket_id, const struct rte_eth_txconf *conf)
2977 struct hns3_adapter *hns = dev->data->dev_private;
2978 uint16_t tx_rs_thresh, tx_free_thresh;
2979 struct hns3_hw *hw = &hns->hw;
2980 struct hns3_queue_info q_info;
2981 struct hns3_tx_queue *txq;
2985 ret = hns3_tx_queue_conf_check(hw, conf, nb_desc,
2986 &tx_rs_thresh, &tx_free_thresh, idx);
2990 if (dev->data->tx_queues[idx] != NULL) {
2991 hns3_tx_queue_release(dev->data->tx_queues[idx]);
2992 dev->data->tx_queues[idx] = NULL;
2996 q_info.socket_id = socket_id;
2997 q_info.nb_desc = nb_desc;
2998 q_info.type = "hns3 TX queue";
2999 q_info.ring_name = "tx_ring";
3000 txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
3003 "Failed to alloc mem and reserve DMA mem for tx ring!");
3007 txq->tx_deferred_start = conf->tx_deferred_start;
3008 if (txq->tx_deferred_start && !hns3_dev_get_support(hw, INDEP_TXRX)) {
3009 hns3_warn(hw, "deferred start is not supported.");
3010 txq->tx_deferred_start = false;
3013 tx_entry_len = sizeof(struct hns3_entry) * txq->nb_tx_desc;
3014 txq->sw_ring = rte_zmalloc_socket("hns3 TX sw ring", tx_entry_len,
3015 RTE_CACHE_LINE_SIZE, socket_id);
3016 if (txq->sw_ring == NULL) {
3017 hns3_err(hw, "Failed to allocate memory for tx sw ring!");
3018 hns3_tx_queue_release(txq);
3023 txq->next_to_use = 0;
3024 txq->next_to_clean = 0;
3025 txq->tx_bd_ready = txq->nb_tx_desc - 1;
3026 txq->tx_free_thresh = tx_free_thresh;
3027 txq->tx_rs_thresh = tx_rs_thresh;
3028 txq->free = rte_zmalloc_socket("hns3 TX mbuf free array",
3029 sizeof(struct rte_mbuf *) * txq->tx_rs_thresh,
3030 RTE_CACHE_LINE_SIZE, socket_id);
3032 hns3_err(hw, "failed to allocate tx mbuf free array!");
3033 hns3_tx_queue_release(txq);
3037 txq->port_id = dev->data->port_id;
3039 * For hns3 PF device, if the VLAN mode is HW_SHIFT_AND_DISCARD_MODE,
3040 * the pvid_sw_shift_en in the queue struct should not be changed,
3041 * because PVID-related operations do not need to be processed by PMD.
3042 * For hns3 VF device, whether it needs to process PVID depends
3043 * on the configuration of PF kernel mode netdev driver. And the
3044 * related PF configuration is delivered through the mailbox and finally
3045 * reflected in port_base_vlan_cfg.
3047 if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
3048 txq->pvid_sw_shift_en = hw->port_base_vlan_cfg.state ==
3049 HNS3_PORT_BASE_VLAN_ENABLE;
3051 txq->pvid_sw_shift_en = false;
3052 txq->max_non_tso_bd_num = hw->max_non_tso_bd_num;
3053 txq->configured = true;
3054 txq->io_base = (void *)((char *)hw->io_base +
3055 hns3_get_tqp_reg_offset(idx));
3056 txq->io_tail_reg = (volatile void *)((char *)txq->io_base +
3057 HNS3_RING_TX_TAIL_REG);
3058 txq->min_tx_pkt_len = hw->min_tx_pkt_len;
3059 txq->tso_mode = hw->tso_mode;
3060 txq->udp_cksum_mode = hw->udp_cksum_mode;
3061 txq->mbuf_fast_free_en = !!(dev->data->dev_conf.txmode.offloads &
3062 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE);
3063 memset(&txq->basic_stats, 0, sizeof(struct hns3_tx_basic_stats));
3064 memset(&txq->dfx_stats, 0, sizeof(struct hns3_tx_dfx_stats));
	 * Call hns3_tx_push_queue_init after the io_tail_reg field has been
	 * assigned, because it may overwrite that field.
3070 hns3_tx_push_queue_init(dev, idx, txq);
3072 rte_spinlock_lock(&hw->lock);
3073 dev->data->tx_queues[idx] = txq;
3074 rte_spinlock_unlock(&hw->lock);
3080 hns3_tx_free_useless_buffer(struct hns3_tx_queue *txq)
3082 uint16_t tx_next_clean = txq->next_to_clean;
3083 uint16_t tx_next_use = txq->next_to_use;
3084 struct hns3_entry *tx_entry = &txq->sw_ring[tx_next_clean];
3085 struct hns3_desc *desc = &txq->tx_ring[tx_next_clean];
3088 if (tx_next_use >= tx_next_clean &&
3089 tx_next_use < tx_next_clean + txq->tx_rs_thresh)
3093 * All mbufs can be released only when the VLD bits of all
3094 * descriptors in a batch are cleared.
3096 for (i = 0; i < txq->tx_rs_thresh; i++) {
3097 if (desc[i].tx.tp_fe_sc_vld_ra_ri &
3098 rte_le_to_cpu_16(BIT(HNS3_TXD_VLD_B)))
3102 for (i = 0; i < txq->tx_rs_thresh; i++) {
3103 rte_pktmbuf_free_seg(tx_entry[i].mbuf);
3104 tx_entry[i].mbuf = NULL;
	/* Update the number of available descriptors after the buffers are freed */
3108 txq->tx_bd_ready += txq->tx_rs_thresh;
3109 txq->next_to_clean += txq->tx_rs_thresh;
3110 if (txq->next_to_clean >= txq->nb_tx_desc)
3111 txq->next_to_clean = 0;
3117 hns3_tx_free_required_buffer(struct hns3_tx_queue *txq, uint16_t required_bds)
3119 while (required_bds > txq->tx_bd_ready) {
3120 if (hns3_tx_free_useless_buffer(txq) != 0)
3127 hns3_config_gro(struct hns3_hw *hw, bool en)
3129 struct hns3_cfg_gro_status_cmd *req;
3130 struct hns3_cmd_desc desc;
3133 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GRO_GENERIC_CONFIG, false);
3134 req = (struct hns3_cfg_gro_status_cmd *)desc.data;
3136 req->gro_en = rte_cpu_to_le_16(en ? 1 : 0);
3138 ret = hns3_cmd_send(hw, &desc, 1);
3140 hns3_err(hw, "%s hardware GRO failed, ret = %d",
3141 en ? "enable" : "disable", ret);
3147 hns3_restore_gro_conf(struct hns3_hw *hw)
3153 offloads = hw->data->dev_conf.rxmode.offloads;
3154 gro_en = offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
3155 ret = hns3_config_gro(hw, gro_en);
3157 hns3_err(hw, "restore hardware GRO to %s failed, ret = %d",
3158 gro_en ? "enabled" : "disabled", ret);
3164 hns3_pkt_is_tso(struct rte_mbuf *m)
3166 return (m->tso_segsz != 0 && m->ol_flags & RTE_MBUF_F_TX_TCP_SEG);
3170 hns3_set_tso(struct hns3_desc *desc, uint32_t paylen, struct rte_mbuf *rxm)
3172 if (!hns3_pkt_is_tso(rxm))
3175 if (paylen <= rxm->tso_segsz)
3178 desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(BIT(HNS3_TXD_TSO_B));
3179 desc->tx.mss = rte_cpu_to_le_16(rxm->tso_segsz);
3183 hns3_fill_per_desc(struct hns3_desc *desc, struct rte_mbuf *rxm)
3185 desc->addr = rte_mbuf_data_iova(rxm);
3186 desc->tx.send_size = rte_cpu_to_le_16(rte_pktmbuf_data_len(rxm));
3187 desc->tx.tp_fe_sc_vld_ra_ri |= rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B));
3191 hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc,
3192 struct rte_mbuf *rxm)
3194 uint64_t ol_flags = rxm->ol_flags;
3198 hdr_len = rxm->l2_len + rxm->l3_len + rxm->l4_len;
3199 hdr_len += (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
3200 rxm->outer_l2_len + rxm->outer_l3_len : 0;
3201 paylen = rxm->pkt_len - hdr_len;
3202 desc->tx.paylen_fd_dop_ol4cs |= rte_cpu_to_le_32(paylen);
3203 hns3_set_tso(desc, paylen, rxm);
	 * Currently, the hns3 network engine does not support more than two
	 * layers of VLAN offload in the Tx direction. So when the number of
	 * VLANs carried by the packet (rxm) plus the number of VLANs offloaded
	 * by hardware (such as PVID) exceeds two, the packet is either
	 * discarded or its original VLAN is overwritten by hardware. When the
	 * PF PVID is enabled by calling rte_eth_dev_set_vlan_pvid, or the VF
	 * PVID is enabled by the hns3 PF kernel ether driver, the outer VLAN
	 * tag is always the PVID. To avoid the VLAN in the Tx descriptor being
	 * overwritten by the PVID, the VLAN should be placed in the position
	 * close to the IP header when PVID is enabled.
3217 if (!txq->pvid_sw_shift_en &&
3218 ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
3219 desc->tx.ol_type_vlan_len_msec |=
3220 rte_cpu_to_le_32(BIT(HNS3_TXD_OVLAN_B));
3221 if (ol_flags & RTE_MBUF_F_TX_QINQ)
3222 desc->tx.outer_vlan_tag =
3223 rte_cpu_to_le_16(rxm->vlan_tci_outer);
3225 desc->tx.outer_vlan_tag =
3226 rte_cpu_to_le_16(rxm->vlan_tci);
3229 if (ol_flags & RTE_MBUF_F_TX_QINQ ||
3230 ((ol_flags & RTE_MBUF_F_TX_VLAN) && txq->pvid_sw_shift_en)) {
3231 desc->tx.type_cs_vlan_tso_len |=
3232 rte_cpu_to_le_32(BIT(HNS3_TXD_VLAN_B));
3233 desc->tx.vlan_tag = rte_cpu_to_le_16(rxm->vlan_tci);
3236 if (ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
3237 desc->tx.tp_fe_sc_vld_ra_ri |=
3238 rte_cpu_to_le_16(BIT(HNS3_TXD_TSYN_B));
3242 hns3_tx_alloc_mbufs(struct rte_mempool *mb_pool, uint16_t nb_new_buf,
3243 struct rte_mbuf **alloc_mbuf)
3245 #define MAX_NON_TSO_BD_PER_PKT 18
3246 struct rte_mbuf *pkt_segs[MAX_NON_TSO_BD_PER_PKT];
3249 /* Allocate enough mbufs */
3250 if (rte_mempool_get_bulk(mb_pool, (void **)pkt_segs, nb_new_buf))
3253 for (i = 0; i < nb_new_buf - 1; i++)
3254 pkt_segs[i]->next = pkt_segs[i + 1];
3256 pkt_segs[nb_new_buf - 1]->next = NULL;
3257 pkt_segs[0]->nb_segs = nb_new_buf;
3258 *alloc_mbuf = pkt_segs[0];
3264 hns3_pktmbuf_copy_hdr(struct rte_mbuf *new_pkt, struct rte_mbuf *old_pkt)
3266 new_pkt->ol_flags = old_pkt->ol_flags;
3267 new_pkt->pkt_len = rte_pktmbuf_pkt_len(old_pkt);
3268 new_pkt->outer_l2_len = old_pkt->outer_l2_len;
3269 new_pkt->outer_l3_len = old_pkt->outer_l3_len;
3270 new_pkt->l2_len = old_pkt->l2_len;
3271 new_pkt->l3_len = old_pkt->l3_len;
3272 new_pkt->l4_len = old_pkt->l4_len;
3273 new_pkt->vlan_tci_outer = old_pkt->vlan_tci_outer;
3274 new_pkt->vlan_tci = old_pkt->vlan_tci;
3278 hns3_reassemble_tx_pkts(struct rte_mbuf *tx_pkt, struct rte_mbuf **new_pkt,
3279 uint8_t max_non_tso_bd_num)
3281 struct rte_mempool *mb_pool;
3282 struct rte_mbuf *new_mbuf;
3283 struct rte_mbuf *temp_new;
3284 struct rte_mbuf *temp;
3285 uint16_t last_buf_len;
3286 uint16_t nb_new_buf;
3296 mb_pool = tx_pkt->pool;
3297 buf_size = tx_pkt->buf_len - RTE_PKTMBUF_HEADROOM;
3298 nb_new_buf = (rte_pktmbuf_pkt_len(tx_pkt) - 1) / buf_size + 1;
3299 if (nb_new_buf > max_non_tso_bd_num)
3302 last_buf_len = rte_pktmbuf_pkt_len(tx_pkt) % buf_size;
3303 if (last_buf_len == 0)
3304 last_buf_len = buf_size;
3306 /* Allocate enough mbufs */
3307 ret = hns3_tx_alloc_mbufs(mb_pool, nb_new_buf, &new_mbuf);
3311 /* Copy the original packet content to the new mbufs */
3313 s = rte_pktmbuf_mtod(temp, char *);
3314 len_s = rte_pktmbuf_data_len(temp);
3315 temp_new = new_mbuf;
3316 while (temp != NULL && temp_new != NULL) {
3317 d = rte_pktmbuf_mtod(temp_new, char *);
3318 buf_len = temp_new->next == NULL ? last_buf_len : buf_size;
3322 len = RTE_MIN(len_s, len_d);
3326 len_d = len_d - len;
3327 len_s = len_s - len;
3333 s = rte_pktmbuf_mtod(temp, char *);
3334 len_s = rte_pktmbuf_data_len(temp);
3338 temp_new->data_len = buf_len;
3339 temp_new = temp_new->next;
3341 hns3_pktmbuf_copy_hdr(new_mbuf, tx_pkt);
3343 /* free original mbufs */
3344 rte_pktmbuf_free(tx_pkt);
3346 *new_pkt = new_mbuf;
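	/*
	 * Numeric sketch of the reassembly above (values are illustrative):
	 * with pkt_len = 20000 and buf_size = 2048, nb_new_buf is
	 * (20000 - 1) / 2048 + 1 = 10 and last_buf_len is 20000 % 2048 = 1568,
	 * so the chain is rebuilt as nine full 2048-byte segments plus one
	 * 1568-byte tail segment.
	 */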
3352 hns3_parse_outer_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec)
3354 uint32_t tmp = *ol_type_vlan_len_msec;
3355 uint64_t ol_flags = m->ol_flags;
3357 /* (outer) IP header type */
3358 if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) {
3359 if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM)
3360 tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M,
3361 HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_CSUM);
3363 tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M,
3364 HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_NO_CSUM);
3365 } else if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6) {
3366 tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
	/* OL3 header size, in units of 4 bytes */
3370 tmp |= hns3_gen_field_val(HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
3371 m->outer_l3_len >> HNS3_L3_LEN_UNIT);
3372 *ol_type_vlan_len_msec = tmp;
3376 hns3_parse_inner_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec,
3377 uint32_t *type_cs_vlan_tso_len)
3379 #define HNS3_NVGRE_HLEN 8
3380 uint32_t tmp_outer = *ol_type_vlan_len_msec;
3381 uint32_t tmp_inner = *type_cs_vlan_tso_len;
3382 uint64_t ol_flags = m->ol_flags;
3383 uint16_t inner_l2_len;
3385 switch (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
3386 case RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE:
3387 case RTE_MBUF_F_TX_TUNNEL_GENEVE:
3388 case RTE_MBUF_F_TX_TUNNEL_VXLAN:
3389 /* MAC in UDP tunnelling packet, include VxLAN and GENEVE */
3390 tmp_outer |= hns3_gen_field_val(HNS3_TXD_TUNTYPE_M,
3391 HNS3_TXD_TUNTYPE_S, HNS3_TUN_MAC_IN_UDP);
		 * For a tunnel packet, the l2_len of the mbuf is the sum of the
		 * outer L4 length, the tunnel header length and the inner L2
		 * length. But in the hns3 Tx descriptor, the tunnel header
		 * length is carried in the outer L4 length field. Therefore,
		 * the driver needs to calculate the outer L4 length and the
		 * inner L2 length.
3400 tmp_outer |= hns3_gen_field_val(HNS3_TXD_L4LEN_M,
3402 (uint8_t)RTE_ETHER_VXLAN_HLEN >>
3405 inner_l2_len = m->l2_len - RTE_ETHER_VXLAN_HLEN;
3407 case RTE_MBUF_F_TX_TUNNEL_GRE:
3408 tmp_outer |= hns3_gen_field_val(HNS3_TXD_TUNTYPE_M,
3409 HNS3_TXD_TUNTYPE_S, HNS3_TUN_NVGRE);
		 * For an NVGRE tunnel packet, the outer L4 header is empty, so
		 * only the NVGRE header length is filled into the outer L4
		 * field.
3414 tmp_outer |= hns3_gen_field_val(HNS3_TXD_L4LEN_M,
3416 (uint8_t)HNS3_NVGRE_HLEN >> HNS3_L4_LEN_UNIT);
3418 inner_l2_len = m->l2_len - HNS3_NVGRE_HLEN;
3421 /* For non UDP / GRE tunneling, drop the tunnel packet */
3425 tmp_inner |= hns3_gen_field_val(HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
3426 inner_l2_len >> HNS3_L2_LEN_UNIT);
	/* OL2 header size, in units of 2 bytes */
3428 tmp_outer |= hns3_gen_field_val(HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
3429 m->outer_l2_len >> HNS3_L2_LEN_UNIT);
3431 *type_cs_vlan_tso_len = tmp_inner;
3432 *ol_type_vlan_len_msec = tmp_outer;
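	/*
	 * A worked VXLAN example for the conversion above (header sizes are
	 * the usual ones and only illustrative): the application reports
	 * m->l2_len = 8 (outer UDP) + 8 (VXLAN) + 14 (inner Ethernet) = 30
	 * bytes. RTE_ETHER_VXLAN_HLEN is 16, so the outer L4 length field
	 * receives 16 bytes (in the descriptor's length units) and
	 * inner_l2_len becomes 30 - 16 = 14 bytes.
	 */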
3438 hns3_parse_tunneling_params(struct hns3_tx_queue *txq, struct rte_mbuf *m,
3439 uint16_t tx_desc_id)
3441 struct hns3_desc *tx_ring = txq->tx_ring;
3442 struct hns3_desc *desc = &tx_ring[tx_desc_id];
3443 uint64_t ol_flags = m->ol_flags;
3444 uint32_t tmp_outer = 0;
3445 uint32_t tmp_inner = 0;
	 * In the mbuf, the tunnel header is counted as part of the inner L2
	 * header, but in the hns3 descriptor it belongs to the outer L4, so a
	 * conversion between the two is needed. To avoid calculating it more
	 * than once, the L2 header lengths, both outer and inner, are filled
	 * in while parsing tunnel packets.
3456 if (!(ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)) {
		 * For the non-tunnel type, the tunnel type id is 0, so there is
		 * no need to assign a value to it. Only the inner (normal) L2
		 * header length needs to be filled in.
3462 tmp_inner |= hns3_gen_field_val(HNS3_TXD_L2LEN_M,
3463 HNS3_TXD_L2LEN_S, m->l2_len >> HNS3_L2_LEN_UNIT);
		 * If the outer checksum is not offloaded, the outer lengths may
		 * be filled with 0 and the length of the outer header added to
		 * the inner l2_len instead, which would lead to a checksum
		 * error. So the driver has to calculate the header lengths
		 * itself.
3471 if (unlikely(!(ol_flags &
3472 (RTE_MBUF_F_TX_OUTER_IP_CKSUM | RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
3473 m->outer_l2_len == 0)) {
3474 struct rte_net_hdr_lens hdr_len;
3475 (void)rte_net_get_ptype(m, &hdr_len,
3476 RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
3477 m->outer_l3_len = hdr_len.l3_len;
3478 m->outer_l2_len = hdr_len.l2_len;
3479 m->l2_len = m->l2_len - hdr_len.l2_len - hdr_len.l3_len;
3481 hns3_parse_outer_params(m, &tmp_outer);
3482 ret = hns3_parse_inner_params(m, &tmp_outer, &tmp_inner);
3487 desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(tmp_outer);
3488 desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp_inner);
3489 tmp_ol4cs = ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM ?
3490 BIT(HNS3_TXD_OL4CS_B) : 0;
3491 desc->tx.paylen_fd_dop_ol4cs = rte_cpu_to_le_32(tmp_ol4cs);
3497 hns3_parse_l3_cksum_params(struct rte_mbuf *m, uint32_t *type_cs_vlan_tso_len)
3499 uint64_t ol_flags = m->ol_flags;
3503 tmp = *type_cs_vlan_tso_len;
3504 if (ol_flags & RTE_MBUF_F_TX_IPV4)
3505 l3_type = HNS3_L3T_IPV4;
3506 else if (ol_flags & RTE_MBUF_F_TX_IPV6)
3507 l3_type = HNS3_L3T_IPV6;
3509 l3_type = HNS3_L3T_NONE;
	/* inner (/normal) L3 header size, in units of 4 bytes */
3512 tmp |= hns3_gen_field_val(HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
3513 m->l3_len >> HNS3_L3_LEN_UNIT);
3515 tmp |= hns3_gen_field_val(HNS3_TXD_L3T_M, HNS3_TXD_L3T_S, l3_type);
3517 /* Enable L3 checksum offloads */
3518 if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
3519 tmp |= BIT(HNS3_TXD_L3CS_B);
3520 *type_cs_vlan_tso_len = tmp;
3524 hns3_parse_l4_cksum_params(struct rte_mbuf *m, uint32_t *type_cs_vlan_tso_len)
3526 uint64_t ol_flags = m->ol_flags;
3528 /* Enable L4 checksum offloads */
3529 switch (ol_flags & (RTE_MBUF_F_TX_L4_MASK | RTE_MBUF_F_TX_TCP_SEG)) {
3530 case RTE_MBUF_F_TX_TCP_CKSUM | RTE_MBUF_F_TX_TCP_SEG:
3531 case RTE_MBUF_F_TX_TCP_CKSUM:
3532 case RTE_MBUF_F_TX_TCP_SEG:
3533 tmp = *type_cs_vlan_tso_len;
3534 tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
3537 case RTE_MBUF_F_TX_UDP_CKSUM:
3538 tmp = *type_cs_vlan_tso_len;
3539 tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
3542 case RTE_MBUF_F_TX_SCTP_CKSUM:
3543 tmp = *type_cs_vlan_tso_len;
3544 tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
3550 tmp |= BIT(HNS3_TXD_L4CS_B);
3551 tmp |= hns3_gen_field_val(HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
3552 m->l4_len >> HNS3_L4_LEN_UNIT);
3553 *type_cs_vlan_tso_len = tmp;
3557 hns3_txd_enable_checksum(struct hns3_tx_queue *txq, struct rte_mbuf *m,
3558 uint16_t tx_desc_id)
3560 struct hns3_desc *tx_ring = txq->tx_ring;
3561 struct hns3_desc *desc = &tx_ring[tx_desc_id];
3564 hns3_parse_l3_cksum_params(m, &value);
3565 hns3_parse_l4_cksum_params(m, &value);
3567 desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(value);
3571 hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num,
3572 uint32_t max_non_tso_bd_num)
3574 struct rte_mbuf *m_first = tx_pkts;
3575 struct rte_mbuf *m_last = tx_pkts;
3576 uint32_t tot_len = 0;
	 * The hns3 network engine requires that the sum of the data length of
	 * every 8 consecutive buffers is greater than the MSS. We simplify the
	 * check by ensuring that the first 8 consecutive frags carry at least
	 * the GSO header length plus the MSS, and that every following window
	 * of 7 consecutive frags carries at least the MSS, except for the last
	 * 7 frags.
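	 * For illustration (assuming max_non_tso_bd_num is 8 and tso_segsz is
	 * 1400): a 12-segment TSO packet passes the check only if its first 8
	 * segments carry at least hdr_len + 1400 bytes and every subsequent
	 * window of 7 consecutive segments carries at least 1400 bytes;
	 * otherwise this function returns true and the packet is linearized.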
3587 if (bd_num <= max_non_tso_bd_num)
3590 for (i = 0; m_last && i < max_non_tso_bd_num - 1;
3591 i++, m_last = m_last->next)
3592 tot_len += m_last->data_len;
	/* ensure the total length of the first 8 frags is greater than mss + header */
3598 hdr_len = tx_pkts->l2_len + tx_pkts->l3_len + tx_pkts->l4_len;
3599 hdr_len += (tx_pkts->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
3600 tx_pkts->outer_l2_len + tx_pkts->outer_l3_len : 0;
3601 if (tot_len + m_last->data_len < tx_pkts->tso_segsz + hdr_len)
	 * ensure that the sum of the data length of every 7 consecutive
	 * buffers is greater than the mss, except for the last window.
3608 for (i = 0; m_last && i < bd_num - max_non_tso_bd_num; i++) {
3609 tot_len -= m_first->data_len;
3610 tot_len += m_last->data_len;
3612 if (tot_len < tx_pkts->tso_segsz)
3615 m_first = m_first->next;
3616 m_last = m_last->next;
3623 hns3_outer_ipv4_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags,
3626 struct rte_ipv4_hdr *ipv4_hdr;
3627 ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
3629 if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM)
3630 ipv4_hdr->hdr_checksum = 0;
3631 if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) {
3632 struct rte_udp_hdr *udp_hdr;
		 * If OUTER_UDP_CKSUM is supported, HW can calculate the pseudo
		 * header checksum for TSO packets
3637 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
3639 udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
3640 m->outer_l2_len + m->outer_l3_len);
3641 udp_hdr->dgram_cksum = rte_ipv4_phdr_cksum(ipv4_hdr, ol_flags);
3645 *l4_proto = ipv4_hdr->next_proto_id;
3650 hns3_outer_ipv6_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags,
3653 struct rte_ipv6_hdr *ipv6_hdr;
3654 ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
3656 if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) {
3657 struct rte_udp_hdr *udp_hdr;
		 * If OUTER_UDP_CKSUM is supported, HW can calculate the pseudo
		 * header checksum for TSO packets
3662 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
3664 udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
3665 m->outer_l2_len + m->outer_l3_len);
3666 udp_hdr->dgram_cksum = rte_ipv6_phdr_cksum(ipv6_hdr, ol_flags);
3670 *l4_proto = ipv6_hdr->proto;
3675 hns3_outer_header_cksum_prepare(struct rte_mbuf *m)
3677 uint64_t ol_flags = m->ol_flags;
3678 uint32_t paylen, hdr_len, l4_proto;
3679 struct rte_udp_hdr *udp_hdr;
3681 if (!(ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IPV6)))
3684 if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) {
3685 if (hns3_outer_ipv4_cksum_prepared(m, ol_flags, &l4_proto))
3688 if (hns3_outer_ipv6_cksum_prepared(m, ol_flags, &l4_proto))
3692 /* driver should ensure the outer udp cksum is 0 for TUNNEL TSO */
3693 if (l4_proto == IPPROTO_UDP && (ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
3694 hdr_len = m->l2_len + m->l3_len + m->l4_len;
3695 hdr_len += m->outer_l2_len + m->outer_l3_len;
3696 paylen = m->pkt_len - hdr_len;
3697 if (paylen <= m->tso_segsz)
3699 udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
3702 udp_hdr->dgram_cksum = 0;
3707 hns3_check_tso_pkt_valid(struct rte_mbuf *m)
3709 uint32_t tmp_data_len_sum = 0;
3710 uint16_t nb_buf = m->nb_segs;
3711 uint32_t paylen, hdr_len;
3712 struct rte_mbuf *m_seg;
3715 if (nb_buf > HNS3_MAX_TSO_BD_PER_PKT)
3718 hdr_len = m->l2_len + m->l3_len + m->l4_len;
3719 hdr_len += (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
3720 m->outer_l2_len + m->outer_l3_len : 0;
3721 if (hdr_len > HNS3_MAX_TSO_HDR_SIZE)
3724 paylen = m->pkt_len - hdr_len;
3725 if (paylen > HNS3_MAX_BD_PAYLEN)
	 * The TSO header (including the outer and inner L2, L3 and L4 headers)
	 * should be provided by at most three descriptors in the hns3 network
	 * engine.
3734 for (i = 0; m_seg != NULL && i < HNS3_MAX_TSO_HDR_BD_NUM && i < nb_buf;
3735 i++, m_seg = m_seg->next) {
3736 tmp_data_len_sum += m_seg->data_len;
3739 if (hdr_len > tmp_data_len_sum)
3745 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3747 hns3_vld_vlan_chk(struct hns3_tx_queue *txq, struct rte_mbuf *m)
3749 struct rte_ether_hdr *eh;
3750 struct rte_vlan_hdr *vh;
3752 if (!txq->pvid_sw_shift_en)
	 * Due to hardware limitations, the hns3 network engine only supports
	 * two layers of VLAN hardware offload in the Tx direction, so when
	 * PVID is enabled, QinQ insertion is no longer supported.
	 * And when PVID is enabled, in the following two cases:
	 *  i) packets with more than two VLAN tags,
	 *  ii) packets with one VLAN tag while hardware VLAN insertion is
	 *      enabled,
	 * the packets will be regarded as abnormal packets and discarded by
3764 * hardware in Tx direction. For debugging purposes, a validation check
3765 * for these types of packets is added to the '.tx_pkt_prepare' ops
3766 * implementation function named hns3_prep_pkts to inform users that
3767 * these packets will be discarded.
3769 if (m->ol_flags & RTE_MBUF_F_TX_QINQ)
3772 eh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
3773 if (eh->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
3774 if (m->ol_flags & RTE_MBUF_F_TX_VLAN)
3777 /* Ensure the incoming packet is not a QinQ packet */
3778 vh = (struct rte_vlan_hdr *)(eh + 1);
3779 if (vh->eth_proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN))
3788 hns3_udp_cksum_help(struct rte_mbuf *m)
3790 uint64_t ol_flags = m->ol_flags;
3794 if (ol_flags & RTE_MBUF_F_TX_IPV4) {
3795 struct rte_ipv4_hdr *ipv4_hdr = rte_pktmbuf_mtod_offset(m,
3796 struct rte_ipv4_hdr *, m->l2_len);
3797 l4_len = rte_be_to_cpu_16(ipv4_hdr->total_length) - m->l3_len;
3799 struct rte_ipv6_hdr *ipv6_hdr = rte_pktmbuf_mtod_offset(m,
3800 struct rte_ipv6_hdr *, m->l2_len);
3801 l4_len = rte_be_to_cpu_16(ipv6_hdr->payload_len);
3804 rte_raw_cksum_mbuf(m, m->l2_len + m->l3_len, l4_len, &cksum);
	 * RFC 768: if the computed checksum is zero for UDP, it is transmitted
	 * as all ones (the equivalent in one's complement arithmetic).
3814 return (uint16_t)cksum;
3818 hns3_validate_tunnel_cksum(struct hns3_tx_queue *tx_queue, struct rte_mbuf *m)
3820 uint64_t ol_flags = m->ol_flags;
3821 struct rte_udp_hdr *udp_hdr;
3824 if (tx_queue->udp_cksum_mode == HNS3_SPECIAL_PORT_HW_CKSUM_MODE ||
3825 ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK ||
3826 (ol_flags & RTE_MBUF_F_TX_L4_MASK) != RTE_MBUF_F_TX_UDP_CKSUM)
	 * A UDP packet whose dst_port matches the VXLAN/VXLAN-GPE/GENEVE
	 * default port is recognized as a tunnel packet by the HW. In this
	 * case, if UDP checksum offload is requested but the tunnel mask has
	 * not been set, the checksum would be computed over the wrong header
	 * length, so the driver has to complete the checksum in software to
	 * avoid a checksum error.
3835 udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
3836 m->l2_len + m->l3_len);
3837 dst_port = rte_be_to_cpu_16(udp_hdr->dst_port);
3839 case RTE_VXLAN_DEFAULT_PORT:
3840 case RTE_VXLAN_GPE_DEFAULT_PORT:
3841 case RTE_GENEVE_DEFAULT_PORT:
3842 udp_hdr->dgram_cksum = hns3_udp_cksum_help(m);
3843 m->ol_flags = ol_flags & ~RTE_MBUF_F_TX_L4_MASK;
3851 hns3_prep_pkt_proc(struct hns3_tx_queue *tx_queue, struct rte_mbuf *m)
3855 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3856 ret = rte_validate_tx_offload(m);
3862 ret = hns3_vld_vlan_chk(tx_queue, m);
3868 if (hns3_pkt_is_tso(m)) {
3869 if (hns3_pkt_need_linearized(m, m->nb_segs,
3870 tx_queue->max_non_tso_bd_num) ||
3871 hns3_check_tso_pkt_valid(m)) {
3876 if (tx_queue->tso_mode != HNS3_TSO_SW_CAL_PSEUDO_H_CSUM) {
			 * (tso mode != HNS3_TSO_SW_CAL_PSEUDO_H_CSUM) means the
			 * hardware can recalculate the TCP pseudo header
			 * checksum of packets that need TSO, so the driver
			 * software does not need to recalculate it.
3883 hns3_outer_header_cksum_prepare(m);
3888 ret = rte_net_intel_cksum_prepare(m);
3894 if (!hns3_validate_tunnel_cksum(tx_queue, m))
3897 hns3_outer_header_cksum_prepare(m);
3903 hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
3909 for (i = 0; i < nb_pkts; i++) {
3911 if (hns3_prep_pkt_proc(tx_queue, m))
3919 hns3_parse_cksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
3922 struct hns3_desc *tx_ring = txq->tx_ring;
3923 struct hns3_desc *desc = &tx_ring[tx_desc_id];
3925 /* Enable checksum offloading */
3926 if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK) {
3927 /* Fill in tunneling parameters if necessary */
3928 if (hns3_parse_tunneling_params(txq, m, tx_desc_id)) {
3929 txq->dfx_stats.unsupported_tunnel_pkt_cnt++;
3933 hns3_txd_enable_checksum(txq, m, tx_desc_id);
3935 /* clear the control bit */
3936 desc->tx.type_cs_vlan_tso_len = 0;
3937 desc->tx.ol_type_vlan_len_msec = 0;
3944 hns3_check_non_tso_pkt(uint16_t nb_buf, struct rte_mbuf **m_seg,
3945 struct rte_mbuf *tx_pkt, struct hns3_tx_queue *txq)
3947 uint8_t max_non_tso_bd_num;
3948 struct rte_mbuf *new_pkt;
3951 if (hns3_pkt_is_tso(*m_seg))
	 * If the packet length is greater than HNS3_MAX_FRAME_LEN, which is
	 * the maximum the driver supports, the packet will not be transmitted.
3958 if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) > HNS3_MAX_FRAME_LEN)) {
3959 txq->dfx_stats.over_length_pkt_cnt++;
3963 max_non_tso_bd_num = txq->max_non_tso_bd_num;
3964 if (unlikely(nb_buf > max_non_tso_bd_num)) {
3965 txq->dfx_stats.exceed_limit_bd_pkt_cnt++;
3966 ret = hns3_reassemble_tx_pkts(tx_pkt, &new_pkt,
3967 max_non_tso_bd_num);
3969 txq->dfx_stats.exceed_limit_bd_reassem_fail++;
3979 hns3_tx_free_buffer_simple(struct hns3_tx_queue *txq)
3981 struct hns3_entry *tx_entry;
3982 struct hns3_desc *desc;
3983 uint16_t tx_next_clean;
3987 if (HNS3_GET_TX_QUEUE_PEND_BD_NUM(txq) < txq->tx_rs_thresh)
3991 * All mbufs can be released only when the VLD bits of all
3992 * descriptors in a batch are cleared.
3994 tx_next_clean = (txq->next_to_clean + txq->tx_rs_thresh - 1) %
3996 desc = &txq->tx_ring[tx_next_clean];
3997 for (i = 0; i < txq->tx_rs_thresh; i++) {
3998 if (rte_le_to_cpu_16(desc->tx.tp_fe_sc_vld_ra_ri) &
3999 BIT(HNS3_TXD_VLD_B))
4004 tx_entry = &txq->sw_ring[txq->next_to_clean];
4006 if (txq->mbuf_fast_free_en) {
4007 rte_mempool_put_bulk(tx_entry->mbuf->pool,
4008 (void **)tx_entry, txq->tx_rs_thresh);
4009 for (i = 0; i < txq->tx_rs_thresh; i++)
4010 tx_entry[i].mbuf = NULL;
4014 for (i = 0; i < txq->tx_rs_thresh; i++)
4015 rte_prefetch0((tx_entry + i)->mbuf);
4016 for (i = 0; i < txq->tx_rs_thresh; i++, tx_entry++) {
4017 rte_mempool_put(tx_entry->mbuf->pool, tx_entry->mbuf);
4018 tx_entry->mbuf = NULL;
4022 txq->next_to_clean = (tx_next_clean + 1) % txq->nb_tx_desc;
4023 txq->tx_bd_ready += txq->tx_rs_thresh;
4028 hns3_tx_backup_1mbuf(struct hns3_entry *tx_entry, struct rte_mbuf **pkts)
4030 tx_entry->mbuf = pkts[0];
4034 hns3_tx_backup_4mbuf(struct hns3_entry *tx_entry, struct rte_mbuf **pkts)
4036 hns3_tx_backup_1mbuf(&tx_entry[0], &pkts[0]);
4037 hns3_tx_backup_1mbuf(&tx_entry[1], &pkts[1]);
4038 hns3_tx_backup_1mbuf(&tx_entry[2], &pkts[2]);
4039 hns3_tx_backup_1mbuf(&tx_entry[3], &pkts[3]);
4043 hns3_tx_setup_4bd(struct hns3_desc *txdp, struct rte_mbuf **pkts)
4045 #define PER_LOOP_NUM 4
4046 uint16_t bd_flag = BIT(HNS3_TXD_VLD_B) | BIT(HNS3_TXD_FE_B);
4050 for (i = 0; i < PER_LOOP_NUM; i++, txdp++, pkts++) {
4051 dma_addr = rte_mbuf_data_iova(*pkts);
4052 txdp->addr = rte_cpu_to_le_64(dma_addr);
4053 txdp->tx.send_size = rte_cpu_to_le_16((*pkts)->data_len);
4054 txdp->tx.paylen_fd_dop_ol4cs = 0;
4055 txdp->tx.type_cs_vlan_tso_len = 0;
4056 txdp->tx.ol_type_vlan_len_msec = 0;
4057 if (unlikely((*pkts)->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST))
4058 bd_flag |= BIT(HNS3_TXD_TSYN_B);
4059 txdp->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(bd_flag);
4064 hns3_tx_setup_1bd(struct hns3_desc *txdp, struct rte_mbuf **pkts)
4066 uint16_t bd_flag = BIT(HNS3_TXD_VLD_B) | BIT(HNS3_TXD_FE_B);
4069 dma_addr = rte_mbuf_data_iova(*pkts);
4070 txdp->addr = rte_cpu_to_le_64(dma_addr);
4071 txdp->tx.send_size = rte_cpu_to_le_16((*pkts)->data_len);
4072 txdp->tx.paylen_fd_dop_ol4cs = 0;
4073 txdp->tx.type_cs_vlan_tso_len = 0;
4074 txdp->tx.ol_type_vlan_len_msec = 0;
4075 if (unlikely((*pkts)->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST))
4076 bd_flag |= BIT(HNS3_TXD_TSYN_B);
4077 txdp->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(bd_flag);
4081 hns3_tx_fill_hw_ring(struct hns3_tx_queue *txq,
4082 struct rte_mbuf **pkts,
4085 #define PER_LOOP_NUM 4
4086 #define PER_LOOP_MASK (PER_LOOP_NUM - 1)
4087 struct hns3_desc *txdp = &txq->tx_ring[txq->next_to_use];
4088 struct hns3_entry *tx_entry = &txq->sw_ring[txq->next_to_use];
4089 const uint32_t mainpart = (nb_pkts & ((uint32_t)~PER_LOOP_MASK));
4090 const uint32_t leftover = (nb_pkts & ((uint32_t)PER_LOOP_MASK));
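	/*
	 * Illustration of the split above: with nb_pkts = 10, mainpart is
	 * 10 & ~3 = 8 packets handled four descriptors at a time, and
	 * leftover is 10 & 3 = 2 packets handled one descriptor at a time.
	 */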
4093 for (i = 0; i < mainpart; i += PER_LOOP_NUM) {
4094 hns3_tx_backup_4mbuf(tx_entry + i, pkts + i);
4095 hns3_tx_setup_4bd(txdp + i, pkts + i);
4097 /* Increment bytes counter */
4099 for (j = 0; j < PER_LOOP_NUM; j++)
4100 txq->basic_stats.bytes += pkts[i + j]->pkt_len;
4102 if (unlikely(leftover > 0)) {
4103 for (i = 0; i < leftover; i++) {
4104 hns3_tx_backup_1mbuf(tx_entry + mainpart + i,
4105 pkts + mainpart + i);
4106 hns3_tx_setup_1bd(txdp + mainpart + i,
4107 pkts + mainpart + i);
4109 /* Increment bytes counter */
4110 txq->basic_stats.bytes += pkts[mainpart + i]->pkt_len;
4116 hns3_xmit_pkts_simple(void *tx_queue,
4117 struct rte_mbuf **tx_pkts,
4120 struct hns3_tx_queue *txq = tx_queue;
4123 hns3_tx_free_buffer_simple(txq);
4125 nb_pkts = RTE_MIN(txq->tx_bd_ready, nb_pkts);
4126 if (unlikely(nb_pkts == 0)) {
4127 if (txq->tx_bd_ready == 0)
4128 txq->dfx_stats.queue_full_cnt++;
4132 txq->tx_bd_ready -= nb_pkts;
4133 if (txq->next_to_use + nb_pkts > txq->nb_tx_desc) {
4134 nb_tx = txq->nb_tx_desc - txq->next_to_use;
4135 hns3_tx_fill_hw_ring(txq, tx_pkts, nb_tx);
4136 txq->next_to_use = 0;
4139 hns3_tx_fill_hw_ring(txq, tx_pkts + nb_tx, nb_pkts - nb_tx);
4140 txq->next_to_use += nb_pkts - nb_tx;
4142 hns3_write_txq_tail_reg(txq, nb_pkts);
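	/*
	 * Ring wrap illustration for the two fill calls above (example values
	 * only): with nb_tx_desc = 512, next_to_use = 510 and nb_pkts = 5, the
	 * first call fills descriptors 510-511 (nb_tx = 2), next_to_use wraps
	 * to 0, the second call fills descriptors 0-2, and the tail register
	 * is written once with all 5 packets.
	 */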
4148 hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
4150 struct hns3_tx_queue *txq = tx_queue;
4151 struct hns3_entry *tx_bak_pkt;
4152 struct hns3_desc *tx_ring;
4153 struct rte_mbuf *tx_pkt;
4154 struct rte_mbuf *m_seg;
4155 struct hns3_desc *desc;
4156 uint32_t nb_hold = 0;
4157 uint16_t tx_next_use;
4158 uint16_t tx_pkt_num;
4164 if (txq->tx_bd_ready < txq->tx_free_thresh)
4165 (void)hns3_tx_free_useless_buffer(txq);
4167 tx_next_use = txq->next_to_use;
4168 tx_bd_max = txq->nb_tx_desc;
4169 tx_pkt_num = nb_pkts;
4170 tx_ring = txq->tx_ring;
4173 tx_bak_pkt = &txq->sw_ring[tx_next_use];
4174 for (nb_tx = 0; nb_tx < tx_pkt_num; nb_tx++) {
4175 tx_pkt = *tx_pkts++;
4177 nb_buf = tx_pkt->nb_segs;
4179 if (nb_buf > txq->tx_bd_ready) {
			/* Try to release just the required number of mbufs, and
			 * avoid releasing all of them at once, since freeing
			 * them all would take a long time and may cause jitter.
4184 if (hns3_tx_free_required_buffer(txq, nb_buf) != 0) {
4185 txq->dfx_stats.queue_full_cnt++;
		 * If the packet length is less than the minimum packet length
		 * supported by hardware in the Tx direction, the driver needs
		 * to pad it to avoid errors.
4195 if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) <
4196 txq->min_tx_pkt_len)) {
4200 add_len = txq->min_tx_pkt_len -
4201 rte_pktmbuf_pkt_len(tx_pkt);
4202 appended = rte_pktmbuf_append(tx_pkt, add_len);
4203 if (appended == NULL) {
4204 txq->dfx_stats.pkt_padding_fail_cnt++;
4208 memset(appended, 0, add_len);
4213 if (hns3_check_non_tso_pkt(nb_buf, &m_seg, tx_pkt, txq))
4216 if (hns3_parse_cksum(txq, tx_next_use, m_seg))
4220 desc = &tx_ring[tx_next_use];
		 * If the packet is divided into multiple Tx Buffer Descriptors,
		 * the vlan, paylen and tso fields only need to be filled in the
		 * first Tx Buffer Descriptor.
4227 hns3_fill_first_desc(txq, desc, m_seg);
4230 desc = &tx_ring[tx_next_use];
4232 * Fill valid bits, DMA address and data length for each
4233 * Tx Buffer Descriptor.
4235 hns3_fill_per_desc(desc, m_seg);
4236 tx_bak_pkt->mbuf = m_seg;
4237 m_seg = m_seg->next;
4240 if (tx_next_use >= tx_bd_max) {
4242 tx_bak_pkt = txq->sw_ring;
4246 } while (m_seg != NULL);
4248 /* Add end flag for the last Tx Buffer Descriptor */
4249 desc->tx.tp_fe_sc_vld_ra_ri |=
4250 rte_cpu_to_le_16(BIT(HNS3_TXD_FE_B));
4252 /* Increment bytes counter */
4253 txq->basic_stats.bytes += tx_pkt->pkt_len;
4255 txq->next_to_use = tx_next_use;
4256 txq->tx_bd_ready -= i;
4262 hns3_write_txq_tail_reg(txq, nb_hold);
4268 hns3_tx_check_vec_support(__rte_unused struct rte_eth_dev *dev)
4274 hns3_xmit_pkts_vec(__rte_unused void *tx_queue,
4275 __rte_unused struct rte_mbuf **tx_pkts,
4276 __rte_unused uint16_t nb_pkts)
4282 hns3_xmit_pkts_vec_sve(void __rte_unused * tx_queue,
4283 struct rte_mbuf __rte_unused **tx_pkts,
4284 uint16_t __rte_unused nb_pkts)
4290 hns3_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
4291 struct rte_eth_burst_mode *mode)
4293 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
4294 const char *info = NULL;
4296 if (pkt_burst == hns3_xmit_pkts_simple)
4297 info = "Scalar Simple";
4298 else if (pkt_burst == hns3_xmit_pkts)
4300 else if (pkt_burst == hns3_xmit_pkts_vec)
4301 info = "Vector Neon";
4302 else if (pkt_burst == hns3_xmit_pkts_vec_sve)
4303 info = "Vector Sve";
4308 snprintf(mode->info, sizeof(mode->info), "%s", info);
4314 hns3_tx_check_simple_support(struct rte_eth_dev *dev)
4316 uint64_t offloads = dev->data->dev_conf.txmode.offloads;
4318 return (offloads == (offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE));
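/*
 * Small illustration of the test above: offloads equal to
 * RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE (or zero) keeps the simple Tx path
 * allowed, while adding any other bit, e.g. RTE_ETH_TX_OFFLOAD_TCP_CKSUM,
 * makes the comparison fail and disallows it.
 */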
4322 hns3_get_tx_prep_needed(struct rte_eth_dev *dev)
4324 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
	/* always perform tx_prepare when debugging */
4329 #define HNS3_DEV_TX_CSKUM_TSO_OFFLOAD_MASK (\
4330 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
4331 RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
4332 RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
4333 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | \
4334 RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
4335 RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM | \
4336 RTE_ETH_TX_OFFLOAD_TCP_TSO | \
4337 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \
4338 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | \
4339 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO)
4341 uint64_t tx_offload = dev->data->dev_conf.txmode.offloads;
4342 if (tx_offload & HNS3_DEV_TX_CSKUM_TSO_OFFLOAD_MASK)
4350 hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep)
4352 struct hns3_adapter *hns = dev->data->dev_private;
4353 bool vec_allowed, sve_allowed, simple_allowed;
4354 bool vec_support, tx_prepare_needed;
4356 vec_support = hns3_tx_check_vec_support(dev) == 0;
4357 vec_allowed = vec_support && hns3_get_default_vec_support();
4358 sve_allowed = vec_support && hns3_get_sve_support();
4359 simple_allowed = hns3_tx_check_simple_support(dev);
4360 tx_prepare_needed = hns3_get_tx_prep_needed(dev);
4364 if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_VEC && vec_allowed)
4365 return hns3_xmit_pkts_vec;
4366 if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_SVE && sve_allowed)
4367 return hns3_xmit_pkts_vec_sve;
4368 if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_SIMPLE && simple_allowed)
4369 return hns3_xmit_pkts_simple;
4370 if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_COMMON) {
4371 if (tx_prepare_needed)
4372 *prep = hns3_prep_pkts;
4373 return hns3_xmit_pkts;
4377 return hns3_xmit_pkts_vec;
4379 return hns3_xmit_pkts_simple;
4381 if (tx_prepare_needed)
4382 *prep = hns3_prep_pkts;
4383 return hns3_xmit_pkts;
4387 hns3_dummy_rxtx_burst(void *dpdk_txq __rte_unused,
4388 struct rte_mbuf **pkts __rte_unused,
4389 uint16_t pkts_n __rte_unused)
4395 hns3_trace_rxtx_function(struct rte_eth_dev *dev)
4397 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4398 struct rte_eth_burst_mode rx_mode;
4399 struct rte_eth_burst_mode tx_mode;
4401 memset(&rx_mode, 0, sizeof(rx_mode));
4402 memset(&tx_mode, 0, sizeof(tx_mode));
4403 (void)hns3_rx_burst_mode_get(dev, 0, &rx_mode);
4404 (void)hns3_tx_burst_mode_get(dev, 0, &tx_mode);
4406 hns3_dbg(hw, "using rx_pkt_burst: %s, tx_pkt_burst: %s.",
4407 rx_mode.info, tx_mode.info);
4411 hns3_eth_dev_fp_ops_config(const struct rte_eth_dev *dev)
4413 struct rte_eth_fp_ops *fpo = rte_eth_fp_ops;
4414 uint16_t port_id = dev->data->port_id;
4416 fpo[port_id].rx_pkt_burst = dev->rx_pkt_burst;
4417 fpo[port_id].tx_pkt_burst = dev->tx_pkt_burst;
4418 fpo[port_id].tx_pkt_prepare = dev->tx_pkt_prepare;
4419 fpo[port_id].rx_descriptor_status = dev->rx_descriptor_status;
4420 fpo[port_id].tx_descriptor_status = dev->tx_descriptor_status;
4424 hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
4426 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
4427 struct hns3_adapter *hns = eth_dev->data->dev_private;
4428 eth_tx_prep_t prep = NULL;
4430 if (hns->hw.adapter_state == HNS3_NIC_STARTED &&
4431 __atomic_load_n(&hns->hw.reset.resetting, __ATOMIC_RELAXED) == 0) {
4432 eth_dev->rx_pkt_burst = hns3_get_rx_function(eth_dev);
4433 eth_dev->rx_descriptor_status = hns3_dev_rx_descriptor_status;
4434 eth_dev->tx_pkt_burst = hw->set_link_down ?
4435 hns3_dummy_rxtx_burst :
4436 hns3_get_tx_function(eth_dev, &prep);
4437 eth_dev->tx_pkt_prepare = prep;
4438 eth_dev->tx_descriptor_status = hns3_dev_tx_descriptor_status;
4439 hns3_trace_rxtx_function(eth_dev);
4441 eth_dev->rx_pkt_burst = hns3_dummy_rxtx_burst;
4442 eth_dev->tx_pkt_burst = hns3_dummy_rxtx_burst;
4443 eth_dev->tx_pkt_prepare = NULL;
4446 hns3_eth_dev_fp_ops_config(eth_dev);
4450 hns3_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
4451 struct rte_eth_rxq_info *qinfo)
4453 struct hns3_rx_queue *rxq = dev->data->rx_queues[queue_id];
4455 qinfo->mp = rxq->mb_pool;
4456 qinfo->nb_desc = rxq->nb_rx_desc;
4457 qinfo->scattered_rx = dev->data->scattered_rx;
4458 /* Report the HW Rx buffer length to user */
4459 qinfo->rx_buf_size = rxq->rx_buf_len;
	 * If there are no available Rx buffer descriptors, incoming packets
	 * are always dropped by the hns3 network engine hardware.
4465 qinfo->conf.rx_drop_en = 1;
4466 qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
4467 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
4468 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;

void
hns3_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		  struct rte_eth_txq_info *qinfo)
{
	struct hns3_tx_queue *txq = dev->data->tx_queues[queue_id];

	qinfo->nb_desc = txq->nb_tx_desc;
	qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
	qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}

int
hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	int ret;

	if (!hns3_dev_get_support(hw, INDEP_TXRX))
		return -ENOTSUP;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_reset_queue(hw, rx_queue_id, HNS3_RING_TYPE_RX);
	if (ret) {
		hns3_err(hw, "fail to reset Rx queue %u, ret = %d.",
			 rx_queue_id, ret);
		rte_spinlock_unlock(&hw->lock);
		return ret;
	}

	ret = hns3_init_rxq(hns, rx_queue_id);
	if (ret) {
		hns3_err(hw, "fail to init Rx queue %u, ret = %d.",
			 rx_queue_id, ret);
		rte_spinlock_unlock(&hw->lock);
		return ret;
	}

	hns3_enable_rxq(rxq, true);
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	rte_spinlock_unlock(&hw->lock);

	return ret;
}
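
/*
 * Reset the software state of an Rx queue (ring indexes, pending segment
 * pointers and the descriptor ring contents) so that the queue can be
 * restarted cleanly after its mbufs have been released.
 */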
static void
hns3_reset_sw_rxq(struct hns3_rx_queue *rxq)
{
	rxq->next_to_use = 0;
	rxq->rx_rearm_start = 0;
	rxq->rx_free_hold = 0;
	rxq->rx_rearm_nb = 0;
	rxq->pkt_first_seg = NULL;
	rxq->pkt_last_seg = NULL;
	memset(&rxq->rx_ring[0], 0, rxq->nb_rx_desc * sizeof(struct hns3_desc));
	hns3_rxq_vec_setup(rxq);
}

int
hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];

	if (!hns3_dev_get_support(hw, INDEP_TXRX))
		return -ENOTSUP;

	rte_spinlock_lock(&hw->lock);
	hns3_enable_rxq(rxq, false);

	hns3_rx_queue_release_mbufs(rxq);

	hns3_reset_sw_rxq(rxq);
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	rte_spinlock_unlock(&hw->lock);

	return 0;
}

int
hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
	int ret;

	if (!hns3_dev_get_support(hw, INDEP_TXRX))
		return -ENOTSUP;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_reset_queue(hw, tx_queue_id, HNS3_RING_TYPE_TX);
	if (ret) {
		hns3_err(hw, "fail to reset Tx queue %u, ret = %d.",
			 tx_queue_id, ret);
		rte_spinlock_unlock(&hw->lock);
		return ret;
	}

	hns3_init_txq(txq);
	hns3_enable_txq(txq, true);
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	rte_spinlock_unlock(&hw->lock);

	return ret;
}

int
hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id];

	if (!hns3_dev_get_support(hw, INDEP_TXRX))
		return -ENOTSUP;

	rte_spinlock_lock(&hw->lock);
	hns3_enable_txq(txq, false);
	hns3_tx_queue_release_mbufs(txq);
	/*
	 * All the mbufs in sw_ring are released and all the pointers in
	 * sw_ring are set to NULL. If this queue is still called by the upper
	 * layer, residual software state of this txq may cause the pointers
	 * in sw_ring that have already been set to NULL to be freed again.
	 * To avoid this, reinitialize the txq.
	 */
	hns3_init_txq(txq);
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	rte_spinlock_unlock(&hw->lock);

	return 0;
}
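
/*
 * Walk the Tx ring in steps of tx_rs_thresh descriptors, freeing completed
 * mbufs until roughly free_cnt descriptors have been covered or no further
 * buffers can be freed; the return value is capped at free_cnt.
 */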
static int
hns3_tx_done_cleanup_full(struct hns3_tx_queue *txq, uint32_t free_cnt)
{
	uint16_t round_free_cnt;
	uint32_t idx;

	if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
		free_cnt = txq->nb_tx_desc;

	if (txq->tx_rs_thresh == 0)
		return 0;

	round_free_cnt = roundup(free_cnt, txq->tx_rs_thresh);
	for (idx = 0; idx < round_free_cnt; idx += txq->tx_rs_thresh) {
		if (hns3_tx_free_useless_buffer(txq) != 0)
			break;
	}

	return RTE_MIN(idx, free_cnt);
}

int
hns3_tx_done_cleanup(void *txq, uint32_t free_cnt)
{
	struct hns3_tx_queue *q = (struct hns3_tx_queue *)txq;
	struct rte_eth_dev *dev = &rte_eth_devices[q->port_id];

	if (dev->tx_pkt_burst == hns3_xmit_pkts)
		return hns3_tx_done_cleanup_full(q, free_cnt);
	else if (dev->tx_pkt_burst == hns3_dummy_rxtx_burst)
		return 0;
	else
		return -ENOTSUP;
}
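
/*
 * Report the state of the Rx descriptor 'offset' entries after the next one
 * the driver will process: descriptors still held by the driver are reported
 * as unavailable, otherwise the valid bit decides between available and done.
 */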
int
hns3_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
	volatile struct hns3_desc *rxdp;
	struct hns3_rx_queue *rxq;
	struct rte_eth_dev *dev;
	uint32_t bd_base_info;
	uint16_t desc_id;

	rxq = (struct hns3_rx_queue *)rx_queue;
	if (offset >= rxq->nb_rx_desc)
		return -EINVAL;

	desc_id = (rxq->next_to_use + offset) % rxq->nb_rx_desc;
	rxdp = &rxq->rx_ring[desc_id];
	bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
	dev = &rte_eth_devices[rxq->port_id];
	if (dev->rx_pkt_burst == hns3_recv_pkts_simple ||
	    dev->rx_pkt_burst == hns3_recv_scattered_pkts) {
		if (offset >= rxq->nb_rx_desc - rxq->rx_free_hold)
			return RTE_ETH_RX_DESC_UNAVAIL;
	} else if (dev->rx_pkt_burst == hns3_recv_pkts_vec ||
		   dev->rx_pkt_burst == hns3_recv_pkts_vec_sve) {
		if (offset >= rxq->nb_rx_desc - rxq->rx_rearm_nb)
			return RTE_ETH_RX_DESC_UNAVAIL;
	} else {
		return RTE_ETH_RX_DESC_UNAVAIL;
	}

	if (!(bd_base_info & BIT(HNS3_RXD_VLD_B)))
		return RTE_ETH_RX_DESC_AVAIL;

	return RTE_ETH_RX_DESC_DONE;
}

int
hns3_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
	volatile struct hns3_desc *txdp;
	struct hns3_tx_queue *txq;
	struct rte_eth_dev *dev;
	uint16_t desc_id;

	txq = (struct hns3_tx_queue *)tx_queue;
	if (offset >= txq->nb_tx_desc)
		return -EINVAL;

	dev = &rte_eth_devices[txq->port_id];
	if (dev->tx_pkt_burst != hns3_xmit_pkts_simple &&
	    dev->tx_pkt_burst != hns3_xmit_pkts &&
	    dev->tx_pkt_burst != hns3_xmit_pkts_vec_sve &&
	    dev->tx_pkt_burst != hns3_xmit_pkts_vec)
		return RTE_ETH_TX_DESC_UNAVAIL;

	desc_id = (txq->next_to_use + offset) % txq->nb_tx_desc;
	txdp = &txq->tx_ring[desc_id];
	if (txdp->tx.tp_fe_sc_vld_ra_ri & rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B)))
		return RTE_ETH_TX_DESC_FULL;

	return RTE_ETH_TX_DESC_DONE;
}
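
/*
 * Return the number of used descriptors in the Rx ring: the BD count read
 * from the queue's FBDNUM register minus the BDs the driver has already
 * processed but not yet handed back to the hardware.
 */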
uint32_t
hns3_rx_queue_count(void *rx_queue)
{
	/*
	 * Number of BDs that have been processed by the driver
	 * but have not been notified to the hardware.
	 */
	uint32_t driver_hold_bd_num;
	struct hns3_rx_queue *rxq;
	const struct rte_eth_dev *dev;
	uint32_t fbd_num;

	rxq = rx_queue;
	dev = &rte_eth_devices[rxq->port_id];

	fbd_num = hns3_read_dev(rxq, HNS3_RING_RX_FBDNUM_REG);
	if (dev->rx_pkt_burst == hns3_recv_pkts_vec ||
	    dev->rx_pkt_burst == hns3_recv_pkts_vec_sve)
		driver_hold_bd_num = rxq->rx_rearm_nb;
	else
		driver_hold_bd_num = rxq->rx_free_hold;

	if (fbd_num <= driver_hold_bd_num)
		return 0;
	else
		return fbd_num - driver_hold_bd_num;
}

void
hns3_enable_rxd_adv_layout(struct hns3_hw *hw)
{
	/* If the hardware supports the RXD advanced layout, the driver enables it. */
	if (hns3_dev_get_support(hw, RXD_ADV_LAYOUT))
		hns3_write_dev(hw, HNS3_RXD_ADV_LAYOUT_EN_REG, 1);
}
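
/*
 * Switch the port to the dummy Tx burst function and propagate the change to
 * secondary processes; the delay gives Tx bursts that may still be running
 * on the old function time to finish.
 */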
void
hns3_stop_tx_datapath(struct rte_eth_dev *dev)
{
	dev->tx_pkt_burst = hns3_dummy_rxtx_burst;
	dev->tx_pkt_prepare = NULL;
	hns3_eth_dev_fp_ops_config(dev);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return;

	rte_wmb();
	/* Disable the Tx datapath on secondary processes. */
	hns3_mp_req_stop_tx(dev);
	/* Prevent crashes when queues are still in use. */
	rte_delay_ms(dev->data->nb_tx_queues);
}

void
hns3_start_tx_datapath(struct rte_eth_dev *dev)
{
	eth_tx_prep_t prep = NULL;

	dev->tx_pkt_burst = hns3_get_tx_function(dev, &prep);
	dev->tx_pkt_prepare = prep;
	hns3_eth_dev_fp_ops_config(dev);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return;

	hns3_mp_req_start_tx(dev);
}