/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2019 Hisilicon Limited.
 */

#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <inttypes.h>
#include <rte_bus_pci.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_ether.h>
#include <rte_vxlan.h>
#include <rte_ethdev_driver.h>
#include <rte_io.h>
#include <rte_ip.h>
#include <rte_net.h>
#include <rte_malloc.h>
#include <rte_pci.h>

#include "hns3_ethdev.h"
#include "hns3_rxtx.h"
#include "hns3_regs.h"
#include "hns3_logs.h"

#define HNS3_CFG_DESC_NUM(num)	((num) / 8 - 1)
#define DEFAULT_RX_FREE_THRESH	32

static void
hns3_rx_queue_release_mbufs(struct hns3_rx_queue *rxq)
{
	uint16_t i;

	/* Note: Fake rx queue will not enter here */
	if (rxq->sw_ring) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_ring[i].mbuf) {
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
				rxq->sw_ring[i].mbuf = NULL;
			}
		}
	}
}

static void
hns3_tx_queue_release_mbufs(struct hns3_tx_queue *txq)
{
	uint16_t i;

	/* Note: Fake tx queue will not enter here */
	if (txq->sw_ring) {
		for (i = 0; i < txq->nb_tx_desc; i++) {
			if (txq->sw_ring[i].mbuf) {
				rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
				txq->sw_ring[i].mbuf = NULL;
			}
		}
	}
}

static void
hns3_rx_queue_release(void *queue)
{
	struct hns3_rx_queue *rxq = queue;
	if (rxq) {
		hns3_rx_queue_release_mbufs(rxq);
		if (rxq->mz)
			rte_memzone_free(rxq->mz);
		if (rxq->sw_ring)
			rte_free(rxq->sw_ring);
		rte_free(rxq);
	}
}

static void
hns3_tx_queue_release(void *queue)
{
	struct hns3_tx_queue *txq = queue;
	if (txq) {
		hns3_tx_queue_release_mbufs(txq);
		if (txq->mz)
			rte_memzone_free(txq->mz);
		if (txq->sw_ring)
			rte_free(txq->sw_ring);
		rte_free(txq);
	}
}

void
hns3_dev_rx_queue_release(void *queue)
{
	struct hns3_rx_queue *rxq = queue;
	struct hns3_adapter *hns;

	if (rxq == NULL)
		return;

	hns = rxq->hns;
	rte_spinlock_lock(&hns->hw.lock);
	hns3_rx_queue_release(queue);
	rte_spinlock_unlock(&hns->hw.lock);
}

void
hns3_dev_tx_queue_release(void *queue)
{
	struct hns3_tx_queue *txq = queue;
	struct hns3_adapter *hns;

	if (txq == NULL)
		return;

	hns = txq->hns;
	rte_spinlock_lock(&hns->hw.lock);
	hns3_tx_queue_release(queue);
	rte_spinlock_unlock(&hns->hw.lock);
}

static void
hns3_fake_rx_queue_release(struct hns3_rx_queue *queue)
{
	struct hns3_rx_queue *rxq = queue;
	struct hns3_adapter *hns;
	struct hns3_hw *hw;
	uint16_t idx;

	if (rxq == NULL)
		return;

	hns = rxq->hns;
	hw = &hns->hw;
	idx = rxq->queue_id;
	if (hw->fkq_data.rx_queues[idx]) {
		hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);
		hw->fkq_data.rx_queues[idx] = NULL;
	}

	/* free fake rx queue arrays */
	if (idx == (hw->fkq_data.nb_fake_rx_queues - 1)) {
		hw->fkq_data.nb_fake_rx_queues = 0;
		rte_free(hw->fkq_data.rx_queues);
		hw->fkq_data.rx_queues = NULL;
	}
}

static void
hns3_fake_tx_queue_release(struct hns3_tx_queue *queue)
{
	struct hns3_tx_queue *txq = queue;
	struct hns3_adapter *hns;
	struct hns3_hw *hw;
	uint16_t idx;

	if (txq == NULL)
		return;

	hns = txq->hns;
	hw = &hns->hw;
	idx = txq->queue_id;
	if (hw->fkq_data.tx_queues[idx]) {
		hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);
		hw->fkq_data.tx_queues[idx] = NULL;
	}

	/* free fake tx queue arrays */
	if (idx == (hw->fkq_data.nb_fake_tx_queues - 1)) {
		hw->fkq_data.nb_fake_tx_queues = 0;
		rte_free(hw->fkq_data.tx_queues);
		hw->fkq_data.tx_queues = NULL;
	}
}

static void
hns3_free_rx_queues(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_fake_queue_data *fkq_data;
	struct hns3_hw *hw = &hns->hw;
	uint16_t nb_rx_q;
	uint16_t i;

	nb_rx_q = hw->data->nb_rx_queues;
	for (i = 0; i < nb_rx_q; i++) {
		if (dev->data->rx_queues[i]) {
			hns3_rx_queue_release(dev->data->rx_queues[i]);
			dev->data->rx_queues[i] = NULL;
		}
	}

	/* Free fake Rx queues */
	fkq_data = &hw->fkq_data;
	for (i = 0; i < fkq_data->nb_fake_rx_queues; i++) {
		if (fkq_data->rx_queues[i])
			hns3_fake_rx_queue_release(fkq_data->rx_queues[i]);
	}
}

static void
hns3_free_tx_queues(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_fake_queue_data *fkq_data;
	struct hns3_hw *hw = &hns->hw;
	uint16_t nb_tx_q;
	uint16_t i;

	nb_tx_q = hw->data->nb_tx_queues;
	for (i = 0; i < nb_tx_q; i++) {
		if (dev->data->tx_queues[i]) {
			hns3_tx_queue_release(dev->data->tx_queues[i]);
			dev->data->tx_queues[i] = NULL;
		}
	}

	/* Free fake Tx queues */
	fkq_data = &hw->fkq_data;
	for (i = 0; i < fkq_data->nb_fake_tx_queues; i++) {
		if (fkq_data->tx_queues[i])
			hns3_fake_tx_queue_release(fkq_data->tx_queues[i]);
	}
}

void
hns3_free_all_queues(struct rte_eth_dev *dev)
{
	hns3_free_rx_queues(dev);
	hns3_free_tx_queues(dev);
}

static int
hns3_alloc_rx_queue_mbufs(struct hns3_hw *hw, struct hns3_rx_queue *rxq)
{
	struct rte_mbuf *mbuf;
	uint64_t dma_addr;
	uint16_t i;

	for (i = 0; i < rxq->nb_rx_desc; i++) {
		mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
		if (unlikely(mbuf == NULL)) {
			hns3_err(hw, "Failed to allocate RXD[%d] for rx queue!",
				 i);
			hns3_rx_queue_release_mbufs(rxq);
			return -ENOMEM;
		}

		rte_mbuf_refcnt_set(mbuf, 1);
		mbuf->next = NULL;
		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->nb_segs = 1;
		mbuf->port = rxq->port_id;

		rxq->sw_ring[i].mbuf = mbuf;
		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
		rxq->rx_ring[i].addr = dma_addr;
		rxq->rx_ring[i].rx.bd_base_info = 0;
	}

	return 0;
}

static uint32_t
hns3_buf_size2type(uint32_t buf_size)
{
	int bd_size_type;

	switch (buf_size) {
	case 512:
		bd_size_type = HNS3_BD_SIZE_512_TYPE;
		break;
	case 1024:
		bd_size_type = HNS3_BD_SIZE_1024_TYPE;
		break;
	case 4096:
		bd_size_type = HNS3_BD_SIZE_4096_TYPE;
		break;
	default:
		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
	}

	return bd_size_type;
}

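/*
 * Example: only 512, 1024 and 4096 byte buffers have dedicated BD size
 * encodings; any other rx_buf_len (e.g. 2048) falls through to the
 * default HNS3_BD_SIZE_2048_TYPE.
 */
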
static void
hns3_init_rx_queue_hw(struct hns3_rx_queue *rxq)
{
	uint32_t rx_buf_len = rxq->rx_buf_len;
	uint64_t dma_addr = rxq->rx_ring_phys_addr;

	hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_L_REG, (uint32_t)dma_addr);
	hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_H_REG,
		       (uint32_t)((dma_addr >> 31) >> 1));

	hns3_write_dev(rxq, HNS3_RING_RX_BD_LEN_REG,
		       hns3_buf_size2type(rx_buf_len));
	hns3_write_dev(rxq, HNS3_RING_RX_BD_NUM_REG,
		       HNS3_CFG_DESC_NUM(rxq->nb_rx_desc));
}

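/*
 * Note: the high word is written as (dma_addr >> 31) >> 1 instead of
 * dma_addr >> 32; the two are equivalent for a 64-bit address (e.g.
 * dma_addr = 0x123456780 yields 0x1), but the split form stays well
 * defined even if the address type were only 32 bits wide.
 */
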
static void
hns3_init_tx_queue_hw(struct hns3_tx_queue *txq)
{
	uint64_t dma_addr = txq->tx_ring_phys_addr;

	hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_L_REG, (uint32_t)dma_addr);
	hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_H_REG,
		       (uint32_t)((dma_addr >> 31) >> 1));

	hns3_write_dev(txq, HNS3_RING_TX_BD_NUM_REG,
		       HNS3_CFG_DESC_NUM(txq->nb_tx_desc));
}

void
hns3_enable_all_queues(struct hns3_hw *hw, bool en)
{
	uint16_t nb_rx_q = hw->data->nb_rx_queues;
	uint16_t nb_tx_q = hw->data->nb_tx_queues;
	struct hns3_rx_queue *rxq;
	struct hns3_tx_queue *txq;
	uint32_t rcb_reg;
	int i;

	for (i = 0; i < hw->cfg_max_queues; i++) {
		if (i < nb_rx_q)
			rxq = hw->data->rx_queues[i];
		else
			rxq = hw->fkq_data.rx_queues[i - nb_rx_q];
		if (i < nb_tx_q)
			txq = hw->data->tx_queues[i];
		else
			txq = hw->fkq_data.tx_queues[i - nb_tx_q];
		if (rxq == NULL || txq == NULL ||
		    (en && (rxq->rx_deferred_start || txq->tx_deferred_start)))
			continue;

		rcb_reg = hns3_read_dev(rxq, HNS3_RING_EN_REG);
		if (en)
			rcb_reg |= BIT(HNS3_RING_EN_B);
		else
			rcb_reg &= ~BIT(HNS3_RING_EN_B);
		hns3_write_dev(rxq, HNS3_RING_EN_REG, rcb_reg);
	}
}

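/*
 * Example: with nb_rx_queues = 8 and nb_tx_queues = 4, cfg_max_queues is
 * 8, so i = 5 pairs the real rx queue rx_queues[5] with the fake tx
 * queue fkq_data.tx_queues[5 - 4]; a single write per index is enough
 * because the ring-enable register evidently covers the rx/tx pair.
 */
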
static int
hns3_tqp_enable(struct hns3_hw *hw, uint16_t queue_id, bool enable)
{
	struct hns3_cfg_com_tqp_queue_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	req = (struct hns3_cfg_com_tqp_queue_cmd *)desc.data;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_COM_TQP_QUEUE, false);
	req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
	req->stream_id = 0;
	hns3_set_bit(req->enable, HNS3_TQP_ENABLE_B, enable ? 1 : 0);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "TQP enable fail, ret = %d", ret);

	return ret;
}

static int
hns3_send_reset_tqp_cmd(struct hns3_hw *hw, uint16_t queue_id, bool enable)
{
	struct hns3_reset_tqp_queue_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, false);

	req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
	hns3_set_bit(req->reset_req, HNS3_TQP_RESET_B, enable ? 1 : 0);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "Send tqp reset cmd error, ret = %d", ret);

	return ret;
}

static int
hns3_get_reset_status(struct hns3_hw *hw, uint16_t queue_id)
{
	struct hns3_reset_tqp_queue_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, true);

	req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "Get reset status error, ret = %d", ret);
		return ret;
	}

	return hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B);
}

static int
hns3_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
{
#define HNS3_TQP_RESET_TRY_MS	200
	uint64_t end;
	int reset_status = 0;
	int ret;

	ret = hns3_tqp_enable(hw, queue_id, false);
	if (ret)
		return ret;

	/*
	 * In the current version VF is not supported when PF is driven by the
	 * DPDK driver; all task queue pairs are mapped to the PF function, so
	 * the PF's queue id is equal to the global queue id in the PF range.
	 */
	ret = hns3_send_reset_tqp_cmd(hw, queue_id, true);
	if (ret) {
		hns3_err(hw, "Send reset tqp cmd fail, ret = %d", ret);
		return ret;
	}
	ret = -ETIMEDOUT;
	end = get_timeofday_ms() + HNS3_TQP_RESET_TRY_MS;
	do {
		/* Wait for tqp hw reset */
		rte_delay_ms(HNS3_POLL_RESPONE_MS);
		reset_status = hns3_get_reset_status(hw, queue_id);
		if (reset_status) {
			ret = 0;
			break;
		}
	} while (get_timeofday_ms() < end);

	if (ret) {
		hns3_err(hw, "Reset TQP fail, ret = %d", ret);
		return ret;
	}

	ret = hns3_send_reset_tqp_cmd(hw, queue_id, false);
	if (ret)
		hns3_err(hw, "Deassert the soft reset fail, ret = %d", ret);

	return ret;
}

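/*
 * get_timeofday_ms() and HNS3_POLL_RESPONE_MS above are assumed to come
 * from hns3_ethdev.h: a millisecond wall-clock helper and the per-poll
 * delay used while waiting for hardware to report reset completion.
 */
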
static int
hns3vf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
{
	uint8_t msg_data[2];
	int ret;

	/* Disable VF's queue before send queue reset msg to PF */
	ret = hns3_tqp_enable(hw, queue_id, false);
	if (ret)
		return ret;

	memcpy(msg_data, &queue_id, sizeof(uint16_t));

	return hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data,
				 sizeof(msg_data), true, NULL, 0);
}

static int
hns3_reset_queue(struct hns3_adapter *hns, uint16_t queue_id)
{
	struct hns3_hw *hw = &hns->hw;

	if (hns->is_vf)
		return hns3vf_reset_tqp(hw, queue_id);
	else
		return hns3_reset_tqp(hw, queue_id);
}

int
hns3_reset_all_queues(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int i, ret;

	for (i = 0; i < hw->cfg_max_queues; i++) {
		ret = hns3_reset_queue(hns, i);
		if (ret) {
			hns3_err(hw, "Failed to reset No.%d queue: %d", i, ret);
			return ret;
		}
	}
	return 0;
}

void
hns3_tqp_intr_enable(struct hns3_hw *hw, uint16_t tpq_int_num, bool en)
{
	uint32_t addr, value;

	addr = HNS3_TQP_INTR_CTRL_REG + tpq_int_num * HNS3_VECTOR_REG_OFFSET;
	value = en ? 1 : 0;

	hns3_write_dev(hw, addr, value);
}

int
hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (dev->data->dev_conf.intr_conf.rxq == 0)
		return -ENOTSUP;

	/* enable the vectors */
	hns3_tqp_intr_enable(hw, queue_id, true);

	return rte_intr_ack(intr_handle);
}

int
hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (dev->data->dev_conf.intr_conf.rxq == 0)
		return -ENOTSUP;

	/* disable the vectors */
	hns3_tqp_intr_enable(hw, queue_id, false);

	return 0;
}

static int
hns3_dev_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_rx_queue *rxq;
	int ret;

	PMD_INIT_FUNC_TRACE();

	rxq = (struct hns3_rx_queue *)hw->data->rx_queues[idx];
	ret = hns3_alloc_rx_queue_mbufs(hw, rxq);
	if (ret) {
		hns3_err(hw, "Failed to alloc mbuf for No.%d rx queue: %d",
			 idx, ret);
		return ret;
	}

	rxq->next_to_use = 0;
	rxq->next_to_clean = 0;
	hns3_init_rx_queue_hw(rxq);

	return 0;
}

static void
hns3_fake_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_rx_queue *rxq;

	rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[idx];
	rxq->next_to_use = 0;
	rxq->next_to_clean = 0;
	hns3_init_rx_queue_hw(rxq);
}

static void
hns3_init_tx_queue(struct hns3_tx_queue *queue)
{
	struct hns3_tx_queue *txq = queue;
	struct hns3_desc *desc;
	int i;

	/* Clear tx bd */
	desc = txq->tx_ring;
	for (i = 0; i < txq->nb_tx_desc; i++) {
		desc->tx.tp_fe_sc_vld_ra_ri = 0;
		desc++;
	}

	txq->next_to_use = 0;
	txq->next_to_clean = 0;
	txq->tx_bd_ready = txq->nb_tx_desc - 1;
	hns3_init_tx_queue_hw(txq);
}

static void
hns3_dev_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_tx_queue *txq;

	txq = (struct hns3_tx_queue *)hw->data->tx_queues[idx];
	hns3_init_tx_queue(txq);
}

static void
hns3_fake_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_tx_queue *txq;

	txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[idx];
	hns3_init_tx_queue(txq);
}

static void
hns3_init_tx_ring_tc(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_tx_queue *txq;
	int i, num;

	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
		struct hns3_tc_queue_info *tc_queue = &hw->tc_queue[i];
		int j;

		if (!tc_queue->enable)
			continue;

		for (j = 0; j < tc_queue->tqp_count; j++) {
			num = tc_queue->tqp_offset + j;
			txq = (struct hns3_tx_queue *)hw->data->tx_queues[num];
			if (txq == NULL)
				continue;

			hns3_write_dev(txq, HNS3_RING_TX_TC_REG, tc_queue->tc);
		}
	}
}

static int
hns3_start_rx_queues(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_rx_queue *rxq;
	int i, j;
	int ret;

	/* Initialize RSS for queues */
	ret = hns3_config_rss(hns);
	if (ret) {
		hns3_err(hw, "Failed to configure rss %d", ret);
		return ret;
	}

	for (i = 0; i < hw->data->nb_rx_queues; i++) {
		rxq = (struct hns3_rx_queue *)hw->data->rx_queues[i];
		if (rxq == NULL || rxq->rx_deferred_start)
			continue;
		ret = hns3_dev_rx_queue_start(hns, i);
		if (ret) {
			hns3_err(hw, "Failed to start No.%d rx queue: %d", i,
				 ret);
			goto out;
		}
	}

	for (i = 0; i < hw->fkq_data.nb_fake_rx_queues; i++) {
		rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[i];
		if (rxq == NULL || rxq->rx_deferred_start)
			continue;
		hns3_fake_rx_queue_start(hns, i);
	}
	return 0;

out:
	for (j = 0; j < i; j++) {
		rxq = (struct hns3_rx_queue *)hw->data->rx_queues[j];
		hns3_rx_queue_release_mbufs(rxq);
	}

	return ret;
}

static void
hns3_start_tx_queues(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_tx_queue *txq;
	int i;

	for (i = 0; i < hw->data->nb_tx_queues; i++) {
		txq = (struct hns3_tx_queue *)hw->data->tx_queues[i];
		if (txq == NULL || txq->tx_deferred_start)
			continue;
		hns3_dev_tx_queue_start(hns, i);
	}

	for (i = 0; i < hw->fkq_data.nb_fake_tx_queues; i++) {
		txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[i];
		if (txq == NULL || txq->tx_deferred_start)
			continue;
		hns3_fake_tx_queue_start(hns, i);
	}

	hns3_init_tx_ring_tc(hns);
}

int
hns3_start_queues(struct hns3_adapter *hns, bool reset_queue)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	if (reset_queue) {
		ret = hns3_reset_all_queues(hns);
		if (ret) {
			hns3_err(hw, "Failed to reset all queues %d", ret);
			return ret;
		}
	}

	ret = hns3_start_rx_queues(hns);
	if (ret) {
		hns3_err(hw, "Failed to start rx queues: %d", ret);
		return ret;
	}

	hns3_start_tx_queues(hns);
	hns3_enable_all_queues(hw, true);

	return 0;
}

int
hns3_stop_queues(struct hns3_adapter *hns, bool reset_queue)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	hns3_enable_all_queues(hw, false);
	if (reset_queue) {
		ret = hns3_reset_all_queues(hns);
		if (ret) {
			hns3_err(hw, "Failed to reset all queues %d", ret);
			return ret;
		}
	}
	return 0;
}

static struct hns3_rx_queue *
hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev,
			    struct hns3_queue_info *q_info)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	const struct rte_memzone *rx_mz;
	struct hns3_rx_queue *rxq;
	unsigned int rx_desc;

	rxq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_rx_queue),
				 RTE_CACHE_LINE_SIZE, q_info->socket_id);
	if (rxq == NULL) {
		hns3_err(hw, "Failed to allocate memory for No.%d rx ring!",
			 q_info->idx);
		return NULL;
	}

	/* Allocate rx ring hardware descriptors. */
	rxq->queue_id = q_info->idx;
	rxq->nb_rx_desc = q_info->nb_desc;
	rx_desc = rxq->nb_rx_desc * sizeof(struct hns3_desc);
	rx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
					 rx_desc, HNS3_RING_BASE_ALIGN,
					 q_info->socket_id);
	if (rx_mz == NULL) {
		hns3_err(hw, "Failed to reserve DMA memory for No.%d rx ring!",
			 q_info->idx);
		hns3_rx_queue_release(rxq);
		return NULL;
	}
	rxq->mz = rx_mz;
	rxq->rx_ring = (struct hns3_desc *)rx_mz->addr;
	rxq->rx_ring_phys_addr = rx_mz->iova;

	hns3_dbg(hw, "No.%d rx descriptors iova 0x%" PRIx64, q_info->idx,
		 rxq->rx_ring_phys_addr);

	return rxq;
}

static int
hns3_fake_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
			 uint16_t nb_desc, unsigned int socket_id)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_queue_info q_info;
	struct hns3_rx_queue *rxq;
	uint16_t nb_rx_q;

	if (hw->fkq_data.rx_queues[idx]) {
		hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);
		hw->fkq_data.rx_queues[idx] = NULL;
	}

	q_info.idx = idx;
	q_info.socket_id = socket_id;
	q_info.nb_desc = nb_desc;
	q_info.type = "hns3 fake RX queue";
	q_info.ring_name = "rx_fake_ring";
	rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
	if (rxq == NULL) {
		hns3_err(hw, "Failed to setup No.%d fake rx ring.", idx);
		return -ENOMEM;
	}

	/* Don't need alloc sw_ring, because upper applications don't use it */
	rxq->sw_ring = NULL;

	rxq->hns = hns;
	rxq->rx_deferred_start = false;
	rxq->port_id = dev->data->port_id;
	rxq->configured = true;
	nb_rx_q = dev->data->nb_rx_queues;
	rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
				(nb_rx_q + idx) * HNS3_TQP_REG_SIZE);
	rxq->rx_buf_len = hw->rx_buf_len;

	rte_spinlock_lock(&hw->lock);
	hw->fkq_data.rx_queues[idx] = rxq;
	rte_spinlock_unlock(&hw->lock);

	return 0;
}

static struct hns3_tx_queue *
hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev,
			    struct hns3_queue_info *q_info)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	const struct rte_memzone *tx_mz;
	struct hns3_tx_queue *txq;
	struct hns3_desc *desc;
	unsigned int tx_desc;
	int i;

	txq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_tx_queue),
				 RTE_CACHE_LINE_SIZE, q_info->socket_id);
	if (txq == NULL) {
		hns3_err(hw, "Failed to allocate memory for No.%d tx ring!",
			 q_info->idx);
		return NULL;
	}

	/* Allocate tx ring hardware descriptors. */
	txq->queue_id = q_info->idx;
	txq->nb_tx_desc = q_info->nb_desc;
	tx_desc = txq->nb_tx_desc * sizeof(struct hns3_desc);
	tx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
					 tx_desc, HNS3_RING_BASE_ALIGN,
					 q_info->socket_id);
	if (tx_mz == NULL) {
		hns3_err(hw, "Failed to reserve DMA memory for No.%d tx ring!",
			 q_info->idx);
		hns3_tx_queue_release(txq);
		return NULL;
	}
	txq->mz = tx_mz;
	txq->tx_ring = (struct hns3_desc *)tx_mz->addr;
	txq->tx_ring_phys_addr = tx_mz->iova;

	hns3_dbg(hw, "No.%d tx descriptors iova 0x%" PRIx64, q_info->idx,
		 txq->tx_ring_phys_addr);

	/* Clear tx bd */
	desc = txq->tx_ring;
	for (i = 0; i < txq->nb_tx_desc; i++) {
		desc->tx.tp_fe_sc_vld_ra_ri = 0;
		desc++;
	}

	return txq;
}

static int
hns3_fake_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
			 uint16_t nb_desc, unsigned int socket_id)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_queue_info q_info;
	struct hns3_tx_queue *txq;
	uint16_t nb_tx_q;

	if (hw->fkq_data.tx_queues[idx] != NULL) {
		hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);
		hw->fkq_data.tx_queues[idx] = NULL;
	}

	q_info.idx = idx;
	q_info.socket_id = socket_id;
	q_info.nb_desc = nb_desc;
	q_info.type = "hns3 fake TX queue";
	q_info.ring_name = "tx_fake_ring";
	txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
	if (txq == NULL) {
		hns3_err(hw, "Failed to setup No.%d fake tx ring.", idx);
		return -ENOMEM;
	}

	/* Don't need alloc sw_ring, because upper applications don't use it */
	txq->sw_ring = NULL;

	txq->hns = hns;
	txq->tx_deferred_start = false;
	txq->port_id = dev->data->port_id;
	txq->configured = true;
	nb_tx_q = dev->data->nb_tx_queues;
	txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
				(nb_tx_q + idx) * HNS3_TQP_REG_SIZE);

	rte_spinlock_lock(&hw->lock);
	hw->fkq_data.tx_queues[idx] = txq;
	rte_spinlock_unlock(&hw->lock);

	return 0;
}

static int
hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
{
	uint16_t old_nb_queues = hw->fkq_data.nb_fake_rx_queues;
	void **rxq;
	uint8_t i;

	if (hw->fkq_data.rx_queues == NULL && nb_queues != 0) {
		/* first time configuration */
		uint32_t size;

		size = sizeof(hw->fkq_data.rx_queues[0]) * nb_queues;
		hw->fkq_data.rx_queues = rte_zmalloc("fake_rx_queues", size,
						     RTE_CACHE_LINE_SIZE);
		if (hw->fkq_data.rx_queues == NULL) {
			hw->fkq_data.nb_fake_rx_queues = 0;
			return -ENOMEM;
		}
	} else if (hw->fkq_data.rx_queues != NULL && nb_queues != 0) {
		/* re-configure */
		rxq = hw->fkq_data.rx_queues;
		for (i = nb_queues; i < old_nb_queues; i++)
			hns3_dev_rx_queue_release(rxq[i]);

		rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
				  RTE_CACHE_LINE_SIZE);
		if (rxq == NULL)
			return -ENOMEM;
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;
			memset(rxq + old_nb_queues, 0, sizeof(rxq[0]) * new_qs);
		}

		hw->fkq_data.rx_queues = rxq;
	} else if (hw->fkq_data.rx_queues != NULL && nb_queues == 0) {
		rxq = hw->fkq_data.rx_queues;
		for (i = nb_queues; i < old_nb_queues; i++)
			hns3_dev_rx_queue_release(rxq[i]);

		rte_free(hw->fkq_data.rx_queues);
		hw->fkq_data.rx_queues = NULL;
	}

	hw->fkq_data.nb_fake_rx_queues = nb_queues;

	return 0;
}

static int
hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
{
	uint16_t old_nb_queues = hw->fkq_data.nb_fake_tx_queues;
	void **txq;
	uint8_t i;

	if (hw->fkq_data.tx_queues == NULL && nb_queues != 0) {
		/* first time configuration */
		uint32_t size;

		size = sizeof(hw->fkq_data.tx_queues[0]) * nb_queues;
		hw->fkq_data.tx_queues = rte_zmalloc("fake_tx_queues", size,
						     RTE_CACHE_LINE_SIZE);
		if (hw->fkq_data.tx_queues == NULL) {
			hw->fkq_data.nb_fake_tx_queues = 0;
			return -ENOMEM;
		}
	} else if (hw->fkq_data.tx_queues != NULL && nb_queues != 0) {
		/* re-configure */
		txq = hw->fkq_data.tx_queues;
		for (i = nb_queues; i < old_nb_queues; i++)
			hns3_dev_tx_queue_release(txq[i]);
		txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
				  RTE_CACHE_LINE_SIZE);
		if (txq == NULL)
			return -ENOMEM;
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;
			memset(txq + old_nb_queues, 0, sizeof(txq[0]) * new_qs);
		}

		hw->fkq_data.tx_queues = txq;
	} else if (hw->fkq_data.tx_queues != NULL && nb_queues == 0) {
		txq = hw->fkq_data.tx_queues;
		for (i = nb_queues; i < old_nb_queues; i++)
			hns3_dev_tx_queue_release(txq[i]);

		rte_free(hw->fkq_data.tx_queues);
		hw->fkq_data.tx_queues = NULL;
	}
	hw->fkq_data.nb_fake_tx_queues = nb_queues;

	return 0;
}

int
hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
			      uint16_t nb_tx_q)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t rx_need_add_nb_q;
	uint16_t tx_need_add_nb_q;
	uint16_t port_id;
	uint16_t q;
	int ret;

	/* Setup new number of fake RX/TX queues and reconfigure device. */
	hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q);
	rx_need_add_nb_q = hw->cfg_max_queues - nb_rx_q;
	tx_need_add_nb_q = hw->cfg_max_queues - nb_tx_q;
	ret = hns3_fake_rx_queue_config(hw, rx_need_add_nb_q);
	if (ret) {
		hns3_err(hw, "Fail to configure fake rx queues: %d", ret);
		goto cfg_fake_rx_q_fail;
	}

	ret = hns3_fake_tx_queue_config(hw, tx_need_add_nb_q);
	if (ret) {
		hns3_err(hw, "Fail to configure fake tx queues: %d", ret);
		goto cfg_fake_tx_q_fail;
	}

	/* Allocate and set up fake RX queue per Ethernet port. */
	port_id = hw->data->port_id;
	for (q = 0; q < rx_need_add_nb_q; q++) {
		ret = hns3_fake_rx_queue_setup(dev, q, HNS3_MIN_RING_DESC,
					       rte_eth_dev_socket_id(port_id));
		if (ret)
			goto setup_fake_rx_q_fail;
	}

	/* Allocate and set up fake TX queue per Ethernet port. */
	for (q = 0; q < tx_need_add_nb_q; q++) {
		ret = hns3_fake_tx_queue_setup(dev, q, HNS3_MIN_RING_DESC,
					       rte_eth_dev_socket_id(port_id));
		if (ret)
			goto setup_fake_tx_q_fail;
	}

	return 0;

setup_fake_tx_q_fail:
setup_fake_rx_q_fail:
	(void)hns3_fake_tx_queue_config(hw, 0);
cfg_fake_tx_q_fail:
	(void)hns3_fake_rx_queue_config(hw, 0);
cfg_fake_rx_q_fail:
	hw->cfg_max_queues = 0;

	return ret;
}

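/*
 * Example: for a device configured with nb_rx_q = 8 and nb_tx_q = 4,
 * cfg_max_queues becomes 8, rx_need_add_nb_q = 0 and tx_need_add_nb_q =
 * 4, so four fake TX queues of HNS3_MIN_RING_DESC descriptors are
 * created to pair with the four unmatched RX queues.
 */
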
void
hns3_dev_release_mbufs(struct hns3_adapter *hns)
{
	struct rte_eth_dev_data *dev_data = hns->hw.data;
	struct hns3_rx_queue *rxq;
	struct hns3_tx_queue *txq;
	int i;

	if (dev_data->rx_queues)
		for (i = 0; i < dev_data->nb_rx_queues; i++) {
			rxq = dev_data->rx_queues[i];
			if (rxq == NULL || rxq->rx_deferred_start)
				continue;
			hns3_rx_queue_release_mbufs(rxq);
		}

	if (dev_data->tx_queues)
		for (i = 0; i < dev_data->nb_tx_queues; i++) {
			txq = dev_data->tx_queues[i];
			if (txq == NULL || txq->tx_deferred_start)
				continue;
			hns3_tx_queue_release_mbufs(txq);
		}
}

int
hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
		    unsigned int socket_id, const struct rte_eth_rxconf *conf,
		    struct rte_mempool *mp)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_queue_info q_info;
	struct hns3_rx_queue *rxq;
	int rx_entry_len;

	if (dev->data->dev_started) {
		hns3_err(hw, "rx_queue_setup after dev_start not supported");
		return -EINVAL;
	}

	if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||
	    nb_desc % HNS3_ALIGN_RING_DESC) {
		hns3_err(hw, "Number (%u) of rx descriptors is invalid",
			 nb_desc);
		return -EINVAL;
	}

	if (dev->data->rx_queues[idx]) {
		hns3_rx_queue_release(dev->data->rx_queues[idx]);
		dev->data->rx_queues[idx] = NULL;
	}

	q_info.idx = idx;
	q_info.socket_id = socket_id;
	q_info.nb_desc = nb_desc;
	q_info.type = "hns3 RX queue";
	q_info.ring_name = "rx_ring";
	rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
	if (rxq == NULL) {
		hns3_err(hw,
			 "Failed to alloc mem and reserve DMA mem for rx ring!");
		return -ENOMEM;
	}

	rxq->hns = hns;
	rxq->mb_pool = mp;
	if (conf->rx_free_thresh <= 0)
		rxq->rx_free_thresh = DEFAULT_RX_FREE_THRESH;
	else
		rxq->rx_free_thresh = conf->rx_free_thresh;
	rxq->rx_deferred_start = conf->rx_deferred_start;

	rx_entry_len = sizeof(struct hns3_entry) * rxq->nb_rx_desc;
	rxq->sw_ring = rte_zmalloc_socket("hns3 RX sw ring", rx_entry_len,
					  RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->sw_ring == NULL) {
		hns3_err(hw, "Failed to allocate memory for rx sw ring!");
		hns3_rx_queue_release(rxq);
		return -ENOMEM;
	}

	rxq->next_to_use = 0;
	rxq->next_to_clean = 0;
	rxq->nb_rx_hold = 0;
	rxq->pkt_first_seg = NULL;
	rxq->pkt_last_seg = NULL;
	rxq->port_id = dev->data->port_id;
	rxq->configured = true;
	rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
				idx * HNS3_TQP_REG_SIZE);
	rxq->rx_buf_len = hw->rx_buf_len;
	rxq->l2_errors = 0;
	rxq->pkt_len_errors = 0;
	rxq->l3_csum_erros = 0;
	rxq->l4_csum_erros = 0;
	rxq->ol3_csum_erros = 0;
	rxq->ol4_csum_erros = 0;

	rte_spinlock_lock(&hw->lock);
	dev->data->rx_queues[idx] = rxq;
	rte_spinlock_unlock(&hw->lock);

	return 0;
}

static inline uint32_t
rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint32_t ol_info)
{
#define HNS3_L2TBL_NUM	4
#define HNS3_L3TBL_NUM	16
#define HNS3_L4TBL_NUM	16
#define HNS3_OL3TBL_NUM	16
#define HNS3_OL4TBL_NUM	16
	uint32_t pkt_type = 0;
	uint32_t l2id, l3id, l4id;
	uint32_t ol3id, ol4id;

	static const uint32_t l2table[HNS3_L2TBL_NUM] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L2_ETHER_QINQ,
		0
	};

	static const uint32_t l3table[HNS3_L3TBL_NUM] = {
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L2_ETHER_LLDP,
		0, 0, 0, 0, 0, 0, 0, 0, 0
	};

	static const uint32_t l4table[HNS3_L4TBL_NUM] = {
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_TUNNEL_GRE,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_IGMP,
		RTE_PTYPE_L4_ICMP,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};

	static const uint32_t inner_l2table[HNS3_L2TBL_NUM] = {
		RTE_PTYPE_INNER_L2_ETHER,
		RTE_PTYPE_INNER_L2_ETHER_VLAN,
		RTE_PTYPE_INNER_L2_ETHER_QINQ,
		0
	};

	static const uint32_t inner_l3table[HNS3_L3TBL_NUM] = {
		RTE_PTYPE_INNER_L3_IPV4,
		RTE_PTYPE_INNER_L3_IPV6,
		0,
		RTE_PTYPE_INNER_L2_ETHER,
		RTE_PTYPE_INNER_L3_IPV4_EXT,
		RTE_PTYPE_INNER_L3_IPV6_EXT,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};

	static const uint32_t inner_l4table[HNS3_L4TBL_NUM] = {
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_TUNNEL_GRE,
		RTE_PTYPE_INNER_L4_SCTP,
		RTE_PTYPE_L4_IGMP,
		RTE_PTYPE_INNER_L4_ICMP,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};

	static const uint32_t ol3table[HNS3_OL3TBL_NUM] = {
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		0, 0,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6_EXT,
		0, 0, 0, 0, 0, 0, 0, 0, 0,
		RTE_PTYPE_UNKNOWN
	};

	static const uint32_t ol4table[HNS3_OL4TBL_NUM] = {
		0,
		RTE_PTYPE_TUNNEL_VXLAN,
		RTE_PTYPE_TUNNEL_NVGRE,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};

	l2id = hns3_get_field(pkt_info, HNS3_RXD_STRP_TAGP_M,
			      HNS3_RXD_STRP_TAGP_S);
	l3id = hns3_get_field(pkt_info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S);
	l4id = hns3_get_field(pkt_info, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S);
	ol3id = hns3_get_field(ol_info, HNS3_RXD_OL3ID_M, HNS3_RXD_OL3ID_S);
	ol4id = hns3_get_field(ol_info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);

	if (ol4table[ol4id])
		pkt_type |= (inner_l2table[l2id] | inner_l3table[l3id] |
			     inner_l4table[l4id] | ol3table[ol3id] |
			     ol4table[ol4id]);
	else
		pkt_type |= (l2table[l2id] | l3table[l3id] | l4table[l4id]);
	return pkt_type;
}

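/*
 * Example: for a VXLAN-encapsulated TCP/IPv4 packet the BD reports a
 * non-zero ol4id (VXLAN), so the tunnel branch combines
 * RTE_PTYPE_TUNNEL_VXLAN with the outer L3 type and the inner-table
 * entries, e.g. RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_TCP.
 */
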
const uint32_t *
hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L2_ETHER_QINQ,
		RTE_PTYPE_L2_ETHER_LLDP,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_IGMP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_TUNNEL_GRE,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == hns3_recv_pkts)
		return ptypes;

	return NULL;
}

static void
hns3_clean_rx_buffers(struct hns3_rx_queue *rxq, int count)
{
	rxq->next_to_use += count;
	if (rxq->next_to_use >= rxq->nb_rx_desc)
		rxq->next_to_use -= rxq->nb_rx_desc;

	hns3_write_dev(rxq, HNS3_RING_RX_HEAD_REG, count);
}

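/*
 * Example: with nb_rx_desc = 512, next_to_use = 510 and count = 4, the
 * software index wraps to 2 while the full count of 4 processed BDs is
 * still reported to hardware through the head register.
 */
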
static int
hns3_handle_bdinfo(struct hns3_rx_queue *rxq, struct rte_mbuf *rxm,
		   uint32_t bd_base_info, uint32_t l234_info,
		   uint32_t *cksum_err)
{
	uint32_t tmp = 0;

	if (unlikely(l234_info & BIT(HNS3_RXD_L2E_B))) {
		rxq->l2_errors++;
		return -EINVAL;
	}

	if (unlikely(rxm->pkt_len == 0 ||
		(l234_info & BIT(HNS3_RXD_TRUNCAT_B)))) {
		rxq->pkt_len_errors++;
		return -EINVAL;
	}

	if (bd_base_info & BIT(HNS3_RXD_L3L4P_B)) {
		if (unlikely(l234_info & BIT(HNS3_RXD_L3E_B))) {
			rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;
			rxq->l3_csum_erros++;
			tmp |= HNS3_L3_CKSUM_ERR;
		}

		if (unlikely(l234_info & BIT(HNS3_RXD_L4E_B))) {
			rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
			rxq->l4_csum_erros++;
			tmp |= HNS3_L4_CKSUM_ERR;
		}

		if (unlikely(l234_info & BIT(HNS3_RXD_OL3E_B))) {
			rxq->ol3_csum_erros++;
			tmp |= HNS3_OUTER_L3_CKSUM_ERR;
		}

		if (unlikely(l234_info & BIT(HNS3_RXD_OL4E_B))) {
			rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
			rxq->ol4_csum_erros++;
			tmp |= HNS3_OUTER_L4_CKSUM_ERR;
		}
	}
	*cksum_err = tmp;

	return 0;
}

static void
hns3_rx_set_cksum_flag(struct rte_mbuf *rxm, uint64_t packet_type,
		       const uint32_t cksum_err)
{
	if (unlikely((packet_type & RTE_PTYPE_TUNNEL_MASK))) {
		if (likely(packet_type & RTE_PTYPE_INNER_L3_MASK) &&
		    (cksum_err & HNS3_L3_CKSUM_ERR) == 0)
			rxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
		if (likely(packet_type & RTE_PTYPE_INNER_L4_MASK) &&
		    (cksum_err & HNS3_L4_CKSUM_ERR) == 0)
			rxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
		if (likely(packet_type & RTE_PTYPE_L4_MASK) &&
		    (cksum_err & HNS3_OUTER_L4_CKSUM_ERR) == 0)
			rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;
	} else {
		if (likely(packet_type & RTE_PTYPE_L3_MASK) &&
		    (cksum_err & HNS3_L3_CKSUM_ERR) == 0)
			rxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
		if (likely(packet_type & RTE_PTYPE_L4_MASK) &&
		    (cksum_err & HNS3_L4_CKSUM_ERR) == 0)
			rxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
	}
}

uint16_t
hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	volatile struct hns3_desc *rx_ring;  /* RX ring (desc) */
	volatile struct hns3_desc *rxdp;     /* pointer of the current desc */
	struct hns3_rx_queue *rxq;      /* RX queue */
	struct hns3_entry *sw_ring;
	struct hns3_entry *rxe;
	struct rte_mbuf *first_seg;
	struct rte_mbuf *last_seg;
	struct hns3_desc rxd;
	struct rte_mbuf *nmb;           /* pointer of the new mbuf */
	struct rte_mbuf *rxm;
	struct rte_eth_dev *dev;
	uint32_t bd_base_info;
	uint32_t cksum_err;
	uint32_t l234_info;
	uint32_t ol_info;
	uint64_t dma_addr;
	uint16_t data_len;
	uint16_t nb_rx_bd;
	uint16_t pkt_len;
	uint16_t nb_rx;
	uint16_t rx_id;
	int ret;

	nb_rx = 0;
	nb_rx_bd = 0;
	rxq = rx_queue;
	dev = &rte_eth_devices[rxq->port_id];

	rx_id = rxq->next_to_clean;
	rx_ring = rxq->rx_ring;
	first_seg = rxq->pkt_first_seg;
	last_seg = rxq->pkt_last_seg;
	sw_ring = rxq->sw_ring;

	while (nb_rx < nb_pkts) {
		rxdp = &rx_ring[rx_id];
		bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
		if (unlikely(!hns3_get_bit(bd_base_info, HNS3_RXD_VLD_B)))
			break;
		/*
		 * The interactive process between software and hardware of
		 * receiving a new packet in the hns3 network engine:
		 * 1. Hardware network engine firstly writes the packet content
		 *    to the memory pointed by the 'addr' field of the Rx Buffer
		 *    Descriptor, secondly fills the result of parsing the
		 *    packet, including the valid bit, into the Rx Buffer
		 *    Descriptor in one write operation.
		 * 2. Driver reads the Rx BD's valid bit in a loop to check
		 *    whether it's valid; if valid, it assigns a new address to
		 *    the addr field, clears the valid bit, gets the other
		 *    information of the packet by parsing the Rx BD's other
		 *    fields, and finally writes back the number of Rx BDs
		 *    processed by the driver to the HNS3_RING_RX_HEAD_REG
		 *    register to inform hardware.
		 * In the above process, the ordering is very important. We must
		 * make sure that the CPU reads the Rx BD's other fields only
		 * after the Rx BD is valid.
		 *
		 * There are two types of re-ordering: compiler re-ordering and
		 * CPU re-ordering under the ARMv8 architecture.
		 * 1. We use volatile to deal with compiler re-ordering, so you
		 *    can see that rx_ring/rxdp are defined with volatile.
		 * 2. We commonly use a memory barrier to deal with CPU
		 *    re-ordering, but the cost is high.
		 *
		 * In order to avoid the high cost of a memory barrier, we
		 * use the data dependency order under the ARMv8 architecture,
		 * for example:
		 * instr01: load A
		 * instr02: load B <- A
		 * instr02 will always execute after instr01.
		 *
		 * To construct the data dependency ordering, we use the
		 * following assignment:
		 * rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -
		 * (1u<<HNS3_RXD_VLD_B)]
		 * Using the gcc compiler under the ARMv8 architecture, the
		 * related assembly code example is as follows:
		 * note: (1u << HNS3_RXD_VLD_B) equals 0x10
		 * instr01: ldr w26, [x22, #28] --read bd_base_info
		 * instr02: and w0, w26, #0x10  --calc bd_base_info & 0x10
		 * instr03: sub w0, w0, #0x10   --calc (bd_base_info & 0x10) -
		 *                                0x10
		 * instr04: add x0, x22, x0, lsl #5 --calc copy source addr
		 * instr05: ldp x2, x3, [x0]
		 * instr06: stp x2, x3, [x29, #256] --copy BD's [0 ~ 15]B
		 * instr07: ldp x4, x5, [x0, #16]
		 * instr08: stp x4, x5, [x29, #272] --copy BD's [16 ~ 31]B
		 * instr05~08 depend on x0's value, x0 depends on w26's
		 * value, and w26 is the bd_base_info; this forms the data
		 * dependency ordering.
		 * note: if the BD is valid, (bd_base_info &
		 *       (1u<<HNS3_RXD_VLD_B)) - (1u<<HNS3_RXD_VLD_B) will
		 *       always be zero, so the assignment is correct.
		 *
		 * So we use the data dependency ordering instead of a memory
		 * barrier to improve receive performance.
		 */
		rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -
			   (1u << HNS3_RXD_VLD_B)];

		nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
		if (unlikely(nmb == NULL)) {
			dev->data->rx_mbuf_alloc_failed++;
			break;
		}

		nb_rx_bd++;
		rxe = &sw_ring[rx_id];
		rx_id++;
		if (unlikely(rx_id == rxq->nb_rx_desc))
			rx_id = 0;

		rte_prefetch0(sw_ring[rx_id].mbuf);
		if ((rx_id & 0x3) == 0) {
			rte_prefetch0(&rx_ring[rx_id]);
			rte_prefetch0(&sw_ring[rx_id]);
		}

		rxm = rxe->mbuf;
		rxe->mbuf = nmb;

		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
		rxdp->rx.bd_base_info = 0;
		rxdp->addr = dma_addr;

		/* Load remained descriptor data and extract necessary fields */
		data_len = (uint16_t)(rte_le_to_cpu_16(rxd.rx.size));
		l234_info = rte_le_to_cpu_32(rxd.rx.l234_info);
		ol_info = rte_le_to_cpu_32(rxd.rx.ol_info);

		if (first_seg == NULL) {
			first_seg = rxm;
			first_seg->nb_segs = 1;
		} else {
			first_seg->nb_segs++;
			last_seg->next = rxm;
		}

		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rxm->data_len = data_len;

		if (!hns3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
			last_seg = rxm;
			continue;
		}

		/* The last buffer of the received packet */
		pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.rx.pkt_len));
		first_seg->pkt_len = pkt_len;
		first_seg->port = rxq->port_id;
		first_seg->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash);
		first_seg->ol_flags |= PKT_RX_RSS_HASH;
		if (unlikely(hns3_get_bit(bd_base_info, HNS3_RXD_LUM_B))) {
			first_seg->hash.fdir.hi =
				rte_le_to_cpu_32(rxd.rx.fd_id);
			first_seg->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
		}
		rxm->next = NULL;

		ret = hns3_handle_bdinfo(rxq, first_seg, bd_base_info,
					 l234_info, &cksum_err);
		if (unlikely(ret))
			goto pkt_err;

		first_seg->packet_type = rxd_pkt_info_to_pkt_type(l234_info,
								  ol_info);
		if (bd_base_info & BIT(HNS3_RXD_L3L4P_B))
			hns3_rx_set_cksum_flag(rxm, first_seg->packet_type,
					       cksum_err);

		first_seg->vlan_tci = rte_le_to_cpu_16(rxd.rx.vlan_tag);
		first_seg->vlan_tci_outer =
			rte_le_to_cpu_16(rxd.rx.ot_vlan_tag);
		rx_pkts[nb_rx++] = first_seg;
		first_seg = NULL;
		continue;
pkt_err:
		rte_pktmbuf_free(first_seg);
		first_seg = NULL;
	}

	rxq->next_to_clean = rx_id;
	rxq->pkt_first_seg = first_seg;
	rxq->pkt_last_seg = last_seg;

	nb_rx_bd = nb_rx_bd + rxq->nb_rx_hold;
	if (nb_rx_bd > rxq->rx_free_thresh) {
		hns3_clean_rx_buffers(rxq, nb_rx_bd);
		nb_rx_bd = 0;
	}
	rxq->nb_rx_hold = nb_rx_bd;

	return nb_rx;
}

int
hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
		    unsigned int socket_id, const struct rte_eth_txconf *conf)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_queue_info q_info;
	struct hns3_tx_queue *txq;
	int tx_entry_len;

	if (dev->data->dev_started) {
		hns3_err(hw, "tx_queue_setup after dev_start not supported");
		return -EINVAL;
	}

	if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||
	    nb_desc % HNS3_ALIGN_RING_DESC) {
		hns3_err(hw, "Number (%u) of tx descriptors is invalid",
			 nb_desc);
		return -EINVAL;
	}

	if (dev->data->tx_queues[idx] != NULL) {
		hns3_tx_queue_release(dev->data->tx_queues[idx]);
		dev->data->tx_queues[idx] = NULL;
	}

	q_info.idx = idx;
	q_info.socket_id = socket_id;
	q_info.nb_desc = nb_desc;
	q_info.type = "hns3 TX queue";
	q_info.ring_name = "tx_ring";
	txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
	if (txq == NULL) {
		hns3_err(hw,
			 "Failed to alloc mem and reserve DMA mem for tx ring!");
		return -ENOMEM;
	}

	txq->tx_deferred_start = conf->tx_deferred_start;
	tx_entry_len = sizeof(struct hns3_entry) * txq->nb_tx_desc;
	txq->sw_ring = rte_zmalloc_socket("hns3 TX sw ring", tx_entry_len,
					  RTE_CACHE_LINE_SIZE, socket_id);
	if (txq->sw_ring == NULL) {
		hns3_err(hw, "Failed to allocate memory for tx sw ring!");
		hns3_tx_queue_release(txq);
		return -ENOMEM;
	}

	txq->hns = hns;
	txq->next_to_use = 0;
	txq->next_to_clean = 0;
	txq->tx_bd_ready = txq->nb_tx_desc - 1;
	txq->port_id = dev->data->port_id;
	txq->configured = true;
	txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
				idx * HNS3_TQP_REG_SIZE);
	rte_spinlock_lock(&hw->lock);
	dev->data->tx_queues[idx] = txq;
	rte_spinlock_unlock(&hw->lock);

	return 0;
}

static inline void
hns3_queue_xmit(struct hns3_tx_queue *txq, uint32_t buf_num)
{
	hns3_write_dev(txq, HNS3_RING_TX_TAIL_REG, buf_num);
}

static void
hns3_tx_free_useless_buffer(struct hns3_tx_queue *txq)
{
	uint16_t tx_next_clean = txq->next_to_clean;
	uint16_t tx_next_use   = txq->next_to_use;
	uint16_t tx_bd_ready   = txq->tx_bd_ready;
	uint16_t tx_bd_max     = txq->nb_tx_desc;
	struct hns3_entry *tx_bak_pkt = &txq->sw_ring[tx_next_clean];
	struct hns3_desc *desc = &txq->tx_ring[tx_next_clean];
	struct rte_mbuf *mbuf;

	while ((!hns3_get_bit(desc->tx.tp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B)) &&
		tx_next_use != tx_next_clean) {
		mbuf = tx_bak_pkt->mbuf;
		if (mbuf) {
			rte_pktmbuf_free_seg(mbuf);
			tx_bak_pkt->mbuf = NULL;
		}

		desc++;
		tx_bak_pkt++;
		tx_next_clean++;
		tx_bd_ready++;

		if (tx_next_clean >= tx_bd_max) {
			tx_next_clean = 0;
			desc = txq->tx_ring;
			tx_bak_pkt = txq->sw_ring;
		}
	}

	txq->next_to_clean = tx_next_clean;
	txq->tx_bd_ready = tx_bd_ready;
}

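/*
 * Example: with nb_tx_desc = 512, next_to_clean = 508 and 8 transmitted
 * BDs to reclaim, the loop frees the mbufs of BDs 508..511 and 0..3,
 * wraps the pointers back to the ring base, and leaves next_to_clean =
 * 4 with tx_bd_ready grown by 8.
 */
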
static void
fill_desc(struct hns3_tx_queue *txq, uint16_t tx_desc_id, struct rte_mbuf *rxm,
	  bool first, int offset)
{
	struct hns3_desc *tx_ring = txq->tx_ring;
	struct hns3_desc *desc = &tx_ring[tx_desc_id];
	uint8_t frag_end = rxm->next == NULL ? 1 : 0;
	uint16_t size = rxm->data_len;
	uint16_t rrcfv = 0;
	uint64_t ol_flags = rxm->ol_flags;
	uint32_t hdr_len;
	uint32_t paylen;
	uint32_t tmp;

	desc->addr = rte_mbuf_data_iova(rxm) + offset;
	desc->tx.send_size = rte_cpu_to_le_16(size);
	hns3_set_bit(rrcfv, HNS3_TXD_VLD_B, 1);

	if (first) {
		hdr_len = rxm->l2_len + rxm->l3_len + rxm->l4_len;
		hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ?
			   rxm->outer_l2_len + rxm->outer_l3_len : 0;
		paylen = rxm->pkt_len - hdr_len;
		desc->tx.paylen = rte_cpu_to_le_32(paylen);
	}

	hns3_set_bit(rrcfv, HNS3_TXD_FE_B, frag_end);
	desc->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(rrcfv);

	if (frag_end) {
		if (ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
			tmp = rte_le_to_cpu_32(desc->tx.type_cs_vlan_tso_len);
			hns3_set_bit(tmp, HNS3_TXD_VLAN_B, 1);
			desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp);
			desc->tx.vlan_tag = rte_cpu_to_le_16(rxm->vlan_tci);
		}

		if (ol_flags & PKT_TX_QINQ_PKT) {
			tmp = rte_le_to_cpu_32(desc->tx.ol_type_vlan_len_msec);
			hns3_set_bit(tmp, HNS3_TXD_OVLAN_B, 1);
			desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(tmp);
			desc->tx.outer_vlan_tag =
				rte_cpu_to_le_16(rxm->vlan_tci_outer);
		}
	}
}

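/*
 * Example: a packet of three segments produces three BDs; only the
 * first BD carries the payload length (first == true) and only the last
 * BD (frag_end == 1, i.e. rxm->next == NULL) carries the FE bit and the
 * VLAN tags, matching the checks above.
 */
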
static int
hns3_tx_alloc_mbufs(struct hns3_tx_queue *txq, struct rte_mempool *mb_pool,
		    uint16_t nb_new_buf, struct rte_mbuf **alloc_mbuf)
{
	struct rte_mbuf *new_mbuf = NULL;
	struct rte_eth_dev *dev;
	struct rte_mbuf *temp;
	struct hns3_hw *hw;
	uint16_t i;

	/* Allocate enough mbufs */
	for (i = 0; i < nb_new_buf; i++) {
		temp = rte_pktmbuf_alloc(mb_pool);
		if (unlikely(temp == NULL)) {
			dev = &rte_eth_devices[txq->port_id];
			hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
			hns3_err(hw, "Failed to alloc TX mbuf port_id=%d,"
				     "queue_id=%d in reassemble tx pkts.",
				     txq->port_id, txq->queue_id);
			rte_pktmbuf_free(new_mbuf);
			return -ENOMEM;
		}
		temp->next = new_mbuf;
		new_mbuf = temp;
	}

	if (new_mbuf == NULL)
		return -ENOMEM;

	new_mbuf->nb_segs = nb_new_buf;
	*alloc_mbuf = new_mbuf;

	return 0;
}

static int
hns3_reassemble_tx_pkts(void *tx_queue, struct rte_mbuf *tx_pkt,
			struct rte_mbuf **new_pkt)
{
	struct hns3_tx_queue *txq = tx_queue;
	struct rte_mempool *mb_pool;
	struct rte_mbuf *new_mbuf;
	struct rte_mbuf *temp_new;
	struct rte_mbuf *temp;
	uint16_t last_buf_len;
	uint16_t nb_new_buf;
	uint16_t buf_size;
	uint16_t buf_len;
	uint16_t len_s;
	uint16_t len_d;
	uint16_t len;
	uint16_t i;
	int ret;
	char *s;
	char *d;

	mb_pool = tx_pkt->pool;
	buf_size = tx_pkt->buf_len - RTE_PKTMBUF_HEADROOM;
	nb_new_buf = (tx_pkt->pkt_len - 1) / buf_size + 1;

	last_buf_len = tx_pkt->pkt_len % buf_size;
	if (last_buf_len == 0)
		last_buf_len = buf_size;

	/* Allocate enough mbufs */
	ret = hns3_tx_alloc_mbufs(txq, mb_pool, nb_new_buf, &new_mbuf);
	if (ret)
		return ret;

	/* Copy the original packet content to the new mbufs */
	temp = tx_pkt;
	s = rte_pktmbuf_mtod(temp, char *);
	len_s = temp->data_len;
	temp_new = new_mbuf;
	for (i = 0; i < nb_new_buf; i++) {
		d = rte_pktmbuf_mtod(temp_new, char *);
		if (i < nb_new_buf - 1)
			buf_len = buf_size;
		else
			buf_len = last_buf_len;
		len_d = buf_len;

		while (len_d) {
			len = RTE_MIN(len_s, len_d);
			memcpy(d, s, len);
			s = s + len;
			d = d + len;
			len_d = len_d - len;
			len_s = len_s - len;

			if (len_s == 0) {
				temp = temp->next;
				if (temp == NULL)
					break;
				s = rte_pktmbuf_mtod(temp, char *);
				len_s = temp->data_len;
			}
		}

		temp_new->data_len = buf_len;
		temp_new = temp_new->next;
	}

	/* free original mbufs */
	rte_pktmbuf_free(tx_pkt);

	*new_pkt = new_mbuf;

	return 0;
}

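/*
 * Example: a 9000 byte packet copied into mbufs whose data room minus
 * headroom is 2048 bytes needs nb_new_buf = (9000 - 1) / 2048 + 1 = 5
 * new mbufs, the last one holding last_buf_len = 9000 % 2048 = 808
 * bytes.
 */
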
static void
hns3_parse_outer_params(uint64_t ol_flags, uint32_t *ol_type_vlan_len_msec)
{
	uint32_t tmp = *ol_type_vlan_len_msec;

	/* (outer) IP header type */
	if (ol_flags & PKT_TX_OUTER_IPV4) {
		/* OL3 header size, defined in 4 bytes */
		hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
			       sizeof(struct rte_ipv4_hdr) >> HNS3_L3_LEN_UNIT);
		if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
			hns3_set_field(tmp, HNS3_TXD_OL3T_M,
				       HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_CSUM);
		else
			hns3_set_field(tmp, HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
				       HNS3_OL3T_IPV4_NO_CSUM);
	} else if (ol_flags & PKT_TX_OUTER_IPV6) {
		hns3_set_field(tmp, HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
			       HNS3_OL3T_IPV6);
		/* OL3 header size, defined in 4 bytes */
		hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
			       sizeof(struct rte_ipv6_hdr) >> HNS3_L3_LEN_UNIT);
	}

	*ol_type_vlan_len_msec = tmp;
}

static int
hns3_parse_inner_params(uint64_t ol_flags, uint32_t *ol_type_vlan_len_msec,
			struct rte_net_hdr_lens *hdr_lens)
{
	uint32_t tmp = *ol_type_vlan_len_msec;
	uint8_t l4_len;

	/* OL2 header size, defined in 2 bytes */
	hns3_set_field(tmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
		       sizeof(struct rte_ether_hdr) >> HNS3_L2_LEN_UNIT);

	/* L4TUNT: L4 Tunneling Type */
	switch (ol_flags & PKT_TX_TUNNEL_MASK) {
	case PKT_TX_TUNNEL_GENEVE:
	case PKT_TX_TUNNEL_VXLAN:
		/* MAC in UDP tunnelling packet, include VxLAN */
		hns3_set_field(tmp, HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S,
			       HNS3_TUN_MAC_IN_UDP);
		/*
		 * OL4 header size, defined in 4 Bytes, it contains outer
		 * L4(UDP) length and tunneling length.
		 */
		hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
			       (uint8_t)RTE_ETHER_VXLAN_HLEN >>
			       HNS3_L4_LEN_UNIT);
		break;
	case PKT_TX_TUNNEL_GRE:
		hns3_set_field(tmp, HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S,
			       HNS3_TUN_NVGRE);
		/*
		 * OL4 header size, defined in 4 Bytes, it contains outer
		 * L4(GRE) length and tunneling length.
		 */
		l4_len = hdr_lens->l4_len + hdr_lens->tunnel_len;
		hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
			       l4_len >> HNS3_L4_LEN_UNIT);
		break;
	default:
		/* For non UDP / GRE tunneling, drop the tunnel packet */
		return -EINVAL;
	}

	*ol_type_vlan_len_msec = tmp;

	return 0;
}

static int
hns3_parse_tunneling_params(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
			    uint64_t ol_flags,
			    struct rte_net_hdr_lens *hdr_lens)
{
	struct hns3_desc *tx_ring = txq->tx_ring;
	struct hns3_desc *desc = &tx_ring[tx_desc_id];
	uint32_t value = 0;
	int ret;

	hns3_parse_outer_params(ol_flags, &value);
	ret = hns3_parse_inner_params(ol_flags, &value, hdr_lens);
	if (ret)
		return -EINVAL;

	desc->tx.ol_type_vlan_len_msec |= rte_cpu_to_le_32(value);

	return 0;
}

static void
hns3_parse_l3_cksum_params(uint64_t ol_flags, uint32_t *type_cs_vlan_tso_len)
{
	uint32_t tmp;

	/* Enable L3 checksum offloads */
	if (ol_flags & PKT_TX_IPV4) {
		tmp = *type_cs_vlan_tso_len;
		hns3_set_field(tmp, HNS3_TXD_L3T_M, HNS3_TXD_L3T_S,
			       HNS3_L3T_IPV4);
		/* inner(/normal) L3 header size, defined in 4 bytes */
		hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
			       sizeof(struct rte_ipv4_hdr) >> HNS3_L3_LEN_UNIT);
		if (ol_flags & PKT_TX_IP_CKSUM)
			hns3_set_bit(tmp, HNS3_TXD_L3CS_B, 1);
		*type_cs_vlan_tso_len = tmp;
	} else if (ol_flags & PKT_TX_IPV6) {
		tmp = *type_cs_vlan_tso_len;
		/* L3T, IPv6 don't do checksum */
		hns3_set_field(tmp, HNS3_TXD_L3T_M, HNS3_TXD_L3T_S,
			       HNS3_L3T_IPV6);
		/* inner(/normal) L3 header size, defined in 4 bytes */
		hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
			       sizeof(struct rte_ipv6_hdr) >> HNS3_L3_LEN_UNIT);
		*type_cs_vlan_tso_len = tmp;
	}
}

static void
hns3_parse_l4_cksum_params(uint64_t ol_flags, uint32_t *type_cs_vlan_tso_len)
{
	uint32_t tmp;

	/* Enable L4 checksum offloads */
	switch (ol_flags & PKT_TX_L4_MASK) {
	case PKT_TX_TCP_CKSUM:
		tmp = *type_cs_vlan_tso_len;
		hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
			       HNS3_L4T_TCP);
		hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
		hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
			       sizeof(struct rte_tcp_hdr) >> HNS3_L4_LEN_UNIT);
		*type_cs_vlan_tso_len = tmp;
		break;
	case PKT_TX_UDP_CKSUM:
		tmp = *type_cs_vlan_tso_len;
		hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
			       HNS3_L4T_UDP);
		hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
		hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
			       sizeof(struct rte_udp_hdr) >> HNS3_L4_LEN_UNIT);
		*type_cs_vlan_tso_len = tmp;
		break;
	case PKT_TX_SCTP_CKSUM:
		tmp = *type_cs_vlan_tso_len;
		hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
			       HNS3_L4T_SCTP);
		hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
		hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
			       sizeof(struct rte_sctp_hdr) >> HNS3_L4_LEN_UNIT);
		*type_cs_vlan_tso_len = tmp;
		break;
	default:
		break;
	}
}

static void
hns3_txd_enable_checksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
			 uint64_t ol_flags)
{
	struct hns3_desc *tx_ring = txq->tx_ring;
	struct hns3_desc *desc = &tx_ring[tx_desc_id];
	uint32_t value = 0;

	/* inner(/normal) L2 header size, defined in 2 bytes */
	hns3_set_field(value, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
		       sizeof(struct rte_ether_hdr) >> HNS3_L2_LEN_UNIT);

	hns3_parse_l3_cksum_params(ol_flags, &value);
	hns3_parse_l4_cksum_params(ol_flags, &value);

	desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(value);
}

uint16_t
hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
	       uint16_t nb_pkts)
{
	struct rte_mbuf *m;
	uint16_t i;
	int ret;

	for (i = 0; i < nb_pkts; i++) {
		m = tx_pkts[i];

		/* check the size of packet */
		if (m->pkt_len < RTE_ETHER_MIN_LEN) {
			rte_errno = EINVAL;
			return i;
		}

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
		ret = rte_validate_tx_offload(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
#endif
		ret = rte_net_intel_cksum_prepare(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
	}

	return i;
}

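/*
 * rte_net_intel_cksum_prepare() is the generic helper from rte_net.h
 * that writes the pseudo-header checksums into the packet before the
 * hardware computes the full L3/L4 checksums; despite the name it is
 * not Intel specific, which is presumably why hns3 reuses it here
 * instead of a driver-private routine.
 */
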
static int
hns3_parse_cksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
		 const struct rte_mbuf *m, struct rte_net_hdr_lens *hdr_lens)
{
	/* Fill in tunneling parameters if necessary */
	if (m->ol_flags & PKT_TX_TUNNEL_MASK) {
		(void)rte_net_get_ptype(m, hdr_lens, RTE_PTYPE_ALL_MASK);
		if (hns3_parse_tunneling_params(txq, tx_desc_id, m->ol_flags,
						hdr_lens))
			return -EINVAL;
	}
	/* Enable checksum offloading */
	if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK)
		hns3_txd_enable_checksum(txq, tx_desc_id, m->ol_flags);

	return 0;
}

uint16_t
hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct rte_net_hdr_lens hdr_lens = {0};
	struct hns3_tx_queue *txq = tx_queue;
	struct hns3_entry *tx_bak_pkt;
	struct rte_mbuf *new_pkt;
	struct rte_mbuf *tx_pkt;
	struct rte_mbuf *m_seg;
	uint32_t nb_hold = 0;
	uint16_t tx_next_use;
	uint16_t tx_pkt_num;
	uint16_t tx_bd_max;
	uint16_t nb_buf;
	uint16_t nb_tx;
	uint16_t i;

	/* free useless buffer */
	hns3_tx_free_useless_buffer(txq);

	tx_next_use = txq->next_to_use;
	tx_bd_max = txq->nb_tx_desc;
	tx_pkt_num = nb_pkts;

	/* send packets */
	tx_bak_pkt = &txq->sw_ring[tx_next_use];
	for (nb_tx = 0; nb_tx < tx_pkt_num; nb_tx++) {
		tx_pkt = *tx_pkts++;

		nb_buf = tx_pkt->nb_segs;

		if (nb_buf > txq->tx_bd_ready) {
			if (nb_tx == 0)
				return 0;

			goto end_of_tx;
		}

		/*
		 * If packet length is greater than HNS3_MAX_FRAME_LEN
		 * driver support, the packet will be ignored.
		 */
		if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) > HNS3_MAX_FRAME_LEN))
			break;

		/*
		 * If packet length is less than minimum packet size, driver
		 * need to pad it.
		 */
		if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) < HNS3_MIN_PKT_SIZE)) {
			uint16_t add_len;
			char *appended;

			add_len = HNS3_MIN_PKT_SIZE -
				  rte_pktmbuf_pkt_len(tx_pkt);
			appended = rte_pktmbuf_append(tx_pkt, add_len);
			if (appended == NULL)
				break;

			memset(appended, 0, add_len);
		}

		m_seg = tx_pkt;
		if (unlikely(nb_buf > HNS3_MAX_TX_BD_PER_PKT)) {
			if (hns3_reassemble_tx_pkts(txq, tx_pkt, &new_pkt))
				goto end_of_tx;
			m_seg = new_pkt;
			nb_buf = m_seg->nb_segs;
		}

		if (hns3_parse_cksum(txq, tx_next_use, m_seg, &hdr_lens))
			goto end_of_tx;

		i = 0;
		do {
			fill_desc(txq, tx_next_use, m_seg, (i == 0), 0);
			tx_bak_pkt->mbuf = m_seg;
			m_seg = m_seg->next;
			tx_next_use++;
			tx_bak_pkt++;
			if (tx_next_use >= tx_bd_max) {
				tx_next_use = 0;
				tx_bak_pkt = txq->sw_ring;
			}

			i++;
		} while (m_seg != NULL);

		nb_hold += i;
		txq->next_to_use = tx_next_use;
		txq->tx_bd_ready -= i;
	}

end_of_tx:

	if (likely(nb_tx))
		hns3_queue_xmit(txq, nb_hold);

	return nb_tx;
}

static uint16_t
hns3_dummy_rxtx_burst(void *dpdk_txq __rte_unused,
		      struct rte_mbuf **pkts __rte_unused,
		      uint16_t pkts_n __rte_unused)
{
	return 0;
}

void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;

	if (hns->hw.adapter_state == HNS3_NIC_STARTED &&
	    rte_atomic16_read(&hns->hw.reset.resetting) == 0) {
		eth_dev->rx_pkt_burst = hns3_recv_pkts;
		eth_dev->tx_pkt_burst = hns3_xmit_pkts;
		eth_dev->tx_pkt_prepare = hns3_prep_pkts;
	} else {
		eth_dev->rx_pkt_burst = hns3_dummy_rxtx_burst;
		eth_dev->tx_pkt_burst = hns3_dummy_rxtx_burst;
		eth_dev->tx_pkt_prepare = hns3_dummy_rxtx_burst;
	}
}