1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2019 Hisilicon Limited.
5 #include <rte_bus_pci.h>
6 #include <rte_common.h>
7 #include <rte_cycles.h>
9 #include <ethdev_driver.h>
12 #include <rte_malloc.h>
13 #if defined(RTE_ARCH_ARM64) && defined(__ARM_FEATURE_SVE)
14 #include <rte_cpuflags.h>
17 #include "hns3_ethdev.h"
18 #include "hns3_rxtx.h"
19 #include "hns3_regs.h"
20 #include "hns3_logs.h"
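/*
 * The BD number registers appear to encode the descriptor count in units
 * of 8, minus one; e.g. HNS3_CFG_DESC_NUM(1024) == 127.
 */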
22 #define HNS3_CFG_DESC_NUM(num) ((num) / 8 - 1)
23 #define HNS3_RX_RING_PREFETCH_MASK 3
26 hns3_rx_queue_release_mbufs(struct hns3_rx_queue *rxq)
30 /* Note: fake Rx queues never reach this function */
31 if (rxq->sw_ring == NULL)
34 if (rxq->rx_rearm_nb == 0) {
35 for (i = 0; i < rxq->nb_rx_desc; i++) {
36 if (rxq->sw_ring[i].mbuf != NULL) {
37 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
38 rxq->sw_ring[i].mbuf = NULL;
42 for (i = rxq->next_to_use;
43 i != rxq->rx_rearm_start;
44 i = (i + 1) % rxq->nb_rx_desc) {
45 if (rxq->sw_ring[i].mbuf != NULL) {
46 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
47 rxq->sw_ring[i].mbuf = NULL;
52 for (i = 0; i < rxq->bulk_mbuf_num; i++)
53 rte_pktmbuf_free_seg(rxq->bulk_mbuf[i]);
54 rxq->bulk_mbuf_num = 0;
56 if (rxq->pkt_first_seg) {
57 rte_pktmbuf_free(rxq->pkt_first_seg);
58 rxq->pkt_first_seg = NULL;
63 hns3_tx_queue_release_mbufs(struct hns3_tx_queue *txq)
67 /* Note: fake Tx queues never reach this function */
69 for (i = 0; i < txq->nb_tx_desc; i++) {
70 if (txq->sw_ring[i].mbuf) {
71 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
72 txq->sw_ring[i].mbuf = NULL;
79 hns3_rx_queue_release(void *queue)
81 struct hns3_rx_queue *rxq = queue;
83 hns3_rx_queue_release_mbufs(rxq);
85 rte_memzone_free(rxq->mz);
87 rte_free(rxq->sw_ring);
93 hns3_tx_queue_release(void *queue)
95 struct hns3_tx_queue *txq = queue;
97 hns3_tx_queue_release_mbufs(txq);
99 rte_memzone_free(txq->mz);
101 rte_free(txq->sw_ring);
109 hns3_dev_rx_queue_release(void *queue)
111 struct hns3_rx_queue *rxq = queue;
112 struct hns3_adapter *hns;
118 rte_spinlock_lock(&hns->hw.lock);
119 hns3_rx_queue_release(queue);
120 rte_spinlock_unlock(&hns->hw.lock);
124 hns3_dev_tx_queue_release(void *queue)
126 struct hns3_tx_queue *txq = queue;
127 struct hns3_adapter *hns;
133 rte_spinlock_lock(&hns->hw.lock);
134 hns3_tx_queue_release(queue);
135 rte_spinlock_unlock(&hns->hw.lock);
139 hns3_fake_rx_queue_release(struct hns3_rx_queue *queue)
141 struct hns3_rx_queue *rxq = queue;
142 struct hns3_adapter *hns;
152 if (hw->fkq_data.rx_queues[idx]) {
153 hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);
154 hw->fkq_data.rx_queues[idx] = NULL;
157 /* free fake rx queue arrays */
158 if (idx == (hw->fkq_data.nb_fake_rx_queues - 1)) {
159 hw->fkq_data.nb_fake_rx_queues = 0;
160 rte_free(hw->fkq_data.rx_queues);
161 hw->fkq_data.rx_queues = NULL;
166 hns3_fake_tx_queue_release(struct hns3_tx_queue *queue)
168 struct hns3_tx_queue *txq = queue;
169 struct hns3_adapter *hns;
179 if (hw->fkq_data.tx_queues[idx]) {
180 hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);
181 hw->fkq_data.tx_queues[idx] = NULL;
184 /* free fake tx queue arrays */
185 if (idx == (hw->fkq_data.nb_fake_tx_queues - 1)) {
186 hw->fkq_data.nb_fake_tx_queues = 0;
187 rte_free(hw->fkq_data.tx_queues);
188 hw->fkq_data.tx_queues = NULL;
193 hns3_free_rx_queues(struct rte_eth_dev *dev)
195 struct hns3_adapter *hns = dev->data->dev_private;
196 struct hns3_fake_queue_data *fkq_data;
197 struct hns3_hw *hw = &hns->hw;
201 nb_rx_q = hw->data->nb_rx_queues;
202 for (i = 0; i < nb_rx_q; i++) {
203 if (dev->data->rx_queues[i]) {
204 hns3_rx_queue_release(dev->data->rx_queues[i]);
205 dev->data->rx_queues[i] = NULL;
209 /* Free fake Rx queues */
210 fkq_data = &hw->fkq_data;
211 for (i = 0; i < fkq_data->nb_fake_rx_queues; i++) {
212 if (fkq_data->rx_queues[i])
213 hns3_fake_rx_queue_release(fkq_data->rx_queues[i]);
218 hns3_free_tx_queues(struct rte_eth_dev *dev)
220 struct hns3_adapter *hns = dev->data->dev_private;
221 struct hns3_fake_queue_data *fkq_data;
222 struct hns3_hw *hw = &hns->hw;
226 nb_tx_q = hw->data->nb_tx_queues;
227 for (i = 0; i < nb_tx_q; i++) {
228 if (dev->data->tx_queues[i]) {
229 hns3_tx_queue_release(dev->data->tx_queues[i]);
230 dev->data->tx_queues[i] = NULL;
234 /* Free fake Tx queues */
235 fkq_data = &hw->fkq_data;
236 for (i = 0; i < fkq_data->nb_fake_tx_queues; i++) {
237 if (fkq_data->tx_queues[i])
238 hns3_fake_tx_queue_release(fkq_data->tx_queues[i]);
243 hns3_free_all_queues(struct rte_eth_dev *dev)
245 hns3_free_rx_queues(dev);
246 hns3_free_tx_queues(dev);
250 hns3_alloc_rx_queue_mbufs(struct hns3_hw *hw, struct hns3_rx_queue *rxq)
252 struct rte_mbuf *mbuf;
256 for (i = 0; i < rxq->nb_rx_desc; i++) {
257 mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
258 if (unlikely(mbuf == NULL)) {
259 hns3_err(hw, "Failed to allocate RXD[%u] for rx queue!",
261 hns3_rx_queue_release_mbufs(rxq);
265 rte_mbuf_refcnt_set(mbuf, 1);
267 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
269 mbuf->port = rxq->port_id;
271 rxq->sw_ring[i].mbuf = mbuf;
272 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
273 rxq->rx_ring[i].addr = dma_addr;
274 rxq->rx_ring[i].rx.bd_base_info = 0;
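/*
 * Map an Rx buffer size to the BD size type field expected by hardware
 * (one of the 512/1024/2048/4096 byte buffer types).
 */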
281 hns3_buf_size2type(uint32_t buf_size)
287 bd_size_type = HNS3_BD_SIZE_512_TYPE;
290 bd_size_type = HNS3_BD_SIZE_1024_TYPE;
293 bd_size_type = HNS3_BD_SIZE_4096_TYPE;
296 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
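/*
 * Program the Rx ring into hardware: the ring base address (split into
 * low/high 32-bit halves), the buffer size type and the BD number.
 */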
303 hns3_init_rx_queue_hw(struct hns3_rx_queue *rxq)
305 uint32_t rx_buf_len = rxq->rx_buf_len;
306 uint64_t dma_addr = rxq->rx_ring_phys_addr;
308 hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_L_REG, (uint32_t)dma_addr);
309 hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_H_REG,
310 (uint32_t)((dma_addr >> 31) >> 1));
312 hns3_write_dev(rxq, HNS3_RING_RX_BD_LEN_REG,
313 hns3_buf_size2type(rx_buf_len));
314 hns3_write_dev(rxq, HNS3_RING_RX_BD_NUM_REG,
315 HNS3_CFG_DESC_NUM(rxq->nb_rx_desc));
319 hns3_init_tx_queue_hw(struct hns3_tx_queue *txq)
321 uint64_t dma_addr = txq->tx_ring_phys_addr;
323 hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_L_REG, (uint32_t)dma_addr);
324 hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_H_REG,
325 (uint32_t)((dma_addr >> 31) >> 1));
327 hns3_write_dev(txq, HNS3_RING_TX_BD_NUM_REG,
328 HNS3_CFG_DESC_NUM(txq->nb_tx_desc));
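/*
 * Propagate the current port-based VLAN state to the per-queue software
 * PVID processing flags of all Rx and Tx queues.
 */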
332 hns3_update_all_queues_pvid_proc_en(struct hns3_hw *hw)
334 uint16_t nb_rx_q = hw->data->nb_rx_queues;
335 uint16_t nb_tx_q = hw->data->nb_tx_queues;
336 struct hns3_rx_queue *rxq;
337 struct hns3_tx_queue *txq;
341 pvid_en = hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE;
342 for (i = 0; i < hw->cfg_max_queues; i++) {
344 rxq = hw->data->rx_queues[i];
346 rxq->pvid_sw_discard_en = pvid_en;
349 txq = hw->data->tx_queues[i];
351 txq->pvid_sw_shift_en = pvid_en;
357 hns3_stop_unused_queue(void *tqp_base, enum hns3_ring_type queue_type)
362 reg_offset = queue_type == HNS3_RING_TYPE_TX ?
363 HNS3_RING_TX_EN_REG : HNS3_RING_RX_EN_REG;
364 reg = hns3_read_reg(tqp_base, reg_offset);
365 reg &= ~BIT(HNS3_RING_EN_B);
366 hns3_write_reg(tqp_base, reg_offset, reg);
370 hns3_enable_all_queues(struct hns3_hw *hw, bool en)
372 uint16_t nb_rx_q = hw->data->nb_rx_queues;
373 uint16_t nb_tx_q = hw->data->nb_tx_queues;
374 struct hns3_rx_queue *rxq;
375 struct hns3_tx_queue *txq;
380 for (i = 0; i < hw->cfg_max_queues; i++) {
381 if (hns3_dev_indep_txrx_supported(hw)) {
382 rxq = i < nb_rx_q ? hw->data->rx_queues[i] : NULL;
383 txq = i < nb_tx_q ? hw->data->tx_queues[i] : NULL;
385 tqp_base = (void *)((char *)hw->io_base +
386 hns3_get_tqp_reg_offset(i));
388 * If queue struct is not initialized, it means the
389 * related HW ring has not been initialized yet.
390 * So, these queues should be disabled before enabling
391 * the TQPs to avoid a HW exception, since the queues
392 * are enabled by default.
395 hns3_stop_unused_queue(tqp_base,
398 hns3_stop_unused_queue(tqp_base,
401 rxq = i < nb_rx_q ? hw->data->rx_queues[i] :
402 hw->fkq_data.rx_queues[i - nb_rx_q];
404 tqp_base = rxq->io_base;
407 * This is the master switch that is used to control the enabling
408 * of a pair of Tx and Rx queues. Both the Rx and Tx point to
411 rcb_reg = hns3_read_reg(tqp_base, HNS3_RING_EN_REG);
413 rcb_reg |= BIT(HNS3_RING_EN_B);
415 rcb_reg &= ~BIT(HNS3_RING_EN_B);
416 hns3_write_reg(tqp_base, HNS3_RING_EN_REG, rcb_reg);
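/*
 * Enable or disable a single Tx queue through its private enable
 * register; the write is only performed when independent Tx/Rx queue
 * control is supported.
 */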
421 hns3_enable_txq(struct hns3_tx_queue *txq, bool en)
423 struct hns3_hw *hw = &txq->hns->hw;
426 if (hns3_dev_indep_txrx_supported(hw)) {
427 reg = hns3_read_dev(txq, HNS3_RING_TX_EN_REG);
429 reg |= BIT(HNS3_RING_EN_B);
431 reg &= ~BIT(HNS3_RING_EN_B);
432 hns3_write_dev(txq, HNS3_RING_TX_EN_REG, reg);
438 hns3_enable_rxq(struct hns3_rx_queue *rxq, bool en)
440 struct hns3_hw *hw = &rxq->hns->hw;
443 if (hns3_dev_indep_txrx_supported(hw)) {
444 reg = hns3_read_dev(rxq, HNS3_RING_RX_EN_REG);
446 reg |= BIT(HNS3_RING_EN_B);
448 reg &= ~BIT(HNS3_RING_EN_B);
449 hns3_write_dev(rxq, HNS3_RING_RX_EN_REG, reg);
455 hns3_start_all_txqs(struct rte_eth_dev *dev)
457 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
458 struct hns3_tx_queue *txq;
461 for (i = 0; i < dev->data->nb_tx_queues; i++) {
462 txq = hw->data->tx_queues[i];
464 hns3_err(hw, "Tx queue %u not available or not set up.", i);
465 goto start_txqs_fail;
468 * Tx queues are enabled by default. Therefore, they need to
469 * be disabled when deferred_start is set. There is
470 * another master switch used to control the enabling of a pair
471 * of Tx and Rx queues. That master switch is disabled by
474 if (txq->tx_deferred_start)
475 hns3_enable_txq(txq, false);
477 hns3_enable_txq(txq, true);
482 for (j = 0; j < i; j++) {
483 txq = hw->data->tx_queues[j];
484 hns3_enable_txq(txq, false);
490 hns3_start_all_rxqs(struct rte_eth_dev *dev)
492 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
493 struct hns3_rx_queue *rxq;
496 for (i = 0; i < dev->data->nb_rx_queues; i++) {
497 rxq = hw->data->rx_queues[i];
499 hns3_err(hw, "Rx queue %u not available or not set up.", i);
500 goto start_rxqs_fail;
503 * Rx queues are enabled by default. Therefore, they need to
504 * be disabled when deferred_start is set. There is
505 * another master switch used to control the enabling of a pair
506 * of Tx and Rx queues. That master switch is disabled by
509 if (rxq->rx_deferred_start)
510 hns3_enable_rxq(rxq, false);
512 hns3_enable_rxq(rxq, true);
517 for (j = 0; j < i; j++) {
518 rxq = hw->data->rx_queues[j];
519 hns3_enable_rxq(rxq, false);
525 hns3_restore_tqp_enable_state(struct hns3_hw *hw)
527 struct hns3_rx_queue *rxq;
528 struct hns3_tx_queue *txq;
531 for (i = 0; i < hw->data->nb_rx_queues; i++) {
532 rxq = hw->data->rx_queues[i];
534 hns3_enable_rxq(rxq, rxq->enabled);
537 for (i = 0; i < hw->data->nb_tx_queues; i++) {
538 txq = hw->data->tx_queues[i];
540 hns3_enable_txq(txq, txq->enabled);
545 hns3_stop_all_txqs(struct rte_eth_dev *dev)
547 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
548 struct hns3_tx_queue *txq;
551 for (i = 0; i < dev->data->nb_tx_queues; i++) {
552 txq = hw->data->tx_queues[i];
555 hns3_enable_txq(txq, false);
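/* Enable or disable a task queue pair (TQP) via the firmware command queue. */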
560 hns3_tqp_enable(struct hns3_hw *hw, uint16_t queue_id, bool enable)
562 struct hns3_cfg_com_tqp_queue_cmd *req;
563 struct hns3_cmd_desc desc;
566 req = (struct hns3_cfg_com_tqp_queue_cmd *)desc.data;
568 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_COM_TQP_QUEUE, false);
569 req->tqp_id = rte_cpu_to_le_16(queue_id);
571 hns3_set_bit(req->enable, HNS3_TQP_ENABLE_B, enable ? 1 : 0);
573 ret = hns3_cmd_send(hw, &desc, 1);
575 hns3_err(hw, "TQP enable fail, ret = %d", ret);
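/*
 * Assert (enable == true) or deassert (enable == false) the reset of a
 * TQP through the firmware reset command.
 */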
581 hns3_send_reset_tqp_cmd(struct hns3_hw *hw, uint16_t queue_id, bool enable)
583 struct hns3_reset_tqp_queue_cmd *req;
584 struct hns3_cmd_desc desc;
587 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, false);
589 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
590 req->tqp_id = rte_cpu_to_le_16(queue_id);
591 hns3_set_bit(req->reset_req, HNS3_TQP_RESET_B, enable ? 1 : 0);
592 ret = hns3_cmd_send(hw, &desc, 1);
594 hns3_err(hw, "send tqp reset cmd error, queue_id = %u, "
595 "ret = %d", queue_id, ret);
601 hns3_get_tqp_reset_status(struct hns3_hw *hw, uint16_t queue_id,
602 uint8_t *reset_status)
604 struct hns3_reset_tqp_queue_cmd *req;
605 struct hns3_cmd_desc desc;
608 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, true);
610 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
611 req->tqp_id = rte_cpu_to_le_16(queue_id);
613 ret = hns3_cmd_send(hw, &desc, 1);
615 hns3_err(hw, "get tqp reset status error, queue_id = %u, "
616 "ret = %d.", queue_id, ret);
619 *reset_status = hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B);
624 hns3pf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
626 #define HNS3_TQP_RESET_TRY_MS 200
627 uint8_t reset_status;
631 ret = hns3_tqp_enable(hw, queue_id, false);
636 * In the current version, VF is not supported when the PF is driven by
637 * the DPDK driver: all task queue pairs are mapped to the PF function,
638 * so a PF queue id is equal to the global queue id in the PF range.
640 ret = hns3_send_reset_tqp_cmd(hw, queue_id, true);
642 hns3_err(hw, "Send reset tqp cmd fail, ret = %d", ret);
645 end = get_timeofday_ms() + HNS3_TQP_RESET_TRY_MS;
647 /* Wait for tqp hw reset */
648 rte_delay_ms(HNS3_POLL_RESPONE_MS);
649 ret = hns3_get_tqp_reset_status(hw, queue_id, &reset_status);
655 } while (get_timeofday_ms() < end);
659 hns3_err(hw, "reset tqp timeout, queue_id = %u, ret = %d",
664 ret = hns3_send_reset_tqp_cmd(hw, queue_id, false);
666 hns3_err(hw, "Failed to deassert the soft reset, ret = %d", ret);
671 hns3_send_reset_tqp_cmd(hw, queue_id, false);
676 hns3vf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
681 /* Disable the VF's queue before sending the queue reset message to the PF */
682 ret = hns3_tqp_enable(hw, queue_id, false);
686 memcpy(msg_data, &queue_id, sizeof(uint16_t));
688 ret = hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data,
689 sizeof(msg_data), true, NULL, 0);
691 hns3_err(hw, "fail to reset tqp, queue_id = %u, ret = %d.",
697 hns3_reset_tqp(struct hns3_adapter *hns, uint16_t queue_id)
699 struct hns3_hw *hw = &hns->hw;
702 return hns3vf_reset_tqp(hw, queue_id);
704 return hns3pf_reset_tqp(hw, queue_id);
708 hns3_reset_all_tqps(struct hns3_adapter *hns)
710 struct hns3_hw *hw = &hns->hw;
713 for (i = 0; i < hw->cfg_max_queues; i++) {
714 ret = hns3_reset_tqp(hns, i);
716 hns3_err(hw, "Failed to reset No.%d queue: %d", i, ret);
724 hns3_send_reset_queue_cmd(struct hns3_hw *hw, uint16_t queue_id,
725 enum hns3_ring_type queue_type, bool enable)
727 struct hns3_reset_tqp_queue_cmd *req;
728 struct hns3_cmd_desc desc;
732 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE_INDEP, false);
734 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
735 req->tqp_id = rte_cpu_to_le_16(queue_id);
736 queue_direction = queue_type == HNS3_RING_TYPE_TX ? 0 : 1;
737 req->queue_direction = rte_cpu_to_le_16(queue_direction);
738 hns3_set_bit(req->reset_req, HNS3_TQP_RESET_B, enable ? 1 : 0);
740 ret = hns3_cmd_send(hw, &desc, 1);
742 hns3_err(hw, "send queue reset cmd error, queue_id = %u, "
743 "queue_type = %s, ret = %d.", queue_id,
744 queue_type == HNS3_RING_TYPE_TX ? "Tx" : "Rx", ret);
749 hns3_get_queue_reset_status(struct hns3_hw *hw, uint16_t queue_id,
750 enum hns3_ring_type queue_type,
751 uint8_t *reset_status)
753 struct hns3_reset_tqp_queue_cmd *req;
754 struct hns3_cmd_desc desc;
758 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE_INDEP, true);
760 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
761 req->tqp_id = rte_cpu_to_le_16(queue_id);
762 queue_direction = queue_type == HNS3_RING_TYPE_TX ? 0 : 1;
763 req->queue_direction = rte_cpu_to_le_16(queue_direction);
765 ret = hns3_cmd_send(hw, &desc, 1);
767 hns3_err(hw, "get queue reset status error, queue_id = %u "
768 "queue_type = %s, ret = %d.", queue_id,
769 queue_type == HNS3_RING_TYPE_TX ? "Tx" : "Rx", ret);
773 *reset_status = hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B);
778 hns3_reset_queue(struct hns3_hw *hw, uint16_t queue_id,
779 enum hns3_ring_type queue_type)
781 #define HNS3_QUEUE_RESET_TRY_MS 200
782 struct hns3_tx_queue *txq;
783 struct hns3_rx_queue *rxq;
784 uint32_t reset_wait_times;
785 uint32_t max_wait_times;
786 uint8_t reset_status;
789 if (queue_type == HNS3_RING_TYPE_TX) {
790 txq = hw->data->tx_queues[queue_id];
791 hns3_enable_txq(txq, false);
793 rxq = hw->data->rx_queues[queue_id];
794 hns3_enable_rxq(rxq, false);
797 ret = hns3_send_reset_queue_cmd(hw, queue_id, queue_type, true);
799 hns3_err(hw, "send reset queue cmd fail, ret = %d.", ret);
803 reset_wait_times = 0;
804 max_wait_times = HNS3_QUEUE_RESET_TRY_MS / HNS3_POLL_RESPONE_MS;
805 while (reset_wait_times < max_wait_times) {
806 /* Wait for queue hw reset */
807 rte_delay_ms(HNS3_POLL_RESPONE_MS);
808 ret = hns3_get_queue_reset_status(hw, queue_id,
809 queue_type, &reset_status);
811 goto queue_reset_fail;
819 hns3_err(hw, "reset queue timeout, queue_id = %u, "
820 "queue_type = %s", queue_id,
821 queue_type == HNS3_RING_TYPE_TX ? "Tx" : "Rx");
823 goto queue_reset_fail;
826 ret = hns3_send_reset_queue_cmd(hw, queue_id, queue_type, false);
828 hns3_err(hw, "deassert queue reset fail, ret = %d.", ret);
833 hns3_send_reset_queue_cmd(hw, queue_id, queue_type, false);
838 hns3_get_tqp_intr_reg_offset(uint16_t tqp_intr_id)
842 /* An extended offset is needed to configure interrupts for queues beyond 64 */
843 if (tqp_intr_id < HNS3_MIN_EXT_TQP_INTR_ID)
844 reg_offset = HNS3_TQP_INTR_REG_BASE +
845 tqp_intr_id * HNS3_TQP_INTR_LOW_ORDER_OFFSET;
847 reg_offset = HNS3_TQP_INTR_EXT_REG_BASE +
848 tqp_intr_id / HNS3_MIN_EXT_TQP_INTR_ID *
849 HNS3_TQP_INTR_HIGH_ORDER_OFFSET +
850 tqp_intr_id % HNS3_MIN_EXT_TQP_INTR_ID *
851 HNS3_TQP_INTR_LOW_ORDER_OFFSET;
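/*
 * Set the GL (interrupt coalescing gap) value of a queue interrupt
 * vector; gl_idx selects one of the GL0/GL1/GL2 registers.
 */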
857 hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
858 uint8_t gl_idx, uint16_t gl_value)
860 uint32_t offset[] = {HNS3_TQP_INTR_GL0_REG,
861 HNS3_TQP_INTR_GL1_REG,
862 HNS3_TQP_INTR_GL2_REG};
863 uint32_t addr, value;
865 if (gl_idx >= RTE_DIM(offset) || gl_value > HNS3_TQP_INTR_GL_MAX)
868 addr = offset[gl_idx] + hns3_get_tqp_intr_reg_offset(queue_id);
869 if (hw->intr.gl_unit == HNS3_INTR_COALESCE_GL_UINT_1US)
870 value = gl_value | HNS3_TQP_INTR_GL_UNIT_1US;
872 value = HNS3_GL_USEC_TO_REG(gl_value);
874 hns3_write_dev(hw, addr, value);
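/* Set the RL (interrupt rate limiting) value of a queue interrupt vector. */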
878 hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id, uint16_t rl_value)
880 uint32_t addr, value;
882 if (rl_value > HNS3_TQP_INTR_RL_MAX)
885 addr = HNS3_TQP_INTR_RL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
886 value = HNS3_RL_USEC_TO_REG(rl_value);
888 value |= HNS3_TQP_INTR_RL_ENABLE_MASK;
890 hns3_write_dev(hw, addr, value);
894 hns3_set_queue_intr_ql(struct hns3_hw *hw, uint16_t queue_id, uint16_t ql_value)
899 * int_ql_max == 0 means the hardware does not support QL;
900 * configuring the QL registers is not permitted in that case,
903 if (hw->intr.int_ql_max == HNS3_INTR_QL_NONE)
906 addr = HNS3_TQP_INTR_TX_QL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
907 hns3_write_dev(hw, addr, ql_value);
909 addr = HNS3_TQP_INTR_RX_QL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
910 hns3_write_dev(hw, addr, ql_value);
914 hns3_queue_intr_enable(struct hns3_hw *hw, uint16_t queue_id, bool en)
916 uint32_t addr, value;
918 addr = HNS3_TQP_INTR_CTRL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
921 hns3_write_dev(hw, addr, value);
925 * Enable all Rx queue interrupts when in interrupt Rx mode.
926 * This API is called before enabling the Rx and Tx queues (in the normal
927 * start or reset recovery paths), to restore the hardware Rx queue interrupt enable bit that may have been cleared
931 hns3_dev_all_rx_queue_intr_enable(struct hns3_hw *hw, bool en)
933 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
934 uint16_t nb_rx_q = hw->data->nb_rx_queues;
937 if (dev->data->dev_conf.intr_conf.rxq == 0)
940 for (i = 0; i < nb_rx_q; i++)
941 hns3_queue_intr_enable(hw, i, en);
945 hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
947 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
948 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
949 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
951 if (dev->data->dev_conf.intr_conf.rxq == 0)
954 hns3_queue_intr_enable(hw, queue_id, true);
956 return rte_intr_ack(intr_handle);
960 hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
962 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
964 if (dev->data->dev_conf.intr_conf.rxq == 0)
967 hns3_queue_intr_enable(hw, queue_id, false);
973 hns3_init_rxq(struct hns3_adapter *hns, uint16_t idx)
975 struct hns3_hw *hw = &hns->hw;
976 struct hns3_rx_queue *rxq;
979 PMD_INIT_FUNC_TRACE();
981 rxq = (struct hns3_rx_queue *)hw->data->rx_queues[idx];
982 ret = hns3_alloc_rx_queue_mbufs(hw, rxq);
984 hns3_err(hw, "fail to alloc mbuf for Rx queue %u, ret = %d.",
989 rxq->next_to_use = 0;
990 rxq->rx_rearm_start = 0;
991 rxq->rx_free_hold = 0;
992 rxq->rx_rearm_nb = 0;
993 rxq->pkt_first_seg = NULL;
994 rxq->pkt_last_seg = NULL;
995 hns3_init_rx_queue_hw(rxq);
996 hns3_rxq_vec_setup(rxq);
1002 hns3_init_fake_rxq(struct hns3_adapter *hns, uint16_t idx)
1004 struct hns3_hw *hw = &hns->hw;
1005 struct hns3_rx_queue *rxq;
1007 rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[idx];
1008 rxq->next_to_use = 0;
1009 rxq->rx_free_hold = 0;
1010 rxq->rx_rearm_start = 0;
1011 rxq->rx_rearm_nb = 0;
1012 hns3_init_rx_queue_hw(rxq);
1016 hns3_init_txq(struct hns3_tx_queue *txq)
1018 struct hns3_desc *desc;
1022 desc = txq->tx_ring;
1023 for (i = 0; i < txq->nb_tx_desc; i++) {
1024 desc->tx.tp_fe_sc_vld_ra_ri = 0;
1028 txq->next_to_use = 0;
1029 txq->next_to_clean = 0;
1030 txq->tx_bd_ready = txq->nb_tx_desc - 1;
1031 hns3_init_tx_queue_hw(txq);
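/*
 * Bind every Tx queue to its traffic class by writing the TC index into
 * the queue's TC register, walking each enabled TC's TQP range.
 */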
1035 hns3_init_tx_ring_tc(struct hns3_adapter *hns)
1037 struct hns3_hw *hw = &hns->hw;
1038 struct hns3_tx_queue *txq;
1041 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
1042 struct hns3_tc_queue_info *tc_queue = &hw->tc_queue[i];
1045 if (!tc_queue->enable)
1048 for (j = 0; j < tc_queue->tqp_count; j++) {
1049 num = tc_queue->tqp_offset + j;
1050 txq = (struct hns3_tx_queue *)hw->data->tx_queues[num];
1054 hns3_write_dev(txq, HNS3_RING_TX_TC_REG, tc_queue->tc);
1060 hns3_init_rx_queues(struct hns3_adapter *hns)
1062 struct hns3_hw *hw = &hns->hw;
1063 struct hns3_rx_queue *rxq;
1067 /* Initialize RSS for queues */
1068 ret = hns3_config_rss(hns);
1070 hns3_err(hw, "failed to configure rss, ret = %d.", ret);
1074 for (i = 0; i < hw->data->nb_rx_queues; i++) {
1075 rxq = (struct hns3_rx_queue *)hw->data->rx_queues[i];
1077 hns3_err(hw, "Rx queue %u not available or not set up.", i);
1081 if (rxq->rx_deferred_start)
1084 ret = hns3_init_rxq(hns, i);
1086 hns3_err(hw, "failed to init Rx queue %u, ret = %d.", i,
1092 for (i = 0; i < hw->fkq_data.nb_fake_rx_queues; i++)
1093 hns3_init_fake_rxq(hns, i);
1098 for (j = 0; j < i; j++) {
1099 rxq = (struct hns3_rx_queue *)hw->data->rx_queues[j];
1100 hns3_rx_queue_release_mbufs(rxq);
1107 hns3_init_tx_queues(struct hns3_adapter *hns)
1109 struct hns3_hw *hw = &hns->hw;
1110 struct hns3_tx_queue *txq;
1113 for (i = 0; i < hw->data->nb_tx_queues; i++) {
1114 txq = (struct hns3_tx_queue *)hw->data->tx_queues[i];
1116 hns3_err(hw, "Tx queue %u not available or not set up.", i);
1120 if (txq->tx_deferred_start)
1125 for (i = 0; i < hw->fkq_data.nb_fake_tx_queues; i++) {
1126 txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[i];
1129 hns3_init_tx_ring_tc(hns);
1136 * Note: this only initializes and sets up the queues; it does not enable the TQPs.
1139 hns3_init_queues(struct hns3_adapter *hns, bool reset_queue)
1141 struct hns3_hw *hw = &hns->hw;
1145 ret = hns3_reset_all_tqps(hns);
1147 hns3_err(hw, "failed to reset all queues, ret = %d.",
1153 ret = hns3_init_rx_queues(hns);
1155 hns3_err(hw, "failed to init rx queues, ret = %d.", ret);
1159 ret = hns3_init_tx_queues(hns);
1161 hns3_dev_release_mbufs(hns);
1162 hns3_err(hw, "failed to init tx queues, ret = %d.", ret);
1169 hns3_start_tqps(struct hns3_hw *hw)
1171 struct hns3_tx_queue *txq;
1172 struct hns3_rx_queue *rxq;
1175 hns3_enable_all_queues(hw, true);
1177 for (i = 0; i < hw->data->nb_tx_queues; i++) {
1178 txq = hw->data->tx_queues[i];
1180 hw->data->tx_queue_state[i] =
1181 RTE_ETH_QUEUE_STATE_STARTED;
1184 for (i = 0; i < hw->data->nb_rx_queues; i++) {
1185 rxq = hw->data->rx_queues[i];
1187 hw->data->rx_queue_state[i] =
1188 RTE_ETH_QUEUE_STATE_STARTED;
1193 hns3_stop_tqps(struct hns3_hw *hw)
1197 hns3_enable_all_queues(hw, false);
1199 for (i = 0; i < hw->data->nb_tx_queues; i++)
1200 hw->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
1202 for (i = 0; i < hw->data->nb_rx_queues; i++)
1203 hw->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
1207 * Iterate over all Rx queues, and call the callback() function for each Rx
1211 * The target eth dev.
1212 * @param[in] callback
1213 * The function to call for each queue.
1214 * If the callback function returns a nonzero value, iteration stops and that value is returned.
1216 * The arguments to provide the callback function with.
1219 * 0 on success, otherwise with errno set.
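 *
 * An illustrative usage sketch ('count_cb' is a hypothetical callback,
 * not part of the driver):
 *   static int count_cb(struct hns3_rx_queue *rxq, void *arg)
 *   { (*(int *)arg)++; return 0; }
 *   int nb_rxq = 0;
 *   hns3_rxq_iterate(dev, count_cb, &nb_rxq);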
1222 hns3_rxq_iterate(struct rte_eth_dev *dev,
1223 int (*callback)(struct hns3_rx_queue *, void *), void *arg)
1228 if (dev->data->rx_queues == NULL)
1231 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1232 ret = callback(dev->data->rx_queues[i], arg);
1241 hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev,
1242 struct hns3_queue_info *q_info)
1244 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1245 const struct rte_memzone *rx_mz;
1246 struct hns3_rx_queue *rxq;
1247 unsigned int rx_desc;
1249 rxq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_rx_queue),
1250 RTE_CACHE_LINE_SIZE, q_info->socket_id);
1252 hns3_err(hw, "Failed to allocate memory for No.%u rx ring!",
1257 /* Allocate rx ring hardware descriptors. */
1258 rxq->queue_id = q_info->idx;
1259 rxq->nb_rx_desc = q_info->nb_desc;
1262 * Allocate a little more memory because the Rx vector functions
1263 * don't check boundaries each time.
1265 rx_desc = (rxq->nb_rx_desc + HNS3_DEFAULT_RX_BURST) *
1266 sizeof(struct hns3_desc);
1267 rx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
1268 rx_desc, HNS3_RING_BASE_ALIGN,
1270 if (rx_mz == NULL) {
1271 hns3_err(hw, "Failed to reserve DMA memory for No.%u rx ring!",
1273 hns3_rx_queue_release(rxq);
1277 rxq->rx_ring = (struct hns3_desc *)rx_mz->addr;
1278 rxq->rx_ring_phys_addr = rx_mz->iova;
1280 hns3_dbg(hw, "No.%u rx descriptors iova 0x%" PRIx64, q_info->idx,
1281 rxq->rx_ring_phys_addr);
1287 hns3_fake_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
1288 uint16_t nb_desc, unsigned int socket_id)
1290 struct hns3_adapter *hns = dev->data->dev_private;
1291 struct hns3_hw *hw = &hns->hw;
1292 struct hns3_queue_info q_info;
1293 struct hns3_rx_queue *rxq;
1296 if (hw->fkq_data.rx_queues[idx]) {
1297 hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);
1298 hw->fkq_data.rx_queues[idx] = NULL;
1302 q_info.socket_id = socket_id;
1303 q_info.nb_desc = nb_desc;
1304 q_info.type = "hns3 fake RX queue";
1305 q_info.ring_name = "rx_fake_ring";
1306 rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
1308 hns3_err(hw, "Failed to setup No.%u fake rx ring.", idx);
1312 /* No need to allocate sw_ring, because upper-layer applications don't use it */
1313 rxq->sw_ring = NULL;
1316 rxq->rx_deferred_start = false;
1317 rxq->port_id = dev->data->port_id;
1318 rxq->configured = true;
1319 nb_rx_q = dev->data->nb_rx_queues;
1320 rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
1321 (nb_rx_q + idx) * HNS3_TQP_REG_SIZE);
1322 rxq->rx_buf_len = HNS3_MIN_BD_BUF_SIZE;
1324 rte_spinlock_lock(&hw->lock);
1325 hw->fkq_data.rx_queues[idx] = rxq;
1326 rte_spinlock_unlock(&hw->lock);
1332 hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev,
1333 struct hns3_queue_info *q_info)
1335 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1336 const struct rte_memzone *tx_mz;
1337 struct hns3_tx_queue *txq;
1338 struct hns3_desc *desc;
1339 unsigned int tx_desc;
1342 txq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_tx_queue),
1343 RTE_CACHE_LINE_SIZE, q_info->socket_id);
1345 hns3_err(hw, "Failed to allocate memory for No.%u tx ring!",
1350 /* Allocate tx ring hardware descriptors. */
1351 txq->queue_id = q_info->idx;
1352 txq->nb_tx_desc = q_info->nb_desc;
1353 tx_desc = txq->nb_tx_desc * sizeof(struct hns3_desc);
1354 tx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
1355 tx_desc, HNS3_RING_BASE_ALIGN,
1357 if (tx_mz == NULL) {
1358 hns3_err(hw, "Failed to reserve DMA memory for No.%u tx ring!",
1360 hns3_tx_queue_release(txq);
1364 txq->tx_ring = (struct hns3_desc *)tx_mz->addr;
1365 txq->tx_ring_phys_addr = tx_mz->iova;
1367 hns3_dbg(hw, "No.%u tx descriptors iova 0x%" PRIx64, q_info->idx,
1368 txq->tx_ring_phys_addr);
1371 desc = txq->tx_ring;
1372 for (i = 0; i < txq->nb_tx_desc; i++) {
1373 desc->tx.tp_fe_sc_vld_ra_ri = 0;
1381 hns3_fake_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
1382 uint16_t nb_desc, unsigned int socket_id)
1384 struct hns3_adapter *hns = dev->data->dev_private;
1385 struct hns3_hw *hw = &hns->hw;
1386 struct hns3_queue_info q_info;
1387 struct hns3_tx_queue *txq;
1390 if (hw->fkq_data.tx_queues[idx] != NULL) {
1391 hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);
1392 hw->fkq_data.tx_queues[idx] = NULL;
1396 q_info.socket_id = socket_id;
1397 q_info.nb_desc = nb_desc;
1398 q_info.type = "hns3 fake TX queue";
1399 q_info.ring_name = "tx_fake_ring";
1400 txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
1402 hns3_err(hw, "Failed to setup No.%u fake tx ring.", idx);
1406 /* No need to allocate sw_ring, because upper-layer applications don't use it */
1407 txq->sw_ring = NULL;
1411 txq->tx_deferred_start = false;
1412 txq->port_id = dev->data->port_id;
1413 txq->configured = true;
1414 nb_tx_q = dev->data->nb_tx_queues;
1415 txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
1416 (nb_tx_q + idx) * HNS3_TQP_REG_SIZE);
1418 rte_spinlock_lock(&hw->lock);
1419 hw->fkq_data.tx_queues[idx] = txq;
1420 rte_spinlock_unlock(&hw->lock);
1426 hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
1428 uint16_t old_nb_queues = hw->fkq_data.nb_fake_rx_queues;
1432 if (hw->fkq_data.rx_queues == NULL && nb_queues != 0) {
1433 /* first time configuration */
1435 size = sizeof(hw->fkq_data.rx_queues[0]) * nb_queues;
1436 hw->fkq_data.rx_queues = rte_zmalloc("fake_rx_queues", size,
1437 RTE_CACHE_LINE_SIZE);
1438 if (hw->fkq_data.rx_queues == NULL) {
1439 hw->fkq_data.nb_fake_rx_queues = 0;
1442 } else if (hw->fkq_data.rx_queues != NULL && nb_queues != 0) {
1444 rxq = hw->fkq_data.rx_queues;
1445 for (i = nb_queues; i < old_nb_queues; i++)
1446 hns3_dev_rx_queue_release(rxq[i]);
1448 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
1449 RTE_CACHE_LINE_SIZE);
1452 if (nb_queues > old_nb_queues) {
1453 uint16_t new_qs = nb_queues - old_nb_queues;
1454 memset(rxq + old_nb_queues, 0, sizeof(rxq[0]) * new_qs);
1457 hw->fkq_data.rx_queues = rxq;
1458 } else if (hw->fkq_data.rx_queues != NULL && nb_queues == 0) {
1459 rxq = hw->fkq_data.rx_queues;
1460 for (i = nb_queues; i < old_nb_queues; i++)
1461 hns3_dev_rx_queue_release(rxq[i]);
1463 rte_free(hw->fkq_data.rx_queues);
1464 hw->fkq_data.rx_queues = NULL;
1467 hw->fkq_data.nb_fake_rx_queues = nb_queues;
1473 hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
1475 uint16_t old_nb_queues = hw->fkq_data.nb_fake_tx_queues;
1479 if (hw->fkq_data.tx_queues == NULL && nb_queues != 0) {
1480 /* first time configuration */
1482 size = sizeof(hw->fkq_data.tx_queues[0]) * nb_queues;
1483 hw->fkq_data.tx_queues = rte_zmalloc("fake_tx_queues", size,
1484 RTE_CACHE_LINE_SIZE);
1485 if (hw->fkq_data.tx_queues == NULL) {
1486 hw->fkq_data.nb_fake_tx_queues = 0;
1489 } else if (hw->fkq_data.tx_queues != NULL && nb_queues != 0) {
1491 txq = hw->fkq_data.tx_queues;
1492 for (i = nb_queues; i < old_nb_queues; i++)
1493 hns3_dev_tx_queue_release(txq[i]);
1494 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
1495 RTE_CACHE_LINE_SIZE);
1498 if (nb_queues > old_nb_queues) {
1499 uint16_t new_qs = nb_queues - old_nb_queues;
1500 memset(txq + old_nb_queues, 0, sizeof(txq[0]) * new_qs);
1503 hw->fkq_data.tx_queues = txq;
1504 } else if (hw->fkq_data.tx_queues != NULL && nb_queues == 0) {
1505 txq = hw->fkq_data.tx_queues;
1506 for (i = nb_queues; i < old_nb_queues; i++)
1507 hns3_dev_tx_queue_release(txq[i]);
1509 rte_free(hw->fkq_data.tx_queues);
1510 hw->fkq_data.tx_queues = NULL;
1512 hw->fkq_data.nb_fake_tx_queues = nb_queues;
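/*
 * Pad the device with fake (placeholder) queues so that the hardware
 * sees hw->cfg_max_queues Rx/Tx queue pairs even when the application
 * configures fewer Rx or Tx queues than that.
 */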
1518 hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
1521 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1522 uint16_t rx_need_add_nb_q;
1523 uint16_t tx_need_add_nb_q;
1528 /* Setup new number of fake RX/TX queues and reconfigure device. */
1529 rx_need_add_nb_q = hw->cfg_max_queues - nb_rx_q;
1530 tx_need_add_nb_q = hw->cfg_max_queues - nb_tx_q;
1531 ret = hns3_fake_rx_queue_config(hw, rx_need_add_nb_q);
1533 hns3_err(hw, "Failed to configure fake rx queues: %d", ret);
1537 ret = hns3_fake_tx_queue_config(hw, tx_need_add_nb_q);
1539 hns3_err(hw, "Failed to configure fake tx queues: %d", ret);
1540 goto cfg_fake_tx_q_fail;
1543 /* Allocate and set up fake RX queue per Ethernet port. */
1544 port_id = hw->data->port_id;
1545 for (q = 0; q < rx_need_add_nb_q; q++) {
1546 ret = hns3_fake_rx_queue_setup(dev, q, HNS3_MIN_RING_DESC,
1547 rte_eth_dev_socket_id(port_id));
1549 goto setup_fake_rx_q_fail;
1552 /* Allocate and set up fake TX queue per Ethernet port. */
1553 for (q = 0; q < tx_need_add_nb_q; q++) {
1554 ret = hns3_fake_tx_queue_setup(dev, q, HNS3_MIN_RING_DESC,
1555 rte_eth_dev_socket_id(port_id));
1557 goto setup_fake_tx_q_fail;
1562 setup_fake_tx_q_fail:
1563 setup_fake_rx_q_fail:
1564 (void)hns3_fake_tx_queue_config(hw, 0);
1566 (void)hns3_fake_rx_queue_config(hw, 0);
1572 hns3_dev_release_mbufs(struct hns3_adapter *hns)
1574 struct rte_eth_dev_data *dev_data = hns->hw.data;
1575 struct hns3_rx_queue *rxq;
1576 struct hns3_tx_queue *txq;
1579 if (dev_data->rx_queues)
1580 for (i = 0; i < dev_data->nb_rx_queues; i++) {
1581 rxq = dev_data->rx_queues[i];
1584 hns3_rx_queue_release_mbufs(rxq);
1587 if (dev_data->tx_queues)
1588 for (i = 0; i < dev_data->nb_tx_queues; i++) {
1589 txq = dev_data->tx_queues[i];
1592 hns3_tx_queue_release_mbufs(txq);
1597 hns3_rx_buf_len_calc(struct rte_mempool *mp, uint16_t *rx_buf_len)
1599 uint16_t vld_buf_size;
1600 uint16_t num_hw_specs;
1604 * The hns3 network engine only supports 4 typical buffer size
1605 * specifications, and different buffer sizes affect the max packet_len
1606 * and the max number of segments when HW GRO is turned on at the
1607 * receive side. The relationship between them is as follows:
1608 * rx_buf_size | max_gro_pkt_len | max_gro_nb_seg
1609 * ---------------------|-------------------|----------------
1610 * HNS3_4K_BD_BUF_SIZE | 60KB | 15
1611 * HNS3_2K_BD_BUF_SIZE | 62KB | 31
1612 * HNS3_1K_BD_BUF_SIZE | 63KB | 63
1613 * HNS3_512_BD_BUF_SIZE | 31.5KB | 63
1615 static const uint16_t hw_rx_buf_size[] = {
1616 HNS3_4K_BD_BUF_SIZE,
1617 HNS3_2K_BD_BUF_SIZE,
1618 HNS3_1K_BD_BUF_SIZE,
1619 HNS3_512_BD_BUF_SIZE
1622 vld_buf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
1623 RTE_PKTMBUF_HEADROOM);
1624 if (vld_buf_size < HNS3_MIN_BD_BUF_SIZE)
1627 num_hw_specs = RTE_DIM(hw_rx_buf_size);
1628 for (i = 0; i < num_hw_specs; i++) {
1629 if (vld_buf_size >= hw_rx_buf_size[i]) {
1630 *rx_buf_len = hw_rx_buf_size[i];
1638 hns3_rxq_conf_runtime_check(struct hns3_hw *hw, uint16_t buf_size,
1641 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
1642 struct rte_eth_rxmode *rxmode = &hw->data->dev_conf.rxmode;
1643 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
1644 uint16_t min_vec_bds;
1647 * The HNS3 hardware network engine enables scatter by default. If the
1648 * driver does not work in scattered mode, packets greater than buf_size
1649 * but smaller than max_rx_pkt_len are still distributed over multiple
1650 * BDs, a situation the driver cannot handle.
1652 if (!hw->data->scattered_rx && rxmode->max_rx_pkt_len > buf_size) {
1653 hns3_err(hw, "max_rx_pkt_len is not allowed to be set greater "
1654 "than rx_buf_len if scattered is off.");
1658 if (pkt_burst == hns3_recv_pkts_vec) {
1659 min_vec_bds = HNS3_DEFAULT_RXQ_REARM_THRESH +
1660 HNS3_DEFAULT_RX_BURST;
1661 if (nb_desc < min_vec_bds ||
1662 nb_desc % HNS3_DEFAULT_RXQ_REARM_THRESH) {
1663 hns3_err(hw, "if Rx burst mode is vector, "
1664 "the number of descriptors must be "
1665 "at least the min vector bds:%u, and "
1666 "divisible by the rxq rearm thresh:%u.",
1667 min_vec_bds, HNS3_DEFAULT_RXQ_REARM_THRESH);
1675 hns3_rx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_rxconf *conf,
1676 struct rte_mempool *mp, uint16_t nb_desc,
1681 if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||
1682 nb_desc % HNS3_ALIGN_RING_DESC) {
1683 hns3_err(hw, "Number (%u) of rx descriptors is invalid",
1688 if (conf->rx_drop_en == 0)
1689 hns3_warn(hw, "if no descriptors are available, packets are always "
1690 "dropped and rx_drop_en is fixed at 1");
1692 if (hns3_rx_buf_len_calc(mp, buf_size)) {
1693 hns3_err(hw, "rxq mbufs' data room size (%u) is not enough! "
1694 "minimal data room size (%u).",
1695 rte_pktmbuf_data_room_size(mp),
1696 HNS3_MIN_BD_BUF_SIZE + RTE_PKTMBUF_HEADROOM);
1700 if (hw->data->dev_started) {
1701 ret = hns3_rxq_conf_runtime_check(hw, *buf_size, nb_desc);
1703 hns3_err(hw, "Rx queue runtime setup fail.");
1712 hns3_get_tqp_reg_offset(uint16_t queue_id)
1714 uint32_t reg_offset;
1716 /* An extended offset is needed to configure queues beyond 1024 */
1717 if (queue_id < HNS3_MIN_EXTEND_QUEUE_ID)
1718 reg_offset = HNS3_TQP_REG_OFFSET + queue_id * HNS3_TQP_REG_SIZE;
1720 reg_offset = HNS3_TQP_REG_OFFSET + HNS3_TQP_EXT_REG_OFFSET +
1721 (queue_id - HNS3_MIN_EXTEND_QUEUE_ID) *
1728 hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
1729 unsigned int socket_id, const struct rte_eth_rxconf *conf,
1730 struct rte_mempool *mp)
1732 struct hns3_adapter *hns = dev->data->dev_private;
1733 struct hns3_hw *hw = &hns->hw;
1734 struct hns3_queue_info q_info;
1735 struct hns3_rx_queue *rxq;
1736 uint16_t rx_buf_size;
1740 ret = hns3_rx_queue_conf_check(hw, conf, mp, nb_desc, &rx_buf_size);
1744 if (dev->data->rx_queues[idx]) {
1745 hns3_rx_queue_release(dev->data->rx_queues[idx]);
1746 dev->data->rx_queues[idx] = NULL;
1750 q_info.socket_id = socket_id;
1751 q_info.nb_desc = nb_desc;
1752 q_info.type = "hns3 RX queue";
1753 q_info.ring_name = "rx_ring";
1755 rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
1758 "Failed to alloc mem and reserve DMA mem for rx ring!");
1763 rxq->ptype_tbl = &hns->ptype_tbl;
1765 rxq->rx_free_thresh = (conf->rx_free_thresh > 0) ?
1766 conf->rx_free_thresh : HNS3_DEFAULT_RX_FREE_THRESH;
1768 rxq->rx_deferred_start = conf->rx_deferred_start;
1769 if (rxq->rx_deferred_start && !hns3_dev_indep_txrx_supported(hw)) {
1770 hns3_warn(hw, "deferred start is not supported.");
1771 rxq->rx_deferred_start = false;
1774 rx_entry_len = (rxq->nb_rx_desc + HNS3_DEFAULT_RX_BURST) *
1775 sizeof(struct hns3_entry);
1776 rxq->sw_ring = rte_zmalloc_socket("hns3 RX sw ring", rx_entry_len,
1777 RTE_CACHE_LINE_SIZE, socket_id);
1778 if (rxq->sw_ring == NULL) {
1779 hns3_err(hw, "Failed to allocate memory for rx sw ring!");
1780 hns3_rx_queue_release(rxq);
1784 rxq->next_to_use = 0;
1785 rxq->rx_free_hold = 0;
1786 rxq->rx_rearm_start = 0;
1787 rxq->rx_rearm_nb = 0;
1788 rxq->pkt_first_seg = NULL;
1789 rxq->pkt_last_seg = NULL;
1790 rxq->port_id = dev->data->port_id;
1792 * For hns3 PF device, if the VLAN mode is HW_SHIFT_AND_DISCARD_MODE,
1793 * the pvid_sw_discard_en in the queue struct should not be changed,
1794 * because PVID-related operations do not need to be processed by PMD
1795 * driver. For hns3 VF device, whether it needs to process PVID depends
1796 * on the configuration of the PF kernel mode netdevice driver. The
1797 * related PF configuration is delivered through the mailbox and finally
1798 * reflected in port_base_vlan_cfg.
1800 if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
1801 rxq->pvid_sw_discard_en = hw->port_base_vlan_cfg.state ==
1802 HNS3_PORT_BASE_VLAN_ENABLE;
1804 rxq->pvid_sw_discard_en = false;
1805 rxq->configured = true;
1808 rxq->io_base = (void *)((char *)hw->io_base +
1809 hns3_get_tqp_reg_offset(idx));
1810 rxq->io_head_reg = (volatile void *)((char *)rxq->io_base +
1811 HNS3_RING_RX_HEAD_REG);
1812 rxq->rx_buf_len = rx_buf_size;
1813 memset(&rxq->basic_stats, 0, sizeof(struct hns3_rx_basic_stats));
1814 memset(&rxq->err_stats, 0, sizeof(struct hns3_rx_bd_errors_stats));
1815 memset(&rxq->dfx_stats, 0, sizeof(struct hns3_rx_dfx_stats));
1817 /* The CRC length set here is used for amending the packet length */
1818 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
1819 rxq->crc_len = RTE_ETHER_CRC_LEN;
1823 rxq->bulk_mbuf_num = 0;
1825 rte_spinlock_lock(&hw->lock);
1826 dev->data->rx_queues[idx] = rxq;
1827 rte_spinlock_unlock(&hw->lock);
1833 hns3_rx_scattered_reset(struct rte_eth_dev *dev)
1835 struct hns3_adapter *hns = dev->data->dev_private;
1836 struct hns3_hw *hw = &hns->hw;
1839 dev->data->scattered_rx = false;
1843 hns3_rx_scattered_calc(struct rte_eth_dev *dev)
1845 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1846 struct hns3_adapter *hns = dev->data->dev_private;
1847 struct hns3_hw *hw = &hns->hw;
1848 struct hns3_rx_queue *rxq;
1851 if (dev->data->rx_queues == NULL)
1854 for (queue_id = 0; queue_id < dev->data->nb_rx_queues; queue_id++) {
1855 rxq = dev->data->rx_queues[queue_id];
1856 if (hw->rx_buf_len == 0)
1857 hw->rx_buf_len = rxq->rx_buf_len;
1859 hw->rx_buf_len = RTE_MIN(hw->rx_buf_len,
1863 if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SCATTER ||
1864 dev_conf->rxmode.max_rx_pkt_len > hw->rx_buf_len)
1865 dev->data->scattered_rx = true;
1869 hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1871 static const uint32_t ptypes[] = {
1873 RTE_PTYPE_L2_ETHER_VLAN,
1874 RTE_PTYPE_L2_ETHER_QINQ,
1875 RTE_PTYPE_L2_ETHER_LLDP,
1876 RTE_PTYPE_L2_ETHER_ARP,
1878 RTE_PTYPE_L3_IPV4_EXT,
1880 RTE_PTYPE_L3_IPV6_EXT,
1886 RTE_PTYPE_TUNNEL_GRE,
1887 RTE_PTYPE_INNER_L2_ETHER,
1888 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1889 RTE_PTYPE_INNER_L2_ETHER_QINQ,
1890 RTE_PTYPE_INNER_L3_IPV4,
1891 RTE_PTYPE_INNER_L3_IPV6,
1892 RTE_PTYPE_INNER_L3_IPV4_EXT,
1893 RTE_PTYPE_INNER_L3_IPV6_EXT,
1894 RTE_PTYPE_INNER_L4_UDP,
1895 RTE_PTYPE_INNER_L4_TCP,
1896 RTE_PTYPE_INNER_L4_SCTP,
1897 RTE_PTYPE_INNER_L4_ICMP,
1898 RTE_PTYPE_TUNNEL_VXLAN,
1899 RTE_PTYPE_TUNNEL_NVGRE,
1903 if (dev->rx_pkt_burst == hns3_recv_pkts ||
1904 dev->rx_pkt_burst == hns3_recv_scattered_pkts ||
1905 dev->rx_pkt_burst == hns3_recv_pkts_vec ||
1906 dev->rx_pkt_burst == hns3_recv_pkts_vec_sve)
1913 hns3_init_non_tunnel_ptype_tbl(struct hns3_ptype_table *tbl)
1915 tbl->l2l3table[0][0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
1916 tbl->l2l3table[0][1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
1917 tbl->l2l3table[0][2] = RTE_PTYPE_L2_ETHER_ARP;
1918 tbl->l2l3table[0][3] = RTE_PTYPE_L2_ETHER;
1919 tbl->l2l3table[0][4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT;
1920 tbl->l2l3table[0][5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT;
1921 tbl->l2l3table[0][6] = RTE_PTYPE_L2_ETHER_LLDP;
1922 tbl->l2l3table[0][15] = RTE_PTYPE_L2_ETHER;
1924 tbl->l2l3table[1][0] = RTE_PTYPE_L2_ETHER_VLAN | RTE_PTYPE_L3_IPV4;
1925 tbl->l2l3table[1][1] = RTE_PTYPE_L2_ETHER_VLAN | RTE_PTYPE_L3_IPV6;
1926 tbl->l2l3table[1][2] = RTE_PTYPE_L2_ETHER_ARP;
1927 tbl->l2l3table[1][3] = RTE_PTYPE_L2_ETHER_VLAN;
1928 tbl->l2l3table[1][4] = RTE_PTYPE_L2_ETHER_VLAN | RTE_PTYPE_L3_IPV4_EXT;
1929 tbl->l2l3table[1][5] = RTE_PTYPE_L2_ETHER_VLAN | RTE_PTYPE_L3_IPV6_EXT;
1930 tbl->l2l3table[1][6] = RTE_PTYPE_L2_ETHER_LLDP;
1931 tbl->l2l3table[1][15] = RTE_PTYPE_L2_ETHER_VLAN;
1933 tbl->l2l3table[2][0] = RTE_PTYPE_L2_ETHER_QINQ | RTE_PTYPE_L3_IPV4;
1934 tbl->l2l3table[2][1] = RTE_PTYPE_L2_ETHER_QINQ | RTE_PTYPE_L3_IPV6;
1935 tbl->l2l3table[2][2] = RTE_PTYPE_L2_ETHER_ARP;
1936 tbl->l2l3table[2][3] = RTE_PTYPE_L2_ETHER_QINQ;
1937 tbl->l2l3table[2][4] = RTE_PTYPE_L2_ETHER_QINQ | RTE_PTYPE_L3_IPV4_EXT;
1938 tbl->l2l3table[2][5] = RTE_PTYPE_L2_ETHER_QINQ | RTE_PTYPE_L3_IPV6_EXT;
1939 tbl->l2l3table[2][6] = RTE_PTYPE_L2_ETHER_LLDP;
1940 tbl->l2l3table[2][15] = RTE_PTYPE_L2_ETHER_QINQ;
1942 tbl->l4table[0] = RTE_PTYPE_L4_UDP;
1943 tbl->l4table[1] = RTE_PTYPE_L4_TCP;
1944 tbl->l4table[2] = RTE_PTYPE_TUNNEL_GRE;
1945 tbl->l4table[3] = RTE_PTYPE_L4_SCTP;
1946 tbl->l4table[4] = RTE_PTYPE_L4_IGMP;
1947 tbl->l4table[5] = RTE_PTYPE_L4_ICMP;
1951 hns3_init_tunnel_ptype_tbl(struct hns3_ptype_table *tbl)
1953 tbl->inner_l2table[0] = RTE_PTYPE_INNER_L2_ETHER;
1954 tbl->inner_l2table[1] = RTE_PTYPE_INNER_L2_ETHER_VLAN;
1955 tbl->inner_l2table[2] = RTE_PTYPE_INNER_L2_ETHER_QINQ;
1957 tbl->inner_l3table[0] = RTE_PTYPE_INNER_L3_IPV4;
1958 tbl->inner_l3table[1] = RTE_PTYPE_INNER_L3_IPV6;
1959 /* There is no ptype for inner ARP/RARP */
1960 tbl->inner_l3table[2] = RTE_PTYPE_UNKNOWN;
1961 tbl->inner_l3table[3] = RTE_PTYPE_UNKNOWN;
1962 tbl->inner_l3table[4] = RTE_PTYPE_INNER_L3_IPV4_EXT;
1963 tbl->inner_l3table[5] = RTE_PTYPE_INNER_L3_IPV6_EXT;
1965 tbl->inner_l4table[0] = RTE_PTYPE_INNER_L4_UDP;
1966 tbl->inner_l4table[1] = RTE_PTYPE_INNER_L4_TCP;
1967 /* There is no ptype for inner GRE */
1968 tbl->inner_l4table[2] = RTE_PTYPE_UNKNOWN;
1969 tbl->inner_l4table[3] = RTE_PTYPE_INNER_L4_SCTP;
1970 /* There is no ptype for inner IGMP */
1971 tbl->inner_l4table[4] = RTE_PTYPE_UNKNOWN;
1972 tbl->inner_l4table[5] = RTE_PTYPE_INNER_L4_ICMP;
1974 tbl->ol2table[0] = RTE_PTYPE_L2_ETHER;
1975 tbl->ol2table[1] = RTE_PTYPE_L2_ETHER_VLAN;
1976 tbl->ol2table[2] = RTE_PTYPE_L2_ETHER_QINQ;
1978 tbl->ol3table[0] = RTE_PTYPE_L3_IPV4;
1979 tbl->ol3table[1] = RTE_PTYPE_L3_IPV6;
1980 tbl->ol3table[2] = RTE_PTYPE_UNKNOWN;
1981 tbl->ol3table[3] = RTE_PTYPE_UNKNOWN;
1982 tbl->ol3table[4] = RTE_PTYPE_L3_IPV4_EXT;
1983 tbl->ol3table[5] = RTE_PTYPE_L3_IPV6_EXT;
1985 tbl->ol4table[0] = RTE_PTYPE_UNKNOWN;
1986 tbl->ol4table[1] = RTE_PTYPE_TUNNEL_VXLAN;
1987 tbl->ol4table[2] = RTE_PTYPE_TUNNEL_NVGRE;
1991 hns3_init_rx_ptype_tble(struct rte_eth_dev *dev)
1993 struct hns3_adapter *hns = dev->data->dev_private;
1994 struct hns3_ptype_table *tbl = &hns->ptype_tbl;
1996 memset(tbl, 0, sizeof(*tbl));
1998 hns3_init_non_tunnel_ptype_tbl(tbl);
1999 hns3_init_tunnel_ptype_tbl(tbl);
2003 hns3_rxd_to_vlan_tci(struct hns3_rx_queue *rxq, struct rte_mbuf *mb,
2004 uint32_t l234_info, const struct hns3_desc *rxd)
2006 #define HNS3_STRP_STATUS_NUM 0x4
2008 #define HNS3_NO_STRP_VLAN_VLD 0x0
2009 #define HNS3_INNER_STRP_VLAN_VLD 0x1
2010 #define HNS3_OUTER_STRP_VLAN_VLD 0x2
2011 uint32_t strip_status;
2012 uint32_t report_mode;
2015 * Due to a HW limitation, the VLAN tag is always inserted into the RX
2016 * descriptor when the tag is stripped from the packet, so the driver
2017 * needs to determine which tag to report to the mbuf, according to the
2018 * PVID configuration and the VLAN stripped status.
2020 static const uint32_t report_type[][HNS3_STRP_STATUS_NUM] = {
2022 HNS3_NO_STRP_VLAN_VLD,
2023 HNS3_OUTER_STRP_VLAN_VLD,
2024 HNS3_INNER_STRP_VLAN_VLD,
2025 HNS3_OUTER_STRP_VLAN_VLD
2028 HNS3_NO_STRP_VLAN_VLD,
2029 HNS3_NO_STRP_VLAN_VLD,
2030 HNS3_NO_STRP_VLAN_VLD,
2031 HNS3_INNER_STRP_VLAN_VLD
2034 strip_status = hns3_get_field(l234_info, HNS3_RXD_STRP_TAGP_M,
2035 HNS3_RXD_STRP_TAGP_S);
2036 report_mode = report_type[rxq->pvid_sw_discard_en][strip_status];
2037 switch (report_mode) {
2038 case HNS3_NO_STRP_VLAN_VLD:
2041 case HNS3_INNER_STRP_VLAN_VLD:
2042 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
2043 mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.vlan_tag);
2045 case HNS3_OUTER_STRP_VLAN_VLD:
2046 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
2047 mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.ot_vlan_tag);
2056 recalculate_data_len(struct rte_mbuf *first_seg, struct rte_mbuf *last_seg,
2057 struct rte_mbuf *rxm, struct hns3_rx_queue *rxq,
2060 uint8_t crc_len = rxq->crc_len;
2062 if (data_len <= crc_len) {
2063 rte_pktmbuf_free_seg(rxm);
2064 first_seg->nb_segs--;
2065 last_seg->data_len = (uint16_t)(last_seg->data_len -
2066 (crc_len - data_len));
2067 last_seg->next = NULL;
2069 rxm->data_len = (uint16_t)(data_len - crc_len);
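/*
 * Allocate one mbuf for Rx refill: serve from the queue's bulk cache,
 * refill the cache from the mempool when it is empty, and fall back to
 * a single raw allocation if the bulk get fails.
 */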
2072 static inline struct rte_mbuf *
2073 hns3_rx_alloc_buffer(struct hns3_rx_queue *rxq)
2077 if (likely(rxq->bulk_mbuf_num > 0))
2078 return rxq->bulk_mbuf[--rxq->bulk_mbuf_num];
2080 ret = rte_mempool_get_bulk(rxq->mb_pool, (void **)rxq->bulk_mbuf,
2081 HNS3_BULK_ALLOC_MBUF_NUM);
2082 if (likely(ret == 0)) {
2083 rxq->bulk_mbuf_num = HNS3_BULK_ALLOC_MBUF_NUM;
2084 return rxq->bulk_mbuf[--rxq->bulk_mbuf_num];
2086 return rte_mbuf_raw_alloc(rxq->mb_pool);
2090 hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
2092 volatile struct hns3_desc *rx_ring; /* RX ring (desc) */
2093 volatile struct hns3_desc *rxdp; /* pointer of the current desc */
2094 struct hns3_rx_queue *rxq; /* RX queue */
2095 struct hns3_entry *sw_ring;
2096 struct hns3_entry *rxe;
2097 struct hns3_desc rxd;
2098 struct rte_mbuf *nmb; /* pointer of the new mbuf */
2099 struct rte_mbuf *rxm;
2100 uint32_t bd_base_info;
2113 rx_ring = rxq->rx_ring;
2114 sw_ring = rxq->sw_ring;
2115 rx_id = rxq->next_to_use;
2117 while (nb_rx < nb_pkts) {
2118 rxdp = &rx_ring[rx_id];
2119 bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
2120 if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
2123 rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -
2124 (1u << HNS3_RXD_VLD_B)];
2126 nmb = hns3_rx_alloc_buffer(rxq);
2127 if (unlikely(nmb == NULL)) {
2130 port_id = rxq->port_id;
2131 rte_eth_devices[port_id].data->rx_mbuf_alloc_failed++;
2136 rxe = &sw_ring[rx_id];
2138 if (unlikely(rx_id == rxq->nb_rx_desc))
2141 rte_prefetch0(sw_ring[rx_id].mbuf);
2142 if ((rx_id & HNS3_RX_RING_PREFETCH_MASK) == 0) {
2143 rte_prefetch0(&rx_ring[rx_id]);
2144 rte_prefetch0(&sw_ring[rx_id]);
2150 dma_addr = rte_mbuf_data_iova_default(nmb);
2151 rxdp->addr = rte_cpu_to_le_64(dma_addr);
2152 rxdp->rx.bd_base_info = 0;
2154 rxm->data_off = RTE_PKTMBUF_HEADROOM;
2155 rxm->pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.rx.pkt_len)) -
2157 rxm->data_len = rxm->pkt_len;
2158 rxm->port = rxq->port_id;
2159 rxm->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash);
2160 rxm->ol_flags = PKT_RX_RSS_HASH;
2161 if (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) {
2163 rte_le_to_cpu_16(rxd.rx.fd_id);
2164 rxm->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
2169 /* Load the remaining descriptor data and extract the necessary fields */
2170 l234_info = rte_le_to_cpu_32(rxd.rx.l234_info);
2171 ol_info = rte_le_to_cpu_32(rxd.rx.ol_info);
2172 ret = hns3_handle_bdinfo(rxq, rxm, bd_base_info,
2173 l234_info, &cksum_err);
2177 rxm->packet_type = hns3_rx_calc_ptype(rxq, l234_info, ol_info);
2179 if (likely(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
2180 hns3_rx_set_cksum_flag(rxm, rxm->packet_type,
2182 hns3_rxd_to_vlan_tci(rxq, rxm, l234_info, &rxd);
2184 rx_pkts[nb_rx++] = rxm;
2187 rte_pktmbuf_free(rxm);
2190 rxq->next_to_use = rx_id;
2191 rxq->rx_free_hold += nb_rx_bd;
2192 if (rxq->rx_free_hold > rxq->rx_free_thresh) {
2193 hns3_write_reg_opt(rxq->io_head_reg, rxq->rx_free_hold);
2194 rxq->rx_free_hold = 0;
2201 hns3_recv_scattered_pkts(void *rx_queue,
2202 struct rte_mbuf **rx_pkts,
2205 volatile struct hns3_desc *rx_ring; /* RX ring (desc) */
2206 volatile struct hns3_desc *rxdp; /* pointer of the current desc */
2207 struct hns3_rx_queue *rxq; /* RX queue */
2208 struct hns3_entry *sw_ring;
2209 struct hns3_entry *rxe;
2210 struct rte_mbuf *first_seg;
2211 struct rte_mbuf *last_seg;
2212 struct hns3_desc rxd;
2213 struct rte_mbuf *nmb; /* pointer of the new mbuf */
2214 struct rte_mbuf *rxm;
2215 struct rte_eth_dev *dev;
2216 uint32_t bd_base_info;
2231 rx_id = rxq->next_to_use;
2232 rx_ring = rxq->rx_ring;
2233 sw_ring = rxq->sw_ring;
2234 first_seg = rxq->pkt_first_seg;
2235 last_seg = rxq->pkt_last_seg;
2237 while (nb_rx < nb_pkts) {
2238 rxdp = &rx_ring[rx_id];
2239 bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
2240 if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
2244 * The interaction between software and hardware when
2245 * receiving a new packet in the hns3 network engine:
2246 * 1. The hardware network engine first writes the packet content
2247 * to the memory pointed to by the 'addr' field of the Rx Buffer
2248 * Descriptor, then fills the result of parsing the
2249 * packet, including the valid field, into the Rx Buffer
2250 * Descriptor in one write operation.
2251 * 2. The driver reads the Rx BD's valid field in a loop to check
2252 * whether it is valid; if so, it assigns a new address to
2253 * the addr field, clears the valid field, gets the other
2254 * information of the packet by parsing the Rx BD's other fields,
2255 * and finally writes back the number of Rx BDs processed by the
2256 * driver to the HNS3_RING_RX_HEAD_REG register to inform
2258 * In the above process, the ordering is very important. We must
2259 * make sure that the CPU reads the Rx BD's other fields only after the
2262 * There are two types of re-ordering: compiler re-ordering and
2263 * CPU re-ordering under the ARMv8 architecture.
2264 * 1. We use volatile to deal with compiler re-ordering, which is
2265 * why rx_ring/rxdp are defined with volatile.
2266 * 2. A memory barrier is the common way to deal with CPU
2267 * re-ordering, but its cost is high.
2269 * To avoid the high cost of a memory barrier, we instead rely on
2270 * the data dependency ordering of the ARMv8 architecture,
2273 * instr02: load B <- A
2274 * instr02 will always execute after instr01.
2276 * To construct the data dependency ordering, we use the
2277 * following assignment:
2278 * rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -
2279 * (1u<<HNS3_RXD_VLD_B)]
2280 * Using gcc compiler under the ARMv8 architecture, the related
2281 * assembly code example as follows:
2282 * note: (1u << HNS3_RXD_VLD_B) equals 0x10
2283 * instr01: ldr w26, [x22, #28] --read bd_base_info
2284 * instr02: and w0, w26, #0x10 --calc bd_base_info & 0x10
2285 * instr03: sub w0, w0, #0x10 --calc (bd_base_info &
2287 * instr04: add x0, x22, x0, lsl #5 --calc copy source addr
2288 * instr05: ldp x2, x3, [x0]
2289 * instr06: stp x2, x3, [x29, #256] --copy BD's [0 ~ 15]B
2290 * instr07: ldp x4, x5, [x0, #16]
2291 * instr08: stp x4, x5, [x29, #272] --copy BD's [16 ~ 31]B
2292 * instr05~08 depend on x0's value, x0 depends on w26's
2293 * value, and w26 holds the bd_base_info; this forms the data
2294 * dependency ordering.
2295 * note: if the BD is valid, (bd_base_info & (1u<<HNS3_RXD_VLD_B)) -
2296 * (1u<<HNS3_RXD_VLD_B) will always be zero, so the
2297 * assignment is correct.
2299 * So we use the data dependency ordering instead of memory
2300 * barrier to improve receive performance.
2302 rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -
2303 (1u << HNS3_RXD_VLD_B)];
2305 nmb = hns3_rx_alloc_buffer(rxq);
2306 if (unlikely(nmb == NULL)) {
2307 dev = &rte_eth_devices[rxq->port_id];
2308 dev->data->rx_mbuf_alloc_failed++;
2313 rxe = &sw_ring[rx_id];
2315 if (unlikely(rx_id == rxq->nb_rx_desc))
2318 rte_prefetch0(sw_ring[rx_id].mbuf);
2319 if ((rx_id & HNS3_RX_RING_PREFETCH_MASK) == 0) {
2320 rte_prefetch0(&rx_ring[rx_id]);
2321 rte_prefetch0(&sw_ring[rx_id]);
2327 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
2328 rxdp->rx.bd_base_info = 0;
2329 rxdp->addr = dma_addr;
2331 if (first_seg == NULL) {
2333 first_seg->nb_segs = 1;
2335 first_seg->nb_segs++;
2336 last_seg->next = rxm;
2339 rxm->data_off = RTE_PKTMBUF_HEADROOM;
2340 rxm->data_len = rte_le_to_cpu_16(rxd.rx.size);
2342 if (!(bd_base_info & BIT(HNS3_RXD_FE_B))) {
2349 * This is the last buffer of the received packet. The packet
2350 * length from the buffer descriptor may contain the CRC length,
2351 * which should be subtracted from it, and likewise from the data length.
2353 first_seg->pkt_len = rte_le_to_cpu_16(rxd.rx.pkt_len);
2356 * This is the last buffer of the received packet. If the CRC
2357 * is not stripped by the hardware:
2358 * - Subtract the CRC length from the total packet length.
2359 * - If the last buffer only contains the whole CRC or a part
2360 * of it, free the mbuf associated with the last buffer. If part
2361 * of the CRC is also contained in the previous mbuf, subtract
2362 * the length of that CRC part from the data length of the
2366 if (unlikely(rxq->crc_len > 0)) {
2367 first_seg->pkt_len -= rxq->crc_len;
2368 recalculate_data_len(first_seg, last_seg, rxm, rxq,
2372 first_seg->port = rxq->port_id;
2373 first_seg->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash);
2374 first_seg->ol_flags = PKT_RX_RSS_HASH;
2375 if (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) {
2376 first_seg->hash.fdir.hi =
2377 rte_le_to_cpu_16(rxd.rx.fd_id);
2378 first_seg->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
2381 gro_size = hns3_get_field(bd_base_info, HNS3_RXD_GRO_SIZE_M,
2382 HNS3_RXD_GRO_SIZE_S);
2383 if (gro_size != 0) {
2384 first_seg->ol_flags |= PKT_RX_LRO;
2385 first_seg->tso_segsz = gro_size;
2388 l234_info = rte_le_to_cpu_32(rxd.rx.l234_info);
2389 ol_info = rte_le_to_cpu_32(rxd.rx.ol_info);
2390 ret = hns3_handle_bdinfo(rxq, first_seg, bd_base_info,
2391 l234_info, &cksum_err);
2395 first_seg->packet_type = hns3_rx_calc_ptype(rxq,
2396 l234_info, ol_info);
2398 if (bd_base_info & BIT(HNS3_RXD_L3L4P_B))
2399 hns3_rx_set_cksum_flag(first_seg,
2400 first_seg->packet_type,
2402 hns3_rxd_to_vlan_tci(rxq, first_seg, l234_info, &rxd);
2404 rx_pkts[nb_rx++] = first_seg;
2408 rte_pktmbuf_free(first_seg);
2412 rxq->next_to_use = rx_id;
2413 rxq->pkt_first_seg = first_seg;
2414 rxq->pkt_last_seg = last_seg;
2416 rxq->rx_free_hold += nb_rx_bd;
2417 if (rxq->rx_free_hold > rxq->rx_free_thresh) {
2418 hns3_write_reg_opt(rxq->io_head_reg, rxq->rx_free_hold);
2419 rxq->rx_free_hold = 0;
2426 hns3_rxq_vec_setup(__rte_unused struct hns3_rx_queue *rxq)
2431 hns3_rx_check_vec_support(__rte_unused struct rte_eth_dev *dev)
2437 hns3_recv_pkts_vec(__rte_unused void *rx_queue,
2438 __rte_unused struct rte_mbuf **rx_pkts,
2439 __rte_unused uint16_t nb_pkts)
2445 hns3_recv_pkts_vec_sve(__rte_unused void *rx_queue,
2446 __rte_unused struct rte_mbuf **rx_pkts,
2447 __rte_unused uint16_t nb_pkts)
2453 hns3_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
2454 struct rte_eth_burst_mode *mode)
2456 static const struct {
2457 eth_rx_burst_t pkt_burst;
2460 { hns3_recv_pkts, "Scalar" },
2461 { hns3_recv_scattered_pkts, "Scalar Scattered" },
2462 { hns3_recv_pkts_vec, "Vector Neon" },
2463 { hns3_recv_pkts_vec_sve, "Vector Sve" },
2466 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
2470 for (i = 0; i < RTE_DIM(burst_infos); i++) {
2471 if (pkt_burst == burst_infos[i].pkt_burst) {
2472 snprintf(mode->info, sizeof(mode->info), "%s",
2473 burst_infos[i].info);
2483 hns3_check_sve_support(void)
2485 #if defined(RTE_ARCH_ARM64) && defined(__ARM_FEATURE_SVE)
2486 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SVE))
2492 static eth_rx_burst_t
2493 hns3_get_rx_function(struct rte_eth_dev *dev)
2495 struct hns3_adapter *hns = dev->data->dev_private;
2496 uint64_t offloads = dev->data->dev_conf.rxmode.offloads;
2498 if (hns->rx_vec_allowed && hns3_rx_check_vec_support(dev) == 0)
2499 return hns3_check_sve_support() ? hns3_recv_pkts_vec_sve :
2502 if (hns->rx_simple_allowed && !dev->data->scattered_rx &&
2503 (offloads & DEV_RX_OFFLOAD_TCP_LRO) == 0)
2504 return hns3_recv_pkts;
2506 return hns3_recv_scattered_pkts;
2510 hns3_tx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_txconf *conf,
2511 uint16_t nb_desc, uint16_t *tx_rs_thresh,
2512 uint16_t *tx_free_thresh, uint16_t idx)
2514 #define HNS3_TX_RS_FREE_THRESH_GAP 8
2515 uint16_t rs_thresh, free_thresh, fast_free_thresh;
2517 if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||
2518 nb_desc % HNS3_ALIGN_RING_DESC) {
2519 hns3_err(hw, "number (%u) of tx descriptors is invalid",
2524 rs_thresh = (conf->tx_rs_thresh > 0) ?
2525 conf->tx_rs_thresh : HNS3_DEFAULT_TX_RS_THRESH;
2526 free_thresh = (conf->tx_free_thresh > 0) ?
2527 conf->tx_free_thresh : HNS3_DEFAULT_TX_FREE_THRESH;
2528 if (rs_thresh + free_thresh > nb_desc || nb_desc % rs_thresh ||
2529 rs_thresh >= nb_desc - HNS3_TX_RS_FREE_THRESH_GAP ||
2530 free_thresh >= nb_desc - HNS3_TX_RS_FREE_THRESH_GAP) {
2531 hns3_err(hw, "tx_rs_thresh (%u) tx_free_thresh (%u) nb_desc "
2532 "(%u) of tx descriptors for port=%u queue=%u check "
2534 rs_thresh, free_thresh, nb_desc, hw->data->port_id,
2539 if (conf->tx_free_thresh == 0) {
2540 /* Fast free of Tx memory buffers to improve the cache hit rate */
2541 fast_free_thresh = nb_desc - rs_thresh;
2542 if (fast_free_thresh >=
2543 HNS3_TX_FAST_FREE_AHEAD + HNS3_DEFAULT_TX_FREE_THRESH)
2544 free_thresh = fast_free_thresh -
2545 HNS3_TX_FAST_FREE_AHEAD;
2548 *tx_rs_thresh = rs_thresh;
2549 *tx_free_thresh = free_thresh;
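/*
 * Editor's worked example (assuming the defaults
 * HNS3_DEFAULT_TX_RS_THRESH = 32, HNS3_DEFAULT_TX_FREE_THRESH = 8 and
 * HNS3_TX_FAST_FREE_AHEAD = 64): with nb_desc = 1024 and both conf
 * thresholds left at 0, rs_thresh = 32 and free_thresh = 8 pass the
 * checks (32 + 8 <= 1024, 1024 % 32 == 0); the fast-free branch then
 * raises free_thresh to (1024 - 32) - 64 = 928, so completed mbufs
 * are recycled in large batches.
 */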
2554 hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
2555 unsigned int socket_id, const struct rte_eth_txconf *conf)
2557 struct hns3_adapter *hns = dev->data->dev_private;
2558 uint16_t tx_rs_thresh, tx_free_thresh;
2559 struct hns3_hw *hw = &hns->hw;
2560 struct hns3_queue_info q_info;
2561 struct hns3_tx_queue *txq;
2565 ret = hns3_tx_queue_conf_check(hw, conf, nb_desc,
2566 &tx_rs_thresh, &tx_free_thresh, idx);
2570 if (dev->data->tx_queues[idx] != NULL) {
2571 hns3_tx_queue_release(dev->data->tx_queues[idx]);
2572 dev->data->tx_queues[idx] = NULL;
2576 q_info.socket_id = socket_id;
2577 q_info.nb_desc = nb_desc;
2578 q_info.type = "hns3 TX queue";
2579 q_info.ring_name = "tx_ring";
2580 txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
2583 "Failed to alloc mem and reserve DMA mem for tx ring!");
2587 txq->tx_deferred_start = conf->tx_deferred_start;
2588 if (txq->tx_deferred_start && !hns3_dev_indep_txrx_supported(hw)) {
2589 hns3_warn(hw, "deferred start is not supported.");
2590 txq->tx_deferred_start = false;
2593 tx_entry_len = sizeof(struct hns3_entry) * txq->nb_tx_desc;
2594 txq->sw_ring = rte_zmalloc_socket("hns3 TX sw ring", tx_entry_len,
2595 RTE_CACHE_LINE_SIZE, socket_id);
2596 if (txq->sw_ring == NULL) {
2597 hns3_err(hw, "Failed to allocate memory for tx sw ring!");
2598 hns3_tx_queue_release(txq);
2603 txq->next_to_use = 0;
2604 txq->next_to_clean = 0;
2605 txq->tx_bd_ready = txq->nb_tx_desc - 1;
2606 txq->tx_free_thresh = tx_free_thresh;
2607 txq->tx_rs_thresh = tx_rs_thresh;
2608 txq->free = rte_zmalloc_socket("hns3 TX mbuf free array",
2609 sizeof(struct rte_mbuf *) * txq->tx_rs_thresh,
2610 RTE_CACHE_LINE_SIZE, socket_id);
2612 hns3_err(hw, "failed to allocate tx mbuf free array!");
2613 hns3_tx_queue_release(txq);
2617 txq->port_id = dev->data->port_id;
2619 * For the hns3 PF device, if the VLAN mode is HW_SHIFT_AND_DISCARD_MODE,
2620 * pvid_sw_shift_en in the queue struct should not be changed,
2621 * because PVID-related operations do not need to be processed by the
2622 * PMD. For the hns3 VF device, whether PVID needs to be processed
2623 * depends on the configuration of the PF kernel-mode netdev driver,
2624 * whose related configuration is delivered through the mailbox and
2625 * finally reflected in port_base_vlan_cfg.
2627 if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
2628 txq->pvid_sw_shift_en = hw->port_base_vlan_cfg.state ==
2629 HNS3_PORT_BASE_VLAN_ENABLE;
2631 txq->pvid_sw_shift_en = false;
2632 txq->max_non_tso_bd_num = hw->max_non_tso_bd_num;
2633 txq->configured = true;
2634 txq->io_base = (void *)((char *)hw->io_base +
2635 hns3_get_tqp_reg_offset(idx));
2636 txq->io_tail_reg = (volatile void *)((char *)txq->io_base +
2637 HNS3_RING_TX_TAIL_REG);
2638 txq->min_tx_pkt_len = hw->min_tx_pkt_len;
2639 txq->tso_mode = hw->tso_mode;
2640 memset(&txq->basic_stats, 0, sizeof(struct hns3_tx_basic_stats));
2641 memset(&txq->dfx_stats, 0, sizeof(struct hns3_tx_dfx_stats));
2643 rte_spinlock_lock(&hw->lock);
2644 dev->data->tx_queues[idx] = txq;
2645 rte_spinlock_unlock(&hw->lock);
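/*
 * Editor's usage sketch (illustrative; port_id is assumed to be an
 * initialized port): this setup path is reached through the generic
 * ethdev API, e.g.:
 *
 * struct rte_eth_txconf txconf = dev_info.default_txconf;
 * txconf.tx_rs_thresh = 32;
 * txconf.tx_free_thresh = 0; --let the driver pick a default
 * ret = rte_eth_tx_queue_setup(port_id, 0, 1024,
 * rte_eth_dev_socket_id(port_id), &txconf);
 * if (ret != 0)
 * rte_exit(EXIT_FAILURE, "tx queue setup failed\n");
 */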
2651 hns3_tx_free_useless_buffer(struct hns3_tx_queue *txq)
2653 uint16_t tx_next_clean = txq->next_to_clean;
2654 uint16_t tx_next_use = txq->next_to_use;
2655 uint16_t tx_bd_ready = txq->tx_bd_ready;
2656 uint16_t tx_bd_max = txq->nb_tx_desc;
2657 struct hns3_entry *tx_bak_pkt = &txq->sw_ring[tx_next_clean];
2658 struct hns3_desc *desc = &txq->tx_ring[tx_next_clean];
2659 struct rte_mbuf *mbuf;
2661 while ((!(desc->tx.tp_fe_sc_vld_ra_ri &
2662 rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B)))) &&
2663 tx_next_use != tx_next_clean) {
2664 mbuf = tx_bak_pkt->mbuf;
2666 rte_pktmbuf_free_seg(mbuf);
2667 tx_bak_pkt->mbuf = NULL;
2675 if (tx_next_clean >= tx_bd_max) {
2677 desc = txq->tx_ring;
2678 tx_bak_pkt = txq->sw_ring;
2682 txq->next_to_clean = tx_next_clean;
2683 txq->tx_bd_ready = tx_bd_ready;
2687 hns3_config_gro(struct hns3_hw *hw, bool en)
2689 struct hns3_cfg_gro_status_cmd *req;
2690 struct hns3_cmd_desc desc;
2693 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GRO_GENERIC_CONFIG, false);
2694 req = (struct hns3_cfg_gro_status_cmd *)desc.data;
2696 req->gro_en = rte_cpu_to_le_16(en ? 1 : 0);
2698 ret = hns3_cmd_send(hw, &desc, 1);
2700 hns3_err(hw, "%s hardware GRO failed, ret = %d",
2701 en ? "enable" : "disable", ret);
2707 hns3_restore_gro_conf(struct hns3_hw *hw)
2713 offloads = hw->data->dev_conf.rxmode.offloads;
2714 gro_en = offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false;
2715 ret = hns3_config_gro(hw, gro_en);
2717 hns3_err(hw, "restore hardware GRO to %s failed, ret = %d",
2718 gro_en ? "enabled" : "disabled", ret);
2724 hns3_pkt_is_tso(struct rte_mbuf *m)
2726 return (m->tso_segsz != 0 && m->ol_flags & PKT_TX_TCP_SEG);
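/*
 * Editor's usage sketch (illustrative): a packet takes the TSO path
 * only when the application prepared the mbuf accordingly, e.g.:
 *
 * m->ol_flags |= PKT_TX_TCP_SEG | PKT_TX_IPV4 | PKT_TX_IP_CKSUM;
 * m->l2_len = sizeof(struct rte_ether_hdr);
 * m->l3_len = sizeof(struct rte_ipv4_hdr);
 * m->l4_len = sizeof(struct rte_tcp_hdr);
 * m->tso_segsz = 1400; --desired MSS per output segment
 */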
2730 hns3_set_tso(struct hns3_desc *desc, uint32_t paylen, struct rte_mbuf *rxm)
2732 if (!hns3_pkt_is_tso(rxm))
2735 if (paylen <= rxm->tso_segsz)
2738 desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(BIT(HNS3_TXD_TSO_B));
2739 desc->tx.mss = rte_cpu_to_le_16(rxm->tso_segsz);
2743 hns3_fill_per_desc(struct hns3_desc *desc, struct rte_mbuf *rxm)
2745 desc->addr = rte_mbuf_data_iova(rxm);
2746 desc->tx.send_size = rte_cpu_to_le_16(rte_pktmbuf_data_len(rxm));
2747 desc->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B));
2751 hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc,
2752 struct rte_mbuf *rxm)
2754 uint64_t ol_flags = rxm->ol_flags;
2758 hdr_len = rxm->l2_len + rxm->l3_len + rxm->l4_len;
2759 hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ?
2760 rxm->outer_l2_len + rxm->outer_l3_len : 0;
2761 paylen = rxm->pkt_len - hdr_len;
2762 desc->tx.paylen = rte_cpu_to_le_32(paylen);
2763 hns3_set_tso(desc, paylen, rxm);
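/*
 * Editor's worked example: for a TCP/IPv4 TSO packet with
 * pkt_len = 9014, l2_len = 14, l3_len = 20, l4_len = 20 and no tunnel
 * flags, hdr_len = 54 and paylen = 8960. With tso_segsz = 1448,
 * paylen exceeds the MSS, so hns3_set_tso() above sets the TSO bit
 * and programs 1448 into the descriptor's mss field.
 */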
2766 * Currently, the hns3 network engine's hardware doesn't support more
2767 * than two layers of VLAN offload in the Tx direction. So when the
2768 * number of VLANs in the packets represented by rxm, plus the number
2769 * of VLANs offloaded by hardware (such as the PVID), exceeds two, the
2770 * packets will be discarded or their original VLAN will be overwritten
2771 * by hardware. When the PF PVID is enabled by calling the API function
2772 * rte_eth_dev_set_vlan_pvid, or the VF PVID is enabled by the hns3
2773 * PF kernel ethernet driver, the outer VLAN tag will always be the PVID.
2774 * To avoid the VLAN of the Tx descriptor being overwritten by the PVID,
2775 * it should be placed close to the IP header when PVID is enabled.
2777 if (!txq->pvid_sw_shift_en && ol_flags & (PKT_TX_VLAN_PKT |
2779 desc->tx.ol_type_vlan_len_msec |=
2780 rte_cpu_to_le_32(BIT(HNS3_TXD_OVLAN_B));
2781 if (ol_flags & PKT_TX_QINQ_PKT)
2782 desc->tx.outer_vlan_tag =
2783 rte_cpu_to_le_16(rxm->vlan_tci_outer);
2785 desc->tx.outer_vlan_tag =
2786 rte_cpu_to_le_16(rxm->vlan_tci);
2789 if (ol_flags & PKT_TX_QINQ_PKT ||
2790 ((ol_flags & PKT_TX_VLAN_PKT) && txq->pvid_sw_shift_en)) {
2791 desc->tx.type_cs_vlan_tso_len |=
2792 rte_cpu_to_le_32(BIT(HNS3_TXD_VLAN_B));
2793 desc->tx.vlan_tag = rte_cpu_to_le_16(rxm->vlan_tci);
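/*
 * Editor's note (illustrative summary of the two branches above): with
 * PVID disabled, a QinQ mbuf has vlan_tci_outer written to the outer
 * tag field and vlan_tci to the inner tag field, while a single-VLAN
 * mbuf uses only the outer tag field. With PVID enabled, the hardware
 * owns the outer position, so the packet's own tag goes into the
 * inner field instead, keeping it close to the IP header.
 */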
2798 hns3_tx_alloc_mbufs(struct rte_mempool *mb_pool, uint16_t nb_new_buf,
2799 struct rte_mbuf **alloc_mbuf)
2801 #define MAX_NON_TSO_BD_PER_PKT 18
2802 struct rte_mbuf *pkt_segs[MAX_NON_TSO_BD_PER_PKT];
2805 /* Allocate enough mbufs */
2806 if (rte_mempool_get_bulk(mb_pool, (void **)pkt_segs, nb_new_buf))
2809 for (i = 0; i < nb_new_buf - 1; i++)
2810 pkt_segs[i]->next = pkt_segs[i + 1];
2812 pkt_segs[nb_new_buf - 1]->next = NULL;
2813 pkt_segs[0]->nb_segs = nb_new_buf;
2814 *alloc_mbuf = pkt_segs[0];
2820 hns3_pktmbuf_copy_hdr(struct rte_mbuf *new_pkt, struct rte_mbuf *old_pkt)
2822 new_pkt->ol_flags = old_pkt->ol_flags;
2823 new_pkt->pkt_len = rte_pktmbuf_pkt_len(old_pkt);
2824 new_pkt->outer_l2_len = old_pkt->outer_l2_len;
2825 new_pkt->outer_l3_len = old_pkt->outer_l3_len;
2826 new_pkt->l2_len = old_pkt->l2_len;
2827 new_pkt->l3_len = old_pkt->l3_len;
2828 new_pkt->l4_len = old_pkt->l4_len;
2829 new_pkt->vlan_tci_outer = old_pkt->vlan_tci_outer;
2830 new_pkt->vlan_tci = old_pkt->vlan_tci;
2834 hns3_reassemble_tx_pkts(struct rte_mbuf *tx_pkt, struct rte_mbuf **new_pkt,
2835 uint8_t max_non_tso_bd_num)
2837 struct rte_mempool *mb_pool;
2838 struct rte_mbuf *new_mbuf;
2839 struct rte_mbuf *temp_new;
2840 struct rte_mbuf *temp;
2841 uint16_t last_buf_len;
2842 uint16_t nb_new_buf;
2852 mb_pool = tx_pkt->pool;
2853 buf_size = tx_pkt->buf_len - RTE_PKTMBUF_HEADROOM;
2854 nb_new_buf = (rte_pktmbuf_pkt_len(tx_pkt) - 1) / buf_size + 1;
2855 if (nb_new_buf > max_non_tso_bd_num)
2858 last_buf_len = rte_pktmbuf_pkt_len(tx_pkt) % buf_size;
2859 if (last_buf_len == 0)
2860 last_buf_len = buf_size;
2862 /* Allocate enough mbufs */
2863 ret = hns3_tx_alloc_mbufs(mb_pool, nb_new_buf, &new_mbuf);
2867 /* Copy the original packet content to the new mbufs */
2869 s = rte_pktmbuf_mtod(temp, char *);
2870 len_s = rte_pktmbuf_data_len(temp);
2871 temp_new = new_mbuf;
2872 while (temp != NULL && temp_new != NULL) {
2873 d = rte_pktmbuf_mtod(temp_new, char *);
2874 buf_len = temp_new->next == NULL ? last_buf_len : buf_size;
2878 len = RTE_MIN(len_s, len_d);
2882 len_d = len_d - len;
2883 len_s = len_s - len;
2889 s = rte_pktmbuf_mtod(temp, char *);
2890 len_s = rte_pktmbuf_data_len(temp);
2894 temp_new->data_len = buf_len;
2895 temp_new = temp_new->next;
2897 hns3_pktmbuf_copy_hdr(new_mbuf, tx_pkt);
2899 /* free original mbufs */
2900 rte_pktmbuf_free(tx_pkt);
2902 *new_pkt = new_mbuf;
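/*
 * Editor's worked example: with buf_len = 2048 and
 * RTE_PKTMBUF_HEADROOM = 128, buf_size = 1920. A 20-BD chain carrying
 * pkt_len = 5000 is reassembled into
 * nb_new_buf = (5000 - 1) / 1920 + 1 = 3 mbufs: the first two hold
 * 1920 bytes each and the last holds last_buf_len = 5000 % 1920 =
 * 1160 bytes.
 */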
2908 hns3_parse_outer_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec)
2910 uint32_t tmp = *ol_type_vlan_len_msec;
2911 uint64_t ol_flags = m->ol_flags;
2913 /* (outer) IP header type */
2914 if (ol_flags & PKT_TX_OUTER_IPV4) {
2915 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
2916 tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M,
2917 HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_CSUM);
2919 tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M,
2920 HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_NO_CSUM);
2921 } else if (ol_flags & PKT_TX_OUTER_IPV6) {
2922 tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
2925 /* OL3 header size, in units of 4 bytes */
2926 tmp |= hns3_gen_field_val(HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
2927 m->outer_l3_len >> HNS3_L3_LEN_UNIT);
2928 *ol_type_vlan_len_msec = tmp;
2932 hns3_parse_inner_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec,
2933 uint32_t *type_cs_vlan_tso_len)
2935 #define HNS3_NVGRE_HLEN 8
2936 uint32_t tmp_outer = *ol_type_vlan_len_msec;
2937 uint32_t tmp_inner = *type_cs_vlan_tso_len;
2938 uint64_t ol_flags = m->ol_flags;
2939 uint16_t inner_l2_len;
2941 switch (ol_flags & PKT_TX_TUNNEL_MASK) {
2942 case PKT_TX_TUNNEL_VXLAN_GPE:
2943 case PKT_TX_TUNNEL_GENEVE:
2944 case PKT_TX_TUNNEL_VXLAN:
2945 /* MAC-in-UDP tunnelling packet, including VXLAN and GENEVE */
2946 tmp_outer |= hns3_gen_field_val(HNS3_TXD_TUNTYPE_M,
2947 HNS3_TXD_TUNTYPE_S, HNS3_TUN_MAC_IN_UDP);
2949 * For a tunnel packet, the inner l2 length of the mbuf is the sum
2950 * of the outer l4 length, the tunneling header length and the
2951 * inner l2 length. But in the hns3 tx descriptor, the tunneling
2952 * header length is carried in the outer L4 length field.
2953 * Therefore, the driver needs to calculate the outer L4 length and
2956 tmp_outer |= hns3_gen_field_val(HNS3_TXD_L4LEN_M,
2958 (uint8_t)RTE_ETHER_VXLAN_HLEN >>
2961 inner_l2_len = m->l2_len - RTE_ETHER_VXLAN_HLEN;
2963 case PKT_TX_TUNNEL_GRE:
2964 tmp_outer |= hns3_gen_field_val(HNS3_TXD_TUNTYPE_M,
2965 HNS3_TXD_TUNTYPE_S, HNS3_TUN_NVGRE);
2967 * For an NVGRE tunnel packet, the outer L4 is empty, so only
2968 * the NVGRE header length is filled into the outer L4 field.
2970 tmp_outer |= hns3_gen_field_val(HNS3_TXD_L4LEN_M,
2972 (uint8_t)HNS3_NVGRE_HLEN >> HNS3_L4_LEN_UNIT);
2974 inner_l2_len = m->l2_len - HNS3_NVGRE_HLEN;
2977 /* For non-UDP/GRE tunnelling, drop the tunnel packet */
2981 tmp_inner |= hns3_gen_field_val(HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
2982 inner_l2_len >> HNS3_L2_LEN_UNIT);
2983 /* OL2 header size, in units of 2 bytes */
2984 tmp_outer |= hns3_gen_field_val(HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
2985 m->outer_l2_len >> HNS3_L2_LEN_UNIT);
2987 *type_cs_vlan_tso_len = tmp_inner;
2988 *ol_type_vlan_len_msec = tmp_outer;
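/*
 * Editor's worked example for the UDP-tunnel case above: following the
 * convention this driver expects, a VXLAN mbuf carries
 * m->l2_len = outer UDP (8) + VXLAN (8) + inner Ethernet (14) = 30.
 * The tunnel portion, RTE_ETHER_VXLAN_HLEN = 16, is moved into the
 * descriptor's outer L4 length field (programmed in 4-byte units,
 * i.e. 16 >> HNS3_L4_LEN_UNIT = 4), leaving inner_l2_len = 30 - 16 = 14
 * for the inner L2 length field (in 2-byte units).
 */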
2994 hns3_parse_tunneling_params(struct hns3_tx_queue *txq, struct rte_mbuf *m,
2995 uint16_t tx_desc_id)
2997 struct hns3_desc *tx_ring = txq->tx_ring;
2998 struct hns3_desc *desc = &tx_ring[tx_desc_id];
2999 uint32_t tmp_outer = 0;
3000 uint32_t tmp_inner = 0;
3004 * The tunnel header is counted in the inner L2 header field of the
3005 * mbuf, but in the hns3 descriptor it belongs to the outer L4, so a
3006 * conversion between the two is needed. To avoid multiple
3007 * calculations, the L2 header lengths, both outer and inner,
3008 * are filled in during the parsing of tunnel packets.
3010 if (!(m->ol_flags & PKT_TX_TUNNEL_MASK)) {
3012 * For non-tunnel packets the tunnel type id is 0, so there is no
3013 * need to assign a value to it. Only the inner (normal) L2 header length
3016 tmp_inner |= hns3_gen_field_val(HNS3_TXD_L2LEN_M,
3017 HNS3_TXD_L2LEN_S, m->l2_len >> HNS3_L2_LEN_UNIT);
3020 * If the outer checksum is not offloaded, the outer length may be
3021 * filled with 0, and the length of the outer header is then added
3022 * to the inner l2_len, which would lead to a checksum error. So
3023 * the driver has to calculate the header lengths itself.
3025 if (unlikely(!(m->ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
3026 m->outer_l2_len == 0)) {
3027 struct rte_net_hdr_lens hdr_len;
3028 (void)rte_net_get_ptype(m, &hdr_len,
3029 RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
3030 m->outer_l3_len = hdr_len.l3_len;
3031 m->outer_l2_len = hdr_len.l2_len;
3032 m->l2_len = m->l2_len - hdr_len.l2_len - hdr_len.l3_len;
3034 hns3_parse_outer_params(m, &tmp_outer);
3035 ret = hns3_parse_inner_params(m, &tmp_outer, &tmp_inner);
3040 desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(tmp_outer);
3041 desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp_inner);
3047 hns3_parse_l3_cksum_params(struct rte_mbuf *m, uint32_t *type_cs_vlan_tso_len)
3049 uint64_t ol_flags = m->ol_flags;
3053 tmp = *type_cs_vlan_tso_len;
3054 if (ol_flags & PKT_TX_IPV4)
3055 l3_type = HNS3_L3T_IPV4;
3056 else if (ol_flags & PKT_TX_IPV6)
3057 l3_type = HNS3_L3T_IPV6;
3059 l3_type = HNS3_L3T_NONE;
3061 /* inner (or normal) L3 header size, in units of 4 bytes */
3062 tmp |= hns3_gen_field_val(HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
3063 m->l3_len >> HNS3_L3_LEN_UNIT);
3065 tmp |= hns3_gen_field_val(HNS3_TXD_L3T_M, HNS3_TXD_L3T_S, l3_type);
3067 /* Enable L3 checksum offloads */
3068 if (ol_flags & PKT_TX_IP_CKSUM)
3069 tmp |= BIT(HNS3_TXD_L3CS_B);
3070 *type_cs_vlan_tso_len = tmp;
3074 hns3_parse_l4_cksum_params(struct rte_mbuf *m, uint32_t *type_cs_vlan_tso_len)
3076 uint64_t ol_flags = m->ol_flags;
3078 /* Enable L4 checksum offloads */
3079 switch (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG)) {
3080 case PKT_TX_TCP_CKSUM:
3081 case PKT_TX_TCP_SEG:
3082 tmp = *type_cs_vlan_tso_len;
3083 tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
3086 case PKT_TX_UDP_CKSUM:
3087 tmp = *type_cs_vlan_tso_len;
3088 tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
3091 case PKT_TX_SCTP_CKSUM:
3092 tmp = *type_cs_vlan_tso_len;
3093 tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
3099 tmp |= BIT(HNS3_TXD_L4CS_B);
3100 tmp |= hns3_gen_field_val(HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
3101 m->l4_len >> HNS3_L4_LEN_UNIT);
3102 *type_cs_vlan_tso_len = tmp;
3106 hns3_txd_enable_checksum(struct hns3_tx_queue *txq, struct rte_mbuf *m,
3107 uint16_t tx_desc_id)
3109 struct hns3_desc *tx_ring = txq->tx_ring;
3110 struct hns3_desc *desc = &tx_ring[tx_desc_id];
3113 hns3_parse_l3_cksum_params(m, &value);
3114 hns3_parse_l4_cksum_params(m, &value);
3116 desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(value);
3120 hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num,
3121 uint32_t max_non_tso_bd_num)
3123 struct rte_mbuf *m_first = tx_pkts;
3124 struct rte_mbuf *m_last = tx_pkts;
3125 uint32_t tot_len = 0;
3130 * The hns3 network engine requires that the sum of the data length
3131 * of every 8 consecutive buffers is greater than the MSS. We
3132 * simplify this by ensuring that pkt_headlen plus the first 8
3133 * consecutive frags is greater than the gso header len + mss, and
3134 * that every 7-frag window is greater than the MSS, except the last 7 frags.
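 * For example (editor's illustration), with max_non_tso_bd_num = 8,
 * tso_segsz = 1448, hdr_len = 54 and a 10-BD chain of 512-byte frags:
 * the first check sums 7 frags (3584 B) plus the 8th (4096 B total),
 * which exceeds hdr_len + mss = 1502, and every later 7-frag window
 * (3584 B) also exceeds the mss, so no linearization is needed.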
3136 if (bd_num <= max_non_tso_bd_num)
3139 for (i = 0; m_last && i < max_non_tso_bd_num - 1;
3140 i++, m_last = m_last->next)
3141 tot_len += m_last->data_len;
3146 /* ensure the sum of the first 8 frags is greater than mss + header */
3147 hdr_len = tx_pkts->l2_len + tx_pkts->l3_len + tx_pkts->l4_len;
3148 hdr_len += (tx_pkts->ol_flags & PKT_TX_TUNNEL_MASK) ?
3149 tx_pkts->outer_l2_len + tx_pkts->outer_l3_len : 0;
3150 if (tot_len + m_last->data_len < tx_pkts->tso_segsz + hdr_len)
3154 * ensure the sum of the data length of every 7 consecutive buffers
3155 * is greater than the mss, except for the last window.
3157 for (i = 0; m_last && i < bd_num - max_non_tso_bd_num; i++) {
3158 tot_len -= m_first->data_len;
3159 tot_len += m_last->data_len;
3161 if (tot_len < tx_pkts->tso_segsz)
3164 m_first = m_first->next;
3165 m_last = m_last->next;
3172 hns3_outer_header_cksum_prepare(struct rte_mbuf *m)
3174 uint64_t ol_flags = m->ol_flags;
3175 uint32_t paylen, hdr_len, l4_proto;
3177 if (!(ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6)))
3180 if (ol_flags & PKT_TX_OUTER_IPV4) {
3181 struct rte_ipv4_hdr *ipv4_hdr;
3182 ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
3184 l4_proto = ipv4_hdr->next_proto_id;
3185 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
3186 ipv4_hdr->hdr_checksum = 0;
3188 struct rte_ipv6_hdr *ipv6_hdr;
3189 ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
3191 l4_proto = ipv6_hdr->proto;
3193 /* the driver must ensure the outer UDP checksum is 0 for tunnel TSO */
3194 if (l4_proto == IPPROTO_UDP && (ol_flags & PKT_TX_TCP_SEG)) {
3195 struct rte_udp_hdr *udp_hdr;
3196 hdr_len = m->l2_len + m->l3_len + m->l4_len;
3197 hdr_len += m->outer_l2_len + m->outer_l3_len;
3198 paylen = m->pkt_len - hdr_len;
3199 if (paylen <= m->tso_segsz)
3201 udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
3204 udp_hdr->dgram_cksum = 0;
3209 hns3_check_tso_pkt_valid(struct rte_mbuf *m)
3211 uint32_t tmp_data_len_sum = 0;
3212 uint16_t nb_buf = m->nb_segs;
3213 uint32_t paylen, hdr_len;
3214 struct rte_mbuf *m_seg;
3217 if (nb_buf > HNS3_MAX_TSO_BD_PER_PKT)
3220 hdr_len = m->l2_len + m->l3_len + m->l4_len;
3221 hdr_len += (m->ol_flags & PKT_TX_TUNNEL_MASK) ?
3222 m->outer_l2_len + m->outer_l3_len : 0;
3223 if (hdr_len > HNS3_MAX_TSO_HDR_SIZE)
3226 paylen = m->pkt_len - hdr_len;
3227 if (paylen > HNS3_MAX_BD_PAYLEN)
3231 * The TSO header (including the outer and inner L2, L3 and L4 headers)
3232 * should be provided by at most three descriptors in the hns3 network
3236 for (i = 0; m_seg != NULL && i < HNS3_MAX_TSO_HDR_BD_NUM && i < nb_buf;
3237 i++, m_seg = m_seg->next) {
3238 tmp_data_len_sum += m_seg->data_len;
3241 if (hdr_len > tmp_data_len_sum)
3247 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3249 hns3_vld_vlan_chk(struct hns3_tx_queue *txq, struct rte_mbuf *m)
3251 struct rte_ether_hdr *eh;
3252 struct rte_vlan_hdr *vh;
3254 if (!txq->pvid_sw_shift_en)
3258 * Due to hardware limitations, the hns3 network engine supports only
3259 * two-layer VLAN hardware offload in the Tx direction, so when PVID is
3260 * enabled, QinQ insertion is no longer supported.
3261 * In addition, when PVID is enabled, in the following two cases:
3262 * i) packets with more than two VLAN tags.
3263 * ii) packets with one VLAN tag while the hardware VLAN insert is
3265 * the packets will be regarded as abnormal and discarded by the
3266 * hardware in the Tx direction. For debugging purposes, a validation check
3267 * for these types of packets is added to the '.tx_pkt_prepare' ops
3268 * implementation function named hns3_prep_pkts to inform users that
3269 * these packets will be discarded.
3271 if (m->ol_flags & PKT_TX_QINQ_PKT)
3274 eh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
3275 if (eh->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
3276 if (m->ol_flags & PKT_TX_VLAN_PKT)
3279 /* Ensure the incoming packet is not a QinQ packet */
3280 vh = (struct rte_vlan_hdr *)(eh + 1);
3281 if (vh->eth_proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN))
3290 hns3_prep_pkt_proc(struct hns3_tx_queue *tx_queue, struct rte_mbuf *m)
3294 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3295 ret = rte_validate_tx_offload(m);
3301 ret = hns3_vld_vlan_chk(tx_queue, m);
3307 if (hns3_pkt_is_tso(m)) {
3308 if (hns3_pkt_need_linearized(m, m->nb_segs,
3309 tx_queue->max_non_tso_bd_num) ||
3310 hns3_check_tso_pkt_valid(m)) {
3315 if (tx_queue->tso_mode != HNS3_TSO_SW_CAL_PSEUDO_H_CSUM) {
3317 * (tso mode != HNS3_TSO_SW_CAL_PSEUDO_H_CSUM) means the
3318 * hardware can recalculate the TCP pseudo-header
3319 * checksum of packets that need TSO, so the network driver
3320 * software does not need to recalculate it.
3322 hns3_outer_header_cksum_prepare(m);
3327 ret = rte_net_intel_cksum_prepare(m);
3333 hns3_outer_header_cksum_prepare(m);
3339 hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
3345 for (i = 0; i < nb_pkts; i++) {
3347 if (hns3_prep_pkt_proc(tx_queue, m))
3355 hns3_parse_cksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
3358 struct hns3_desc *tx_ring = txq->tx_ring;
3359 struct hns3_desc *desc = &tx_ring[tx_desc_id];
3361 /* Enable checksum offloading */
3362 if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK) {
3363 /* Fill in tunneling parameters if necessary */
3364 if (hns3_parse_tunneling_params(txq, m, tx_desc_id)) {
3365 txq->dfx_stats.unsupported_tunnel_pkt_cnt++;
3369 hns3_txd_enable_checksum(txq, m, tx_desc_id);
3371 /* clear the checksum and VLAN control fields */
3372 desc->tx.type_cs_vlan_tso_len = 0;
3373 desc->tx.ol_type_vlan_len_msec = 0;
3380 hns3_check_non_tso_pkt(uint16_t nb_buf, struct rte_mbuf **m_seg,
3381 struct rte_mbuf *tx_pkt, struct hns3_tx_queue *txq)
3383 uint8_t max_non_tso_bd_num;
3384 struct rte_mbuf *new_pkt;
3387 if (hns3_pkt_is_tso(*m_seg))
3391 * If the packet length is greater than HNS3_MAX_FRAME_LEN,
3392 * the maximum the driver supports, the packet will be dropped.
3394 if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) > HNS3_MAX_FRAME_LEN)) {
3395 txq->dfx_stats.over_length_pkt_cnt++;
3399 max_non_tso_bd_num = txq->max_non_tso_bd_num;
3400 if (unlikely(nb_buf > max_non_tso_bd_num)) {
3401 txq->dfx_stats.exceed_limit_bd_pkt_cnt++;
3402 ret = hns3_reassemble_tx_pkts(tx_pkt, &new_pkt,
3403 max_non_tso_bd_num);
3405 txq->dfx_stats.exceed_limit_bd_reassem_fail++;
3415 hns3_tx_free_buffer_simple(struct hns3_tx_queue *txq)
3417 struct hns3_entry *tx_entry;
3418 struct hns3_desc *desc;
3419 uint16_t tx_next_clean;
3423 if (HNS3_GET_TX_QUEUE_PEND_BD_NUM(txq) < txq->tx_rs_thresh)
3427 * All mbufs can be released only when the VLD bits of all
3428 * descriptors in a batch are cleared.
3430 tx_next_clean = (txq->next_to_clean + txq->tx_rs_thresh - 1) %
3432 desc = &txq->tx_ring[tx_next_clean];
3433 for (i = 0; i < txq->tx_rs_thresh; i++) {
3434 if (rte_le_to_cpu_16(desc->tx.tp_fe_sc_vld_ra_ri) &
3435 BIT(HNS3_TXD_VLD_B))
3440 tx_entry = &txq->sw_ring[txq->next_to_clean];
3442 for (i = 0; i < txq->tx_rs_thresh; i++)
3443 rte_prefetch0((tx_entry + i)->mbuf);
3444 for (i = 0; i < txq->tx_rs_thresh; i++, tx_entry++) {
3445 rte_mempool_put(tx_entry->mbuf->pool, tx_entry->mbuf);
3446 tx_entry->mbuf = NULL;
3449 txq->next_to_clean = (tx_next_clean + 1) % txq->nb_tx_desc;
3450 txq->tx_bd_ready += txq->tx_rs_thresh;
3455 hns3_tx_backup_1mbuf(struct hns3_entry *tx_entry, struct rte_mbuf **pkts)
3457 tx_entry->mbuf = pkts[0];
3461 hns3_tx_backup_4mbuf(struct hns3_entry *tx_entry, struct rte_mbuf **pkts)
3463 hns3_tx_backup_1mbuf(&tx_entry[0], &pkts[0]);
3464 hns3_tx_backup_1mbuf(&tx_entry[1], &pkts[1]);
3465 hns3_tx_backup_1mbuf(&tx_entry[2], &pkts[2]);
3466 hns3_tx_backup_1mbuf(&tx_entry[3], &pkts[3]);
3470 hns3_tx_setup_4bd(struct hns3_desc *txdp, struct rte_mbuf **pkts)
3472 #define PER_LOOP_NUM 4
3473 const uint16_t bd_flag = BIT(HNS3_TXD_VLD_B) | BIT(HNS3_TXD_FE_B);
3477 for (i = 0; i < PER_LOOP_NUM; i++, txdp++, pkts++) {
3478 dma_addr = rte_mbuf_data_iova(*pkts);
3479 txdp->addr = rte_cpu_to_le_64(dma_addr);
3480 txdp->tx.send_size = rte_cpu_to_le_16((*pkts)->data_len);
3481 txdp->tx.paylen = 0;
3482 txdp->tx.type_cs_vlan_tso_len = 0;
3483 txdp->tx.ol_type_vlan_len_msec = 0;
3484 txdp->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(bd_flag);
3489 hns3_tx_setup_1bd(struct hns3_desc *txdp, struct rte_mbuf **pkts)
3491 const uint16_t bd_flag = BIT(HNS3_TXD_VLD_B) | BIT(HNS3_TXD_FE_B);
3494 dma_addr = rte_mbuf_data_iova(*pkts);
3495 txdp->addr = rte_cpu_to_le_64(dma_addr);
3496 txdp->tx.send_size = rte_cpu_to_le_16((*pkts)->data_len);
3497 txdp->tx.paylen = 0;
3498 txdp->tx.type_cs_vlan_tso_len = 0;
3499 txdp->tx.ol_type_vlan_len_msec = 0;
3500 txdp->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(bd_flag);
3504 hns3_tx_fill_hw_ring(struct hns3_tx_queue *txq,
3505 struct rte_mbuf **pkts,
3508 #define PER_LOOP_NUM 4
3509 #define PER_LOOP_MASK (PER_LOOP_NUM - 1)
3510 struct hns3_desc *txdp = &txq->tx_ring[txq->next_to_use];
3511 struct hns3_entry *tx_entry = &txq->sw_ring[txq->next_to_use];
3512 const uint32_t mainpart = (nb_pkts & ((uint32_t)~PER_LOOP_MASK));
3513 const uint32_t leftover = (nb_pkts & ((uint32_t)PER_LOOP_MASK));
3516 for (i = 0; i < mainpart; i += PER_LOOP_NUM) {
3517 hns3_tx_backup_4mbuf(tx_entry + i, pkts + i);
3518 hns3_tx_setup_4bd(txdp + i, pkts + i);
3520 if (unlikely(leftover > 0)) {
3521 for (i = 0; i < leftover; i++) {
3522 hns3_tx_backup_1mbuf(tx_entry + mainpart + i,
3523 pkts + mainpart + i);
3524 hns3_tx_setup_1bd(txdp + mainpart + i,
3525 pkts + mainpart + i);
3531 hns3_xmit_pkts_simple(void *tx_queue,
3532 struct rte_mbuf **tx_pkts,
3535 struct hns3_tx_queue *txq = tx_queue;
3538 hns3_tx_free_buffer_simple(txq);
3540 nb_pkts = RTE_MIN(txq->tx_bd_ready, nb_pkts);
3541 if (unlikely(nb_pkts == 0)) {
3542 if (txq->tx_bd_ready == 0)
3543 txq->dfx_stats.queue_full_cnt++;
3547 txq->tx_bd_ready -= nb_pkts;
3548 if (txq->next_to_use + nb_pkts > txq->nb_tx_desc) {
3549 nb_tx = txq->nb_tx_desc - txq->next_to_use;
3550 hns3_tx_fill_hw_ring(txq, tx_pkts, nb_tx);
3551 txq->next_to_use = 0;
3554 hns3_tx_fill_hw_ring(txq, tx_pkts + nb_tx, nb_pkts - nb_tx);
3555 txq->next_to_use += nb_pkts - nb_tx;
3557 hns3_write_reg_opt(txq->io_tail_reg, nb_pkts);
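/*
 * Editor's worked example of the wrap-around above: with
 * nb_tx_desc = 1024, next_to_use = 1020 and nb_pkts = 10, the first
 * fill covers BDs 1020..1023 (nb_tx = 4), next_to_use wraps to 0, the
 * second fill covers BDs 0..5 with the remaining 6 packets, and one
 * tail-register write of 10 rings the doorbell for the whole burst.
 */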
3563 hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
3565 struct hns3_tx_queue *txq = tx_queue;
3566 struct hns3_entry *tx_bak_pkt;
3567 struct hns3_desc *tx_ring;
3568 struct rte_mbuf *tx_pkt;
3569 struct rte_mbuf *m_seg;
3570 struct hns3_desc *desc;
3571 uint32_t nb_hold = 0;
3572 uint16_t tx_next_use;
3573 uint16_t tx_pkt_num;
3579 /* free useless buffer */
3580 hns3_tx_free_useless_buffer(txq);
3582 tx_next_use = txq->next_to_use;
3583 tx_bd_max = txq->nb_tx_desc;
3584 tx_pkt_num = nb_pkts;
3585 tx_ring = txq->tx_ring;
3588 tx_bak_pkt = &txq->sw_ring[tx_next_use];
3589 for (nb_tx = 0; nb_tx < tx_pkt_num; nb_tx++) {
3590 tx_pkt = *tx_pkts++;
3592 nb_buf = tx_pkt->nb_segs;
3594 if (nb_buf > txq->tx_bd_ready) {
3595 txq->dfx_stats.queue_full_cnt++;
3603 * If the packet length is less than the minimum length supported
3604 * by the hardware in the Tx direction, the driver pads it to avoid
3607 if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) <
3608 txq->min_tx_pkt_len)) {
3612 add_len = txq->min_tx_pkt_len -
3613 rte_pktmbuf_pkt_len(tx_pkt);
3614 appended = rte_pktmbuf_append(tx_pkt, add_len);
3615 if (appended == NULL) {
3616 txq->dfx_stats.pkt_padding_fail_cnt++;
3620 memset(appended, 0, add_len);
3625 if (hns3_check_non_tso_pkt(nb_buf, &m_seg, tx_pkt, txq))
3628 if (hns3_parse_cksum(txq, tx_next_use, m_seg))
3632 desc = &tx_ring[tx_next_use];
3635 * If the packet is divided into multiple Tx Buffer Descriptors,
3636 * only the first Tx Buffer Descriptor needs the vlan, paylen and
3637 * tso fields filled in.
3639 hns3_fill_first_desc(txq, desc, m_seg);
3642 desc = &tx_ring[tx_next_use];
3644 * Fill valid bits, DMA address and data length for each
3645 * Tx Buffer Descriptor.
3647 hns3_fill_per_desc(desc, m_seg);
3648 tx_bak_pkt->mbuf = m_seg;
3649 m_seg = m_seg->next;
3652 if (tx_next_use >= tx_bd_max) {
3654 tx_bak_pkt = txq->sw_ring;
3658 } while (m_seg != NULL);
3660 /* Add end flag for the last Tx Buffer Descriptor */
3661 desc->tx.tp_fe_sc_vld_ra_ri |=
3662 rte_cpu_to_le_16(BIT(HNS3_TXD_FE_B));
3665 txq->next_to_use = tx_next_use;
3666 txq->tx_bd_ready -= i;
3672 hns3_write_reg_opt(txq->io_tail_reg, nb_hold);
3678 hns3_tx_check_vec_support(__rte_unused struct rte_eth_dev *dev)
3684 hns3_xmit_pkts_vec(__rte_unused void *tx_queue,
3685 __rte_unused struct rte_mbuf **tx_pkts,
3686 __rte_unused uint16_t nb_pkts)
3692 hns3_xmit_pkts_vec_sve(__rte_unused void *tx_queue,
3693 __rte_unused struct rte_mbuf **tx_pkts,
3694 __rte_unused uint16_t nb_pkts)
3700 hns3_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
3701 struct rte_eth_burst_mode *mode)
3703 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
3704 const char *info = NULL;
3706 if (pkt_burst == hns3_xmit_pkts_simple)
3707 info = "Scalar Simple";
3708 else if (pkt_burst == hns3_xmit_pkts)
3710 else if (pkt_burst == hns3_xmit_pkts_vec)
3711 info = "Vector Neon";
3712 else if (pkt_burst == hns3_xmit_pkts_vec_sve)
3713 info = "Vector Sve";
3718 snprintf(mode->info, sizeof(mode->info), "%s", info);
3723 static eth_tx_burst_t
3724 hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep)
3726 uint64_t offloads = dev->data->dev_conf.txmode.offloads;
3727 struct hns3_adapter *hns = dev->data->dev_private;
3729 if (hns->tx_vec_allowed && hns3_tx_check_vec_support(dev) == 0) {
3731 return hns3_check_sve_support() ? hns3_xmit_pkts_vec_sve :
3735 if (hns->tx_simple_allowed &&
3736 offloads == (offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)) {
3738 return hns3_xmit_pkts_simple;
3741 *prep = hns3_prep_pkts;
3742 return hns3_xmit_pkts;
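/*
 * Editor's usage sketch (illustrative; port_id is assumed valid): the
 * selected function can be queried through the generic ethdev API:
 *
 * struct rte_eth_burst_mode mode;
 * if (rte_eth_tx_burst_mode_get(port_id, 0, &mode) == 0)
 * printf("Tx burst mode: %s\n", mode.info);
 * --prints e.g. "Scalar Simple" when only MBUF_FAST_FREE is enabled
 */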
3746 hns3_dummy_rxtx_burst(void *dpdk_txq __rte_unused,
3747 struct rte_mbuf **pkts __rte_unused,
3748 uint16_t pkts_n __rte_unused)
3753 void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
3755 struct hns3_adapter *hns = eth_dev->data->dev_private;
3756 eth_tx_prep_t prep = NULL;
3758 if (hns->hw.adapter_state == HNS3_NIC_STARTED &&
3759 __atomic_load_n(&hns->hw.reset.resetting, __ATOMIC_RELAXED) == 0) {
3760 eth_dev->rx_pkt_burst = hns3_get_rx_function(eth_dev);
3761 eth_dev->tx_pkt_burst = hns3_get_tx_function(eth_dev, &prep);
3762 eth_dev->tx_pkt_prepare = prep;
3764 eth_dev->rx_pkt_burst = hns3_dummy_rxtx_burst;
3765 eth_dev->tx_pkt_burst = hns3_dummy_rxtx_burst;
3766 eth_dev->tx_pkt_prepare = hns3_dummy_rxtx_burst;
3771 hns3_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
3772 struct rte_eth_rxq_info *qinfo)
3774 struct hns3_rx_queue *rxq = dev->data->rx_queues[queue_id];
3776 qinfo->mp = rxq->mb_pool;
3777 qinfo->nb_desc = rxq->nb_rx_desc;
3778 qinfo->scattered_rx = dev->data->scattered_rx;
3779 /* Report the HW Rx buffer length to user */
3780 qinfo->rx_buf_size = rxq->rx_buf_len;
3783 * If there are no available Rx buffer descriptors, incoming packets
3784 * are always dropped by the hns3 network engine hardware.
3786 qinfo->conf.rx_drop_en = 1;
3787 qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
3788 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
3789 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
3793 hns3_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
3794 struct rte_eth_txq_info *qinfo)
3796 struct hns3_tx_queue *txq = dev->data->tx_queues[queue_id];
3798 qinfo->nb_desc = txq->nb_tx_desc;
3799 qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
3800 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
3801 qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
3802 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
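/*
 * Editor's usage sketch (illustrative; port_id is assumed valid):
 * these callbacks back the generic queue-introspection API, e.g.:
 *
 * struct rte_eth_rxq_info rx_info;
 * struct rte_eth_txq_info tx_info;
 * if (rte_eth_rx_queue_info_get(port_id, 0, &rx_info) == 0)
 * printf("rx ring: %u BDs, buf %u B\n",
 * rx_info.nb_desc, rx_info.rx_buf_size);
 * if (rte_eth_tx_queue_info_get(port_id, 0, &tx_info) == 0)
 * printf("tx ring: %u BDs\n", tx_info.nb_desc);
 */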
3806 hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
3808 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3809 struct hns3_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
3810 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3813 if (!hns3_dev_indep_txrx_supported(hw))
3816 ret = hns3_reset_queue(hw, rx_queue_id, HNS3_RING_TYPE_RX);
3818 hns3_err(hw, "fail to reset Rx queue %u, ret = %d.",
3823 ret = hns3_init_rxq(hns, rx_queue_id);
3825 hns3_err(hw, "fail to init Rx queue %u, ret = %d.",
3830 hns3_enable_rxq(rxq, true);
3831 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
3837 hns3_reset_sw_rxq(struct hns3_rx_queue *rxq)
3839 rxq->next_to_use = 0;
3840 rxq->rx_rearm_start = 0;
3841 rxq->rx_free_hold = 0;
3842 rxq->rx_rearm_nb = 0;
3843 rxq->pkt_first_seg = NULL;
3844 rxq->pkt_last_seg = NULL;
3845 memset(&rxq->rx_ring[0], 0, rxq->nb_rx_desc * sizeof(struct hns3_desc));
3846 hns3_rxq_vec_setup(rxq);
3850 hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
3852 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3853 struct hns3_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
3855 if (!hns3_dev_indep_txrx_supported(hw))
3858 hns3_enable_rxq(rxq, false);
3860 hns3_rx_queue_release_mbufs(rxq);
3862 hns3_reset_sw_rxq(rxq);
3863 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
3869 hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
3871 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3872 struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
3875 if (!hns3_dev_indep_txrx_supported(hw))
3878 ret = hns3_reset_queue(hw, tx_queue_id, HNS3_RING_TYPE_TX);
3880 hns3_err(hw, "fail to reset Tx queue %u, ret = %d.",
3886 hns3_enable_txq(txq, true);
3887 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
3893 hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
3895 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3896 struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
3898 if (!hns3_dev_indep_txrx_supported(hw))
3901 hns3_enable_txq(txq, false);
3902 hns3_tx_queue_release_mbufs(txq);
3904 * All the mbufs in sw_ring are released and all the pointers in
3905 * sw_ring are set to NULL. If this queue is still called by the upper
3906 * layer, the residual SW status of this txq may cause the NULL
3907 * pointers in sw_ring to be released again. To avoid this,
3911 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
3917 hns3_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
3920 * Number of BDs that have been processed by the driver
3921 * but have not been notified to the hardware.
3923 uint32_t driver_hold_bd_num;
3924 struct hns3_rx_queue *rxq;
3927 rxq = dev->data->rx_queues[rx_queue_id];
3928 fbd_num = hns3_read_dev(rxq, HNS3_RING_RX_FBDNUM_REG);
3929 if (dev->rx_pkt_burst == hns3_recv_pkts_vec ||
3930 dev->rx_pkt_burst == hns3_recv_pkts_vec_sve)
3931 driver_hold_bd_num = rxq->rx_rearm_nb;
3933 driver_hold_bd_num = rxq->rx_free_hold;
3935 if (fbd_num <= driver_hold_bd_num)
3938 return fbd_num - driver_hold_bd_num;
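/*
 * Editor's usage sketch (illustrative; port_id is assumed valid): this
 * callback implements the generic rte_eth_rx_queue_count() API:
 *
 * int used = rte_eth_rx_queue_count(port_id, 0);
 * if (used >= 0)
 * printf("%d BDs await processing on rxq 0\n", used);
 *
 * Subtracting driver_hold_bd_num keeps BDs already consumed by the
 * driver, but not yet returned to hardware, out of the count.
 */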