1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2019 Hisilicon Limited.
11 #include <rte_bus_pci.h>
12 #include <rte_byteorder.h>
13 #include <rte_common.h>
14 #include <rte_cycles.h>
17 #include <rte_ether.h>
18 #include <rte_vxlan.h>
19 #include <rte_ethdev_driver.h>
24 #include <rte_malloc.h>
27 #include "hns3_ethdev.h"
28 #include "hns3_rxtx.h"
29 #include "hns3_regs.h"
30 #include "hns3_logs.h"
32 #define HNS3_CFG_DESC_NUM(num) ((num) / 8 - 1)
33 #define DEFAULT_RX_FREE_THRESH 32
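/*
 * Editor's note, a worked example of the encoding above: the BD-number
 * register stores (num / 8) - 1, so a ring of 1024 descriptors is
 * programmed as HNS3_CFG_DESC_NUM(1024) == 127, and a ring of 128
 * descriptors as HNS3_CFG_DESC_NUM(128) == 15.
 */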
36 hns3_rx_queue_release_mbufs(struct hns3_rx_queue *rxq)
40 /* Note: Fake rx queue will not enter here */
42 for (i = 0; i < rxq->nb_rx_desc; i++) {
43 if (rxq->sw_ring[i].mbuf) {
44 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
45 rxq->sw_ring[i].mbuf = NULL;
52 hns3_tx_queue_release_mbufs(struct hns3_tx_queue *txq)
56 /* Note: Fake tx queue will not enter here */
58 for (i = 0; i < txq->nb_tx_desc; i++) {
59 if (txq->sw_ring[i].mbuf) {
60 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
61 txq->sw_ring[i].mbuf = NULL;
68 hns3_rx_queue_release(void *queue)
70 struct hns3_rx_queue *rxq = queue;
72 hns3_rx_queue_release_mbufs(rxq);
74 rte_memzone_free(rxq->mz);
76 rte_free(rxq->sw_ring);
82 hns3_tx_queue_release(void *queue)
84 struct hns3_tx_queue *txq = queue;
86 hns3_tx_queue_release_mbufs(txq);
88 rte_memzone_free(txq->mz);
90 rte_free(txq->sw_ring);
96 hns3_dev_rx_queue_release(void *queue)
98 struct hns3_rx_queue *rxq = queue;
99 struct hns3_adapter *hns;
105 rte_spinlock_lock(&hns->hw.lock);
106 hns3_rx_queue_release(queue);
107 rte_spinlock_unlock(&hns->hw.lock);
111 hns3_dev_tx_queue_release(void *queue)
113 struct hns3_tx_queue *txq = queue;
114 struct hns3_adapter *hns;
120 rte_spinlock_lock(&hns->hw.lock);
121 hns3_tx_queue_release(queue);
122 rte_spinlock_unlock(&hns->hw.lock);
126 hns3_fake_rx_queue_release(struct hns3_rx_queue *queue)
128 struct hns3_rx_queue *rxq = queue;
129 struct hns3_adapter *hns;
139 if (hw->fkq_data.rx_queues[idx]) {
140 hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);
141 hw->fkq_data.rx_queues[idx] = NULL;
144 /* free fake rx queue arrays */
145 if (idx == (hw->fkq_data.nb_fake_rx_queues - 1)) {
146 hw->fkq_data.nb_fake_rx_queues = 0;
147 rte_free(hw->fkq_data.rx_queues);
148 hw->fkq_data.rx_queues = NULL;
153 hns3_fake_tx_queue_release(struct hns3_tx_queue *queue)
155 struct hns3_tx_queue *txq = queue;
156 struct hns3_adapter *hns;
166 if (hw->fkq_data.tx_queues[idx]) {
167 hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);
168 hw->fkq_data.tx_queues[idx] = NULL;
171 /* free fake tx queue arrays */
172 if (idx == (hw->fkq_data.nb_fake_tx_queues - 1)) {
173 hw->fkq_data.nb_fake_tx_queues = 0;
174 rte_free(hw->fkq_data.tx_queues);
175 hw->fkq_data.tx_queues = NULL;
180 hns3_free_rx_queues(struct rte_eth_dev *dev)
182 struct hns3_adapter *hns = dev->data->dev_private;
183 struct hns3_fake_queue_data *fkq_data;
184 struct hns3_hw *hw = &hns->hw;
188 nb_rx_q = hw->data->nb_rx_queues;
189 for (i = 0; i < nb_rx_q; i++) {
190 if (dev->data->rx_queues[i]) {
191 hns3_rx_queue_release(dev->data->rx_queues[i]);
192 dev->data->rx_queues[i] = NULL;
196 /* Free fake Rx queues */
197 fkq_data = &hw->fkq_data;
198 for (i = 0; i < fkq_data->nb_fake_rx_queues; i++) {
199 if (fkq_data->rx_queues[i])
200 hns3_fake_rx_queue_release(fkq_data->rx_queues[i]);
205 hns3_free_tx_queues(struct rte_eth_dev *dev)
207 struct hns3_adapter *hns = dev->data->dev_private;
208 struct hns3_fake_queue_data *fkq_data;
209 struct hns3_hw *hw = &hns->hw;
213 nb_tx_q = hw->data->nb_tx_queues;
214 for (i = 0; i < nb_tx_q; i++) {
215 if (dev->data->tx_queues[i]) {
216 hns3_tx_queue_release(dev->data->tx_queues[i]);
217 dev->data->tx_queues[i] = NULL;
221 /* Free fake Tx queues */
222 fkq_data = &hw->fkq_data;
223 for (i = 0; i < fkq_data->nb_fake_tx_queues; i++) {
224 if (fkq_data->tx_queues[i])
225 hns3_fake_tx_queue_release(fkq_data->tx_queues[i]);
230 hns3_free_all_queues(struct rte_eth_dev *dev)
232 hns3_free_rx_queues(dev);
233 hns3_free_tx_queues(dev);
237 hns3_alloc_rx_queue_mbufs(struct hns3_hw *hw, struct hns3_rx_queue *rxq)
239 struct rte_mbuf *mbuf;
243 for (i = 0; i < rxq->nb_rx_desc; i++) {
244 mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
245 if (unlikely(mbuf == NULL)) {
246 hns3_err(hw, "Failed to allocate RXD[%d] for rx queue!",
248 hns3_rx_queue_release_mbufs(rxq);
252 rte_mbuf_refcnt_set(mbuf, 1);
254 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
256 mbuf->port = rxq->port_id;
258 rxq->sw_ring[i].mbuf = mbuf;
259 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
260 rxq->rx_ring[i].addr = dma_addr;
261 rxq->rx_ring[i].rx.bd_base_info = 0;
268 hns3_buf_size2type(uint32_t buf_size)
274 bd_size_type = HNS3_BD_SIZE_512_TYPE;
277 bd_size_type = HNS3_BD_SIZE_1024_TYPE;
280 bd_size_type = HNS3_BD_SIZE_4096_TYPE;
283 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
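/*
 * Editor's note: the switch labels are elided above; from the assignments,
 * the mapping is presumably 512 -> HNS3_BD_SIZE_512_TYPE, 1024 ->
 * HNS3_BD_SIZE_1024_TYPE, 4096 -> HNS3_BD_SIZE_4096_TYPE, with
 * HNS3_BD_SIZE_2048_TYPE as the default for other buffer sizes.
 */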
290 hns3_init_rx_queue_hw(struct hns3_rx_queue *rxq)
292 uint32_t rx_buf_len = rxq->rx_buf_len;
293 uint64_t dma_addr = rxq->rx_ring_phys_addr;
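/*
 * Editor's note: the high 32 bits are written as (dma_addr >> 31) >> 1
 * rather than dma_addr >> 32; the split shift stays well defined even if
 * the operand is ever narrowed to a 32-bit type, where a single shift by
 * 32 would be undefined behaviour in C.
 */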
295 hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_L_REG, (uint32_t)dma_addr);
296 hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_H_REG,
297 (uint32_t)((dma_addr >> 31) >> 1));
299 hns3_write_dev(rxq, HNS3_RING_RX_BD_LEN_REG,
300 hns3_buf_size2type(rx_buf_len));
301 hns3_write_dev(rxq, HNS3_RING_RX_BD_NUM_REG,
302 HNS3_CFG_DESC_NUM(rxq->nb_rx_desc));
306 hns3_init_tx_queue_hw(struct hns3_tx_queue *txq)
308 uint64_t dma_addr = txq->tx_ring_phys_addr;
310 hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_L_REG, (uint32_t)dma_addr);
311 hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_H_REG,
312 (uint32_t)((dma_addr >> 31) >> 1));
314 hns3_write_dev(txq, HNS3_RING_TX_BD_NUM_REG,
315 HNS3_CFG_DESC_NUM(txq->nb_tx_desc));
319 hns3_enable_all_queues(struct hns3_hw *hw, bool en)
321 uint16_t nb_rx_q = hw->data->nb_rx_queues;
322 uint16_t nb_tx_q = hw->data->nb_tx_queues;
323 struct hns3_rx_queue *rxq;
324 struct hns3_tx_queue *txq;
328 for (i = 0; i < hw->cfg_max_queues; i++) {
330 rxq = hw->data->rx_queues[i];
332 rxq = hw->fkq_data.rx_queues[i - nb_rx_q];
334 txq = hw->data->tx_queues[i];
336 txq = hw->fkq_data.tx_queues[i - nb_tx_q];
337 if (rxq == NULL || txq == NULL ||
338 (en && (rxq->rx_deferred_start || txq->tx_deferred_start)))
341 rcb_reg = hns3_read_dev(rxq, HNS3_RING_EN_REG);
343 rcb_reg |= BIT(HNS3_RING_EN_B);
345 rcb_reg &= ~BIT(HNS3_RING_EN_B);
346 hns3_write_dev(rxq, HNS3_RING_EN_REG, rcb_reg);
351 hns3_tqp_enable(struct hns3_hw *hw, uint16_t queue_id, bool enable)
353 struct hns3_cfg_com_tqp_queue_cmd *req;
354 struct hns3_cmd_desc desc;
357 req = (struct hns3_cfg_com_tqp_queue_cmd *)desc.data;
359 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_COM_TQP_QUEUE, false);
360 req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
362 hns3_set_bit(req->enable, HNS3_TQP_ENABLE_B, enable ? 1 : 0);
364 ret = hns3_cmd_send(hw, &desc, 1);
366 hns3_err(hw, "TQP enable failed, ret = %d", ret);
372 hns3_send_reset_tqp_cmd(struct hns3_hw *hw, uint16_t queue_id, bool enable)
374 struct hns3_reset_tqp_queue_cmd *req;
375 struct hns3_cmd_desc desc;
378 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, false);
380 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
381 req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
382 hns3_set_bit(req->reset_req, HNS3_TQP_RESET_B, enable ? 1 : 0);
384 ret = hns3_cmd_send(hw, &desc, 1);
386 hns3_err(hw, "Failed to send tqp reset cmd, ret = %d", ret);
392 hns3_get_reset_status(struct hns3_hw *hw, uint16_t queue_id)
394 struct hns3_reset_tqp_queue_cmd *req;
395 struct hns3_cmd_desc desc;
398 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, true);
400 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
401 req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
403 ret = hns3_cmd_send(hw, &desc, 1);
405 hns3_err(hw, "Failed to get reset status, ret = %d", ret);
409 return hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B);
413 hns3_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
415 #define HNS3_TQP_RESET_TRY_MS 200
420 ret = hns3_tqp_enable(hw, queue_id, false);
425 * In the current version, VF is not supported when the PF is driven by
426 * the DPDK driver; all task queue pairs are mapped to the PF function,
427 * so the PF's queue id equals the global queue id in the PF range.
429 ret = hns3_send_reset_tqp_cmd(hw, queue_id, true);
431 hns3_err(hw, "Send reset tqp cmd fail, ret = %d", ret);
435 end = get_timeofday_ms() + HNS3_TQP_RESET_TRY_MS;
437 /* Wait for tqp hw reset */
438 rte_delay_ms(HNS3_POLL_RESPONE_MS);
439 reset_status = hns3_get_reset_status(hw, queue_id);
444 } while (get_timeofday_ms() < end);
447 hns3_err(hw, "Reset TQP failed, ret = %d", ret);
451 ret = hns3_send_reset_tqp_cmd(hw, queue_id, false);
453 hns3_err(hw, "Failed to deassert the soft reset, ret = %d", ret);
459 hns3vf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
464 /* Disable the VF's queue before sending the queue reset msg to the PF */
465 ret = hns3_tqp_enable(hw, queue_id, false);
469 memcpy(msg_data, &queue_id, sizeof(uint16_t));
471 return hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data,
472 sizeof(msg_data), true, NULL, 0);
476 hns3_reset_queue(struct hns3_adapter *hns, uint16_t queue_id)
478 struct hns3_hw *hw = &hns->hw;
480 return hns3vf_reset_tqp(hw, queue_id);
482 return hns3_reset_tqp(hw, queue_id);
486 hns3_reset_all_queues(struct hns3_adapter *hns)
488 struct hns3_hw *hw = &hns->hw;
491 for (i = 0; i < hw->cfg_max_queues; i++) {
492 ret = hns3_reset_queue(hns, i);
494 hns3_err(hw, "Failed to reset No.%d queue: %d", i, ret);
502 hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
503 uint8_t gl_idx, uint16_t gl_value)
505 uint32_t offset[] = {HNS3_TQP_INTR_GL0_REG,
506 HNS3_TQP_INTR_GL1_REG,
507 HNS3_TQP_INTR_GL2_REG};
508 uint32_t addr, value;
510 if (gl_idx >= RTE_DIM(offset) || gl_value > HNS3_TQP_INTR_GL_MAX)
513 addr = offset[gl_idx] + queue_id * HNS3_TQP_INTR_REG_SIZE;
514 value = HNS3_GL_USEC_TO_REG(gl_value);
516 hns3_write_dev(hw, addr, value);
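/*
 * Editor's note, a worked example: with gl_idx == 1 and queue_id == 3, the
 * write above lands at HNS3_TQP_INTR_GL1_REG + 3 * HNS3_TQP_INTR_REG_SIZE,
 * i.e. every queue owns its own block of interrupt-coalescing registers at
 * a fixed stride.
 */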
520 hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id, uint16_t rl_value)
522 uint32_t addr, value;
524 if (rl_value > HNS3_TQP_INTR_RL_MAX)
527 addr = HNS3_TQP_INTR_RL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
528 value = HNS3_RL_USEC_TO_REG(rl_value);
530 value |= HNS3_TQP_INTR_RL_ENABLE_MASK;
532 hns3_write_dev(hw, addr, value);
536 hns3_queue_intr_enable(struct hns3_hw *hw, uint16_t queue_id, bool en)
538 uint32_t addr, value;
540 addr = HNS3_TQP_INTR_CTRL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
543 hns3_write_dev(hw, addr, value);
547 * Enable all Rx queue interrupts when in Rx interrupt mode.
548 * This API is called before enabling queue Rx & Tx (in normal start or reset
549 * recovery scenarios), to restore the hardware Rx queue interrupt enable that was cleared
553 hns3_dev_all_rx_queue_intr_enable(struct hns3_hw *hw, bool en)
555 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
556 uint16_t nb_rx_q = hw->data->nb_rx_queues;
559 if (dev->data->dev_conf.intr_conf.rxq == 0)
562 for (i = 0; i < nb_rx_q; i++)
563 hns3_queue_intr_enable(hw, i, en);
567 hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
569 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
570 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
571 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
573 if (dev->data->dev_conf.intr_conf.rxq == 0)
576 hns3_queue_intr_enable(hw, queue_id, true);
578 return rte_intr_ack(intr_handle);
582 hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
584 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
586 if (dev->data->dev_conf.intr_conf.rxq == 0)
589 hns3_queue_intr_enable(hw, queue_id, false);
595 hns3_dev_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
597 struct hns3_hw *hw = &hns->hw;
598 struct hns3_rx_queue *rxq;
601 PMD_INIT_FUNC_TRACE();
603 rxq = (struct hns3_rx_queue *)hw->data->rx_queues[idx];
604 ret = hns3_alloc_rx_queue_mbufs(hw, rxq);
606 hns3_err(hw, "Failed to alloc mbuf for No.%d rx queue: %d",
611 rxq->next_to_use = 0;
612 rxq->next_to_clean = 0;
614 hns3_init_rx_queue_hw(rxq);
620 hns3_fake_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
622 struct hns3_hw *hw = &hns->hw;
623 struct hns3_rx_queue *rxq;
625 rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[idx];
626 rxq->next_to_use = 0;
627 rxq->next_to_clean = 0;
629 hns3_init_rx_queue_hw(rxq);
633 hns3_init_tx_queue(struct hns3_tx_queue *queue)
635 struct hns3_tx_queue *txq = queue;
636 struct hns3_desc *desc;
641 for (i = 0; i < txq->nb_tx_desc; i++) {
642 desc->tx.tp_fe_sc_vld_ra_ri = 0;
646 txq->next_to_use = 0;
647 txq->next_to_clean = 0;
648 txq->tx_bd_ready = txq->nb_tx_desc - 1;
649 hns3_init_tx_queue_hw(txq);
653 hns3_dev_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)
655 struct hns3_hw *hw = &hns->hw;
656 struct hns3_tx_queue *txq;
658 txq = (struct hns3_tx_queue *)hw->data->tx_queues[idx];
659 hns3_init_tx_queue(txq);
663 hns3_fake_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)
665 struct hns3_hw *hw = &hns->hw;
666 struct hns3_tx_queue *txq;
668 txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[idx];
669 hns3_init_tx_queue(txq);
673 hns3_init_tx_ring_tc(struct hns3_adapter *hns)
675 struct hns3_hw *hw = &hns->hw;
676 struct hns3_tx_queue *txq;
679 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
680 struct hns3_tc_queue_info *tc_queue = &hw->tc_queue[i];
683 if (!tc_queue->enable)
686 for (j = 0; j < tc_queue->tqp_count; j++) {
687 num = tc_queue->tqp_offset + j;
688 txq = (struct hns3_tx_queue *)hw->data->tx_queues[num];
692 hns3_write_dev(txq, HNS3_RING_TX_TC_REG, tc_queue->tc);
698 hns3_start_rx_queues(struct hns3_adapter *hns)
700 struct hns3_hw *hw = &hns->hw;
701 struct hns3_rx_queue *rxq;
705 /* Initialize RSS for queues */
706 ret = hns3_config_rss(hns);
708 hns3_err(hw, "Failed to configure RSS: %d", ret);
712 for (i = 0; i < hw->data->nb_rx_queues; i++) {
713 rxq = (struct hns3_rx_queue *)hw->data->rx_queues[i];
714 if (rxq == NULL || rxq->rx_deferred_start)
716 ret = hns3_dev_rx_queue_start(hns, i);
718 hns3_err(hw, "Failed to start No.%d rx queue: %d", i,
724 for (i = 0; i < hw->fkq_data.nb_fake_rx_queues; i++) {
725 rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[i];
726 if (rxq == NULL || rxq->rx_deferred_start)
728 hns3_fake_rx_queue_start(hns, i);
733 for (j = 0; j < i; j++) {
734 rxq = (struct hns3_rx_queue *)hw->data->rx_queues[j];
735 hns3_rx_queue_release_mbufs(rxq);
742 hns3_start_tx_queues(struct hns3_adapter *hns)
744 struct hns3_hw *hw = &hns->hw;
745 struct hns3_tx_queue *txq;
748 for (i = 0; i < hw->data->nb_tx_queues; i++) {
749 txq = (struct hns3_tx_queue *)hw->data->tx_queues[i];
750 if (txq == NULL || txq->tx_deferred_start)
752 hns3_dev_tx_queue_start(hns, i);
755 for (i = 0; i < hw->fkq_data.nb_fake_tx_queues; i++) {
756 txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[i];
757 if (txq == NULL || txq->tx_deferred_start)
759 hns3_fake_tx_queue_start(hns, i);
762 hns3_init_tx_ring_tc(hns);
767 * Note: just init and set up the queues; do not enable queue Rx & Tx.
770 hns3_start_queues(struct hns3_adapter *hns, bool reset_queue)
772 struct hns3_hw *hw = &hns->hw;
776 ret = hns3_reset_all_queues(hns);
778 hns3_err(hw, "Failed to reset all queues %d", ret);
783 ret = hns3_start_rx_queues(hns);
785 hns3_err(hw, "Failed to start rx queues: %d", ret);
789 hns3_start_tx_queues(hns);
795 hns3_stop_queues(struct hns3_adapter *hns, bool reset_queue)
797 struct hns3_hw *hw = &hns->hw;
800 hns3_enable_all_queues(hw, false);
802 ret = hns3_reset_all_queues(hns);
804 hns3_err(hw, "Failed to reset all queues %d", ret);
812 hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev,
813 struct hns3_queue_info *q_info)
815 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
816 const struct rte_memzone *rx_mz;
817 struct hns3_rx_queue *rxq;
818 unsigned int rx_desc;
820 rxq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_rx_queue),
821 RTE_CACHE_LINE_SIZE, q_info->socket_id);
823 hns3_err(hw, "Failed to allocate memory for No.%d rx ring!",
828 /* Allocate rx ring hardware descriptors. */
829 rxq->queue_id = q_info->idx;
830 rxq->nb_rx_desc = q_info->nb_desc;
831 rx_desc = rxq->nb_rx_desc * sizeof(struct hns3_desc);
832 rx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
833 rx_desc, HNS3_RING_BASE_ALIGN,
836 hns3_err(hw, "Failed to reserve DMA memory for No.%d rx ring!",
838 hns3_rx_queue_release(rxq);
842 rxq->rx_ring = (struct hns3_desc *)rx_mz->addr;
843 rxq->rx_ring_phys_addr = rx_mz->iova;
845 hns3_dbg(hw, "No.%d rx descriptors iova 0x%" PRIx64, q_info->idx,
846 rxq->rx_ring_phys_addr);
852 hns3_fake_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
853 uint16_t nb_desc, unsigned int socket_id)
855 struct hns3_adapter *hns = dev->data->dev_private;
856 struct hns3_hw *hw = &hns->hw;
857 struct hns3_queue_info q_info;
858 struct hns3_rx_queue *rxq;
861 if (hw->fkq_data.rx_queues[idx]) {
862 hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);
863 hw->fkq_data.rx_queues[idx] = NULL;
867 q_info.socket_id = socket_id;
868 q_info.nb_desc = nb_desc;
869 q_info.type = "hns3 fake RX queue";
870 q_info.ring_name = "rx_fake_ring";
871 rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
873 hns3_err(hw, "Failed to setup No.%d fake rx ring.", idx);
877 /* No need to allocate sw_ring, because upper-layer applications don't use it */
881 rxq->rx_deferred_start = false;
882 rxq->port_id = dev->data->port_id;
883 rxq->configured = true;
884 nb_rx_q = dev->data->nb_rx_queues;
885 rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
886 (nb_rx_q + idx) * HNS3_TQP_REG_SIZE);
887 rxq->rx_buf_len = hw->rx_buf_len;
889 rte_spinlock_lock(&hw->lock);
890 hw->fkq_data.rx_queues[idx] = rxq;
891 rte_spinlock_unlock(&hw->lock);
897 hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev,
898 struct hns3_queue_info *q_info)
900 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
901 const struct rte_memzone *tx_mz;
902 struct hns3_tx_queue *txq;
903 struct hns3_desc *desc;
904 unsigned int tx_desc;
907 txq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_tx_queue),
908 RTE_CACHE_LINE_SIZE, q_info->socket_id);
910 hns3_err(hw, "Failed to allocate memory for No.%d tx ring!",
915 /* Allocate tx ring hardware descriptors. */
916 txq->queue_id = q_info->idx;
917 txq->nb_tx_desc = q_info->nb_desc;
918 tx_desc = txq->nb_tx_desc * sizeof(struct hns3_desc);
919 tx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
920 tx_desc, HNS3_RING_BASE_ALIGN,
923 hns3_err(hw, "Failed to reserve DMA memory for No.%d tx ring!",
925 hns3_tx_queue_release(txq);
929 txq->tx_ring = (struct hns3_desc *)tx_mz->addr;
930 txq->tx_ring_phys_addr = tx_mz->iova;
932 hns3_dbg(hw, "No.%d tx descriptors iova 0x%" PRIx64, q_info->idx,
933 txq->tx_ring_phys_addr);
937 for (i = 0; i < txq->nb_tx_desc; i++) {
938 desc->tx.tp_fe_sc_vld_ra_ri = 0;
946 hns3_fake_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
947 uint16_t nb_desc, unsigned int socket_id)
949 struct hns3_adapter *hns = dev->data->dev_private;
950 struct hns3_hw *hw = &hns->hw;
951 struct hns3_queue_info q_info;
952 struct hns3_tx_queue *txq;
955 if (hw->fkq_data.tx_queues[idx] != NULL) {
956 hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);
957 hw->fkq_data.tx_queues[idx] = NULL;
961 q_info.socket_id = socket_id;
962 q_info.nb_desc = nb_desc;
963 q_info.type = "hns3 fake TX queue";
964 q_info.ring_name = "tx_fake_ring";
965 txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
967 hns3_err(hw, "Failed to setup No.%d fake tx ring.", idx);
971 /* No need to allocate sw_ring, because upper-layer applications don't use it */
975 txq->tx_deferred_start = false;
976 txq->port_id = dev->data->port_id;
977 txq->configured = true;
978 nb_tx_q = dev->data->nb_tx_queues;
979 txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
980 (nb_tx_q + idx) * HNS3_TQP_REG_SIZE);
982 rte_spinlock_lock(&hw->lock);
983 hw->fkq_data.tx_queues[idx] = txq;
984 rte_spinlock_unlock(&hw->lock);
990 hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
992 uint16_t old_nb_queues = hw->fkq_data.nb_fake_rx_queues;
996 if (hw->fkq_data.rx_queues == NULL && nb_queues != 0) {
997 /* first time configuration */
999 size = sizeof(hw->fkq_data.rx_queues[0]) * nb_queues;
1000 hw->fkq_data.rx_queues = rte_zmalloc("fake_rx_queues", size,
1001 RTE_CACHE_LINE_SIZE);
1002 if (hw->fkq_data.rx_queues == NULL) {
1003 hw->fkq_data.nb_fake_rx_queues = 0;
1006 } else if (hw->fkq_data.rx_queues != NULL && nb_queues != 0) {
1008 rxq = hw->fkq_data.rx_queues;
1009 for (i = nb_queues; i < old_nb_queues; i++)
1010 hns3_dev_rx_queue_release(rxq[i]);
1012 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
1013 RTE_CACHE_LINE_SIZE);
1016 if (nb_queues > old_nb_queues) {
1017 uint16_t new_qs = nb_queues - old_nb_queues;
1018 memset(rxq + old_nb_queues, 0, sizeof(rxq[0]) * new_qs);
1021 hw->fkq_data.rx_queues = rxq;
1022 } else if (hw->fkq_data.rx_queues != NULL && nb_queues == 0) {
1023 rxq = hw->fkq_data.rx_queues;
1024 for (i = nb_queues; i < old_nb_queues; i++)
1025 hns3_dev_rx_queue_release(rxq[i]);
1027 rte_free(hw->fkq_data.rx_queues);
1028 hw->fkq_data.rx_queues = NULL;
1031 hw->fkq_data.nb_fake_rx_queues = nb_queues;
1037 hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
1039 uint16_t old_nb_queues = hw->fkq_data.nb_fake_tx_queues;
1043 if (hw->fkq_data.tx_queues == NULL && nb_queues != 0) {
1044 /* first time configuration */
1046 size = sizeof(hw->fkq_data.tx_queues[0]) * nb_queues;
1047 hw->fkq_data.tx_queues = rte_zmalloc("fake_tx_queues", size,
1048 RTE_CACHE_LINE_SIZE);
1049 if (hw->fkq_data.tx_queues == NULL) {
1050 hw->fkq_data.nb_fake_tx_queues = 0;
1053 } else if (hw->fkq_data.tx_queues != NULL && nb_queues != 0) {
1055 txq = hw->fkq_data.tx_queues;
1056 for (i = nb_queues; i < old_nb_queues; i++)
1057 hns3_dev_tx_queue_release(txq[i]);
1058 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
1059 RTE_CACHE_LINE_SIZE);
1062 if (nb_queues > old_nb_queues) {
1063 uint16_t new_qs = nb_queues - old_nb_queues;
1064 memset(txq + old_nb_queues, 0, sizeof(txq[0]) * new_qs);
1067 hw->fkq_data.tx_queues = txq;
1068 } else if (hw->fkq_data.tx_queues != NULL && nb_queues == 0) {
1069 txq = hw->fkq_data.tx_queues;
1070 for (i = nb_queues; i < old_nb_queues; i++)
1071 hns3_dev_tx_queue_release(txq[i]);
1073 rte_free(hw->fkq_data.tx_queues);
1074 hw->fkq_data.tx_queues = NULL;
1076 hw->fkq_data.nb_fake_tx_queues = nb_queues;
1082 hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
1085 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1086 uint16_t rx_need_add_nb_q;
1087 uint16_t tx_need_add_nb_q;
1092 /* Setup new number of fake RX/TX queues and reconfigure device. */
1093 hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q);
1094 rx_need_add_nb_q = hw->cfg_max_queues - nb_rx_q;
1095 tx_need_add_nb_q = hw->cfg_max_queues - nb_tx_q;
1096 ret = hns3_fake_rx_queue_config(hw, rx_need_add_nb_q);
1098 hns3_err(hw, "Failed to configure fake rx queues: %d", ret);
1099 goto cfg_fake_rx_q_fail;
1102 ret = hns3_fake_tx_queue_config(hw, tx_need_add_nb_q);
1104 hns3_err(hw, "Failed to configure fake tx queues: %d", ret);
1105 goto cfg_fake_tx_q_fail;
1108 /* Allocate and set up fake RX queue per Ethernet port. */
1109 port_id = hw->data->port_id;
1110 for (q = 0; q < rx_need_add_nb_q; q++) {
1111 ret = hns3_fake_rx_queue_setup(dev, q, HNS3_MIN_RING_DESC,
1112 rte_eth_dev_socket_id(port_id));
1114 goto setup_fake_rx_q_fail;
1117 /* Allocate and set up fake TX queue per Ethernet port. */
1118 for (q = 0; q < tx_need_add_nb_q; q++) {
1119 ret = hns3_fake_tx_queue_setup(dev, q, HNS3_MIN_RING_DESC,
1120 rte_eth_dev_socket_id(port_id));
1122 goto setup_fake_tx_q_fail;
1127 setup_fake_tx_q_fail:
1128 setup_fake_rx_q_fail:
1129 (void)hns3_fake_tx_queue_config(hw, 0);
1131 (void)hns3_fake_rx_queue_config(hw, 0);
1133 hw->cfg_max_queues = 0;
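/*
 * Editor's note, a worked example: with nb_rx_q == 8 and nb_tx_q == 4,
 * cfg_max_queues becomes 8, rx_need_add_nb_q == 0 and
 * tx_need_add_nb_q == 4, so four fake TX queues are created. The hns3
 * hardware manages queues as task queue pairs (TQPs), so every real RX
 * queue needs a TX ring to pair with, and vice versa.
 */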
1139 hns3_dev_release_mbufs(struct hns3_adapter *hns)
1141 struct rte_eth_dev_data *dev_data = hns->hw.data;
1142 struct hns3_rx_queue *rxq;
1143 struct hns3_tx_queue *txq;
1146 if (dev_data->rx_queues)
1147 for (i = 0; i < dev_data->nb_rx_queues; i++) {
1148 rxq = dev_data->rx_queues[i];
1149 if (rxq == NULL || rxq->rx_deferred_start)
1151 hns3_rx_queue_release_mbufs(rxq);
1154 if (dev_data->tx_queues)
1155 for (i = 0; i < dev_data->nb_tx_queues; i++) {
1156 txq = dev_data->tx_queues[i];
1157 if (txq == NULL || txq->tx_deferred_start)
1159 hns3_tx_queue_release_mbufs(txq);
1164 hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
1165 unsigned int socket_id, const struct rte_eth_rxconf *conf,
1166 struct rte_mempool *mp)
1168 struct hns3_adapter *hns = dev->data->dev_private;
1169 struct hns3_hw *hw = &hns->hw;
1170 struct hns3_queue_info q_info;
1171 struct hns3_rx_queue *rxq;
1174 if (dev->data->dev_started) {
1175 hns3_err(hw, "rx_queue_setup after dev_start not supported");
1179 if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||
1180 nb_desc % HNS3_ALIGN_RING_DESC) {
1181 hns3_err(hw, "Number (%u) of rx descriptors is invalid",
1186 if (dev->data->rx_queues[idx]) {
1187 hns3_rx_queue_release(dev->data->rx_queues[idx]);
1188 dev->data->rx_queues[idx] = NULL;
1192 q_info.socket_id = socket_id;
1193 q_info.nb_desc = nb_desc;
1194 q_info.type = "hns3 RX queue";
1195 q_info.ring_name = "rx_ring";
1196 rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
1199 "Failed to alloc mem and reserve DMA mem for rx ring!");
1205 if (conf->rx_free_thresh <= 0)
1206 rxq->rx_free_thresh = DEFAULT_RX_FREE_THRESH;
1208 rxq->rx_free_thresh = conf->rx_free_thresh;
1209 rxq->rx_deferred_start = conf->rx_deferred_start;
1211 rx_entry_len = sizeof(struct hns3_entry) * rxq->nb_rx_desc;
1212 rxq->sw_ring = rte_zmalloc_socket("hns3 RX sw ring", rx_entry_len,
1213 RTE_CACHE_LINE_SIZE, socket_id);
1214 if (rxq->sw_ring == NULL) {
1215 hns3_err(hw, "Failed to allocate memory for rx sw ring!");
1216 hns3_rx_queue_release(rxq);
1220 rxq->next_to_use = 0;
1221 rxq->next_to_clean = 0;
1222 rxq->nb_rx_hold = 0;
1223 rxq->pkt_first_seg = NULL;
1224 rxq->pkt_last_seg = NULL;
1225 rxq->port_id = dev->data->port_id;
1226 rxq->configured = true;
1227 rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
1228 idx * HNS3_TQP_REG_SIZE);
1229 rxq->rx_buf_len = hw->rx_buf_len;
1231 rxq->pkt_len_errors = 0;
1232 rxq->l3_csum_erros = 0;
1233 rxq->l4_csum_erros = 0;
1234 rxq->ol3_csum_erros = 0;
1235 rxq->ol4_csum_erros = 0;
1237 rte_spinlock_lock(&hw->lock);
1238 dev->data->rx_queues[idx] = rxq;
1239 rte_spinlock_unlock(&hw->lock);
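/*
 * Editor's usage sketch (not part of the driver): applications reach this
 * setup path through the generic ethdev API rather than calling it
 * directly, e.g.
 *
 *	struct rte_eth_rxconf rx_conf = { .rx_free_thresh = 32 };
 *	ret = rte_eth_rx_queue_setup(port_id, 0, 1024,
 *				     rte_eth_dev_socket_id(port_id),
 *				     &rx_conf, mbuf_pool);
 *
 * where port_id and mbuf_pool are assumed to be initialized by the caller.
 */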
1244 static inline uint32_t
1245 rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint32_t ol_info)
1247 #define HNS3_L2TBL_NUM 4
1248 #define HNS3_L3TBL_NUM 16
1249 #define HNS3_L4TBL_NUM 16
1250 #define HNS3_OL3TBL_NUM 16
1251 #define HNS3_OL4TBL_NUM 16
1252 uint32_t pkt_type = 0;
1253 uint32_t l2id, l3id, l4id;
1254 uint32_t ol3id, ol4id;
1256 static const uint32_t l2table[HNS3_L2TBL_NUM] = {
1258 RTE_PTYPE_L2_ETHER_VLAN,
1259 RTE_PTYPE_L2_ETHER_QINQ,
1263 static const uint32_t l3table[HNS3_L3TBL_NUM] = {
1266 RTE_PTYPE_L2_ETHER_ARP,
1268 RTE_PTYPE_L3_IPV4_EXT,
1269 RTE_PTYPE_L3_IPV6_EXT,
1270 RTE_PTYPE_L2_ETHER_LLDP,
1271 0, 0, 0, 0, 0, 0, 0, 0, 0
1274 static const uint32_t l4table[HNS3_L4TBL_NUM] = {
1277 RTE_PTYPE_TUNNEL_GRE,
1281 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1284 static const uint32_t inner_l2table[HNS3_L2TBL_NUM] = {
1285 RTE_PTYPE_INNER_L2_ETHER,
1286 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1287 RTE_PTYPE_INNER_L2_ETHER_QINQ,
1291 static const uint32_t inner_l3table[HNS3_L3TBL_NUM] = {
1292 RTE_PTYPE_INNER_L3_IPV4,
1293 RTE_PTYPE_INNER_L3_IPV6,
1295 RTE_PTYPE_INNER_L2_ETHER,
1296 RTE_PTYPE_INNER_L3_IPV4_EXT,
1297 RTE_PTYPE_INNER_L3_IPV6_EXT,
1298 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1301 static const uint32_t inner_l4table[HNS3_L4TBL_NUM] = {
1302 RTE_PTYPE_INNER_L4_UDP,
1303 RTE_PTYPE_INNER_L4_TCP,
1304 RTE_PTYPE_TUNNEL_GRE,
1305 RTE_PTYPE_INNER_L4_SCTP,
1307 RTE_PTYPE_INNER_L4_ICMP,
1308 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1311 static const uint32_t ol3table[HNS3_OL3TBL_NUM] = {
1315 RTE_PTYPE_L3_IPV4_EXT,
1316 RTE_PTYPE_L3_IPV6_EXT,
1317 0, 0, 0, 0, 0, 0, 0, 0, 0,
1321 static const uint32_t ol4table[HNS3_OL4TBL_NUM] = {
1323 RTE_PTYPE_TUNNEL_VXLAN,
1324 RTE_PTYPE_TUNNEL_NVGRE,
1325 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1328 l2id = hns3_get_field(pkt_info, HNS3_RXD_STRP_TAGP_M,
1329 HNS3_RXD_STRP_TAGP_S);
1330 l3id = hns3_get_field(pkt_info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S);
1331 l4id = hns3_get_field(pkt_info, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S);
1332 ol3id = hns3_get_field(ol_info, HNS3_RXD_OL3ID_M, HNS3_RXD_OL3ID_S);
1333 ol4id = hns3_get_field(ol_info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
1335 if (ol4table[ol4id])
1336 pkt_type |= (inner_l2table[l2id] | inner_l3table[l3id] |
1337 inner_l4table[l4id] | ol3table[ol3id] |
1340 pkt_type |= (l2table[l2id] | l3table[l3id] | l4table[l4id]);
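/*
 * Editor's note: ol4id selects between the two halves of the tables above.
 * A tunneled packet (nonzero ol4table entry, e.g. RTE_PTYPE_TUNNEL_VXLAN)
 * is classified from the inner_* tables combined with the outer ol3/ol4
 * tunnel types, while a plain packet takes the flat l2/l3/l4 tables.
 */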
1345 hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1347 static const uint32_t ptypes[] = {
1349 RTE_PTYPE_L2_ETHER_VLAN,
1350 RTE_PTYPE_L2_ETHER_QINQ,
1351 RTE_PTYPE_L2_ETHER_LLDP,
1352 RTE_PTYPE_L2_ETHER_ARP,
1354 RTE_PTYPE_L3_IPV4_EXT,
1356 RTE_PTYPE_L3_IPV6_EXT,
1362 RTE_PTYPE_TUNNEL_GRE,
1366 if (dev->rx_pkt_burst == hns3_recv_pkts)
1373 hns3_clean_rx_buffers(struct hns3_rx_queue *rxq, int count)
1375 rxq->next_to_use += count;
1376 if (rxq->next_to_use >= rxq->nb_rx_desc)
1377 rxq->next_to_use -= rxq->nb_rx_desc;
1379 hns3_write_dev(rxq, HNS3_RING_RX_HEAD_REG, count);
1383 hns3_handle_bdinfo(struct hns3_rx_queue *rxq, struct rte_mbuf *rxm,
1384 uint32_t bd_base_info, uint32_t l234_info,
1385 uint32_t *cksum_err)
1389 if (unlikely(l234_info & BIT(HNS3_RXD_L2E_B))) {
1394 if (unlikely(rxm->pkt_len == 0 ||
1395 (l234_info & BIT(HNS3_RXD_TRUNCAT_B)))) {
1396 rxq->pkt_len_errors++;
1400 if (bd_base_info & BIT(HNS3_RXD_L3L4P_B)) {
1401 if (unlikely(l234_info & BIT(HNS3_RXD_L3E_B))) {
1402 rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;
1403 rxq->l3_csum_erros++;
1404 tmp |= HNS3_L3_CKSUM_ERR;
1407 if (unlikely(l234_info & BIT(HNS3_RXD_L4E_B))) {
1408 rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
1409 rxq->l4_csum_erros++;
1410 tmp |= HNS3_L4_CKSUM_ERR;
1413 if (unlikely(l234_info & BIT(HNS3_RXD_OL3E_B))) {
1414 rxq->ol3_csum_erros++;
1415 tmp |= HNS3_OUTER_L3_CKSUM_ERR;
1418 if (unlikely(l234_info & BIT(HNS3_RXD_OL4E_B))) {
1419 rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
1420 rxq->ol4_csum_erros++;
1421 tmp |= HNS3_OUTER_L4_CKSUM_ERR;
1430 hns3_rx_set_cksum_flag(struct rte_mbuf *rxm, uint64_t packet_type,
1431 const uint32_t cksum_err)
1433 if (unlikely((packet_type & RTE_PTYPE_TUNNEL_MASK))) {
1434 if (likely(packet_type & RTE_PTYPE_INNER_L3_MASK) &&
1435 (cksum_err & HNS3_L3_CKSUM_ERR) == 0)
1436 rxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
1437 if (likely(packet_type & RTE_PTYPE_INNER_L4_MASK) &&
1438 (cksum_err & HNS3_L4_CKSUM_ERR) == 0)
1439 rxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
1440 if (likely(packet_type & RTE_PTYPE_L4_MASK) &&
1441 (cksum_err & HNS3_OUTER_L4_CKSUM_ERR) == 0)
1442 rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;
1444 if (likely(packet_type & RTE_PTYPE_L3_MASK) &&
1445 (cksum_err & HNS3_L3_CKSUM_ERR) == 0)
1446 rxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
1447 if (likely(packet_type & RTE_PTYPE_L4_MASK) &&
1448 (cksum_err & HNS3_L4_CKSUM_ERR) == 0)
1449 rxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
1454 hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1456 volatile struct hns3_desc *rx_ring; /* RX ring (desc) */
1457 volatile struct hns3_desc *rxdp; /* pointer of the current desc */
1458 struct hns3_rx_queue *rxq; /* RX queue */
1459 struct hns3_entry *sw_ring;
1460 struct hns3_entry *rxe;
1461 struct rte_mbuf *first_seg;
1462 struct rte_mbuf *last_seg;
1463 struct hns3_desc rxd;
1464 struct rte_mbuf *nmb; /* pointer of the new mbuf */
1465 struct rte_mbuf *rxm;
1466 struct rte_eth_dev *dev;
1467 uint32_t bd_base_info;
1482 dev = &rte_eth_devices[rxq->port_id];
1484 rx_id = rxq->next_to_clean;
1485 rx_ring = rxq->rx_ring;
1486 first_seg = rxq->pkt_first_seg;
1487 last_seg = rxq->pkt_last_seg;
1488 sw_ring = rxq->sw_ring;
1490 while (nb_rx < nb_pkts) {
1491 rxdp = &rx_ring[rx_id];
1492 bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
1493 if (unlikely(!hns3_get_bit(bd_base_info, HNS3_RXD_VLD_B)))
1496 * The interaction between software and hardware when
1497 * receiving a new packet in the hns3 network engine:
1498 * 1. The hardware network engine first writes the packet content
1499 * to the memory pointed to by the 'addr' field of the Rx Buffer
1500 * Descriptor, then fills the result of parsing the packet,
1501 * including the valid field, into the Rx Buffer Descriptor in
1502 * one write operation.
1503 * 2. The driver reads the Rx BD's valid field in a loop to check
1504 * whether it is valid; if so, it assigns a new address to
1505 * the addr field, clears the valid field, obtains the other
1506 * information of the packet by parsing the Rx BD's other fields,
1507 * and finally writes back the number of Rx BDs processed by the
1508 * driver to the HNS3_RING_RX_HEAD_REG register to inform
1510 * In the above process, the ordering is very important. We must
1511 * make sure that the CPU reads the Rx BD's other fields only after the
1514 * There are two types of re-ordering: compiler re-ordering and
1515 * CPU re-ordering under the ARMv8 architecture.
1516 * 1. We use volatile to deal with compiler re-ordering, which is
1517 * why rx_ring/rxdp are defined with volatile.
1518 * 2. We commonly use a memory barrier to deal with CPU
1519 * re-ordering, but the cost is high.
1521 * To avoid the high cost of a memory barrier, we use the
1522 * data dependency ordering of the ARMv8 architecture,
1525 * instr02: load B <- A
1526 * instr02 will always execute after instr01.
1528 * To construct the data dependency ordering, we use the
1529 * following assignment:
1530 * rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -
1531 * (1u<<HNS3_RXD_VLD_B)]
1532 * Using the gcc compiler on the ARMv8 architecture, a related
1533 * assembly code example is as follows:
1534 * note: (1u << HNS3_RXD_VLD_B) equals 0x10
1535 * instr01: ldr w26, [x22, #28] --read bd_base_info
1536 * instr02: and w0, w26, #0x10 --calc bd_base_info & 0x10
1537 * instr03: sub w0, w0, #0x10 --calc (bd_base_info &
1539 * instr04: add x0, x22, x0, lsl #5 --calc copy source addr
1540 * instr05: ldp x2, x3, [x0]
1541 * instr06: stp x2, x3, [x29, #256] --copy BD's [0 ~ 15]B
1542 * instr07: ldp x4, x5, [x0, #16]
1543 * instr08: stp x4, x5, [x29, #272] --copy BD's [16 ~ 31]B
1544 * instr05~08 depend on x0's value, x0 depends on w26's
1545 * value, and w26 is the bd_base_info; this forms the data
1546 * dependency ordering.
1547 * note: if the BD is valid, (bd_base_info & (1u<<HNS3_RXD_VLD_B)) -
1548 * (1u<<HNS3_RXD_VLD_B) will always be zero, so the
1549 * assignment is correct.
1551 * So we use data dependency ordering instead of a memory
1552 * barrier to improve receive performance.
1554 rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -
1555 (1u << HNS3_RXD_VLD_B)];
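/*
 * Editor's note: a conventional, more portable alternative to the
 * address-dependency trick above is an explicit read barrier between
 * the valid-bit check and the BD copy, e.g.
 *
 *	rte_rmb();
 *	rxd = *rxdp;
 *
 * at the cost of a dmb instruction per loop iteration on ARMv8.
 */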
1557 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1558 if (unlikely(nmb == NULL)) {
1559 dev->data->rx_mbuf_alloc_failed++;
1564 rxe = &sw_ring[rx_id];
1566 if (unlikely(rx_id == rxq->nb_rx_desc))
1569 rte_prefetch0(sw_ring[rx_id].mbuf);
1570 if ((rx_id & 0x3) == 0) {
1571 rte_prefetch0(&rx_ring[rx_id]);
1572 rte_prefetch0(&sw_ring[rx_id]);
1578 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1579 rxdp->rx.bd_base_info = 0;
1580 rxdp->addr = dma_addr;
1582 /* Load the remaining descriptor data and extract the necessary fields */
1583 data_len = (uint16_t)(rte_le_to_cpu_16(rxd.rx.size));
1584 l234_info = rte_le_to_cpu_32(rxd.rx.l234_info);
1585 ol_info = rte_le_to_cpu_32(rxd.rx.ol_info);
1587 if (first_seg == NULL) {
1589 first_seg->nb_segs = 1;
1591 first_seg->nb_segs++;
1592 last_seg->next = rxm;
1595 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1596 rxm->data_len = data_len;
1598 if (!hns3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
1603 /* The last buffer of the received packet */
1604 pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.rx.pkt_len));
1605 first_seg->pkt_len = pkt_len;
1606 first_seg->port = rxq->port_id;
1607 first_seg->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash);
1608 first_seg->ol_flags = PKT_RX_RSS_HASH;
1609 if (unlikely(hns3_get_bit(bd_base_info, HNS3_RXD_LUM_B))) {
1610 first_seg->hash.fdir.hi =
1611 rte_le_to_cpu_32(rxd.rx.fd_id);
1612 first_seg->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
1616 ret = hns3_handle_bdinfo(rxq, first_seg, bd_base_info,
1617 l234_info, &cksum_err);
1621 first_seg->packet_type = rxd_pkt_info_to_pkt_type(l234_info,
1624 if (bd_base_info & BIT(HNS3_RXD_L3L4P_B))
1625 hns3_rx_set_cksum_flag(first_seg,
1626 first_seg->packet_type,
1629 first_seg->vlan_tci = rte_le_to_cpu_16(rxd.rx.vlan_tag);
1630 first_seg->vlan_tci_outer =
1631 rte_le_to_cpu_16(rxd.rx.ot_vlan_tag);
1632 rx_pkts[nb_rx++] = first_seg;
1636 rte_pktmbuf_free(first_seg);
1640 rxq->next_to_clean = rx_id;
1641 rxq->pkt_first_seg = first_seg;
1642 rxq->pkt_last_seg = last_seg;
1644 nb_rx_bd = nb_rx_bd + rxq->nb_rx_hold;
1645 if (nb_rx_bd > rxq->rx_free_thresh) {
1646 hns3_clean_rx_buffers(rxq, nb_rx_bd);
1649 rxq->nb_rx_hold = nb_rx_bd;
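/*
 * Editor's usage sketch (not part of the driver): hns3_recv_pkts is the
 * burst callback invoked through the generic ethdev API, e.g.
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t n = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
 */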
1655 hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
1656 unsigned int socket_id, const struct rte_eth_txconf *conf)
1658 struct hns3_adapter *hns = dev->data->dev_private;
1659 struct hns3_hw *hw = &hns->hw;
1660 struct hns3_queue_info q_info;
1661 struct hns3_tx_queue *txq;
1664 if (dev->data->dev_started) {
1665 hns3_err(hw, "tx_queue_setup after dev_start not supported");
1669 if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||
1670 nb_desc % HNS3_ALIGN_RING_DESC) {
1671 hns3_err(hw, "Number (%u) of tx descriptors is invalid",
1676 if (dev->data->tx_queues[idx] != NULL) {
1677 hns3_tx_queue_release(dev->data->tx_queues[idx]);
1678 dev->data->tx_queues[idx] = NULL;
1682 q_info.socket_id = socket_id;
1683 q_info.nb_desc = nb_desc;
1684 q_info.type = "hns3 TX queue";
1685 q_info.ring_name = "tx_ring";
1686 txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
1689 "Failed to alloc mem and reserve DMA mem for tx ring!");
1693 txq->tx_deferred_start = conf->tx_deferred_start;
1694 tx_entry_len = sizeof(struct hns3_entry) * txq->nb_tx_desc;
1695 txq->sw_ring = rte_zmalloc_socket("hns3 TX sw ring", tx_entry_len,
1696 RTE_CACHE_LINE_SIZE, socket_id);
1697 if (txq->sw_ring == NULL) {
1698 hns3_err(hw, "Failed to allocate memory for tx sw ring!");
1699 hns3_tx_queue_release(txq);
1704 txq->next_to_use = 0;
1705 txq->next_to_clean = 0;
1706 txq->tx_bd_ready = txq->nb_tx_desc - 1;
1707 txq->port_id = dev->data->port_id;
1708 txq->configured = true;
1709 txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
1710 idx * HNS3_TQP_REG_SIZE);
1711 txq->over_length_pkt_cnt = 0;
1712 txq->exceed_limit_bd_pkt_cnt = 0;
1713 txq->exceed_limit_bd_reassem_fail = 0;
1714 txq->unsupported_tunnel_pkt_cnt = 0;
1715 txq->queue_full_cnt = 0;
1716 txq->pkt_padding_fail_cnt = 0;
1717 rte_spinlock_lock(&hw->lock);
1718 dev->data->tx_queues[idx] = txq;
1719 rte_spinlock_unlock(&hw->lock);
1725 hns3_queue_xmit(struct hns3_tx_queue *txq, uint32_t buf_num)
1727 hns3_write_dev(txq, HNS3_RING_TX_TAIL_REG, buf_num);
1731 hns3_tx_free_useless_buffer(struct hns3_tx_queue *txq)
1733 uint16_t tx_next_clean = txq->next_to_clean;
1734 uint16_t tx_next_use = txq->next_to_use;
1735 uint16_t tx_bd_ready = txq->tx_bd_ready;
1736 uint16_t tx_bd_max = txq->nb_tx_desc;
1737 struct hns3_entry *tx_bak_pkt = &txq->sw_ring[tx_next_clean];
1738 struct hns3_desc *desc = &txq->tx_ring[tx_next_clean];
1739 struct rte_mbuf *mbuf;
1741 while ((!hns3_get_bit(desc->tx.tp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B)) &&
1742 tx_next_use != tx_next_clean) {
1743 mbuf = tx_bak_pkt->mbuf;
1745 rte_pktmbuf_free_seg(mbuf);
1746 tx_bak_pkt->mbuf = NULL;
1754 if (tx_next_clean >= tx_bd_max) {
1756 desc = txq->tx_ring;
1757 tx_bak_pkt = txq->sw_ring;
1761 txq->next_to_clean = tx_next_clean;
1762 txq->tx_bd_ready = tx_bd_ready;
1766 hns3_tso_proc_tunnel(struct hns3_desc *desc, uint64_t ol_flags,
1767 struct rte_mbuf *rxm, uint8_t *l2_len)
1773 tun_flags = ol_flags & PKT_TX_TUNNEL_MASK;
1777 otmp = rte_le_to_cpu_32(desc->tx.ol_type_vlan_len_msec);
1778 switch (tun_flags) {
1779 case PKT_TX_TUNNEL_GENEVE:
1780 case PKT_TX_TUNNEL_VXLAN:
1781 *l2_len = rxm->l2_len - RTE_ETHER_VXLAN_HLEN;
1783 case PKT_TX_TUNNEL_GRE:
1785 * OL4 header size is defined in units of 4 bytes; it contains
1786 * the outer L4 (GRE) length and the tunneling length.
1788 ol4_len = hns3_get_field(otmp, HNS3_TXD_L4LEN_M,
1790 *l2_len = rxm->l2_len - (ol4_len << HNS3_L4_LEN_UNIT);
1793 /* For non-UDP/GRE tunneling, drop the tunnel packet */
1796 hns3_set_field(otmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
1797 rxm->outer_l2_len >> HNS3_L2_LEN_UNIT);
1798 desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(otmp);
1804 hns3_set_tso(struct hns3_desc *desc,
1805 uint64_t ol_flags, struct rte_mbuf *rxm)
1807 uint32_t paylen, hdr_len;
1809 uint8_t l2_len = rxm->l2_len;
1811 if (!(ol_flags & PKT_TX_TCP_SEG))
1814 if (hns3_tso_proc_tunnel(desc, ol_flags, rxm, &l2_len))
1817 hdr_len = rxm->l2_len + rxm->l3_len + rxm->l4_len;
1818 hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ?
1819 rxm->outer_l2_len + rxm->outer_l3_len : 0;
1820 paylen = rxm->pkt_len - hdr_len;
1821 if (paylen <= rxm->tso_segsz)
1824 tmp = rte_le_to_cpu_32(desc->tx.type_cs_vlan_tso_len);
1825 hns3_set_bit(tmp, HNS3_TXD_TSO_B, 1);
1826 hns3_set_bit(tmp, HNS3_TXD_L3CS_B, 1);
1827 hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, HNS3_L4T_TCP);
1828 hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
1829 hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
1830 sizeof(struct rte_tcp_hdr) >> HNS3_L4_LEN_UNIT);
1831 hns3_set_field(tmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
1832 l2_len >> HNS3_L2_LEN_UNIT);
1833 desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp);
1834 desc->tx.mss = rte_cpu_to_le_16(rxm->tso_segsz);
1838 fill_desc(struct hns3_tx_queue *txq, uint16_t tx_desc_id, struct rte_mbuf *rxm,
1839 bool first, int offset)
1841 struct hns3_desc *tx_ring = txq->tx_ring;
1842 struct hns3_desc *desc = &tx_ring[tx_desc_id];
1843 uint8_t frag_end = rxm->next == NULL ? 1 : 0;
1844 uint64_t ol_flags = rxm->ol_flags;
1845 uint16_t size = rxm->data_len;
1851 desc->addr = rte_mbuf_data_iova(rxm) + offset;
1852 desc->tx.send_size = rte_cpu_to_le_16(size);
1853 hns3_set_bit(rrcfv, HNS3_TXD_VLD_B, 1);
1856 hdr_len = rxm->l2_len + rxm->l3_len + rxm->l4_len;
1857 hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ?
1858 rxm->outer_l2_len + rxm->outer_l3_len : 0;
1859 paylen = rxm->pkt_len - hdr_len;
1860 desc->tx.paylen = rte_cpu_to_le_32(paylen);
1861 hns3_set_tso(desc, ol_flags, rxm);
1864 hns3_set_bit(rrcfv, HNS3_TXD_FE_B, frag_end);
1865 desc->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(rrcfv);
1868 if (ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
1869 tmp = rte_le_to_cpu_32(desc->tx.type_cs_vlan_tso_len);
1870 hns3_set_bit(tmp, HNS3_TXD_VLAN_B, 1);
1871 desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp);
1872 desc->tx.vlan_tag = rte_cpu_to_le_16(rxm->vlan_tci);
1875 if (ol_flags & PKT_TX_QINQ_PKT) {
1876 tmp = rte_le_to_cpu_32(desc->tx.ol_type_vlan_len_msec);
1877 hns3_set_bit(tmp, HNS3_TXD_OVLAN_B, 1);
1878 desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(tmp);
1879 desc->tx.outer_vlan_tag =
1880 rte_cpu_to_le_16(rxm->vlan_tci_outer);
1886 hns3_tx_alloc_mbufs(struct hns3_tx_queue *txq, struct rte_mempool *mb_pool,
1887 uint16_t nb_new_buf, struct rte_mbuf **alloc_mbuf)
1889 struct rte_mbuf *new_mbuf = NULL;
1890 struct rte_eth_dev *dev;
1891 struct rte_mbuf *temp;
1895 /* Allocate enough mbufs */
1896 for (i = 0; i < nb_new_buf; i++) {
1897 temp = rte_pktmbuf_alloc(mb_pool);
1898 if (unlikely(temp == NULL)) {
1899 dev = &rte_eth_devices[txq->port_id];
1900 hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1901 hns3_err(hw, "Failed to alloc TX mbuf port_id=%d, "
1902 "queue_id=%d when reassembling tx pkts.",
1903 txq->port_id, txq->queue_id);
1904 rte_pktmbuf_free(new_mbuf);
1907 temp->next = new_mbuf;
1911 if (new_mbuf == NULL)
1914 new_mbuf->nb_segs = nb_new_buf;
1915 *alloc_mbuf = new_mbuf;
1921 hns3_reassemble_tx_pkts(void *tx_queue, struct rte_mbuf *tx_pkt,
1922 struct rte_mbuf **new_pkt)
1924 struct hns3_tx_queue *txq = tx_queue;
1925 struct rte_mempool *mb_pool;
1926 struct rte_mbuf *new_mbuf;
1927 struct rte_mbuf *temp_new;
1928 struct rte_mbuf *temp;
1929 uint16_t last_buf_len;
1930 uint16_t nb_new_buf;
1941 mb_pool = tx_pkt->pool;
1942 buf_size = tx_pkt->buf_len - RTE_PKTMBUF_HEADROOM;
1943 nb_new_buf = (tx_pkt->pkt_len - 1) / buf_size + 1;
1945 last_buf_len = tx_pkt->pkt_len % buf_size;
1946 if (last_buf_len == 0)
1947 last_buf_len = buf_size;
1949 /* Allocate enough mbufs */
1950 ret = hns3_tx_alloc_mbufs(txq, mb_pool, nb_new_buf, &new_mbuf);
1954 /* Copy the original packet content to the new mbufs */
1956 s = rte_pktmbuf_mtod(temp, char *);
1957 len_s = temp->data_len;
1958 temp_new = new_mbuf;
1959 for (i = 0; i < nb_new_buf; i++) {
1960 d = rte_pktmbuf_mtod(temp_new, char *);
1961 if (i < nb_new_buf - 1)
1964 buf_len = last_buf_len;
1968 len = RTE_MIN(len_s, len_d);
1972 len_d = len_d - len;
1973 len_s = len_s - len;
1979 s = rte_pktmbuf_mtod(temp, char *);
1980 len_s = temp->data_len;
1984 temp_new->data_len = buf_len;
1985 temp_new = temp_new->next;
1988 /* free original mbufs */
1989 rte_pktmbuf_free(tx_pkt);
1991 *new_pkt = new_mbuf;
1997 hns3_parse_outer_params(uint64_t ol_flags, uint32_t *ol_type_vlan_len_msec)
1999 uint32_t tmp = *ol_type_vlan_len_msec;
2001 /* (outer) IP header type */
2002 if (ol_flags & PKT_TX_OUTER_IPV4) {
2003 /* OL3 header size, defined in 4 bytes */
2004 hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
2005 sizeof(struct rte_ipv4_hdr) >> HNS3_L3_LEN_UNIT);
2006 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
2007 hns3_set_field(tmp, HNS3_TXD_OL3T_M,
2008 HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_CSUM);
2010 hns3_set_field(tmp, HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
2011 HNS3_OL3T_IPV4_NO_CSUM);
2012 } else if (ol_flags & PKT_TX_OUTER_IPV6) {
2013 hns3_set_field(tmp, HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
2015 /* OL3 header size, defined in 4 bytes */
2016 hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
2017 sizeof(struct rte_ipv6_hdr) >> HNS3_L3_LEN_UNIT);
2020 *ol_type_vlan_len_msec = tmp;
2024 hns3_parse_inner_params(uint64_t ol_flags, uint32_t *ol_type_vlan_len_msec,
2025 struct rte_net_hdr_lens *hdr_lens)
2027 uint32_t tmp = *ol_type_vlan_len_msec;
2030 /* OL2 header size, defined in 2 bytes */
2031 hns3_set_field(tmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
2032 sizeof(struct rte_ether_hdr) >> HNS3_L2_LEN_UNIT);
2034 /* L4TUNT: L4 Tunneling Type */
2035 switch (ol_flags & PKT_TX_TUNNEL_MASK) {
2036 case PKT_TX_TUNNEL_GENEVE:
2037 case PKT_TX_TUNNEL_VXLAN:
2038 /* MAC-in-UDP tunnelling packet, including VXLAN */
2039 hns3_set_field(tmp, HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S,
2040 HNS3_TUN_MAC_IN_UDP);
2042 * OL4 header size is defined in units of 4 bytes; it contains
2043 * the outer L4 (UDP) length and the tunneling length.
2045 hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
2046 (uint8_t)RTE_ETHER_VXLAN_HLEN >>
2049 case PKT_TX_TUNNEL_GRE:
2050 hns3_set_field(tmp, HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S,
2053 * OL4 header size is defined in units of 4 bytes; it contains
2054 * the outer L4 (GRE) length and the tunneling length.
2056 l4_len = hdr_lens->l4_len + hdr_lens->tunnel_len;
2057 hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
2058 l4_len >> HNS3_L4_LEN_UNIT);
2061 /* For non-UDP/GRE tunneling, drop the tunnel packet */
2065 *ol_type_vlan_len_msec = tmp;
2071 hns3_parse_tunneling_params(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
2073 struct rte_net_hdr_lens *hdr_lens)
2075 struct hns3_desc *tx_ring = txq->tx_ring;
2076 struct hns3_desc *desc = &tx_ring[tx_desc_id];
2080 hns3_parse_outer_params(ol_flags, &value);
2081 ret = hns3_parse_inner_params(ol_flags, &value, hdr_lens);
2085 desc->tx.ol_type_vlan_len_msec |= rte_cpu_to_le_32(value);
2091 hns3_parse_l3_cksum_params(uint64_t ol_flags, uint32_t *type_cs_vlan_tso_len)
2095 /* Enable L3 checksum offloads */
2096 if (ol_flags & PKT_TX_IPV4) {
2097 tmp = *type_cs_vlan_tso_len;
2098 hns3_set_field(tmp, HNS3_TXD_L3T_M, HNS3_TXD_L3T_S,
2100 /* inner(/normal) L3 header size, defined in 4 bytes */
2101 hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
2102 sizeof(struct rte_ipv4_hdr) >> HNS3_L3_LEN_UNIT);
2103 if (ol_flags & PKT_TX_IP_CKSUM)
2104 hns3_set_bit(tmp, HNS3_TXD_L3CS_B, 1);
2105 *type_cs_vlan_tso_len = tmp;
2106 } else if (ol_flags & PKT_TX_IPV6) {
2107 tmp = *type_cs_vlan_tso_len;
2108 /* L3T: IPv6 has no header checksum */
2109 hns3_set_field(tmp, HNS3_TXD_L3T_M, HNS3_TXD_L3T_S,
2111 /* inner(/normal) L3 header size, defined in 4 bytes */
2112 hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
2113 sizeof(struct rte_ipv6_hdr) >> HNS3_L3_LEN_UNIT);
2114 *type_cs_vlan_tso_len = tmp;
2119 hns3_parse_l4_cksum_params(uint64_t ol_flags, uint32_t *type_cs_vlan_tso_len)
2123 /* Enable L4 checksum offloads */
2124 switch (ol_flags & PKT_TX_L4_MASK) {
2125 case PKT_TX_TCP_CKSUM:
2126 tmp = *type_cs_vlan_tso_len;
2127 hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
2129 hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
2130 hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
2131 sizeof(struct rte_tcp_hdr) >> HNS3_L4_LEN_UNIT);
2132 *type_cs_vlan_tso_len = tmp;
2134 case PKT_TX_UDP_CKSUM:
2135 tmp = *type_cs_vlan_tso_len;
2136 hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
2138 hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
2139 hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
2140 sizeof(struct rte_udp_hdr) >> HNS3_L4_LEN_UNIT);
2141 *type_cs_vlan_tso_len = tmp;
2143 case PKT_TX_SCTP_CKSUM:
2144 tmp = *type_cs_vlan_tso_len;
2145 hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
2147 hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
2148 hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
2149 sizeof(struct rte_sctp_hdr) >> HNS3_L4_LEN_UNIT);
2150 *type_cs_vlan_tso_len = tmp;
2158 hns3_txd_enable_checksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
2161 struct hns3_desc *tx_ring = txq->tx_ring;
2162 struct hns3_desc *desc = &tx_ring[tx_desc_id];
2165 /* inner(/normal) L2 header size, defined in 2 bytes */
2166 hns3_set_field(value, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
2167 sizeof(struct rte_ether_hdr) >> HNS3_L2_LEN_UNIT);
2169 hns3_parse_l3_cksum_params(ol_flags, &value);
2170 hns3_parse_l4_cksum_params(ol_flags, &value);
2172 desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(value);
2176 hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num)
2178 struct rte_mbuf *m_first = tx_pkts;
2179 struct rte_mbuf *m_last = tx_pkts;
2180 uint32_t tot_len = 0;
2185 * The hns3 network engine requires that the sum of the data lengths
2186 * of every 8 consecutive buffers is greater than the MSS.
2187 * We simplify this by ensuring that the first 8 consecutive
2188 * frags sum to more than the GSO header length plus the MSS, and that
2189 * every 7 consecutive frags after that sum to more than the MSS, except
 * for the last 7 frags.
2191 if (bd_num <= HNS3_MAX_NON_TSO_BD_PER_PKT)
2194 for (i = 0; m_last && i < HNS3_MAX_NON_TSO_BD_PER_PKT - 1;
2195 i++, m_last = m_last->next)
2196 tot_len += m_last->data_len;
2201 /* Ensure the first 8 frags sum to more than mss + header length */
2202 hdr_len = tx_pkts->l2_len + tx_pkts->l3_len + tx_pkts->l4_len;
2203 hdr_len += (tx_pkts->ol_flags & PKT_TX_TUNNEL_MASK) ?
2204 tx_pkts->outer_l2_len + tx_pkts->outer_l3_len : 0;
2205 if (tot_len + m_last->data_len < tx_pkts->tso_segsz + hdr_len)
2209 * Ensure the sum of the data lengths of every 7 consecutive buffers
2210 * is greater than the mss, except for the last window.
2212 for (i = 0; m_last && i < bd_num - HNS3_MAX_NON_TSO_BD_PER_PKT; i++) {
2213 tot_len -= m_first->data_len;
2214 tot_len += m_last->data_len;
2216 if (tot_len < tx_pkts->tso_segsz)
2219 m_first = m_first->next;
2220 m_last = m_last->next;
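/*
 * Editor's note, a worked example (assuming tso_segsz == 1460 and 54 bytes
 * of headers): a packet chained from 128-byte mbufs fails the first window
 * check, since 8 * 128 == 1024 < 1460 + 54 == 1514, and must be
 * linearized, while a chain of 256-byte mbufs passes it
 * (8 * 256 == 2048 >= 1514).
 */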
2227 hns3_outer_header_cksum_prepare(struct rte_mbuf *m)
2229 uint64_t ol_flags = m->ol_flags;
2230 struct rte_ipv4_hdr *ipv4_hdr;
2231 struct rte_udp_hdr *udp_hdr;
2232 uint32_t paylen, hdr_len;
2234 if (!(ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6)))
2237 if (ol_flags & PKT_TX_IPV4) {
2238 ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
2241 if (ol_flags & PKT_TX_IP_CKSUM)
2242 ipv4_hdr->hdr_checksum = 0;
2245 if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM &&
2246 ol_flags & PKT_TX_TCP_SEG) {
2247 hdr_len = m->l2_len + m->l3_len + m->l4_len;
2248 hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ?
2249 m->outer_l2_len + m->outer_l3_len : 0;
2250 paylen = m->pkt_len - hdr_len;
2251 if (paylen <= m->tso_segsz)
2253 udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
2256 udp_hdr->dgram_cksum = 0;
2261 hns3_pkt_is_tso(struct rte_mbuf *m)
2263 return (m->tso_segsz != 0 && m->ol_flags & PKT_TX_TCP_SEG);
2267 hns3_check_tso_pkt_valid(struct rte_mbuf *m)
2269 uint32_t tmp_data_len_sum = 0;
2270 uint16_t nb_buf = m->nb_segs;
2271 uint32_t paylen, hdr_len;
2272 struct rte_mbuf *m_seg;
2275 if (nb_buf > HNS3_MAX_TSO_BD_PER_PKT)
2278 hdr_len = m->l2_len + m->l3_len + m->l4_len;
2279 hdr_len += (m->ol_flags & PKT_TX_TUNNEL_MASK) ?
2280 m->outer_l2_len + m->outer_l3_len : 0;
2281 if (hdr_len > HNS3_MAX_TSO_HDR_SIZE)
2284 paylen = m->pkt_len - hdr_len;
2285 if (paylen > HNS3_MAX_BD_PAYLEN)
2289 * The TSO header (including outer and inner L2, L3 and L4 headers)
2290 * must be carried by at most three descriptors in the hns3 network
2294 for (i = 0; m_seg != NULL && i < HNS3_MAX_TSO_HDR_BD_NUM && i < nb_buf;
2295 i++, m_seg = m_seg->next) {
2296 tmp_data_len_sum += m_seg->data_len;
2299 if (hdr_len > tmp_data_len_sum)
2306 hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
2313 for (i = 0; i < nb_pkts; i++) {
2316 if (hns3_pkt_is_tso(m) &&
2317 (hns3_pkt_need_linearized(m, m->nb_segs) ||
2318 hns3_check_tso_pkt_valid(m))) {
2323 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2324 ret = rte_validate_tx_offload(m);
2330 ret = rte_net_intel_cksum_prepare(m);
2336 hns3_outer_header_cksum_prepare(m);
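/*
 * Editor's usage sketch (not part of the driver): hns3_prep_pkts is wired
 * up as the tx_pkt_prepare callback, so applications typically run it via
 * rte_eth_tx_prepare() before the burst, e.g.
 *
 *	uint16_t n = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
 *	n = rte_eth_tx_burst(port_id, queue_id, pkts, n);
 */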
2343 hns3_parse_cksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
2344 const struct rte_mbuf *m, struct rte_net_hdr_lens *hdr_lens)
2346 /* Fill in tunneling parameters if necessary */
2347 if (m->ol_flags & PKT_TX_TUNNEL_MASK) {
2348 (void)rte_net_get_ptype(m, hdr_lens, RTE_PTYPE_ALL_MASK);
2349 if (hns3_parse_tunneling_params(txq, tx_desc_id, m->ol_flags,
2351 txq->unsupported_tunnel_pkt_cnt++;
2355 /* Enable checksum offloading */
2356 if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK)
2357 hns3_txd_enable_checksum(txq, tx_desc_id, m->ol_flags);
2363 hns3_check_non_tso_pkt(uint16_t nb_buf, struct rte_mbuf **m_seg,
2364 struct rte_mbuf *tx_pkt, struct hns3_tx_queue *txq)
2366 struct rte_mbuf *new_pkt;
2369 if (hns3_pkt_is_tso(*m_seg))
2373 * If the packet length is greater than the HNS3_MAX_FRAME_LEN
2374 * the driver supports, the packet will not be transmitted.
2376 if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) > HNS3_MAX_FRAME_LEN)) {
2377 txq->over_length_pkt_cnt++;
2381 if (unlikely(nb_buf > HNS3_MAX_NON_TSO_BD_PER_PKT)) {
2382 txq->exceed_limit_bd_pkt_cnt++;
2383 ret = hns3_reassemble_tx_pkts(txq, tx_pkt, &new_pkt);
2385 txq->exceed_limit_bd_reassem_fail++;
2395 hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2397 struct rte_net_hdr_lens hdr_lens = {0};
2398 struct hns3_tx_queue *txq = tx_queue;
2399 struct hns3_entry *tx_bak_pkt;
2400 struct rte_mbuf *tx_pkt;
2401 struct rte_mbuf *m_seg;
2402 uint32_t nb_hold = 0;
2403 uint16_t tx_next_use;
2404 uint16_t tx_pkt_num;
2410 /* Free buffers that the hardware has finished transmitting */
2411 hns3_tx_free_useless_buffer(txq);
2413 tx_next_use = txq->next_to_use;
2414 tx_bd_max = txq->nb_tx_desc;
2415 tx_pkt_num = nb_pkts;
2418 tx_bak_pkt = &txq->sw_ring[tx_next_use];
2419 for (nb_tx = 0; nb_tx < tx_pkt_num; nb_tx++) {
2420 tx_pkt = *tx_pkts++;
2422 nb_buf = tx_pkt->nb_segs;
2424 if (nb_buf > txq->tx_bd_ready) {
2425 txq->queue_full_cnt++;
2433 * If the packet length is less than the minimum packet size, the driver
2436 if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) < HNS3_MIN_PKT_SIZE)) {
2440 add_len = HNS3_MIN_PKT_SIZE -
2441 rte_pktmbuf_pkt_len(tx_pkt);
2442 appended = rte_pktmbuf_append(tx_pkt, add_len);
2443 if (appended == NULL) {
2444 txq->pkt_padding_fail_cnt++;
2448 memset(appended, 0, add_len);
2453 if (hns3_check_non_tso_pkt(nb_buf, &m_seg, tx_pkt, txq))
2456 if (hns3_parse_cksum(txq, tx_next_use, m_seg, &hdr_lens))
2461 fill_desc(txq, tx_next_use, m_seg, (i == 0), 0);
2462 tx_bak_pkt->mbuf = m_seg;
2463 m_seg = m_seg->next;
2466 if (tx_next_use >= tx_bd_max) {
2468 tx_bak_pkt = txq->sw_ring;
2472 } while (m_seg != NULL);
2475 txq->next_to_use = tx_next_use;
2476 txq->tx_bd_ready -= i;
2482 hns3_queue_xmit(txq, nb_hold);
2488 hns3_dummy_rxtx_burst(void *dpdk_txq __rte_unused,
2489 struct rte_mbuf **pkts __rte_unused,
2490 uint16_t pkts_n __rte_unused)
2495 void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
2497 struct hns3_adapter *hns = eth_dev->data->dev_private;
2499 if (hns->hw.adapter_state == HNS3_NIC_STARTED &&
2500 rte_atomic16_read(&hns->hw.reset.resetting) == 0) {
2501 eth_dev->rx_pkt_burst = hns3_recv_pkts;
2502 eth_dev->tx_pkt_burst = hns3_xmit_pkts;
2503 eth_dev->tx_pkt_prepare = hns3_prep_pkts;
2505 eth_dev->rx_pkt_burst = hns3_dummy_rxtx_burst;
2506 eth_dev->tx_pkt_burst = hns3_dummy_rxtx_burst;
2507 eth_dev->tx_pkt_prepare = hns3_dummy_rxtx_burst;