1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2019 Hisilicon Limited.
11 #include <rte_bus_pci.h>
12 #include <rte_byteorder.h>
13 #include <rte_common.h>
14 #include <rte_cycles.h>
17 #include <rte_ether.h>
18 #include <rte_vxlan.h>
19 #include <rte_ethdev_driver.h>
24 #include <rte_malloc.h>
27 #include "hns3_ethdev.h"
28 #include "hns3_rxtx.h"
29 #include "hns3_regs.h"
30 #include "hns3_logs.h"
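/*
 * HNS3_CFG_DESC_NUM() converts a descriptor count into the value written to
 * the BD_NUM registers, e.g. 1024 descriptors -> 1024 / 8 - 1 = 127 (this
 * assumes the hardware counts the ring size in units of 8 BDs, minus one).
 */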
32 #define HNS3_CFG_DESC_NUM(num) ((num) / 8 - 1)
33 #define HNS3_RX_RING_PREFETCTH_MASK 3
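/*
 * With the mask above, the Rx burst loops below prefetch the next ring and
 * sw_ring entries once every 4 descriptors, i.e. when (rx_id & mask) == 0.
 */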
36 hns3_rx_queue_release_mbufs(struct hns3_rx_queue *rxq)
40 /* Note: Fake rx queue will not enter here */
41 if (rxq->sw_ring == NULL)
44 for (i = 0; i < rxq->nb_rx_desc; i++)
45 if (rxq->sw_ring[i].mbuf)
46 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
48 for (i = 0; i < rxq->bulk_mbuf_num; i++)
49 rte_pktmbuf_free_seg(rxq->bulk_mbuf[i]);
50 rxq->bulk_mbuf_num = 0;
52 if (rxq->pkt_first_seg) {
53 rte_pktmbuf_free(rxq->pkt_first_seg);
54 rxq->pkt_first_seg = NULL;
59 hns3_tx_queue_release_mbufs(struct hns3_tx_queue *txq)
63	/* Note: Fake tx queue will not enter here */
65 for (i = 0; i < txq->nb_tx_desc; i++) {
66 if (txq->sw_ring[i].mbuf) {
67 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
68 txq->sw_ring[i].mbuf = NULL;
75 hns3_rx_queue_release(void *queue)
77 struct hns3_rx_queue *rxq = queue;
79 hns3_rx_queue_release_mbufs(rxq);
81 rte_memzone_free(rxq->mz);
83 rte_free(rxq->sw_ring);
89 hns3_tx_queue_release(void *queue)
91 struct hns3_tx_queue *txq = queue;
93 hns3_tx_queue_release_mbufs(txq);
95 rte_memzone_free(txq->mz);
97 rte_free(txq->sw_ring);
103 hns3_dev_rx_queue_release(void *queue)
105 struct hns3_rx_queue *rxq = queue;
106 struct hns3_adapter *hns;
112 rte_spinlock_lock(&hns->hw.lock);
113 hns3_rx_queue_release(queue);
114 rte_spinlock_unlock(&hns->hw.lock);
118 hns3_dev_tx_queue_release(void *queue)
120 struct hns3_tx_queue *txq = queue;
121 struct hns3_adapter *hns;
127 rte_spinlock_lock(&hns->hw.lock);
128 hns3_tx_queue_release(queue);
129 rte_spinlock_unlock(&hns->hw.lock);
133 hns3_fake_rx_queue_release(struct hns3_rx_queue *queue)
135 struct hns3_rx_queue *rxq = queue;
136 struct hns3_adapter *hns;
146 if (hw->fkq_data.rx_queues[idx]) {
147 hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);
148 hw->fkq_data.rx_queues[idx] = NULL;
151 /* free fake rx queue arrays */
152 if (idx == (hw->fkq_data.nb_fake_rx_queues - 1)) {
153 hw->fkq_data.nb_fake_rx_queues = 0;
154 rte_free(hw->fkq_data.rx_queues);
155 hw->fkq_data.rx_queues = NULL;
160 hns3_fake_tx_queue_release(struct hns3_tx_queue *queue)
162 struct hns3_tx_queue *txq = queue;
163 struct hns3_adapter *hns;
173 if (hw->fkq_data.tx_queues[idx]) {
174 hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);
175 hw->fkq_data.tx_queues[idx] = NULL;
178 /* free fake tx queue arrays */
179 if (idx == (hw->fkq_data.nb_fake_tx_queues - 1)) {
180 hw->fkq_data.nb_fake_tx_queues = 0;
181 rte_free(hw->fkq_data.tx_queues);
182 hw->fkq_data.tx_queues = NULL;
187 hns3_free_rx_queues(struct rte_eth_dev *dev)
189 struct hns3_adapter *hns = dev->data->dev_private;
190 struct hns3_fake_queue_data *fkq_data;
191 struct hns3_hw *hw = &hns->hw;
195 nb_rx_q = hw->data->nb_rx_queues;
196 for (i = 0; i < nb_rx_q; i++) {
197 if (dev->data->rx_queues[i]) {
198 hns3_rx_queue_release(dev->data->rx_queues[i]);
199 dev->data->rx_queues[i] = NULL;
203 /* Free fake Rx queues */
204 fkq_data = &hw->fkq_data;
205 for (i = 0; i < fkq_data->nb_fake_rx_queues; i++) {
206 if (fkq_data->rx_queues[i])
207 hns3_fake_rx_queue_release(fkq_data->rx_queues[i]);
212 hns3_free_tx_queues(struct rte_eth_dev *dev)
214 struct hns3_adapter *hns = dev->data->dev_private;
215 struct hns3_fake_queue_data *fkq_data;
216 struct hns3_hw *hw = &hns->hw;
220 nb_tx_q = hw->data->nb_tx_queues;
221 for (i = 0; i < nb_tx_q; i++) {
222 if (dev->data->tx_queues[i]) {
223 hns3_tx_queue_release(dev->data->tx_queues[i]);
224 dev->data->tx_queues[i] = NULL;
228 /* Free fake Tx queues */
229 fkq_data = &hw->fkq_data;
230 for (i = 0; i < fkq_data->nb_fake_tx_queues; i++) {
231 if (fkq_data->tx_queues[i])
232 hns3_fake_tx_queue_release(fkq_data->tx_queues[i]);
237 hns3_free_all_queues(struct rte_eth_dev *dev)
239 hns3_free_rx_queues(dev);
240 hns3_free_tx_queues(dev);
244 hns3_alloc_rx_queue_mbufs(struct hns3_hw *hw, struct hns3_rx_queue *rxq)
246 struct rte_mbuf *mbuf;
250 for (i = 0; i < rxq->nb_rx_desc; i++) {
251 mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
252 if (unlikely(mbuf == NULL)) {
253 hns3_err(hw, "Failed to allocate RXD[%d] for rx queue!",
255 hns3_rx_queue_release_mbufs(rxq);
259 rte_mbuf_refcnt_set(mbuf, 1);
261 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
263 mbuf->port = rxq->port_id;
265 rxq->sw_ring[i].mbuf = mbuf;
266 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
267 rxq->rx_ring[i].addr = dma_addr;
268 rxq->rx_ring[i].rx.bd_base_info = 0;
275 hns3_buf_size2type(uint32_t buf_size)
281 bd_size_type = HNS3_BD_SIZE_512_TYPE;
284 bd_size_type = HNS3_BD_SIZE_1024_TYPE;
287 bd_size_type = HNS3_BD_SIZE_4096_TYPE;
290 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
297 hns3_init_rx_queue_hw(struct hns3_rx_queue *rxq)
299 uint32_t rx_buf_len = rxq->rx_buf_len;
300 uint64_t dma_addr = rxq->rx_ring_phys_addr;
302 hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_L_REG, (uint32_t)dma_addr);
303 hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_H_REG,
304 (uint32_t)((dma_addr >> 31) >> 1));
306 hns3_write_dev(rxq, HNS3_RING_RX_BD_LEN_REG,
307 hns3_buf_size2type(rx_buf_len));
308 hns3_write_dev(rxq, HNS3_RING_RX_BD_NUM_REG,
309 HNS3_CFG_DESC_NUM(rxq->nb_rx_desc));
313 hns3_init_tx_queue_hw(struct hns3_tx_queue *txq)
315 uint64_t dma_addr = txq->tx_ring_phys_addr;
317 hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_L_REG, (uint32_t)dma_addr);
318 hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_H_REG,
319 (uint32_t)((dma_addr >> 31) >> 1));
321 hns3_write_dev(txq, HNS3_RING_TX_BD_NUM_REG,
322 HNS3_CFG_DESC_NUM(txq->nb_tx_desc));
326 hns3_update_all_queues_pvid_state(struct hns3_hw *hw)
328 uint16_t nb_rx_q = hw->data->nb_rx_queues;
329 uint16_t nb_tx_q = hw->data->nb_tx_queues;
330 struct hns3_rx_queue *rxq;
331 struct hns3_tx_queue *txq;
335 pvid_state = hw->port_base_vlan_cfg.state;
336 for (i = 0; i < hw->cfg_max_queues; i++) {
338 rxq = hw->data->rx_queues[i];
340 rxq->pvid_state = pvid_state;
343 txq = hw->data->tx_queues[i];
345 txq->pvid_state = pvid_state;
351 hns3_enable_all_queues(struct hns3_hw *hw, bool en)
353 uint16_t nb_rx_q = hw->data->nb_rx_queues;
354 uint16_t nb_tx_q = hw->data->nb_tx_queues;
355 struct hns3_rx_queue *rxq;
356 struct hns3_tx_queue *txq;
360 for (i = 0; i < hw->cfg_max_queues; i++) {
362 rxq = hw->data->rx_queues[i];
364 rxq = hw->fkq_data.rx_queues[i - nb_rx_q];
366 txq = hw->data->tx_queues[i];
368 txq = hw->fkq_data.tx_queues[i - nb_tx_q];
369 if (rxq == NULL || txq == NULL ||
370 (en && (rxq->rx_deferred_start || txq->tx_deferred_start)))
373 rcb_reg = hns3_read_dev(rxq, HNS3_RING_EN_REG);
375 rcb_reg |= BIT(HNS3_RING_EN_B);
377 rcb_reg &= ~BIT(HNS3_RING_EN_B);
378 hns3_write_dev(rxq, HNS3_RING_EN_REG, rcb_reg);
383 hns3_tqp_enable(struct hns3_hw *hw, uint16_t queue_id, bool enable)
385 struct hns3_cfg_com_tqp_queue_cmd *req;
386 struct hns3_cmd_desc desc;
389 req = (struct hns3_cfg_com_tqp_queue_cmd *)desc.data;
391 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_COM_TQP_QUEUE, false);
392 req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
394 hns3_set_bit(req->enable, HNS3_TQP_ENABLE_B, enable ? 1 : 0);
396 ret = hns3_cmd_send(hw, &desc, 1);
398 hns3_err(hw, "TQP enable fail, ret = %d", ret);
404 hns3_send_reset_tqp_cmd(struct hns3_hw *hw, uint16_t queue_id, bool enable)
406 struct hns3_reset_tqp_queue_cmd *req;
407 struct hns3_cmd_desc desc;
410 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, false);
412 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
413 req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
414 hns3_set_bit(req->reset_req, HNS3_TQP_RESET_B, enable ? 1 : 0);
416 ret = hns3_cmd_send(hw, &desc, 1);
418 hns3_err(hw, "Send tqp reset cmd error, ret = %d", ret);
424 hns3_get_reset_status(struct hns3_hw *hw, uint16_t queue_id)
426 struct hns3_reset_tqp_queue_cmd *req;
427 struct hns3_cmd_desc desc;
430 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, true);
432 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
433 req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
435 ret = hns3_cmd_send(hw, &desc, 1);
437 hns3_err(hw, "Get reset status error, ret =%d", ret);
441 return hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B);
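/*
 * Summary of the TQP reset flow implemented below: disable the queue, send
 * the reset request, poll the reset status for up to HNS3_TQP_RESET_TRY_MS,
 * then deassert the soft reset.
 */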
445 hns3_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
447 #define HNS3_TQP_RESET_TRY_MS 200
452 ret = hns3_tqp_enable(hw, queue_id, false);
457	 * In the current version, VF is not supported when the PF is driven by the
458	 * DPDK driver, so all task queue pairs are mapped to the PF function and the
459	 * PF's queue id is equal to the global queue id within the PF's range.
461 ret = hns3_send_reset_tqp_cmd(hw, queue_id, true);
463 hns3_err(hw, "Send reset tqp cmd fail, ret = %d", ret);
467 end = get_timeofday_ms() + HNS3_TQP_RESET_TRY_MS;
469 /* Wait for tqp hw reset */
470 rte_delay_ms(HNS3_POLL_RESPONE_MS);
471 reset_status = hns3_get_reset_status(hw, queue_id);
476 } while (get_timeofday_ms() < end);
479 hns3_err(hw, "Reset TQP fail, ret = %d", ret);
483 ret = hns3_send_reset_tqp_cmd(hw, queue_id, false);
485 hns3_err(hw, "Deassert the soft reset fail, ret = %d", ret);
491 hns3vf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
496	/* Disable the VF's queue before sending the queue reset msg to the PF */
497 ret = hns3_tqp_enable(hw, queue_id, false);
501 memcpy(msg_data, &queue_id, sizeof(uint16_t));
503 return hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data,
504 sizeof(msg_data), true, NULL, 0);
508 hns3_reset_queue(struct hns3_adapter *hns, uint16_t queue_id)
510 struct hns3_hw *hw = &hns->hw;
512 return hns3vf_reset_tqp(hw, queue_id);
514 return hns3_reset_tqp(hw, queue_id);
518 hns3_reset_all_queues(struct hns3_adapter *hns)
520 struct hns3_hw *hw = &hns->hw;
523 for (i = 0; i < hw->cfg_max_queues; i++) {
524 ret = hns3_reset_queue(hns, i);
526 hns3_err(hw, "Failed to reset No.%d queue: %d", i, ret);
534 hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
535 uint8_t gl_idx, uint16_t gl_value)
537 uint32_t offset[] = {HNS3_TQP_INTR_GL0_REG,
538 HNS3_TQP_INTR_GL1_REG,
539 HNS3_TQP_INTR_GL2_REG};
540 uint32_t addr, value;
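	/*
	 * Each queue's interrupt registers are laid out at a stride of
	 * HNS3_TQP_INTR_REG_SIZE from the base offsets above.
	 */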
542 if (gl_idx >= RTE_DIM(offset) || gl_value > HNS3_TQP_INTR_GL_MAX)
545 addr = offset[gl_idx] + queue_id * HNS3_TQP_INTR_REG_SIZE;
546 if (hw->intr.gl_unit == HNS3_INTR_COALESCE_GL_UINT_1US)
547 value = gl_value | HNS3_TQP_INTR_GL_UNIT_1US;
549 value = HNS3_GL_USEC_TO_REG(gl_value);
551 hns3_write_dev(hw, addr, value);
555 hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id, uint16_t rl_value)
557 uint32_t addr, value;
559 if (rl_value > HNS3_TQP_INTR_RL_MAX)
562 addr = HNS3_TQP_INTR_RL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
563 value = HNS3_RL_USEC_TO_REG(rl_value);
565 value |= HNS3_TQP_INTR_RL_ENABLE_MASK;
567 hns3_write_dev(hw, addr, value);
571 hns3_set_queue_intr_ql(struct hns3_hw *hw, uint16_t queue_id, uint16_t ql_value)
575 if (hw->intr.coalesce_mode == HNS3_INTR_COALESCE_NON_QL)
578 addr = HNS3_TQP_INTR_TX_QL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
579 hns3_write_dev(hw, addr, ql_value);
581 addr = HNS3_TQP_INTR_RX_QL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
582 hns3_write_dev(hw, addr, ql_value);
586 hns3_queue_intr_enable(struct hns3_hw *hw, uint16_t queue_id, bool en)
588 uint32_t addr, value;
590 addr = HNS3_TQP_INTR_CTRL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
593 hns3_write_dev(hw, addr, value);
597  * Enable all rx queue interrupts when in interrupt rx mode.
598  * This API is called before enabling queue rx&tx (in normal start or reset
599  * recovery scenarios), and is used to restore the hardware rx queue interrupt enable that was cleared
603 hns3_dev_all_rx_queue_intr_enable(struct hns3_hw *hw, bool en)
605 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
606 uint16_t nb_rx_q = hw->data->nb_rx_queues;
609 if (dev->data->dev_conf.intr_conf.rxq == 0)
612 for (i = 0; i < nb_rx_q; i++)
613 hns3_queue_intr_enable(hw, i, en);
617 hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
619 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
620 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
621 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
623 if (dev->data->dev_conf.intr_conf.rxq == 0)
626 hns3_queue_intr_enable(hw, queue_id, true);
628 return rte_intr_ack(intr_handle);
632 hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
634 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
636 if (dev->data->dev_conf.intr_conf.rxq == 0)
639 hns3_queue_intr_enable(hw, queue_id, false);
645 hns3_dev_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
647 struct hns3_hw *hw = &hns->hw;
648 struct hns3_rx_queue *rxq;
651 PMD_INIT_FUNC_TRACE();
653 rxq = (struct hns3_rx_queue *)hw->data->rx_queues[idx];
654 ret = hns3_alloc_rx_queue_mbufs(hw, rxq);
656 hns3_err(hw, "Failed to alloc mbuf for No.%d rx queue: %d",
661 rxq->next_to_use = 0;
662 rxq->rx_free_hold = 0;
663 rxq->pkt_first_seg = NULL;
664 rxq->pkt_last_seg = NULL;
665 hns3_init_rx_queue_hw(rxq);
671 hns3_fake_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
673 struct hns3_hw *hw = &hns->hw;
674 struct hns3_rx_queue *rxq;
676 rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[idx];
677 rxq->next_to_use = 0;
678 rxq->rx_free_hold = 0;
679 hns3_init_rx_queue_hw(rxq);
683 hns3_init_tx_queue(struct hns3_tx_queue *queue)
685 struct hns3_tx_queue *txq = queue;
686 struct hns3_desc *desc;
691 for (i = 0; i < txq->nb_tx_desc; i++) {
692 desc->tx.tp_fe_sc_vld_ra_ri = 0;
696 txq->next_to_use = 0;
697 txq->next_to_clean = 0;
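	/*
	 * One BD is kept unused so that a completely full ring can be told apart
	 * from an empty one (assumed ring-management convention).
	 */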
698 txq->tx_bd_ready = txq->nb_tx_desc - 1;
699 hns3_init_tx_queue_hw(txq);
703 hns3_dev_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)
705 struct hns3_hw *hw = &hns->hw;
706 struct hns3_tx_queue *txq;
708 txq = (struct hns3_tx_queue *)hw->data->tx_queues[idx];
709 hns3_init_tx_queue(txq);
713 hns3_fake_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)
715 struct hns3_hw *hw = &hns->hw;
716 struct hns3_tx_queue *txq;
718 txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[idx];
719 hns3_init_tx_queue(txq);
723 hns3_init_tx_ring_tc(struct hns3_adapter *hns)
725 struct hns3_hw *hw = &hns->hw;
726 struct hns3_tx_queue *txq;
729 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
730 struct hns3_tc_queue_info *tc_queue = &hw->tc_queue[i];
733 if (!tc_queue->enable)
736 for (j = 0; j < tc_queue->tqp_count; j++) {
737 num = tc_queue->tqp_offset + j;
738 txq = (struct hns3_tx_queue *)hw->data->tx_queues[num];
742 hns3_write_dev(txq, HNS3_RING_TX_TC_REG, tc_queue->tc);
748 hns3_start_rx_queues(struct hns3_adapter *hns)
750 struct hns3_hw *hw = &hns->hw;
751 struct hns3_rx_queue *rxq;
755 /* Initialize RSS for queues */
756 ret = hns3_config_rss(hns);
758 hns3_err(hw, "Failed to configure rss %d", ret);
762 for (i = 0; i < hw->data->nb_rx_queues; i++) {
763 rxq = (struct hns3_rx_queue *)hw->data->rx_queues[i];
764 if (rxq == NULL || rxq->rx_deferred_start)
766 ret = hns3_dev_rx_queue_start(hns, i);
768 hns3_err(hw, "Failed to start No.%d rx queue: %d", i,
774 for (i = 0; i < hw->fkq_data.nb_fake_rx_queues; i++) {
775 rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[i];
776 if (rxq == NULL || rxq->rx_deferred_start)
778 hns3_fake_rx_queue_start(hns, i);
783 for (j = 0; j < i; j++) {
784 rxq = (struct hns3_rx_queue *)hw->data->rx_queues[j];
785 hns3_rx_queue_release_mbufs(rxq);
792 hns3_start_tx_queues(struct hns3_adapter *hns)
794 struct hns3_hw *hw = &hns->hw;
795 struct hns3_tx_queue *txq;
798 for (i = 0; i < hw->data->nb_tx_queues; i++) {
799 txq = (struct hns3_tx_queue *)hw->data->tx_queues[i];
800 if (txq == NULL || txq->tx_deferred_start)
802 hns3_dev_tx_queue_start(hns, i);
805 for (i = 0; i < hw->fkq_data.nb_fake_tx_queues; i++) {
806 txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[i];
807 if (txq == NULL || txq->tx_deferred_start)
809 hns3_fake_tx_queue_start(hns, i);
812 hns3_init_tx_ring_tc(hns);
817  * Note: this only initializes and sets up the queues; it does not enable queue rx&tx.
820 hns3_start_queues(struct hns3_adapter *hns, bool reset_queue)
822 struct hns3_hw *hw = &hns->hw;
826 ret = hns3_reset_all_queues(hns);
828 hns3_err(hw, "Failed to reset all queues %d", ret);
833 ret = hns3_start_rx_queues(hns);
835 hns3_err(hw, "Failed to start rx queues: %d", ret);
839 hns3_start_tx_queues(hns);
845 hns3_stop_queues(struct hns3_adapter *hns, bool reset_queue)
847 struct hns3_hw *hw = &hns->hw;
850 hns3_enable_all_queues(hw, false);
852 ret = hns3_reset_all_queues(hns);
854 hns3_err(hw, "Failed to reset all queues %d", ret);
862 hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev,
863 struct hns3_queue_info *q_info)
865 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
866 const struct rte_memzone *rx_mz;
867 struct hns3_rx_queue *rxq;
868 unsigned int rx_desc;
870 rxq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_rx_queue),
871 RTE_CACHE_LINE_SIZE, q_info->socket_id);
873 hns3_err(hw, "Failed to allocate memory for No.%d rx ring!",
878 /* Allocate rx ring hardware descriptors. */
879 rxq->queue_id = q_info->idx;
880 rxq->nb_rx_desc = q_info->nb_desc;
881 rx_desc = rxq->nb_rx_desc * sizeof(struct hns3_desc);
882 rx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
883 rx_desc, HNS3_RING_BASE_ALIGN,
886 hns3_err(hw, "Failed to reserve DMA memory for No.%d rx ring!",
888 hns3_rx_queue_release(rxq);
892 rxq->rx_ring = (struct hns3_desc *)rx_mz->addr;
893 rxq->rx_ring_phys_addr = rx_mz->iova;
895 hns3_dbg(hw, "No.%d rx descriptors iova 0x%" PRIx64, q_info->idx,
896 rxq->rx_ring_phys_addr);
902 hns3_fake_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
903 uint16_t nb_desc, unsigned int socket_id)
905 struct hns3_adapter *hns = dev->data->dev_private;
906 struct hns3_hw *hw = &hns->hw;
907 struct hns3_queue_info q_info;
908 struct hns3_rx_queue *rxq;
911 if (hw->fkq_data.rx_queues[idx]) {
912 hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);
913 hw->fkq_data.rx_queues[idx] = NULL;
917 q_info.socket_id = socket_id;
918 q_info.nb_desc = nb_desc;
919 q_info.type = "hns3 fake RX queue";
920 q_info.ring_name = "rx_fake_ring";
921 rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
923 hns3_err(hw, "Failed to setup No.%d fake rx ring.", idx);
927	/* No need to allocate sw_ring, because upper applications do not use it */
931 rxq->rx_deferred_start = false;
932 rxq->port_id = dev->data->port_id;
933 rxq->configured = true;
934 nb_rx_q = dev->data->nb_rx_queues;
935 rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
936 (nb_rx_q + idx) * HNS3_TQP_REG_SIZE);
937 rxq->rx_buf_len = HNS3_MIN_BD_BUF_SIZE;
939 rte_spinlock_lock(&hw->lock);
940 hw->fkq_data.rx_queues[idx] = rxq;
941 rte_spinlock_unlock(&hw->lock);
947 hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev,
948 struct hns3_queue_info *q_info)
950 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
951 const struct rte_memzone *tx_mz;
952 struct hns3_tx_queue *txq;
953 struct hns3_desc *desc;
954 unsigned int tx_desc;
957 txq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_tx_queue),
958 RTE_CACHE_LINE_SIZE, q_info->socket_id);
960 hns3_err(hw, "Failed to allocate memory for No.%d tx ring!",
965 /* Allocate tx ring hardware descriptors. */
966 txq->queue_id = q_info->idx;
967 txq->nb_tx_desc = q_info->nb_desc;
968 tx_desc = txq->nb_tx_desc * sizeof(struct hns3_desc);
969 tx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
970 tx_desc, HNS3_RING_BASE_ALIGN,
973 hns3_err(hw, "Failed to reserve DMA memory for No.%d tx ring!",
975 hns3_tx_queue_release(txq);
979 txq->tx_ring = (struct hns3_desc *)tx_mz->addr;
980 txq->tx_ring_phys_addr = tx_mz->iova;
982 hns3_dbg(hw, "No.%d tx descriptors iova 0x%" PRIx64, q_info->idx,
983 txq->tx_ring_phys_addr);
987 for (i = 0; i < txq->nb_tx_desc; i++) {
988 desc->tx.tp_fe_sc_vld_ra_ri = 0;
996 hns3_fake_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
997 uint16_t nb_desc, unsigned int socket_id)
999 struct hns3_adapter *hns = dev->data->dev_private;
1000 struct hns3_hw *hw = &hns->hw;
1001 struct hns3_queue_info q_info;
1002 struct hns3_tx_queue *txq;
1005 if (hw->fkq_data.tx_queues[idx] != NULL) {
1006 hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);
1007 hw->fkq_data.tx_queues[idx] = NULL;
1011 q_info.socket_id = socket_id;
1012 q_info.nb_desc = nb_desc;
1013 q_info.type = "hns3 fake TX queue";
1014 q_info.ring_name = "tx_fake_ring";
1015 txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
1017 hns3_err(hw, "Failed to setup No.%d fake tx ring.", idx);
1021	/* No need to allocate sw_ring, because upper applications do not use it */
1022 txq->sw_ring = NULL;
1025 txq->tx_deferred_start = false;
1026 txq->port_id = dev->data->port_id;
1027 txq->configured = true;
1028 nb_tx_q = dev->data->nb_tx_queues;
1029 txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
1030 (nb_tx_q + idx) * HNS3_TQP_REG_SIZE);
1032 rte_spinlock_lock(&hw->lock);
1033 hw->fkq_data.tx_queues[idx] = txq;
1034 rte_spinlock_unlock(&hw->lock);
1040 hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
1042 uint16_t old_nb_queues = hw->fkq_data.nb_fake_rx_queues;
1046 if (hw->fkq_data.rx_queues == NULL && nb_queues != 0) {
1047 /* first time configuration */
1049 size = sizeof(hw->fkq_data.rx_queues[0]) * nb_queues;
1050 hw->fkq_data.rx_queues = rte_zmalloc("fake_rx_queues", size,
1051 RTE_CACHE_LINE_SIZE);
1052 if (hw->fkq_data.rx_queues == NULL) {
1053 hw->fkq_data.nb_fake_rx_queues = 0;
1056 } else if (hw->fkq_data.rx_queues != NULL && nb_queues != 0) {
1058 rxq = hw->fkq_data.rx_queues;
1059 for (i = nb_queues; i < old_nb_queues; i++)
1060 hns3_dev_rx_queue_release(rxq[i]);
1062 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
1063 RTE_CACHE_LINE_SIZE);
1066 if (nb_queues > old_nb_queues) {
1067 uint16_t new_qs = nb_queues - old_nb_queues;
1068 memset(rxq + old_nb_queues, 0, sizeof(rxq[0]) * new_qs);
1071 hw->fkq_data.rx_queues = rxq;
1072 } else if (hw->fkq_data.rx_queues != NULL && nb_queues == 0) {
1073 rxq = hw->fkq_data.rx_queues;
1074 for (i = nb_queues; i < old_nb_queues; i++)
1075 hns3_dev_rx_queue_release(rxq[i]);
1077 rte_free(hw->fkq_data.rx_queues);
1078 hw->fkq_data.rx_queues = NULL;
1081 hw->fkq_data.nb_fake_rx_queues = nb_queues;
1087 hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
1089 uint16_t old_nb_queues = hw->fkq_data.nb_fake_tx_queues;
1093 if (hw->fkq_data.tx_queues == NULL && nb_queues != 0) {
1094 /* first time configuration */
1096 size = sizeof(hw->fkq_data.tx_queues[0]) * nb_queues;
1097 hw->fkq_data.tx_queues = rte_zmalloc("fake_tx_queues", size,
1098 RTE_CACHE_LINE_SIZE);
1099 if (hw->fkq_data.tx_queues == NULL) {
1100 hw->fkq_data.nb_fake_tx_queues = 0;
1103 } else if (hw->fkq_data.tx_queues != NULL && nb_queues != 0) {
1105 txq = hw->fkq_data.tx_queues;
1106 for (i = nb_queues; i < old_nb_queues; i++)
1107 hns3_dev_tx_queue_release(txq[i]);
1108 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
1109 RTE_CACHE_LINE_SIZE);
1112 if (nb_queues > old_nb_queues) {
1113 uint16_t new_qs = nb_queues - old_nb_queues;
1114 memset(txq + old_nb_queues, 0, sizeof(txq[0]) * new_qs);
1117 hw->fkq_data.tx_queues = txq;
1118 } else if (hw->fkq_data.tx_queues != NULL && nb_queues == 0) {
1119 txq = hw->fkq_data.tx_queues;
1120 for (i = nb_queues; i < old_nb_queues; i++)
1121 hns3_dev_tx_queue_release(txq[i]);
1123 rte_free(hw->fkq_data.tx_queues);
1124 hw->fkq_data.tx_queues = NULL;
1126 hw->fkq_data.nb_fake_tx_queues = nb_queues;
1132 hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
1135 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1136 uint16_t rx_need_add_nb_q;
1137 uint16_t tx_need_add_nb_q;
1142 /* Setup new number of fake RX/TX queues and reconfigure device. */
1143 hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q);
1144 rx_need_add_nb_q = hw->cfg_max_queues - nb_rx_q;
1145 tx_need_add_nb_q = hw->cfg_max_queues - nb_tx_q;
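	/*
	 * Illustrative example (hypothetical numbers): with 8 Rx queues and 4 Tx
	 * queues configured, cfg_max_queues is 8, so no fake Rx queues and 4 fake
	 * Tx queues are created to pad the smaller side up to the same count.
	 */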
1146 ret = hns3_fake_rx_queue_config(hw, rx_need_add_nb_q);
1148 hns3_err(hw, "Fail to configure fake rx queues: %d", ret);
1149 goto cfg_fake_rx_q_fail;
1152 ret = hns3_fake_tx_queue_config(hw, tx_need_add_nb_q);
1154 hns3_err(hw, "Fail to configure fake rx queues: %d", ret);
1155 goto cfg_fake_tx_q_fail;
1158 /* Allocate and set up fake RX queue per Ethernet port. */
1159 port_id = hw->data->port_id;
1160 for (q = 0; q < rx_need_add_nb_q; q++) {
1161 ret = hns3_fake_rx_queue_setup(dev, q, HNS3_MIN_RING_DESC,
1162 rte_eth_dev_socket_id(port_id));
1164 goto setup_fake_rx_q_fail;
1167 /* Allocate and set up fake TX queue per Ethernet port. */
1168 for (q = 0; q < tx_need_add_nb_q; q++) {
1169 ret = hns3_fake_tx_queue_setup(dev, q, HNS3_MIN_RING_DESC,
1170 rte_eth_dev_socket_id(port_id));
1172 goto setup_fake_tx_q_fail;
1177 setup_fake_tx_q_fail:
1178 setup_fake_rx_q_fail:
1179 (void)hns3_fake_tx_queue_config(hw, 0);
1181 (void)hns3_fake_rx_queue_config(hw, 0);
1183 hw->cfg_max_queues = 0;
1189 hns3_dev_release_mbufs(struct hns3_adapter *hns)
1191 struct rte_eth_dev_data *dev_data = hns->hw.data;
1192 struct hns3_rx_queue *rxq;
1193 struct hns3_tx_queue *txq;
1196 if (dev_data->rx_queues)
1197 for (i = 0; i < dev_data->nb_rx_queues; i++) {
1198 rxq = dev_data->rx_queues[i];
1199 if (rxq == NULL || rxq->rx_deferred_start)
1201 hns3_rx_queue_release_mbufs(rxq);
1204 if (dev_data->tx_queues)
1205 for (i = 0; i < dev_data->nb_tx_queues; i++) {
1206 txq = dev_data->tx_queues[i];
1207 if (txq == NULL || txq->tx_deferred_start)
1209 hns3_tx_queue_release_mbufs(txq);
1214 hns3_rx_buf_len_calc(struct rte_mempool *mp, uint16_t *rx_buf_len)
1216 uint16_t vld_buf_size;
1217 uint16_t num_hw_specs;
1221	 * The hns3 network engine only supports 4 typical buffer size specifications,
1222	 * and different buffer sizes will affect the max packet_len and the max
1223	 * number of segments when hw gro is enabled on the receive side. The
1224	 * relationship between them is as follows:
1225 * rx_buf_size | max_gro_pkt_len | max_gro_nb_seg
1226 * ---------------------|-------------------|----------------
1227 * HNS3_4K_BD_BUF_SIZE | 60KB | 15
1228 * HNS3_2K_BD_BUF_SIZE | 62KB | 31
1229 * HNS3_1K_BD_BUF_SIZE | 63KB | 63
1230 * HNS3_512_BD_BUF_SIZE | 31.5KB | 63
1232 static const uint16_t hw_rx_buf_size[] = {
1233 HNS3_4K_BD_BUF_SIZE,
1234 HNS3_2K_BD_BUF_SIZE,
1235 HNS3_1K_BD_BUF_SIZE,
1236 HNS3_512_BD_BUF_SIZE
1239 vld_buf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
1240 RTE_PKTMBUF_HEADROOM);
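	/*
	 * Worked example with a hypothetical mempool: a 2176-byte data room minus
	 * the default 128-byte RTE_PKTMBUF_HEADROOM gives vld_buf_size = 2048,
	 * which selects HNS3_2K_BD_BUF_SIZE in the loop below.
	 */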
1242 if (vld_buf_size < HNS3_MIN_BD_BUF_SIZE)
1245 num_hw_specs = RTE_DIM(hw_rx_buf_size);
1246 for (i = 0; i < num_hw_specs; i++) {
1247 if (vld_buf_size >= hw_rx_buf_size[i]) {
1248 *rx_buf_len = hw_rx_buf_size[i];
1256 hns3_rx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_rxconf *conf,
1257 struct rte_mempool *mp, uint16_t nb_desc,
1260 if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||
1261 nb_desc % HNS3_ALIGN_RING_DESC) {
1262 hns3_err(hw, "Number (%u) of rx descriptors is invalid",
1267 if (conf->rx_drop_en == 0)
1268		hns3_warn(hw, "if there are no available descriptors, packets are "
1269			      "always dropped and rx_drop_en (1) is forced on");
1271 if (hns3_rx_buf_len_calc(mp, buf_size)) {
1272 hns3_err(hw, "rxq mbufs' data room size (%u) is not enough! "
1273 "minimal data room size (%u).",
1274 rte_pktmbuf_data_room_size(mp),
1275 HNS3_MIN_BD_BUF_SIZE + RTE_PKTMBUF_HEADROOM);
1283 hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
1284 unsigned int socket_id, const struct rte_eth_rxconf *conf,
1285 struct rte_mempool *mp)
1287 struct hns3_adapter *hns = dev->data->dev_private;
1288 struct hns3_hw *hw = &hns->hw;
1289 struct hns3_queue_info q_info;
1290 struct hns3_rx_queue *rxq;
1291 uint16_t rx_buf_size;
1295 if (dev->data->dev_started) {
1296 hns3_err(hw, "rx_queue_setup after dev_start no supported");
1300 ret = hns3_rx_queue_conf_check(hw, conf, mp, nb_desc, &rx_buf_size);
1304 if (dev->data->rx_queues[idx]) {
1305 hns3_rx_queue_release(dev->data->rx_queues[idx]);
1306 dev->data->rx_queues[idx] = NULL;
1310 q_info.socket_id = socket_id;
1311 q_info.nb_desc = nb_desc;
1312 q_info.type = "hns3 RX queue";
1313 q_info.ring_name = "rx_ring";
1315 rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
1318 "Failed to alloc mem and reserve DMA mem for rx ring!");
1323 rxq->ptype_tbl = &hns->ptype_tbl;
1325 rxq->rx_free_thresh = (conf->rx_free_thresh > 0) ?
1326 conf->rx_free_thresh : HNS3_DEFAULT_RX_FREE_THRESH;
1327 rxq->rx_deferred_start = conf->rx_deferred_start;
1329 rx_entry_len = sizeof(struct hns3_entry) * rxq->nb_rx_desc;
1330 rxq->sw_ring = rte_zmalloc_socket("hns3 RX sw ring", rx_entry_len,
1331 RTE_CACHE_LINE_SIZE, socket_id);
1332 if (rxq->sw_ring == NULL) {
1333 hns3_err(hw, "Failed to allocate memory for rx sw ring!");
1334 hns3_rx_queue_release(rxq);
1338 rxq->next_to_use = 0;
1339 rxq->rx_free_hold = 0;
1340 rxq->pkt_first_seg = NULL;
1341 rxq->pkt_last_seg = NULL;
1342 rxq->port_id = dev->data->port_id;
1343 rxq->pvid_state = hw->port_base_vlan_cfg.state;
1344 rxq->configured = true;
1345 rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
1346 idx * HNS3_TQP_REG_SIZE);
1347 rxq->io_head_reg = (volatile void *)((char *)rxq->io_base +
1348 HNS3_RING_RX_HEAD_REG);
1349 rxq->rx_buf_len = rx_buf_size;
1351 rxq->pkt_len_errors = 0;
1352 rxq->l3_csum_errors = 0;
1353 rxq->l4_csum_errors = 0;
1354 rxq->ol3_csum_errors = 0;
1355 rxq->ol4_csum_errors = 0;
1357	/* The CRC len set here is used to amend the packet length */
1358 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
1359 rxq->crc_len = RTE_ETHER_CRC_LEN;
1363 rxq->bulk_mbuf_num = 0;
1365 rte_spinlock_lock(&hw->lock);
1366 dev->data->rx_queues[idx] = rxq;
1367 rte_spinlock_unlock(&hw->lock);
1373 hns3_rx_scattered_reset(struct rte_eth_dev *dev)
1375 struct hns3_adapter *hns = dev->data->dev_private;
1376 struct hns3_hw *hw = &hns->hw;
1379 dev->data->scattered_rx = false;
1383 hns3_rx_scattered_calc(struct rte_eth_dev *dev)
1385 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1386 struct hns3_adapter *hns = dev->data->dev_private;
1387 struct hns3_hw *hw = &hns->hw;
1388 struct hns3_rx_queue *rxq;
1391 if (dev->data->rx_queues == NULL)
1394 for (queue_id = 0; queue_id < dev->data->nb_rx_queues; queue_id++) {
1395 rxq = dev->data->rx_queues[queue_id];
1396 if (hw->rx_buf_len == 0)
1397 hw->rx_buf_len = rxq->rx_buf_len;
1399 hw->rx_buf_len = RTE_MIN(hw->rx_buf_len,
1403 if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SCATTER ||
1404 dev_conf->rxmode.max_rx_pkt_len > hw->rx_buf_len)
1405 dev->data->scattered_rx = true;
1409 hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1411 static const uint32_t ptypes[] = {
1413 RTE_PTYPE_L2_ETHER_VLAN,
1414 RTE_PTYPE_L2_ETHER_QINQ,
1415 RTE_PTYPE_L2_ETHER_LLDP,
1416 RTE_PTYPE_L2_ETHER_ARP,
1418 RTE_PTYPE_L3_IPV4_EXT,
1420 RTE_PTYPE_L3_IPV6_EXT,
1426 RTE_PTYPE_TUNNEL_GRE,
1430 if (dev->rx_pkt_burst == hns3_recv_pkts ||
1431 dev->rx_pkt_burst == hns3_recv_scattered_pkts)
1438 hns3_init_rx_ptype_tble(struct rte_eth_dev *dev)
1440 struct hns3_adapter *hns = dev->data->dev_private;
1441 struct hns3_ptype_table *tbl = &hns->ptype_tbl;
1443 memset(tbl, 0, sizeof(*tbl));
1445 tbl->l2table[0] = RTE_PTYPE_L2_ETHER;
1446 tbl->l2table[1] = RTE_PTYPE_L2_ETHER_QINQ;
1447 tbl->l2table[2] = RTE_PTYPE_L2_ETHER_VLAN;
1448 tbl->l2table[3] = RTE_PTYPE_L2_ETHER_VLAN;
1450 tbl->l3table[0] = RTE_PTYPE_L3_IPV4;
1451 tbl->l3table[1] = RTE_PTYPE_L3_IPV6;
1452 tbl->l3table[2] = RTE_PTYPE_L2_ETHER_ARP;
1453 tbl->l3table[3] = RTE_PTYPE_L2_ETHER;
1454 tbl->l3table[4] = RTE_PTYPE_L3_IPV4_EXT;
1455 tbl->l3table[5] = RTE_PTYPE_L3_IPV6_EXT;
1456 tbl->l3table[6] = RTE_PTYPE_L2_ETHER_LLDP;
1458 tbl->l4table[0] = RTE_PTYPE_L4_UDP;
1459 tbl->l4table[1] = RTE_PTYPE_L4_TCP;
1460 tbl->l4table[2] = RTE_PTYPE_TUNNEL_GRE;
1461 tbl->l4table[3] = RTE_PTYPE_L4_SCTP;
1462 tbl->l4table[4] = RTE_PTYPE_L4_IGMP;
1463 tbl->l4table[5] = RTE_PTYPE_L4_ICMP;
1465 tbl->inner_l2table[0] = RTE_PTYPE_INNER_L2_ETHER;
1466 tbl->inner_l2table[1] = RTE_PTYPE_INNER_L2_ETHER_VLAN;
1467 tbl->inner_l2table[2] = RTE_PTYPE_INNER_L2_ETHER_QINQ;
1469 tbl->inner_l3table[0] = RTE_PTYPE_INNER_L3_IPV4;
1470 tbl->inner_l3table[1] = RTE_PTYPE_INNER_L3_IPV6;
1471 tbl->inner_l3table[2] = 0;
1472 tbl->inner_l3table[3] = RTE_PTYPE_INNER_L2_ETHER;
1473 tbl->inner_l3table[4] = RTE_PTYPE_INNER_L3_IPV4_EXT;
1474 tbl->inner_l3table[5] = RTE_PTYPE_INNER_L3_IPV6_EXT;
1476 tbl->inner_l4table[0] = RTE_PTYPE_INNER_L4_UDP;
1477 tbl->inner_l4table[1] = RTE_PTYPE_INNER_L4_TCP;
1478 tbl->inner_l4table[2] = RTE_PTYPE_TUNNEL_GRE;
1479 tbl->inner_l4table[3] = RTE_PTYPE_INNER_L4_SCTP;
1480 tbl->inner_l4table[4] = RTE_PTYPE_L4_IGMP;
1481 tbl->inner_l4table[5] = RTE_PTYPE_INNER_L4_ICMP;
1483 tbl->ol3table[0] = RTE_PTYPE_L3_IPV4;
1484 tbl->ol3table[1] = RTE_PTYPE_L3_IPV6;
1485 tbl->ol3table[2] = 0;
1486 tbl->ol3table[3] = 0;
1487 tbl->ol3table[4] = RTE_PTYPE_L3_IPV4_EXT;
1488 tbl->ol3table[5] = RTE_PTYPE_L3_IPV6_EXT;
1490 tbl->ol4table[0] = 0;
1491 tbl->ol4table[1] = RTE_PTYPE_TUNNEL_VXLAN;
1492 tbl->ol4table[2] = RTE_PTYPE_TUNNEL_NVGRE;
1496 hns3_rxd_to_vlan_tci(struct hns3_rx_queue *rxq, struct rte_mbuf *mb,
1497 uint32_t l234_info, const struct hns3_desc *rxd)
1499 #define HNS3_STRP_STATUS_NUM 0x4
1501 #define HNS3_NO_STRP_VLAN_VLD 0x0
1502 #define HNS3_INNER_STRP_VLAN_VLD 0x1
1503 #define HNS3_OUTER_STRP_VLAN_VLD 0x2
1504 uint32_t strip_status;
1505 uint32_t report_mode;
1508	 * Due to a HW limitation, the vlan tag is always inserted into the RX
1509	 * descriptor when it is stripped from the packet, so the driver needs to
1510	 * determine which tag to report to the mbuf according to the PVID
1511	 * configuration and the vlan stripped status.
1513 static const uint32_t report_type[][HNS3_STRP_STATUS_NUM] = {
1515 HNS3_NO_STRP_VLAN_VLD,
1516 HNS3_OUTER_STRP_VLAN_VLD,
1517 HNS3_INNER_STRP_VLAN_VLD,
1518 HNS3_OUTER_STRP_VLAN_VLD
1521 HNS3_NO_STRP_VLAN_VLD,
1522 HNS3_NO_STRP_VLAN_VLD,
1523 HNS3_NO_STRP_VLAN_VLD,
1524 HNS3_INNER_STRP_VLAN_VLD
1527 strip_status = hns3_get_field(l234_info, HNS3_RXD_STRP_TAGP_M,
1528 HNS3_RXD_STRP_TAGP_S);
1529 report_mode = report_type[rxq->pvid_state][strip_status];
1530 switch (report_mode) {
1531 case HNS3_NO_STRP_VLAN_VLD:
1534 case HNS3_INNER_STRP_VLAN_VLD:
1535 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1536 mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.vlan_tag);
1538 case HNS3_OUTER_STRP_VLAN_VLD:
1539 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1540 mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.ot_vlan_tag);
1546 recalculate_data_len(struct rte_mbuf *first_seg, struct rte_mbuf *last_seg,
1547 struct rte_mbuf *rxm, struct hns3_rx_queue *rxq,
1550 uint8_t crc_len = rxq->crc_len;
1552 if (data_len <= crc_len) {
1553 rte_pktmbuf_free_seg(rxm);
1554 first_seg->nb_segs--;
1555 last_seg->data_len = (uint16_t)(last_seg->data_len -
1556 (crc_len - data_len));
1557 last_seg->next = NULL;
1559 rxm->data_len = (uint16_t)(data_len - crc_len);
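/*
 * Rx mbuf allocation helper: serve mbufs from the per-queue bulk cache when
 * possible, refill the cache with rte_mempool_get_bulk(), and fall back to a
 * single raw allocation if the bulk get fails.
 */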
1562 static inline struct rte_mbuf *
1563 hns3_rx_alloc_buffer(struct hns3_rx_queue *rxq)
1567 if (likely(rxq->bulk_mbuf_num > 0))
1568 return rxq->bulk_mbuf[--rxq->bulk_mbuf_num];
1570 ret = rte_mempool_get_bulk(rxq->mb_pool, (void **)rxq->bulk_mbuf,
1571 HNS3_BULK_ALLOC_MBUF_NUM);
1572 if (likely(ret == 0)) {
1573 rxq->bulk_mbuf_num = HNS3_BULK_ALLOC_MBUF_NUM;
1574 return rxq->bulk_mbuf[--rxq->bulk_mbuf_num];
1576 return rte_mbuf_raw_alloc(rxq->mb_pool);
1580 hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1582 volatile struct hns3_desc *rx_ring; /* RX ring (desc) */
1583 volatile struct hns3_desc *rxdp; /* pointer of the current desc */
1584 struct hns3_rx_queue *rxq; /* RX queue */
1585 struct hns3_entry *sw_ring;
1586 struct hns3_entry *rxe;
1587 struct hns3_desc rxd;
1588 struct rte_mbuf *nmb; /* pointer of the new mbuf */
1589 struct rte_mbuf *rxm;
1590 uint32_t bd_base_info;
1603 rx_ring = rxq->rx_ring;
1604 sw_ring = rxq->sw_ring;
1605 rx_id = rxq->next_to_use;
1607 while (nb_rx < nb_pkts) {
1608 rxdp = &rx_ring[rx_id];
1609 bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
1610 if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
1613 rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -
1614 (1u << HNS3_RXD_VLD_B)];
1616 nmb = hns3_rx_alloc_buffer(rxq);
1617 if (unlikely(nmb == NULL)) {
1620 port_id = rxq->port_id;
1621 rte_eth_devices[port_id].data->rx_mbuf_alloc_failed++;
1626 rxe = &sw_ring[rx_id];
1628 if (unlikely(rx_id == rxq->nb_rx_desc))
1631 rte_prefetch0(sw_ring[rx_id].mbuf);
1632 if ((rx_id & HNS3_RX_RING_PREFETCTH_MASK) == 0) {
1633 rte_prefetch0(&rx_ring[rx_id]);
1634 rte_prefetch0(&sw_ring[rx_id]);
1640 dma_addr = rte_mbuf_data_iova_default(nmb);
1641 rxdp->addr = rte_cpu_to_le_64(dma_addr);
1642 rxdp->rx.bd_base_info = 0;
1644 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1645 rxm->pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.rx.pkt_len)) -
1647 rxm->data_len = rxm->pkt_len;
1648 rxm->port = rxq->port_id;
1649 rxm->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash);
1650 rxm->ol_flags = PKT_RX_RSS_HASH;
1651 if (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) {
1653 rte_le_to_cpu_16(rxd.rx.fd_id);
1654 rxm->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
1659		/* Load remaining descriptor data and extract necessary fields */
1660 l234_info = rte_le_to_cpu_32(rxd.rx.l234_info);
1661 ol_info = rte_le_to_cpu_32(rxd.rx.ol_info);
1662 ret = hns3_handle_bdinfo(rxq, rxm, bd_base_info,
1663 l234_info, &cksum_err);
1667 rxm->packet_type = hns3_rx_calc_ptype(rxq, l234_info, ol_info);
1669 if (likely(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
1670 hns3_rx_set_cksum_flag(rxm, rxm->packet_type,
1672 hns3_rxd_to_vlan_tci(rxq, rxm, l234_info, &rxd);
1674 rx_pkts[nb_rx++] = rxm;
1677 rte_pktmbuf_free(rxm);
1680 rxq->next_to_use = rx_id;
1681 rxq->rx_free_hold += nb_rx_bd;
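	/*
	 * Hand the processed BDs back to hardware in batches: the head register
	 * is only written once rx_free_hold exceeds rx_free_thresh, which limits
	 * doorbell (MMIO) writes.
	 */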
1682 if (rxq->rx_free_hold > rxq->rx_free_thresh) {
1683 hns3_write_reg_opt(rxq->io_head_reg, rxq->rx_free_hold);
1684 rxq->rx_free_hold = 0;
1691 hns3_recv_scattered_pkts(void *rx_queue,
1692 struct rte_mbuf **rx_pkts,
1695 volatile struct hns3_desc *rx_ring; /* RX ring (desc) */
1696 volatile struct hns3_desc *rxdp; /* pointer of the current desc */
1697 struct hns3_rx_queue *rxq; /* RX queue */
1698 struct hns3_entry *sw_ring;
1699 struct hns3_entry *rxe;
1700 struct rte_mbuf *first_seg;
1701 struct rte_mbuf *last_seg;
1702 struct hns3_desc rxd;
1703 struct rte_mbuf *nmb; /* pointer of the new mbuf */
1704 struct rte_mbuf *rxm;
1705 struct rte_eth_dev *dev;
1706 uint32_t bd_base_info;
1721 rx_id = rxq->next_to_use;
1722 rx_ring = rxq->rx_ring;
1723 sw_ring = rxq->sw_ring;
1724 first_seg = rxq->pkt_first_seg;
1725 last_seg = rxq->pkt_last_seg;
1727 while (nb_rx < nb_pkts) {
1728 rxdp = &rx_ring[rx_id];
1729 bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
1730 if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
1734 * The interactive process between software and hardware of
1735 * receiving a new packet in hns3 network engine:
1736 * 1. Hardware network engine firstly writes the packet content
1737 * to the memory pointed by the 'addr' field of the Rx Buffer
1738 * Descriptor, secondly fills the result of parsing the
1739 * packet include the valid field into the Rx Buffer
1740 * Descriptor in one write operation.
1741 * 2. Driver reads the Rx BD's valid field in the loop to check
1742 * whether it's valid, if valid then assign a new address to
1743 * the addr field, clear the valid field, get the other
1744 * information of the packet by parsing Rx BD's other fields,
1745 * finally write back the number of Rx BDs processed by the
1746 * driver to the HNS3_RING_RX_HEAD_REG register to inform
1748 * In the above process, the ordering is very important. We must
1749		 * make sure that the CPU reads the Rx BD's other fields only after the
1752		 * There are two types of re-ordering: compiler re-ordering and
1753		 * CPU re-ordering under the ARMv8 architecture.
1754		 * 1. we use volatile to deal with compiler re-ordering, so you
1755		 * can see that rx_ring/rxdp are defined as volatile.
1756 * 2. we commonly use memory barrier to deal with CPU
1757 * re-ordering, but the cost is high.
1759 * In order to solve the high cost of using memory barrier, we
1760 * use the data dependency order under the ARMv8 architecture,
1763 * instr02: load B <- A
1764 * the instr02 will always execute after instr01.
1766 * To construct the data dependency ordering, we use the
1767 * following assignment:
1768 * rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -
1769 * (1u<<HNS3_RXD_VLD_B)]
1770 * Using gcc compiler under the ARMv8 architecture, the related
1771 * assembly code example as follows:
1772		 * note: (1u << HNS3_RXD_VLD_B) equals 0x10
1773 * instr01: ldr w26, [x22, #28] --read bd_base_info
1774 * instr02: and w0, w26, #0x10 --calc bd_base_info & 0x10
1775 * instr03: sub w0, w0, #0x10 --calc (bd_base_info &
1777 * instr04: add x0, x22, x0, lsl #5 --calc copy source addr
1778 * instr05: ldp x2, x3, [x0]
1779 * instr06: stp x2, x3, [x29, #256] --copy BD's [0 ~ 15]B
1780 * instr07: ldp x4, x5, [x0, #16]
1781 * instr08: stp x4, x5, [x29, #272] --copy BD's [16 ~ 31]B
1782		 * the instr05~08 depend on x0's value, x0 depends on w26's
1783		 * value, and w26 is the bd_base_info; this forms the data
1784		 * dependency ordering.
1785		 * note: if the BD is valid, (bd_base_info & (1u<<HNS3_RXD_VLD_B)) -
1786		 *       (1u<<HNS3_RXD_VLD_B) will always be zero, so the
1787		 *       assignment is correct.
1789 * So we use the data dependency ordering instead of memory
1790 * barrier to improve receive performance.
1792 rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -
1793 (1u << HNS3_RXD_VLD_B)];
1795 nmb = hns3_rx_alloc_buffer(rxq);
1796 if (unlikely(nmb == NULL)) {
1797 dev = &rte_eth_devices[rxq->port_id];
1798 dev->data->rx_mbuf_alloc_failed++;
1803 rxe = &sw_ring[rx_id];
1805 if (unlikely(rx_id == rxq->nb_rx_desc))
1808 rte_prefetch0(sw_ring[rx_id].mbuf);
1809 if ((rx_id & HNS3_RX_RING_PREFETCTH_MASK) == 0) {
1810 rte_prefetch0(&rx_ring[rx_id]);
1811 rte_prefetch0(&sw_ring[rx_id]);
1817 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1818 rxdp->rx.bd_base_info = 0;
1819 rxdp->addr = dma_addr;
1821 if (first_seg == NULL) {
1823 first_seg->nb_segs = 1;
1825 first_seg->nb_segs++;
1826 last_seg->next = rxm;
1829 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1830 rxm->data_len = rte_le_to_cpu_16(rxd.rx.size);
1832 if (!(bd_base_info & BIT(HNS3_RXD_FE_B))) {
1839		 * This is the last buffer of the received packet. The packet length
1840		 * from the buffer descriptor may contain the CRC length, so the packet
1841		 * length should subtract it, and the same applies to the data length.
1843 first_seg->pkt_len = rte_le_to_cpu_16(rxd.rx.pkt_len);
1846 * This is the last buffer of the received packet. If the CRC
1847 * is not stripped by the hardware:
1848 * - Subtract the CRC length from the total packet length.
1849 * - If the last buffer only contains the whole CRC or a part
1850		 * of it, free the mbuf associated with the last buffer. If part
1851 * of the CRC is also contained in the previous mbuf, subtract
1852 * the length of that CRC part from the data length of the
1856 if (unlikely(rxq->crc_len > 0)) {
1857 first_seg->pkt_len -= rxq->crc_len;
1858 recalculate_data_len(first_seg, last_seg, rxm, rxq,
1862 first_seg->port = rxq->port_id;
1863 first_seg->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash);
1864 first_seg->ol_flags = PKT_RX_RSS_HASH;
1865 if (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) {
1866 first_seg->hash.fdir.hi =
1867 rte_le_to_cpu_16(rxd.rx.fd_id);
1868 first_seg->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
1871 gro_size = hns3_get_field(bd_base_info, HNS3_RXD_GRO_SIZE_M,
1872 HNS3_RXD_GRO_SIZE_S);
1873 if (gro_size != 0) {
1874 first_seg->ol_flags |= PKT_RX_LRO;
1875 first_seg->tso_segsz = gro_size;
1878 l234_info = rte_le_to_cpu_32(rxd.rx.l234_info);
1879 ol_info = rte_le_to_cpu_32(rxd.rx.ol_info);
1880 ret = hns3_handle_bdinfo(rxq, first_seg, bd_base_info,
1881 l234_info, &cksum_err);
1885 first_seg->packet_type = hns3_rx_calc_ptype(rxq,
1886 l234_info, ol_info);
1888 if (bd_base_info & BIT(HNS3_RXD_L3L4P_B))
1889 hns3_rx_set_cksum_flag(first_seg,
1890 first_seg->packet_type,
1892 hns3_rxd_to_vlan_tci(rxq, first_seg, l234_info, &rxd);
1894 rx_pkts[nb_rx++] = first_seg;
1898 rte_pktmbuf_free(first_seg);
1902 rxq->next_to_use = rx_id;
1903 rxq->pkt_first_seg = first_seg;
1904 rxq->pkt_last_seg = last_seg;
1906 rxq->rx_free_hold += nb_rx_bd;
1907 if (rxq->rx_free_hold > rxq->rx_free_thresh) {
1908 hns3_write_reg_opt(rxq->io_head_reg, rxq->rx_free_hold);
1909 rxq->rx_free_hold = 0;
1916 hns3_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
1917 struct rte_eth_burst_mode *mode)
1919 static const struct {
1920 eth_rx_burst_t pkt_burst;
1923 { hns3_recv_pkts, "Scalar" },
1924 { hns3_recv_scattered_pkts, "Scalar Scattered" },
1927 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
1931 for (i = 0; i < RTE_DIM(burst_infos); i++) {
1932 if (pkt_burst == burst_infos[i].pkt_burst) {
1933 snprintf(mode->info, sizeof(mode->info), "%s",
1934 burst_infos[i].info);
1943 static eth_rx_burst_t
1944 hns3_get_rx_function(struct rte_eth_dev *dev)
1946 struct hns3_adapter *hns = dev->data->dev_private;
1947 uint64_t offloads = dev->data->dev_conf.rxmode.offloads;
1949 if (hns->rx_simple_allowed && !dev->data->scattered_rx &&
1950 (offloads & DEV_RX_OFFLOAD_TCP_LRO) == 0)
1951 return hns3_recv_pkts;
1953 return hns3_recv_scattered_pkts;
1956 hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
1957 unsigned int socket_id, const struct rte_eth_txconf *conf)
1959 struct hns3_adapter *hns = dev->data->dev_private;
1960 struct hns3_hw *hw = &hns->hw;
1961 struct hns3_queue_info q_info;
1962 struct hns3_tx_queue *txq;
1965 if (dev->data->dev_started) {
1966 hns3_err(hw, "tx_queue_setup after dev_start no supported");
1970 if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||
1971 nb_desc % HNS3_ALIGN_RING_DESC) {
1972 hns3_err(hw, "Number (%u) of tx descriptors is invalid",
1977 if (dev->data->tx_queues[idx] != NULL) {
1978 hns3_tx_queue_release(dev->data->tx_queues[idx]);
1979 dev->data->tx_queues[idx] = NULL;
1983 q_info.socket_id = socket_id;
1984 q_info.nb_desc = nb_desc;
1985 q_info.type = "hns3 TX queue";
1986 q_info.ring_name = "tx_ring";
1987 txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
1990 "Failed to alloc mem and reserve DMA mem for tx ring!");
1994 txq->tx_deferred_start = conf->tx_deferred_start;
1995 tx_entry_len = sizeof(struct hns3_entry) * txq->nb_tx_desc;
1996 txq->sw_ring = rte_zmalloc_socket("hns3 TX sw ring", tx_entry_len,
1997 RTE_CACHE_LINE_SIZE, socket_id);
1998 if (txq->sw_ring == NULL) {
1999 hns3_err(hw, "Failed to allocate memory for tx sw ring!");
2000 hns3_tx_queue_release(txq);
2005 txq->next_to_use = 0;
2006 txq->next_to_clean = 0;
2007 txq->tx_bd_ready = txq->nb_tx_desc - 1;
2008 txq->port_id = dev->data->port_id;
2009 txq->pvid_state = hw->port_base_vlan_cfg.state;
2010 txq->configured = true;
2011 txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
2012 idx * HNS3_TQP_REG_SIZE);
2013 txq->min_tx_pkt_len = hw->min_tx_pkt_len;
2014 txq->over_length_pkt_cnt = 0;
2015 txq->exceed_limit_bd_pkt_cnt = 0;
2016 txq->exceed_limit_bd_reassem_fail = 0;
2017 txq->unsupported_tunnel_pkt_cnt = 0;
2018 txq->queue_full_cnt = 0;
2019 txq->pkt_padding_fail_cnt = 0;
2020 rte_spinlock_lock(&hw->lock);
2021 dev->data->tx_queues[idx] = txq;
2022 rte_spinlock_unlock(&hw->lock);
2028 hns3_queue_xmit(struct hns3_tx_queue *txq, uint32_t buf_num)
2030 hns3_write_dev(txq, HNS3_RING_TX_TAIL_REG, buf_num);
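/*
 * Reclaim completed Tx BDs: starting from next_to_clean, walk the ring while
 * hardware has cleared the VLD bit, free the mbufs attached in sw_ring, and
 * update next_to_clean and tx_bd_ready accordingly.
 */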
2034 hns3_tx_free_useless_buffer(struct hns3_tx_queue *txq)
2036 uint16_t tx_next_clean = txq->next_to_clean;
2037 uint16_t tx_next_use = txq->next_to_use;
2038 uint16_t tx_bd_ready = txq->tx_bd_ready;
2039 uint16_t tx_bd_max = txq->nb_tx_desc;
2040 struct hns3_entry *tx_bak_pkt = &txq->sw_ring[tx_next_clean];
2041 struct hns3_desc *desc = &txq->tx_ring[tx_next_clean];
2042 struct rte_mbuf *mbuf;
2044 while ((!(desc->tx.tp_fe_sc_vld_ra_ri &
2045 rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B)))) &&
2046 tx_next_use != tx_next_clean) {
2047 mbuf = tx_bak_pkt->mbuf;
2049 rte_pktmbuf_free_seg(mbuf);
2050 tx_bak_pkt->mbuf = NULL;
2058 if (tx_next_clean >= tx_bd_max) {
2060 desc = txq->tx_ring;
2061 tx_bak_pkt = txq->sw_ring;
2065 txq->next_to_clean = tx_next_clean;
2066 txq->tx_bd_ready = tx_bd_ready;
2070 hns3_tso_proc_tunnel(struct hns3_desc *desc, uint64_t ol_flags,
2071 struct rte_mbuf *rxm, uint8_t *l2_len)
2077 tun_flags = ol_flags & PKT_TX_TUNNEL_MASK;
2081 otmp = rte_le_to_cpu_32(desc->tx.ol_type_vlan_len_msec);
2082 switch (tun_flags) {
2083 case PKT_TX_TUNNEL_GENEVE:
2084 case PKT_TX_TUNNEL_VXLAN:
2085 *l2_len = rxm->l2_len - RTE_ETHER_VXLAN_HLEN;
2087 case PKT_TX_TUNNEL_GRE:
2089		 * OL4 header size, defined in units of 4 bytes; it contains the
2090		 * outer L4 (GRE) length and the tunneling length.
2092 ol4_len = hns3_get_field(otmp, HNS3_TXD_L4LEN_M,
2094 *l2_len = rxm->l2_len - (ol4_len << HNS3_L4_LEN_UNIT);
2097 /* For non UDP / GRE tunneling, drop the tunnel packet */
2100 hns3_set_field(otmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
2101 rxm->outer_l2_len >> HNS3_L2_LEN_UNIT);
2102 desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(otmp);
2108 hns3_config_gro(struct hns3_hw *hw, bool en)
2110 struct hns3_cfg_gro_status_cmd *req;
2111 struct hns3_cmd_desc desc;
2114 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GRO_GENERIC_CONFIG, false);
2115 req = (struct hns3_cfg_gro_status_cmd *)desc.data;
2117 req->gro_en = rte_cpu_to_le_16(en ? 1 : 0);
2119 ret = hns3_cmd_send(hw, &desc, 1);
2121 hns3_err(hw, "%s hardware GRO failed, ret = %d",
2122 en ? "enable" : "disable", ret);
2128 hns3_restore_gro_conf(struct hns3_hw *hw)
2134 offloads = hw->data->dev_conf.rxmode.offloads;
2135 gro_en = offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false;
2136 ret = hns3_config_gro(hw, gro_en);
2138 hns3_err(hw, "restore hardware GRO to %s failed, ret = %d",
2139 gro_en ? "enabled" : "disabled", ret);
2145 hns3_pkt_is_tso(struct rte_mbuf *m)
2147 return (m->tso_segsz != 0 && m->ol_flags & PKT_TX_TCP_SEG);
2151 hns3_set_tso(struct hns3_desc *desc, uint64_t ol_flags,
2152 uint32_t paylen, struct rte_mbuf *rxm)
2154 uint8_t l2_len = rxm->l2_len;
2157 if (!hns3_pkt_is_tso(rxm))
2160 if (hns3_tso_proc_tunnel(desc, ol_flags, rxm, &l2_len))
2163 if (paylen <= rxm->tso_segsz)
2166 tmp = rte_le_to_cpu_32(desc->tx.type_cs_vlan_tso_len);
2167 hns3_set_bit(tmp, HNS3_TXD_TSO_B, 1);
2168 hns3_set_bit(tmp, HNS3_TXD_L3CS_B, 1);
2169 hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, HNS3_L4T_TCP);
2170 hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
2171 hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
2172 sizeof(struct rte_tcp_hdr) >> HNS3_L4_LEN_UNIT);
2173 hns3_set_field(tmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
2174 l2_len >> HNS3_L2_LEN_UNIT);
2175 desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp);
2176 desc->tx.mss = rte_cpu_to_le_16(rxm->tso_segsz);
2180 hns3_fill_per_desc(struct hns3_desc *desc, struct rte_mbuf *rxm)
2182 desc->addr = rte_mbuf_data_iova(rxm);
2183 desc->tx.send_size = rte_cpu_to_le_16(rte_pktmbuf_data_len(rxm));
2184 desc->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B));
2188 hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc,
2189 struct rte_mbuf *rxm)
2191 uint64_t ol_flags = rxm->ol_flags;
2195 hdr_len = rxm->l2_len + rxm->l3_len + rxm->l4_len;
2196 hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ?
2197 rxm->outer_l2_len + rxm->outer_l3_len : 0;
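	/*
	 * paylen is the payload length: the total packet length minus all header
	 * bytes counted above; hns3_set_tso() uses it to decide whether TSO is
	 * needed for this packet.
	 */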
2198 paylen = rxm->pkt_len - hdr_len;
2199 desc->tx.paylen = rte_cpu_to_le_32(paylen);
2200 hns3_set_tso(desc, ol_flags, paylen, rxm);
2203	 * Currently, the hns3 network engine hardware does not support more than
2204	 * two layers of VLAN offload in the Tx direction. So when the number of
2205	 * VLANs in the packets represented by rxm plus the number of VLANs
2206	 * offloaded by hardware (such as PVID) exceeds two, the packets will
2207	 * be discarded or their original VLAN will be overwritten
2208	 * by hardware. When the PF PVID is enabled by calling the API function
2209	 * named rte_eth_dev_set_vlan_pvid, or the VF PVID is enabled by the hns3
2210	 * PF kernel ether driver, the outer VLAN tag will always be the PVID.
2211	 * To avoid the VLAN of the Tx descriptor being overwritten by the PVID, it
2212	 * should be added at the position close to the IP header when PVID is enabled.
2214 if (!txq->pvid_state && ol_flags & (PKT_TX_VLAN_PKT |
2216 desc->tx.ol_type_vlan_len_msec |=
2217 rte_cpu_to_le_32(BIT(HNS3_TXD_OVLAN_B));
2218 if (ol_flags & PKT_TX_QINQ_PKT)
2219 desc->tx.outer_vlan_tag =
2220 rte_cpu_to_le_16(rxm->vlan_tci_outer);
2222 desc->tx.outer_vlan_tag =
2223 rte_cpu_to_le_16(rxm->vlan_tci);
2226 if (ol_flags & PKT_TX_QINQ_PKT ||
2227 ((ol_flags & PKT_TX_VLAN_PKT) && txq->pvid_state)) {
2228 desc->tx.type_cs_vlan_tso_len |=
2229 rte_cpu_to_le_32(BIT(HNS3_TXD_VLAN_B));
2230 desc->tx.vlan_tag = rte_cpu_to_le_16(rxm->vlan_tci);
2235 hns3_tx_alloc_mbufs(struct hns3_tx_queue *txq, struct rte_mempool *mb_pool,
2236 uint16_t nb_new_buf, struct rte_mbuf **alloc_mbuf)
2238 struct rte_mbuf *new_mbuf = NULL;
2239 struct rte_eth_dev *dev;
2240 struct rte_mbuf *temp;
2244 /* Allocate enough mbufs */
2245 for (i = 0; i < nb_new_buf; i++) {
2246 temp = rte_pktmbuf_alloc(mb_pool);
2247 if (unlikely(temp == NULL)) {
2248 dev = &rte_eth_devices[txq->port_id];
2249 hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2250 hns3_err(hw, "Failed to alloc TX mbuf port_id=%d,"
2251 "queue_id=%d in reassemble tx pkts.",
2252 txq->port_id, txq->queue_id);
2253 rte_pktmbuf_free(new_mbuf);
2256 temp->next = new_mbuf;
2260 if (new_mbuf == NULL)
2263 new_mbuf->nb_segs = nb_new_buf;
2264 *alloc_mbuf = new_mbuf;
2270 hns3_pktmbuf_copy_hdr(struct rte_mbuf *new_pkt, struct rte_mbuf *old_pkt)
2272 new_pkt->ol_flags = old_pkt->ol_flags;
2273 new_pkt->pkt_len = rte_pktmbuf_pkt_len(old_pkt);
2274 new_pkt->outer_l2_len = old_pkt->outer_l2_len;
2275 new_pkt->outer_l3_len = old_pkt->outer_l3_len;
2276 new_pkt->l2_len = old_pkt->l2_len;
2277 new_pkt->l3_len = old_pkt->l3_len;
2278 new_pkt->l4_len = old_pkt->l4_len;
2279 new_pkt->vlan_tci_outer = old_pkt->vlan_tci_outer;
2280 new_pkt->vlan_tci = old_pkt->vlan_tci;
2284 hns3_reassemble_tx_pkts(void *tx_queue, struct rte_mbuf *tx_pkt,
2285 struct rte_mbuf **new_pkt)
2287 struct hns3_tx_queue *txq = tx_queue;
2288 struct rte_mempool *mb_pool;
2289 struct rte_mbuf *new_mbuf;
2290 struct rte_mbuf *temp_new;
2291 struct rte_mbuf *temp;
2292 uint16_t last_buf_len;
2293 uint16_t nb_new_buf;
2304 mb_pool = tx_pkt->pool;
2305 buf_size = tx_pkt->buf_len - RTE_PKTMBUF_HEADROOM;
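	/*
	 * Example with hypothetical sizes: a 6000-byte packet copied into mbufs
	 * with a 2048-byte usable buffer size needs (6000 - 1) / 2048 + 1 = 3 new
	 * mbufs.
	 */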
2306 nb_new_buf = (rte_pktmbuf_pkt_len(tx_pkt) - 1) / buf_size + 1;
2307 if (nb_new_buf > HNS3_MAX_NON_TSO_BD_PER_PKT)
2310 last_buf_len = rte_pktmbuf_pkt_len(tx_pkt) % buf_size;
2311 if (last_buf_len == 0)
2312 last_buf_len = buf_size;
2314 /* Allocate enough mbufs */
2315 ret = hns3_tx_alloc_mbufs(txq, mb_pool, nb_new_buf, &new_mbuf);
2319 /* Copy the original packet content to the new mbufs */
2321 s = rte_pktmbuf_mtod(temp, char *);
2322 len_s = rte_pktmbuf_data_len(temp);
2323 temp_new = new_mbuf;
2324 for (i = 0; i < nb_new_buf; i++) {
2325 d = rte_pktmbuf_mtod(temp_new, char *);
2326 if (i < nb_new_buf - 1)
2329 buf_len = last_buf_len;
2333 len = RTE_MIN(len_s, len_d);
2337 len_d = len_d - len;
2338 len_s = len_s - len;
2344 s = rte_pktmbuf_mtod(temp, char *);
2345 len_s = rte_pktmbuf_data_len(temp);
2349 temp_new->data_len = buf_len;
2350 temp_new = temp_new->next;
2352 hns3_pktmbuf_copy_hdr(new_mbuf, tx_pkt);
2354 /* free original mbufs */
2355 rte_pktmbuf_free(tx_pkt);
2357 *new_pkt = new_mbuf;
2363 hns3_parse_outer_params(uint64_t ol_flags, uint32_t *ol_type_vlan_len_msec)
2365 uint32_t tmp = *ol_type_vlan_len_msec;
2367 /* (outer) IP header type */
2368 if (ol_flags & PKT_TX_OUTER_IPV4) {
2369 /* OL3 header size, defined in 4 bytes */
2370 hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
2371 sizeof(struct rte_ipv4_hdr) >> HNS3_L3_LEN_UNIT);
2372 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
2373 hns3_set_field(tmp, HNS3_TXD_OL3T_M,
2374 HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_CSUM);
2376 hns3_set_field(tmp, HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
2377 HNS3_OL3T_IPV4_NO_CSUM);
2378 } else if (ol_flags & PKT_TX_OUTER_IPV6) {
2379 hns3_set_field(tmp, HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
2381 /* OL3 header size, defined in 4 bytes */
2382 hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
2383 sizeof(struct rte_ipv6_hdr) >> HNS3_L3_LEN_UNIT);
2386 *ol_type_vlan_len_msec = tmp;
static int
hns3_parse_inner_params(uint64_t ol_flags, uint32_t *ol_type_vlan_len_msec,
			struct rte_net_hdr_lens *hdr_lens)
{
	uint32_t tmp = *ol_type_vlan_len_msec;
	uint8_t l4_len;

	/* OL2 header size, defined in 2 bytes */
	hns3_set_field(tmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
		       sizeof(struct rte_ether_hdr) >> HNS3_L2_LEN_UNIT);

	/* L4TUNT: L4 Tunneling Type */
	switch (ol_flags & PKT_TX_TUNNEL_MASK) {
	case PKT_TX_TUNNEL_GENEVE:
	case PKT_TX_TUNNEL_VXLAN:
		/* MAC in UDP tunnelling packet, including VxLAN */
		hns3_set_field(tmp, HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S,
			       HNS3_TUN_MAC_IN_UDP);
		/*
		 * OL4 header size, defined in 4 bytes; it contains the outer
		 * L4 (UDP) length and the tunneling length.
		 */
		hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
			       (uint8_t)RTE_ETHER_VXLAN_HLEN >>
			       HNS3_L4_LEN_UNIT);
		break;
	case PKT_TX_TUNNEL_GRE:
		hns3_set_field(tmp, HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S,
			       HNS3_TUN_NVGRE_IN_GRE);
		/*
		 * OL4 header size, defined in 4 bytes; it contains the outer
		 * L4 (GRE) length and the tunneling length.
		 */
		l4_len = hdr_lens->l4_len + hdr_lens->tunnel_len;
		hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
			       l4_len >> HNS3_L4_LEN_UNIT);
		break;
	default:
		/* For non UDP / GRE tunneling, drop the tunnel packet */
		return -EINVAL;
	}

	*ol_type_vlan_len_msec = tmp;

	return 0;
}
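
/*
 * For the VXLAN/GENEVE case above, RTE_ETHER_VXLAN_HLEN is the 8-byte outer
 * UDP header plus the 8-byte VXLAN header (16 bytes in total), which the
 * 4-byte-unit L4LEN field therefore encodes as 4.
 */
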
static int
hns3_parse_tunneling_params(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
			    uint64_t ol_flags,
			    struct rte_net_hdr_lens *hdr_lens)
{
	struct hns3_desc *tx_ring = txq->tx_ring;
	struct hns3_desc *desc = &tx_ring[tx_desc_id];
	uint32_t value = 0;
	int ret;

	hns3_parse_outer_params(ol_flags, &value);
	ret = hns3_parse_inner_params(ol_flags, &value, hdr_lens);
	if (ret)
		return -EINVAL;

	desc->tx.ol_type_vlan_len_msec |= rte_cpu_to_le_32(value);

	return 0;
}

static void
hns3_parse_l3_cksum_params(uint64_t ol_flags, uint32_t *type_cs_vlan_tso_len)
{
	uint32_t tmp;

	/* Enable L3 checksum offloads */
	if (ol_flags & PKT_TX_IPV4) {
		tmp = *type_cs_vlan_tso_len;
		hns3_set_field(tmp, HNS3_TXD_L3T_M, HNS3_TXD_L3T_S,
			       HNS3_L3T_IPV4);
		/* inner(/normal) L3 header size, defined in 4 bytes */
		hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
			       sizeof(struct rte_ipv4_hdr) >> HNS3_L3_LEN_UNIT);
		if (ol_flags & PKT_TX_IP_CKSUM)
			hns3_set_bit(tmp, HNS3_TXD_L3CS_B, 1);
		*type_cs_vlan_tso_len = tmp;
	} else if (ol_flags & PKT_TX_IPV6) {
		tmp = *type_cs_vlan_tso_len;
		/* L3T: IPv6 has no header checksum */
		hns3_set_field(tmp, HNS3_TXD_L3T_M, HNS3_TXD_L3T_S,
			       HNS3_L3T_IPV6);
		/* inner(/normal) L3 header size, defined in 4 bytes */
		hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
			       sizeof(struct rte_ipv6_hdr) >> HNS3_L3_LEN_UNIT);
		*type_cs_vlan_tso_len = tmp;
	}
}

static void
hns3_parse_l4_cksum_params(uint64_t ol_flags, uint32_t *type_cs_vlan_tso_len)
{
	uint32_t tmp;

	/* Enable L4 checksum offloads */
	switch (ol_flags & PKT_TX_L4_MASK) {
	case PKT_TX_TCP_CKSUM:
		tmp = *type_cs_vlan_tso_len;
		hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
			       HNS3_L4T_TCP);
		hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
		hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
			       sizeof(struct rte_tcp_hdr) >> HNS3_L4_LEN_UNIT);
		*type_cs_vlan_tso_len = tmp;
		break;
	case PKT_TX_UDP_CKSUM:
		tmp = *type_cs_vlan_tso_len;
		hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
			       HNS3_L4T_UDP);
		hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
		hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
			       sizeof(struct rte_udp_hdr) >> HNS3_L4_LEN_UNIT);
		*type_cs_vlan_tso_len = tmp;
		break;
	case PKT_TX_SCTP_CKSUM:
		tmp = *type_cs_vlan_tso_len;
		hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
			       HNS3_L4T_SCTP);
		hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
		hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
			       sizeof(struct rte_sctp_hdr) >> HNS3_L4_LEN_UNIT);
		*type_cs_vlan_tso_len = tmp;
		break;
	default:
		break;
	}
}

static void
hns3_txd_enable_checksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
			 uint64_t ol_flags)
{
	struct hns3_desc *tx_ring = txq->tx_ring;
	struct hns3_desc *desc = &tx_ring[tx_desc_id];
	uint32_t value = 0;

	/* inner(/normal) L2 header size, defined in 2 bytes */
	hns3_set_field(value, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
		       sizeof(struct rte_ether_hdr) >> HNS3_L2_LEN_UNIT);

	hns3_parse_l3_cksum_params(ol_flags, &value);
	hns3_parse_l4_cksum_params(ol_flags, &value);

	desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(value);
}

static bool
hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num)
{
	struct rte_mbuf *m_first = tx_pkts;
	struct rte_mbuf *m_last = tx_pkts;
	uint32_t tot_len = 0;
	uint32_t hdr_len;
	uint32_t i;

	/*
	 * The hardware requires that, for every 8 consecutive buffers, the
	 * sum of their data lengths be greater than the MSS in the hns3
	 * network engine. We simplify this by ensuring that the first 8
	 * consecutive frags are larger than the GSO header length plus the
	 * MSS, and that every following window of 7 consecutive frags is
	 * larger than the MSS, except for the last 7 frags.
	 */
	if (bd_num <= HNS3_MAX_NON_TSO_BD_PER_PKT)
		return false;

	for (i = 0; m_last && i < HNS3_MAX_NON_TSO_BD_PER_PKT - 1;
	     i++, m_last = m_last->next)
		tot_len += m_last->data_len;

	if (!m_last)
		return true;

	/* ensure the first 8 frags are greater than mss + header */
	hdr_len = tx_pkts->l2_len + tx_pkts->l3_len + tx_pkts->l4_len;
	hdr_len += (tx_pkts->ol_flags & PKT_TX_TUNNEL_MASK) ?
		   tx_pkts->outer_l2_len + tx_pkts->outer_l3_len : 0;
	if (tot_len + m_last->data_len < tx_pkts->tso_segsz + hdr_len)
		return true;

	/*
	 * ensure the sum of the data lengths of every 7 consecutive buffers
	 * is greater than the mss, except for the last window.
	 */
	for (i = 0; m_last && i < bd_num - HNS3_MAX_NON_TSO_BD_PER_PKT; i++) {
		tot_len -= m_first->data_len;
		tot_len += m_last->data_len;

		if (tot_len < tx_pkts->tso_segsz)
			return true;

		m_first = m_first->next;
		m_last = m_last->next;
	}

	return false;
}
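
/*
 * Worked example for the check above (hypothetical numbers): with
 * tso_segsz = 1400, hdr_len = 54 (14 + 20 + 20) and a 10-segment packet whose
 * first 8 segments carry only 800 bytes in total, 800 < 1400 + 54, so the
 * packet cannot be sent as-is and must be linearized first.
 */
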
static void
hns3_outer_header_cksum_prepare(struct rte_mbuf *m)
{
	uint64_t ol_flags = m->ol_flags;
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_udp_hdr *udp_hdr;
	uint32_t paylen, hdr_len;

	if (!(ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6)))
		return;

	if (ol_flags & PKT_TX_IPV4) {
		ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
						   m->outer_l2_len);

		if (ol_flags & PKT_TX_IP_CKSUM)
			ipv4_hdr->hdr_checksum = 0;
	}

	if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM &&
	    ol_flags & PKT_TX_TCP_SEG) {
		hdr_len = m->l2_len + m->l3_len + m->l4_len;
		hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ?
			   m->outer_l2_len + m->outer_l3_len : 0;
		paylen = m->pkt_len - hdr_len;
		if (paylen <= m->tso_segsz)
			return;
		udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
						  m->outer_l2_len +
						  m->outer_l3_len);
		udp_hdr->dgram_cksum = 0;
	}
}
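
/*
 * Note on the block above: when a tunnelled packet is actually segmented (its
 * payload exceeds tso_segsz) and the outer L4 checksum is requested as UDP,
 * the outer UDP checksum is cleared here in software, presumably because the
 * hardware does not recompute it per segment; a zero UDP checksum is valid
 * for IPv4. This rationale is inferred from the code, not stated in it.
 */
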
static int
hns3_check_tso_pkt_valid(struct rte_mbuf *m)
{
	uint32_t tmp_data_len_sum = 0;
	uint16_t nb_buf = m->nb_segs;
	uint32_t paylen, hdr_len;
	struct rte_mbuf *m_seg;
	int i;

	if (nb_buf > HNS3_MAX_TSO_BD_PER_PKT)
		return -EINVAL;

	hdr_len = m->l2_len + m->l3_len + m->l4_len;
	hdr_len += (m->ol_flags & PKT_TX_TUNNEL_MASK) ?
		   m->outer_l2_len + m->outer_l3_len : 0;
	if (hdr_len > HNS3_MAX_TSO_HDR_SIZE)
		return -EINVAL;

	paylen = m->pkt_len - hdr_len;
	if (paylen > HNS3_MAX_BD_PAYLEN)
		return -EINVAL;

	/*
	 * The TSO header (including the outer and inner L2, L3 and L4
	 * headers) must be provided by at most three descriptors in the hns3
	 * network engine.
	 */
	m_seg = m;
	for (i = 0; m_seg != NULL && i < HNS3_MAX_TSO_HDR_BD_NUM && i < nb_buf;
	     i++, m_seg = m_seg->next) {
		tmp_data_len_sum += m_seg->data_len;
	}

	if (hdr_len > tmp_data_len_sum)
		return -EINVAL;

	return 0;
}
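
/*
 * Summary of the TSO limits enforced above: the segment count must not exceed
 * HNS3_MAX_TSO_BD_PER_PKT, the combined header must fit in
 * HNS3_MAX_TSO_HDR_SIZE, the payload must not exceed HNS3_MAX_BD_PAYLEN, and
 * the whole header must be contained in the first HNS3_MAX_TSO_HDR_BD_NUM
 * data segments.
 */
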
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
static inline int
hns3_vld_vlan_chk(struct hns3_tx_queue *txq, struct rte_mbuf *m)
{
	struct rte_ether_hdr *eh;
	struct rte_vlan_hdr *vh;

	if (!txq->pvid_state)
		return 0;

	/*
	 * Due to hardware limitations, we only support two-layer VLAN
	 * hardware offload in the Tx direction based on the hns3 network
	 * engine, so QinQ insertion is no longer supported when PVID is
	 * enabled. Also, when PVID is enabled, the following two kinds of
	 * packets:
	 *  i) packets with more than two VLAN tags, and
	 *  ii) packets with one VLAN tag while hardware VLAN insertion is
	 *      enabled,
	 * are regarded as abnormal packets and discarded by the hardware in
	 * the Tx direction. For debugging purposes, a validation check for
	 * these types of packets is added to the '.tx_pkt_prepare' ops
	 * implementation function named hns3_prep_pkts to inform users that
	 * these packets will be discarded.
	 */
	if (m->ol_flags & PKT_TX_QINQ_PKT)
		return -EINVAL;

	eh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
	if (eh->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
		if (m->ol_flags & PKT_TX_VLAN_PKT)
			return -EINVAL;

		/* Ensure the incoming packet is not a QinQ packet */
		vh = (struct rte_vlan_hdr *)(eh + 1);
		if (vh->eth_proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN))
			return -EINVAL;
	}

	return 0;
}
#endif

uint16_t
hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
	       uint16_t nb_pkts)
{
	struct rte_mbuf *m;
	uint16_t i;
	int ret;

	for (i = 0; i < nb_pkts; i++) {
		m = tx_pkts[i];

		if (hns3_pkt_is_tso(m) &&
		    (hns3_pkt_need_linearized(m, m->nb_segs) ||
		     hns3_check_tso_pkt_valid(m))) {
			rte_errno = EINVAL;
			return i;
		}

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
		ret = rte_validate_tx_offload(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}

		if (hns3_vld_vlan_chk(tx_queue, m)) {
			rte_errno = EINVAL;
			return i;
		}
#endif
		ret = rte_net_intel_cksum_prepare(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}

		hns3_outer_header_cksum_prepare(m);
	}

	return i;
}
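
/*
 * Usage note (illustrative, not part of the driver): hns3_prep_pkts is
 * installed as the '.tx_pkt_prepare' callback, so an application would
 * typically run something like
 *	nb = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
 *	nb = rte_eth_tx_burst(port_id, queue_id, pkts, nb);
 * before transmitting, letting the checks above reject unsupported packets
 * and fix up checksum fields in software.
 */
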
static int
hns3_parse_cksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
		 const struct rte_mbuf *m, struct rte_net_hdr_lens *hdr_lens)
{
	/* Fill in tunneling parameters if necessary */
	if (m->ol_flags & PKT_TX_TUNNEL_MASK) {
		(void)rte_net_get_ptype(m, hdr_lens, RTE_PTYPE_ALL_MASK);
		if (hns3_parse_tunneling_params(txq, tx_desc_id, m->ol_flags,
						hdr_lens)) {
			txq->unsupported_tunnel_pkt_cnt++;
			return -EINVAL;
		}
	}
	/* Enable checksum offloading */
	if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK)
		hns3_txd_enable_checksum(txq, tx_desc_id, m->ol_flags);

	return 0;
}

static int
hns3_check_non_tso_pkt(uint16_t nb_buf, struct rte_mbuf **m_seg,
		       struct rte_mbuf *tx_pkt, struct hns3_tx_queue *txq)
{
	struct rte_mbuf *new_pkt;
	int ret;

	if (hns3_pkt_is_tso(*m_seg))
		return 0;

	/*
	 * If the packet length is greater than the HNS3_MAX_FRAME_LEN the
	 * driver supports, the packet will not be transmitted.
	 */
	if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) > HNS3_MAX_FRAME_LEN)) {
		txq->over_length_pkt_cnt++;
		return -EINVAL;
	}

	if (unlikely(nb_buf > HNS3_MAX_NON_TSO_BD_PER_PKT)) {
		txq->exceed_limit_bd_pkt_cnt++;
		ret = hns3_reassemble_tx_pkts(txq, tx_pkt, &new_pkt);
		if (ret) {
			txq->exceed_limit_bd_reassem_fail++;
			return ret;
		}
		*m_seg = new_pkt;
	}

	return 0;
}

uint16_t
hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct rte_net_hdr_lens hdr_lens = {0};
	struct hns3_tx_queue *txq = tx_queue;
	struct hns3_entry *tx_bak_pkt;
	struct hns3_desc *tx_ring;
	struct rte_mbuf *tx_pkt;
	struct rte_mbuf *m_seg;
	struct hns3_desc *desc;
	uint32_t nb_hold = 0;
	uint16_t tx_next_use;
	uint16_t tx_pkt_num;
	uint16_t tx_bd_max;
	uint16_t nb_buf;
	uint16_t nb_tx;
	uint16_t i;

	/* free useless buffers */
	hns3_tx_free_useless_buffer(txq);

	tx_next_use = txq->next_to_use;
	tx_bd_max = txq->nb_tx_desc;
	tx_pkt_num = nb_pkts;
	tx_ring = txq->tx_ring;

	/* send packets */
	tx_bak_pkt = &txq->sw_ring[tx_next_use];
	for (nb_tx = 0; nb_tx < tx_pkt_num; nb_tx++) {
		tx_pkt = *tx_pkts++;

		nb_buf = tx_pkt->nb_segs;

		if (nb_buf > txq->tx_bd_ready) {
			txq->queue_full_cnt++;
			if (nb_tx == 0)
				return 0;
			goto end_of_tx;
		}

		/*
		 * If the packet length is less than the minimum packet length
		 * supported by hardware in the Tx direction, the driver needs
		 * to pad it to avoid errors.
		 */
		if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) <
			     txq->min_tx_pkt_len)) {
			uint16_t add_len;
			char *appended;

			add_len = txq->min_tx_pkt_len -
				  rte_pktmbuf_pkt_len(tx_pkt);
			appended = rte_pktmbuf_append(tx_pkt, add_len);
			if (appended == NULL) {
				txq->pkt_padding_fail_cnt++;
				break;
			}
			memset(appended, 0, add_len);
		}

		m_seg = tx_pkt;

		if (hns3_check_non_tso_pkt(nb_buf, &m_seg, tx_pkt, txq))
			goto end_of_tx;

		if (hns3_parse_cksum(txq, tx_next_use, m_seg, &hdr_lens))
			goto end_of_tx;

		i = 0;
		desc = &tx_ring[tx_next_use];

		/*
		 * If the packet is divided into multiple Tx Buffer
		 * Descriptors, only the first Tx Buffer Descriptor needs the
		 * vlan, paylen and tso fields filled in.
		 */
		hns3_fill_first_desc(txq, desc, m_seg);

		do {
			desc = &tx_ring[tx_next_use];
			/*
			 * Fill valid bits, DMA address and data length for
			 * each Tx Buffer Descriptor.
			 */
			hns3_fill_per_desc(desc, m_seg);
			tx_bak_pkt->mbuf = m_seg;
			m_seg = m_seg->next;
			tx_next_use++;
			tx_bak_pkt++;
			if (tx_next_use >= tx_bd_max) {
				tx_next_use = 0;
				tx_bak_pkt = txq->sw_ring;
			}
			i++;
		} while (m_seg != NULL);

		/* Add end flag for the last Tx Buffer Descriptor */
		desc->tx.tp_fe_sc_vld_ra_ri |=
				rte_cpu_to_le_16(BIT(HNS3_TXD_FE_B));

		nb_hold += i;
		txq->next_to_use = tx_next_use;
		txq->tx_bd_ready -= i;
	}

end_of_tx:

	if (likely(nb_tx))
		hns3_queue_xmit(txq, nb_hold);

	return nb_tx;
}
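
/*
 * Per-packet flow of hns3_xmit_pkts as implemented above: reclaim completed
 * descriptors, bail out when a packet needs more BDs than are free, pad
 * frames shorter than the hardware minimum, reassemble non-TSO packets that
 * span too many segments, program tunnel/checksum fields, fill one BD per
 * segment, mark the last BD with the FE (end) bit, and finally ring the
 * doorbell once via hns3_queue_xmit for all accepted packets.
 */
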
static uint16_t
hns3_dummy_rxtx_burst(void *dpdk_txq __rte_unused,
		      struct rte_mbuf **pkts __rte_unused,
		      uint16_t pkts_n __rte_unused)
{
	return 0;
}

void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;

	if (hns->hw.adapter_state == HNS3_NIC_STARTED &&
	    rte_atomic16_read(&hns->hw.reset.resetting) == 0) {
		eth_dev->rx_pkt_burst = hns3_get_rx_function(eth_dev);
		eth_dev->tx_pkt_burst = hns3_xmit_pkts;
		eth_dev->tx_pkt_prepare = hns3_prep_pkts;
	} else {
		eth_dev->rx_pkt_burst = hns3_dummy_rxtx_burst;
		eth_dev->tx_pkt_burst = hns3_dummy_rxtx_burst;
		eth_dev->tx_pkt_prepare = hns3_dummy_rxtx_burst;
	}
}
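
/*
 * When the port is not fully started or a reset is in progress, the dummy
 * burst function above is installed for both Rx and Tx, so application
 * threads that keep calling the burst APIs simply receive/send nothing
 * instead of touching hardware queues that are being reinitialized.
 */
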
void
hns3_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		  struct rte_eth_rxq_info *qinfo)
{
	struct hns3_rx_queue *rxq = dev->data->rx_queues[queue_id];

	qinfo->mp = rxq->mb_pool;
	qinfo->nb_desc = rxq->nb_rx_desc;
	qinfo->scattered_rx = dev->data->scattered_rx;

	/*
	 * If there are no available Rx buffer descriptors, incoming packets
	 * are always dropped by hardware based on the hns3 network engine.
	 */
	qinfo->conf.rx_drop_en = 1;
	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
}

void
hns3_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		  struct rte_eth_txq_info *qinfo)
{
	struct hns3_tx_queue *txq = dev->data->tx_queues[queue_id];

	qinfo->nb_desc = txq->nb_tx_desc;
	qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}
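
/*
 * The two helpers above back the rte_eth_rx_queue_info_get() and
 * rte_eth_tx_queue_info_get() ethdev calls, letting applications inspect the
 * configuration the driver actually applied to a queue.
 */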