1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2019 Hisilicon Limited.
11 #include <rte_bus_pci.h>
12 #include <rte_byteorder.h>
13 #include <rte_common.h>
14 #include <rte_cycles.h>
17 #include <rte_ether.h>
18 #include <rte_vxlan.h>
19 #include <rte_ethdev_driver.h>
24 #include <rte_malloc.h>
27 #include "hns3_ethdev.h"
28 #include "hns3_rxtx.h"
29 #include "hns3_regs.h"
30 #include "hns3_logs.h"
32 #define HNS3_CFG_DESC_NUM(num) ((num) / 8 - 1)
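/*
 * Editorial note (assumption about the register encoding): the BD number
 * registers appear to take the ring size in units of 8 descriptors, minus
 * one. For example, a ring of 1024 descriptors would be programmed as
 * 1024 / 8 - 1 = 127.
 */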
33 #define DEFAULT_RX_FREE_THRESH 32
36 hns3_rx_queue_release_mbufs(struct hns3_rx_queue *rxq)
40 /* Note: Fake rx queue will not enter here */
42 for (i = 0; i < rxq->nb_rx_desc; i++) {
43 if (rxq->sw_ring[i].mbuf) {
44 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
45 rxq->sw_ring[i].mbuf = NULL;
52 hns3_tx_queue_release_mbufs(struct hns3_tx_queue *txq)
56 /* Note: Fake tx queue will not enter here */
58 for (i = 0; i < txq->nb_tx_desc; i++) {
59 if (txq->sw_ring[i].mbuf) {
60 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
61 txq->sw_ring[i].mbuf = NULL;
68 hns3_rx_queue_release(void *queue)
70 struct hns3_rx_queue *rxq = queue;
72 hns3_rx_queue_release_mbufs(rxq);
74 rte_memzone_free(rxq->mz);
76 rte_free(rxq->sw_ring);
82 hns3_tx_queue_release(void *queue)
84 struct hns3_tx_queue *txq = queue;
86 hns3_tx_queue_release_mbufs(txq);
88 rte_memzone_free(txq->mz);
90 rte_free(txq->sw_ring);
96 hns3_dev_rx_queue_release(void *queue)
98 struct hns3_rx_queue *rxq = queue;
99 struct hns3_adapter *hns;
105 rte_spinlock_lock(&hns->hw.lock);
106 hns3_rx_queue_release(queue);
107 rte_spinlock_unlock(&hns->hw.lock);
111 hns3_dev_tx_queue_release(void *queue)
113 struct hns3_tx_queue *txq = queue;
114 struct hns3_adapter *hns;
120 rte_spinlock_lock(&hns->hw.lock);
121 hns3_tx_queue_release(queue);
122 rte_spinlock_unlock(&hns->hw.lock);
126 hns3_fake_rx_queue_release(struct hns3_rx_queue *queue)
128 struct hns3_rx_queue *rxq = queue;
129 struct hns3_adapter *hns;
139 if (hw->fkq_data.rx_queues[idx]) {
140 hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);
141 hw->fkq_data.rx_queues[idx] = NULL;
144 /* free fake rx queue arrays */
145 if (idx == (hw->fkq_data.nb_fake_rx_queues - 1)) {
146 hw->fkq_data.nb_fake_rx_queues = 0;
147 rte_free(hw->fkq_data.rx_queues);
148 hw->fkq_data.rx_queues = NULL;
153 hns3_fake_tx_queue_release(struct hns3_tx_queue *queue)
155 struct hns3_tx_queue *txq = queue;
156 struct hns3_adapter *hns;
166 if (hw->fkq_data.tx_queues[idx]) {
167 hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);
168 hw->fkq_data.tx_queues[idx] = NULL;
171 /* free fake tx queue arrays */
172 if (idx == (hw->fkq_data.nb_fake_tx_queues - 1)) {
173 hw->fkq_data.nb_fake_tx_queues = 0;
174 rte_free(hw->fkq_data.tx_queues);
175 hw->fkq_data.tx_queues = NULL;
180 hns3_free_rx_queues(struct rte_eth_dev *dev)
182 struct hns3_adapter *hns = dev->data->dev_private;
183 struct hns3_fake_queue_data *fkq_data;
184 struct hns3_hw *hw = &hns->hw;
188 nb_rx_q = hw->data->nb_rx_queues;
189 for (i = 0; i < nb_rx_q; i++) {
190 if (dev->data->rx_queues[i]) {
191 hns3_rx_queue_release(dev->data->rx_queues[i]);
192 dev->data->rx_queues[i] = NULL;
196 /* Free fake Rx queues */
197 fkq_data = &hw->fkq_data;
198 for (i = 0; i < fkq_data->nb_fake_rx_queues; i++) {
199 if (fkq_data->rx_queues[i])
200 hns3_fake_rx_queue_release(fkq_data->rx_queues[i]);
205 hns3_free_tx_queues(struct rte_eth_dev *dev)
207 struct hns3_adapter *hns = dev->data->dev_private;
208 struct hns3_fake_queue_data *fkq_data;
209 struct hns3_hw *hw = &hns->hw;
213 nb_tx_q = hw->data->nb_tx_queues;
214 for (i = 0; i < nb_tx_q; i++) {
215 if (dev->data->tx_queues[i]) {
216 hns3_tx_queue_release(dev->data->tx_queues[i]);
217 dev->data->tx_queues[i] = NULL;
221 /* Free fake Tx queues */
222 fkq_data = &hw->fkq_data;
223 for (i = 0; i < fkq_data->nb_fake_tx_queues; i++) {
224 if (fkq_data->tx_queues[i])
225 hns3_fake_tx_queue_release(fkq_data->tx_queues[i]);
230 hns3_free_all_queues(struct rte_eth_dev *dev)
232 hns3_free_rx_queues(dev);
233 hns3_free_tx_queues(dev);
237 hns3_alloc_rx_queue_mbufs(struct hns3_hw *hw, struct hns3_rx_queue *rxq)
239 struct rte_mbuf *mbuf;
243 for (i = 0; i < rxq->nb_rx_desc; i++) {
244 mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
245 if (unlikely(mbuf == NULL)) {
246 hns3_err(hw, "Failed to allocate RXD[%d] for rx queue!",
248 hns3_rx_queue_release_mbufs(rxq);
252 rte_mbuf_refcnt_set(mbuf, 1);
254 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
256 mbuf->port = rxq->port_id;
258 rxq->sw_ring[i].mbuf = mbuf;
259 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
260 rxq->rx_ring[i].addr = dma_addr;
261 rxq->rx_ring[i].rx.bd_base_info = 0;
268 hns3_buf_size2type(uint32_t buf_size)
274 bd_size_type = HNS3_BD_SIZE_512_TYPE;
277 bd_size_type = HNS3_BD_SIZE_1024_TYPE;
280 bd_size_type = HNS3_BD_SIZE_4096_TYPE;
283 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
290 hns3_init_rx_queue_hw(struct hns3_rx_queue *rxq)
292 uint32_t rx_buf_len = rxq->rx_buf_len;
293 uint64_t dma_addr = rxq->rx_ring_phys_addr;
295 hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_L_REG, (uint32_t)dma_addr);
296 hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_H_REG,
297 (uint32_t)((dma_addr >> 31) >> 1));
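/*
 * Note: ((dma_addr >> 31) >> 1) extracts the upper 32 bits of the 64-bit
 * DMA address; the BASEADDR_L/BASEADDR_H register pair together holds the
 * full ring base address.
 */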
299 hns3_write_dev(rxq, HNS3_RING_RX_BD_LEN_REG,
300 hns3_buf_size2type(rx_buf_len));
301 hns3_write_dev(rxq, HNS3_RING_RX_BD_NUM_REG,
302 HNS3_CFG_DESC_NUM(rxq->nb_rx_desc));
306 hns3_init_tx_queue_hw(struct hns3_tx_queue *txq)
308 uint64_t dma_addr = txq->tx_ring_phys_addr;
310 hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_L_REG, (uint32_t)dma_addr);
311 hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_H_REG,
312 (uint32_t)((dma_addr >> 31) >> 1));
314 hns3_write_dev(txq, HNS3_RING_TX_BD_NUM_REG,
315 HNS3_CFG_DESC_NUM(txq->nb_tx_desc));
319 hns3_update_all_queues_pvid_state(struct hns3_hw *hw)
321 uint16_t nb_rx_q = hw->data->nb_rx_queues;
322 uint16_t nb_tx_q = hw->data->nb_tx_queues;
323 struct hns3_rx_queue *rxq;
324 struct hns3_tx_queue *txq;
328 pvid_state = hw->port_base_vlan_cfg.state;
329 for (i = 0; i < hw->cfg_max_queues; i++) {
331 rxq = hw->data->rx_queues[i];
333 rxq->pvid_state = pvid_state;
336 txq = hw->data->tx_queues[i];
338 txq->pvid_state = pvid_state;
344 hns3_enable_all_queues(struct hns3_hw *hw, bool en)
346 uint16_t nb_rx_q = hw->data->nb_rx_queues;
347 uint16_t nb_tx_q = hw->data->nb_tx_queues;
348 struct hns3_rx_queue *rxq;
349 struct hns3_tx_queue *txq;
353 for (i = 0; i < hw->cfg_max_queues; i++) {
355 rxq = hw->data->rx_queues[i];
357 rxq = hw->fkq_data.rx_queues[i - nb_rx_q];
359 txq = hw->data->tx_queues[i];
361 txq = hw->fkq_data.tx_queues[i - nb_tx_q];
362 if (rxq == NULL || txq == NULL ||
363 (en && (rxq->rx_deferred_start || txq->tx_deferred_start)))
366 rcb_reg = hns3_read_dev(rxq, HNS3_RING_EN_REG);
368 rcb_reg |= BIT(HNS3_RING_EN_B);
370 rcb_reg &= ~BIT(HNS3_RING_EN_B);
371 hns3_write_dev(rxq, HNS3_RING_EN_REG, rcb_reg);
376 hns3_tqp_enable(struct hns3_hw *hw, uint16_t queue_id, bool enable)
378 struct hns3_cfg_com_tqp_queue_cmd *req;
379 struct hns3_cmd_desc desc;
382 req = (struct hns3_cfg_com_tqp_queue_cmd *)desc.data;
384 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_COM_TQP_QUEUE, false);
385 req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
387 hns3_set_bit(req->enable, HNS3_TQP_ENABLE_B, enable ? 1 : 0);
389 ret = hns3_cmd_send(hw, &desc, 1);
391 hns3_err(hw, "TQP enable failed, ret = %d", ret);
397 hns3_send_reset_tqp_cmd(struct hns3_hw *hw, uint16_t queue_id, bool enable)
399 struct hns3_reset_tqp_queue_cmd *req;
400 struct hns3_cmd_desc desc;
403 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, false);
405 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
406 req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
407 hns3_set_bit(req->reset_req, HNS3_TQP_RESET_B, enable ? 1 : 0);
409 ret = hns3_cmd_send(hw, &desc, 1);
411 hns3_err(hw, "Send tqp reset cmd error, ret = %d", ret);
417 hns3_get_reset_status(struct hns3_hw *hw, uint16_t queue_id)
419 struct hns3_reset_tqp_queue_cmd *req;
420 struct hns3_cmd_desc desc;
423 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, true);
425 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
426 req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
428 ret = hns3_cmd_send(hw, &desc, 1);
430 hns3_err(hw, "Get reset status error, ret = %d", ret);
434 return hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B);
438 hns3_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
440 #define HNS3_TQP_RESET_TRY_MS 200
445 ret = hns3_tqp_enable(hw, queue_id, false);
450 * In the current version, VF is not supported when the PF is driven by the
451 * DPDK driver; all task queue pairs are mapped to the PF function, so the
452 * PF's queue id is equal to the global queue id in the PF range.
454 ret = hns3_send_reset_tqp_cmd(hw, queue_id, true);
456 hns3_err(hw, "Send reset tqp cmd fail, ret = %d", ret);
460 end = get_timeofday_ms() + HNS3_TQP_RESET_TRY_MS;
462 /* Wait for tqp hw reset */
463 rte_delay_ms(HNS3_POLL_RESPONE_MS);
464 reset_status = hns3_get_reset_status(hw, queue_id);
469 } while (get_timeofday_ms() < end);
472 hns3_err(hw, "Reset TQP failed, ret = %d", ret);
476 ret = hns3_send_reset_tqp_cmd(hw, queue_id, false);
478 hns3_err(hw, "Failed to deassert the soft reset, ret = %d", ret);
484 hns3vf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
489 /* Disable the VF's queue before sending the queue reset msg to the PF */
490 ret = hns3_tqp_enable(hw, queue_id, false);
494 memcpy(msg_data, &queue_id, sizeof(uint16_t));
496 return hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data,
497 sizeof(msg_data), true, NULL, 0);
501 hns3_reset_queue(struct hns3_adapter *hns, uint16_t queue_id)
503 struct hns3_hw *hw = &hns->hw;
505 return hns3vf_reset_tqp(hw, queue_id);
507 return hns3_reset_tqp(hw, queue_id);
511 hns3_reset_all_queues(struct hns3_adapter *hns)
513 struct hns3_hw *hw = &hns->hw;
516 for (i = 0; i < hw->cfg_max_queues; i++) {
517 ret = hns3_reset_queue(hns, i);
519 hns3_err(hw, "Failed to reset No.%d queue: %d", i, ret);
527 hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
528 uint8_t gl_idx, uint16_t gl_value)
530 uint32_t offset[] = {HNS3_TQP_INTR_GL0_REG,
531 HNS3_TQP_INTR_GL1_REG,
532 HNS3_TQP_INTR_GL2_REG};
533 uint32_t addr, value;
535 if (gl_idx >= RTE_DIM(offset) || gl_value > HNS3_TQP_INTR_GL_MAX)
538 addr = offset[gl_idx] + queue_id * HNS3_TQP_INTR_REG_SIZE;
539 if (hw->intr.gl_unit == HNS3_INTR_COALESCE_GL_UINT_1US)
540 value = gl_value | HNS3_TQP_INTR_GL_UNIT_1US;
542 value = HNS3_GL_USEC_TO_REG(gl_value);
544 hns3_write_dev(hw, addr, value);
548 hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id, uint16_t rl_value)
550 uint32_t addr, value;
552 if (rl_value > HNS3_TQP_INTR_RL_MAX)
555 addr = HNS3_TQP_INTR_RL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
556 value = HNS3_RL_USEC_TO_REG(rl_value);
558 value |= HNS3_TQP_INTR_RL_ENABLE_MASK;
560 hns3_write_dev(hw, addr, value);
564 hns3_set_queue_intr_ql(struct hns3_hw *hw, uint16_t queue_id, uint16_t ql_value)
568 if (hw->intr.coalesce_mode == HNS3_INTR_COALESCE_NON_QL)
571 addr = HNS3_TQP_INTR_TX_QL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
572 hns3_write_dev(hw, addr, ql_value);
574 addr = HNS3_TQP_INTR_RX_QL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
575 hns3_write_dev(hw, addr, ql_value);
579 hns3_queue_intr_enable(struct hns3_hw *hw, uint16_t queue_id, bool en)
581 uint32_t addr, value;
583 addr = HNS3_TQP_INTR_CTRL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
586 hns3_write_dev(hw, addr, value);
590 * Enable all rx queue interrupts when in interrupt rx mode.
591 * This API is called before enabling queue rx&tx (in normal start or reset
592 * recovery scenarios); it restores the hardware rx queue interrupt enable
596 hns3_dev_all_rx_queue_intr_enable(struct hns3_hw *hw, bool en)
598 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
599 uint16_t nb_rx_q = hw->data->nb_rx_queues;
602 if (dev->data->dev_conf.intr_conf.rxq == 0)
605 for (i = 0; i < nb_rx_q; i++)
606 hns3_queue_intr_enable(hw, i, en);
610 hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
612 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
613 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
614 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
616 if (dev->data->dev_conf.intr_conf.rxq == 0)
619 hns3_queue_intr_enable(hw, queue_id, true);
621 return rte_intr_ack(intr_handle);
625 hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
627 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
629 if (dev->data->dev_conf.intr_conf.rxq == 0)
632 hns3_queue_intr_enable(hw, queue_id, false);
638 hns3_dev_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
640 struct hns3_hw *hw = &hns->hw;
641 struct hns3_rx_queue *rxq;
644 PMD_INIT_FUNC_TRACE();
646 rxq = (struct hns3_rx_queue *)hw->data->rx_queues[idx];
647 ret = hns3_alloc_rx_queue_mbufs(hw, rxq);
649 hns3_err(hw, "Failed to alloc mbuf for No.%d rx queue: %d",
654 rxq->next_to_use = 0;
655 rxq->rx_free_hold = 0;
656 hns3_init_rx_queue_hw(rxq);
662 hns3_fake_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
664 struct hns3_hw *hw = &hns->hw;
665 struct hns3_rx_queue *rxq;
667 rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[idx];
668 rxq->next_to_use = 0;
669 rxq->rx_free_hold = 0;
670 hns3_init_rx_queue_hw(rxq);
674 hns3_init_tx_queue(struct hns3_tx_queue *queue)
676 struct hns3_tx_queue *txq = queue;
677 struct hns3_desc *desc;
682 for (i = 0; i < txq->nb_tx_desc; i++) {
683 desc->tx.tp_fe_sc_vld_ra_ri = 0;
687 txq->next_to_use = 0;
688 txq->next_to_clean = 0;
689 txq->tx_bd_ready = txq->nb_tx_desc - 1;
690 hns3_init_tx_queue_hw(txq);
694 hns3_dev_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)
696 struct hns3_hw *hw = &hns->hw;
697 struct hns3_tx_queue *txq;
699 txq = (struct hns3_tx_queue *)hw->data->tx_queues[idx];
700 hns3_init_tx_queue(txq);
704 hns3_fake_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)
706 struct hns3_hw *hw = &hns->hw;
707 struct hns3_tx_queue *txq;
709 txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[idx];
710 hns3_init_tx_queue(txq);
714 hns3_init_tx_ring_tc(struct hns3_adapter *hns)
716 struct hns3_hw *hw = &hns->hw;
717 struct hns3_tx_queue *txq;
720 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
721 struct hns3_tc_queue_info *tc_queue = &hw->tc_queue[i];
724 if (!tc_queue->enable)
727 for (j = 0; j < tc_queue->tqp_count; j++) {
728 num = tc_queue->tqp_offset + j;
729 txq = (struct hns3_tx_queue *)hw->data->tx_queues[num];
733 hns3_write_dev(txq, HNS3_RING_TX_TC_REG, tc_queue->tc);
739 hns3_start_rx_queues(struct hns3_adapter *hns)
741 struct hns3_hw *hw = &hns->hw;
742 struct hns3_rx_queue *rxq;
746 /* Initialize RSS for queues */
747 ret = hns3_config_rss(hns);
749 hns3_err(hw, "Failed to configure RSS: %d", ret);
753 for (i = 0; i < hw->data->nb_rx_queues; i++) {
754 rxq = (struct hns3_rx_queue *)hw->data->rx_queues[i];
755 if (rxq == NULL || rxq->rx_deferred_start)
757 ret = hns3_dev_rx_queue_start(hns, i);
759 hns3_err(hw, "Failed to start No.%d rx queue: %d", i,
765 for (i = 0; i < hw->fkq_data.nb_fake_rx_queues; i++) {
766 rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[i];
767 if (rxq == NULL || rxq->rx_deferred_start)
769 hns3_fake_rx_queue_start(hns, i);
774 for (j = 0; j < i; j++) {
775 rxq = (struct hns3_rx_queue *)hw->data->rx_queues[j];
776 hns3_rx_queue_release_mbufs(rxq);
783 hns3_start_tx_queues(struct hns3_adapter *hns)
785 struct hns3_hw *hw = &hns->hw;
786 struct hns3_tx_queue *txq;
789 for (i = 0; i < hw->data->nb_tx_queues; i++) {
790 txq = (struct hns3_tx_queue *)hw->data->tx_queues[i];
791 if (txq == NULL || txq->tx_deferred_start)
793 hns3_dev_tx_queue_start(hns, i);
796 for (i = 0; i < hw->fkq_data.nb_fake_tx_queues; i++) {
797 txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[i];
798 if (txq == NULL || txq->tx_deferred_start)
800 hns3_fake_tx_queue_start(hns, i);
803 hns3_init_tx_ring_tc(hns);
808 * Note: this only initializes and sets up the queues; it does not enable queue rx&tx.
811 hns3_start_queues(struct hns3_adapter *hns, bool reset_queue)
813 struct hns3_hw *hw = &hns->hw;
817 ret = hns3_reset_all_queues(hns);
819 hns3_err(hw, "Failed to reset all queues %d", ret);
824 ret = hns3_start_rx_queues(hns);
826 hns3_err(hw, "Failed to start rx queues: %d", ret);
830 hns3_start_tx_queues(hns);
836 hns3_stop_queues(struct hns3_adapter *hns, bool reset_queue)
838 struct hns3_hw *hw = &hns->hw;
841 hns3_enable_all_queues(hw, false);
843 ret = hns3_reset_all_queues(hns);
845 hns3_err(hw, "Failed to reset all queues %d", ret);
853 hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev,
854 struct hns3_queue_info *q_info)
856 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
857 const struct rte_memzone *rx_mz;
858 struct hns3_rx_queue *rxq;
859 unsigned int rx_desc;
861 rxq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_rx_queue),
862 RTE_CACHE_LINE_SIZE, q_info->socket_id);
864 hns3_err(hw, "Failed to allocate memory for No.%d rx ring!",
869 /* Allocate rx ring hardware descriptors. */
870 rxq->queue_id = q_info->idx;
871 rxq->nb_rx_desc = q_info->nb_desc;
872 rx_desc = rxq->nb_rx_desc * sizeof(struct hns3_desc);
873 rx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
874 rx_desc, HNS3_RING_BASE_ALIGN,
877 hns3_err(hw, "Failed to reserve DMA memory for No.%d rx ring!",
879 hns3_rx_queue_release(rxq);
883 rxq->rx_ring = (struct hns3_desc *)rx_mz->addr;
884 rxq->rx_ring_phys_addr = rx_mz->iova;
886 hns3_dbg(hw, "No.%d rx descriptors iova 0x%" PRIx64, q_info->idx,
887 rxq->rx_ring_phys_addr);
893 hns3_fake_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
894 uint16_t nb_desc, unsigned int socket_id)
896 struct hns3_adapter *hns = dev->data->dev_private;
897 struct hns3_hw *hw = &hns->hw;
898 struct hns3_queue_info q_info;
899 struct hns3_rx_queue *rxq;
902 if (hw->fkq_data.rx_queues[idx]) {
903 hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);
904 hw->fkq_data.rx_queues[idx] = NULL;
908 q_info.socket_id = socket_id;
909 q_info.nb_desc = nb_desc;
910 q_info.type = "hns3 fake RX queue";
911 q_info.ring_name = "rx_fake_ring";
912 rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
914 hns3_err(hw, "Failed to setup No.%d fake rx ring.", idx);
918 /* No need to allocate sw_ring, because upper-layer applications don't use it */
922 rxq->rx_deferred_start = false;
923 rxq->port_id = dev->data->port_id;
924 rxq->configured = true;
925 nb_rx_q = dev->data->nb_rx_queues;
926 rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
927 (nb_rx_q + idx) * HNS3_TQP_REG_SIZE);
928 rxq->rx_buf_len = HNS3_MIN_BD_BUF_SIZE;
930 rte_spinlock_lock(&hw->lock);
931 hw->fkq_data.rx_queues[idx] = rxq;
932 rte_spinlock_unlock(&hw->lock);
938 hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev,
939 struct hns3_queue_info *q_info)
941 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
942 const struct rte_memzone *tx_mz;
943 struct hns3_tx_queue *txq;
944 struct hns3_desc *desc;
945 unsigned int tx_desc;
948 txq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_tx_queue),
949 RTE_CACHE_LINE_SIZE, q_info->socket_id);
951 hns3_err(hw, "Failed to allocate memory for No.%d tx ring!",
956 /* Allocate tx ring hardware descriptors. */
957 txq->queue_id = q_info->idx;
958 txq->nb_tx_desc = q_info->nb_desc;
959 tx_desc = txq->nb_tx_desc * sizeof(struct hns3_desc);
960 tx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
961 tx_desc, HNS3_RING_BASE_ALIGN,
964 hns3_err(hw, "Failed to reserve DMA memory for No.%d tx ring!",
966 hns3_tx_queue_release(txq);
970 txq->tx_ring = (struct hns3_desc *)tx_mz->addr;
971 txq->tx_ring_phys_addr = tx_mz->iova;
973 hns3_dbg(hw, "No.%d tx descriptors iova 0x%" PRIx64, q_info->idx,
974 txq->tx_ring_phys_addr);
978 for (i = 0; i < txq->nb_tx_desc; i++) {
979 desc->tx.tp_fe_sc_vld_ra_ri = 0;
987 hns3_fake_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
988 uint16_t nb_desc, unsigned int socket_id)
990 struct hns3_adapter *hns = dev->data->dev_private;
991 struct hns3_hw *hw = &hns->hw;
992 struct hns3_queue_info q_info;
993 struct hns3_tx_queue *txq;
996 if (hw->fkq_data.tx_queues[idx] != NULL) {
997 hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);
998 hw->fkq_data.tx_queues[idx] = NULL;
1002 q_info.socket_id = socket_id;
1003 q_info.nb_desc = nb_desc;
1004 q_info.type = "hns3 fake TX queue";
1005 q_info.ring_name = "tx_fake_ring";
1006 txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
1008 hns3_err(hw, "Failed to setup No.%d fake tx ring.", idx);
1012 /* No need to allocate sw_ring, because upper-layer applications don't use it */
1013 txq->sw_ring = NULL;
1016 txq->tx_deferred_start = false;
1017 txq->port_id = dev->data->port_id;
1018 txq->configured = true;
1019 nb_tx_q = dev->data->nb_tx_queues;
1020 txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
1021 (nb_tx_q + idx) * HNS3_TQP_REG_SIZE);
1023 rte_spinlock_lock(&hw->lock);
1024 hw->fkq_data.tx_queues[idx] = txq;
1025 rte_spinlock_unlock(&hw->lock);
1031 hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
1033 uint16_t old_nb_queues = hw->fkq_data.nb_fake_rx_queues;
1037 if (hw->fkq_data.rx_queues == NULL && nb_queues != 0) {
1038 /* first time configuration */
1040 size = sizeof(hw->fkq_data.rx_queues[0]) * nb_queues;
1041 hw->fkq_data.rx_queues = rte_zmalloc("fake_rx_queues", size,
1042 RTE_CACHE_LINE_SIZE);
1043 if (hw->fkq_data.rx_queues == NULL) {
1044 hw->fkq_data.nb_fake_rx_queues = 0;
1047 } else if (hw->fkq_data.rx_queues != NULL && nb_queues != 0) {
1049 rxq = hw->fkq_data.rx_queues;
1050 for (i = nb_queues; i < old_nb_queues; i++)
1051 hns3_dev_rx_queue_release(rxq[i]);
1053 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
1054 RTE_CACHE_LINE_SIZE);
1057 if (nb_queues > old_nb_queues) {
1058 uint16_t new_qs = nb_queues - old_nb_queues;
1059 memset(rxq + old_nb_queues, 0, sizeof(rxq[0]) * new_qs);
1062 hw->fkq_data.rx_queues = rxq;
1063 } else if (hw->fkq_data.rx_queues != NULL && nb_queues == 0) {
1064 rxq = hw->fkq_data.rx_queues;
1065 for (i = nb_queues; i < old_nb_queues; i++)
1066 hns3_dev_rx_queue_release(rxq[i]);
1068 rte_free(hw->fkq_data.rx_queues);
1069 hw->fkq_data.rx_queues = NULL;
1072 hw->fkq_data.nb_fake_rx_queues = nb_queues;
1078 hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
1080 uint16_t old_nb_queues = hw->fkq_data.nb_fake_tx_queues;
1084 if (hw->fkq_data.tx_queues == NULL && nb_queues != 0) {
1085 /* first time configuration */
1087 size = sizeof(hw->fkq_data.tx_queues[0]) * nb_queues;
1088 hw->fkq_data.tx_queues = rte_zmalloc("fake_tx_queues", size,
1089 RTE_CACHE_LINE_SIZE);
1090 if (hw->fkq_data.tx_queues == NULL) {
1091 hw->fkq_data.nb_fake_tx_queues = 0;
1094 } else if (hw->fkq_data.tx_queues != NULL && nb_queues != 0) {
1096 txq = hw->fkq_data.tx_queues;
1097 for (i = nb_queues; i < old_nb_queues; i++)
1098 hns3_dev_tx_queue_release(txq[i]);
1099 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
1100 RTE_CACHE_LINE_SIZE);
1103 if (nb_queues > old_nb_queues) {
1104 uint16_t new_qs = nb_queues - old_nb_queues;
1105 memset(txq + old_nb_queues, 0, sizeof(txq[0]) * new_qs);
1108 hw->fkq_data.tx_queues = txq;
1109 } else if (hw->fkq_data.tx_queues != NULL && nb_queues == 0) {
1110 txq = hw->fkq_data.tx_queues;
1111 for (i = nb_queues; i < old_nb_queues; i++)
1112 hns3_dev_tx_queue_release(txq[i]);
1114 rte_free(hw->fkq_data.tx_queues);
1115 hw->fkq_data.tx_queues = NULL;
1117 hw->fkq_data.nb_fake_tx_queues = nb_queues;
1123 hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
1126 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1127 uint16_t rx_need_add_nb_q;
1128 uint16_t tx_need_add_nb_q;
1133 /* Setup new number of fake RX/TX queues and reconfigure device. */
1134 hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q);
1135 rx_need_add_nb_q = hw->cfg_max_queues - nb_rx_q;
1136 tx_need_add_nb_q = hw->cfg_max_queues - nb_tx_q;
1137 ret = hns3_fake_rx_queue_config(hw, rx_need_add_nb_q);
1139 hns3_err(hw, "Failed to configure fake rx queues: %d", ret);
1140 goto cfg_fake_rx_q_fail;
1143 ret = hns3_fake_tx_queue_config(hw, tx_need_add_nb_q);
1145 hns3_err(hw, "Failed to configure fake tx queues: %d", ret);
1146 goto cfg_fake_tx_q_fail;
1149 /* Allocate and set up fake RX queue per Ethernet port. */
1150 port_id = hw->data->port_id;
1151 for (q = 0; q < rx_need_add_nb_q; q++) {
1152 ret = hns3_fake_rx_queue_setup(dev, q, HNS3_MIN_RING_DESC,
1153 rte_eth_dev_socket_id(port_id));
1155 goto setup_fake_rx_q_fail;
1158 /* Allocate and set up fake TX queue per Ethernet port. */
1159 for (q = 0; q < tx_need_add_nb_q; q++) {
1160 ret = hns3_fake_tx_queue_setup(dev, q, HNS3_MIN_RING_DESC,
1161 rte_eth_dev_socket_id(port_id));
1163 goto setup_fake_tx_q_fail;
1168 setup_fake_tx_q_fail:
1169 setup_fake_rx_q_fail:
1170 (void)hns3_fake_tx_queue_config(hw, 0);
1172 (void)hns3_fake_rx_queue_config(hw, 0);
1174 hw->cfg_max_queues = 0;
1180 hns3_dev_release_mbufs(struct hns3_adapter *hns)
1182 struct rte_eth_dev_data *dev_data = hns->hw.data;
1183 struct hns3_rx_queue *rxq;
1184 struct hns3_tx_queue *txq;
1187 if (dev_data->rx_queues)
1188 for (i = 0; i < dev_data->nb_rx_queues; i++) {
1189 rxq = dev_data->rx_queues[i];
1190 if (rxq == NULL || rxq->rx_deferred_start)
1192 hns3_rx_queue_release_mbufs(rxq);
1195 if (dev_data->tx_queues)
1196 for (i = 0; i < dev_data->nb_tx_queues; i++) {
1197 txq = dev_data->tx_queues[i];
1198 if (txq == NULL || txq->tx_deferred_start)
1200 hns3_tx_queue_release_mbufs(txq);
1205 hns3_rx_buf_len_calc(struct rte_mempool *mp, uint16_t *rx_buf_len)
1207 uint16_t vld_buf_size;
1208 uint16_t num_hw_specs;
1212 * The hns3 network engine only supports 4 typical buffer size
1213 * specifications; the buffer size affects the max packet_len and the max
1214 * number of segments when hw gro is turned on in the receive side. The
1215 * relationship between them is as follows:
1216 * rx_buf_size | max_gro_pkt_len | max_gro_nb_seg
1217 * ---------------------|-------------------|----------------
1218 * HNS3_4K_BD_BUF_SIZE | 60KB | 15
1219 * HNS3_2K_BD_BUF_SIZE | 62KB | 31
1220 * HNS3_1K_BD_BUF_SIZE | 63KB | 63
1221 * HNS3_512_BD_BUF_SIZE | 31.5KB | 63
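 * For example (illustrative): a mempool whose data room is
 * 2048 + RTE_PKTMBUF_HEADROOM bytes yields vld_buf_size = 2048 below,
 * so HNS3_2K_BD_BUF_SIZE is selected as the rx_buf_len.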
1223 static const uint16_t hw_rx_buf_size[] = {
1224 HNS3_4K_BD_BUF_SIZE,
1225 HNS3_2K_BD_BUF_SIZE,
1226 HNS3_1K_BD_BUF_SIZE,
1227 HNS3_512_BD_BUF_SIZE
1230 vld_buf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
1231 RTE_PKTMBUF_HEADROOM);
1233 if (vld_buf_size < HNS3_MIN_BD_BUF_SIZE)
1236 num_hw_specs = RTE_DIM(hw_rx_buf_size);
1237 for (i = 0; i < num_hw_specs; i++) {
1238 if (vld_buf_size >= hw_rx_buf_size[i]) {
1239 *rx_buf_len = hw_rx_buf_size[i];
1247 hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
1248 unsigned int socket_id, const struct rte_eth_rxconf *conf,
1249 struct rte_mempool *mp)
1251 struct hns3_adapter *hns = dev->data->dev_private;
1252 struct hns3_hw *hw = &hns->hw;
1253 struct hns3_queue_info q_info;
1254 struct hns3_rx_queue *rxq;
1255 uint16_t rx_buf_size;
1258 if (dev->data->dev_started) {
1259 hns3_err(hw, "rx_queue_setup after dev_start not supported");
1263 if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||
1264 nb_desc % HNS3_ALIGN_RING_DESC) {
1265 hns3_err(hw, "Number (%u) of rx descriptors is invalid",
1270 if (conf->rx_drop_en == 0)
1271 hns3_warn(hw, "if there are no available Rx descriptors, "
1272 "incoming packets are always dropped. The input parameter"
1273 " conf->rx_drop_en(%u) is ineffective.",
1276 if (dev->data->rx_queues[idx]) {
1277 hns3_rx_queue_release(dev->data->rx_queues[idx]);
1278 dev->data->rx_queues[idx] = NULL;
1282 q_info.socket_id = socket_id;
1283 q_info.nb_desc = nb_desc;
1284 q_info.type = "hns3 RX queue";
1285 q_info.ring_name = "rx_ring";
1287 if (hns3_rx_buf_len_calc(mp, &rx_buf_size)) {
1288 hns3_err(hw, "rxq mbufs' data room size:%u is not enough! "
1289 "minimal data room size:%u.",
1290 rte_pktmbuf_data_room_size(mp),
1291 HNS3_MIN_BD_BUF_SIZE + RTE_PKTMBUF_HEADROOM);
1295 rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
1298 "Failed to alloc mem and reserve DMA mem for rx ring!");
1304 rxq->rx_free_thresh = (conf->rx_free_thresh > 0) ?
1305 conf->rx_free_thresh : HNS3_DEFAULT_RX_FREE_THRESH;
1306 rxq->rx_deferred_start = conf->rx_deferred_start;
1308 rx_entry_len = sizeof(struct hns3_entry) * rxq->nb_rx_desc;
1309 rxq->sw_ring = rte_zmalloc_socket("hns3 RX sw ring", rx_entry_len,
1310 RTE_CACHE_LINE_SIZE, socket_id);
1311 if (rxq->sw_ring == NULL) {
1312 hns3_err(hw, "Failed to allocate memory for rx sw ring!");
1313 hns3_rx_queue_release(rxq);
1317 rxq->next_to_use = 0;
1318 rxq->rx_free_hold = 0;
1319 rxq->pkt_first_seg = NULL;
1320 rxq->pkt_last_seg = NULL;
1321 rxq->port_id = dev->data->port_id;
1322 rxq->pvid_state = hw->port_base_vlan_cfg.state;
1323 rxq->configured = true;
1324 rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
1325 idx * HNS3_TQP_REG_SIZE);
1326 rxq->io_head_reg = (volatile void *)((char *)rxq->io_base +
1327 HNS3_RING_RX_HEAD_REG);
1328 rxq->rx_buf_len = rx_buf_size;
1330 rxq->pkt_len_errors = 0;
1331 rxq->l3_csum_erros = 0;
1332 rxq->l4_csum_erros = 0;
1333 rxq->ol3_csum_erros = 0;
1334 rxq->ol4_csum_erros = 0;
1336 /* CRC len set here is used for amending packet length */
1337 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
1338 rxq->crc_len = RTE_ETHER_CRC_LEN;
1342 rte_spinlock_lock(&hw->lock);
1343 dev->data->rx_queues[idx] = rxq;
1344 rte_spinlock_unlock(&hw->lock);
1349 static inline uint32_t
1350 rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint32_t ol_info)
1352 #define HNS3_L2TBL_NUM 4
1353 #define HNS3_L3TBL_NUM 16
1354 #define HNS3_L4TBL_NUM 16
1355 #define HNS3_OL3TBL_NUM 16
1356 #define HNS3_OL4TBL_NUM 16
1357 uint32_t pkt_type = 0;
1358 uint32_t l2id, l3id, l4id;
1359 uint32_t ol3id, ol4id;
1361 static const uint32_t l2table[HNS3_L2TBL_NUM] = {
1363 RTE_PTYPE_L2_ETHER_QINQ,
1364 RTE_PTYPE_L2_ETHER_VLAN,
1365 RTE_PTYPE_L2_ETHER_VLAN
1368 static const uint32_t l3table[HNS3_L3TBL_NUM] = {
1371 RTE_PTYPE_L2_ETHER_ARP,
1373 RTE_PTYPE_L3_IPV4_EXT,
1374 RTE_PTYPE_L3_IPV6_EXT,
1375 RTE_PTYPE_L2_ETHER_LLDP,
1376 0, 0, 0, 0, 0, 0, 0, 0, 0
1379 static const uint32_t l4table[HNS3_L4TBL_NUM] = {
1382 RTE_PTYPE_TUNNEL_GRE,
1386 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1389 static const uint32_t inner_l2table[HNS3_L2TBL_NUM] = {
1390 RTE_PTYPE_INNER_L2_ETHER,
1391 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1392 RTE_PTYPE_INNER_L2_ETHER_QINQ,
1396 static const uint32_t inner_l3table[HNS3_L3TBL_NUM] = {
1397 RTE_PTYPE_INNER_L3_IPV4,
1398 RTE_PTYPE_INNER_L3_IPV6,
1400 RTE_PTYPE_INNER_L2_ETHER,
1401 RTE_PTYPE_INNER_L3_IPV4_EXT,
1402 RTE_PTYPE_INNER_L3_IPV6_EXT,
1403 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1406 static const uint32_t inner_l4table[HNS3_L4TBL_NUM] = {
1407 RTE_PTYPE_INNER_L4_UDP,
1408 RTE_PTYPE_INNER_L4_TCP,
1409 RTE_PTYPE_TUNNEL_GRE,
1410 RTE_PTYPE_INNER_L4_SCTP,
1412 RTE_PTYPE_INNER_L4_ICMP,
1413 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1416 static const uint32_t ol3table[HNS3_OL3TBL_NUM] = {
1420 RTE_PTYPE_L3_IPV4_EXT,
1421 RTE_PTYPE_L3_IPV6_EXT,
1422 0, 0, 0, 0, 0, 0, 0, 0, 0,
1426 static const uint32_t ol4table[HNS3_OL4TBL_NUM] = {
1428 RTE_PTYPE_TUNNEL_VXLAN,
1429 RTE_PTYPE_TUNNEL_NVGRE,
1430 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1433 l2id = hns3_get_field(pkt_info, HNS3_RXD_STRP_TAGP_M,
1434 HNS3_RXD_STRP_TAGP_S);
1435 l3id = hns3_get_field(pkt_info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S);
1436 l4id = hns3_get_field(pkt_info, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S);
1437 ol3id = hns3_get_field(ol_info, HNS3_RXD_OL3ID_M, HNS3_RXD_OL3ID_S);
1438 ol4id = hns3_get_field(ol_info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
1440 if (ol4table[ol4id])
1441 pkt_type |= (inner_l2table[l2id] | inner_l3table[l3id] |
1442 inner_l4table[l4id] | ol3table[ol3id] |
1445 pkt_type |= (l2table[l2id] | l3table[l3id] | l4table[l4id]);
1450 hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1452 static const uint32_t ptypes[] = {
1454 RTE_PTYPE_L2_ETHER_VLAN,
1455 RTE_PTYPE_L2_ETHER_QINQ,
1456 RTE_PTYPE_L2_ETHER_LLDP,
1457 RTE_PTYPE_L2_ETHER_ARP,
1459 RTE_PTYPE_L3_IPV4_EXT,
1461 RTE_PTYPE_L3_IPV6_EXT,
1467 RTE_PTYPE_TUNNEL_GRE,
1471 if (dev->rx_pkt_burst == hns3_recv_pkts)
1478 hns3_handle_bdinfo(struct hns3_rx_queue *rxq, struct rte_mbuf *rxm,
1479 uint32_t bd_base_info, uint32_t l234_info,
1480 uint32_t *cksum_err)
1484 if (unlikely(l234_info & BIT(HNS3_RXD_L2E_B))) {
1489 if (unlikely(rxm->pkt_len == 0 ||
1490 (l234_info & BIT(HNS3_RXD_TRUNCAT_B)))) {
1491 rxq->pkt_len_errors++;
1495 if (bd_base_info & BIT(HNS3_RXD_L3L4P_B)) {
1496 if (unlikely(l234_info & BIT(HNS3_RXD_L3E_B))) {
1497 rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;
1498 rxq->l3_csum_erros++;
1499 tmp |= HNS3_L3_CKSUM_ERR;
1502 if (unlikely(l234_info & BIT(HNS3_RXD_L4E_B))) {
1503 rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
1504 rxq->l4_csum_erros++;
1505 tmp |= HNS3_L4_CKSUM_ERR;
1508 if (unlikely(l234_info & BIT(HNS3_RXD_OL3E_B))) {
1509 rxq->ol3_csum_erros++;
1510 tmp |= HNS3_OUTER_L3_CKSUM_ERR;
1513 if (unlikely(l234_info & BIT(HNS3_RXD_OL4E_B))) {
1514 rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
1515 rxq->ol4_csum_erros++;
1516 tmp |= HNS3_OUTER_L4_CKSUM_ERR;
1525 hns3_rx_set_cksum_flag(struct rte_mbuf *rxm, uint64_t packet_type,
1526 const uint32_t cksum_err)
1528 if (unlikely((packet_type & RTE_PTYPE_TUNNEL_MASK))) {
1529 if (likely(packet_type & RTE_PTYPE_INNER_L3_MASK) &&
1530 (cksum_err & HNS3_L3_CKSUM_ERR) == 0)
1531 rxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
1532 if (likely(packet_type & RTE_PTYPE_INNER_L4_MASK) &&
1533 (cksum_err & HNS3_L4_CKSUM_ERR) == 0)
1534 rxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
1535 if (likely(packet_type & RTE_PTYPE_L4_MASK) &&
1536 (cksum_err & HNS3_OUTER_L4_CKSUM_ERR) == 0)
1537 rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;
1539 if (likely(packet_type & RTE_PTYPE_L3_MASK) &&
1540 (cksum_err & HNS3_L3_CKSUM_ERR) == 0)
1541 rxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
1542 if (likely(packet_type & RTE_PTYPE_L4_MASK) &&
1543 (cksum_err & HNS3_L4_CKSUM_ERR) == 0)
1544 rxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
1549 hns3_rxd_to_vlan_tci(struct hns3_rx_queue *rxq, struct rte_mbuf *mb,
1550 uint32_t l234_info, const struct hns3_desc *rxd)
1552 #define HNS3_STRP_STATUS_NUM 0x4
1554 #define HNS3_NO_STRP_VLAN_VLD 0x0
1555 #define HNS3_INNER_STRP_VLAN_VLD 0x1
1556 #define HNS3_OUTER_STRP_VLAN_VLD 0x2
1557 uint32_t strip_status;
1558 uint32_t report_mode;
1561 * Due to a HW limitation, the vlan tag is always inserted into the RX
1562 * descriptor when the tag is stripped from the packet. The driver needs
1563 * to determine which tag to report to the mbuf according to the PVID
1564 * configuration and the vlan stripped status.
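 * For example (derived from the table below): with PVID enabled
 * (pvid_state == 1) and the hardware reporting that both tags were
 * stripped (strip_status == 3), only the inner tag is reported to the mbuf.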
1566 static const uint32_t report_type[][HNS3_STRP_STATUS_NUM] = {
1568 HNS3_NO_STRP_VLAN_VLD,
1569 HNS3_OUTER_STRP_VLAN_VLD,
1570 HNS3_INNER_STRP_VLAN_VLD,
1571 HNS3_OUTER_STRP_VLAN_VLD
1574 HNS3_NO_STRP_VLAN_VLD,
1575 HNS3_NO_STRP_VLAN_VLD,
1576 HNS3_NO_STRP_VLAN_VLD,
1577 HNS3_INNER_STRP_VLAN_VLD
1580 strip_status = hns3_get_field(l234_info, HNS3_RXD_STRP_TAGP_M,
1581 HNS3_RXD_STRP_TAGP_S);
1582 report_mode = report_type[rxq->pvid_state][strip_status];
1583 switch (report_mode) {
1584 case HNS3_NO_STRP_VLAN_VLD:
1587 case HNS3_INNER_STRP_VLAN_VLD:
1588 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1589 mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.vlan_tag);
1591 case HNS3_OUTER_STRP_VLAN_VLD:
1592 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1593 mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.ot_vlan_tag);
1599 recalculate_data_len(struct rte_mbuf *first_seg, struct rte_mbuf *last_seg,
1600 struct rte_mbuf *rxm, struct hns3_rx_queue *rxq,
1603 uint8_t crc_len = rxq->crc_len;
1605 if (data_len <= crc_len) {
1606 rte_pktmbuf_free_seg(rxm);
1607 first_seg->nb_segs--;
1608 last_seg->data_len = (uint16_t)(last_seg->data_len -
1609 (crc_len - data_len));
1610 last_seg->next = NULL;
1612 rxm->data_len = (uint16_t)(data_len - crc_len);
1616 hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1618 volatile struct hns3_desc *rx_ring; /* RX ring (desc) */
1619 volatile struct hns3_desc *rxdp; /* pointer of the current desc */
1620 struct hns3_rx_queue *rxq; /* RX queue */
1621 struct hns3_entry *sw_ring;
1622 struct hns3_entry *rxe;
1623 struct rte_mbuf *first_seg;
1624 struct rte_mbuf *last_seg;
1625 struct hns3_desc rxd;
1626 struct rte_mbuf *nmb; /* pointer of the new mbuf */
1627 struct rte_mbuf *rxm;
1628 struct rte_eth_dev *dev;
1629 uint32_t bd_base_info;
1646 rx_id = rxq->next_to_use;
1647 rx_ring = rxq->rx_ring;
1648 sw_ring = rxq->sw_ring;
1649 first_seg = rxq->pkt_first_seg;
1650 last_seg = rxq->pkt_last_seg;
1652 while (nb_rx < nb_pkts) {
1653 rxdp = &rx_ring[rx_id];
1654 bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
1655 if (unlikely(!hns3_get_bit(bd_base_info, HNS3_RXD_VLD_B)))
1658 * The interactive process between software and hardware of
1659 * receiving a new packet in hns3 network engine:
1660 * 1. Hardware network engine firstly writes the packet content
1661 * to the memory pointed to by the 'addr' field of the Rx Buffer
1662 * Descriptor, secondly fills the result of parsing the
1663 * packet, including the valid field, into the Rx Buffer
1664 * Descriptor in one write operation.
1665 * 2. Driver reads the Rx BD's valid field in the loop to check
1666 * whether it's valid, if valid then assign a new address to
1667 * the addr field, clear the valid field, get the other
1668 * information of the packet by parsing Rx BD's other fields,
1669 * finally write back the number of Rx BDs processed by the
1670 * driver to the HNS3_RING_RX_HEAD_REG register to inform
1672 * In the above process, the ordering is very important. We must
1673 * make sure that the CPU reads the Rx BD's other fields only after the
1676 * There are two types of re-ordering: compiler re-ordering and
1677 * CPU re-ordering under the ARMv8 architecture.
1678 * 1. we use volatile to deal with compiler re-ordering, so you
1679 * can see that rx_ring/rxdp are defined with volatile.
1680 * 2. we commonly use memory barrier to deal with CPU
1681 * re-ordering, but the cost is high.
1683 * In order to solve the high cost of using memory barrier, we
1684 * use the data dependency order under the ARMv8 architecture,
1687 * instr02: load B <- A
1688 * the instr02 will always execute after instr01.
1690 * To construct the data dependency ordering, we use the
1691 * following assignment:
1692 * rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -
1693 * (1u<<HNS3_RXD_VLD_B)]
1694 * Using the gcc compiler under the ARMv8 architecture, a related
1695 * assembly code example is as follows:
1696 * note: (1u << HNS3_RXD_VLD_B) equals 0x10
1697 * instr01: ldr w26, [x22, #28] --read bd_base_info
1698 * instr02: and w0, w26, #0x10 --calc bd_base_info & 0x10
1699 * instr03: sub w0, w0, #0x10 --calc (bd_base_info &
1701 * instr04: add x0, x22, x0, lsl #5 --calc copy source addr
1702 * instr05: ldp x2, x3, [x0]
1703 * instr06: stp x2, x3, [x29, #256] --copy BD's [0 ~ 15]B
1704 * instr07: ldp x4, x5, [x0, #16]
1705 * instr08: stp x4, x5, [x29, #272] --copy BD's [16 ~ 31]B
1706 * the instr05~08 depend on x0's value, x0 depends on w26's
1707 * value, w26 is the bd_base_info, and this forms the data
1708 * dependency ordering.
1709 * note: if BD is valid, (bd_base_info & (1u<<HNS3_RXD_VLD_B)) -
1710 * (1u<<HNS3_RXD_VLD_B) will always be zero, so the
1711 * assignment is correct.
1713 * So we use the data dependency ordering instead of memory
1714 * barrier to improve receive performance.
1716 rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -
1717 (1u << HNS3_RXD_VLD_B)];
1719 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1720 if (unlikely(nmb == NULL)) {
1721 dev = &rte_eth_devices[rxq->port_id];
1722 dev->data->rx_mbuf_alloc_failed++;
1727 rxe = &sw_ring[rx_id];
1729 if (unlikely(rx_id == rxq->nb_rx_desc))
1732 rte_prefetch0(sw_ring[rx_id].mbuf);
1733 if ((rx_id & 0x3) == 0) {
1734 rte_prefetch0(&rx_ring[rx_id]);
1735 rte_prefetch0(&sw_ring[rx_id]);
1741 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1742 rxdp->rx.bd_base_info = 0;
1743 rxdp->addr = dma_addr;
1746 * Load the remaining descriptor data and extract the necessary fields.
1747 * The data size from the buffer descriptor may contain the CRC len,
1748 * which should be subtracted from the packet len.
1750 data_len = (uint16_t)(rte_le_to_cpu_16(rxd.rx.size));
1751 l234_info = rte_le_to_cpu_32(rxd.rx.l234_info);
1752 ol_info = rte_le_to_cpu_32(rxd.rx.ol_info);
1754 if (first_seg == NULL) {
1756 first_seg->nb_segs = 1;
1758 first_seg->nb_segs++;
1759 last_seg->next = rxm;
1762 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1763 rxm->data_len = data_len;
1765 if (!hns3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
1771 * The last buffer of the received packet. The packet len from
1772 * the buffer descriptor may contain the CRC len, which should be
1773 * subtracted, the same as for the data len.
1775 pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.rx.pkt_len));
1776 first_seg->pkt_len = pkt_len;
1779 * This is the last buffer of the received packet. If the CRC
1780 * is not stripped by the hardware:
1781 * - Subtract the CRC length from the total packet length.
1782 * - If the last buffer only contains the whole CRC or a part
1783 * of it, free the mbuf associated to the last buffer. If part
1784 * of the CRC is also contained in the previous mbuf, subtract
1785 * the length of that CRC part from the data length of the
1789 if (unlikely(rxq->crc_len > 0)) {
1790 first_seg->pkt_len -= rxq->crc_len;
1791 recalculate_data_len(first_seg, last_seg, rxm, rxq,
1795 first_seg->port = rxq->port_id;
1796 first_seg->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash);
1797 first_seg->ol_flags = PKT_RX_RSS_HASH;
1798 if (unlikely(hns3_get_bit(bd_base_info, HNS3_RXD_LUM_B))) {
1799 first_seg->hash.fdir.hi =
1800 rte_le_to_cpu_32(rxd.rx.fd_id);
1801 first_seg->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
1804 gro_size = hns3_get_field(bd_base_info, HNS3_RXD_GRO_SIZE_M,
1805 HNS3_RXD_GRO_SIZE_S);
1806 if (gro_size != 0) {
1807 first_seg->ol_flags |= PKT_RX_LRO;
1808 first_seg->tso_segsz = gro_size;
1811 ret = hns3_handle_bdinfo(rxq, first_seg, bd_base_info,
1812 l234_info, &cksum_err);
1816 first_seg->packet_type = rxd_pkt_info_to_pkt_type(l234_info,
1819 if (bd_base_info & BIT(HNS3_RXD_L3L4P_B))
1820 hns3_rx_set_cksum_flag(first_seg,
1821 first_seg->packet_type,
1823 hns3_rxd_to_vlan_tci(rxq, first_seg, l234_info, &rxd);
1825 rx_pkts[nb_rx++] = first_seg;
1829 rte_pktmbuf_free(first_seg);
1833 rxq->next_to_use = rx_id;
1834 rxq->pkt_first_seg = first_seg;
1835 rxq->pkt_last_seg = last_seg;
1837 rxq->rx_free_hold += nb_rx_bd;
1838 if (rxq->rx_free_hold > rxq->rx_free_thresh) {
1839 hns3_write_reg_opt(rxq->io_head_reg, rxq->rx_free_hold);
1840 rxq->rx_free_hold = 0;
1847 hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
1848 unsigned int socket_id, const struct rte_eth_txconf *conf)
1850 struct hns3_adapter *hns = dev->data->dev_private;
1851 struct hns3_hw *hw = &hns->hw;
1852 struct hns3_queue_info q_info;
1853 struct hns3_tx_queue *txq;
1856 if (dev->data->dev_started) {
1857 hns3_err(hw, "tx_queue_setup after dev_start not supported");
1861 if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||
1862 nb_desc % HNS3_ALIGN_RING_DESC) {
1863 hns3_err(hw, "Number (%u) of tx descriptors is invalid",
1868 if (dev->data->tx_queues[idx] != NULL) {
1869 hns3_tx_queue_release(dev->data->tx_queues[idx]);
1870 dev->data->tx_queues[idx] = NULL;
1874 q_info.socket_id = socket_id;
1875 q_info.nb_desc = nb_desc;
1876 q_info.type = "hns3 TX queue";
1877 q_info.ring_name = "tx_ring";
1878 txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
1881 "Failed to alloc mem and reserve DMA mem for tx ring!");
1885 txq->tx_deferred_start = conf->tx_deferred_start;
1886 tx_entry_len = sizeof(struct hns3_entry) * txq->nb_tx_desc;
1887 txq->sw_ring = rte_zmalloc_socket("hns3 TX sw ring", tx_entry_len,
1888 RTE_CACHE_LINE_SIZE, socket_id);
1889 if (txq->sw_ring == NULL) {
1890 hns3_err(hw, "Failed to allocate memory for tx sw ring!");
1891 hns3_tx_queue_release(txq);
1896 txq->next_to_use = 0;
1897 txq->next_to_clean = 0;
1898 txq->tx_bd_ready = txq->nb_tx_desc - 1;
1899 txq->port_id = dev->data->port_id;
1900 txq->pvid_state = hw->port_base_vlan_cfg.state;
1901 txq->configured = true;
1902 txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
1903 idx * HNS3_TQP_REG_SIZE);
1904 txq->min_tx_pkt_len = hw->min_tx_pkt_len;
1905 txq->over_length_pkt_cnt = 0;
1906 txq->exceed_limit_bd_pkt_cnt = 0;
1907 txq->exceed_limit_bd_reassem_fail = 0;
1908 txq->unsupported_tunnel_pkt_cnt = 0;
1909 txq->queue_full_cnt = 0;
1910 txq->pkt_padding_fail_cnt = 0;
1911 rte_spinlock_lock(&hw->lock);
1912 dev->data->tx_queues[idx] = txq;
1913 rte_spinlock_unlock(&hw->lock);
1919 hns3_queue_xmit(struct hns3_tx_queue *txq, uint32_t buf_num)
1921 hns3_write_dev(txq, HNS3_RING_TX_TAIL_REG, buf_num);
1925 hns3_tx_free_useless_buffer(struct hns3_tx_queue *txq)
1927 uint16_t tx_next_clean = txq->next_to_clean;
1928 uint16_t tx_next_use = txq->next_to_use;
1929 uint16_t tx_bd_ready = txq->tx_bd_ready;
1930 uint16_t tx_bd_max = txq->nb_tx_desc;
1931 struct hns3_entry *tx_bak_pkt = &txq->sw_ring[tx_next_clean];
1932 struct hns3_desc *desc = &txq->tx_ring[tx_next_clean];
1933 struct rte_mbuf *mbuf;
1935 while ((!hns3_get_bit(desc->tx.tp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B)) &&
1936 tx_next_use != tx_next_clean) {
1937 mbuf = tx_bak_pkt->mbuf;
1939 rte_pktmbuf_free_seg(mbuf);
1940 tx_bak_pkt->mbuf = NULL;
1948 if (tx_next_clean >= tx_bd_max) {
1950 desc = txq->tx_ring;
1951 tx_bak_pkt = txq->sw_ring;
1955 txq->next_to_clean = tx_next_clean;
1956 txq->tx_bd_ready = tx_bd_ready;
1960 hns3_tso_proc_tunnel(struct hns3_desc *desc, uint64_t ol_flags,
1961 struct rte_mbuf *rxm, uint8_t *l2_len)
1967 tun_flags = ol_flags & PKT_TX_TUNNEL_MASK;
1971 otmp = rte_le_to_cpu_32(desc->tx.ol_type_vlan_len_msec);
1972 switch (tun_flags) {
1973 case PKT_TX_TUNNEL_GENEVE:
1974 case PKT_TX_TUNNEL_VXLAN:
1975 *l2_len = rxm->l2_len - RTE_ETHER_VXLAN_HLEN;
1977 case PKT_TX_TUNNEL_GRE:
1979 * OL4 header size, defined in units of 4 bytes; it contains the outer
1980 * L4(GRE) length and the tunneling length.
1982 ol4_len = hns3_get_field(otmp, HNS3_TXD_L4LEN_M,
1984 *l2_len = rxm->l2_len - (ol4_len << HNS3_L4_LEN_UNIT);
1987 /* For non UDP / GRE tunneling, drop the tunnel packet */
1990 hns3_set_field(otmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
1991 rxm->outer_l2_len >> HNS3_L2_LEN_UNIT);
1992 desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(otmp);
1998 hns3_config_gro(struct hns3_hw *hw, bool en)
2000 struct hns3_cfg_gro_status_cmd *req;
2001 struct hns3_cmd_desc desc;
2004 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GRO_GENERIC_CONFIG, false);
2005 req = (struct hns3_cfg_gro_status_cmd *)desc.data;
2007 req->gro_en = rte_cpu_to_le_16(en ? 1 : 0);
2009 ret = hns3_cmd_send(hw, &desc, 1);
2011 hns3_err(hw, "%s hardware GRO failed, ret = %d",
2012 en ? "enable" : "disable", ret);
2018 hns3_restore_gro_conf(struct hns3_hw *hw)
2024 offloads = hw->data->dev_conf.rxmode.offloads;
2025 gro_en = offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false;
2026 ret = hns3_config_gro(hw, gro_en);
2028 hns3_err(hw, "restore hardware GRO to %s failed, ret = %d",
2029 gro_en ? "enabled" : "disabled", ret);
2035 hns3_pkt_is_tso(struct rte_mbuf *m)
2037 return (m->tso_segsz != 0 && m->ol_flags & PKT_TX_TCP_SEG);
2041 hns3_set_tso(struct hns3_desc *desc, uint64_t ol_flags,
2042 uint32_t paylen, struct rte_mbuf *rxm)
2044 uint8_t l2_len = rxm->l2_len;
2047 if (!hns3_pkt_is_tso(rxm))
2050 if (hns3_tso_proc_tunnel(desc, ol_flags, rxm, &l2_len))
2053 if (paylen <= rxm->tso_segsz)
2056 tmp = rte_le_to_cpu_32(desc->tx.type_cs_vlan_tso_len);
2057 hns3_set_bit(tmp, HNS3_TXD_TSO_B, 1);
2058 hns3_set_bit(tmp, HNS3_TXD_L3CS_B, 1);
2059 hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, HNS3_L4T_TCP);
2060 hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
2061 hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
2062 sizeof(struct rte_tcp_hdr) >> HNS3_L4_LEN_UNIT);
2063 hns3_set_field(tmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
2064 l2_len >> HNS3_L2_LEN_UNIT);
2065 desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp);
2066 desc->tx.mss = rte_cpu_to_le_16(rxm->tso_segsz);
2070 hns3_fill_per_desc(struct hns3_desc *desc, struct rte_mbuf *rxm)
2072 desc->addr = rte_mbuf_data_iova(rxm);
2073 desc->tx.send_size = rte_cpu_to_le_16(rte_pktmbuf_data_len(rxm));
2074 desc->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B));
2078 hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc,
2079 struct rte_mbuf *rxm)
2081 uint64_t ol_flags = rxm->ol_flags;
2085 hdr_len = rxm->l2_len + rxm->l3_len + rxm->l4_len;
2086 hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ?
2087 rxm->outer_l2_len + rxm->outer_l3_len : 0;
2088 paylen = rxm->pkt_len - hdr_len;
2089 desc->tx.paylen = rte_cpu_to_le_32(paylen);
2090 hns3_set_tso(desc, ol_flags, paylen, rxm);
2093 * Currently, the hns3 network engine hardware doesn't support more than
2094 * two layers of VLAN offload in the Tx direction. So when the number of
2095 * VLANs in the packet represented by rxm plus the number of VLANs
2096 * offloaded by hardware (such as PVID) exceeds two, the packet will
2097 * be discarded or its original VLAN will be overwritten
2098 * by hardware. When the PF PVID is enabled by calling the API function
2099 * named rte_eth_dev_set_vlan_pvid, or the VF PVID is enabled by the hns3
2100 * PF kernel ether driver, the outer VLAN tag will always be the PVID.
2101 * To avoid the VLAN in the Tx descriptor being overwritten by the PVID, it
2102 * should be placed close to the IP header when PVID is enabled.
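 * For example (derived from the code below): with PVID enabled and an mbuf
 * requesting PKT_TX_VLAN_PKT, the tag from vlan_tci is written to the inner
 * VLAN field of the descriptor, leaving the outer position for the
 * hardware-inserted PVID.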
2104 if (!txq->pvid_state && ol_flags & (PKT_TX_VLAN_PKT |
2106 desc->tx.ol_type_vlan_len_msec |=
2107 rte_cpu_to_le_32(BIT(HNS3_TXD_OVLAN_B));
2108 if (ol_flags & PKT_TX_QINQ_PKT)
2109 desc->tx.outer_vlan_tag =
2110 rte_cpu_to_le_16(rxm->vlan_tci_outer);
2112 desc->tx.outer_vlan_tag =
2113 rte_cpu_to_le_16(rxm->vlan_tci);
2116 if (ol_flags & PKT_TX_QINQ_PKT ||
2117 ((ol_flags & PKT_TX_VLAN_PKT) && txq->pvid_state)) {
2118 desc->tx.type_cs_vlan_tso_len |=
2119 rte_cpu_to_le_32(BIT(HNS3_TXD_VLAN_B));
2120 desc->tx.vlan_tag = rte_cpu_to_le_16(rxm->vlan_tci);
2125 hns3_tx_alloc_mbufs(struct hns3_tx_queue *txq, struct rte_mempool *mb_pool,
2126 uint16_t nb_new_buf, struct rte_mbuf **alloc_mbuf)
2128 struct rte_mbuf *new_mbuf = NULL;
2129 struct rte_eth_dev *dev;
2130 struct rte_mbuf *temp;
2134 /* Allocate enough mbufs */
2135 for (i = 0; i < nb_new_buf; i++) {
2136 temp = rte_pktmbuf_alloc(mb_pool);
2137 if (unlikely(temp == NULL)) {
2138 dev = &rte_eth_devices[txq->port_id];
2139 hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2140 hns3_err(hw, "Failed to alloc TX mbuf port_id=%d,"
2141 "queue_id=%d in reassemble tx pkts.",
2142 txq->port_id, txq->queue_id);
2143 rte_pktmbuf_free(new_mbuf);
2146 temp->next = new_mbuf;
2150 if (new_mbuf == NULL)
2153 new_mbuf->nb_segs = nb_new_buf;
2154 *alloc_mbuf = new_mbuf;
2160 hns3_pktmbuf_copy_hdr(struct rte_mbuf *new_pkt, struct rte_mbuf *old_pkt)
2162 new_pkt->ol_flags = old_pkt->ol_flags;
2163 new_pkt->pkt_len = rte_pktmbuf_pkt_len(old_pkt);
2164 new_pkt->outer_l2_len = old_pkt->outer_l2_len;
2165 new_pkt->outer_l3_len = old_pkt->outer_l3_len;
2166 new_pkt->l2_len = old_pkt->l2_len;
2167 new_pkt->l3_len = old_pkt->l3_len;
2168 new_pkt->l4_len = old_pkt->l4_len;
2169 new_pkt->vlan_tci_outer = old_pkt->vlan_tci_outer;
2170 new_pkt->vlan_tci = old_pkt->vlan_tci;
2174 hns3_reassemble_tx_pkts(void *tx_queue, struct rte_mbuf *tx_pkt,
2175 struct rte_mbuf **new_pkt)
2177 struct hns3_tx_queue *txq = tx_queue;
2178 struct rte_mempool *mb_pool;
2179 struct rte_mbuf *new_mbuf;
2180 struct rte_mbuf *temp_new;
2181 struct rte_mbuf *temp;
2182 uint16_t last_buf_len;
2183 uint16_t nb_new_buf;
2194 mb_pool = tx_pkt->pool;
2195 buf_size = tx_pkt->buf_len - RTE_PKTMBUF_HEADROOM;
2196 nb_new_buf = (rte_pktmbuf_pkt_len(tx_pkt) - 1) / buf_size + 1;
2197 if (nb_new_buf > HNS3_MAX_NON_TSO_BD_PER_PKT)
2200 last_buf_len = rte_pktmbuf_pkt_len(tx_pkt) % buf_size;
2201 if (last_buf_len == 0)
2202 last_buf_len = buf_size;
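/*
 * Illustrative example (assumed values): with pkt_len = 5000 and
 * buf_size = 2048, nb_new_buf = (5000 - 1) / 2048 + 1 = 3 and
 * last_buf_len = 5000 % 2048 = 904.
 */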
2204 /* Allocate enough mbufs */
2205 ret = hns3_tx_alloc_mbufs(txq, mb_pool, nb_new_buf, &new_mbuf);
2209 /* Copy the original packet content to the new mbufs */
2211 s = rte_pktmbuf_mtod(temp, char *);
2212 len_s = rte_pktmbuf_data_len(temp);
2213 temp_new = new_mbuf;
2214 for (i = 0; i < nb_new_buf; i++) {
2215 d = rte_pktmbuf_mtod(temp_new, char *);
2216 if (i < nb_new_buf - 1)
2219 buf_len = last_buf_len;
2223 len = RTE_MIN(len_s, len_d);
2227 len_d = len_d - len;
2228 len_s = len_s - len;
2234 s = rte_pktmbuf_mtod(temp, char *);
2235 len_s = rte_pktmbuf_data_len(temp);
2239 temp_new->data_len = buf_len;
2240 temp_new = temp_new->next;
2242 hns3_pktmbuf_copy_hdr(new_mbuf, tx_pkt);
2244 /* free original mbufs */
2245 rte_pktmbuf_free(tx_pkt);
2247 *new_pkt = new_mbuf;
2253 hns3_parse_outer_params(uint64_t ol_flags, uint32_t *ol_type_vlan_len_msec)
2255 uint32_t tmp = *ol_type_vlan_len_msec;
2257 /* (outer) IP header type */
2258 if (ol_flags & PKT_TX_OUTER_IPV4) {
2259 /* OL3 header size, defined in 4 bytes */
2260 hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
2261 sizeof(struct rte_ipv4_hdr) >> HNS3_L3_LEN_UNIT);
2262 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
2263 hns3_set_field(tmp, HNS3_TXD_OL3T_M,
2264 HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_CSUM);
2266 hns3_set_field(tmp, HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
2267 HNS3_OL3T_IPV4_NO_CSUM);
2268 } else if (ol_flags & PKT_TX_OUTER_IPV6) {
2269 hns3_set_field(tmp, HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
2271 /* OL3 header size, defined in 4 bytes */
2272 hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
2273 sizeof(struct rte_ipv6_hdr) >> HNS3_L3_LEN_UNIT);
2276 *ol_type_vlan_len_msec = tmp;
2280 hns3_parse_inner_params(uint64_t ol_flags, uint32_t *ol_type_vlan_len_msec,
2281 struct rte_net_hdr_lens *hdr_lens)
2283 uint32_t tmp = *ol_type_vlan_len_msec;
2286 /* OL2 header size, defined in 2 bytes */
2287 hns3_set_field(tmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
2288 sizeof(struct rte_ether_hdr) >> HNS3_L2_LEN_UNIT);
2290 /* L4TUNT: L4 Tunneling Type */
2291 switch (ol_flags & PKT_TX_TUNNEL_MASK) {
2292 case PKT_TX_TUNNEL_GENEVE:
2293 case PKT_TX_TUNNEL_VXLAN:
2294 /* MAC in UDP tunnelling packet, including VXLAN */
2295 hns3_set_field(tmp, HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S,
2296 HNS3_TUN_MAC_IN_UDP);
2298 * OL4 header size, defined in units of 4 bytes; it contains the outer
2299 * L4(UDP) length and the tunneling length.
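 * Illustrative arithmetic (assuming HNS3_L4_LEN_UNIT is 2, i.e. units of
 * 4 bytes): RTE_ETHER_VXLAN_HLEN is 16 bytes (8-byte UDP header plus
 * 8-byte VXLAN header), so the field is programmed as 16 >> 2 = 4.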
2301 hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
2302 (uint8_t)RTE_ETHER_VXLAN_HLEN >>
2305 case PKT_TX_TUNNEL_GRE:
2306 hns3_set_field(tmp, HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S,
2309 * OL4 header size, defined in units of 4 bytes; it contains the outer
2310 * L4(GRE) length and the tunneling length.
2312 l4_len = hdr_lens->l4_len + hdr_lens->tunnel_len;
2313 hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
2314 l4_len >> HNS3_L4_LEN_UNIT);
2317 /* For non UDP / GRE tunneling, drop the tunnel packet */
2321 *ol_type_vlan_len_msec = tmp;
2327 hns3_parse_tunneling_params(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
2329 struct rte_net_hdr_lens *hdr_lens)
2331 struct hns3_desc *tx_ring = txq->tx_ring;
2332 struct hns3_desc *desc = &tx_ring[tx_desc_id];
2336 hns3_parse_outer_params(ol_flags, &value);
2337 ret = hns3_parse_inner_params(ol_flags, &value, hdr_lens);
2341 desc->tx.ol_type_vlan_len_msec |= rte_cpu_to_le_32(value);
2347 hns3_parse_l3_cksum_params(uint64_t ol_flags, uint32_t *type_cs_vlan_tso_len)
2351 /* Enable L3 checksum offloads */
2352 if (ol_flags & PKT_TX_IPV4) {
2353 tmp = *type_cs_vlan_tso_len;
2354 hns3_set_field(tmp, HNS3_TXD_L3T_M, HNS3_TXD_L3T_S,
2356 /* inner(/normal) L3 header size, defined in 4 bytes */
2357 hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
2358 sizeof(struct rte_ipv4_hdr) >> HNS3_L3_LEN_UNIT);
2359 if (ol_flags & PKT_TX_IP_CKSUM)
2360 hns3_set_bit(tmp, HNS3_TXD_L3CS_B, 1);
2361 *type_cs_vlan_tso_len = tmp;
2362 } else if (ol_flags & PKT_TX_IPV6) {
2363 tmp = *type_cs_vlan_tso_len;
2364 /* L3T: IPv6 doesn't do checksum */
2365 hns3_set_field(tmp, HNS3_TXD_L3T_M, HNS3_TXD_L3T_S,
2367 /* inner(/normal) L3 header size, defined in 4 bytes */
2368 hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
2369 sizeof(struct rte_ipv6_hdr) >> HNS3_L3_LEN_UNIT);
2370 *type_cs_vlan_tso_len = tmp;
static void
hns3_parse_l4_cksum_params(uint64_t ol_flags, uint32_t *type_cs_vlan_tso_len)
{
	uint32_t tmp;

	/* Enable L4 checksum offloads */
	switch (ol_flags & PKT_TX_L4_MASK) {
	case PKT_TX_TCP_CKSUM:
		tmp = *type_cs_vlan_tso_len;
		hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
			       HNS3_L4T_TCP);
		hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
		hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
			       sizeof(struct rte_tcp_hdr) >> HNS3_L4_LEN_UNIT);
		*type_cs_vlan_tso_len = tmp;
		break;
	case PKT_TX_UDP_CKSUM:
		tmp = *type_cs_vlan_tso_len;
		hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
			       HNS3_L4T_UDP);
		hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
		hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
			       sizeof(struct rte_udp_hdr) >> HNS3_L4_LEN_UNIT);
		*type_cs_vlan_tso_len = tmp;
		break;
	case PKT_TX_SCTP_CKSUM:
		tmp = *type_cs_vlan_tso_len;
		hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
			       HNS3_L4T_SCTP);
		hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
		hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
			       sizeof(struct rte_sctp_hdr) >> HNS3_L4_LEN_UNIT);
		*type_cs_vlan_tso_len = tmp;
		break;
	default:
		break;
	}
}
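
/*
 * Example of the L4LEN encoding above (a sketch, assuming HNS3_L4_LEN_UNIT
 * is a shift of 2): a TCP header without options is
 * sizeof(struct rte_tcp_hdr) = 20 bytes -> 5 units, a UDP header is
 * 8 bytes -> 2 units, and an SCTP common header is 12 bytes -> 3 units.
 */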
static void
hns3_txd_enable_checksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
			 uint64_t ol_flags)
{
	struct hns3_desc *tx_ring = txq->tx_ring;
	struct hns3_desc *desc = &tx_ring[tx_desc_id];
	uint32_t value = 0;

	/* inner(/normal) L2 header size, defined in 2 bytes */
	hns3_set_field(value, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
		       sizeof(struct rte_ether_hdr) >> HNS3_L2_LEN_UNIT);

	hns3_parse_l3_cksum_params(ol_flags, &value);
	hns3_parse_l4_cksum_params(ol_flags, &value);

	desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(value);
}
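
/*
 * Minimal application-side sketch (not part of the driver) of how the
 * checksum offloads handled above are requested: the PKT_TX_* flags and
 * header lengths are set in the mbuf before the burst is handed to the PMD,
 * e.g. for a plain IPv4/TCP packet:
 *
 *	m->l2_len = sizeof(struct rte_ether_hdr);
 *	m->l3_len = sizeof(struct rte_ipv4_hdr);
 *	m->l4_len = sizeof(struct rte_tcp_hdr);
 *	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
 *
 * The corresponding DEV_TX_OFFLOAD_IPV4_CKSUM / DEV_TX_OFFLOAD_TCP_CKSUM
 * capabilities must also be enabled in the port's Tx offload configuration.
 */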
static bool
hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num)
{
	struct rte_mbuf *m_first = tx_pkts;
	struct rte_mbuf *m_last = tx_pkts;
	uint32_t tot_len = 0;
	uint32_t hdr_len;
	uint32_t i;

	/*
	 * Hardware requires that the sum of the data length of every 8
	 * consecutive buffers is greater than MSS in the hns3 network engine.
	 * We simplify it by ensuring that the header length plus the first 8
	 * consecutive frags is greater than GSO header length + MSS, and that
	 * every following window of 7 consecutive frags is greater than MSS,
	 * except for the last 7 frags.
	 */
	if (bd_num <= HNS3_MAX_NON_TSO_BD_PER_PKT)
		return false;

	for (i = 0; m_last && i < HNS3_MAX_NON_TSO_BD_PER_PKT - 1;
	     i++, m_last = m_last->next)
		tot_len += m_last->data_len;

	if (!m_last)
		return true;

	/* ensure the first 8 frags are greater than MSS + header */
	hdr_len = tx_pkts->l2_len + tx_pkts->l3_len + tx_pkts->l4_len;
	hdr_len += (tx_pkts->ol_flags & PKT_TX_TUNNEL_MASK) ?
		   tx_pkts->outer_l2_len + tx_pkts->outer_l3_len : 0;
	if (tot_len + m_last->data_len < tx_pkts->tso_segsz + hdr_len)
		return true;

	/*
	 * ensure the sum of the data length of every 7 consecutive buffers
	 * is greater than MSS, except for the last one.
	 */
	for (i = 0; m_last && i < bd_num - HNS3_MAX_NON_TSO_BD_PER_PKT; i++) {
		tot_len -= m_first->data_len;
		tot_len += m_last->data_len;

		if (tot_len < tx_pkts->tso_segsz)
			return true;

		m_first = m_first->next;
		m_last = m_last->next;
	}

	return false;
}
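
/*
 * Worked example for the rule above (a sketch, assuming
 * HNS3_MAX_NON_TSO_BD_PER_PKT is 8): take a TSO packet with
 * tso_segsz = 1448, a 54-byte header and ten segments of 200 bytes each.
 * The first eight segments hold 1600 bytes, which is not less than
 * 1448 + 54 = 1502, so the first check passes; the next window of seven
 * segments holds only 1400 bytes < 1448, so the function returns true and
 * the packet must be linearized before it can be transmitted.
 */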
static void
hns3_outer_header_cksum_prepare(struct rte_mbuf *m)
{
	uint64_t ol_flags = m->ol_flags;
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_udp_hdr *udp_hdr;
	uint32_t paylen, hdr_len;

	if (!(ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6)))
		return;

	if (ol_flags & PKT_TX_IPV4) {
		ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
						   m->outer_l2_len);

		if (ol_flags & PKT_TX_IP_CKSUM)
			ipv4_hdr->hdr_checksum = 0;
	}

	if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM &&
	    ol_flags & PKT_TX_TCP_SEG) {
		hdr_len = m->l2_len + m->l3_len + m->l4_len;
		hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ?
			   m->outer_l2_len + m->outer_l3_len : 0;
		paylen = m->pkt_len - hdr_len;
		if (paylen <= m->tso_segsz)
			return;
		udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
						  m->outer_l2_len +
						  m->outer_l3_len);
		udp_hdr->dgram_cksum = 0;
	}
}
static int
hns3_check_tso_pkt_valid(struct rte_mbuf *m)
{
	uint32_t tmp_data_len_sum = 0;
	uint16_t nb_buf = m->nb_segs;
	uint32_t paylen, hdr_len;
	struct rte_mbuf *m_seg;
	int i;

	if (nb_buf > HNS3_MAX_TSO_BD_PER_PKT)
		return -EINVAL;

	hdr_len = m->l2_len + m->l3_len + m->l4_len;
	hdr_len += (m->ol_flags & PKT_TX_TUNNEL_MASK) ?
		   m->outer_l2_len + m->outer_l3_len : 0;
	if (hdr_len > HNS3_MAX_TSO_HDR_SIZE)
		return -EINVAL;

	paylen = m->pkt_len - hdr_len;
	if (paylen > HNS3_MAX_BD_PAYLEN)
		return -EINVAL;

	/*
	 * The TSO header (including the outer and inner L2, L3 and L4
	 * headers) must be provided by at most three descriptors in the
	 * hns3 network engine.
	 */
	m_seg = m;
	for (i = 0; m_seg != NULL && i < HNS3_MAX_TSO_HDR_BD_NUM && i < nb_buf;
	     i++, m_seg = m_seg->next)
		tmp_data_len_sum += m_seg->data_len;

	if (hdr_len > tmp_data_len_sum)
		return -EINVAL;

	return 0;
}
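
/*
 * Minimal application-side sketch (not part of the driver) of the mbuf
 * fields a TSO sender sets before these checks run, e.g. for a TCPv4 GSO
 * packet:
 *
 *	m->l2_len = sizeof(struct rte_ether_hdr);
 *	m->l3_len = sizeof(struct rte_ipv4_hdr);
 *	m->l4_len = sizeof(struct rte_tcp_hdr);
 *	m->tso_segsz = 1448;
 *	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
 *		       PKT_TX_TCP_CKSUM | PKT_TX_TCP_SEG;
 *
 * hdr_len is then 14 + 20 + 20 = 54 bytes, comfortably below
 * HNS3_MAX_TSO_HDR_SIZE, and the payload m->pkt_len - 54 must not exceed
 * HNS3_MAX_BD_PAYLEN.
 */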
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
static int
hns3_vld_vlan_chk(struct hns3_tx_queue *txq, struct rte_mbuf *m)
{
	struct rte_ether_hdr *eh;
	struct rte_vlan_hdr *vh;

	if (!txq->pvid_state)
		return 0;

	/*
	 * Due to hardware limitations, we only support two-layer VLAN hardware
	 * offload in the Tx direction on the hns3 network engine, so QinQ
	 * insertion is no longer supported when PVID is enabled. Also, when
	 * PVID is enabled, the following two kinds of packets:
	 *  i) packets with more than two VLAN tags, and
	 *  ii) packets with one VLAN tag while hardware VLAN insertion is
	 *      enabled,
	 * are regarded as abnormal packets and discarded by hardware in the
	 * Tx direction. For debugging purposes, a validation check for these
	 * types of packets is added to the '.tx_pkt_prepare' ops
	 * implementation function named hns3_prep_pkts to inform users that
	 * these packets will be discarded.
	 */
	if (m->ol_flags & PKT_TX_QINQ_PKT)
		return -EINVAL;

	eh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
	if (eh->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
		if (m->ol_flags & PKT_TX_VLAN_PKT)
			return -EINVAL;

		/* Ensure the incoming packet is not a QinQ packet */
		vh = (struct rte_vlan_hdr *)(eh + 1);
		if (vh->eth_proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN))
			return -EINVAL;
	}

	return 0;
}
#endif
uint16_t
hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
	       uint16_t nb_pkts)
{
	struct rte_mbuf *m;
	uint16_t i;
	int ret;

	for (i = 0; i < nb_pkts; i++) {
		m = tx_pkts[i];

		if (hns3_pkt_is_tso(m) &&
		    (hns3_pkt_need_linearized(m, m->nb_segs) ||
		     hns3_check_tso_pkt_valid(m))) {
			rte_errno = EINVAL;
			return i;
		}

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
		ret = rte_validate_tx_offload(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}

		if (hns3_vld_vlan_chk(tx_queue, m)) {
			rte_errno = EINVAL;
			return i;
		}
#endif
		ret = rte_net_intel_cksum_prepare(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}

		hns3_outer_header_cksum_prepare(m);
	}

	return i;
}
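
/*
 * Usage sketch (application side, not part of the driver): because the
 * checks above are wired into the '.tx_pkt_prepare' ops, an application
 * typically runs rte_eth_tx_prepare() before rte_eth_tx_burst() so that
 * malformed TSO/VLAN packets are reported via rte_errno instead of being
 * silently discarded by hardware:
 *
 *	uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id,
 *					      pkts, nb_pkts);
 *	if (nb_prep < nb_pkts)
 *		handle_bad_pkt(pkts[nb_prep]);	 (hypothetical helper)
 *	(void)rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
 */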
static int
hns3_parse_cksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
		 const struct rte_mbuf *m, struct rte_net_hdr_lens *hdr_lens)
{
	/* Fill in tunneling parameters if necessary */
	if (m->ol_flags & PKT_TX_TUNNEL_MASK) {
		(void)rte_net_get_ptype(m, hdr_lens, RTE_PTYPE_ALL_MASK);
		if (hns3_parse_tunneling_params(txq, tx_desc_id, m->ol_flags,
						hdr_lens)) {
			txq->unsupported_tunnel_pkt_cnt++;
			return -EINVAL;
		}
	}

	/* Enable checksum offloading */
	if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK)
		hns3_txd_enable_checksum(txq, tx_desc_id, m->ol_flags);

	return 0;
}
static int
hns3_check_non_tso_pkt(uint16_t nb_buf, struct rte_mbuf **m_seg,
		       struct rte_mbuf *tx_pkt, struct hns3_tx_queue *txq)
{
	struct rte_mbuf *new_pkt;
	int ret;

	if (hns3_pkt_is_tso(*m_seg))
		return 0;

	/*
	 * If the packet length is greater than HNS3_MAX_FRAME_LEN, the
	 * maximum frame length the driver supports, the packet is dropped.
	 */
	if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) > HNS3_MAX_FRAME_LEN)) {
		txq->over_length_pkt_cnt++;
		return -EINVAL;
	}

	if (unlikely(nb_buf > HNS3_MAX_NON_TSO_BD_PER_PKT)) {
		txq->exceed_limit_bd_pkt_cnt++;
		ret = hns3_reassemble_tx_pkts(txq, tx_pkt, &new_pkt);
		if (ret) {
			txq->exceed_limit_bd_reassem_fail++;
			return ret;
		}
		*m_seg = new_pkt;
	}

	return 0;
}
uint16_t
hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct rte_net_hdr_lens hdr_lens = {0};
	struct hns3_tx_queue *txq = tx_queue;
	struct hns3_entry *tx_bak_pkt;
	struct hns3_desc *tx_ring;
	struct rte_mbuf *tx_pkt;
	struct rte_mbuf *m_seg;
	struct hns3_desc *desc;
	uint32_t nb_hold = 0;
	uint16_t tx_next_use;
	uint16_t tx_pkt_num;
	uint16_t tx_bd_max;
	uint16_t nb_buf;
	uint16_t nb_tx;
	uint16_t i;

	/* free useless buffers */
	hns3_tx_free_useless_buffer(txq);

	tx_next_use = txq->next_to_use;
	tx_bd_max = txq->nb_tx_desc;
	tx_pkt_num = nb_pkts;
	tx_ring = txq->tx_ring;

	/* send packets */
	tx_bak_pkt = &txq->sw_ring[tx_next_use];
	for (nb_tx = 0; nb_tx < tx_pkt_num; nb_tx++) {
		tx_pkt = *tx_pkts++;

		nb_buf = tx_pkt->nb_segs;

		if (nb_buf > txq->tx_bd_ready) {
			txq->queue_full_cnt++;
			if (nb_tx == 0)
				return 0;
			goto end_of_tx;
		}

		/*
		 * If the packet length is less than the minimum packet length
		 * supported by hardware in the Tx direction, the driver needs
		 * to pad it to avoid an error.
		 */
		if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) <
			     txq->min_tx_pkt_len)) {
			uint16_t add_len;
			char *appended;

			add_len = txq->min_tx_pkt_len -
				  rte_pktmbuf_pkt_len(tx_pkt);
			appended = rte_pktmbuf_append(tx_pkt, add_len);
			if (appended == NULL) {
				txq->pkt_padding_fail_cnt++;
				break;
			}

			memset(appended, 0, add_len);
		}

		m_seg = tx_pkt;

		if (hns3_check_non_tso_pkt(nb_buf, &m_seg, tx_pkt, txq))
			goto end_of_tx;

		if (hns3_parse_cksum(txq, tx_next_use, m_seg, &hdr_lens))
			goto end_of_tx;

		i = 0;
		desc = &tx_ring[tx_next_use];

		/*
		 * If the packet is divided into multiple Tx Buffer Descriptors,
		 * only the first Tx Buffer Descriptor needs the vlan, paylen
		 * and tso fields filled in.
		 */
		hns3_fill_first_desc(txq, desc, m_seg);

		do {
			desc = &tx_ring[tx_next_use];
			/*
			 * Fill valid bits, DMA address and data length for
			 * each Tx Buffer Descriptor.
			 */
			hns3_fill_per_desc(desc, m_seg);
			tx_bak_pkt->mbuf = m_seg;
			m_seg = m_seg->next;
			tx_next_use++;
			tx_bak_pkt++;
			if (tx_next_use >= tx_bd_max) {
				tx_next_use = 0;
				tx_bak_pkt = txq->sw_ring;
			}

			i++;
		} while (m_seg != NULL);

		/* Add end flag for the last Tx Buffer Descriptor */
		desc->tx.tp_fe_sc_vld_ra_ri |=
			rte_cpu_to_le_16(BIT(HNS3_TXD_FE_B));

		nb_hold += i;
		txq->next_to_use = tx_next_use;
		txq->tx_bd_ready -= i;
	}

end_of_tx:
	if (likely(nb_tx))
		hns3_queue_xmit(txq, nb_hold);

	return nb_tx;
}
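
/*
 * Descriptor accounting example for the loop above (a sketch): a packet
 * with nb_segs == 3 consumes three Tx Buffer Descriptors, so tx_bd_ready
 * shrinks by 3 and next_to_use advances by 3, wrapping to 0 once it reaches
 * nb_tx_desc. Descriptors already processed by hardware are only handed
 * back to tx_bd_ready by hns3_tx_free_useless_buffer() at the start of a
 * later burst call.
 */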
static uint16_t
hns3_dummy_rxtx_burst(void *dpdk_txq __rte_unused,
		      struct rte_mbuf **pkts __rte_unused,
		      uint16_t pkts_n __rte_unused)
{
	return 0;
}
void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;

	if (hns->hw.adapter_state == HNS3_NIC_STARTED &&
	    rte_atomic16_read(&hns->hw.reset.resetting) == 0) {
		eth_dev->rx_pkt_burst = hns3_recv_pkts;
		eth_dev->tx_pkt_burst = hns3_xmit_pkts;
		eth_dev->tx_pkt_prepare = hns3_prep_pkts;
	} else {
		eth_dev->rx_pkt_burst = hns3_dummy_rxtx_burst;
		eth_dev->tx_pkt_burst = hns3_dummy_rxtx_burst;
		eth_dev->tx_pkt_prepare = hns3_dummy_rxtx_burst;
	}
}
void
hns3_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		  struct rte_eth_rxq_info *qinfo)
{
	struct hns3_rx_queue *rxq = dev->data->rx_queues[queue_id];

	qinfo->mp = rxq->mb_pool;
	qinfo->nb_desc = rxq->nb_rx_desc;
	qinfo->scattered_rx = dev->data->scattered_rx;

	/*
	 * If there are no available Rx buffer descriptors, incoming packets
	 * are always dropped by hardware based on the hns3 network engine.
	 */
	qinfo->conf.rx_drop_en = 1;
	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
}
void
hns3_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		  struct rte_eth_txq_info *qinfo)
{
	struct hns3_tx_queue *txq = dev->data->tx_queues[queue_id];

	qinfo->nb_desc = txq->nb_tx_desc;
	qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}
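
/*
 * Usage sketch (application side, not part of the driver): these callbacks
 * back the generic ethdev queue-info queries, e.g.:
 *
 *	struct rte_eth_rxq_info rx_info;
 *	struct rte_eth_txq_info tx_info;
 *
 *	if (rte_eth_rx_queue_info_get(port_id, 0, &rx_info) == 0)
 *		printf("rxq0: %u descriptors\n", rx_info.nb_desc);
 *	if (rte_eth_tx_queue_info_get(port_id, 0, &tx_info) == 0)
 *		printf("txq0: %u descriptors\n", tx_info.nb_desc);
 */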