1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2019 Hisilicon Limited.
11 #include <rte_bus_pci.h>
12 #include <rte_byteorder.h>
13 #include <rte_common.h>
14 #include <rte_cycles.h>
17 #include <rte_ether.h>
18 #include <rte_vxlan.h>
19 #include <rte_ethdev_driver.h>
24 #include <rte_malloc.h>
27 #include "hns3_ethdev.h"
28 #include "hns3_rxtx.h"
29 #include "hns3_regs.h"
30 #include "hns3_logs.h"
32 #define HNS3_CFG_DESC_NUM(num) ((num) / 8 - 1)
33 #define DEFAULT_RX_FREE_THRESH 32
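/*
 * For example, assuming a typical ring of 1024 descriptors, the value
 * programmed into the BD_NUM registers further below is
 * HNS3_CFG_DESC_NUM(1024) = 1024 / 8 - 1 = 127.
 */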
36 hns3_rx_queue_release_mbufs(struct hns3_rx_queue *rxq)
40 /* Note: Fake rx queue will not enter here */
42 for (i = 0; i < rxq->nb_rx_desc; i++) {
43 if (rxq->sw_ring[i].mbuf) {
44 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
45 rxq->sw_ring[i].mbuf = NULL;
52 hns3_tx_queue_release_mbufs(struct hns3_tx_queue *txq)
56 /* Note: Fake tx queue will not enter here */
58 for (i = 0; i < txq->nb_tx_desc; i++) {
59 if (txq->sw_ring[i].mbuf) {
60 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
61 txq->sw_ring[i].mbuf = NULL;
68 hns3_rx_queue_release(void *queue)
70 struct hns3_rx_queue *rxq = queue;
72 hns3_rx_queue_release_mbufs(rxq);
74 rte_memzone_free(rxq->mz);
76 rte_free(rxq->sw_ring);
82 hns3_tx_queue_release(void *queue)
84 struct hns3_tx_queue *txq = queue;
86 hns3_tx_queue_release_mbufs(txq);
88 rte_memzone_free(txq->mz);
90 rte_free(txq->sw_ring);
96 hns3_dev_rx_queue_release(void *queue)
98 struct hns3_rx_queue *rxq = queue;
99 struct hns3_adapter *hns;
105 rte_spinlock_lock(&hns->hw.lock);
106 hns3_rx_queue_release(queue);
107 rte_spinlock_unlock(&hns->hw.lock);
111 hns3_dev_tx_queue_release(void *queue)
113 struct hns3_tx_queue *txq = queue;
114 struct hns3_adapter *hns;
120 rte_spinlock_lock(&hns->hw.lock);
121 hns3_tx_queue_release(queue);
122 rte_spinlock_unlock(&hns->hw.lock);
126 hns3_fake_rx_queue_release(struct hns3_rx_queue *queue)
128 struct hns3_rx_queue *rxq = queue;
129 struct hns3_adapter *hns;
139 if (hw->fkq_data.rx_queues[idx]) {
140 hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);
141 hw->fkq_data.rx_queues[idx] = NULL;
144 /* free fake rx queue arrays */
145 if (idx == (hw->fkq_data.nb_fake_rx_queues - 1)) {
146 hw->fkq_data.nb_fake_rx_queues = 0;
147 rte_free(hw->fkq_data.rx_queues);
148 hw->fkq_data.rx_queues = NULL;
153 hns3_fake_tx_queue_release(struct hns3_tx_queue *queue)
155 struct hns3_tx_queue *txq = queue;
156 struct hns3_adapter *hns;
166 if (hw->fkq_data.tx_queues[idx]) {
167 hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);
168 hw->fkq_data.tx_queues[idx] = NULL;
171 /* free fake tx queue arrays */
172 if (idx == (hw->fkq_data.nb_fake_tx_queues - 1)) {
173 hw->fkq_data.nb_fake_tx_queues = 0;
174 rte_free(hw->fkq_data.tx_queues);
175 hw->fkq_data.tx_queues = NULL;
180 hns3_free_rx_queues(struct rte_eth_dev *dev)
182 struct hns3_adapter *hns = dev->data->dev_private;
183 struct hns3_fake_queue_data *fkq_data;
184 struct hns3_hw *hw = &hns->hw;
188 nb_rx_q = hw->data->nb_rx_queues;
189 for (i = 0; i < nb_rx_q; i++) {
190 if (dev->data->rx_queues[i]) {
191 hns3_rx_queue_release(dev->data->rx_queues[i]);
192 dev->data->rx_queues[i] = NULL;
196 /* Free fake Rx queues */
197 fkq_data = &hw->fkq_data;
198 for (i = 0; i < fkq_data->nb_fake_rx_queues; i++) {
199 if (fkq_data->rx_queues[i])
200 hns3_fake_rx_queue_release(fkq_data->rx_queues[i]);
205 hns3_free_tx_queues(struct rte_eth_dev *dev)
207 struct hns3_adapter *hns = dev->data->dev_private;
208 struct hns3_fake_queue_data *fkq_data;
209 struct hns3_hw *hw = &hns->hw;
213 nb_tx_q = hw->data->nb_tx_queues;
214 for (i = 0; i < nb_tx_q; i++) {
215 if (dev->data->tx_queues[i]) {
216 hns3_tx_queue_release(dev->data->tx_queues[i]);
217 dev->data->tx_queues[i] = NULL;
221 /* Free fake Tx queues */
222 fkq_data = &hw->fkq_data;
223 for (i = 0; i < fkq_data->nb_fake_tx_queues; i++) {
224 if (fkq_data->tx_queues[i])
225 hns3_fake_tx_queue_release(fkq_data->tx_queues[i]);
230 hns3_free_all_queues(struct rte_eth_dev *dev)
232 hns3_free_rx_queues(dev);
233 hns3_free_tx_queues(dev);
237 hns3_alloc_rx_queue_mbufs(struct hns3_hw *hw, struct hns3_rx_queue *rxq)
239 struct rte_mbuf *mbuf;
243 for (i = 0; i < rxq->nb_rx_desc; i++) {
244 mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
245 if (unlikely(mbuf == NULL)) {
246 hns3_err(hw, "Failed to allocate RXD[%d] for rx queue!",
248 hns3_rx_queue_release_mbufs(rxq);
252 rte_mbuf_refcnt_set(mbuf, 1);
254 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
256 mbuf->port = rxq->port_id;
258 rxq->sw_ring[i].mbuf = mbuf;
259 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
260 rxq->rx_ring[i].addr = dma_addr;
261 rxq->rx_ring[i].rx.bd_base_info = 0;
268 hns3_buf_size2type(uint32_t buf_size)
274 bd_size_type = HNS3_BD_SIZE_512_TYPE;
277 bd_size_type = HNS3_BD_SIZE_1024_TYPE;
280 bd_size_type = HNS3_BD_SIZE_4096_TYPE;
283 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
290 hns3_init_rx_queue_hw(struct hns3_rx_queue *rxq)
292 uint32_t rx_buf_len = rxq->rx_buf_len;
293 uint64_t dma_addr = rxq->rx_ring_phys_addr;
295 hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_L_REG, (uint32_t)dma_addr);
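/*
 * The high word of the ring base address below is derived with two shifts,
 * (dma_addr >> 31) >> 1, rather than a single ">> 32"; this is assumed to be
 * a defensive style that stays well defined even if the address type were
 * only 32 bits wide.
 */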
296 hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_H_REG,
297 (uint32_t)((dma_addr >> 31) >> 1));
299 hns3_write_dev(rxq, HNS3_RING_RX_BD_LEN_REG,
300 hns3_buf_size2type(rx_buf_len));
301 hns3_write_dev(rxq, HNS3_RING_RX_BD_NUM_REG,
302 HNS3_CFG_DESC_NUM(rxq->nb_rx_desc));
306 hns3_init_tx_queue_hw(struct hns3_tx_queue *txq)
308 uint64_t dma_addr = txq->tx_ring_phys_addr;
310 hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_L_REG, (uint32_t)dma_addr);
311 hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_H_REG,
312 (uint32_t)((dma_addr >> 31) >> 1));
314 hns3_write_dev(txq, HNS3_RING_TX_BD_NUM_REG,
315 HNS3_CFG_DESC_NUM(txq->nb_tx_desc));
319 hns3_update_all_queues_pvid_state(struct hns3_hw *hw)
321 uint16_t nb_rx_q = hw->data->nb_rx_queues;
322 uint16_t nb_tx_q = hw->data->nb_tx_queues;
323 struct hns3_rx_queue *rxq;
324 struct hns3_tx_queue *txq;
328 pvid_state = hw->port_base_vlan_cfg.state;
329 for (i = 0; i < hw->cfg_max_queues; i++) {
331 rxq = hw->data->rx_queues[i];
333 rxq->pvid_state = pvid_state;
336 txq = hw->data->tx_queues[i];
338 txq->pvid_state = pvid_state;
344 hns3_enable_all_queues(struct hns3_hw *hw, bool en)
346 uint16_t nb_rx_q = hw->data->nb_rx_queues;
347 uint16_t nb_tx_q = hw->data->nb_tx_queues;
348 struct hns3_rx_queue *rxq;
349 struct hns3_tx_queue *txq;
353 for (i = 0; i < hw->cfg_max_queues; i++) {
355 rxq = hw->data->rx_queues[i];
357 rxq = hw->fkq_data.rx_queues[i - nb_rx_q];
359 txq = hw->data->tx_queues[i];
361 txq = hw->fkq_data.tx_queues[i - nb_tx_q];
362 if (rxq == NULL || txq == NULL ||
363 (en && (rxq->rx_deferred_start || txq->tx_deferred_start)))
366 rcb_reg = hns3_read_dev(rxq, HNS3_RING_EN_REG);
368 rcb_reg |= BIT(HNS3_RING_EN_B);
370 rcb_reg &= ~BIT(HNS3_RING_EN_B);
371 hns3_write_dev(rxq, HNS3_RING_EN_REG, rcb_reg);
376 hns3_tqp_enable(struct hns3_hw *hw, uint16_t queue_id, bool enable)
378 struct hns3_cfg_com_tqp_queue_cmd *req;
379 struct hns3_cmd_desc desc;
382 req = (struct hns3_cfg_com_tqp_queue_cmd *)desc.data;
384 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_COM_TQP_QUEUE, false);
385 req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
387 hns3_set_bit(req->enable, HNS3_TQP_ENABLE_B, enable ? 1 : 0);
389 ret = hns3_cmd_send(hw, &desc, 1);
391 hns3_err(hw, "TQP enable fail, ret = %d", ret);
397 hns3_send_reset_tqp_cmd(struct hns3_hw *hw, uint16_t queue_id, bool enable)
399 struct hns3_reset_tqp_queue_cmd *req;
400 struct hns3_cmd_desc desc;
403 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, false);
405 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
406 req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
407 hns3_set_bit(req->reset_req, HNS3_TQP_RESET_B, enable ? 1 : 0);
409 ret = hns3_cmd_send(hw, &desc, 1);
411 hns3_err(hw, "Send tqp reset cmd error, ret = %d", ret);
417 hns3_get_reset_status(struct hns3_hw *hw, uint16_t queue_id)
419 struct hns3_reset_tqp_queue_cmd *req;
420 struct hns3_cmd_desc desc;
423 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, true);
425 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
426 req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
428 ret = hns3_cmd_send(hw, &desc, 1);
430 hns3_err(hw, "Get reset status error, ret = %d", ret);
434 return hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B);
438 hns3_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
440 #define HNS3_TQP_RESET_TRY_MS 200
445 ret = hns3_tqp_enable(hw, queue_id, false);
450 * In the current version, VF is not supported when the PF is driven by
451 * the DPDK driver; all task queue pairs are mapped to the PF function,
452 * so the PF's queue id equals the global queue id in the PF range.
454 ret = hns3_send_reset_tqp_cmd(hw, queue_id, true);
456 hns3_err(hw, "Send reset tqp cmd fail, ret = %d", ret);
460 end = get_timeofday_ms() + HNS3_TQP_RESET_TRY_MS;
462 /* Wait for tqp hw reset */
463 rte_delay_ms(HNS3_POLL_RESPONE_MS);
464 reset_status = hns3_get_reset_status(hw, queue_id);
469 } while (get_timeofday_ms() < end);
472 hns3_err(hw, "Reset TQP fail, ret = %d", ret);
476 ret = hns3_send_reset_tqp_cmd(hw, queue_id, false);
478 hns3_err(hw, "Deassert the soft reset fail, ret = %d", ret);
484 hns3vf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
490 /* Disable VF's queue before sending the queue reset msg to PF */
490 ret = hns3_tqp_enable(hw, queue_id, false);
494 memcpy(msg_data, &queue_id, sizeof(uint16_t));
496 return hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data,
497 sizeof(msg_data), true, NULL, 0);
501 hns3_reset_queue(struct hns3_adapter *hns, uint16_t queue_id)
503 struct hns3_hw *hw = &hns->hw;
505 return hns3vf_reset_tqp(hw, queue_id);
507 return hns3_reset_tqp(hw, queue_id);
511 hns3_reset_all_queues(struct hns3_adapter *hns)
513 struct hns3_hw *hw = &hns->hw;
516 for (i = 0; i < hw->cfg_max_queues; i++) {
517 ret = hns3_reset_queue(hns, i);
519 hns3_err(hw, "Failed to reset No.%d queue: %d", i, ret);
527 hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
528 uint8_t gl_idx, uint16_t gl_value)
530 uint32_t offset[] = {HNS3_TQP_INTR_GL0_REG,
531 HNS3_TQP_INTR_GL1_REG,
532 HNS3_TQP_INTR_GL2_REG};
533 uint32_t addr, value;
535 if (gl_idx >= RTE_DIM(offset) || gl_value > HNS3_TQP_INTR_GL_MAX)
538 addr = offset[gl_idx] + queue_id * HNS3_TQP_INTR_REG_SIZE;
539 value = HNS3_GL_USEC_TO_REG(gl_value);
541 hns3_write_dev(hw, addr, value);
545 hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id, uint16_t rl_value)
547 uint32_t addr, value;
549 if (rl_value > HNS3_TQP_INTR_RL_MAX)
552 addr = HNS3_TQP_INTR_RL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
553 value = HNS3_RL_USEC_TO_REG(rl_value);
555 value |= HNS3_TQP_INTR_RL_ENABLE_MASK;
557 hns3_write_dev(hw, addr, value);
561 hns3_queue_intr_enable(struct hns3_hw *hw, uint16_t queue_id, bool en)
563 uint32_t addr, value;
565 addr = HNS3_TQP_INTR_CTRL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
568 hns3_write_dev(hw, addr, value);
572 * Enable all Rx queue interrupts when in interrupt Rx mode.
573 * This API is called before enabling queue Rx & Tx (in normal start or reset
574 * recovery scenarios), and is used to restore the Rx queue interrupt enables cleared
578 hns3_dev_all_rx_queue_intr_enable(struct hns3_hw *hw, bool en)
580 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
581 uint16_t nb_rx_q = hw->data->nb_rx_queues;
584 if (dev->data->dev_conf.intr_conf.rxq == 0)
587 for (i = 0; i < nb_rx_q; i++)
588 hns3_queue_intr_enable(hw, i, en);
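/*
 * A minimal usage sketch of the ordering described above (assumed start
 * flow, illustrative only): restore the per-queue interrupt enable bits
 * before turning the rings on.
 *
 *	hns3_dev_all_rx_queue_intr_enable(hw, true);
 *	hns3_enable_all_queues(hw, true);
 */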
592 hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
594 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
595 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
596 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
598 if (dev->data->dev_conf.intr_conf.rxq == 0)
601 hns3_queue_intr_enable(hw, queue_id, true);
603 return rte_intr_ack(intr_handle);
607 hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
609 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
611 if (dev->data->dev_conf.intr_conf.rxq == 0)
614 hns3_queue_intr_enable(hw, queue_id, false);
620 hns3_dev_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
622 struct hns3_hw *hw = &hns->hw;
623 struct hns3_rx_queue *rxq;
626 PMD_INIT_FUNC_TRACE();
628 rxq = (struct hns3_rx_queue *)hw->data->rx_queues[idx];
629 ret = hns3_alloc_rx_queue_mbufs(hw, rxq);
631 hns3_err(hw, "Failed to alloc mbuf for No.%d rx queue: %d",
636 rxq->next_to_use = 0;
637 rxq->next_to_clean = 0;
639 hns3_init_rx_queue_hw(rxq);
645 hns3_fake_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
647 struct hns3_hw *hw = &hns->hw;
648 struct hns3_rx_queue *rxq;
650 rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[idx];
651 rxq->next_to_use = 0;
652 rxq->next_to_clean = 0;
654 hns3_init_rx_queue_hw(rxq);
658 hns3_init_tx_queue(struct hns3_tx_queue *queue)
660 struct hns3_tx_queue *txq = queue;
661 struct hns3_desc *desc;
666 for (i = 0; i < txq->nb_tx_desc; i++) {
667 desc->tx.tp_fe_sc_vld_ra_ri = 0;
671 txq->next_to_use = 0;
672 txq->next_to_clean = 0;
673 txq->tx_bd_ready = txq->nb_tx_desc - 1;
674 hns3_init_tx_queue_hw(txq);
678 hns3_dev_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)
680 struct hns3_hw *hw = &hns->hw;
681 struct hns3_tx_queue *txq;
683 txq = (struct hns3_tx_queue *)hw->data->tx_queues[idx];
684 hns3_init_tx_queue(txq);
688 hns3_fake_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)
690 struct hns3_hw *hw = &hns->hw;
691 struct hns3_tx_queue *txq;
693 txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[idx];
694 hns3_init_tx_queue(txq);
698 hns3_init_tx_ring_tc(struct hns3_adapter *hns)
700 struct hns3_hw *hw = &hns->hw;
701 struct hns3_tx_queue *txq;
704 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
705 struct hns3_tc_queue_info *tc_queue = &hw->tc_queue[i];
708 if (!tc_queue->enable)
711 for (j = 0; j < tc_queue->tqp_count; j++) {
712 num = tc_queue->tqp_offset + j;
713 txq = (struct hns3_tx_queue *)hw->data->tx_queues[num];
717 hns3_write_dev(txq, HNS3_RING_TX_TC_REG, tc_queue->tc);
723 hns3_start_rx_queues(struct hns3_adapter *hns)
725 struct hns3_hw *hw = &hns->hw;
726 struct hns3_rx_queue *rxq;
730 /* Initialize RSS for queues */
731 ret = hns3_config_rss(hns);
733 hns3_err(hw, "Failed to configure rss %d", ret);
737 for (i = 0; i < hw->data->nb_rx_queues; i++) {
738 rxq = (struct hns3_rx_queue *)hw->data->rx_queues[i];
739 if (rxq == NULL || rxq->rx_deferred_start)
741 ret = hns3_dev_rx_queue_start(hns, i);
743 hns3_err(hw, "Failed to start No.%d rx queue: %d", i,
749 for (i = 0; i < hw->fkq_data.nb_fake_rx_queues; i++) {
750 rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[i];
751 if (rxq == NULL || rxq->rx_deferred_start)
753 hns3_fake_rx_queue_start(hns, i);
758 for (j = 0; j < i; j++) {
759 rxq = (struct hns3_rx_queue *)hw->data->rx_queues[j];
760 hns3_rx_queue_release_mbufs(rxq);
767 hns3_start_tx_queues(struct hns3_adapter *hns)
769 struct hns3_hw *hw = &hns->hw;
770 struct hns3_tx_queue *txq;
773 for (i = 0; i < hw->data->nb_tx_queues; i++) {
774 txq = (struct hns3_tx_queue *)hw->data->tx_queues[i];
775 if (txq == NULL || txq->tx_deferred_start)
777 hns3_dev_tx_queue_start(hns, i);
780 for (i = 0; i < hw->fkq_data.nb_fake_tx_queues; i++) {
781 txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[i];
782 if (txq == NULL || txq->tx_deferred_start)
784 hns3_fake_tx_queue_start(hns, i);
787 hns3_init_tx_ring_tc(hns);
792 * Note: just init and set up the queues; queue Rx & Tx are not enabled here.
795 hns3_start_queues(struct hns3_adapter *hns, bool reset_queue)
797 struct hns3_hw *hw = &hns->hw;
801 ret = hns3_reset_all_queues(hns);
803 hns3_err(hw, "Failed to reset all queues %d", ret);
808 ret = hns3_start_rx_queues(hns);
810 hns3_err(hw, "Failed to start rx queues: %d", ret);
814 hns3_start_tx_queues(hns);
820 hns3_stop_queues(struct hns3_adapter *hns, bool reset_queue)
822 struct hns3_hw *hw = &hns->hw;
825 hns3_enable_all_queues(hw, false);
827 ret = hns3_reset_all_queues(hns);
829 hns3_err(hw, "Failed to reset all queues %d", ret);
837 hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev,
838 struct hns3_queue_info *q_info)
840 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
841 const struct rte_memzone *rx_mz;
842 struct hns3_rx_queue *rxq;
843 unsigned int rx_desc;
845 rxq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_rx_queue),
846 RTE_CACHE_LINE_SIZE, q_info->socket_id);
848 hns3_err(hw, "Failed to allocate memory for No.%d rx ring!",
853 /* Allocate rx ring hardware descriptors. */
854 rxq->queue_id = q_info->idx;
855 rxq->nb_rx_desc = q_info->nb_desc;
856 rx_desc = rxq->nb_rx_desc * sizeof(struct hns3_desc);
857 rx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
858 rx_desc, HNS3_RING_BASE_ALIGN,
861 hns3_err(hw, "Failed to reserve DMA memory for No.%d rx ring!",
863 hns3_rx_queue_release(rxq);
867 rxq->rx_ring = (struct hns3_desc *)rx_mz->addr;
868 rxq->rx_ring_phys_addr = rx_mz->iova;
870 hns3_dbg(hw, "No.%d rx descriptors iova 0x%" PRIx64, q_info->idx,
871 rxq->rx_ring_phys_addr);
877 hns3_fake_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
878 uint16_t nb_desc, unsigned int socket_id)
880 struct hns3_adapter *hns = dev->data->dev_private;
881 struct hns3_hw *hw = &hns->hw;
882 struct hns3_queue_info q_info;
883 struct hns3_rx_queue *rxq;
886 if (hw->fkq_data.rx_queues[idx]) {
887 hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);
888 hw->fkq_data.rx_queues[idx] = NULL;
892 q_info.socket_id = socket_id;
893 q_info.nb_desc = nb_desc;
894 q_info.type = "hns3 fake RX queue";
895 q_info.ring_name = "rx_fake_ring";
896 rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
898 hns3_err(hw, "Failed to setup No.%d fake rx ring.", idx);
902 /* No need to alloc sw_ring, because upper-layer applications don't use it */
906 rxq->rx_deferred_start = false;
907 rxq->port_id = dev->data->port_id;
908 rxq->configured = true;
909 nb_rx_q = dev->data->nb_rx_queues;
910 rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
911 (nb_rx_q + idx) * HNS3_TQP_REG_SIZE);
912 rxq->rx_buf_len = HNS3_MIN_BD_BUF_SIZE;
914 rte_spinlock_lock(&hw->lock);
915 hw->fkq_data.rx_queues[idx] = rxq;
916 rte_spinlock_unlock(&hw->lock);
922 hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev,
923 struct hns3_queue_info *q_info)
925 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
926 const struct rte_memzone *tx_mz;
927 struct hns3_tx_queue *txq;
928 struct hns3_desc *desc;
929 unsigned int tx_desc;
932 txq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_tx_queue),
933 RTE_CACHE_LINE_SIZE, q_info->socket_id);
935 hns3_err(hw, "Failed to allocate memory for No.%d tx ring!",
940 /* Allocate tx ring hardware descriptors. */
941 txq->queue_id = q_info->idx;
942 txq->nb_tx_desc = q_info->nb_desc;
943 tx_desc = txq->nb_tx_desc * sizeof(struct hns3_desc);
944 tx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
945 tx_desc, HNS3_RING_BASE_ALIGN,
948 hns3_err(hw, "Failed to reserve DMA memory for No.%d tx ring!",
950 hns3_tx_queue_release(txq);
954 txq->tx_ring = (struct hns3_desc *)tx_mz->addr;
955 txq->tx_ring_phys_addr = tx_mz->iova;
957 hns3_dbg(hw, "No.%d tx descriptors iova 0x%" PRIx64, q_info->idx,
958 txq->tx_ring_phys_addr);
962 for (i = 0; i < txq->nb_tx_desc; i++) {
963 desc->tx.tp_fe_sc_vld_ra_ri = 0;
971 hns3_fake_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
972 uint16_t nb_desc, unsigned int socket_id)
974 struct hns3_adapter *hns = dev->data->dev_private;
975 struct hns3_hw *hw = &hns->hw;
976 struct hns3_queue_info q_info;
977 struct hns3_tx_queue *txq;
980 if (hw->fkq_data.tx_queues[idx] != NULL) {
981 hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);
982 hw->fkq_data.tx_queues[idx] = NULL;
986 q_info.socket_id = socket_id;
987 q_info.nb_desc = nb_desc;
988 q_info.type = "hns3 fake TX queue";
989 q_info.ring_name = "tx_fake_ring";
990 txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
992 hns3_err(hw, "Failed to setup No.%d fake tx ring.", idx);
996 /* No need to alloc sw_ring, because upper-layer applications don't use it */
1000 txq->tx_deferred_start = false;
1001 txq->port_id = dev->data->port_id;
1002 txq->configured = true;
1003 nb_tx_q = dev->data->nb_tx_queues;
1004 txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
1005 (nb_tx_q + idx) * HNS3_TQP_REG_SIZE);
1007 rte_spinlock_lock(&hw->lock);
1008 hw->fkq_data.tx_queues[idx] = txq;
1009 rte_spinlock_unlock(&hw->lock);
1015 hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
1017 uint16_t old_nb_queues = hw->fkq_data.nb_fake_rx_queues;
1021 if (hw->fkq_data.rx_queues == NULL && nb_queues != 0) {
1022 /* first time configuration */
1024 size = sizeof(hw->fkq_data.rx_queues[0]) * nb_queues;
1025 hw->fkq_data.rx_queues = rte_zmalloc("fake_rx_queues", size,
1026 RTE_CACHE_LINE_SIZE);
1027 if (hw->fkq_data.rx_queues == NULL) {
1028 hw->fkq_data.nb_fake_rx_queues = 0;
1031 } else if (hw->fkq_data.rx_queues != NULL && nb_queues != 0) {
1033 rxq = hw->fkq_data.rx_queues;
1034 for (i = nb_queues; i < old_nb_queues; i++)
1035 hns3_dev_rx_queue_release(rxq[i]);
1037 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
1038 RTE_CACHE_LINE_SIZE);
1041 if (nb_queues > old_nb_queues) {
1042 uint16_t new_qs = nb_queues - old_nb_queues;
1043 memset(rxq + old_nb_queues, 0, sizeof(rxq[0]) * new_qs);
1046 hw->fkq_data.rx_queues = rxq;
1047 } else if (hw->fkq_data.rx_queues != NULL && nb_queues == 0) {
1048 rxq = hw->fkq_data.rx_queues;
1049 for (i = nb_queues; i < old_nb_queues; i++)
1050 hns3_dev_rx_queue_release(rxq[i]);
1052 rte_free(hw->fkq_data.rx_queues);
1053 hw->fkq_data.rx_queues = NULL;
1056 hw->fkq_data.nb_fake_rx_queues = nb_queues;
1062 hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
1064 uint16_t old_nb_queues = hw->fkq_data.nb_fake_tx_queues;
1068 if (hw->fkq_data.tx_queues == NULL && nb_queues != 0) {
1069 /* first time configuration */
1071 size = sizeof(hw->fkq_data.tx_queues[0]) * nb_queues;
1072 hw->fkq_data.tx_queues = rte_zmalloc("fake_tx_queues", size,
1073 RTE_CACHE_LINE_SIZE);
1074 if (hw->fkq_data.tx_queues == NULL) {
1075 hw->fkq_data.nb_fake_tx_queues = 0;
1078 } else if (hw->fkq_data.tx_queues != NULL && nb_queues != 0) {
1080 txq = hw->fkq_data.tx_queues;
1081 for (i = nb_queues; i < old_nb_queues; i++)
1082 hns3_dev_tx_queue_release(txq[i]);
1083 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
1084 RTE_CACHE_LINE_SIZE);
1087 if (nb_queues > old_nb_queues) {
1088 uint16_t new_qs = nb_queues - old_nb_queues;
1089 memset(txq + old_nb_queues, 0, sizeof(txq[0]) * new_qs);
1092 hw->fkq_data.tx_queues = txq;
1093 } else if (hw->fkq_data.tx_queues != NULL && nb_queues == 0) {
1094 txq = hw->fkq_data.tx_queues;
1095 for (i = nb_queues; i < old_nb_queues; i++)
1096 hns3_dev_tx_queue_release(txq[i]);
1098 rte_free(hw->fkq_data.tx_queues);
1099 hw->fkq_data.tx_queues = NULL;
1101 hw->fkq_data.nb_fake_tx_queues = nb_queues;
1107 hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
1110 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1111 uint16_t rx_need_add_nb_q;
1112 uint16_t tx_need_add_nb_q;
1117 /* Setup new number of fake RX/TX queues and reconfigure device. */
1118 hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q);
1119 rx_need_add_nb_q = hw->cfg_max_queues - nb_rx_q;
1120 tx_need_add_nb_q = hw->cfg_max_queues - nb_tx_q;
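/*
 * For example, assuming nb_rx_q = 8 and nb_tx_q = 4, cfg_max_queues is 8,
 * rx_need_add_nb_q is 0 and tx_need_add_nb_q is 4, so four fake Tx queues
 * are set up below.
 */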
1121 ret = hns3_fake_rx_queue_config(hw, rx_need_add_nb_q);
1123 hns3_err(hw, "Failed to configure fake Rx queues: %d", ret);
1124 goto cfg_fake_rx_q_fail;
1127 ret = hns3_fake_tx_queue_config(hw, tx_need_add_nb_q);
1129 hns3_err(hw, "Failed to configure fake Tx queues: %d", ret);
1130 goto cfg_fake_tx_q_fail;
1133 /* Allocate and set up fake RX queue per Ethernet port. */
1134 port_id = hw->data->port_id;
1135 for (q = 0; q < rx_need_add_nb_q; q++) {
1136 ret = hns3_fake_rx_queue_setup(dev, q, HNS3_MIN_RING_DESC,
1137 rte_eth_dev_socket_id(port_id));
1139 goto setup_fake_rx_q_fail;
1142 /* Allocate and set up fake TX queue per Ethernet port. */
1143 for (q = 0; q < tx_need_add_nb_q; q++) {
1144 ret = hns3_fake_tx_queue_setup(dev, q, HNS3_MIN_RING_DESC,
1145 rte_eth_dev_socket_id(port_id));
1147 goto setup_fake_tx_q_fail;
1152 setup_fake_tx_q_fail:
1153 setup_fake_rx_q_fail:
1154 (void)hns3_fake_tx_queue_config(hw, 0);
1156 (void)hns3_fake_rx_queue_config(hw, 0);
1158 hw->cfg_max_queues = 0;
1164 hns3_dev_release_mbufs(struct hns3_adapter *hns)
1166 struct rte_eth_dev_data *dev_data = hns->hw.data;
1167 struct hns3_rx_queue *rxq;
1168 struct hns3_tx_queue *txq;
1171 if (dev_data->rx_queues)
1172 for (i = 0; i < dev_data->nb_rx_queues; i++) {
1173 rxq = dev_data->rx_queues[i];
1174 if (rxq == NULL || rxq->rx_deferred_start)
1176 hns3_rx_queue_release_mbufs(rxq);
1179 if (dev_data->tx_queues)
1180 for (i = 0; i < dev_data->nb_tx_queues; i++) {
1181 txq = dev_data->tx_queues[i];
1182 if (txq == NULL || txq->tx_deferred_start)
1184 hns3_tx_queue_release_mbufs(txq);
1189 hns3_rx_buf_len_calc(struct rte_mempool *mp, uint16_t *rx_buf_len)
1191 uint16_t vld_buf_size;
1192 uint16_t num_hw_specs;
1196 * The hns3 network engine only supports 4 typical buffer size
1197 * specifications, and different buffer sizes affect the max packet_len
1198 * and the max number of segments when HW GRO is turned on in the receive
1199 * direction. The relationship between them is as follows:
1200 * rx_buf_size | max_gro_pkt_len | max_gro_nb_seg
1201 * ---------------------|-------------------|----------------
1202 * HNS3_4K_BD_BUF_SIZE | 60KB | 15
1203 * HNS3_2K_BD_BUF_SIZE | 62KB | 31
1204 * HNS3_1K_BD_BUF_SIZE | 63KB | 63
1205 * HNS3_512_BD_BUF_SIZE | 31.5KB | 63
1207 static const uint16_t hw_rx_buf_size[] = {
1208 HNS3_4K_BD_BUF_SIZE,
1209 HNS3_2K_BD_BUF_SIZE,
1210 HNS3_1K_BD_BUF_SIZE,
1211 HNS3_512_BD_BUF_SIZE
1214 vld_buf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
1215 RTE_PKTMBUF_HEADROOM);
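/*
 * For example, assuming a mempool created with a data room of
 * RTE_PKTMBUF_HEADROOM + 2500 bytes, vld_buf_size is 2500 and the loop
 * below selects HNS3_2K_BD_BUF_SIZE, the largest supported specification
 * that still fits.
 */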
1217 if (vld_buf_size < HNS3_MIN_BD_BUF_SIZE)
1220 num_hw_specs = RTE_DIM(hw_rx_buf_size);
1221 for (i = 0; i < num_hw_specs; i++) {
1222 if (vld_buf_size >= hw_rx_buf_size[i]) {
1223 *rx_buf_len = hw_rx_buf_size[i];
1231 hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
1232 unsigned int socket_id, const struct rte_eth_rxconf *conf,
1233 struct rte_mempool *mp)
1235 struct hns3_adapter *hns = dev->data->dev_private;
1236 struct hns3_hw *hw = &hns->hw;
1237 struct hns3_queue_info q_info;
1238 struct hns3_rx_queue *rxq;
1239 uint16_t rx_buf_size;
1242 if (dev->data->dev_started) {
1243 hns3_err(hw, "rx_queue_setup after dev_start not supported");
1247 if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||
1248 nb_desc % HNS3_ALIGN_RING_DESC) {
1249 hns3_err(hw, "Number (%u) of rx descriptors is invalid",
1254 if (conf->rx_drop_en == 0)
1255 hns3_warn(hw, "if there are no available Rx descriptors,"
1256 " incoming packets are always dropped. input parameter"
1257 " conf->rx_drop_en(%u) is ineffective.",
1260 if (dev->data->rx_queues[idx]) {
1261 hns3_rx_queue_release(dev->data->rx_queues[idx]);
1262 dev->data->rx_queues[idx] = NULL;
1266 q_info.socket_id = socket_id;
1267 q_info.nb_desc = nb_desc;
1268 q_info.type = "hns3 RX queue";
1269 q_info.ring_name = "rx_ring";
1271 if (hns3_rx_buf_len_calc(mp, &rx_buf_size)) {
1272 hns3_err(hw, "rxq mbufs' data room size:%u is not enough! "
1273 "minimal data room size:%u.",
1274 rte_pktmbuf_data_room_size(mp),
1275 HNS3_MIN_BD_BUF_SIZE + RTE_PKTMBUF_HEADROOM);
1279 rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
1282 "Failed to alloc mem and reserve DMA mem for rx ring!");
1288 if (conf->rx_free_thresh <= 0)
1289 rxq->rx_free_thresh = DEFAULT_RX_FREE_THRESH;
1291 rxq->rx_free_thresh = conf->rx_free_thresh;
1292 rxq->rx_deferred_start = conf->rx_deferred_start;
1294 rx_entry_len = sizeof(struct hns3_entry) * rxq->nb_rx_desc;
1295 rxq->sw_ring = rte_zmalloc_socket("hns3 RX sw ring", rx_entry_len,
1296 RTE_CACHE_LINE_SIZE, socket_id);
1297 if (rxq->sw_ring == NULL) {
1298 hns3_err(hw, "Failed to allocate memory for rx sw ring!");
1299 hns3_rx_queue_release(rxq);
1303 rxq->next_to_use = 0;
1304 rxq->next_to_clean = 0;
1305 rxq->nb_rx_hold = 0;
1306 rxq->pkt_first_seg = NULL;
1307 rxq->pkt_last_seg = NULL;
1308 rxq->port_id = dev->data->port_id;
1309 rxq->pvid_state = hw->port_base_vlan_cfg.state;
1310 rxq->configured = true;
1311 rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
1312 idx * HNS3_TQP_REG_SIZE);
1313 rxq->rx_buf_len = rx_buf_size;
1315 rxq->pkt_len_errors = 0;
1316 rxq->l3_csum_erros = 0;
1317 rxq->l4_csum_erros = 0;
1318 rxq->ol3_csum_erros = 0;
1319 rxq->ol4_csum_erros = 0;
1321 /* CRC len set here is used for amending packet length */
1322 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
1323 rxq->crc_len = RTE_ETHER_CRC_LEN;
1327 rte_spinlock_lock(&hw->lock);
1328 dev->data->rx_queues[idx] = rxq;
1329 rte_spinlock_unlock(&hw->lock);
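/*
 * A minimal application-side sketch (assumed values, not taken from this
 * file) of how the setup path above is reached through the generic ethdev
 * API:
 *
 *	struct rte_eth_rxconf rxconf = dev_info.default_rxconf;
 *	rxconf.rx_free_thresh = 32;
 *	ret = rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(),
 *				     &rxconf, mbuf_pool);
 */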
1334 static inline uint32_t
1335 rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint32_t ol_info)
1337 #define HNS3_L2TBL_NUM 4
1338 #define HNS3_L3TBL_NUM 16
1339 #define HNS3_L4TBL_NUM 16
1340 #define HNS3_OL3TBL_NUM 16
1341 #define HNS3_OL4TBL_NUM 16
1342 uint32_t pkt_type = 0;
1343 uint32_t l2id, l3id, l4id;
1344 uint32_t ol3id, ol4id;
1346 static const uint32_t l2table[HNS3_L2TBL_NUM] = {
1348 RTE_PTYPE_L2_ETHER_QINQ,
1349 RTE_PTYPE_L2_ETHER_VLAN,
1350 RTE_PTYPE_L2_ETHER_VLAN
1353 static const uint32_t l3table[HNS3_L3TBL_NUM] = {
1356 RTE_PTYPE_L2_ETHER_ARP,
1358 RTE_PTYPE_L3_IPV4_EXT,
1359 RTE_PTYPE_L3_IPV6_EXT,
1360 RTE_PTYPE_L2_ETHER_LLDP,
1361 0, 0, 0, 0, 0, 0, 0, 0, 0
1364 static const uint32_t l4table[HNS3_L4TBL_NUM] = {
1367 RTE_PTYPE_TUNNEL_GRE,
1371 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1374 static const uint32_t inner_l2table[HNS3_L2TBL_NUM] = {
1375 RTE_PTYPE_INNER_L2_ETHER,
1376 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1377 RTE_PTYPE_INNER_L2_ETHER_QINQ,
1381 static const uint32_t inner_l3table[HNS3_L3TBL_NUM] = {
1382 RTE_PTYPE_INNER_L3_IPV4,
1383 RTE_PTYPE_INNER_L3_IPV6,
1385 RTE_PTYPE_INNER_L2_ETHER,
1386 RTE_PTYPE_INNER_L3_IPV4_EXT,
1387 RTE_PTYPE_INNER_L3_IPV6_EXT,
1388 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1391 static const uint32_t inner_l4table[HNS3_L4TBL_NUM] = {
1392 RTE_PTYPE_INNER_L4_UDP,
1393 RTE_PTYPE_INNER_L4_TCP,
1394 RTE_PTYPE_TUNNEL_GRE,
1395 RTE_PTYPE_INNER_L4_SCTP,
1397 RTE_PTYPE_INNER_L4_ICMP,
1398 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1401 static const uint32_t ol3table[HNS3_OL3TBL_NUM] = {
1405 RTE_PTYPE_L3_IPV4_EXT,
1406 RTE_PTYPE_L3_IPV6_EXT,
1407 0, 0, 0, 0, 0, 0, 0, 0, 0,
1411 static const uint32_t ol4table[HNS3_OL4TBL_NUM] = {
1413 RTE_PTYPE_TUNNEL_VXLAN,
1414 RTE_PTYPE_TUNNEL_NVGRE,
1415 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1418 l2id = hns3_get_field(pkt_info, HNS3_RXD_STRP_TAGP_M,
1419 HNS3_RXD_STRP_TAGP_S);
1420 l3id = hns3_get_field(pkt_info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S);
1421 l4id = hns3_get_field(pkt_info, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S);
1422 ol3id = hns3_get_field(ol_info, HNS3_RXD_OL3ID_M, HNS3_RXD_OL3ID_S);
1423 ol4id = hns3_get_field(ol_info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
1425 if (ol4table[ol4id])
1426 pkt_type |= (inner_l2table[l2id] | inner_l3table[l3id] |
1427 inner_l4table[l4id] | ol3table[ol3id] |
1430 pkt_type |= (l2table[l2id] | l3table[l3id] | l4table[l4id]);
1435 hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1437 static const uint32_t ptypes[] = {
1439 RTE_PTYPE_L2_ETHER_VLAN,
1440 RTE_PTYPE_L2_ETHER_QINQ,
1441 RTE_PTYPE_L2_ETHER_LLDP,
1442 RTE_PTYPE_L2_ETHER_ARP,
1444 RTE_PTYPE_L3_IPV4_EXT,
1446 RTE_PTYPE_L3_IPV6_EXT,
1452 RTE_PTYPE_TUNNEL_GRE,
1456 if (dev->rx_pkt_burst == hns3_recv_pkts)
1463 hns3_clean_rx_buffers(struct hns3_rx_queue *rxq, int count)
1465 rxq->next_to_use += count;
1466 if (rxq->next_to_use >= rxq->nb_rx_desc)
1467 rxq->next_to_use -= rxq->nb_rx_desc;
1469 hns3_write_dev(rxq, HNS3_RING_RX_HEAD_REG, count);
1473 hns3_handle_bdinfo(struct hns3_rx_queue *rxq, struct rte_mbuf *rxm,
1474 uint32_t bd_base_info, uint32_t l234_info,
1475 uint32_t *cksum_err)
1479 if (unlikely(l234_info & BIT(HNS3_RXD_L2E_B))) {
1484 if (unlikely(rxm->pkt_len == 0 ||
1485 (l234_info & BIT(HNS3_RXD_TRUNCAT_B)))) {
1486 rxq->pkt_len_errors++;
1490 if (bd_base_info & BIT(HNS3_RXD_L3L4P_B)) {
1491 if (unlikely(l234_info & BIT(HNS3_RXD_L3E_B))) {
1492 rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;
1493 rxq->l3_csum_erros++;
1494 tmp |= HNS3_L3_CKSUM_ERR;
1497 if (unlikely(l234_info & BIT(HNS3_RXD_L4E_B))) {
1498 rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
1499 rxq->l4_csum_erros++;
1500 tmp |= HNS3_L4_CKSUM_ERR;
1503 if (unlikely(l234_info & BIT(HNS3_RXD_OL3E_B))) {
1504 rxq->ol3_csum_erros++;
1505 tmp |= HNS3_OUTER_L3_CKSUM_ERR;
1508 if (unlikely(l234_info & BIT(HNS3_RXD_OL4E_B))) {
1509 rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
1510 rxq->ol4_csum_erros++;
1511 tmp |= HNS3_OUTER_L4_CKSUM_ERR;
1520 hns3_rx_set_cksum_flag(struct rte_mbuf *rxm, uint64_t packet_type,
1521 const uint32_t cksum_err)
1523 if (unlikely((packet_type & RTE_PTYPE_TUNNEL_MASK))) {
1524 if (likely(packet_type & RTE_PTYPE_INNER_L3_MASK) &&
1525 (cksum_err & HNS3_L3_CKSUM_ERR) == 0)
1526 rxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
1527 if (likely(packet_type & RTE_PTYPE_INNER_L4_MASK) &&
1528 (cksum_err & HNS3_L4_CKSUM_ERR) == 0)
1529 rxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
1530 if (likely(packet_type & RTE_PTYPE_L4_MASK) &&
1531 (cksum_err & HNS3_OUTER_L4_CKSUM_ERR) == 0)
1532 rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;
1534 if (likely(packet_type & RTE_PTYPE_L3_MASK) &&
1535 (cksum_err & HNS3_L3_CKSUM_ERR) == 0)
1536 rxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
1537 if (likely(packet_type & RTE_PTYPE_L4_MASK) &&
1538 (cksum_err & HNS3_L4_CKSUM_ERR) == 0)
1539 rxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
1544 hns3_rxd_to_vlan_tci(struct hns3_rx_queue *rxq, struct rte_mbuf *mb,
1545 uint32_t l234_info, const struct hns3_desc *rxd)
1547 #define HNS3_STRP_STATUS_NUM 0x4
1549 #define HNS3_NO_STRP_VLAN_VLD 0x0
1550 #define HNS3_INNER_STRP_VLAN_VLD 0x1
1551 #define HNS3_OUTER_STRP_VLAN_VLD 0x2
1552 uint32_t strip_status;
1553 uint32_t report_mode;
1556 * Due to a HW limitation, the VLAN tag is always inserted into the RX
1557 * descriptor when the tag is stripped from the packet, so the driver
1558 * needs to determine which tag to report to the mbuf according to the
1559 * PVID configuration and the VLAN stripped status.
1561 static const uint32_t report_type[][HNS3_STRP_STATUS_NUM] = {
1563 HNS3_NO_STRP_VLAN_VLD,
1564 HNS3_OUTER_STRP_VLAN_VLD,
1565 HNS3_INNER_STRP_VLAN_VLD,
1566 HNS3_OUTER_STRP_VLAN_VLD
1569 HNS3_NO_STRP_VLAN_VLD,
1570 HNS3_NO_STRP_VLAN_VLD,
1571 HNS3_NO_STRP_VLAN_VLD,
1572 HNS3_INNER_STRP_VLAN_VLD
1575 strip_status = hns3_get_field(l234_info, HNS3_RXD_STRP_TAGP_M,
1576 HNS3_RXD_STRP_TAGP_S);
1577 report_mode = report_type[rxq->pvid_state][strip_status];
1578 switch (report_mode) {
1579 case HNS3_NO_STRP_VLAN_VLD:
1582 case HNS3_INNER_STRP_VLAN_VLD:
1583 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1584 mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.vlan_tag);
1586 case HNS3_OUTER_STRP_VLAN_VLD:
1587 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1588 mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.ot_vlan_tag);
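/*
 * Application-side sketch (illustrative only): when the flags set above are
 * present, the stripped tag can be read back from the mbuf:
 *
 *	if (mb->ol_flags & PKT_RX_VLAN_STRIPPED)
 *		printf("stripped VLAN tci: %u\n", mb->vlan_tci);
 */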
1594 recalculate_data_len(struct rte_mbuf *first_seg, struct rte_mbuf *last_seg,
1595 struct rte_mbuf *rxm, struct hns3_rx_queue *rxq,
1598 uint8_t crc_len = rxq->crc_len;
1600 if (data_len <= crc_len) {
1601 rte_pktmbuf_free_seg(rxm);
1602 first_seg->nb_segs--;
1603 last_seg->data_len = (uint16_t)(last_seg->data_len -
1604 (crc_len - data_len));
1605 last_seg->next = NULL;
1607 rxm->data_len = (uint16_t)(data_len - crc_len);
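/*
 * For example, with crc_len = 4 and a last segment holding only
 * data_len = 2 bytes, that segment contains nothing but part of the CRC:
 * it is freed and the previous segment's data_len is reduced by the
 * remaining 2 CRC bytes.
 */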
1611 hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1613 volatile struct hns3_desc *rx_ring; /* RX ring (desc) */
1614 volatile struct hns3_desc *rxdp; /* pointer of the current desc */
1615 struct hns3_rx_queue *rxq; /* RX queue */
1616 struct hns3_entry *sw_ring;
1617 struct hns3_entry *rxe;
1618 struct rte_mbuf *first_seg;
1619 struct rte_mbuf *last_seg;
1620 struct hns3_desc rxd;
1621 struct rte_mbuf *nmb; /* pointer of the new mbuf */
1622 struct rte_mbuf *rxm;
1623 struct rte_eth_dev *dev;
1624 uint32_t bd_base_info;
1641 rx_id = rxq->next_to_clean;
1642 rx_ring = rxq->rx_ring;
1643 first_seg = rxq->pkt_first_seg;
1644 last_seg = rxq->pkt_last_seg;
1645 sw_ring = rxq->sw_ring;
1647 while (nb_rx < nb_pkts) {
1648 rxdp = &rx_ring[rx_id];
1649 bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
1650 if (unlikely(!hns3_get_bit(bd_base_info, HNS3_RXD_VLD_B)))
1653 * The interactive process between software and hardware of
1654 * receiving a new packet in hns3 network engine:
1655 * 1. Hardware network engine firstly writes the packet content
1656 * to the memory pointed by the 'addr' field of the Rx Buffer
1657 * Descriptor, secondly fills the result of parsing the
1658 * packet include the valid field into the Rx Buffer
1659 * Descriptor in one write operation.
1660 * 2. Driver reads the Rx BD's valid field in the loop to check
1661 * whether it's valid, if valid then assign a new address to
1662 * the addr field, clear the valid field, get the other
1663 * information of the packet by parsing Rx BD's other fields,
1664 * finally write back the number of Rx BDs processed by the
1665 * driver to the HNS3_RING_RX_HEAD_REG register to inform
1667 * In the above process, the ordering is very important. We must
1668 * make sure that the CPU reads the Rx BD's other fields only after the
1671 * There are two types of re-ordering: compiler re-ordering and
1672 * CPU re-ordering under the ARMv8 architecture.
1673 * 1. we use volatile to deal with compiler re-ordering, so you
1674 * can see that rx_ring/rxdp are defined with volatile.
1675 * 2. we commonly use memory barrier to deal with CPU
1676 * re-ordering, but the cost is high.
1678 * In order to solve the high cost of using memory barrier, we
1679 * use the data dependency order under the ARMv8 architecture,
1682 * instr02: load B <- A
1683 * the instr02 will always execute after instr01.
1685 * To construct the data dependency ordering, we use the
1686 * following assignment:
1687 * rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -
1688 * (1u<<HNS3_RXD_VLD_B)]
1689 * Using gcc compiler under the ARMv8 architecture, the related
1690 * assembly code example as follows:
1691 * note: (1u << HNS3_RXD_VLD_B) equal 0x10
1692 * instr01: ldr w26, [x22, #28] --read bd_base_info
1693 * instr02: and w0, w26, #0x10 --calc bd_base_info & 0x10
1694 * instr03: sub w0, w0, #0x10 --calc (bd_base_info &
1696 * instr04: add x0, x22, x0, lsl #5 --calc copy source addr
1697 * instr05: ldp x2, x3, [x0]
1698 * instr06: stp x2, x3, [x29, #256] --copy BD's [0 ~ 15]B
1699 * instr07: ldp x4, x5, [x0, #16]
1700 * instr08: stp x4, x5, [x29, #272] --copy BD's [16 ~ 31]B
1701 * the instr05~08 depend on x0's value, x0 depends on w26's
1702 * value, and w26 is the bd_base_info; this forms the data
1703 * dependency ordering.
1704 * note: if the BD is valid, (bd_base_info & (1u<<HNS3_RXD_VLD_B)) -
1705 * (1u<<HNS3_RXD_VLD_B) will always be zero, so the
1706 * assignment is correct.
1708 * So we use the data dependency ordering instead of memory
1709 * barrier to improve receive performance.
1711 rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -
1712 (1u << HNS3_RXD_VLD_B)];
1714 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1715 if (unlikely(nmb == NULL)) {
1716 dev = &rte_eth_devices[rxq->port_id];
1717 dev->data->rx_mbuf_alloc_failed++;
1722 rxe = &sw_ring[rx_id];
1724 if (unlikely(rx_id == rxq->nb_rx_desc))
1727 rte_prefetch0(sw_ring[rx_id].mbuf);
1728 if ((rx_id & 0x3) == 0) {
1729 rte_prefetch0(&rx_ring[rx_id]);
1730 rte_prefetch0(&sw_ring[rx_id]);
1736 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1737 rxdp->rx.bd_base_info = 0;
1738 rxdp->addr = dma_addr;
1741 * Load the remaining descriptor data and extract the necessary fields.
1742 * The data size from the buffer descriptor may contain the CRC len,
1743 * so the packet len should subtract it.
1745 data_len = (uint16_t)(rte_le_to_cpu_16(rxd.rx.size));
1746 l234_info = rte_le_to_cpu_32(rxd.rx.l234_info);
1747 ol_info = rte_le_to_cpu_32(rxd.rx.ol_info);
1749 if (first_seg == NULL) {
1751 first_seg->nb_segs = 1;
1753 first_seg->nb_segs++;
1754 last_seg->next = rxm;
1757 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1758 rxm->data_len = data_len;
1760 if (!hns3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
1766 * The last buffer of the received packet. The packet len from the
1767 * buffer descriptor may contain the CRC len, so the packet len should
1768 * subtract it, same as the data len.
1770 pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.rx.pkt_len));
1771 first_seg->pkt_len = pkt_len;
1774 * This is the last buffer of the received packet. If the CRC
1775 * is not stripped by the hardware:
1776 * - Subtract the CRC length from the total packet length.
1777 * - If the last buffer only contains the whole CRC or a part
1778 * of it, free the mbuf associated to the last buffer. If part
1779 * of the CRC is also contained in the previous mbuf, subtract
1780 * the length of that CRC part from the data length of the
1784 if (unlikely(rxq->crc_len > 0)) {
1785 first_seg->pkt_len -= rxq->crc_len;
1786 recalculate_data_len(first_seg, last_seg, rxm, rxq,
1790 first_seg->port = rxq->port_id;
1791 first_seg->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash);
1792 first_seg->ol_flags = PKT_RX_RSS_HASH;
1793 if (unlikely(hns3_get_bit(bd_base_info, HNS3_RXD_LUM_B))) {
1794 first_seg->hash.fdir.hi =
1795 rte_le_to_cpu_32(rxd.rx.fd_id);
1796 first_seg->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
1799 gro_size = hns3_get_field(bd_base_info, HNS3_RXD_GRO_SIZE_M,
1800 HNS3_RXD_GRO_SIZE_S);
1801 if (gro_size != 0) {
1802 first_seg->ol_flags |= PKT_RX_LRO;
1803 first_seg->tso_segsz = gro_size;
1806 ret = hns3_handle_bdinfo(rxq, first_seg, bd_base_info,
1807 l234_info, &cksum_err);
1811 first_seg->packet_type = rxd_pkt_info_to_pkt_type(l234_info,
1814 if (bd_base_info & BIT(HNS3_RXD_L3L4P_B))
1815 hns3_rx_set_cksum_flag(first_seg,
1816 first_seg->packet_type,
1818 hns3_rxd_to_vlan_tci(rxq, first_seg, l234_info, &rxd);
1820 rx_pkts[nb_rx++] = first_seg;
1824 rte_pktmbuf_free(first_seg);
1828 rxq->next_to_clean = rx_id;
1829 rxq->pkt_first_seg = first_seg;
1830 rxq->pkt_last_seg = last_seg;
1832 nb_rx_bd = nb_rx_bd + rxq->nb_rx_hold;
1833 if (nb_rx_bd > rxq->rx_free_thresh) {
1834 hns3_clean_rx_buffers(rxq, nb_rx_bd);
1837 rxq->nb_rx_hold = nb_rx_bd;
1843 hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
1844 unsigned int socket_id, const struct rte_eth_txconf *conf)
1846 struct hns3_adapter *hns = dev->data->dev_private;
1847 struct hns3_hw *hw = &hns->hw;
1848 struct hns3_queue_info q_info;
1849 struct hns3_tx_queue *txq;
1852 if (dev->data->dev_started) {
1853 hns3_err(hw, "tx_queue_setup after dev_start not supported");
1857 if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||
1858 nb_desc % HNS3_ALIGN_RING_DESC) {
1859 hns3_err(hw, "Number (%u) of tx descriptors is invalid",
1864 if (dev->data->tx_queues[idx] != NULL) {
1865 hns3_tx_queue_release(dev->data->tx_queues[idx]);
1866 dev->data->tx_queues[idx] = NULL;
1870 q_info.socket_id = socket_id;
1871 q_info.nb_desc = nb_desc;
1872 q_info.type = "hns3 TX queue";
1873 q_info.ring_name = "tx_ring";
1874 txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
1877 "Failed to alloc mem and reserve DMA mem for tx ring!");
1881 txq->tx_deferred_start = conf->tx_deferred_start;
1882 tx_entry_len = sizeof(struct hns3_entry) * txq->nb_tx_desc;
1883 txq->sw_ring = rte_zmalloc_socket("hns3 TX sw ring", tx_entry_len,
1884 RTE_CACHE_LINE_SIZE, socket_id);
1885 if (txq->sw_ring == NULL) {
1886 hns3_err(hw, "Failed to allocate memory for tx sw ring!");
1887 hns3_tx_queue_release(txq);
1892 txq->next_to_use = 0;
1893 txq->next_to_clean = 0;
1894 txq->tx_bd_ready = txq->nb_tx_desc - 1;
1895 txq->port_id = dev->data->port_id;
1896 txq->pvid_state = hw->port_base_vlan_cfg.state;
1897 txq->configured = true;
1898 txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
1899 idx * HNS3_TQP_REG_SIZE);
1900 txq->over_length_pkt_cnt = 0;
1901 txq->exceed_limit_bd_pkt_cnt = 0;
1902 txq->exceed_limit_bd_reassem_fail = 0;
1903 txq->unsupported_tunnel_pkt_cnt = 0;
1904 txq->queue_full_cnt = 0;
1905 txq->pkt_padding_fail_cnt = 0;
1906 rte_spinlock_lock(&hw->lock);
1907 dev->data->tx_queues[idx] = txq;
1908 rte_spinlock_unlock(&hw->lock);
1914 hns3_queue_xmit(struct hns3_tx_queue *txq, uint32_t buf_num)
1916 hns3_write_dev(txq, HNS3_RING_TX_TAIL_REG, buf_num);
1920 hns3_tx_free_useless_buffer(struct hns3_tx_queue *txq)
1922 uint16_t tx_next_clean = txq->next_to_clean;
1923 uint16_t tx_next_use = txq->next_to_use;
1924 uint16_t tx_bd_ready = txq->tx_bd_ready;
1925 uint16_t tx_bd_max = txq->nb_tx_desc;
1926 struct hns3_entry *tx_bak_pkt = &txq->sw_ring[tx_next_clean];
1927 struct hns3_desc *desc = &txq->tx_ring[tx_next_clean];
1928 struct rte_mbuf *mbuf;
1930 while ((!hns3_get_bit(desc->tx.tp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B)) &&
1931 tx_next_use != tx_next_clean) {
1932 mbuf = tx_bak_pkt->mbuf;
1934 rte_pktmbuf_free_seg(mbuf);
1935 tx_bak_pkt->mbuf = NULL;
1943 if (tx_next_clean >= tx_bd_max) {
1945 desc = txq->tx_ring;
1946 tx_bak_pkt = txq->sw_ring;
1950 txq->next_to_clean = tx_next_clean;
1951 txq->tx_bd_ready = tx_bd_ready;
1955 hns3_tso_proc_tunnel(struct hns3_desc *desc, uint64_t ol_flags,
1956 struct rte_mbuf *rxm, uint8_t *l2_len)
1962 tun_flags = ol_flags & PKT_TX_TUNNEL_MASK;
1966 otmp = rte_le_to_cpu_32(desc->tx.ol_type_vlan_len_msec);
1967 switch (tun_flags) {
1968 case PKT_TX_TUNNEL_GENEVE:
1969 case PKT_TX_TUNNEL_VXLAN:
1970 *l2_len = rxm->l2_len - RTE_ETHER_VXLAN_HLEN;
1972 case PKT_TX_TUNNEL_GRE:
1974 * OL4 header size, defined in units of 4 bytes; it contains the
1975 * outer L4 (GRE) length and the tunneling length.
1977 ol4_len = hns3_get_field(otmp, HNS3_TXD_L4LEN_M,
1979 *l2_len = rxm->l2_len - (ol4_len << HNS3_L4_LEN_UNIT);
1982 /* For non UDP / GRE tunneling, drop the tunnel packet */
1985 hns3_set_field(otmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
1986 rxm->outer_l2_len >> HNS3_L2_LEN_UNIT);
1987 desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(otmp);
1993 hns3_config_gro(struct hns3_hw *hw, bool en)
1995 struct hns3_cfg_gro_status_cmd *req;
1996 struct hns3_cmd_desc desc;
1999 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GRO_GENERIC_CONFIG, false);
2000 req = (struct hns3_cfg_gro_status_cmd *)desc.data;
2002 req->gro_en = rte_cpu_to_le_16(en ? 1 : 0);
2004 ret = hns3_cmd_send(hw, &desc, 1);
2006 hns3_err(hw, "%s hardware GRO failed, ret = %d",
2007 en ? "enable" : "disable", ret);
2013 hns3_restore_gro_conf(struct hns3_hw *hw)
2019 offloads = hw->data->dev_conf.rxmode.offloads;
2020 gro_en = offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false;
2021 ret = hns3_config_gro(hw, gro_en);
2023 hns3_err(hw, "restore hardware GRO to %s failed, ret = %d",
2024 gro_en ? "enabled" : "disabled", ret);
2030 hns3_pkt_is_tso(struct rte_mbuf *m)
2032 return (m->tso_segsz != 0 && m->ol_flags & PKT_TX_TCP_SEG);
2036 hns3_set_tso(struct hns3_desc *desc, uint64_t ol_flags,
2037 uint32_t paylen, struct rte_mbuf *rxm)
2039 uint8_t l2_len = rxm->l2_len;
2042 if (!hns3_pkt_is_tso(rxm))
2045 if (hns3_tso_proc_tunnel(desc, ol_flags, rxm, &l2_len))
2048 if (paylen <= rxm->tso_segsz)
2051 tmp = rte_le_to_cpu_32(desc->tx.type_cs_vlan_tso_len);
2052 hns3_set_bit(tmp, HNS3_TXD_TSO_B, 1);
2053 hns3_set_bit(tmp, HNS3_TXD_L3CS_B, 1);
2054 hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, HNS3_L4T_TCP);
2055 hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
2056 hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
2057 sizeof(struct rte_tcp_hdr) >> HNS3_L4_LEN_UNIT);
2058 hns3_set_field(tmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
2059 l2_len >> HNS3_L2_LEN_UNIT);
2060 desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp);
2061 desc->tx.mss = rte_cpu_to_le_16(rxm->tso_segsz);
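/*
 * For example, for a plain TCP TSO packet the L4LEN field above is
 * programmed as sizeof(struct rte_tcp_hdr) >> HNS3_L4_LEN_UNIT, i.e.
 * 20 >> 2 = 5 units of 4 bytes, assuming HNS3_L4_LEN_UNIT encodes 4-byte
 * units as the surrounding comments state.
 */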
2065 hns3_fill_per_desc(struct hns3_desc *desc, struct rte_mbuf *rxm)
2067 desc->addr = rte_mbuf_data_iova(rxm);
2068 desc->tx.send_size = rte_cpu_to_le_16(rte_pktmbuf_data_len(rxm));
2069 desc->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B));
2073 hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc,
2074 struct rte_mbuf *rxm)
2076 uint64_t ol_flags = rxm->ol_flags;
2080 hdr_len = rxm->l2_len + rxm->l3_len + rxm->l4_len;
2081 hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ?
2082 rxm->outer_l2_len + rxm->outer_l3_len : 0;
2083 paylen = rxm->pkt_len - hdr_len;
2084 desc->tx.paylen = rte_cpu_to_le_32(paylen);
2085 hns3_set_tso(desc, ol_flags, paylen, rxm);
2088 * Currently, the hns3 network engine hardware doesn't support more than
2089 * two layers of VLAN offload in the Tx direction. So when the number of
2090 * VLANs in the packet represented by rxm plus the number of VLANs
2091 * offloaded by hardware (such as PVID) exceeds two, the packet will
2092 * be discarded or its original VLAN will be overwritten
2093 * by hardware. When the PF PVID is enabled by calling the API function
2094 * named rte_eth_dev_set_vlan_pvid or the VF PVID is enabled by the hns3
2095 * PF kernel ether driver, the outer VLAN tag will always be the PVID.
2096 * To avoid the VLAN in the Tx descriptor being overwritten by the PVID,
2097 * it should be added at the position close to the IP header when PVID is enabled.
2099 if (!txq->pvid_state && ol_flags & (PKT_TX_VLAN_PKT |
2101 desc->tx.ol_type_vlan_len_msec |=
2102 rte_cpu_to_le_32(BIT(HNS3_TXD_OVLAN_B));
2103 if (ol_flags & PKT_TX_QINQ_PKT)
2104 desc->tx.outer_vlan_tag =
2105 rte_cpu_to_le_16(rxm->vlan_tci_outer);
2107 desc->tx.outer_vlan_tag =
2108 rte_cpu_to_le_16(rxm->vlan_tci);
2111 if (ol_flags & PKT_TX_QINQ_PKT ||
2112 ((ol_flags & PKT_TX_VLAN_PKT) && txq->pvid_state)) {
2113 desc->tx.type_cs_vlan_tso_len |=
2114 rte_cpu_to_le_32(BIT(HNS3_TXD_VLAN_B));
2115 desc->tx.vlan_tag = rte_cpu_to_le_16(rxm->vlan_tci);
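/*
 * Application-side sketch (assumed values): requesting single VLAN
 * insertion, which the logic above places in either the outer or the
 * inner tag field depending on the PVID state:
 *
 *	m->ol_flags |= PKT_TX_VLAN_PKT;
 *	m->vlan_tci = 100;
 */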
2120 hns3_tx_alloc_mbufs(struct hns3_tx_queue *txq, struct rte_mempool *mb_pool,
2121 uint16_t nb_new_buf, struct rte_mbuf **alloc_mbuf)
2123 struct rte_mbuf *new_mbuf = NULL;
2124 struct rte_eth_dev *dev;
2125 struct rte_mbuf *temp;
2129 /* Allocate enough mbufs */
2130 for (i = 0; i < nb_new_buf; i++) {
2131 temp = rte_pktmbuf_alloc(mb_pool);
2132 if (unlikely(temp == NULL)) {
2133 dev = &rte_eth_devices[txq->port_id];
2134 hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2135 hns3_err(hw, "Failed to alloc TX mbuf port_id=%d,"
2136 "queue_id=%d in reassemble tx pkts.",
2137 txq->port_id, txq->queue_id);
2138 rte_pktmbuf_free(new_mbuf);
2141 temp->next = new_mbuf;
2145 if (new_mbuf == NULL)
2148 new_mbuf->nb_segs = nb_new_buf;
2149 *alloc_mbuf = new_mbuf;
2155 hns3_pktmbuf_copy_hdr(struct rte_mbuf *new_pkt, struct rte_mbuf *old_pkt)
2157 new_pkt->ol_flags = old_pkt->ol_flags;
2158 new_pkt->pkt_len = rte_pktmbuf_pkt_len(old_pkt);
2159 new_pkt->outer_l2_len = old_pkt->outer_l2_len;
2160 new_pkt->outer_l3_len = old_pkt->outer_l3_len;
2161 new_pkt->l2_len = old_pkt->l2_len;
2162 new_pkt->l3_len = old_pkt->l3_len;
2163 new_pkt->l4_len = old_pkt->l4_len;
2164 new_pkt->vlan_tci_outer = old_pkt->vlan_tci_outer;
2165 new_pkt->vlan_tci = old_pkt->vlan_tci;
2169 hns3_reassemble_tx_pkts(void *tx_queue, struct rte_mbuf *tx_pkt,
2170 struct rte_mbuf **new_pkt)
2172 struct hns3_tx_queue *txq = tx_queue;
2173 struct rte_mempool *mb_pool;
2174 struct rte_mbuf *new_mbuf;
2175 struct rte_mbuf *temp_new;
2176 struct rte_mbuf *temp;
2177 uint16_t last_buf_len;
2178 uint16_t nb_new_buf;
2189 mb_pool = tx_pkt->pool;
2190 buf_size = tx_pkt->buf_len - RTE_PKTMBUF_HEADROOM;
2191 nb_new_buf = (rte_pktmbuf_pkt_len(tx_pkt) - 1) / buf_size + 1;
2192 if (nb_new_buf > HNS3_MAX_NON_TSO_BD_PER_PKT)
2195 last_buf_len = rte_pktmbuf_pkt_len(tx_pkt) % buf_size;
2196 if (last_buf_len == 0)
2197 last_buf_len = buf_size;
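/*
 * For example, assuming buf_size = 2048 and a 5000-byte packet,
 * nb_new_buf = (5000 - 1) / 2048 + 1 = 3 and last_buf_len = 5000 % 2048 =
 * 904, so the copy loop below fills two full mbufs and one 904-byte tail.
 */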
2199 /* Allocate enough mbufs */
2200 ret = hns3_tx_alloc_mbufs(txq, mb_pool, nb_new_buf, &new_mbuf);
2204 /* Copy the original packet content to the new mbufs */
2206 s = rte_pktmbuf_mtod(temp, char *);
2207 len_s = rte_pktmbuf_data_len(temp);
2208 temp_new = new_mbuf;
2209 for (i = 0; i < nb_new_buf; i++) {
2210 d = rte_pktmbuf_mtod(temp_new, char *);
2211 if (i < nb_new_buf - 1)
2214 buf_len = last_buf_len;
2218 len = RTE_MIN(len_s, len_d);
2222 len_d = len_d - len;
2223 len_s = len_s - len;
2229 s = rte_pktmbuf_mtod(temp, char *);
2230 len_s = rte_pktmbuf_data_len(temp);
2234 temp_new->data_len = buf_len;
2235 temp_new = temp_new->next;
2237 hns3_pktmbuf_copy_hdr(new_mbuf, tx_pkt);
2239 /* free original mbufs */
2240 rte_pktmbuf_free(tx_pkt);
2242 *new_pkt = new_mbuf;
2248 hns3_parse_outer_params(uint64_t ol_flags, uint32_t *ol_type_vlan_len_msec)
2250 uint32_t tmp = *ol_type_vlan_len_msec;
2252 /* (outer) IP header type */
2253 if (ol_flags & PKT_TX_OUTER_IPV4) {
2254 /* OL3 header size, defined in 4 bytes */
2255 hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
2256 sizeof(struct rte_ipv4_hdr) >> HNS3_L3_LEN_UNIT);
2257 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
2258 hns3_set_field(tmp, HNS3_TXD_OL3T_M,
2259 HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_CSUM);
2261 hns3_set_field(tmp, HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
2262 HNS3_OL3T_IPV4_NO_CSUM);
2263 } else if (ol_flags & PKT_TX_OUTER_IPV6) {
2264 hns3_set_field(tmp, HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
2266 /* OL3 header size, defined in 4 bytes */
2267 hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
2268 sizeof(struct rte_ipv6_hdr) >> HNS3_L3_LEN_UNIT);
2271 *ol_type_vlan_len_msec = tmp;
2275 hns3_parse_inner_params(uint64_t ol_flags, uint32_t *ol_type_vlan_len_msec,
2276 struct rte_net_hdr_lens *hdr_lens)
2278 uint32_t tmp = *ol_type_vlan_len_msec;
2281 /* OL2 header size, defined in 2 bytes */
2282 hns3_set_field(tmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
2283 sizeof(struct rte_ether_hdr) >> HNS3_L2_LEN_UNIT);
2285 /* L4TUNT: L4 Tunneling Type */
2286 switch (ol_flags & PKT_TX_TUNNEL_MASK) {
2287 case PKT_TX_TUNNEL_GENEVE:
2288 case PKT_TX_TUNNEL_VXLAN:
2289 /* MAC in UDP tunnelling packet, including VXLAN */
2290 hns3_set_field(tmp, HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S,
2291 HNS3_TUN_MAC_IN_UDP);
2293 * OL4 header size, defined in units of 4 bytes; it contains the
2294 * outer L4 (UDP) length and the tunneling length.
2296 hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
2297 (uint8_t)RTE_ETHER_VXLAN_HLEN >>
2300 case PKT_TX_TUNNEL_GRE:
2301 hns3_set_field(tmp, HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S,
2304 * OL4 header size, defined in units of 4 bytes; it contains the
2305 * outer L4 (GRE) length and the tunneling length.
2307 l4_len = hdr_lens->l4_len + hdr_lens->tunnel_len;
2308 hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
2309 l4_len >> HNS3_L4_LEN_UNIT);
2312 /* For non UDP / GRE tunneling, drop the tunnel packet */
2316 *ol_type_vlan_len_msec = tmp;
2322 hns3_parse_tunneling_params(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
2324 struct rte_net_hdr_lens *hdr_lens)
2326 struct hns3_desc *tx_ring = txq->tx_ring;
2327 struct hns3_desc *desc = &tx_ring[tx_desc_id];
2331 hns3_parse_outer_params(ol_flags, &value);
2332 ret = hns3_parse_inner_params(ol_flags, &value, hdr_lens);
2336 desc->tx.ol_type_vlan_len_msec |= rte_cpu_to_le_32(value);
2342 hns3_parse_l3_cksum_params(uint64_t ol_flags, uint32_t *type_cs_vlan_tso_len)
2346 /* Enable L3 checksum offloads */
2347 if (ol_flags & PKT_TX_IPV4) {
2348 tmp = *type_cs_vlan_tso_len;
2349 hns3_set_field(tmp, HNS3_TXD_L3T_M, HNS3_TXD_L3T_S,
2351 /* inner(/normal) L3 header size, defined in 4 bytes */
2352 hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
2353 sizeof(struct rte_ipv4_hdr) >> HNS3_L3_LEN_UNIT);
2354 if (ol_flags & PKT_TX_IP_CKSUM)
2355 hns3_set_bit(tmp, HNS3_TXD_L3CS_B, 1);
2356 *type_cs_vlan_tso_len = tmp;
2357 } else if (ol_flags & PKT_TX_IPV6) {
2358 tmp = *type_cs_vlan_tso_len;
2359 /* L3T, IPv6 doesn't do checksum */
2360 hns3_set_field(tmp, HNS3_TXD_L3T_M, HNS3_TXD_L3T_S,
2362 /* inner(/normal) L3 header size, defined in 4 bytes */
2363 hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
2364 sizeof(struct rte_ipv6_hdr) >> HNS3_L3_LEN_UNIT);
2365 *type_cs_vlan_tso_len = tmp;
static void
hns3_parse_l4_cksum_params(uint64_t ol_flags, uint32_t *type_cs_vlan_tso_len)
{
	uint32_t tmp;

	/* Enable L4 checksum offloads */
	switch (ol_flags & PKT_TX_L4_MASK) {
	case PKT_TX_TCP_CKSUM:
		tmp = *type_cs_vlan_tso_len;
		hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
			       HNS3_L4T_TCP);
		hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
		hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
			       sizeof(struct rte_tcp_hdr) >> HNS3_L4_LEN_UNIT);
		*type_cs_vlan_tso_len = tmp;
		break;
	case PKT_TX_UDP_CKSUM:
		tmp = *type_cs_vlan_tso_len;
		hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
			       HNS3_L4T_UDP);
		hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
		hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
			       sizeof(struct rte_udp_hdr) >> HNS3_L4_LEN_UNIT);
		*type_cs_vlan_tso_len = tmp;
		break;
	case PKT_TX_SCTP_CKSUM:
		tmp = *type_cs_vlan_tso_len;
		hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
			       HNS3_L4T_SCTP);
		hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
		hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
			       sizeof(struct rte_sctp_hdr) >> HNS3_L4_LEN_UNIT);
		*type_cs_vlan_tso_len = tmp;
		break;
	default:
		break;
	}
}

static void
hns3_txd_enable_checksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
			 uint64_t ol_flags)
{
	struct hns3_desc *tx_ring = txq->tx_ring;
	struct hns3_desc *desc = &tx_ring[tx_desc_id];
	uint32_t value = 0;

	/* inner(/normal) L2 header size, defined in 2 bytes */
	hns3_set_field(value, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
		       sizeof(struct rte_ether_hdr) >> HNS3_L2_LEN_UNIT);

	hns3_parse_l3_cksum_params(ol_flags, &value);
	hns3_parse_l4_cksum_params(ol_flags, &value);

	desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(value);
}

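/*
 * Illustrative sketch, not part of the driver: the minimal mbuf setup for
 * the plain (non-tunnel) checksum path above, assuming an IPv4/TCP packet
 * and the generic DPDK TX offload flags:
 *
 *	m->l2_len = sizeof(struct rte_ether_hdr);
 *	m->l3_len = sizeof(struct rte_ipv4_hdr);
 *	m->l4_len = sizeof(struct rte_tcp_hdr);
 *	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
 */
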
static bool
hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num)
{
	struct rte_mbuf *m_first = tx_pkts;
	struct rte_mbuf *m_last = tx_pkts;
	uint32_t tot_len = 0;
	uint32_t hdr_len;
	uint32_t i;

	/*
	 * Hardware requires that the sum of the data length of every 8
	 * consecutive buffers is greater than MSS in hns3 network engine.
	 * We simplify it by ensuring pkt_headlen + the first 8 consecutive
	 * frags greater than gso header len + mss, and the remaining 7
	 * consecutive frags greater than MSS except the last 7 frags.
	 */
	if (bd_num <= HNS3_MAX_NON_TSO_BD_PER_PKT)
		return false;

	for (i = 0; m_last && i < HNS3_MAX_NON_TSO_BD_PER_PKT - 1;
	     i++, m_last = m_last->next)
		tot_len += m_last->data_len;

	if (!m_last)
		return true;

	/* ensure the first 8 frags is greater than mss + header */
	hdr_len = tx_pkts->l2_len + tx_pkts->l3_len + tx_pkts->l4_len;
	hdr_len += (tx_pkts->ol_flags & PKT_TX_TUNNEL_MASK) ?
		   tx_pkts->outer_l2_len + tx_pkts->outer_l3_len : 0;
	if (tot_len + m_last->data_len < tx_pkts->tso_segsz + hdr_len)
		return true;

	/*
	 * ensure the sum of the data length of every 7 consecutive buffer
	 * is greater than mss except the last one.
	 */
	for (i = 0; m_last && i < bd_num - HNS3_MAX_NON_TSO_BD_PER_PKT; i++) {
		tot_len -= m_first->data_len;
		tot_len += m_last->data_len;

		if (tot_len < tx_pkts->tso_segsz)
			return true;

		m_first = m_first->next;
		m_last = m_last->next;
	}

	return false;
}

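/*
 * Worked example with hypothetical numbers: for tso_segsz = 1448,
 * hdr_len = 54 and a chain of 64 segments of 128 bytes each, the first
 * 8 buffers hold only 8 * 128 = 1024 bytes, which is less than
 * 1448 + 54, so the function above returns true. hns3_prep_pkts() then
 * rejects such a packet with rte_errno = EINVAL; the application is
 * expected to coalesce the chain first, for instance with the generic
 * rte_pktmbuf_linearize() helper.
 */
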
static void
hns3_outer_header_cksum_prepare(struct rte_mbuf *m)
{
	uint64_t ol_flags = m->ol_flags;
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_udp_hdr *udp_hdr;
	uint32_t paylen, hdr_len;

	if (!(ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6)))
		return;

	if (ol_flags & PKT_TX_IPV4) {
		ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
						   m->outer_l2_len);

		if (ol_flags & PKT_TX_IP_CKSUM)
			ipv4_hdr->hdr_checksum = 0;
	}

	if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM &&
	    ol_flags & PKT_TX_TCP_SEG) {
		hdr_len = m->l2_len + m->l3_len + m->l4_len;
		hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ?
			   m->outer_l2_len + m->outer_l3_len : 0;
		paylen = m->pkt_len - hdr_len;
		if (paylen <= m->tso_segsz)
			return;
		udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
						  m->outer_l2_len +
						  m->outer_l3_len);
		udp_hdr->dgram_cksum = 0;
	}
}

static int
hns3_check_tso_pkt_valid(struct rte_mbuf *m)
{
	uint32_t tmp_data_len_sum = 0;
	uint16_t nb_buf = m->nb_segs;
	uint32_t paylen, hdr_len;
	struct rte_mbuf *m_seg;
	int i;

	if (nb_buf > HNS3_MAX_TSO_BD_PER_PKT)
		return -EINVAL;

	hdr_len = m->l2_len + m->l3_len + m->l4_len;
	hdr_len += (m->ol_flags & PKT_TX_TUNNEL_MASK) ?
		   m->outer_l2_len + m->outer_l3_len : 0;
	if (hdr_len > HNS3_MAX_TSO_HDR_SIZE)
		return -EINVAL;

	paylen = m->pkt_len - hdr_len;
	if (paylen > HNS3_MAX_BD_PAYLEN)
		return -EINVAL;

	/*
	 * The TSO header (include outer and inner L2, L3 and L4 header)
	 * should be provided by three descriptors in maximum in hns3 network
	 * engine.
	 */
	m_seg = m;
	for (i = 0; m_seg != NULL && i < HNS3_MAX_TSO_HDR_BD_NUM && i < nb_buf;
	     i++, m_seg = m_seg->next) {
		tmp_data_len_sum += m_seg->data_len;
	}
	if (hdr_len > tmp_data_len_sum)
		return -EINVAL;

	return 0;
}

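/*
 * Illustrative sketch, not part of the driver: a TSO request that would
 * pass the checks above, assuming an untunnelled IPv4/TCP packet and the
 * generic DPDK TSO API (the tso_segsz value is only an example):
 *
 *	m->l2_len = sizeof(struct rte_ether_hdr);
 *	m->l3_len = sizeof(struct rte_ipv4_hdr);
 *	m->l4_len = sizeof(struct rte_tcp_hdr);
 *	m->tso_segsz = 1448;
 *	m->ol_flags |= PKT_TX_TCP_SEG | PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
 *		       PKT_TX_TCP_CKSUM;
 */
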
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
static inline int
hns3_vld_vlan_chk(struct hns3_tx_queue *txq, struct rte_mbuf *m)
{
	struct rte_ether_hdr *eh;
	struct rte_vlan_hdr *vh;

	if (!txq->pvid_state)
		return 0;

	/*
	 * Due to hardware limitations, we only support two-layer VLAN hardware
	 * offload in Tx direction based on hns3 network engine, so when PVID
	 * is enabled, QinQ insert is no longer supported.
	 * In addition, when PVID is enabled, packets in the following two
	 * cases:
	 *  i) packets with more than two VLAN tags.
	 *  ii) packets with one VLAN tag while the hardware VLAN insert is
	 *      enabled.
	 * are regarded as abnormal packets and discarded by hardware in Tx
	 * direction. For debugging purposes, a validation check for these
	 * types of packets is added to the '.tx_pkt_prepare' ops
	 * implementation function named hns3_prep_pkts to inform users that
	 * these packets will be discarded.
	 */
	if (m->ol_flags & PKT_TX_QINQ_PKT)
		return -EINVAL;

	eh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
	if (eh->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
		if (m->ol_flags & PKT_TX_VLAN_PKT)
			return -EINVAL;

		/* Ensure the incoming packet is not a QinQ packet */
		vh = (struct rte_vlan_hdr *)(eh + 1);
		if (vh->eth_proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN))
			return -EINVAL;
	}

	return 0;
}
#endif

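/*
 * Illustrative example, not from the driver: with PVID enabled on the
 * queue, a packet whose data already starts with an 0x8100-tagged
 * Ethernet header and which also requests hardware VLAN insertion
 *
 *	m->vlan_tci = 100;
 *	m->ol_flags |= PKT_TX_VLAN_PKT;
 *
 * would need more than the two VLAN tags the hardware can handle, so the
 * check above makes tx_prepare fail with rte_errno set to EINVAL.
 */
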
uint16_t
hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
	       uint16_t nb_pkts)
{
	struct rte_mbuf *m;
	uint16_t i;
	int ret;

	for (i = 0; i < nb_pkts; i++) {
		m = tx_pkts[i];

		if (hns3_pkt_is_tso(m) &&
		    (hns3_pkt_need_linearized(m, m->nb_segs) ||
		     hns3_check_tso_pkt_valid(m))) {
			rte_errno = EINVAL;
			return i;
		}
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
		ret = rte_validate_tx_offload(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
		if (hns3_vld_vlan_chk(tx_queue, m)) {
			rte_errno = EINVAL;
			return i;
		}
#endif
		ret = rte_net_intel_cksum_prepare(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
		hns3_outer_header_cksum_prepare(m);
	}

	return i;
}

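/*
 * Usage sketch (illustrative): hns3_prep_pkts is installed as the
 * '.tx_pkt_prepare' callback (see hns3_set_rxtx_function below), so an
 * application normally runs it through the generic ethdev API before
 * transmitting; port_id and queue_id below are placeholders:
 *
 *	uint16_t nb = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
 *	if (nb < nb_pkts)
 *		printf("pkt %u rejected: %s\n", nb, rte_strerror(rte_errno));
 *	nb = rte_eth_tx_burst(port_id, queue_id, pkts, nb);
 */
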
static int
hns3_parse_cksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
		 const struct rte_mbuf *m, struct rte_net_hdr_lens *hdr_lens)
{
	/* Fill in tunneling parameters if necessary */
	if (m->ol_flags & PKT_TX_TUNNEL_MASK) {
		(void)rte_net_get_ptype(m, hdr_lens, RTE_PTYPE_ALL_MASK);
		if (hns3_parse_tunneling_params(txq, tx_desc_id, m->ol_flags,
						hdr_lens)) {
			txq->unsupported_tunnel_pkt_cnt++;
			return -EINVAL;
		}
	}

	/* Enable checksum offloading */
	if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK)
		hns3_txd_enable_checksum(txq, tx_desc_id, m->ol_flags);

	return 0;
}

static int
hns3_check_non_tso_pkt(uint16_t nb_buf, struct rte_mbuf **m_seg,
		       struct rte_mbuf *tx_pkt, struct hns3_tx_queue *txq)
{
	struct rte_mbuf *new_pkt;
	int ret;

	if (hns3_pkt_is_tso(*m_seg))
		return 0;

	/*
	 * If packet length is greater than HNS3_MAX_FRAME_LEN the driver
	 * supports, the packet will be ignored.
	 */
	if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) > HNS3_MAX_FRAME_LEN)) {
		txq->over_length_pkt_cnt++;
		return -EINVAL;
	}

	if (unlikely(nb_buf > HNS3_MAX_NON_TSO_BD_PER_PKT)) {
		txq->exceed_limit_bd_pkt_cnt++;
		ret = hns3_reassemble_tx_pkts(txq, tx_pkt, &new_pkt);
		if (ret) {
			txq->exceed_limit_bd_reassem_fail++;
			return ret;
		}
		*m_seg = new_pkt;
	}

	return 0;
}

uint16_t
hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct rte_net_hdr_lens hdr_lens = {0};
	struct hns3_tx_queue *txq = tx_queue;
	struct hns3_entry *tx_bak_pkt;
	struct hns3_desc *tx_ring;
	struct rte_mbuf *tx_pkt;
	struct rte_mbuf *m_seg;
	struct hns3_desc *desc;
	uint32_t nb_hold = 0;
	uint16_t tx_next_use;
	uint16_t tx_pkt_num;
	uint16_t tx_bd_max;
	uint16_t nb_buf;
	uint16_t nb_tx;
	uint16_t i;

	/* free useless buffer */
	hns3_tx_free_useless_buffer(txq);

	tx_next_use = txq->next_to_use;
	tx_bd_max = txq->nb_tx_desc;
	tx_pkt_num = nb_pkts;
	tx_ring = txq->tx_ring;

	/* send packets */
	tx_bak_pkt = &txq->sw_ring[tx_next_use];
	for (nb_tx = 0; nb_tx < tx_pkt_num; nb_tx++) {
		tx_pkt = *tx_pkts++;

		nb_buf = tx_pkt->nb_segs;
		if (nb_buf > txq->tx_bd_ready) {
			txq->queue_full_cnt++;
			if (nb_tx == 0)
				return 0;

			goto end_of_tx;
		}

		/*
		 * If packet length is less than minimum packet size, the
		 * driver needs to pad it.
		 */
		if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) < HNS3_MIN_PKT_SIZE)) {
			uint16_t add_len;
			char *appended;

			add_len = HNS3_MIN_PKT_SIZE -
				  rte_pktmbuf_pkt_len(tx_pkt);
			appended = rte_pktmbuf_append(tx_pkt, add_len);
			if (appended == NULL) {
				txq->pkt_padding_fail_cnt++;
				break;
			}

			memset(appended, 0, add_len);
		}

		m_seg = tx_pkt;
		if (hns3_check_non_tso_pkt(nb_buf, &m_seg, tx_pkt, txq))
			goto end_of_tx;

		if (hns3_parse_cksum(txq, tx_next_use, m_seg, &hdr_lens))
			goto end_of_tx;

		i = 0;
		desc = &tx_ring[tx_next_use];

		/*
		 * If the packet is divided into multiple Tx Buffer Descriptors,
		 * only need to fill vlan, paylen and tso into the first Tx
		 * Buffer Descriptor.
		 */
		hns3_fill_first_desc(txq, desc, m_seg);

		do {
			desc = &tx_ring[tx_next_use];
			/*
			 * Fill valid bits, DMA address and data length for
			 * each Tx Buffer Descriptor.
			 */
			hns3_fill_per_desc(desc, m_seg);
			tx_bak_pkt->mbuf = m_seg;
			m_seg = m_seg->next;
			tx_next_use++;
			tx_bak_pkt++;
			if (tx_next_use >= tx_bd_max) {
				tx_next_use = 0;
				tx_bak_pkt = txq->sw_ring;
			}

			i++;
		} while (m_seg != NULL);

		/* Add end flag for the last Tx Buffer Descriptor */
		desc->tx.tp_fe_sc_vld_ra_ri |=
			rte_cpu_to_le_16(BIT(HNS3_TXD_FE_B));

		nb_hold += i;
		txq->next_to_use = tx_next_use;
		txq->tx_bd_ready -= i;
	}

end_of_tx:

	if (likely(nb_tx))
		hns3_queue_xmit(txq, nb_hold);

	return nb_tx;
}

static uint16_t
hns3_dummy_rxtx_burst(void *dpdk_txq __rte_unused,
		      struct rte_mbuf **pkts __rte_unused,
		      uint16_t pkts_n __rte_unused)
{
	return 0;
}

void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;

	if (hns->hw.adapter_state == HNS3_NIC_STARTED &&
	    rte_atomic16_read(&hns->hw.reset.resetting) == 0) {
		eth_dev->rx_pkt_burst = hns3_recv_pkts;
		eth_dev->tx_pkt_burst = hns3_xmit_pkts;
		eth_dev->tx_pkt_prepare = hns3_prep_pkts;
	} else {
		eth_dev->rx_pkt_burst = hns3_dummy_rxtx_burst;
		eth_dev->tx_pkt_burst = hns3_dummy_rxtx_burst;
		eth_dev->tx_pkt_prepare = hns3_dummy_rxtx_burst;
	}
}