1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2019 Hisilicon Limited.
11 #include <rte_bus_pci.h>
12 #include <rte_byteorder.h>
13 #include <rte_common.h>
14 #include <rte_cycles.h>
17 #include <rte_ether.h>
18 #include <rte_vxlan.h>
19 #include <rte_ethdev_driver.h>
24 #include <rte_malloc.h>
27 #include "hns3_ethdev.h"
28 #include "hns3_rxtx.h"
29 #include "hns3_regs.h"
30 #include "hns3_logs.h"
32 #define HNS3_CFG_DESC_NUM(num) ((num) / 8 - 1)
33 #define DEFAULT_RX_FREE_THRESH 16
36 hns3_rx_queue_release_mbufs(struct hns3_rx_queue *rxq)
40 /* Note: Fake rx queue will not enter here */
42 for (i = 0; i < rxq->nb_rx_desc; i++) {
43 if (rxq->sw_ring[i].mbuf) {
44 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
45 rxq->sw_ring[i].mbuf = NULL;
52 hns3_tx_queue_release_mbufs(struct hns3_tx_queue *txq)
56 	/* Note: Fake tx queue will not enter here */
58 for (i = 0; i < txq->nb_tx_desc; i++) {
59 if (txq->sw_ring[i].mbuf) {
60 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
61 txq->sw_ring[i].mbuf = NULL;
68 hns3_rx_queue_release(void *queue)
70 struct hns3_rx_queue *rxq = queue;
72 hns3_rx_queue_release_mbufs(rxq);
74 rte_memzone_free(rxq->mz);
76 rte_free(rxq->sw_ring);
82 hns3_tx_queue_release(void *queue)
84 struct hns3_tx_queue *txq = queue;
86 hns3_tx_queue_release_mbufs(txq);
88 rte_memzone_free(txq->mz);
90 rte_free(txq->sw_ring);
96 hns3_dev_rx_queue_release(void *queue)
98 struct hns3_rx_queue *rxq = queue;
99 struct hns3_adapter *hns;
105 rte_spinlock_lock(&hns->hw.lock);
106 hns3_rx_queue_release(queue);
107 rte_spinlock_unlock(&hns->hw.lock);
111 hns3_dev_tx_queue_release(void *queue)
113 struct hns3_tx_queue *txq = queue;
114 struct hns3_adapter *hns;
120 rte_spinlock_lock(&hns->hw.lock);
121 hns3_tx_queue_release(queue);
122 rte_spinlock_unlock(&hns->hw.lock);
126 hns3_fake_rx_queue_release(struct hns3_rx_queue *queue)
128 struct hns3_rx_queue *rxq = queue;
129 struct hns3_adapter *hns;
139 if (hw->fkq_data.rx_queues[idx]) {
140 hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);
141 hw->fkq_data.rx_queues[idx] = NULL;
144 /* free fake rx queue arrays */
145 if (idx == (hw->fkq_data.nb_fake_rx_queues - 1)) {
146 hw->fkq_data.nb_fake_rx_queues = 0;
147 rte_free(hw->fkq_data.rx_queues);
148 hw->fkq_data.rx_queues = NULL;
153 hns3_fake_tx_queue_release(struct hns3_tx_queue *queue)
155 struct hns3_tx_queue *txq = queue;
156 struct hns3_adapter *hns;
166 if (hw->fkq_data.tx_queues[idx]) {
167 hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);
168 hw->fkq_data.tx_queues[idx] = NULL;
171 /* free fake tx queue arrays */
172 if (idx == (hw->fkq_data.nb_fake_tx_queues - 1)) {
173 hw->fkq_data.nb_fake_tx_queues = 0;
174 rte_free(hw->fkq_data.tx_queues);
175 hw->fkq_data.tx_queues = NULL;
180 hns3_free_rx_queues(struct rte_eth_dev *dev)
182 struct hns3_adapter *hns = dev->data->dev_private;
183 struct hns3_fake_queue_data *fkq_data;
184 struct hns3_hw *hw = &hns->hw;
188 nb_rx_q = hw->data->nb_rx_queues;
189 for (i = 0; i < nb_rx_q; i++) {
190 if (dev->data->rx_queues[i]) {
191 hns3_rx_queue_release(dev->data->rx_queues[i]);
192 dev->data->rx_queues[i] = NULL;
196 /* Free fake Rx queues */
197 fkq_data = &hw->fkq_data;
198 for (i = 0; i < fkq_data->nb_fake_rx_queues; i++) {
199 if (fkq_data->rx_queues[i])
200 hns3_fake_rx_queue_release(fkq_data->rx_queues[i]);
205 hns3_free_tx_queues(struct rte_eth_dev *dev)
207 struct hns3_adapter *hns = dev->data->dev_private;
208 struct hns3_fake_queue_data *fkq_data;
209 struct hns3_hw *hw = &hns->hw;
213 nb_tx_q = hw->data->nb_tx_queues;
214 for (i = 0; i < nb_tx_q; i++) {
215 if (dev->data->tx_queues[i]) {
216 hns3_tx_queue_release(dev->data->tx_queues[i]);
217 dev->data->tx_queues[i] = NULL;
221 /* Free fake Tx queues */
222 fkq_data = &hw->fkq_data;
223 for (i = 0; i < fkq_data->nb_fake_tx_queues; i++) {
224 if (fkq_data->tx_queues[i])
225 hns3_fake_tx_queue_release(fkq_data->tx_queues[i]);
230 hns3_free_all_queues(struct rte_eth_dev *dev)
232 hns3_free_rx_queues(dev);
233 hns3_free_tx_queues(dev);
237 hns3_alloc_rx_queue_mbufs(struct hns3_hw *hw, struct hns3_rx_queue *rxq)
239 struct rte_mbuf *mbuf;
243 for (i = 0; i < rxq->nb_rx_desc; i++) {
244 mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
245 if (unlikely(mbuf == NULL)) {
246 hns3_err(hw, "Failed to allocate RXD[%d] for rx queue!",
248 hns3_rx_queue_release_mbufs(rxq);
252 rte_mbuf_refcnt_set(mbuf, 1);
254 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
256 mbuf->port = rxq->port_id;
258 rxq->sw_ring[i].mbuf = mbuf;
259 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
260 rxq->rx_ring[i].addr = dma_addr;
261 rxq->rx_ring[i].rx.bd_base_info = 0;
268 hns3_buf_size2type(uint32_t buf_size)
274 bd_size_type = HNS3_BD_SIZE_512_TYPE;
277 bd_size_type = HNS3_BD_SIZE_1024_TYPE;
280 bd_size_type = HNS3_BD_SIZE_4096_TYPE;
283 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
290 hns3_init_rx_queue_hw(struct hns3_rx_queue *rxq)
292 uint32_t rx_buf_len = rxq->rx_buf_len;
293 uint64_t dma_addr = rxq->rx_ring_phys_addr;
295 hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_L_REG, (uint32_t)dma_addr);
296 hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_H_REG,
297 (uint32_t)((dma_addr >> 31) >> 1));
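	/*
	 * The expression above is simply the upper 32 bits of the 64-bit DMA
	 * address: (dma_addr >> 31) >> 1 == dma_addr >> 32. Splitting the
	 * shift in two presumably keeps the expression well defined even if
	 * the address type were only 32 bits wide, where a single shift by 32
	 * would be undefined. The TX ring base address below uses the same
	 * pattern.
	 */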
299 hns3_write_dev(rxq, HNS3_RING_RX_BD_LEN_REG,
300 hns3_buf_size2type(rx_buf_len));
301 hns3_write_dev(rxq, HNS3_RING_RX_BD_NUM_REG,
302 HNS3_CFG_DESC_NUM(rxq->nb_rx_desc));
306 hns3_init_tx_queue_hw(struct hns3_tx_queue *txq)
308 uint64_t dma_addr = txq->tx_ring_phys_addr;
310 hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_L_REG, (uint32_t)dma_addr);
311 hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_H_REG,
312 (uint32_t)((dma_addr >> 31) >> 1));
314 hns3_write_dev(txq, HNS3_RING_TX_BD_NUM_REG,
315 HNS3_CFG_DESC_NUM(txq->nb_tx_desc));
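/*
 * Note on "fake" queues: the hardware operates on TX/RX queue pairs (TQPs).
 * When the application configures different numbers of RX and TX queues, the
 * driver appears to back the unused direction of each extra TQP with a "fake"
 * queue (see hns3_fake_rx_queue_setup()/hns3_fake_tx_queue_setup() below), so
 * that every pair indexed by cfg_max_queues can still be enabled and reset.
 */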
319 hns3_enable_all_queues(struct hns3_hw *hw, bool en)
321 uint16_t nb_rx_q = hw->data->nb_rx_queues;
322 uint16_t nb_tx_q = hw->data->nb_tx_queues;
323 struct hns3_rx_queue *rxq;
324 struct hns3_tx_queue *txq;
328 for (i = 0; i < hw->cfg_max_queues; i++) {
330 rxq = hw->data->rx_queues[i];
332 rxq = hw->fkq_data.rx_queues[i - nb_rx_q];
334 txq = hw->data->tx_queues[i];
336 txq = hw->fkq_data.tx_queues[i - nb_tx_q];
337 if (rxq == NULL || txq == NULL ||
338 (en && (rxq->rx_deferred_start || txq->tx_deferred_start)))
341 rcb_reg = hns3_read_dev(rxq, HNS3_RING_EN_REG);
343 rcb_reg |= BIT(HNS3_RING_EN_B);
345 rcb_reg &= ~BIT(HNS3_RING_EN_B);
346 hns3_write_dev(rxq, HNS3_RING_EN_REG, rcb_reg);
351 hns3_tqp_enable(struct hns3_hw *hw, uint16_t queue_id, bool enable)
353 struct hns3_cfg_com_tqp_queue_cmd *req;
354 struct hns3_cmd_desc desc;
357 req = (struct hns3_cfg_com_tqp_queue_cmd *)desc.data;
359 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_COM_TQP_QUEUE, false);
360 req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
362 hns3_set_bit(req->enable, HNS3_TQP_ENABLE_B, enable ? 1 : 0);
364 ret = hns3_cmd_send(hw, &desc, 1);
366 hns3_err(hw, "TQP enable fail, ret = %d", ret);
372 hns3_send_reset_tqp_cmd(struct hns3_hw *hw, uint16_t queue_id, bool enable)
374 struct hns3_reset_tqp_queue_cmd *req;
375 struct hns3_cmd_desc desc;
378 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, false);
380 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
381 req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
382 hns3_set_bit(req->reset_req, HNS3_TQP_RESET_B, enable ? 1 : 0);
384 ret = hns3_cmd_send(hw, &desc, 1);
386 hns3_err(hw, "Send tqp reset cmd error, ret = %d", ret);
392 hns3_get_reset_status(struct hns3_hw *hw, uint16_t queue_id)
394 struct hns3_reset_tqp_queue_cmd *req;
395 struct hns3_cmd_desc desc;
398 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, true);
400 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
401 req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
403 ret = hns3_cmd_send(hw, &desc, 1);
405 hns3_err(hw, "Get reset status error, ret =%d", ret);
409 return hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B);
413 hns3_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
415 #define HNS3_TQP_RESET_TRY_MS 200
420 ret = hns3_tqp_enable(hw, queue_id, false);
425 	 * In the current version, VF is not supported when the PF is driven by
426 	 * the DPDK driver. All task queue pairs are mapped to the PF function,
427 	 * so the PF's queue id equals the global queue id in the PF range.
429 ret = hns3_send_reset_tqp_cmd(hw, queue_id, true);
431 hns3_err(hw, "Send reset tqp cmd fail, ret = %d", ret);
435 end = get_timeofday_ms() + HNS3_TQP_RESET_TRY_MS;
437 /* Wait for tqp hw reset */
438 rte_delay_ms(HNS3_POLL_RESPONE_MS);
439 reset_status = hns3_get_reset_status(hw, queue_id);
444 } while (get_timeofday_ms() < end);
447 hns3_err(hw, "Reset TQP fail, ret = %d", ret);
451 ret = hns3_send_reset_tqp_cmd(hw, queue_id, false);
453 hns3_err(hw, "Deassert the soft reset fail, ret = %d", ret);
459 hns3vf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
464 	/* Disable the VF's queue before sending the queue reset msg to the PF */
465 ret = hns3_tqp_enable(hw, queue_id, false);
469 memcpy(msg_data, &queue_id, sizeof(uint16_t));
471 return hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data,
472 sizeof(msg_data), true, NULL, 0);
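/*
 * The VF variant above cannot touch the reset registers directly: it only
 * disables its queue and then asks the PF to perform the reset through the
 * HNS3_MBX_QUEUE_RESET mailbox message, carrying the 16-bit queue id as
 * payload.
 */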
476 hns3_reset_queue(struct hns3_adapter *hns, uint16_t queue_id)
478 struct hns3_hw *hw = &hns->hw;
480 return hns3vf_reset_tqp(hw, queue_id);
482 return hns3_reset_tqp(hw, queue_id);
486 hns3_reset_all_queues(struct hns3_adapter *hns)
488 struct hns3_hw *hw = &hns->hw;
491 for (i = 0; i < hw->cfg_max_queues; i++) {
492 ret = hns3_reset_queue(hns, i);
494 hns3_err(hw, "Failed to reset No.%d queue: %d", i, ret);
502 hns3_tqp_intr_enable(struct hns3_hw *hw, uint16_t tpq_int_num, bool en)
504 uint32_t addr, value;
506 addr = HNS3_TQP_INTR_CTRL_REG + tpq_int_num * HNS3_VECTOR_REG_OFFSET;
509 hns3_write_dev(hw, addr, value);
513 hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
515 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
516 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
517 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
519 if (dev->data->dev_conf.intr_conf.rxq == 0)
522 /* enable the vectors */
523 hns3_tqp_intr_enable(hw, queue_id, true);
525 return rte_intr_ack(intr_handle);
529 hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
531 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
533 if (dev->data->dev_conf.intr_conf.rxq == 0)
536 /* disable the vectors */
537 hns3_tqp_intr_enable(hw, queue_id, false);
543 hns3_dev_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
545 struct hns3_hw *hw = &hns->hw;
546 struct hns3_rx_queue *rxq;
549 PMD_INIT_FUNC_TRACE();
551 rxq = (struct hns3_rx_queue *)hw->data->rx_queues[idx];
552 ret = hns3_alloc_rx_queue_mbufs(hw, rxq);
554 hns3_err(hw, "Failed to alloc mbuf for No.%d rx queue: %d",
559 rxq->next_to_use = 0;
560 rxq->next_to_clean = 0;
561 hns3_init_rx_queue_hw(rxq);
567 hns3_fake_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
569 struct hns3_hw *hw = &hns->hw;
570 struct hns3_rx_queue *rxq;
572 rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[idx];
573 rxq->next_to_use = 0;
574 rxq->next_to_clean = 0;
575 hns3_init_rx_queue_hw(rxq);
579 hns3_init_tx_queue(struct hns3_tx_queue *queue)
581 struct hns3_tx_queue *txq = queue;
582 struct hns3_desc *desc;
587 for (i = 0; i < txq->nb_tx_desc; i++) {
588 desc->tx.tp_fe_sc_vld_ra_ri = 0;
592 txq->next_to_use = 0;
593 txq->next_to_clean = 0;
594 txq->tx_bd_ready = txq->nb_tx_desc - 1;
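	/*
	 * tx_bd_ready tracks the number of descriptors available to the
	 * driver; starting at nb_tx_desc - 1 presumably keeps one BD in
	 * reserve so that next_to_use can never catch up with next_to_clean
	 * on a completely full ring.
	 */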
595 hns3_init_tx_queue_hw(txq);
599 hns3_dev_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)
601 struct hns3_hw *hw = &hns->hw;
602 struct hns3_tx_queue *txq;
604 txq = (struct hns3_tx_queue *)hw->data->tx_queues[idx];
605 hns3_init_tx_queue(txq);
609 hns3_fake_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)
611 struct hns3_hw *hw = &hns->hw;
612 struct hns3_tx_queue *txq;
614 txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[idx];
615 hns3_init_tx_queue(txq);
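/*
 * hns3_init_tx_ring_tc() below walks the per-TC queue ranges
 * (tqp_offset .. tqp_offset + tqp_count - 1) and tags each TX ring with its
 * traffic class via HNS3_RING_TX_TC_REG, so the hardware can schedule
 * transmissions per TC.
 */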
619 hns3_init_tx_ring_tc(struct hns3_adapter *hns)
621 struct hns3_hw *hw = &hns->hw;
622 struct hns3_tx_queue *txq;
625 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
626 struct hns3_tc_queue_info *tc_queue = &hw->tc_queue[i];
629 if (!tc_queue->enable)
632 for (j = 0; j < tc_queue->tqp_count; j++) {
633 num = tc_queue->tqp_offset + j;
634 txq = (struct hns3_tx_queue *)hw->data->tx_queues[num];
638 hns3_write_dev(txq, HNS3_RING_TX_TC_REG, tc_queue->tc);
644 hns3_start_rx_queues(struct hns3_adapter *hns)
646 struct hns3_hw *hw = &hns->hw;
647 struct hns3_rx_queue *rxq;
651 /* Initialize RSS for queues */
652 ret = hns3_config_rss(hns);
654 hns3_err(hw, "Failed to configure rss %d", ret);
658 for (i = 0; i < hw->data->nb_rx_queues; i++) {
659 rxq = (struct hns3_rx_queue *)hw->data->rx_queues[i];
660 if (rxq == NULL || rxq->rx_deferred_start)
662 ret = hns3_dev_rx_queue_start(hns, i);
664 hns3_err(hw, "Failed to start No.%d rx queue: %d", i,
670 for (i = 0; i < hw->fkq_data.nb_fake_rx_queues; i++) {
671 rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[i];
672 if (rxq == NULL || rxq->rx_deferred_start)
674 hns3_fake_rx_queue_start(hns, i);
679 for (j = 0; j < i; j++) {
680 rxq = (struct hns3_rx_queue *)hw->data->rx_queues[j];
681 hns3_rx_queue_release_mbufs(rxq);
688 hns3_start_tx_queues(struct hns3_adapter *hns)
690 struct hns3_hw *hw = &hns->hw;
691 struct hns3_tx_queue *txq;
694 for (i = 0; i < hw->data->nb_tx_queues; i++) {
695 txq = (struct hns3_tx_queue *)hw->data->tx_queues[i];
696 if (txq == NULL || txq->tx_deferred_start)
698 hns3_dev_tx_queue_start(hns, i);
701 for (i = 0; i < hw->fkq_data.nb_fake_tx_queues; i++) {
702 txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[i];
703 if (txq == NULL || txq->tx_deferred_start)
705 hns3_fake_tx_queue_start(hns, i);
708 hns3_init_tx_ring_tc(hns);
712 hns3_start_queues(struct hns3_adapter *hns, bool reset_queue)
714 struct hns3_hw *hw = &hns->hw;
718 ret = hns3_reset_all_queues(hns);
720 hns3_err(hw, "Failed to reset all queues %d", ret);
725 ret = hns3_start_rx_queues(hns);
727 hns3_err(hw, "Failed to start rx queues: %d", ret);
731 hns3_start_tx_queues(hns);
732 hns3_enable_all_queues(hw, true);
738 hns3_stop_queues(struct hns3_adapter *hns, bool reset_queue)
740 struct hns3_hw *hw = &hns->hw;
743 hns3_enable_all_queues(hw, false);
745 ret = hns3_reset_all_queues(hns);
747 hns3_err(hw, "Failed to reset all queues %d", ret);
755 hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev,
756 struct hns3_queue_info *q_info)
758 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
759 const struct rte_memzone *rx_mz;
760 struct hns3_rx_queue *rxq;
761 unsigned int rx_desc;
763 rxq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_rx_queue),
764 RTE_CACHE_LINE_SIZE, q_info->socket_id);
766 hns3_err(hw, "Failed to allocate memory for No.%d rx ring!",
771 /* Allocate rx ring hardware descriptors. */
772 rxq->queue_id = q_info->idx;
773 rxq->nb_rx_desc = q_info->nb_desc;
774 rx_desc = rxq->nb_rx_desc * sizeof(struct hns3_desc);
775 rx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
776 rx_desc, HNS3_RING_BASE_ALIGN,
779 hns3_err(hw, "Failed to reserve DMA memory for No.%d rx ring!",
781 hns3_rx_queue_release(rxq);
785 rxq->rx_ring = (struct hns3_desc *)rx_mz->addr;
786 rxq->rx_ring_phys_addr = rx_mz->iova;
788 hns3_dbg(hw, "No.%d rx descriptors iova 0x%" PRIx64, q_info->idx,
789 rxq->rx_ring_phys_addr);
795 hns3_fake_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
796 uint16_t nb_desc, unsigned int socket_id)
798 struct hns3_adapter *hns = dev->data->dev_private;
799 struct hns3_hw *hw = &hns->hw;
800 struct hns3_queue_info q_info;
801 struct hns3_rx_queue *rxq;
804 if (hw->fkq_data.rx_queues[idx]) {
805 hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);
806 hw->fkq_data.rx_queues[idx] = NULL;
810 q_info.socket_id = socket_id;
811 q_info.nb_desc = nb_desc;
812 q_info.type = "hns3 fake RX queue";
813 q_info.ring_name = "rx_fake_ring";
814 rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
816 hns3_err(hw, "Failed to setup No.%d fake rx ring.", idx);
820 	/* No need to allocate sw_ring, because upper applications don't use it */
824 rxq->rx_deferred_start = false;
825 rxq->port_id = dev->data->port_id;
826 rxq->configured = true;
827 nb_rx_q = dev->data->nb_rx_queues;
828 rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
829 (nb_rx_q + idx) * HNS3_TQP_REG_SIZE);
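	/*
	 * Per-queue registers live in a flat array of HNS3_TQP_REG_SIZE blocks
	 * starting at HNS3_TQP_REG_OFFSET; real RX queues occupy slots
	 * 0 .. nb_rx_q - 1, so fake RX queue idx is mapped to slot
	 * nb_rx_q + idx here (the fake TX path below mirrors this with
	 * nb_tx_q).
	 */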
830 rxq->rx_buf_len = hw->rx_buf_len;
832 rte_spinlock_lock(&hw->lock);
833 hw->fkq_data.rx_queues[idx] = rxq;
834 rte_spinlock_unlock(&hw->lock);
840 hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev,
841 struct hns3_queue_info *q_info)
843 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
844 const struct rte_memzone *tx_mz;
845 struct hns3_tx_queue *txq;
846 struct hns3_desc *desc;
847 unsigned int tx_desc;
850 txq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_tx_queue),
851 RTE_CACHE_LINE_SIZE, q_info->socket_id);
853 hns3_err(hw, "Failed to allocate memory for No.%d tx ring!",
858 /* Allocate tx ring hardware descriptors. */
859 txq->queue_id = q_info->idx;
860 txq->nb_tx_desc = q_info->nb_desc;
861 tx_desc = txq->nb_tx_desc * sizeof(struct hns3_desc);
862 tx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
863 tx_desc, HNS3_RING_BASE_ALIGN,
866 hns3_err(hw, "Failed to reserve DMA memory for No.%d tx ring!",
868 hns3_tx_queue_release(txq);
872 txq->tx_ring = (struct hns3_desc *)tx_mz->addr;
873 txq->tx_ring_phys_addr = tx_mz->iova;
875 hns3_dbg(hw, "No.%d tx descriptors iova 0x%" PRIx64, q_info->idx,
876 txq->tx_ring_phys_addr);
880 for (i = 0; i < txq->nb_tx_desc; i++) {
881 desc->tx.tp_fe_sc_vld_ra_ri = 0;
889 hns3_fake_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
890 uint16_t nb_desc, unsigned int socket_id)
892 struct hns3_adapter *hns = dev->data->dev_private;
893 struct hns3_hw *hw = &hns->hw;
894 struct hns3_queue_info q_info;
895 struct hns3_tx_queue *txq;
898 if (hw->fkq_data.tx_queues[idx] != NULL) {
899 hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);
900 hw->fkq_data.tx_queues[idx] = NULL;
904 q_info.socket_id = socket_id;
905 q_info.nb_desc = nb_desc;
906 q_info.type = "hns3 fake TX queue";
907 q_info.ring_name = "tx_fake_ring";
908 txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
910 hns3_err(hw, "Failed to setup No.%d fake tx ring.", idx);
914 	/* No need to allocate sw_ring, because upper applications don't use it */
918 txq->tx_deferred_start = false;
919 txq->port_id = dev->data->port_id;
920 txq->configured = true;
921 nb_tx_q = dev->data->nb_tx_queues;
922 txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
923 (nb_tx_q + idx) * HNS3_TQP_REG_SIZE);
925 rte_spinlock_lock(&hw->lock);
926 hw->fkq_data.tx_queues[idx] = txq;
927 rte_spinlock_unlock(&hw->lock);
933 hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
935 uint16_t old_nb_queues = hw->fkq_data.nb_fake_rx_queues;
939 if (hw->fkq_data.rx_queues == NULL && nb_queues != 0) {
940 /* first time configuration */
943 size = sizeof(hw->fkq_data.rx_queues[0]) * nb_queues;
944 hw->fkq_data.rx_queues = rte_zmalloc("fake_rx_queues", size,
945 RTE_CACHE_LINE_SIZE);
946 if (hw->fkq_data.rx_queues == NULL) {
947 hw->fkq_data.nb_fake_rx_queues = 0;
950 } else if (hw->fkq_data.rx_queues != NULL && nb_queues != 0) {
953 rxq = hw->fkq_data.rx_queues;
954 for (i = nb_queues; i < old_nb_queues; i++)
955 hns3_dev_rx_queue_release(rxq[i]);
957 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
958 RTE_CACHE_LINE_SIZE);
961 if (nb_queues > old_nb_queues) {
962 uint16_t new_qs = nb_queues - old_nb_queues;
963 memset(rxq + old_nb_queues, 0, sizeof(rxq[0]) * new_qs);
966 hw->fkq_data.rx_queues = rxq;
967 } else if (hw->fkq_data.rx_queues != NULL && nb_queues == 0) {
968 rxq = hw->fkq_data.rx_queues;
969 for (i = nb_queues; i < old_nb_queues; i++)
970 hns3_dev_rx_queue_release(rxq[i]);
972 rte_free(hw->fkq_data.rx_queues);
973 hw->fkq_data.rx_queues = NULL;
976 hw->fkq_data.nb_fake_rx_queues = nb_queues;
982 hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
984 uint16_t old_nb_queues = hw->fkq_data.nb_fake_tx_queues;
988 if (hw->fkq_data.tx_queues == NULL && nb_queues != 0) {
989 /* first time configuration */
992 size = sizeof(hw->fkq_data.tx_queues[0]) * nb_queues;
993 hw->fkq_data.tx_queues = rte_zmalloc("fake_tx_queues", size,
994 RTE_CACHE_LINE_SIZE);
995 if (hw->fkq_data.tx_queues == NULL) {
996 hw->fkq_data.nb_fake_tx_queues = 0;
999 } else if (hw->fkq_data.tx_queues != NULL && nb_queues != 0) {
1002 txq = hw->fkq_data.tx_queues;
1003 for (i = nb_queues; i < old_nb_queues; i++)
1004 hns3_dev_tx_queue_release(txq[i]);
1005 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
1006 RTE_CACHE_LINE_SIZE);
1009 if (nb_queues > old_nb_queues) {
1010 uint16_t new_qs = nb_queues - old_nb_queues;
1011 memset(txq + old_nb_queues, 0, sizeof(txq[0]) * new_qs);
1014 hw->fkq_data.tx_queues = txq;
1015 } else if (hw->fkq_data.tx_queues != NULL && nb_queues == 0) {
1016 txq = hw->fkq_data.tx_queues;
1017 for (i = nb_queues; i < old_nb_queues; i++)
1018 hns3_dev_tx_queue_release(txq[i]);
1020 rte_free(hw->fkq_data.tx_queues);
1021 hw->fkq_data.tx_queues = NULL;
1023 hw->fkq_data.nb_fake_tx_queues = nb_queues;
1029 hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
1032 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1033 uint16_t rx_need_add_nb_q;
1034 uint16_t tx_need_add_nb_q;
1039 /* Setup new number of fake RX/TX queues and reconfigure device. */
1040 hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q);
1041 rx_need_add_nb_q = hw->cfg_max_queues - nb_rx_q;
1042 tx_need_add_nb_q = hw->cfg_max_queues - nb_tx_q;
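	/*
	 * Example: with nb_rx_q = 8 and nb_tx_q = 4, cfg_max_queues = 8,
	 * rx_need_add_nb_q = 0 and tx_need_add_nb_q = 4, i.e. four fake TX
	 * queues are created so every RX queue still has a TX partner in its
	 * TQP.
	 */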
1043 ret = hns3_fake_rx_queue_config(hw, rx_need_add_nb_q);
1045 hns3_err(hw, "Fail to configure fake rx queues: %d", ret);
1046 goto cfg_fake_rx_q_fail;
1049 ret = hns3_fake_tx_queue_config(hw, tx_need_add_nb_q);
1051 hns3_err(hw, "Fail to configure fake rx queues: %d", ret);
1052 goto cfg_fake_tx_q_fail;
1055 /* Allocate and set up fake RX queue per Ethernet port. */
1056 port_id = hw->data->port_id;
1057 for (q = 0; q < rx_need_add_nb_q; q++) {
1058 ret = hns3_fake_rx_queue_setup(dev, q, HNS3_MIN_RING_DESC,
1059 rte_eth_dev_socket_id(port_id));
1061 goto setup_fake_rx_q_fail;
1064 /* Allocate and set up fake TX queue per Ethernet port. */
1065 for (q = 0; q < tx_need_add_nb_q; q++) {
1066 ret = hns3_fake_tx_queue_setup(dev, q, HNS3_MIN_RING_DESC,
1067 rte_eth_dev_socket_id(port_id));
1069 goto setup_fake_tx_q_fail;
1074 setup_fake_tx_q_fail:
1075 setup_fake_rx_q_fail:
1076 (void)hns3_fake_tx_queue_config(hw, 0);
1078 (void)hns3_fake_rx_queue_config(hw, 0);
1080 hw->cfg_max_queues = 0;
1086 hns3_dev_release_mbufs(struct hns3_adapter *hns)
1088 struct rte_eth_dev_data *dev_data = hns->hw.data;
1089 struct hns3_rx_queue *rxq;
1090 struct hns3_tx_queue *txq;
1093 if (dev_data->rx_queues)
1094 for (i = 0; i < dev_data->nb_rx_queues; i++) {
1095 rxq = dev_data->rx_queues[i];
1096 if (rxq == NULL || rxq->rx_deferred_start)
1098 hns3_rx_queue_release_mbufs(rxq);
1101 if (dev_data->tx_queues)
1102 for (i = 0; i < dev_data->nb_tx_queues; i++) {
1103 txq = dev_data->tx_queues[i];
1104 if (txq == NULL || txq->tx_deferred_start)
1106 hns3_tx_queue_release_mbufs(txq);
1111 hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
1112 unsigned int socket_id, const struct rte_eth_rxconf *conf,
1113 struct rte_mempool *mp)
1115 struct hns3_adapter *hns = dev->data->dev_private;
1116 struct hns3_hw *hw = &hns->hw;
1117 struct hns3_queue_info q_info;
1118 struct hns3_rx_queue *rxq;
1121 if (dev->data->dev_started) {
1122 hns3_err(hw, "rx_queue_setup after dev_start no supported");
1126 if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||
1127 nb_desc % HNS3_ALIGN_RING_DESC) {
1128 hns3_err(hw, "Number (%u) of rx descriptors is invalid",
1133 if (dev->data->rx_queues[idx]) {
1134 hns3_rx_queue_release(dev->data->rx_queues[idx]);
1135 dev->data->rx_queues[idx] = NULL;
1139 q_info.socket_id = socket_id;
1140 q_info.nb_desc = nb_desc;
1141 q_info.type = "hns3 RX queue";
1142 q_info.ring_name = "rx_ring";
1143 rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
1146 "Failed to alloc mem and reserve DMA mem for rx ring!");
1152 if (conf->rx_free_thresh <= 0)
1153 rxq->rx_free_thresh = DEFAULT_RX_FREE_THRESH;
1155 rxq->rx_free_thresh = conf->rx_free_thresh;
1156 rxq->rx_deferred_start = conf->rx_deferred_start;
1158 rx_entry_len = sizeof(struct hns3_entry) * rxq->nb_rx_desc;
1159 rxq->sw_ring = rte_zmalloc_socket("hns3 RX sw ring", rx_entry_len,
1160 RTE_CACHE_LINE_SIZE, socket_id);
1161 if (rxq->sw_ring == NULL) {
1162 hns3_err(hw, "Failed to allocate memory for rx sw ring!");
1163 hns3_rx_queue_release(rxq);
1167 rxq->next_to_use = 0;
1168 rxq->next_to_clean = 0;
1169 rxq->nb_rx_hold = 0;
1170 rxq->pkt_first_seg = NULL;
1171 rxq->pkt_last_seg = NULL;
1172 rxq->port_id = dev->data->port_id;
1173 rxq->configured = true;
1174 rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
1175 idx * HNS3_TQP_REG_SIZE);
1176 rxq->rx_buf_len = hw->rx_buf_len;
1177 rxq->non_vld_descs = 0;
1179 rxq->pkt_len_errors = 0;
1180 rxq->l3_csum_erros = 0;
1181 rxq->l4_csum_erros = 0;
1182 rxq->ol3_csum_erros = 0;
1183 rxq->ol4_csum_erros = 0;
1185 rte_spinlock_lock(&hw->lock);
1186 dev->data->rx_queues[idx] = rxq;
1187 rte_spinlock_unlock(&hw->lock);
1192 static inline uint32_t
1193 rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint32_t ol_info)
1195 #define HNS3_L2TBL_NUM 4
1196 #define HNS3_L3TBL_NUM 16
1197 #define HNS3_L4TBL_NUM 16
1198 #define HNS3_OL3TBL_NUM 16
1199 #define HNS3_OL4TBL_NUM 16
1200 uint32_t pkt_type = 0;
1201 uint32_t l2id, l3id, l4id;
1202 uint32_t ol3id, ol4id;
1204 static const uint32_t l2table[HNS3_L2TBL_NUM] = {
1206 RTE_PTYPE_L2_ETHER_VLAN,
1207 RTE_PTYPE_L2_ETHER_QINQ,
1211 static const uint32_t l3table[HNS3_L3TBL_NUM] = {
1214 RTE_PTYPE_L2_ETHER_ARP,
1216 RTE_PTYPE_L3_IPV4_EXT,
1217 RTE_PTYPE_L3_IPV6_EXT,
1218 RTE_PTYPE_L2_ETHER_LLDP,
1219 0, 0, 0, 0, 0, 0, 0, 0, 0
1222 static const uint32_t l4table[HNS3_L4TBL_NUM] = {
1225 RTE_PTYPE_TUNNEL_GRE,
1229 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1232 static const uint32_t inner_l2table[HNS3_L2TBL_NUM] = {
1233 RTE_PTYPE_INNER_L2_ETHER,
1234 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1235 RTE_PTYPE_INNER_L2_ETHER_QINQ,
1239 static const uint32_t inner_l3table[HNS3_L3TBL_NUM] = {
1240 RTE_PTYPE_INNER_L3_IPV4,
1241 RTE_PTYPE_INNER_L3_IPV6,
1243 RTE_PTYPE_INNER_L2_ETHER,
1244 RTE_PTYPE_INNER_L3_IPV4_EXT,
1245 RTE_PTYPE_INNER_L3_IPV6_EXT,
1246 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1249 static const uint32_t inner_l4table[HNS3_L4TBL_NUM] = {
1250 RTE_PTYPE_INNER_L4_UDP,
1251 RTE_PTYPE_INNER_L4_TCP,
1252 RTE_PTYPE_TUNNEL_GRE,
1253 RTE_PTYPE_INNER_L4_SCTP,
1255 RTE_PTYPE_INNER_L4_ICMP,
1256 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1259 static const uint32_t ol3table[HNS3_OL3TBL_NUM] = {
1263 RTE_PTYPE_L3_IPV4_EXT,
1264 RTE_PTYPE_L3_IPV6_EXT,
1265 0, 0, 0, 0, 0, 0, 0, 0, 0,
1269 static const uint32_t ol4table[HNS3_OL4TBL_NUM] = {
1271 RTE_PTYPE_TUNNEL_VXLAN,
1272 RTE_PTYPE_TUNNEL_NVGRE,
1273 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1276 l2id = hns3_get_field(pkt_info, HNS3_RXD_STRP_TAGP_M,
1277 HNS3_RXD_STRP_TAGP_S);
1278 l3id = hns3_get_field(pkt_info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S);
1279 l4id = hns3_get_field(pkt_info, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S);
1280 ol3id = hns3_get_field(ol_info, HNS3_RXD_OL3ID_M, HNS3_RXD_OL3ID_S);
1281 ol4id = hns3_get_field(ol_info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
1283 if (ol4table[ol4id])
1284 pkt_type |= (inner_l2table[l2id] | inner_l3table[l3id] |
1285 inner_l4table[l4id] | ol3table[ol3id] |
1288 pkt_type |= (l2table[l2id] | l3table[l3id] | l4table[l4id]);
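	/*
	 * Composition rule: when the outer L4 table resolves to a tunnel type
	 * (VXLAN/NVGRE), the inner_* tables describe the encapsulated frame
	 * and ol3table/ol4table describe the outer headers; otherwise only the
	 * plain l2/l3/l4 tables are used.
	 */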
1293 hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1295 static const uint32_t ptypes[] = {
1297 RTE_PTYPE_L2_ETHER_VLAN,
1298 RTE_PTYPE_L2_ETHER_QINQ,
1299 RTE_PTYPE_L2_ETHER_LLDP,
1300 RTE_PTYPE_L2_ETHER_ARP,
1302 RTE_PTYPE_L3_IPV4_EXT,
1304 RTE_PTYPE_L3_IPV6_EXT,
1310 RTE_PTYPE_TUNNEL_GRE,
1314 if (dev->rx_pkt_burst == hns3_recv_pkts)
1321 hns3_clean_rx_buffers(struct hns3_rx_queue *rxq, int count)
1323 rxq->next_to_use += count;
1324 if (rxq->next_to_use >= rxq->nb_rx_desc)
1325 rxq->next_to_use -= rxq->nb_rx_desc;
1327 hns3_write_dev(rxq, HNS3_RING_RX_HEAD_REG, count);
1331 hns3_handle_bdinfo(struct hns3_rx_queue *rxq, struct rte_mbuf *rxm,
1332 uint32_t bd_base_info, uint32_t l234_info,
1333 uint32_t *cksum_err)
1337 if (unlikely(l234_info & BIT(HNS3_RXD_L2E_B))) {
1342 if (unlikely(rxm->pkt_len == 0 ||
1343 (l234_info & BIT(HNS3_RXD_TRUNCAT_B)))) {
1344 rxq->pkt_len_errors++;
1348 if (bd_base_info & BIT(HNS3_RXD_L3L4P_B)) {
1349 if (unlikely(l234_info & BIT(HNS3_RXD_L3E_B))) {
1350 rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;
1351 rxq->l3_csum_erros++;
1352 tmp |= HNS3_L3_CKSUM_ERR;
1355 if (unlikely(l234_info & BIT(HNS3_RXD_L4E_B))) {
1356 rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
1357 rxq->l4_csum_erros++;
1358 tmp |= HNS3_L4_CKSUM_ERR;
1361 if (unlikely(l234_info & BIT(HNS3_RXD_OL3E_B))) {
1362 rxq->ol3_csum_erros++;
1363 tmp |= HNS3_OUTER_L3_CKSUM_ERR;
1366 if (unlikely(l234_info & BIT(HNS3_RXD_OL4E_B))) {
1367 rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
1368 rxq->ol4_csum_erros++;
1369 tmp |= HNS3_OUTER_L4_CKSUM_ERR;
1378 hns3_rx_set_cksum_flag(struct rte_mbuf *rxm, uint64_t packet_type,
1379 const uint32_t cksum_err)
1381 if (unlikely((packet_type & RTE_PTYPE_TUNNEL_MASK))) {
1382 if (likely(packet_type & RTE_PTYPE_INNER_L3_MASK) &&
1383 (cksum_err & HNS3_L3_CKSUM_ERR) == 0)
1384 rxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
1385 if (likely(packet_type & RTE_PTYPE_INNER_L4_MASK) &&
1386 (cksum_err & HNS3_L4_CKSUM_ERR) == 0)
1387 rxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
1388 if (likely(packet_type & RTE_PTYPE_L4_MASK) &&
1389 (cksum_err & HNS3_OUTER_L4_CKSUM_ERR) == 0)
1390 rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;
1392 if (likely(packet_type & RTE_PTYPE_L3_MASK) &&
1393 (cksum_err & HNS3_L3_CKSUM_ERR) == 0)
1394 rxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
1395 if (likely(packet_type & RTE_PTYPE_L4_MASK) &&
1396 (cksum_err & HNS3_L4_CKSUM_ERR) == 0)
1397 rxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
1402 hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1404 struct hns3_rx_queue *rxq; /* RX queue */
1405 struct hns3_desc *rx_ring; /* RX ring (desc) */
1406 struct hns3_entry *sw_ring;
1407 struct hns3_entry *rxe;
1408 struct hns3_desc *rxdp; /* pointer of the current desc */
1409 struct rte_mbuf *first_seg;
1410 struct rte_mbuf *last_seg;
1411 struct rte_mbuf *nmb; /* pointer of the new mbuf */
1412 struct rte_mbuf *rxm;
1413 struct rte_eth_dev *dev;
1414 uint32_t bd_base_info;
1424 int num; /* num of desc in ring */
1430 dev = &rte_eth_devices[rxq->port_id];
1432 rx_id = rxq->next_to_clean;
1433 rx_ring = rxq->rx_ring;
1434 first_seg = rxq->pkt_first_seg;
1435 last_seg = rxq->pkt_last_seg;
1436 sw_ring = rxq->sw_ring;
1438 /* Get num of packets in descriptor ring */
1439 num = hns3_read_dev(rxq, HNS3_RING_RX_FBDNUM_REG);
1440 while (nb_rx_bd < num && nb_rx < nb_pkts) {
1441 rxdp = &rx_ring[rx_id];
1442 bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
1443 if (unlikely(!hns3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
1444 rxq->non_vld_descs++;
1448 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1449 if (unlikely(nmb == NULL)) {
1450 dev->data->rx_mbuf_alloc_failed++;
1455 rxe = &sw_ring[rx_id];
1457 if (rx_id == rxq->nb_rx_desc)
1460 rte_prefetch0(sw_ring[rx_id].mbuf);
1461 if ((rx_id & 0x3) == 0) {
1462 rte_prefetch0(&rx_ring[rx_id]);
1463 rte_prefetch0(&sw_ring[rx_id]);
1469 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1470 rxdp->addr = dma_addr;
1471 rxdp->rx.bd_base_info = 0;
1474 		/* Load the remaining descriptor data and extract the necessary fields */
1475 data_len = (uint16_t)(rte_le_to_cpu_16(rxdp->rx.size));
1476 l234_info = rte_le_to_cpu_32(rxdp->rx.l234_info);
1477 ol_info = rte_le_to_cpu_32(rxdp->rx.ol_info);
1479 if (first_seg == NULL) {
1481 first_seg->nb_segs = 1;
1483 first_seg->nb_segs++;
1484 last_seg->next = rxm;
1487 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1488 rxm->data_len = data_len;
1490 if (!hns3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
1495 /* The last buffer of the received packet */
1496 pkt_len = (uint16_t)(rte_le_to_cpu_16(rxdp->rx.pkt_len));
1497 first_seg->pkt_len = pkt_len;
1498 first_seg->port = rxq->port_id;
1499 first_seg->hash.rss = rte_le_to_cpu_32(rxdp->rx.rss_hash);
1500 first_seg->ol_flags |= PKT_RX_RSS_HASH;
1501 if (unlikely(hns3_get_bit(bd_base_info, HNS3_RXD_LUM_B))) {
1502 first_seg->hash.fdir.hi =
1503 rte_le_to_cpu_32(rxdp->rx.fd_id);
1504 first_seg->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
1508 ret = hns3_handle_bdinfo(rxq, first_seg, bd_base_info,
1509 l234_info, &cksum_err);
1513 first_seg->packet_type = rxd_pkt_info_to_pkt_type(l234_info,
1516 if (bd_base_info & BIT(HNS3_RXD_L3L4P_B))
1517 hns3_rx_set_cksum_flag(rxm, first_seg->packet_type,
1520 first_seg->vlan_tci = rte_le_to_cpu_16(rxdp->rx.vlan_tag);
1521 first_seg->vlan_tci_outer =
1522 rte_le_to_cpu_16(rxdp->rx.ot_vlan_tag);
1523 rx_pkts[nb_rx++] = first_seg;
1527 rte_pktmbuf_free(first_seg);
1531 rxq->next_to_clean = rx_id;
1532 rxq->pkt_first_seg = first_seg;
1533 rxq->pkt_last_seg = last_seg;
1534 hns3_clean_rx_buffers(rxq, nb_rx_bd);
1540 hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
1541 unsigned int socket_id, const struct rte_eth_txconf *conf)
1543 struct hns3_adapter *hns = dev->data->dev_private;
1544 struct hns3_hw *hw = &hns->hw;
1545 struct hns3_queue_info q_info;
1546 struct hns3_tx_queue *txq;
1549 if (dev->data->dev_started) {
1550 hns3_err(hw, "tx_queue_setup after dev_start no supported");
1554 if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||
1555 nb_desc % HNS3_ALIGN_RING_DESC) {
1556 hns3_err(hw, "Number (%u) of tx descriptors is invalid",
1561 if (dev->data->tx_queues[idx] != NULL) {
1562 hns3_tx_queue_release(dev->data->tx_queues[idx]);
1563 dev->data->tx_queues[idx] = NULL;
1567 q_info.socket_id = socket_id;
1568 q_info.nb_desc = nb_desc;
1569 q_info.type = "hns3 TX queue";
1570 q_info.ring_name = "tx_ring";
1571 txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
1574 "Failed to alloc mem and reserve DMA mem for tx ring!");
1578 txq->tx_deferred_start = conf->tx_deferred_start;
1579 tx_entry_len = sizeof(struct hns3_entry) * txq->nb_tx_desc;
1580 txq->sw_ring = rte_zmalloc_socket("hns3 TX sw ring", tx_entry_len,
1581 RTE_CACHE_LINE_SIZE, socket_id);
1582 if (txq->sw_ring == NULL) {
1583 hns3_err(hw, "Failed to allocate memory for tx sw ring!");
1584 hns3_tx_queue_release(txq);
1589 txq->next_to_use = 0;
1590 txq->next_to_clean = 0;
1591 txq->tx_bd_ready = txq->nb_tx_desc - 1;
1592 txq->port_id = dev->data->port_id;
1593 txq->configured = true;
1594 txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
1595 idx * HNS3_TQP_REG_SIZE);
1596 rte_spinlock_lock(&hw->lock);
1597 dev->data->tx_queues[idx] = txq;
1598 rte_spinlock_unlock(&hw->lock);
1604 hns3_queue_xmit(struct hns3_tx_queue *txq, uint32_t buf_num)
1606 hns3_write_dev(txq, HNS3_RING_TX_TAIL_REG, buf_num);
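/*
 * hns3_tx_free_useless_buffer() below reclaims TX descriptors: the driver
 * sets HNS3_TXD_VLD_B when filling a BD and the hardware appears to clear it
 * once the BD has been sent, so the loop frees mbuf segments from
 * next_to_clean forward until it reaches a still-valid BD or catches up with
 * next_to_use, crediting the reclaimed BDs back to tx_bd_ready.
 */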
1610 hns3_tx_free_useless_buffer(struct hns3_tx_queue *txq)
1612 uint16_t tx_next_clean = txq->next_to_clean;
1613 uint16_t tx_next_use = txq->next_to_use;
1614 uint16_t tx_bd_ready = txq->tx_bd_ready;
1615 uint16_t tx_bd_max = txq->nb_tx_desc;
1616 struct hns3_entry *tx_bak_pkt = &txq->sw_ring[tx_next_clean];
1617 struct hns3_desc *desc = &txq->tx_ring[tx_next_clean];
1618 struct rte_mbuf *mbuf;
1620 while ((!hns3_get_bit(desc->tx.tp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B)) &&
1621 tx_next_use != tx_next_clean) {
1622 mbuf = tx_bak_pkt->mbuf;
1624 rte_pktmbuf_free_seg(mbuf);
1625 tx_bak_pkt->mbuf = NULL;
1633 if (tx_next_clean >= tx_bd_max) {
1635 desc = txq->tx_ring;
1636 tx_bak_pkt = txq->sw_ring;
1640 txq->next_to_clean = tx_next_clean;
1641 txq->tx_bd_ready = tx_bd_ready;
1645 fill_desc(struct hns3_tx_queue *txq, uint16_t tx_desc_id, struct rte_mbuf *rxm,
1646 bool first, int offset)
1648 struct hns3_desc *tx_ring = txq->tx_ring;
1649 struct hns3_desc *desc = &tx_ring[tx_desc_id];
1650 uint8_t frag_end = rxm->next == NULL ? 1 : 0;
1651 uint16_t size = rxm->data_len;
1653 uint64_t ol_flags = rxm->ol_flags;
1658 desc->addr = rte_mbuf_data_iova(rxm) + offset;
1659 desc->tx.send_size = rte_cpu_to_le_16(size);
1660 hns3_set_bit(rrcfv, HNS3_TXD_VLD_B, 1);
1663 hdr_len = rxm->l2_len + rxm->l3_len + rxm->l4_len;
1664 hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ?
1665 rxm->outer_l2_len + rxm->outer_l3_len : 0;
1666 paylen = rxm->pkt_len - hdr_len;
1667 desc->tx.paylen = rte_cpu_to_le_32(paylen);
1670 hns3_set_bit(rrcfv, HNS3_TXD_FE_B, frag_end);
1671 desc->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(rrcfv);
1674 if (ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
1675 tmp = rte_le_to_cpu_32(desc->tx.type_cs_vlan_tso_len);
1676 hns3_set_bit(tmp, HNS3_TXD_VLAN_B, 1);
1677 desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp);
1678 desc->tx.vlan_tag = rte_cpu_to_le_16(rxm->vlan_tci);
1681 if (ol_flags & PKT_TX_QINQ_PKT) {
1682 tmp = rte_le_to_cpu_32(desc->tx.ol_type_vlan_len_msec);
1683 hns3_set_bit(tmp, HNS3_TXD_OVLAN_B, 1);
1684 desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(tmp);
1685 desc->tx.outer_vlan_tag =
1686 rte_cpu_to_le_16(rxm->vlan_tci_outer);
1692 hns3_tx_alloc_mbufs(struct hns3_tx_queue *txq, struct rte_mempool *mb_pool,
1693 uint16_t nb_new_buf, struct rte_mbuf **alloc_mbuf)
1695 struct rte_mbuf *new_mbuf = NULL;
1696 struct rte_eth_dev *dev;
1697 struct rte_mbuf *temp;
1701 /* Allocate enough mbufs */
1702 for (i = 0; i < nb_new_buf; i++) {
1703 temp = rte_pktmbuf_alloc(mb_pool);
1704 if (unlikely(temp == NULL)) {
1705 dev = &rte_eth_devices[txq->port_id];
1706 hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1707 hns3_err(hw, "Failed to alloc TX mbuf port_id=%d,"
1708 "queue_id=%d in reassemble tx pkts.",
1709 txq->port_id, txq->queue_id);
1710 rte_pktmbuf_free(new_mbuf);
1713 temp->next = new_mbuf;
1717 if (new_mbuf == NULL)
1720 new_mbuf->nb_segs = nb_new_buf;
1721 *alloc_mbuf = new_mbuf;
1727 hns3_reassemble_tx_pkts(void *tx_queue, struct rte_mbuf *tx_pkt,
1728 struct rte_mbuf **new_pkt)
1730 struct hns3_tx_queue *txq = tx_queue;
1731 struct rte_mempool *mb_pool;
1732 struct rte_mbuf *new_mbuf;
1733 struct rte_mbuf *temp_new;
1734 struct rte_mbuf *temp;
1735 uint16_t last_buf_len;
1736 uint16_t nb_new_buf;
1747 mb_pool = tx_pkt->pool;
1748 buf_size = tx_pkt->buf_len - RTE_PKTMBUF_HEADROOM;
1749 nb_new_buf = (tx_pkt->pkt_len - 1) / buf_size + 1;
1751 last_buf_len = tx_pkt->pkt_len % buf_size;
1752 if (last_buf_len == 0)
1753 last_buf_len = buf_size;
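	/*
	 * Example: for a 5000 byte packet and a 2048 byte data room,
	 * nb_new_buf = (5000 - 1) / 2048 + 1 = 3 and
	 * last_buf_len = 5000 % 2048 = 904, so the copy loop below fills two
	 * full mbufs and a 904 byte tail.
	 */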
1755 /* Allocate enough mbufs */
1756 ret = hns3_tx_alloc_mbufs(txq, mb_pool, nb_new_buf, &new_mbuf);
1760 /* Copy the original packet content to the new mbufs */
1762 s = rte_pktmbuf_mtod(temp, char *);
1763 len_s = temp->data_len;
1764 temp_new = new_mbuf;
1765 for (i = 0; i < nb_new_buf; i++) {
1766 d = rte_pktmbuf_mtod(temp_new, char *);
1767 if (i < nb_new_buf - 1)
1770 buf_len = last_buf_len;
1774 len = RTE_MIN(len_s, len_d);
1778 len_d = len_d - len;
1779 len_s = len_s - len;
1785 s = rte_pktmbuf_mtod(temp, char *);
1786 len_s = temp->data_len;
1790 temp_new->data_len = buf_len;
1791 temp_new = temp_new->next;
1794 /* free original mbufs */
1795 rte_pktmbuf_free(tx_pkt);
1797 *new_pkt = new_mbuf;
1803 hns3_parse_outer_params(uint64_t ol_flags, uint32_t *ol_type_vlan_len_msec)
1805 uint32_t tmp = *ol_type_vlan_len_msec;
1807 /* (outer) IP header type */
1808 if (ol_flags & PKT_TX_OUTER_IPV4) {
1809 		/* OL3 header size, in units of 4 bytes */
1810 hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
1811 sizeof(struct rte_ipv4_hdr) >> HNS3_L3_LEN_UNIT);
1812 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
1813 hns3_set_field(tmp, HNS3_TXD_OL3T_M,
1814 HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_CSUM);
1816 hns3_set_field(tmp, HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
1817 HNS3_OL3T_IPV4_NO_CSUM);
1818 } else if (ol_flags & PKT_TX_OUTER_IPV6) {
1819 hns3_set_field(tmp, HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
1821 		/* OL3 header size, in units of 4 bytes */
1822 hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
1823 sizeof(struct rte_ipv6_hdr) >> HNS3_L3_LEN_UNIT);
1826 *ol_type_vlan_len_msec = tmp;
1830 hns3_parse_inner_params(uint64_t ol_flags, uint32_t *ol_type_vlan_len_msec,
1831 struct rte_net_hdr_lens *hdr_lens)
1833 uint32_t tmp = *ol_type_vlan_len_msec;
1836 	/* OL2 header size, in units of 2 bytes */
1837 hns3_set_field(tmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
1838 sizeof(struct rte_ether_hdr) >> HNS3_L2_LEN_UNIT);
1840 /* L4TUNT: L4 Tunneling Type */
1841 switch (ol_flags & PKT_TX_TUNNEL_MASK) {
1842 case PKT_TX_TUNNEL_GENEVE:
1843 case PKT_TX_TUNNEL_VXLAN:
1844 		/* MAC in UDP tunnelling packet, including VXLAN */
1845 hns3_set_field(tmp, HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S,
1846 HNS3_TUN_MAC_IN_UDP);
1848 		 * OL4 header size, in units of 4 bytes; it contains the outer
1849 		 * L4 (UDP) length and the tunneling header length.
1851 hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
1852 (uint8_t)RTE_ETHER_VXLAN_HLEN >>
1855 case PKT_TX_TUNNEL_GRE:
1856 hns3_set_field(tmp, HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S,
1859 		 * OL4 header size, in units of 4 bytes; it contains the outer
1860 		 * L4 (GRE) length and the tunneling header length.
1862 l4_len = hdr_lens->l4_len + hdr_lens->tunnel_len;
1863 hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
1864 l4_len >> HNS3_L4_LEN_UNIT);
1867 		/* For tunnel types other than UDP and GRE, drop the packet */
1871 *ol_type_vlan_len_msec = tmp;
1877 hns3_parse_tunneling_params(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
1879 struct rte_net_hdr_lens *hdr_lens)
1881 struct hns3_desc *tx_ring = txq->tx_ring;
1882 struct hns3_desc *desc = &tx_ring[tx_desc_id];
1886 hns3_parse_outer_params(ol_flags, &value);
1887 ret = hns3_parse_inner_params(ol_flags, &value, hdr_lens);
1891 desc->tx.ol_type_vlan_len_msec |= rte_cpu_to_le_32(value);
1897 hns3_parse_l3_cksum_params(uint64_t ol_flags, uint32_t *type_cs_vlan_tso_len)
1901 /* Enable L3 checksum offloads */
1902 if (ol_flags & PKT_TX_IPV4) {
1903 tmp = *type_cs_vlan_tso_len;
1904 hns3_set_field(tmp, HNS3_TXD_L3T_M, HNS3_TXD_L3T_S,
1906 		/* inner (or normal) L3 header size, in units of 4 bytes */
1907 hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
1908 sizeof(struct rte_ipv4_hdr) >> HNS3_L3_LEN_UNIT);
1909 if (ol_flags & PKT_TX_IP_CKSUM)
1910 hns3_set_bit(tmp, HNS3_TXD_L3CS_B, 1);
1911 *type_cs_vlan_tso_len = tmp;
1912 } else if (ol_flags & PKT_TX_IPV6) {
1913 tmp = *type_cs_vlan_tso_len;
1914 		/* L3T field; IPv6 has no L3 checksum to offload */
1915 hns3_set_field(tmp, HNS3_TXD_L3T_M, HNS3_TXD_L3T_S,
1918 		/* inner (or normal) L3 header size, in units of 4 bytes */
1918 hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
1919 sizeof(struct rte_ipv6_hdr) >> HNS3_L3_LEN_UNIT);
1920 *type_cs_vlan_tso_len = tmp;
1925 hns3_parse_l4_cksum_params(uint64_t ol_flags, uint32_t *type_cs_vlan_tso_len)
1929 /* Enable L4 checksum offloads */
1930 switch (ol_flags & PKT_TX_L4_MASK) {
1931 case PKT_TX_TCP_CKSUM:
1932 tmp = *type_cs_vlan_tso_len;
1933 hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
1935 hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
1936 hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
1937 sizeof(struct rte_tcp_hdr) >> HNS3_L4_LEN_UNIT);
1938 *type_cs_vlan_tso_len = tmp;
1940 case PKT_TX_UDP_CKSUM:
1941 tmp = *type_cs_vlan_tso_len;
1942 hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
1944 hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
1945 hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
1946 sizeof(struct rte_udp_hdr) >> HNS3_L4_LEN_UNIT);
1947 *type_cs_vlan_tso_len = tmp;
1949 case PKT_TX_SCTP_CKSUM:
1950 tmp = *type_cs_vlan_tso_len;
1951 hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
1953 hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
1954 hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
1955 sizeof(struct rte_sctp_hdr) >> HNS3_L4_LEN_UNIT);
1956 *type_cs_vlan_tso_len = tmp;
1964 hns3_txd_enable_checksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
1967 struct hns3_desc *tx_ring = txq->tx_ring;
1968 struct hns3_desc *desc = &tx_ring[tx_desc_id];
1971 	/* inner (or normal) L2 header size, in units of 2 bytes */
1972 hns3_set_field(value, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
1973 sizeof(struct rte_ether_hdr) >> HNS3_L2_LEN_UNIT);
1975 hns3_parse_l3_cksum_params(ol_flags, &value);
1976 hns3_parse_l4_cksum_params(ol_flags, &value);
1978 desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(value);
1982 hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
1989 for (i = 0; i < nb_pkts; i++) {
1992 /* check the size of packet */
1993 if (m->pkt_len < RTE_ETHER_MIN_LEN) {
1998 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
1999 ret = rte_validate_tx_offload(m);
2005 ret = rte_net_intel_cksum_prepare(m);
2016 hns3_parse_cksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
2017 const struct rte_mbuf *m, struct rte_net_hdr_lens *hdr_lens)
2019 /* Fill in tunneling parameters if necessary */
2020 if (m->ol_flags & PKT_TX_TUNNEL_MASK) {
2021 (void)rte_net_get_ptype(m, hdr_lens, RTE_PTYPE_ALL_MASK);
2022 if (hns3_parse_tunneling_params(txq, tx_desc_id, m->ol_flags,
2026 /* Enable checksum offloading */
2027 if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK)
2028 hns3_txd_enable_checksum(txq, tx_desc_id, m->ol_flags);
2034 hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2036 struct rte_net_hdr_lens hdr_lens = {0};
2037 struct hns3_tx_queue *txq = tx_queue;
2038 struct hns3_entry *tx_bak_pkt;
2039 struct rte_mbuf *new_pkt;
2040 struct rte_mbuf *tx_pkt;
2041 struct rte_mbuf *m_seg;
2042 uint32_t nb_hold = 0;
2043 uint16_t tx_next_use;
2044 uint16_t tx_pkt_num;
2050 /* free useless buffer */
2051 hns3_tx_free_useless_buffer(txq);
2053 tx_next_use = txq->next_to_use;
2054 tx_bd_max = txq->nb_tx_desc;
2055 tx_pkt_num = nb_pkts;
2058 tx_bak_pkt = &txq->sw_ring[tx_next_use];
2059 for (nb_tx = 0; nb_tx < tx_pkt_num; nb_tx++) {
2060 tx_pkt = *tx_pkts++;
2062 nb_buf = tx_pkt->nb_segs;
2064 if (nb_buf > txq->tx_bd_ready) {
2072 		 * If the packet length is greater than the HNS3_MAX_FRAME_LEN the
2073 		 * driver supports, the packet will be ignored.
2075 if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) > HNS3_MAX_FRAME_LEN))
2079 		 * If the packet length is less than the minimum packet size, the driver pads it with zeros.
2082 if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) < HNS3_MIN_PKT_SIZE)) {
2086 add_len = HNS3_MIN_PKT_SIZE -
2087 rte_pktmbuf_pkt_len(tx_pkt);
2088 appended = rte_pktmbuf_append(tx_pkt, add_len);
2089 if (appended == NULL)
2092 memset(appended, 0, add_len);
2096 if (unlikely(nb_buf > HNS3_MAX_TX_BD_PER_PKT)) {
2097 if (hns3_reassemble_tx_pkts(txq, tx_pkt, &new_pkt))
2100 nb_buf = m_seg->nb_segs;
2103 if (hns3_parse_cksum(txq, tx_next_use, m_seg, &hdr_lens))
2108 fill_desc(txq, tx_next_use, m_seg, (i == 0), 0);
2109 tx_bak_pkt->mbuf = m_seg;
2110 m_seg = m_seg->next;
2113 if (tx_next_use >= tx_bd_max) {
2115 tx_bak_pkt = txq->sw_ring;
2119 } while (m_seg != NULL);
2122 txq->next_to_use = tx_next_use;
2123 txq->tx_bd_ready -= i;
2129 hns3_queue_xmit(txq, nb_hold);
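	/*
	 * Note that the tail doorbell (HNS3_RING_TX_TAIL_REG) is written once
	 * per burst with the total number of BDs queued, rather than once per
	 * packet, which keeps MMIO writes off the per-packet fast path.
	 */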
2135 hns3_dummy_rxtx_burst(void *dpdk_txq __rte_unused,
2136 struct rte_mbuf **pkts __rte_unused,
2137 uint16_t pkts_n __rte_unused)
2142 void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
2144 struct hns3_adapter *hns = eth_dev->data->dev_private;
2146 if (hns->hw.adapter_state == HNS3_NIC_STARTED &&
2147 rte_atomic16_read(&hns->hw.reset.resetting) == 0) {
2148 eth_dev->rx_pkt_burst = hns3_recv_pkts;
2149 eth_dev->tx_pkt_burst = hns3_xmit_pkts;
2150 eth_dev->tx_pkt_prepare = hns3_prep_pkts;
2152 eth_dev->rx_pkt_burst = hns3_dummy_rxtx_burst;
2153 eth_dev->tx_pkt_burst = hns3_dummy_rxtx_burst;
2154 eth_dev->tx_pkt_prepare = hns3_dummy_rxtx_burst;
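/*
 * While the port is not started or a reset is in progress,
 * hns3_set_rxtx_function() above installs hns3_dummy_rxtx_burst for all three
 * callbacks; it returns 0 without touching any queue, so application RX/TX
 * calls become harmless no-ops instead of accessing rings that may be torn
 * down.
 */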