/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2019 Hisilicon Limited.
 */
#include <rte_bus_pci.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_ether.h>
#include <rte_vxlan.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>

#include "hns3_ethdev.h"
#include "hns3_rxtx.h"
#include "hns3_regs.h"
#include "hns3_logs.h"
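/*
 * Note: HNS3_CFG_DESC_NUM below encodes a ring size the way the BD number
 * registers expect it: the descriptor count in units of eight, minus one.
 * For example, a 1024-descriptor ring is programmed as 1024 / 8 - 1 = 127.
 */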
#define HNS3_CFG_DESC_NUM(num)	((num) / 8 - 1)
#define DEFAULT_RX_FREE_THRESH	16
static void
hns3_rx_queue_release_mbufs(struct hns3_rx_queue *rxq)
{
	uint16_t i;

	if (rxq->sw_ring) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_ring[i].mbuf) {
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
				rxq->sw_ring[i].mbuf = NULL;
			}
		}
	}
}
static void
hns3_tx_queue_release_mbufs(struct hns3_tx_queue *txq)
{
	uint16_t i;

	if (txq->sw_ring) {
		for (i = 0; i < txq->nb_tx_desc; i++) {
			if (txq->sw_ring[i].mbuf) {
				rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
				txq->sw_ring[i].mbuf = NULL;
			}
		}
	}
}
static void
hns3_rx_queue_release(void *queue)
{
	struct hns3_rx_queue *rxq = queue;
	if (rxq) {
		hns3_rx_queue_release_mbufs(rxq);
		if (rxq->mz)
			rte_memzone_free(rxq->mz);
		if (rxq->sw_ring)
			rte_free(rxq->sw_ring);
		rte_free(rxq);
	}
}
static void
hns3_tx_queue_release(void *queue)
{
	struct hns3_tx_queue *txq = queue;
	if (txq) {
		hns3_tx_queue_release_mbufs(txq);
		if (txq->mz)
			rte_memzone_free(txq->mz);
		if (txq->sw_ring)
			rte_free(txq->sw_ring);
		rte_free(txq);
	}
}
void
hns3_dev_rx_queue_release(void *queue)
{
	struct hns3_rx_queue *rxq = queue;
	struct hns3_adapter *hns;

	if (rxq == NULL)
		return;

	hns = rxq->hns;
	rte_spinlock_lock(&hns->hw.lock);
	hns3_rx_queue_release(queue);
	rte_spinlock_unlock(&hns->hw.lock);
}
void
hns3_dev_tx_queue_release(void *queue)
{
	struct hns3_tx_queue *txq = queue;
	struct hns3_adapter *hns;

	if (txq == NULL)
		return;

	hns = txq->hns;
	rte_spinlock_lock(&hns->hw.lock);
	hns3_tx_queue_release(queue);
	rte_spinlock_unlock(&hns->hw.lock);
}
void
hns3_free_all_queues(struct rte_eth_dev *dev)
{
	uint16_t i;

	if (dev->data->rx_queues)
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			hns3_rx_queue_release(dev->data->rx_queues[i]);
			dev->data->rx_queues[i] = NULL;
		}

	if (dev->data->tx_queues)
		for (i = 0; i < dev->data->nb_tx_queues; i++) {
			hns3_tx_queue_release(dev->data->tx_queues[i]);
			dev->data->tx_queues[i] = NULL;
		}
}
static int
hns3_alloc_rx_queue_mbufs(struct hns3_hw *hw, struct hns3_rx_queue *rxq)
{
	struct rte_mbuf *mbuf;
	uint64_t dma_addr;
	uint16_t i;

	for (i = 0; i < rxq->nb_rx_desc; i++) {
		mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
		if (unlikely(mbuf == NULL)) {
			hns3_err(hw, "Failed to allocate RXD[%d] for rx queue!",
				 i);
			hns3_rx_queue_release_mbufs(rxq);
			return -ENOMEM;
		}

		rte_mbuf_refcnt_set(mbuf, 1);
		mbuf->next = NULL;
		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->nb_segs = 1;
		mbuf->port = rxq->port_id;

		rxq->sw_ring[i].mbuf = mbuf;
		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
		rxq->rx_ring[i].addr = dma_addr;
		rxq->rx_ring[i].rx.bd_base_info = 0;
	}

	return 0;
}
static uint32_t
hns3_buf_size2type(uint32_t buf_size)
{
	int bd_size_type;

	switch (buf_size) {
	case 512:
		bd_size_type = HNS3_BD_SIZE_512_TYPE;
		break;
	case 1024:
		bd_size_type = HNS3_BD_SIZE_1024_TYPE;
		break;
	case 4096:
		bd_size_type = HNS3_BD_SIZE_4096_TYPE;
		break;
	default:
		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
	}

	return bd_size_type;
}
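/*
 * Any buffer size that is not one of the three sizes the hardware encodes
 * directly (512, 1024 or 4096 bytes) falls back to the 2048-byte type,
 * which is also the common default Rx buffer size.
 */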
static void
hns3_init_rx_queue_hw(struct hns3_rx_queue *rxq)
{
	uint32_t rx_buf_len = rxq->rx_buf_len;
	uint64_t dma_addr = rxq->rx_ring_phys_addr;

	hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_L_REG, (uint32_t)dma_addr);
	hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_H_REG,
		       (uint32_t)((dma_addr >> 31) >> 1));

	hns3_write_dev(rxq, HNS3_RING_RX_BD_LEN_REG,
		       hns3_buf_size2type(rx_buf_len));
	hns3_write_dev(rxq, HNS3_RING_RX_BD_NUM_REG,
		       HNS3_CFG_DESC_NUM(rxq->nb_rx_desc));
}
static void
hns3_init_tx_queue_hw(struct hns3_tx_queue *txq)
{
	uint64_t dma_addr = txq->tx_ring_phys_addr;

	hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_L_REG, (uint32_t)dma_addr);
	hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_H_REG,
		       (uint32_t)((dma_addr >> 31) >> 1));

	hns3_write_dev(txq, HNS3_RING_TX_BD_NUM_REG,
		       HNS3_CFG_DESC_NUM(txq->nb_tx_desc));
}
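/*
 * The 64-bit ring base address above is programmed as two 32-bit halves:
 * the cast to uint32_t keeps bits 0-31 and the two-step shift
 * ((dma_addr >> 31) >> 1) extracts bits 32-63. Splitting the shift is a
 * defensive idiom (shared with the kernel hns3 driver) that stays
 * well-defined even if the address type were ever narrowed to 32 bits.
 */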
void
hns3_enable_all_queues(struct hns3_hw *hw, bool en)
{
	struct hns3_rx_queue *rxq;
	struct hns3_tx_queue *txq;
	uint32_t rcb_reg;
	int i;

	for (i = 0; i < hw->data->nb_rx_queues; i++) {
		rxq = hw->data->rx_queues[i];
		txq = hw->data->tx_queues[i];
		if (rxq == NULL || txq == NULL ||
		    (en && (rxq->rx_deferred_start || txq->tx_deferred_start)))
			continue;
		rcb_reg = hns3_read_dev(rxq, HNS3_RING_EN_REG);
		if (en)
			rcb_reg |= BIT(HNS3_RING_EN_B);
		else
			rcb_reg &= ~BIT(HNS3_RING_EN_B);
		hns3_write_dev(rxq, HNS3_RING_EN_REG, rcb_reg);
	}
}
static int
hns3_tqp_enable(struct hns3_hw *hw, uint16_t queue_id, bool enable)
{
	struct hns3_cfg_com_tqp_queue_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	req = (struct hns3_cfg_com_tqp_queue_cmd *)desc.data;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_COM_TQP_QUEUE, false);
	req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
	hns3_set_bit(req->enable, HNS3_TQP_ENABLE_B, enable ? 1 : 0);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "TQP enable fail, ret = %d", ret);

	return ret;
}
static int
hns3_send_reset_tqp_cmd(struct hns3_hw *hw, uint16_t queue_id, bool enable)
{
	struct hns3_reset_tqp_queue_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, false);

	req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
	hns3_set_bit(req->reset_req, HNS3_TQP_RESET_B, enable ? 1 : 0);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "Send tqp reset cmd error, ret = %d", ret);

	return ret;
}
static int
hns3_get_reset_status(struct hns3_hw *hw, uint16_t queue_id)
{
	struct hns3_reset_tqp_queue_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, true);

	req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "Get reset status error, ret = %d", ret);
		return ret;
	}

	return hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B);
}
static int
hns3_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
{
#define HNS3_TQP_RESET_TRY_MS	200
	uint64_t end;
	int reset_status = 0;
	int ret;

	ret = hns3_tqp_enable(hw, queue_id, false);
	if (ret)
		return ret;

	/*
	 * In the current version, VF is not supported when the PF is driven
	 * by the DPDK driver; all task queue pairs are mapped to the PF
	 * function, so the PF's queue id is equal to the global queue id in
	 * the PF range.
	 */
	ret = hns3_send_reset_tqp_cmd(hw, queue_id, true);
	if (ret) {
		hns3_err(hw, "Send reset tqp cmd fail, ret = %d", ret);
		return ret;
	}

	ret = -ETIMEDOUT;
	end = get_timeofday_ms() + HNS3_TQP_RESET_TRY_MS;
	do {
		/* Wait for tqp hw reset */
		rte_delay_ms(HNS3_POLL_RESPONE_MS);
		reset_status = hns3_get_reset_status(hw, queue_id);
		if (reset_status) {
			ret = 0;
			break;
		}
	} while (get_timeofday_ms() < end);

	if (ret) {
		hns3_err(hw, "Reset TQP fail, ret = %d", ret);
		return ret;
	}

	ret = hns3_send_reset_tqp_cmd(hw, queue_id, false);
	if (ret)
		hns3_err(hw, "Deassert the soft reset fail, ret = %d", ret);

	return ret;
}
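/*
 * The PF-side TQP reset above is a four-step handshake: disable the queue,
 * assert the reset through the command queue, poll ready_to_reset until
 * the hardware acknowledges (or HNS3_TQP_RESET_TRY_MS elapses), and
 * finally deassert the soft reset.
 */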
static int
hns3vf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
{
	uint8_t msg_data[2];
	int ret;

	/* Disable VF's queue before sending the queue reset msg to PF */
	ret = hns3_tqp_enable(hw, queue_id, false);
	if (ret)
		return ret;

	memcpy(msg_data, &queue_id, sizeof(uint16_t));

	return hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data,
				 sizeof(msg_data), true, NULL, 0);
}
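/*
 * A VF has no direct access to the reset command queue, so it forwards the
 * queue id to the PF in an HNS3_MBX_QUEUE_RESET mailbox message and lets
 * the PF perform the actual TQP reset on its behalf.
 */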
static int
hns3_reset_queue(struct hns3_adapter *hns, uint16_t queue_id)
{
	struct hns3_hw *hw = &hns->hw;

	if (hns->is_vf)
		return hns3vf_reset_tqp(hw, queue_id);
	else
		return hns3_reset_tqp(hw, queue_id);
}
int
hns3_reset_all_queues(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;
	uint16_t i;

	for (i = 0; i < hw->data->nb_rx_queues; i++) {
		ret = hns3_reset_queue(hns, i);
		if (ret) {
			hns3_err(hw, "Failed to reset No.%d queue: %d", i, ret);
			return ret;
		}
	}

	return 0;
}
void
hns3_tqp_intr_enable(struct hns3_hw *hw, uint16_t tpq_int_num, bool en)
{
	uint32_t addr, value;

	addr = HNS3_TQP_INTR_CTRL_REG + tpq_int_num * HNS3_VECTOR_REG_OFFSET;
	value = en ? 1 : 0;

	hns3_write_dev(hw, addr, value);
}
int
hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (dev->data->dev_conf.intr_conf.rxq == 0)
		return -ENOTSUP;

	/* enable the vectors */
	hns3_tqp_intr_enable(hw, queue_id, true);

	return rte_intr_ack(intr_handle);
}
int
hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (dev->data->dev_conf.intr_conf.rxq == 0)
		return -ENOTSUP;

	/* disable the vectors */
	hns3_tqp_intr_enable(hw, queue_id, false);

	return 0;
}
static int
hns3_dev_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_rx_queue *rxq;
	int ret;

	PMD_INIT_FUNC_TRACE();

	rxq = hw->data->rx_queues[idx];

	ret = hns3_alloc_rx_queue_mbufs(hw, rxq);
	if (ret) {
		hns3_err(hw, "Failed to alloc mbuf for No.%d rx queue: %d",
			 idx, ret);
		return ret;
	}

	rxq->next_to_use = 0;
	rxq->next_to_clean = 0;
	hns3_init_rx_queue_hw(rxq);

	return 0;
}
static void
hns3_dev_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_tx_queue *txq;
	struct hns3_desc *desc;
	int i;

	txq = hw->data->tx_queues[idx];

	/* Clear tx bd */
	desc = txq->tx_ring;
	for (i = 0; i < txq->nb_tx_desc; i++) {
		desc->tx.tp_fe_sc_vld_ra_ri = 0;
		desc++;
	}

	txq->next_to_use = 0;
	txq->next_to_clean = 0;
	txq->tx_bd_ready = txq->nb_tx_desc;
	hns3_init_tx_queue_hw(txq);
}
static void
hns3_init_tx_ring_tc(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_tx_queue *txq;
	int i, num;

	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
		struct hns3_tc_queue_info *tc_queue = &hw->tc_queue[i];
		int j;

		if (!tc_queue->enable)
			continue;

		for (j = 0; j < tc_queue->tqp_count; j++) {
			num = tc_queue->tqp_offset + j;
			txq = hw->data->tx_queues[num];
			if (txq == NULL)
				continue;

			hns3_write_dev(txq, HNS3_RING_TX_TC_REG, tc_queue->tc);
		}
	}
}
int
hns3_start_queues(struct hns3_adapter *hns, bool reset_queue)
{
	struct hns3_hw *hw = &hns->hw;
	struct rte_eth_dev_data *dev_data = hw->data;
	struct hns3_rx_queue *rxq;
	struct hns3_tx_queue *txq;
	int ret;
	int i;
	int j;

	/* Initialize RSS for queues */
	ret = hns3_config_rss(hns);
	if (ret) {
		hns3_err(hw, "Failed to configure rss %d", ret);
		return ret;
	}

	if (reset_queue) {
		ret = hns3_reset_all_queues(hns);
		if (ret) {
			hns3_err(hw, "Failed to reset all queues %d", ret);
			return ret;
		}
	}

	/*
	 * hip08 hardware does not support different numbers of rx and tx
	 * queues. The .dev_configure callback has already checked the two
	 * values, so here we assume the numbers of rx and tx queues are
	 * equal.
	 */
	for (i = 0; i < hw->data->nb_rx_queues; i++) {
		rxq = dev_data->rx_queues[i];
		txq = dev_data->tx_queues[i];
		if (rxq == NULL || txq == NULL || rxq->rx_deferred_start ||
		    txq->tx_deferred_start)
			continue;

		ret = hns3_dev_rx_queue_start(hns, i);
		if (ret) {
			hns3_err(hw, "Failed to start No.%d rx queue: %d", i,
				 ret);
			goto out;
		}
		hns3_dev_tx_queue_start(hns, i);
	}
	hns3_init_tx_ring_tc(hns);

	hns3_enable_all_queues(hw, true);
	return 0;

out:
	/* Roll back the rx queues that were already started */
	for (j = 0; j < i; j++) {
		rxq = dev_data->rx_queues[j];
		hns3_rx_queue_release_mbufs(rxq);
	}

	return ret;
}
int
hns3_stop_queues(struct hns3_adapter *hns, bool reset_queue)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	hns3_enable_all_queues(hw, false);
	if (reset_queue) {
		ret = hns3_reset_all_queues(hns);
		if (ret) {
			hns3_err(hw, "Failed to reset all queues %d", ret);
			return ret;
		}
	}
	return 0;
}
void
hns3_dev_release_mbufs(struct hns3_adapter *hns)
{
	struct rte_eth_dev_data *dev_data = hns->hw.data;
	struct hns3_rx_queue *rxq;
	struct hns3_tx_queue *txq;
	int i;

	if (dev_data->rx_queues)
		for (i = 0; i < dev_data->nb_rx_queues; i++) {
			rxq = dev_data->rx_queues[i];
			if (rxq == NULL || rxq->rx_deferred_start)
				continue;
			hns3_rx_queue_release_mbufs(rxq);
		}

	if (dev_data->tx_queues)
		for (i = 0; i < dev_data->nb_tx_queues; i++) {
			txq = dev_data->tx_queues[i];
			if (txq == NULL || txq->tx_deferred_start)
				continue;
			hns3_tx_queue_release_mbufs(txq);
		}
}
int
hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
		    unsigned int socket_id, const struct rte_eth_rxconf *conf,
		    struct rte_mempool *mp)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	const struct rte_memzone *rx_mz;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_rx_queue *rxq;
	unsigned int desc_size = sizeof(struct hns3_desc);
	unsigned int rx_desc;
	int rx_entry_len;

	if (dev->data->dev_started) {
		hns3_err(hw, "rx_queue_setup after dev_start not supported");
		return -EINVAL;
	}

	if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||
	    nb_desc % HNS3_ALIGN_RING_DESC) {
		hns3_err(hw, "Number (%u) of rx descriptors is invalid",
			 nb_desc);
		return -EINVAL;
	}

	if (dev->data->rx_queues[idx]) {
		hns3_rx_queue_release(dev->data->rx_queues[idx]);
		dev->data->rx_queues[idx] = NULL;
	}

	rxq = rte_zmalloc_socket("hns3 RX queue", sizeof(struct hns3_rx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq == NULL) {
		hns3_err(hw, "Failed to allocate memory for rx queue!");
		return -ENOMEM;
	}

	rxq->hns = hns;
	rxq->mb_pool = mp;
	rxq->nb_rx_desc = nb_desc;
	rxq->queue_id = idx;
	if (conf->rx_free_thresh <= 0)
		rxq->rx_free_thresh = DEFAULT_RX_FREE_THRESH;
	else
		rxq->rx_free_thresh = conf->rx_free_thresh;
	rxq->rx_deferred_start = conf->rx_deferred_start;

	rx_entry_len = sizeof(struct hns3_entry) * rxq->nb_rx_desc;
	rxq->sw_ring = rte_zmalloc_socket("hns3 RX sw ring", rx_entry_len,
					  RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->sw_ring == NULL) {
		hns3_err(hw, "Failed to allocate memory for rx sw ring!");
		hns3_rx_queue_release(rxq);
		return -ENOMEM;
	}

	/* Allocate rx ring hardware descriptors. */
	rx_desc = rxq->nb_rx_desc * desc_size;
	rx_mz = rte_eth_dma_zone_reserve(dev, "rx_ring", idx, rx_desc,
					 HNS3_RING_BASE_ALIGN, socket_id);
	if (rx_mz == NULL) {
		hns3_err(hw, "Failed to reserve DMA memory for No.%d rx ring!",
			 idx);
		hns3_rx_queue_release(rxq);
		return -ENOMEM;
	}
	rxq->mz = rx_mz;
	rxq->rx_ring = (struct hns3_desc *)rx_mz->addr;
	rxq->rx_ring_phys_addr = rx_mz->iova;

	hns3_dbg(hw, "No.%d rx descriptors iova 0x%" PRIx64, idx,
		 rxq->rx_ring_phys_addr);

	rxq->next_to_use = 0;
	rxq->next_to_clean = 0;
	rxq->pkt_first_seg = NULL;
	rxq->pkt_last_seg = NULL;
	rxq->port_id = dev->data->port_id;
	rxq->configured = true;
	rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
				idx * HNS3_TQP_REG_SIZE);
	rxq->rx_buf_len = hw->rx_buf_len;
	rxq->non_vld_descs = 0;
	rxq->l2_errors = 0;
	rxq->pkt_len_errors = 0;
	rxq->l3_csum_erros = 0;
	rxq->l4_csum_erros = 0;
	rxq->ol3_csum_erros = 0;
	rxq->ol4_csum_erros = 0;

	rte_spinlock_lock(&hw->lock);
	dev->data->rx_queues[idx] = rxq;
	rte_spinlock_unlock(&hw->lock);

	return 0;
}
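/*
 * Illustrative only (not part of the driver): an application reaches this
 * setup path through the generic ethdev API, for example:
 *
 *	struct rte_eth_rxconf rxconf = {
 *		.rx_free_thresh = 32,
 *		.rx_deferred_start = 0,
 *	};
 *	ret = rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(),
 *				     &rxconf, mbuf_pool);
 *
 * ethdev then dispatches to hns3_rx_queue_setup() via the PMD's dev_ops.
 */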
static inline uint32_t
rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint32_t ol_info)
{
#define HNS3_L2TBL_NUM	4
#define HNS3_L3TBL_NUM	16
#define HNS3_L4TBL_NUM	16
#define HNS3_OL3TBL_NUM	16
#define HNS3_OL4TBL_NUM	16
	uint32_t pkt_type = 0;
	uint32_t l2id, l3id, l4id;
	uint32_t ol3id, ol4id;

	static const uint32_t l2table[HNS3_L2TBL_NUM] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L2_ETHER_QINQ,
		0
	};

	static const uint32_t l3table[HNS3_L3TBL_NUM] = {
		0,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L2_ETHER_LLDP,
		0, 0, 0, 0, 0, 0, 0, 0, 0
	};

	static const uint32_t l4table[HNS3_L4TBL_NUM] = {
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_TUNNEL_GRE,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_IGMP,
		RTE_PTYPE_L4_ICMP,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};

	static const uint32_t inner_l2table[HNS3_L2TBL_NUM] = {
		RTE_PTYPE_INNER_L2_ETHER,
		RTE_PTYPE_INNER_L2_ETHER_VLAN,
		RTE_PTYPE_INNER_L2_ETHER_QINQ,
		0
	};

	static const uint32_t inner_l3table[HNS3_L3TBL_NUM] = {
		RTE_PTYPE_INNER_L3_IPV4,
		RTE_PTYPE_INNER_L3_IPV6,
		0,
		RTE_PTYPE_INNER_L2_ETHER,
		RTE_PTYPE_INNER_L3_IPV4_EXT,
		RTE_PTYPE_INNER_L3_IPV6_EXT,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};

	static const uint32_t inner_l4table[HNS3_L4TBL_NUM] = {
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_TUNNEL_GRE,
		RTE_PTYPE_INNER_L4_SCTP,
		RTE_PTYPE_L4_IGMP,
		RTE_PTYPE_INNER_L4_ICMP,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};

	static const uint32_t ol3table[HNS3_OL3TBL_NUM] = {
		0,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6_EXT,
		0, 0, 0, 0, 0, 0, 0, 0, 0,
		RTE_PTYPE_UNKNOWN,
		RTE_PTYPE_UNKNOWN
	};

	static const uint32_t ol4table[HNS3_OL4TBL_NUM] = {
		0,
		RTE_PTYPE_TUNNEL_VXLAN,
		RTE_PTYPE_TUNNEL_NVGRE,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};

	l2id = hns3_get_field(pkt_info, HNS3_RXD_STRP_TAGP_M,
			      HNS3_RXD_STRP_TAGP_S);
	l3id = hns3_get_field(pkt_info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S);
	l4id = hns3_get_field(pkt_info, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S);
	ol3id = hns3_get_field(ol_info, HNS3_RXD_OL3ID_M, HNS3_RXD_OL3ID_S);
	ol4id = hns3_get_field(ol_info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);

	if (ol4table[ol4id])
		pkt_type |= (inner_l2table[l2id] | inner_l3table[l3id] |
			     inner_l4table[l4id] | ol3table[ol3id] |
			     ol4table[ol4id]);
	else
		pkt_type |= (l2table[l2id] | l3table[l3id] | l4table[l4id]);

	return pkt_type;
}
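/*
 * The BD field values above are plain table indexes: a nonzero ol4table
 * entry means the hardware recognised a tunnel header, so the inner_*
 * tables plus the outer OL3/OL4 tables are combined; otherwise only the
 * plain L2/L3/L4 tables apply. For example, a VXLAN-encapsulated TCP/IPv4
 * frame reports RTE_PTYPE_TUNNEL_VXLAN together with the matching
 * RTE_PTYPE_INNER_* types and the outer L3 type.
 */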
const uint32_t *
hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L2_ETHER_QINQ,
		RTE_PTYPE_L2_ETHER_LLDP,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_IGMP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_TUNNEL_GRE,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == hns3_recv_pkts)
		return ptypes;

	return NULL;
}
static void
hns3_clean_rx_buffers(struct hns3_rx_queue *rxq, int count)
{
	rxq->next_to_use += count;
	if (rxq->next_to_use >= rxq->nb_rx_desc)
		rxq->next_to_use -= rxq->nb_rx_desc;

	hns3_write_dev(rxq, HNS3_RING_RX_HEAD_REG, count);
}
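/*
 * Writing the consumed BD count to HNS3_RING_RX_HEAD_REG returns those
 * descriptors to the hardware; next_to_use simply wraps modulo the ring
 * size (e.g. 1020 + 8 on a 1024-entry ring wraps to 4).
 */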
static int
hns3_handle_bdinfo(struct hns3_rx_queue *rxq, struct rte_mbuf *rxm,
		   uint32_t bd_base_info, uint32_t l234_info,
		   uint32_t *cksum_err)
{
	uint32_t tmp = 0;

	if (unlikely(l234_info & BIT(HNS3_RXD_L2E_B))) {
		rxq->l2_errors++;
		return -EINVAL;
	}

	if (unlikely(rxm->pkt_len == 0 ||
		(l234_info & BIT(HNS3_RXD_TRUNCAT_B)))) {
		rxq->pkt_len_errors++;
		return -EINVAL;
	}

	if (bd_base_info & BIT(HNS3_RXD_L3L4P_B)) {
		if (unlikely(l234_info & BIT(HNS3_RXD_L3E_B))) {
			rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;
			rxq->l3_csum_erros++;
			tmp |= HNS3_L3_CKSUM_ERR;
		}

		if (unlikely(l234_info & BIT(HNS3_RXD_L4E_B))) {
			rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
			rxq->l4_csum_erros++;
			tmp |= HNS3_L4_CKSUM_ERR;
		}

		if (unlikely(l234_info & BIT(HNS3_RXD_OL3E_B))) {
			rxq->ol3_csum_erros++;
			tmp |= HNS3_OUTER_L3_CKSUM_ERR;
		}

		if (unlikely(l234_info & BIT(HNS3_RXD_OL4E_B))) {
			rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
			rxq->ol4_csum_erros++;
			tmp |= HNS3_OUTER_L4_CKSUM_ERR;
		}
	}
	*cksum_err = tmp;

	return 0;
}
static void
hns3_rx_set_cksum_flag(struct rte_mbuf *rxm, uint64_t packet_type,
		       const uint32_t cksum_err)
{
	if (unlikely((packet_type & RTE_PTYPE_TUNNEL_MASK))) {
		if (likely(packet_type & RTE_PTYPE_INNER_L3_MASK) &&
		    (cksum_err & HNS3_L3_CKSUM_ERR) == 0)
			rxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
		if (likely(packet_type & RTE_PTYPE_INNER_L4_MASK) &&
		    (cksum_err & HNS3_L4_CKSUM_ERR) == 0)
			rxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
		if (likely(packet_type & RTE_PTYPE_L4_MASK) &&
		    (cksum_err & HNS3_OUTER_L4_CKSUM_ERR) == 0)
			rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;
	} else {
		if (likely(packet_type & RTE_PTYPE_L3_MASK) &&
		    (cksum_err & HNS3_L3_CKSUM_ERR) == 0)
			rxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
		if (likely(packet_type & RTE_PTYPE_L4_MASK) &&
		    (cksum_err & HNS3_L4_CKSUM_ERR) == 0)
			rxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
	}
}
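/*
 * Checksum reporting is two-sided: hns3_handle_bdinfo() sets the *_BAD
 * flags from the BD error bits, while the helper above promotes the
 * remaining verified layers to *_GOOD. For tunnel packets the inner
 * headers map to PKT_RX_IP/L4_CKSUM_GOOD and the outer L4 to its own
 * OUTER flag.
 */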
uint16_t
hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct hns3_rx_queue *rxq;      /* RX queue */
	struct hns3_desc *rx_ring;      /* RX ring (desc) */
	struct hns3_entry *sw_ring;
	struct hns3_entry *rxe;
	struct hns3_desc *rxdp;         /* pointer of the current desc */
	struct rte_mbuf *first_seg;
	struct rte_mbuf *last_seg;
	struct rte_mbuf *nmb;           /* pointer of the new mbuf */
	struct rte_mbuf *rxm;
	struct rte_eth_dev *dev;
	uint32_t bd_base_info;
	uint32_t cksum_err;
	uint32_t l234_info;
	uint32_t ol_info;
	uint64_t dma_addr;
	uint16_t data_len;
	uint16_t nb_rx_bd;
	uint16_t pkt_len;
	uint16_t nb_rx;
	uint16_t rx_id;
	int num;                        /* num of desc in ring */
	int ret;

	nb_rx = 0;
	nb_rx_bd = 0;
	rxq = rx_queue;
	dev = &rte_eth_devices[rxq->port_id];

	rx_id = rxq->next_to_clean;
	rx_ring = rxq->rx_ring;
	first_seg = rxq->pkt_first_seg;
	last_seg = rxq->pkt_last_seg;
	sw_ring = rxq->sw_ring;

	/* Get num of packets in descriptor ring */
	num = hns3_read_dev(rxq, HNS3_RING_RX_FBDNUM_REG);
	while (nb_rx_bd < num && nb_rx < nb_pkts) {
		rxdp = &rx_ring[rx_id];
		bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
		if (unlikely(!hns3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
			rxq->non_vld_descs++;
			break;
		}

		nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
		if (unlikely(nmb == NULL)) {
			dev->data->rx_mbuf_alloc_failed++;
			break;
		}

		nb_rx_bd++;
		rxe = &sw_ring[rx_id];
		rx_id++;
		if (rx_id == rxq->nb_rx_desc)
			rx_id = 0;

		rte_prefetch0(sw_ring[rx_id].mbuf);
		if ((rx_id & 0x3) == 0) {
			rte_prefetch0(&rx_ring[rx_id]);
			rte_prefetch0(&sw_ring[rx_id]);
		}

		rxm = rxe->mbuf;
		rxe->mbuf = nmb;

		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
		rxdp->addr = dma_addr;
		rxdp->rx.bd_base_info = 0;

		/* Load remaining descriptor data and extract necessary fields */
		data_len = (uint16_t)(rte_le_to_cpu_16(rxdp->rx.size));
		l234_info = rte_le_to_cpu_32(rxdp->rx.l234_info);
		ol_info = rte_le_to_cpu_32(rxdp->rx.ol_info);

		if (first_seg == NULL) {
			first_seg = rxm;
			first_seg->nb_segs = 1;
		} else {
			first_seg->nb_segs++;
			last_seg->next = rxm;
		}

		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rxm->data_len = data_len;

		if (!hns3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
			last_seg = rxm;
			continue;
		}

		/* The last buffer of the received packet */
		pkt_len = (uint16_t)(rte_le_to_cpu_16(rxdp->rx.pkt_len));
		first_seg->pkt_len = pkt_len;
		first_seg->port = rxq->port_id;
		first_seg->hash.rss = rte_le_to_cpu_32(rxdp->rx.rss_hash);
		first_seg->ol_flags |= PKT_RX_RSS_HASH;
		if (unlikely(hns3_get_bit(bd_base_info, HNS3_RXD_LUM_B))) {
			first_seg->hash.fdir.hi =
				rte_le_to_cpu_32(rxdp->rx.fd_id);
			first_seg->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
		}
		rxm->next = NULL;

		ret = hns3_handle_bdinfo(rxq, first_seg, bd_base_info,
					 l234_info, &cksum_err);
		if (unlikely(ret))
			goto pkt_err;

		first_seg->packet_type = rxd_pkt_info_to_pkt_type(l234_info,
								  ol_info);

		if (bd_base_info & BIT(HNS3_RXD_L3L4P_B))
			hns3_rx_set_cksum_flag(rxm, first_seg->packet_type,
					       cksum_err);

		first_seg->vlan_tci = rte_le_to_cpu_16(rxdp->rx.vlan_tag);
		first_seg->vlan_tci_outer =
			rte_le_to_cpu_16(rxdp->rx.ot_vlan_tag);
		rx_pkts[nb_rx++] = first_seg;
		first_seg = NULL;
		continue;
pkt_err:
		rte_pktmbuf_free(first_seg);
		first_seg = NULL;
	}

	rxq->next_to_clean = rx_id;
	rxq->pkt_first_seg = first_seg;
	rxq->pkt_last_seg = last_seg;
	hns3_clean_rx_buffers(rxq, nb_rx_bd);

	return nb_rx;
}
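/*
 * Illustrative only (not part of the driver): applications consume the
 * burst routine above through the generic API, e.g. a typical poll loop:
 *
 *	struct rte_mbuf *pkts[32];
 *	for (;;) {
 *		uint16_t n = rte_eth_rx_burst(port_id, 0, pkts, 32);
 *		for (uint16_t k = 0; k < n; k++)
 *			process(pkts[k]);	// application-defined
 *	}
 *
 * ethdev dispatches rte_eth_rx_burst() to hns3_recv_pkts() once the PMD
 * has been started (see hns3_set_rxtx_function() below).
 */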
int
hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
		    unsigned int socket_id, const struct rte_eth_txconf *conf)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	const struct rte_memzone *tx_mz;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_tx_queue *txq;
	struct hns3_desc *desc;
	unsigned int desc_size = sizeof(struct hns3_desc);
	unsigned int tx_desc;
	int tx_entry_len;
	int i;

	if (dev->data->dev_started) {
		hns3_err(hw, "tx_queue_setup after dev_start not supported");
		return -EINVAL;
	}

	if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||
	    nb_desc % HNS3_ALIGN_RING_DESC) {
		hns3_err(hw, "Number (%u) of tx descriptors is invalid",
			 nb_desc);
		return -EINVAL;
	}

	if (dev->data->tx_queues[idx] != NULL) {
		hns3_tx_queue_release(dev->data->tx_queues[idx]);
		dev->data->tx_queues[idx] = NULL;
	}

	txq = rte_zmalloc_socket("hns3 TX queue", sizeof(struct hns3_tx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq == NULL) {
		hns3_err(hw, "Failed to allocate memory for tx queue!");
		return -ENOMEM;
	}

	txq->nb_tx_desc = nb_desc;
	txq->queue_id = idx;
	txq->tx_deferred_start = conf->tx_deferred_start;

	tx_entry_len = sizeof(struct hns3_entry) * txq->nb_tx_desc;
	txq->sw_ring = rte_zmalloc_socket("hns3 TX sw ring", tx_entry_len,
					  RTE_CACHE_LINE_SIZE, socket_id);
	if (txq->sw_ring == NULL) {
		hns3_err(hw, "Failed to allocate memory for tx sw ring!");
		hns3_tx_queue_release(txq);
		return -ENOMEM;
	}

	/* Allocate tx ring hardware descriptors. */
	tx_desc = txq->nb_tx_desc * desc_size;
	tx_mz = rte_eth_dma_zone_reserve(dev, "tx_ring", idx, tx_desc,
					 HNS3_RING_BASE_ALIGN, socket_id);
	if (tx_mz == NULL) {
		hns3_err(hw, "Failed to reserve DMA memory for No.%d tx ring!",
			 idx);
		hns3_tx_queue_release(txq);
		return -ENOMEM;
	}
	txq->mz = tx_mz;
	txq->tx_ring = (struct hns3_desc *)tx_mz->addr;
	txq->tx_ring_phys_addr = tx_mz->iova;

	hns3_dbg(hw, "No.%d tx descriptors iova 0x%" PRIx64, idx,
		 txq->tx_ring_phys_addr);

	/* Clear tx bd */
	desc = txq->tx_ring;
	for (i = 0; i < txq->nb_tx_desc; i++) {
		desc->tx.tp_fe_sc_vld_ra_ri = 0;
		desc++;
	}

	txq->next_to_use = 0;
	txq->next_to_clean = 0;
	txq->tx_bd_ready = txq->nb_tx_desc;
	txq->port_id = dev->data->port_id;
	txq->configured = true;
	txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
				idx * HNS3_TQP_REG_SIZE);
	rte_spinlock_lock(&hw->lock);
	dev->data->tx_queues[idx] = txq;
	rte_spinlock_unlock(&hw->lock);

	return 0;
}
static inline int
tx_ring_dist(struct hns3_tx_queue *txq, int begin, int end)
{
	return (end - begin + txq->nb_tx_desc) % txq->nb_tx_desc;
}

static inline int
tx_ring_space(struct hns3_tx_queue *txq)
{
	return txq->nb_tx_desc -
		tx_ring_dist(txq, txq->next_to_clean, txq->next_to_use) - 1;
}
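/*
 * Classic ring arithmetic: tx_ring_dist() measures how far next_to_use has
 * run ahead of next_to_clean modulo the ring size, and tx_ring_space()
 * reserves one slot so a completely full ring is never mistaken for a
 * completely empty one. E.g. with nb_tx_desc = 512, clean = 10 and
 * use = 500, the distance is 490 and 21 descriptors remain usable.
 */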
static void
hns3_queue_xmit(struct hns3_tx_queue *txq, uint32_t buf_num)
{
	hns3_write_dev(txq, HNS3_RING_TX_TAIL_REG, buf_num);
}
static void
hns3_tx_free_useless_buffer(struct hns3_tx_queue *txq)
{
	uint16_t tx_next_clean = txq->next_to_clean;
	uint16_t tx_next_use = txq->next_to_use;
	uint16_t tx_bd_ready = txq->tx_bd_ready;
	uint16_t tx_bd_max = txq->nb_tx_desc;
	struct hns3_entry *tx_bak_pkt = &txq->sw_ring[tx_next_clean];
	struct hns3_desc *desc = &txq->tx_ring[tx_next_clean];
	struct rte_mbuf *mbuf;

	while ((!hns3_get_bit(desc->tx.tp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B)) &&
		(tx_next_use != tx_next_clean || tx_bd_ready < tx_bd_max)) {
		mbuf = tx_bak_pkt->mbuf;
		if (mbuf) {
			rte_pktmbuf_free(mbuf);
			tx_bak_pkt->mbuf = NULL;
		}

		desc++;
		tx_bak_pkt++;
		tx_next_clean++;
		tx_bd_ready++;

		if (tx_next_clean >= tx_bd_max) {
			tx_next_clean = 0;
			desc = txq->tx_ring;
			tx_bak_pkt = txq->sw_ring;
		}
	}

	txq->next_to_clean = tx_next_clean;
	txq->tx_bd_ready = tx_bd_ready;
}
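/*
 * Completion scan: a BD whose VLD bit has been cleared by hardware is
 * done, so its mbuf can be returned to the pool. The loop stops at the
 * first still-valid BD or once every descriptor has been reclaimed
 * (tx_next_use == tx_next_clean with a full tx_bd_ready count).
 */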
static void
fill_desc(struct hns3_tx_queue *txq, uint16_t tx_desc_id, struct rte_mbuf *rxm,
	  bool first, int offset)
{
	struct hns3_desc *tx_ring = txq->tx_ring;
	struct hns3_desc *desc = &tx_ring[tx_desc_id];
	uint8_t frag_end = rxm->next == NULL ? 1 : 0;
	uint16_t size = rxm->data_len;
	uint16_t rrcfv = 0;
	uint64_t ol_flags = rxm->ol_flags;
	uint32_t hdr_len;
	uint32_t paylen;
	uint32_t tmp;

	desc->addr = rte_mbuf_data_iova(rxm) + offset;
	desc->tx.send_size = rte_cpu_to_le_16(size);
	hns3_set_bit(rrcfv, HNS3_TXD_VLD_B, 1);

	if (first) {
		hdr_len = rxm->l2_len + rxm->l3_len + rxm->l4_len;
		hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ?
			   rxm->outer_l2_len + rxm->outer_l3_len : 0;
		paylen = rxm->pkt_len - hdr_len;
		desc->tx.paylen = rte_cpu_to_le_32(paylen);
	}

	hns3_set_bit(rrcfv, HNS3_TXD_FE_B, frag_end);
	desc->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(rrcfv);

	if (frag_end) {
		if (ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
			tmp = rte_le_to_cpu_32(desc->tx.type_cs_vlan_tso_len);
			hns3_set_bit(tmp, HNS3_TXD_VLAN_B, 1);
			desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp);
			desc->tx.vlan_tag = rte_cpu_to_le_16(rxm->vlan_tci);
		}

		if (ol_flags & PKT_TX_QINQ_PKT) {
			tmp = rte_le_to_cpu_32(desc->tx.ol_type_vlan_len_msec);
			hns3_set_bit(tmp, HNS3_TXD_OVLAN_B, 1);
			desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(tmp);
			desc->tx.outer_vlan_tag =
				rte_cpu_to_le_16(rxm->vlan_tci_outer);
		}
	}
}
static int
hns3_tx_alloc_mbufs(struct hns3_tx_queue *txq, struct rte_mempool *mb_pool,
		    uint16_t nb_new_buf, struct rte_mbuf **alloc_mbuf)
{
	struct rte_mbuf *new_mbuf = NULL;
	struct rte_eth_dev *dev;
	struct rte_mbuf *temp;
	struct hns3_hw *hw;
	uint16_t i;

	/* Allocate enough mbufs */
	for (i = 0; i < nb_new_buf; i++) {
		temp = rte_pktmbuf_alloc(mb_pool);
		if (unlikely(temp == NULL)) {
			dev = &rte_eth_devices[txq->port_id];
			hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
			hns3_err(hw, "Failed to alloc TX mbuf port_id=%d,"
				 " queue_id=%d in reassemble tx pkts.",
				 txq->port_id, txq->queue_id);
			rte_pktmbuf_free(new_mbuf);
			return -ENOMEM;
		}

		temp->next = new_mbuf;
		new_mbuf = temp;
	}

	if (new_mbuf == NULL)
		return -ENOMEM;

	new_mbuf->nb_segs = nb_new_buf;
	*alloc_mbuf = new_mbuf;

	return 0;
}
static int
hns3_reassemble_tx_pkts(void *tx_queue, struct rte_mbuf *tx_pkt,
			struct rte_mbuf **new_pkt)
{
	struct hns3_tx_queue *txq = tx_queue;
	struct rte_mempool *mb_pool;
	struct rte_mbuf *new_mbuf;
	struct rte_mbuf *temp_new;
	struct rte_mbuf *temp;
	uint16_t last_buf_len;
	uint16_t nb_new_buf;
	uint16_t buf_size;
	uint16_t buf_len;
	uint16_t len_s;
	uint16_t len_d;
	uint16_t len;
	uint16_t i;
	int ret;
	char *s;
	char *d;

	mb_pool = tx_pkt->pool;
	buf_size = tx_pkt->buf_len - RTE_PKTMBUF_HEADROOM;
	nb_new_buf = (tx_pkt->pkt_len - 1) / buf_size + 1;

	last_buf_len = tx_pkt->pkt_len % buf_size;
	if (last_buf_len == 0)
		last_buf_len = buf_size;

	/* Allocate enough mbufs */
	ret = hns3_tx_alloc_mbufs(txq, mb_pool, nb_new_buf, &new_mbuf);
	if (ret)
		return ret;

	/* Copy the original packet content to the new mbufs */
	temp = tx_pkt;
	s = rte_pktmbuf_mtod(temp, char *);
	len_s = temp->data_len;
	temp_new = new_mbuf;
	for (i = 0; i < nb_new_buf; i++) {
		d = rte_pktmbuf_mtod(temp_new, char *);
		if (i < nb_new_buf - 1)
			buf_len = buf_size;
		else
			buf_len = last_buf_len;
		len_d = buf_len;

		while (len_d) {
			len = RTE_MIN(len_s, len_d);
			memcpy(d, s, len);
			s = s + len;
			d = d + len;
			len_d = len_d - len;
			len_s = len_s - len;

			if (len_s == 0) {
				temp = temp->next;
				if (temp == NULL)
					break;
				s = rte_pktmbuf_mtod(temp, char *);
				len_s = temp->data_len;
			}
		}

		temp_new->data_len = buf_len;
		temp_new = temp_new->next;
	}

	/* Free original mbufs */
	rte_pktmbuf_free(tx_pkt);

	*new_pkt = new_mbuf;

	return 0;
}
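/*
 * A packet with more segments than HNS3_MAX_TX_BD_PER_PKT cannot be
 * described by one BD chain, so it is linearised: a fresh chain of
 * full-size mbufs is allocated and the payload is copied across segment
 * boundaries, trading a memcpy of the whole payload for a BD count the
 * hardware accepts.
 */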
static void
hns3_parse_outer_params(uint64_t ol_flags, uint32_t *ol_type_vlan_len_msec)
{
	uint32_t tmp = *ol_type_vlan_len_msec;

	/* (outer) IP header type */
	if (ol_flags & PKT_TX_OUTER_IPV4) {
		/* OL3 header size, defined in 4 bytes */
		hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
			       sizeof(struct rte_ipv4_hdr) >> HNS3_L3_LEN_UNIT);
		if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
			hns3_set_field(tmp, HNS3_TXD_OL3T_M,
				       HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_CSUM);
		else
			hns3_set_field(tmp, HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
				       HNS3_OL3T_IPV4_NO_CSUM);
	} else if (ol_flags & PKT_TX_OUTER_IPV6) {
		hns3_set_field(tmp, HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
			       HNS3_OL3T_IPV6);
		/* OL3 header size, defined in 4 bytes */
		hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
			       sizeof(struct rte_ipv6_hdr) >> HNS3_L3_LEN_UNIT);
	}

	*ol_type_vlan_len_msec = tmp;
}
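/*
 * The descriptor stores header lengths in hardware units: L3 lengths in
 * 4-byte words (HNS3_L3_LEN_UNIT) and L2 lengths in 2-byte words. A plain
 * 20-byte IPv4 header is therefore written as 20 >> 2 = 5.
 */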
static int
hns3_parse_inner_params(uint64_t ol_flags, uint32_t *ol_type_vlan_len_msec,
			struct rte_net_hdr_lens *hdr_lens)
{
	uint32_t tmp = *ol_type_vlan_len_msec;
	uint8_t l4_len;

	/* OL2 header size, defined in 2 bytes */
	hns3_set_field(tmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
		       sizeof(struct rte_ether_hdr) >> HNS3_L2_LEN_UNIT);

	/* L4TUNT: L4 Tunneling Type */
	switch (ol_flags & PKT_TX_TUNNEL_MASK) {
	case PKT_TX_TUNNEL_GENEVE:
	case PKT_TX_TUNNEL_VXLAN:
		/* MAC-in-UDP tunnelling packet, including VXLAN */
		hns3_set_field(tmp, HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S,
			       HNS3_TUN_MAC_IN_UDP);
		/*
		 * OL4 header size, defined in units of 4 bytes; it covers
		 * the outer L4 (UDP) header plus the tunnel header.
		 */
		hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
			       (uint8_t)RTE_ETHER_VXLAN_HLEN >>
			       HNS3_L4_LEN_UNIT);
		break;
	case PKT_TX_TUNNEL_GRE:
		hns3_set_field(tmp, HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S,
			       HNS3_TUN_NVGRE);
		/*
		 * OL4 header size, defined in units of 4 bytes; it covers
		 * the outer L4 (GRE) header plus the tunnel header.
		 */
		l4_len = hdr_lens->l4_len + hdr_lens->tunnel_len;
		hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
			       l4_len >> HNS3_L4_LEN_UNIT);
		break;
	default:
		/* For non-UDP/GRE tunneling, drop the tunnel packet */
		return -EINVAL;
	}

	*ol_type_vlan_len_msec = tmp;

	return 0;
}
static int
hns3_parse_tunneling_params(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
			    uint64_t ol_flags,
			    struct rte_net_hdr_lens *hdr_lens)
{
	struct hns3_desc *tx_ring = txq->tx_ring;
	struct hns3_desc *desc = &tx_ring[tx_desc_id];
	uint32_t value = 0;
	int ret;

	hns3_parse_outer_params(ol_flags, &value);
	ret = hns3_parse_inner_params(ol_flags, &value, hdr_lens);
	if (ret)
		return ret;

	desc->tx.ol_type_vlan_len_msec |= rte_cpu_to_le_32(value);

	return 0;
}
static void
hns3_parse_l3_cksum_params(uint64_t ol_flags, uint32_t *type_cs_vlan_tso_len)
{
	uint32_t tmp;

	/* Enable L3 checksum offloads */
	if (ol_flags & PKT_TX_IPV4) {
		tmp = *type_cs_vlan_tso_len;
		hns3_set_field(tmp, HNS3_TXD_L3T_M, HNS3_TXD_L3T_S,
			       HNS3_L3T_IPV4);
		/* inner(/normal) L3 header size, defined in 4 bytes */
		hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
			       sizeof(struct rte_ipv4_hdr) >> HNS3_L3_LEN_UNIT);
		if (ol_flags & PKT_TX_IP_CKSUM)
			hns3_set_bit(tmp, HNS3_TXD_L3CS_B, 1);
		*type_cs_vlan_tso_len = tmp;
	} else if (ol_flags & PKT_TX_IPV6) {
		tmp = *type_cs_vlan_tso_len;
		/* L3T: IPv6 has no header checksum */
		hns3_set_field(tmp, HNS3_TXD_L3T_M, HNS3_TXD_L3T_S,
			       HNS3_L3T_IPV6);
		/* inner(/normal) L3 header size, defined in 4 bytes */
		hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
			       sizeof(struct rte_ipv6_hdr) >> HNS3_L3_LEN_UNIT);
		*type_cs_vlan_tso_len = tmp;
	}
}
static void
hns3_parse_l4_cksum_params(uint64_t ol_flags, uint32_t *type_cs_vlan_tso_len)
{
	uint32_t tmp;

	/* Enable L4 checksum offloads */
	switch (ol_flags & PKT_TX_L4_MASK) {
	case PKT_TX_TCP_CKSUM:
		tmp = *type_cs_vlan_tso_len;
		hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
			       HNS3_L4T_TCP);
		hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
		hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
			       sizeof(struct rte_tcp_hdr) >> HNS3_L4_LEN_UNIT);
		*type_cs_vlan_tso_len = tmp;
		break;
	case PKT_TX_UDP_CKSUM:
		tmp = *type_cs_vlan_tso_len;
		hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
			       HNS3_L4T_UDP);
		hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
		hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
			       sizeof(struct rte_udp_hdr) >> HNS3_L4_LEN_UNIT);
		*type_cs_vlan_tso_len = tmp;
		break;
	case PKT_TX_SCTP_CKSUM:
		tmp = *type_cs_vlan_tso_len;
		hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
			       HNS3_L4T_SCTP);
		hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
		hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
			       sizeof(struct rte_sctp_hdr) >> HNS3_L4_LEN_UNIT);
		*type_cs_vlan_tso_len = tmp;
		break;
	default:
		break;
	}
}
static void
hns3_txd_enable_checksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
			 uint64_t ol_flags)
{
	struct hns3_desc *tx_ring = txq->tx_ring;
	struct hns3_desc *desc = &tx_ring[tx_desc_id];
	uint32_t value = 0;

	/* inner(/normal) L2 header size, defined in 2 bytes */
	hns3_set_field(value, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
		       sizeof(struct rte_ether_hdr) >> HNS3_L2_LEN_UNIT);

	hns3_parse_l3_cksum_params(ol_flags, &value);
	hns3_parse_l4_cksum_params(ol_flags, &value);

	desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(value);
}
uint16_t
hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
	       uint16_t nb_pkts)
{
	struct rte_mbuf *m;
	uint16_t i;
	int ret;

	for (i = 0; i < nb_pkts; i++) {
		m = tx_pkts[i];

		/* check the size of packet */
		if (m->pkt_len < HNS3_MIN_FRAME_LEN) {
			rte_errno = EINVAL;
			return i;
		}

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
		ret = rte_validate_tx_offload(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
#endif
		ret = rte_net_intel_cksum_prepare(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
	}

	return i;
}
static int
hns3_parse_cksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
		 const struct rte_mbuf *m, struct rte_net_hdr_lens *hdr_lens)
{
	/* Fill in tunneling parameters if necessary */
	if (m->ol_flags & PKT_TX_TUNNEL_MASK) {
		(void)rte_net_get_ptype(m, hdr_lens, RTE_PTYPE_ALL_MASK);
		if (hns3_parse_tunneling_params(txq, tx_desc_id, m->ol_flags,
						hdr_lens))
			return -EINVAL;
	}

	/* Enable checksum offloading */
	if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK)
		hns3_txd_enable_checksum(txq, tx_desc_id, m->ol_flags);

	return 0;
}
uint16_t
hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct rte_net_hdr_lens hdr_lens = {0};
	struct hns3_tx_queue *txq = tx_queue;
	struct hns3_entry *tx_bak_pkt;
	struct rte_mbuf *new_pkt;
	struct rte_mbuf *tx_pkt;
	struct rte_mbuf *m_seg;
	struct rte_mbuf *temp;
	uint32_t nb_hold = 0;
	uint16_t tx_next_clean;
	uint16_t tx_next_use;
	uint16_t tx_bd_ready;
	uint16_t tx_pkt_num;
	uint16_t tx_bd_max;
	uint16_t nb_buf;
	uint16_t nb_tx;
	uint16_t i;

	/* free useless buffer */
	hns3_tx_free_useless_buffer(txq);
	tx_bd_ready = txq->tx_bd_ready;
	if (tx_bd_ready == 0)
		return 0;

	tx_next_clean = txq->next_to_clean;
	tx_next_use = txq->next_to_use;
	tx_bd_max = txq->nb_tx_desc;
	tx_bak_pkt = &txq->sw_ring[tx_next_clean];

	tx_pkt_num = (tx_bd_ready < nb_pkts) ? tx_bd_ready : nb_pkts;

	/* send packets */
	tx_bak_pkt = &txq->sw_ring[tx_next_use];
	for (nb_tx = 0; nb_tx < tx_pkt_num; nb_tx++) {
		tx_pkt = *tx_pkts++;

		nb_buf = tx_pkt->nb_segs;

		if (nb_buf > tx_ring_space(txq)) {
			if (nb_tx == 0)
				return 0;

			goto end_of_tx;
		}

		/*
		 * If the packet length is greater than the
		 * HNS3_MAX_FRAME_LEN the driver supports, the packet is
		 * dropped.
		 */
		if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) > HNS3_MAX_FRAME_LEN))
			continue;

		/*
		 * If the packet length is less than the minimum packet size,
		 * the driver pads it with zeros.
		 */
		if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) < HNS3_MIN_PKT_SIZE)) {
			uint16_t add_len;
			char *appended;

			add_len = HNS3_MIN_PKT_SIZE -
				  rte_pktmbuf_pkt_len(tx_pkt);
			appended = rte_pktmbuf_append(tx_pkt, add_len);
			if (appended == NULL)
				continue;

			memset(appended, 0, add_len);
		}

		m_seg = tx_pkt;
		if (unlikely(nb_buf > HNS3_MAX_TX_BD_PER_PKT)) {
			if (hns3_reassemble_tx_pkts(txq, tx_pkt, &new_pkt))
				goto end_of_tx;
			m_seg = new_pkt;
			nb_buf = m_seg->nb_segs;
		}

		if (hns3_parse_cksum(txq, tx_next_use, m_seg, &hdr_lens))
			goto end_of_tx;

		i = 0;
		do {
			fill_desc(txq, tx_next_use, m_seg, (i == 0), 0);
			temp = m_seg->next;
			tx_bak_pkt->mbuf = m_seg;
			m_seg = temp;
			tx_next_use++;
			tx_bak_pkt++;
			if (tx_next_use >= tx_bd_max) {
				tx_next_use = 0;
				tx_bak_pkt = txq->sw_ring;
			}

			i++;
		} while (m_seg != NULL);

		nb_hold += i;
		txq->next_to_use = tx_next_use;
	}

end_of_tx:

	if (likely(nb_tx)) {
		hns3_queue_xmit(txq, nb_hold);
		txq->next_to_clean = tx_next_clean;
		txq->tx_bd_ready = tx_bd_ready - nb_hold;
	}

	return nb_tx;
}
static uint16_t
hns3_dummy_rxtx_burst(void *dpdk_txq __rte_unused,
		      struct rte_mbuf **pkts __rte_unused,
		      uint16_t pkts_n __rte_unused)
{
	return 0;
}
void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;

	if (hns->hw.adapter_state == HNS3_NIC_STARTED &&
	    rte_atomic16_read(&hns->hw.reset.resetting) == 0) {
		eth_dev->rx_pkt_burst = hns3_recv_pkts;
		eth_dev->tx_pkt_burst = hns3_xmit_pkts;
		eth_dev->tx_pkt_prepare = hns3_prep_pkts;
	} else {
		eth_dev->rx_pkt_burst = hns3_dummy_rxtx_burst;
		eth_dev->tx_pkt_burst = hns3_dummy_rxtx_burst;
		eth_dev->tx_pkt_prepare = hns3_dummy_rxtx_burst;
	}
}