1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2019 Hisilicon Limited.
11 #include <rte_bus_pci.h>
12 #include <rte_byteorder.h>
13 #include <rte_common.h>
14 #include <rte_cycles.h>
17 #include <rte_ether.h>
18 #include <rte_vxlan.h>
19 #include <rte_ethdev_driver.h>
24 #include <rte_malloc.h>
27 #include "hns3_ethdev.h"
28 #include "hns3_rxtx.h"
29 #include "hns3_regs.h"
30 #include "hns3_logs.h"
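/*
 * The BD number registers take the descriptor count encoded as
 * (count / 8) - 1, which is what this macro computes.
 */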
32 #define HNS3_CFG_DESC_NUM(num) ((num) / 8 - 1)
33 #define DEFAULT_RX_FREE_THRESH 16
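/* Free any mbufs still attached to the Rx software ring entries. */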
36 hns3_rx_queue_release_mbufs(struct hns3_rx_queue *rxq)
41 for (i = 0; i < rxq->nb_rx_desc; i++) {
42 if (rxq->sw_ring[i].mbuf) {
43 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
44 rxq->sw_ring[i].mbuf = NULL;
51 hns3_tx_queue_release_mbufs(struct hns3_tx_queue *txq)
56 for (i = 0; i < txq->nb_tx_desc; i++) {
57 if (txq->sw_ring[i].mbuf) {
58 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
59 txq->sw_ring[i].mbuf = NULL;
66 hns3_rx_queue_release(void *queue)
68 struct hns3_rx_queue *rxq = queue;
70 hns3_rx_queue_release_mbufs(rxq);
72 rte_memzone_free(rxq->mz);
74 rte_free(rxq->sw_ring);
80 hns3_tx_queue_release(void *queue)
82 struct hns3_tx_queue *txq = queue;
84 hns3_tx_queue_release_mbufs(txq);
86 rte_memzone_free(txq->mz);
88 rte_free(txq->sw_ring);
94 hns3_dev_rx_queue_release(void *queue)
96 struct hns3_rx_queue *rxq = queue;
97 struct hns3_adapter *hns;
103 rte_spinlock_lock(&hns->hw.lock);
104 hns3_rx_queue_release(queue);
105 rte_spinlock_unlock(&hns->hw.lock);
109 hns3_dev_tx_queue_release(void *queue)
111 struct hns3_tx_queue *txq = queue;
112 struct hns3_adapter *hns;
118 rte_spinlock_lock(&hns->hw.lock);
119 hns3_tx_queue_release(queue);
120 rte_spinlock_unlock(&hns->hw.lock);
124 hns3_free_all_queues(struct rte_eth_dev *dev)
128 if (dev->data->rx_queues)
129 for (i = 0; i < dev->data->nb_rx_queues; i++) {
130 hns3_rx_queue_release(dev->data->rx_queues[i]);
131 dev->data->rx_queues[i] = NULL;
134 if (dev->data->tx_queues)
135 for (i = 0; i < dev->data->nb_tx_queues; i++) {
136 hns3_tx_queue_release(dev->data->tx_queues[i]);
137 dev->data->tx_queues[i] = NULL;
142 hns3_alloc_rx_queue_mbufs(struct hns3_hw *hw, struct hns3_rx_queue *rxq)
144 struct rte_mbuf *mbuf;
148 for (i = 0; i < rxq->nb_rx_desc; i++) {
149 mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
150 if (unlikely(mbuf == NULL)) {
151 hns3_err(hw, "Failed to allocate RXD[%d] for rx queue!",
153 hns3_rx_queue_release_mbufs(rxq);
157 rte_mbuf_refcnt_set(mbuf, 1);
159 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
161 mbuf->port = rxq->port_id;
163 rxq->sw_ring[i].mbuf = mbuf;
164 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
165 rxq->rx_ring[i].addr = dma_addr;
166 rxq->rx_ring[i].rx.bd_base_info = 0;
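/*
 * Convert an Rx buffer size into the BD size type encoding written to the
 * ring's BD_LEN register.
 */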
173 hns3_buf_size2type(uint32_t buf_size)
179 bd_size_type = HNS3_BD_SIZE_512_TYPE;
182 bd_size_type = HNS3_BD_SIZE_1024_TYPE;
185 bd_size_type = HNS3_BD_SIZE_4096_TYPE;
188 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
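/*
 * Program the Rx ring DMA base address (split into low and high 32-bit
 * halves), the buffer size type and the descriptor count into the queue's
 * registers.
 */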
195 hns3_init_rx_queue_hw(struct hns3_rx_queue *rxq)
197 uint32_t rx_buf_len = rxq->rx_buf_len;
198 uint64_t dma_addr = rxq->rx_ring_phys_addr;
200 hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_L_REG, (uint32_t)dma_addr);
201 hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_H_REG,
202 (uint32_t)((dma_addr >> 31) >> 1));
204 hns3_write_dev(rxq, HNS3_RING_RX_BD_LEN_REG,
205 hns3_buf_size2type(rx_buf_len));
206 hns3_write_dev(rxq, HNS3_RING_RX_BD_NUM_REG,
207 HNS3_CFG_DESC_NUM(rxq->nb_rx_desc));
211 hns3_init_tx_queue_hw(struct hns3_tx_queue *txq)
213 uint64_t dma_addr = txq->tx_ring_phys_addr;
215 hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_L_REG, (uint32_t)dma_addr);
216 hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_H_REG,
217 (uint32_t)((dma_addr >> 31) >> 1));
219 hns3_write_dev(txq, HNS3_RING_TX_BD_NUM_REG,
220 HNS3_CFG_DESC_NUM(txq->nb_tx_desc));
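/*
 * Set or clear the ring enable bit for every Rx/Tx queue pair. When
 * enabling, queue pairs marked for deferred start are skipped.
 */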
224 hns3_enable_all_queues(struct hns3_hw *hw, bool en)
226 struct hns3_rx_queue *rxq;
227 struct hns3_tx_queue *txq;
231 for (i = 0; i < hw->data->nb_rx_queues; i++) {
232 rxq = hw->data->rx_queues[i];
233 txq = hw->data->tx_queues[i];
234 if (rxq == NULL || txq == NULL ||
235 (en && (rxq->rx_deferred_start || txq->tx_deferred_start)))
237 rcb_reg = hns3_read_dev(rxq, HNS3_RING_EN_REG);
239 rcb_reg |= BIT(HNS3_RING_EN_B);
241 rcb_reg &= ~BIT(HNS3_RING_EN_B);
242 hns3_write_dev(rxq, HNS3_RING_EN_REG, rcb_reg);
247 hns3_tqp_enable(struct hns3_hw *hw, uint16_t queue_id, bool enable)
249 struct hns3_cfg_com_tqp_queue_cmd *req;
250 struct hns3_cmd_desc desc;
253 req = (struct hns3_cfg_com_tqp_queue_cmd *)desc.data;
255 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_COM_TQP_QUEUE, false);
256 req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
258 hns3_set_bit(req->enable, HNS3_TQP_ENABLE_B, enable ? 1 : 0);
260 ret = hns3_cmd_send(hw, &desc, 1);
262 hns3_err(hw, "TQP enable fail, ret = %d", ret);
268 hns3_send_reset_tqp_cmd(struct hns3_hw *hw, uint16_t queue_id, bool enable)
270 struct hns3_reset_tqp_queue_cmd *req;
271 struct hns3_cmd_desc desc;
274 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, false);
276 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
277 req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
278 hns3_set_bit(req->reset_req, HNS3_TQP_RESET_B, enable ? 1 : 0);
280 ret = hns3_cmd_send(hw, &desc, 1);
282 hns3_err(hw, "Send tqp reset cmd error, ret = %d", ret);
288 hns3_get_reset_status(struct hns3_hw *hw, uint16_t queue_id)
290 struct hns3_reset_tqp_queue_cmd *req;
291 struct hns3_cmd_desc desc;
294 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, true);
296 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
297 req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
299 ret = hns3_cmd_send(hw, &desc, 1);
301 hns3_err(hw, "Get reset status error, ret = %d", ret);
305 return hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B);
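/*
 * Reset one task queue pair: disable it, request the reset through the
 * command queue, poll the reset status for up to HNS3_TQP_RESET_TRY_MS,
 * then deassert the reset.
 */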
309 hns3_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
311 #define HNS3_TQP_RESET_TRY_MS 200
316 ret = hns3_tqp_enable(hw, queue_id, false);
321 * In the current version, VF is not supported when the PF is driven by the
322 * DPDK driver; all task queue pairs are mapped to the PF function, so the
323 * PF's queue id is equal to the global queue id in the PF range.
325 ret = hns3_send_reset_tqp_cmd(hw, queue_id, true);
327 hns3_err(hw, "Send reset tqp cmd fail, ret = %d", ret);
331 end = get_timeofday_ms() + HNS3_TQP_RESET_TRY_MS;
333 /* Wait for tqp hw reset */
334 rte_delay_ms(HNS3_POLL_RESPONE_MS);
335 reset_status = hns3_get_reset_status(hw, queue_id);
340 } while (get_timeofday_ms() < end);
343 hns3_err(hw, "Reset TQP fail, ret = %d", ret);
347 ret = hns3_send_reset_tqp_cmd(hw, queue_id, false);
349 hns3_err(hw, "Failed to deassert the soft reset, ret = %d", ret);
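/*
 * VF queue reset: disable the queue locally first, then ask the PF to
 * perform the actual TQP reset through the mailbox.
 */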
355 hns3vf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
360 /* Disable VF's queue before sending the queue reset msg to PF */
361 ret = hns3_tqp_enable(hw, queue_id, false);
365 memcpy(msg_data, &queue_id, sizeof(uint16_t));
367 return hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data,
368 sizeof(msg_data), true, NULL, 0);
372 hns3_reset_queue(struct hns3_adapter *hns, uint16_t queue_id)
374 struct hns3_hw *hw = &hns->hw;
376 return hns3vf_reset_tqp(hw, queue_id);
378 return hns3_reset_tqp(hw, queue_id);
382 hns3_reset_all_queues(struct hns3_adapter *hns)
384 struct hns3_hw *hw = &hns->hw;
388 for (i = 0; i < hw->data->nb_rx_queues; i++) {
389 ret = hns3_reset_queue(hns, i);
391 hns3_err(hw, "Failed to reset No.%d queue: %d", i, ret);
399 hns3_dev_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
401 struct hns3_hw *hw = &hns->hw;
402 struct hns3_rx_queue *rxq;
405 PMD_INIT_FUNC_TRACE();
407 rxq = hw->data->rx_queues[idx];
409 ret = hns3_alloc_rx_queue_mbufs(hw, rxq);
411 hns3_err(hw, "Failed to alloc mbuf for No.%d rx queue: %d",
416 rxq->next_to_use = 0;
417 rxq->next_to_clean = 0;
418 hns3_init_rx_queue_hw(rxq);
424 hns3_dev_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)
426 struct hns3_hw *hw = &hns->hw;
427 struct hns3_tx_queue *txq;
428 struct hns3_desc *desc;
431 txq = hw->data->tx_queues[idx];
435 for (i = 0; i < txq->nb_tx_desc; i++) {
436 desc->tx.tp_fe_sc_vld_ra_ri = 0;
440 txq->next_to_use = 0;
441 txq->next_to_clean = 0;
442 txq->tx_bd_ready = txq->nb_tx_desc;
443 hns3_init_tx_queue_hw(txq);
447 hns3_init_tx_ring_tc(struct hns3_adapter *hns)
449 struct hns3_hw *hw = &hns->hw;
450 struct hns3_tx_queue *txq;
453 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
454 struct hns3_tc_queue_info *tc_queue = &hw->tc_queue[i];
457 if (!tc_queue->enable)
460 for (j = 0; j < tc_queue->tqp_count; j++) {
461 num = tc_queue->tqp_offset + j;
462 txq = hw->data->tx_queues[num];
466 hns3_write_dev(txq, HNS3_RING_TX_TC_REG, tc_queue->tc);
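/*
 * Bring up the datapath: configure RSS, optionally reset all queues, start
 * every non-deferred Rx/Tx queue pair, program the per-TC Tx rings and
 * finally enable all queues.
 */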
472 hns3_start_queues(struct hns3_adapter *hns, bool reset_queue)
474 struct hns3_hw *hw = &hns->hw;
475 struct rte_eth_dev_data *dev_data = hw->data;
476 struct hns3_rx_queue *rxq;
477 struct hns3_tx_queue *txq;
482 /* Initialize RSS for queues */
483 ret = hns3_config_rss(hns);
485 hns3_err(hw, "Failed to configure rss %d", ret);
490 ret = hns3_reset_all_queues(hns);
492 hns3_err(hw, "Failed to reset all queues %d", ret);
498 * Hardware does not support the case where the numbers of rx and tx
499 * queues differ on hip08. The two values are checked in the .dev_configure
500 * callback function, so here we assume that the number of rx and tx
503 for (i = 0; i < hw->data->nb_rx_queues; i++) {
504 rxq = dev_data->rx_queues[i];
505 txq = dev_data->tx_queues[i];
506 if (rxq == NULL || txq == NULL || rxq->rx_deferred_start ||
507 txq->tx_deferred_start)
510 ret = hns3_dev_rx_queue_start(hns, i);
512 hns3_err(hw, "Failed to start No.%d rx queue: %d", i,
516 hns3_dev_tx_queue_start(hns, i);
518 hns3_init_tx_ring_tc(hns);
520 hns3_enable_all_queues(hw, true);
524 for (j = 0; j < i; j++) {
525 rxq = dev_data->rx_queues[j];
526 hns3_rx_queue_release_mbufs(rxq);
533 hns3_stop_queues(struct hns3_adapter *hns, bool reset_queue)
535 struct hns3_hw *hw = &hns->hw;
538 hns3_enable_all_queues(hw, false);
540 ret = hns3_reset_all_queues(hns);
542 hns3_err(hw, "Failed to reset all queues %d", ret);
550 hns3_dev_release_mbufs(struct hns3_adapter *hns)
552 struct rte_eth_dev_data *dev_data = hns->hw.data;
553 struct hns3_rx_queue *rxq;
554 struct hns3_tx_queue *txq;
557 if (dev_data->rx_queues)
558 for (i = 0; i < dev_data->nb_rx_queues; i++) {
559 rxq = dev_data->rx_queues[i];
560 if (rxq == NULL || rxq->rx_deferred_start)
562 hns3_rx_queue_release_mbufs(rxq);
565 if (dev_data->tx_queues)
566 for (i = 0; i < dev_data->nb_tx_queues; i++) {
567 txq = dev_data->tx_queues[i];
568 if (txq == NULL || txq->tx_deferred_start)
570 hns3_tx_queue_release_mbufs(txq);
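/*
 * .rx_queue_setup callback: allocate the queue structure, software ring
 * and DMA descriptor ring, then record the queue in dev->data under the
 * hardware lock.
 */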
575 hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
576 unsigned int socket_id, const struct rte_eth_rxconf *conf,
577 struct rte_mempool *mp)
579 struct hns3_adapter *hns = dev->data->dev_private;
580 const struct rte_memzone *rx_mz;
581 struct hns3_hw *hw = &hns->hw;
582 struct hns3_rx_queue *rxq;
583 unsigned int desc_size = sizeof(struct hns3_desc);
584 unsigned int rx_desc;
587 if (dev->data->dev_started) {
588 hns3_err(hw, "rx_queue_setup after dev_start not supported");
592 if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||
593 nb_desc % HNS3_ALIGN_RING_DESC) {
594 hns3_err(hw, "Number (%u) of rx descriptors is invalid",
599 if (dev->data->rx_queues[idx]) {
600 hns3_rx_queue_release(dev->data->rx_queues[idx]);
601 dev->data->rx_queues[idx] = NULL;
604 rxq = rte_zmalloc_socket("hns3 RX queue", sizeof(struct hns3_rx_queue),
605 RTE_CACHE_LINE_SIZE, socket_id);
607 hns3_err(hw, "Failed to allocate memory for rx queue!");
613 rxq->nb_rx_desc = nb_desc;
615 if (conf->rx_free_thresh <= 0)
616 rxq->rx_free_thresh = DEFAULT_RX_FREE_THRESH;
618 rxq->rx_free_thresh = conf->rx_free_thresh;
619 rxq->rx_deferred_start = conf->rx_deferred_start;
621 rx_entry_len = sizeof(struct hns3_entry) * rxq->nb_rx_desc;
622 rxq->sw_ring = rte_zmalloc_socket("hns3 RX sw ring", rx_entry_len,
623 RTE_CACHE_LINE_SIZE, socket_id);
624 if (rxq->sw_ring == NULL) {
625 hns3_err(hw, "Failed to allocate memory for rx sw ring!");
626 hns3_rx_queue_release(rxq);
630 /* Allocate rx ring hardware descriptors. */
631 rx_desc = rxq->nb_rx_desc * desc_size;
632 rx_mz = rte_eth_dma_zone_reserve(dev, "rx_ring", idx, rx_desc,
633 HNS3_RING_BASE_ALIGN, socket_id);
635 hns3_err(hw, "Failed to reserve DMA memory for No.%d rx ring!",
637 hns3_rx_queue_release(rxq);
641 rxq->rx_ring = (struct hns3_desc *)rx_mz->addr;
642 rxq->rx_ring_phys_addr = rx_mz->iova;
644 hns3_dbg(hw, "No.%d rx descriptors iova 0x%" PRIx64, idx,
645 rxq->rx_ring_phys_addr);
647 rxq->next_to_use = 0;
648 rxq->next_to_clean = 0;
650 rxq->pkt_first_seg = NULL;
651 rxq->pkt_last_seg = NULL;
652 rxq->port_id = dev->data->port_id;
653 rxq->configured = true;
654 rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
655 idx * HNS3_TQP_REG_SIZE);
656 rxq->rx_buf_len = hw->rx_buf_len;
657 rxq->non_vld_descs = 0;
659 rxq->pkt_len_errors = 0;
660 rxq->l3_csum_erros = 0;
661 rxq->l4_csum_erros = 0;
662 rxq->ol3_csum_erros = 0;
663 rxq->ol4_csum_erros = 0;
665 rte_spinlock_lock(&hw->lock);
666 dev->data->rx_queues[idx] = rxq;
667 rte_spinlock_unlock(&hw->lock);
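/*
 * Translate the L2/L3/L4 and outer L3/L4 indexes reported in the Rx BD
 * into an rte_mbuf packet_type value, using the inner tables for tunneled
 * packets.
 */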
672 static inline uint32_t
673 rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint32_t ol_info)
675 #define HNS3_L2TBL_NUM 4
676 #define HNS3_L3TBL_NUM 16
677 #define HNS3_L4TBL_NUM 16
678 #define HNS3_OL3TBL_NUM 16
679 #define HNS3_OL4TBL_NUM 16
680 uint32_t pkt_type = 0;
681 uint32_t l2id, l3id, l4id;
682 uint32_t ol3id, ol4id;
684 static const uint32_t l2table[HNS3_L2TBL_NUM] = {
686 RTE_PTYPE_L2_ETHER_VLAN,
687 RTE_PTYPE_L2_ETHER_QINQ,
691 static const uint32_t l3table[HNS3_L3TBL_NUM] = {
694 RTE_PTYPE_L2_ETHER_ARP,
696 RTE_PTYPE_L3_IPV4_EXT,
697 RTE_PTYPE_L3_IPV6_EXT,
698 RTE_PTYPE_L2_ETHER_LLDP,
699 0, 0, 0, 0, 0, 0, 0, 0, 0
702 static const uint32_t l4table[HNS3_L4TBL_NUM] = {
705 RTE_PTYPE_TUNNEL_GRE,
709 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
712 static const uint32_t inner_l2table[HNS3_L2TBL_NUM] = {
713 RTE_PTYPE_INNER_L2_ETHER,
714 RTE_PTYPE_INNER_L2_ETHER_VLAN,
715 RTE_PTYPE_INNER_L2_ETHER_QINQ,
719 static const uint32_t inner_l3table[HNS3_L3TBL_NUM] = {
720 RTE_PTYPE_INNER_L3_IPV4,
721 RTE_PTYPE_INNER_L3_IPV6,
723 RTE_PTYPE_INNER_L2_ETHER,
724 RTE_PTYPE_INNER_L3_IPV4_EXT,
725 RTE_PTYPE_INNER_L3_IPV6_EXT,
726 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
729 static const uint32_t inner_l4table[HNS3_L4TBL_NUM] = {
730 RTE_PTYPE_INNER_L4_UDP,
731 RTE_PTYPE_INNER_L4_TCP,
732 RTE_PTYPE_TUNNEL_GRE,
733 RTE_PTYPE_INNER_L4_SCTP,
735 RTE_PTYPE_INNER_L4_ICMP,
736 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
739 static const uint32_t ol3table[HNS3_OL3TBL_NUM] = {
743 RTE_PTYPE_L3_IPV4_EXT,
744 RTE_PTYPE_L3_IPV6_EXT,
745 0, 0, 0, 0, 0, 0, 0, 0, 0,
749 static const uint32_t ol4table[HNS3_OL4TBL_NUM] = {
751 RTE_PTYPE_TUNNEL_VXLAN,
752 RTE_PTYPE_TUNNEL_NVGRE,
753 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
756 l2id = hns3_get_field(pkt_info, HNS3_RXD_STRP_TAGP_M,
757 HNS3_RXD_STRP_TAGP_S);
758 l3id = hns3_get_field(pkt_info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S);
759 l4id = hns3_get_field(pkt_info, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S);
760 ol3id = hns3_get_field(ol_info, HNS3_RXD_OL3ID_M, HNS3_RXD_OL3ID_S);
761 ol4id = hns3_get_field(ol_info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
764 pkt_type |= (inner_l2table[l2id] | inner_l3table[l3id] |
765 inner_l4table[l4id] | ol3table[ol3id] |
768 pkt_type |= (l2table[l2id] | l3table[l3id] | l4table[l4id]);
773 hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
775 static const uint32_t ptypes[] = {
777 RTE_PTYPE_L2_ETHER_VLAN,
778 RTE_PTYPE_L2_ETHER_QINQ,
779 RTE_PTYPE_L2_ETHER_LLDP,
780 RTE_PTYPE_L2_ETHER_ARP,
782 RTE_PTYPE_L3_IPV4_EXT,
784 RTE_PTYPE_L3_IPV6_EXT,
790 RTE_PTYPE_TUNNEL_GRE,
794 if (dev->rx_pkt_burst == hns3_recv_pkts)
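/*
 * Advance next_to_use by the number of consumed BDs (wrapping at the ring
 * size) and return them to hardware through the Rx head register.
 */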
801 hns3_clean_rx_buffers(struct hns3_rx_queue *rxq, int count)
803 rxq->next_to_use += count;
804 if (rxq->next_to_use >= rxq->nb_rx_desc)
805 rxq->next_to_use -= rxq->nb_rx_desc;
807 hns3_write_dev(rxq, HNS3_RING_RX_HEAD_REG, count);
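/*
 * Validate the error bits reported in the Rx BD: count L2 and packet-length
 * errors, and record L3/L4 and outer checksum errors both in the mbuf
 * ol_flags and in the returned checksum-error mask.
 */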
811 hns3_handle_bdinfo(struct hns3_rx_queue *rxq, struct rte_mbuf *rxm,
812 uint32_t bd_base_info, uint32_t l234_info,
817 if (unlikely(l234_info & BIT(HNS3_RXD_L2E_B))) {
822 if (unlikely(rxm->pkt_len == 0 ||
823 (l234_info & BIT(HNS3_RXD_TRUNCAT_B)))) {
824 rxq->pkt_len_errors++;
828 if (bd_base_info & BIT(HNS3_RXD_L3L4P_B)) {
829 if (unlikely(l234_info & BIT(HNS3_RXD_L3E_B))) {
830 rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;
831 rxq->l3_csum_erros++;
832 tmp |= HNS3_L3_CKSUM_ERR;
835 if (unlikely(l234_info & BIT(HNS3_RXD_L4E_B))) {
836 rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
837 rxq->l4_csum_erros++;
838 tmp |= HNS3_L4_CKSUM_ERR;
841 if (unlikely(l234_info & BIT(HNS3_RXD_OL3E_B))) {
842 rxq->ol3_csum_erros++;
843 tmp |= HNS3_OUTER_L3_CKSUM_ERR;
846 if (unlikely(l234_info & BIT(HNS3_RXD_OL4E_B))) {
847 rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
848 rxq->ol4_csum_erros++;
849 tmp |= HNS3_OUTER_L4_CKSUM_ERR;
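/*
 * Set the IP/L4 checksum-good offload flags according to the packet type
 * and the checksum errors recorded from the BD; inner and outer layers of
 * tunneled packets are handled separately.
 */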
858 hns3_rx_set_cksum_flag(struct rte_mbuf *rxm, uint64_t packet_type,
859 const uint32_t cksum_err)
861 if (unlikely((packet_type & RTE_PTYPE_TUNNEL_MASK))) {
862 if (likely(packet_type & RTE_PTYPE_INNER_L3_MASK) &&
863 (cksum_err & HNS3_L3_CKSUM_ERR) == 0)
864 rxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
865 if (likely(packet_type & RTE_PTYPE_INNER_L4_MASK) &&
866 (cksum_err & HNS3_L4_CKSUM_ERR) == 0)
867 rxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
868 if (likely(packet_type & RTE_PTYPE_L4_MASK) &&
869 (cksum_err & HNS3_OUTER_L4_CKSUM_ERR) == 0)
870 rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;
872 if (likely(packet_type & RTE_PTYPE_L3_MASK) &&
873 (cksum_err & HNS3_L3_CKSUM_ERR) == 0)
874 rxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
875 if (likely(packet_type & RTE_PTYPE_L4_MASK) &&
876 (cksum_err & HNS3_L4_CKSUM_ERR) == 0)
877 rxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
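/*
 * Receive burst: read the number of filled BDs, and for each valid BD swap
 * in a freshly allocated mbuf, chain segments until the FE bit is seen,
 * fill in ol_flags/packet_type, then return the consumed BDs to hardware.
 */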
882 hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
884 struct hns3_rx_queue *rxq; /* RX queue */
885 struct hns3_desc *rx_ring; /* RX ring (desc) */
886 struct hns3_entry *sw_ring;
887 struct hns3_entry *rxe;
888 struct hns3_desc *rxdp; /* pointer to the current desc */
889 struct rte_mbuf *first_seg;
890 struct rte_mbuf *last_seg;
891 struct rte_mbuf *nmb; /* pointer to the new mbuf */
892 struct rte_mbuf *rxm;
893 struct rte_eth_dev *dev;
894 uint32_t bd_base_info;
904 int num; /* num of filled descriptors in ring */
910 dev = &rte_eth_devices[rxq->port_id];
912 rx_id = rxq->next_to_clean;
913 rx_ring = rxq->rx_ring;
914 first_seg = rxq->pkt_first_seg;
915 last_seg = rxq->pkt_last_seg;
916 sw_ring = rxq->sw_ring;
918 /* Get the number of filled descriptors in the ring */
919 num = hns3_read_dev(rxq, HNS3_RING_RX_FBDNUM_REG);
920 while (nb_rx_bd < num && nb_rx < nb_pkts) {
921 rxdp = &rx_ring[rx_id];
922 bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
923 if (unlikely(!hns3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
924 rxq->non_vld_descs++;
928 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
929 if (unlikely(nmb == NULL)) {
930 dev->data->rx_mbuf_alloc_failed++;
935 rxe = &sw_ring[rx_id];
937 if (rx_id == rxq->nb_rx_desc)
940 rte_prefetch0(sw_ring[rx_id].mbuf);
941 if ((rx_id & 0x3) == 0) {
942 rte_prefetch0(&rx_ring[rx_id]);
943 rte_prefetch0(&sw_ring[rx_id]);
949 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
950 rxdp->addr = dma_addr;
951 rxdp->rx.bd_base_info = 0;
954 /* Load the remaining descriptor data and extract necessary fields */
955 data_len = (uint16_t)(rte_le_to_cpu_16(rxdp->rx.size));
956 l234_info = rte_le_to_cpu_32(rxdp->rx.l234_info);
957 ol_info = rte_le_to_cpu_32(rxdp->rx.ol_info);
959 if (first_seg == NULL) {
961 first_seg->nb_segs = 1;
963 first_seg->nb_segs++;
964 last_seg->next = rxm;
967 rxm->data_off = RTE_PKTMBUF_HEADROOM;
968 rxm->data_len = data_len;
970 if (!hns3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
975 /* The last buffer of the received packet */
976 pkt_len = (uint16_t)(rte_le_to_cpu_16(rxdp->rx.pkt_len));
977 first_seg->pkt_len = pkt_len;
978 first_seg->port = rxq->port_id;
979 first_seg->hash.rss = rte_le_to_cpu_32(rxdp->rx.rss_hash);
980 first_seg->ol_flags |= PKT_RX_RSS_HASH;
981 if (unlikely(hns3_get_bit(bd_base_info, HNS3_RXD_LUM_B))) {
982 first_seg->hash.fdir.hi =
983 rte_le_to_cpu_32(rxdp->rx.fd_id);
984 first_seg->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
988 ret = hns3_handle_bdinfo(rxq, first_seg, bd_base_info,
989 l234_info, &cksum_err);
993 first_seg->packet_type = rxd_pkt_info_to_pkt_type(l234_info,
996 if (bd_base_info & BIT(HNS3_RXD_L3L4P_B))
997 hns3_rx_set_cksum_flag(rxm, first_seg->packet_type,
1000 first_seg->vlan_tci = rte_le_to_cpu_16(rxdp->rx.vlan_tag);
1001 first_seg->vlan_tci_outer =
1002 rte_le_to_cpu_16(rxdp->rx.ot_vlan_tag);
1003 rx_pkts[nb_rx++] = first_seg;
1007 rte_pktmbuf_free(first_seg);
1011 rxq->next_to_clean = rx_id;
1012 rxq->pkt_first_seg = first_seg;
1013 rxq->pkt_last_seg = last_seg;
1014 hns3_clean_rx_buffers(rxq, nb_rx_bd);
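/*
 * .tx_queue_setup callback: allocate the queue structure, software ring
 * and DMA descriptor ring, initialize every BD, and record the queue in
 * dev->data under the hardware lock.
 */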
1020 hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
1021 unsigned int socket_id, const struct rte_eth_txconf *conf)
1023 struct hns3_adapter *hns = dev->data->dev_private;
1024 const struct rte_memzone *tx_mz;
1025 struct hns3_hw *hw = &hns->hw;
1026 struct hns3_tx_queue *txq;
1027 struct hns3_desc *desc;
1028 unsigned int desc_size = sizeof(struct hns3_desc);
1029 unsigned int tx_desc;
1033 if (dev->data->dev_started) {
1034 hns3_err(hw, "tx_queue_setup after dev_start not supported");
1038 if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||
1039 nb_desc % HNS3_ALIGN_RING_DESC) {
1040 hns3_err(hw, "Number (%u) of tx descriptors is invalid",
1045 if (dev->data->tx_queues[idx] != NULL) {
1046 hns3_tx_queue_release(dev->data->tx_queues[idx]);
1047 dev->data->tx_queues[idx] = NULL;
1050 txq = rte_zmalloc_socket("hns3 TX queue", sizeof(struct hns3_tx_queue),
1051 RTE_CACHE_LINE_SIZE, socket_id);
1053 hns3_err(hw, "Failed to allocate memory for tx queue!");
1057 txq->nb_tx_desc = nb_desc;
1058 txq->queue_id = idx;
1059 txq->tx_deferred_start = conf->tx_deferred_start;
1061 tx_entry_len = sizeof(struct hns3_entry) * txq->nb_tx_desc;
1062 txq->sw_ring = rte_zmalloc_socket("hns3 TX sw ring", tx_entry_len,
1063 RTE_CACHE_LINE_SIZE, socket_id);
1064 if (txq->sw_ring == NULL) {
1065 hns3_err(hw, "Failed to allocate memory for tx sw ring!");
1066 hns3_tx_queue_release(txq);
1070 /* Allocate tx ring hardware descriptors. */
1071 tx_desc = txq->nb_tx_desc * desc_size;
1072 tx_mz = rte_eth_dma_zone_reserve(dev, "tx_ring", idx, tx_desc,
1073 HNS3_RING_BASE_ALIGN, socket_id);
1074 if (tx_mz == NULL) {
1075 hns3_err(hw, "Failed to reserve DMA memory for No.%d tx ring!",
1077 hns3_tx_queue_release(txq);
1081 txq->tx_ring = (struct hns3_desc *)tx_mz->addr;
1082 txq->tx_ring_phys_addr = tx_mz->iova;
1084 hns3_dbg(hw, "No.%d tx descriptors iova 0x%" PRIx64, idx,
1085 txq->tx_ring_phys_addr);
1088 desc = txq->tx_ring;
1089 for (i = 0; i < txq->nb_tx_desc; i++) {
1090 desc->tx.tp_fe_sc_vld_ra_ri = 0;
1095 txq->next_to_use = 0;
1096 txq->next_to_clean = 0;
1097 txq->tx_bd_ready = txq->nb_tx_desc;
1098 txq->port_id = dev->data->port_id;
1099 txq->configured = true;
1100 txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
1101 idx * HNS3_TQP_REG_SIZE);
1102 rte_spinlock_lock(&hw->lock);
1103 dev->data->tx_queues[idx] = txq;
1104 rte_spinlock_unlock(&hw->lock);
1110 tx_ring_dist(struct hns3_tx_queue *txq, int begin, int end)
1112 return (end - begin + txq->nb_tx_desc) % txq->nb_tx_desc;
1116 tx_ring_space(struct hns3_tx_queue *txq)
1118 return txq->nb_tx_desc -
1119 tx_ring_dist(txq, txq->next_to_clean, txq->next_to_use) - 1;
1123 hns3_queue_xmit(struct hns3_tx_queue *txq, uint32_t buf_num)
1125 hns3_write_dev(txq, HNS3_RING_TX_TAIL_REG, buf_num);
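/*
 * Release mbufs of Tx descriptors that hardware has finished with (VLD bit
 * cleared), advancing next_to_clean and replenishing tx_bd_ready.
 */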
1129 hns3_tx_free_useless_buffer(struct hns3_tx_queue *txq)
1131 uint16_t tx_next_clean = txq->next_to_clean;
1132 uint16_t tx_next_use = txq->next_to_use;
1133 uint16_t tx_bd_ready = txq->tx_bd_ready;
1134 uint16_t tx_bd_max = txq->nb_tx_desc;
1135 struct hns3_entry *tx_bak_pkt = &txq->sw_ring[tx_next_clean];
1136 struct hns3_desc *desc = &txq->tx_ring[tx_next_clean];
1137 struct rte_mbuf *mbuf;
1139 while ((!hns3_get_bit(desc->tx.tp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B)) &&
1140 (tx_next_use != tx_next_clean || tx_bd_ready < tx_bd_max)) {
1141 mbuf = tx_bak_pkt->mbuf;
1144 rte_pktmbuf_free(mbuf);
1145 tx_bak_pkt->mbuf = NULL;
1153 if (tx_next_clean >= tx_bd_max) {
1155 desc = txq->tx_ring;
1156 tx_bak_pkt = txq->sw_ring;
1160 txq->next_to_clean = tx_next_clean;
1161 txq->tx_bd_ready = tx_bd_ready;
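/*
 * Fill one Tx BD with the buffer address, data length and VLD/FE bits; the
 * first BD of a packet additionally carries the total payload length, and
 * the VLAN insertion fields are set when the offload flags request it.
 */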
1165 fill_desc(struct hns3_tx_queue *txq, uint16_t tx_desc_id, struct rte_mbuf *rxm,
1166 bool first, int offset)
1168 struct hns3_desc *tx_ring = txq->tx_ring;
1169 struct hns3_desc *desc = &tx_ring[tx_desc_id];
1170 uint8_t frag_end = rxm->next == NULL ? 1 : 0;
1171 uint16_t size = rxm->data_len;
1173 uint64_t ol_flags = rxm->ol_flags;
1178 desc->addr = rte_mbuf_data_iova(rxm) + offset;
1179 desc->tx.send_size = rte_cpu_to_le_16(size);
1180 hns3_set_bit(rrcfv, HNS3_TXD_VLD_B, 1);
1183 hdr_len = rxm->l2_len + rxm->l3_len + rxm->l4_len;
1184 hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ?
1185 rxm->outer_l2_len + rxm->outer_l3_len : 0;
1186 paylen = rxm->pkt_len - hdr_len;
1187 desc->tx.paylen = rte_cpu_to_le_32(paylen);
1190 hns3_set_bit(rrcfv, HNS3_TXD_FE_B, frag_end);
1191 desc->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(rrcfv);
1194 if (ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
1195 tmp = rte_le_to_cpu_32(desc->tx.type_cs_vlan_tso_len);
1196 hns3_set_bit(tmp, HNS3_TXD_VLAN_B, 1);
1197 desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp);
1198 desc->tx.vlan_tag = rte_cpu_to_le_16(rxm->vlan_tci);
1201 if (ol_flags & PKT_TX_QINQ_PKT) {
1202 tmp = rte_le_to_cpu_32(desc->tx.ol_type_vlan_len_msec);
1203 hns3_set_bit(tmp, HNS3_TXD_OVLAN_B, 1);
1204 desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(tmp);
1205 desc->tx.outer_vlan_tag =
1206 rte_cpu_to_le_16(rxm->vlan_tci_outer);
1212 hns3_tx_alloc_mbufs(struct hns3_tx_queue *txq, struct rte_mempool *mb_pool,
1213 uint16_t nb_new_buf, struct rte_mbuf **alloc_mbuf)
1215 struct rte_mbuf *new_mbuf = NULL;
1216 struct rte_eth_dev *dev;
1217 struct rte_mbuf *temp;
1221 /* Allocate enough mbufs */
1222 for (i = 0; i < nb_new_buf; i++) {
1223 temp = rte_pktmbuf_alloc(mb_pool);
1224 if (unlikely(temp == NULL)) {
1225 dev = &rte_eth_devices[txq->port_id];
1226 hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1227 hns3_err(hw, "Failed to alloc TX mbuf port_id=%d, "
1228 "queue_id=%d when reassembling tx pkts.",
1229 txq->port_id, txq->queue_id);
1230 rte_pktmbuf_free(new_mbuf);
1233 temp->next = new_mbuf;
1237 if (new_mbuf == NULL)
1240 new_mbuf->nb_segs = nb_new_buf;
1241 *alloc_mbuf = new_mbuf;
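/*
 * When a packet needs more BDs than hardware supports, copy its contents
 * into a newly allocated chain of mbufs, each filled to the pool's buffer
 * size, to reduce the number of BDs required.
 */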
1247 hns3_reassemble_tx_pkts(void *tx_queue, struct rte_mbuf *tx_pkt,
1248 struct rte_mbuf **new_pkt)
1250 struct hns3_tx_queue *txq = tx_queue;
1251 struct rte_mempool *mb_pool;
1252 struct rte_mbuf *new_mbuf;
1253 struct rte_mbuf *temp_new;
1254 struct rte_mbuf *temp;
1255 uint16_t last_buf_len;
1256 uint16_t nb_new_buf;
1267 mb_pool = tx_pkt->pool;
1268 buf_size = tx_pkt->buf_len - RTE_PKTMBUF_HEADROOM;
1269 nb_new_buf = (tx_pkt->pkt_len - 1) / buf_size + 1;
1271 last_buf_len = tx_pkt->pkt_len % buf_size;
1272 if (last_buf_len == 0)
1273 last_buf_len = buf_size;
1275 /* Allocate enough mbufs */
1276 ret = hns3_tx_alloc_mbufs(txq, mb_pool, nb_new_buf, &new_mbuf);
1280 /* Copy the original packet content to the new mbufs */
1282 s = rte_pktmbuf_mtod(temp, char *);
1283 len_s = temp->data_len;
1284 temp_new = new_mbuf;
1285 for (i = 0; i < nb_new_buf; i++) {
1286 d = rte_pktmbuf_mtod(temp_new, char *);
1287 if (i < nb_new_buf - 1)
1290 buf_len = last_buf_len;
1294 len = RTE_MIN(len_s, len_d);
1298 len_d = len_d - len;
1299 len_s = len_s - len;
1305 s = rte_pktmbuf_mtod(temp, char *);
1306 len_s = temp->data_len;
1310 temp_new->data_len = buf_len;
1311 temp_new = temp_new->next;
1314 /* free original mbufs */
1315 rte_pktmbuf_free(tx_pkt);
1317 *new_pkt = new_mbuf;
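/*
 * Fill the outer L3 (OL3) type and length fields of the Tx descriptor word
 * according to the PKT_TX_OUTER_IPV4/IPV6 and outer IP checksum flags.
 */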
1323 hns3_parse_outer_params(uint64_t ol_flags, uint32_t *ol_type_vlan_len_msec)
1325 uint32_t tmp = *ol_type_vlan_len_msec;
1327 /* (outer) IP header type */
1328 if (ol_flags & PKT_TX_OUTER_IPV4) {
1329 /* OL3 header size, defined in 4 bytes */
1330 hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
1331 sizeof(struct rte_ipv4_hdr) >> HNS3_L3_LEN_UNIT);
1332 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
1333 hns3_set_field(tmp, HNS3_TXD_OL3T_M,
1334 HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_CSUM);
1336 hns3_set_field(tmp, HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
1337 HNS3_OL3T_IPV4_NO_CSUM);
1338 } else if (ol_flags & PKT_TX_OUTER_IPV6) {
1339 hns3_set_field(tmp, HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
1341 /* OL3 header size, defined in 4 bytes */
1342 hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
1343 sizeof(struct rte_ipv6_hdr) >> HNS3_L3_LEN_UNIT);
1346 *ol_type_vlan_len_msec = tmp;
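/*
 * Fill the tunnel type and outer L2/L4 length fields of the Tx descriptor
 * word; only MAC-in-UDP (VXLAN/GENEVE) and GRE tunnels are supported, any
 * other tunnel type is rejected.
 */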
1350 hns3_parse_inner_params(uint64_t ol_flags, uint32_t *ol_type_vlan_len_msec,
1351 struct rte_net_hdr_lens *hdr_lens)
1353 uint32_t tmp = *ol_type_vlan_len_msec;
1356 /* OL2 header size, defined in 2 bytes */
1357 hns3_set_field(tmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
1358 sizeof(struct rte_ether_hdr) >> HNS3_L2_LEN_UNIT);
1360 /* L4TUNT: L4 Tunneling Type */
1361 switch (ol_flags & PKT_TX_TUNNEL_MASK) {
1362 case PKT_TX_TUNNEL_GENEVE:
1363 case PKT_TX_TUNNEL_VXLAN:
1364 /* MAC in UDP tunnelling packet, including VxLAN */
1365 hns3_set_field(tmp, HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S,
1366 HNS3_TUN_MAC_IN_UDP);
1368 * OL4 header size, defined in units of 4 bytes; it contains the outer
1369 * L4 (UDP) length and the tunneling header length.
1371 hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
1372 (uint8_t)RTE_ETHER_VXLAN_HLEN >>
1375 case PKT_TX_TUNNEL_GRE:
1376 hns3_set_field(tmp, HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S,
1379 * OL4 header size, defined in units of 4 bytes; it contains the outer
1380 * L4 (GRE) length and the tunneling header length.
1382 l4_len = hdr_lens->l4_len + hdr_lens->tunnel_len;
1383 hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
1384 l4_len >> HNS3_L4_LEN_UNIT);
1387 /* For non UDP / GRE tunneling, drop the tunnel packet */
1391 *ol_type_vlan_len_msec = tmp;
1397 hns3_parse_tunneling_params(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
1399 struct rte_net_hdr_lens *hdr_lens)
1401 struct hns3_desc *tx_ring = txq->tx_ring;
1402 struct hns3_desc *desc = &tx_ring[tx_desc_id];
1406 hns3_parse_outer_params(ol_flags, &value);
1407 ret = hns3_parse_inner_params(ol_flags, &value, hdr_lens);
1411 desc->tx.ol_type_vlan_len_msec |= rte_cpu_to_le_32(value);
1417 hns3_parse_l3_cksum_params(uint64_t ol_flags, uint32_t *type_cs_vlan_tso_len)
1421 /* Enable L3 checksum offloads */
1422 if (ol_flags & PKT_TX_IPV4) {
1423 tmp = *type_cs_vlan_tso_len;
1424 hns3_set_field(tmp, HNS3_TXD_L3T_M, HNS3_TXD_L3T_S,
1426 /* inner(/normal) L3 header size, defined in 4 bytes */
1427 hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
1428 sizeof(struct rte_ipv4_hdr) >> HNS3_L3_LEN_UNIT);
1429 if (ol_flags & PKT_TX_IP_CKSUM)
1430 hns3_set_bit(tmp, HNS3_TXD_L3CS_B, 1);
1431 *type_cs_vlan_tso_len = tmp;
1432 } else if (ol_flags & PKT_TX_IPV6) {
1433 tmp = *type_cs_vlan_tso_len;
1434 /* L3T: IPv6 does not need an L3 header checksum */
1435 hns3_set_field(tmp, HNS3_TXD_L3T_M, HNS3_TXD_L3T_S,
1437 /* inner(/normal) L3 header size, defined in 4 bytes */
1438 hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
1439 sizeof(struct rte_ipv6_hdr) >> HNS3_L3_LEN_UNIT);
1440 *type_cs_vlan_tso_len = tmp;
1445 hns3_parse_l4_cksum_params(uint64_t ol_flags, uint32_t *type_cs_vlan_tso_len)
1449 /* Enable L4 checksum offloads */
1450 switch (ol_flags & PKT_TX_L4_MASK) {
1451 case PKT_TX_TCP_CKSUM:
1452 tmp = *type_cs_vlan_tso_len;
1453 hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
1455 hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
1456 hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
1457 sizeof(struct rte_tcp_hdr) >> HNS3_L4_LEN_UNIT);
1458 *type_cs_vlan_tso_len = tmp;
1460 case PKT_TX_UDP_CKSUM:
1461 tmp = *type_cs_vlan_tso_len;
1462 hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
1464 hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
1465 hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
1466 sizeof(struct rte_udp_hdr) >> HNS3_L4_LEN_UNIT);
1467 *type_cs_vlan_tso_len = tmp;
1469 case PKT_TX_SCTP_CKSUM:
1470 tmp = *type_cs_vlan_tso_len;
1471 hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
1473 hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
1474 hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
1475 sizeof(struct rte_sctp_hdr) >> HNS3_L4_LEN_UNIT);
1476 *type_cs_vlan_tso_len = tmp;
1484 hns3_txd_enable_checksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
1487 struct hns3_desc *tx_ring = txq->tx_ring;
1488 struct hns3_desc *desc = &tx_ring[tx_desc_id];
1491 /* inner(/normal) L2 header size, defined in 2 bytes */
1492 hns3_set_field(value, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
1493 sizeof(struct rte_ether_hdr) >> HNS3_L2_LEN_UNIT);
1495 hns3_parse_l3_cksum_params(ol_flags, &value);
1496 hns3_parse_l4_cksum_params(ol_flags, &value);
1498 desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(value);
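/*
 * tx_pkt_prepare callback: reject frames shorter than HNS3_MIN_FRAME_LEN,
 * validate offloads in debug builds, and fix up checksum pseudo-headers via
 * rte_net_intel_cksum_prepare().
 */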
1502 hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
1509 for (i = 0; i < nb_pkts; i++) {
1512 /* check the size of packet */
1513 if (m->pkt_len < HNS3_MIN_FRAME_LEN) {
1518 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
1519 ret = rte_validate_tx_offload(m);
1525 ret = rte_net_intel_cksum_prepare(m);
1536 hns3_parse_cksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
1537 const struct rte_mbuf *m, struct rte_net_hdr_lens *hdr_lens)
1539 /* Fill in tunneling parameters if necessary */
1540 if (m->ol_flags & PKT_TX_TUNNEL_MASK) {
1541 (void)rte_net_get_ptype(m, hdr_lens, RTE_PTYPE_ALL_MASK);
1542 if (hns3_parse_tunneling_params(txq, tx_desc_id, m->ol_flags,
1546 /* Enable checksum offloading */
1547 if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK)
1548 hns3_txd_enable_checksum(txq, tx_desc_id, m->ol_flags);
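/*
 * Transmit burst: reclaim completed descriptors, then for each packet
 * (reassembling those with too many segments) fill a BD per segment,
 * program checksum/tunnel offloads, and finally ring the doorbell with the
 * number of BDs queued.
 */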
1554 hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1556 struct rte_net_hdr_lens hdr_lens = {0};
1557 struct hns3_tx_queue *txq = tx_queue;
1558 struct hns3_entry *tx_bak_pkt;
1559 struct rte_mbuf *new_pkt;
1560 struct rte_mbuf *tx_pkt;
1561 struct rte_mbuf *m_seg;
1562 struct rte_mbuf *temp;
1563 uint32_t nb_hold = 0;
1564 uint16_t tx_next_clean;
1565 uint16_t tx_next_use;
1566 uint16_t tx_bd_ready;
1567 uint16_t tx_pkt_num;
1573 /* free useless buffer */
1574 hns3_tx_free_useless_buffer(txq);
1575 tx_bd_ready = txq->tx_bd_ready;
1576 if (tx_bd_ready == 0)
1579 tx_next_clean = txq->next_to_clean;
1580 tx_next_use = txq->next_to_use;
1581 tx_bd_max = txq->nb_tx_desc;
1582 tx_bak_pkt = &txq->sw_ring[tx_next_clean];
1584 tx_pkt_num = (tx_bd_ready < nb_pkts) ? tx_bd_ready : nb_pkts;
1587 tx_bak_pkt = &txq->sw_ring[tx_next_use];
1588 for (nb_tx = 0; nb_tx < tx_pkt_num; nb_tx++) {
1589 tx_pkt = *tx_pkts++;
1591 nb_buf = tx_pkt->nb_segs;
1593 if (nb_buf > tx_ring_space(txq)) {
1601 * If the packet is too long or its length is zero, the packet
1604 if (unlikely(tx_pkt->pkt_len > HNS3_MAX_FRAME_LEN ||
1605 tx_pkt->pkt_len == 0))
1609 if (unlikely(nb_buf > HNS3_MAX_TX_BD_PER_PKT)) {
1610 if (hns3_reassemble_tx_pkts(txq, tx_pkt, &new_pkt))
1613 nb_buf = m_seg->nb_segs;
1616 if (hns3_parse_cksum(txq, tx_next_use, m_seg, &hdr_lens))
1621 fill_desc(txq, tx_next_use, m_seg, (i == 0), 0);
1623 tx_bak_pkt->mbuf = m_seg;
1627 if (tx_next_use >= tx_bd_max) {
1629 tx_bak_pkt = txq->sw_ring;
1633 } while (m_seg != NULL);
1640 if (likely(nb_tx)) {
1641 hns3_queue_xmit(txq, nb_hold);
1642 txq->next_to_clean = tx_next_clean;
1643 txq->next_to_use = tx_next_use;
1644 txq->tx_bd_ready = tx_bd_ready - nb_hold;
1651 hns3_dummy_rxtx_burst(void *dpdk_txq __rte_unused,
1652 struct rte_mbuf **pkts __rte_unused,
1653 uint16_t pkts_n __rte_unused)
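/*
 * Install the real Rx/Tx burst functions only when the adapter has started
 * and no reset is in progress; otherwise install dummy callbacks that
 * silently do nothing.
 */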
1658 void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
1660 struct hns3_adapter *hns = eth_dev->data->dev_private;
1662 if (hns->hw.adapter_state == HNS3_NIC_STARTED &&
1663 rte_atomic16_read(&hns->hw.reset.resetting) == 0) {
1664 eth_dev->rx_pkt_burst = hns3_recv_pkts;
1665 eth_dev->tx_pkt_burst = hns3_xmit_pkts;
1666 eth_dev->tx_pkt_prepare = hns3_prep_pkts;
1668 eth_dev->rx_pkt_burst = hns3_dummy_rxtx_burst;
1669 eth_dev->tx_pkt_burst = hns3_dummy_rxtx_burst;
1670 eth_dev->tx_pkt_prepare = hns3_dummy_rxtx_burst;