1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2019 Hisilicon Limited.
11 #include <rte_bus_pci.h>
12 #include <rte_byteorder.h>
13 #include <rte_common.h>
14 #include <rte_cycles.h>
17 #include <rte_ether.h>
18 #include <rte_ethdev_driver.h>
23 #include <rte_malloc.h>
26 #include "hns3_ethdev.h"
27 #include "hns3_rxtx.h"
28 #include "hns3_regs.h"
29 #include "hns3_logs.h"
31 #define HNS3_CFG_DESC_NUM(num) ((num) / 8 - 1)
32 #define DEFAULT_RX_FREE_THRESH 16
35 hns3_rx_queue_release_mbufs(struct hns3_rx_queue *rxq)
40 for (i = 0; i < rxq->nb_rx_desc; i++) {
41 if (rxq->sw_ring[i].mbuf) {
42 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
43 rxq->sw_ring[i].mbuf = NULL;
50 hns3_tx_queue_release_mbufs(struct hns3_tx_queue *txq)
55 for (i = 0; i < txq->nb_tx_desc; i++) {
56 if (txq->sw_ring[i].mbuf) {
57 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
58 txq->sw_ring[i].mbuf = NULL;
65 hns3_rx_queue_release(void *queue)
67 struct hns3_rx_queue *rxq = queue;
69 hns3_rx_queue_release_mbufs(rxq);
71 rte_memzone_free(rxq->mz);
73 rte_free(rxq->sw_ring);
79 hns3_tx_queue_release(void *queue)
81 struct hns3_tx_queue *txq = queue;
83 hns3_tx_queue_release_mbufs(txq);
85 rte_memzone_free(txq->mz);
87 rte_free(txq->sw_ring);
93 hns3_dev_rx_queue_release(void *queue)
95 struct hns3_rx_queue *rxq = queue;
96 struct hns3_adapter *hns;
102 rte_spinlock_lock(&hns->hw.lock);
103 hns3_rx_queue_release(queue);
104 rte_spinlock_unlock(&hns->hw.lock);
108 hns3_dev_tx_queue_release(void *queue)
110 struct hns3_tx_queue *txq = queue;
111 struct hns3_adapter *hns;
117 rte_spinlock_lock(&hns->hw.lock);
118 hns3_tx_queue_release(queue);
119 rte_spinlock_unlock(&hns->hw.lock);
123 hns3_free_all_queues(struct rte_eth_dev *dev)
127 if (dev->data->rx_queues)
128 for (i = 0; i < dev->data->nb_rx_queues; i++) {
129 hns3_rx_queue_release(dev->data->rx_queues[i]);
130 dev->data->rx_queues[i] = NULL;
133 if (dev->data->tx_queues)
134 for (i = 0; i < dev->data->nb_tx_queues; i++) {
135 hns3_tx_queue_release(dev->data->tx_queues[i]);
136 dev->data->tx_queues[i] = NULL;
141 hns3_alloc_rx_queue_mbufs(struct hns3_hw *hw, struct hns3_rx_queue *rxq)
143 struct rte_mbuf *mbuf;
147 for (i = 0; i < rxq->nb_rx_desc; i++) {
148 mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
149 if (unlikely(mbuf == NULL)) {
150 hns3_err(hw, "Failed to allocate RXD[%d] for rx queue!",
152 hns3_rx_queue_release_mbufs(rxq);
156 rte_mbuf_refcnt_set(mbuf, 1);
158 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
160 mbuf->port = rxq->port_id;
162 rxq->sw_ring[i].mbuf = mbuf;
163 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
164 rxq->rx_ring[i].addr = dma_addr;
165 rxq->rx_ring[i].rx.bd_base_info = 0;
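/*
 * Map the configured rx buffer size to the BD size type encoding that is
 * written to HNS3_RING_RX_BD_LEN_REG (512/1024/2048/4096 byte buffers).
 */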
172 hns3_buf_size2type(uint32_t buf_size)
178 bd_size_type = HNS3_BD_SIZE_512_TYPE;
181 bd_size_type = HNS3_BD_SIZE_1024_TYPE;
184 bd_size_type = HNS3_BD_SIZE_4096_TYPE;
187 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
194 hns3_init_rx_queue_hw(struct hns3_rx_queue *rxq)
196 uint32_t rx_buf_len = rxq->rx_buf_len;
197 uint64_t dma_addr = rxq->rx_ring_phys_addr;
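/*
 * The 64-bit ring base address is programmed as two 32-bit halves; the high
 * half is written as (addr >> 31) >> 1, presumably to keep each individual
 * shift below 32 bits.
 */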
199 hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_L_REG, (uint32_t)dma_addr);
200 hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_H_REG,
201 (uint32_t)((dma_addr >> 31) >> 1));
203 hns3_write_dev(rxq, HNS3_RING_RX_BD_LEN_REG,
204 hns3_buf_size2type(rx_buf_len));
205 hns3_write_dev(rxq, HNS3_RING_RX_BD_NUM_REG,
206 HNS3_CFG_DESC_NUM(rxq->nb_rx_desc));
210 hns3_init_tx_queue_hw(struct hns3_tx_queue *txq)
212 uint64_t dma_addr = txq->tx_ring_phys_addr;
214 hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_L_REG, (uint32_t)dma_addr);
215 hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_H_REG,
216 (uint32_t)((dma_addr >> 31) >> 1));
218 hns3_write_dev(txq, HNS3_RING_TX_BD_NUM_REG,
219 HNS3_CFG_DESC_NUM(txq->nb_tx_desc));
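/*
 * Set or clear the ring enable bit for every rx/tx queue pair. When enabling,
 * queues flagged for deferred start are skipped so the application can start
 * them explicitly later.
 */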
223 hns3_enable_all_queues(struct hns3_hw *hw, bool en)
225 struct hns3_rx_queue *rxq;
226 struct hns3_tx_queue *txq;
230 for (i = 0; i < hw->data->nb_rx_queues; i++) {
231 rxq = hw->data->rx_queues[i];
232 txq = hw->data->tx_queues[i];
233 if (rxq == NULL || txq == NULL ||
234 (en && (rxq->rx_deferred_start || txq->tx_deferred_start)))
236 rcb_reg = hns3_read_dev(rxq, HNS3_RING_EN_REG);
238 rcb_reg |= BIT(HNS3_RING_EN_B);
240 rcb_reg &= ~BIT(HNS3_RING_EN_B);
241 hns3_write_dev(rxq, HNS3_RING_EN_REG, rcb_reg);
246 hns3_tqp_enable(struct hns3_hw *hw, uint16_t queue_id, bool enable)
248 struct hns3_cfg_com_tqp_queue_cmd *req;
249 struct hns3_cmd_desc desc;
252 req = (struct hns3_cfg_com_tqp_queue_cmd *)desc.data;
254 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_COM_TQP_QUEUE, false);
255 req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
257 hns3_set_bit(req->enable, HNS3_TQP_ENABLE_B, enable ? 1 : 0);
259 ret = hns3_cmd_send(hw, &desc, 1);
261 hns3_err(hw, "TQP enable fail, ret = %d", ret);
267 hns3_send_reset_tqp_cmd(struct hns3_hw *hw, uint16_t queue_id, bool enable)
269 struct hns3_reset_tqp_queue_cmd *req;
270 struct hns3_cmd_desc desc;
273 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, false);
275 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
276 req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
277 hns3_set_bit(req->reset_req, HNS3_TQP_RESET_B, enable ? 1 : 0);
279 ret = hns3_cmd_send(hw, &desc, 1);
281 hns3_err(hw, "Send tqp reset cmd error, ret = %d", ret);
287 hns3_get_reset_status(struct hns3_hw *hw, uint16_t queue_id)
289 struct hns3_reset_tqp_queue_cmd *req;
290 struct hns3_cmd_desc desc;
293 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, true);
295 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
296 req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
298 ret = hns3_cmd_send(hw, &desc, 1);
300 hns3_err(hw, "Get reset status error, ret =%d", ret);
304 return hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B);
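/*
 * PF path for resetting one queue pair: disable the queue, assert the reset
 * through the command queue, poll the reset status until hardware is done
 * (bounded by HNS3_TQP_RESET_TRY_MS), then deassert the reset.
 */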
308 hns3_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
310 #define HNS3_TQP_RESET_TRY_MS 200
315 ret = hns3_tqp_enable(hw, queue_id, false);
320 * In the current version, VF is not supported when the PF is driven by the
321 * DPDK driver. All task queue pairs are mapped to the PF function, so the
322 * PF's queue id equals the global queue id in the PF range.
324 ret = hns3_send_reset_tqp_cmd(hw, queue_id, true);
326 hns3_err(hw, "Send reset tqp cmd fail, ret = %d", ret);
330 end = get_timeofday_ms() + HNS3_TQP_RESET_TRY_MS;
332 /* Wait for the TQP hardware reset to complete */
333 rte_delay_ms(HNS3_POLL_RESPONE_MS);
334 reset_status = hns3_get_reset_status(hw, queue_id);
339 } while (get_timeofday_ms() < end);
342 hns3_err(hw, "Reset TQP fail, ret = %d", ret);
346 ret = hns3_send_reset_tqp_cmd(hw, queue_id, false);
348 hns3_err(hw, "Deassert the soft reset fail, ret = %d", ret);
354 hns3vf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
359 /* Disable the VF's queue before sending the queue reset message to the PF */
360 ret = hns3_tqp_enable(hw, queue_id, false);
364 memcpy(msg_data, &queue_id, sizeof(uint16_t));
366 return hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data,
367 sizeof(msg_data), true, NULL, 0);
371 hns3_reset_queue(struct hns3_adapter *hns, uint16_t queue_id)
373 struct hns3_hw *hw = &hns->hw;
375 return hns3vf_reset_tqp(hw, queue_id);
377 return hns3_reset_tqp(hw, queue_id);
381 hns3_reset_all_queues(struct hns3_adapter *hns)
383 struct hns3_hw *hw = &hns->hw;
387 for (i = 0; i < hw->data->nb_rx_queues; i++) {
388 ret = hns3_reset_queue(hns, i);
390 hns3_err(hw, "Failed to reset No.%d queue: %d", i, ret);
398 hns3_dev_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
400 struct hns3_hw *hw = &hns->hw;
401 struct hns3_rx_queue *rxq;
404 PMD_INIT_FUNC_TRACE();
406 rxq = hw->data->rx_queues[idx];
408 ret = hns3_alloc_rx_queue_mbufs(hw, rxq);
410 hns3_err(hw, "Failed to alloc mbuf for No.%d rx queue: %d",
415 rxq->next_to_use = 0;
416 rxq->next_to_clean = 0;
417 hns3_init_rx_queue_hw(rxq);
423 hns3_dev_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)
425 struct hns3_hw *hw = &hns->hw;
426 struct hns3_tx_queue *txq;
427 struct hns3_desc *desc;
430 txq = hw->data->tx_queues[idx];
434 for (i = 0; i < txq->nb_tx_desc; i++) {
435 desc->tx.tp_fe_sc_vld_ra_ri = 0;
439 txq->next_to_use = 0;
440 txq->next_to_clean = 0;
441 txq->tx_bd_ready = txq->nb_tx_desc;
442 hns3_init_tx_queue_hw(txq);
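/*
 * Bind tx queues to their traffic classes: for every enabled TC, write the TC
 * number into HNS3_RING_TX_TC_REG for each queue in that TC's tqp range.
 */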
446 hns3_init_tx_ring_tc(struct hns3_adapter *hns)
448 struct hns3_hw *hw = &hns->hw;
449 struct hns3_tx_queue *txq;
452 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
453 struct hns3_tc_queue_info *tc_queue = &hw->tc_queue[i];
456 if (!tc_queue->enable)
459 for (j = 0; j < tc_queue->tqp_count; j++) {
460 num = tc_queue->tqp_offset + j;
461 txq = hw->data->tx_queues[num];
465 hns3_write_dev(txq, HNS3_RING_TX_TC_REG, tc_queue->tc);
471 hns3_start_queues(struct hns3_adapter *hns, bool reset_queue)
473 struct hns3_hw *hw = &hns->hw;
474 struct rte_eth_dev_data *dev_data = hw->data;
475 struct hns3_rx_queue *rxq;
476 struct hns3_tx_queue *txq;
481 /* Initialize RSS for queues */
482 ret = hns3_config_rss(hns);
484 hns3_err(hw, "Failed to configure rss %d", ret);
489 ret = hns3_reset_all_queues(hns);
491 hns3_err(hw, "Failed to reset all queues %d", ret);
497 * hip08 hardware does not support different numbers of rx and tx queues.
498 * The two values are checked in the .dev_configure callback, so here we
499 * assume that the number of rx and tx
502 for (i = 0; i < hw->data->nb_rx_queues; i++) {
503 rxq = dev_data->rx_queues[i];
504 txq = dev_data->tx_queues[i];
505 if (rxq == NULL || txq == NULL || rxq->rx_deferred_start ||
506 txq->tx_deferred_start)
509 ret = hns3_dev_rx_queue_start(hns, i);
511 hns3_err(hw, "Failed to start No.%d rx queue: %d", i,
515 hns3_dev_tx_queue_start(hns, i);
517 hns3_init_tx_ring_tc(hns);
519 hns3_enable_all_queues(hw, true);
523 for (j = 0; j < i; j++) {
524 rxq = dev_data->rx_queues[j];
525 hns3_rx_queue_release_mbufs(rxq);
532 hns3_stop_queues(struct hns3_adapter *hns, bool reset_queue)
534 struct hns3_hw *hw = &hns->hw;
537 hns3_enable_all_queues(hw, false);
539 ret = hns3_reset_all_queues(hns);
541 hns3_err(hw, "Failed to reset all queues %d", ret);
549 hns3_dev_release_mbufs(struct hns3_adapter *hns)
551 struct rte_eth_dev_data *dev_data = hns->hw.data;
552 struct hns3_rx_queue *rxq;
553 struct hns3_tx_queue *txq;
556 if (dev_data->rx_queues)
557 for (i = 0; i < dev_data->nb_rx_queues; i++) {
558 rxq = dev_data->rx_queues[i];
559 if (rxq == NULL || rxq->rx_deferred_start)
561 hns3_rx_queue_release_mbufs(rxq);
564 if (dev_data->tx_queues)
565 for (i = 0; i < dev_data->nb_tx_queues; i++) {
566 txq = dev_data->tx_queues[i];
567 if (txq == NULL || txq->tx_deferred_start)
569 hns3_tx_queue_release_mbufs(txq);
574 hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
575 unsigned int socket_id, const struct rte_eth_rxconf *conf,
576 struct rte_mempool *mp)
578 struct hns3_adapter *hns = dev->data->dev_private;
579 const struct rte_memzone *rx_mz;
580 struct hns3_hw *hw = &hns->hw;
581 struct hns3_rx_queue *rxq;
582 unsigned int desc_size = sizeof(struct hns3_desc);
583 unsigned int rx_desc;
586 if (dev->data->dev_started) {
587 hns3_err(hw, "rx_queue_setup after dev_start no supported");
591 if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||
592 nb_desc % HNS3_ALIGN_RING_DESC) {
593 hns3_err(hw, "Number (%u) of rx descriptors is invalid",
598 if (dev->data->rx_queues[idx]) {
599 hns3_rx_queue_release(dev->data->rx_queues[idx]);
600 dev->data->rx_queues[idx] = NULL;
603 rxq = rte_zmalloc_socket("hns3 RX queue", sizeof(struct hns3_rx_queue),
604 RTE_CACHE_LINE_SIZE, socket_id);
606 hns3_err(hw, "Failed to allocate memory for rx queue!");
612 rxq->nb_rx_desc = nb_desc;
614 if (conf->rx_free_thresh <= 0)
615 rxq->rx_free_thresh = DEFAULT_RX_FREE_THRESH;
617 rxq->rx_free_thresh = conf->rx_free_thresh;
618 rxq->rx_deferred_start = conf->rx_deferred_start;
620 rx_entry_len = sizeof(struct hns3_entry) * rxq->nb_rx_desc;
621 rxq->sw_ring = rte_zmalloc_socket("hns3 RX sw ring", rx_entry_len,
622 RTE_CACHE_LINE_SIZE, socket_id);
623 if (rxq->sw_ring == NULL) {
624 hns3_err(hw, "Failed to allocate memory for rx sw ring!");
625 hns3_rx_queue_release(rxq);
629 /* Allocate rx ring hardware descriptors. */
630 rx_desc = rxq->nb_rx_desc * desc_size;
631 rx_mz = rte_eth_dma_zone_reserve(dev, "rx_ring", idx, rx_desc,
632 HNS3_RING_BASE_ALIGN, socket_id);
634 hns3_err(hw, "Failed to reserve DMA memory for No.%d rx ring!",
636 hns3_rx_queue_release(rxq);
640 rxq->rx_ring = (struct hns3_desc *)rx_mz->addr;
641 rxq->rx_ring_phys_addr = rx_mz->iova;
643 hns3_dbg(hw, "No.%d rx descriptors iova 0x%" PRIx64, idx,
644 rxq->rx_ring_phys_addr);
646 rxq->next_to_use = 0;
647 rxq->next_to_clean = 0;
649 rxq->pkt_first_seg = NULL;
650 rxq->pkt_last_seg = NULL;
651 rxq->port_id = dev->data->port_id;
652 rxq->configured = true;
653 rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
654 idx * HNS3_TQP_REG_SIZE);
655 rxq->rx_buf_len = hw->rx_buf_len;
656 rxq->non_vld_descs = 0;
658 rxq->pkt_len_errors = 0;
659 rxq->l3_csum_erros = 0;
660 rxq->l4_csum_erros = 0;
661 rxq->ol3_csum_erros = 0;
662 rxq->ol4_csum_erros = 0;
665 rte_spinlock_lock(&hw->lock);
666 dev->data->rx_queues[idx] = rxq;
667 rte_spinlock_unlock(&hw->lock);
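/*
 * Translate the L2/L3/L4 (and outer OL3/OL4) type ids reported in the rx
 * descriptor into RTE_PTYPE_* flags through small lookup tables; tunnelled
 * packets combine the inner_* tables with the outer ol3/ol4 tables.
 */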
672 static inline uint32_t
673 rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint32_t ol_info)
675 #define HNS3_L2TBL_NUM 4
676 #define HNS3_L3TBL_NUM 16
677 #define HNS3_L4TBL_NUM 16
678 #define HNS3_OL3TBL_NUM 16
679 #define HNS3_OL4TBL_NUM 16
680 uint32_t pkt_type = 0;
681 uint32_t l2id, l3id, l4id;
682 uint32_t ol3id, ol4id;
684 static const uint32_t l2table[HNS3_L2TBL_NUM] = {
686 RTE_PTYPE_L2_ETHER_VLAN,
687 RTE_PTYPE_L2_ETHER_QINQ,
691 static const uint32_t l3table[HNS3_L3TBL_NUM] = {
694 RTE_PTYPE_L2_ETHER_ARP,
696 RTE_PTYPE_L3_IPV4_EXT,
697 RTE_PTYPE_L3_IPV6_EXT,
698 RTE_PTYPE_L2_ETHER_LLDP,
699 0, 0, 0, 0, 0, 0, 0, 0, 0
702 static const uint32_t l4table[HNS3_L4TBL_NUM] = {
705 RTE_PTYPE_TUNNEL_GRE,
709 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
712 static const uint32_t inner_l2table[HNS3_L2TBL_NUM] = {
713 RTE_PTYPE_INNER_L2_ETHER,
714 RTE_PTYPE_INNER_L2_ETHER_VLAN,
715 RTE_PTYPE_INNER_L2_ETHER_QINQ,
719 static const uint32_t inner_l3table[HNS3_L3TBL_NUM] = {
720 RTE_PTYPE_INNER_L3_IPV4,
721 RTE_PTYPE_INNER_L3_IPV6,
723 RTE_PTYPE_INNER_L2_ETHER,
724 RTE_PTYPE_INNER_L3_IPV4_EXT,
725 RTE_PTYPE_INNER_L3_IPV6_EXT,
726 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
729 static const uint32_t inner_l4table[HNS3_L4TBL_NUM] = {
730 RTE_PTYPE_INNER_L4_UDP,
731 RTE_PTYPE_INNER_L4_TCP,
732 RTE_PTYPE_TUNNEL_GRE,
733 RTE_PTYPE_INNER_L4_SCTP,
735 RTE_PTYPE_INNER_L4_ICMP,
736 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
739 static const uint32_t ol3table[HNS3_OL3TBL_NUM] = {
743 RTE_PTYPE_L3_IPV4_EXT,
744 RTE_PTYPE_L3_IPV6_EXT,
745 0, 0, 0, 0, 0, 0, 0, 0, 0,
749 static const uint32_t ol4table[HNS3_OL4TBL_NUM] = {
751 RTE_PTYPE_TUNNEL_VXLAN,
752 RTE_PTYPE_TUNNEL_NVGRE,
753 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
756 l2id = hns3_get_field(pkt_info, HNS3_RXD_STRP_TAGP_M,
757 HNS3_RXD_STRP_TAGP_S);
758 l3id = hns3_get_field(pkt_info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S);
759 l4id = hns3_get_field(pkt_info, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S);
760 ol3id = hns3_get_field(ol_info, HNS3_RXD_OL3ID_M, HNS3_RXD_OL3ID_S);
761 ol4id = hns3_get_field(ol_info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
764 pkt_type |= (inner_l2table[l2id] | inner_l3table[l3id] |
765 inner_l4table[l4id] | ol3table[ol3id] |
768 pkt_type |= (l2table[l2id] | l3table[l3id] | l4table[l4id]);
773 hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
775 static const uint32_t ptypes[] = {
777 RTE_PTYPE_L2_ETHER_VLAN,
778 RTE_PTYPE_L2_ETHER_QINQ,
779 RTE_PTYPE_L2_ETHER_LLDP,
780 RTE_PTYPE_L2_ETHER_ARP,
782 RTE_PTYPE_L3_IPV4_EXT,
784 RTE_PTYPE_L3_IPV6_EXT,
790 RTE_PTYPE_TUNNEL_GRE,
794 if (dev->rx_pkt_burst == hns3_recv_pkts)
801 hns3_clean_rx_buffers(struct hns3_rx_queue *rxq, int count)
803 rxq->next_to_use += count;
804 if (rxq->next_to_use >= rxq->nb_rx_desc)
805 rxq->next_to_use -= rxq->nb_rx_desc;
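/* Notify hardware of the number of BDs that have been processed and refilled. */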
807 hns3_write_dev(rxq, HNS3_RING_RX_HEAD_REG, count);
811 hns3_handle_bdinfo(struct hns3_rx_queue *rxq, struct rte_mbuf *rxm,
812 uint32_t bd_base_info, uint32_t l234_info,
817 if (unlikely(l234_info & BIT(HNS3_RXD_L2E_B))) {
823 if (unlikely(rxm->pkt_len == 0 ||
824 (l234_info & BIT(HNS3_RXD_TRUNCAT_B)))) {
825 rxq->pkt_len_errors++;
830 if (bd_base_info & BIT(HNS3_RXD_L3L4P_B)) {
831 if (unlikely(l234_info & BIT(HNS3_RXD_L3E_B))) {
832 rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;
833 rxq->l3_csum_erros++;
834 tmp |= HNS3_L3_CKSUM_ERR;
837 if (unlikely(l234_info & BIT(HNS3_RXD_L4E_B))) {
838 rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
839 rxq->l4_csum_erros++;
840 tmp |= HNS3_L4_CKSUM_ERR;
843 if (unlikely(l234_info & BIT(HNS3_RXD_OL3E_B))) {
844 rxq->ol3_csum_erros++;
845 tmp |= HNS3_OUTER_L3_CKSUM_ERR;
848 if (unlikely(l234_info & BIT(HNS3_RXD_OL4E_B))) {
849 rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
850 rxq->ol4_csum_erros++;
851 tmp |= HNS3_OUTER_L4_CKSUM_ERR;
860 hns3_rx_set_cksum_flag(struct rte_mbuf *rxm, uint64_t packet_type,
861 const uint32_t cksum_err)
863 if (unlikely((packet_type & RTE_PTYPE_TUNNEL_MASK))) {
864 if (likely(packet_type & RTE_PTYPE_INNER_L3_MASK) &&
865 (cksum_err & HNS3_L3_CKSUM_ERR) == 0)
866 rxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
867 if (likely(packet_type & RTE_PTYPE_INNER_L4_MASK) &&
868 (cksum_err & HNS3_L4_CKSUM_ERR) == 0)
869 rxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
870 if (likely(packet_type & RTE_PTYPE_L4_MASK) &&
871 (cksum_err & HNS3_OUTER_L4_CKSUM_ERR) == 0)
872 rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;
874 if (likely(packet_type & RTE_PTYPE_L3_MASK) &&
875 (cksum_err & HNS3_L3_CKSUM_ERR) == 0)
876 rxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
877 if (likely(packet_type & RTE_PTYPE_L4_MASK) &&
878 (cksum_err & HNS3_L4_CKSUM_ERR) == 0)
879 rxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
884 hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
886 struct hns3_rx_queue *rxq; /* RX queue */
887 struct hns3_desc *rx_ring; /* RX ring (desc) */
888 struct hns3_entry *sw_ring;
889 struct hns3_entry *rxe;
890 struct hns3_desc *rxdp; /* pointer to the current desc */
891 struct rte_mbuf *first_seg;
892 struct rte_mbuf *last_seg;
893 struct rte_mbuf *nmb; /* pointer to the newly allocated mbuf */
894 struct rte_mbuf *rxm;
895 struct rte_eth_dev *dev;
896 uint32_t bd_base_info;
906 int num; /* number of filled descriptors in the ring */
912 dev = &rte_eth_devices[rxq->port_id];
914 rx_id = rxq->next_to_clean;
915 rx_ring = rxq->rx_ring;
916 first_seg = rxq->pkt_first_seg;
917 last_seg = rxq->pkt_last_seg;
918 sw_ring = rxq->sw_ring;
920 /* Get the number of filled descriptors (BDs) in the ring */
921 num = hns3_read_dev(rxq, HNS3_RING_RX_FBDNUM_REG);
922 while (nb_rx_bd < num && nb_rx < nb_pkts) {
923 rxdp = &rx_ring[rx_id];
924 bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
925 if (unlikely(!hns3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
926 rxq->non_vld_descs++;
930 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
931 if (unlikely(nmb == NULL)) {
932 dev->data->rx_mbuf_alloc_failed++;
937 rxe = &sw_ring[rx_id];
939 if (rx_id == rxq->nb_rx_desc)
942 rte_prefetch0(sw_ring[rx_id].mbuf);
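/* Every fourth BD, also prefetch the next descriptor and sw_ring entries. */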
943 if ((rx_id & 0x3) == 0) {
944 rte_prefetch0(&rx_ring[rx_id]);
945 rte_prefetch0(&sw_ring[rx_id]);
951 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
952 rxdp->addr = dma_addr;
953 rxdp->rx.bd_base_info = 0;
956 /* Load remaining descriptor data and extract necessary fields */
957 data_len = (uint16_t)(rte_le_to_cpu_16(rxdp->rx.size));
958 l234_info = rte_le_to_cpu_32(rxdp->rx.l234_info);
959 ol_info = rte_le_to_cpu_32(rxdp->rx.ol_info);
961 if (first_seg == NULL) {
963 first_seg->nb_segs = 1;
965 first_seg->nb_segs++;
966 last_seg->next = rxm;
969 rxm->data_off = RTE_PKTMBUF_HEADROOM;
970 rxm->data_len = data_len;
972 if (!hns3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
977 /* The last buffer of the received packet */
978 pkt_len = (uint16_t)(rte_le_to_cpu_16(rxdp->rx.pkt_len));
979 first_seg->pkt_len = pkt_len;
980 first_seg->port = rxq->port_id;
981 first_seg->hash.rss = rte_le_to_cpu_32(rxdp->rx.rss_hash);
982 if (unlikely(hns3_get_bit(bd_base_info, HNS3_RXD_LUM_B))) {
983 first_seg->hash.fdir.hi =
984 rte_le_to_cpu_32(rxdp->rx.fd_id);
985 first_seg->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
989 ret = hns3_handle_bdinfo(rxq, first_seg, bd_base_info,
990 l234_info, &cksum_err);
994 first_seg->packet_type = rxd_pkt_info_to_pkt_type(l234_info,
997 if (bd_base_info & BIT(HNS3_RXD_L3L4P_B))
998 hns3_rx_set_cksum_flag(rxm, first_seg->packet_type,
1001 first_seg->vlan_tci = rte_le_to_cpu_16(rxdp->rx.vlan_tag);
1002 first_seg->vlan_tci_outer =
1003 rte_le_to_cpu_16(rxdp->rx.ot_vlan_tag);
1004 rx_pkts[nb_rx++] = first_seg;
1008 rte_pktmbuf_free(first_seg);
1012 rxq->next_to_clean = rx_id;
1013 rxq->pkt_first_seg = first_seg;
1014 rxq->pkt_last_seg = last_seg;
1015 hns3_clean_rx_buffers(rxq, nb_rx_bd);
1021 hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
1022 unsigned int socket_id, const struct rte_eth_txconf *conf)
1024 struct hns3_adapter *hns = dev->data->dev_private;
1025 const struct rte_memzone *tx_mz;
1026 struct hns3_hw *hw = &hns->hw;
1027 struct hns3_tx_queue *txq;
1028 struct hns3_desc *desc;
1029 unsigned int desc_size = sizeof(struct hns3_desc);
1030 unsigned int tx_desc;
1034 if (dev->data->dev_started) {
1035 hns3_err(hw, "tx_queue_setup after dev_start no supported");
1039 if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||
1040 nb_desc % HNS3_ALIGN_RING_DESC) {
1041 hns3_err(hw, "Number (%u) of tx descriptors is invalid",
1046 if (dev->data->tx_queues[idx] != NULL) {
1047 hns3_tx_queue_release(dev->data->tx_queues[idx]);
1048 dev->data->tx_queues[idx] = NULL;
1051 txq = rte_zmalloc_socket("hns3 TX queue", sizeof(struct hns3_tx_queue),
1052 RTE_CACHE_LINE_SIZE, socket_id);
1054 hns3_err(hw, "Failed to allocate memory for tx queue!");
1058 txq->nb_tx_desc = nb_desc;
1059 txq->queue_id = idx;
1060 txq->tx_deferred_start = conf->tx_deferred_start;
1062 tx_entry_len = sizeof(struct hns3_entry) * txq->nb_tx_desc;
1063 txq->sw_ring = rte_zmalloc_socket("hns3 TX sw ring", tx_entry_len,
1064 RTE_CACHE_LINE_SIZE, socket_id);
1065 if (txq->sw_ring == NULL) {
1066 hns3_err(hw, "Failed to allocate memory for tx sw ring!");
1067 hns3_tx_queue_release(txq);
1071 /* Allocate tx ring hardware descriptors. */
1072 tx_desc = txq->nb_tx_desc * desc_size;
1073 tx_mz = rte_eth_dma_zone_reserve(dev, "tx_ring", idx, tx_desc,
1074 HNS3_RING_BASE_ALIGN, socket_id);
1075 if (tx_mz == NULL) {
1076 hns3_err(hw, "Failed to reserve DMA memory for No.%d tx ring!",
1078 hns3_tx_queue_release(txq);
1082 txq->tx_ring = (struct hns3_desc *)tx_mz->addr;
1083 txq->tx_ring_phys_addr = tx_mz->iova;
1085 hns3_dbg(hw, "No.%d tx descriptors iova 0x%" PRIx64, idx,
1086 txq->tx_ring_phys_addr);
1089 desc = txq->tx_ring;
1090 for (i = 0; i < txq->nb_tx_desc; i++) {
1091 desc->tx.tp_fe_sc_vld_ra_ri = 0;
1096 txq->next_to_use = 0;
1097 txq->next_to_clean = 0;
1098 txq->tx_bd_ready = txq->nb_tx_desc;
1099 txq->port_id = dev->data->port_id;
1100 txq->pkt_len_errors = 0;
1101 txq->configured = true;
1102 txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
1103 idx * HNS3_TQP_REG_SIZE);
1104 rte_spinlock_lock(&hw->lock);
1105 dev->data->tx_queues[idx] = txq;
1106 rte_spinlock_unlock(&hw->lock);
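/*
 * Ring accounting helpers: tx_ring_dist() is the distance between two indices
 * modulo the ring size, and tx_ring_space() keeps one BD unused, the usual
 * trick for telling a full ring apart from an empty one.
 */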
1112 tx_ring_dist(struct hns3_tx_queue *txq, int begin, int end)
1114 return (end - begin + txq->nb_tx_desc) % txq->nb_tx_desc;
1118 tx_ring_space(struct hns3_tx_queue *txq)
1120 return txq->nb_tx_desc -
1121 tx_ring_dist(txq, txq->next_to_clean, txq->next_to_use) - 1;
1125 hns3_queue_xmit(struct hns3_tx_queue *txq, uint32_t buf_num)
1127 hns3_write_dev(txq, HNS3_RING_TX_TAIL_REG, buf_num);
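/*
 * Walk the tx ring from next_to_clean and free mbufs whose descriptors the
 * hardware has completed (valid bit cleared), making those BDs ready again.
 */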
1131 hns3_tx_free_useless_buffer(struct hns3_tx_queue *txq)
1133 uint16_t tx_next_clean = txq->next_to_clean;
1134 uint16_t tx_next_use = txq->next_to_use;
1135 uint16_t tx_bd_ready = txq->tx_bd_ready;
1136 uint16_t tx_bd_max = txq->nb_tx_desc;
1137 struct hns3_entry *tx_bak_pkt = &txq->sw_ring[tx_next_clean];
1138 struct hns3_desc *desc = &txq->tx_ring[tx_next_clean];
1139 struct rte_mbuf *mbuf;
1141 while ((!hns3_get_bit(desc->tx.tp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B)) &&
1142 (tx_next_use != tx_next_clean || tx_bd_ready < tx_bd_max)) {
1143 mbuf = tx_bak_pkt->mbuf;
1146 rte_pktmbuf_free(mbuf);
1147 tx_bak_pkt->mbuf = NULL;
1155 if (tx_next_clean >= tx_bd_max) {
1157 desc = txq->tx_ring;
1158 tx_bak_pkt = txq->sw_ring;
1162 txq->next_to_clean = tx_next_clean;
1163 txq->tx_bd_ready = tx_bd_ready;
1167 fill_desc(struct hns3_tx_queue *txq, uint16_t tx_desc_id, struct rte_mbuf *rxm,
1168 bool first, int offset)
1170 struct hns3_desc *tx_ring = txq->tx_ring;
1171 struct hns3_desc *desc = &tx_ring[tx_desc_id];
1172 uint8_t frag_end = rxm->next == NULL ? 1 : 0;
1173 uint16_t size = rxm->data_len;
1175 uint64_t ol_flags = rxm->ol_flags;
1180 desc->addr = rte_mbuf_data_iova(rxm) + offset;
1181 desc->tx.send_size = rte_cpu_to_le_16(size);
1182 hns3_set_bit(rrcfv, HNS3_TXD_VLD_B, 1);
1185 hdr_len = rxm->l2_len + rxm->l3_len + rxm->l4_len;
1186 hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ?
1187 rxm->outer_l2_len + rxm->outer_l3_len : 0;
1188 paylen = rxm->pkt_len - hdr_len;
1189 desc->tx.paylen = rte_cpu_to_le_32(paylen);
1192 hns3_set_bit(rrcfv, HNS3_TXD_FE_B, frag_end);
1193 desc->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(rrcfv);
1196 if (ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
1197 tmp = rte_le_to_cpu_32(desc->tx.type_cs_vlan_tso_len);
1198 hns3_set_bit(tmp, HNS3_TXD_VLAN_B, 1);
1199 desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp);
1200 desc->tx.vlan_tag = rte_cpu_to_le_16(rxm->vlan_tci);
1203 if (ol_flags & PKT_TX_QINQ_PKT) {
1204 tmp = rte_le_to_cpu_32(desc->tx.ol_type_vlan_len_msec);
1205 hns3_set_bit(tmp, HNS3_TXD_OVLAN_B, 1);
1206 desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(tmp);
1207 desc->tx.outer_vlan_tag =
1208 rte_cpu_to_le_16(rxm->vlan_tci_outer);
1214 hns3_tx_alloc_mbufs(struct hns3_tx_queue *txq, struct rte_mempool *mb_pool,
1215 uint16_t nb_new_buf, struct rte_mbuf **alloc_mbuf)
1217 struct rte_mbuf *new_mbuf = NULL;
1218 struct rte_eth_dev *dev;
1219 struct rte_mbuf *temp;
1223 /* Allocate enough mbufs */
1224 for (i = 0; i < nb_new_buf; i++) {
1225 temp = rte_pktmbuf_alloc(mb_pool);
1226 if (unlikely(temp == NULL)) {
1227 dev = &rte_eth_devices[txq->port_id];
1228 hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1229 hns3_err(hw, "Failed to alloc TX mbuf port_id=%d,"
1230 "queue_id=%d in reassemble tx pkts.",
1231 txq->port_id, txq->queue_id);
1232 rte_pktmbuf_free(new_mbuf);
1235 temp->next = new_mbuf;
1239 if (new_mbuf == NULL)
1242 new_mbuf->nb_segs = nb_new_buf;
1243 *alloc_mbuf = new_mbuf;
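/*
 * Copy a packet with more segments than the hardware can chain into a newly
 * allocated mbuf chain made of fewer, buffer-sized segments; called from the
 * transmit path when nb_segs exceeds HNS3_MAX_TX_BD_PER_PKT.
 */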
1249 hns3_reassemble_tx_pkts(void *tx_queue, struct rte_mbuf *tx_pkt,
1250 struct rte_mbuf **new_pkt)
1252 struct hns3_tx_queue *txq = tx_queue;
1253 struct rte_mempool *mb_pool;
1254 struct rte_mbuf *new_mbuf;
1255 struct rte_mbuf *temp_new;
1256 struct rte_mbuf *temp;
1257 uint16_t last_buf_len;
1258 uint16_t nb_new_buf;
1269 mb_pool = tx_pkt->pool;
1270 buf_size = tx_pkt->buf_len - RTE_PKTMBUF_HEADROOM;
1271 nb_new_buf = (tx_pkt->pkt_len - 1) / buf_size + 1;
1273 last_buf_len = tx_pkt->pkt_len % buf_size;
1274 if (last_buf_len == 0)
1275 last_buf_len = buf_size;
1277 /* Allocate enough mbufs */
1278 ret = hns3_tx_alloc_mbufs(txq, mb_pool, nb_new_buf, &new_mbuf);
1282 /* Copy the original packet content to the new mbufs */
1284 s = rte_pktmbuf_mtod(temp, char *);
1285 len_s = temp->data_len;
1286 temp_new = new_mbuf;
1287 for (i = 0; i < nb_new_buf; i++) {
1288 d = rte_pktmbuf_mtod(temp_new, char *);
1289 if (i < nb_new_buf - 1)
1292 buf_len = last_buf_len;
1296 len = RTE_MIN(len_s, len_d);
1300 len_d = len_d - len;
1301 len_s = len_s - len;
1307 s = rte_pktmbuf_mtod(temp, char *);
1308 len_s = temp->data_len;
1312 temp_new->data_len = buf_len;
1313 temp_new = temp_new->next;
1316 /* Free the original mbuf chain */
1317 rte_pktmbuf_free(tx_pkt);
1319 *new_pkt = new_mbuf;
1325 hns3_parse_outer_params(uint64_t ol_flags, uint32_t *ol_type_vlan_len_msec)
1327 uint32_t tmp = *ol_type_vlan_len_msec;
1329 /* (outer) IP header type */
1330 if (ol_flags & PKT_TX_OUTER_IPV4) {
1331 /* OL3 header size, in units of 4 bytes */
1332 hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
1333 sizeof(struct rte_ipv4_hdr) >> HNS3_L3_LEN_UNIT);
1334 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
1335 hns3_set_field(tmp, HNS3_TXD_OL3T_M,
1336 HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_CSUM);
1338 hns3_set_field(tmp, HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
1339 HNS3_OL3T_IPV4_NO_CSUM);
1340 } else if (ol_flags & PKT_TX_OUTER_IPV6) {
1341 hns3_set_field(tmp, HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
1343 /* OL3 header size, in units of 4 bytes */
1344 hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
1345 sizeof(struct rte_ipv6_hdr) >> HNS3_L3_LEN_UNIT);
1348 *ol_type_vlan_len_msec = tmp;
1352 hns3_parse_inner_params(uint64_t ol_flags, uint32_t *ol_type_vlan_len_msec,
1353 struct rte_net_hdr_lens *hdr_lens)
1355 uint32_t tmp = *ol_type_vlan_len_msec;
1358 /* OL2 header size, in units of 2 bytes */
1359 hns3_set_field(tmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
1360 sizeof(struct rte_ether_hdr) >> HNS3_L2_LEN_UNIT);
1362 /* L4TUNT: L4 Tunneling Type */
1363 switch (ol_flags & PKT_TX_TUNNEL_MASK) {
1364 case PKT_TX_TUNNEL_GENEVE:
1365 case PKT_TX_TUNNEL_VXLAN:
1366 /* MAC in UDP tunnelling packet, including VXLAN */
1367 hns3_set_field(tmp, HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S,
1368 HNS3_TUN_MAC_IN_UDP);
1370 * OL4 header size, in units of 4 bytes; it covers the outer
1371 * L4 (UDP) header length plus the tunneling header length.
1373 hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
1374 (uint8_t)RTE_ETHER_VXLAN_HLEN >>
1377 case PKT_TX_TUNNEL_GRE:
1378 hns3_set_field(tmp, HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S,
1381 * OL4 header size, in units of 4 bytes; it covers the outer
1382 * L4 (GRE) header length plus the tunneling header length.
1384 l4_len = hdr_lens->l4_len + hdr_lens->tunnel_len;
1385 hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
1386 l4_len >> HNS3_L4_LEN_UNIT);
1389 /* For tunnel types other than UDP and GRE, drop the packet */
1393 *ol_type_vlan_len_msec = tmp;
1399 hns3_parse_tunneling_params(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
1401 struct rte_net_hdr_lens *hdr_lens)
1403 struct hns3_desc *tx_ring = txq->tx_ring;
1404 struct hns3_desc *desc = &tx_ring[tx_desc_id];
1408 hns3_parse_outer_params(ol_flags, &value);
1409 ret = hns3_parse_inner_params(ol_flags, &value, hdr_lens);
1413 desc->tx.ol_type_vlan_len_msec |= rte_cpu_to_le_32(value);
1419 hns3_parse_l3_cksum_params(uint64_t ol_flags, uint32_t *type_cs_vlan_tso_len)
1423 /* Enable L3 checksum offloads */
1424 if (ol_flags & PKT_TX_IPV4) {
1425 tmp = *type_cs_vlan_tso_len;
1426 hns3_set_field(tmp, HNS3_TXD_L3T_M, HNS3_TXD_L3T_S,
1428 /* inner(/normal) L3 header size, in units of 4 bytes */
1429 hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
1430 sizeof(struct rte_ipv4_hdr) >> HNS3_L3_LEN_UNIT);
1431 if (ol_flags & PKT_TX_IP_CKSUM)
1432 hns3_set_bit(tmp, HNS3_TXD_L3CS_B, 1);
1433 *type_cs_vlan_tso_len = tmp;
1434 } else if (ol_flags & PKT_TX_IPV6) {
1435 tmp = *type_cs_vlan_tso_len;
1436 /* L3T: IPv6 does not need an L3 checksum */
1437 hns3_set_field(tmp, HNS3_TXD_L3T_M, HNS3_TXD_L3T_S,
1439 /* inner(/normal) L3 header size, in units of 4 bytes */
1440 hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
1441 sizeof(struct rte_ipv6_hdr) >> HNS3_L3_LEN_UNIT);
1442 *type_cs_vlan_tso_len = tmp;
1447 hns3_parse_l4_cksum_params(uint64_t ol_flags, uint32_t *type_cs_vlan_tso_len)
1451 /* Enable L4 checksum offloads */
1452 switch (ol_flags & PKT_TX_L4_MASK) {
1453 case PKT_TX_TCP_CKSUM:
1454 tmp = *type_cs_vlan_tso_len;
1455 hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
1457 hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
1458 hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
1459 sizeof(struct rte_tcp_hdr) >> HNS3_L4_LEN_UNIT);
1460 *type_cs_vlan_tso_len = tmp;
1462 case PKT_TX_UDP_CKSUM:
1463 tmp = *type_cs_vlan_tso_len;
1464 hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
1466 hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
1467 hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
1468 sizeof(struct rte_udp_hdr) >> HNS3_L4_LEN_UNIT);
1469 *type_cs_vlan_tso_len = tmp;
1471 case PKT_TX_SCTP_CKSUM:
1472 tmp = *type_cs_vlan_tso_len;
1473 hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
1475 hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
1476 hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
1477 sizeof(struct rte_sctp_hdr) >> HNS3_L4_LEN_UNIT);
1478 *type_cs_vlan_tso_len = tmp;
1486 hns3_txd_enable_checksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
1489 struct hns3_desc *tx_ring = txq->tx_ring;
1490 struct hns3_desc *desc = &tx_ring[tx_desc_id];
1493 /* inner(/normal) L2 header size, in units of 2 bytes */
1494 hns3_set_field(value, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
1495 sizeof(struct rte_ether_hdr) >> HNS3_L2_LEN_UNIT);
1497 hns3_parse_l3_cksum_params(ol_flags, &value);
1498 hns3_parse_l4_cksum_params(ol_flags, &value);
1500 desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(value);
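/*
 * tx_pkt_prepare callback: check each packet's length, validate the offload
 * flags in debug builds, and let rte_net_intel_cksum_prepare() fill in the
 * pseudo-header checksums needed by the hardware checksum offloads.
 */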
1504 hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
1511 for (i = 0; i < nb_pkts; i++) {
1514 /* Check the packet size */
1515 if (m->pkt_len < HNS3_MIN_FRAME_LEN) {
1520 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
1521 ret = rte_validate_tx_offload(m);
1527 ret = rte_net_intel_cksum_prepare(m);
1538 hns3_parse_cksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
1539 const struct rte_mbuf *m, struct rte_net_hdr_lens *hdr_lens)
1541 /* Fill in tunneling parameters if necessary */
1542 if (m->ol_flags & PKT_TX_TUNNEL_MASK) {
1543 (void)rte_net_get_ptype(m, hdr_lens, RTE_PTYPE_ALL_MASK);
1544 if (hns3_parse_tunneling_params(txq, tx_desc_id, m->ol_flags,
1548 /* Enable checksum offloading */
1549 if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK)
1550 hns3_txd_enable_checksum(txq, tx_desc_id, m->ol_flags);
1556 hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1558 struct rte_net_hdr_lens hdr_lens = {0};
1559 struct hns3_tx_queue *txq = tx_queue;
1560 struct hns3_entry *tx_bak_pkt;
1561 struct rte_mbuf *new_pkt;
1562 struct rte_mbuf *tx_pkt;
1563 struct rte_mbuf *m_seg;
1564 struct rte_mbuf *temp;
1565 uint32_t nb_hold = 0;
1566 uint16_t tx_next_clean;
1567 uint16_t tx_next_use;
1568 uint16_t tx_bd_ready;
1569 uint16_t tx_pkt_num;
1575 /* Free buffers that hardware has finished transmitting */
1576 hns3_tx_free_useless_buffer(txq);
1577 tx_bd_ready = txq->tx_bd_ready;
1578 if (tx_bd_ready == 0)
1581 tx_next_clean = txq->next_to_clean;
1582 tx_next_use = txq->next_to_use;
1583 tx_bd_max = txq->nb_tx_desc;
1584 tx_bak_pkt = &txq->sw_ring[tx_next_clean];
1586 tx_pkt_num = (tx_bd_ready < nb_pkts) ? tx_bd_ready : nb_pkts;
1589 tx_bak_pkt = &txq->sw_ring[tx_next_use];
1590 for (nb_tx = 0; nb_tx < tx_pkt_num; nb_tx++) {
1591 tx_pkt = *tx_pkts++;
1593 nb_buf = tx_pkt->nb_segs;
1595 if (nb_buf > tx_ring_space(txq)) {
1603 * If the packet length is zero or too long, the packet
1606 if (unlikely(tx_pkt->pkt_len > HNS3_MAX_FRAME_LEN ||
1607 tx_pkt->pkt_len == 0)) {
1608 txq->pkt_len_errors++;
1613 if (unlikely(nb_buf > HNS3_MAX_TX_BD_PER_PKT)) {
1614 if (hns3_reassemble_tx_pkts(txq, tx_pkt, &new_pkt))
1617 nb_buf = m_seg->nb_segs;
1620 if (hns3_parse_cksum(txq, tx_next_use, m_seg, &hdr_lens))
1625 fill_desc(txq, tx_next_use, m_seg, (i == 0), 0);
1627 tx_bak_pkt->mbuf = m_seg;
1631 if (tx_next_use >= tx_bd_max) {
1633 tx_bak_pkt = txq->sw_ring;
1637 } while (m_seg != NULL);
1644 if (likely(nb_tx)) {
1645 hns3_queue_xmit(txq, nb_hold);
1646 txq->next_to_clean = tx_next_clean;
1647 txq->next_to_use = tx_next_use;
1648 txq->tx_bd_ready = tx_bd_ready - nb_hold;
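/*
 * hns3_dummy_rxtx_burst is installed in place of the real burst functions,
 * and hns3_set_rxtx_function below selects the real rx/tx paths only when the
 * adapter is started and no reset is in progress.
 */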
1655 hns3_dummy_rxtx_burst(void *dpdk_txq __rte_unused,
1656 struct rte_mbuf **pkts __rte_unused,
1657 uint16_t pkts_n __rte_unused)
1662 void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
1664 struct hns3_adapter *hns = eth_dev->data->dev_private;
1666 if (hns->hw.adapter_state == HNS3_NIC_STARTED &&
1667 rte_atomic16_read(&hns->hw.reset.resetting) == 0) {
1668 eth_dev->rx_pkt_burst = hns3_recv_pkts;
1669 eth_dev->tx_pkt_burst = hns3_xmit_pkts;
1670 eth_dev->tx_pkt_prepare = hns3_prep_pkts;
1672 eth_dev->rx_pkt_burst = hns3_dummy_rxtx_burst;
1673 eth_dev->tx_pkt_burst = hns3_dummy_rxtx_burst;
1674 eth_dev->tx_pkt_prepare = hns3_dummy_rxtx_burst;