/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2019 Hisilicon Limited.
 */

#ifndef _HNS3_RXTX_H_
#define _HNS3_RXTX_H_

#include <rte_mbuf_core.h>
#define HNS3_MIN_RING_DESC 64
#define HNS3_MAX_RING_DESC 32768
#define HNS3_DEFAULT_RING_DESC 1024
#define HNS3_ALIGN_RING_DESC 32
#define HNS3_RING_BASE_ALIGN 128
#define HNS3_BULK_ALLOC_MBUF_NUM 32

#define HNS3_DEFAULT_RX_FREE_THRESH 32
#define HNS3_DEFAULT_TX_FREE_THRESH 32
#define HNS3_DEFAULT_TX_RS_THRESH 32
#define HNS3_TX_FAST_FREE_AHEAD 64

#define HNS3_DEFAULT_RX_BURST 32
#if (HNS3_DEFAULT_RX_BURST > 64)
#error "PMD HNS3: HNS3_DEFAULT_RX_BURST must be <= 64"
#endif

#define HNS3_DEFAULT_DESCS_PER_LOOP 4
#define HNS3_SVE_DEFAULT_DESCS_PER_LOOP 8
#if (HNS3_DEFAULT_DESCS_PER_LOOP > HNS3_SVE_DEFAULT_DESCS_PER_LOOP)
#define HNS3_VECTOR_RX_OFFSET_TABLE_LEN HNS3_DEFAULT_DESCS_PER_LOOP
#else
#define HNS3_VECTOR_RX_OFFSET_TABLE_LEN HNS3_SVE_DEFAULT_DESCS_PER_LOOP
#endif
#define HNS3_DEFAULT_RXQ_REARM_THRESH 64
#define HNS3_UINT8_BIT 8
#define HNS3_UINT16_BIT 16
#define HNS3_UINT32_BIT 32
#define HNS3_512_BD_BUF_SIZE 512
#define HNS3_1K_BD_BUF_SIZE 1024
#define HNS3_2K_BD_BUF_SIZE 2048
#define HNS3_4K_BD_BUF_SIZE 4096

#define HNS3_MIN_BD_BUF_SIZE HNS3_512_BD_BUF_SIZE
#define HNS3_MAX_BD_BUF_SIZE HNS3_4K_BD_BUF_SIZE

#define HNS3_BD_SIZE_512_TYPE 0
#define HNS3_BD_SIZE_1024_TYPE 1
#define HNS3_BD_SIZE_2048_TYPE 2
#define HNS3_BD_SIZE_4096_TYPE 3
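/*
 * Illustrative sketch (not part of this header's API): each
 * HNS3_BD_SIZE_*_TYPE value encodes the Rx buffer size that hardware
 * assumes for a BD. A size-to-type mapping could look like the
 * following; hns3_buf_size2type is a hypothetical name used only for
 * illustration:
 *
 *	static uint16_t hns3_buf_size2type(uint32_t buf_size)
 *	{
 *		switch (buf_size) {
 *		case HNS3_4K_BD_BUF_SIZE:
 *			return HNS3_BD_SIZE_4096_TYPE;
 *		case HNS3_2K_BD_BUF_SIZE:
 *			return HNS3_BD_SIZE_2048_TYPE;
 *		case HNS3_1K_BD_BUF_SIZE:
 *			return HNS3_BD_SIZE_1024_TYPE;
 *		default:
 *			return HNS3_BD_SIZE_512_TYPE;
 *		}
 *	}
 */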
#define HNS3_RX_FLAG_VLAN_PRESENT 0x1
#define HNS3_RX_FLAG_L3ID_IPV4 0x0
#define HNS3_RX_FLAG_L3ID_IPV6 0x1
#define HNS3_RX_FLAG_L4ID_UDP 0x0
#define HNS3_RX_FLAG_L4ID_TCP 0x1
#define HNS3_RXD_DMAC_S 0
#define HNS3_RXD_DMAC_M (0x3 << HNS3_RXD_DMAC_S)
#define HNS3_RXD_VLAN_S 2
#define HNS3_RXD_VLAN_M (0x3 << HNS3_RXD_VLAN_S)
#define HNS3_RXD_L3ID_S 4
#define HNS3_RXD_L3ID_M (0xf << HNS3_RXD_L3ID_S)
#define HNS3_RXD_L4ID_S 8
#define HNS3_RXD_L4ID_M (0xf << HNS3_RXD_L4ID_S)
#define HNS3_RXD_FRAG_B 12
#define HNS3_RXD_STRP_TAGP_S 13
#define HNS3_RXD_STRP_TAGP_M (0x3 << HNS3_RXD_STRP_TAGP_S)

#define HNS3_RXD_L2E_B 16
#define HNS3_RXD_L3E_B 17
#define HNS3_RXD_L4E_B 18
#define HNS3_RXD_TRUNCATE_B 19
#define HNS3_RXD_HOI_B 20
#define HNS3_RXD_DOI_B 21
#define HNS3_RXD_OL3E_B 22
#define HNS3_RXD_OL4E_B 23
#define HNS3_RXD_GRO_COUNT_S 24
#define HNS3_RXD_GRO_COUNT_M (0x3f << HNS3_RXD_GRO_COUNT_S)
#define HNS3_RXD_GRO_FIXID_B 30
#define HNS3_RXD_GRO_ECN_B 31
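/*
 * The _S/_M pairs above follow the usual shift/mask convention: a field
 * is read as (value & FIELD_M) >> FIELD_S. For example, extracting the
 * GRO count from the l234_info word of an Rx BD (a minimal sketch,
 * using the hns3_get_field() helper that appears later in this file):
 *
 *	uint32_t gro_cnt = hns3_get_field(l234_info, HNS3_RXD_GRO_COUNT_M,
 *					  HNS3_RXD_GRO_COUNT_S);
 */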
#define HNS3_RXD_ODMAC_S 0
#define HNS3_RXD_ODMAC_M (0x3 << HNS3_RXD_ODMAC_S)
#define HNS3_RXD_OVLAN_S 2
#define HNS3_RXD_OVLAN_M (0x3 << HNS3_RXD_OVLAN_S)
#define HNS3_RXD_OL3ID_S 4
#define HNS3_RXD_OL3ID_M (0xf << HNS3_RXD_OL3ID_S)
#define HNS3_RXD_OL4ID_S 8
#define HNS3_RXD_OL4ID_M (0xf << HNS3_RXD_OL4ID_S)
#define HNS3_RXD_PTYPE_S 4
#define HNS3_RXD_PTYPE_M (0xff << HNS3_RXD_PTYPE_S)
#define HNS3_RXD_FBHI_S 12
#define HNS3_RXD_FBHI_M (0x3 << HNS3_RXD_FBHI_S)
#define HNS3_RXD_FBLI_S 14
#define HNS3_RXD_FBLI_M (0x3 << HNS3_RXD_FBLI_S)
#define HNS3_RXD_BDTYPE_S 0
#define HNS3_RXD_BDTYPE_M (0xf << HNS3_RXD_BDTYPE_S)
#define HNS3_RXD_VLD_B 4
#define HNS3_RXD_UDP0_B 5
#define HNS3_RXD_EXTEND_B 7
#define HNS3_RXD_FE_B 8
#define HNS3_RXD_LUM_B 9
#define HNS3_RXD_CRCP_B 10
#define HNS3_RXD_L3L4P_B 11
#define HNS3_RXD_TSIND_S 12
#define HNS3_RXD_TSIND_M (0x7 << HNS3_RXD_TSIND_S)
#define HNS3_RXD_LKBK_B 15
#define HNS3_RXD_GRO_SIZE_S 16
#define HNS3_RXD_GRO_SIZE_M (0x3fff << HNS3_RXD_GRO_SIZE_S)
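/*
 * For instance, a completed Rx BD is identified by the VLD bit in
 * bd_base_info (a minimal sketch; BIT() is the bit helper used later in
 * this file):
 *
 *	if (bd_base_info & BIT(HNS3_RXD_VLD_B))
 *		;	(descriptor has been written back by hardware)
 */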
#define HNS3_TXD_L3T_S 0
#define HNS3_TXD_L3T_M (0x3 << HNS3_TXD_L3T_S)
#define HNS3_TXD_L4T_S 2
#define HNS3_TXD_L4T_M (0x3 << HNS3_TXD_L4T_S)
#define HNS3_TXD_L3CS_B 4
#define HNS3_TXD_L4CS_B 5
#define HNS3_TXD_VLAN_B 6
#define HNS3_TXD_TSO_B 7

#define HNS3_TXD_L2LEN_S 8
#define HNS3_TXD_L2LEN_M (0xff << HNS3_TXD_L2LEN_S)
#define HNS3_TXD_L3LEN_S 16
#define HNS3_TXD_L3LEN_M (0xff << HNS3_TXD_L3LEN_S)
#define HNS3_TXD_L4LEN_S 24
#define HNS3_TXD_L4LEN_M (0xffUL << HNS3_TXD_L4LEN_S)
#define HNS3_TXD_OL3T_S 0
#define HNS3_TXD_OL3T_M (0x3 << HNS3_TXD_OL3T_S)
#define HNS3_TXD_OVLAN_B 2
#define HNS3_TXD_MACSEC_B 3
#define HNS3_TXD_TUNTYPE_S 4
#define HNS3_TXD_TUNTYPE_M (0xf << HNS3_TXD_TUNTYPE_S)
#define HNS3_TXD_BDTYPE_S 0
#define HNS3_TXD_BDTYPE_M (0xf << HNS3_TXD_BDTYPE_S)
#define HNS3_TXD_FE_B 4
#define HNS3_TXD_SC_S 5
#define HNS3_TXD_SC_M (0x3 << HNS3_TXD_SC_S)
#define HNS3_TXD_EXTEND_B 7
#define HNS3_TXD_VLD_B 8
#define HNS3_TXD_RI_B 9
#define HNS3_TXD_RA_B 10
#define HNS3_TXD_TSYN_B 11
#define HNS3_TXD_DECTTL_S 12
#define HNS3_TXD_DECTTL_M (0xf << HNS3_TXD_DECTTL_S)
#define HNS3_TXD_MSS_S 0
#define HNS3_TXD_MSS_M (0x3fff << HNS3_TXD_MSS_S)

#define HNS3_L2_LEN_UNIT 1UL
#define HNS3_L3_LEN_UNIT 2UL
#define HNS3_L4_LEN_UNIT 2UL
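/*
 * The *_LEN_UNIT values are shift counts: header lengths are written to
 * the Tx BD in hardware units rather than bytes. A minimal sketch,
 * assuming a hns3_set_field() helper symmetric to the hns3_get_field()
 * used later in this file:
 *
 *	hns3_set_field(value, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
 *		       l2_len >> HNS3_L2_LEN_UNIT);
 */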
#define HNS3_TXD_DEFAULT_BDTYPE 0
#define HNS3_TXD_VLD_CMD (0x1 << HNS3_TXD_VLD_B)
#define HNS3_TXD_FE_CMD (0x1 << HNS3_TXD_FE_B)
#define HNS3_TXD_DEFAULT_VLD_FE_BDTYPE \
		(HNS3_TXD_VLD_CMD | HNS3_TXD_FE_CMD | HNS3_TXD_DEFAULT_BDTYPE)
#define HNS3_TXD_SEND_SIZE_SHIFT 16
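/*
 * Illustrative sketch: the last Tx BD of a packet is typically
 * finalized by writing the valid and frame-end bits into its
 * tp_fe_sc_vld_ra_ri field ("desc" is a hypothetical struct hns3_desc
 * pointer, used only for illustration):
 *
 *	desc->tx.tp_fe_sc_vld_ra_ri =
 *			rte_cpu_to_le_16(HNS3_TXD_DEFAULT_VLD_FE_BDTYPE);
 */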
enum hns3_pkt_l2t_type {
	HNS3_L2_TYPE_UNICAST,
	HNS3_L2_TYPE_MULTICAST,
	HNS3_L2_TYPE_BROADCAST,
	HNS3_L2_TYPE_INVALID,
};
enum hns3_pkt_l3t_type {
	HNS3_L3T_NONE,
	HNS3_L3T_IPV4,
	HNS3_L3T_IPV6,
	HNS3_L3T_RESERVED
};
enum hns3_pkt_l4t_type {
	HNS3_L4T_UNKNOWN,
	HNS3_L4T_TCP,
	HNS3_L4T_UDP,
	HNS3_L4T_SCTP
};
enum hns3_pkt_ol3t_type {
	HNS3_OL3T_NONE,
	HNS3_OL3T_IPV4_CSUM,
	HNS3_OL3T_IPV6,
	HNS3_OL3T_IPV4_NO_CSUM
};
enum hns3_pkt_tun_type {
	HNS3_TUN_NONE,
	HNS3_TUN_MAC_IN_UDP,
	HNS3_TUN_NVGRE,
	HNS3_TUN_OTHER
};
/* hardware spec ring buffer format */
struct hns3_desc {
	union {
		uint64_t addr;
		struct {
			uint32_t addr0;
			uint32_t addr1;
		};
	};
	union {
		struct {
			uint16_t vlan_tag;
			uint16_t send_size;
			union {
				/*
				 * L3T | L4T | L3CS | L4CS | VLAN | TSO |
				 * L2_LEN
				 */
				uint32_t type_cs_vlan_tso_len;
				struct {
					uint8_t type_cs_vlan_tso;
					uint8_t l2_len;
					uint8_t l3_len;
					uint8_t l4_len;
				};
			};
			uint16_t outer_vlan_tag;
			uint16_t tv;
			union {
				/* OL3T | OVLAN | MACSEC */
				uint32_t ol_type_vlan_len_msec;
				struct {
					uint8_t ol_type_vlan_msec;
					uint8_t ol2_len;
					uint8_t ol3_len;
					uint8_t ol4_len;
				};
			};

			uint32_t paylen;
			uint16_t tp_fe_sc_vld_ra_ri;
			uint16_t mss;
		} tx;

		struct {
			uint32_t l234_info;
			uint16_t pkt_len;
			uint16_t size;
			uint32_t rss_hash;
			uint16_t fd_id;
			uint16_t vlan_tag;
			union {
				uint32_t ol_info;
				struct {
					uint16_t o_dm_vlan_id_fb;
					uint16_t ot_vlan_tag;
				};
			};
			union {
				uint32_t bd_base_info;
				struct {
					uint16_t bdtype_vld_udp0;
					uint16_t fe_lum_crcp_l3l4p;
				};
			};
		} rx;
	};
} __rte_packed;
struct hns3_entry {
	struct rte_mbuf *mbuf;
};
struct hns3_rx_basic_stats {
	uint64_t packets;
	uint64_t bytes;
	uint64_t errors;
};
struct hns3_rx_dfx_stats {
	uint64_t l3_csum_errors;
	uint64_t l4_csum_errors;
	uint64_t ol3_csum_errors;
	uint64_t ol4_csum_errors;
};
struct hns3_rx_bd_errors_stats {
	uint64_t l2_errors;
	uint64_t pkt_len_errors;
};
struct hns3_rx_queue {
	void *io_base;
	volatile void *io_head_reg;
	struct hns3_adapter *hns;
	struct hns3_ptype_table *ptype_tbl;
	struct rte_mempool *mb_pool;
	struct hns3_desc *rx_ring;
	uint64_t rx_ring_phys_addr; /* RX ring DMA address */
	const struct rte_memzone *mz;
	struct hns3_entry *sw_ring;
	struct rte_mbuf *pkt_first_seg;
	struct rte_mbuf *pkt_last_seg;

	uint16_t queue_id;
	uint16_t port_id;
	uint16_t nb_rx_desc;
	uint16_t rx_buf_len;
	/*
	 * Threshold for the number of BDs held back from hardware. Once the
	 * number of held BDs exceeds this threshold, the driver passes them
	 * back to hardware.
	 */
	uint16_t rx_free_thresh;
	uint16_t next_to_use;    /* index of next BD to be polled */
	uint16_t rx_free_hold;   /* number of BDs waiting to be passed to hardware */
	uint16_t rx_rearm_start; /* index of BD the driver re-arms from */
	uint16_t rx_rearm_nb;    /* number of remaining BDs to be re-armed */

	/* 4 if DEV_RX_OFFLOAD_KEEP_CRC offload set, 0 otherwise */
	uint8_t crc_len;

	bool rx_deferred_start; /* don't start this queue in dev start */
	bool configured;        /* indicate if rx queue has been configured */
	/*
	 * Indicate whether to ignore the outer VLAN field reported in the Rx
	 * BD by hardware. On hardware network engines whose VLAN mode is
	 * HNS3_SW_SHIFT_AND_DISCARD_MODE, such as Kunpeng 920, the outer VLAN
	 * is the PVID whenever a PVID is set, and this VLAN must not be
	 * passed to the upper-layer application. On hardware network engines
	 * whose VLAN mode is HNS3_HW_SHIFT_AND_DISCARD_MODE, such as Kunpeng
	 * 930, the PVID is not reported in the BDs, so the PMD does not need
	 * to perform any PVID-related operation in Rx; in that case
	 * pvid_sw_discard_en is false.
	 */
	bool pvid_sw_discard_en;
	bool ptype_en;          /* indicate if the ptype field enabled */
	bool enabled;           /* indicate if Rx queue has been enabled */

	struct hns3_rx_basic_stats basic_stats;
	/* DFX statistics that do not require the driver to discard packets */
	struct hns3_rx_dfx_stats dfx_stats;
	/* Error statistics that require the driver to discard packets */
	struct hns3_rx_bd_errors_stats err_stats;

	struct rte_mbuf *bulk_mbuf[HNS3_BULK_ALLOC_MBUF_NUM];
	uint16_t bulk_mbuf_num;

	/* offset_table: used by vector Rx to resolve descriptor re-ordering */
	uint8_t offset_table[HNS3_VECTOR_RX_OFFSET_TABLE_LEN + 1];
	uint64_t mbuf_initializer; /* value to init mbufs used with vector rx */
	struct rte_mbuf fake_mbuf; /* fake mbuf used with vector rx */
};
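/*
 * A minimal sketch of the rx_free_thresh contract described above,
 * assuming a register-write helper such as hns3_write_reg_opt() (the
 * helper name is an assumption, it is not declared in this header):
 *
 *	rxq->rx_free_hold += nb_processed_bds;
 *	if (rxq->rx_free_hold > rxq->rx_free_thresh) {
 *		hns3_write_reg_opt(rxq->io_head_reg, rxq->rx_free_hold);
 *		rxq->rx_free_hold = 0;
 *	}
 */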
struct hns3_tx_basic_stats {
	uint64_t packets;
	uint64_t bytes;
};
/*
 * The following items are used for the abnormal errors statistics in
 * the Tx datapath. When the upper-layer application calls the
 * rte_eth_tx_burst API to send multiple packets at a time on the hns3
 * network engine, some abnormal conditions may cause the driver to fail
 * to operate the hardware to send the packets correctly.
 * Note: within one rte_eth_tx_burst call, when the first abnormal error
 * is detected, the relevant error statistic item is increased by one
 * and the send loop of the function exits. That is to say, even if
 * several packets in the burst could trigger abnormal errors, the
 * relevant error statistic is increased by at most one per burst.
 * The Tx abnormal errors statistic items are described in detail as
 * follows:
 * - over_length_pkt_cnt
 *     Total number of packets whose length is greater than
 *     HNS3_MAX_FRAME_LEN, which the driver discards without sending.
 *
 * - exceed_limit_bd_pkt_cnt
 *     Total number of packets that need more BDs to be sent than the
 *     hardware limit allows.
 *
 * - exceed_limit_bd_reassem_fail
 *     Total number of packets that exceed the hardware BD limit and
 *     whose reassembly into fewer BDs also fails.
 *
 * - unsupported_tunnel_pkt_cnt
 *     Total number of unsupported tunnel packets. The unsupported
 *     tunnel types are: vxlan_gpe, gtp, ipip and MPLSINUDP, where
 *     MPLSINUDP is a packet with an MPLS-in-UDP (RFC 7510) header.
 *
 * - queue_full_cnt
 *     Total number of times the number of BDs available in the current
 *     BD queue is less than the number of BDs the packet needs.
 *
 * - pkt_padding_fail_cnt
 *     Total number of times a packet shorter than the minimum packet
 *     length supported by hardware in the Tx direction (struct
 *     hns3_tx_queue::min_tx_pkt_len) fails to be padded with zeros.
 */
struct hns3_tx_dfx_stats {
	uint64_t over_length_pkt_cnt;
	uint64_t exceed_limit_bd_pkt_cnt;
	uint64_t exceed_limit_bd_reassem_fail;
	uint64_t unsupported_tunnel_pkt_cnt;
	uint64_t queue_full_cnt;
	uint64_t pkt_padding_fail_cnt;
};
struct hns3_tx_queue {
	void *io_base;
	volatile void *io_tail_reg;
	struct hns3_adapter *hns;
	struct hns3_desc *tx_ring;
	uint64_t tx_ring_phys_addr; /* TX ring DMA address */
	const struct rte_memzone *mz;
	struct hns3_entry *sw_ring;

	uint16_t queue_id;
	uint16_t port_id;
	uint16_t nb_tx_desc;
	/*
	 * index of next BD whose corresponding rte_mbuf can be released by
	 * the driver
	 */
	uint16_t next_to_clean;
	/* index of next BD to be filled by driver to send packet */
	uint16_t next_to_use;
	/* num of remaining BDs ready to be filled by driver to send packets */
	uint16_t tx_bd_ready;

	/* free Tx buffers when available BDs drop below this threshold */
	uint16_t tx_free_thresh;

	/*
	 * For better performance in the Tx datapath, mbufs are released in
	 * batches. Checking only the VLD bit of the last descriptor in a
	 * batch of tx_rs_thresh descriptors does not guarantee that all
	 * descriptors in the batch were sent by hardware successfully, so
	 * the VLD bits of all descriptors must be checked to be cleared
	 * before all mbufs in the batch are freed.
	 *
	 * tx_rs_thresh: number of mbufs released at a time.
	 *
	 * free: Tx mbuf free array used to temporarily preserve the
	 * addresses of mbufs released back to the mempool when releasing
	 * mbufs in batches.
	 */
	uint16_t tx_rs_thresh;
	struct rte_mbuf **free;

	/*
	 * TSO mode.
	 * value range:
	 *      HNS3_TSO_SW_CAL_PSEUDO_H_CSUM/HNS3_TSO_HW_CAL_PSEUDO_H_CSUM
	 *
	 *  - HNS3_TSO_SW_CAL_PSEUDO_H_CSUM
	 *     In this mode, because of a hardware constraint, the driver
	 *     must erase the L4 len value in the TCP pseudo header and
	 *     recalculate the TCP pseudo header checksum of packets that
	 *     need TSO.
	 *
	 *  - HNS3_TSO_HW_CAL_PSEUDO_H_CSUM
	 *     In this mode, hardware can recalculate the TCP pseudo header
	 *     checksum of packets that need TSO, so the driver does not
	 *     need to recalculate it.
	 */
	uint8_t tso_mode;
	/*
	 * The minimum length of the packet supported by hardware in the Tx
	 * direction.
	 */
	uint32_t min_tx_pkt_len;

	uint8_t max_non_tso_bd_num; /* max BD number of one non-TSO packet */
	bool tx_deferred_start; /* don't start this queue in dev start */
	bool configured;        /* indicate if tx queue has been configured */
	/*
	 * Indicate whether to add the vlan_tci of the mbuf to the inner VLAN
	 * field of the Tx BD. On hardware network engines whose VLAN mode is
	 * HNS3_SW_SHIFT_AND_DISCARD_MODE, such as Kunpeng 920, the outer
	 * VLAN is always the PVID when a PVID is set, and the PVID would
	 * overwrite the outer VLAN field of the Tx BD. On hardware network
	 * engines whose VLAN mode is HNS3_HW_SHIFT_AND_DISCARD_MODE, such as
	 * Kunpeng 930, hardware shifts the VLAN field automatically when a
	 * PVID is set, so the PMD does not need to perform any PVID-related
	 * operation in Tx; in that case pvid_sw_shift_en is false.
	 */
	bool pvid_sw_shift_en;
	bool enabled;           /* indicate if Tx queue has been enabled */

	struct hns3_tx_basic_stats basic_stats;
	struct hns3_tx_dfx_stats dfx_stats;
};
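/*
 * A minimal sketch of the batched release policy described in the
 * struct above (simplified; the mempool return path and ring
 * wrap-around are omitted). Every VLD bit in the batch must be cleared
 * before any mbuf is freed:
 *
 *	struct hns3_desc *desc = &txq->tx_ring[txq->next_to_clean];
 *	uint16_t i;
 *
 *	for (i = 0; i < txq->tx_rs_thresh; i++, desc++)
 *		if (desc->tx.tp_fe_sc_vld_ra_ri &
 *		    rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B)))
 *			return;	(batch not fully sent yet)
 *	for (i = 0; i < txq->tx_rs_thresh; i++)
 *		txq->free[i] = txq->sw_ring[txq->next_to_clean + i].mbuf;
 */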
#define HNS3_GET_TX_QUEUE_PEND_BD_NUM(txq) \
		((txq)->nb_tx_desc - 1 - (txq)->tx_bd_ready)
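/*
 * Worked example: with nb_tx_desc = 1024 and tx_bd_ready = 1000, there
 * are 1024 - 1 - 1000 = 23 BDs still pending in hardware. The "- 1"
 * reflects the usual ring convention of keeping one BD in reserve so a
 * full ring can be distinguished from an empty one.
 */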
struct hns3_queue_info {
	const char *type;       /* points to queue memory name */
	const char *ring_name;  /* points to hardware ring name */
	uint16_t idx;
	uint16_t nb_desc;
	unsigned int socket_id;
};
#define HNS3_TX_CKSUM_OFFLOAD_MASK ( \
	PKT_TX_OUTER_IP_CKSUM | \
	PKT_TX_IP_CKSUM | \
	PKT_TX_TCP_SEG | \
	PKT_TX_L4_MASK)
enum hns3_cksum_status {
	HNS3_CKSUM_NONE = 0,
	HNS3_L3_CKSUM_ERR = 1,
	HNS3_L4_CKSUM_ERR = 2,
	HNS3_OUTER_L3_CKSUM_ERR = 4,
	HNS3_OUTER_L4_CKSUM_ERR = 8
};
static inline int
hns3_handle_bdinfo(struct hns3_rx_queue *rxq, struct rte_mbuf *rxm,
		   uint32_t bd_base_info, uint32_t l234_info,
		   uint32_t *cksum_err)
{
#define L2E_TRUNC_ERR_FLAG	(BIT(HNS3_RXD_L2E_B) | \
				 BIT(HNS3_RXD_TRUNCATE_B))
#define CHECKSUM_ERR_FLAG	(BIT(HNS3_RXD_L3E_B) | \
				 BIT(HNS3_RXD_L4E_B) | \
				 BIT(HNS3_RXD_OL3E_B) | \
				 BIT(HNS3_RXD_OL4E_B))

	uint32_t tmp = 0;

	/*
	 * If the packet length is bigger than the MTU when receiving with
	 * the non-scattered algorithm, the first n BDs lack the FE bit and
	 * this situation must be handled. Note: no statistics counter needs
	 * to be added here, because the latest BD, which carries the FE bit,
	 * will have the HNS3_RXD_L2E_B bit marked.
	 */
	if (unlikely((bd_base_info & BIT(HNS3_RXD_FE_B)) == 0))
		return -EINVAL;

	if (unlikely((l234_info & L2E_TRUNC_ERR_FLAG) || rxm->pkt_len == 0)) {
		if (l234_info & BIT(HNS3_RXD_L2E_B))
			rxq->err_stats.l2_errors++;
		else
			rxq->err_stats.pkt_len_errors++;
		return -EINVAL;
	}

	if (bd_base_info & BIT(HNS3_RXD_L3L4P_B)) {
		if (likely((l234_info & CHECKSUM_ERR_FLAG) == 0)) {
			*cksum_err = 0;
			return 0;
		}

		if (unlikely(l234_info & BIT(HNS3_RXD_L3E_B))) {
			rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;
			rxq->dfx_stats.l3_csum_errors++;
			tmp |= HNS3_L3_CKSUM_ERR;
		}

		if (unlikely(l234_info & BIT(HNS3_RXD_L4E_B))) {
			rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
			rxq->dfx_stats.l4_csum_errors++;
			tmp |= HNS3_L4_CKSUM_ERR;
		}

		if (unlikely(l234_info & BIT(HNS3_RXD_OL3E_B))) {
			rxq->dfx_stats.ol3_csum_errors++;
			tmp |= HNS3_OUTER_L3_CKSUM_ERR;
		}

		if (unlikely(l234_info & BIT(HNS3_RXD_OL4E_B))) {
			rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
			rxq->dfx_stats.ol4_csum_errors++;
			tmp |= HNS3_OUTER_L4_CKSUM_ERR;
		}
	}
	*cksum_err = tmp;

	return 0;
}
static inline void
hns3_rx_set_cksum_flag(struct rte_mbuf *rxm, const uint64_t packet_type,
		       const uint32_t cksum_err)
{
	if (unlikely((packet_type & RTE_PTYPE_TUNNEL_MASK))) {
		if (likely(packet_type & RTE_PTYPE_INNER_L3_MASK) &&
		    (cksum_err & HNS3_L3_CKSUM_ERR) == 0)
			rxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
		if (likely(packet_type & RTE_PTYPE_INNER_L4_MASK) &&
		    (cksum_err & HNS3_L4_CKSUM_ERR) == 0)
			rxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
		if (likely(packet_type & RTE_PTYPE_L4_MASK) &&
		    (cksum_err & HNS3_OUTER_L4_CKSUM_ERR) == 0)
			rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;
	} else {
		if (likely(packet_type & RTE_PTYPE_L3_MASK) &&
		    (cksum_err & HNS3_L3_CKSUM_ERR) == 0)
			rxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
		if (likely(packet_type & RTE_PTYPE_L4_MASK) &&
		    (cksum_err & HNS3_L4_CKSUM_ERR) == 0)
			rxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
	}
}
static inline uint32_t
hns3_rx_calc_ptype(struct hns3_rx_queue *rxq, const uint32_t l234_info,
		   const uint32_t ol_info)
{
	const struct hns3_ptype_table * const ptype_tbl = rxq->ptype_tbl;
	uint32_t l2id, l3id, l4id;
	uint32_t ol3id, ol4id, ol2id;
	uint32_t ptype;

	if (rxq->ptype_en) {
		ptype = hns3_get_field(ol_info, HNS3_RXD_PTYPE_M,
				       HNS3_RXD_PTYPE_S);
		return ptype_tbl->ptype[ptype];
	}

	ol4id = hns3_get_field(ol_info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
	ol3id = hns3_get_field(ol_info, HNS3_RXD_OL3ID_M, HNS3_RXD_OL3ID_S);
	ol2id = hns3_get_field(ol_info, HNS3_RXD_OVLAN_M, HNS3_RXD_OVLAN_S);
	l2id = hns3_get_field(l234_info, HNS3_RXD_VLAN_M, HNS3_RXD_VLAN_S);
	l3id = hns3_get_field(l234_info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S);
	l4id = hns3_get_field(l234_info, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S);

	if (unlikely(ptype_tbl->ol4table[ol4id]))
		return ptype_tbl->inner_l2table[l2id] |
			ptype_tbl->inner_l3table[l3id] |
			ptype_tbl->inner_l4table[l4id] |
			ptype_tbl->ol3table[ol3id] |
			ptype_tbl->ol4table[ol4id] | ptype_tbl->ol2table[ol2id];

	return ptype_tbl->l2l3table[l2id][l3id] |
	       ptype_tbl->l4table[l4id];
}
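/*
 * A minimal sketch (not the driver's actual Rx loop) of how the three
 * helpers above combine when completing one Rx BD; "rxd" stands for a
 * completed descriptor and "rxm" for its mbuf, all other names come
 * from this header:
 *
 *	uint32_t l234_info = rte_le_to_cpu_32(rxd.rx.l234_info);
 *	uint32_t bd_base_info = rte_le_to_cpu_32(rxd.rx.bd_base_info);
 *	uint32_t ol_info = rte_le_to_cpu_32(rxd.rx.ol_info);
 *	uint32_t cksum_err;
 *
 *	if (hns3_handle_bdinfo(rxq, rxm, bd_base_info, l234_info,
 *			       &cksum_err) != 0)
 *		;	(drop the packet; error statistics were updated)
 *	rxm->packet_type = hns3_rx_calc_ptype(rxq, l234_info, ol_info);
 *	if (bd_base_info & BIT(HNS3_RXD_L3L4P_B))
 *		hns3_rx_set_cksum_flag(rxm, rxm->packet_type, cksum_err);
 */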
void hns3_dev_rx_queue_release(void *queue);
void hns3_dev_tx_queue_release(void *queue);
void hns3_free_all_queues(struct rte_eth_dev *dev);
int hns3_reset_all_tqps(struct hns3_adapter *hns);
void hns3_dev_all_rx_queue_intr_enable(struct hns3_hw *hw, bool en);
int hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
int hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
void hns3_enable_all_queues(struct hns3_hw *hw, bool en);
int hns3_init_queues(struct hns3_adapter *hns, bool reset_queue);
void hns3_start_tqps(struct hns3_hw *hw);
void hns3_stop_tqps(struct hns3_hw *hw);
int hns3_rxq_iterate(struct rte_eth_dev *dev,
		 int (*callback)(struct hns3_rx_queue *, void *), void *arg);
void hns3_dev_release_mbufs(struct hns3_adapter *hns);
int hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
			unsigned int socket, const struct rte_eth_rxconf *conf,
			struct rte_mempool *mp);
int hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
			unsigned int socket, const struct rte_eth_txconf *conf);
uint32_t hns3_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
uint16_t hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts);
uint16_t hns3_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
				  uint16_t nb_pkts);
uint16_t hns3_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
			    uint16_t nb_pkts);
uint16_t hns3_recv_pkts_vec_sve(void *rx_queue, struct rte_mbuf **rx_pkts,
				uint16_t nb_pkts);
int hns3_rx_burst_mode_get(struct rte_eth_dev *dev,
			   __rte_unused uint16_t queue_id,
			   struct rte_eth_burst_mode *mode);
int hns3_rx_check_vec_support(struct rte_eth_dev *dev);
uint16_t hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts);
uint16_t hns3_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
			       uint16_t nb_pkts);
uint16_t hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts);
uint16_t hns3_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
			    uint16_t nb_pkts);
uint16_t hns3_xmit_pkts_vec_sve(void *tx_queue, struct rte_mbuf **tx_pkts,
				uint16_t nb_pkts);
int hns3_tx_burst_mode_get(struct rte_eth_dev *dev,
			   __rte_unused uint16_t queue_id,
			   struct rte_eth_burst_mode *mode);
const uint32_t *hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
void hns3_init_rx_ptype_tble(struct rte_eth_dev *dev);
void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev);
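/*
 * Illustrative note: hns3_set_rxtx_function() selects among the burst
 * routines declared above and installs them as the ethdev datapath
 * callbacks, conceptually (a sketch, assuming "eth_dev" is valid and
 * the plain scalar routines are chosen):
 *
 *	eth_dev->rx_pkt_burst = hns3_recv_pkts;
 *	eth_dev->tx_pkt_burst = hns3_xmit_pkts;
 *	eth_dev->tx_pkt_prepare = hns3_prep_pkts;
 */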
uint32_t hns3_get_tqp_intr_reg_offset(uint16_t tqp_intr_id);
void hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
			    uint8_t gl_idx, uint16_t gl_value);
void hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id,
			    uint16_t rl_value);
void hns3_set_queue_intr_ql(struct hns3_hw *hw, uint16_t queue_id,
			    uint16_t ql_value);
int hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
				  uint16_t nb_tx_q);
int hns3_config_gro(struct hns3_hw *hw, bool en);
int hns3_restore_gro_conf(struct hns3_hw *hw);
void hns3_update_all_queues_pvid_proc_en(struct hns3_hw *hw);
void hns3_rx_scattered_reset(struct rte_eth_dev *dev);
void hns3_rx_scattered_calc(struct rte_eth_dev *dev);
int hns3_tx_check_vec_support(struct rte_eth_dev *dev);
void hns3_rxq_vec_setup(struct hns3_rx_queue *rxq);
void hns3_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		       struct rte_eth_rxq_info *qinfo);
void hns3_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		       struct rte_eth_txq_info *qinfo);
uint32_t hns3_get_tqp_reg_offset(uint16_t idx);
int hns3_start_all_txqs(struct rte_eth_dev *dev);
int hns3_start_all_rxqs(struct rte_eth_dev *dev);
void hns3_stop_all_txqs(struct rte_eth_dev *dev);
void hns3_restore_tqp_enable_state(struct hns3_hw *hw);
int hns3_tx_done_cleanup(void *txq, uint32_t free_cnt);
void hns3_enable_rxd_adv_layout(struct hns3_hw *hw);
#endif /* _HNS3_RXTX_H_ */