/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */

#ifndef _HNS3_RXTX_H_
#define _HNS3_RXTX_H_

#include <stdint.h>

#include <rte_mbuf_core.h>

#define HNS3_MIN_RING_DESC 64
#define HNS3_MAX_RING_DESC 32768
#define HNS3_DEFAULT_RING_DESC 1024
#define HNS3_ALIGN_RING_DESC 32
#define HNS3_RING_BASE_ALIGN 128
#define HNS3_BULK_ALLOC_MBUF_NUM 32

#define HNS3_DEFAULT_RX_FREE_THRESH 32
#define HNS3_DEFAULT_TX_FREE_THRESH 32
#define HNS3_DEFAULT_TX_RS_THRESH 32
#define HNS3_TX_FAST_FREE_AHEAD 64

#define HNS3_DEFAULT_RX_BURST 64
#if (HNS3_DEFAULT_RX_BURST > 64)
#error "PMD HNS3: HNS3_DEFAULT_RX_BURST must be <= 64\n"
#endif

#define HNS3_DEFAULT_DESCS_PER_LOOP 4
#define HNS3_SVE_DEFAULT_DESCS_PER_LOOP 8
#if (HNS3_DEFAULT_DESCS_PER_LOOP > HNS3_SVE_DEFAULT_DESCS_PER_LOOP)
#define HNS3_VECTOR_RX_OFFSET_TABLE_LEN HNS3_DEFAULT_DESCS_PER_LOOP
#else
#define HNS3_VECTOR_RX_OFFSET_TABLE_LEN HNS3_SVE_DEFAULT_DESCS_PER_LOOP
#endif

#define HNS3_DEFAULT_RXQ_REARM_THRESH 64
#define HNS3_UINT8_BIT 8
#define HNS3_UINT16_BIT 16
#define HNS3_UINT32_BIT 32

#define HNS3_512_BD_BUF_SIZE 512
#define HNS3_1K_BD_BUF_SIZE 1024
#define HNS3_2K_BD_BUF_SIZE 2048
#define HNS3_4K_BD_BUF_SIZE 4096

#define HNS3_MIN_BD_BUF_SIZE HNS3_512_BD_BUF_SIZE
#define HNS3_MAX_BD_BUF_SIZE HNS3_4K_BD_BUF_SIZE

#define HNS3_BD_SIZE_512_TYPE 0
#define HNS3_BD_SIZE_1024_TYPE 1
#define HNS3_BD_SIZE_2048_TYPE 2
#define HNS3_BD_SIZE_4096_TYPE 3
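
/*
 * Illustrative sketch (not part of the driver API): how an Rx buffer size
 * maps to the HNS3_BD_SIZE_*_TYPE value programmed to hardware. The helper
 * name is hypothetical; the datapath source keeps an equivalent conversion
 * internally.
 */
static inline uint32_t
hns3_buf_size_to_bd_size_type(uint16_t buf_size)
{
    switch (buf_size) {
    case HNS3_4K_BD_BUF_SIZE:
        return HNS3_BD_SIZE_4096_TYPE;
    case HNS3_2K_BD_BUF_SIZE:
        return HNS3_BD_SIZE_2048_TYPE;
    case HNS3_1K_BD_BUF_SIZE:
        return HNS3_BD_SIZE_1024_TYPE;
    default:
        return HNS3_BD_SIZE_512_TYPE;
    }
}
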
#define HNS3_RX_FLAG_VLAN_PRESENT 0x1
#define HNS3_RX_FLAG_L3ID_IPV4 0x0
#define HNS3_RX_FLAG_L3ID_IPV6 0x1
#define HNS3_RX_FLAG_L4ID_UDP 0x0
#define HNS3_RX_FLAG_L4ID_TCP 0x1

#define HNS3_RXD_DMAC_S 0
#define HNS3_RXD_DMAC_M (0x3 << HNS3_RXD_DMAC_S)
#define HNS3_RXD_VLAN_S 2
#define HNS3_RXD_VLAN_M (0x3 << HNS3_RXD_VLAN_S)
#define HNS3_RXD_L3ID_S 4
#define HNS3_RXD_L3ID_M (0xf << HNS3_RXD_L3ID_S)
#define HNS3_RXD_L4ID_S 8
#define HNS3_RXD_L4ID_M (0xf << HNS3_RXD_L4ID_S)
#define HNS3_RXD_FRAG_B 12
#define HNS3_RXD_STRP_TAGP_S 13
#define HNS3_RXD_STRP_TAGP_M (0x3 << HNS3_RXD_STRP_TAGP_S)

#define HNS3_RXD_L2E_B 16
#define HNS3_RXD_L3E_B 17
#define HNS3_RXD_L4E_B 18
#define HNS3_RXD_TRUNCATE_B 19
#define HNS3_RXD_HOI_B 20
#define HNS3_RXD_DOI_B 21
#define HNS3_RXD_OL3E_B 22
#define HNS3_RXD_OL4E_B 23
#define HNS3_RXD_GRO_COUNT_S 24
#define HNS3_RXD_GRO_COUNT_M (0x3f << HNS3_RXD_GRO_COUNT_S)
#define HNS3_RXD_GRO_FIXID_B 30
#define HNS3_RXD_GRO_ECN_B 31
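
/*
 * Illustrative sketch: the *_S/*_M pairs above are shift/mask pairs for
 * fields packed into the Rx descriptor's l234_info word. Extracting a field
 * is a mask-then-shift; the helper name below is hypothetical (the driver
 * itself uses its generic hns3_get_field() for this).
 */
static inline uint32_t
hns3_rxd_get_l3id(uint32_t l234_info)
{
    return (l234_info & HNS3_RXD_L3ID_M) >> HNS3_RXD_L3ID_S;
}
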
#define HNS3_RXD_ODMAC_S 0
#define HNS3_RXD_ODMAC_M (0x3 << HNS3_RXD_ODMAC_S)
#define HNS3_RXD_OVLAN_S 2
#define HNS3_RXD_OVLAN_M (0x3 << HNS3_RXD_OVLAN_S)
#define HNS3_RXD_OL3ID_S 4
#define HNS3_RXD_OL3ID_M (0xf << HNS3_RXD_OL3ID_S)
#define HNS3_RXD_OL4ID_S 8
#define HNS3_RXD_OL4ID_M (0xf << HNS3_RXD_OL4ID_S)
#define HNS3_RXD_PTYPE_S 4
#define HNS3_RXD_PTYPE_M (0xff << HNS3_RXD_PTYPE_S)
#define HNS3_RXD_FBHI_S 12
#define HNS3_RXD_FBHI_M (0x3 << HNS3_RXD_FBHI_S)
#define HNS3_RXD_FBLI_S 14
#define HNS3_RXD_FBLI_M (0x3 << HNS3_RXD_FBLI_S)

#define HNS3_RXD_BDTYPE_S 0
#define HNS3_RXD_BDTYPE_M (0xf << HNS3_RXD_BDTYPE_S)
#define HNS3_RXD_VLD_B 4
#define HNS3_RXD_UDP0_B 5
#define HNS3_RXD_EXTEND_B 7
#define HNS3_RXD_FE_B 8
#define HNS3_RXD_LUM_B 9
#define HNS3_RXD_CRCP_B 10
#define HNS3_RXD_L3L4P_B 11

#define HNS3_RXD_TS_VLD_B 14
#define HNS3_RXD_GRO_SIZE_S 16
#define HNS3_RXD_GRO_SIZE_M (0x3fff << HNS3_RXD_GRO_SIZE_S)

#define HNS3_TXD_L3T_S 0
#define HNS3_TXD_L3T_M (0x3 << HNS3_TXD_L3T_S)
#define HNS3_TXD_L4T_S 2
#define HNS3_TXD_L4T_M (0x3 << HNS3_TXD_L4T_S)
#define HNS3_TXD_L3CS_B 4
#define HNS3_TXD_L4CS_B 5
#define HNS3_TXD_VLAN_B 6
#define HNS3_TXD_TSO_B 7

#define HNS3_TXD_L2LEN_S 8
#define HNS3_TXD_L2LEN_M (0xff << HNS3_TXD_L2LEN_S)
#define HNS3_TXD_L3LEN_S 16
#define HNS3_TXD_L3LEN_M (0xff << HNS3_TXD_L3LEN_S)
#define HNS3_TXD_L4LEN_S 24
#define HNS3_TXD_L4LEN_M (0xffUL << HNS3_TXD_L4LEN_S)

#define HNS3_TXD_OL3T_S 0
#define HNS3_TXD_OL3T_M (0x3 << HNS3_TXD_OL3T_S)
#define HNS3_TXD_OVLAN_B 2
#define HNS3_TXD_MACSEC_B 3
#define HNS3_TXD_TUNTYPE_S 4
#define HNS3_TXD_TUNTYPE_M (0xf << HNS3_TXD_TUNTYPE_S)

#define HNS3_TXD_BDTYPE_S 0
#define HNS3_TXD_BDTYPE_M (0xf << HNS3_TXD_BDTYPE_S)
#define HNS3_TXD_FE_B 4
#define HNS3_TXD_SC_S 5
#define HNS3_TXD_SC_M (0x3 << HNS3_TXD_SC_S)
#define HNS3_TXD_EXTEND_B 7
#define HNS3_TXD_VLD_B 8
#define HNS3_TXD_RI_B 9
#define HNS3_TXD_RA_B 10
#define HNS3_TXD_TSYN_B 11
#define HNS3_TXD_DECTTL_S 12
#define HNS3_TXD_DECTTL_M (0xf << HNS3_TXD_DECTTL_S)

#define HNS3_TXD_MSS_S 0
#define HNS3_TXD_MSS_M (0x3fff << HNS3_TXD_MSS_S)

#define HNS3_TXD_OL4CS_B 22
#define HNS3_L2_LEN_UNIT 1UL
#define HNS3_L3_LEN_UNIT 2UL
#define HNS3_L4_LEN_UNIT 2UL

#define HNS3_TXD_DEFAULT_BDTYPE 0
#define HNS3_TXD_VLD_CMD (0x1 << HNS3_TXD_VLD_B)
#define HNS3_TXD_FE_CMD (0x1 << HNS3_TXD_FE_B)
#define HNS3_TXD_DEFAULT_VLD_FE_BDTYPE \
        (HNS3_TXD_VLD_CMD | HNS3_TXD_FE_CMD | HNS3_TXD_DEFAULT_BDTYPE)
#define HNS3_TXD_SEND_SIZE_SHIFT 16
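
/*
 * Illustrative sketch: the HNS3_L*_LEN_UNIT values above are right-shift
 * amounts, i.e. header lengths are written into the Tx BD in hardware
 * units (L2 in 2-byte units, L3/L4 in 4-byte units). The helper name is
 * hypothetical.
 */
static inline uint32_t
hns3_txd_l3_len_field(uint8_t l3_len)
{
    return ((uint32_t)(l3_len >> HNS3_L3_LEN_UNIT) << HNS3_TXD_L3LEN_S) &
           HNS3_TXD_L3LEN_M;
}
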
enum hns3_pkt_l2t_type {
    HNS3_L2_TYPE_UNICAST,
    HNS3_L2_TYPE_MULTICAST,
    HNS3_L2_TYPE_BROADCAST,
    HNS3_L2_TYPE_INVALID,
};

enum hns3_pkt_l3t_type {
    HNS3_L3T_NONE,
    HNS3_L3T_IPV4,
    HNS3_L3T_IPV6,
    HNS3_L3T_RESERVED
};

enum hns3_pkt_l4t_type {
    HNS3_L4T_UNKNOWN,
    HNS3_L4T_TCP,
    HNS3_L4T_UDP,
    HNS3_L4T_SCTP
};

enum hns3_pkt_ol3t_type {
    HNS3_OL3T_NONE,
    HNS3_OL3T_IPV4_CSUM,
    HNS3_OL3T_IPV6,
    HNS3_OL3T_IPV4_NO_CSUM,
};

enum hns3_pkt_tun_type {
    HNS3_TUN_NONE,
    HNS3_TUN_MAC_IN_UDP,
    HNS3_TUN_NVGRE,
    HNS3_TUN_OTHER
};

/* hardware spec ring buffer format */
struct hns3_desc {
    union {
        uint64_t addr;
        struct {
            uint32_t addr;
            uint32_t reserved_addr;
        };
    };
    union {
        struct {
            uint16_t vlan_tag;
            uint16_t send_size;
            union {
                /*
                 * L3T | L4T | L3CS | L4CS | VLAN | TSO |
                 * L2_LEN
                 */
                uint32_t type_cs_vlan_tso_len;
                struct {
                    uint8_t type_cs_vlan_tso;
                    uint8_t l2_len;
                    uint8_t l3_len;
                    uint8_t l4_len;
                };
            };
            uint16_t outer_vlan_tag;
            uint16_t tv;
            union {
                /* OL3T | OVLAN | MACSEC */
                uint32_t ol_type_vlan_len_msec;
                struct {
                    uint8_t ol_type_vlan_msec;
                    uint8_t ol2_len;
                    uint8_t ol3_len;
                    uint8_t ol4_len;
                };
            };

            uint32_t paylen_fd_dop_ol4cs;
            uint16_t tp_fe_sc_vld_ra_ri;
            uint16_t mss;
        } tx;

        struct {
            uint32_t l234_info;
            uint16_t pkt_len;
            uint16_t size;
            uint32_t rss_hash;
            uint16_t fd_id;
            uint16_t vlan_tag;
            union {
                uint32_t ol_info;
                struct {
                    uint16_t o_dm_vlan_id_fb;
                    uint16_t ot_vlan_tag;
                };
            };
            union {
                uint32_t bd_base_info;
                struct {
                    uint16_t bdtype_vld_udp0;
                    uint16_t fe_lum_crcp_l3l4p;
                };
            };
        } rx;
    };
} __rte_packed;
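
/*
 * Illustrative sketch: stamping the last BD of a packet with the default
 * "valid + frame end" command word built from the HNS3_TXD_* macros above.
 * The helper name is hypothetical, and rte_cpu_to_le_16() (from
 * rte_byteorder.h) is assumed to be available where this header is used.
 */
static inline void
hns3_txd_mark_last_bd(struct hns3_desc *desc)
{
    desc->tx.tp_fe_sc_vld_ra_ri =
        rte_cpu_to_le_16(HNS3_TXD_DEFAULT_VLD_FE_BDTYPE);
}
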
struct hns3_entry {
    struct rte_mbuf *mbuf;
};

struct hns3_rx_basic_stats {
    uint64_t packets;
    uint64_t bytes;
    uint64_t errors;
};

struct hns3_rx_dfx_stats {
    uint64_t l3_csum_errors;
    uint64_t l4_csum_errors;
    uint64_t ol3_csum_errors;
    uint64_t ol4_csum_errors;
};

struct hns3_rx_bd_errors_stats {
    uint64_t l2_errors;
    uint64_t pkt_len_errors;
};

struct hns3_rx_queue {
    volatile void *io_head_reg;
    struct hns3_ptype_table *ptype_tbl;
    struct rte_mempool *mb_pool;
    struct hns3_desc *rx_ring;
    struct hns3_entry *sw_ring;

    uint16_t nb_rx_desc;
    /*
     * Threshold for the number of BDs held back from hardware. Once the
     * number of consumed BDs exceeds this threshold, the driver returns
     * them to hardware in one batch.
     */
    uint16_t rx_free_thresh;
    uint16_t next_to_use;    /* index of next BD to be polled */
    uint16_t rx_free_hold;   /* number of BDs waiting to be returned to hardware */
    uint16_t rx_rearm_start; /* index of BD that driver re-arming from */
    uint16_t rx_rearm_nb;    /* number of remaining BDs to be re-armed */

    /* 4 if DEV_RX_OFFLOAD_KEEP_CRC offload set, 0 otherwise */
    uint8_t crc_len;

    /*
     * Indicates whether to ignore the outer VLAN field reported in the Rx
     * BD by hardware. On hardware network engines whose VLAN mode is
     * HNS3_SW_SHIFT_AND_DISCARD_MODE, such as Kunpeng 920, the outer VLAN
     * field carries the PVID when a PVID is set, and that VLAN must not be
     * passed to the upper-layer application. On engines whose VLAN mode is
     * HNS3_HW_SHIFT_AND_DISCARD_MODE, such as Kunpeng 930, the PVID is
     * never reported in the BD, so the PMD needs no PVID-related handling
     * in Rx and pvid_sw_discard_en is false.
     */
    uint8_t pvid_sw_discard_en:1;
    uint8_t ptype_en:1; /* indicate if the ptype field is enabled */

    uint64_t mbuf_initializer; /* value to init mbufs used with vector rx */
    /* offset_table: used by vector Rx to solve the execution re-order problem */
    uint8_t offset_table[HNS3_VECTOR_RX_OFFSET_TABLE_LEN + 1];

    uint16_t bulk_mbuf_num; /* number of valid entries in bulk_mbuf */

    struct hns3_rx_basic_stats basic_stats;

    struct rte_mbuf *pkt_first_seg;
    struct rte_mbuf *pkt_last_seg;

    struct rte_mbuf *bulk_mbuf[HNS3_BULK_ALLOC_MBUF_NUM];

    /* DFX statistics that do not require the driver to discard packets */
    struct hns3_rx_dfx_stats dfx_stats;
    /* Error statistics for which the driver discards the packets */
    struct hns3_rx_bd_errors_stats err_stats;

    struct rte_mbuf fake_mbuf; /* fake mbuf used with vector rx */

    /*
     * The following fields are not accessed in the I/O path, so they are
     * placed at the end.
     */
    void *io_base;
    struct hns3_adapter *hns;
    uint64_t rx_ring_phys_addr; /* RX ring DMA address */
    const struct rte_memzone *mz;

    uint16_t port_id;
    uint16_t queue_id;

    bool configured;        /* indicate if rx queue has been configured */
    bool rx_deferred_start; /* don't start this queue in dev start */
    bool enabled;           /* indicate if Rx queue has been enabled */
};
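
/*
 * Illustrative sketch of how rx_free_hold and rx_free_thresh interact:
 * consumed BDs accumulate in rx_free_hold and are returned to hardware in
 * one batch through the head register. The helper name is hypothetical;
 * hns3_write_reg_opt() is assumed to be the driver's register-write helper
 * (declared in hns3_ethdev.h).
 */
static inline void
hns3_rx_return_bds(struct hns3_rx_queue *rxq, uint16_t nb_done)
{
    rxq->rx_free_hold += nb_done;
    if (rxq->rx_free_hold >= rxq->rx_free_thresh) {
        hns3_write_reg_opt(rxq->io_head_reg, rxq->rx_free_hold);
        rxq->rx_free_hold = 0;
    }
}
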
struct hns3_tx_basic_stats {
    uint64_t packets;
    uint64_t bytes;
    uint64_t errors;
};
/*
 * The following items are used for the abnormal-error statistics in the Tx
 * datapath. When an application calls the rte_eth_tx_burst API to send
 * multiple packets at a time on the hns3 network engine, some abnormal
 * conditions may prevent the driver from programming the hardware to send
 * the packets correctly.
 * Note: within one rte_eth_tx_burst call, the driver increments the
 * relevant error counter once for the first abnormal error it detects and
 * then exits the transmit loop. Even if several packets in the burst would
 * trigger such errors, each counter grows by at most one per burst.
 * The Tx abnormal-error statistic items are as follows:
 * - over_length_pkt_cnt
 *     Total number of packets whose length exceeds HNS3_MAX_FRAME_LEN and
 *     which therefore cannot be sent by the driver.
 *
 * - exceed_limit_bd_pkt_cnt
 *     Total number of packets that need more BDs than the hardware limit
 *     allows.
 *
 * - exceed_limit_bd_reassem_fail
 *     Total number of packets that need more BDs than the hardware limit
 *     allows and for which reassembly into fewer BDs failed.
 *
 * - unsupported_tunnel_pkt_cnt
 *     Total number of packets with an unsupported tunnel type, such as
 *     vxlan_gpe, gtp, ipip and MPLS-in-UDP (a packet with an MPLS-in-UDP
 *     RFC 7510 header).
 *
 * - queue_full_cnt
 *     Total number of times the available BDs in the current BD queue were
 *     fewer than the BDs required to process a packet.
 *
 * - pkt_padding_fail_cnt
 *     Total number of times a packet shorter than the minimum packet
 *     length supported by hardware in the Tx direction (struct
 *     hns3_tx_queue::min_tx_pkt_len) failed to be zero-padded.
 */
struct hns3_tx_dfx_stats {
    uint64_t over_length_pkt_cnt;
    uint64_t exceed_limit_bd_pkt_cnt;
    uint64_t exceed_limit_bd_reassem_fail;
    uint64_t unsupported_tunnel_pkt_cnt;
    uint64_t queue_full_cnt;
    uint64_t pkt_padding_fail_cnt;
};

struct hns3_tx_queue {
    volatile void *io_tail_reg;
    struct hns3_desc *tx_ring;
    struct hns3_entry *sw_ring;

    uint16_t nb_tx_desc;
    /*
     * index of next BD whose corresponding rte_mbuf can be released by
     * driver.
     */
    uint16_t next_to_clean;
    /* index of next BD to be filled by driver to send packet */
    uint16_t next_to_use;
    /* num of remaining BDs ready to be filled by driver to send packet */
    uint16_t tx_bd_ready;

    /* threshold for freeing Tx buffers when available BDs fall below this value */
    uint16_t tx_free_thresh;

    /*
     * The minimum length of the packet supported by hardware in the Tx
     * direction.
     */
    uint8_t min_tx_pkt_len;

    uint8_t max_non_tso_bd_num; /* max BD number of one non-TSO packet */

    /*
     * The TSO mode of the hardware network engine, indicated by
     * HNS3_TSO_SW_CAL_PSEUDO_H_CSUM/HNS3_TSO_HW_CAL_PSEUDO_H_CSUM:
     *
     * - HNS3_TSO_SW_CAL_PSEUDO_H_CSUM
     *     In this mode, because of a hardware constraint, the driver must
     *     erase the L4 length value from the TCP pseudo header and
     *     recalculate the TCP pseudo-header checksum of packets that need
     *     TSO.
     *
     * - HNS3_TSO_HW_CAL_PSEUDO_H_CSUM
     *     In this mode, the hardware recalculates the TCP pseudo-header
     *     checksum of packets that need TSO, so the driver does not need
     *     to do it.
     */
    uint16_t tso_mode:1;
    /*
     * The checksum mode of the hardware for special UDP ports, indicated
     * by HNS3_SPECIAL_PORT_HW_CKSUM_MODE/HNS3_SPECIAL_PORT_SW_CKSUM_MODE:
     *
     * - HNS3_SPECIAL_PORT_SW_CKSUM_MODE
     *     In this mode, hardware cannot compute checksums for special UDP
     *     ports such as 4789, 4790 and 6081 for non-tunnel UDP packets and
     *     for UDP tunnel packets without PKT_TX_TUNNEL_MASK set in the
     *     mbuf. The PMD therefore computes the checksum for these packets
     *     in software to avoid checksum errors.
     *
     * - HNS3_SPECIAL_PORT_HW_CKSUM_MODE
     *     In this mode, hardware does not have the preceding problem and
     *     can directly compute the checksum of these UDP packets.
     */
    uint16_t udp_cksum_mode:1;

    uint16_t simple_bd_enable:1;
    uint16_t tx_push_enable:1; /* indicate whether Tx push is enabled */
    /*
     * Indicates whether to add the vlan_tci of the mbuf to the inner VLAN
     * field of the Tx BD. On hardware network engines whose VLAN mode is
     * HNS3_SW_SHIFT_AND_DISCARD_MODE, such as Kunpeng 920, the PVID, when
     * set, always occupies the outer VLAN field and would overwrite it in
     * the Tx BD. On engines whose VLAN mode is
     * HNS3_HW_SHIFT_AND_DISCARD_MODE, such as Kunpeng 930, hardware shifts
     * the VLAN field automatically when a PVID is set, so the PMD needs no
     * PVID-related handling in Tx and pvid_sw_shift_en is false.
     */
    uint16_t pvid_sw_shift_en:1;

    /*
     * For better performance in the Tx datapath, mbufs are released in
     * batches. Checking only the VLD bit of the last descriptor in a batch
     * of tx_rs_thresh descriptors does not prove that all of them have
     * been sent by hardware, so the driver checks that the VLD bits of all
     * descriptors in the batch are cleared before freeing the batch's
     * mbufs.
     * - tx_rs_thresh
     *   Number of mbufs released at a time.
     *
     * - free
     *   Tx mbuf free array used to temporarily hold the addresses of mbufs
     *   released back to the mempool when releasing in batches.
     */
    uint16_t tx_rs_thresh;
    struct rte_mbuf **free;

    struct hns3_tx_basic_stats basic_stats;
    struct hns3_tx_dfx_stats dfx_stats;

    /*
     * The following fields are not accessed in the I/O path, so they are
     * placed at the end.
     */
    void *io_base;
    struct hns3_adapter *hns;
    uint64_t tx_ring_phys_addr; /* TX ring DMA address */
    const struct rte_memzone *mz;

    uint16_t port_id;
    uint16_t queue_id;

    bool configured;        /* indicate if tx queue has been configured */
    bool tx_deferred_start; /* don't start this queue in dev start */
    bool enabled;           /* indicate if Tx queue has been enabled */
};
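
/*
 * Illustrative sketch of the batched mbuf release described above: before
 * freeing a batch of tx_rs_thresh mbufs, verify that hardware has cleared
 * the VLD bit in every descriptor of the batch. The helper name is
 * hypothetical, rte_le_to_cpu_16() (from rte_byteorder.h) is assumed
 * available, and the batch is assumed not to wrap around the ring.
 */
static inline bool
hns3_tx_batch_done(struct hns3_tx_queue *txq, uint16_t first, uint16_t nb)
{
    uint16_t i;

    for (i = 0; i < nb; i++) {
        const struct hns3_desc *desc = &txq->tx_ring[first + i];

        if (rte_le_to_cpu_16(desc->tx.tp_fe_sc_vld_ra_ri) &
            (0x1 << HNS3_TXD_VLD_B))
            return false; /* hardware still owns this BD */
    }
    return true;
}
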
#define HNS3_GET_TX_QUEUE_PEND_BD_NUM(txq) \
        ((txq)->nb_tx_desc - 1 - (txq)->tx_bd_ready)
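
/*
 * Note: one descriptor is kept unused so a full ring can be distinguished
 * from an empty one, hence the "- 1" above. A hypothetical usage sketch:
 */
static inline bool
hns3_txq_is_busy(struct hns3_tx_queue *txq, uint16_t thresh)
{
    return HNS3_GET_TX_QUEUE_PEND_BD_NUM(txq) > thresh;
}
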
struct hns3_queue_info {
    const char *type;      /* point to queue memory name */
    const char *ring_name; /* point to hardware ring name */
    uint16_t idx;
    uint16_t nb_desc;
    unsigned int socket_id;
};

#define HNS3_TX_CKSUM_OFFLOAD_MASK ( \
    PKT_TX_OUTER_UDP_CKSUM | \
    PKT_TX_OUTER_IP_CKSUM | \
    PKT_TX_IP_CKSUM | \
    PKT_TX_TCP_SEG | \
    PKT_TX_L4_MASK)

enum hns3_cksum_status {
    HNS3_CKSUM_NONE = 0,
    HNS3_L3_CKSUM_ERR = 1,
    HNS3_L4_CKSUM_ERR = 2,
    HNS3_OUTER_L3_CKSUM_ERR = 4,
    HNS3_OUTER_L4_CKSUM_ERR = 8
};

extern uint64_t hns3_timestamp_rx_dynflag;
extern int hns3_timestamp_dynfield_offset;

static inline void
hns3_rx_set_cksum_flag(struct hns3_rx_queue *rxq,
                       struct rte_mbuf *rxm,
                       uint32_t l234_info)
{
#define HNS3_RXD_CKSUM_ERR_MASK (BIT(HNS3_RXD_L3E_B) | \
                                 BIT(HNS3_RXD_L4E_B) | \
                                 BIT(HNS3_RXD_OL3E_B) | \
                                 BIT(HNS3_RXD_OL4E_B))

    if (likely((l234_info & HNS3_RXD_CKSUM_ERR_MASK) == 0)) {
        rxm->ol_flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
        return;
    }

    if (unlikely(l234_info & BIT(HNS3_RXD_L3E_B))) {
        rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;
        rxq->dfx_stats.l3_csum_errors++;
    } else {
        rxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
    }

    if (unlikely(l234_info & BIT(HNS3_RXD_L4E_B))) {
        rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
        rxq->dfx_stats.l4_csum_errors++;
    } else {
        rxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
    }

    if (unlikely(l234_info & BIT(HNS3_RXD_OL3E_B)))
        rxq->dfx_stats.ol3_csum_errors++;

    if (unlikely(l234_info & BIT(HNS3_RXD_OL4E_B))) {
        rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
        rxq->dfx_stats.ol4_csum_errors++;
    }
}

static inline int
hns3_handle_bdinfo(struct hns3_rx_queue *rxq, struct rte_mbuf *rxm,
                   uint32_t bd_base_info, uint32_t l234_info)
{
#define L2E_TRUNC_ERR_FLAG (BIT(HNS3_RXD_L2E_B) | \
                            BIT(HNS3_RXD_TRUNCATE_B))
    /*
     * If the packet length is bigger than the MTU when received with the
     * non-scattered algorithm, the first n BDs arrive without the FE bit
     * set, and this situation must be handled here.
     * Note: no statistics counter needs updating for these BDs, because
     * the last BD, which carries the FE bit, will also have the
     * HNS3_RXD_L2E_B bit set.
     */
    if (unlikely((bd_base_info & BIT(HNS3_RXD_FE_B)) == 0))
        return -EINVAL;

    if (unlikely((l234_info & L2E_TRUNC_ERR_FLAG) || rxm->pkt_len == 0)) {
        if (l234_info & BIT(HNS3_RXD_L2E_B))
            rxq->err_stats.l2_errors++;
        else
            rxq->err_stats.pkt_len_errors++;
        return -EINVAL;
    }

    if (bd_base_info & BIT(HNS3_RXD_L3L4P_B))
        hns3_rx_set_cksum_flag(rxq, rxm, l234_info);

    return 0;
}

static inline uint32_t
hns3_rx_calc_ptype(struct hns3_rx_queue *rxq, const uint32_t l234_info,
                   const uint32_t ol_info)
{
    const struct hns3_ptype_table * const ptype_tbl = rxq->ptype_tbl;
    uint32_t ol3id, ol4id;
    uint32_t l3id, l4id;
    uint32_t ptype;

    if (rxq->ptype_en) {
        ptype = hns3_get_field(ol_info, HNS3_RXD_PTYPE_M,
                               HNS3_RXD_PTYPE_S);
        return ptype_tbl->ptype[ptype];
    }

    ol4id = hns3_get_field(ol_info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
    ol3id = hns3_get_field(ol_info, HNS3_RXD_OL3ID_M, HNS3_RXD_OL3ID_S);
    l3id = hns3_get_field(l234_info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S);
    l4id = hns3_get_field(l234_info, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S);

    if (unlikely(ptype_tbl->ol4table[ol4id]))
        return ptype_tbl->inner_l3table[l3id] |
               ptype_tbl->inner_l4table[l4id] |
               ptype_tbl->ol3table[ol3id] |
               ptype_tbl->ol4table[ol4id];

    return ptype_tbl->l3table[l3id] | ptype_tbl->l4table[l4id];
}
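
/*
 * Illustrative sketch (hypothetical helper): in the Rx path the computed
 * packet type is stored into the received mbuf.
 */
static inline void
hns3_rxm_set_ptype(struct hns3_rx_queue *rxq, struct rte_mbuf *rxm,
                   uint32_t l234_info, uint32_t ol_info)
{
    rxm->packet_type = hns3_rx_calc_ptype(rxq, l234_info, ol_info);
}
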
void hns3_dev_rx_queue_release(void *queue);
void hns3_dev_tx_queue_release(void *queue);
void hns3_free_all_queues(struct rte_eth_dev *dev);
int hns3_reset_all_tqps(struct hns3_adapter *hns);
void hns3_dev_all_rx_queue_intr_enable(struct hns3_hw *hw, bool en);
int hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
int hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
void hns3_enable_all_queues(struct hns3_hw *hw, bool en);
int hns3_init_queues(struct hns3_adapter *hns, bool reset_queue);
void hns3_start_tqps(struct hns3_hw *hw);
void hns3_stop_tqps(struct hns3_hw *hw);
int hns3_rxq_iterate(struct rte_eth_dev *dev,
                     int (*callback)(struct hns3_rx_queue *, void *), void *arg);
void hns3_dev_release_mbufs(struct hns3_adapter *hns);
int hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
                        unsigned int socket, const struct rte_eth_rxconf *conf,
                        struct rte_mempool *mp);
int hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
                        unsigned int socket, const struct rte_eth_txconf *conf);
uint32_t hns3_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
uint16_t hns3_recv_pkts_simple(void *rx_queue, struct rte_mbuf **rx_pkts,
                               uint16_t nb_pkts);
uint16_t hns3_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                                  uint16_t nb_pkts);
uint16_t hns3_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
                            uint16_t nb_pkts);
uint16_t hns3_recv_pkts_vec_sve(void *rx_queue, struct rte_mbuf **rx_pkts,
                                uint16_t nb_pkts);
int hns3_rx_burst_mode_get(struct rte_eth_dev *dev,
                           __rte_unused uint16_t queue_id,
                           struct rte_eth_burst_mode *mode);
int hns3_rx_check_vec_support(struct rte_eth_dev *dev);
uint16_t hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
                        uint16_t nb_pkts);
uint16_t hns3_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
                               uint16_t nb_pkts);
uint16_t hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        uint16_t nb_pkts);
uint16_t hns3_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                            uint16_t nb_pkts);
uint16_t hns3_xmit_pkts_vec_sve(void *tx_queue, struct rte_mbuf **tx_pkts,
                                uint16_t nb_pkts);
int hns3_tx_burst_mode_get(struct rte_eth_dev *dev,
                           __rte_unused uint16_t queue_id,
                           struct rte_eth_burst_mode *mode);
const uint32_t *hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
void hns3_init_rx_ptype_tble(struct rte_eth_dev *dev);
void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev);
uint32_t hns3_get_tqp_intr_reg_offset(uint16_t tqp_intr_id);
void hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
                            uint8_t gl_idx, uint16_t gl_value);
void hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id,
                            uint16_t rl_value);
void hns3_set_queue_intr_ql(struct hns3_hw *hw, uint16_t queue_id,
                            uint16_t ql_value);
int hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
                                  uint16_t nb_tx_q);
int hns3_config_gro(struct hns3_hw *hw, bool en);
int hns3_restore_gro_conf(struct hns3_hw *hw);
void hns3_update_all_queues_pvid_proc_en(struct hns3_hw *hw);
void hns3_rx_scattered_reset(struct rte_eth_dev *dev);
void hns3_rx_scattered_calc(struct rte_eth_dev *dev);
int hns3_tx_check_vec_support(struct rte_eth_dev *dev);
void hns3_rxq_vec_setup(struct hns3_rx_queue *rxq);
void hns3_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
                       struct rte_eth_rxq_info *qinfo);
void hns3_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
                       struct rte_eth_txq_info *qinfo);
uint32_t hns3_get_tqp_reg_offset(uint16_t idx);
int hns3_start_all_txqs(struct rte_eth_dev *dev);
int hns3_start_all_rxqs(struct rte_eth_dev *dev);
void hns3_stop_all_txqs(struct rte_eth_dev *dev);
void hns3_restore_tqp_enable_state(struct hns3_hw *hw);
int hns3_tx_done_cleanup(void *txq, uint32_t free_cnt);
void hns3_enable_rxd_adv_layout(struct hns3_hw *hw);
int hns3_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
int hns3_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);

#endif /* _HNS3_RXTX_H_ */