net/hns3: add simple Rx path
drivers/net/hns3/hns3_rxtx.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2019 Hisilicon Limited.
 */

#ifndef _HNS3_RXTX_H_
#define _HNS3_RXTX_H_

#define HNS3_MIN_RING_DESC      64
#define HNS3_MAX_RING_DESC      32768
#define HNS3_DEFAULT_RING_DESC  1024
#define HNS3_ALIGN_RING_DESC    32
#define HNS3_RING_BASE_ALIGN    128
#define HNS3_BULK_ALLOC_MBUF_NUM        32

#define HNS3_DEFAULT_RX_FREE_THRESH     32

#define HNS3_512_BD_BUF_SIZE    512
#define HNS3_1K_BD_BUF_SIZE     1024
#define HNS3_2K_BD_BUF_SIZE     2048
#define HNS3_4K_BD_BUF_SIZE     4096

#define HNS3_MIN_BD_BUF_SIZE    HNS3_512_BD_BUF_SIZE
#define HNS3_MAX_BD_BUF_SIZE    HNS3_4K_BD_BUF_SIZE

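/*
 * BD buffer size type encodings used when configuring the ring; they
 * correspond one-to-one with the HNS3_*_BD_BUF_SIZE values above
 * (512/1K/2K/4K).
 */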
#define HNS3_BD_SIZE_512_TYPE                   0
#define HNS3_BD_SIZE_1024_TYPE                  1
#define HNS3_BD_SIZE_2048_TYPE                  2
#define HNS3_BD_SIZE_4096_TYPE                  3

#define HNS3_RX_FLAG_VLAN_PRESENT               0x1
#define HNS3_RX_FLAG_L3ID_IPV4                  0x0
#define HNS3_RX_FLAG_L3ID_IPV6                  0x1
#define HNS3_RX_FLAG_L4ID_UDP                   0x0
#define HNS3_RX_FLAG_L4ID_TCP                   0x1

#define HNS3_RXD_DMAC_S                         0
#define HNS3_RXD_DMAC_M                         (0x3 << HNS3_RXD_DMAC_S)
#define HNS3_RXD_VLAN_S                         2
#define HNS3_RXD_VLAN_M                         (0x3 << HNS3_RXD_VLAN_S)
#define HNS3_RXD_L3ID_S                         4
#define HNS3_RXD_L3ID_M                         (0xf << HNS3_RXD_L3ID_S)
#define HNS3_RXD_L4ID_S                         8
#define HNS3_RXD_L4ID_M                         (0xf << HNS3_RXD_L4ID_S)
#define HNS3_RXD_FRAG_B                         12
#define HNS3_RXD_STRP_TAGP_S                    13
#define HNS3_RXD_STRP_TAGP_M                    (0x3 << HNS3_RXD_STRP_TAGP_S)

#define HNS3_RXD_L2E_B                          16
#define HNS3_RXD_L3E_B                          17
#define HNS3_RXD_L4E_B                          18
#define HNS3_RXD_TRUNCATE_B                     19
#define HNS3_RXD_HOI_B                          20
#define HNS3_RXD_DOI_B                          21
#define HNS3_RXD_OL3E_B                         22
#define HNS3_RXD_OL4E_B                         23
#define HNS3_RXD_GRO_COUNT_S                    24
#define HNS3_RXD_GRO_COUNT_M                    (0x3f << HNS3_RXD_GRO_COUNT_S)
#define HNS3_RXD_GRO_FIXID_B                    30
#define HNS3_RXD_GRO_ECN_B                      31

#define HNS3_RXD_ODMAC_S                        0
#define HNS3_RXD_ODMAC_M                        (0x3 << HNS3_RXD_ODMAC_S)
#define HNS3_RXD_OVLAN_S                        2
#define HNS3_RXD_OVLAN_M                        (0x3 << HNS3_RXD_OVLAN_S)
#define HNS3_RXD_OL3ID_S                        4
#define HNS3_RXD_OL3ID_M                        (0xf << HNS3_RXD_OL3ID_S)
#define HNS3_RXD_OL4ID_S                        8
#define HNS3_RXD_OL4ID_M                        (0xf << HNS3_RXD_OL4ID_S)
#define HNS3_RXD_FBHI_S                         12
#define HNS3_RXD_FBHI_M                         (0x3 << HNS3_RXD_FBHI_S)
#define HNS3_RXD_FBLI_S                         14
#define HNS3_RXD_FBLI_M                         (0x3 << HNS3_RXD_FBLI_S)

#define HNS3_RXD_BDTYPE_S                       0
#define HNS3_RXD_BDTYPE_M                       (0xf << HNS3_RXD_BDTYPE_S)
#define HNS3_RXD_VLD_B                          4
#define HNS3_RXD_UDP0_B                         5
#define HNS3_RXD_EXTEND_B                       7
#define HNS3_RXD_FE_B                           8
#define HNS3_RXD_LUM_B                          9
#define HNS3_RXD_CRCP_B                         10
#define HNS3_RXD_L3L4P_B                        11
#define HNS3_RXD_TSIND_S                        12
#define HNS3_RXD_TSIND_M                        (0x7 << HNS3_RXD_TSIND_S)
#define HNS3_RXD_LKBK_B                         15
#define HNS3_RXD_GRO_SIZE_S                     16
#define HNS3_RXD_GRO_SIZE_M                     (0x3fff << HNS3_RXD_GRO_SIZE_S)

#define HNS3_TXD_L3T_S                          0
#define HNS3_TXD_L3T_M                          (0x3 << HNS3_TXD_L3T_S)
#define HNS3_TXD_L4T_S                          2
#define HNS3_TXD_L4T_M                          (0x3 << HNS3_TXD_L4T_S)
#define HNS3_TXD_L3CS_B                         4
#define HNS3_TXD_L4CS_B                         5
#define HNS3_TXD_VLAN_B                         6
#define HNS3_TXD_TSO_B                          7

#define HNS3_TXD_L2LEN_S                        8
#define HNS3_TXD_L2LEN_M                        (0xff << HNS3_TXD_L2LEN_S)
#define HNS3_TXD_L3LEN_S                        16
#define HNS3_TXD_L3LEN_M                        (0xff << HNS3_TXD_L3LEN_S)
#define HNS3_TXD_L4LEN_S                        24
#define HNS3_TXD_L4LEN_M                        (0xffUL << HNS3_TXD_L4LEN_S)

#define HNS3_TXD_OL3T_S                         0
#define HNS3_TXD_OL3T_M                         (0x3 << HNS3_TXD_OL3T_S)
#define HNS3_TXD_OVLAN_B                        2
#define HNS3_TXD_MACSEC_B                       3
#define HNS3_TXD_TUNTYPE_S                      4
#define HNS3_TXD_TUNTYPE_M                      (0xf << HNS3_TXD_TUNTYPE_S)

#define HNS3_TXD_BDTYPE_S                       0
#define HNS3_TXD_BDTYPE_M                       (0xf << HNS3_TXD_BDTYPE_S)
#define HNS3_TXD_FE_B                           4
#define HNS3_TXD_SC_S                           5
#define HNS3_TXD_SC_M                           (0x3 << HNS3_TXD_SC_S)
#define HNS3_TXD_EXTEND_B                       7
#define HNS3_TXD_VLD_B                          8
#define HNS3_TXD_RI_B                           9
#define HNS3_TXD_RA_B                           10
#define HNS3_TXD_TSYN_B                         11
#define HNS3_TXD_DECTTL_S                       12
#define HNS3_TXD_DECTTL_M                       (0xf << HNS3_TXD_DECTTL_S)

#define HNS3_TXD_MSS_S                          0
#define HNS3_TXD_MSS_M                          (0x3fff << HNS3_TXD_MSS_S)

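/*
 * Assumed meaning: shift counts applied to the header byte lengths when
 * filling the Tx BD length fields, i.e. the L2 length is expressed in 2-byte
 * units and the L3/L4 lengths in 4-byte units.
 */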
#define HNS3_L2_LEN_UNIT                        1UL
#define HNS3_L3_LEN_UNIT                        2UL
#define HNS3_L4_LEN_UNIT                        2UL

enum hns3_pkt_l2t_type {
        HNS3_L2_TYPE_UNICAST,
        HNS3_L2_TYPE_MULTICAST,
        HNS3_L2_TYPE_BROADCAST,
        HNS3_L2_TYPE_INVALID,
};

enum hns3_pkt_l3t_type {
        HNS3_L3T_NONE,
        HNS3_L3T_IPV6,
        HNS3_L3T_IPV4,
        HNS3_L3T_RESERVED
};

enum hns3_pkt_l4t_type {
        HNS3_L4T_UNKNOWN,
        HNS3_L4T_TCP,
        HNS3_L4T_UDP,
        HNS3_L4T_SCTP
};

enum hns3_pkt_ol3t_type {
        HNS3_OL3T_NONE,
        HNS3_OL3T_IPV6,
        HNS3_OL3T_IPV4_NO_CSUM,
        HNS3_OL3T_IPV4_CSUM
};

enum hns3_pkt_tun_type {
        HNS3_TUN_NONE,
        HNS3_TUN_MAC_IN_UDP,
        HNS3_TUN_NVGRE,
        HNS3_TUN_OTHER
};

/* Hardware descriptor (BD) format shared by the Tx and Rx rings. */
struct hns3_desc {
        union {
                uint64_t addr;
                struct {
                        uint32_t addr0;
                        uint32_t addr1;
                };
        };
        union {
                struct {
                        uint16_t vlan_tag;
                        uint16_t send_size;
                        union {
                                /*
                                 * L3T | L4T | L3CS | L4CS | VLAN | TSO |
                                 * L2_LEN
                                 */
                                uint32_t type_cs_vlan_tso_len;
                                struct {
                                        uint8_t type_cs_vlan_tso;
                                        uint8_t l2_len;
                                        uint8_t l3_len;
                                        uint8_t l4_len;
                                };
                        };
                        uint16_t outer_vlan_tag;
                        uint16_t tv;
                        union {
                                /* OL3T | OVLAN | MACSEC */
                                uint32_t ol_type_vlan_len_msec;
                                struct {
                                        uint8_t ol_type_vlan_msec;
                                        uint8_t ol2_len;
                                        uint8_t ol3_len;
                                        uint8_t ol4_len;
                                };
                        };

                        uint32_t paylen;
                        uint16_t tp_fe_sc_vld_ra_ri;
                        uint16_t mss;
                } tx;

                struct {
                        uint32_t l234_info;
                        uint16_t pkt_len;
                        uint16_t size;
                        uint32_t rss_hash;
                        uint16_t fd_id;
                        uint16_t vlan_tag;
                        union {
                                uint32_t ol_info;
                                struct {
                                        uint16_t o_dm_vlan_id_fb;
                                        uint16_t ot_vlan_tag;
                                };
                        };
                        uint32_t bd_base_info;
                } rx;
        };
} __rte_packed;

struct hns3_entry {
        struct rte_mbuf *mbuf;
};

struct hns3_rx_queue {
        void *io_base;
        volatile void *io_head_reg;
        struct hns3_adapter *hns;
        struct hns3_ptype_table *ptype_tbl;
        struct rte_mempool *mb_pool;
        struct hns3_desc *rx_ring;
        uint64_t rx_ring_phys_addr; /* RX ring DMA address */
        const struct rte_memzone *mz;
        struct hns3_entry *sw_ring;

        struct rte_mbuf *pkt_first_seg;
        struct rte_mbuf *pkt_last_seg;

        uint16_t queue_id;
        uint16_t port_id;
        uint16_t nb_rx_desc;
        uint16_t rx_buf_len;
        /*
         * Threshold for the number of BDs waiting to be passed back to
         * hardware. Once the number exceeds this threshold, the driver
         * returns these BDs to hardware.
         */
        uint16_t rx_free_thresh;
        uint16_t next_to_use;    /* index of next BD to be polled */
        uint16_t rx_free_hold;   /* number of BDs waiting to be passed to HW */

        /*
         * port based vlan configuration state.
         * value range: HNS3_PORT_BASE_VLAN_DISABLE / HNS3_PORT_BASE_VLAN_ENABLE
         */
        uint16_t pvid_state;

        /* 4 if DEV_RX_OFFLOAD_KEEP_CRC offload set, 0 otherwise */
        uint8_t crc_len;

        bool rx_deferred_start; /* don't start this queue in dev start */
        bool configured;        /* indicate if rx queue has been configured */

        uint64_t l2_errors;
        uint64_t pkt_len_errors;
        uint64_t l3_csum_errors;
        uint64_t l4_csum_errors;
        uint64_t ol3_csum_errors;
        uint64_t ol4_csum_errors;

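        /*
         * Per-queue mbuf cache: mbufs are bulk-allocated from mb_pool into
         * this array and then consumed one at a time when refilling Rx BDs
         * (inferred from the field names and HNS3_BULK_ALLOC_MBUF_NUM).
         */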
        struct rte_mbuf *bulk_mbuf[HNS3_BULK_ALLOC_MBUF_NUM];
        uint16_t bulk_mbuf_num;
};

struct hns3_tx_queue {
        void *io_base;
        struct hns3_adapter *hns;
        struct hns3_desc *tx_ring;
        uint64_t tx_ring_phys_addr; /* TX ring DMA address */
        const struct rte_memzone *mz;
        struct hns3_entry *sw_ring;

        uint16_t queue_id;
        uint16_t port_id;
        uint16_t nb_tx_desc;
        uint16_t next_to_clean;
        uint16_t next_to_use;
        uint16_t tx_bd_ready;

        /*
         * port based vlan configuration state.
         * value range: HNS3_PORT_BASE_VLAN_DISABLE / HNS3_PORT_BASE_VLAN_ENABLE
         */
        uint16_t pvid_state;

        /*
         * The minimum length of the packet supported by hardware in the Tx
         * direction.
         */
        uint32_t min_tx_pkt_len;

        bool tx_deferred_start; /* don't start this queue in dev start */
        bool configured;        /* indicate if tx queue has been configured */

        /*
         * The following fields are the abnormal error statistics of the Tx
         * datapath. When the application sends a burst of packets through the
         * rte_eth_tx_burst API, some abnormal conditions can prevent the
         * driver from programming the hardware to send them correctly.
         * Note: when the first abnormal error is detected within a burst, the
         * corresponding counter is increased by one and the transmit loop is
         * exited. Therefore, even if several packets in the same burst would
         * trigger the error, the counter is only increased by one for that
         * burst.
         * The Tx abnormal error statistics are as follows:
         * - over_length_pkt_cnt
         *     Total number of packets whose length exceeds HNS3_MAX_FRAME_LEN,
         *     the maximum frame length the driver supports.
         *
         * - exceed_limit_bd_pkt_cnt
         *     Total number of packets that need more BDs than the hardware
         *     limit allows.
         *
         * - exceed_limit_bd_reassem_fail
         *     Total number of packets that need more BDs than the hardware
         *     limit allows and for which reassembly into fewer BDs failed.
         *
         * - unsupported_tunnel_pkt_cnt
         *     Total number of packets with an unsupported tunnel type, i.e.
         *     vxlan_gpe, gtp, ipip and MPLS-in-UDP (a packet with an
         *     MPLS-in-UDP header as defined in RFC 7510).
         *
         * - queue_full_cnt
         *     Total number of times the number of available BDs in the queue
         *     was less than the number of BDs the packet required.
         *
         * - pkt_padding_fail_cnt
         *     Total number of packets shorter than the minimum packet length
         *     supported by hardware in the Tx direction (struct
         *     hns3_tx_queue::min_tx_pkt_len) that could not be zero-padded.
         */
        uint64_t over_length_pkt_cnt;
        uint64_t exceed_limit_bd_pkt_cnt;
        uint64_t exceed_limit_bd_reassem_fail;
        uint64_t unsupported_tunnel_pkt_cnt;
        uint64_t queue_full_cnt;
        uint64_t pkt_padding_fail_cnt;
};

struct hns3_queue_info {
        const char *type;   /* points to the queue memory name */
        const char *ring_name;  /* points to the hardware ring name */
        uint16_t idx;
        uint16_t nb_desc;
        unsigned int socket_id;
};

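/*
 * mbuf ol_flags bits that indicate checksum or tunnel offload work for the
 * Tx path to handle.
 */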
#define HNS3_TX_CKSUM_OFFLOAD_MASK ( \
        PKT_TX_OUTER_IPV6 | \
        PKT_TX_OUTER_IPV4 | \
        PKT_TX_OUTER_IP_CKSUM | \
        PKT_TX_IPV6 | \
        PKT_TX_IPV4 | \
        PKT_TX_IP_CKSUM | \
        PKT_TX_L4_MASK | \
        PKT_TX_TUNNEL_MASK)

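/*
 * Bitmask values reported through the cksum_err output parameter of
 * hns3_handle_bdinfo().
 */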
enum hns3_cksum_status {
        HNS3_CKSUM_NONE = 0,
        HNS3_L3_CKSUM_ERR = 1,
        HNS3_L4_CKSUM_ERR = 2,
        HNS3_OUTER_L3_CKSUM_ERR = 4,
        HNS3_OUTER_L4_CKSUM_ERR = 8
};

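/*
 * Parse the bd_base_info and l234_info words of a completed Rx BD: reject
 * incomplete or errored packets (updating the per-queue error counters and
 * returning -EINVAL), set the checksum-bad ol_flags on the mbuf, and report
 * the detected checksum errors through *cksum_err as hns3_cksum_status bits.
 * Returns 0 when the packet can be delivered to the application.
 */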
static inline int
hns3_handle_bdinfo(struct hns3_rx_queue *rxq, struct rte_mbuf *rxm,
                   uint32_t bd_base_info, uint32_t l234_info,
                   uint32_t *cksum_err)
{
#define L2E_TRUNC_ERR_FLAG      (BIT(HNS3_RXD_L2E_B) | \
                                 BIT(HNS3_RXD_TRUNCATE_B))
#define CHECKSUM_ERR_FLAG       (BIT(HNS3_RXD_L3E_B) | \
                                 BIT(HNS3_RXD_L4E_B) | \
                                 BIT(HNS3_RXD_OL3E_B) | \
                                 BIT(HNS3_RXD_OL4E_B))

        uint32_t tmp = 0;

        /*
         * If the packet length is bigger than the MTU when receiving with the
         * non-scattered algorithm, the first n BDs do not have the FE bit set,
         * and this situation must be handled here. Note: no statistics counter
         * needs to be added because the last BD, which has the FE bit set,
         * also has the HNS3_RXD_L2E_B bit set.
         */
        if (unlikely((bd_base_info & BIT(HNS3_RXD_FE_B)) == 0))
                return -EINVAL;

        if (unlikely((l234_info & L2E_TRUNC_ERR_FLAG) || rxm->pkt_len == 0)) {
                if (l234_info & BIT(HNS3_RXD_L2E_B))
                        rxq->l2_errors++;
                else
                        rxq->pkt_len_errors++;
                return -EINVAL;
        }

        if (bd_base_info & BIT(HNS3_RXD_L3L4P_B)) {
                if (likely((l234_info & CHECKSUM_ERR_FLAG) == 0)) {
                        *cksum_err = 0;
                        return 0;
                }

                if (unlikely(l234_info & BIT(HNS3_RXD_L3E_B))) {
                        rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;
                        rxq->l3_csum_errors++;
                        tmp |= HNS3_L3_CKSUM_ERR;
                }

                if (unlikely(l234_info & BIT(HNS3_RXD_L4E_B))) {
                        rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
                        rxq->l4_csum_errors++;
                        tmp |= HNS3_L4_CKSUM_ERR;
                }

                if (unlikely(l234_info & BIT(HNS3_RXD_OL3E_B))) {
                        rxq->ol3_csum_errors++;
                        tmp |= HNS3_OUTER_L3_CKSUM_ERR;
                }

                if (unlikely(l234_info & BIT(HNS3_RXD_OL4E_B))) {
                        rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
                        rxq->ol4_csum_errors++;
                        tmp |= HNS3_OUTER_L4_CKSUM_ERR;
                }
        }
        *cksum_err = tmp;

        return 0;
}

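/*
 * Translate the checksum status reported by hns3_handle_bdinfo() into
 * PKT_RX_*_CKSUM_GOOD mbuf flags, taking the (tunneled or plain) packet type
 * into account.
 */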
static inline void
hns3_rx_set_cksum_flag(struct rte_mbuf *rxm, const uint64_t packet_type,
                       const uint32_t cksum_err)
{
        if (unlikely((packet_type & RTE_PTYPE_TUNNEL_MASK))) {
                if (likely(packet_type & RTE_PTYPE_INNER_L3_MASK) &&
                    (cksum_err & HNS3_L3_CKSUM_ERR) == 0)
                        rxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
                if (likely(packet_type & RTE_PTYPE_INNER_L4_MASK) &&
                    (cksum_err & HNS3_L4_CKSUM_ERR) == 0)
                        rxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
                if (likely(packet_type & RTE_PTYPE_L4_MASK) &&
                    (cksum_err & HNS3_OUTER_L4_CKSUM_ERR) == 0)
                        rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;
        } else {
                if (likely(packet_type & RTE_PTYPE_L3_MASK) &&
                    (cksum_err & HNS3_L3_CKSUM_ERR) == 0)
                        rxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
                if (likely(packet_type & RTE_PTYPE_L4_MASK) &&
                    (cksum_err & HNS3_L4_CKSUM_ERR) == 0)
                        rxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
        }
}

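/*
 * Build the RTE_PTYPE_* packet type of a received packet by looking up the
 * hardware L2/L3/L4 and outer L3/L4 ID fields in the ptype tables referenced
 * by the queue. A non-zero outer L4 table entry indicates a recognized
 * tunnel, in which case the inner tables are combined with the outer ones.
 */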
static inline uint32_t
hns3_rx_calc_ptype(struct hns3_rx_queue *rxq, const uint32_t l234_info,
                   const uint32_t ol_info)
{
        const struct hns3_ptype_table *const ptype_tbl = rxq->ptype_tbl;
        uint32_t l2id, l3id, l4id;
        uint32_t ol3id, ol4id;

        ol4id = hns3_get_field(ol_info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
        ol3id = hns3_get_field(ol_info, HNS3_RXD_OL3ID_M, HNS3_RXD_OL3ID_S);
        l2id = hns3_get_field(l234_info, HNS3_RXD_STRP_TAGP_M,
                              HNS3_RXD_STRP_TAGP_S);
        l3id = hns3_get_field(l234_info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S);
        l4id = hns3_get_field(l234_info, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S);

        if (unlikely(ptype_tbl->ol4table[ol4id]))
                return ptype_tbl->inner_l2table[l2id] |
                        ptype_tbl->inner_l3table[l3id] |
                        ptype_tbl->inner_l4table[l4id] |
                        ptype_tbl->ol3table[ol3id] | ptype_tbl->ol4table[ol4id];
        else
                return ptype_tbl->l2table[l2id] | ptype_tbl->l3table[l3id] |
                        ptype_tbl->l4table[l4id];
}

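/*
 * Illustrative sketch (not part of the driver): how the inline helpers above
 * are typically composed for one completed descriptor in an Rx burst routine.
 * The variable names are hypothetical and error/cleanup handling is omitted.
 *
 *      uint32_t cksum_err;
 *
 *      if (hns3_handle_bdinfo(rxq, rxm, bd_base_info, l234_info,
 *                             &cksum_err)) {
 *              rte_pktmbuf_free(rxm);  // drop errored or incomplete packet
 *              continue;
 *      }
 *      rxm->packet_type = hns3_rx_calc_ptype(rxq, l234_info, ol_info);
 *      if (bd_base_info & BIT(HNS3_RXD_L3L4P_B))
 *              hns3_rx_set_cksum_flag(rxm, rxm->packet_type, cksum_err);
 */
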
void hns3_dev_rx_queue_release(void *queue);
void hns3_dev_tx_queue_release(void *queue);
void hns3_free_all_queues(struct rte_eth_dev *dev);
int hns3_reset_all_queues(struct hns3_adapter *hns);
void hns3_dev_all_rx_queue_intr_enable(struct hns3_hw *hw, bool en);
int hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
int hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
void hns3_enable_all_queues(struct hns3_hw *hw, bool en);
int hns3_start_queues(struct hns3_adapter *hns, bool reset_queue);
int hns3_stop_queues(struct hns3_adapter *hns, bool reset_queue);
void hns3_dev_release_mbufs(struct hns3_adapter *hns);
int hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
                        unsigned int socket, const struct rte_eth_rxconf *conf,
                        struct rte_mempool *mp);
int hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
                        unsigned int socket, const struct rte_eth_txconf *conf);
uint16_t hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                        uint16_t nb_pkts);
uint16_t hns3_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                                  uint16_t nb_pkts);
int hns3_rx_burst_mode_get(struct rte_eth_dev *dev,
                           __rte_unused uint16_t queue_id,
                           struct rte_eth_burst_mode *mode);
uint16_t hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
                        uint16_t nb_pkts);
uint16_t hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        uint16_t nb_pkts);
const uint32_t *hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
void hns3_init_rx_ptype_tble(struct rte_eth_dev *dev);
void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev);
void hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
                            uint8_t gl_idx, uint16_t gl_value);
void hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id,
                            uint16_t rl_value);
void hns3_set_queue_intr_ql(struct hns3_hw *hw, uint16_t queue_id,
                            uint16_t ql_value);
int hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
                                  uint16_t nb_tx_q);
int hns3_config_gro(struct hns3_hw *hw, bool en);
int hns3_restore_gro_conf(struct hns3_hw *hw);
void hns3_update_all_queues_pvid_state(struct hns3_hw *hw);
void hns3_rx_scattered_reset(struct rte_eth_dev *dev);
void hns3_rx_scattered_calc(struct rte_eth_dev *dev);
void hns3_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
                       struct rte_eth_rxq_info *qinfo);
void hns3_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
                       struct rte_eth_txq_info *qinfo);
#endif /* _HNS3_RXTX_H_ */