net/bnxt: fix ring and context memory allocation
drivers/net/bnxt/bnxt_rxr.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#ifndef _BNXT_RXR_H_
#define _BNXT_RXR_H_
#include "hsi_struct_def_dpdk.h"

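/*
 * Extract the TPA aggregation ID from a TPA start completion.
 * Pre-P5 chips report the ID in a masked bit field; P5 (Thor)
 * chips report the full 16-bit agg_id field.
 */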
#define BNXT_TPA_START_AGG_ID_PRE_TH(cmp) \
        ((rte_le_to_cpu_16((cmp)->agg_id) & RX_TPA_START_CMPL_AGG_ID_MASK) >> \
         RX_TPA_START_CMPL_AGG_ID_SFT)

#define BNXT_TPA_START_AGG_ID_TH(cmp) \
        rte_le_to_cpu_16((cmp)->agg_id)

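/* Return the TPA start aggregation ID using the layout for the given chip. */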
static inline uint16_t bnxt_tpa_start_agg_id(struct bnxt *bp,
                                             struct rx_tpa_start_cmpl *cmp)
{
        if (BNXT_CHIP_P5(bp))
                return BNXT_TPA_START_AGG_ID_TH(cmp);
        else
                return BNXT_TPA_START_AGG_ID_PRE_TH(cmp);
}

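/*
 * TPA end completion accessors for the aggregation buffer count and
 * aggregation ID; the _TH variants use the P5 (Thor) completion layout.
 */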
#define BNXT_TPA_END_AGG_BUFS(cmp) \
        (((cmp)->agg_bufs_v1 & RX_TPA_END_CMPL_AGG_BUFS_MASK) \
         >> RX_TPA_END_CMPL_AGG_BUFS_SFT)

#define BNXT_TPA_END_AGG_BUFS_TH(cmp) \
        ((cmp)->tpa_agg_bufs)

#define BNXT_TPA_END_AGG_ID(cmp) \
        (((cmp)->agg_id & RX_TPA_END_CMPL_AGG_ID_MASK) >> \
         RX_TPA_END_CMPL_AGG_ID_SFT)

#define BNXT_TPA_END_AGG_ID_TH(cmp) \
        rte_le_to_cpu_16((cmp)->agg_id)

#define BNXT_RX_L2_AGG_BUFS(cmp) \
        (((cmp)->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK) >> \
         RX_PKT_CMPL_AGG_BUFS_SFT)

/* Number of descriptors to process per inner loop in vector mode. */
#define BNXT_RX_DESCS_PER_LOOP_VEC128   4U /* SSE, Neon */
#define BNXT_RX_DESCS_PER_LOOP_VEC256   8U /* AVX2 */

/* Number of extra Rx mbuf ring entries to allocate for vector mode. */
#define BNXT_RX_EXTRA_MBUF_ENTRIES \
        RTE_MAX(BNXT_RX_DESCS_PER_LOOP_VEC128, BNXT_RX_DESCS_PER_LOOP_VEC256)

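/* Dimensions of the per-ring ol_flags lookup tables. */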
#define BNXT_OL_FLAGS_TBL_DIM   64
#define BNXT_OL_FLAGS_ERR_TBL_DIM 32

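/* Reassembly state tracked for one in-progress TPA aggregation. */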
struct bnxt_tpa_info {
        struct rte_mbuf                 *mbuf;
        uint16_t                        len;
        uint32_t                        agg_count;
        struct rx_tpa_v2_abuf_cmpl      agg_arr[TPA_MAX_NUM_SEGS];

        uint32_t                        rss_hash;
        uint32_t                        vlan;
        uint16_t                        cfa_code;
        uint8_t                         hash_valid:1;
        uint8_t                         vlan_valid:1;
        uint8_t                         cfa_code_valid:1;
        uint8_t                         l4_csum_valid:1;
};

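/*
 * Rx ring bookkeeping: producer/consumer positions, doorbells, the Rx and
 * aggregation descriptor rings, and their software mbuf rings.
 */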
struct bnxt_rx_ring_info {
        uint16_t                rx_raw_prod;
        uint16_t                ag_raw_prod;
        uint16_t                rx_cons; /* Needed for representor */
        uint16_t                rx_next_cons;
        struct bnxt_db_info     rx_db;
        struct bnxt_db_info     ag_db;

        struct rx_prod_pkt_bd   *rx_desc_ring;
        struct rx_prod_pkt_bd   *ag_desc_ring;
        struct rte_mbuf         **rx_buf_ring; /* sw ring */
        struct rte_mbuf         **ag_buf_ring; /* sw ring */

        rte_iova_t              rx_desc_mapping;
        rte_iova_t              ag_desc_mapping;

        struct bnxt_ring        *rx_ring_struct;
        struct bnxt_ring        *ag_ring_struct;

        /*
         * To deal with out-of-order returns from TPA, use this bitmap as a
         * free-buffer indicator for aggregation ring entries.
         */
        struct rte_bitmap       *ag_bitmap;

        struct bnxt_tpa_info *tpa_info;

        uint32_t ol_flags_table[BNXT_OL_FLAGS_TBL_DIM];
        uint32_t ol_flags_err_table[BNXT_OL_FLAGS_ERR_TBL_DIM];
};

uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                        uint16_t nb_pkts);
void bnxt_free_rx_rings(struct bnxt *bp);
int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id);
int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq);
int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int bnxt_flush_rx_cmp(struct bnxt_cp_ring_info *cpr);

#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
uint16_t bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
                            uint16_t nb_pkts);
int bnxt_rxq_vec_setup(struct bnxt_rx_queue *rxq);
#endif

#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
uint16_t bnxt_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
                                 uint16_t nb_pkts);
#endif
void bnxt_set_mark_in_mbuf(struct bnxt *bp,
                           struct rx_pkt_cmpl_hi *rxcmp1,
                           struct rte_mbuf *mbuf);

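/* The CFA code for a received packet is stored in a dynamic mbuf field. */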
typedef uint32_t bnxt_cfa_code_dynfield_t;
extern int bnxt_cfa_code_dynfield_offset;

static inline bnxt_cfa_code_dynfield_t *
bnxt_cfa_code_dynfield(struct rte_mbuf *mbuf)
{
        return RTE_MBUF_DYNFIELD(mbuf,
                bnxt_cfa_code_dynfield_offset, bnxt_cfa_code_dynfield_t *);
}

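/* Bit layout of the CFA (flow) metadata returned in Rx completions. */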
#define BNXT_RX_META_CFA_CODE_SHIFT             19
#define BNXT_CFA_CODE_META_SHIFT                16
#define BNXT_RX_META_CFA_CODE_INT_ACT_REC_BIT   0x8000000
#define BNXT_RX_META_CFA_CODE_EEM_BIT           0x4000000
#define BNXT_CFA_META_FMT_MASK                  0x70
#define BNXT_CFA_META_FMT_SHFT                  4
#define BNXT_CFA_META_FMT_EM_EEM_SHFT           1
#define BNXT_CFA_META_FMT_EEM                   3
#define BNXT_CFA_META_EEM_TCAM_SHIFT            31
#define BNXT_CFA_META_EM_TEST(x) ((x) >> BNXT_CFA_META_EEM_TCAM_SHIFT)

/* Definitions for translation of hardware packet type to mbuf ptype. */
#define BNXT_PTYPE_TBL_DIM              128
#define BNXT_PTYPE_TBL_TUN_SFT          0 /* Set if tunneled packet. */
#define BNXT_PTYPE_TBL_TUN_MSK          BIT(BNXT_PTYPE_TBL_TUN_SFT)
#define BNXT_PTYPE_TBL_IP_VER_SFT       1 /* Set if IPv6, clear if IPv4. */
#define BNXT_PTYPE_TBL_IP_VER_MSK       BIT(BNXT_PTYPE_TBL_IP_VER_SFT)
#define BNXT_PTYPE_TBL_VLAN_SFT         2 /* Set if VLAN encapsulated. */
#define BNXT_PTYPE_TBL_VLAN_MSK         BIT(BNXT_PTYPE_TBL_VLAN_SFT)
#define BNXT_PTYPE_TBL_TYPE_SFT         3 /* Hardware packet type field. */
#define BNXT_PTYPE_TBL_TYPE_MSK         0x78 /* Hardware itype field mask. */
#define BNXT_PTYPE_TBL_TYPE_IP          1
#define BNXT_PTYPE_TBL_TYPE_TCP         2
#define BNXT_PTYPE_TBL_TYPE_UDP         3
#define BNXT_PTYPE_TBL_TYPE_ICMP        7

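/*
 * Helpers that convert completion flag fields into an index into
 * bnxt_ptype_table, matching the BNXT_PTYPE_TBL_* bit positions above.
 */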
#define RX_PKT_CMPL_FLAGS2_IP_TYPE_SFT  8
#define CMPL_FLAGS2_VLAN_TUN_MSK \
        (RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN | RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)

#define BNXT_CMPL_ITYPE_TO_IDX(ft) \
        (((ft) & RX_PKT_CMPL_FLAGS_ITYPE_MASK) >> \
         (RX_PKT_CMPL_FLAGS_ITYPE_SFT - BNXT_PTYPE_TBL_TYPE_SFT))

#define BNXT_CMPL_VLAN_TUN_TO_IDX(f2) \
        (((f2) & CMPL_FLAGS2_VLAN_TUN_MSK) >> \
         (RX_PKT_CMPL_FLAGS2_META_FORMAT_SFT - BNXT_PTYPE_TBL_VLAN_SFT))

#define BNXT_CMPL_IP_VER_TO_IDX(f2) \
        (((f2) & RX_PKT_CMPL_FLAGS2_IP_TYPE) >> \
         (RX_PKT_CMPL_FLAGS2_IP_TYPE_SFT - BNXT_PTYPE_TBL_IP_VER_SFT))

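/* Verify at build time that the index macros above match the table layout. */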
static inline void
bnxt_check_ptype_constants(void)
{
        RTE_BUILD_BUG_ON(BNXT_CMPL_ITYPE_TO_IDX(RX_PKT_CMPL_FLAGS_ITYPE_MASK) !=
                         BNXT_PTYPE_TBL_TYPE_MSK);
        RTE_BUILD_BUG_ON(BNXT_CMPL_VLAN_TUN_TO_IDX(CMPL_FLAGS2_VLAN_TUN_MSK) !=
                         (BNXT_PTYPE_TBL_VLAN_MSK | BNXT_PTYPE_TBL_TUN_MSK));
        RTE_BUILD_BUG_ON(BNXT_CMPL_IP_VER_TO_IDX(RX_PKT_CMPL_FLAGS2_IP_TYPE) !=
                         BNXT_PTYPE_TBL_IP_VER_MSK);
}

extern uint32_t bnxt_ptype_table[BNXT_PTYPE_TBL_DIM];

/* Stingray2 specific code for RX completion parsing */
#define RX_CMP_VLAN_VALID(rxcmp)        \
        (((struct rx_pkt_v2_cmpl *)rxcmp)->metadata1_payload_offset &   \
         RX_PKT_V2_CMPL_METADATA1_VALID)

#define RX_CMP_METADATA0_VID(rxcmp1)                            \
        ((((struct rx_pkt_v2_cmpl_hi *)rxcmp1)->metadata0) &    \
         (RX_PKT_V2_CMPL_HI_METADATA0_VID_MASK |                \
          RX_PKT_V2_CMPL_HI_METADATA0_DE |                      \
          RX_PKT_V2_CMPL_HI_METADATA0_PRI_MASK))

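/* Set the mbuf VLAN TCI and offload flags from a v2 Rx completion. */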
static inline void bnxt_rx_vlan_v2(struct rte_mbuf *mbuf,
                                   struct rx_pkt_cmpl *rxcmp,
                                   struct rx_pkt_cmpl_hi *rxcmp1)
{
        if (RX_CMP_VLAN_VALID(rxcmp)) {
                mbuf->vlan_tci = RX_CMP_METADATA0_VID(rxcmp1);
                mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
        }
}

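/*
 * Checksum status in the v2 completion is reported either in an "all OK"
 * summary mode or as per-header L3/L4 status fields.
 */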
#define RX_CMP_FLAGS2_CS_ALL_OK_MODE_MASK       (0x1 << 3)
#define RX_CMP_FLAGS2_CS_OK_HDR_CNT_MASK        (0x7 << 10)
#define RX_CMP_FLAGS2_IP_CSUM_ALL_OK_MASK       (0x1 << 13)
#define RX_CMP_FLAGS2_L4_CSUM_ALL_OK_MASK       (0x1 << 14)

#define RX_CMP_V2_CS_OK_HDR_CNT(flags)                          \
        (((flags) & RX_CMP_FLAGS2_CS_OK_HDR_CNT_MASK) >>        \
         RX_PKT_V2_CMPL_HI_FLAGS2_CS_OK_SFT)

#define RX_CMP_V2_CS_ALL_OK_MODE(flags)                         \
        (((flags) & RX_CMP_FLAGS2_CS_ALL_OK_MODE_MASK))

#define RX_CMP_FLAGS2_L3_CS_OK_MASK             (0x7 << 10)
#define RX_CMP_FLAGS2_L4_CS_OK_MASK             (0x38 << 10)
#define RX_CMP_FLAGS2_L3_CS_OK_SFT              10
#define RX_CMP_FLAGS2_L4_CS_OK_SFT              13

#define RX_CMP_V2_L4_CS_OK(flags2)                      \
        (((flags2) & RX_CMP_FLAGS2_L4_CS_OK_MASK) >>    \
         RX_CMP_FLAGS2_L4_CS_OK_SFT)

#define RX_CMP_V2_L3_CS_OK(flags2)                      \
        (((flags2) & RX_CMP_FLAGS2_L3_CS_OK_MASK) >>    \
         RX_CMP_FLAGS2_L3_CS_OK_SFT)

#define RX_CMP_V2_L4_CS_ERR(err)                                \
        (((err) & RX_PKT_V2_CMPL_HI_ERRORS_PKT_ERROR_MASK) ==   \
         RX_PKT_V2_CMPL_HI_ERRORS_PKT_ERROR_L4_CS_ERROR)

#define RX_CMP_V2_L3_CS_ERR(err)                                \
        (((err) & RX_PKT_V2_CMPL_HI_ERRORS_PKT_ERROR_MASK) ==   \
         RX_PKT_V2_CMPL_HI_ERRORS_PKT_ERROR_IP_CS_ERROR)

#define RX_CMP_V2_T_IP_CS_ERR(err)                              \
        (((err) & RX_PKT_V2_CMPL_HI_ERRORS_T_PKT_ERROR_MASK) == \
         RX_PKT_V2_CMPL_HI_ERRORS_T_PKT_ERROR_T_IP_CS_ERROR)

#define RX_CMP_V2_T_L4_CS_ERR(err)                              \
        (((err) & RX_PKT_V2_CMPL_HI_ERRORS_T_PKT_ERROR_MASK) == \
         RX_PKT_V2_CMPL_HI_ERRORS_T_PKT_ERROR_T_L4_CS_ERROR)

#define RX_CMP_V2_OT_L4_CS_ERR(err)                                     \
        (((err) & RX_PKT_V2_CMPL_HI_ERRORS_OT_PKT_ERROR_MASK) ==        \
         RX_PKT_V2_CMPL_HI_ERRORS_OT_PKT_ERROR_OT_L4_CS_ERROR)

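/*
 * Translate v2 completion checksum status into mbuf ol_flags. A checksummed
 * header count greater than one implies a tunneled packet, in which case the
 * outer L4 and tunnel IP checksum results are reported as well.
 */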
static inline void bnxt_parse_csum_v2(struct rte_mbuf *mbuf,
                                      struct rx_pkt_cmpl_hi *rxcmp1)
{
        struct rx_pkt_v2_cmpl_hi *v2_cmp =
                (struct rx_pkt_v2_cmpl_hi *)(rxcmp1);
        uint16_t error_v2 = rte_le_to_cpu_16(v2_cmp->errors_v2);
        uint32_t flags2 = rte_le_to_cpu_32(v2_cmp->flags2);
        uint32_t hdr_cnt = 0, t_pkt = 0;

        if (RX_CMP_V2_CS_ALL_OK_MODE(flags2)) {
                hdr_cnt = RX_CMP_V2_CS_OK_HDR_CNT(flags2);
                if (hdr_cnt > 1)
                        t_pkt = 1;

                if (unlikely(RX_CMP_V2_L4_CS_ERR(error_v2)))
                        mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
                else if (flags2 & RX_CMP_FLAGS2_L4_CSUM_ALL_OK_MASK)
                        mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
                else
                        mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;

                if (unlikely(RX_CMP_V2_L3_CS_ERR(error_v2)))
                        mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
                else if (flags2 & RX_CMP_FLAGS2_IP_CSUM_ALL_OK_MASK)
                        mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
                else
                        mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
        } else {
                hdr_cnt = RX_CMP_V2_L4_CS_OK(flags2);
                if (hdr_cnt > 1)
                        t_pkt = 1;

                if (RX_CMP_V2_L4_CS_OK(flags2))
                        mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
                else if (RX_CMP_V2_L4_CS_ERR(error_v2))
                        mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
                else
                        mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;

                if (RX_CMP_V2_L3_CS_OK(flags2))
                        mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
                else if (RX_CMP_V2_L3_CS_ERR(error_v2))
                        mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
                else
                        mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
        }

        if (t_pkt) {
                if (unlikely(RX_CMP_V2_OT_L4_CS_ERR(error_v2) ||
                             RX_CMP_V2_T_L4_CS_ERR(error_v2)))
                        mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
                else
                        mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;

                if (unlikely(RX_CMP_V2_T_IP_CS_ERR(error_v2)))
                        mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
        }
}

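/*
 * Derive the mbuf packet type from a v2 Rx completion, selecting inner
 * L3/L4 ptypes when the packet is tunneled.
 */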
static inline void
bnxt_parse_pkt_type_v2(struct rte_mbuf *mbuf,
                       struct rx_pkt_cmpl *rxcmp,
                       struct rx_pkt_cmpl_hi *rxcmp1)
{
        struct rx_pkt_v2_cmpl *v2_cmp =
                (struct rx_pkt_v2_cmpl *)(rxcmp);
        struct rx_pkt_v2_cmpl_hi *v2_cmp1 =
                (struct rx_pkt_v2_cmpl_hi *)(rxcmp1);
        uint16_t flags_type = v2_cmp->flags_type &
                rte_cpu_to_le_32(RX_PKT_V2_CMPL_FLAGS_ITYPE_MASK);
        uint32_t flags2 = rte_le_to_cpu_32(v2_cmp1->flags2);
        uint32_t l3, pkt_type = 0, vlan = 0;
        uint32_t ip6 = 0, t_pkt = 0;
        uint32_t hdr_cnt, csum_count;

        if (RX_CMP_V2_CS_ALL_OK_MODE(flags2)) {
                hdr_cnt = RX_CMP_V2_CS_OK_HDR_CNT(flags2);
                if (hdr_cnt > 1)
                        t_pkt = 1;
        } else {
                csum_count = RX_CMP_V2_L4_CS_OK(flags2);
                if (csum_count > 1)
                        t_pkt = 1;
        }

        vlan = !!RX_CMP_VLAN_VALID(rxcmp);
        pkt_type |= vlan ? RTE_PTYPE_L2_ETHER_VLAN : RTE_PTYPE_L2_ETHER;

        ip6 = !!(flags2 & RX_PKT_V2_CMPL_HI_FLAGS2_IP_TYPE);

        if (!t_pkt && !ip6)
                l3 = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
        else if (!t_pkt && ip6)
                l3 = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
        else if (t_pkt && !ip6)
                l3 = RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
        else
                l3 = RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;

        switch (flags_type) {
        case RTE_LE32(RX_PKT_V2_CMPL_FLAGS_ITYPE_ICMP):
                if (!t_pkt)
                        pkt_type |= l3 | RTE_PTYPE_L4_ICMP;
                else
                        pkt_type |= l3 | RTE_PTYPE_INNER_L4_ICMP;
                break;
        case RTE_LE32(RX_PKT_V2_CMPL_FLAGS_ITYPE_TCP):
                if (!t_pkt)
                        pkt_type |= l3 | RTE_PTYPE_L4_TCP;
                else
                        pkt_type |= l3 | RTE_PTYPE_INNER_L4_TCP;
                break;
        case RTE_LE32(RX_PKT_V2_CMPL_FLAGS_ITYPE_UDP):
                if (!t_pkt)
                        pkt_type |= l3 | RTE_PTYPE_L4_UDP;
                else
                        pkt_type |= l3 | RTE_PTYPE_INNER_L4_UDP;
                break;
        case RTE_LE32(RX_PKT_V2_CMPL_FLAGS_ITYPE_IP):
                pkt_type |= l3;
                break;
        }

        mbuf->packet_type = pkt_type;
}

#endif /* _BNXT_RXR_H_ */