/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#ifndef _BNXT_RXR_H_
#define _BNXT_RXR_H_
#include "hsi_struct_def_dpdk.h"

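/*
 * Extract the aggregation ID from a TPA start completion. Thor-based
 * (P5) chips use the full 16-bit agg_id field; earlier chips carry the
 * ID in a masked, shifted subfield.
 */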
#define BNXT_TPA_START_AGG_ID_PRE_TH(cmp) \
	((rte_le_to_cpu_16((cmp)->agg_id) & RX_TPA_START_CMPL_AGG_ID_MASK) >> \
	 RX_TPA_START_CMPL_AGG_ID_SFT)

#define BNXT_TPA_START_AGG_ID_TH(cmp) \
	rte_le_to_cpu_16((cmp)->agg_id)

static inline uint16_t bnxt_tpa_start_agg_id(struct bnxt *bp,
					     struct rx_tpa_start_cmpl *cmp)
{
	if (BNXT_CHIP_P5(bp))
		return BNXT_TPA_START_AGG_ID_TH(cmp);
	else
		return BNXT_TPA_START_AGG_ID_PRE_TH(cmp);
}

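/*
 * TPA end completion: aggregation buffer count and aggregation ID,
 * with _TH variants for the Thor-based (P5) completion layout.
 */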
#define BNXT_TPA_END_AGG_BUFS(cmp) \
	(((cmp)->agg_bufs_v1 & RX_TPA_END_CMPL_AGG_BUFS_MASK) \
	 >> RX_TPA_END_CMPL_AGG_BUFS_SFT)

#define BNXT_TPA_END_AGG_BUFS_TH(cmp) \
	((cmp)->tpa_agg_bufs)

#define BNXT_TPA_END_AGG_ID(cmp) \
	(((cmp)->agg_id & RX_TPA_END_CMPL_AGG_ID_MASK) >> \
	 RX_TPA_END_CMPL_AGG_ID_SFT)

#define BNXT_TPA_END_AGG_ID_TH(cmp) \
	rte_le_to_cpu_16((cmp)->agg_id)

#define BNXT_RX_L2_AGG_BUFS(cmp) \
	(((cmp)->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK) >> \
	 RX_PKT_CMPL_AGG_BUFS_SFT)

#define BNXT_RX_POST_THRESH	32

/* Number of descriptors to process per inner loop in vector mode. */
#define RTE_BNXT_DESCS_PER_LOOP		4U

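/* Dimensions of the per-ring ol_flags lookup tables used on the RX path. */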
#define BNXT_OL_FLAGS_TBL_DIM	64
#define BNXT_OL_FLAGS_ERR_TBL_DIM 32

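/* Per-aggregation reassembly state for an in-progress TPA (LRO) flow. */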
struct bnxt_tpa_info {
	struct rte_mbuf			*mbuf;
	uint16_t			len;
	uint32_t			agg_count;
	struct rx_tpa_v2_abuf_cmpl	agg_arr[TPA_MAX_NUM_SEGS];

	uint32_t			rss_hash;
	uint32_t			vlan;
	uint16_t			cfa_code;
	uint8_t				hash_valid:1;
	uint8_t				vlan_valid:1;
	uint8_t				cfa_code_valid:1;
	uint8_t				l4_csum_valid:1;
};

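/*
 * Receive ring state: producer/consumer indices, descriptor and
 * aggregation rings, doorbells, and per-ring TPA bookkeeping.
 */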
struct bnxt_rx_ring_info {
	uint16_t		rx_raw_prod;
	uint16_t		ag_raw_prod;
	uint16_t		rx_cons; /* Needed for representor */
	struct bnxt_db_info	rx_db;
	struct bnxt_db_info	ag_db;

	struct rx_prod_pkt_bd	*rx_desc_ring;
	struct rx_prod_pkt_bd	*ag_desc_ring;
	struct rte_mbuf		**rx_buf_ring; /* sw ring */
	struct rte_mbuf		**ag_buf_ring; /* sw ring */

	rte_iova_t		rx_desc_mapping;
	rte_iova_t		ag_desc_mapping;

	struct bnxt_ring	*rx_ring_struct;
	struct bnxt_ring	*ag_ring_struct;

	/*
	 * Free-buffer indicator bitmap, needed because TPA can return
	 * aggregation buffers out of order.
	 */
	struct rte_bitmap	*ag_bitmap;

	struct bnxt_tpa_info *tpa_info;

	uint32_t ol_flags_table[BNXT_OL_FLAGS_TBL_DIM];
	uint32_t ol_flags_err_table[BNXT_OL_FLAGS_ERR_TBL_DIM];
};

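/* Scalar RX burst function and RX ring setup/teardown entry points. */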
uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts);
void bnxt_free_rx_rings(struct bnxt *bp);
int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id);
int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq);
int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int bnxt_flush_rx_cmp(struct bnxt_cp_ring_info *cpr);

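/* Vector-mode RX entry points, compiled only for x86 and ARM64 targets. */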
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
uint16_t bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
			    uint16_t nb_pkts);
int bnxt_rxq_vec_setup(struct bnxt_rx_queue *rxq);
#endif

void bnxt_set_mark_in_mbuf(struct bnxt *bp,
			   struct rx_pkt_cmpl_hi *rxcmp1,
			   struct rte_mbuf *mbuf);

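/*
 * Dynamic mbuf field used to expose the CFA code from the RX completion
 * to applications.
 */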
typedef uint32_t bnxt_cfa_code_dynfield_t;
extern int bnxt_cfa_code_dynfield_offset;

static inline bnxt_cfa_code_dynfield_t *
bnxt_cfa_code_dynfield(struct rte_mbuf *mbuf)
{
	return RTE_MBUF_DYNFIELD(mbuf,
		bnxt_cfa_code_dynfield_offset, bnxt_cfa_code_dynfield_t *);
}

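/*
 * Illustrative usage sketch (not part of this header): once the dynfield
 * has been registered and bnxt_cfa_code_dynfield_offset holds a valid
 * (non-negative) offset, the RX path can store the completion's CFA code
 * directly in the mbuf:
 *
 *	if (bnxt_cfa_code_dynfield_offset >= 0)
 *		*bnxt_cfa_code_dynfield(mbuf) = cfa_code;
 */

/* Layout of the CFA metadata fields carried in the RX completion. */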
#define BNXT_RX_META_CFA_CODE_SHIFT		19
#define BNXT_CFA_CODE_META_SHIFT		16
#define BNXT_RX_META_CFA_CODE_INT_ACT_REC_BIT	0x8000000
#define BNXT_RX_META_CFA_CODE_EEM_BIT		0x4000000
#define BNXT_CFA_META_FMT_MASK			0x70
#define BNXT_CFA_META_FMT_SHFT			4
#define BNXT_CFA_META_FMT_EM_EEM_SHFT		1
#define BNXT_CFA_META_FMT_EEM			3
#define BNXT_CFA_META_EEM_TCAM_SHIFT		31
#define BNXT_CFA_META_EM_TEST(x) ((x) >> BNXT_CFA_META_EEM_TCAM_SHIFT)

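/* Lookup table mapping RX completion flag bits to mbuf packet types. */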
#define BNXT_PTYPE_TBL_DIM	128
extern uint32_t bnxt_ptype_table[BNXT_PTYPE_TBL_DIM];

/* Stingray2-specific code for RX completion parsing */
#define RX_CMP_VLAN_VALID(rxcmp)	\
	(((struct rx_pkt_v2_cmpl *)rxcmp)->metadata1_payload_offset &	\
	 RX_PKT_V2_CMPL_METADATA1_VALID)

#define RX_CMP_METADATA0_VID(rxcmp1)				\
	((((struct rx_pkt_v2_cmpl_hi *)rxcmp1)->metadata0) &	\
	 (RX_PKT_V2_CMPL_HI_METADATA0_VID_MASK |		\
	  RX_PKT_V2_CMPL_HI_METADATA0_DE |			\
	  RX_PKT_V2_CMPL_HI_METADATA0_PRI_MASK))

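/* Copy a stripped VLAN tag from the completion into the mbuf, if present. */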
static inline void bnxt_rx_vlan_v2(struct rte_mbuf *mbuf,
				   struct rx_pkt_cmpl *rxcmp,
				   struct rx_pkt_cmpl_hi *rxcmp1)
{
	if (RX_CMP_VLAN_VALID(rxcmp)) {
		mbuf->vlan_tci = RX_CMP_METADATA0_VID(rxcmp1);
		mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
	}
}

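/*
 * The v2 completion reports checksum status in one of two modes: an
 * "all OK" summary mode that also carries a checksummed-header count,
 * or individual per-layer L3/L4 OK bits. The macros below decode both.
 */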
#define RX_CMP_FLAGS2_CS_ALL_OK_MODE_MASK	(0x1 << 3)
#define RX_CMP_FLAGS2_CS_OK_HDR_CNT_MASK	(0x7 << 10)
#define RX_CMP_FLAGS2_IP_CSUM_ALL_OK_MASK	(0x1 << 13)
#define RX_CMP_FLAGS2_L4_CSUM_ALL_OK_MASK	(0x1 << 14)

#define RX_CMP_V2_CS_OK_HDR_CNT(flags)				\
	(((flags) & RX_CMP_FLAGS2_CS_OK_HDR_CNT_MASK) >>	\
	 RX_PKT_V2_CMPL_HI_FLAGS2_CS_OK_SFT)

#define RX_CMP_V2_CS_ALL_OK_MODE(flags)				\
	((flags) & RX_CMP_FLAGS2_CS_ALL_OK_MODE_MASK)

#define RX_CMP_FLAGS2_L3_CS_OK_MASK		(0x7 << 10)
#define RX_CMP_FLAGS2_L4_CS_OK_MASK		(0x38 << 10)
#define RX_CMP_FLAGS2_L3_CS_OK_SFT		10
#define RX_CMP_FLAGS2_L4_CS_OK_SFT		13

#define RX_CMP_V2_L4_CS_OK(flags2)			\
	(((flags2) & RX_CMP_FLAGS2_L4_CS_OK_MASK) >>	\
	 RX_CMP_FLAGS2_L4_CS_OK_SFT)

#define RX_CMP_V2_L3_CS_OK(flags2)			\
	(((flags2) & RX_CMP_FLAGS2_L3_CS_OK_MASK) >>	\
	 RX_CMP_FLAGS2_L3_CS_OK_SFT)

#define RX_CMP_V2_L4_CS_ERR(err)				\
	(((err) & RX_PKT_V2_CMPL_HI_ERRORS_PKT_ERROR_MASK) ==	\
	 RX_PKT_V2_CMPL_HI_ERRORS_PKT_ERROR_L4_CS_ERROR)

#define RX_CMP_V2_L3_CS_ERR(err)				\
	(((err) & RX_PKT_V2_CMPL_HI_ERRORS_PKT_ERROR_MASK) ==	\
	 RX_PKT_V2_CMPL_HI_ERRORS_PKT_ERROR_IP_CS_ERROR)

#define RX_CMP_V2_T_IP_CS_ERR(err)				\
	(((err) & RX_PKT_V2_CMPL_HI_ERRORS_T_PKT_ERROR_MASK) ==	\
	 RX_PKT_V2_CMPL_HI_ERRORS_T_PKT_ERROR_T_IP_CS_ERROR)

#define RX_CMP_V2_T_L4_CS_ERR(err)				\
	(((err) & RX_PKT_V2_CMPL_HI_ERRORS_T_PKT_ERROR_MASK) ==	\
	 RX_PKT_V2_CMPL_HI_ERRORS_T_PKT_ERROR_T_L4_CS_ERROR)

#define RX_CMP_V2_OT_L4_CS_ERR(err)				\
	(((err) & RX_PKT_V2_CMPL_HI_ERRORS_OT_PKT_ERROR_MASK) ==	\
	 RX_PKT_V2_CMPL_HI_ERRORS_OT_PKT_ERROR_OT_L4_CS_ERROR)

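/*
 * Translate v2 completion checksum status into mbuf ol_flags. A decoded
 * header count greater than one indicates a tunnel packet, in which case
 * the tunnel and outer-tunnel error bits are checked as well.
 */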
static inline void bnxt_parse_csum_v2(struct rte_mbuf *mbuf,
				      struct rx_pkt_cmpl_hi *rxcmp1)
{
	struct rx_pkt_v2_cmpl_hi *v2_cmp =
		(struct rx_pkt_v2_cmpl_hi *)(rxcmp1);
	uint16_t error_v2 = rte_le_to_cpu_16(v2_cmp->errors_v2);
	uint32_t flags2 = rte_le_to_cpu_32(v2_cmp->flags2);
	uint32_t hdr_cnt = 0, t_pkt = 0;

	if (RX_CMP_V2_CS_ALL_OK_MODE(flags2)) {
		hdr_cnt = RX_CMP_V2_CS_OK_HDR_CNT(flags2);
		if (hdr_cnt > 1)
			t_pkt = 1;

		if (unlikely(RX_CMP_V2_L4_CS_ERR(error_v2)))
			mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
		else if (flags2 & RX_CMP_FLAGS2_L4_CSUM_ALL_OK_MASK)
			mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
		else
			mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;

		if (unlikely(RX_CMP_V2_L3_CS_ERR(error_v2)))
			mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
		else if (flags2 & RX_CMP_FLAGS2_IP_CSUM_ALL_OK_MASK)
			mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
		else
			mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
	} else {
		hdr_cnt = RX_CMP_V2_L4_CS_OK(flags2);
		if (hdr_cnt > 1)
			t_pkt = 1;

		if (RX_CMP_V2_L4_CS_OK(flags2))
			mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
		else if (RX_CMP_V2_L4_CS_ERR(error_v2))
			mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
		else
			mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;

		if (RX_CMP_V2_L3_CS_OK(flags2))
			mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
		else if (RX_CMP_V2_L3_CS_ERR(error_v2))
			mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
		else
			mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
	}

	if (t_pkt) {
		if (unlikely(RX_CMP_V2_OT_L4_CS_ERR(error_v2) ||
			     RX_CMP_V2_T_L4_CS_ERR(error_v2)))
			mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
		else
			mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;

		if (unlikely(RX_CMP_V2_T_IP_CS_ERR(error_v2)))
			mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
	}
}

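/*
 * Derive the mbuf packet type (VLAN, L3, L4, outer vs. inner) from a v2
 * RX completion. Tunnel packets are detected the same way as in
 * bnxt_parse_csum_v2, from the decoded header count.
 */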
static inline void
bnxt_parse_pkt_type_v2(struct rte_mbuf *mbuf,
		       struct rx_pkt_cmpl *rxcmp,
		       struct rx_pkt_cmpl_hi *rxcmp1)
{
	struct rx_pkt_v2_cmpl *v2_cmp =
		(struct rx_pkt_v2_cmpl *)(rxcmp);
	struct rx_pkt_v2_cmpl_hi *v2_cmp1 =
		(struct rx_pkt_v2_cmpl_hi *)(rxcmp1);
	/* flags_type is a 16-bit little-endian field; use 16-bit
	 * conversions so the itype match also works on big-endian hosts.
	 */
	uint16_t flags_type = v2_cmp->flags_type &
		rte_cpu_to_le_16(RX_PKT_V2_CMPL_FLAGS_ITYPE_MASK);
	uint32_t flags2 = rte_le_to_cpu_32(v2_cmp1->flags2);
	uint32_t l3, pkt_type = 0, vlan = 0;
	uint32_t ip6 = 0, t_pkt = 0;
	uint32_t hdr_cnt, csum_count;

	if (RX_CMP_V2_CS_ALL_OK_MODE(flags2)) {
		hdr_cnt = RX_CMP_V2_CS_OK_HDR_CNT(flags2);
		if (hdr_cnt > 1)
			t_pkt = 1;
	} else {
		csum_count = RX_CMP_V2_L4_CS_OK(flags2);
		if (csum_count > 1)
			t_pkt = 1;
	}

	vlan = !!RX_CMP_VLAN_VALID(rxcmp);
	pkt_type |= vlan ? RTE_PTYPE_L2_ETHER_VLAN : RTE_PTYPE_L2_ETHER;

	ip6 = !!(flags2 & RX_PKT_V2_CMPL_HI_FLAGS2_IP_TYPE);

	if (!t_pkt && !ip6)
		l3 = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
	else if (!t_pkt && ip6)
		l3 = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
	else if (t_pkt && !ip6)
		l3 = RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
	else
		l3 = RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;

	switch (flags_type) {
	case RTE_LE16(RX_PKT_V2_CMPL_FLAGS_ITYPE_ICMP):
		if (!t_pkt)
			pkt_type |= l3 | RTE_PTYPE_L4_ICMP;
		else
			pkt_type |= l3 | RTE_PTYPE_INNER_L4_ICMP;
		break;
	case RTE_LE16(RX_PKT_V2_CMPL_FLAGS_ITYPE_TCP):
		if (!t_pkt)
			pkt_type |= l3 | RTE_PTYPE_L4_TCP;
		else
			pkt_type |= l3 | RTE_PTYPE_INNER_L4_TCP;
		break;
	case RTE_LE16(RX_PKT_V2_CMPL_FLAGS_ITYPE_UDP):
		if (!t_pkt)
			pkt_type |= l3 | RTE_PTYPE_L4_UDP;
		else
			pkt_type |= l3 | RTE_PTYPE_INNER_L4_UDP;
		break;
	case RTE_LE16(RX_PKT_V2_CMPL_FLAGS_ITYPE_IP):
		pkt_type |= l3;
		break;
	}

	mbuf->packet_type = pkt_type;
}

#endif /* _BNXT_RXR_H_ */