1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2021 Broadcom
 */
8 #include "hsi_struct_def_dpdk.h"
/* Extract the TPA aggregation ID from a TPA-start completion record.
 * Pre-Thor variant: the ID shares the 16-bit agg_id word with other bits,
 * so it must be masked and shifted after byte-swapping from little-endian.
 */
10 #define BNXT_TPA_START_AGG_ID_PRE_TH(cmp) \
11 ((rte_le_to_cpu_16((cmp)->agg_id) & RX_TPA_START_CMPL_AGG_ID_MASK) >> \
12 RX_TPA_START_CMPL_AGG_ID_SFT)
/* Thor(-class) variant: the whole 16-bit agg_id field is the ID. */
14 #define BNXT_TPA_START_AGG_ID_TH(cmp) \
15 rte_le_to_cpu_16((cmp)->agg_id)
/* Return the aggregation ID for a TPA-start completion, choosing the
 * Thor (_TH) or pre-Thor (_PRE_TH) decode based on the adapter.
 * NOTE(review): the chip-type test and braces between these lines are not
 * visible in this fragment — presumably a BNXT_CHIP_* check on bp; confirm
 * against the full source.
 */
17 static inline uint16_t bnxt_tpa_start_agg_id(struct bnxt *bp,
18 struct rx_tpa_start_cmpl *cmp)
/* Thor-style decode: full 16-bit agg_id. */
21 return BNXT_TPA_START_AGG_ID_TH(cmp);
/* Legacy decode: masked/shifted agg_id field. */
23 return BNXT_TPA_START_AGG_ID_PRE_TH(cmp);
/* Number of aggregation buffers reported by a TPA-end completion
 * (masked/shifted out of the combined agg_bufs_v1 byte).
 */
26 #define BNXT_TPA_END_AGG_BUFS(cmp) \
27 (((cmp)->agg_bufs_v1 & RX_TPA_END_CMPL_AGG_BUFS_MASK) \
28 >> RX_TPA_END_CMPL_AGG_BUFS_SFT)
/* Thor variant of the agg-buffer count.
 * NOTE(review): this macro's replacement body is not visible in this
 * fragment — confirm against the full source.
 */
30 #define BNXT_TPA_END_AGG_BUFS_TH(cmp) \
33 #define BNXT_TPA_END_AGG_ID(cmp) \
34 (((cmp)->agg_id & RX_TPA_END_CMPL_AGG_ID_MASK) >> \
35 RX_TPA_END_CMPL_AGG_ID_SFT)
/* Thor variant: the whole 16-bit agg_id field is the aggregation ID. */
37 #define BNXT_TPA_END_AGG_ID_TH(cmp) \
38 rte_le_to_cpu_16((cmp)->agg_id)
/* Aggregation-buffer count carried in a plain (non-TPA) L2 Rx completion. */
40 #define BNXT_RX_L2_AGG_BUFS(cmp) \
41 (((cmp)->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK) >> \
42 RX_PKT_CMPL_AGG_BUFS_SFT)
44 /* Number of descriptors to process per inner loop in vector mode. */
45 #define BNXT_RX_DESCS_PER_LOOP_VEC128 4U /* SSE, Neon */
46 #define BNXT_RX_DESCS_PER_LOOP_VEC256 8U /* AVX2 */
48 /* Number of extra Rx mbuf ring entries to allocate for vector mode. */
49 #define BNXT_RX_EXTRA_MBUF_ENTRIES \
50 RTE_MAX(BNXT_RX_DESCS_PER_LOOP_VEC128, BNXT_RX_DESCS_PER_LOOP_VEC256)
/* Dimensions of the per-ring offload-flag lookup tables
 * (ol_flags_table / ol_flags_err_table in struct bnxt_rx_ring_info).
 */
52 #define BNXT_OL_FLAGS_TBL_DIM 64
53 #define BNXT_OL_FLAGS_ERR_TBL_DIM 32
/* Per-aggregation state for an in-progress TPA (LRO) reassembly.
 * NOTE(review): several fields of this struct are elided in this fragment;
 * only the members below are visible.
 */
55 struct bnxt_tpa_info {
/* Head mbuf that accumulates the aggregated packet. */
56 struct rte_mbuf *mbuf;
/* Buffered aggregation-buffer completions for this TPA session. */
59 struct rx_tpa_v2_abuf_cmpl agg_arr[TPA_MAX_NUM_SEGS];
/* Validity bits cached from the completion record. */
66 uint8_t cfa_code_valid:1;
67 uint8_t l4_csum_valid:1;
/* Software state for one Rx ring pair: the packet (rx_*) ring and the
 * aggregation (ag_*) ring, plus TPA bookkeeping and offload-flag tables.
 * NOTE(review): some members and the closing brace are elided in this
 * fragment.
 */
70 struct bnxt_rx_ring_info {
73 uint16_t rx_cons; /* Needed for representor */
74 uint16_t rx_next_cons;
/* Doorbell info for the packet and aggregation rings. */
75 struct bnxt_db_info rx_db;
76 struct bnxt_db_info ag_db;
/* Hardware buffer descriptor rings. */
78 struct rx_prod_pkt_bd *rx_desc_ring;
79 struct rx_prod_pkt_bd *ag_desc_ring;
80 struct rte_mbuf **rx_buf_ring; /* sw ring */
81 struct rte_mbuf **ag_buf_ring; /* sw ring */
/* IOVA (bus) addresses of the descriptor rings, for programming HW. */
83 rte_iova_t rx_desc_mapping;
84 rte_iova_t ag_desc_mapping;
86 struct bnxt_ring *rx_ring_struct;
87 struct bnxt_ring *ag_ring_struct;
90 * To deal with out of order return from TPA, use free buffer indicator
92 struct rte_bitmap *ag_bitmap;
/* One bnxt_tpa_info per concurrent TPA aggregation. */
94 struct bnxt_tpa_info *tpa_info;
/* Lookup tables mapping completion bits to mbuf ol_flags. */
96 uint32_t ol_flags_table[BNXT_OL_FLAGS_TBL_DIM];
97 uint32_t ol_flags_err_table[BNXT_OL_FLAGS_ERR_TBL_DIM];
/* Scalar Rx burst entry point.
 * NOTE(review): the trailing nb_pkts parameter line is elided here.
 */
100 uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
/* Ring lifecycle and queue start/stop helpers (defined in bnxt_rxr.c). */
102 void bnxt_free_rx_rings(struct bnxt *bp);
103 int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id);
104 int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq);
105 int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
106 int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
107 int bnxt_flush_rx_cmp(struct bnxt_cp_ring_info *cpr);
/* Vector (SSE/Neon) Rx burst path, compiled only on x86/arm64.
 * NOTE(review): the matching #endif lines and trailing nb_pkts parameter
 * lines are elided in this fragment.
 */
109 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
110 uint16_t bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
112 int bnxt_rxq_vec_setup(struct bnxt_rx_queue *rxq);
/* AVX2 Rx burst path, gated on compiler AVX2 support. */
115 #if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
116 uint16_t bnxt_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
/* Store the completion's flow mark into the mbuf (defined in bnxt_rxr.c). */
119 void bnxt_set_mark_in_mbuf(struct bnxt *bp,
120 struct rx_pkt_cmpl_hi *rxcmp1,
121 struct rte_mbuf *mbuf);
/* Type and registered offset of the per-mbuf CFA-code dynamic field. */
123 typedef uint32_t bnxt_cfa_code_dynfield_t;
124 extern int bnxt_cfa_code_dynfield_offset;
/* Return a pointer to the CFA-code dynfield inside the given mbuf.
 * NOTE(review): the function's braces are elided in this fragment.
 */
126 static inline bnxt_cfa_code_dynfield_t *
127 bnxt_cfa_code_dynfield(struct rte_mbuf *mbuf)
129 return RTE_MBUF_DYNFIELD(mbuf,
130 bnxt_cfa_code_dynfield_offset, bnxt_cfa_code_dynfield_t *);
/* Bit layout constants for decoding the CFA (flow) metadata word carried
 * in Rx completions. Values are hardware-defined; do not change them
 * without the matching HSI documentation.
 */
133 #define BNXT_RX_META_CFA_CODE_SHIFT 19
134 #define BNXT_CFA_CODE_META_SHIFT 16
135 #define BNXT_RX_META_CFA_CODE_INT_ACT_REC_BIT 0x8000000
136 #define BNXT_RX_META_CFA_CODE_EEM_BIT 0x4000000
137 #define BNXT_CFA_META_FMT_MASK 0x70
138 #define BNXT_CFA_META_FMT_SHFT 4
139 #define BNXT_CFA_META_FMT_EM_EEM_SHFT 1
140 #define BNXT_CFA_META_FMT_EEM 3
141 #define BNXT_CFA_META_EEM_TCAM_SHIFT 31
/* True when the EEM/TCAM indicator (top bit) is set in the metadata. */
142 #define BNXT_CFA_META_EM_TEST(x) ((x) >> BNXT_CFA_META_EEM_TCAM_SHIFT)
144 /* Definitions for translation of hardware packet type to mbuf ptype. */
145 #define BNXT_PTYPE_TBL_DIM 128
/* The 7-bit table index is built from three completion-derived fields:
 * bit 0 = tunneled, bit 1 = IP version, bit 2 = VLAN, bits 3-6 = itype.
 */
146 #define BNXT_PTYPE_TBL_TUN_SFT 0 /* Set if tunneled packet. */
147 #define BNXT_PTYPE_TBL_TUN_MSK BIT(BNXT_PTYPE_TBL_TUN_SFT)
148 #define BNXT_PTYPE_TBL_IP_VER_SFT 1 /* Set if IPv6, clear if IPv4. */
149 #define BNXT_PTYPE_TBL_IP_VER_MSK BIT(BNXT_PTYPE_TBL_IP_VER_SFT)
150 #define BNXT_PTYPE_TBL_VLAN_SFT 2 /* Set if VLAN encapsulated. */
151 #define BNXT_PTYPE_TBL_VLAN_MSK BIT(BNXT_PTYPE_TBL_VLAN_SFT)
152 #define BNXT_PTYPE_TBL_TYPE_SFT 3 /* Hardware packet type field. */
153 #define BNXT_PTYPE_TBL_TYPE_MSK 0x78 /* Hardware itype field mask. */
/* Hardware itype values of interest, pre-shift. */
154 #define BNXT_PTYPE_TBL_TYPE_IP 1
155 #define BNXT_PTYPE_TBL_TYPE_TCP 2
156 #define BNXT_PTYPE_TBL_TYPE_UDP 3
157 #define BNXT_PTYPE_TBL_TYPE_ICMP 7
/* Shift of the IP-type bit within the completion flags2 word. */
159 #define RX_PKT_CMPL_FLAGS2_IP_TYPE_SFT 8
/* VLAN-metadata and tunnel-checksum bits, tested together. */
160 #define CMPL_FLAGS2_VLAN_TUN_MSK \
161 (RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN | RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)
/* Map the completion itype field into bits 3-6 of the ptype table index. */
163 #define BNXT_CMPL_ITYPE_TO_IDX(ft) \
164 (((ft) & RX_PKT_CMPL_FLAGS_ITYPE_MASK) >> \
165 (RX_PKT_CMPL_FLAGS_ITYPE_SFT - BNXT_PTYPE_TBL_TYPE_SFT))
/* Map the VLAN/tunnel flags2 bits into bits 1-2 of the table index. */
167 #define BNXT_CMPL_VLAN_TUN_TO_IDX(f2) \
168 (((f2) & CMPL_FLAGS2_VLAN_TUN_MSK) >> \
169 (RX_PKT_CMPL_FLAGS2_META_FORMAT_SFT - BNXT_PTYPE_TBL_VLAN_SFT))
/* Map the IP-version flags2 bit into bit 1 of the table index. */
171 #define BNXT_CMPL_IP_VER_TO_IDX(f2) \
172 (((f2) & RX_PKT_CMPL_FLAGS2_IP_TYPE) >> \
173 (RX_PKT_CMPL_FLAGS2_IP_TYPE_SFT - BNXT_PTYPE_TBL_IP_VER_SFT))
/* Compile-time checks that the *_TO_IDX macros land each completion field
 * exactly on its slot in the ptype table index.
 * NOTE(review): the return-type line and braces are elided in this
 * fragment.
 */
176 bnxt_check_ptype_constants(void)
178 RTE_BUILD_BUG_ON(BNXT_CMPL_ITYPE_TO_IDX(RX_PKT_CMPL_FLAGS_ITYPE_MASK) !=
179 BNXT_PTYPE_TBL_TYPE_MSK);
180 RTE_BUILD_BUG_ON(BNXT_CMPL_VLAN_TUN_TO_IDX(CMPL_FLAGS2_VLAN_TUN_MSK) !=
181 (BNXT_PTYPE_TBL_VLAN_MSK | BNXT_PTYPE_TBL_TUN_MSK));
182 RTE_BUILD_BUG_ON(BNXT_CMPL_IP_VER_TO_IDX(RX_PKT_CMPL_FLAGS2_IP_TYPE) !=
183 BNXT_PTYPE_TBL_IP_VER_MSK);
/* Precomputed index -> RTE_PTYPE_* lookup table (defined in bnxt_rxr.c). */
186 extern uint32_t bnxt_ptype_table[BNXT_PTYPE_TBL_DIM];
/* Copy the VLAN TCI (VID | DE | PRI) from the completion metadata into
 * mbuf->vlan_tci.
 * NOTE(review): the function's braces are elided in this fragment.
 */
188 static inline void bnxt_set_vlan(struct rx_pkt_cmpl_hi *rxcmp1,
189 struct rte_mbuf *mbuf)
191 uint32_t metadata = rte_le_to_cpu_32(rxcmp1->metadata);
193 mbuf->vlan_tci = metadata & (RX_PKT_CMPL_METADATA_VID_MASK |
194 RX_PKT_CMPL_METADATA_DE |
195 RX_PKT_CMPL_METADATA_PRI_MASK);
198 /* Stingray2 specific code for RX completion parsing */
/* True when the v2 completion carries valid VLAN metadata. */
199 #define RX_CMP_VLAN_VALID(rxcmp) \
200 (((struct rx_pkt_v2_cmpl *)rxcmp)->metadata1_payload_offset & \
201 RX_PKT_V2_CMPL_METADATA1_VALID)
/* VLAN TCI (VID | DE | PRI) from the v2 completion's metadata0 word. */
203 #define RX_CMP_METADATA0_VID(rxcmp1) \
204 ((((struct rx_pkt_v2_cmpl_hi *)rxcmp1)->metadata0) & \
205 (RX_PKT_V2_CMPL_HI_METADATA0_VID_MASK | \
206 RX_PKT_V2_CMPL_HI_METADATA0_DE | \
207 RX_PKT_V2_CMPL_HI_METADATA0_PRI_MASK))
/* If the v2 completion reports a valid VLAN, store the TCI in the mbuf and
 * flag the VLAN as present and stripped.
 * NOTE(review): the function's braces are elided in this fragment.
 */
209 static inline void bnxt_rx_vlan_v2(struct rte_mbuf *mbuf,
210 struct rx_pkt_cmpl *rxcmp,
211 struct rx_pkt_cmpl_hi *rxcmp1)
213 if (RX_CMP_VLAN_VALID(rxcmp)) {
214 mbuf->vlan_tci = RX_CMP_METADATA0_VID(rxcmp1);
215 mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
/* Checksum status bits within the v2 completion flags2 word. */
219 #define RX_CMP_FLAGS2_CS_ALL_OK_MODE_MASK (0x1 << 3)
220 #define RX_CMP_FLAGS2_CS_OK_HDR_CNT_MASK (0x7 << 10)
221 #define RX_CMP_FLAGS2_IP_CSUM_ALL_OK_MASK (0x1 << 13)
222 #define RX_CMP_FLAGS2_L4_CSUM_ALL_OK_MASK (0x1 << 14)
/* Count of headers whose checksums passed ("cs_ok" field). */
224 #define RX_CMP_V2_CS_OK_HDR_CNT(flags) \
225 (((flags) & RX_CMP_FLAGS2_CS_OK_HDR_CNT_MASK) >> \
226 RX_PKT_V2_CMPL_HI_FLAGS2_CS_OK_SFT)
/* Nonzero when the device reports checksums in "all OK" summary mode. */
228 #define RX_CMP_V2_CS_ALL_OK_MODE(flags) \
229 (((flags) & RX_CMP_FLAGS2_CS_ALL_OK_MODE_MASK))
/* Per-layer checksum-OK subfields used when not in all-OK mode. */
231 #define RX_CMP_FLAGS2_L3_CS_OK_MASK (0x7 << 10)
232 #define RX_CMP_FLAGS2_L4_CS_OK_MASK (0x38 << 10)
233 #define RX_CMP_FLAGS2_L3_CS_OK_SFT 10
234 #define RX_CMP_FLAGS2_L4_CS_OK_SFT 13
236 #define RX_CMP_V2_L4_CS_OK(flags2) \
237 (((flags2) & RX_CMP_FLAGS2_L4_CS_OK_MASK) >> \
238 RX_CMP_FLAGS2_L4_CS_OK_SFT)
240 #define RX_CMP_V2_L3_CS_OK(flags2) \
241 (((flags2) & RX_CMP_FLAGS2_L3_CS_OK_MASK) >> \
242 RX_CMP_FLAGS2_L3_CS_OK_SFT)
/* Decode the v2 completion error field: each macro is true when the
 * packet-error subfield equals the corresponding checksum-error code.
 * Plain (L3/L4), tunnel (T_*) and outer-tunnel (OT_*) variants.
 */
244 #define RX_CMP_V2_L4_CS_ERR(err) \
245 (((err) & RX_PKT_V2_CMPL_HI_ERRORS_PKT_ERROR_MASK) == \
246 RX_PKT_V2_CMPL_HI_ERRORS_PKT_ERROR_L4_CS_ERROR)
248 #define RX_CMP_V2_L3_CS_ERR(err) \
249 (((err) & RX_PKT_V2_CMPL_HI_ERRORS_PKT_ERROR_MASK) == \
250 RX_PKT_V2_CMPL_HI_ERRORS_PKT_ERROR_IP_CS_ERROR)
252 #define RX_CMP_V2_T_IP_CS_ERR(err) \
253 (((err) & RX_PKT_V2_CMPL_HI_ERRORS_T_PKT_ERROR_MASK) == \
254 RX_PKT_V2_CMPL_HI_ERRORS_T_PKT_ERROR_T_IP_CS_ERROR)
256 #define RX_CMP_V2_T_L4_CS_ERR(err) \
257 (((err) & RX_PKT_V2_CMPL_HI_ERRORS_T_PKT_ERROR_MASK) == \
258 RX_PKT_V2_CMPL_HI_ERRORS_T_PKT_ERROR_T_L4_CS_ERROR)
260 #define RX_CMP_V2_OT_L4_CS_ERR(err) \
261 (((err) & RX_PKT_V2_CMPL_HI_ERRORS_OT_PKT_ERROR_MASK) == \
262 RX_PKT_V2_CMPL_HI_ERRORS_OT_PKT_ERROR_OT_L4_CS_ERROR)
/* Translate v2 completion checksum status/error bits into mbuf ol_flags
 * (RTE_MBUF_F_RX_{IP,L4,OUTER_L4}_CKSUM_*). Two decode paths: the "all OK"
 * summary mode, and the per-layer cs_ok counts otherwise.
 * NOTE(review): braces, else-branch keywords and several lines of the
 * control flow are elided in this fragment — the branch structure below is
 * partial; confirm against the full source.
 */
264 static inline void bnxt_parse_csum_v2(struct rte_mbuf *mbuf,
265 struct rx_pkt_cmpl_hi *rxcmp1)
267 struct rx_pkt_v2_cmpl_hi *v2_cmp =
268 (struct rx_pkt_v2_cmpl_hi *)(rxcmp1);
269 uint16_t error_v2 = rte_le_to_cpu_16(v2_cmp->errors_v2);
270 uint32_t flags2 = rte_le_to_cpu_32(v2_cmp->flags2);
271 uint32_t hdr_cnt = 0, t_pkt = 0;
/* All-OK summary mode: a single bit per layer says "every header OK". */
273 if (RX_CMP_V2_CS_ALL_OK_MODE(flags2)) {
274 hdr_cnt = RX_CMP_V2_CS_OK_HDR_CNT(flags2);
/* L4: explicit error beats the all-OK bit; otherwise unknown. */
278 if (unlikely(RX_CMP_V2_L4_CS_ERR(error_v2)))
279 mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
280 else if (flags2 & RX_CMP_FLAGS2_L4_CSUM_ALL_OK_MASK)
281 mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
283 mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
/* IP: same precedence as the L4 decode above. */
285 if (unlikely(RX_CMP_V2_L3_CS_ERR(error_v2)))
286 mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
287 else if (flags2 & RX_CMP_FLAGS2_IP_CSUM_ALL_OK_MASK)
288 mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
290 mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
/* Non-summary mode: use the per-layer checksum-OK counts. */
292 hdr_cnt = RX_CMP_V2_L4_CS_OK(flags2);
296 if (RX_CMP_V2_L4_CS_OK(flags2))
297 mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
298 else if (RX_CMP_V2_L4_CS_ERR(error_v2))
299 mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
301 mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
303 if (RX_CMP_V2_L3_CS_OK(flags2))
304 mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
305 else if (RX_CMP_V2_L3_CS_ERR(error_v2))
306 mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
308 mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
/* Tunneled packets: report outer-L4 and tunnel-IP checksum errors. */
312 if (unlikely(RX_CMP_V2_OT_L4_CS_ERR(error_v2) ||
313 RX_CMP_V2_T_L4_CS_ERR(error_v2)))
314 mbuf->ol_flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
316 mbuf->ol_flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;
318 if (unlikely(RX_CMP_V2_T_IP_CS_ERR(error_v2)))
319 mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
/* Build mbuf->packet_type (RTE_PTYPE_* layers L2/L3/L4, outer or inner
 * depending on tunneling) from a v2 Rx completion.
 * NOTE(review): the return-type line, braces, else-branches and the
 * case/break structure of the switch are partially elided in this
 * fragment; confirm the full control flow against the source.
 */
324 bnxt_parse_pkt_type_v2(struct rte_mbuf *mbuf,
325 struct rx_pkt_cmpl *rxcmp,
326 struct rx_pkt_cmpl_hi *rxcmp1)
328 struct rx_pkt_v2_cmpl *v2_cmp =
329 (struct rx_pkt_v2_cmpl *)(rxcmp);
330 struct rx_pkt_v2_cmpl_hi *v2_cmp1 =
331 (struct rx_pkt_v2_cmpl_hi *)(rxcmp1);
/* Isolate the hardware itype field (kept little-endian for the switch). */
332 uint16_t flags_type = v2_cmp->flags_type &
333 rte_cpu_to_le_32(RX_PKT_V2_CMPL_FLAGS_ITYPE_MASK);
334 uint32_t flags2 = rte_le_to_cpu_32(v2_cmp1->flags2);
335 uint32_t l3, pkt_type = 0, vlan = 0;
336 uint32_t ip6 = 0, t_pkt = 0;
337 uint32_t hdr_cnt, csum_count;
/* Determine tunneling via the checksum header counts (mode-dependent). */
339 if (RX_CMP_V2_CS_ALL_OK_MODE(flags2)) {
340 hdr_cnt = RX_CMP_V2_CS_OK_HDR_CNT(flags2);
344 csum_count = RX_CMP_V2_L4_CS_OK(flags2);
/* L2: plain Ethernet, or Ethernet+VLAN when metadata is valid. */
349 vlan = !!RX_CMP_VLAN_VALID(rxcmp);
350 pkt_type |= vlan ? RTE_PTYPE_L2_ETHER_VLAN : RTE_PTYPE_L2_ETHER;
/* L3: choose IPv4/IPv6, outer or inner depending on tunneling. */
352 ip6 = !!(flags2 & RX_PKT_V2_CMPL_HI_FLAGS2_IP_TYPE);
355 l3 = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
356 else if (!t_pkt && ip6)
357 l3 = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
358 else if (t_pkt && !ip6)
359 l3 = RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
361 l3 = RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
/* L4: map the hardware itype to the matching RTE L4 ptype. */
363 switch (flags_type) {
364 case RTE_LE32(RX_PKT_V2_CMPL_FLAGS_ITYPE_ICMP):
366 pkt_type |= l3 | RTE_PTYPE_L4_ICMP;
368 pkt_type |= l3 | RTE_PTYPE_INNER_L4_ICMP;
370 case RTE_LE32(RX_PKT_V2_CMPL_FLAGS_ITYPE_TCP):
372 pkt_type |= l3 | RTE_PTYPE_L4_TCP;
374 pkt_type |= l3 | RTE_PTYPE_INNER_L4_TCP;
376 case RTE_LE32(RX_PKT_V2_CMPL_FLAGS_ITYPE_UDP):
378 pkt_type |= l3 | RTE_PTYPE_L4_UDP;
380 pkt_type |= l3 | RTE_PTYPE_INNER_L4_UDP;
382 case RTE_LE32(RX_PKT_V2_CMPL_FLAGS_ITYPE_IP):
387 mbuf->packet_type = pkt_type;
390 #endif /* _BNXT_RXR_H_ */