1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2018 Broadcom
9 #include <rte_bitmap.h>
10 #include <rte_byteorder.h>
11 #include <rte_malloc.h>
12 #include <rte_memory.h>
15 #include "bnxt_reps.h"
16 #include "bnxt_ring.h"
19 #include "hsi_struct_def_dpdk.h"
20 #ifdef RTE_LIBRTE_IEEE1588
21 #include "bnxt_hwrm.h"
24 #include <bnxt_tf_common.h>
25 #include <ulp_mark_mgr.h>
31 static inline struct rte_mbuf *__bnxt_alloc_rx_data(struct rte_mempool *mb)
33 struct rte_mbuf *data;
35 data = rte_mbuf_raw_alloc(mb);
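/*
 * Allocate a fresh mbuf for the Rx ring slot addressed by the free-running
 * producer index: RING_IDX() masks the raw index down to a ring offset, the
 * mbuf pointer is stored in the software ring, and its default data IOVA is
 * programmed into the hardware buffer descriptor.
 */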
40 static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
41 struct bnxt_rx_ring_info *rxr,
44 uint16_t prod = RING_IDX(rxr->rx_ring_struct, raw_prod);
45 struct rx_prod_pkt_bd *rxbd;
46 struct rte_mbuf **rx_buf;
47 struct rte_mbuf *mbuf;
49 rxbd = &rxr->rx_desc_ring[prod];
50 rx_buf = &rxr->rx_buf_ring[prod];
51 mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
53 rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
58 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
60 rxbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
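/*
 * Same allocation scheme for the aggregation (AG) ring: scattered Rx and
 * LRO/TPA payloads land in these buffers, so each AG slot likewise gets a
 * fresh mbuf with its IOVA written into the AG buffer descriptor.
 */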
65 static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
66 struct bnxt_rx_ring_info *rxr,
69 uint16_t prod = RING_IDX(rxr->ag_ring_struct, raw_prod);
70 struct rx_prod_pkt_bd *rxbd;
71 struct rte_mbuf **rx_buf;
72 struct rte_mbuf *mbuf;
74 rxbd = &rxr->ag_desc_ring[prod];
75 rx_buf = &rxr->ag_buf_ring[prod];
77 PMD_DRV_LOG(ERR, "Jumbo Frame. rxbd is NULL\n");
82 PMD_DRV_LOG(ERR, "Jumbo Frame. rx_buf is NULL\n");
86 mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
88 rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
93 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
95 rxbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
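/*
 * Re-post an already-allocated mbuf at the next Rx producer slot instead of
 * allocating a new one. Used when a consumed buffer can be recycled rather
 * than freed, e.g. on TPA start or when a packet is dropped for L2 errors.
 */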
100 static inline void bnxt_reuse_rx_mbuf(struct bnxt_rx_ring_info *rxr,
101 struct rte_mbuf *mbuf)
103 uint16_t prod, raw_prod = RING_NEXT(rxr->rx_raw_prod);
104 struct rte_mbuf **prod_rx_buf;
105 struct rx_prod_pkt_bd *prod_bd;
107 prod = RING_IDX(rxr->rx_ring_struct, raw_prod);
108 prod_rx_buf = &rxr->rx_buf_ring[prod];
110 RTE_ASSERT(*prod_rx_buf == NULL);
111 RTE_ASSERT(mbuf != NULL);
115 prod_bd = &rxr->rx_desc_ring[prod];
117 prod_bd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
119 rxr->rx_raw_prod = raw_prod;
123 struct rte_mbuf *bnxt_consume_rx_buf(struct bnxt_rx_ring_info *rxr,
126 struct rte_mbuf **cons_rx_buf;
127 struct rte_mbuf *mbuf;
129 cons_rx_buf = &rxr->rx_buf_ring[RING_IDX(rxr->rx_ring_struct, cons)];
130 RTE_ASSERT(*cons_rx_buf != NULL);
137 static void bnxt_tpa_start(struct bnxt_rx_queue *rxq,
138 struct rx_tpa_start_cmpl *tpa_start,
139 struct rx_tpa_start_cmpl_hi *tpa_start1)
141 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
144 struct bnxt_tpa_info *tpa_info;
145 struct rte_mbuf *mbuf;
147 agg_id = bnxt_tpa_start_agg_id(rxq->bp, tpa_start);
149 data_cons = tpa_start->opaque;
150 tpa_info = &rxr->tpa_info[agg_id];
152 mbuf = bnxt_consume_rx_buf(rxr, data_cons);
154 bnxt_reuse_rx_mbuf(rxr, tpa_info->mbuf);
156 tpa_info->agg_count = 0;
157 tpa_info->mbuf = mbuf;
158 tpa_info->len = rte_le_to_cpu_32(tpa_start->len);
160 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
163 mbuf->pkt_len = rte_le_to_cpu_32(tpa_start->len);
164 mbuf->data_len = mbuf->pkt_len;
165 mbuf->port = rxq->port_id;
166 mbuf->ol_flags = PKT_RX_LRO;
167 if (likely(tpa_start->flags_type &
168 rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS_RSS_VALID))) {
169 mbuf->hash.rss = rte_le_to_cpu_32(tpa_start->rss_hash);
170 mbuf->ol_flags |= PKT_RX_RSS_HASH;
172 mbuf->hash.fdir.id = rte_le_to_cpu_16(tpa_start1->cfa_code);
173 mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
175 if (tpa_start1->flags2 &
176 rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN)) {
177 mbuf->vlan_tci = rte_le_to_cpu_32(tpa_start1->metadata);
178 mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
180 if (likely(tpa_start1->flags2 &
181 rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC)))
182 mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
184 /* recycle next mbuf */
185 data_cons = RING_NEXT(data_cons);
186 bnxt_reuse_rx_mbuf(rxr, bnxt_consume_rx_buf(rxr, data_cons));
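/*
 * Check whether all 'agg_bufs' aggregation completions that follow the
 * current entry are already present in the completion ring: advance the raw
 * consumer by agg_bufs and test the valid bit of the last expected entry.
 */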
189 static int bnxt_agg_bufs_valid(struct bnxt_cp_ring_info *cpr,
190 uint8_t agg_bufs, uint32_t raw_cp_cons)
192 uint16_t last_cp_cons;
193 struct rx_pkt_cmpl *agg_cmpl;
195 raw_cp_cons = ADV_RAW_CMP(raw_cp_cons, agg_bufs);
196 last_cp_cons = RING_CMP(cpr->cp_ring_struct, raw_cp_cons);
197 agg_cmpl = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[last_cp_cons];
198 cpr->valid = FLIP_VALID(raw_cp_cons,
199 cpr->cp_ring_struct->ring_mask,
201 return CMP_VALID(agg_cmpl, raw_cp_cons, cpr->cp_ring_struct);
204 /* TPA consumes aggregation buffers out of order; replenish only the contiguous run of freed slots. */
205 static int bnxt_prod_ag_mbuf(struct bnxt_rx_queue *rxq)
207 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
208 uint16_t raw_next = RING_NEXT(rxr->ag_raw_prod);
209 uint16_t bmap_next = RING_IDX(rxr->ag_ring_struct, raw_next);
211 /* TODO batch allocation for better performance */
212 while (rte_bitmap_get(rxr->ag_bitmap, bmap_next)) {
213 if (unlikely(bnxt_alloc_ag_data(rxq, rxr, raw_next))) {
214 PMD_DRV_LOG(ERR, "agg mbuf alloc failed: prod=0x%x\n",
218 rte_bitmap_clear(rxr->ag_bitmap, bmap_next);
219 rxr->ag_raw_prod = raw_next;
220 raw_next = RING_NEXT(raw_next);
221 bmap_next = RING_IDX(rxr->ag_ring_struct, raw_next);
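/*
 * Chain 'agg_buf' aggregation buffers onto the head mbuf. For Thor (P5) TPA
 * the aggregation completions were stashed in tpa_info->agg_arr by the burst
 * loop; otherwise they are read from the completion ring. Each consumed AG
 * slot is marked in the bitmap so it can be replenished later.
 */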
227 static int bnxt_rx_pages(struct bnxt_rx_queue *rxq,
228 struct rte_mbuf *mbuf, uint32_t *tmp_raw_cons,
229 uint8_t agg_buf, struct bnxt_tpa_info *tpa_info)
231 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
232 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
234 uint16_t cp_cons, ag_cons;
235 struct rx_pkt_cmpl *rxcmp;
236 struct rte_mbuf *last = mbuf;
237 bool is_p5_tpa = tpa_info && BNXT_CHIP_P5(rxq->bp);
239 for (i = 0; i < agg_buf; i++) {
240 struct rte_mbuf **ag_buf;
241 struct rte_mbuf *ag_mbuf;
244 rxcmp = (void *)&tpa_info->agg_arr[i];
246 *tmp_raw_cons = NEXT_RAW_CMP(*tmp_raw_cons);
247 cp_cons = RING_CMP(cpr->cp_ring_struct, *tmp_raw_cons);
248 rxcmp = (struct rx_pkt_cmpl *)
249 &cpr->cp_desc_ring[cp_cons];
253 bnxt_dump_cmpl(cp_cons, rxcmp);
256 ag_cons = rxcmp->opaque;
257 RTE_ASSERT(ag_cons <= rxr->ag_ring_struct->ring_mask);
258 ag_buf = &rxr->ag_buf_ring[ag_cons];
260 RTE_ASSERT(ag_mbuf != NULL);
262 ag_mbuf->data_len = rte_le_to_cpu_16(rxcmp->len);
265 mbuf->pkt_len += ag_mbuf->data_len;
267 last->next = ag_mbuf;
273 * Aggregation buffers are consumed out of order by the TPA module,
274 * so use a bitmap to track freed slots that still need to be reallocated and posted to the NIC.
277 rte_bitmap_set(rxr->ag_bitmap, ag_cons);
279 bnxt_prod_ag_mbuf(rxq);
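/*
 * TPA end: look up the aggregation context by agg_id, chain any remaining
 * aggregation buffers onto the accumulated mbuf, record the payload offset
 * in l4_len, and hand the context a fresh mbuf for the next aggregation.
 */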
283 static inline struct rte_mbuf *bnxt_tpa_end(
284 struct bnxt_rx_queue *rxq,
285 uint32_t *raw_cp_cons,
286 struct rx_tpa_end_cmpl *tpa_end,
287 struct rx_tpa_end_cmpl_hi *tpa_end1)
289 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
290 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
292 struct rte_mbuf *mbuf;
294 uint8_t payload_offset;
295 struct bnxt_tpa_info *tpa_info;
297 if (BNXT_CHIP_P5(rxq->bp)) {
298 struct rx_tpa_v2_end_cmpl *th_tpa_end;
299 struct rx_tpa_v2_end_cmpl_hi *th_tpa_end1;
301 th_tpa_end = (void *)tpa_end;
302 th_tpa_end1 = (void *)tpa_end1;
303 agg_id = BNXT_TPA_END_AGG_ID_TH(th_tpa_end);
304 agg_bufs = BNXT_TPA_END_AGG_BUFS_TH(th_tpa_end1);
305 payload_offset = th_tpa_end1->payload_offset;
307 agg_id = BNXT_TPA_END_AGG_ID(tpa_end);
308 agg_bufs = BNXT_TPA_END_AGG_BUFS(tpa_end);
309 if (!bnxt_agg_bufs_valid(cpr, agg_bufs, *raw_cp_cons))
311 payload_offset = tpa_end->payload_offset;
314 tpa_info = &rxr->tpa_info[agg_id];
315 mbuf = tpa_info->mbuf;
316 RTE_ASSERT(mbuf != NULL);
319 bnxt_rx_pages(rxq, mbuf, raw_cp_cons, agg_bufs, tpa_info);
321 mbuf->l4_len = payload_offset;
323 struct rte_mbuf *new_data = __bnxt_alloc_rx_data(rxq->mb_pool);
324 RTE_ASSERT(new_data != NULL);
326 rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
329 tpa_info->mbuf = new_data;
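/*
 * Packet-type lookup table, built once. The 7-bit index is the one computed
 * in bnxt_parse_pkt_type() below: bit 0 tunnel, bit 1 IPv6, bit 2 VLAN,
 * bits 3-6 ITYPE. For example, a non-tunnel IPv4 TCP packet without VLAN
 * resolves to RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
 * RTE_PTYPE_L4_TCP.
 */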
334 uint32_t bnxt_ptype_table[BNXT_PTYPE_TBL_DIM] __rte_cache_aligned;
336 static void __rte_cold
337 bnxt_init_ptype_table(void)
339 uint32_t *pt = bnxt_ptype_table;
340 static bool initialized;
348 for (i = 0; i < BNXT_PTYPE_TBL_DIM; i++) {
349 if (i & (RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN >> 2))
350 pt[i] = RTE_PTYPE_L2_ETHER_VLAN;
352 pt[i] = RTE_PTYPE_L2_ETHER;
354 ip6 = i & (RX_PKT_CMPL_FLAGS2_IP_TYPE >> 7);
355 tun = i & (RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC >> 2);
356 type = (i & 0x78) << 9;
359 l3 = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
360 else if (!tun && ip6)
361 l3 = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
362 else if (tun && !ip6)
363 l3 = RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
365 l3 = RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
368 case RX_PKT_CMPL_FLAGS_ITYPE_ICMP:
370 pt[i] |= l3 | RTE_PTYPE_INNER_L4_ICMP;
372 pt[i] |= l3 | RTE_PTYPE_L4_ICMP;
374 case RX_PKT_CMPL_FLAGS_ITYPE_TCP:
376 pt[i] |= l3 | RTE_PTYPE_INNER_L4_TCP;
378 pt[i] |= l3 | RTE_PTYPE_L4_TCP;
380 case RX_PKT_CMPL_FLAGS_ITYPE_UDP:
382 pt[i] |= l3 | RTE_PTYPE_INNER_L4_UDP;
384 pt[i] |= l3 | RTE_PTYPE_L4_UDP;
386 case RX_PKT_CMPL_FLAGS_ITYPE_IP:
395 bnxt_parse_pkt_type(struct rx_pkt_cmpl *rxcmp, struct rx_pkt_cmpl_hi *rxcmp1)
397 uint32_t flags_type, flags2;
400 flags_type = rte_le_to_cpu_16(rxcmp->flags_type);
401 flags2 = rte_le_to_cpu_32(rxcmp1->flags2);
405 * bit 0: RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC
406 * bit 1: RX_PKT_CMPL_FLAGS2_IP_TYPE
407 * bit 2: RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN
408 * bits 3-6: RX_PKT_CMPL_FLAGS_ITYPE
410 index = ((flags_type & RX_PKT_CMPL_FLAGS_ITYPE_MASK) >> 9) |
411 ((flags2 & (RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN |
412 RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)) >> 2) |
413 ((flags2 & RX_PKT_CMPL_FLAGS2_IP_TYPE) >> 7);
415 return bnxt_ptype_table[index];
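/*
 * Build the per-queue offload flag lookup tables. ol_flags_table is indexed
 * by the checksum-calculated/VLAN bits of flags2 plus a tunnel indicator
 * bit; ol_flags_err_table by the checksum error bits shifted down by 4,
 * again with a tunnel indicator folded in. For tunnel packets, the
 * DEV_RX_OFFLOAD_OUTER_* configuration selects how the tunnel (T_*) and
 * inner checksum results are mapped onto the mbuf flags.
 */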
418 static void __rte_cold
419 bnxt_init_ol_flags_tables(struct bnxt_rx_queue *rxq)
421 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
422 struct rte_eth_conf *dev_conf;
423 bool outer_cksum_enabled;
428 dev_conf = &rxq->bp->eth_dev->data->dev_conf;
429 offloads = dev_conf->rxmode.offloads;
431 outer_cksum_enabled = !!(offloads & (DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
432 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM));
434 /* Initialize ol_flags table. */
435 pt = rxr->ol_flags_table;
436 for (i = 0; i < BNXT_OL_FLAGS_TBL_DIM; i++) {
439 if (i & RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN)
440 pt[i] |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
442 if (i & (RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC << 3)) {
444 if (outer_cksum_enabled) {
445 if (i & RX_PKT_CMPL_FLAGS2_IP_CS_CALC)
446 pt[i] |= PKT_RX_IP_CKSUM_GOOD;
448 if (i & RX_PKT_CMPL_FLAGS2_L4_CS_CALC)
449 pt[i] |= PKT_RX_L4_CKSUM_GOOD;
451 if (i & RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)
452 pt[i] |= PKT_RX_OUTER_L4_CKSUM_GOOD;
454 if (i & RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)
455 pt[i] |= PKT_RX_IP_CKSUM_GOOD;
457 if (i & RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)
458 pt[i] |= PKT_RX_L4_CKSUM_GOOD;
461 /* Non-tunnel case. */
462 if (i & RX_PKT_CMPL_FLAGS2_IP_CS_CALC)
463 pt[i] |= PKT_RX_IP_CKSUM_GOOD;
465 if (i & RX_PKT_CMPL_FLAGS2_L4_CS_CALC)
466 pt[i] |= PKT_RX_L4_CKSUM_GOOD;
470 /* Initialize checksum error table. */
471 pt = rxr->ol_flags_err_table;
472 for (i = 0; i < BNXT_OL_FLAGS_ERR_TBL_DIM; i++) {
475 if (i & (RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC << 2)) {
477 if (outer_cksum_enabled) {
478 if (i & (RX_PKT_CMPL_ERRORS_IP_CS_ERROR >> 4))
479 pt[i] |= PKT_RX_IP_CKSUM_BAD;
481 if (i & (RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR >> 4))
482 pt[i] |= PKT_RX_EIP_CKSUM_BAD;
484 if (i & (RX_PKT_CMPL_ERRORS_L4_CS_ERROR >> 4))
485 pt[i] |= PKT_RX_L4_CKSUM_BAD;
487 if (i & (RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR >> 4))
488 pt[i] |= PKT_RX_OUTER_L4_CKSUM_BAD;
490 if (i & (RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR >> 4))
491 pt[i] |= PKT_RX_IP_CKSUM_BAD;
493 if (i & (RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR >> 4))
494 pt[i] |= PKT_RX_L4_CKSUM_BAD;
497 /* Non-tunnel case. */
498 if (i & (RX_PKT_CMPL_ERRORS_IP_CS_ERROR >> 4))
499 pt[i] |= PKT_RX_IP_CKSUM_BAD;
501 if (i & (RX_PKT_CMPL_ERRORS_L4_CS_ERROR >> 4))
502 pt[i] |= PKT_RX_L4_CKSUM_BAD;
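/*
 * Translate completion status into mbuf ol_flags via the two tables above.
 * The error bits are masked with the corresponding "checksum calculated"
 * bits so that only checksums the hardware actually computed can be
 * reported as bad; checksums that were not computed are left unknown.
 */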
508 bnxt_set_ol_flags(struct bnxt_rx_ring_info *rxr, struct rx_pkt_cmpl *rxcmp,
509 struct rx_pkt_cmpl_hi *rxcmp1, struct rte_mbuf *mbuf)
511 uint16_t flags_type, errors, flags;
514 flags_type = rte_le_to_cpu_16(rxcmp->flags_type);
516 flags = rte_le_to_cpu_32(rxcmp1->flags2) &
517 (RX_PKT_CMPL_FLAGS2_IP_CS_CALC |
518 RX_PKT_CMPL_FLAGS2_L4_CS_CALC |
519 RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC |
520 RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC |
521 RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN);
523 flags |= (flags & RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC) << 3;
524 errors = rte_le_to_cpu_16(rxcmp1->errors_v2) &
525 (RX_PKT_CMPL_ERRORS_IP_CS_ERROR |
526 RX_PKT_CMPL_ERRORS_L4_CS_ERROR |
527 RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR |
528 RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR);
529 errors = (errors >> 4) & flags;
531 ol_flags = rxr->ol_flags_table[flags & ~errors];
533 if (unlikely(errors)) {
534 errors |= (flags & RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC) << 2;
535 ol_flags |= rxr->ol_flags_err_table[errors];
538 if (flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) {
539 mbuf->hash.rss = rte_le_to_cpu_32(rxcmp->rss_hash);
540 ol_flags |= PKT_RX_RSS_HASH;
543 mbuf->ol_flags = ol_flags;
546 #ifdef RTE_LIBRTE_IEEE1588
548 bnxt_get_rx_ts_p5(struct bnxt *bp, uint32_t rx_ts_cmpl)
550 uint64_t systime_cycles = 0;
552 if (!BNXT_CHIP_P5(bp))
555 /* On Thor, Rx timestamps are provided directly in the
556 * Rx completion records to the driver. Only 32 bits of
557 * the timestamp are present in the completion. The driver
558 * reads the current 48-bit free-running timer using the
559 * HWRM_PORT_TS_QUERY command, then combines the upper 16 bits
560 * from the HWRM response with the lower 32 bits from the
561 * Rx completion to produce the 48-bit timestamp for the Rx packet.
563 bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME,
565 bp->ptp_cfg->rx_timestamp = (systime_cycles & 0xFFFF00000000);
566 bp->ptp_cfg->rx_timestamp |= rx_ts_cmpl;
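/*
 * TruFlow path: recover the flow identifier (LFID or GFID) from cfa_code
 * and metadata, then query the ULP mark manager for the mark to place in
 * mbuf->hash.fdir.hi. vfr_flag is set when the packet belongs to a port
 * representor and must not be counted against the parent Rx burst.
 */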
571 bnxt_ulp_set_mark_in_mbuf(struct bnxt *bp, struct rx_pkt_cmpl_hi *rxcmp1,
572 struct rte_mbuf *mbuf, uint32_t *vfr_flag)
580 uint32_t gfid_support = 0;
583 if (BNXT_GFID_ENABLED(bp))
586 cfa_code = rte_le_to_cpu_16(rxcmp1->cfa_code);
587 flags2 = rte_le_to_cpu_32(rxcmp1->flags2);
588 meta = rte_le_to_cpu_32(rxcmp1->metadata);
591 * The flags2 field carries extra information in bits [6:4]
592 * that indicates whether the flow hit the TCAM, EM, or EEM table.
594 meta_fmt = (flags2 & BNXT_CFA_META_FMT_MASK) >>
595 BNXT_CFA_META_FMT_SHFT;
600 /* Not an LFID or GFID, a flush cmd. */
603 /* LFID mode, no vlan scenario */
611 * Assume that EM doesn't support Mark due to GFID
612 * collisions with EEM. Simply return without setting the mark
615 if (BNXT_CFA_META_EM_TEST(meta)) {
616 /* This is an EM hit: {EM(1), GFID[27:16], 19'd0 or vtag} */
618 meta >>= BNXT_RX_META_CFA_CODE_SHIFT;
619 cfa_code |= meta << BNXT_CFA_CODE_META_SHIFT;
622 * It is a TCAM entry, so it is an LFID.
623 * The TCAM IDX and Mode can also be determined
624 * by decoding the meta_data. We are not
625 * using these for now.
631 /* EEM Case, only using gfid in EEM for now. */
635 * For EEM flows, the first part of cfa_code is 16 bits.
636 * The second part is embedded in the
637 * metadata field from bit 19 onwards. The driver needs to
638 * ignore the first 19 bits of metadata and use the next 12
639 * bits as the upper 12 bits of cfa_code.
641 meta >>= BNXT_RX_META_CFA_CODE_SHIFT;
642 cfa_code |= meta << BNXT_CFA_CODE_META_SHIFT;
645 /* For other values, the cfa_code is assumed to be an LFID. */
649 rc = ulp_mark_db_mark_get(bp->ulp_ctx, gfid,
650 cfa_code, vfr_flag, &mark_id);
652 /* VF-to-VFR Rx path, so skip mark_id injection into the mbuf */
653 if (vfr_flag && *vfr_flag)
655 /* Got the mark, write it to the mbuf and return */
656 mbuf->hash.fdir.hi = mark_id;
657 *bnxt_cfa_code_dynfield(mbuf) = cfa_code & 0xffffffffull;
658 mbuf->hash.fdir.id = rte_le_to_cpu_16(rxcmp1->cfa_code);
659 mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
664 mbuf->hash.fdir.hi = 0;
665 mbuf->hash.fdir.id = 0;
670 void bnxt_set_mark_in_mbuf(struct bnxt *bp,
671 struct rx_pkt_cmpl_hi *rxcmp1,
672 struct rte_mbuf *mbuf)
674 uint32_t cfa_code = 0;
675 uint8_t meta_fmt = 0;
679 cfa_code = rte_le_to_cpu_16(rxcmp1->cfa_code);
683 if (cfa_code && !bp->mark_table[cfa_code].valid)
686 flags2 = rte_le_to_cpu_32(rxcmp1->flags2);
687 meta = rte_le_to_cpu_32(rxcmp1->metadata);
689 meta >>= BNXT_RX_META_CFA_CODE_SHIFT;
691 /* The flags2 field carries extra information in bits [6:4]
692 * that indicates whether the flow hit the TCAM, EM, or EEM table.
694 meta_fmt = (flags2 & BNXT_CFA_META_FMT_MASK) >>
695 BNXT_CFA_META_FMT_SHFT;
697 /* meta_fmt == 4 => 'b100 => 'b10x => EM.
698 * meta_fmt == 5 => 'b101 => 'b10x => EM + VLAN
699 * meta_fmt == 6 => 'b110 => 'b11x => EEM
700 * meta_fmt == 7 => 'b111 => 'b11x => EEM + VLAN.
702 meta_fmt >>= BNXT_CFA_META_FMT_EM_EEM_SHFT;
705 mbuf->hash.fdir.hi = bp->mark_table[cfa_code].mark_id;
706 mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
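/*
 * Process one Rx completion. Most packets use a pair of completion records
 * (rxcmp followed by rxcmp1); TPA start/end and Thor per-aggregation
 * completions are handled separately. A negative return value means the
 * entry did not produce a packet for the caller (TPA start, aggregation-only
 * completions, errors, or representor traffic).
 */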
709 static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
710 struct bnxt_rx_queue *rxq, uint32_t *raw_cons)
712 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
713 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
714 struct rx_pkt_cmpl *rxcmp;
715 struct rx_pkt_cmpl_hi *rxcmp1;
716 uint32_t tmp_raw_cons = *raw_cons;
717 uint16_t cons, raw_prod, cp_cons =
718 RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
719 struct rte_mbuf *mbuf;
723 uint32_t vfr_flag = 0, mark_id = 0;
724 struct bnxt *bp = rxq->bp;
726 rxcmp = (struct rx_pkt_cmpl *)
727 &cpr->cp_desc_ring[cp_cons];
729 cmp_type = CMP_TYPE(rxcmp);
731 if (cmp_type == RX_TPA_V2_ABUF_CMPL_TYPE_RX_TPA_AGG) {
732 struct rx_tpa_v2_abuf_cmpl *rx_agg = (void *)rxcmp;
733 uint16_t agg_id = rte_le_to_cpu_16(rx_agg->agg_id);
734 struct bnxt_tpa_info *tpa_info;
736 tpa_info = &rxr->tpa_info[agg_id];
737 RTE_ASSERT(tpa_info->agg_count < 16);
738 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
739 rc = -EINVAL; /* Continue w/o new mbuf */
743 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
744 cp_cons = RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
745 rxcmp1 = (struct rx_pkt_cmpl_hi *)&cpr->cp_desc_ring[cp_cons];
747 if (!CMP_VALID(rxcmp1, tmp_raw_cons, cpr->cp_ring_struct))
750 cpr->valid = FLIP_VALID(cp_cons,
751 cpr->cp_ring_struct->ring_mask,
754 if (cmp_type == RX_TPA_START_CMPL_TYPE_RX_TPA_START) {
755 bnxt_tpa_start(rxq, (struct rx_tpa_start_cmpl *)rxcmp,
756 (struct rx_tpa_start_cmpl_hi *)rxcmp1);
757 rc = -EINVAL; /* Continue w/o new mbuf */
759 } else if (cmp_type == RX_TPA_END_CMPL_TYPE_RX_TPA_END) {
760 mbuf = bnxt_tpa_end(rxq, &tmp_raw_cons,
761 (struct rx_tpa_end_cmpl *)rxcmp,
762 (struct rx_tpa_end_cmpl_hi *)rxcmp1);
767 } else if (cmp_type != 0x11) { /* not an RX_PKT (L2) completion */
772 agg_buf = (rxcmp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK)
773 >> RX_PKT_CMPL_AGG_BUFS_SFT;
774 if (agg_buf && !bnxt_agg_bufs_valid(cpr, agg_buf, tmp_raw_cons))
777 raw_prod = rxr->rx_raw_prod;
779 cons = rxcmp->opaque;
780 mbuf = bnxt_consume_rx_buf(rxr, cons);
784 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
787 mbuf->pkt_len = rte_le_to_cpu_16(rxcmp->len);
788 mbuf->data_len = mbuf->pkt_len;
789 mbuf->port = rxq->port_id;
791 bnxt_set_ol_flags(rxr, rxcmp, rxcmp1, mbuf);
793 #ifdef RTE_LIBRTE_IEEE1588
794 if (unlikely((rte_le_to_cpu_16(rxcmp->flags_type) &
795 RX_PKT_CMPL_FLAGS_MASK) ==
796 RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP)) {
797 mbuf->ol_flags |= PKT_RX_IEEE1588_PTP | PKT_RX_IEEE1588_TMST;
798 bnxt_get_rx_ts_p5(rxq->bp, rxcmp1->reorder);
802 if (BNXT_TRUFLOW_EN(bp))
803 mark_id = bnxt_ulp_set_mark_in_mbuf(rxq->bp, rxcmp1, mbuf,
806 bnxt_set_mark_in_mbuf(rxq->bp, rxcmp1, mbuf);
809 bnxt_rx_pages(rxq, mbuf, &tmp_raw_cons, agg_buf, NULL);
811 mbuf->packet_type = bnxt_parse_pkt_type(rxcmp, rxcmp1);
814 if (rxcmp1->errors_v2 & RX_CMP_L2_ERRORS) {
815 /* Re-install the mbuf back to the rx ring */
816 bnxt_reuse_rx_mbuf(rxr, mbuf);
823 * TODO: Redesign this...
824 * If the allocation fails, the packet does not get received.
825 * Simply returning here will result in slowly falling behind
826 * on the producer ring.
827 * Instead, "filling up" the producer just before ringing the
828 * doorbell would be a better solution since it would let the
829 * producer ring starve until memory is available again, pushing
830 * the drops into hardware and out of the driver, and
831 * allowing recovery to a full producer ring.
833 * This would also help cache usage by replacing per-packet
834 * calls with a tight loop calling the same function repeatedly.
837 raw_prod = RING_NEXT(raw_prod);
838 if (bnxt_alloc_rx_data(rxq, rxr, raw_prod)) {
839 PMD_DRV_LOG(ERR, "mbuf alloc failed with prod=0x%x\n",
844 rxr->rx_raw_prod = raw_prod;
846 if (BNXT_TRUFLOW_EN(bp) && (BNXT_VF_IS_TRUSTED(bp) || BNXT_PF(bp)) &&
848 bnxt_vfr_recv(mark_id, rxq->queue_id, mbuf);
849 /* Now return an error so that nb_rx_pkts is not incremented.
851 * This packet was meant for the representor, so there is no
852 * need to account for it here or hand it to the
853 * parent Rx burst function.
859 * All mbufs are allocated with the same size under DPDK,
860 * so there is no rx_copy_thresh optimization here.
867 *raw_cons = tmp_raw_cons;
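/*
 * Burst receive: walk the completion ring, dispatch Rx completions to
 * bnxt_rx_pkt() and async events to the HWRM handler, then ring the
 * completion, Rx and AG doorbells at the end of the burst. If a previous
 * allocation failed, empty producer slots are refilled here.
 */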
872 uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
875 struct bnxt_rx_queue *rxq = rx_queue;
876 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
877 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
878 uint16_t rx_raw_prod = rxr->rx_raw_prod;
879 uint16_t ag_raw_prod = rxr->ag_raw_prod;
880 uint32_t raw_cons = cpr->cp_raw_cons;
881 bool alloc_failed = false;
884 int nb_rep_rx_pkts = 0;
885 struct rx_pkt_cmpl *rxcmp;
889 if (unlikely(is_bnxt_in_error(rxq->bp)))
892 /* If Rx Q was stopped return */
893 if (unlikely(!rxq->rx_started))
896 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
898 * Replenish buffers if needed when a transition has been made from
899 * vector to non-vector receive processing.
901 while (unlikely(rxq->rxrearm_nb)) {
902 if (!bnxt_alloc_rx_data(rxq, rxr, rxq->rxrearm_start)) {
903 rxr->rx_raw_prod = rxq->rxrearm_start;
904 bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);
905 rxq->rxrearm_start++;
908 /* Retry allocation on next call. */
914 /* Handle RX burst request */
916 cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
917 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
919 if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
921 cpr->valid = FLIP_VALID(cons,
922 cpr->cp_ring_struct->ring_mask,
925 /* TODO: Avoid magic numbers... */
926 if ((CMP_TYPE(rxcmp) & 0x30) == 0x10) { /* Rx completion types fall in 0x10-0x1f */
927 rc = bnxt_rx_pkt(&rx_pkts[nb_rx_pkts], rxq, &raw_cons);
930 else if (rc == -EBUSY) /* partial completion */
932 else if (rc == -ENODEV) /* completion for representor */
934 else if (rc == -ENOMEM) {
938 } else if (!BNXT_NUM_ASYNC_CPR(rxq->bp)) {
940 bnxt_event_hwrm_resp_handler(rxq->bp,
941 (struct cmpl_base *)rxcmp);
942 /* If the async event is Fatal error, return */
943 if (unlikely(is_bnxt_in_error(rxq->bp)))
947 raw_cons = NEXT_RAW_CMP(raw_cons);
948 if (nb_rx_pkts == nb_pkts || nb_rep_rx_pkts == nb_pkts || evt)
950 /* Post some Rx buf early in case of larger burst processing */
951 if (nb_rx_pkts == BNXT_RX_POST_THRESH)
952 bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);
955 cpr->cp_raw_cons = raw_cons;
956 if (!nb_rx_pkts && !nb_rep_rx_pkts && !evt) {
958 * For the PMD, there is no need to keep writing the REARM
959 * doorbell if there are no new completions
964 /* Ring the completion queue doorbell. */
967 /* Ring the receive descriptor doorbell. */
968 if (rx_raw_prod != rxr->rx_raw_prod)
969 bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);
971 /* Ring the AGG ring DB */
972 if (ag_raw_prod != rxr->ag_raw_prod)
973 bnxt_db_write(&rxr->ag_db, rxr->ag_raw_prod);
975 /* Attempt to alloc Rx buf in case of a previous allocation failure. */
979 rx_raw_prod = RING_NEXT(rx_raw_prod);
980 for (cnt = 0; cnt < nb_rx_pkts + nb_rep_rx_pkts; cnt++) {
981 struct rte_mbuf **rx_buf;
984 ndx = RING_IDX(rxr->rx_ring_struct, rx_raw_prod + cnt);
985 rx_buf = &rxr->rx_buf_ring[ndx];
987 /* Buffer already allocated for this index. */
988 if (*rx_buf != NULL && *rx_buf != &rxq->fake_mbuf)
991 /* This slot is empty. Alloc buffer for Rx */
992 if (!bnxt_alloc_rx_data(rxq, rxr, rx_raw_prod + cnt)) {
993 rxr->rx_raw_prod = rx_raw_prod + cnt;
994 bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);
996 PMD_DRV_LOG(ERR, "Alloc mbuf failed\n");
1007 * Dummy DPDK callback for RX.
1009 * This function is used to temporarily replace the real callback during
1010 * unsafe control operations on the queue, or in case of error.
1013 bnxt_dummy_recv_pkts(void *rx_queue __rte_unused,
1014 struct rte_mbuf **rx_pkts __rte_unused,
1015 uint16_t nb_pkts __rte_unused)
1020 void bnxt_free_rx_rings(struct bnxt *bp)
1023 struct bnxt_rx_queue *rxq;
1028 for (i = 0; i < (int)bp->rx_nr_rings; i++) {
1029 rxq = bp->rx_queues[i];
1033 bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
1034 rte_free(rxq->rx_ring->rx_ring_struct);
1036 /* Free the Aggregator ring */
1037 bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
1038 rte_free(rxq->rx_ring->ag_ring_struct);
1039 rxq->rx_ring->ag_ring_struct = NULL;
1041 rte_free(rxq->rx_ring);
1043 bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
1044 rte_free(rxq->cp_ring->cp_ring_struct);
1045 rte_free(rxq->cp_ring);
1048 bp->rx_queues[i] = NULL;
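/*
 * Ring sizing: the Rx ring is the requested descriptor count rounded up to
 * a power of two; the completion ring gets two entries per Rx descriptor
 * (times AGG_RING_SIZE_FACTOR when the aggregation ring is used); the AG
 * ring is nb_rx_desc * AGG_RING_SIZE_FACTOR. Extra mbuf-pointer slots are
 * reserved in vmem for the vector Rx path.
 */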
1052 int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
1054 struct rte_eth_dev *eth_dev = rxq->bp->eth_dev;
1055 struct rte_eth_rxmode *rxmode;
1056 struct bnxt_cp_ring_info *cpr;
1057 struct bnxt_rx_ring_info *rxr;
1058 struct bnxt_ring *ring;
1061 rxq->rx_buf_size = BNXT_MAX_PKT_LEN + sizeof(struct rte_mbuf);
1063 rxr = rte_zmalloc_socket("bnxt_rx_ring",
1064 sizeof(struct bnxt_rx_ring_info),
1065 RTE_CACHE_LINE_SIZE, socket_id);
1070 ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
1071 sizeof(struct bnxt_ring),
1072 RTE_CACHE_LINE_SIZE, socket_id);
1075 rxr->rx_ring_struct = ring;
1076 ring->ring_size = rte_align32pow2(rxq->nb_rx_desc);
1077 ring->ring_mask = ring->ring_size - 1;
1078 ring->bd = (void *)rxr->rx_desc_ring;
1079 ring->bd_dma = rxr->rx_desc_mapping;
1081 /* Allocate extra rx ring entries for vector rx. */
1082 ring->vmem_size = sizeof(struct rte_mbuf *) *
1083 (ring->ring_size + RTE_BNXT_DESCS_PER_LOOP);
1085 ring->vmem = (void **)&rxr->rx_buf_ring;
1086 ring->fw_ring_id = INVALID_HW_RING_ID;
1088 cpr = rte_zmalloc_socket("bnxt_rx_ring",
1089 sizeof(struct bnxt_cp_ring_info),
1090 RTE_CACHE_LINE_SIZE, socket_id);
1095 ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
1096 sizeof(struct bnxt_ring),
1097 RTE_CACHE_LINE_SIZE, socket_id);
1100 cpr->cp_ring_struct = ring;
1102 rxmode = &eth_dev->data->dev_conf.rxmode;
1103 use_agg_ring = (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) ||
1104 (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) ||
1105 (rxmode->max_rx_pkt_len >
1106 (uint32_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
1107 RTE_PKTMBUF_HEADROOM));
1109 /* Allocate two completion slots per entry in desc ring. */
1110 ring->ring_size = rxr->rx_ring_struct->ring_size * 2;
1112 /* Allocate additional slots if aggregation ring is in use. */
1114 ring->ring_size *= AGG_RING_SIZE_FACTOR;
1116 ring->ring_size = rte_align32pow2(ring->ring_size);
1117 ring->ring_mask = ring->ring_size - 1;
1118 ring->bd = (void *)cpr->cp_desc_ring;
1119 ring->bd_dma = cpr->cp_desc_mapping;
1120 ring->vmem_size = 0;
1122 ring->fw_ring_id = INVALID_HW_RING_ID;
1124 /* Allocate Aggregator rings */
1125 ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
1126 sizeof(struct bnxt_ring),
1127 RTE_CACHE_LINE_SIZE, socket_id);
1130 rxr->ag_ring_struct = ring;
1131 ring->ring_size = rte_align32pow2(rxq->nb_rx_desc *
1132 AGG_RING_SIZE_FACTOR);
1133 ring->ring_mask = ring->ring_size - 1;
1134 ring->bd = (void *)rxr->ag_desc_ring;
1135 ring->bd_dma = rxr->ag_desc_mapping;
1136 ring->vmem_size = ring->ring_size * sizeof(struct rte_mbuf *);
1137 ring->vmem = (void **)&rxr->ag_buf_ring;
1138 ring->fw_ring_id = INVALID_HW_RING_ID;
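/*
 * Pre-program every buffer descriptor with its type, buffer length, and an
 * opaque value equal to its ring index so that completions can be matched
 * back to software ring slots.
 */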
1143 static void bnxt_init_rxbds(struct bnxt_ring *ring, uint32_t type,
1147 struct rx_prod_pkt_bd *rx_bd_ring = (struct rx_prod_pkt_bd *)ring->bd;
1151 for (j = 0; j < ring->ring_size; j++) {
1152 rx_bd_ring[j].flags_type = rte_cpu_to_le_16(type);
1153 rx_bd_ring[j].len = rte_cpu_to_le_16(len);
1154 rx_bd_ring[j].opaque = j;
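/*
 * Populate the Rx and AG rings with mbufs, point the spare vector-mode
 * slots at the queue's fake mbuf, and pre-allocate one mbuf per TPA
 * aggregation context. Buffer length is capped at BNXT_MAX_PKT_LEN.
 */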
1158 int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
1160 struct bnxt_rx_ring_info *rxr;
1161 struct bnxt_ring *ring;
1162 uint32_t raw_prod, type;
1166 /* Initialize packet type table. */
1167 bnxt_init_ptype_table();
1169 size = rte_pktmbuf_data_room_size(rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
1170 size = RTE_MIN(BNXT_MAX_PKT_LEN, size);
1172 type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT;
1175 ring = rxr->rx_ring_struct;
1176 bnxt_init_rxbds(ring, type, size);
1178 /* Initialize offload flags parsing table. */
1179 bnxt_init_ol_flags_tables(rxq);
1181 raw_prod = rxr->rx_raw_prod;
1182 for (i = 0; i < ring->ring_size; i++) {
1183 if (unlikely(!rxr->rx_buf_ring[i])) {
1184 if (bnxt_alloc_rx_data(rxq, rxr, raw_prod) != 0) {
1185 PMD_DRV_LOG(WARNING,
1186 "init'ed rx ring %d with %d/%d mbufs only\n",
1187 rxq->queue_id, i, ring->ring_size);
1191 rxr->rx_raw_prod = raw_prod;
1192 raw_prod = RING_NEXT(raw_prod);
1195 /* Initialize dummy mbuf pointers for vector mode rx. */
1196 for (i = ring->ring_size;
1197 i < ring->ring_size + RTE_BNXT_DESCS_PER_LOOP; i++) {
1198 rxr->rx_buf_ring[i] = &rxq->fake_mbuf;
1201 ring = rxr->ag_ring_struct;
1202 type = RX_PROD_AGG_BD_TYPE_RX_PROD_AGG;
1203 bnxt_init_rxbds(ring, type, size);
1204 raw_prod = rxr->ag_raw_prod;
1206 for (i = 0; i < ring->ring_size; i++) {
1207 if (unlikely(!rxr->ag_buf_ring[i])) {
1208 if (bnxt_alloc_ag_data(rxq, rxr, raw_prod) != 0) {
1209 PMD_DRV_LOG(WARNING,
1210 "init'ed AG ring %d with %d/%d mbufs only\n",
1211 rxq->queue_id, i, ring->ring_size);
1215 rxr->ag_raw_prod = raw_prod;
1216 raw_prod = RING_NEXT(raw_prod);
1218 PMD_DRV_LOG(DEBUG, "AGG Done!\n");
1220 if (rxr->tpa_info) {
1221 unsigned int max_aggs = BNXT_TPA_MAX_AGGS(rxq->bp);
1223 for (i = 0; i < max_aggs; i++) {
1224 if (unlikely(!rxr->tpa_info[i].mbuf)) {
1225 rxr->tpa_info[i].mbuf =
1226 __bnxt_alloc_rx_data(rxq->mb_pool);
1227 if (!rxr->tpa_info[i].mbuf) {
1228 rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
1234 PMD_DRV_LOG(DEBUG, "TPA alloc Done!\n");