/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>
#include <stdbool.h>

#include <rte_bitmap.h>
#include <rte_byteorder.h>
#include <rte_malloc.h>
#include <rte_memory.h>

#include "bnxt.h"
#include "bnxt_reps.h"
#include "bnxt_ring.h"
#include "bnxt_rxr.h"
#include "bnxt_rxq.h"
#include "hsi_struct_def_dpdk.h"
#ifdef RTE_LIBRTE_IEEE1588
#include "bnxt_hwrm.h"
#endif

#include <bnxt_tf_common.h>
#include <ulp_mark_mgr.h>
/*
 * RX Ring handling
 */

static inline struct rte_mbuf *__bnxt_alloc_rx_data(struct rte_mempool *mb)
{
	struct rte_mbuf *data;

	data = rte_mbuf_raw_alloc(mb);

	return data;
}
static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
				     struct bnxt_rx_ring_info *rxr,
				     uint16_t raw_prod)
{
	uint16_t prod = RING_IDX(rxr->rx_ring_struct, raw_prod);
	struct rx_prod_pkt_bd *rxbd;
	struct rte_mbuf **rx_buf;
	struct rte_mbuf *mbuf;

	rxbd = &rxr->rx_desc_ring[prod];
	rx_buf = &rxr->rx_buf_ring[prod];
	mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
	if (!mbuf) {
		rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
		return -ENOMEM;
	}

	*rx_buf = mbuf;
	mbuf->data_off = RTE_PKTMBUF_HEADROOM;

	rxbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));

	return 0;
}
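/*
 * Note on the indexing convention used throughout this file (illustrative
 * values; RING_IDX()/RING_NEXT() are defined in the ring headers): producer
 * and consumer positions are kept as free-running "raw" indices and are only
 * masked down to a ring slot at the point of use. With ring_size == 256
 * (ring_mask == 0xff):
 *
 *     raw_prod = 0x102;
 *     prod = RING_IDX(ring, raw_prod);   ->  0x102 & 0xff == 0x02
 *     raw_prod = RING_NEXT(raw_prod);    ->  0x103, no masking
 *
 * Keeping the unmasked value preserves the wrap count in the high bits,
 * which the completion-ring valid-bit check below relies on.
 */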
static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
				     struct bnxt_rx_ring_info *rxr,
				     uint16_t raw_prod)
{
	uint16_t prod = RING_IDX(rxr->ag_ring_struct, raw_prod);
	struct rx_prod_pkt_bd *rxbd;
	struct rte_mbuf **rx_buf;
	struct rte_mbuf *mbuf;

	rxbd = &rxr->ag_desc_ring[prod];
	rx_buf = &rxr->ag_buf_ring[prod];
	if (rxbd == NULL) {
		PMD_DRV_LOG(ERR, "Jumbo Frame. rxbd is NULL\n");
		return -EINVAL;
	}

	if (rx_buf == NULL) {
		PMD_DRV_LOG(ERR, "Jumbo Frame. rx_buf is NULL\n");
		return -EINVAL;
	}

	mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
	if (!mbuf) {
		rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
		return -ENOMEM;
	}

	*rx_buf = mbuf;
	mbuf->data_off = RTE_PKTMBUF_HEADROOM;

	rxbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));

	return 0;
}
static inline void bnxt_reuse_rx_mbuf(struct bnxt_rx_ring_info *rxr,
				      struct rte_mbuf *mbuf)
{
	uint16_t prod, raw_prod = RING_NEXT(rxr->rx_raw_prod);
	struct rte_mbuf **prod_rx_buf;
	struct rx_prod_pkt_bd *prod_bd;

	prod = RING_IDX(rxr->rx_ring_struct, raw_prod);
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	RTE_ASSERT(*prod_rx_buf == NULL);
	RTE_ASSERT(mbuf != NULL);

	*prod_rx_buf = mbuf;

	prod_bd = &rxr->rx_desc_ring[prod];

	prod_bd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));

	rxr->rx_raw_prod = raw_prod;
}
static inline
struct rte_mbuf *bnxt_consume_rx_buf(struct bnxt_rx_ring_info *rxr,
				     uint16_t cons)
{
	struct rte_mbuf **cons_rx_buf;
	struct rte_mbuf *mbuf;

	cons_rx_buf = &rxr->rx_buf_ring[RING_IDX(rxr->rx_ring_struct, cons)];
	RTE_ASSERT(*cons_rx_buf != NULL);
	mbuf = *cons_rx_buf;
	*cons_rx_buf = NULL;

	return mbuf;
}
static void bnxt_tpa_get_metadata(struct bnxt *bp,
				  struct bnxt_tpa_info *tpa_info,
				  struct rx_tpa_start_cmpl *tpa_start,
				  struct rx_tpa_start_cmpl_hi *tpa_start1)
{
	tpa_info->cfa_code_valid = 0;
	tpa_info->vlan_valid = 0;
	tpa_info->hash_valid = 0;
	tpa_info->l4_csum_valid = 0;

	if (likely(tpa_start->flags_type &
		   rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS_RSS_VALID))) {
		tpa_info->hash_valid = 1;
		tpa_info->rss_hash = rte_le_to_cpu_32(tpa_start->rss_hash);
	}

	if (bp->vnic_cap_flags & BNXT_VNIC_CAP_RX_CMPL_V2) {
		struct rx_tpa_start_v2_cmpl *v2_tpa_start = (void *)tpa_start;
		struct rx_tpa_start_v2_cmpl_hi *v2_tpa_start1 =
			(void *)tpa_start1;

		if (v2_tpa_start->agg_id &
		    RX_TPA_START_V2_CMPL_METADATA1_VALID) {
			tpa_info->vlan_valid = 1;
			tpa_info->vlan =
				rte_le_to_cpu_16(v2_tpa_start1->metadata0);
		}

		if (v2_tpa_start1->flags2 & RX_CMP_FLAGS2_L4_CSUM_ALL_OK_MASK)
			tpa_info->l4_csum_valid = 1;

		return;
	}

	tpa_info->cfa_code_valid = 1;
	tpa_info->cfa_code = rte_le_to_cpu_16(tpa_start1->cfa_code);
	if (tpa_start1->flags2 &
	    rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN)) {
		tpa_info->vlan_valid = 1;
		tpa_info->vlan = rte_le_to_cpu_32(tpa_start1->metadata);
	}

	if (likely(tpa_start1->flags2 &
		   rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC)))
		tpa_info->l4_csum_valid = 1;
}
static void bnxt_tpa_start(struct bnxt_rx_queue *rxq,
			   struct rx_tpa_start_cmpl *tpa_start,
			   struct rx_tpa_start_cmpl_hi *tpa_start1)
{
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	uint16_t agg_id;
	uint16_t data_cons;
	struct bnxt_tpa_info *tpa_info;
	struct rte_mbuf *mbuf;

	agg_id = bnxt_tpa_start_agg_id(rxq->bp, tpa_start);

	data_cons = tpa_start->opaque;
	tpa_info = &rxr->tpa_info[agg_id];

	mbuf = bnxt_consume_rx_buf(rxr, data_cons);

	bnxt_reuse_rx_mbuf(rxr, tpa_info->mbuf);

	tpa_info->agg_count = 0;
	tpa_info->mbuf = mbuf;
	tpa_info->len = rte_le_to_cpu_32(tpa_start->len);

	mbuf->data_off = RTE_PKTMBUF_HEADROOM;
	mbuf->nb_segs = 1;
	mbuf->next = NULL;
	mbuf->pkt_len = rte_le_to_cpu_32(tpa_start->len);
	mbuf->data_len = mbuf->pkt_len;
	mbuf->port = rxq->port_id;
	mbuf->ol_flags = PKT_RX_LRO;

	bnxt_tpa_get_metadata(rxq->bp, tpa_info, tpa_start, tpa_start1);

	if (likely(tpa_info->hash_valid)) {
		mbuf->hash.rss = tpa_info->rss_hash;
		mbuf->ol_flags |= PKT_RX_RSS_HASH;
	} else if (tpa_info->cfa_code_valid) {
		mbuf->hash.fdir.id = tpa_info->cfa_code;
		mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
	}

	if (tpa_info->vlan_valid) {
		mbuf->vlan_tci = tpa_info->vlan;
		mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
	}

	if (likely(tpa_info->l4_csum_valid))
		mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;

	/* Recycle the next mbuf. */
	data_cons = RING_NEXT(data_cons);
	bnxt_reuse_rx_mbuf(rxr, bnxt_consume_rx_buf(rxr, data_cons));
}
static int bnxt_agg_bufs_valid(struct bnxt_cp_ring_info *cpr,
			       uint8_t agg_bufs, uint32_t raw_cp_cons)
{
	uint16_t last_cp_cons;
	struct rx_pkt_cmpl *agg_cmpl;

	raw_cp_cons = ADV_RAW_CMP(raw_cp_cons, agg_bufs);
	last_cp_cons = RING_CMP(cpr->cp_ring_struct, raw_cp_cons);
	agg_cmpl = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[last_cp_cons];
	cpr->valid = FLIP_VALID(raw_cp_cons,
				cpr->cp_ring_struct->ring_mask,
				cpr->valid);
	return CMP_VALID(agg_cmpl, raw_cp_cons, cpr->cp_ring_struct);
}
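/*
 * Note on the valid-bit handshake used above (a simplified sketch; the
 * authoritative logic is the CMP_VALID()/FLIP_VALID() macros): the hardware
 * toggles the completion record's V bit on every pass over the ring, and
 * software derives the expected phase from the raw, unmasked consumer index:
 *
 *     expected_v = !(raw_cons & ring_size);
 *     new_entry  = (!!(cmpl->info3_v & CMPL_BASE_V) == expected_v);
 *
 * bnxt_agg_bufs_valid() applies this test at the slot that the last expected
 * aggregation completion would occupy, so a packet's whole burst of agg
 * completions is known to have landed before any of them are consumed.
 */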
/* TPA consumes aggregation buffers out of order; allocate connected data only. */
static int bnxt_prod_ag_mbuf(struct bnxt_rx_queue *rxq)
{
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	uint16_t raw_next = RING_NEXT(rxr->ag_raw_prod);
	uint16_t bmap_next = RING_IDX(rxr->ag_ring_struct, raw_next);

	/* TODO: batch allocation for better performance */
	while (rte_bitmap_get(rxr->ag_bitmap, bmap_next)) {
		if (unlikely(bnxt_alloc_ag_data(rxq, rxr, raw_next))) {
			PMD_DRV_LOG(ERR, "agg mbuf alloc failed: prod=0x%x\n",
				    raw_next);
			break;
		}
		rte_bitmap_clear(rxr->ag_bitmap, bmap_next);
		rxr->ag_raw_prod = raw_next;
		raw_next = RING_NEXT(raw_next);
		bmap_next = RING_IDX(rxr->ag_ring_struct, raw_next);
	}

	return 0;
}
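/*
 * The TODO above suggests replacing the per-mbuf allocation with a bulk
 * refill. The helper below is a hedged sketch of that idea, not part of the
 * driver: bnxt_prod_ag_mbuf_bulk() is a hypothetical name, and it assumes
 * the same ag_bitmap/raw-index conventions used by bnxt_prod_ag_mbuf().
 */
static __rte_unused int
bnxt_prod_ag_mbuf_bulk(struct bnxt_rx_queue *rxq)
{
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	struct rte_mbuf *mbufs[32];
	uint16_t raw_next = RING_NEXT(rxr->ag_raw_prod);
	uint16_t nb = 0, i;

	/* Count consecutive freed slots that are waiting for new buffers. */
	while (nb < RTE_DIM(mbufs) &&
	       rte_bitmap_get(rxr->ag_bitmap,
			      RING_IDX(rxr->ag_ring_struct, raw_next + nb)))
		nb++;

	if (nb == 0)
		return 0;

	/* One mempool operation replaces nb separate allocations. */
	if (rte_mempool_get_bulk(rxq->mb_pool, (void **)mbufs, nb) != 0)
		return -ENOMEM;

	for (i = 0; i < nb; i++) {
		uint16_t prod = RING_IDX(rxr->ag_ring_struct, raw_next);

		rxr->ag_buf_ring[prod] = mbufs[i];
		mbufs[i]->data_off = RTE_PKTMBUF_HEADROOM;
		rxr->ag_desc_ring[prod].address =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbufs[i]));
		rte_bitmap_clear(rxr->ag_bitmap, prod);
		rxr->ag_raw_prod = raw_next;
		raw_next = RING_NEXT(raw_next);
	}

	return 0;
}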
static int bnxt_rx_pages(struct bnxt_rx_queue *rxq,
			 struct rte_mbuf *mbuf, uint32_t *tmp_raw_cons,
			 uint8_t agg_buf, struct bnxt_tpa_info *tpa_info)
{
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	int i;
	uint16_t cp_cons, ag_cons;
	struct rx_pkt_cmpl *rxcmp;
	struct rte_mbuf *last = mbuf;
	bool is_p5_tpa = tpa_info && BNXT_CHIP_P5(rxq->bp);

	for (i = 0; i < agg_buf; i++) {
		struct rte_mbuf **ag_buf;
		struct rte_mbuf *ag_mbuf;

		if (is_p5_tpa) {
			rxcmp = (void *)&tpa_info->agg_arr[i];
		} else {
			*tmp_raw_cons = NEXT_RAW_CMP(*tmp_raw_cons);
			cp_cons = RING_CMP(cpr->cp_ring_struct, *tmp_raw_cons);
			rxcmp = (struct rx_pkt_cmpl *)
					&cpr->cp_desc_ring[cp_cons];
		}

#ifdef BNXT_DEBUG
		bnxt_dump_cmpl(cp_cons, rxcmp);
#endif

		ag_cons = rxcmp->opaque;
		RTE_ASSERT(ag_cons <= rxr->ag_ring_struct->ring_mask);
		ag_buf = &rxr->ag_buf_ring[ag_cons];
		ag_mbuf = *ag_buf;
		RTE_ASSERT(ag_mbuf != NULL);

		ag_mbuf->data_len = rte_le_to_cpu_16(rxcmp->len);

		mbuf->nb_segs++;
		mbuf->pkt_len += ag_mbuf->data_len;

		last->next = ag_mbuf;
		last = ag_mbuf;

		*ag_buf = NULL;

		/*
		 * Aggregation buffers are consumed out of order by the TPA
		 * logic, so use a bitmap to track freed slots that still
		 * need to be refilled and handed back to the NIC.
		 */
		rte_bitmap_set(rxr->ag_bitmap, ag_cons);
	}
	last->next = NULL;
	bnxt_prod_ag_mbuf(rxq);
	return 0;
}
static inline struct rte_mbuf *bnxt_tpa_end(
		struct bnxt_rx_queue *rxq,
		uint32_t *raw_cp_cons,
		struct rx_tpa_end_cmpl *tpa_end,
		struct rx_tpa_end_cmpl_hi *tpa_end1)
{
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	uint16_t agg_id;
	struct rte_mbuf *mbuf;
	uint8_t agg_bufs;
	uint8_t payload_offset;
	struct bnxt_tpa_info *tpa_info;

	if (BNXT_CHIP_P5(rxq->bp)) {
		struct rx_tpa_v2_end_cmpl *th_tpa_end;
		struct rx_tpa_v2_end_cmpl_hi *th_tpa_end1;

		th_tpa_end = (void *)tpa_end;
		th_tpa_end1 = (void *)tpa_end1;
		agg_id = BNXT_TPA_END_AGG_ID_TH(th_tpa_end);
		agg_bufs = BNXT_TPA_END_AGG_BUFS_TH(th_tpa_end1);
		payload_offset = th_tpa_end1->payload_offset;
	} else {
		agg_id = BNXT_TPA_END_AGG_ID(tpa_end);
		agg_bufs = BNXT_TPA_END_AGG_BUFS(tpa_end);
		if (!bnxt_agg_bufs_valid(cpr, agg_bufs, *raw_cp_cons))
			return NULL;
		payload_offset = tpa_end->payload_offset;
	}

	tpa_info = &rxr->tpa_info[agg_id];
	mbuf = tpa_info->mbuf;
	RTE_ASSERT(mbuf != NULL);

	if (agg_bufs)
		bnxt_rx_pages(rxq, mbuf, raw_cp_cons, agg_bufs, tpa_info);

	mbuf->l4_len = payload_offset;

	struct rte_mbuf *new_data = __bnxt_alloc_rx_data(rxq->mb_pool);
	RTE_ASSERT(new_data != NULL);
	if (!new_data) {
		rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
		return NULL;
	}
	tpa_info->mbuf = new_data;

	return mbuf;
}
uint32_t bnxt_ptype_table[BNXT_PTYPE_TBL_DIM] __rte_cache_aligned;
static void __rte_cold
bnxt_init_ptype_table(void)
{
	uint32_t *pt = bnxt_ptype_table;
	static bool initialized;
	uint32_t i, ip6, tun, type;
	uint32_t l3;

	if (initialized)
		return;

	for (i = 0; i < BNXT_PTYPE_TBL_DIM; i++) {
		if (i & BNXT_PTYPE_TBL_VLAN_MSK)
			pt[i] = RTE_PTYPE_L2_ETHER_VLAN;
		else
			pt[i] = RTE_PTYPE_L2_ETHER;

		ip6 = !!(i & BNXT_PTYPE_TBL_IP_VER_MSK);
		tun = !!(i & BNXT_PTYPE_TBL_TUN_MSK);
		type = (i & BNXT_PTYPE_TBL_TYPE_MSK) >> BNXT_PTYPE_TBL_TYPE_SFT;

		if (!tun && !ip6)
			l3 = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
		else if (!tun && ip6)
			l3 = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
		else if (tun && !ip6)
			l3 = RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
		else
			l3 = RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;

		switch (type) {
		case BNXT_PTYPE_TBL_TYPE_ICMP:
			if (tun)
				pt[i] |= l3 | RTE_PTYPE_INNER_L4_ICMP;
			else
				pt[i] |= l3 | RTE_PTYPE_L4_ICMP;
			break;
		case BNXT_PTYPE_TBL_TYPE_TCP:
			if (tun)
				pt[i] |= l3 | RTE_PTYPE_INNER_L4_TCP;
			else
				pt[i] |= l3 | RTE_PTYPE_L4_TCP;
			break;
		case BNXT_PTYPE_TBL_TYPE_UDP:
			if (tun)
				pt[i] |= l3 | RTE_PTYPE_INNER_L4_UDP;
			else
				pt[i] |= l3 | RTE_PTYPE_L4_UDP;
			break;
		case BNXT_PTYPE_TBL_TYPE_IP:
			pt[i] |= l3;
			break;
		}
	}
	initialized = true;
}
static uint32_t
bnxt_parse_pkt_type(struct rx_pkt_cmpl *rxcmp, struct rx_pkt_cmpl_hi *rxcmp1)
{
	uint32_t flags_type, flags2;
	uint8_t index;

	flags_type = rte_le_to_cpu_16(rxcmp->flags_type);
	flags2 = rte_le_to_cpu_32(rxcmp1->flags2);

	/* Validate ptype table indexing at build time. */
	bnxt_check_ptype_constants();

	/*
	 * Index format:
	 *     bit 0: Set if IP tunnel encapsulated packet.
	 *     bit 1: Set if IPv6 packet, clear if IPv4.
	 *     bit 2: Set if VLAN tag present.
	 *     bits 3-6: Four-bit hardware packet type field.
	 */
	index = BNXT_CMPL_ITYPE_TO_IDX(flags_type) |
		BNXT_CMPL_VLAN_TUN_TO_IDX(flags2) |
		BNXT_CMPL_IP_VER_TO_IDX(flags2);

	return bnxt_ptype_table[index];
}
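/*
 * Worked example of the lookup above (bit positions per the index-format
 * comment; the macros are the authoritative encoding): for a VLAN-tagged,
 * non-tunneled IPv4 TCP packet,
 *
 *     index = (BNXT_PTYPE_TBL_TYPE_TCP << BNXT_PTYPE_TBL_TYPE_SFT)  (type)
 *           | (1 << 2)                                              (VLAN)
 *           | (0 << 1)                                              (IPv4)
 *           | (0 << 0)                                              (no tunnel)
 *
 * and bnxt_ptype_table[index], as filled in by bnxt_init_ptype_table(), is
 * RTE_PTYPE_L2_ETHER_VLAN | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP.
 */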
static void __rte_cold
bnxt_init_ol_flags_tables(struct bnxt_rx_queue *rxq)
{
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	struct rte_eth_conf *dev_conf;
	bool outer_cksum_enabled;
	uint64_t offloads;
	uint32_t *pt;
	int i;

	dev_conf = &rxq->bp->eth_dev->data->dev_conf;
	offloads = dev_conf->rxmode.offloads;

	outer_cksum_enabled = !!(offloads & (DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
					     DEV_RX_OFFLOAD_OUTER_UDP_CKSUM));

	/* Initialize ol_flags table. */
	pt = rxr->ol_flags_table;
	for (i = 0; i < BNXT_OL_FLAGS_TBL_DIM; i++) {
		pt[i] = 0;

		if (i & RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN)
			pt[i] |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;

		if (i & (RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC << 3)) {
			/* Tunnel case. */
			if (outer_cksum_enabled) {
				if (i & RX_PKT_CMPL_FLAGS2_IP_CS_CALC)
					pt[i] |= PKT_RX_IP_CKSUM_GOOD;

				if (i & RX_PKT_CMPL_FLAGS2_L4_CS_CALC)
					pt[i] |= PKT_RX_L4_CKSUM_GOOD;

				if (i & RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)
					pt[i] |= PKT_RX_OUTER_L4_CKSUM_GOOD;
			} else {
				if (i & RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)
					pt[i] |= PKT_RX_IP_CKSUM_GOOD;

				if (i & RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)
					pt[i] |= PKT_RX_L4_CKSUM_GOOD;
			}
		} else {
			/* Non-tunnel case. */
			if (i & RX_PKT_CMPL_FLAGS2_IP_CS_CALC)
				pt[i] |= PKT_RX_IP_CKSUM_GOOD;

			if (i & RX_PKT_CMPL_FLAGS2_L4_CS_CALC)
				pt[i] |= PKT_RX_L4_CKSUM_GOOD;
		}
	}

	/* Initialize checksum error table. */
	pt = rxr->ol_flags_err_table;
	for (i = 0; i < BNXT_OL_FLAGS_ERR_TBL_DIM; i++) {
		pt[i] = 0;

		if (i & (RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC << 2)) {
			/* Tunnel case. */
			if (outer_cksum_enabled) {
				if (i & (RX_PKT_CMPL_ERRORS_IP_CS_ERROR >> 4))
					pt[i] |= PKT_RX_IP_CKSUM_BAD;

				if (i & (RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR >> 4))
					pt[i] |= PKT_RX_OUTER_IP_CKSUM_BAD;

				if (i & (RX_PKT_CMPL_ERRORS_L4_CS_ERROR >> 4))
					pt[i] |= PKT_RX_L4_CKSUM_BAD;

				if (i & (RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR >> 4))
					pt[i] |= PKT_RX_OUTER_L4_CKSUM_BAD;
			} else {
				if (i & (RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR >> 4))
					pt[i] |= PKT_RX_IP_CKSUM_BAD;

				if (i & (RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR >> 4))
					pt[i] |= PKT_RX_L4_CKSUM_BAD;
			}
		} else {
			/* Non-tunnel case. */
			if (i & (RX_PKT_CMPL_ERRORS_IP_CS_ERROR >> 4))
				pt[i] |= PKT_RX_IP_CKSUM_BAD;

			if (i & (RX_PKT_CMPL_ERRORS_L4_CS_ERROR >> 4))
				pt[i] |= PKT_RX_L4_CKSUM_BAD;
		}
	}
}
static void
bnxt_set_ol_flags(struct bnxt_rx_ring_info *rxr, struct rx_pkt_cmpl *rxcmp,
		  struct rx_pkt_cmpl_hi *rxcmp1, struct rte_mbuf *mbuf)
{
	uint16_t flags_type, errors, flags;
	uint64_t ol_flags;

	flags_type = rte_le_to_cpu_16(rxcmp->flags_type);

	flags = rte_le_to_cpu_32(rxcmp1->flags2) &
				(RX_PKT_CMPL_FLAGS2_IP_CS_CALC |
				 RX_PKT_CMPL_FLAGS2_L4_CS_CALC |
				 RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC |
				 RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC |
				 RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN);

	flags |= (flags & RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC) << 3;
	errors = rte_le_to_cpu_16(rxcmp1->errors_v2) &
				(RX_PKT_CMPL_ERRORS_IP_CS_ERROR |
				 RX_PKT_CMPL_ERRORS_L4_CS_ERROR |
				 RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR |
				 RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR);
	errors = (errors >> 4) & flags;

	ol_flags = rxr->ol_flags_table[flags & ~errors];

	if (unlikely(errors)) {
		errors |= (flags & RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC) << 2;
		ol_flags |= rxr->ol_flags_err_table[errors];
	}

	if (flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) {
		mbuf->hash.rss = rte_le_to_cpu_32(rxcmp->rss_hash);
		ol_flags |= PKT_RX_RSS_HASH;
	}

#ifdef RTE_LIBRTE_IEEE1588
	if (unlikely((flags_type & RX_PKT_CMPL_FLAGS_MASK) ==
		     RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP))
		ol_flags |= PKT_RX_IEEE1588_PTP | PKT_RX_IEEE1588_TMST;
#endif

	mbuf->ol_flags = ol_flags;
}
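/*
 * A sketch of how the table indices above line up (bit values as defined
 * for RX_PKT_CMPL_* in hsi_struct_def_dpdk.h):
 *
 *     flags2: IP_CS_CALC 0x1, L4_CS_CALC 0x2, T_IP_CS_CALC 0x4,
 *             T_L4_CS_CALC 0x8, META_FORMAT_VLAN 0x10
 *     errors: IP_CS_ERROR 0x10, L4_CS_ERROR 0x20, T_IP_CS_ERROR 0x40,
 *             T_L4_CS_ERROR 0x80
 *
 * "errors >> 4" aligns each CS_ERROR bit with its CS_CALC bit, so
 * "(errors >> 4) & flags" keeps only errors for checksums that were actually
 * computed, and "flags & ~errors" indexes ol_flags_table with the checksums
 * that were computed and passed. "(flags & T_IP_CS_CALC) << 3" (or "<< 2"
 * for the error table) copies the tunnel indicator into the extra index bit
 * that bnxt_init_ol_flags_tables() uses to separate the tunnel and
 * non-tunnel halves of each table.
 */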
#ifdef RTE_LIBRTE_IEEE1588
static void
bnxt_get_rx_ts_p5(struct bnxt *bp, uint32_t rx_ts_cmpl)
{
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
	uint64_t last_hwrm_time;
	uint64_t pkt_time = 0;

	if (!BNXT_CHIP_P5(bp) || !ptp)
		return;

	/* On Thor, Rx timestamps are provided directly in the
	 * Rx completion records to the driver. Only 32 bits of
	 * the timestamp are present in the completion. The driver needs
	 * to read the current 48-bit free-running timer using the
	 * HWRM_PORT_TS_QUERY command and combine the upper 16 bits
	 * from the HWRM response with the lower 32 bits in the
	 * Rx completion to produce the 48-bit timestamp for the Rx packet.
	 */
	last_hwrm_time = ptp->current_time;
	pkt_time = (last_hwrm_time & BNXT_PTP_CURRENT_TIME_MASK) | rx_ts_cmpl;
	if (rx_ts_cmpl < (uint32_t)last_hwrm_time) {
		/* The timer has rolled over. */
		pkt_time += (1ULL << 32);
	}
	ptp->rx_timestamp = pkt_time;
}
#endif
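/*
 * Worked example of the 48-bit stitching above (illustrative numbers,
 * assuming BNXT_PTP_CURRENT_TIME_MASK keeps the upper 16 bits of the 48-bit
 * timer): with ptp->current_time == 0x1234_89ab_cdef and a completion
 * timestamp of 0x89ab_cdee,
 *
 *     pkt_time = 0x1234_0000_0000 | 0x89ab_cdee = 0x1234_89ab_cdee
 *
 * Had the completion carried 0x0000_0001 instead, it would be numerically
 * smaller than the timer's lower 32 bits, meaning the 32-bit value wrapped
 * after the last HWRM read, so (1ULL << 32) is added, giving
 * 0x1235_0000_0001.
 */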
static uint32_t
bnxt_ulp_set_mark_in_mbuf(struct bnxt *bp, struct rx_pkt_cmpl_hi *rxcmp1,
			  struct rte_mbuf *mbuf, uint32_t *vfr_flag)
{
	uint32_t cfa_code;
	uint32_t meta_fmt;
	uint32_t meta;
	bool gfid = false;
	uint32_t mark_id;
	uint32_t flags2;
	uint32_t gfid_support = 0;
	int rc;

	if (BNXT_GFID_ENABLED(bp))
		gfid_support = 1;

	cfa_code = rte_le_to_cpu_16(rxcmp1->cfa_code);
	flags2 = rte_le_to_cpu_32(rxcmp1->flags2);
	meta = rte_le_to_cpu_32(rxcmp1->metadata);

	/*
	 * Bits [6:4] of the flags field indicate whether the
	 * flow is in TCAM, EM, or EEM.
	 */
	meta_fmt = (flags2 & BNXT_CFA_META_FMT_MASK) >>
		   BNXT_CFA_META_FMT_SHFT;

	switch (meta_fmt) {
	case 0:
		if (gfid_support) {
			/* Not an LFID or GFID, a flush cmd. */
			goto skip_mark;
		}
		/* LFID mode, no vlan scenario */
		gfid = false;
		break;
	case 4:
	case 5:
		/*
		 * Assume that EM doesn't support Mark due to GFID
		 * collisions with EEM. Simply return without setting the mark
		 * in the mbuf.
		 */
		if (BNXT_CFA_META_EM_TEST(meta)) {
			/* This is an EM hit: {EM(1), GFID[27:16], 19'd0 or vtag}. */
			gfid = true;
			meta >>= BNXT_RX_META_CFA_CODE_SHIFT;
			cfa_code |= meta << BNXT_CFA_CODE_META_SHIFT;
		} else {
			/*
			 * It is a TCAM entry, so it is an LFID.
			 * The TCAM IDX and Mode can also be determined
			 * by decoding the meta_data. We are not
			 * using these for now.
			 */
		}
		break;
	case 6:
	case 7:
		/* EEM case; only using gfid in EEM for now. */
		gfid = true;

		/*
		 * For EEM flows, the first part of cfa_code is 16 bits.
		 * The second part is embedded in the
		 * metadata field from bit 19 onwards. The driver needs to
		 * ignore the first 19 bits of metadata and use the next 12
		 * bits as the higher 12 bits of cfa_code.
		 */
		meta >>= BNXT_RX_META_CFA_CODE_SHIFT;
		cfa_code |= meta << BNXT_CFA_CODE_META_SHIFT;
		break;
	default:
		/* For other values, the cfa_code is assumed to be an LFID. */
		break;
	}

	rc = ulp_mark_db_mark_get(bp->ulp_ctx, gfid,
				  cfa_code, vfr_flag, &mark_id);
	if (!rc) {
		/* VF to VFR Rx path; skip mark_id injection into the mbuf. */
		if (vfr_flag && *vfr_flag)
			return mark_id;
		/* Got the mark, write it to the mbuf and return. */
		mbuf->hash.fdir.hi = mark_id;
		*bnxt_cfa_code_dynfield(mbuf) = cfa_code & 0xffffffffull;
		mbuf->hash.fdir.id = rxcmp1->cfa_code;
		mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
		return mark_id;
	}

skip_mark:
	mbuf->hash.fdir.hi = 0;
	mbuf->hash.fdir.id = 0;

	return 0;
}
void bnxt_set_mark_in_mbuf(struct bnxt *bp,
			   struct rx_pkt_cmpl_hi *rxcmp1,
			   struct rte_mbuf *mbuf)
{
	uint32_t cfa_code = 0;
	uint8_t meta_fmt = 0;
	uint16_t flags2 = 0;
	uint32_t meta = 0;

	cfa_code = rte_le_to_cpu_16(rxcmp1->cfa_code);
	if (!cfa_code)
		return;

	if (cfa_code && !bp->mark_table[cfa_code].valid)
		return;

	flags2 = rte_le_to_cpu_16(rxcmp1->flags2);
	meta = rte_le_to_cpu_32(rxcmp1->metadata);
	if (meta) {
		meta >>= BNXT_RX_META_CFA_CODE_SHIFT;

		/* Bits [6:4] of the flags field indicate whether the
		 * flow is in TCAM, EM, or EEM.
		 */
		meta_fmt = (flags2 & BNXT_CFA_META_FMT_MASK) >>
			   BNXT_CFA_META_FMT_SHFT;

		/* meta_fmt == 4 => 'b100 => 'b10x => EM.
		 * meta_fmt == 5 => 'b101 => 'b10x => EM + VLAN.
		 * meta_fmt == 6 => 'b110 => 'b11x => EEM.
		 * meta_fmt == 7 => 'b111 => 'b11x => EEM + VLAN.
		 */
		meta_fmt >>= BNXT_CFA_META_FMT_EM_EEM_SHFT;
	}

	mbuf->hash.fdir.hi = bp->mark_table[cfa_code].mark_id;
	mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
}
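/*
 * Decode sketch for the meta_fmt table above (values per the comment):
 * with flags2 == 0x60, bits [6:4] give meta_fmt = 0x6 ('b110, EEM).
 * Shifting right by BNXT_CFA_META_FMT_EM_EEM_SHFT (a one-bit shift, per
 * the 'b10x/'b11x grouping) drops the VLAN bit, leaving 'b11 for EEM and
 * 'b10 for EM, which is why the EM and EEM classes each collapse their
 * VLAN and non-VLAN variants into a single value.
 */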
static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
		       struct bnxt_rx_queue *rxq, uint32_t *raw_cons)
{
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	struct rx_pkt_cmpl *rxcmp;
	struct rx_pkt_cmpl_hi *rxcmp1;
	uint32_t tmp_raw_cons = *raw_cons;
	uint16_t cons, raw_prod, cp_cons =
	    RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
	struct rte_mbuf *mbuf;
	int rc = 0;
	uint8_t agg_buf = 0;
	uint16_t cmp_type;
	uint32_t vfr_flag = 0, mark_id = 0;
	struct bnxt *bp = rxq->bp;

	rxcmp = (struct rx_pkt_cmpl *)
	    &cpr->cp_desc_ring[cp_cons];

	cmp_type = CMP_TYPE(rxcmp);

	if (cmp_type == RX_TPA_V2_ABUF_CMPL_TYPE_RX_TPA_AGG) {
		struct rx_tpa_v2_abuf_cmpl *rx_agg = (void *)rxcmp;
		uint16_t agg_id = rte_cpu_to_le_16(rx_agg->agg_id);
		struct bnxt_tpa_info *tpa_info;

		tpa_info = &rxr->tpa_info[agg_id];
		RTE_ASSERT(tpa_info->agg_count < 16);
		tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
		rc = -EINVAL; /* Continue w/o new mbuf */
		goto next_rx;
	}

	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
	rxcmp1 = (struct rx_pkt_cmpl_hi *)&cpr->cp_desc_ring[cp_cons];

	if (!CMP_VALID(rxcmp1, tmp_raw_cons, cpr->cp_ring_struct))
		return -EBUSY;

	cpr->valid = FLIP_VALID(cp_cons,
				cpr->cp_ring_struct->ring_mask,
				cpr->valid);

	if (cmp_type == RX_TPA_START_CMPL_TYPE_RX_TPA_START ||
	    cmp_type == RX_TPA_START_V2_CMPL_TYPE_RX_TPA_START_V2) {
		bnxt_tpa_start(rxq, (struct rx_tpa_start_cmpl *)rxcmp,
			       (struct rx_tpa_start_cmpl_hi *)rxcmp1);
		rc = -EINVAL; /* Continue w/o new mbuf */
		goto next_rx;
	} else if (cmp_type == RX_TPA_END_CMPL_TYPE_RX_TPA_END) {
		mbuf = bnxt_tpa_end(rxq, &tmp_raw_cons,
				    (struct rx_tpa_end_cmpl *)rxcmp,
				    (struct rx_tpa_end_cmpl_hi *)rxcmp1);
		if (unlikely(!mbuf))
			return -EBUSY;
		*rx_pkt = mbuf;
		goto next_rx;
	} else if ((cmp_type != CMPL_BASE_TYPE_RX_L2) &&
		   (cmp_type != CMPL_BASE_TYPE_RX_L2_V2)) {
		rc = -EINVAL;
		goto next_rx;
	}

	agg_buf = BNXT_RX_L2_AGG_BUFS(rxcmp);
	if (agg_buf && !bnxt_agg_bufs_valid(cpr, agg_buf, tmp_raw_cons))
		return -EBUSY;

	raw_prod = rxr->rx_raw_prod;

	cons = rxcmp->opaque;
	mbuf = bnxt_consume_rx_buf(rxr, cons);
	if (mbuf == NULL)
		return -EBUSY;

	mbuf->data_off = RTE_PKTMBUF_HEADROOM;
	mbuf->nb_segs = 1;
	mbuf->next = NULL;
	mbuf->pkt_len = rte_le_to_cpu_16(rxcmp->len);
	mbuf->data_len = mbuf->pkt_len;
	mbuf->port = rxq->port_id;

#ifdef RTE_LIBRTE_IEEE1588
	if (unlikely((rte_le_to_cpu_16(rxcmp->flags_type) &
		      RX_PKT_CMPL_FLAGS_MASK) ==
		     RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP))
		bnxt_get_rx_ts_p5(rxq->bp, rxcmp1->reorder);
#endif

	if (cmp_type == CMPL_BASE_TYPE_RX_L2_V2) {
		bnxt_parse_csum_v2(mbuf, rxcmp1);
		bnxt_parse_pkt_type_v2(mbuf, rxcmp, rxcmp1);
		bnxt_rx_vlan_v2(mbuf, rxcmp, rxcmp1);
		/* TODO: add support for cfa_code parsing. */
		goto reuse_rx_mbuf;
	}

	bnxt_set_ol_flags(rxr, rxcmp, rxcmp1, mbuf);

	mbuf->packet_type = bnxt_parse_pkt_type(rxcmp, rxcmp1);

	if (BNXT_TRUFLOW_EN(bp))
		mark_id = bnxt_ulp_set_mark_in_mbuf(rxq->bp, rxcmp1, mbuf,
						    &vfr_flag);
	else
		bnxt_set_mark_in_mbuf(rxq->bp, rxcmp1, mbuf);

reuse_rx_mbuf:
	if (agg_buf)
		bnxt_rx_pages(rxq, mbuf, &tmp_raw_cons, agg_buf, NULL);

#ifdef BNXT_DEBUG
	if (rxcmp1->errors_v2 & RX_CMP_L2_ERRORS) {
		/* Re-install the mbuf back to the rx ring. */
		bnxt_reuse_rx_mbuf(rxr, mbuf);

		rc = -EIO;
		goto next_rx;
	}
#endif
	/*
	 * TODO: Redesign this....
	 * If the allocation fails, the packet does not get received.
	 * Simply returning this will result in slowly falling behind
	 * on the producer ring buffers.
	 * Instead, "filling up" the producer just before ringing the
	 * doorbell could be a better solution since it will let the
	 * producer ring starve until memory is available again, pushing
	 * the drops into hardware and getting them out of the driver,
	 * allowing recovery to a full producer ring.
	 *
	 * This could also help with cache usage by preventing per-packet
	 * calls in favour of a tight loop with the same function being called
	 * in it.
	 */
	raw_prod = RING_NEXT(raw_prod);
	if (bnxt_alloc_rx_data(rxq, rxr, raw_prod)) {
		PMD_DRV_LOG(ERR, "mbuf alloc failed with prod=0x%x\n",
			    raw_prod);
		rc = -ENOMEM;
		goto rx;
	}
	rxr->rx_raw_prod = raw_prod;

	if (BNXT_TRUFLOW_EN(bp) && (BNXT_VF_IS_TRUSTED(bp) || BNXT_PF(bp)) &&
	    vfr_flag) {
		bnxt_vfr_recv(mark_id, rxq->queue_id, mbuf);
		/* Now return an error so that nb_rx_pkts is not
		 * incremented.
		 * This packet was meant for the representor, so there is
		 * no need to account for it here or hand it to the
		 * parent Rx burst function.
		 */
		rc = -ENODEV;
		goto next_rx;
	}
	/*
	 * All mbufs are allocated with the same size under DPDK,
	 * so there is no optimization for rx_copy_thresh.
	 */
rx:
	*rx_pkt = mbuf;

next_rx:

	*raw_cons = tmp_raw_cons;

	return rc;
}
uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
{
	struct bnxt_rx_queue *rxq = rx_queue;
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	uint16_t rx_raw_prod = rxr->rx_raw_prod;
	uint16_t ag_raw_prod = rxr->ag_raw_prod;
	uint32_t raw_cons = cpr->cp_raw_cons;
	bool alloc_failed = false;
	uint32_t cons;
	int nb_rx_pkts = 0;
	int nb_rep_rx_pkts = 0;
	struct rx_pkt_cmpl *rxcmp;
	int rc = 0;
	bool evt = false;

	if (unlikely(is_bnxt_in_error(rxq->bp)))
		return 0;

	/* If the Rx queue was stopped, return. */
	if (unlikely(!rxq->rx_started))
		return 0;

#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
	/*
	 * Replenish buffers if needed when a transition has been made from
	 * vector to non-vector receive processing.
	 */
	while (unlikely(rxq->rxrearm_nb)) {
		if (!bnxt_alloc_rx_data(rxq, rxr, rxq->rxrearm_start)) {
			rxr->rx_raw_prod = rxq->rxrearm_start;
			bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);
			rxq->rxrearm_start++;
			rxq->rxrearm_nb--;
		} else {
			/* Retry allocation on next call. */
			break;
		}
	}
#endif

	/* Handle RX burst request */
	while (1) {
		cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
		rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];

		if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
			break;
		cpr->valid = FLIP_VALID(cons,
					cpr->cp_ring_struct->ring_mask,
					cpr->valid);

		if (CMP_TYPE(rxcmp) == CMPL_BASE_TYPE_HWRM_DONE) {
			PMD_DRV_LOG(ERR, "Rx flush done\n");
		} else if ((CMP_TYPE(rxcmp) >= CMPL_BASE_TYPE_RX_TPA_START_V2) &&
			   (CMP_TYPE(rxcmp) <= RX_TPA_V2_ABUF_CMPL_TYPE_RX_TPA_AGG)) {
			rc = bnxt_rx_pkt(&rx_pkts[nb_rx_pkts], rxq, &raw_cons);
			if (!rc)
				nb_rx_pkts++;
			else if (rc == -EBUSY)	/* partial completion */
				break;
			else if (rc == -ENODEV)	/* completion for representor */
				nb_rep_rx_pkts++;
			else if (rc == -ENOMEM) {
				nb_rx_pkts++;
				alloc_failed = true;
			}
		} else if (!BNXT_NUM_ASYNC_CPR(rxq->bp)) {
			evt =
			bnxt_event_hwrm_resp_handler(rxq->bp,
						     (struct cmpl_base *)rxcmp);
			/* If the async event is a fatal error, return. */
			if (unlikely(is_bnxt_in_error(rxq->bp)))
				goto done;
		}
		raw_cons = NEXT_RAW_CMP(raw_cons);
		if (nb_rx_pkts == nb_pkts || nb_rep_rx_pkts == nb_pkts || evt)
			break;
	}

	cpr->cp_raw_cons = raw_cons;
	if (!nb_rx_pkts && !nb_rep_rx_pkts && !evt) {
		/*
		 * There is no need to keep rearming the doorbell
		 * if there are no new completions.
		 */
		goto done;
	}

	/* Ring the completion queue doorbell. */
	bnxt_db_cq(cpr);

	/* Ring the receive descriptor doorbell. */
	if (rx_raw_prod != rxr->rx_raw_prod)
		bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);

	/* Ring the AGG ring DB. */
	if (ag_raw_prod != rxr->ag_raw_prod)
		bnxt_db_write(&rxr->ag_db, rxr->ag_raw_prod);

	/* Attempt to alloc Rx buf in case of a previous allocation failure. */
	if (alloc_failed) {
		int cnt;

		rx_raw_prod = RING_NEXT(rx_raw_prod);
		for (cnt = 0; cnt < nb_rx_pkts + nb_rep_rx_pkts; cnt++) {
			struct rte_mbuf **rx_buf;
			uint16_t ndx;

			ndx = RING_IDX(rxr->rx_ring_struct, rx_raw_prod + cnt);
			rx_buf = &rxr->rx_buf_ring[ndx];

			/* Buffer already allocated for this index. */
			if (*rx_buf != NULL && *rx_buf != &rxq->fake_mbuf)
				continue;

			/* This slot is empty. Alloc buffer for Rx. */
			if (!bnxt_alloc_rx_data(rxq, rxr, rx_raw_prod + cnt)) {
				rxr->rx_raw_prod = rx_raw_prod + cnt;
				bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);
			} else {
				PMD_DRV_LOG(ERR, "Alloc mbuf failed\n");
				break;
			}
		}
	}

done:
	return nb_rx_pkts;
}
/*
 * Dummy DPDK callback for RX.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 */
uint16_t
bnxt_dummy_recv_pkts(void *rx_queue __rte_unused,
		     struct rte_mbuf **rx_pkts __rte_unused,
		     uint16_t nb_pkts __rte_unused)
{
	return 0;
}
void bnxt_free_rx_rings(struct bnxt *bp)
{
	int i;
	struct bnxt_rx_queue *rxq;

	if (!bp->rx_queues)
		return;

	for (i = 0; i < (int)bp->rx_nr_rings; i++) {
		rxq = bp->rx_queues[i];
		if (!rxq)
			continue;

		bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
		rte_free(rxq->rx_ring->rx_ring_struct);

		/* Free the aggregation ring. */
		bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
		rte_free(rxq->rx_ring->ag_ring_struct);
		rxq->rx_ring->ag_ring_struct = NULL;

		rte_free(rxq->rx_ring);

		bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
		rte_free(rxq->cp_ring->cp_ring_struct);
		rte_free(rxq->cp_ring);

		rte_free(rxq);
		bp->rx_queues[i] = NULL;
	}
}
int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
{
	struct bnxt_cp_ring_info *cpr;
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_ring *ring;

	rxq->rx_buf_size = BNXT_MAX_PKT_LEN + sizeof(struct rte_mbuf);

	rxr = rte_zmalloc_socket("bnxt_rx_ring",
				 sizeof(struct bnxt_rx_ring_info),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxr == NULL)
		return -ENOMEM;
	rxq->rx_ring = rxr;

	ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (ring == NULL)
		return -ENOMEM;
	rxr->rx_ring_struct = ring;
	ring->ring_size = rte_align32pow2(rxq->nb_rx_desc);
	ring->ring_mask = ring->ring_size - 1;
	ring->bd = (void *)rxr->rx_desc_ring;
	ring->bd_dma = rxr->rx_desc_mapping;

	/* Allocate extra rx ring entries for vector rx. */
	ring->vmem_size = sizeof(struct rte_mbuf *) *
			  (ring->ring_size + RTE_BNXT_DESCS_PER_LOOP);

	ring->vmem = (void **)&rxr->rx_buf_ring;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	cpr = rte_zmalloc_socket("bnxt_rx_ring",
				 sizeof(struct bnxt_cp_ring_info),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (cpr == NULL)
		return -ENOMEM;
	rxq->cp_ring = cpr;

	ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (ring == NULL)
		return -ENOMEM;
	cpr->cp_ring_struct = ring;

	/* Allocate two completion slots per entry in desc ring. */
	ring->ring_size = rxr->rx_ring_struct->ring_size * 2;
	ring->ring_size *= AGG_RING_SIZE_FACTOR;

	ring->ring_size = rte_align32pow2(ring->ring_size);
	ring->ring_mask = ring->ring_size - 1;
	ring->bd = (void *)cpr->cp_desc_ring;
	ring->bd_dma = cpr->cp_desc_mapping;
	ring->vmem_size = 0;
	ring->vmem = NULL;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	/* Allocate the aggregation ring. */
	ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (ring == NULL)
		return -ENOMEM;
	rxr->ag_ring_struct = ring;
	ring->ring_size = rte_align32pow2(rxq->nb_rx_desc *
					  AGG_RING_SIZE_FACTOR);
	ring->ring_mask = ring->ring_size - 1;
	ring->bd = (void *)rxr->ag_desc_ring;
	ring->bd_dma = rxr->ag_desc_mapping;
	ring->vmem_size = ring->ring_size * sizeof(struct rte_mbuf *);
	ring->vmem = (void **)&rxr->ag_buf_ring;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	return 0;
}
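/*
 * Sizing sketch for the rings built above (illustrative; the real factor is
 * AGG_RING_SIZE_FACTOR from the driver headers): with nb_rx_desc == 256,
 *
 *     rx ring:  rte_align32pow2(256)
 *     agg ring: rte_align32pow2(256 * AGG_RING_SIZE_FACTOR)
 *     cp ring:  rte_align32pow2(256 * 2 * AGG_RING_SIZE_FACTOR)
 *
 * The completion ring is scaled by both factors because Rx and aggregation
 * buffers each produce completion entries, and an Rx completion occupies
 * two slots (rx_pkt_cmpl plus rx_pkt_cmpl_hi).
 */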
static void bnxt_init_rxbds(struct bnxt_ring *ring, uint32_t type,
			    uint16_t len)
{
	uint32_t j;
	struct rx_prod_pkt_bd *rx_bd_ring = (struct rx_prod_pkt_bd *)ring->bd;

	if (!rx_bd_ring)
		return;
	for (j = 0; j < ring->ring_size; j++) {
		rx_bd_ring[j].flags_type = rte_cpu_to_le_16(type);
		rx_bd_ring[j].len = rte_cpu_to_le_16(len);
		rx_bd_ring[j].opaque = j;
	}
}
int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
{
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_ring *ring;
	uint32_t raw_prod, type;
	unsigned int i;
	uint16_t size;

	/* Initialize packet type table. */
	bnxt_init_ptype_table();

	size = rte_pktmbuf_data_room_size(rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
	size = RTE_MIN(BNXT_MAX_PKT_LEN, size);

	type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT;

	rxr = rxq->rx_ring;
	ring = rxr->rx_ring_struct;
	bnxt_init_rxbds(ring, type, size);

	/* Initialize offload flags parsing table. */
	bnxt_init_ol_flags_tables(rxq);

	raw_prod = rxr->rx_raw_prod;
	for (i = 0; i < ring->ring_size; i++) {
		if (unlikely(!rxr->rx_buf_ring[i])) {
			if (bnxt_alloc_rx_data(rxq, rxr, raw_prod) != 0) {
				PMD_DRV_LOG(WARNING,
					    "init'ed rx ring %d with %d/%d mbufs only\n",
					    rxq->queue_id, i, ring->ring_size);
				break;
			}
		}
		rxr->rx_raw_prod = raw_prod;
		raw_prod = RING_NEXT(raw_prod);
	}

	/* Initialize dummy mbuf pointers for vector mode rx. */
	for (i = ring->ring_size;
	     i < ring->ring_size + RTE_BNXT_DESCS_PER_LOOP; i++) {
		rxr->rx_buf_ring[i] = &rxq->fake_mbuf;
	}

	ring = rxr->ag_ring_struct;
	type = RX_PROD_AGG_BD_TYPE_RX_PROD_AGG;
	bnxt_init_rxbds(ring, type, size);
	raw_prod = rxr->ag_raw_prod;

	for (i = 0; i < ring->ring_size; i++) {
		if (unlikely(!rxr->ag_buf_ring[i])) {
			if (bnxt_alloc_ag_data(rxq, rxr, raw_prod) != 0) {
				PMD_DRV_LOG(WARNING,
					    "init'ed AG ring %d with %d/%d mbufs only\n",
					    rxq->queue_id, i, ring->ring_size);
				break;
			}
		}
		rxr->ag_raw_prod = raw_prod;
		raw_prod = RING_NEXT(raw_prod);
	}
	PMD_DRV_LOG(DEBUG, "AGG Done!\n");

	if (rxr->tpa_info) {
		unsigned int max_aggs = BNXT_TPA_MAX_AGGS(rxq->bp);

		for (i = 0; i < max_aggs; i++) {
			if (unlikely(!rxr->tpa_info[i].mbuf)) {
				rxr->tpa_info[i].mbuf =
					__bnxt_alloc_rx_data(rxq->mb_pool);
				if (!rxr->tpa_info[i].mbuf) {
					rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
					return -ENOMEM;
				}
			}
		}
	}
	PMD_DRV_LOG(DEBUG, "TPA alloc Done!\n");

	return 0;
}
/* Sweep the Rx completion queue until a HWRM_DONE completion for the ring
 * flush is received. The mbufs are not freed in this call; they are freed
 * during ring free as part of mem cleanup.
 */
int bnxt_flush_rx_cmp(struct bnxt_cp_ring_info *cpr)
{
	struct bnxt_ring *cp_ring_struct = cpr->cp_ring_struct;
	uint32_t ring_mask = cp_ring_struct->ring_mask;
	uint32_t raw_cons = cpr->cp_raw_cons;
	struct rx_pkt_cmpl *rxcmp;
	uint32_t nb_rx = 0;
	uint32_t cons;

	do {
		cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
		rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];

		if (CMP_TYPE(rxcmp) == CMPL_BASE_TYPE_HWRM_DONE)
			return 1;

		raw_cons = NEXT_RAW_CMP(raw_cons);
		nb_rx++;
	} while (nb_rx < ring_mask);

	cpr->cp_raw_cons = raw_cons;

	/* Ring the completion queue doorbell. */
	bnxt_db_cq(cpr);

	return 0;
}