/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>
#include <stdbool.h>

#include <rte_bitmap.h>
#include <rte_byteorder.h>
#include <rte_malloc.h>
#include <rte_memory.h>

#include "bnxt.h"
#include "bnxt_ring.h"
#include "bnxt_rxr.h"
#include "bnxt_rxq.h"
#include "hsi_struct_def_dpdk.h"
#ifdef RTE_LIBRTE_IEEE1588
#include "bnxt_hwrm.h"
#endif

/*
 * RX Ring handling
 */
static inline struct rte_mbuf *__bnxt_alloc_rx_data(struct rte_mempool *mb)
{
	struct rte_mbuf *data;

	data = rte_mbuf_raw_alloc(mb);

	return data;
}
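/*
 * Allocate a fresh mbuf for the given Rx ring slot and point the
 * corresponding hardware buffer descriptor at its data area.
 */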
static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
				     struct bnxt_rx_ring_info *rxr,
				     uint16_t prod)
{
	struct rx_prod_pkt_bd *rxbd = &rxr->rx_desc_ring[prod];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
	struct rte_mbuf *mbuf;

	mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
	if (!mbuf) {
		rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
		return -ENOMEM;
	}

	rx_buf->mbuf = mbuf;
	mbuf->data_off = RTE_PKTMBUF_HEADROOM;

	rxbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));

	return 0;
}
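/*
 * As above, but for a slot in the aggregation ring, which supplies the
 * extra buffers used for jumbo frames and TPA aggregation.
 */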
static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
				     struct bnxt_rx_ring_info *rxr,
				     uint16_t prod)
{
	struct rx_prod_pkt_bd *rxbd = &rxr->ag_desc_ring[prod];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->ag_buf_ring[prod];
	struct rte_mbuf *mbuf;

	if (rxbd == NULL) {
		PMD_DRV_LOG(ERR, "Jumbo Frame. rxbd is NULL\n");
		return -EINVAL;
	}

	if (rx_buf == NULL) {
		PMD_DRV_LOG(ERR, "Jumbo Frame. rx_buf is NULL\n");
		return -EINVAL;
	}

	mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
	if (!mbuf) {
		rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
		return -ENOMEM;
	}

	rx_buf->mbuf = mbuf;
	mbuf->data_off = RTE_PKTMBUF_HEADROOM;

	rxbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));

	return 0;
}
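/*
 * Re-post an existing mbuf at the next producer position of the Rx ring
 * and advance the software producer index.
 */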
static inline void bnxt_reuse_rx_mbuf(struct bnxt_rx_ring_info *rxr,
				      struct rte_mbuf *mbuf)
{
	uint16_t prod = RING_NEXT(rxr->rx_ring_struct, rxr->rx_prod);
	struct bnxt_sw_rx_bd *prod_rx_buf;
	struct rx_prod_pkt_bd *prod_bd;

	prod_rx_buf = &rxr->rx_buf_ring[prod];

	RTE_ASSERT(prod_rx_buf->mbuf == NULL);
	RTE_ASSERT(mbuf != NULL);

	prod_rx_buf->mbuf = mbuf;

	prod_bd = &rxr->rx_desc_ring[prod];

	prod_bd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));

	rxr->rx_prod = prod;
}
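/*
 * Detach and return the mbuf at the given consumer index, leaving the
 * slot empty for a subsequent allocation.
 */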
static inline
struct rte_mbuf *bnxt_consume_rx_buf(struct bnxt_rx_ring_info *rxr,
				     uint16_t cons)
{
	struct bnxt_sw_rx_bd *cons_rx_buf;
	struct rte_mbuf *mbuf;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	RTE_ASSERT(cons_rx_buf->mbuf != NULL);
	mbuf = cons_rx_buf->mbuf;
	cons_rx_buf->mbuf = NULL;
	return mbuf;
}
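/*
 * Handle a TPA start completion: the mbuf at the completion's slot
 * becomes the head of the aggregated packet, the TPA slot's previous
 * mbuf is recycled back into the ring, and the head mbuf is seeded with
 * length, RSS/FDIR, VLAN and checksum metadata from the completion.
 */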
static void bnxt_tpa_start(struct bnxt_rx_queue *rxq,
			   struct rx_tpa_start_cmpl *tpa_start,
			   struct rx_tpa_start_cmpl_hi *tpa_start1)
{
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	uint16_t agg_id;
	uint16_t data_cons;
	struct bnxt_tpa_info *tpa_info;
	struct rte_mbuf *mbuf;

	agg_id = bnxt_tpa_start_agg_id(rxq->bp, tpa_start);

	data_cons = tpa_start->opaque;
	tpa_info = &rxr->tpa_info[agg_id];

	mbuf = bnxt_consume_rx_buf(rxr, data_cons);

	bnxt_reuse_rx_mbuf(rxr, tpa_info->mbuf);

	tpa_info->agg_count = 0;
	tpa_info->mbuf = mbuf;
	tpa_info->len = rte_le_to_cpu_32(tpa_start->len);

	mbuf->nb_segs = 1;
	mbuf->next = NULL;
	mbuf->pkt_len = rte_le_to_cpu_32(tpa_start->len);
	mbuf->data_len = mbuf->pkt_len;
	mbuf->port = rxq->port_id;
	mbuf->ol_flags = PKT_RX_LRO;
	if (likely(tpa_start->flags_type &
		   rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS_RSS_VALID))) {
		mbuf->hash.rss = rte_le_to_cpu_32(tpa_start->rss_hash);
		mbuf->ol_flags |= PKT_RX_RSS_HASH;
	} else {
		mbuf->hash.fdir.id = rte_le_to_cpu_16(tpa_start1->cfa_code);
		mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
	}
	if (tpa_start1->flags2 &
	    rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN)) {
		mbuf->vlan_tci = rte_le_to_cpu_32(tpa_start1->metadata);
		mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
	}
	if (likely(tpa_start1->flags2 &
		   rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC)))
		mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;

	/* recycle next mbuf */
	data_cons = RING_NEXT(rxr->rx_ring_struct, data_cons);
	bnxt_reuse_rx_mbuf(rxr, bnxt_consume_rx_buf(rxr, data_cons));
}
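/*
 * Check that the completions for all 'agg_bufs' aggregation buffers have
 * been posted by testing the valid bit of the last expected completion.
 */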
static int bnxt_agg_bufs_valid(struct bnxt_cp_ring_info *cpr,
			       uint8_t agg_bufs, uint32_t raw_cp_cons)
{
	uint16_t last_cp_cons;
	struct rx_pkt_cmpl *agg_cmpl;

	raw_cp_cons = ADV_RAW_CMP(raw_cp_cons, agg_bufs);
	last_cp_cons = RING_CMP(cpr->cp_ring_struct, raw_cp_cons);
	agg_cmpl = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[last_cp_cons];
	cpr->valid = FLIP_VALID(raw_cp_cons,
				cpr->cp_ring_struct->ring_mask,
				cpr->valid);
	return CMP_VALID(agg_cmpl, raw_cp_cons, cpr->cp_ring_struct);
}
/* TPA consumes aggregation buffers out of order; allocate only data for
 * the contiguous run of freed slots tracked by the bitmap.
 */
static int bnxt_prod_ag_mbuf(struct bnxt_rx_queue *rxq)
{
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	uint16_t next = RING_NEXT(rxr->ag_ring_struct, rxr->ag_prod);

	/* TODO batch allocation for better performance */
	while (rte_bitmap_get(rxr->ag_bitmap, next)) {
		if (unlikely(bnxt_alloc_ag_data(rxq, rxr, next))) {
			PMD_DRV_LOG(ERR,
				    "agg mbuf alloc failed: prod=0x%x\n",
				    next);
			break;
		}
		rte_bitmap_clear(rxr->ag_bitmap, next);
		rxr->ag_prod = next;
		next = RING_NEXT(rxr->ag_ring_struct, next);
	}

	return 0;
}
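/*
 * Chain the aggregation buffers belonging to a packet (or to a TPA
 * aggregation on Thor) onto the head mbuf, then try to replenish the
 * aggregation ring.
 */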
static int bnxt_rx_pages(struct bnxt_rx_queue *rxq,
			 struct rte_mbuf *mbuf, uint32_t *tmp_raw_cons,
			 uint8_t agg_buf, struct bnxt_tpa_info *tpa_info)
{
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	int i;
	uint16_t cp_cons, ag_cons;
	struct rx_pkt_cmpl *rxcmp;
	struct rte_mbuf *last = mbuf;
	bool is_thor_tpa = tpa_info && BNXT_CHIP_THOR(rxq->bp);

	for (i = 0; i < agg_buf; i++) {
		struct bnxt_sw_rx_bd *ag_buf;
		struct rte_mbuf *ag_mbuf;

		if (is_thor_tpa) {
			rxcmp = (void *)&tpa_info->agg_arr[i];
		} else {
			*tmp_raw_cons = NEXT_RAW_CMP(*tmp_raw_cons);
			cp_cons = RING_CMP(cpr->cp_ring_struct, *tmp_raw_cons);
			rxcmp = (struct rx_pkt_cmpl *)
					&cpr->cp_desc_ring[cp_cons];
		}

#ifdef BNXT_DEBUG
		bnxt_dump_cmpl(cp_cons, rxcmp);
#endif

		ag_cons = rxcmp->opaque;
		RTE_ASSERT(ag_cons <= rxr->ag_ring_struct->ring_mask);
		ag_buf = &rxr->ag_buf_ring[ag_cons];
		ag_mbuf = ag_buf->mbuf;
		RTE_ASSERT(ag_mbuf != NULL);

		ag_mbuf->data_len = rte_le_to_cpu_16(rxcmp->len);

		mbuf->nb_segs++;
		mbuf->pkt_len += ag_mbuf->data_len;

		last->next = ag_mbuf;
		last = ag_mbuf;

		ag_buf->mbuf = NULL;

		/*
		 * Aggregation buffers are consumed out of order in TPA
		 * mode, so use a bitmap to track the freed slots that
		 * still need to be reallocated and notified to the NIC.
		 */
		rte_bitmap_set(rxr->ag_bitmap, ag_cons);
	}
	bnxt_prod_ag_mbuf(rxq);
	return 0;
}
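/*
 * Handle a TPA end completion: gather the aggregation buffers into the
 * mbuf chain held in the TPA slot, hand the chain to the caller and
 * re-arm the slot with a freshly allocated mbuf.
 */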
static inline struct rte_mbuf *bnxt_tpa_end(
		struct bnxt_rx_queue *rxq,
		uint32_t *raw_cp_cons,
		struct rx_tpa_end_cmpl *tpa_end,
		struct rx_tpa_end_cmpl_hi *tpa_end1)
{
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	uint16_t agg_id;
	struct rte_mbuf *mbuf;
	uint8_t agg_bufs;
	uint8_t payload_offset;
	struct bnxt_tpa_info *tpa_info;

	if (BNXT_CHIP_THOR(rxq->bp)) {
		struct rx_tpa_v2_end_cmpl *th_tpa_end;
		struct rx_tpa_v2_end_cmpl_hi *th_tpa_end1;

		th_tpa_end = (void *)tpa_end;
		th_tpa_end1 = (void *)tpa_end1;
		agg_id = BNXT_TPA_END_AGG_ID_TH(th_tpa_end);
		agg_bufs = BNXT_TPA_END_AGG_BUFS_TH(th_tpa_end1);
		payload_offset = th_tpa_end1->payload_offset;
	} else {
		agg_id = BNXT_TPA_END_AGG_ID(tpa_end);
		agg_bufs = BNXT_TPA_END_AGG_BUFS(tpa_end);
		if (!bnxt_agg_bufs_valid(cpr, agg_bufs, *raw_cp_cons))
			return NULL;
		payload_offset = tpa_end->payload_offset;
	}

	tpa_info = &rxr->tpa_info[agg_id];
	mbuf = tpa_info->mbuf;
	RTE_ASSERT(mbuf != NULL);

	rte_prefetch0(mbuf);
	if (agg_bufs)
		bnxt_rx_pages(rxq, mbuf, raw_cp_cons, agg_bufs, tpa_info);

	mbuf->l4_len = payload_offset;

	struct rte_mbuf *new_data = __bnxt_alloc_rx_data(rxq->mb_pool);
	RTE_ASSERT(new_data != NULL);
	if (!new_data) {
		rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
		return NULL;
	}
	tpa_info->mbuf = new_data;

	return mbuf;
}
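/*
 * Derive the mbuf packet type from the item type and IP/tunnel checksum
 * flags reported in the completion record.
 */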
static uint32_t
bnxt_parse_pkt_type(struct rx_pkt_cmpl *rxcmp, struct rx_pkt_cmpl_hi *rxcmp1)
{
	uint32_t l3, pkt_type = 0;
	uint32_t t_ipcs = 0, ip6 = 0, vlan = 0;
	uint32_t flags_type;

	vlan = !!(rxcmp1->flags2 &
		rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN));
	pkt_type |= vlan ? RTE_PTYPE_L2_ETHER_VLAN : RTE_PTYPE_L2_ETHER;

	t_ipcs = !!(rxcmp1->flags2 &
		rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC));
	ip6 = !!(rxcmp1->flags2 &
		 rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_TYPE));

	flags_type = rxcmp->flags_type &
		rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS_ITYPE_MASK);

	if (!t_ipcs && !ip6)
		l3 = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
	else if (!t_ipcs && ip6)
		l3 = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
	else if (t_ipcs && !ip6)
		l3 = RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
	else
		l3 = RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;

	switch (flags_type) {
	case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_ICMP):
		if (!t_ipcs)
			pkt_type |= l3 | RTE_PTYPE_L4_ICMP;
		else
			pkt_type |= l3 | RTE_PTYPE_INNER_L4_ICMP;
		break;

	case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_TCP):
		if (!t_ipcs)
			pkt_type |= l3 | RTE_PTYPE_L4_TCP;
		else
			pkt_type |= l3 | RTE_PTYPE_INNER_L4_TCP;
		break;

	case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_UDP):
		if (!t_ipcs)
			pkt_type |= l3 | RTE_PTYPE_L4_UDP;
		else
			pkt_type |= l3 | RTE_PTYPE_INNER_L4_UDP;
		break;

	case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_IP):
		pkt_type |= l3;
		break;
	}

	return pkt_type;
}
#ifdef RTE_LIBRTE_IEEE1588
static void
bnxt_get_rx_ts_thor(struct bnxt *bp, uint32_t rx_ts_cmpl)
{
	uint64_t systime_cycles = 0;

	if (!BNXT_CHIP_THOR(bp))
		return;

	/* On Thor, Rx timestamps are provided directly in the
	 * Rx completion records to the driver. Only 32 bits of
	 * the timestamp are present in the completion. The driver needs
	 * to read the current 48 bit free running timer using the
	 * HWRM_PORT_TS_QUERY command and combine the upper 16 bits
	 * from the HWRM response with the lower 32 bits in the
	 * Rx completion to produce the 48 bit timestamp for the Rx packet.
	 */
	bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME,
				&systime_cycles);
	bp->ptp_cfg->rx_timestamp = (systime_cycles & 0xFFFF00000000);
	bp->ptp_cfg->rx_timestamp |= rx_ts_cmpl;
}
#endif
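/*
 * Process one Rx completion: demultiplex TPA start/end and aggregation
 * completions, convert an L2 completion into an mbuf carrying offload
 * metadata, and post a replacement buffer to the producer ring. Returns
 * 0 on success, -EBUSY on a partial completion, -ENOMEM when a
 * replacement buffer could not be allocated, and -EINVAL when no new
 * mbuf is produced.
 */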
static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
		       struct bnxt_rx_queue *rxq, uint32_t *raw_cons)
{
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	struct rx_pkt_cmpl *rxcmp;
	struct rx_pkt_cmpl_hi *rxcmp1;
	uint32_t tmp_raw_cons = *raw_cons;
	uint16_t cons, prod, cp_cons =
	    RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
	struct rte_mbuf *mbuf;
	int rc = 0;
	uint8_t agg_buf = 0;
	uint16_t cmp_type;
	uint32_t flags2_f = 0;
	uint16_t flags_type;

	rxcmp = (struct rx_pkt_cmpl *)
	    &cpr->cp_desc_ring[cp_cons];

	cmp_type = CMP_TYPE(rxcmp);

	if (cmp_type == RX_TPA_V2_ABUF_CMPL_TYPE_RX_TPA_AGG) {
		struct rx_tpa_v2_abuf_cmpl *rx_agg = (void *)rxcmp;
		uint16_t agg_id = rte_cpu_to_le_16(rx_agg->agg_id);
		struct bnxt_tpa_info *tpa_info;

		tpa_info = &rxr->tpa_info[agg_id];
		RTE_ASSERT(tpa_info->agg_count < 16);
		tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
		rc = -EINVAL; /* Continue w/o new mbuf */
		goto next_rx;
	}

	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
	rxcmp1 = (struct rx_pkt_cmpl_hi *)&cpr->cp_desc_ring[cp_cons];

	if (!CMP_VALID(rxcmp1, tmp_raw_cons, cpr->cp_ring_struct))
		return -EBUSY;

	cpr->valid = FLIP_VALID(cp_cons,
				cpr->cp_ring_struct->ring_mask,
				cpr->valid);

	if (cmp_type == RX_TPA_START_CMPL_TYPE_RX_TPA_START) {
		bnxt_tpa_start(rxq, (struct rx_tpa_start_cmpl *)rxcmp,
			       (struct rx_tpa_start_cmpl_hi *)rxcmp1);
		rc = -EINVAL; /* Continue w/o new mbuf */
		goto next_rx;
	} else if (cmp_type == RX_TPA_END_CMPL_TYPE_RX_TPA_END) {
		mbuf = bnxt_tpa_end(rxq, &tmp_raw_cons,
				    (struct rx_tpa_end_cmpl *)rxcmp,
				    (struct rx_tpa_end_cmpl_hi *)rxcmp1);
		if (unlikely(!mbuf))
			return -EBUSY;
		*rx_pkt = mbuf;
		goto next_rx;
	} else if (cmp_type != 0x11) {
		rc = -EINVAL;
		goto next_rx;
	}

	agg_buf = (rxcmp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK)
			>> RX_PKT_CMPL_AGG_BUFS_SFT;
	if (agg_buf && !bnxt_agg_bufs_valid(cpr, agg_buf, tmp_raw_cons))
		return -EBUSY;

	prod = rxr->rx_prod;

	cons = rxcmp->opaque;
	mbuf = bnxt_consume_rx_buf(rxr, cons);
	if (mbuf == NULL)
		return -EBUSY;

	rte_prefetch0(mbuf);

	mbuf->data_off = RTE_PKTMBUF_HEADROOM;
	mbuf->nb_segs = 1;
	mbuf->next = NULL;
	mbuf->pkt_len = rxcmp->len;
	mbuf->data_len = mbuf->pkt_len;
	mbuf->port = rxq->port_id;
	mbuf->ol_flags = 0;

	flags_type = rte_le_to_cpu_16(rxcmp->flags_type);
	if (flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) {
		mbuf->hash.rss = rxcmp->rss_hash;
		mbuf->ol_flags |= PKT_RX_RSS_HASH;
	}

	bnxt_set_mark_in_mbuf(rxq->bp, rxcmp1, mbuf);

#ifdef RTE_LIBRTE_IEEE1588
	if (unlikely((flags_type & RX_PKT_CMPL_FLAGS_MASK) ==
		     RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP)) {
		mbuf->ol_flags |= PKT_RX_IEEE1588_PTP | PKT_RX_IEEE1588_TMST;
		bnxt_get_rx_ts_thor(rxq->bp, rxcmp1->reorder);
	}
#endif

	if (agg_buf)
		bnxt_rx_pages(rxq, mbuf, &tmp_raw_cons, agg_buf, NULL);

	if (rxcmp1->flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) {
		mbuf->vlan_tci = rxcmp1->metadata &
			(RX_PKT_CMPL_METADATA_VID_MASK |
			 RX_PKT_CMPL_METADATA_DE |
			 RX_PKT_CMPL_METADATA_PRI_MASK);
		mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
	}

	flags2_f = flags2_0xf(rxcmp1);

	/* IP Checksum */
	if (likely(IS_IP_NONTUNNEL_PKT(flags2_f))) {
		if (unlikely(RX_CMP_IP_CS_ERROR(rxcmp1)))
			mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
		else if (unlikely(RX_CMP_IP_CS_UNKNOWN(rxcmp1)))
			mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
		else
			mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
	} else if (IS_IP_TUNNEL_PKT(flags2_f)) {
		if (unlikely(RX_CMP_IP_OUTER_CS_ERROR(rxcmp1) ||
			     RX_CMP_IP_CS_ERROR(rxcmp1)))
			mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
		else if (unlikely(RX_CMP_IP_CS_UNKNOWN(rxcmp1)))
			mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
		else
			mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
	}

	/* L4 Checksum */
	if (likely(IS_L4_NONTUNNEL_PKT(flags2_f))) {
		if (unlikely(RX_CMP_L4_INNER_CS_ERR2(rxcmp1)))
			mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
		else
			mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
	} else if (IS_L4_TUNNEL_PKT(flags2_f)) {
		if (unlikely(RX_CMP_L4_INNER_CS_ERR2(rxcmp1)))
			mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
		else
			mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
		if (unlikely(RX_CMP_L4_OUTER_CS_ERR2(rxcmp1))) {
			mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
		} else if (unlikely(IS_L4_TUNNEL_PKT_ONLY_INNER_L4_CS
				    (flags2_f))) {
			mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_UNKNOWN;
		} else {
			mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;
		}
	} else if (unlikely(RX_CMP_L4_CS_UNKNOWN(rxcmp1))) {
		mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
	}

	mbuf->packet_type = bnxt_parse_pkt_type(rxcmp, rxcmp1);

#ifdef BNXT_DEBUG
	if (rxcmp1->errors_v2 & RX_CMP_L2_ERRORS) {
		/* Re-install the mbuf back to the rx ring */
		bnxt_reuse_rx_mbuf(rxr, mbuf);

		rc = -EIO;
		goto next_rx;
	}
#endif
	/*
	 * TODO: Redesign this....
	 * If the allocation fails, the packet does not get received.
	 * Simply returning this will result in slowly falling behind
	 * on the producer ring buffers.
	 * Instead, "filling up" the producer just before ringing the
	 * doorbell could be a better solution since it will let the
	 * producer ring starve until memory is available again, pushing
	 * the drops into hardware and getting them out of the driver,
	 * allowing recovery to a full producer ring.
	 *
	 * This could also help with cache usage by preventing per-packet
	 * calls in favour of a tight loop with the same function being
	 * called in it.
	 */
	prod = RING_NEXT(rxr->rx_ring_struct, prod);
	if (bnxt_alloc_rx_data(rxq, rxr, prod)) {
		PMD_DRV_LOG(ERR, "mbuf alloc failed with prod=0x%x\n", prod);
		rc = -ENOMEM;
		goto rx;
	}
	rxr->rx_prod = prod;
	/*
	 * All MBUFs are allocated with the same size under DPDK,
	 * no optimization for rx_copy_thresh
	 */
rx:
	*rx_pkt = mbuf;

next_rx:

	*raw_cons = tmp_raw_cons;

	return rc;
}
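/*
 * Burst receive callback: drain up to nb_pkts completions from the
 * completion ring, then update the Rx, aggregation and completion ring
 * doorbells as needed.
 */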
uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
{
	struct bnxt_rx_queue *rxq = rx_queue;
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	uint32_t raw_cons = cpr->cp_raw_cons;
	uint32_t cons;
	int nb_rx_pkts = 0;
	struct rx_pkt_cmpl *rxcmp;
	uint16_t prod = rxr->rx_prod;
	uint16_t ag_prod = rxr->ag_prod;
	int rc = 0;
	bool evt = false;

	if (unlikely(is_bnxt_in_error(rxq->bp)))
		return 0;

	/* If Rx Q was stopped return */
	if (unlikely(!rxq->rx_started ||
		     !rte_spinlock_trylock(&rxq->lock)))
		return 0;

	/* Handle RX burst request */
	while (1) {
		cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
		rte_prefetch0(&cpr->cp_desc_ring[cons]);
		rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];

		if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
			break;
		cpr->valid = FLIP_VALID(cons,
					cpr->cp_ring_struct->ring_mask,
					cpr->valid);

		/* TODO: Avoid magic numbers... */
		if ((CMP_TYPE(rxcmp) & 0x30) == 0x10) {
			rc = bnxt_rx_pkt(&rx_pkts[nb_rx_pkts], rxq, &raw_cons);
			if (likely(!rc) || rc == -ENOMEM)
				nb_rx_pkts++;
			if (rc == -EBUSY)	/* partial completion */
				break;
		} else if (!BNXT_NUM_ASYNC_CPR(rxq->bp)) {
			evt =
			bnxt_event_hwrm_resp_handler(rxq->bp,
						     (struct cmpl_base *)rxcmp);
			/* If the async event is Fatal error, return */
			if (unlikely(is_bnxt_in_error(rxq->bp)))
				goto done;
		}

		raw_cons = NEXT_RAW_CMP(raw_cons);
		if (nb_rx_pkts == nb_pkts || evt)
			break;
		/* Post some Rx buf early in case of larger burst processing */
		if (nb_rx_pkts == BNXT_RX_POST_THRESH)
			bnxt_db_write(&rxr->rx_db, rxr->rx_prod);
	}

	cpr->cp_raw_cons = raw_cons;
	if (!nb_rx_pkts && !evt) {
		/*
		 * For PMD, there is no need to keep on pushing to REARM
		 * the doorbell if there are no new completions
		 */
		goto done;
	}

	if (prod != rxr->rx_prod)
		bnxt_db_write(&rxr->rx_db, rxr->rx_prod);

	/* Ring the AGG ring DB */
	if (ag_prod != rxr->ag_prod)
		bnxt_db_write(&rxr->ag_db, rxr->ag_prod);

	bnxt_db_cq(cpr);

	/* Attempt to alloc Rx buf in case of a previous allocation failure. */
	if (rc == -ENOMEM) {
		int i = RING_NEXT(rxr->rx_ring_struct, prod);
		int cnt = nb_rx_pkts;

		for (; cnt;
			i = RING_NEXT(rxr->rx_ring_struct, i), cnt--) {
			struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];

			/* Buffer already allocated for this index. */
			if (rx_buf->mbuf != NULL)
				continue;

			/* This slot is empty. Alloc buffer for Rx */
			if (!bnxt_alloc_rx_data(rxq, rxr, i)) {
				rxr->rx_prod = i;
				bnxt_db_write(&rxr->rx_db, rxr->rx_prod);
			} else {
				PMD_DRV_LOG(ERR, "Alloc mbuf failed\n");
				break;
			}
		}
	}

done:
	rte_spinlock_unlock(&rxq->lock);

	return nb_rx_pkts;
}
/*
 * Dummy DPDK callback for RX.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 */
uint16_t
bnxt_dummy_recv_pkts(void *rx_queue __rte_unused,
		     struct rte_mbuf **rx_pkts __rte_unused,
		     uint16_t nb_pkts __rte_unused)
{
	return 0;
}
void bnxt_free_rx_rings(struct bnxt *bp)
{
	int i;
	struct bnxt_rx_queue *rxq;

	if (!bp->rx_queues)
		return;

	for (i = 0; i < (int)bp->rx_nr_rings; i++) {
		rxq = bp->rx_queues[i];

		if (!rxq)
			continue;

		bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
		rte_free(rxq->rx_ring->rx_ring_struct);
		rxq->rx_ring->rx_ring_struct = NULL;

		/* Free the Aggregator ring */
		bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
		rte_free(rxq->rx_ring->ag_ring_struct);
		rxq->rx_ring->ag_ring_struct = NULL;

		rte_free(rxq->rx_ring);

		bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
		rte_free(rxq->cp_ring->cp_ring_struct);
		rte_free(rxq->cp_ring);

		rte_free(rxq);
		bp->rx_queues[i] = NULL;
	}
}
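/*
 * Allocate and initialize the software state for the Rx, completion and
 * aggregation rings of one Rx queue. Ring sizes are rounded up to powers
 * of two, and the completion ring is sized to absorb both Rx and
 * aggregation completions.
 */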
int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
{
	struct bnxt_cp_ring_info *cpr;
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_ring *ring;

	rxq->rx_buf_size = BNXT_MAX_PKT_LEN + sizeof(struct rte_mbuf);

	rxr = rte_zmalloc_socket("bnxt_rx_ring",
				 sizeof(struct bnxt_rx_ring_info),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxr == NULL)
		return -ENOMEM;
	rxq->rx_ring = rxr;

	ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (ring == NULL)
		return -ENOMEM;
	rxr->rx_ring_struct = ring;
	ring->ring_size = rte_align32pow2(rxq->nb_rx_desc);
	ring->ring_mask = ring->ring_size - 1;
	ring->bd = (void *)rxr->rx_desc_ring;
	ring->bd_dma = rxr->rx_desc_mapping;
	ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_rx_bd);
	ring->vmem = (void **)&rxr->rx_buf_ring;

	cpr = rte_zmalloc_socket("bnxt_rx_ring",
				 sizeof(struct bnxt_cp_ring_info),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (cpr == NULL)
		return -ENOMEM;
	rxq->cp_ring = cpr;

	ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (ring == NULL)
		return -ENOMEM;
	cpr->cp_ring_struct = ring;
	ring->ring_size = rte_align32pow2(rxr->rx_ring_struct->ring_size *
					  (2 + AGG_RING_SIZE_FACTOR));
	ring->ring_mask = ring->ring_size - 1;
	ring->bd = (void *)cpr->cp_desc_ring;
	ring->bd_dma = cpr->cp_desc_mapping;
	ring->vmem_size = 0;
	ring->vmem = NULL;

	/* Allocate Aggregator rings */
	ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (ring == NULL)
		return -ENOMEM;
	rxr->ag_ring_struct = ring;
	ring->ring_size = rte_align32pow2(rxq->nb_rx_desc *
					  AGG_RING_SIZE_FACTOR);
	ring->ring_mask = ring->ring_size - 1;
	ring->bd = (void *)rxr->ag_desc_ring;
	ring->bd_dma = rxr->ag_desc_mapping;
	ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_rx_bd);
	ring->vmem = (void **)&rxr->ag_buf_ring;

	return 0;
}
static void bnxt_init_rxbds(struct bnxt_ring *ring, uint32_t type,
			    uint16_t len)
{
	uint32_t j;
	struct rx_prod_pkt_bd *rx_bd_ring = (struct rx_prod_pkt_bd *)ring->bd;

	if (!rx_bd_ring)
		return;
	for (j = 0; j < ring->ring_size; j++) {
		rx_bd_ring[j].flags_type = rte_cpu_to_le_16(type);
		rx_bd_ring[j].len = rte_cpu_to_le_16(len);
		rx_bd_ring[j].opaque = j;
	}
}
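/*
 * Initialize the buffer descriptors of one Rx queue and populate the
 * Rx, aggregation and (if enabled) TPA rings with mbufs.
 */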
int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
{
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_ring *ring;
	uint32_t prod, type;
	unsigned int i;
	uint16_t size;

	size = rte_pktmbuf_data_room_size(rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
	size = RTE_MIN(BNXT_MAX_PKT_LEN, size);

	type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT | RX_PROD_PKT_BD_FLAGS_EOP_PAD;

	rxr = rxq->rx_ring;
	ring = rxr->rx_ring_struct;
	bnxt_init_rxbds(ring, type, size);

	prod = rxr->rx_prod;
	for (i = 0; i < ring->ring_size; i++) {
		if (bnxt_alloc_rx_data(rxq, rxr, prod) != 0) {
			PMD_DRV_LOG(WARNING,
				"init'ed rx ring %d with %d/%d mbufs only\n",
				rxq->queue_id, i, ring->ring_size);
			break;
		}
		rxr->rx_prod = prod;
		prod = RING_NEXT(rxr->rx_ring_struct, prod);
	}

	ring = rxr->ag_ring_struct;
	type = RX_PROD_AGG_BD_TYPE_RX_PROD_AGG;
	bnxt_init_rxbds(ring, type, size);
	prod = rxr->ag_prod;

	for (i = 0; i < ring->ring_size; i++) {
		if (bnxt_alloc_ag_data(rxq, rxr, prod) != 0) {
			PMD_DRV_LOG(WARNING,
				"init'ed AG ring %d with %d/%d mbufs only\n",
				rxq->queue_id, i, ring->ring_size);
			break;
		}
		rxr->ag_prod = prod;
		prod = RING_NEXT(rxr->ag_ring_struct, prod);
	}
	PMD_DRV_LOG(DEBUG, "AGG Done!\n");

	if (rxr->tpa_info) {
		unsigned int max_aggs = BNXT_TPA_MAX_AGGS(rxq->bp);

		for (i = 0; i < max_aggs; i++) {
			rxr->tpa_info[i].mbuf =
				__bnxt_alloc_rx_data(rxq->mb_pool);
			if (!rxr->tpa_info[i].mbuf) {
				rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
				return -ENOMEM;
			}
		}
	}
	PMD_DRV_LOG(DEBUG, "TPA alloc Done!\n");

	return 0;
}
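/*
 * If the completion carries a valid CFA code with an entry in the
 * driver's mark table (populated by the flow offload path), report the
 * associated mark id to the application through mbuf->hash.fdir.hi.
 */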
void bnxt_set_mark_in_mbuf(struct bnxt *bp,
			   struct rx_pkt_cmpl_hi *rxcmp1,
			   struct rte_mbuf *mbuf)
{
	uint32_t cfa_code = 0;
	uint8_t meta_fmt = 0;
	uint16_t flags2 = 0;
	uint32_t meta = 0;

	cfa_code = rte_le_to_cpu_16(rxcmp1->cfa_code);
	if (!cfa_code)
		return;

	if (cfa_code && !bp->mark_table[cfa_code].valid) {
		PMD_DRV_LOG(WARNING, "Invalid mark_tbl entry! cfa_code: 0x%x\n",
			    cfa_code);
		return;
	}

	flags2 = rte_le_to_cpu_16(rxcmp1->flags2);
	meta = rte_le_to_cpu_32(rxcmp1->metadata);
	if (meta) {
		meta >>= BNXT_RX_META_CFA_CODE_SHIFT;

		/*
		 * The flags field holds extra bits of info from [6:4],
		 * which indicate whether the flow is in TCAM, EM or EEM.
		 */
		meta_fmt = (flags2 & BNXT_CFA_META_FMT_MASK) >>
			   BNXT_CFA_META_FMT_SHFT;

		/*
		 * meta_fmt == 4 => 'b100 => 'b10x => EM.
		 * meta_fmt == 5 => 'b101 => 'b10x => EM + VLAN
		 * meta_fmt == 6 => 'b110 => 'b11x => EEM
		 * meta_fmt == 7 => 'b111 => 'b11x => EEM + VLAN.
		 */
		meta_fmt >>= BNXT_CFA_META_FMT_EM_EEM_SHFT;
	}

	mbuf->hash.fdir.hi = bp->mark_table[cfa_code].mark_id;
	mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
}