#include <rte_memory.h>
#include "bnxt.h"
-#include "bnxt_cpr.h"
#include "bnxt_ring.h"
#include "bnxt_rxr.h"
#include "bnxt_rxq.h"
#include "hsi_struct_def_dpdk.h"
+#ifdef RTE_LIBRTE_IEEE1588
+#include "bnxt_hwrm.h"
+#endif
+
+#include <bnxt_tf_common.h>
+#include <ulp_mark_mgr.h>
/*
* RX Ring handling
struct bnxt_sw_rx_bd *rx_buf = &rxr->ag_buf_ring[prod];
struct rte_mbuf *mbuf;
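+ /* Validate the ring descriptor and software buffer slot before
+ * allocating an mbuf, so that a failed check does not leak the
+ * allocation.
+ */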
+ if (rxbd == NULL) {
+ PMD_DRV_LOG(ERR, "Jumbo Frame. rxbd is NULL\n");
+ return -EINVAL;
+ }
+
+ if (rx_buf == NULL) {
+ PMD_DRV_LOG(ERR, "Jumbo Frame. rx_buf is NULL\n");
+ return -EINVAL;
+ }
+
mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
if (!mbuf) {
rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
return -ENOMEM;
}
- if (rxbd == NULL)
- PMD_DRV_LOG(ERR, "Jumbo Frame. rxbd is NULL\n");
- if (rx_buf == NULL)
- PMD_DRV_LOG(ERR, "Jumbo Frame. rx_buf is NULL\n");
-
-
rx_buf->mbuf = mbuf;
mbuf->data_off = RTE_PKTMBUF_HEADROOM;
struct rx_tpa_start_cmpl_hi *tpa_start1)
{
struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
- uint8_t agg_id = rte_le_to_cpu_32(tpa_start->agg_id &
- RX_TPA_START_CMPL_AGG_ID_MASK) >> RX_TPA_START_CMPL_AGG_ID_SFT;
+ uint16_t agg_id;
uint16_t data_cons;
struct bnxt_tpa_info *tpa_info;
struct rte_mbuf *mbuf;
+ agg_id = bnxt_tpa_start_agg_id(rxq->bp, tpa_start);
+
data_cons = tpa_start->opaque;
tpa_info = &rxr->tpa_info[agg_id];
bnxt_reuse_rx_mbuf(rxr, tpa_info->mbuf);
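+ /* Start the new TPA session with no buffered aggregation
+ * completions.
+ */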
+ tpa_info->agg_count = 0;
tpa_info->mbuf = mbuf;
tpa_info->len = rte_le_to_cpu_32(tpa_start->len);
static int bnxt_rx_pages(struct bnxt_rx_queue *rxq,
struct rte_mbuf *mbuf, uint32_t *tmp_raw_cons,
- uint8_t agg_buf)
+ uint8_t agg_buf, struct bnxt_tpa_info *tpa_info)
{
struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
uint16_t cp_cons, ag_cons;
struct rx_pkt_cmpl *rxcmp;
struct rte_mbuf *last = mbuf;
+ bool is_thor_tpa = tpa_info && BNXT_CHIP_THOR(rxq->bp);
for (i = 0; i < agg_buf; i++) {
struct bnxt_sw_rx_bd *ag_buf;
struct rte_mbuf *ag_mbuf;
- *tmp_raw_cons = NEXT_RAW_CMP(*tmp_raw_cons);
- cp_cons = RING_CMP(cpr->cp_ring_struct, *tmp_raw_cons);
- rxcmp = (struct rx_pkt_cmpl *)
+
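+ /* On Thor, the aggregation completions for this TPA session
+ * were buffered in tpa_info->agg_arr by bnxt_rx_pkt();
+ * consume them from there instead of walking the completion
+ * ring.
+ */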
+ if (is_thor_tpa) {
+ rxcmp = (void *)&tpa_info->agg_arr[i];
+ } else {
+ *tmp_raw_cons = NEXT_RAW_CMP(*tmp_raw_cons);
+ cp_cons = RING_CMP(cpr->cp_ring_struct, *tmp_raw_cons);
+ rxcmp = (struct rx_pkt_cmpl *)
&cpr->cp_desc_ring[cp_cons];
+ }
#ifdef BNXT_DEBUG
bnxt_dump_cmpl(cp_cons, rxcmp);
struct bnxt_rx_queue *rxq,
uint32_t *raw_cp_cons,
struct rx_tpa_end_cmpl *tpa_end,
- struct rx_tpa_end_cmpl_hi *tpa_end1 __rte_unused)
+ struct rx_tpa_end_cmpl_hi *tpa_end1)
{
struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
- uint8_t agg_id = (tpa_end->agg_id & RX_TPA_END_CMPL_AGG_ID_MASK)
- >> RX_TPA_END_CMPL_AGG_ID_SFT;
+ uint16_t agg_id;
struct rte_mbuf *mbuf;
uint8_t agg_bufs;
+ uint8_t payload_offset;
struct bnxt_tpa_info *tpa_info;
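+ /* Thor signals TPA end with the v2 completion layout: the
+ * aggregation ID is wider and the aggregation buffer count is
+ * carried in the second (high) completion record.
+ */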
+ if (BNXT_CHIP_THOR(rxq->bp)) {
+ struct rx_tpa_v2_end_cmpl *th_tpa_end;
+ struct rx_tpa_v2_end_cmpl_hi *th_tpa_end1;
+
+ th_tpa_end = (void *)tpa_end;
+ th_tpa_end1 = (void *)tpa_end1;
+ agg_id = BNXT_TPA_END_AGG_ID_TH(th_tpa_end);
+ agg_bufs = BNXT_TPA_END_AGG_BUFS_TH(th_tpa_end1);
+ payload_offset = th_tpa_end1->payload_offset;
+ } else {
+ agg_id = BNXT_TPA_END_AGG_ID(tpa_end);
+ agg_bufs = BNXT_TPA_END_AGG_BUFS(tpa_end);
+ if (!bnxt_agg_bufs_valid(cpr, agg_bufs, *raw_cp_cons))
+ return NULL;
+ payload_offset = tpa_end->payload_offset;
+ }
+
tpa_info = &rxr->tpa_info[agg_id];
mbuf = tpa_info->mbuf;
RTE_ASSERT(mbuf != NULL);
rte_prefetch0(mbuf);
- agg_bufs = (rte_le_to_cpu_32(tpa_end->agg_bufs_v1) &
- RX_TPA_END_CMPL_AGG_BUFS_MASK) >> RX_TPA_END_CMPL_AGG_BUFS_SFT;
if (agg_bufs) {
- if (!bnxt_agg_bufs_valid(cpr, agg_bufs, *raw_cp_cons))
- return NULL;
- bnxt_rx_pages(rxq, mbuf, raw_cp_cons, agg_bufs);
+ bnxt_rx_pages(rxq, mbuf, raw_cp_cons, agg_bufs, tpa_info);
}
- mbuf->l4_len = tpa_end->payload_offset;
+ mbuf->l4_len = payload_offset;
struct rte_mbuf *new_data = __bnxt_alloc_rx_data(rxq->mb_pool);
RTE_ASSERT(new_data != NULL);
return pkt_type;
}
+#ifdef RTE_LIBRTE_IEEE1588
+static void
+bnxt_get_rx_ts_thor(struct bnxt *bp, uint32_t rx_ts_cmpl)
+{
+ uint64_t systime_cycles = 0;
+
+ if (!BNXT_CHIP_THOR(bp))
+ return;
+
+ /* On Thor, Rx timestamps are provided directly in the
+ * Rx completion records to the driver. Only 32 bits of
+ * the timestamp are present in the completion. The driver
+ * needs to read the current 48-bit free-running timer using
+ * the HWRM_PORT_TS_QUERY command and combine the upper 16 bits
+ * from the HWRM response with the lower 32 bits from the
+ * Rx completion to produce the 48-bit timestamp for the Rx packet.
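+ *
+ * For example (hypothetical values): a timer reading of
+ * 0x123456789ABC masked with 0xFFFF00000000 gives
+ * 0x123400000000, which OR'd with a completion value of
+ * 0x6789ABCD yields 0x12346789ABCD.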
+ */
+ bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME,
+ &systime_cycles);
+ bp->ptp_cfg->rx_timestamp = (systime_cycles & 0xFFFF00000000);
+ bp->ptp_cfg->rx_timestamp |= rx_ts_cmpl;
+}
+#endif
+
+static void
+bnxt_ulp_set_mark_in_mbuf(struct bnxt *bp, struct rx_pkt_cmpl_hi *rxcmp1,
+ struct rte_mbuf *mbuf)
+{
+ uint32_t cfa_code;
+ uint32_t meta_fmt;
+ uint32_t meta;
+ bool gfid = false;
+ uint32_t mark_id;
+ uint32_t flags2;
+ int rc;
+
+ cfa_code = rte_le_to_cpu_16(rxcmp1->cfa_code);
+ flags2 = rte_le_to_cpu_32(rxcmp1->flags2);
+ meta = rte_le_to_cpu_32(rxcmp1->metadata);
+
+ /*
+ * Bits [6:4] of flags2 indicate whether the flow was matched
+ * in the TCAM, EM, or EEM tables.
+ */
+ meta_fmt = (flags2 & BNXT_CFA_META_FMT_MASK) >>
+ BNXT_CFA_META_FMT_SHFT;
+
+ switch (meta_fmt) {
+ case 0:
+ /* Not an LFID or GFID, a flush cmd. */
+ goto skip_mark;
+ case 4:
+ case 5:
+ /*
+ * EM/TCAM case
+ * Assume that EM doesn't support Mark due to GFID
+ * collisions with EEM. Simply return without setting the mark
+ * in the mbuf.
+ */
+ if (BNXT_CFA_META_EM_TEST(meta))
+ goto skip_mark;
+ /*
+ * It is a TCAM entry, so it is an LFID. The TCAM IDX and Mode
+ * can also be determined by decoding the meta_data. We are not
+ * using these for now.
+ */
+ break;
+ case 6:
+ case 7:
+ /* EEM case; only the GFID is used for EEM for now. */
+ gfid = true;
+
+ /*
+ * For EEM flows, the first part of cfa_code is 16 bits.
+ * The second part is embedded in the metadata field from
+ * bit 19 onwards: the driver ignores the low 19 bits of
+ * metadata and uses the next 12 bits as the upper 12 bits
+ * of cfa_code.
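+ *
+ * For example (hypothetical values, using the 16-bit split
+ * described above): cfa_code = 0x1234 combined with metadata
+ * bits [30:19] = 0xABC gives a final cfa_code of 0x0ABC1234.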
+ */
+ meta >>= BNXT_RX_META_CFA_CODE_SHIFT;
+ cfa_code |= meta << BNXT_CFA_CODE_META_SHIFT;
+ break;
+ default:
+ /* For other values, the cfa_code is assumed to be an LFID. */
+ break;
+ }
+
+ if (cfa_code) {
+ rc = ulp_mark_db_mark_get(bp->ulp_ctx, gfid,
+ cfa_code, &mark_id);
+ if (!rc) {
+ /* Got the mark, write it to the mbuf and return */
+ mbuf->hash.fdir.hi = mark_id;
+ mbuf->udata64 = (cfa_code & 0xffffffffull) << 32;
+ mbuf->hash.fdir.id = rxcmp1->cfa_code;
+ mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ return;
+ }
+ }
+
+skip_mark:
+ mbuf->hash.fdir.hi = 0;
+ mbuf->hash.fdir.id = 0;
+}
+
+void bnxt_set_mark_in_mbuf(struct bnxt *bp,
+ struct rx_pkt_cmpl_hi *rxcmp1,
+ struct rte_mbuf *mbuf)
+{
+ uint32_t cfa_code = 0;
+ uint8_t meta_fmt = 0;
+ uint32_t flags2 = 0;
+ uint32_t meta = 0;
+
+ cfa_code = rte_le_to_cpu_16(rxcmp1->cfa_code);
+ if (!cfa_code)
+ return;
+
+ if (!bp->mark_table[cfa_code].valid)
+ return;
+
+ flags2 = rte_le_to_cpu_32(rxcmp1->flags2);
+ meta = rte_le_to_cpu_32(rxcmp1->metadata);
+ if (meta) {
+ meta >>= BNXT_RX_META_CFA_CODE_SHIFT;
+
+ /* Bits [6:4] of flags2 indicate whether the flow was matched
+ * in the TCAM, EM, or EEM tables.
+ */
+ meta_fmt = (flags2 & BNXT_CFA_META_FMT_MASK) >>
+ BNXT_CFA_META_FMT_SHFT;
+
+ /* meta_fmt == 4 => 'b100 => 'b10x => EM
+ * meta_fmt == 5 => 'b101 => 'b10x => EM + VLAN
+ * meta_fmt == 6 => 'b110 => 'b11x => EEM
+ * meta_fmt == 7 => 'b111 => 'b11x => EEM + VLAN
+ */
+ meta_fmt >>= BNXT_CFA_META_FMT_EM_EEM_SHFT;
+ }
+
+ mbuf->hash.fdir.hi = bp->mark_table[cfa_code].mark_id;
+ mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+}
+
static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
struct bnxt_rx_queue *rxq, uint32_t *raw_cons)
{
uint8_t agg_buf = 0;
uint16_t cmp_type;
uint32_t flags2_f = 0;
+ uint16_t flags_type;
+ struct bnxt *bp = rxq->bp;
rxcmp = (struct rx_pkt_cmpl *)
&cpr->cp_desc_ring[cp_cons];
+ cmp_type = CMP_TYPE(rxcmp);
+
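+ /* Thor delivers TPA aggregation buffer completions as
+ * standalone ring entries; buffer them per session so
+ * bnxt_rx_pages() can consume them when the TPA end
+ * completion arrives.
+ */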
+ if (cmp_type == RX_TPA_V2_ABUF_CMPL_TYPE_RX_TPA_AGG) {
+ struct rx_tpa_v2_abuf_cmpl *rx_agg = (void *)rxcmp;
+ uint16_t agg_id = rte_le_to_cpu_16(rx_agg->agg_id);
+ struct bnxt_tpa_info *tpa_info;
+
+ tpa_info = &rxr->tpa_info[agg_id];
+ RTE_ASSERT(tpa_info->agg_count < 16);
+ tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
+ rc = -EINVAL; /* Continue w/o new mbuf */
+ goto next_rx;
+ }
+
tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
cp_cons = RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
rxcmp1 = (struct rx_pkt_cmpl_hi *)&cpr->cp_desc_ring[cp_cons];
cpr->cp_ring_struct->ring_mask,
cpr->valid);
- cmp_type = CMP_TYPE(rxcmp);
if (cmp_type == RX_TPA_START_CMPL_TYPE_RX_TPA_START) {
bnxt_tpa_start(rxq, (struct rx_tpa_start_cmpl *)rxcmp,
(struct rx_tpa_start_cmpl_hi *)rxcmp1);
mbuf->data_len = mbuf->pkt_len;
mbuf->port = rxq->port_id;
mbuf->ol_flags = 0;
- if (rxcmp->flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) {
+
+ flags_type = rte_le_to_cpu_16(rxcmp->flags_type);
+ if (flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) {
mbuf->hash.rss = rxcmp->rss_hash;
mbuf->ol_flags |= PKT_RX_RSS_HASH;
- } else {
- mbuf->hash.fdir.id = rxcmp1->cfa_code;
- mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
}
- if ((rxcmp->flags_type & rte_cpu_to_le_16(RX_PKT_CMPL_FLAGS_MASK)) ==
- RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP)
- mbuf->ol_flags |= PKT_RX_IEEE1588_PTP | PKT_RX_IEEE1588_TMST;
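+ /* With TruFlow enabled, resolve the mark through the ULP mark
+ * manager; otherwise use the per-port mark table.
+ */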
+ if (BNXT_TRUFLOW_EN(bp))
+ bnxt_ulp_set_mark_in_mbuf(rxq->bp, rxcmp1, mbuf);
+ else
+ bnxt_set_mark_in_mbuf(rxq->bp, rxcmp1, mbuf);
+#ifdef RTE_LIBRTE_IEEE1588
+ if (unlikely((flags_type & RX_PKT_CMPL_FLAGS_MASK) ==
+ RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP)) {
+ mbuf->ol_flags |= PKT_RX_IEEE1588_PTP | PKT_RX_IEEE1588_TMST;
+ bnxt_get_rx_ts_thor(rxq->bp, rxcmp1->reorder);
+ }
+#endif
if (agg_buf)
- bnxt_rx_pages(rxq, mbuf, &tmp_raw_cons, agg_buf);
+ bnxt_rx_pages(rxq, mbuf, &tmp_raw_cons, agg_buf, NULL);
if (rxcmp1->flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) {
mbuf->vlan_tci = rxcmp1->metadata &
flags2_f = flags2_0xf(rxcmp1);
/* IP Checksum */
- if (unlikely(((IS_IP_NONTUNNEL_PKT(flags2_f)) &&
- (RX_CMP_IP_CS_ERROR(rxcmp1))) ||
- (IS_IP_TUNNEL_PKT(flags2_f) &&
- (RX_CMP_IP_OUTER_CS_ERROR(rxcmp1))))) {
- mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
- } else if (unlikely(RX_CMP_IP_CS_UNKNOWN(rxcmp1))) {
- mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
- } else {
- mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ if (likely(IS_IP_NONTUNNEL_PKT(flags2_f))) {
+ if (unlikely(RX_CMP_IP_CS_ERROR(rxcmp1)))
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ else if (unlikely(RX_CMP_IP_CS_UNKNOWN(rxcmp1)))
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
+ else
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ } else if (IS_IP_TUNNEL_PKT(flags2_f)) {
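+ /* For tunnel packets, an error in either the outer or the
+ * inner IP checksum marks the packet bad.
+ */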
+ if (unlikely(RX_CMP_IP_OUTER_CS_ERROR(rxcmp1) ||
+ RX_CMP_IP_CS_ERROR(rxcmp1)))
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ else if (unlikely(RX_CMP_IP_CS_UNKNOWN(rxcmp1)))
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
+ else
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
}
/* L4 Checksum */
if (unlikely(is_bnxt_in_error(rxq->bp)))
return 0;
- /* If Rx Q was stopped return. RxQ0 cannot be stopped. */
- if (unlikely(((rxq->rx_deferred_start ||
- !rte_spinlock_trylock(&rxq->lock)) &&
- rxq->queue_id)))
+ /* If the Rx queue was stopped, return */
+ if (unlikely(!rxq->rx_started ||
+ !rte_spinlock_trylock(&rxq->lock)))
return 0;
/* Handle RX burst request */
evt =
bnxt_event_hwrm_resp_handler(rxq->bp,
(struct cmpl_base *)rxcmp);
+ /* If the async event is a fatal error, return */
+ if (unlikely(is_bnxt_in_error(rxq->bp)))
+ goto done;
}
raw_cons = NEXT_RAW_CMP(raw_cons);
/* Attempt to alloc Rx buf in case of a previous allocation failure. */
if (rc == -ENOMEM) {
- int i;
+ int i = RING_NEXT(rxr->rx_ring_struct, prod);
+ int cnt = nb_rx_pkts;
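+
+ /* Retry the failed allocations for the nb_rx_pkts entries
+ * consumed by this burst, starting at the slot after the last
+ * good producer index and skipping slots that already hold an
+ * mbuf.
+ */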
- for (i = prod; i <= nb_rx_pkts;
- i = RING_NEXT(rxr->rx_ring_struct, i)) {
+ for (; cnt;
+ i = RING_NEXT(rxr->rx_ring_struct, i), cnt--) {
struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
/* Buffer already allocated for this index. */
int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
{
struct bnxt_cp_ring_info *cpr;
- struct bnxt_cp_ring_info *nqr;
struct bnxt_rx_ring_info *rxr;
struct bnxt_ring *ring;
- rxq->rx_buf_use_size = BNXT_MAX_MTU + RTE_ETHER_HDR_LEN +
- RTE_ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);
- rxq->rx_buf_size = rxq->rx_buf_use_size + sizeof(struct rte_mbuf);
+ rxq->rx_buf_size = BNXT_MAX_PKT_LEN + sizeof(struct rte_mbuf);
rxr = rte_zmalloc_socket("bnxt_rx_ring",
sizeof(struct bnxt_rx_ring_info),
ring->vmem_size = 0;
ring->vmem = NULL;
- if (BNXT_HAS_NQ(rxq->bp)) {
- nqr = rte_zmalloc_socket("bnxt_rx_ring_cq",
- sizeof(struct bnxt_cp_ring_info),
- RTE_CACHE_LINE_SIZE, socket_id);
- if (nqr == NULL)
- return -ENOMEM;
-
- rxq->nq_ring = nqr;
-
- ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
- sizeof(struct bnxt_ring),
- RTE_CACHE_LINE_SIZE, socket_id);
- if (ring == NULL)
- return -ENOMEM;
-
- nqr->cp_ring_struct = ring;
- ring->ring_size =
- rte_align32pow2(rxr->rx_ring_struct->ring_size *
- (2 + AGG_RING_SIZE_FACTOR));
- ring->ring_mask = ring->ring_size - 1;
- ring->bd = (void *)nqr->cp_desc_ring;
- ring->bd_dma = nqr->cp_desc_mapping;
- ring->vmem_size = 0;
- ring->vmem = NULL;
- }
-
/* Allocate Aggregator rings */
ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
sizeof(struct bnxt_ring),
uint16_t size;
size = rte_pktmbuf_data_room_size(rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
- if (rxq->rx_buf_use_size <= size)
- size = rxq->rx_buf_use_size;
+ size = RTE_MIN(BNXT_MAX_PKT_LEN, size);
type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT | RX_PROD_PKT_BD_FLAGS_EOP_PAD;
prod = rxr->rx_prod;
for (i = 0; i < ring->ring_size; i++) {
- if (bnxt_alloc_rx_data(rxq, rxr, prod) != 0) {
- PMD_DRV_LOG(WARNING,
- "init'ed rx ring %d with %d/%d mbufs only\n",
- rxq->queue_id, i, ring->ring_size);
- break;
+ if (unlikely(!rxr->rx_buf_ring[i].mbuf)) {
+ if (bnxt_alloc_rx_data(rxq, rxr, prod) != 0) {
+ PMD_DRV_LOG(WARNING,
+ "init'ed rx ring %d with %d/%d mbufs only\n",
+ rxq->queue_id, i, ring->ring_size);
+ break;
+ }
}
rxr->rx_prod = prod;
prod = RING_NEXT(rxr->rx_ring_struct, prod);
prod = rxr->ag_prod;
for (i = 0; i < ring->ring_size; i++) {
- if (bnxt_alloc_ag_data(rxq, rxr, prod) != 0) {
- PMD_DRV_LOG(WARNING,
- "init'ed AG ring %d with %d/%d mbufs only\n",
- rxq->queue_id, i, ring->ring_size);
- break;
+ if (unlikely(!rxr->ag_buf_ring[i].mbuf)) {
+ if (bnxt_alloc_ag_data(rxq, rxr, prod) != 0) {
+ PMD_DRV_LOG(WARNING,
+ "init'ed AG ring %d with %d/%d mbufs only\n",
+ rxq->queue_id, i, ring->ring_size);
+ break;
+ }
}
rxr->ag_prod = prod;
prod = RING_NEXT(rxr->ag_ring_struct, prod);
PMD_DRV_LOG(DEBUG, "AGG Done!\n");
if (rxr->tpa_info) {
- for (i = 0; i < BNXT_TPA_MAX; i++) {
- rxr->tpa_info[i].mbuf =
- __bnxt_alloc_rx_data(rxq->mb_pool);
- if (!rxr->tpa_info[i].mbuf) {
- rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
- return -ENOMEM;
+ unsigned int max_aggs = BNXT_TPA_MAX_AGGS(rxq->bp);
+
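+ /* The maximum number of concurrent TPA sessions is
+ * chip-dependent, so preallocate one mbuf per possible
+ * session.
+ */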
+ for (i = 0; i < max_aggs; i++) {
+ if (unlikely(!rxr->tpa_info[i].mbuf)) {
+ rxr->tpa_info[i].mbuf =
+ __bnxt_alloc_rx_data(rxq->mb_pool);
+ if (!rxr->tpa_info[i].mbuf) {
+ rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
+ return -ENOMEM;
+ }
}
}
}