"ring_dma_zone_reserve for rx_ring failed!\n");
goto err;
}
- rte_atomic64_init(&rxq->rx_mbuf_alloc_fail);
+ rxq->rx_mbuf_alloc_fail = 0;
/* rxq 0 must not be stopped when used as async CPR */
if (!BNXT_NUM_ASYNC_CPR(bp) && queue_idx == 0)
struct bnxt_rx_ring_info *rx_ring;
struct bnxt_cp_ring_info *cp_ring;
struct rte_mbuf fake_mbuf;
- rte_atomic64_t rx_mbuf_alloc_fail;
+ uint64_t rx_mbuf_alloc_fail;
const struct rte_memzone *mz;
};
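
Below is a minimal, self-contained sketch of the pattern the hunks above switch to, assuming nothing about the real bnxt structures beyond the counter itself: the field becomes a plain uint64_t, the receive path bumps it with a relaxed __atomic_fetch_add, and any concurrent reader uses a relaxed __atomic_load_n. All names other than rx_mbuf_alloc_fail are hypothetical.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the driver's RX queue; only the counter
 * handling mirrors the patch. Requires GCC/Clang __atomic builtins. */
struct rxq_sketch {
	uint64_t rx_mbuf_alloc_fail;	/* plain integer, no rte_atomic64_t */
};

/* Datapath: record an mbuf allocation failure. Relaxed ordering is enough
 * for a statistics counter; no other memory access depends on it. */
static void rxq_count_alloc_fail(struct rxq_sketch *rxq)
{
	__atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1, __ATOMIC_RELAXED);
}

/* Control path: read the counter while the datapath may still be running. */
static uint64_t rxq_read_alloc_fail(struct rxq_sketch *rxq)
{
	return __atomic_load_n(&rxq->rx_mbuf_alloc_fail, __ATOMIC_RELAXED);
}

int main(void)
{
	struct rxq_sketch rxq = { .rx_mbuf_alloc_fail = 0 };

	rxq_count_alloc_fail(&rxq);
	rxq_count_alloc_fail(&rxq);
	printf("rx_mbuf_alloc_fail = %" PRIu64 "\n", rxq_read_alloc_fail(&rxq));
	return 0;
}

The relaxed builtins keep the add and the read atomic without the full-barrier semantics the legacy rte_atomic64_* helpers implied, which is all a statistics counter needs.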
rx_buf = &rxr->rx_buf_ring[prod];
mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
if (!mbuf) {
- rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
+ __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1, __ATOMIC_RELAXED);
return -ENOMEM;
}
mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
if (!mbuf) {
- rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
+ __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1, __ATOMIC_RELAXED);
return -ENOMEM;
}
struct rte_mbuf *new_data = __bnxt_alloc_rx_data(rxq->mb_pool);
RTE_ASSERT(new_data != NULL);
if (!new_data) {
- rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
+ __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1, __ATOMIC_RELAXED);
return NULL;
}
tpa_info->mbuf = new_data;
rxr->tpa_info[i].mbuf =
__bnxt_alloc_rx_data(rxq->mb_pool);
if (!rxr->tpa_info[i].mbuf) {
- rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
+ __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1,
+ __ATOMIC_RELAXED);
return -ENOMEM;
}
}
bnxt_fill_rte_eth_stats(bnxt_stats, &ring_stats, i, true);
bnxt_stats->rx_nombuf +=
- rte_atomic64_read(&rxq->rx_mbuf_alloc_fail);
+ __atomic_load_n(&rxq->rx_mbuf_alloc_fail, __ATOMIC_RELAXED);
}
num_q_stats = RTE_MIN(bp->tx_cp_nr_rings,
for (i = 0; i < bp->rx_cp_nr_rings; i++) {
struct bnxt_rx_queue *rxq = bp->rx_queues[i];
- rte_atomic64_clear(&rxq->rx_mbuf_alloc_fail);
+ rxq->rx_mbuf_alloc_fail = 0;
}
bnxt_clear_prev_stat(bp);
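
And a sketch of the other side of the change, corresponding to the stats hunks: aggregation reads each per-queue counter with a relaxed load, while reset simply writes 0, which assumes (as the patch appears to) that either the queue is quiesced or a race on reset is acceptable for a statistics counter. The array and count below are placeholders, not bnxt's real layout.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same hypothetical per-queue counter as in the earlier sketch. */
struct rxq_sketch {
	uint64_t rx_mbuf_alloc_fail;
};

/* Stats get: accumulate an rx_nombuf-style total with relaxed loads so the
 * datapath can keep incrementing concurrently. */
static uint64_t rxq_total_alloc_fail(struct rxq_sketch *rxqs, unsigned int nb_rxq)
{
	uint64_t total = 0;
	unsigned int i;

	for (i = 0; i < nb_rxq; i++)
		total += __atomic_load_n(&rxqs[i].rx_mbuf_alloc_fail,
					 __ATOMIC_RELAXED);
	return total;
}

/* Stats reset: plain stores, matching the patch. If the datapath could race
 * with the reset, __atomic_store_n(..., 0, __ATOMIC_RELAXED) would be the
 * stricter alternative. */
static void rxq_reset_alloc_fail(struct rxq_sketch *rxqs, unsigned int nb_rxq)
{
	unsigned int i;

	for (i = 0; i < nb_rxq; i++)
		rxqs[i].rx_mbuf_alloc_fail = 0;
}

int main(void)
{
	struct rxq_sketch rxqs[2] = { { 3 }, { 5 } };

	printf("rx_nombuf = %" PRIu64 "\n", rxq_total_alloc_fail(rxqs, 2));
	rxq_reset_alloc_fail(rxqs, 2);
	printf("after reset = %" PRIu64 "\n", rxq_total_alloc_fail(rxqs, 2));
	return 0;
}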