The fast mbuf free offload for non-vector mode requires additional
checks to handle long Tx buffer descriptors, so dedicated functions
are needed for the vector and non-vector modes.

Fixes: 369f6077c515 ("net/bnxt: support fast mbuf free")
Signed-off-by: Lance Richardson <lance.richardson@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
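
For context: in vector mode every packet consumes exactly one Tx buffer
descriptor, while the long Tx BD format lets a packet span tx_buf->nr_bds
descriptors, which is why the completion paths diverge below. A minimal
compilable sketch of the two consumer-index updates (a toy model, not code
from this patch; the ring layout is simplified and the real non-vector path
uses RING_NEXT()):

#include <stdint.h>

/* Toy software ring entry; the driver uses struct bnxt_sw_tx_bd. */
struct toy_sw_bd { int nr_bds; };

/* Vector mode: one BD per packet, so advance one slot per packet. */
static uint16_t toy_advance_vec(uint16_t cons, uint32_t ring_mask, int nr_pkts)
{
	while (nr_pkts--)
		cons = (cons + 1) & ring_mask;
	return cons;
}

/* Non-vector mode: a packet may span several BDs; advance per BD. */
static uint16_t toy_advance(uint16_t cons, uint32_t ring_mask,
			    const struct toy_sw_bd *ring, int nr_pkts)
{
	int i, j;

	for (i = 0; i < nr_pkts; i++) {
		int nr_bds = ring[cons].nr_bds;

		for (j = 0; j < nr_bds; j++)
			cons = (cons + 1) & ring_mask;
	}
	return cons;
}
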
rxq->rxrearm_nb -= nb;
}
+/*
+ * Transmit completion function for use when DEV_TX_OFFLOAD_MBUF_FAST_FREE
+ * is enabled.
+ */
+static inline void
+bnxt_tx_cmp_vec_fast(struct bnxt_tx_queue *txq, int nr_pkts)
+{
+	struct bnxt_tx_ring_info *txr = txq->tx_ring;
+	uint32_t ring_mask = txr->tx_ring_struct->ring_mask;
+	struct rte_mbuf **free = txq->free;
+	uint16_t cons = txr->tx_cons;
+	unsigned int blk = 0;
+
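+	/* In vector mode each packet consumes exactly one BD. */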
+	while (nr_pkts--) {
+		struct bnxt_sw_tx_bd *tx_buf;
+
+		tx_buf = &txr->tx_buf_ring[cons];
+		cons = (cons + 1) & ring_mask;
+		free[blk++] = tx_buf->mbuf;
+		tx_buf->mbuf = NULL;
+	}
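+	/*
+	 * With fast free enabled, all mbufs on the queue come from one
+	 * mempool and have refcnt 1, so a single bulk put suffices.
+	 */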
+	if (blk)
+		rte_mempool_put_bulk(free[0]->pool, (void **)free, blk);
+
+	txr->tx_cons = cons;
+}
+
static inline void
bnxt_tx_cmp_vec(struct bnxt_tx_queue *txq, int nr_pkts)
{
	cpr->valid = !!(raw_cons & cp_ring_struct->ring_size);
	if (nb_tx_pkts) {
		if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
-			bnxt_tx_cmp_fast(txq, nb_tx_pkts);
+			bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
		else
			bnxt_tx_cmp_vec(txq, nb_tx_pkts);
		cpr->cp_raw_cons = raw_cons;
	cpr->valid = !!(raw_cons & cp_ring_struct->ring_size);
	if (nb_tx_pkts) {
		if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
-			bnxt_tx_cmp_fast(txq, nb_tx_pkts);
+			bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
		else
			bnxt_tx_cmp_vec(txq, nb_tx_pkts);
		cpr->cp_raw_cons = raw_cons;

	return 0;
}
+/*
+ * Transmit completion function for use when DEV_TX_OFFLOAD_MBUF_FAST_FREE
+ * is enabled.
+ */
+static void bnxt_tx_cmp_fast(struct bnxt_tx_queue *txq, int nr_pkts)
+{
+	struct bnxt_tx_ring_info *txr = txq->tx_ring;
+	struct rte_mbuf **free = txq->free;
+	uint16_t cons = txr->tx_cons;
+	unsigned int blk = 0;
+	int i, j;
+
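+	/* A packet may occupy multiple BDs; walk and reclaim each one. */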
+	for (i = 0; i < nr_pkts; i++) {
+		struct bnxt_sw_tx_bd *tx_buf;
+		unsigned short nr_bds;
+
+		tx_buf = &txr->tx_buf_ring[cons];
+		nr_bds = tx_buf->nr_bds;
+		for (j = 0; j < nr_bds; j++) {
+			if (tx_buf->mbuf) {
+				/* Add mbuf to the bulk free array */
+				free[blk++] = tx_buf->mbuf;
+				tx_buf->mbuf = NULL;
+			}
+			cons = RING_NEXT(txr->tx_ring_struct, cons);
+			tx_buf = &txr->tx_buf_ring[cons];
+		}
+	}
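+	/* Fast free guarantees a single mempool per queue; bulk put once. */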
+	if (blk)
+		rte_mempool_put_bulk(free[0]->pool, (void *)free, blk);
+
+	txr->tx_cons = cons;
+}
+
static void bnxt_tx_cmp(struct bnxt_tx_queue *txq, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
		 bnxt_tx_bds_in_hw(txq)) - 1);
}
-/*
- * Transmit completion function for use when DEV_TX_OFFLOAD_MBUF_FAST_FREE
- * is enabled.
- */
-static inline void
-bnxt_tx_cmp_fast(struct bnxt_tx_queue *txq, int nr_pkts)
-{
-	struct bnxt_tx_ring_info *txr = txq->tx_ring;
-	uint32_t ring_mask = txr->tx_ring_struct->ring_mask;
-	struct rte_mbuf **free = txq->free;
-	uint16_t cons = txr->tx_cons;
-	unsigned int blk = 0;
-
-	while (nr_pkts--) {
-		struct bnxt_sw_tx_bd *tx_buf;
-
-		tx_buf = &txr->tx_buf_ring[cons];
-		cons = (cons + 1) & ring_mask;
-		free[blk++] = tx_buf->mbuf;
-		tx_buf->mbuf = NULL;
-	}
-	if (blk)
-		rte_mempool_put_bulk(free[0]->pool, (void **)free, blk);
-
-	txr->tx_cons = cons;
-}
-
void bnxt_free_tx_rings(struct bnxt *bp);
int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq);
int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id);