net/bnxt: support bulk free of Tx mbufs
Author: Ajit Khaparde <ajit.khaparde@broadcom.com>
Wed, 15 May 2019 18:08:17 +0000 (11:08 -0700)
Committer: Ferruh Yigit <ferruh.yigit@intel.com>
Mon, 27 May 2019 16:09:07 +0000 (18:09 +0200)
The driver currently uses rte_pktmbuf_free() to free each mbuf
after transmit completion. This is optimized to free multiple
mbufs using rte_mempool_put_bulk().

Signed-off-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Signed-off-by: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
drivers/net/bnxt/bnxt_txq.c
drivers/net/bnxt/bnxt_txq.h
drivers/net/bnxt/bnxt_txr.c

index b9b975e..5a7bfaf 100644 (file)
@@ -69,6 +69,7 @@ void bnxt_tx_queue_release_op(void *tx_queue)
                rte_memzone_free(txq->mz);
                txq->mz = NULL;
 
+               rte_free(txq->free);
                rte_free(txq);
        }
 }
@@ -110,6 +111,16 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
                rc = -ENOMEM;
                goto out;
        }
+
+       txq->free = rte_zmalloc_socket(NULL,
+                                      sizeof(struct rte_mbuf *) * nb_desc,
+                                      RTE_CACHE_LINE_SIZE, socket_id);
+       if (!txq->free) {
+               PMD_DRV_LOG(ERR, "allocation of tx mbuf free array failed!");
+               rte_free(txq);
+               rc = -ENOMEM;
+               goto out;
+       }
        txq->bp = bp;
        txq->nb_tx_desc = nb_desc;
        txq->tx_free_thresh = tx_conf->tx_free_thresh;
index 720ca90..a0d4678 100644 (file)
@@ -33,6 +33,7 @@ struct bnxt_tx_queue {
        unsigned int            cp_nr_rings;
        struct bnxt_cp_ring_info        *cp_ring;
        const struct rte_memzone *mz;
+       struct rte_mbuf **free;
 };
 
 void bnxt_free_txq_stats(struct bnxt_tx_queue *txq);
index b15778b..9de12e0 100644 (file)
@@ -320,6 +320,7 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
                RTE_VERIFY(m_seg->data_len);
                txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);
                tx_buf = &txr->tx_buf_ring[txr->tx_prod];
+               tx_buf->mbuf = m_seg;
 
                txbd = &txr->tx_desc_ring[txr->tx_prod];
                txbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova(m_seg));
@@ -339,24 +340,53 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
 static void bnxt_tx_cmp(struct bnxt_tx_queue *txq, int nr_pkts)
 {
        struct bnxt_tx_ring_info *txr = txq->tx_ring;
+       struct rte_mempool *pool = NULL;
+       struct rte_mbuf **free = txq->free;
        uint16_t cons = txr->tx_cons;
+       unsigned int blk = 0;
        int i, j;
 
        for (i = 0; i < nr_pkts; i++) {
-               struct bnxt_sw_tx_bd *tx_buf;
                struct rte_mbuf *mbuf;
+               struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[cons];
+               unsigned short nr_bds = tx_buf->nr_bds;
 
-               tx_buf = &txr->tx_buf_ring[cons];
-               cons = RING_NEXT(txr->tx_ring_struct, cons);
-               mbuf = tx_buf->mbuf;
-               tx_buf->mbuf = NULL;
-
-               /* EW - no need to unmap DMA memory? */
-
-               for (j = 1; j < tx_buf->nr_bds; j++)
+               for (j = 0; j < nr_bds; j++) {
+                       mbuf = tx_buf->mbuf;
+                       tx_buf->mbuf = NULL;
                        cons = RING_NEXT(txr->tx_ring_struct, cons);
-               rte_pktmbuf_free(mbuf);
+                       tx_buf = &txr->tx_buf_ring[cons];
+                       if (!mbuf)      /* presumably a BD slot with no mbuf (long_bd?) -- verify */
+                               continue;
+
+                       mbuf = rte_pktmbuf_prefree_seg(mbuf);
+                       if (unlikely(!mbuf))
+                               continue;
+
+                       /* EW - no need to unmap DMA memory? */
+
+                       if (likely(mbuf->pool == pool)) {
+                               /* Add mbuf to the bulk free array */
+                               free[blk++] = mbuf;
+                       } else {
+                               /* Found an mbuf from a different pool. Free
+                                * mbufs accumulated so far to the previous
+                                * pool
+                                */
+                               if (likely(pool != NULL))
+                                       rte_mempool_put_bulk(pool,
+                                                            (void *)free,
+                                                            blk);
+
+                               /* Start accumulating mbufs in a new pool */
+                               free[0] = mbuf;
+                               pool = mbuf->pool;
+                               blk = 1;
+                       }
+               }
        }
+       if (blk)
+               rte_mempool_put_bulk(pool, (void *)free, blk);
 
        txr->tx_cons = cons;
 }