static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
				struct bnxt_tx_queue *txq,
				uint16_t *coal_pkts,
-				uint16_t *cmpl_next,
				struct tx_bd_long **last_txbd)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;

	txbd->opaque = *coal_pkts;
	txbd->flags_type = nr_bds << TX_BD_LONG_FLAGS_BD_CNT_SFT;
	txbd->flags_type |= TX_BD_SHORT_FLAGS_COAL_NOW;
-	if (!*cmpl_next) {
-		txbd->flags_type |= TX_BD_LONG_FLAGS_NO_CMPL;
-	} else {
-		*coal_pkts = 0;
-		*cmpl_next = false;
-	}
+	txbd->flags_type |= TX_BD_LONG_FLAGS_NO_CMPL;
	txbd->len = tx_pkt->data_len;
	if (tx_pkt->pkt_len >= 2014)
		txbd->flags_type |= TX_BD_LONG_FLAGS_LHINT_GTE2K;

		RTE_VERIFY(m_seg->data_len);
		txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);
		tx_buf = &txr->tx_buf_ring[txr->tx_prod];
+		tx_buf->mbuf = m_seg;
		txbd = &txr->tx_desc_ring[txr->tx_prod];
		txbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova(m_seg));
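
/*
 * Standalone sketch (not part of the patch) of the per-segment loop the
 * excerpt above sits in: each mbuf segment gets its own descriptor, and the
 * added tx_buf->mbuf = m_seg line records every segment in the software ring
 * so the reworked bnxt_tx_cmp() below can free all nr_bds entries per packet.
 * The sw_slot/hw_bd types and the mask-based ring arithmetic are illustrative
 * assumptions, not bnxt structures.
 */
#include <stdint.h>
#include <rte_mbuf.h>

struct sw_slot { struct rte_mbuf *mbuf; };
struct hw_bd { uint64_t address; uint16_t len; };

static inline uint16_t
enqueue_segs(struct rte_mbuf *pkt, struct sw_slot *sw_ring,
	     struct hw_bd *hw_ring, uint16_t prod, uint16_t ring_mask)
{
	struct rte_mbuf *m_seg = pkt;

	while (m_seg != NULL) {
		prod = (prod + 1) & ring_mask;	/* advance producer index */
		sw_ring[prod].mbuf = m_seg;	/* remember segment for Tx completion */
		hw_ring[prod].address = rte_mbuf_data_iova(m_seg);
		hw_ring[prod].len = m_seg->data_len;
		m_seg = m_seg->next;
	}
	return prod;
}
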
static void bnxt_tx_cmp(struct bnxt_tx_queue *txq, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
+	struct rte_mempool *pool = NULL;
+	struct rte_mbuf **free = txq->free;
	uint16_t cons = txr->tx_cons;
+	unsigned int blk = 0;
	int i, j;

	for (i = 0; i < nr_pkts; i++) {
-		struct bnxt_sw_tx_bd *tx_buf;
		struct rte_mbuf *mbuf;
+		struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[cons];
+		unsigned short nr_bds = tx_buf->nr_bds;

-		tx_buf = &txr->tx_buf_ring[cons];
-		cons = RING_NEXT(txr->tx_ring_struct, cons);
-		mbuf = tx_buf->mbuf;
-		tx_buf->mbuf = NULL;
-
-		/* EW - no need to unmap DMA memory? */
-
-		for (j = 1; j < tx_buf->nr_bds; j++)
+		for (j = 0; j < nr_bds; j++) {
+			mbuf = tx_buf->mbuf;
+			tx_buf->mbuf = NULL;
			cons = RING_NEXT(txr->tx_ring_struct, cons);
-		rte_pktmbuf_free(mbuf);
+			tx_buf = &txr->tx_buf_ring[cons];
+			if (!mbuf)	/* long_bd's tx_buf ? */
+				continue;
+
+			mbuf = rte_pktmbuf_prefree_seg(mbuf);
+			if (unlikely(!mbuf))
+				continue;
+
+			/* EW - no need to unmap DMA memory? */
+
+			if (likely(mbuf->pool == pool)) {
+				/* Add mbuf to the bulk free array */
+				free[blk++] = mbuf;
+			} else {
+				/* Found an mbuf from a different pool. Free
+				 * mbufs accumulated so far to the previous
+				 * pool
+				 */
+				if (likely(pool != NULL))
+					rte_mempool_put_bulk(pool,
+							     (void *)free,
+							     blk);
+
+				/* Start accumulating mbufs in a new pool */
+				free[0] = mbuf;
+				pool = mbuf->pool;
+				blk = 1;
+			}
+		}
	}
+	if (blk)
+		rte_mempool_put_bulk(pool, (void *)free, blk);
	txr->tx_cons = cons;
}
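
/*
 * Standalone sketch (not part of the patch) of the bulk-free pattern that the
 * new bnxt_tx_cmp() applies: release each completed segment with
 * rte_pktmbuf_prefree_seg() and batch the results into a single
 * rte_mempool_put_bulk() call per mempool, flushing whenever a segment from a
 * different pool shows up. The caller-supplied scratch array stands in for
 * txq->free and is assumed to hold at least nb_segs entries.
 */
#include <rte_branch_prediction.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

static void
bulk_free_segs(struct rte_mbuf **segs, unsigned int nb_segs,
	       struct rte_mbuf **scratch)
{
	struct rte_mempool *pool = NULL;
	unsigned int blk = 0;
	unsigned int i;

	for (i = 0; i < nb_segs; i++) {
		/* Returns NULL while other references keep the mbuf alive. */
		struct rte_mbuf *m = rte_pktmbuf_prefree_seg(segs[i]);

		if (unlikely(m == NULL))
			continue;

		if (likely(m->pool == pool)) {
			scratch[blk++] = m;	/* same pool: keep batching */
			continue;
		}
		if (pool != NULL)		/* flush the previous batch */
			rte_mempool_put_bulk(pool, (void **)scratch, blk);
		scratch[0] = m;			/* start a new batch */
		pool = m->pool;
		blk = 1;
	}
	if (blk)
		rte_mempool_put_bulk(pool, (void **)scratch, blk);
}
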
uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
{
-	struct bnxt_tx_queue *txq = tx_queue;
+	int rc;
	uint16_t nb_tx_pkts = 0;
	uint16_t coal_pkts = 0;
-	uint16_t cmpl_next = 0;
+	struct bnxt_tx_queue *txq = tx_queue;
	struct tx_bd_long *last_txbd = NULL;

	/* Handle TX completions */

	/* Handle TX burst request */
	for (nb_tx_pkts = 0; nb_tx_pkts < nb_pkts; nb_tx_pkts++) {
-		int rc;
-
-		/* Request a completion on the last packet */
-		cmpl_next |= (nb_pkts == nb_tx_pkts + 1);
		coal_pkts++;
		rc = bnxt_start_xmit(tx_pkts[nb_tx_pkts], txq,
-				     &coal_pkts, &cmpl_next, &last_txbd);
+				     &coal_pkts, &last_txbd);
-		if (unlikely(rc)) {
-			/* Request a completion on the last successfully
-			 * enqueued packet
-			 */
-			if (last_txbd)
-				last_txbd->flags_type &=
-					~TX_BD_LONG_FLAGS_NO_CMPL;
+		if (unlikely(rc))
			break;
-		}
	}

-	if (nb_tx_pkts)
+	if (likely(nb_tx_pkts)) {
+		/* Request a completion on the last packet */
+		last_txbd->flags_type &= ~TX_BD_LONG_FLAGS_NO_CMPL;
		B_TX_DB(txq->tx_ring->tx_doorbell, txq->tx_ring->tx_prod);
+	}

	return nb_tx_pkts;
}
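
/*
 * Net effect of the changes above: every Tx BD is written with
 * TX_BD_LONG_FLAGS_NO_CMPL set, and after the burst loop the flag is cleared
 * on the last descriptor actually enqueued, just before the single doorbell
 * write, so the hardware raises at most one Tx completion per
 * bnxt_xmit_pkts() call. Because txbd->opaque carries the running coal_pkts
 * count, that one completion is enough for the completion path to know how
 * many packets to reclaim, and bnxt_tx_cmp() then returns all of their
 * segments to their mempools in bulk.
 */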