* the mbuf and return to the caller.
*
* Returns:
- * 0 = Success, !0 = Failure
+ * void.
+ *
* Note the side effect that an mbuf may be freed if it causes a problem.
*/
-int bnx2x_tx_encap(struct bnx2x_tx_queue *txq, struct rte_mbuf **m_head, int m_pkts)
+void bnx2x_tx_encap(struct bnx2x_tx_queue *txq, struct rte_mbuf **m_head,
+ int m_pkts)
{
struct rte_mbuf *m0;
struct eth_tx_start_bd *tx_start_bd;
m0 = *m_head++;
- if (unlikely(txq->nb_tx_avail < 3)) {
- PMD_TX_LOG(ERR, "no enough bds %d/%d",
- bd_prod, txq->nb_tx_avail);
- return -ENOMEM;
- }
-
txq->sw_ring[TX_BD(pkt_prod, txq)] = m0;
tx_start_bd = &txq->tx_ring[TX_BD(bd_prod, txq)].start_bd;
fp->tx_db.data.prod += (m_pkts << 1) + nbds;
DOORBELL(sc, txq->queue_id, fp->tx_db.raw);
mb();
-
- return 0;
}
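
With the -ENOMEM path gone, bnx2x_tx_encap() no longer polices ring exhaustion itself; the caller must bound the request before calling it. A minimal caller-side sketch, assuming the BDS_PER_TX_PKT constant introduced below (which matches the old nb_tx_avail < 3 guard removed above):

	/* Clamp the request to what the ring can hold; encap now
	 * assumes the caller has already done this. */
	uint16_t budget = RTE_MIN(nb_pkts, txq->nb_tx_avail / BDS_PER_TX_PKT);

	if (budget > 0)
		bnx2x_tx_encap(txq, tx_pkts, budget);

The burst loop below additionally caps each encap call at RTE_PMD_BNX2X_TX_MAX_BURST.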
static uint16_t bnx2x_cid_ilt_lines(struct bnx2x_softc *sc)
#define TX_PAGE(x) (((x) & ~USABLE_TX_BD_PER_PAGE) >> 8)
#define TX_IDX(x) ((x) & USABLE_TX_BD_PER_PAGE)
+#define BDS_PER_TX_PKT (3)
+
/*
* Trigger pending transmits when the number of available BDs is greater
* than 1/8 of the total number of usable BDs.
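
The trigger is expressed through txq->tx_free_thresh; its initialization is outside this diff, so the exact 1/8 arithmetic is an assumption here. The cleanup path itself, using only fields visible in this patch:

	/* In-flight BDs are ring size minus available BDs; once they
	 * exceed the threshold, reap completed descriptors. */
	if ((txq->nb_tx_desc - txq->nb_tx_avail) > txq->tx_free_thresh)
		bnx2x_txeof(sc, fp);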
int bnx2x_alloc_ilt_mem(struct bnx2x_softc *sc);
void bnx2x_free_ilt_mem(struct bnx2x_softc *sc);
void bnx2x_dump_tx_chain(struct bnx2x_fastpath * fp, int bd_prod, int count);
-int bnx2x_tx_encap(struct bnx2x_tx_queue *txq, struct rte_mbuf **m_head, int m_pkts);
+void bnx2x_tx_encap(struct bnx2x_tx_queue *txq, struct rte_mbuf **m_head,
+ int m_pkts);
uint8_t bnx2x_txeof(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp);
void bnx2x_print_adapter_info(struct bnx2x_softc *sc);
int bnx2x_intr_legacy(struct bnx2x_softc *sc, int scan_fp);
struct bnx2x_tx_queue *txq;
struct bnx2x_softc *sc;
struct bnx2x_fastpath *fp;
- uint32_t burst, nb_tx;
+ uint32_t burst;
struct rte_mbuf **m = tx_pkts;
- int ret;
+ uint16_t nb_tx_pkts;
+ uint16_t nb_pkt_sent = 0;
txq = p_txq;
sc = txq->sc;
fp = &sc->fp[txq->queue_id];
- nb_tx = nb_pkts;
+ if (unlikely((txq->nb_tx_desc - txq->nb_tx_avail) >
+ txq->tx_free_thresh))
+ bnx2x_txeof(sc, fp);
- do {
- burst = RTE_MIN(nb_pkts, RTE_PMD_BNX2X_TX_MAX_BURST);
+ nb_tx_pkts = RTE_MIN(nb_pkts, txq->nb_tx_avail / BDS_PER_TX_PKT);
+ if (unlikely(nb_tx_pkts == 0))
+ return 0;
- ret = bnx2x_tx_encap(txq, m, burst);
- if (unlikely(ret)) {
- PMD_TX_LOG(ERR, "tx_encap failed!");
- }
+ while (nb_tx_pkts) {
+ burst = RTE_MIN(nb_tx_pkts, RTE_PMD_BNX2X_TX_MAX_BURST);
+ assert(m != NULL);
+ bnx2x_tx_encap(txq, m, burst);
bnx2x_update_fp_sb_idx(fp);
-
- if ((txq->nb_tx_desc - txq->nb_tx_avail) > txq->tx_free_thresh) {
+ if ((txq->nb_tx_desc - txq->nb_tx_avail) >
+ txq->tx_free_thresh)
bnx2x_txeof(sc, fp);
- }
-
- if (unlikely(ret == -ENOMEM)) {
- break;
- }
-
m += burst;
- nb_pkts -= burst;
-
- } while (nb_pkts);
+ nb_tx_pkts -= burst;
+ nb_pkt_sent += burst;
+ }
- return nb_tx - nb_pkts;
+ return nb_pkt_sent;
}
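
Since the burst routine now clamps to txq->nb_tx_avail / BDS_PER_TX_PKT, it may legitimately send fewer packets than requested. A minimal application-side sketch using the standard ethdev API; port_id, queue_id, pkts, and nb are placeholders:

	uint16_t sent = 0;

	/* Retry the unsent tail until the whole burst is queued. */
	while (sent < nb)
		sent += rte_eth_tx_burst(port_id, queue_id,
					 pkts + sent, nb - sent);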
int