X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fbnxt%2Fbnxt_rxtx_vec_common.h;h=8a7772a7e39ead7e2668406e926afddf29ee550b;hb=df9688427d082a44b73b0659ad4f582a76556f1e;hp=3da3c48f4ece8318d887d0c99e57043ceaf1f97f;hpb=398358341419ce2666fec38eff8fff98d5be5941;p=dpdk.git

diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_common.h b/drivers/net/bnxt/bnxt_rxtx_vec_common.h
index 3da3c48f4e..8a7772a7e3 100644
--- a/drivers/net/bnxt/bnxt_rxtx_vec_common.h
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_common.h
@@ -5,11 +5,9 @@
 #ifndef _BNXT_RXTX_VEC_COMMON_H_
 #define _BNXT_RXTX_VEC_COMMON_H_
-
-#define RTE_BNXT_MAX_RX_BURST 32
-#define RTE_BNXT_MAX_TX_BURST 32
-#define RTE_BNXT_RXQ_REARM_THRESH 32
-#define RTE_BNXT_DESCS_PER_LOOP 4
+#include "hsi_struct_def_dpdk.h"
+#include "bnxt_rxq.h"
+#include "bnxt_rxr.h"
 
 #define TX_BD_FLAGS_CMPL ((1 << TX_BD_LONG_FLAGS_BD_CNT_SFT) | \
				 TX_BD_SHORT_FLAGS_COAL_NOW | \
@@ -54,4 +52,105 @@ bnxt_rxq_vec_setup_common(struct bnxt_rx_queue *rxq)
 	rxq->rxrearm_start = 0;
 	return 0;
 }
+
+static inline void
+bnxt_rxq_rearm(struct bnxt_rx_queue *rxq, struct bnxt_rx_ring_info *rxr)
+{
+	struct rx_prod_pkt_bd *rxbds = &rxr->rx_desc_ring[rxq->rxrearm_start];
+	struct rte_mbuf **rx_bufs = &rxr->rx_buf_ring[rxq->rxrearm_start];
+	int nb, i;
+
+	/*
+	 * Number of mbufs to allocate must be a multiple of four. The
+	 * allocation must not go past the end of the ring.
+	 */
+	nb = RTE_MIN(rxq->rxrearm_nb & ~0x3,
+		     rxq->nb_rx_desc - rxq->rxrearm_start);
+
+	/* Allocate new mbufs into the software ring. */
+	if (rte_mempool_get_bulk(rxq->mb_pool, (void *)rx_bufs, nb) < 0) {
+		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed += nb;
+
+		for (i = 0; i < nb; i++)
+			rx_bufs[i] = &rxq->fake_mbuf;
+		return;
+	}
+
+	/* Initialize the mbufs in vector, process 4 mbufs per loop. */
+	for (i = 0; i < nb; i += 4) {
+		rxbds[0].address = rte_mbuf_data_iova_default(rx_bufs[0]);
+		rxbds[1].address = rte_mbuf_data_iova_default(rx_bufs[1]);
+		rxbds[2].address = rte_mbuf_data_iova_default(rx_bufs[2]);
+		rxbds[3].address = rte_mbuf_data_iova_default(rx_bufs[3]);
+
+		rxbds += 4;
+		rx_bufs += 4;
+	}
+
+	rxq->rxrearm_start += nb;
+	bnxt_db_write(&rxr->rx_db, rxq->rxrearm_start - 1);
+	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
+		rxq->rxrearm_start = 0;
+
+	rxq->rxrearm_nb -= nb;
+}
+
+/*
+ * Transmit completion function for use when DEV_TX_OFFLOAD_MBUF_FAST_FREE
+ * is enabled.
+ */
+static inline void
+bnxt_tx_cmp_vec_fast(struct bnxt_tx_queue *txq, int nr_pkts)
+{
+	struct bnxt_tx_ring_info *txr = txq->tx_ring;
+	struct rte_mbuf **free = txq->free;
+	uint16_t cons, raw_cons = txr->tx_raw_cons;
+	unsigned int blk = 0;
+	uint32_t ring_mask = txr->tx_ring_struct->ring_mask;
+
+	while (nr_pkts--) {
+		struct bnxt_sw_tx_bd *tx_buf;
+
+		cons = raw_cons++ & ring_mask;
+		tx_buf = &txr->tx_buf_ring[cons];
+		free[blk++] = tx_buf->mbuf;
+		tx_buf->mbuf = NULL;
+	}
+	if (blk)
+		rte_mempool_put_bulk(free[0]->pool, (void **)free, blk);
+
+	txr->tx_raw_cons = raw_cons;
+}
+
+static inline void
+bnxt_tx_cmp_vec(struct bnxt_tx_queue *txq, int nr_pkts)
+{
+	struct bnxt_tx_ring_info *txr = txq->tx_ring;
+	struct rte_mbuf **free = txq->free;
+	uint16_t cons, raw_cons = txr->tx_raw_cons;
+	unsigned int blk = 0;
+	uint32_t ring_mask = txr->tx_ring_struct->ring_mask;
+
+	while (nr_pkts--) {
+		struct bnxt_sw_tx_bd *tx_buf;
+		struct rte_mbuf *mbuf;
+
+		cons = raw_cons++ & ring_mask;
+		tx_buf = &txr->tx_buf_ring[cons];
+		mbuf = rte_pktmbuf_prefree_seg(tx_buf->mbuf);
+		if (unlikely(mbuf == NULL))
+			continue;
+		tx_buf->mbuf = NULL;
+
+		if (blk && mbuf->pool != free[0]->pool) {
+			rte_mempool_put_bulk(free[0]->pool, (void **)free, blk);
+			blk = 0;
+		}
+		free[blk++] = mbuf;
+	}
+	if (blk)
+		rte_mempool_put_bulk(free[0]->pool, (void **)free, blk);
+
+	txr->tx_raw_cons = raw_cons;
+}
 #endif /* _BNXT_RXTX_VEC_COMMON_H_ */
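
Note (illustrative, not part of the patch): the rearm helper added above is normally driven from the vectorized receive burst once enough descriptors have been consumed. A minimal sketch of such a call site follows; the wrapper name and the literal threshold are assumptions, since the patch removes the RTE_BNXT_RXQ_REARM_THRESH definition from this common header and the real constant is defined alongside the per-architecture vector Rx code.

/* Illustrative sketch only: rearm in multiples of four once enough ring
 * slots have been consumed. The 32-entry threshold is an assumed
 * stand-in for the driver's actual rearm threshold constant.
 */
static inline void
bnxt_rxq_rearm_if_needed(struct bnxt_rx_queue *rxq,
			 struct bnxt_rx_ring_info *rxr)
{
	if (rxq->rxrearm_nb >= 32)
		bnxt_rxq_rearm(rxq, rxr);
}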
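
A second hedged sketch: how a transmit completion path might choose between the two helpers. Only the selection criterion comes from the patch itself (bnxt_tx_cmp_vec_fast() is intended for queues with DEV_TX_OFFLOAD_MBUF_FAST_FREE enabled, bnxt_tx_cmp_vec() for the general case); the dispatch function name and the txq->offloads field used below are assumptions for illustration.

/* Illustrative sketch only: take the fast path when MBUF_FAST_FREE is
 * enabled, i.e. all mbufs belong to a single pool and have a reference
 * count of one, so they can be returned in bulk without per-mbuf
 * prefree checks.
 */
static inline void
bnxt_tx_cmp_vec_dispatch(struct bnxt_tx_queue *txq, int nr_pkts)
{
	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		bnxt_tx_cmp_vec_fast(txq, nr_pkts);
	else
		bnxt_tx_cmp_vec(txq, nr_pkts);
}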