#ifndef _BNXT_RXTX_VEC_COMMON_H_
#define _BNXT_RXTX_VEC_COMMON_H_
-
-#define RTE_BNXT_MAX_RX_BURST 32
-#define RTE_BNXT_MAX_TX_BURST 32
-#define RTE_BNXT_RXQ_REARM_THRESH 32
-#define RTE_BNXT_DESCS_PER_LOOP 4
+#include "hsi_struct_def_dpdk.h"
+#include "bnxt_rxq.h"
+#include "bnxt_rxr.h"
#define TX_BD_FLAGS_CMPL ((1 << TX_BD_LONG_FLAGS_BD_CNT_SFT) | \
TX_BD_SHORT_FLAGS_COAL_NOW | \
rxq->rxrearm_start = 0;
return 0;
}
+
+/*
+ * Rearm (refill) Rx descriptors starting at rxq->rxrearm_start: bulk-allocate
+ * mbufs from the queue's mempool, write each mbuf's default data IOVA into the
+ * corresponding producer BD, write the Rx doorbell, then advance rxrearm_start
+ * (wrapping at nb_rx_desc) and decrement rxrearm_nb by the count rearmed.
+ */
+static inline void
+bnxt_rxq_rearm(struct bnxt_rx_queue *rxq, struct bnxt_rx_ring_info *rxr)
+{
+ struct rx_prod_pkt_bd *rxbds = &rxr->rx_desc_ring[rxq->rxrearm_start];
+ struct rte_mbuf **rx_bufs = &rxr->rx_buf_ring[rxq->rxrearm_start];
+ int nb, i;
+
+ /*
+ * Number of mbufs to allocate must be a multiple of four. The
+ * allocation must not go past the end of the ring.
+ */
+ nb = RTE_MIN(rxq->rxrearm_nb & ~0x3,
+ rxq->nb_rx_desc - rxq->rxrearm_start);
+
+ /* Allocate new mbufs into the software ring. */
+ if (rte_mempool_get_bulk(rxq->mb_pool, (void *)rx_bufs, nb) < 0) {
+ /* Count the whole failed request against the alloc-fail stat. */
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed += nb;
+
+ /*
+ * Fill the software ring with the queue's fake-mbuf
+ * placeholder, presumably so later ring scans never
+ * dereference a stale pointer -- TODO confirm.
+ * NOTE(review): the hardware BDs (rxbds) are NOT rewritten on
+ * this path, so they still hold the previous buffer addresses,
+ * and rxrearm_start/rxrearm_nb are left unchanged so a later
+ * call retries this same range; verify the fake-mbuf entries
+ * are replaced before these BDs are handed back to hardware.
+ */
+ for (i = 0; i < nb; i++)
+ rx_bufs[i] = &rxq->fake_mbuf;
+ return;
+ }
+
+ /* Initialize the mbufs in vector, process 4 mbufs per loop. */
+ /* nb is a multiple of 4 (masked with ~0x3 above), so no tail case. */
+ for (i = 0; i < nb; i += 4) {
+ /*
+ * Only the BD address field is refreshed here; the other BD
+ * fields are presumably initialized once at ring setup --
+ * TODO confirm against the ring-init code.
+ */
+ rxbds[0].address = rte_mbuf_data_iova_default(rx_bufs[0]);
+ rxbds[1].address = rte_mbuf_data_iova_default(rx_bufs[1]);
+ rxbds[2].address = rte_mbuf_data_iova_default(rx_bufs[2]);
+ rxbds[3].address = rte_mbuf_data_iova_default(rx_bufs[3]);
+
+ rxbds += 4;
+ rx_bufs += 4;
+ }
+
+ /*
+ * Ring the doorbell with the index of the last BD just filled
+ * (rxrearm_start - 1 after the advance), then wrap the start index.
+ */
+ rxq->rxrearm_start += nb;
+ bnxt_db_write(&rxr->rx_db, rxq->rxrearm_start - 1);
+ if (rxq->rxrearm_start >= rxq->nb_rx_desc)
+ rxq->rxrearm_start = 0;
+
+ rxq->rxrearm_nb -= nb;
+}
#endif /* _BNXT_RXTX_VEC_COMMON_H_ */