/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2020 Broadcom
+ * Copyright(c) 2020-2021 Broadcom
* All rights reserved.
*/
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
-#define RTE_BNXT_DESCS_PER_LOOP 4U
-
#define TX_BD_FLAGS_CMPL ((1 << TX_BD_LONG_FLAGS_BD_CNT_SFT) | \
TX_BD_SHORT_FLAGS_COAL_NOW | \
TX_BD_SHORT_TYPE_TX_BD_SHORT | \
rxq->rxrearm_start = 0;
return 0;
}
+
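+/*
+ * Replenish the RX ring: attach newly allocated mbufs to descriptors
+ * already consumed by the hardware and hand them back via the
+ * doorbell.
+ */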
+static inline void
+bnxt_rxq_rearm(struct bnxt_rx_queue *rxq, struct bnxt_rx_ring_info *rxr)
+{
+ struct rx_prod_pkt_bd *rxbds = &rxr->rx_desc_ring[rxq->rxrearm_start];
+ struct rte_mbuf **rx_bufs = &rxr->rx_buf_ring[rxq->rxrearm_start];
+ int nb, i;
+
+ /*
+ * The number of mbufs to allocate must be a multiple of four, and
+ * the allocation must not go past the end of the ring. Both RTE_MIN()
+ * operands are multiples of four: the ring size is a power of two,
+ * and rxrearm_start advances only in multiples of four.
+ */
+ nb = RTE_MIN(rxq->rxrearm_nb & ~0x3,
+ rxq->nb_rx_desc - rxq->rxrearm_start);
+
+ /* Allocate new mbufs into the software ring. */
+ if (rte_mempool_get_bulk(rxq->mb_pool, (void *)rx_bufs, nb) < 0) {
+ /*
+ * Allocation failed: record the failure and leave the
+ * descriptors unarmed; rearming is retried on the next call.
+ */
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed += nb;
+ return;
+ }
+
+ /* Initialize the RX descriptors with the new mbuf addresses, four per loop. */
+ for (i = 0; i < nb; i += 4) {
+ rxbds[0].address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(rx_bufs[0]));
+ rxbds[1].address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(rx_bufs[1]));
+ rxbds[2].address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(rx_bufs[2]));
+ rxbds[3].address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(rx_bufs[3]));
+
+ rxbds += 4;
+ rx_bufs += 4;
+ }
+
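+ /*
+ * Advance the rearm index past the newly armed descriptors and ring
+ * the doorbell with the index of the last valid descriptor (hence
+ * the "- 1").
+ */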
+ rxq->rxrearm_start += nb;
+ bnxt_db_write(&rxr->rx_db, rxq->rxrearm_start - 1);
+ if (rxq->rxrearm_start >= rxq->nb_rx_desc)
+ rxq->rxrearm_start = 0;
+
+ rxq->rxrearm_nb -= nb;
+}
+
+/*
+ * Transmit completion function for use when DEV_TX_OFFLOAD_MBUF_FAST_FREE
+ * is enabled.
+ */
+static inline void
+bnxt_tx_cmp_vec_fast(struct bnxt_tx_queue *txq, int nr_pkts)
+{
+ struct bnxt_tx_ring_info *txr = txq->tx_ring;
+ struct rte_mbuf **free = txq->free;
+ uint16_t cons, raw_cons = txr->tx_raw_cons;
+ unsigned int blk = 0;
+ uint32_t ring_mask = txr->tx_ring_struct->ring_mask;
+
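+ /*
+ * Fast free applies when the application guarantees that all mbufs
+ * being freed are direct, have a reference count of one, and come
+ * from the same mempool, so a single bulk put suffices.
+ */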
+ while (nr_pkts--) {
+ struct bnxt_sw_tx_bd *tx_buf;
+
+ cons = raw_cons++ & ring_mask;
+ tx_buf = &txr->tx_buf_ring[cons];
+ free[blk++] = tx_buf->mbuf;
+ tx_buf->mbuf = NULL;
+ }
+ if (blk)
+ rte_mempool_put_bulk(free[0]->pool, (void **)free, blk);
+
+ txr->tx_raw_cons = raw_cons;
+}
+
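+/*
+ * General transmit completion function: handles reference-counted
+ * mbufs and mbufs from multiple mempools by batching frees per pool.
+ */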
+static inline void
+bnxt_tx_cmp_vec(struct bnxt_tx_queue *txq, int nr_pkts)
+{
+ struct bnxt_tx_ring_info *txr = txq->tx_ring;
+ struct rte_mbuf **free = txq->free;
+ uint16_t cons, raw_cons = txr->tx_raw_cons;
+ unsigned int blk = 0;
+ uint32_t ring_mask = txr->tx_ring_struct->ring_mask;
+
+ while (nr_pkts--) {
+ struct bnxt_sw_tx_bd *tx_buf;
+ struct rte_mbuf *mbuf;
+
+ cons = raw_cons++ & ring_mask;
+ tx_buf = &txr->tx_buf_ring[cons];
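+ /*
+ * rte_pktmbuf_prefree_seg() returns NULL when the mbuf is still
+ * referenced and must not be returned to its pool yet.
+ */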
+ mbuf = rte_pktmbuf_prefree_seg(tx_buf->mbuf);
+ if (unlikely(mbuf == NULL))
+ continue;
+ tx_buf->mbuf = NULL;
+
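+ /*
+ * If this mbuf belongs to a different mempool than the pending
+ * batch, flush the batch first; each bulk put targets one pool.
+ */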
+ if (blk && mbuf->pool != free[0]->pool) {
+ rte_mempool_put_bulk(free[0]->pool, (void **)free, blk);
+ blk = 0;
+ }
+ free[blk++] = mbuf;
+ }
+ if (blk)
+ rte_mempool_put_bulk(free[0]->pool, (void **)free, blk);
+
+ txr->tx_raw_cons = raw_cons;
+}
#endif /* _BNXT_RXTX_VEC_COMMON_H_ */