/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2020 Broadcom
+ * Copyright(c) 2020-2021 Broadcom
* All rights reserved.
*/
#ifndef _BNXT_RXTX_VEC_COMMON_H_
#define _BNXT_RXTX_VEC_COMMON_H_
-
-#define RTE_BNXT_MAX_RX_BURST 32
-#define RTE_BNXT_MAX_TX_BURST 32
-#define RTE_BNXT_RXQ_REARM_THRESH 32
-#define RTE_BNXT_DESCS_PER_LOOP 4
+#include "hsi_struct_def_dpdk.h"
+#include "bnxt_rxq.h"
+#include "bnxt_rxr.h"
#define TX_BD_FLAGS_CMPL ((1 << TX_BD_LONG_FLAGS_BD_CNT_SFT) | \
TX_BD_SHORT_FLAGS_COAL_NOW | \
rxq->rxrearm_start = 0;
return 0;
}
+
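+/*
+ * Replenish the RX descriptor ring with freshly allocated mbufs. The
+ * vector burst receive routines typically call this once enough
+ * descriptors have been consumed (tracked in rxq->rxrearm_nb).
+ */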
+static inline void
+bnxt_rxq_rearm(struct bnxt_rx_queue *rxq, struct bnxt_rx_ring_info *rxr)
+{
+ struct rx_prod_pkt_bd *rxbds = &rxr->rx_desc_ring[rxq->rxrearm_start];
+ struct rte_mbuf **rx_bufs = &rxr->rx_buf_ring[rxq->rxrearm_start];
+ int nb, i;
+
+ /*
+ * Number of mbufs to allocate must be a multiple of four, since the
+ * descriptor initialization loop below handles four buffers per
+ * iteration. The allocation must not go past the end of the ring.
+ */
+ nb = RTE_MIN(rxq->rxrearm_nb & ~0x3,
+ rxq->nb_rx_desc - rxq->rxrearm_start);
+
+ /* Allocate new mbufs into the software ring. */
+ if (rte_mempool_get_bulk(rxq->mb_pool, (void *)rx_bufs, nb) < 0) {
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed += nb;
+
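+ /*
+ * Allocation failed: point the software ring entries at the shared
+ * fake mbuf so the receive path does not dereference a stale pointer.
+ * Rearm is retried on a later call, since rxrearm_start and
+ * rxrearm_nb are left unchanged.
+ */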
+ for (i = 0; i < nb; i++)
+ rx_bufs[i] = &rxq->fake_mbuf;
+ return;
+ }
+
+ /* Fill the RX descriptors with the new mbuf buffer addresses, four per loop iteration. */
+ for (i = 0; i < nb; i += 4) {
+ rxbds[0].address = rte_mbuf_data_iova_default(rx_bufs[0]);
+ rxbds[1].address = rte_mbuf_data_iova_default(rx_bufs[1]);
+ rxbds[2].address = rte_mbuf_data_iova_default(rx_bufs[2]);
+ rxbds[3].address = rte_mbuf_data_iova_default(rx_bufs[3]);
+
+ rxbds += 4;
+ rx_bufs += 4;
+ }
+
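+ /*
+ * Advance the rearm index and ring the RX doorbell so the hardware
+ * can use the newly posted buffers.
+ */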
+ rxq->rxrearm_start += nb;
+ bnxt_db_write(&rxr->rx_db, rxq->rxrearm_start - 1);
+ if (rxq->rxrearm_start >= rxq->nb_rx_desc)
+ rxq->rxrearm_start = 0;
+
+ rxq->rxrearm_nb -= nb;
+}
+
+/*
+ * Transmit completion function for use when DEV_TX_OFFLOAD_MBUF_FAST_FREE
+ * is enabled. With this offload the application guarantees that all
+ * transmitted mbufs come from the same mempool and have a reference count
+ * of one, so they can be returned to the pool in bulk without per-mbuf
+ * checks.
+ */
+static inline void
+bnxt_tx_cmp_vec_fast(struct bnxt_tx_queue *txq, uint32_t nr_pkts)
+{
+ struct bnxt_tx_ring_info *txr = txq->tx_ring;
+ uint16_t cons, raw_cons = txr->tx_raw_cons;
+ uint32_t ring_mask, ring_size, num;
+ struct rte_mempool *pool;
+
+ ring_mask = txr->tx_ring_struct->ring_mask;
+ ring_size = txr->tx_ring_struct->ring_size;
+
+ cons = raw_cons & ring_mask;
+ num = RTE_MIN(nr_pkts, ring_size - cons);
+ pool = txr->tx_buf_ring[cons]->pool;
+
+ rte_mempool_put_bulk(pool, (void **)&txr->tx_buf_ring[cons], num);
+ memset(&txr->tx_buf_ring[cons], 0, num * sizeof(struct rte_mbuf *));
+ raw_cons += num;
+ num = nr_pkts - num;
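+ /* Wrap-around: free the remaining mbufs from the start of the ring. */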
+ if (num) {
+ cons = raw_cons & ring_mask;
+ rte_mempool_put_bulk(pool, (void **)&txr->tx_buf_ring[cons],
+ num);
+ memset(&txr->tx_buf_ring[cons], 0,
+ num * sizeof(struct rte_mbuf *));
+ raw_cons += num;
+ }
+
+ txr->tx_raw_cons = raw_cons;
+}
+
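+/*
+ * Transmit completion function for the general case: completed mbufs may
+ * hold extra references or come from different mempools, so each one is
+ * checked with rte_pktmbuf_prefree_seg() and contiguous runs belonging to
+ * the same mempool are freed in bulk.
+ */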
+static inline void
+bnxt_tx_cmp_vec(struct bnxt_tx_queue *txq, uint32_t nr_pkts)
+{
+ struct bnxt_tx_ring_info *txr = txq->tx_ring;
+ uint16_t cons, raw_cons = txr->tx_raw_cons;
+ uint32_t ring_mask, ring_size, num, blk;
+ struct rte_mempool *pool;
+
+ ring_mask = txr->tx_ring_struct->ring_mask;
+ ring_size = txr->tx_ring_struct->ring_size;
+
+ while (nr_pkts) {
+ struct rte_mbuf *mbuf;
+
+ cons = raw_cons & ring_mask;
+ num = RTE_MIN(nr_pkts, ring_size - cons);
+ pool = txr->tx_buf_ring[cons]->pool;
+
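+ /*
+ * Count how many consecutive mbufs can be freed to the same
+ * mempool in one bulk operation.
+ */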
+ blk = 0;
+ do {
+ mbuf = txr->tx_buf_ring[cons + blk];
+ mbuf = rte_pktmbuf_prefree_seg(mbuf);
+ if (!mbuf || mbuf->pool != pool)
+ break;
+ blk++;
+ } while (blk < num);
+
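+ /* Return the batch to its mempool and clear the software ring entries. */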
+ if (blk) {
+ rte_mempool_put_bulk(pool,
+ (void **)&txr->tx_buf_ring[cons],
+ blk);
+ memset(&txr->tx_buf_ring[cons], 0,
+ blk * sizeof(struct rte_mbuf *));
+ raw_cons += blk;
+ nr_pkts -= blk;
+ }
+ if (!mbuf) {
+ /* Skip freeing mbufs that are still referenced elsewhere (prefree_seg returned NULL). */
+ raw_cons++;
+ nr_pkts--;
+ }
+ }
+ txr->tx_raw_cons = raw_cons;
+}
#endif /* _BNXT_RXTX_VEC_COMMON_H_ */