Link status = Y
Link status event = Y
Rx interrupt = Y
+Fast mbuf free = Y
Queue start/stop = Y
Burst mode info = Y
MTU update = Y
dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
- dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
+ dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT |
+ dev_info->tx_queue_offload_capa;
dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
dev_info->speed_capa = bnxt_get_speed_capabilities(bp);
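Because the offload is advertised in tx_queue_offload_capa as well as tx_offload_capa, an application can request fast mbuf free either port-wide or on individual Tx queues. A minimal sketch of the application side, assuming a port that advertises the capability (the helper name and queue parameters are illustrative, not part of this patch):

	#include <errno.h>
	#include <rte_ethdev.h>

	/* Illustrative helper: enable fast mbuf free on a single Tx queue. */
	static int
	setup_tx_fast_free(uint16_t port_id, uint16_t queue_id, uint16_t nb_desc)
	{
		struct rte_eth_dev_info dev_info;
		struct rte_eth_txconf txconf;
		int ret;

		ret = rte_eth_dev_info_get(port_id, &dev_info);
		if (ret != 0)
			return ret;
		/* Enable only if the PMD advertises per-queue support. */
		if (!(dev_info.tx_queue_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
			return -ENOTSUP;

		txconf = dev_info.default_txconf;
		txconf.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
		return rte_eth_tx_queue_setup(port_id, queue_id, nb_desc,
					      rte_eth_dev_socket_id(port_id),
					      &txconf);
	}

The offload is only valid when every mbuf transmitted on the queue comes from one mempool and is not reference-counted, which is what permits the driver's fast completion path added below.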
{
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
#ifndef RTE_LIBRTE_IEEE1588
+ uint64_t offloads = eth_dev->data->dev_conf.txmode.offloads;
struct bnxt *bp = eth_dev->data->dev_private;
/*
 * Vector mode transmit can be enabled only if not using scatter rx
 * or tx offloads other than mbuf fast free.
 */
if (!eth_dev->data->scattered_rx &&
- !eth_dev->data->dev_conf.txmode.offloads &&
+ !(offloads & ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
!BNXT_TRUFLOW_EN(bp)) {
PMD_DRV_LOG(INFO, "Using vector mode transmit for port %d\n",
eth_dev->data->port_id);
"Port %d scatter: %d tx offload: %" PRIX64 "\n",
eth_dev->data->port_id,
eth_dev->data->scattered_rx,
- eth_dev->data->dev_conf.txmode.offloads);
+ offloads);
#endif
#endif
return bnxt_xmit_pkts;
qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
qinfo->conf.tx_rs_thresh = 0;
qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
- qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
+ qinfo->conf.offloads = txq->offloads;
}
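With this change, rte_eth_tx_queue_info_get() reports the queue's effective offload set (port-level txmode offloads plus any per-queue additions) rather than only the port-level configuration.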
rxq->rxrearm_nb -= nb;
}
+
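+/*
+ * Transmit completion function for use when DEV_TX_OFFLOAD_MBUF_FAST_FREE
+ * is not enabled.
+ */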
+static inline void
+bnxt_tx_cmp_vec(struct bnxt_tx_queue *txq, int nr_pkts)
+{
+ struct bnxt_tx_ring_info *txr = txq->tx_ring;
+ struct rte_mbuf **free = txq->free;
+ uint16_t cons = txr->tx_cons;
+ unsigned int blk = 0;
+ uint32_t ring_mask = txr->tx_ring_struct->ring_mask;
+
+ while (nr_pkts--) {
+ struct bnxt_sw_tx_bd *tx_buf;
+ struct rte_mbuf *mbuf;
+
+ tx_buf = &txr->tx_buf_ring[cons];
+ cons = (cons + 1) & ring_mask;
+ mbuf = rte_pktmbuf_prefree_seg(tx_buf->mbuf);
+ if (unlikely(mbuf == NULL))
+ continue;
+ tx_buf->mbuf = NULL;
+
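+ /*
+ * Completed mbufs are batched and returned to their mempool in bulk;
+ * flush the pending batch whenever the source pool changes.
+ */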
+ if (blk && mbuf->pool != free[0]->pool) {
+ rte_mempool_put_bulk(free[0]->pool, (void **)free, blk);
+ blk = 0;
+ }
+ free[blk++] = mbuf;
+ }
+ if (blk)
+ rte_mempool_put_bulk(free[0]->pool, (void **)free, blk);
+
+ txr->tx_cons = cons;
+}
#endif /* _BNXT_RXTX_VEC_COMMON_H_ */
#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_ring.h"
-#include "bnxt_rxtx_vec_common.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
+#include "bnxt_rxtx_vec_common.h"
/*
* RX Ring handling
return nb_rx_pkts;
}
-static void
-bnxt_tx_cmp_vec(struct bnxt_tx_queue *txq, int nr_pkts)
-{
- struct bnxt_tx_ring_info *txr = txq->tx_ring;
- struct rte_mbuf **free = txq->free;
- uint16_t cons = txr->tx_cons;
- unsigned int blk = 0;
-
- while (nr_pkts--) {
- struct bnxt_sw_tx_bd *tx_buf;
- struct rte_mbuf *mbuf;
-
- tx_buf = &txr->tx_buf_ring[cons];
- cons = RING_NEXT(txr->tx_ring_struct, cons);
- mbuf = rte_pktmbuf_prefree_seg(tx_buf->mbuf);
- if (unlikely(mbuf == NULL))
- continue;
- tx_buf->mbuf = NULL;
-
- if (blk && mbuf->pool != free[0]->pool) {
- rte_mempool_put_bulk(free[0]->pool, (void **)free, blk);
- blk = 0;
- }
- free[blk++] = mbuf;
- }
- if (blk)
- rte_mempool_put_bulk(free[0]->pool, (void **)free, blk);
-
- txr->tx_cons = cons;
-}
-
static void
bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
{
cpr->valid = !!(raw_cons & cp_ring_struct->ring_size);
if (nb_tx_pkts) {
- bnxt_tx_cmp_vec(txq, nb_tx_pkts);
+ if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ bnxt_tx_cmp_fast(txq, nb_tx_pkts);
+ else
+ bnxt_tx_cmp_vec(txq, nb_tx_pkts);
cpr->cp_raw_cons = raw_cons;
bnxt_db_cq(cpr);
}
#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_ring.h"
-#include "bnxt_rxtx_vec_common.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
+#include "bnxt_rxtx_vec_common.h"
/*
* RX Ring handling
return nb_rx_pkts;
}
-static void
-bnxt_tx_cmp_vec(struct bnxt_tx_queue *txq, int nr_pkts)
-{
- struct bnxt_tx_ring_info *txr = txq->tx_ring;
- struct rte_mbuf **free = txq->free;
- uint16_t cons = txr->tx_cons;
- unsigned int blk = 0;
- uint32_t ring_mask = txr->tx_ring_struct->ring_mask;
-
- while (nr_pkts--) {
- struct bnxt_sw_tx_bd *tx_buf;
- struct rte_mbuf *mbuf;
-
- tx_buf = &txr->tx_buf_ring[cons];
- cons = (cons + 1) & ring_mask;
- mbuf = rte_pktmbuf_prefree_seg(tx_buf->mbuf);
- if (unlikely(mbuf == NULL))
- continue;
- tx_buf->mbuf = NULL;
-
- if (blk && mbuf->pool != free[0]->pool) {
- rte_mempool_put_bulk(free[0]->pool, (void **)free, blk);
- blk = 0;
- }
- free[blk++] = mbuf;
- }
- if (blk)
- rte_mempool_put_bulk(free[0]->pool, (void **)free, blk);
-
- txr->tx_cons = cons;
-}
-
static void
bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
{
cpr->valid = !!(raw_cons & cp_ring_struct->ring_size);
if (nb_tx_pkts) {
- bnxt_tx_cmp_vec(txq, nb_tx_pkts);
+ if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ bnxt_tx_cmp_fast(txq, nb_tx_pkts);
+ else
+ bnxt_tx_cmp_vec(txq, nb_tx_pkts);
cpr->cp_raw_cons = raw_cons;
bnxt_db_cq(cpr);
}
txq->nb_tx_desc = nb_desc;
txq->tx_free_thresh =
RTE_MIN(rte_align32pow2(nb_desc) / 4, RTE_BNXT_MAX_TX_BURST);
+ txq->offloads = eth_dev->data->dev_conf.txmode.offloads |
+ tx_conf->offloads;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
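Following the usual ethdev convention, the effective per-queue offload set is the union of the port-level txmode offloads and the queue-level tx_conf offloads; caching that union in txq->offloads lets the completion paths test a single field.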
struct bnxt_cp_ring_info *cp_ring;
const struct rte_memzone *mz;
struct rte_mbuf **free;
+ uint64_t offloads;
};
void bnxt_free_txq_stats(struct bnxt_tx_queue *txq);
} while (nb_tx_pkts < ring_mask);
if (nb_tx_pkts) {
- bnxt_tx_cmp(txq, nb_tx_pkts);
+ if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ bnxt_tx_cmp_fast(txq, nb_tx_pkts);
+ else
+ bnxt_tx_cmp(txq, nb_tx_pkts);
cpr->cp_raw_cons = raw_cons;
bnxt_db_cq(cpr);
}
bnxt_tx_bds_in_hw(txq)) - 1);
}
+/*
+ * Transmit completion function for use when DEV_TX_OFFLOAD_MBUF_FAST_FREE
+ * is enabled.
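+ *
+ * Fast free relies on the application's guarantee that all mbufs sent on
+ * the queue come from a single mempool and have a reference count of one,
+ * so completions can be bulk-returned to free[0]->pool without per-mbuf
+ * checks.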
+ */
+static inline void
+bnxt_tx_cmp_fast(struct bnxt_tx_queue *txq, int nr_pkts)
+{
+ struct bnxt_tx_ring_info *txr = txq->tx_ring;
+ uint32_t ring_mask = txr->tx_ring_struct->ring_mask;
+ struct rte_mbuf **free = txq->free;
+ uint16_t cons = txr->tx_cons;
+ unsigned int blk = 0;
+
+ while (nr_pkts--) {
+ struct bnxt_sw_tx_bd *tx_buf;
+
+ tx_buf = &txr->tx_buf_ring[cons];
+ cons = (cons + 1) & ring_mask;
+ free[blk++] = tx_buf->mbuf;
+ tx_buf->mbuf = NULL;
+ }
+ if (blk)
+ rte_mempool_put_bulk(free[0]->pool, (void **)free, blk);
+
+ txr->tx_cons = cons;
+}
+
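Relative to bnxt_tx_cmp_vec(), the fast path omits the rte_pktmbuf_prefree_seg() call and the per-mbuf pool comparison; that is where DEV_TX_OFFLOAD_MBUF_FAST_FREE saves cycles.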
void bnxt_free_tx_rings(struct bnxt *bp);
int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq);
int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id);