- RX VLAN stripping.
- TX VLAN insertion.
- RX CRC stripping configuration.
+- TX mbuf fast free offload.
- Promiscuous mode on PF and VF.
- Multicast promiscuous mode on PF and VF.
- Hardware checksum offloads.
for some NICs (such as ConnectX-6 Dx, ConnectX-6 Lx, and BlueField-2).
The capability bit ``scatter_fcs_w_decap_disable`` shows NIC support.
+- TX mbuf fast free:
+
+ - The fast free offload assumes that all mbufs being sent originate from the
+ same memory pool and that there are no extra references to them (the
+ reference counter of each mbuf is equal to 1 on the tx_burst call). The
+ latter also means there must be no externally attached buffers in the mbufs.
+ It is the application's responsibility to provide correct mbufs if the fast
+ free offload is engaged. The mlx5 PMD implicitly produces mbufs with
+ externally attached buffers if the MPRQ option is enabled, hence the fast
+ free offload is neither supported nor advertised if MPRQ is enabled.
+
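(Illustration, not part of the patch.) An application could request the
offload roughly as follows; this is a minimal sketch assuming the DPDK 21.02
flag name ``DEV_TX_OFFLOAD_MBUF_FAST_FREE``, with ``port_id``, ``queue_id``
and ``nb_txd`` as application-defined placeholders::

    struct rte_eth_dev_info dev_info;
    struct rte_eth_txconf txconf;

    rte_eth_dev_info_get(port_id, &dev_info);
    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
            /* Enable fast free on this Tx queue only. */
            txconf = dev_info.default_txconf;
            txconf.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
            rte_eth_tx_queue_setup(port_id, queue_id, nb_txd,
                                   rte_eth_dev_socket_id(port_id),
                                   &txconf);
    }

The flag can equally be set port-wide in ``rte_eth_conf.txmode.offloads``
before ``rte_eth_dev_configure()``.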
- Sample flow:
- Supports ``RTE_FLOW_ACTION_TYPE_SAMPLE`` action only within NIC Rx and E-Switch steering domain.
/**
* Free the mbufs from the linear array of pointers.
*
+ * @param txq
+ * Pointer to Tx queue structure.
* @param pkts
* Pointer to array of packets to be freed.
* @param pkts_n
* Number of packets in the array.
* @param olx
* Configured Tx offloads mask. It is fully defined at
* compile time and may be used for optimization.
*/
static __rte_always_inline void
-mlx5_tx_free_mbuf(struct rte_mbuf **__rte_restrict pkts,
+mlx5_tx_free_mbuf(struct mlx5_txq_data *__rte_restrict txq,
+ struct rte_mbuf **__rte_restrict pkts,
unsigned int pkts_n,
unsigned int olx __rte_unused)
{
struct rte_mempool *pool = NULL;
struct rte_mbuf **p_free = NULL;
struct rte_mbuf *mbuf;
unsigned int n_free = 0;

/*
* The implemented algorithm eliminates
* copying pointers to temporary array
* for rte_mempool_put_bulk() calls.
*/
MLX5_ASSERT(pkts);
MLX5_ASSERT(pkts_n);
+ /*
+ * Free mbufs directly to the pool in bulk
+ * if the fast free offload is engaged.
+ */
+ if (!MLX5_TXOFF_CONFIG(MULTI) && txq->fast_free) {
+ mbuf = *pkts;
+ pool = mbuf->pool;
+ rte_mempool_put_bulk(pool, (void *)pkts, pkts_n);
+ return;
+ }
for (;;) {
for (;;) {
/*
* No inline version to free buffers for optimal call
* on the tx_burst completion.
*/
static __rte_noinline void
-__mlx5_tx_free_mbuf(struct rte_mbuf **__rte_restrict pkts,
+__mlx5_tx_free_mbuf(struct mlx5_txq_data *__rte_restrict txq,
+ struct rte_mbuf **__rte_restrict pkts,
unsigned int pkts_n,
unsigned int olx __rte_unused)
{
- mlx5_tx_free_mbuf(pkts, pkts_n, olx);
+ mlx5_tx_free_mbuf(txq, pkts, pkts_n, olx);
}
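(Side note, illustrative only.) The bulk path above relies on invariants the
application must uphold; a hypothetical debug helper (``tx_fast_free_ok`` is
not part of the patch) could verify them for a single-segment burst before
calling tx_burst:

    #include <stdbool.h>
    #include <rte_mbuf.h>

    static inline bool
    tx_fast_free_ok(struct rte_mbuf **pkts, unsigned int pkts_n)
    {
            struct rte_mempool *pool = pkts_n ? pkts[0]->pool : NULL;
            unsigned int i;

            for (i = 0; i < pkts_n; ++i) {
                    /* Every mbuf must come from the same pool, be
                     * singly referenced and carry no external buffer.
                     */
                    if (pkts[i]->pool != pool ||
                        rte_mbuf_refcnt_read(pkts[i]) != 1 ||
                        RTE_MBUF_HAS_EXTBUF(pkts[i]))
                            return false;
            }
            return true;
    }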
/**
part = RTE_MIN(part, n_elts);
MLX5_ASSERT(part);
MLX5_ASSERT(part <= txq->elts_s);
- mlx5_tx_free_mbuf(&txq->elts[txq->elts_tail & txq->elts_m],
+ mlx5_tx_free_mbuf(txq,
+ &txq->elts[txq->elts_tail & txq->elts_m],
part, olx);
txq->elts_tail += part;
n_elts -= part;
txq->stats.opackets += loc.pkts_sent;
#endif
if (MLX5_TXOFF_CONFIG(INLINE) && loc.mbuf_free)
- __mlx5_tx_free_mbuf(pkts, loc.mbuf_free, olx);
+ __mlx5_tx_free_mbuf(txq, pkts, loc.mbuf_free, olx);
return loc.pkts_sent;
}
int
mlx5_tx_burst_mode_get(struct rte_eth_dev *dev,
- uint16_t tx_queue_id __rte_unused,
+ uint16_t tx_queue_id,
struct rte_eth_burst_mode *mode)
{
eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_txq_data *txq = (*priv->txqs)[tx_queue_id];
unsigned int i, olx;
for (i = 0; i < RTE_DIM(txoff_func); i++) {
if (pkt_burst == txoff_func[i].func) {
olx = txoff_func[i].olx;
snprintf(mode->info, sizeof(mode->info),
- "%s%s%s%s%s%s%s%s%s",
+ "%s%s%s%s%s%s%s%s%s%s",
(olx & MLX5_TXOFF_CONFIG_EMPW) ?
((olx & MLX5_TXOFF_CONFIG_MPW) ?
"Legacy MPW" : "Enhanced MPW") : "No MPW",
(olx & MLX5_TXOFF_CONFIG_METADATA) ?
" + METADATA" : "",
(olx & MLX5_TXOFF_CONFIG_TXPP) ?
- " + TXPP" : "");
+ " + TXPP" : "",
+ (txq && txq->fast_free) ?
+ " + Fast Free" : "");
return 0;
}
}
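(Illustration, not part of the patch.) The extended info string can be
observed via the generic burst mode query, e.g. with placeholder ``port_id``
and ``queue_id``:

    struct rte_eth_burst_mode mode;

    if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0)
            printf("Tx burst mode: %s\n", mode.info);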