#define MLX5_TXOFF_INFO(func, olx) {mlx5_tx_burst_##func, olx},
+/* Compile-time sanity checks for the PMD definitions, moved to file scope. */
+static_assert(MLX5_CQE_STATUS_HW_OWN < 0, "Must be negative value");
+static_assert(MLX5_CQE_STATUS_SW_OWN < 0, "Must be negative value");
+static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
+ (sizeof(uint16_t) +
+ sizeof(rte_v128u32_t)),
+ "invalid Ethernet Segment data size");
+static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
+ (sizeof(uint16_t) +
+ sizeof(struct rte_vlan_hdr) +
+ 2 * RTE_ETHER_ADDR_LEN),
+ "invalid Ethernet Segment data size");
+static_assert(MLX5_DSEG_MIN_INLINE_SIZE ==
+ (2 * RTE_ETHER_ADDR_LEN),
+ "invalid Data Segment data size");
+static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
+static_assert((sizeof(struct rte_vlan_hdr) +
+ sizeof(struct rte_ether_hdr)) ==
+ MLX5_ESEG_MIN_INLINE_SIZE,
+ "invalid min inline data size");
+static_assert(MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE <=
+ MLX5_DSEG_MAX, "invalid WQE max size");
+static_assert(MLX5_WQE_CSEG_SIZE == MLX5_WSEG_SIZE,
+ "invalid WQE Control Segment size");
+static_assert(MLX5_WQE_ESEG_SIZE == MLX5_WSEG_SIZE,
+ "invalid WQE Ethernet Segment size");
+static_assert(MLX5_WQE_DSEG_SIZE == MLX5_WSEG_SIZE,
+ "invalid WQE Data Segment size");
+static_assert(MLX5_WQE_SIZE == 4 * MLX5_WSEG_SIZE,
+ "invalid WQE size");
+
static __rte_always_inline uint32_t
rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
volatile struct mlx5_mini_cqe8 *mcqe);
struct rxq_zip *zip = &rxq->zip;
volatile struct mlx5_cqe *cqe;
const unsigned int cqe_n = (1 << rxq->cqe_n);
+ const unsigned int sges_n = (1 << rxq->sges_n);
+ const unsigned int elts_n = (1 << rxq->elts_n);
+ const unsigned int strd_n = (1 << rxq->strd_num_n);
const unsigned int cqe_cnt = cqe_n - 1;
unsigned int cq_ci, used;
used += n;
cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
}
- used = RTE_MIN(used, cqe_n);
+ used = RTE_MIN(used * sges_n, elts_n * strd_n);
return used;
}
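
The corrected estimate accounts for scattered Rx (each CQE may cover up to 1 << sges_n element slots) and caps the result at the real queue capacity, elts_n * strd_n strides, instead of cqe_n. A standalone sketch with hypothetical values, not part of the patch:

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	const unsigned int sges_n = 1 << 2;  /* 4 segments per packet */
	const unsigned int elts_n = 1 << 10; /* 1024 element slots */
	const unsigned int strd_n = 1 << 4;  /* 16 strides per element */
	unsigned int used = 300;             /* CQEs counted so far */

	/* One CQE may hold up to sges_n segments; cap at queue capacity. */
	used = min_u(used * sges_n, elts_n * strd_n);
	printf("used: %u\n", used); /* prints 1200 */
	return 0;
}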
}
}
}
- if (rxq->dynf_meta && cqe->flow_table_metadata) {
- pkt->ol_flags |= rxq->flow_meta_mask;
- *RTE_MBUF_DYNFIELD(pkt, rxq->flow_meta_offset, uint32_t *) =
- cqe->flow_table_metadata;
+ if (rxq->dynf_meta) {
+ uint32_t meta = cqe->flow_table_metadata &
+ rxq->flow_meta_port_mask;
+
+ if (meta) {
+ pkt->ol_flags |= rxq->flow_meta_mask;
+ *RTE_MBUF_DYNFIELD(pkt, rxq->flow_meta_offset,
+ uint32_t *) = meta;
+ }
}
if (rxq->csum)
pkt->ol_flags |= rxq_cq_to_ol_flags(cqe);
rte_mbuf_raw_free(pkt);
pkt = rep;
}
+ rq_ci >>= sges_n;
+ ++rq_ci;
+ rq_ci <<= sges_n;
break;
}
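
The added shift sequence rounds rq_ci up to the next multiple of 1 << sges_n, so the ring index skips the unused segment slots of the packet just completed. A minimal standalone sketch of the identity, with hypothetical values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const unsigned int sges_n = 2; /* 1 << 2 = 4 segments per packet */
	uint32_t rq_ci = 13;

	rq_ci >>= sges_n; /* 13 -> 3: index in packet units */
	++rq_ci;          /* 3 -> 4: advance to the next packet */
	rq_ci <<= sges_n; /* 4 -> 16: back to segment units, aligned */
	assert(rq_ci == 16 && rq_ci % (1u << sges_n) == 0);
	return 0;
}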
if (!pkt) {
/**
* Free the mbufs from the linear array of pointers.
*
+ * @param txq
+ * Pointer to Tx queue structure.
* @param pkts
 *   Pointer to the array of packets to be freed.
* @param pkts_n
* compile time and may be used for optimization.
*/
static __rte_always_inline void
-mlx5_tx_free_mbuf(struct rte_mbuf **__rte_restrict pkts,
+mlx5_tx_free_mbuf(struct mlx5_txq_data *__rte_restrict txq,
+ struct rte_mbuf **__rte_restrict pkts,
unsigned int pkts_n,
unsigned int olx __rte_unused)
{
*/
MLX5_ASSERT(pkts);
MLX5_ASSERT(pkts_n);
+	/*
+	 * Free mbufs directly to the pool in bulk
+	 * if the fast free offload is engaged.
+	 */
+ if (!MLX5_TXOFF_CONFIG(MULTI) && txq->fast_free) {
+ mbuf = *pkts;
+ pool = mbuf->pool;
+ rte_mempool_put_bulk(pool, (void *)pkts, pkts_n);
+ return;
+ }
for (;;) {
for (;;) {
/*
}
}
}
+/*
+ * Non-inlined version of mlx5_tx_free_mbuf(), intended for the
+ * single call on tx_burst completion.
+ */
+static __rte_noinline void
+__mlx5_tx_free_mbuf(struct mlx5_txq_data *__rte_restrict txq,
+ struct rte_mbuf **__rte_restrict pkts,
+ unsigned int pkts_n,
+ unsigned int olx __rte_unused)
+{
+ mlx5_tx_free_mbuf(txq, pkts, pkts_n, olx);
+}
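
The bulk rte_mempool_put_bulk() path above is only valid under the mbuf fast free contract: every mbuf sent on the queue comes from the same mempool, has a reference count of 1, and carries no external or indirect buffers. A hedged application-side sketch (the helper name is illustrative) of requesting DEV_TX_OFFLOAD_MBUF_FAST_FREE at queue setup:

#include <rte_ethdev.h>

static int
txq_setup_with_fast_free(uint16_t port_id, uint16_t queue_id,
			 uint16_t nb_desc, unsigned int socket)
{
	struct rte_eth_dev_info info;
	struct rte_eth_txconf txconf;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &info);
	if (ret != 0)
		return ret;
	txconf = info.default_txconf;
	/* All mbufs must come from one pool with refcnt == 1. */
	if (info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		txconf.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
	return rte_eth_tx_queue_setup(port_id, queue_id, nb_desc,
				      socket, &txconf);
}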
/**
 * Free the mbufs from the elts ring buffer up to the new tail.
part = RTE_MIN(part, n_elts);
MLX5_ASSERT(part);
MLX5_ASSERT(part <= txq->elts_s);
- mlx5_tx_free_mbuf(&txq->elts[txq->elts_tail & txq->elts_m],
+ mlx5_tx_free_mbuf(txq,
+ &txq->elts[txq->elts_tail & txq->elts_m],
part, olx);
txq->elts_tail += part;
n_elts -= part;
bool ring_doorbell = false;
int ret;
- static_assert(MLX5_CQE_STATUS_HW_OWN < 0, "Must be negative value");
- static_assert(MLX5_CQE_STATUS_SW_OWN < 0, "Must be negative value");
do {
volatile struct mlx5_cqe *cqe;
}
/* Normal transmit completion. */
MLX5_ASSERT(txq->cq_ci != txq->cq_pi);
+#ifdef RTE_LIBRTE_MLX5_DEBUG
MLX5_ASSERT((txq->fcqs[txq->cq_ci & txq->cqe_m] >> 16) ==
cqe->wqe_counter);
+#endif
ring_doorbell = true;
++txq->cq_ci;
last_cqe = cqe;
qs = RTE_PTR_ADD(wqe, MLX5_WSEG_SIZE);
qs->max_index = rte_cpu_to_be_32(wci);
- qs->qpn_cqn = rte_cpu_to_be_32(txq->sh->txpp.clock_queue.cq->id);
+ qs->qpn_cqn = rte_cpu_to_be_32(txq->sh->txpp.clock_queue.cq_obj.cq->id);
qs->reserved0 = RTE_BE32(0);
qs->reserved1 = RTE_BE32(0);
}
es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
*RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
- static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
- (sizeof(uint16_t) +
- sizeof(rte_v128u32_t)),
- "invalid Ethernet Segment data size");
- static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
- (sizeof(uint16_t) +
- sizeof(struct rte_vlan_hdr) +
- 2 * RTE_ETHER_ADDR_LEN),
- "invalid Ethernet Segment data size");
psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
es->inline_hdr_sz = RTE_BE16(MLX5_ESEG_MIN_INLINE_SIZE);
es->inline_data = *(unaligned_uint16_t *)psrc;
es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
*RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
- static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
- (sizeof(uint16_t) +
- sizeof(rte_v128u32_t)),
- "invalid Ethernet Segment data size");
- static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
- (sizeof(uint16_t) +
- sizeof(struct rte_vlan_hdr) +
- 2 * RTE_ETHER_ADDR_LEN),
- "invalid Ethernet Segment data size");
psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
es->inline_hdr_sz = rte_cpu_to_be_16(inlen);
es->inline_data = *(unaligned_uint16_t *)psrc;
es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
*RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
- static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
- (sizeof(uint16_t) +
- sizeof(rte_v128u32_t)),
- "invalid Ethernet Segment data size");
- static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
- (sizeof(uint16_t) +
- sizeof(struct rte_vlan_hdr) +
- 2 * RTE_ETHER_ADDR_LEN),
- "invalid Ethernet Segment data size");
MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
pdst = (uint8_t *)&es->inline_data;
if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
uint8_t *pdst;
MLX5_ASSERT(len > MLX5_ESEG_MIN_INLINE_SIZE);
- static_assert(MLX5_DSEG_MIN_INLINE_SIZE ==
- (2 * RTE_ETHER_ADDR_LEN),
- "invalid Data Segment data size");
if (!MLX5_TXOFF_CONFIG(MPW)) {
/* Store the descriptor byte counter for eMPW sessions. */
dseg->bcount = rte_cpu_to_be_32
MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
MLX5_ASSERT(loc->elts_free && loc->wqe_free);
MLX5_ASSERT(pkts_n > loc->pkts_sent);
- static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
pkts += loc->pkts_sent + 1;
pkts_n -= loc->pkts_sent;
for (;;) {
MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
MLX5_ASSERT(loc->elts_free && loc->wqe_free);
MLX5_ASSERT(pkts_n > loc->pkts_sent);
- static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
pkts += loc->pkts_sent + 1;
pkts_n -= loc->pkts_sent;
for (;;) {
MLX5_ASSERT(room >= tlen);
room -= tlen;
/*
- * Packet data are completely inlined,
- * free the packet immediately.
+			 * Packet data are completely inlined,
+ * we can try to free the packet.
+ */
+ if (likely(loc->pkts_sent == loc->mbuf_free)) {
+ /*
+			 * All the packets from the beginning of the burst
+			 * are inlined, so the mbufs can be freed directly
+			 * from the original array on tx_burst() exit.
+ */
+ loc->mbuf_free++;
+ goto next_mbuf;
+ }
+ /*
+			 * In order not to call rte_pktmbuf_free_seg() here
+			 * in the innermost loop (the call might be very
+			 * expensive), we just save the mbuf in elts.
*/
- rte_pktmbuf_free_seg(loc->mbuf);
+ txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
+ loc->elts_free--;
goto next_mbuf;
pointer_empw:
/*
mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
/* We have to store mbuf in elts.*/
txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
+ loc->elts_free--;
room -= MLX5_WQE_DSEG_SIZE;
/* Ring buffer wraparound is checked at the loop end.*/
++dseg;
slen += dlen;
#endif
loc->pkts_sent++;
- loc->elts_free--;
pkts_n--;
if (unlikely(!pkts_n || !loc->elts_free)) {
/*
loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
vlan = sizeof(struct rte_vlan_hdr);
inlen += vlan;
- static_assert((sizeof(struct rte_vlan_hdr) +
- sizeof(struct rte_ether_hdr)) ==
- MLX5_ESEG_MIN_INLINE_SIZE,
- "invalid min inline data size");
}
/*
* If inlining is enabled at configuration time
MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
if (unlikely(!pkts_n))
return 0;
+ if (MLX5_TXOFF_CONFIG(INLINE))
+ loc.mbuf_free = 0;
loc.pkts_sent = 0;
loc.pkts_copy = 0;
loc.wqe_last = NULL;
/* Increment sent packets counter. */
txq->stats.opackets += loc.pkts_sent;
#endif
+ if (MLX5_TXOFF_CONFIG(INLINE) && loc.mbuf_free)
+ __mlx5_tx_free_mbuf(txq, pkts, loc.mbuf_free, olx);
return loc.pkts_sent;
}
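
To summarize the deferred-free flow: loc.mbuf_free counts the contiguous prefix of pkts[] whose data were fully inlined, and those mbufs are freed with one out-of-line bulk call on exit instead of per-packet rte_pktmbuf_free_seg(). A standalone sketch of the pattern, with illustrative names only:

#include <stdio.h>

struct buf { int inlined; };

static void
bulk_free(struct buf **pkts, unsigned int n)
{
	(void)pkts;
	printf("bulk-freeing %u buffers\n", n);
}

static unsigned int
send_burst(struct buf **pkts, unsigned int pkts_n)
{
	unsigned int pkts_sent = 0, mbuf_free = 0;

	for (; pkts_sent < pkts_n; ++pkts_sent) {
		if (pkts[pkts_sent]->inlined && pkts_sent == mbuf_free)
			++mbuf_free; /* contiguous prefix still unbroken */
		/* else: buffer would be kept in elts until completion */
	}
	if (mbuf_free)
		bulk_free(pkts, mbuf_free); /* mirrors __mlx5_tx_free_mbuf() */
	return pkts_sent;
}

int
main(void)
{
	struct buf a = {1}, b = {1}, c = {0};
	struct buf *pkts[] = {&a, &b, &c};

	return send_burst(pkts, 3) == 3 ? 0 : 1;
}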
uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
unsigned int diff = 0, olx = 0, i, m;
- static_assert(MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE <=
- MLX5_DSEG_MAX, "invalid WQE max size");
- static_assert(MLX5_WQE_CSEG_SIZE == MLX5_WSEG_SIZE,
- "invalid WQE Control Segment size");
- static_assert(MLX5_WQE_ESEG_SIZE == MLX5_WSEG_SIZE,
- "invalid WQE Ethernet Segment size");
- static_assert(MLX5_WQE_DSEG_SIZE == MLX5_WSEG_SIZE,
- "invalid WQE Data Segment size");
- static_assert(MLX5_WQE_SIZE == 4 * MLX5_WSEG_SIZE,
- "invalid WQE size");
MLX5_ASSERT(priv);
if (tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
/* We should support Multi-Segment Packets. */
int
mlx5_tx_burst_mode_get(struct rte_eth_dev *dev,
- uint16_t tx_queue_id __rte_unused,
+ uint16_t tx_queue_id,
struct rte_eth_burst_mode *mode)
{
eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_txq_data *txq = (*priv->txqs)[tx_queue_id];
unsigned int i, olx;
for (i = 0; i < RTE_DIM(txoff_func); i++) {
if (pkt_burst == txoff_func[i].func) {
olx = txoff_func[i].olx;
snprintf(mode->info, sizeof(mode->info),
- "%s%s%s%s%s%s%s%s%s",
+ "%s%s%s%s%s%s%s%s%s%s",
(olx & MLX5_TXOFF_CONFIG_EMPW) ?
((olx & MLX5_TXOFF_CONFIG_MPW) ?
"Legacy MPW" : "Enhanced MPW") : "No MPW",
(olx & MLX5_TXOFF_CONFIG_METADATA) ?
" + METADATA" : "",
(olx & MLX5_TXOFF_CONFIG_TXPP) ?
- " + TXPP" : "");
+ " + TXPP" : "",
+ (txq && txq->fast_free) ?
+ " + Fast Free" : "");
return 0;
}
}
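
An application can observe the extended mode string through rte_eth_tx_burst_mode_get(); with the offload active, the info string now ends with " + Fast Free". A short usage sketch:

#include <stdio.h>
#include <rte_ethdev.h>

static void
print_tx_burst_mode(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_burst_mode mode;

	if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0)
		printf("port %u txq %u: %s\n", port_id, queue_id, mode.info);
}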