#include <rte_common.h>
#include <rte_spinlock.h>
+#include <mlx5_common.h>
#include <mlx5_common_mr.h>
#include "mlx5.h"
#include "mlx5_autoconf.h"
-#include "mlx5_mr.h"
/* TX burst subroutines return codes. */
enum mlx5_txcmp_code {
/* Mbuf dynamic flag offset for inline. */
extern uint64_t rte_net_mlx5_dynf_inline_mask;
-#define PKT_TX_DYNF_NOINLINE rte_net_mlx5_dynf_inline_mask
+#define RTE_MBUF_F_TX_DYNF_NOINLINE rte_net_mlx5_dynf_inline_mask
extern uint32_t mlx5_ptype_table[] __rte_cache_aligned;
extern uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned;
uint16_t vlan_en:1; /* VLAN insertion in WQE is supported. */
uint16_t db_nc:1; /* Doorbell mapped to non-cached region. */
uint16_t db_heu:1; /* Doorbell heuristic write barrier. */
+ uint16_t rt_timestamp:1; /* Realtime timestamp format. */
+ uint16_t wait_on_time:1; /* WQE with timestamp is supported. */
uint16_t fast_free:1; /* mbuf fast free on Tx is enabled. */
uint16_t inlen_send; /* Ordinary send data inline size. */
uint16_t inlen_empw; /* eMPW max packet size to inline. */
volatile uint32_t *cq_db; /* Completion queue doorbell. */
uint16_t port_id; /* Port ID of device. */
uint16_t idx; /* Queue index. */
+ uint64_t rt_timemask; /* Scheduling timestamp mask. */
uint64_t ts_mask; /* Timestamp flag dynamic mask. */
int32_t ts_offset; /* Timestamp field dynamic offset. */
struct mlx5_dev_ctx_shared *sh; /* Shared context. */
struct mlx5_txq_stats stats; /* TX queue counters. */
-#ifndef RTE_ARCH_64
- rte_spinlock_t *uar_lock;
- /* UAR access lock required for 32bit implementations */
-#endif
+ struct mlx5_txq_stats stats_reset; /* Stats on the last reset. */
+ struct mlx5_uar_data uar_data;
struct rte_mbuf *elts[0];
/* Storage for queued packets, must be the last field. */
} __rte_cache_aligned;
-enum mlx5_txq_type {
- MLX5_TXQ_TYPE_STANDARD, /* Standard Tx queue. */
- MLX5_TXQ_TYPE_HAIRPIN, /* Hairpin Tx queue. */
-};
-
/* TX queue control descriptor. */
struct mlx5_txq_ctrl {
LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
uint32_t refcnt; /* Reference counter. */
unsigned int socket; /* CPU socket ID for allocations. */
- enum mlx5_txq_type type; /* The txq ctrl type. */
+ bool is_hairpin; /* Whether TxQ type is Hairpin. */
unsigned int max_inline_data; /* Max inline data. */
unsigned int max_tso_header; /* Max TSO header size. */
struct mlx5_txq_obj *obj; /* Verbs/DevX queue object. */
struct mlx5_priv *priv; /* Back pointer to private data. */
off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */
- void *bf_reg; /* BlueFlame register from Verbs. */
uint16_t dump_file_n; /* Number of dump files. */
struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */
uint32_t hairpin_status; /* Hairpin binding status. */
(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
const struct rte_eth_hairpin_conf *hairpin_conf);
void mlx5_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
-void txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl);
int mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd);
void mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev);
int mlx5_txq_obj_verify(struct rte_eth_dev *dev);
/* mlx5_tx.c */
-uint16_t removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
- uint16_t pkts_n);
void mlx5_tx_handle_completion(struct mlx5_txq_data *__rte_restrict txq,
unsigned int olx __rte_unused);
int mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset);
int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
struct rte_eth_burst_mode *mode);
-/* mlx5_mr.c */
-
-uint32_t mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, struct rte_mbuf *mb);
-
/* mlx5_tx_empw.c */
MLX5_TXOFF_PRE_DECL(full_empw);
MLX5_TXOFF_PRE_DECL(mc_mpw);
MLX5_TXOFF_PRE_DECL(i_mpw);
-static __rte_always_inline uint64_t *
+static __rte_always_inline struct mlx5_uar_data *
mlx5_tx_bfreg(struct mlx5_txq_data *txq)
{
- return MLX5_PROC_PRIV(txq->port_id)->uar_table[txq->idx];
-}
-
-/**
- * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
- * 64bit architectures.
- *
- * @param val
- * value to write in CPU endian format.
- * @param addr
- * Address to write to.
- * @param lock
- * Address of the lock to use for that UAR access.
- */
-static __rte_always_inline void
-__mlx5_uar_write64_relaxed(uint64_t val, void *addr,
- rte_spinlock_t *lock __rte_unused)
-{
-#ifdef RTE_ARCH_64
- *(uint64_t *)addr = val;
-#else /* !RTE_ARCH_64 */
- rte_spinlock_lock(lock);
- *(uint32_t *)addr = val;
- rte_io_wmb();
- *((uint32_t *)addr + 1) = val >> 32;
- rte_spinlock_unlock(lock);
-#endif
-}
-
-/**
- * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
- * 64bit architectures while guaranteeing the order of execution with the
- * code being executed.
- *
- * @param val
- * value to write in CPU endian format.
- * @param addr
- * Address to write to.
- * @param lock
- * Address of the lock to use for that UAR access.
- */
-static __rte_always_inline void
-__mlx5_uar_write64(uint64_t val, void *addr, rte_spinlock_t *lock)
-{
- rte_io_wmb();
- __mlx5_uar_write64_relaxed(val, addr, lock);
-}
-
-/* Assist macros, used instead of directly calling the functions they wrap. */
-#ifdef RTE_ARCH_64
-#define mlx5_uar_write64_relaxed(val, dst, lock) \
- __mlx5_uar_write64_relaxed(val, dst, NULL)
-#define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, NULL)
-#else
-#define mlx5_uar_write64_relaxed(val, dst, lock) \
- __mlx5_uar_write64_relaxed(val, dst, lock)
-#define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, lock)
-#endif
-
-/**
- * Query LKey from a packet buffer for Tx. If not found, add the mempool.
- *
- * @param txq
- * Pointer to Tx queue structure.
- * @param addr
- * Address to search.
- *
- * @return
- * Searched LKey on success, UINT32_MAX on no match.
- */
-static __rte_always_inline uint32_t
-mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
-{
- struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
- uintptr_t addr = (uintptr_t)mb->buf_addr;
- uint32_t lkey;
-
- /* Check generation bit to see if there's any change on existing MRs. */
- if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
- mlx5_mr_flush_local_cache(mr_ctrl);
- /* Linear search on MR cache array. */
- lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
- MLX5_MR_CACHE_N, addr);
- if (likely(lkey != UINT32_MAX))
- return lkey;
- /* Take slower bottom-half on miss. */
- return mlx5_tx_mb2mr_bh(txq, mb);
-}
-
-/**
- * Ring TX queue doorbell and flush the update if requested.
- *
- * @param txq
- * Pointer to TX queue structure.
- * @param wqe
- * Pointer to the last WQE posted in the NIC.
- * @param cond
- * Request for write memory barrier after BlueFlame update.
- */
-static __rte_always_inline void
-mlx5_tx_dbrec_cond_wmb(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe,
- int cond)
-{
- uint64_t *dst = mlx5_tx_bfreg(txq);
- volatile uint64_t *src = ((volatile uint64_t *)wqe);
-
- rte_io_wmb();
- *txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci);
- /* Ensure ordering between DB record and BF copy. */
- rte_wmb();
- mlx5_uar_write64_relaxed(*src, dst, txq->uar_lock);
- if (cond)
- rte_wmb();
+ return &MLX5_PROC_PRIV(txq->port_id)->uar_table[txq->idx];
}
/**
static __rte_always_inline void
mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe)
{
- mlx5_tx_dbrec_cond_wmb(txq, wqe, 1);
+ mlx5_doorbell_ring(mlx5_tx_bfreg(txq), *(volatile uint64_t *)wqe,
+ txq->wqe_ci, txq->qp_db, 1);
}
/**
if (!MLX5_TXOFF_CONFIG(SWP))
return 0;
ol = loc->mbuf->ol_flags;
- tunnel = ol & PKT_TX_TUNNEL_MASK;
+ tunnel = ol & RTE_MBUF_F_TX_TUNNEL_MASK;
/*
* Check whether Software Parser is required.
* Only customized tunnels may ask for it.
*/
- if (likely(tunnel != PKT_TX_TUNNEL_UDP && tunnel != PKT_TX_TUNNEL_IP))
+ if (likely(tunnel != RTE_MBUF_F_TX_TUNNEL_UDP && tunnel != RTE_MBUF_F_TX_TUNNEL_IP))
return 0;
/*
* The index should have:
- * bit[0:1] = PKT_TX_L4_MASK
- * bit[4] = PKT_TX_IPV6
- * bit[8] = PKT_TX_OUTER_IPV6
- * bit[9] = PKT_TX_OUTER_UDP
+ * bit[0:1] = RTE_MBUF_F_TX_L4_MASK
+ * bit[4] = RTE_MBUF_F_TX_IPV6
+ * bit[8] = RTE_MBUF_F_TX_OUTER_IPV6
+ * bit[9] = RTE_MBUF_F_TX_OUTER_UDP
*/
- idx = (ol & (PKT_TX_L4_MASK | PKT_TX_IPV6 | PKT_TX_OUTER_IPV6)) >> 52;
- idx |= (tunnel == PKT_TX_TUNNEL_UDP) ? (1 << 9) : 0;
+ idx = (ol & (RTE_MBUF_F_TX_L4_MASK | RTE_MBUF_F_TX_IPV6 | RTE_MBUF_F_TX_OUTER_IPV6)) >> 52;
+ idx |= (tunnel == RTE_MBUF_F_TX_TUNNEL_UDP) ? (1 << 9) : 0;
*swp_flags = mlx5_swp_types_table[idx];
/*
* Set offsets for SW parser. Since ConnectX-5, SW parser just
* should be set regardless of HW offload.
*/
off = loc->mbuf->outer_l2_len;
- if (MLX5_TXOFF_CONFIG(VLAN) && ol & PKT_TX_VLAN_PKT)
+ if (MLX5_TXOFF_CONFIG(VLAN) && ol & RTE_MBUF_F_TX_VLAN)
off += sizeof(struct rte_vlan_hdr);
set = (off >> 1) << 8; /* Outer L3 offset. */
off += loc->mbuf->outer_l3_len;
- if (tunnel == PKT_TX_TUNNEL_UDP)
+ if (tunnel == RTE_MBUF_F_TX_TUNNEL_UDP)
set |= off >> 1; /* Outer L4 offset. */
- if (ol & (PKT_TX_IPV4 | PKT_TX_IPV6)) { /* Inner IP. */
- const uint64_t csum = ol & PKT_TX_L4_MASK;
+ if (ol & (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IPV6)) { /* Inner IP. */
+ const uint64_t csum = ol & RTE_MBUF_F_TX_L4_MASK;
off += loc->mbuf->l2_len;
set |= (off >> 1) << 24; /* Inner L3 offset. */
- if (csum == PKT_TX_TCP_CKSUM ||
- csum == PKT_TX_UDP_CKSUM ||
- (MLX5_TXOFF_CONFIG(TSO) && ol & PKT_TX_TCP_SEG)) {
+ if (csum == RTE_MBUF_F_TX_TCP_CKSUM ||
+ csum == RTE_MBUF_F_TX_UDP_CKSUM ||
+ (MLX5_TXOFF_CONFIG(TSO) && ol & RTE_MBUF_F_TX_TCP_SEG)) {
off += loc->mbuf->l3_len;
set |= (off >> 1) << 16; /* Inner L4 offset. */
}
txq_ol_cksum_to_cs(struct rte_mbuf *buf)
{
uint32_t idx;
- uint8_t is_tunnel = !!(buf->ol_flags & PKT_TX_TUNNEL_MASK);
- const uint64_t ol_flags_mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK |
- PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM;
+ uint8_t is_tunnel = !!(buf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK);
+ const uint64_t ol_flags_mask = RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_L4_MASK |
+ RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_OUTER_IP_CKSUM;
/*
* The index should have:
- * bit[0] = PKT_TX_TCP_SEG
- * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
- * bit[4] = PKT_TX_IP_CKSUM
- * bit[8] = PKT_TX_OUTER_IP_CKSUM
+ * bit[0] = RTE_MBUF_F_TX_TCP_SEG
+ * bit[2:3] = RTE_MBUF_F_TX_UDP_CKSUM, RTE_MBUF_F_TX_TCP_CKSUM
+ * bit[4] = RTE_MBUF_F_TX_IP_CKSUM
+ * bit[8] = RTE_MBUF_F_TX_OUTER_IP_CKSUM
* bit[9] = tunnel
*/
idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (!!is_tunnel << 9);
* compile time and may be used for optimization.
*/
static __rte_always_inline void
-mlx5_tx_wseg_init(struct mlx5_txq_data *restrict txq,
+mlx5_tx_qseg_init(struct mlx5_txq_data *restrict txq,
struct mlx5_txq_local *restrict loc __rte_unused,
struct mlx5_wqe *restrict wqe,
unsigned int wci,
qs->reserved1 = RTE_BE32(0);
}
+/**
+ * Build the Wait on Time Segment with specified timestamp value.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param loc
+ * Pointer to burst routine local context.
+ * @param wqe
+ * Pointer to WQE to fill with built Wait Segment.
+ * @param ts
+ * Timestamp value to wait for.
+ * @param olx
+ * Configured Tx offloads mask. It is fully defined at
+ * compile time and may be used for optimization.
+ */
+static __rte_always_inline void
+mlx5_tx_wseg_init(struct mlx5_txq_data *restrict txq,
+ struct mlx5_txq_local *restrict loc __rte_unused,
+ struct mlx5_wqe *restrict wqe,
+ uint64_t ts,
+ unsigned int olx __rte_unused)
+{
+ struct mlx5_wqe_wseg *ws;
+
+ ws = RTE_PTR_ADD(wqe, MLX5_WSEG_SIZE);
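+ /* Wait condition: cyclic-bigger comparison against the timestamp value. */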
+ ws->operation = rte_cpu_to_be_32(MLX5_WAIT_COND_CYCLIC_BIGGER);
+ ws->lkey = RTE_BE32(0);
+ ws->va_high = RTE_BE32(0);
+ ws->va_low = RTE_BE32(0);
+ if (txq->rt_timestamp) {
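+ /* Convert to the RT format: seconds << 32 | nanoseconds. */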
+ ts = ts % (uint64_t)NS_PER_S
+ | (ts / (uint64_t)NS_PER_S) << 32;
+ }
+ ws->value = rte_cpu_to_be_64(ts);
+ ws->mask = txq->rt_timemask;
+}
+
/**
* Build the Ethernet Segment without inlined data.
* Supports Software Parser, Checksums and VLAN insertion Tx offload features.
es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
/* Fill metadata field if needed. */
es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
- loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
+ loc->mbuf->ol_flags & RTE_MBUF_DYNFLAG_TX_METADATA ?
rte_cpu_to_be_32(*RTE_FLOW_DYNF_METADATA(loc->mbuf)) :
0 : 0;
/* Engage VLAN tag insertion feature if requested. */
if (MLX5_TXOFF_CONFIG(VLAN) &&
- loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+ loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN) {
/*
* We should get here only if the device supports
* this feature correctly.
es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
/* Fill metadata field if needed. */
es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
- loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
+ loc->mbuf->ol_flags & RTE_MBUF_DYNFLAG_TX_METADATA ?
rte_cpu_to_be_32(*RTE_FLOW_DYNF_METADATA(loc->mbuf)) :
0 : 0;
psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
/* Fill metadata field if needed. */
es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
- loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
+ loc->mbuf->ol_flags & RTE_MBUF_DYNFLAG_TX_METADATA ?
rte_cpu_to_be_32(*RTE_FLOW_DYNF_METADATA(loc->mbuf)) :
0 : 0;
psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
uint8_t *psrc;
MLX5_ASSERT(len);
- MLX5_ASSERT(must <= len);
do {
/* Allow zero length packets, must check first. */
dlen = rte_pktmbuf_data_len(loc->mbuf);
MLX5_ASSERT(loc->mbuf_nseg > 1);
MLX5_ASSERT(loc->mbuf);
--loc->mbuf_nseg;
- if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
+ if (loc->mbuf->ol_flags & RTE_MBUF_F_TX_DYNF_NOINLINE) {
unsigned int diff;
if (copy >= must) {
if (diff <= rte_pktmbuf_data_len(loc->mbuf)) {
/*
* Copy only the minimal required
- * part of the data buffer.
+ * part of the data buffer. Limit the amount
+ * of data to be copied to the length of
+ * available space.
*/
- len = diff;
+ len = RTE_MIN(len, diff);
}
}
continue;
es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
/* Fill metadata field if needed. */
es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
- loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
+ loc->mbuf->ol_flags & RTE_MBUF_DYNFLAG_TX_METADATA ?
rte_cpu_to_be_32(*RTE_FLOW_DYNF_METADATA(loc->mbuf)) :
0 : 0;
MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
{
MLX5_ASSERT(len);
dseg->bcount = rte_cpu_to_be_32(len);
- dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
+ dseg->lkey = mlx5_mr_mb2mr(&txq->mr_ctrl, loc->mbuf);
dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
}
MLX5_ASSERT(len);
if (len > MLX5_DSEG_MIN_INLINE_SIZE) {
dseg->bcount = rte_cpu_to_be_32(len);
- dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
+ dseg->lkey = mlx5_mr_mb2mr(&txq->mr_ctrl, loc->mbuf);
dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
return;
{
if (MLX5_TXOFF_CONFIG(TXPP) &&
loc->mbuf->ol_flags & txq->ts_mask) {
+ struct mlx5_dev_ctx_shared *sh;
struct mlx5_wqe *wqe;
uint64_t ts;
- int32_t wci;
/*
* Estimate the required space quickly and roughly.
return MLX5_TXCMP_CODE_EXIT;
/* Convert the timestamp into completion to wait. */
ts = *RTE_MBUF_DYNFIELD(loc->mbuf, txq->ts_offset, uint64_t *);
- wci = mlx5_txpp_convert_tx_ts(txq->sh, ts);
- if (unlikely(wci < 0))
- return MLX5_TXCMP_CODE_SINGLE;
- /* Build the WAIT WQE with specified completion. */
wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
- mlx5_tx_cseg_init(txq, loc, wqe, 2, MLX5_OPCODE_WAIT, olx);
- mlx5_tx_wseg_init(txq, loc, wqe, wci, olx);
+ sh = txq->sh;
+ if (txq->wait_on_time) {
+ /* The wait on time capability should be used. */
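+ /* Adjust the timestamp by the configured scheduling skew. */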
+ ts -= sh->txpp.skew;
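+ /* Build the WAIT WQE: Control Segment followed by the Wait-on-Time Segment. */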
+ mlx5_tx_cseg_init(txq, loc, wqe,
+ 1 + sizeof(struct mlx5_wqe_wseg) /
+ MLX5_WSEG_SIZE,
+ MLX5_OPCODE_WAIT |
+ MLX5_OPC_MOD_WAIT_TIME << 24, olx);
+ mlx5_tx_wseg_init(txq, loc, wqe, ts, olx);
+ } else {
+ /* Legacy cross-channel operation should be used. */
+ int32_t wci;
+
+ wci = mlx5_txpp_convert_tx_ts(sh, ts);
+ if (unlikely(wci < 0))
+ return MLX5_TXCMP_CODE_SINGLE;
+ /* Build the WAIT WQE with specified completion. */
+ mlx5_tx_cseg_init(txq, loc, wqe,
+ 1 + sizeof(struct mlx5_wqe_qseg) /
+ MLX5_WSEG_SIZE,
+ MLX5_OPCODE_WAIT |
+ MLX5_OPC_MOD_WAIT_CQ_PI << 24, olx);
+ mlx5_tx_qseg_init(txq, loc, wqe, wci, olx);
+ }
++txq->wqe_ci;
--loc->wqe_free;
return MLX5_TXCMP_CODE_MULTI;
* the required space in WQE ring buffer.
*/
dlen = rte_pktmbuf_pkt_len(loc->mbuf);
- if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
+ if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN)
vlan = sizeof(struct rte_vlan_hdr);
inlen = loc->mbuf->l2_len + vlan +
loc->mbuf->l3_len + loc->mbuf->l4_len;
if (unlikely((!inlen || !loc->mbuf->tso_segsz)))
return MLX5_TXCMP_CODE_ERROR;
- if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
+ if (loc->mbuf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
inlen += loc->mbuf->outer_l2_len + loc->mbuf->outer_l3_len;
/* Packet must contain all TSO headers. */
if (unlikely(inlen > MLX5_MAX_TSO_HEADER ||
inlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
inlen > (dlen + vlan)))
return MLX5_TXCMP_CODE_ERROR;
- MLX5_ASSERT(inlen >= txq->inlen_mode);
/*
* Check whether there are enough free WQEBBs:
* - Control Segment
/* Update sent data bytes counter. */
txq->stats.obytes += rte_pktmbuf_pkt_len(loc->mbuf);
if (MLX5_TXOFF_CONFIG(VLAN) &&
- loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
+ loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN)
txq->stats.obytes += sizeof(struct rte_vlan_hdr);
#endif
/*
* to estimate the required space for WQE.
*/
dlen = rte_pktmbuf_pkt_len(loc->mbuf);
- if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
+ if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN)
vlan = sizeof(struct rte_vlan_hdr);
inlen = dlen + vlan;
/* Check against minimal length. */
return MLX5_TXCMP_CODE_ERROR;
MLX5_ASSERT(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
if (inlen > txq->inlen_send ||
- loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
+ loc->mbuf->ol_flags & RTE_MBUF_F_TX_DYNF_NOINLINE) {
struct rte_mbuf *mbuf;
unsigned int nxlen;
uintptr_t start;
MLX5_ASSERT(txq->inlen_mode >=
MLX5_ESEG_MIN_INLINE_SIZE);
MLX5_ASSERT(txq->inlen_mode <= txq->inlen_send);
- inlen = txq->inlen_mode;
+ inlen = RTE_MIN(txq->inlen_mode, inlen);
} else if (vlan && !txq->vlan_en) {
/*
* VLAN insertion is requested and hardware does not
* support the offload, will do with software inline.
*/
inlen = MLX5_ESEG_MIN_INLINE_SIZE;
- } else if (mbuf->ol_flags & PKT_TX_DYNF_NOINLINE ||
+ } else if (mbuf->ol_flags & RTE_MBUF_F_TX_DYNF_NOINLINE ||
nxlen > txq->inlen_send) {
return mlx5_tx_packet_multi_send(txq, loc, olx);
} else {
goto do_first;
}
+ if (mbuf->ol_flags & RTE_MBUF_F_TX_DYNF_NOINLINE)
+ goto do_build;
/*
* Now we know the minimal amount of data is requested
* to inline. Check whether we should inline the buffers
mbuf = NEXT(mbuf);
/* This should not be the end of the packet. */
MLX5_ASSERT(mbuf);
+ if (mbuf->ol_flags & RTE_MBUF_F_TX_DYNF_NOINLINE)
+ break;
nxlen = inlen + rte_pktmbuf_data_len(mbuf);
} while (unlikely(nxlen < txq->inlen_send));
}
* Estimate the number of Data Segments conservatively,
* supposing that no mbufs are freed during inlining.
*/
+do_build:
MLX5_ASSERT(inlen <= txq->inlen_send);
ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
MLX5_ESEG_MIN_INLINE_SIZE +
if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
return MLX5_TXCMP_CODE_EXIT;
/* Check for maximal WQE size. */
- if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
+ if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ds))
return MLX5_TXCMP_CODE_ERROR;
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Update sent data bytes/packets counters. */
if (loc->elts_free < NB_SEGS(loc->mbuf))
return MLX5_TXCMP_CODE_EXIT;
if (MLX5_TXOFF_CONFIG(TSO) &&
- unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)) {
+ unlikely(loc->mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
/* Proceed with multi-segment TSO. */
ret = mlx5_tx_packet_multi_tso(txq, loc, olx);
} else if (MLX5_TXOFF_CONFIG(INLINE)) {
continue;
/* Here ends the series of multi-segment packets. */
if (MLX5_TXOFF_CONFIG(TSO) &&
- unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
+ unlikely(loc->mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG))
return MLX5_TXCMP_CODE_TSO;
return MLX5_TXCMP_CODE_SINGLE;
}
}
dlen = rte_pktmbuf_data_len(loc->mbuf);
if (MLX5_TXOFF_CONFIG(VLAN) &&
- loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+ loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN) {
vlan = sizeof(struct rte_vlan_hdr);
}
/*
loc->mbuf->l3_len + loc->mbuf->l4_len;
if (unlikely((!hlen || !loc->mbuf->tso_segsz)))
return MLX5_TXCMP_CODE_ERROR;
- if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
+ if (loc->mbuf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
hlen += loc->mbuf->outer_l2_len +
loc->mbuf->outer_l3_len;
/* Segment must contain all TSO headers. */
if (MLX5_TXOFF_CONFIG(MULTI) &&
unlikely(NB_SEGS(loc->mbuf) > 1))
return MLX5_TXCMP_CODE_MULTI;
- if (likely(!(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)))
+ if (likely(!(loc->mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)))
return MLX5_TXCMP_CODE_SINGLE;
/* Continue with the next TSO packet. */
}
/* Check for TSO packet. */
if (newp &&
MLX5_TXOFF_CONFIG(TSO) &&
- unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
+ unlikely(loc->mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG))
return MLX5_TXCMP_CODE_TSO;
/* Check if eMPW is enabled at all. */
if (!MLX5_TXOFF_CONFIG(EMPW))
return MLX5_TXCMP_CODE_SINGLE;
/* Check if eMPW can be engaged. */
if (MLX5_TXOFF_CONFIG(VLAN) &&
- unlikely(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) &&
+ unlikely(loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN) &&
(!MLX5_TXOFF_CONFIG(INLINE) ||
unlikely((rte_pktmbuf_data_len(loc->mbuf) +
sizeof(struct rte_vlan_hdr)) > txq->inlen_empw))) {
return false;
/* Fill metadata field if needed. */
if (MLX5_TXOFF_CONFIG(METADATA) &&
- es->metadata != (loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
- rte_cpu_to_be_32(*RTE_FLOW_DYNF_METADATA(loc->mbuf)) : 0))
+ es->metadata != (loc->mbuf->ol_flags & RTE_MBUF_DYNFLAG_TX_METADATA ?
+ rte_cpu_to_be_32(*RTE_FLOW_DYNF_METADATA(loc->mbuf)) : 0))
return false;
/* Legacy MPW can send packets with the same length only. */
if (MLX5_TXOFF_CONFIG(MPW) &&
return false;
/* There must be no VLAN packets in eMPW loop. */
if (MLX5_TXOFF_CONFIG(VLAN))
- MLX5_ASSERT(!(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT));
+ MLX5_ASSERT(!(loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN));
/* Check if the scheduling is requested. */
if (MLX5_TXOFF_CONFIG(TXPP) &&
loc->mbuf->ol_flags & txq->ts_mask)
}
/* Inline or not inline - that's the Question. */
if (dlen > txq->inlen_empw ||
- loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE)
+ loc->mbuf->ol_flags & RTE_MBUF_F_TX_DYNF_NOINLINE)
goto pointer_empw;
if (MLX5_TXOFF_CONFIG(MPW)) {
if (dlen > txq->inlen_send)
}
/* Inline entire packet, optional VLAN insertion. */
if (MLX5_TXOFF_CONFIG(VLAN) &&
- loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+ loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN) {
/*
* The packet length must be checked in
* mlx5_tx_able_to_empw() and packet
MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
if (MLX5_TXOFF_CONFIG(VLAN))
MLX5_ASSERT(!(loc->mbuf->ol_flags &
- PKT_TX_VLAN_PKT));
+ RTE_MBUF_F_TX_VLAN));
mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
/* We have to store mbuf in elts.*/
txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
inlen = rte_pktmbuf_data_len(loc->mbuf);
if (MLX5_TXOFF_CONFIG(VLAN) &&
- loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+ loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN) {
vlan = sizeof(struct rte_vlan_hdr);
inlen += vlan;
}
if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
return MLX5_TXCMP_CODE_ERROR;
if (loc->mbuf->ol_flags &
- PKT_TX_DYNF_NOINLINE) {
+ RTE_MBUF_F_TX_DYNF_NOINLINE) {
/*
* The hint flag not to inline packet
* data is set. Check whether we can
/* Update sent data bytes counter. */
txq->stats.obytes += rte_pktmbuf_data_len(loc->mbuf);
if (MLX5_TXOFF_CONFIG(VLAN) &&
- loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
+ loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN)
txq->stats.obytes +=
sizeof(struct rte_vlan_hdr);
#endif
}
/* Dedicated branch for single-segment TSO packets. */
if (MLX5_TXOFF_CONFIG(TSO) &&
- unlikely(loc.mbuf->ol_flags & PKT_TX_TCP_SEG)) {
+ unlikely(loc.mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
/*
* TSO might require special way for inlining
* (dedicated parameters) and is sent with
* packets are coming and the write barrier will be issued on
* the next burst (after descriptor writing, at least).
*/
- mlx5_tx_dbrec_cond_wmb(txq, loc.wqe_last, !txq->db_nc &&
- (!txq->db_heu || pkts_n % MLX5_TX_DEFAULT_BURST));
+ mlx5_doorbell_ring(mlx5_tx_bfreg(txq),
+ *(volatile uint64_t *)loc.wqe_last, txq->wqe_ci,
+ txq->qp_db, !txq->db_nc &&
+ (!txq->db_heu || pkts_n % MLX5_TX_DEFAULT_BURST));
/* Not all of the mbufs may be stored into elts yet. */
part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent - loc.pkts_copy;
if (!MLX5_TXOFF_CONFIG(INLINE) && part) {