1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2021 6WIND S.A.
3 * Copyright 2021 Mellanox Technologies, Ltd
6 #ifndef RTE_PMD_MLX5_TX_H_
7 #define RTE_PMD_MLX5_TX_H_
10 #include <sys/queue.h>
13 #include <rte_mempool.h>
14 #include <rte_common.h>
15 #include <rte_spinlock.h>
17 #include <mlx5_common_mr.h>
20 #include "mlx5_autoconf.h"
23 /* TX burst subroutines return codes. */
24 enum mlx5_txcmp_code {
25 MLX5_TXCMP_CODE_EXIT = 0,
26 MLX5_TXCMP_CODE_ERROR,
27 MLX5_TXCMP_CODE_SINGLE,
28 MLX5_TXCMP_CODE_MULTI,
34 * These defines are used to configure the Tx burst routine option set
35 * supported at compile time. Options that are not specified are optimized
36 * out because the corresponding 'if' conditions can be evaluated at compile time.
37 * The offloads with the bigger runtime check overhead (requiring more CPU
38 * cycles to skip) should have the bigger index - this is needed to select the
39 * better matching routine if there is no exact match and some offloads are not actually requested.
42 #define MLX5_TXOFF_CONFIG_MULTI (1u << 0) /* Multi-segment packets.*/
43 #define MLX5_TXOFF_CONFIG_TSO (1u << 1) /* TCP send offload supported.*/
44 #define MLX5_TXOFF_CONFIG_SWP (1u << 2) /* Tunnels/SW Parser offloads.*/
45 #define MLX5_TXOFF_CONFIG_CSUM (1u << 3) /* Check Sums offloaded. */
46 #define MLX5_TXOFF_CONFIG_INLINE (1u << 4) /* Data inlining supported. */
47 #define MLX5_TXOFF_CONFIG_VLAN (1u << 5) /* VLAN insertion supported.*/
48 #define MLX5_TXOFF_CONFIG_METADATA (1u << 6) /* Flow metadata. */
49 #define MLX5_TXOFF_CONFIG_EMPW (1u << 8) /* Enhanced MPW supported.*/
50 #define MLX5_TXOFF_CONFIG_MPW (1u << 9) /* Legacy MPW supported.*/
51 #define MLX5_TXOFF_CONFIG_TXPP (1u << 10) /* Scheduling on timestamp.*/
53 /* The most common offload groups. */
54 #define MLX5_TXOFF_CONFIG_NONE 0
55 #define MLX5_TXOFF_CONFIG_FULL (MLX5_TXOFF_CONFIG_MULTI | \
56 MLX5_TXOFF_CONFIG_TSO | \
57 MLX5_TXOFF_CONFIG_SWP | \
58 MLX5_TXOFF_CONFIG_CSUM | \
59 MLX5_TXOFF_CONFIG_INLINE | \
60 MLX5_TXOFF_CONFIG_VLAN | \
61 MLX5_TXOFF_CONFIG_METADATA)
63 #define MLX5_TXOFF_CONFIG(mask) (olx & MLX5_TXOFF_CONFIG_##mask)
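/*
 * Illustrative sketch (not part of the original sources): 'olx' is a
 * compile-time constant in every instantiated burst routine, so a test like
 *
 *	if (MLX5_TXOFF_CONFIG(VLAN) &&
 *	    loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
 *		... insert the VLAN tag ...
 *
 * folds to constant false when the routine is instantiated without
 * MLX5_TXOFF_CONFIG_VLAN and the whole branch is optimized out.
 */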
65 #define MLX5_TXOFF_DECL(func, olx) \
66 static uint16_t mlx5_tx_burst_##func(void *txq, \
67 struct rte_mbuf **pkts, \
68 uint16_t pkts_n) \
69 { \
70 return mlx5_tx_burst_tmpl((struct mlx5_txq_data *)txq, \
71 pkts, pkts_n, (olx)); \
72 }
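/*
 * Usage sketch (hypothetical option set name): a burst routine specialized
 * for checksum and VLAN insertion offloads could be declared as
 *
 *	MLX5_TXOFF_DECL(csum_vlan,
 *			MLX5_TXOFF_CONFIG_CSUM | MLX5_TXOFF_CONFIG_VLAN)
 *
 * which emits mlx5_tx_burst_csum_vlan() calling the common template with a
 * compile-time constant 'olx'.
 */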
74 /* Mbuf dynamic flag offset for inline. */
75 extern uint64_t rte_net_mlx5_dynf_inline_mask;
76 #define PKT_TX_DYNF_NOINLINE rte_net_mlx5_dynf_inline_mask
78 extern uint32_t mlx5_ptype_table[] __rte_cache_aligned;
79 extern uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned;
80 extern uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned;
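/*
 * Note: the checksum and SW parser type tables have 1 << 10 entries because
 * they are indexed by the 10-bit keys built in txq_ol_cksum_to_cs() and
 * txq_mbuf_to_swp() below.
 */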
82 struct mlx5_txq_stats {
83 #ifdef MLX5_PMD_SOFT_COUNTERS
84 uint64_t opackets; /**< Total of successfully sent packets. */
85 uint64_t obytes; /**< Total of successfully sent bytes. */
87 uint64_t oerrors; /**< Total number of packets that failed transmission. */
90 /* TX queue send local data. */
92 struct mlx5_txq_local {
93 struct mlx5_wqe *wqe_last; /* last sent WQE pointer. */
94 struct rte_mbuf *mbuf; /* first mbuf to process. */
95 uint16_t pkts_copy; /* packets copied to elts. */
96 uint16_t pkts_sent; /* packets sent. */
97 uint16_t pkts_loop; /* packets sent on loop entry. */
98 uint16_t elts_free; /* available elts remain. */
99 uint16_t wqe_free; /* available wqe remain. */
100 uint16_t mbuf_off; /* data offset in current mbuf. */
101 uint16_t mbuf_nseg; /* number of remaining mbuf segments. */
102 uint16_t mbuf_free; /* number of inline mbufs to free. */
105 /* TX queue descriptor. */
107 struct mlx5_txq_data {
108 uint16_t elts_head; /* Current counter in (*elts)[]. */
109 uint16_t elts_tail; /* Counter of first element awaiting completion. */
110 uint16_t elts_comp; /* elts index since last completion request. */
111 uint16_t elts_s; /* Number of mbuf elements. */
112 uint16_t elts_m; /* Mask for mbuf elements indices. */
113 /* Fields related to elts mbuf storage. */
114 uint16_t wqe_ci; /* Consumer index for work queue. */
115 uint16_t wqe_pi; /* Producer index for work queue. */
116 uint16_t wqe_s; /* Number of WQ elements. */
117 uint16_t wqe_m; /* Mask for WQ element indices. */
118 uint16_t wqe_comp; /* WQE index since last completion request. */
119 uint16_t wqe_thres; /* WQE threshold to request completion in CQ. */
120 /* WQ related fields. */
121 uint16_t cq_ci; /* Consumer index for completion queue. */
122 uint16_t cq_pi; /* Producer index for completion queue. */
123 uint16_t cqe_s; /* Number of CQ elements. */
124 uint16_t cqe_m; /* Mask for CQ indices. */
125 /* CQ related fields. */
126 uint16_t elts_n:4; /* elts[] length (in log2). */
127 uint16_t cqe_n:4; /* Number of CQ elements (in log2). */
128 uint16_t wqe_n:4; /* Number of WQ elements (in log2). */
129 uint16_t tso_en:1; /* When set hardware TSO is enabled. */
130 uint16_t tunnel_en:1;
131 /* When set TX offload for tunneled packets are supported. */
132 uint16_t swp_en:1; /* Whether SW parser is enabled. */
133 uint16_t vlan_en:1; /* VLAN insertion in WQE is supported. */
134 uint16_t db_nc:1; /* Doorbell mapped to non-cached region. */
135 uint16_t db_heu:1; /* Doorbell heuristic write barrier. */
136 uint16_t fast_free:1; /* mbuf fast free on Tx is enabled. */
137 uint16_t inlen_send; /* Ordinary send data inline size. */
138 uint16_t inlen_empw; /* eMPW max packet size to inline. */
139 uint16_t inlen_mode; /* Minimal data length to inline. */
140 uint32_t qp_num_8s; /* QP number shifted by 8. */
141 uint64_t offloads; /* Offloads for Tx Queue. */
142 struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
143 struct mlx5_wqe *wqes; /* Work queue. */
144 struct mlx5_wqe *wqes_end; /* Work queue array limit. */
145 #ifdef RTE_LIBRTE_MLX5_DEBUG
146 uint32_t *fcqs; /* Free completion queue (debug extended). */
148 uint16_t *fcqs; /* Free completion queue. */
150 volatile struct mlx5_cqe *cqes; /* Completion queue. */
151 volatile uint32_t *qp_db; /* Work queue doorbell. */
152 volatile uint32_t *cq_db; /* Completion queue doorbell. */
153 uint16_t port_id; /* Port ID of device. */
154 uint16_t idx; /* Queue index. */
155 uint64_t ts_mask; /* Timestamp flag dynamic mask. */
156 int32_t ts_offset; /* Timestamp field dynamic offset. */
157 struct mlx5_dev_ctx_shared *sh; /* Shared context. */
158 struct mlx5_txq_stats stats; /* TX queue counters. */
160 rte_spinlock_t *uar_lock;
161 /* UAR access lock required for 32bit implementations */
163 struct rte_mbuf *elts[0];
164 /* Storage for queued packets, must be the last field. */
165 } __rte_cache_aligned;
167 enum mlx5_txq_type {
168 MLX5_TXQ_TYPE_STANDARD, /* Standard Tx queue. */
169 MLX5_TXQ_TYPE_HAIRPIN, /* Hairpin Tx queue. */
170 };
172 /* TX queue control descriptor. */
173 struct mlx5_txq_ctrl {
174 LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
175 uint32_t refcnt; /* Reference counter. */
176 unsigned int socket; /* CPU socket ID for allocations. */
177 enum mlx5_txq_type type; /* The txq ctrl type. */
178 unsigned int max_inline_data; /* Max inline data. */
179 unsigned int max_tso_header; /* Max TSO header size. */
180 struct mlx5_txq_obj *obj; /* Verbs/DevX queue object. */
181 struct mlx5_priv *priv; /* Back pointer to private data. */
182 off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */
183 void *bf_reg; /* BlueFlame register from Verbs. */
184 uint16_t dump_file_n; /* Number of dump files. */
185 struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */
186 uint32_t hairpin_status; /* Hairpin binding status. */
187 struct mlx5_txq_data txq; /* Data path structure. */
188 /* Must be the last field in the structure, contains elts[]. */
193 int mlx5_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id);
194 int mlx5_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id);
195 int mlx5_tx_queue_start_primary(struct rte_eth_dev *dev, uint16_t queue_id);
196 int mlx5_tx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t queue_id);
197 int mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
198 unsigned int socket, const struct rte_eth_txconf *conf);
199 int mlx5_tx_hairpin_queue_setup
200 (struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
201 const struct rte_eth_hairpin_conf *hairpin_conf);
202 void mlx5_tx_queue_release(void *dpdk_txq);
203 void txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl);
204 int mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd);
205 void mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev);
206 int mlx5_txq_obj_verify(struct rte_eth_dev *dev);
207 struct mlx5_txq_ctrl *mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx,
208 uint16_t desc, unsigned int socket,
209 const struct rte_eth_txconf *conf);
210 struct mlx5_txq_ctrl *mlx5_txq_hairpin_new
211 (struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
212 const struct rte_eth_hairpin_conf *hairpin_conf);
213 struct mlx5_txq_ctrl *mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx);
214 int mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx);
215 int mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx);
216 int mlx5_txq_verify(struct rte_eth_dev *dev);
217 void txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl);
218 void txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl);
219 uint64_t mlx5_get_tx_port_offloads(struct rte_eth_dev *dev);
220 void mlx5_txq_dynf_timestamp_set(struct rte_eth_dev *dev);
224 uint16_t removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
226 void mlx5_tx_handle_completion(struct mlx5_txq_data *__rte_restrict txq,
227 unsigned int olx __rte_unused);
228 int mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset);
229 void mlx5_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
230 struct rte_eth_txq_info *qinfo);
231 int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
232 struct rte_eth_burst_mode *mode);
236 uint32_t mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, struct rte_mbuf *mb);
237 uint32_t mlx5_tx_update_ext_mp(struct mlx5_txq_data *txq, uintptr_t addr,
238 struct rte_mempool *mp);
240 static __rte_always_inline uint64_t *
241 mlx5_tx_bfreg(struct mlx5_txq_data *txq)
243 return MLX5_PROC_PRIV(txq->port_id)->uar_table[txq->idx];
247 * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
248 * 64bit architectures.
251 * value to write in CPU endian format.
253 * Address to write to.
255 * Address of the lock to use for that UAR access.
257 static __rte_always_inline void
258 __mlx5_uar_write64_relaxed(uint64_t val, void *addr,
259 rte_spinlock_t *lock __rte_unused)
262 *(uint64_t *)addr = val;
263 #else /* !RTE_ARCH_64 */
264 rte_spinlock_lock(lock);
265 *(uint32_t *)addr = val;
267 *((uint32_t *)addr + 1) = val >> 32;
268 rte_spinlock_unlock(lock);
273 * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
274 * 64bit architectures while guaranteeing the order of execution with the
275 * code being executed.
278 * value to write in CPU endian format.
280 * Address to write to.
282 * Address of the lock to use for that UAR access.
284 static __rte_always_inline void
285 __mlx5_uar_write64(uint64_t val, void *addr, rte_spinlock_t *lock)
288 __mlx5_uar_write64_relaxed(val, addr, lock);
291 /* Assist macros, used instead of directly calling the functions they wrap. */
293 #define mlx5_uar_write64_relaxed(val, dst, lock) \
294 __mlx5_uar_write64_relaxed(val, dst, NULL)
295 #define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, NULL)
297 #define mlx5_uar_write64_relaxed(val, dst, lock) \
298 __mlx5_uar_write64_relaxed(val, dst, lock)
299 #define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, lock)
303 * Query LKey from a packet buffer for Tx. If not found, add the mempool.
306 * Pointer to Tx queue structure.
311 * Searched LKey on success, UINT32_MAX on no match.
313 static __rte_always_inline uint32_t
314 mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
316 struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
317 uintptr_t addr = (uintptr_t)mb->buf_addr;
320 /* Check generation bit to see if there's any change on existing MRs. */
321 if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
322 mlx5_mr_flush_local_cache(mr_ctrl);
323 /* Linear search on MR cache array. */
324 lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
325 MLX5_MR_CACHE_N, addr);
326 if (likely(lkey != UINT32_MAX))
328 /* Take slower bottom-half on miss. */
329 return mlx5_tx_mb2mr_bh(txq, mb);
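/*
 * Usage sketch: the returned LKey is stored directly into a Data Segment,
 * e.g. dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf), as done by
 * mlx5_tx_dseg_ptr() and mlx5_tx_dseg_iptr() below.
 */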
333 * Ring TX queue doorbell and flush the update if requested.
336 * Pointer to TX queue structure.
338 * Pointer to the last WQE posted in the NIC.
340 * Request for write memory barrier after BlueFlame update.
342 static __rte_always_inline void
343 mlx5_tx_dbrec_cond_wmb(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe,
346 uint64_t *dst = mlx5_tx_bfreg(txq);
347 volatile uint64_t *src = ((volatile uint64_t *)wqe);
350 *txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci);
351 /* Ensure ordering between DB record and BF copy. */
353 mlx5_uar_write64_relaxed(*src, dst, txq->uar_lock);
359 * Ring TX queue doorbell and flush the update by write memory barrier.
362 * Pointer to TX queue structure.
364 * Pointer to the last WQE posted in the NIC.
366 static __rte_always_inline void
367 mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe)
369 mlx5_tx_dbrec_cond_wmb(txq, wqe, 1);
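/*
 * Usage sketch (illustrative, not the exact template code): after building
 * the WQEs a burst routine rings the doorbell for the last posted WQE, e.g.
 *
 *	mlx5_tx_dbrec_cond_wmb(txq, loc->wqe_last, 0);
 *
 * where a non-zero last argument requests the write memory barrier after the
 * BlueFlame copy; mlx5_tx_dbrec() above is the unconditional (barrier) form.
 */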
373 * Convert timestamp from mbuf format to linear counter
374 * of Clock Queue completions (24 bits).
377 * Pointer to the device shared context to fetch Tx
378 * packet pacing timestamp and parameters.
380 * Timestamp from mbuf to convert.
382 * positive or zero value - completion ID to wait.
383 * negative value - conversion error.
385 static __rte_always_inline int32_t
386 mlx5_txpp_convert_tx_ts(struct mlx5_dev_ctx_shared *sh, uint64_t mts)
393 * Read atomically two uint64_t fields and compare lsb bits.
394 * If there is no match - the timestamp was updated in
395 * the service thread, data should be re-read.
397 rte_compiler_barrier();
398 ci = __atomic_load_n(&sh->txpp.ts.ci_ts, __ATOMIC_RELAXED);
399 ts = __atomic_load_n(&sh->txpp.ts.ts, __ATOMIC_RELAXED);
400 rte_compiler_barrier();
401 if (!((ts ^ ci) << (64 - MLX5_CQ_INDEX_WIDTH)))
404 /* Perform the skew correction, positive value to send earlier. */
405 mts -= sh->txpp.skew;
407 if (unlikely(mts >= UINT64_MAX / 2)) {
408 /* The delta is negative, mts is in the past. */
409 __atomic_fetch_add(&sh->txpp.err_ts_past,
410 1, __ATOMIC_RELAXED);
413 tick = sh->txpp.tick;
415 /* Convert delta to completions, round up. */
416 mts = (mts + tick - 1) / tick;
417 if (unlikely(mts >= (1 << MLX5_CQ_INDEX_WIDTH) / 2 - 1)) {
418 /* The mts is too far in the future. */
419 __atomic_fetch_add(&sh->txpp.err_ts_future,
420 1, __ATOMIC_RELAXED);
423 mts <<= 64 - MLX5_CQ_INDEX_WIDTH;
425 ci >>= 64 - MLX5_CQ_INDEX_WIDTH;
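/*
 * Worked example (sketch with illustrative numbers): if the Clock Queue tick
 * is 1000 ns and the requested timestamp is 25000 ns ahead of the last Clock
 * Queue completion timestamp (after the skew correction), the delta converts
 * to (25000 + 1000 - 1) / 1000 = 25 completions, and the returned value to
 * wait for is the current completion index plus 25, wrapped to
 * MLX5_CQ_INDEX_WIDTH bits.
 */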
430 * Set Software Parser flags and offsets in Ethernet Segment of WQE.
431 * Flags must be preliminary initialized to zero.
434 * Pointer to burst routine local context.
436 * Pointer to store Software Parser flags.
438 * Configured Tx offloads mask. It is fully defined at
439 * compile time and may be used for optimization.
442 * Software Parser offsets packed in dword.
443 * Software Parser flags are set by pointer.
445 static __rte_always_inline uint32_t
446 txq_mbuf_to_swp(struct mlx5_txq_local *__rte_restrict loc,
451 unsigned int idx, off;
454 if (!MLX5_TXOFF_CONFIG(SWP))
456 ol = loc->mbuf->ol_flags;
457 tunnel = ol & PKT_TX_TUNNEL_MASK;
459 * Check whether Software Parser is required.
460 * Only customized tunnels may ask for it.
462 if (likely(tunnel != PKT_TX_TUNNEL_UDP && tunnel != PKT_TX_TUNNEL_IP))
465 * The index should have:
466 * bit[0:1] = PKT_TX_L4_MASK
467 * bit[4] = PKT_TX_IPV6
468 * bit[8] = PKT_TX_OUTER_IPV6
469 * bit[9] = PKT_TX_OUTER_UDP
471 idx = (ol & (PKT_TX_L4_MASK | PKT_TX_IPV6 | PKT_TX_OUTER_IPV6)) >> 52;
472 idx |= (tunnel == PKT_TX_TUNNEL_UDP) ? (1 << 9) : 0;
473 *swp_flags = mlx5_swp_types_table[idx];
475 * Set offsets for SW parser. Since ConnectX-5, SW parser just
476 * complements HW parser. SW parser starts to engage only if HW parser
477 * can't reach a header. For the older devices, HW parser will not kick
478 * in if any of SWP offsets is set. Therefore, all of the L3 offsets
479 * should be set regardless of HW offload.
481 off = loc->mbuf->outer_l2_len;
482 if (MLX5_TXOFF_CONFIG(VLAN) && ol & PKT_TX_VLAN_PKT)
483 off += sizeof(struct rte_vlan_hdr);
484 set = (off >> 1) << 8; /* Outer L3 offset. */
485 off += loc->mbuf->outer_l3_len;
486 if (tunnel == PKT_TX_TUNNEL_UDP)
487 set |= off >> 1; /* Outer L4 offset. */
488 if (ol & (PKT_TX_IPV4 | PKT_TX_IPV6)) { /* Inner IP. */
489 const uint64_t csum = ol & PKT_TX_L4_MASK;
490 off += loc->mbuf->l2_len;
491 set |= (off >> 1) << 24; /* Inner L3 offset. */
492 if (csum == PKT_TX_TCP_CKSUM ||
493 csum == PKT_TX_UDP_CKSUM ||
494 (MLX5_TXOFF_CONFIG(TSO) && ol & PKT_TX_TCP_SEG)) {
495 off += loc->mbuf->l3_len;
496 set |= (off >> 1) << 16; /* Inner L4 offset. */
499 set = rte_cpu_to_le_32(set);
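/*
 * Layout sketch of the returned dword, as built by the shifts above
 * (all offsets are in 2-byte units):
 *	bits  7:0  - outer L4 offset (set for UDP tunnels only)
 *	bits 15:8  - outer L3 offset
 *	bits 23:16 - inner L4 offset
 *	bits 31:24 - inner L3 offset
 */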
504 * Convert the Checksum offloads to Verbs.
507 * Pointer to the mbuf.
510 * Converted checksum flags.
512 static __rte_always_inline uint8_t
513 txq_ol_cksum_to_cs(struct rte_mbuf *buf)
516 uint8_t is_tunnel = !!(buf->ol_flags & PKT_TX_TUNNEL_MASK);
517 const uint64_t ol_flags_mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK |
518 PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM;
521 * The index should have:
522 * bit[0] = PKT_TX_TCP_SEG
523 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
524 * bit[4] = PKT_TX_IP_CKSUM
525 * bit[8] = PKT_TX_OUTER_IP_CKSUM
528 idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (!!is_tunnel << 9);
529 return mlx5_cksum_table[idx];
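/*
 * Worked example (sketch): a plain TCP packet with PKT_TX_IP_CKSUM and
 * PKT_TX_TCP_CKSUM requested and no tunnel yields an index with only the
 * L4 bits (bit[2:3]) and bit[4] set, and mlx5_cksum_table[idx] returns the
 * checksum flags to be written into the Ethernet Segment.
 */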
533 * Free the mbufs from the linear array of pointers.
536 * Pointer to Tx queue structure.
538 * Pointer to array of packets to be free.
540 * Number of packets to be freed.
542 * Configured Tx offloads mask. It is fully defined at
543 * compile time and may be used for optimization.
545 static __rte_always_inline void
546 mlx5_tx_free_mbuf(struct mlx5_txq_data *__rte_restrict txq,
547 struct rte_mbuf **__rte_restrict pkts,
549 unsigned int olx __rte_unused)
551 struct rte_mempool *pool = NULL;
552 struct rte_mbuf **p_free = NULL;
553 struct rte_mbuf *mbuf;
554 unsigned int n_free = 0;
557 * The implemented algorithm eliminates
558 * copying pointers to temporary array
559 * for rte_mempool_put_bulk() calls.
564 * Free mbufs directly to the pool in bulk
565 * if fast free offload is engaged
567 if (!MLX5_TXOFF_CONFIG(MULTI) && txq->fast_free) {
570 rte_mempool_put_bulk(pool, (void *)pkts, pkts_n);
576 * Decrement mbuf reference counter, detach
577 * indirect and external buffers if needed.
579 mbuf = rte_pktmbuf_prefree_seg(*pkts);
580 if (likely(mbuf != NULL)) {
581 MLX5_ASSERT(mbuf == *pkts);
582 if (likely(n_free != 0)) {
583 if (unlikely(pool != mbuf->pool))
584 /* From different pool. */
587 /* Start new scan array. */
594 if (unlikely(pkts_n == 0)) {
600 * This happens if mbuf is still referenced.
601 * We can't put it back to the pool, skip.
605 if (unlikely(n_free != 0))
606 /* There is some array to free.*/
608 if (unlikely(pkts_n == 0))
609 /* Last mbuf, nothing to free. */
615 * This loop is implemented to avoid multiple
616 * inlining of rte_mempool_put_bulk().
622 * Free the array of pre-freed mbufs
623 * belonging to the same memory pool.
625 rte_mempool_put_bulk(pool, (void *)p_free, n_free);
626 if (unlikely(mbuf != NULL)) {
627 /* There is the request to start new scan. */
632 if (likely(pkts_n != 0))
635 * This is the last mbuf to be freed.
636 * Do one more loop iteration to complete.
637 * This is a rare case of the last unique mbuf.
642 if (likely(pkts_n == 0))
651 * Not-inlined version of the mbuf freeing routine, for the optimal call
652 * on tx_burst completion.
654 static __rte_noinline void
655 __mlx5_tx_free_mbuf(struct mlx5_txq_data *__rte_restrict txq,
656 struct rte_mbuf **__rte_restrict pkts,
658 unsigned int olx __rte_unused)
660 mlx5_tx_free_mbuf(txq, pkts, pkts_n, olx);
664 * Free the mbufs from the elts ring buffer up to the new tail.
667 * Pointer to Tx queue structure.
669 * Index in elts to free up to, becomes new elts tail.
671 * Configured Tx offloads mask. It is fully defined at
672 * compile time and may be used for optimization.
674 static __rte_always_inline void
675 mlx5_tx_free_elts(struct mlx5_txq_data *__rte_restrict txq,
677 unsigned int olx __rte_unused)
679 uint16_t n_elts = tail - txq->elts_tail;
682 MLX5_ASSERT(n_elts <= txq->elts_s);
684 * Implement a loop to support ring buffer wraparound
685 * with single inlining of mlx5_tx_free_mbuf().
690 part = txq->elts_s - (txq->elts_tail & txq->elts_m);
691 part = RTE_MIN(part, n_elts);
693 MLX5_ASSERT(part <= txq->elts_s);
694 mlx5_tx_free_mbuf(txq,
695 &txq->elts[txq->elts_tail & txq->elts_m],
697 txq->elts_tail += part;
703 * Store the mbuf being sent into elts ring buffer.
704 * On Tx completion these mbufs will be freed.
707 * Pointer to Tx queue structure.
709 * Pointer to array of packets to be stored.
711 * Number of packets to be stored.
713 * Configured Tx offloads mask. It is fully defined at
714 * compile time and may be used for optimization.
716 static __rte_always_inline void
717 mlx5_tx_copy_elts(struct mlx5_txq_data *__rte_restrict txq,
718 struct rte_mbuf **__rte_restrict pkts,
720 unsigned int olx __rte_unused)
723 struct rte_mbuf **elts = (struct rte_mbuf **)txq->elts;
727 part = txq->elts_s - (txq->elts_head & txq->elts_m);
729 MLX5_ASSERT(part <= txq->elts_s);
730 /* This code is a good candidate for vectorizing with SIMD. */
731 rte_memcpy((void *)(elts + (txq->elts_head & txq->elts_m)),
733 RTE_MIN(part, pkts_n) * sizeof(struct rte_mbuf *));
734 txq->elts_head += pkts_n;
735 if (unlikely(part < pkts_n))
736 /* The copy is wrapping around the elts array. */
737 rte_memcpy((void *)elts, (void *)(pkts + part),
738 (pkts_n - part) * sizeof(struct rte_mbuf *));
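/*
 * Worked example (sketch): with elts_s = 256, elts_m = 255 and
 * elts_head = 250, storing 10 packets copies part = 6 pointers to
 * elts[250..255], advances elts_head to 260, and copies the remaining
 * 4 pointers to elts[0..3] - the index is always masked with elts_m.
 */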
742 * Check if the completion request flag should be set in the last WQE.
743 * Both pushed mbufs and WQEs are monitored and the completion request
744 * flag is set if any of the thresholds is reached.
747 * Pointer to TX queue structure.
749 * Pointer to burst routine local context.
751 * Configured Tx offloads mask. It is fully defined at
752 * compile time and may be used for optimization.
754 static __rte_always_inline void
755 mlx5_tx_request_completion(struct mlx5_txq_data *__rte_restrict txq,
756 struct mlx5_txq_local *__rte_restrict loc,
759 uint16_t head = txq->elts_head;
762 part = MLX5_TXOFF_CONFIG(INLINE) ?
763 0 : loc->pkts_sent - loc->pkts_copy;
765 if ((uint16_t)(head - txq->elts_comp) >= MLX5_TX_COMP_THRESH ||
766 (MLX5_TXOFF_CONFIG(INLINE) &&
767 (uint16_t)(txq->wqe_ci - txq->wqe_comp) >= txq->wqe_thres)) {
768 volatile struct mlx5_wqe *last = loc->wqe_last;
771 txq->elts_comp = head;
772 if (MLX5_TXOFF_CONFIG(INLINE))
773 txq->wqe_comp = txq->wqe_ci;
774 /* Request unconditional completion on last WQE. */
775 last->cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
776 MLX5_COMP_MODE_OFFSET);
777 /* Save elts_head in dedicated free on completion queue. */
778 #ifdef RTE_LIBRTE_MLX5_DEBUG
779 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head |
780 (last->cseg.opcode >> 8) << 16;
782 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head;
784 /* A CQE slot must always be available. */
785 MLX5_ASSERT((txq->cq_pi - txq->cq_ci) <= txq->cqe_s);
790 * Build the Control Segment with specified opcode:
792 * - MLX5_OPCODE_ENHANCED_MPSW
796 * Pointer to TX queue structure.
798 * Pointer to burst routine local context.
800 * Pointer to WQE to fill with built Control Segment.
802 * Supposed length of WQE in segments.
804 * SQ WQE opcode to put into Control Segment.
806 * Configured Tx offloads mask. It is fully defined at
807 * compile time and may be used for optimization.
809 static __rte_always_inline void
810 mlx5_tx_cseg_init(struct mlx5_txq_data *__rte_restrict txq,
811 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
812 struct mlx5_wqe *__rte_restrict wqe,
815 unsigned int olx __rte_unused)
817 struct mlx5_wqe_cseg *__rte_restrict cs = &wqe->cseg;
819 /* For legacy MPW replace the EMPW by TSO with modifier. */
820 if (MLX5_TXOFF_CONFIG(MPW) && opcode == MLX5_OPCODE_ENHANCED_MPSW)
821 opcode = MLX5_OPCODE_TSO | MLX5_OPC_MOD_MPW << 24;
822 cs->opcode = rte_cpu_to_be_32((txq->wqe_ci << 8) | opcode);
823 cs->sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
824 cs->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
825 MLX5_COMP_MODE_OFFSET);
826 cs->misc = RTE_BE32(0);
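/*
 * Resulting Control Segment layout sketch (derived from the stores above):
 *	opcode dword - WQE index in bits 23:8, opcode in bits 7:0 (legacy MPW
 *	               adds the MLX5_OPC_MOD_MPW modifier in bits 31:24);
 *	sq_ds dword  - QP number in bits 31:8, the 'ds' segment count in the
 *	               low bits;
 *	flags        - MLX5_COMP_ONLY_FIRST_ERR completion mode by default.
 */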
830 * Build the Synchronize Queue Segment with specified completion index.
833 * Pointer to TX queue structure.
835 * Pointer to burst routine local context.
837 * Pointer to WQE to fill with built Control Segment.
839 * Completion index in Clock Queue to wait.
841 * Configured Tx offloads mask. It is fully defined at
842 * compile time and may be used for optimization.
844 static __rte_always_inline void
845 mlx5_tx_wseg_init(struct mlx5_txq_data *restrict txq,
846 struct mlx5_txq_local *restrict loc __rte_unused,
847 struct mlx5_wqe *restrict wqe,
849 unsigned int olx __rte_unused)
851 struct mlx5_wqe_qseg *qs;
853 qs = RTE_PTR_ADD(wqe, MLX5_WSEG_SIZE);
854 qs->max_index = rte_cpu_to_be_32(wci);
855 qs->qpn_cqn = rte_cpu_to_be_32(txq->sh->txpp.clock_queue.cq_obj.cq->id);
856 qs->reserved0 = RTE_BE32(0);
857 qs->reserved1 = RTE_BE32(0);
861 * Build the Ethernet Segment without inlined data.
862 * Supports Software Parser, Checksums and VLAN insertion Tx offload features.
865 * Pointer to TX queue structure.
867 * Pointer to burst routine local context.
869 * Pointer to WQE to fill with built Ethernet Segment.
871 * Configured Tx offloads mask. It is fully defined at
872 * compile time and may be used for optimization.
874 static __rte_always_inline void
875 mlx5_tx_eseg_none(struct mlx5_txq_data *__rte_restrict txq __rte_unused,
876 struct mlx5_txq_local *__rte_restrict loc,
877 struct mlx5_wqe *__rte_restrict wqe,
880 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
884 * Calculate and set check sum flags first, dword field
885 * in segment may be shared with Software Parser flags.
887 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
888 es->flags = rte_cpu_to_le_32(csum);
890 * Calculate and set Software Parser offsets and flags.
891 * These flags are set for custom UDP and IP tunnel packets.
893 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
894 /* Fill metadata field if needed. */
895 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
896 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
897 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
898 /* Engage VLAN tag insertion feature if requested. */
899 if (MLX5_TXOFF_CONFIG(VLAN) &&
900 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
902 * We should get here only if the device supports
903 * this feature correctly.
905 MLX5_ASSERT(txq->vlan_en);
906 es->inline_hdr = rte_cpu_to_be_32(MLX5_ETH_WQE_VLAN_INSERT |
907 loc->mbuf->vlan_tci);
909 es->inline_hdr = RTE_BE32(0);
914 * Build the Ethernet Segment with minimal inlined data
915 * of MLX5_ESEG_MIN_INLINE_SIZE bytes length. This is
916 * used to fill the gap in single WQEBB WQEs.
917 * Supports Software Parser, Checksums and VLAN
918 * insertion Tx offload features.
921 * Pointer to TX queue structure.
923 * Pointer to burst routine local context.
925 * Pointer to WQE to fill with built Ethernet Segment.
927 * Length of VLAN tag insertion if any.
929 * Configured Tx offloads mask. It is fully defined at
930 * compile time and may be used for optimization.
932 static __rte_always_inline void
933 mlx5_tx_eseg_dmin(struct mlx5_txq_data *__rte_restrict txq __rte_unused,
934 struct mlx5_txq_local *__rte_restrict loc,
935 struct mlx5_wqe *__rte_restrict wqe,
939 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
941 uint8_t *psrc, *pdst;
944 * Calculate and set check sum flags first, dword field
945 * in segment may be shared with Software Parser flags.
947 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
948 es->flags = rte_cpu_to_le_32(csum);
950 * Calculate and set Software Parser offsets and flags.
951 * These flags are set for custom UDP and IP tunnel packets.
953 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
954 /* Fill metadata field if needed. */
955 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
956 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
957 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
958 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
959 es->inline_hdr_sz = RTE_BE16(MLX5_ESEG_MIN_INLINE_SIZE);
960 es->inline_data = *(unaligned_uint16_t *)psrc;
961 psrc += sizeof(uint16_t);
962 pdst = (uint8_t *)(es + 1);
963 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
964 /* Implement VLAN tag insertion as part of the inline data. */
965 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
966 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
967 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
968 /* Insert VLAN ethertype + VLAN tag. */
969 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
970 ((RTE_ETHER_TYPE_VLAN << 16) |
971 loc->mbuf->vlan_tci);
972 pdst += sizeof(struct rte_vlan_hdr);
973 /* Copy the remaining two bytes from the packet data. */
974 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
975 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
977 /* Fill the gap in the title WQEBB with inline data. */
978 rte_mov16(pdst, psrc);
983 * Build the Ethernet Segment with entire packet data inlining. Checks the
984 * boundary of WQEBB and ring buffer wrapping, supports Software Parser,
985 * Checksums and VLAN insertion Tx offload features.
988 * Pointer to TX queue structure.
990 * Pointer to burst routine local context.
992 * Pointer to WQE to fill with built Ethernet Segment.
994 * Length of VLAN tag insertion if any.
996 * Length of data to inline (VLAN included, if any).
998 * TSO flag, set mss field from the packet.
1000 * Configured Tx offloads mask. It is fully defined at
1001 * compile time and may be used for optimization.
1004 * Pointer to the next Data Segment (aligned and wrapped around).
1006 static __rte_always_inline struct mlx5_wqe_dseg *
1007 mlx5_tx_eseg_data(struct mlx5_txq_data *__rte_restrict txq,
1008 struct mlx5_txq_local *__rte_restrict loc,
1009 struct mlx5_wqe *__rte_restrict wqe,
1015 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
1017 uint8_t *psrc, *pdst;
1021 * Calculate and set check sum flags first, dword field
1022 * in segment may be shared with Software Parser flags.
1024 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
1027 csum |= loc->mbuf->tso_segsz;
1028 es->flags = rte_cpu_to_be_32(csum);
1030 es->flags = rte_cpu_to_le_32(csum);
1033 * Calculate and set Software Parser offsets and flags.
1034 * These flags are set for custom UDP and IP tunnel packets.
1036 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
1037 /* Fill metadata field if needed. */
1038 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
1039 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
1040 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
1041 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
1042 es->inline_hdr_sz = rte_cpu_to_be_16(inlen);
1043 es->inline_data = *(unaligned_uint16_t *)psrc;
1044 psrc += sizeof(uint16_t);
1045 pdst = (uint8_t *)(es + 1);
1046 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
1047 /* Implement VLAN tag insertion as part of the inline data. */
1048 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
1049 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
1050 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
1051 /* Insert VLAN ethertype + VLAN tag. */
1052 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
1053 ((RTE_ETHER_TYPE_VLAN << 16) |
1054 loc->mbuf->vlan_tci);
1055 pdst += sizeof(struct rte_vlan_hdr);
1056 /* Copy the remaining two bytes from the packet data. */
1057 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
1058 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
1059 psrc += sizeof(uint16_t);
1061 /* Fill the gap in the title WQEBB with inline data. */
1062 rte_mov16(pdst, psrc);
1063 psrc += sizeof(rte_v128u32_t);
1065 pdst = (uint8_t *)(es + 2);
1066 MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
1067 MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
1068 inlen -= MLX5_ESEG_MIN_INLINE_SIZE;
1070 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
1071 return (struct mlx5_wqe_dseg *)pdst;
1074 * The WQEBB space availability is checked by caller.
1075 * Here we should be aware of WQE ring buffer wraparound only.
1077 part = (uint8_t *)txq->wqes_end - pdst;
1078 part = RTE_MIN(part, inlen);
1080 rte_memcpy(pdst, psrc, part);
1082 if (likely(!inlen)) {
1084 * If return value is not used by the caller
1085 * the code below will be optimized out.
1088 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
1089 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
1090 pdst = (uint8_t *)txq->wqes;
1091 return (struct mlx5_wqe_dseg *)pdst;
1093 pdst = (uint8_t *)txq->wqes;
1100 * Copy data from a chain of mbufs to the specified linear buffer.
1101 * Supports Checksums and VLAN insertion Tx offload features. If the data
1102 * from some mbuf is copied completely, this mbuf is freed. The local
1103 * structure is used to keep the byte stream state.
1106 * Pointer to the destination linear buffer.
1108 * Pointer to burst routine local context.
1110 * Length of data to be copied.
1112 * Length of data to be copied ignoring no inline hint.
1114 * Configured Tx offloads mask. It is fully defined at
1115 * compile time and may be used for optimization.
1118 * Number of actually copied data bytes. This is always greater than or
1119 * equal to the "must" parameter and might be less than "len" if the
1120 * no-inline hint flag is encountered.
1122 static __rte_always_inline unsigned int
1123 mlx5_tx_mseg_memcpy(uint8_t *pdst,
1124 struct mlx5_txq_local *__rte_restrict loc,
1127 unsigned int olx __rte_unused)
1129 struct rte_mbuf *mbuf;
1130 unsigned int part, dlen, copy = 0;
1134 MLX5_ASSERT(must <= len);
1136 /* Allow zero length packets, must check first. */
1137 dlen = rte_pktmbuf_data_len(loc->mbuf);
1138 if (dlen <= loc->mbuf_off) {
1139 /* Exhausted packet, just free. */
1141 loc->mbuf = mbuf->next;
1142 rte_pktmbuf_free_seg(mbuf);
1144 MLX5_ASSERT(loc->mbuf_nseg > 1);
1145 MLX5_ASSERT(loc->mbuf);
1147 if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
1152 * We already copied the minimal
1153 * requested amount of data.
1158 if (diff <= rte_pktmbuf_data_len(loc->mbuf)) {
1160 * Copy only the minimal required
1161 * part of the data buffer.
1168 dlen -= loc->mbuf_off;
1169 psrc = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
1171 part = RTE_MIN(len, dlen);
1172 rte_memcpy(pdst, psrc, part);
1174 loc->mbuf_off += part;
1177 if (loc->mbuf_off >= rte_pktmbuf_data_len(loc->mbuf)) {
1179 /* Exhausted packet, just free. */
1181 loc->mbuf = mbuf->next;
1182 rte_pktmbuf_free_seg(mbuf);
1184 MLX5_ASSERT(loc->mbuf_nseg >= 1);
1194 * Build the Ethernet Segment with inlined data from multi-segment packet.
1195 * Checks the boundary of WQEBB and ring buffer wrapping, supports Software
1196 * Parser, Checksums and VLAN insertion Tx offload features.
1199 * Pointer to TX queue structure.
1201 * Pointer to burst routine local context.
1203 * Pointer to WQE to fill with built Ethernet Segment.
1205 * Length of VLAN tag insertion if any.
1207 * Length of data to inline (VLAN included, if any).
1209 * TSO flag, set mss field from the packet.
1211 * Configured Tx offloads mask. It is fully defined at
1212 * compile time and may be used for optimization.
1215 * Pointer to the next Data Segment (aligned and possibly NOT wrapped
1216 * around - the caller should do the wrapping check on its own).
1218 static __rte_always_inline struct mlx5_wqe_dseg *
1219 mlx5_tx_eseg_mdat(struct mlx5_txq_data *__rte_restrict txq,
1220 struct mlx5_txq_local *__rte_restrict loc,
1221 struct mlx5_wqe *__rte_restrict wqe,
1227 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
1230 unsigned int part, tlen = 0;
1233 * Calculate and set check sum flags first, uint32_t field
1234 * in segment may be shared with Software Parser flags.
1236 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
1239 csum |= loc->mbuf->tso_segsz;
1240 es->flags = rte_cpu_to_be_32(csum);
1242 es->flags = rte_cpu_to_le_32(csum);
1245 * Calculate and set Software Parser offsets and flags.
1246 * These flags are set for custom UDP and IP tunnel packets.
1248 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
1249 /* Fill metadata field if needed. */
1250 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
1251 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
1252 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
1253 MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
1254 pdst = (uint8_t *)&es->inline_data;
1255 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
1256 /* Implement VLAN tag insertion as part of the inline data. */
1257 mlx5_tx_mseg_memcpy(pdst, loc,
1258 2 * RTE_ETHER_ADDR_LEN,
1259 2 * RTE_ETHER_ADDR_LEN, olx);
1260 pdst += 2 * RTE_ETHER_ADDR_LEN;
1261 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
1262 ((RTE_ETHER_TYPE_VLAN << 16) |
1263 loc->mbuf->vlan_tci);
1264 pdst += sizeof(struct rte_vlan_hdr);
1265 tlen += 2 * RTE_ETHER_ADDR_LEN + sizeof(struct rte_vlan_hdr);
1267 MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
1269 * The WQEBB space availability is checked by caller.
1270 * Here we should be aware of WQE ring buffer wraparound only.
1272 part = (uint8_t *)txq->wqes_end - pdst;
1273 part = RTE_MIN(part, inlen - tlen);
1279 * Copying may be interrupted inside the routine
1280 * if it runs into the no-inline hint flag.
1282 copy = tlen >= txq->inlen_mode ? 0 : (txq->inlen_mode - tlen);
1283 copy = mlx5_tx_mseg_memcpy(pdst, loc, part, copy, olx);
1285 if (likely(inlen <= tlen) || copy < part) {
1286 es->inline_hdr_sz = rte_cpu_to_be_16(tlen);
1288 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
1289 return (struct mlx5_wqe_dseg *)pdst;
1291 pdst = (uint8_t *)txq->wqes;
1292 part = inlen - tlen;
1297 * Build the Data Segment of pointer type.
1300 * Pointer to TX queue structure.
1302 * Pointer to burst routine local context.
1304 * Pointer to WQE to fill with built Data Segment.
1306 * Data buffer to point.
1308 * Data buffer length.
1310 * Configured Tx offloads mask. It is fully defined at
1311 * compile time and may be used for optimization.
1313 static __rte_always_inline void
1314 mlx5_tx_dseg_ptr(struct mlx5_txq_data *__rte_restrict txq,
1315 struct mlx5_txq_local *__rte_restrict loc,
1316 struct mlx5_wqe_dseg *__rte_restrict dseg,
1319 unsigned int olx __rte_unused)
1323 dseg->bcount = rte_cpu_to_be_32(len);
1324 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
1325 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
1329 * Build the Data Segment of pointer type, or inline the data if its length
1330 * is less than the minimal Data Segment size.
1333 * Pointer to TX queue structure.
1335 * Pointer to burst routine local context.
1337 * Pointer to WQE to fill with built Data Segment.
1339 * Data buffer to point.
1341 * Data buffer length.
1343 * Configured Tx offloads mask. It is fully defined at
1344 * compile time and may be used for optimization.
1346 static __rte_always_inline void
1347 mlx5_tx_dseg_iptr(struct mlx5_txq_data *__rte_restrict txq,
1348 struct mlx5_txq_local *__rte_restrict loc,
1349 struct mlx5_wqe_dseg *__rte_restrict dseg,
1352 unsigned int olx __rte_unused)
1358 if (len > MLX5_DSEG_MIN_INLINE_SIZE) {
1359 dseg->bcount = rte_cpu_to_be_32(len);
1360 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
1361 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
1365 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
1366 /* Unrolled implementation of generic rte_memcpy. */
1367 dst = (uintptr_t)&dseg->inline_data[0];
1368 src = (uintptr_t)buf;
1370 #ifdef RTE_ARCH_STRICT_ALIGN
1371 MLX5_ASSERT(dst == RTE_PTR_ALIGN(dst, sizeof(uint32_t)));
1372 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
1373 dst += sizeof(uint32_t);
1374 src += sizeof(uint32_t);
1375 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
1376 dst += sizeof(uint32_t);
1377 src += sizeof(uint32_t);
1379 *(uint64_t *)dst = *(unaligned_uint64_t *)src;
1380 dst += sizeof(uint64_t);
1381 src += sizeof(uint64_t);
1385 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
1386 dst += sizeof(uint32_t);
1387 src += sizeof(uint32_t);
1390 *(uint16_t *)dst = *(unaligned_uint16_t *)src;
1391 dst += sizeof(uint16_t);
1392 src += sizeof(uint16_t);
1395 *(uint8_t *)dst = *(uint8_t *)src;
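/*
 * Design note: when the buffer is no longer than MLX5_DSEG_MIN_INLINE_SIZE
 * the data is copied into the Data Segment itself (see the unrolled copy
 * above), which avoids the LKey lookup via mlx5_tx_mb2mr() for very short
 * buffers; longer buffers use the pointer form at the top of the routine.
 */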
1399 * Build the Data Segment of inlined data from single
1400 * segment packet, no VLAN insertion.
1403 * Pointer to TX queue structure.
1405 * Pointer to burst routine local context.
1407 * Pointer to WQE to fill with built Data Segment.
1409 * Data buffer to point.
1411 * Data buffer length.
1413 * Configured Tx offloads mask. It is fully defined at
1414 * compile time and may be used for optimization.
1417 * Pointer to the next Data Segment after inlined data.
1418 * Ring buffer wraparound check is needed. We do not do it here because it
1419 * may not be needed for the last packet in the eMPW session.
1421 static __rte_always_inline struct mlx5_wqe_dseg *
1422 mlx5_tx_dseg_empw(struct mlx5_txq_data *__rte_restrict txq,
1423 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
1424 struct mlx5_wqe_dseg *__rte_restrict dseg,
1427 unsigned int olx __rte_unused)
1432 if (!MLX5_TXOFF_CONFIG(MPW)) {
1433 /* Store the descriptor byte counter for eMPW sessions. */
1434 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
1435 pdst = &dseg->inline_data[0];
1437 /* The entire legacy MPW session counter is stored on close. */
1438 pdst = (uint8_t *)dseg;
1441 * The WQEBB space availability is checked by caller.
1442 * Here we should be aware of WQE ring buffer wraparound only.
1444 part = (uint8_t *)txq->wqes_end - pdst;
1445 part = RTE_MIN(part, len);
1447 rte_memcpy(pdst, buf, part);
1451 if (!MLX5_TXOFF_CONFIG(MPW))
1452 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
1453 /* Note: no final wraparound check here. */
1454 return (struct mlx5_wqe_dseg *)pdst;
1456 pdst = (uint8_t *)txq->wqes;
1463 * Build the Data Segment of inlined data from single
1464 * segment packet with VLAN insertion.
1467 * Pointer to TX queue structure.
1469 * Pointer to burst routine local context.
1471 * Pointer to the dseg fill with built Data Segment.
1473 * Data buffer to point.
1475 * Data buffer length.
1477 * Configured Tx offloads mask. It is fully defined at
1478 * compile time and may be used for optimization.
1481 * Pointer to the next Data Segment after inlined data.
1482 * Ring buffer wraparound check is needed.
1484 static __rte_always_inline struct mlx5_wqe_dseg *
1485 mlx5_tx_dseg_vlan(struct mlx5_txq_data *__rte_restrict txq,
1486 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
1487 struct mlx5_wqe_dseg *__rte_restrict dseg,
1490 unsigned int olx __rte_unused)
1496 MLX5_ASSERT(len > MLX5_ESEG_MIN_INLINE_SIZE);
1497 if (!MLX5_TXOFF_CONFIG(MPW)) {
1498 /* Store the descriptor byte counter for eMPW sessions. */
1499 dseg->bcount = rte_cpu_to_be_32
1500 ((len + sizeof(struct rte_vlan_hdr)) |
1501 MLX5_ETH_WQE_DATA_INLINE);
1502 pdst = &dseg->inline_data[0];
1504 /* The entire legacy MPW session counter is stored on close. */
1505 pdst = (uint8_t *)dseg;
1507 memcpy(pdst, buf, MLX5_DSEG_MIN_INLINE_SIZE);
1508 buf += MLX5_DSEG_MIN_INLINE_SIZE;
1509 pdst += MLX5_DSEG_MIN_INLINE_SIZE;
1510 len -= MLX5_DSEG_MIN_INLINE_SIZE;
1511 /* Insert VLAN ethertype + VLAN tag. Pointer is aligned. */
1512 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
1513 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
1514 pdst = (uint8_t *)txq->wqes;
1515 *(uint32_t *)pdst = rte_cpu_to_be_32((RTE_ETHER_TYPE_VLAN << 16) |
1516 loc->mbuf->vlan_tci);
1517 pdst += sizeof(struct rte_vlan_hdr);
1519 * The WQEBB space availability is checked by caller.
1520 * Here we should be aware of WQE ring buffer wraparound only.
1522 part = (uint8_t *)txq->wqes_end - pdst;
1523 part = RTE_MIN(part, len);
1525 rte_memcpy(pdst, buf, part);
1529 if (!MLX5_TXOFF_CONFIG(MPW))
1530 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
1531 /* Note: no final wraparound check here. */
1532 return (struct mlx5_wqe_dseg *)pdst;
1534 pdst = (uint8_t *)txq->wqes;
1541 * Build the Ethernet Segment with optionally inlined data with
1542 * VLAN insertion and following Data Segments (if any) from
1543 * multi-segment packet. Used by ordinary send and TSO.
1546 * Pointer to TX queue structure.
1548 * Pointer to burst routine local context.
1550 * Pointer to WQE to fill with built Ethernet/Data Segments.
1552 * Length of VLAN header to insert, 0 means no VLAN insertion.
1554 * Data length to inline. For TSO this parameter specifies the exact value,
1555 * for the ordinary send routine it can be aligned by the caller to provide
1556 * better WQE space saving and data buffer start address alignment.
1557 * This length includes the VLAN header being inserted.
1559 * Zero means ordinary send, inlined data can be extended,
1560 * otherwise this is TSO, inlined data length is fixed.
1562 * Configured Tx offloads mask. It is fully defined at
1563 * compile time and may be used for optimization.
1566 * Actual size of built WQE in segments.
1568 static __rte_always_inline unsigned int
1569 mlx5_tx_mseg_build(struct mlx5_txq_data *__rte_restrict txq,
1570 struct mlx5_txq_local *__rte_restrict loc,
1571 struct mlx5_wqe *__rte_restrict wqe,
1575 unsigned int olx __rte_unused)
1577 struct mlx5_wqe_dseg *__rte_restrict dseg;
1580 MLX5_ASSERT((rte_pktmbuf_pkt_len(loc->mbuf) + vlan) >= inlen);
1581 loc->mbuf_nseg = NB_SEGS(loc->mbuf);
1584 dseg = mlx5_tx_eseg_mdat(txq, loc, wqe, vlan, inlen, tso, olx);
1585 if (!loc->mbuf_nseg)
1588 * There are still some mbufs remaining, not inlined.
1589 * The first mbuf may be partially inlined and we
1590 * must process the possible non-zero data offset.
1592 if (loc->mbuf_off) {
1597 * Exhausted packets must have been dropped before.
1598 * A non-zero offset means there is some data
1599 * remaining in the packet.
1601 MLX5_ASSERT(loc->mbuf_off < rte_pktmbuf_data_len(loc->mbuf));
1602 MLX5_ASSERT(rte_pktmbuf_data_len(loc->mbuf));
1603 dptr = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
1605 dlen = rte_pktmbuf_data_len(loc->mbuf) - loc->mbuf_off;
1607 * Build the pointer/minimal Data Segment.
1608 * Do ring buffer wrapping check in advance.
1610 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
1611 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
1612 mlx5_tx_dseg_iptr(txq, loc, dseg, dptr, dlen, olx);
1613 /* Store the mbuf to be freed on completion. */
1614 MLX5_ASSERT(loc->elts_free);
1615 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
1618 if (--loc->mbuf_nseg == 0)
1620 loc->mbuf = loc->mbuf->next;
1624 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
1625 struct rte_mbuf *mbuf;
1627 /* Zero length segment found, just skip. */
1629 loc->mbuf = loc->mbuf->next;
1630 rte_pktmbuf_free_seg(mbuf);
1631 if (--loc->mbuf_nseg == 0)
1634 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
1635 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
1638 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
1639 rte_pktmbuf_data_len(loc->mbuf), olx);
1640 MLX5_ASSERT(loc->elts_free);
1641 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
1644 if (--loc->mbuf_nseg == 0)
1646 loc->mbuf = loc->mbuf->next;
1651 /* Calculate actual segments used from the dseg pointer. */
1652 if ((uintptr_t)wqe < (uintptr_t)dseg)
1653 ds = ((uintptr_t)dseg - (uintptr_t)wqe) / MLX5_WSEG_SIZE;
1655 ds = (((uintptr_t)dseg - (uintptr_t)wqe) +
1656 txq->wqe_s * MLX5_WQE_SIZE) / MLX5_WSEG_SIZE;
1661 * The routine checks the timestamp flag in the current packet,
1662 * and pushes a WAIT WQE into the queue if scheduling is required.
1665 * Pointer to TX queue structure.
1667 * Pointer to burst routine local context.
1669 * Configured Tx offloads mask. It is fully defined at
1670 * compile time and may be used for optimization.
1673 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
1674 * MLX5_TXCMP_CODE_SINGLE - continue processing with the packet.
1675 * MLX5_TXCMP_CODE_MULTI - the WAIT inserted, continue processing.
1676 * Local context variables partially updated.
1678 static __rte_always_inline enum mlx5_txcmp_code
1679 mlx5_tx_schedule_send(struct mlx5_txq_data *restrict txq,
1680 struct mlx5_txq_local *restrict loc,
1683 if (MLX5_TXOFF_CONFIG(TXPP) &&
1684 loc->mbuf->ol_flags & txq->ts_mask) {
1685 struct mlx5_wqe *wqe;
1690 * Estimate the required space quickly and roughly.
1691 * We would like to ensure the packet can be pushed
1692 * to the queue and we won't get an orphan WAIT WQE.
1694 if (loc->wqe_free <= MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE ||
1695 loc->elts_free < NB_SEGS(loc->mbuf))
1696 return MLX5_TXCMP_CODE_EXIT;
1697 /* Convert the timestamp into completion to wait. */
1698 ts = *RTE_MBUF_DYNFIELD(loc->mbuf, txq->ts_offset, uint64_t *);
1699 wci = mlx5_txpp_convert_tx_ts(txq->sh, ts);
1700 if (unlikely(wci < 0))
1701 return MLX5_TXCMP_CODE_SINGLE;
1702 /* Build the WAIT WQE with specified completion. */
1703 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
1704 mlx5_tx_cseg_init(txq, loc, wqe, 2, MLX5_OPCODE_WAIT, olx);
1705 mlx5_tx_wseg_init(txq, loc, wqe, wci, olx);
1708 return MLX5_TXCMP_CODE_MULTI;
1710 return MLX5_TXCMP_CODE_SINGLE;
1714 * Tx one packet function for multi-segment TSO. Supports all
1715 * types of Tx offloads, uses MLX5_OPCODE_TSO to build WQEs,
1716 * sends one packet per WQE.
1718 * This routine is responsible for storing processed mbuf
1719 * into elts ring buffer and update elts_head.
1722 * Pointer to TX queue structure.
1724 * Pointer to burst routine local context.
1726 * Configured Tx offloads mask. It is fully defined at
1727 * compile time and may be used for optimization.
1730 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
1731 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
1732 * Local context variables partially updated.
1734 static __rte_always_inline enum mlx5_txcmp_code
1735 mlx5_tx_packet_multi_tso(struct mlx5_txq_data *__rte_restrict txq,
1736 struct mlx5_txq_local *__rte_restrict loc,
1739 struct mlx5_wqe *__rte_restrict wqe;
1740 unsigned int ds, dlen, inlen, ntcp, vlan = 0;
1742 if (MLX5_TXOFF_CONFIG(TXPP)) {
1743 enum mlx5_txcmp_code wret;
1745 /* Generate WAIT for scheduling if requested. */
1746 wret = mlx5_tx_schedule_send(txq, loc, olx);
1747 if (wret == MLX5_TXCMP_CODE_EXIT)
1748 return MLX5_TXCMP_CODE_EXIT;
1749 if (wret == MLX5_TXCMP_CODE_ERROR)
1750 return MLX5_TXCMP_CODE_ERROR;
1753 * Calculate data length to be inlined to estimate
1754 * the required space in WQE ring buffer.
1756 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
1757 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
1758 vlan = sizeof(struct rte_vlan_hdr);
1759 inlen = loc->mbuf->l2_len + vlan +
1760 loc->mbuf->l3_len + loc->mbuf->l4_len;
1761 if (unlikely((!inlen || !loc->mbuf->tso_segsz)))
1762 return MLX5_TXCMP_CODE_ERROR;
1763 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
1764 inlen += loc->mbuf->outer_l2_len + loc->mbuf->outer_l3_len;
1765 /* Packet must contain all TSO headers. */
1766 if (unlikely(inlen > MLX5_MAX_TSO_HEADER ||
1767 inlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
1768 inlen > (dlen + vlan)))
1769 return MLX5_TXCMP_CODE_ERROR;
1770 MLX5_ASSERT(inlen >= txq->inlen_mode);
1772 * Check whether there are enough free WQEBBs:
1774 * - Ethernet Segment
1775 * - First Segment of inlined Ethernet data
1776 * - ... data continued ...
1777 * - Data Segments of pointer/min inline type
1779 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
1780 MLX5_ESEG_MIN_INLINE_SIZE +
1782 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
1783 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
1784 return MLX5_TXCMP_CODE_EXIT;
1785 /* Check for maximal WQE size. */
1786 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
1787 return MLX5_TXCMP_CODE_ERROR;
1788 #ifdef MLX5_PMD_SOFT_COUNTERS
1789 /* Update sent data bytes/packets counters. */
1790 ntcp = (dlen - (inlen - vlan) + loc->mbuf->tso_segsz - 1) /
1791 loc->mbuf->tso_segsz;
1793 * One will be added for mbuf itself at the end of the mlx5_tx_burst
1794 * from loc->pkts_sent field.
1797 txq->stats.opackets += ntcp;
1798 txq->stats.obytes += dlen + vlan + ntcp * inlen;
1800 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
1801 loc->wqe_last = wqe;
1802 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_TSO, olx);
1803 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 1, olx);
1804 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
1805 txq->wqe_ci += (ds + 3) / 4;
1806 loc->wqe_free -= (ds + 3) / 4;
1807 return MLX5_TXCMP_CODE_MULTI;
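/*
 * Worked example (sketch, assuming 16-byte WQE segments and 64-byte WQEBBs):
 * a 3-segment TSO packet inlining 60 bytes of headers needs
 * ds = 3 + 2 + (60 - MLX5_ESEG_MIN_INLINE_SIZE + 15) / 16 segments and the
 * WQE occupies (ds + 3) / 4 WQEBBs, which is exactly how wqe_ci and
 * wqe_free are advanced above.
 */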
1811 * Tx one packet function for multi-segment SEND. Supports all types of Tx
1812 * offloads, uses MLX5_OPCODE_SEND to build WQEs, sends one packet per WQE,
1813 * without any data inlining in Ethernet Segment.
1815 * This routine is responsible for storing processed mbuf
1816 * into elts ring buffer and update elts_head.
1819 * Pointer to TX queue structure.
1821 * Pointer to burst routine local context.
1823 * Configured Tx offloads mask. It is fully defined at
1824 * compile time and may be used for optimization.
1827 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
1828 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
1829 * Local context variables partially updated.
1831 static __rte_always_inline enum mlx5_txcmp_code
1832 mlx5_tx_packet_multi_send(struct mlx5_txq_data *__rte_restrict txq,
1833 struct mlx5_txq_local *__rte_restrict loc,
1836 struct mlx5_wqe_dseg *__rte_restrict dseg;
1837 struct mlx5_wqe *__rte_restrict wqe;
1838 unsigned int ds, nseg;
1840 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
1841 if (MLX5_TXOFF_CONFIG(TXPP)) {
1842 enum mlx5_txcmp_code wret;
1844 /* Generate WAIT for scheduling if requested. */
1845 wret = mlx5_tx_schedule_send(txq, loc, olx);
1846 if (wret == MLX5_TXCMP_CODE_EXIT)
1847 return MLX5_TXCMP_CODE_EXIT;
1848 if (wret == MLX5_TXCMP_CODE_ERROR)
1849 return MLX5_TXCMP_CODE_ERROR;
1852 * No inlining at all - this means that saving CPU cycles was prioritized
1853 * at configuration time, so we should not copy any packet data to the WQE.
1855 nseg = NB_SEGS(loc->mbuf);
1857 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
1858 return MLX5_TXCMP_CODE_EXIT;
1859 /* Check for maximal WQE size. */
1860 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
1861 return MLX5_TXCMP_CODE_ERROR;
1863 * Some Tx offloads may cause an error if the packet is not long enough,
1864 * check against the assumed minimal length.
1866 if (rte_pktmbuf_pkt_len(loc->mbuf) <= MLX5_ESEG_MIN_INLINE_SIZE)
1867 return MLX5_TXCMP_CODE_ERROR;
1868 #ifdef MLX5_PMD_SOFT_COUNTERS
1869 /* Update sent data bytes counter. */
1870 txq->stats.obytes += rte_pktmbuf_pkt_len(loc->mbuf);
1871 if (MLX5_TXOFF_CONFIG(VLAN) &&
1872 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
1873 txq->stats.obytes += sizeof(struct rte_vlan_hdr);
1876 * SEND WQE, one WQEBB:
1877 * - Control Segment, SEND opcode
1878 * - Ethernet Segment, optional VLAN, no inline
1879 * - Data Segments, pointer only type
1881 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
1882 loc->wqe_last = wqe;
1883 mlx5_tx_cseg_init(txq, loc, wqe, ds, MLX5_OPCODE_SEND, olx);
1884 mlx5_tx_eseg_none(txq, loc, wqe, olx);
1885 dseg = &wqe->dseg[0];
1887 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
1888 struct rte_mbuf *mbuf;
1891 * A zero length segment was found, the total
1892 * size of the WQE in segments has to be corrected.
1893 * This is supposed to be a rare occasion, so in the normal
1894 * case (no zero length segments) we avoid the extra
1895 * write to the Control Segment.
1898 wqe->cseg.sq_ds -= RTE_BE32(1);
1900 loc->mbuf = mbuf->next;
1901 rte_pktmbuf_free_seg(mbuf);
1907 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
1908 rte_pktmbuf_data_len(loc->mbuf), olx);
1909 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
1914 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
1915 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
1916 loc->mbuf = loc->mbuf->next;
1919 txq->wqe_ci += (ds + 3) / 4;
1920 loc->wqe_free -= (ds + 3) / 4;
1921 return MLX5_TXCMP_CODE_MULTI;
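/*
 * Layout sketch: this WQE consists of one Control Segment, one Ethernet
 * Segment without inline data and one pointer-type Data Segment per mbuf
 * segment, so for an nseg-segment packet ds is expected to be nseg + 2,
 * minus one for every zero-length segment dropped in the loop above.
 */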
1925 * Tx one packet function for multi-segment SEND. Supports all
1926 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
1927 * sends one packet per WQE, with data inlining in
1928 * Ethernet Segment and minimal Data Segments.
1930 * This routine is responsible for storing processed mbuf
1931 * into elts ring buffer and update elts_head.
1934 * Pointer to TX queue structure.
1936 * Pointer to burst routine local context.
1938 * Configured Tx offloads mask. It is fully defined at
1939 * compile time and may be used for optimization.
1942 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
1943 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
1944 * Local context variables partially updated.
1946 static __rte_always_inline enum mlx5_txcmp_code
1947 mlx5_tx_packet_multi_inline(struct mlx5_txq_data *__rte_restrict txq,
1948 struct mlx5_txq_local *__rte_restrict loc,
1951 struct mlx5_wqe *__rte_restrict wqe;
1952 unsigned int ds, inlen, dlen, vlan = 0;
1954 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
1955 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
1956 if (MLX5_TXOFF_CONFIG(TXPP)) {
1957 enum mlx5_txcmp_code wret;
1959 /* Generate WAIT for scheduling if requested. */
1960 wret = mlx5_tx_schedule_send(txq, loc, olx);
1961 if (wret == MLX5_TXCMP_CODE_EXIT)
1962 return MLX5_TXCMP_CODE_EXIT;
1963 if (wret == MLX5_TXCMP_CODE_ERROR)
1964 return MLX5_TXCMP_CODE_ERROR;
1967 * First calculate the data length to be inlined
1968 * to estimate the required space for the WQE.
1970 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
1971 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
1972 vlan = sizeof(struct rte_vlan_hdr);
1973 inlen = dlen + vlan;
1974 /* Check against minimal length. */
1975 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
1976 return MLX5_TXCMP_CODE_ERROR;
1977 MLX5_ASSERT(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
1978 if (inlen > txq->inlen_send ||
1979 loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
1980 struct rte_mbuf *mbuf;
1985 * Packet length exceeds the allowed inline data length,
1986 * check whether the minimal inlining is required.
1988 if (txq->inlen_mode) {
1989 MLX5_ASSERT(txq->inlen_mode >=
1990 MLX5_ESEG_MIN_INLINE_SIZE);
1991 MLX5_ASSERT(txq->inlen_mode <= txq->inlen_send);
1992 inlen = txq->inlen_mode;
1994 if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE ||
1995 !vlan || txq->vlan_en) {
1997 * VLAN insertion will be done by the HW.
1998 * It is not the most efficient way - the VLAN
1999 * flag is checked twice - but we must compute the
2000 * inlining length correctly and take into
2001 * account the VLAN header being inserted.
2003 return mlx5_tx_packet_multi_send
2006 inlen = MLX5_ESEG_MIN_INLINE_SIZE;
2009 * Now we know the minimal amount of data requested
2010 * to be inlined. Check whether we should inline the buffers
2011 * from the beginning of the chain to eliminate some mbufs.
2014 nxlen = rte_pktmbuf_data_len(mbuf);
2015 if (unlikely(nxlen <= txq->inlen_send)) {
2016 /* We can inline first mbuf at least. */
2017 if (nxlen < inlen) {
2020 /* Scan mbufs till inlen filled. */
2025 nxlen = rte_pktmbuf_data_len(mbuf);
2027 } while (unlikely(nxlen < inlen));
2028 if (unlikely(nxlen > txq->inlen_send)) {
2029 /* We cannot inline entire mbuf. */
2030 smlen = inlen - smlen;
2031 start = rte_pktmbuf_mtod_offset
2032 (mbuf, uintptr_t, smlen);
2039 /* We should not reach the end of the packet here. */
2041 nxlen = inlen + rte_pktmbuf_data_len(mbuf);
2042 } while (unlikely(nxlen < txq->inlen_send));
2044 start = rte_pktmbuf_mtod(mbuf, uintptr_t);
2046 * Check whether we can inline data to align the start
2047 * address of the data buffer to a cache line.
2050 start = (~start + 1) & (RTE_CACHE_LINE_SIZE - 1);
2051 if (unlikely(start)) {
2053 if (start <= txq->inlen_send)
2058 * Check whether there are enough free WQEBBs:
2060 * - Ethernet Segment
2061 * - First Segment of inlined Ethernet data
2062 * - ... data continued ...
2063 * - Data Segments of pointer/min inline type
2065 * Estimate the number of Data Segments conservatively,
2066 * assuming no mbufs are freed during inlining.
2068 MLX5_ASSERT(inlen <= txq->inlen_send);
2069 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
2070 MLX5_ESEG_MIN_INLINE_SIZE +
2072 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
2073 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
2074 return MLX5_TXCMP_CODE_EXIT;
2075 /* Check for maximal WQE size. */
2076 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
2077 return MLX5_TXCMP_CODE_ERROR;
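/*
 * A rough worked example of the conservative estimate above, assuming
 * the typical 16-byte MLX5_WSEG_SIZE and 18-byte MLX5_ESEG_MIN_INLINE_SIZE
 * (hypothetical values, for illustration only):
 *
 *   NB_SEGS(loc->mbuf) = 3    three pointer Data Segments reserved
 *   inlen = 128               bytes to be inlined into the WQE
 *   ds    ~ 3 + 2 + (128 - 18 + 16 + 15) / 16 = 5 + 8 = 13
 *   wqebbs = (13 + 3) / 4 = 4
 *
 * The estimate intentionally over-counts - inlined mbufs may be freed
 * and their pointer Data Segments never emitted.
 */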
2078 #ifdef MLX5_PMD_SOFT_COUNTERS
2079 /* Update sent data bytes/packets counters. */
2080 txq->stats.obytes += dlen + vlan;
2082 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
2083 loc->wqe_last = wqe;
2084 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_SEND, olx);
2085 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 0, olx);
2086 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
2087 txq->wqe_ci += (ds + 3) / 4;
2088 loc->wqe_free -= (ds + 3) / 4;
2089 return MLX5_TXCMP_CODE_MULTI;
2093 * Tx burst function for multi-segment packets. Supports all
2094 * types of Tx offloads, uses MLX5_OPCODE_SEND/TSO to build WQEs,
2095 * sends one packet per WQE. The function stops sending if it
2096 * encounters a single-segment packet.
2098 * This routine is responsible for storing the processed mbuf
2099 * into the elts ring buffer and updating elts_head.
2102 * Pointer to TX queue structure.
2104 * Packets to transmit.
2106 * Number of packets in array.
2108 * Pointer to burst routine local context.
2110 * Configured Tx offloads mask. It is fully defined at
2111 * compile time and may be used for optimization.
2114 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
2115 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
2116 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
2117 * MLX5_TXCMP_CODE_TSO - TSO single-segment packet encountered.
2118 * Local context variables updated.
2120 static __rte_always_inline enum mlx5_txcmp_code
2121 mlx5_tx_burst_mseg(struct mlx5_txq_data *__rte_restrict txq,
2122 struct rte_mbuf **__rte_restrict pkts,
2123 unsigned int pkts_n,
2124 struct mlx5_txq_local *__rte_restrict loc,
2127 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
2128 MLX5_ASSERT(pkts_n > loc->pkts_sent);
2129 pkts += loc->pkts_sent + 1;
2130 pkts_n -= loc->pkts_sent;
2132 enum mlx5_txcmp_code ret;
2134 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
2136 * Estimate the number of free elts quickly but conservatively.
2137 * Some segments may be fully inlined and freed;
2138 * ignore this here - precise estimation is costly.
2140 if (loc->elts_free < NB_SEGS(loc->mbuf))
2141 return MLX5_TXCMP_CODE_EXIT;
2142 if (MLX5_TXOFF_CONFIG(TSO) &&
2143 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)) {
2144 /* Proceed with multi-segment TSO. */
2145 ret = mlx5_tx_packet_multi_tso(txq, loc, olx);
2146 } else if (MLX5_TXOFF_CONFIG(INLINE)) {
2147 /* Proceed with multi-segment SEND with inlining. */
2148 ret = mlx5_tx_packet_multi_inline(txq, loc, olx);
2150 /* Proceed with multi-segment SEND w/o inlining. */
2151 ret = mlx5_tx_packet_multi_send(txq, loc, olx);
2153 if (ret == MLX5_TXCMP_CODE_EXIT)
2154 return MLX5_TXCMP_CODE_EXIT;
2155 if (ret == MLX5_TXCMP_CODE_ERROR)
2156 return MLX5_TXCMP_CODE_ERROR;
2157 /* WQE is built, go to the next packet. */
2160 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
2161 return MLX5_TXCMP_CODE_EXIT;
2162 loc->mbuf = *pkts++;
2164 rte_prefetch0(*pkts);
2165 if (likely(NB_SEGS(loc->mbuf) > 1))
2167 /* Here ends the series of multi-segment packets. */
2168 if (MLX5_TXOFF_CONFIG(TSO) &&
2169 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
2170 return MLX5_TXCMP_CODE_TSO;
2171 return MLX5_TXCMP_CODE_SINGLE;
2177 * Tx burst function for single-segment packets with TSO.
2178 * Supports all types of Tx offloads, except multi-packets.
2179 * Uses MLX5_OPCODE_TSO to build WQEs, sends one packet per WQE.
2180 * The function stops sending if it encounters a multi-segment
2181 * packet or a packet without TSO requested.
2183 * The routine is responsible for storing the processed mbuf into the elts ring buffer
2184 * and updating elts_head if the inline offload is requested, due to possible early
2185 * freeing of the inlined mbufs (the pkts array cannot be stored in elts as a batch).
2188 * Pointer to TX queue structure.
2190 * Packets to transmit.
2192 * Number of packets in array.
2194 * Pointer to burst routine local context.
2196 * Configured Tx offloads mask. It is fully defined at
2197 * compile time and may be used for optimization.
2200 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
2201 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
2202 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
2203 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
2204 * Local context variables updated.
2206 static __rte_always_inline enum mlx5_txcmp_code
2207 mlx5_tx_burst_tso(struct mlx5_txq_data *__rte_restrict txq,
2208 struct rte_mbuf **__rte_restrict pkts,
2209 unsigned int pkts_n,
2210 struct mlx5_txq_local *__rte_restrict loc,
2213 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
2214 MLX5_ASSERT(pkts_n > loc->pkts_sent);
2215 pkts += loc->pkts_sent + 1;
2216 pkts_n -= loc->pkts_sent;
2218 struct mlx5_wqe_dseg *__rte_restrict dseg;
2219 struct mlx5_wqe *__rte_restrict wqe;
2220 unsigned int ds, dlen, hlen, ntcp, vlan = 0;
2223 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
2224 if (MLX5_TXOFF_CONFIG(TXPP)) {
2225 enum mlx5_txcmp_code wret;
2227 /* Generate WAIT for scheduling if requested. */
2228 wret = mlx5_tx_schedule_send(txq, loc, olx);
2229 if (wret == MLX5_TXCMP_CODE_EXIT)
2230 return MLX5_TXCMP_CODE_EXIT;
2231 if (wret == MLX5_TXCMP_CODE_ERROR)
2232 return MLX5_TXCMP_CODE_ERROR;
2234 dlen = rte_pktmbuf_data_len(loc->mbuf);
2235 if (MLX5_TXOFF_CONFIG(VLAN) &&
2236 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
2237 vlan = sizeof(struct rte_vlan_hdr);
2240 * First calculate the WQE size to check
2241 * whether we have enough space in the ring buffer.
2243 hlen = loc->mbuf->l2_len + vlan +
2244 loc->mbuf->l3_len + loc->mbuf->l4_len;
2245 if (unlikely((!hlen || !loc->mbuf->tso_segsz)))
2246 return MLX5_TXCMP_CODE_ERROR;
2247 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
2248 hlen += loc->mbuf->outer_l2_len +
2249 loc->mbuf->outer_l3_len;
2250 /* Segment must contain all TSO headers. */
2251 if (unlikely(hlen > MLX5_MAX_TSO_HEADER ||
2252 hlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
2253 hlen > (dlen + vlan)))
2254 return MLX5_TXCMP_CODE_ERROR;
2256 * Check whether there are enough free WQEBBs:
2258 * - Ethernet Segment
2259 * - First Segment of inlined Ethernet data
2260 * - ... data continued ...
2261 * - Finishing Data Segment of pointer type
2263 ds = 4 + (hlen - MLX5_ESEG_MIN_INLINE_SIZE +
2264 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
2265 if (loc->wqe_free < ((ds + 3) / 4))
2266 return MLX5_TXCMP_CODE_EXIT;
2267 #ifdef MLX5_PMD_SOFT_COUNTERS
2268 /* Update sent data bytes/packets counters. */
2269 ntcp = (dlen + vlan - hlen +
2270 loc->mbuf->tso_segsz - 1) /
2271 loc->mbuf->tso_segsz;
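/*
 * For illustration, with hypothetical numbers: hlen = 54 bytes of
 * headers, dlen + vlan = 2974 bytes and tso_segsz = 1460 give
 * ntcp = (2974 - 54 + 1459) / 1460 = 2, i.e. the device will emit two
 * TCP segments on the wire, each carrying a replicated copy of the
 * hlen-byte headers.
 */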
2273 * One will be added for the mbuf itself at the end
2274 * of mlx5_tx_burst, from the loc->pkts_sent field.
2277 txq->stats.opackets += ntcp;
2278 txq->stats.obytes += dlen + vlan + ntcp * hlen;
2281 * Build the TSO WQE:
2283 * - Ethernet Segment with hlen bytes inlined
2284 * - Data Segment of pointer type
2286 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
2287 loc->wqe_last = wqe;
2288 mlx5_tx_cseg_init(txq, loc, wqe, ds,
2289 MLX5_OPCODE_TSO, olx);
2290 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan, hlen, 1, olx);
2291 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) + hlen - vlan;
2292 dlen -= hlen - vlan;
2293 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
2295 * WQE is built, update the loop parameters
2296 * and go to the next packet.
2298 txq->wqe_ci += (ds + 3) / 4;
2299 loc->wqe_free -= (ds + 3) / 4;
2300 if (MLX5_TXOFF_CONFIG(INLINE))
2301 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
2305 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
2306 return MLX5_TXCMP_CODE_EXIT;
2307 loc->mbuf = *pkts++;
2309 rte_prefetch0(*pkts);
2310 if (MLX5_TXOFF_CONFIG(MULTI) &&
2311 unlikely(NB_SEGS(loc->mbuf) > 1))
2312 return MLX5_TXCMP_CODE_MULTI;
2313 if (likely(!(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)))
2314 return MLX5_TXCMP_CODE_SINGLE;
2315 /* Continue with the next TSO packet. */
2321 * Analyze the packet and select the best method to send.
2324 * Pointer to TX queue structure.
2326 * Pointer to burst routine local context.
2328 * Configured Tx offloads mask. It is fully defined at
2329 * compile time and may be used for optimization.
2331 * The predefined flag whether to do the complete check for
2332 * multi-segment packets and TSO.
2335 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
2336 * MLX5_TXCMP_CODE_TSO - TSO required, use TSO/LSO.
2337 * MLX5_TXCMP_CODE_SINGLE - single-segment packet, use SEND.
2338 * MLX5_TXCMP_CODE_EMPW - single-segment packet, use MPW.
2340 static __rte_always_inline enum mlx5_txcmp_code
2341 mlx5_tx_able_to_empw(struct mlx5_txq_data *__rte_restrict txq,
2342 struct mlx5_txq_local *__rte_restrict loc,
2346 /* Check for multi-segment packet. */
2348 MLX5_TXOFF_CONFIG(MULTI) &&
2349 unlikely(NB_SEGS(loc->mbuf) > 1))
2350 return MLX5_TXCMP_CODE_MULTI;
2351 /* Check for TSO packet. */
2353 MLX5_TXOFF_CONFIG(TSO) &&
2354 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
2355 return MLX5_TXCMP_CODE_TSO;
2356 /* Check if eMPW is enabled at all. */
2357 if (!MLX5_TXOFF_CONFIG(EMPW))
2358 return MLX5_TXCMP_CODE_SINGLE;
2359 /* Check if eMPW can be engaged. */
2360 if (MLX5_TXOFF_CONFIG(VLAN) &&
2361 unlikely(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) &&
2362 (!MLX5_TXOFF_CONFIG(INLINE) ||
2363 unlikely((rte_pktmbuf_data_len(loc->mbuf) +
2364 sizeof(struct rte_vlan_hdr)) > txq->inlen_empw))) {
2366 * eMPW does not support the VLAN insertion offload; we would have to
2367 * inline the entire packet, but it is too long for inlining.
2369 return MLX5_TXCMP_CODE_SINGLE;
2371 return MLX5_TXCMP_CODE_EMPW;
2375 * Check whether the next packet attributes match the eMPW batch ones.
2376 * In addition, for legacy MPW the packet length is checked as well.
2379 * Pointer to TX queue structure.
2381 * Pointer to Ethernet Segment of eMPW batch.
2383 * Pointer to burst routine local context.
2385 * Length of previous packet in MPW descriptor.
2387 * Configured Tx offloads mask. It is fully defined at
2388 * compile time and may be used for optimization.
2391 * true - the packet matches the eMPW batch attributes.
2392 * false - no match, eMPW should be restarted.
2394 static __rte_always_inline bool
2395 mlx5_tx_match_empw(struct mlx5_txq_data *__rte_restrict txq,
2396 struct mlx5_wqe_eseg *__rte_restrict es,
2397 struct mlx5_txq_local *__rte_restrict loc,
2401 uint8_t swp_flags = 0;
2403 /* Compare the checksum flags, if any. */
2404 if (MLX5_TXOFF_CONFIG(CSUM) &&
2405 txq_ol_cksum_to_cs(loc->mbuf) != es->cs_flags)
2407 /* Compare the Software Parser offsets and flags. */
2408 if (MLX5_TXOFF_CONFIG(SWP) &&
2409 (es->swp_offs != txq_mbuf_to_swp(loc, &swp_flags, olx) ||
2410 es->swp_flags != swp_flags))
2412 /* Fill metadata field if needed. */
2413 if (MLX5_TXOFF_CONFIG(METADATA) &&
2414 es->metadata != (loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2415 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0))
2417 /* Legacy MPW can send packets with the same length only. */
2418 if (MLX5_TXOFF_CONFIG(MPW) &&
2419 dlen != rte_pktmbuf_data_len(loc->mbuf))
2421 /* There must be no VLAN packets in eMPW loop. */
2422 if (MLX5_TXOFF_CONFIG(VLAN))
2423 MLX5_ASSERT(!(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT));
2424 /* Check if the scheduling is requested. */
2425 if (MLX5_TXOFF_CONFIG(TXPP) &&
2426 loc->mbuf->ol_flags & txq->ts_mask)
2432 * Update send loop variables and WQE for eMPW loop without data inlining.
2433 * Number of Data Segments is equal to the number of sent packets.
2436 * Pointer to TX queue structure.
2438 * Pointer to burst routine local context.
2440 * Number of packets (one Data Segment per packet).
2442 * Accumulated statistics, bytes sent.
2444 * Configured Tx offloads mask. It is fully defined at
2445 * compile time and may be used for optimization.
2451 static __rte_always_inline void
2452 mlx5_tx_sdone_empw(struct mlx5_txq_data *__rte_restrict txq,
2453 struct mlx5_txq_local *__rte_restrict loc,
2456 unsigned int olx __rte_unused)
2458 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
2459 #ifdef MLX5_PMD_SOFT_COUNTERS
2460 /* Update sent data bytes counter. */
2461 txq->stats.obytes += slen;
2465 loc->elts_free -= ds;
2466 loc->pkts_sent += ds;
2468 loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
2469 txq->wqe_ci += (ds + 3) / 4;
2470 loc->wqe_free -= (ds + 3) / 4;
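/*
 * A quick sanity sketch of the accounting above (illustrative only):
 * closing an eMPW batch of 5 packets leaves the Control and Ethernet
 * Segments plus five pointer Data Segments in the descriptor, i.e.
 * 5 + 2 = 7 sixteen-byte slots, consuming (7 + 3) / 4 = 2 WQEBBs and
 * 5 entries of the elts ring.
 */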
2474 * Update send loop variables and WQE for eMPW loop with data inlining.
2475 * Gets the size of the descriptors and data pushed to the WQE.
2478 * Pointer to TX queue structure.
2480 * Pointer to burst routine local context.
2482 * Total size of descriptor/data in bytes.
2484 * Accumulated statistics, data bytes sent.
2486 * The base WQE for the eMPW/MPW descriptor.
2488 * Configured Tx offloads mask. It is fully defined at
2489 * compile time and may be used for optimization.
2495 static __rte_always_inline void
2496 mlx5_tx_idone_empw(struct mlx5_txq_data *__rte_restrict txq,
2497 struct mlx5_txq_local *__rte_restrict loc,
2500 struct mlx5_wqe *__rte_restrict wqem,
2501 unsigned int olx __rte_unused)
2503 struct mlx5_wqe_dseg *dseg = &wqem->dseg[0];
2505 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
2506 #ifdef MLX5_PMD_SOFT_COUNTERS
2507 /* Update sent data bytes counter. */
2508 txq->stats.obytes += slen;
2512 if (MLX5_TXOFF_CONFIG(MPW) && dseg->bcount == RTE_BE32(0)) {
2514 * If the legacy MPW session contains inline packets,
2515 * we should set the length of the single inline data segment
2516 * and align the total length to the segment size.
2518 MLX5_ASSERT(len > sizeof(dseg->bcount));
2519 dseg->bcount = rte_cpu_to_be_32((len - sizeof(dseg->bcount)) |
2520 MLX5_ETH_WQE_DATA_INLINE);
2521 len = (len + MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE + 2;
2524 * The session is not a legacy MPW one or it contains
2525 * data buffer pointer segments.
2527 MLX5_ASSERT((len % MLX5_WSEG_SIZE) == 0);
2528 len = len / MLX5_WSEG_SIZE + 2;
2530 wqem->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | len);
2531 txq->wqe_ci += (len + 3) / 4;
2532 loc->wqe_free -= (len + 3) / 4;
2533 loc->wqe_last = wqem;
2537 * The set of Tx burst functions for single-segment packets without TSO
2538 * and with Multi-Packet Writing feature support.
2539 * Supports all types of Tx offloads, except multi-packets and TSO.
2541 * Uses MLX5_OPCODE_EMPW to build WQEs if possible and sends as many packets
2542 * per WQE as it can. If eMPW is not configured or a packet cannot be sent with
2543 * eMPW (VLAN insertion) the ordinary SEND opcode is used and only one packet
2546 * The functions stop sending if they encounter a multi-segment packet or a packet
2547 * with TSO requested.
2549 * The routines are responsible for storing the processed mbuf into the elts ring buffer
2550 * and updating elts_head if the inlining offload is requested. Otherwise copying the
2551 * mbufs to elts can be postponed and completed at the end of the burst routine.
2554 * Pointer to TX queue structure.
2556 * Packets to transmit.
2558 * Number of packets in array.
2560 * Pointer to burst routine local context.
2562 * Configured Tx offloads mask. It is fully defined at
2563 * compile time and may be used for optimization.
2566 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
2567 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
2568 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
2569 * MLX5_TXCMP_CODE_TSO - TSO packet encountered.
2570 * MLX5_TXCMP_CODE_SINGLE - used inside functions set.
2571 * MLX5_TXCMP_CODE_EMPW - used inside functions set.
2573 * Local context variables updated.
2576 * The routine sends packets with MLX5_OPCODE_EMPW
2577 * without inlining; this is a dedicated optimized branch.
2578 * No VLAN insertion is supported.
2580 static __rte_always_inline enum mlx5_txcmp_code
2581 mlx5_tx_burst_empw_simple(struct mlx5_txq_data *__rte_restrict txq,
2582 struct rte_mbuf **__rte_restrict pkts,
2583 unsigned int pkts_n,
2584 struct mlx5_txq_local *__rte_restrict loc,
2588 * The subroutine is part of mlx5_tx_burst_single() and sends
2589 * single-segment packets with the eMPW opcode without data inlining.
2591 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
2592 MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
2593 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
2594 MLX5_ASSERT(pkts_n > loc->pkts_sent);
2595 pkts += loc->pkts_sent + 1;
2596 pkts_n -= loc->pkts_sent;
2598 struct mlx5_wqe_dseg *__rte_restrict dseg;
2599 struct mlx5_wqe_eseg *__rte_restrict eseg;
2600 enum mlx5_txcmp_code ret;
2601 unsigned int part, loop;
2602 unsigned int slen = 0;
2605 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
2606 if (MLX5_TXOFF_CONFIG(TXPP)) {
2607 enum mlx5_txcmp_code wret;
2609 /* Generate WAIT for scheduling if requested. */
2610 wret = mlx5_tx_schedule_send(txq, loc, olx);
2611 if (wret == MLX5_TXCMP_CODE_EXIT)
2612 return MLX5_TXCMP_CODE_EXIT;
2613 if (wret == MLX5_TXCMP_CODE_ERROR)
2614 return MLX5_TXCMP_CODE_ERROR;
2616 part = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
2617 MLX5_MPW_MAX_PACKETS :
2618 MLX5_EMPW_MAX_PACKETS);
2619 if (unlikely(loc->elts_free < part)) {
2620 /* We do not have enough elts to save all mbufs. */
2621 if (unlikely(loc->elts_free < MLX5_EMPW_MIN_PACKETS))
2622 return MLX5_TXCMP_CODE_EXIT;
2623 /* But we are still able to send at least a minimal eMPW. */
2624 part = loc->elts_free;
2626 /* Check whether we have enough WQEs */
2627 if (unlikely(loc->wqe_free < ((2 + part + 3) / 4))) {
2628 if (unlikely(loc->wqe_free <
2629 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
2630 return MLX5_TXCMP_CODE_EXIT;
2631 part = (loc->wqe_free * 4) - 2;
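/*
 * The arithmetic above follows from the WQEBB layout: every WQEBB
 * provides four 16-byte slots, the eMPW title WQEBB spends two of them
 * on the Control and Ethernet Segments, and each packet adds one
 * pointer Data Segment. For example (hypothetical), loc->wqe_free = 3
 * caps the batch at 3 * 4 - 2 = 10 packets.
 */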
2633 if (likely(part > 1))
2634 rte_prefetch0(*pkts);
2635 loc->wqe_last = txq->wqes + (txq->wqe_ci & txq->wqe_m);
2637 * Build eMPW title WQEBB:
2638 * - Control Segment, eMPW opcode
2639 * - Ethernet Segment, no inline
2641 mlx5_tx_cseg_init(txq, loc, loc->wqe_last, part + 2,
2642 MLX5_OPCODE_ENHANCED_MPSW, olx);
2643 mlx5_tx_eseg_none(txq, loc, loc->wqe_last,
2644 olx & ~MLX5_TXOFF_CONFIG_VLAN);
2645 eseg = &loc->wqe_last->eseg;
2646 dseg = &loc->wqe_last->dseg[0];
2648 /* Store the packet length for legacy MPW. */
2649 if (MLX5_TXOFF_CONFIG(MPW))
2650 eseg->mss = rte_cpu_to_be_16
2651 (rte_pktmbuf_data_len(loc->mbuf));
2653 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
2654 #ifdef MLX5_PMD_SOFT_COUNTERS
2655 /* Update sent data bytes counter. */
2660 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
2662 if (unlikely(--loop == 0))
2664 loc->mbuf = *pkts++;
2665 if (likely(loop > 1))
2666 rte_prefetch0(*pkts);
2667 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
2669 * Unroll the completion code to avoid
2670 * returning a variable value - it results in
2671 * unoptimized subsequent checks in the caller.
2673 if (ret == MLX5_TXCMP_CODE_MULTI) {
2675 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
2676 if (unlikely(!loc->elts_free ||
2678 return MLX5_TXCMP_CODE_EXIT;
2679 return MLX5_TXCMP_CODE_MULTI;
2681 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
2682 if (ret == MLX5_TXCMP_CODE_TSO) {
2684 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
2685 if (unlikely(!loc->elts_free ||
2687 return MLX5_TXCMP_CODE_EXIT;
2688 return MLX5_TXCMP_CODE_TSO;
2690 if (ret == MLX5_TXCMP_CODE_SINGLE) {
2692 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
2693 if (unlikely(!loc->elts_free ||
2695 return MLX5_TXCMP_CODE_EXIT;
2696 return MLX5_TXCMP_CODE_SINGLE;
2698 if (ret != MLX5_TXCMP_CODE_EMPW) {
2701 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
2702 return MLX5_TXCMP_CODE_ERROR;
2705 * Check whether the packet parameters coincide
2706 * within the assumed eMPW batch:
2707 * - checksum settings
2709 * - software parser settings
2710 * - packet lengths (legacy MPW only)
2711 * - scheduling is not required
2713 if (!mlx5_tx_match_empw(txq, eseg, loc, dlen, olx)) {
2716 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
2717 if (unlikely(!loc->elts_free ||
2719 return MLX5_TXCMP_CODE_EXIT;
2723 /* Packet attributes match, continue the same eMPW. */
2725 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
2726 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
2728 /* eMPW is built successfully, update loop parameters. */
2730 MLX5_ASSERT(pkts_n >= part);
2731 #ifdef MLX5_PMD_SOFT_COUNTERS
2732 /* Update sent data bytes counter. */
2733 txq->stats.obytes += slen;
2735 loc->elts_free -= part;
2736 loc->pkts_sent += part;
2737 txq->wqe_ci += (2 + part + 3) / 4;
2738 loc->wqe_free -= (2 + part + 3) / 4;
2740 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
2741 return MLX5_TXCMP_CODE_EXIT;
2742 loc->mbuf = *pkts++;
2743 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
2744 if (unlikely(ret != MLX5_TXCMP_CODE_EMPW))
2746 /* Continue sending eMPW batches. */
2752 * The routine sends packets with MLX5_OPCODE_EMPW
2753 * with inlining, optionally supports VLAN insertion.
2755 static __rte_always_inline enum mlx5_txcmp_code
2756 mlx5_tx_burst_empw_inline(struct mlx5_txq_data *__rte_restrict txq,
2757 struct rte_mbuf **__rte_restrict pkts,
2758 unsigned int pkts_n,
2759 struct mlx5_txq_local *__rte_restrict loc,
2763 * The subroutine is part of mlx5_tx_burst_single() and sends
2764 * single-segment packets with the eMPW opcode with data inlining.
2766 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
2767 MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
2768 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
2769 MLX5_ASSERT(pkts_n > loc->pkts_sent);
2770 pkts += loc->pkts_sent + 1;
2771 pkts_n -= loc->pkts_sent;
2773 struct mlx5_wqe_dseg *__rte_restrict dseg;
2774 struct mlx5_wqe *__rte_restrict wqem;
2775 enum mlx5_txcmp_code ret;
2776 unsigned int room, part, nlim;
2777 unsigned int slen = 0;
2779 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
2780 if (MLX5_TXOFF_CONFIG(TXPP)) {
2781 enum mlx5_txcmp_code wret;
2783 /* Generate WAIT for scheduling if requested. */
2784 wret = mlx5_tx_schedule_send(txq, loc, olx);
2785 if (wret == MLX5_TXCMP_CODE_EXIT)
2786 return MLX5_TXCMP_CODE_EXIT;
2787 if (wret == MLX5_TXCMP_CODE_ERROR)
2788 return MLX5_TXCMP_CODE_ERROR;
2791 * Limit the number of packets in one WQE
2792 * to improve CQE generation latency.
2794 nlim = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
2795 MLX5_MPW_INLINE_MAX_PACKETS :
2796 MLX5_EMPW_MAX_PACKETS);
2797 /* Check whether we have the minimal amount of WQEs. */
2798 if (unlikely(loc->wqe_free <
2799 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
2800 return MLX5_TXCMP_CODE_EXIT;
2801 if (likely(pkts_n > 1))
2802 rte_prefetch0(*pkts);
2803 wqem = txq->wqes + (txq->wqe_ci & txq->wqe_m);
2805 * Build eMPW title WQEBB:
2806 * - Control Segment, eMPW opcode, zero DS
2807 * - Ethernet Segment, no inline
2809 mlx5_tx_cseg_init(txq, loc, wqem, 0,
2810 MLX5_OPCODE_ENHANCED_MPSW, olx);
2811 mlx5_tx_eseg_none(txq, loc, wqem,
2812 olx & ~MLX5_TXOFF_CONFIG_VLAN);
2813 dseg = &wqem->dseg[0];
2814 /* Store the packet length for legacy MPW. */
2815 if (MLX5_TXOFF_CONFIG(MPW))
2816 wqem->eseg.mss = rte_cpu_to_be_16
2817 (rte_pktmbuf_data_len(loc->mbuf));
2818 room = RTE_MIN(MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE,
2819 loc->wqe_free) * MLX5_WQE_SIZE -
2820 MLX5_WQE_CSEG_SIZE -
2822 /* Limit the room for legacy MPW sessions for performance. */
2823 if (MLX5_TXOFF_CONFIG(MPW))
2824 room = RTE_MIN(room,
2825 RTE_MAX(txq->inlen_empw +
2826 sizeof(dseg->bcount) +
2827 (MLX5_TXOFF_CONFIG(VLAN) ?
2828 sizeof(struct rte_vlan_hdr) : 0),
2829 MLX5_MPW_INLINE_MAX_PACKETS *
2830 MLX5_WQE_DSEG_SIZE));
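/*
 * A rough picture of the budget computed above (illustrative values):
 * with 64-byte WQEBBs and, say, 8 free WQEBBs not exceeding the maximal
 * WQE size, the session may spread inline data and pointer Data Segments
 * over about 8 * 64 bytes minus the 16-byte Control and Ethernet
 * Segments. For legacy MPW the room is additionally clamped so a single
 * session does not grow far past the configured inline limit.
 */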
2831 /* Build WQEs while we have space, packets and resources. */
2834 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
2835 uint8_t *dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
2838 MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
2839 MLX5_ASSERT((room % MLX5_WQE_DSEG_SIZE) == 0);
2840 MLX5_ASSERT((uintptr_t)dseg < (uintptr_t)txq->wqes_end);
2842 * Some Tx offloads may cause an error if the packet is not
2843 * long enough, check against the assumed minimal length.
2845 if (unlikely(dlen <= MLX5_ESEG_MIN_INLINE_SIZE)) {
2847 if (unlikely(!part))
2848 return MLX5_TXCMP_CODE_ERROR;
2850 * We have some successfully built
2851 * packet Data Segments to send.
2853 mlx5_tx_idone_empw(txq, loc, part,
2855 return MLX5_TXCMP_CODE_ERROR;
2857 /* Inline or not inline - that's the Question. */
2858 if (dlen > txq->inlen_empw ||
2859 loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE)
2861 if (MLX5_TXOFF_CONFIG(MPW)) {
2862 if (dlen > txq->inlen_send)
2866 /* Open new inline MPW session. */
2867 tlen += sizeof(dseg->bcount);
2868 dseg->bcount = RTE_BE32(0);
2870 (dseg, sizeof(dseg->bcount));
2873 * Do not mix pointer and inline descriptors
2874 * within a legacy MPW session.
2876 if (wqem->dseg[0].bcount)
2880 tlen = sizeof(dseg->bcount) + dlen;
2882 /* Inline entire packet, optional VLAN insertion. */
2883 if (MLX5_TXOFF_CONFIG(VLAN) &&
2884 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
2886 * The packet length must have been checked in
2887 * mlx5_tx_able_to_empw() and the packet is
2888 * guaranteed to fit into the inline length.
2891 sizeof(struct rte_vlan_hdr)) <=
2893 tlen += sizeof(struct rte_vlan_hdr);
2896 dseg = mlx5_tx_dseg_vlan(txq, loc, dseg,
2898 #ifdef MLX5_PMD_SOFT_COUNTERS
2899 /* Update sent data bytes counter. */
2900 slen += sizeof(struct rte_vlan_hdr);
2905 dseg = mlx5_tx_dseg_empw(txq, loc, dseg,
2908 if (!MLX5_TXOFF_CONFIG(MPW))
2909 tlen = RTE_ALIGN(tlen, MLX5_WSEG_SIZE);
2910 MLX5_ASSERT(room >= tlen);
2913 * Packet data are completely inlined,
2914 * we can try to free the packet.
2916 if (likely(loc->pkts_sent == loc->mbuf_free)) {
2918 * All the packets from the beginning of the burst
2919 * are inlined, so we can free the mbufs directly
2920 * from the original array on tx_burst exit.
2926 * In order not to call rte_pktmbuf_free_seg() here,
2927 * in the innermost loop (which might be very
2928 * expensive), we just save the mbuf in elts.
2930 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
2935 * Do not mix pointer and inline descriptors
2936 * within a legacy MPW session.
2938 if (MLX5_TXOFF_CONFIG(MPW) &&
2940 wqem->dseg[0].bcount == RTE_BE32(0))
2943 * Non-inlinable VLAN packets are
2944 * processed outside of this routine.
2946 MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
2947 if (MLX5_TXOFF_CONFIG(VLAN))
2948 MLX5_ASSERT(!(loc->mbuf->ol_flags &
2950 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
2951 /* We have to store the mbuf in elts. */
2952 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
2954 room -= MLX5_WQE_DSEG_SIZE;
2955 /* Ring buffer wraparound is checked at the loop end. */
2958 #ifdef MLX5_PMD_SOFT_COUNTERS
2959 /* Update sent data bytes counter. */
2964 if (unlikely(!pkts_n || !loc->elts_free)) {
2966 * We have no resources/packets to
2967 * continue building descriptors.
2970 mlx5_tx_idone_empw(txq, loc, part,
2972 return MLX5_TXCMP_CODE_EXIT;
2974 loc->mbuf = *pkts++;
2975 if (likely(pkts_n > 1))
2976 rte_prefetch0(*pkts);
2977 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
2979 * Unroll the completion code to avoid
2980 * returning a variable value - it results in
2981 * unoptimized subsequent checks in the caller.
2983 if (ret == MLX5_TXCMP_CODE_MULTI) {
2985 mlx5_tx_idone_empw(txq, loc, part,
2987 if (unlikely(!loc->elts_free ||
2989 return MLX5_TXCMP_CODE_EXIT;
2990 return MLX5_TXCMP_CODE_MULTI;
2992 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
2993 if (ret == MLX5_TXCMP_CODE_TSO) {
2995 mlx5_tx_idone_empw(txq, loc, part,
2997 if (unlikely(!loc->elts_free ||
2999 return MLX5_TXCMP_CODE_EXIT;
3000 return MLX5_TXCMP_CODE_TSO;
3002 if (ret == MLX5_TXCMP_CODE_SINGLE) {
3004 mlx5_tx_idone_empw(txq, loc, part,
3006 if (unlikely(!loc->elts_free ||
3008 return MLX5_TXCMP_CODE_EXIT;
3009 return MLX5_TXCMP_CODE_SINGLE;
3011 if (ret != MLX5_TXCMP_CODE_EMPW) {
3014 mlx5_tx_idone_empw(txq, loc, part,
3016 return MLX5_TXCMP_CODE_ERROR;
3018 /* Check if we have minimal room left. */
3020 if (unlikely(!nlim || room < MLX5_WQE_DSEG_SIZE))
3023 * Check whether the packet parameters coincide
3024 * within the assumed eMPW batch:
3025 * - checksum settings
3027 * - software parser settings
3028 * - packet lengths (legacy MPW only)
3029 * - scheduling is not required
3031 if (!mlx5_tx_match_empw(txq, &wqem->eseg,
3034 /* Packet attributes match, continue the same eMPW. */
3035 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3036 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3039 * We get here to close an existing eMPW
3040 * session and start a new one.
3042 MLX5_ASSERT(pkts_n);
3044 if (unlikely(!part))
3045 return MLX5_TXCMP_CODE_EXIT;
3046 mlx5_tx_idone_empw(txq, loc, part, slen, wqem, olx);
3047 if (unlikely(!loc->elts_free ||
3049 return MLX5_TXCMP_CODE_EXIT;
3050 /* Continue the loop with a new eMPW session. */
3056 * The routine sends packets with ordinary MLX5_OPCODE_SEND.
3057 * Data inlining and VLAN insertion are supported.
3059 static __rte_always_inline enum mlx5_txcmp_code
3060 mlx5_tx_burst_single_send(struct mlx5_txq_data *__rte_restrict txq,
3061 struct rte_mbuf **__rte_restrict pkts,
3062 unsigned int pkts_n,
3063 struct mlx5_txq_local *__rte_restrict loc,
3067 * The subroutine is part of mlx5_tx_burst_single()
3068 * and sends single-segment packets with the SEND opcode.
3070 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3071 MLX5_ASSERT(pkts_n > loc->pkts_sent);
3072 pkts += loc->pkts_sent + 1;
3073 pkts_n -= loc->pkts_sent;
3075 struct mlx5_wqe *__rte_restrict wqe;
3076 enum mlx5_txcmp_code ret;
3078 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
3079 if (MLX5_TXOFF_CONFIG(TXPP)) {
3080 enum mlx5_txcmp_code wret;
3082 /* Generate WAIT for scheduling if requested. */
3083 wret = mlx5_tx_schedule_send(txq, loc, olx);
3084 if (wret == MLX5_TXCMP_CODE_EXIT)
3085 return MLX5_TXCMP_CODE_EXIT;
3086 if (wret == MLX5_TXCMP_CODE_ERROR)
3087 return MLX5_TXCMP_CODE_ERROR;
3089 if (MLX5_TXOFF_CONFIG(INLINE)) {
3090 unsigned int inlen, vlan = 0;
3092 inlen = rte_pktmbuf_data_len(loc->mbuf);
3093 if (MLX5_TXOFF_CONFIG(VLAN) &&
3094 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
3095 vlan = sizeof(struct rte_vlan_hdr);
3099 * If inlining is enabled at configuration time,
3100 * the limit must not be less than the minimal size.
3101 * Otherwise we would need an extra check on the data
3102 * size to avoid crashes due to length overflow.
3104 MLX5_ASSERT(txq->inlen_send >=
3105 MLX5_ESEG_MIN_INLINE_SIZE);
3106 if (inlen <= txq->inlen_send) {
3107 unsigned int seg_n, wqe_n;
3109 rte_prefetch0(rte_pktmbuf_mtod
3110 (loc->mbuf, uint8_t *));
3111 /* Check against minimal length. */
3112 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
3113 return MLX5_TXCMP_CODE_ERROR;
3114 if (loc->mbuf->ol_flags &
3115 PKT_TX_DYNF_NOINLINE) {
3117 * The hint flag not to inline packet
3118 * data is set. Check whether we can
3121 if ((!MLX5_TXOFF_CONFIG(EMPW) &&
3123 (MLX5_TXOFF_CONFIG(MPW) &&
3125 if (inlen <= txq->inlen_send)
3128 * The hardware requires the
3129 * minimal inline data header.
3131 goto single_min_inline;
3133 if (MLX5_TXOFF_CONFIG(VLAN) &&
3134 vlan && !txq->vlan_en) {
3136 * We must insert the VLAN tag
3137 * by software means.
3139 goto single_part_inline;
3141 goto single_no_inline;
3145 * Completely inlined packet data WQE:
3146 * - Control Segment, SEND opcode
3147 * - Ethernet Segment, no VLAN insertion
3148 * - Data inlined, VLAN optionally inserted
3149 * - Alignment to MLX5_WSEG_SIZE
3150 * Have to estimate amount of WQEBBs
3152 seg_n = (inlen + 3 * MLX5_WSEG_SIZE -
3153 MLX5_ESEG_MIN_INLINE_SIZE +
3154 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
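/*
 * A worked example of the estimate above, assuming the typical 16-byte
 * MLX5_WSEG_SIZE and 18-byte MLX5_ESEG_MIN_INLINE_SIZE (hypothetical
 * values, for illustration only):
 *
 *   inlen = 100                       fully inlined packet incl. VLAN
 *   seg_n = (100 + 48 - 18 + 15) / 16 = 9
 *   wqe_n = (9 + 3) / 4 = 3           three WQEBBs must be free
 */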
3155 /* Check if there are enough WQEBBs. */
3156 wqe_n = (seg_n + 3) / 4;
3157 if (wqe_n > loc->wqe_free)
3158 return MLX5_TXCMP_CODE_EXIT;
3159 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3160 loc->wqe_last = wqe;
3161 mlx5_tx_cseg_init(txq, loc, wqe, seg_n,
3162 MLX5_OPCODE_SEND, olx);
3163 mlx5_tx_eseg_data(txq, loc, wqe,
3164 vlan, inlen, 0, olx);
3165 txq->wqe_ci += wqe_n;
3166 loc->wqe_free -= wqe_n;
3168 * Packet data are completely inlined,
3169 * free the packet immediately.
3171 rte_pktmbuf_free_seg(loc->mbuf);
3172 } else if ((!MLX5_TXOFF_CONFIG(EMPW) ||
3173 MLX5_TXOFF_CONFIG(MPW)) &&
3176 * If minimal inlining is requested, the eMPW
3177 * feature should be disabled because the data is
3178 * inlined into the Ethernet Segment, which cannot
3179 * contain inlined data for eMPW since this
3180 * segment is shared by all packets.
3182 struct mlx5_wqe_dseg *__rte_restrict dseg;
3187 * The inline-mode settings require
3188 * inlining the specified amount of
3189 * data bytes into the Ethernet Segment.
3190 * We should check the free space in the
3191 * WQE ring buffer to inline only partially.
3194 MLX5_ASSERT(txq->inlen_send >= txq->inlen_mode);
3195 MLX5_ASSERT(inlen > txq->inlen_mode);
3196 MLX5_ASSERT(txq->inlen_mode >=
3197 MLX5_ESEG_MIN_INLINE_SIZE);
3199 * Check whether there are enough free WQEBBs:
3201 * - Ethernet Segment
3202 * - First Segment of inlined Ethernet data
3203 * - ... data continued ...
3204 * - Finishing Data Segment of pointer type
3206 ds = (MLX5_WQE_CSEG_SIZE +
3207 MLX5_WQE_ESEG_SIZE +
3208 MLX5_WQE_DSEG_SIZE +
3210 MLX5_ESEG_MIN_INLINE_SIZE +
3211 MLX5_WQE_DSEG_SIZE +
3212 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3213 if (loc->wqe_free < ((ds + 3) / 4))
3214 return MLX5_TXCMP_CODE_EXIT;
3216 * Build the ordinary SEND WQE:
3218 * - Ethernet Segment, inline inlen_mode bytes
3219 * - Data Segment of pointer type
3221 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3222 loc->wqe_last = wqe;
3223 mlx5_tx_cseg_init(txq, loc, wqe, ds,
3224 MLX5_OPCODE_SEND, olx);
3225 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan,
3228 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
3229 txq->inlen_mode - vlan;
3230 inlen -= txq->inlen_mode;
3231 mlx5_tx_dseg_ptr(txq, loc, dseg,
3234 * WQE is built, update the loop parameters
3235 * and go to the next packet.
3237 txq->wqe_ci += (ds + 3) / 4;
3238 loc->wqe_free -= (ds + 3) / 4;
3239 /* We have to store the mbuf in elts. */
3240 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
3241 txq->elts[txq->elts_head++ & txq->elts_m] =
3249 * Partially inlined packet data WQE, we have
3250 * some space in the title WQEBB and can fill it
3251 * with some packet data. It takes one WQEBB,
3252 * which is available - no extra space check:
3253 * - Control Segment, SEND opcode
3254 * - Ethernet Segment, no VLAN insertion
3255 * - MLX5_ESEG_MIN_INLINE_SIZE bytes of Data
3256 * - Data Segment, pointer type
3258 * We also get here if VLAN insertion is not
3259 * supported by HW and the inline is enabled.
3262 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3263 loc->wqe_last = wqe;
3264 mlx5_tx_cseg_init(txq, loc, wqe, 4,
3265 MLX5_OPCODE_SEND, olx);
3266 mlx5_tx_eseg_dmin(txq, loc, wqe, vlan, olx);
3267 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
3268 MLX5_ESEG_MIN_INLINE_SIZE - vlan;
3270 * The length check is performed above, by
3271 * comparing with txq->inlen_send. We should
3272 * not get an overflow here.
3274 MLX5_ASSERT(inlen > MLX5_ESEG_MIN_INLINE_SIZE);
3275 dlen = inlen - MLX5_ESEG_MIN_INLINE_SIZE;
3276 mlx5_tx_dseg_ptr(txq, loc, &wqe->dseg[1],
3280 /* We have to store the mbuf in elts. */
3281 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
3282 txq->elts[txq->elts_head++ & txq->elts_m] =
3286 #ifdef MLX5_PMD_SOFT_COUNTERS
3287 /* Update sent data bytes counter. */
3288 txq->stats.obytes += vlan +
3289 rte_pktmbuf_data_len(loc->mbuf);
3293 * No inline at all; it means that saving CPU cycles
3294 * is prioritized at configuration time, so we should not
3295 * copy any packet data to the WQE.
3297 * SEND WQE, one WQEBB:
3298 * - Control Segment, SEND opcode
3299 * - Ethernet Segment, optional VLAN, no inline
3300 * - Data Segment, pointer type
3303 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3304 loc->wqe_last = wqe;
3305 mlx5_tx_cseg_init(txq, loc, wqe, 3,
3306 MLX5_OPCODE_SEND, olx);
3307 mlx5_tx_eseg_none(txq, loc, wqe, olx);
3309 (txq, loc, &wqe->dseg[0],
3310 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3311 rte_pktmbuf_data_len(loc->mbuf), olx);
3315 * We should not store the mbuf pointer in elts
3316 * if no inlining is configured; this is done
3317 * by the calling routine in a batch copy.
3319 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
3321 #ifdef MLX5_PMD_SOFT_COUNTERS
3322 /* Update sent data bytes counter. */
3323 txq->stats.obytes += rte_pktmbuf_data_len(loc->mbuf);
3324 if (MLX5_TXOFF_CONFIG(VLAN) &&
3325 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3326 txq->stats.obytes +=
3327 sizeof(struct rte_vlan_hdr);
3332 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3333 return MLX5_TXCMP_CODE_EXIT;
3334 loc->mbuf = *pkts++;
3336 rte_prefetch0(*pkts);
3337 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
3338 if (unlikely(ret != MLX5_TXCMP_CODE_SINGLE))
3344 static __rte_always_inline enum mlx5_txcmp_code
3345 mlx5_tx_burst_single(struct mlx5_txq_data *__rte_restrict txq,
3346 struct rte_mbuf **__rte_restrict pkts,
3347 unsigned int pkts_n,
3348 struct mlx5_txq_local *__rte_restrict loc,
3351 enum mlx5_txcmp_code ret;
3353 ret = mlx5_tx_able_to_empw(txq, loc, olx, false);
3354 if (ret == MLX5_TXCMP_CODE_SINGLE)
3356 MLX5_ASSERT(ret == MLX5_TXCMP_CODE_EMPW);
3358 /* Optimize for inline/no inline eMPW send. */
3359 ret = (MLX5_TXOFF_CONFIG(INLINE)) ?
3360 mlx5_tx_burst_empw_inline
3361 (txq, pkts, pkts_n, loc, olx) :
3362 mlx5_tx_burst_empw_simple
3363 (txq, pkts, pkts_n, loc, olx);
3364 if (ret != MLX5_TXCMP_CODE_SINGLE)
3366 /* The resources to send one packet should remain. */
3367 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3369 ret = mlx5_tx_burst_single_send(txq, pkts, pkts_n, loc, olx);
3370 MLX5_ASSERT(ret != MLX5_TXCMP_CODE_SINGLE);
3371 if (ret != MLX5_TXCMP_CODE_EMPW)
3373 /* The resources to send one packet should remain. */
3374 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3379 * DPDK Tx callback template. This is the configured template used to generate
3380 * routines optimized for the specified offload setup.
3381 * One of these generated functions is chosen at SQ configuration time.
3384 * Generic pointer to TX queue structure.
3386 * Packets to transmit.
3388 * Number of packets in array.
3390 * Configured offloads mask, representing the bits of MLX5_TXOFF_CONFIG_xxx
3391 * values. Should be static to take compile-time static configuration
3395 * Number of packets successfully transmitted (<= pkts_n).
3397 static __rte_always_inline uint16_t
3398 mlx5_tx_burst_tmpl(struct mlx5_txq_data *__rte_restrict txq,
3399 struct rte_mbuf **__rte_restrict pkts,
3403 struct mlx5_txq_local loc;
3404 enum mlx5_txcmp_code ret;
3407 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
3408 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
3409 if (unlikely(!pkts_n))
3411 if (MLX5_TXOFF_CONFIG(INLINE))
3415 loc.wqe_last = NULL;
3418 loc.pkts_loop = loc.pkts_sent;
3420 * Check if there are some CQEs and, if any:
3421 * - process the encountered errors
3422 * - process the completed WQEs
3423 * - free the related mbufs
3424 * - doorbell the NIC about processed CQEs
3426 rte_prefetch0(*(pkts + loc.pkts_sent));
3427 mlx5_tx_handle_completion(txq, olx);
3429 * Calculate the number of available resources - elts and WQEs.
3430 * There are two different possible scenarios:
3431 * - no data inlining into WQEs, one WQEBB may contain up to
3432 * four packets; in this case elts become the scarce resource
3433 * - data inlining into WQEs, one packet may require multiple
3434 * WQEBBs; the WQEs become the limiting factor.
3436 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
3437 loc.elts_free = txq->elts_s -
3438 (uint16_t)(txq->elts_head - txq->elts_tail);
3439 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
3440 loc.wqe_free = txq->wqe_s -
3441 (uint16_t)(txq->wqe_ci - txq->wqe_pi);
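/*
 * The subtractions above rely on 16-bit modular arithmetic, so they
 * stay correct when the ring indexes wrap around. For example
 * (hypothetical values): elts_head = 0x0005 and elts_tail = 0xfff0
 * give (uint16_t)(0x0005 - 0xfff0) = 21 entries in flight, hence
 * elts_free = elts_s - 21.
 */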
3442 if (unlikely(!loc.elts_free || !loc.wqe_free))
3446 * Fetch the packet from the array. Usually this is the first
3447 * packet in a series of multi/single-segment packets.
3449 loc.mbuf = *(pkts + loc.pkts_sent);
3450 /* Dedicated branch for multi-segment packets. */
3451 if (MLX5_TXOFF_CONFIG(MULTI) &&
3452 unlikely(NB_SEGS(loc.mbuf) > 1)) {
3454 * Multi-segment packet encountered.
3455 * Hardware is able to process it only
3456 * with SEND/TSO opcodes, one packet
3457 * per WQE, so do it in a dedicated routine.
3460 MLX5_ASSERT(loc.pkts_sent >= loc.pkts_copy);
3461 part = loc.pkts_sent - loc.pkts_copy;
3462 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
3464 * There are some single-segment mbufs not
3465 * stored in elts. The mbufs must be in the
3466 * same order as the WQEs, so we must copy the
3467 * mbufs to elts here, before the mbufs of the
3468 * coming multi-segment packet are appended.
3470 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy,
3472 loc.pkts_copy = loc.pkts_sent;
3474 MLX5_ASSERT(pkts_n > loc.pkts_sent);
3475 ret = mlx5_tx_burst_mseg(txq, pkts, pkts_n, &loc, olx);
3476 if (!MLX5_TXOFF_CONFIG(INLINE))
3477 loc.pkts_copy = loc.pkts_sent;
3479 * These return code checks are supposed
3480 * to be optimized out due to routine inlining.
3482 if (ret == MLX5_TXCMP_CODE_EXIT) {
3484 * The routine returns this code when
3485 * all packets are sent or there are not
3486 * enough resources to complete the request.
3490 if (ret == MLX5_TXCMP_CODE_ERROR) {
3492 * The routine returns this code when some error
3493 * occurred in the format of the incoming packets.
3495 txq->stats.oerrors++;
3498 if (ret == MLX5_TXCMP_CODE_SINGLE) {
3500 * A single-segment packet was encountered
3501 * in the array, try to send it in the
3502 * best optimized way, possibly engaging eMPW.
3504 goto enter_send_single;
3506 if (MLX5_TXOFF_CONFIG(TSO) &&
3507 ret == MLX5_TXCMP_CODE_TSO) {
3509 * The single-segment TSO packet was
3510 * encountered in the array.
3512 goto enter_send_tso;
3514 /* We must not get here. Something went wrong. */
3516 txq->stats.oerrors++;
3519 /* Dedicated branch for single-segment TSO packets. */
3520 if (MLX5_TXOFF_CONFIG(TSO) &&
3521 unlikely(loc.mbuf->ol_flags & PKT_TX_TCP_SEG)) {
3523 * TSO might require a special way of inlining
3524 * (dedicated parameters) and is sent with the
3525 * MLX5_OPCODE_TSO opcode only, provide this
3526 * in a dedicated branch.
3529 MLX5_ASSERT(NB_SEGS(loc.mbuf) == 1);
3530 MLX5_ASSERT(pkts_n > loc.pkts_sent);
3531 ret = mlx5_tx_burst_tso(txq, pkts, pkts_n, &loc, olx);
3533 * These return code checks are supposed
3534 * to be optimized out due to routine inlining.
3536 if (ret == MLX5_TXCMP_CODE_EXIT)
3538 if (ret == MLX5_TXCMP_CODE_ERROR) {
3539 txq->stats.oerrors++;
3542 if (ret == MLX5_TXCMP_CODE_SINGLE)
3543 goto enter_send_single;
3544 if (MLX5_TXOFF_CONFIG(MULTI) &&
3545 ret == MLX5_TXCMP_CODE_MULTI) {
3547 * The multi-segment packet was
3548 * encountered in the array.
3550 goto enter_send_multi;
3552 /* We must not get here. Something went wrong. */
3554 txq->stats.oerrors++;
3558 * The dedicated branch for single-segment packets
3559 * without TSO. Often these can be sent using
3560 * MLX5_OPCODE_EMPW with multiple packets in one WQE.
3561 * The routine builds the WQEs until it encounters
3562 * a TSO or multi-segment packet (in case these
3563 * offloads are requested at SQ configuration time).
3566 MLX5_ASSERT(pkts_n > loc.pkts_sent);
3567 ret = mlx5_tx_burst_single(txq, pkts, pkts_n, &loc, olx);
3569 * These return code checks are supposed
3570 * to be optimized out due to routine inlining.
3572 if (ret == MLX5_TXCMP_CODE_EXIT)
3574 if (ret == MLX5_TXCMP_CODE_ERROR) {
3575 txq->stats.oerrors++;
3578 if (MLX5_TXOFF_CONFIG(MULTI) &&
3579 ret == MLX5_TXCMP_CODE_MULTI) {
3581 * The multi-segment packet was
3582 * encountered in the array.
3584 goto enter_send_multi;
3586 if (MLX5_TXOFF_CONFIG(TSO) &&
3587 ret == MLX5_TXCMP_CODE_TSO) {
3589 * The single-segment TSO packet was
3590 * encountered in the array.
3592 goto enter_send_tso;
3594 /* We must not get here. Something went wrong. */
3596 txq->stats.oerrors++;
3600 * The main Tx loop is completed, do the rest:
3601 * - set completion request if thresholds are reached
3602 * - doorbell the hardware
3603 * - copy the rest of mbufs to elts (if any)
3605 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE) ||
3606 loc.pkts_sent >= loc.pkts_copy);
3607 /* Take a shortcut if nothing is sent. */
3608 if (unlikely(loc.pkts_sent == loc.pkts_loop))
3610 /* Request CQE generation if limits are reached. */
3611 mlx5_tx_request_completion(txq, &loc, olx);
3613 * Ring the QP doorbell immediately after WQE building completion
3614 * to improve latencies. The pure software-related data treatment
3615 * can be completed after the doorbell. Tx CQEs for this SQ are
3616 * processed in this thread only by polling.
3618 * The rdma core library can map the doorbell register in two ways,
3619 * depending on the environment variable "MLX5_SHUT_UP_BF":
3621 * - as regular cached memory, when the variable is either missing or
3622 * set to zero. This type of mapping may cause significant
3623 * doorbell register write latency and requires an explicit memory
3624 * write barrier to mitigate this issue and prevent write combining.
3626 * - as non-cached memory, when the variable is present and set to a
3627 * non-zero value. This type of mapping may cause a performance impact
3628 * under heavy load conditions, but the explicit write memory barrier
3629 * is not required and this may improve core performance.
3631 * - the legacy behaviour (prior to the 19.08 release) was to use some
3632 * heuristics to decide whether the write memory barrier should
3633 * be performed. This behaviour is supported by specifying
3634 * tx_db_nc=2; the write barrier is skipped if the application provides
3635 * the full recommended burst of packets, assuming the next
3636 * packets are coming and the write barrier will be issued on
3637 * the next burst (after descriptor writing, at least).
3639 mlx5_tx_dbrec_cond_wmb(txq, loc.wqe_last, !txq->db_nc &&
3640 (!txq->db_heu || pkts_n % MLX5_TX_DEFAULT_BURST));
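/*
 * Expanded, the barrier condition passed above reads roughly as:
 *
 *   wmb = (txq->db_nc == 0) &&
 *         (txq->db_heu == 0 || (pkts_n % MLX5_TX_DEFAULT_BURST) != 0);
 *
 * i.e. the explicit barrier is requested only for the cached doorbell
 * mapping, and with the tx_db_nc=2 heuristic it is skipped when the
 * application provided a full recommended burst.
 */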
3641 /* Not all of the mbufs may be stored into elts yet. */
3642 part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent - loc.pkts_copy;
3643 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
3645 * There are some single-segment mbufs not stored in elts.
3646 * This can only happen if the last packet was single-segment.
3647 * The copying is gathered into one place because it is
3648 * a good opportunity to optimize it with SIMD.
3649 * Unfortunately, if inlining is enabled, gaps in the pointer
3650 * array may appear due to early freeing of the inlined mbufs.
3652 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy, part, olx);
3653 loc.pkts_copy = loc.pkts_sent;
3655 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
3656 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
3657 if (pkts_n > loc.pkts_sent) {
3659 * If the burst size is large there might not be enough CQEs
3660 * fetched from the completion queue and not enough resources
3661 * freed to send all the packets.
3666 #ifdef MLX5_PMD_SOFT_COUNTERS
3667 /* Increment sent packets counter. */
3668 txq->stats.opackets += loc.pkts_sent;
3670 if (MLX5_TXOFF_CONFIG(INLINE) && loc.mbuf_free)
3671 __mlx5_tx_free_mbuf(txq, pkts, loc.mbuf_free, olx);
3672 return loc.pkts_sent;
3675 #endif /* RTE_PMD_MLX5_TX_H_ */