1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2021 6WIND S.A.
3 * Copyright 2021 Mellanox Technologies, Ltd
6 #ifndef RTE_PMD_MLX5_TX_H_
7 #define RTE_PMD_MLX5_TX_H_
10 #include <sys/queue.h>
13 #include <rte_mempool.h>
14 #include <rte_common.h>
15 #include <rte_spinlock.h>
17 #include <mlx5_common_mr.h>
20 #include "mlx5_autoconf.h"
22 /* TX burst subroutines return codes. */
23 enum mlx5_txcmp_code {
24 MLX5_TXCMP_CODE_EXIT = 0,
25 MLX5_TXCMP_CODE_ERROR,
26 MLX5_TXCMP_CODE_SINGLE,
27 MLX5_TXCMP_CODE_MULTI,
33 * These defines are used to configure the Tx burst routine option set
34 * supported at compile time. Options that are not specified are optimized
35 * out, since the corresponding 'if' conditions can be evaluated at compile time.
36 * Offloads with a bigger runtime check overhead (requiring more CPU cycles
37 * to skip) should have a bigger index - this is needed to select a better
38 * matching routine when there is no exact match and some offloads are not actually requested.
41 #define MLX5_TXOFF_CONFIG_MULTI (1u << 0) /* Multi-segment packets.*/
42 #define MLX5_TXOFF_CONFIG_TSO (1u << 1) /* TCP send offload supported.*/
43 #define MLX5_TXOFF_CONFIG_SWP (1u << 2) /* Tunnels/SW Parser offloads.*/
44 #define MLX5_TXOFF_CONFIG_CSUM (1u << 3) /* Check Sums offloaded. */
45 #define MLX5_TXOFF_CONFIG_INLINE (1u << 4) /* Data inlining supported. */
46 #define MLX5_TXOFF_CONFIG_VLAN (1u << 5) /* VLAN insertion supported.*/
47 #define MLX5_TXOFF_CONFIG_METADATA (1u << 6) /* Flow metadata. */
48 #define MLX5_TXOFF_CONFIG_EMPW (1u << 8) /* Enhanced MPW supported.*/
49 #define MLX5_TXOFF_CONFIG_MPW (1u << 9) /* Legacy MPW supported.*/
50 #define MLX5_TXOFF_CONFIG_TXPP (1u << 10) /* Scheduling on timestamp.*/
52 /* The most common offload groups. */
53 #define MLX5_TXOFF_CONFIG_NONE 0
54 #define MLX5_TXOFF_CONFIG_FULL (MLX5_TXOFF_CONFIG_MULTI | \
55 MLX5_TXOFF_CONFIG_TSO | \
56 MLX5_TXOFF_CONFIG_SWP | \
57 MLX5_TXOFF_CONFIG_CSUM | \
58 MLX5_TXOFF_CONFIG_INLINE | \
59 MLX5_TXOFF_CONFIG_VLAN | \
60 MLX5_TXOFF_CONFIG_METADATA)
62 #define MLX5_TXOFF_CONFIG(mask) (olx & MLX5_TXOFF_CONFIG_##mask)
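/*
 * Illustrative sketch (not part of the driver): every specialized burst
 * routine is instantiated with a compile-time constant 'olx' mask, so a
 * check like the one below is resolved by the compiler and the disabled
 * branch is removed entirely:
 *
 *	if (MLX5_TXOFF_CONFIG(TSO) &&
 *	    loc->mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
 *		... build the TSO WQE here ...
 *
 * When the routine is generated without MLX5_TXOFF_CONFIG_TSO in 'olx',
 * the whole branch above is optimized out.
 */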
64 #define MLX5_TXOFF_PRE_DECL(func) \
65 uint16_t mlx5_tx_burst_##func(void *txq, \
66 struct rte_mbuf **pkts, \
69 #define MLX5_TXOFF_DECL(func, olx) \
70 uint16_t mlx5_tx_burst_##func(void *txq, \
71 struct rte_mbuf **pkts, \
74 return mlx5_tx_burst_tmpl((struct mlx5_txq_data *)txq, \
75 pkts, pkts_n, (olx)); \
78 /* Mbuf dynamic flag offset for inline. */
79 extern uint64_t rte_net_mlx5_dynf_inline_mask;
80 #define RTE_MBUF_F_TX_DYNF_NOINLINE rte_net_mlx5_dynf_inline_mask
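/*
 * Hedged usage sketch (assumption, not taken from this file): an application
 * that wants to prevent inlining of a particular packet sets this dynamic
 * flag in the mbuf offload flags before calling the Tx burst function:
 *
 *	mbuf->ol_flags |= RTE_MBUF_F_TX_DYNF_NOINLINE;
 *
 * The burst routines test this bit (see mlx5_tx_mseg_memcpy() and
 * mlx5_tx_packet_multi_inline() below) and avoid inlining data from such
 * packets.
 */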
82 extern uint32_t mlx5_ptype_table[] __rte_cache_aligned;
83 extern uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned;
84 extern uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned;
86 struct mlx5_txq_stats {
87 #ifdef MLX5_PMD_SOFT_COUNTERS
88 uint64_t opackets; /**< Total of successfully sent packets. */
89 uint64_t obytes; /**< Total of successfully sent bytes. */
91 uint64_t oerrors; /**< Total number of failed transmitted packets. */
94 /* TX queue send local data. */
96 struct mlx5_txq_local {
97 struct mlx5_wqe *wqe_last; /* last sent WQE pointer. */
98 struct rte_mbuf *mbuf; /* first mbuf to process. */
99 uint16_t pkts_copy; /* packets copied to elts. */
100 uint16_t pkts_sent; /* packets sent. */
101 uint16_t pkts_loop; /* packets sent on loop entry. */
102 uint16_t elts_free; /* available elts remain. */
103 uint16_t wqe_free; /* available wqe remain. */
104 uint16_t mbuf_off; /* data offset in current mbuf. */
105 uint16_t mbuf_nseg; /* number of remaining mbuf segments. */
106 uint16_t mbuf_free; /* number of inline mbufs to free. */
109 /* TX queue descriptor. */
111 struct mlx5_txq_data {
112 uint16_t elts_head; /* Current counter in (*elts)[]. */
113 uint16_t elts_tail; /* Counter of first element awaiting completion. */
114 uint16_t elts_comp; /* elts index since last completion request. */
115 uint16_t elts_s; /* Number of mbuf elements. */
116 uint16_t elts_m; /* Mask for mbuf elements indices. */
117 /* Fields related to elts mbuf storage. */
118 uint16_t wqe_ci; /* Consumer index for work queue. */
119 uint16_t wqe_pi; /* Producer index for work queue. */
120 uint16_t wqe_s; /* Number of WQ elements. */
121 uint16_t wqe_m; /* Mask for WQ element indices. */
122 uint16_t wqe_comp; /* WQE index since last completion request. */
123 uint16_t wqe_thres; /* WQE threshold to request completion in CQ. */
124 /* WQ related fields. */
125 uint16_t cq_ci; /* Consumer index for completion queue. */
126 uint16_t cq_pi; /* Producer index for completion queue. */
127 uint16_t cqe_s; /* Number of CQ elements. */
128 uint16_t cqe_m; /* Mask for CQ indices. */
129 /* CQ related fields. */
130 uint16_t elts_n:4; /* elts[] length (in log2). */
131 uint16_t cqe_n:4; /* Number of CQ elements (in log2). */
132 uint16_t wqe_n:4; /* Number of WQ elements (in log2). */
133 uint16_t tso_en:1; /* When set hardware TSO is enabled. */
134 uint16_t tunnel_en:1;
135 /* When set, Tx offloads for tunneled packets are supported. */
136 uint16_t swp_en:1; /* Whether SW parser is enabled. */
137 uint16_t vlan_en:1; /* VLAN insertion in WQE is supported. */
138 uint16_t db_nc:1; /* Doorbell mapped to non-cached region. */
139 uint16_t db_heu:1; /* Doorbell heuristic write barrier. */
140 uint16_t fast_free:1; /* mbuf fast free on Tx is enabled. */
141 uint16_t inlen_send; /* Ordinary send data inline size. */
142 uint16_t inlen_empw; /* eMPW max packet size to inline. */
143 uint16_t inlen_mode; /* Minimal data length to inline. */
144 uint32_t qp_num_8s; /* QP number shifted by 8. */
145 uint64_t offloads; /* Offloads for Tx Queue. */
146 struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
147 struct mlx5_wqe *wqes; /* Work queue. */
148 struct mlx5_wqe *wqes_end; /* Work queue array limit. */
149 #ifdef RTE_LIBRTE_MLX5_DEBUG
150 uint32_t *fcqs; /* Free completion queue (debug extended). */
152 uint16_t *fcqs; /* Free completion queue. */
154 volatile struct mlx5_cqe *cqes; /* Completion queue. */
155 volatile uint32_t *qp_db; /* Work queue doorbell. */
156 volatile uint32_t *cq_db; /* Completion queue doorbell. */
157 uint16_t port_id; /* Port ID of device. */
158 uint16_t idx; /* Queue index. */
159 uint64_t ts_mask; /* Timestamp flag dynamic mask. */
160 int32_t ts_offset; /* Timestamp field dynamic offset. */
161 struct mlx5_dev_ctx_shared *sh; /* Shared context. */
162 struct mlx5_txq_stats stats; /* TX queue counters. */
164 rte_spinlock_t *uar_lock;
165 /* UAR access lock required for 32bit implementations */
167 struct rte_mbuf *elts[0];
168 /* Storage for queued packets, must be the last field. */
169 } __rte_cache_aligned;
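/*
 * Indexing note (derived from the code below): elts_s is a power of two and
 * elts_m == elts_s - 1, so the ring slot for any monotonic counter is
 * obtained by masking, e.g.:
 *
 *	txq->elts[txq->elts_head & txq->elts_m] = mbuf;
 *	used = (uint16_t)(txq->elts_head - txq->elts_tail);
 *
 * The head/tail counters are never wrapped explicitly - the uint16_t
 * arithmetic together with the mask handles the wraparound.
 */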
172 MLX5_TXQ_TYPE_STANDARD, /* Standard Tx queue. */
173 MLX5_TXQ_TYPE_HAIRPIN, /* Hairpin Tx queue. */
176 /* TX queue control descriptor. */
177 struct mlx5_txq_ctrl {
178 LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
179 uint32_t refcnt; /* Reference counter. */
180 unsigned int socket; /* CPU socket ID for allocations. */
181 enum mlx5_txq_type type; /* The txq ctrl type. */
182 unsigned int max_inline_data; /* Max inline data. */
183 unsigned int max_tso_header; /* Max TSO header size. */
184 struct mlx5_txq_obj *obj; /* Verbs/DevX queue object. */
185 struct mlx5_priv *priv; /* Back pointer to private data. */
186 off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */
187 void *bf_reg; /* BlueFlame register from Verbs. */
188 uint16_t dump_file_n; /* Number of dump files. */
189 struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */
190 uint32_t hairpin_status; /* Hairpin binding status. */
191 struct mlx5_txq_data txq; /* Data path structure. */
192 /* Must be the last field in the structure, contains elts[]. */
197 int mlx5_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id);
198 int mlx5_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id);
199 int mlx5_tx_queue_start_primary(struct rte_eth_dev *dev, uint16_t queue_id);
200 int mlx5_tx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t queue_id);
201 int mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
202 unsigned int socket, const struct rte_eth_txconf *conf);
203 int mlx5_tx_hairpin_queue_setup
204 (struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
205 const struct rte_eth_hairpin_conf *hairpin_conf);
206 void mlx5_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
207 void txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl);
208 int mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd);
209 void mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev);
210 int mlx5_txq_obj_verify(struct rte_eth_dev *dev);
211 struct mlx5_txq_ctrl *mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx,
212 uint16_t desc, unsigned int socket,
213 const struct rte_eth_txconf *conf);
214 struct mlx5_txq_ctrl *mlx5_txq_hairpin_new
215 (struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
216 const struct rte_eth_hairpin_conf *hairpin_conf);
217 struct mlx5_txq_ctrl *mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx);
218 int mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx);
219 int mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx);
220 int mlx5_txq_verify(struct rte_eth_dev *dev);
221 void txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl);
222 void txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl);
223 uint64_t mlx5_get_tx_port_offloads(struct rte_eth_dev *dev);
224 void mlx5_txq_dynf_timestamp_set(struct rte_eth_dev *dev);
228 uint16_t removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
230 void mlx5_tx_handle_completion(struct mlx5_txq_data *__rte_restrict txq,
231 unsigned int olx __rte_unused);
232 int mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset);
233 void mlx5_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
234 struct rte_eth_txq_info *qinfo);
235 int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
236 struct rte_eth_burst_mode *mode);
240 MLX5_TXOFF_PRE_DECL(full_empw);
241 MLX5_TXOFF_PRE_DECL(none_empw);
242 MLX5_TXOFF_PRE_DECL(md_empw);
243 MLX5_TXOFF_PRE_DECL(mt_empw);
244 MLX5_TXOFF_PRE_DECL(mtsc_empw);
245 MLX5_TXOFF_PRE_DECL(mti_empw);
246 MLX5_TXOFF_PRE_DECL(mtv_empw);
247 MLX5_TXOFF_PRE_DECL(mtiv_empw);
248 MLX5_TXOFF_PRE_DECL(sc_empw);
249 MLX5_TXOFF_PRE_DECL(sci_empw);
250 MLX5_TXOFF_PRE_DECL(scv_empw);
251 MLX5_TXOFF_PRE_DECL(sciv_empw);
252 MLX5_TXOFF_PRE_DECL(i_empw);
253 MLX5_TXOFF_PRE_DECL(v_empw);
254 MLX5_TXOFF_PRE_DECL(iv_empw);
256 /* mlx5_tx_nompw.c */
258 MLX5_TXOFF_PRE_DECL(full);
259 MLX5_TXOFF_PRE_DECL(none);
260 MLX5_TXOFF_PRE_DECL(md);
261 MLX5_TXOFF_PRE_DECL(mt);
262 MLX5_TXOFF_PRE_DECL(mtsc);
263 MLX5_TXOFF_PRE_DECL(mti);
264 MLX5_TXOFF_PRE_DECL(mtv);
265 MLX5_TXOFF_PRE_DECL(mtiv);
266 MLX5_TXOFF_PRE_DECL(sc);
267 MLX5_TXOFF_PRE_DECL(sci);
268 MLX5_TXOFF_PRE_DECL(scv);
269 MLX5_TXOFF_PRE_DECL(sciv);
270 MLX5_TXOFF_PRE_DECL(i);
271 MLX5_TXOFF_PRE_DECL(v);
272 MLX5_TXOFF_PRE_DECL(iv);
276 MLX5_TXOFF_PRE_DECL(full_ts_nompw);
277 MLX5_TXOFF_PRE_DECL(full_ts_nompwi);
278 MLX5_TXOFF_PRE_DECL(full_ts);
279 MLX5_TXOFF_PRE_DECL(full_ts_noi);
280 MLX5_TXOFF_PRE_DECL(none_ts);
281 MLX5_TXOFF_PRE_DECL(mdi_ts);
282 MLX5_TXOFF_PRE_DECL(mti_ts);
283 MLX5_TXOFF_PRE_DECL(mtiv_ts);
287 MLX5_TXOFF_PRE_DECL(none_mpw);
288 MLX5_TXOFF_PRE_DECL(mci_mpw);
289 MLX5_TXOFF_PRE_DECL(mc_mpw);
290 MLX5_TXOFF_PRE_DECL(i_mpw);
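/*
 * Naming convention (informal, inferred from the declarations above): the
 * letters in the routine names appear to encode the offload set a routine is
 * built for - 'm' multi-segment, 't' TSO, 's' software parser, 'c' checksum,
 * 'i' inline, 'v' VLAN, 'md' metadata, '_ts' timestamp scheduling - while
 * the '_empw'/'_mpw'/plain suffix selects enhanced MPW, legacy MPW or the
 * ordinary (no MPW) descriptor format.
 */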
292 static __rte_always_inline uint64_t *
293 mlx5_tx_bfreg(struct mlx5_txq_data *txq)
295 return MLX5_PROC_PRIV(txq->port_id)->uar_table[txq->idx];
299 * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
300 * 64bit architectures.
303 * value to write in CPU endian format.
305 * Address to write to.
307 * Address of the lock to use for that UAR access.
309 static __rte_always_inline void
310 __mlx5_uar_write64_relaxed(uint64_t val, void *addr,
311 rte_spinlock_t *lock __rte_unused)
314 *(uint64_t *)addr = val;
315 #else /* !RTE_ARCH_64 */
316 rte_spinlock_lock(lock);
317 *(uint32_t *)addr = val;
319 *((uint32_t *)addr + 1) = val >> 32;
320 rte_spinlock_unlock(lock);
325 * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
326 * 64bit architectures while guaranteeing the order of execution with the
327 * code being executed.
330 * value to write in CPU endian format.
332 * Address to write to.
334 * Address of the lock to use for that UAR access.
336 static __rte_always_inline void
337 __mlx5_uar_write64(uint64_t val, void *addr, rte_spinlock_t *lock)
340 __mlx5_uar_write64_relaxed(val, addr, lock);
343 /* Assist macros, used instead of directly calling the functions they wrap. */
345 #define mlx5_uar_write64_relaxed(val, dst, lock) \
346 __mlx5_uar_write64_relaxed(val, dst, NULL)
347 #define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, NULL)
349 #define mlx5_uar_write64_relaxed(val, dst, lock) \
350 __mlx5_uar_write64_relaxed(val, dst, lock)
351 #define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, lock)
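/*
 * Rationale (summary of the code above): on 64-bit hosts the doorbell value
 * is written with a single 64-bit store which the device observes
 * atomically. On 32-bit hosts the value has to be split into two 32-bit
 * stores, so the UAR spinlock is taken to prevent another thread writing to
 * the same UAR region from interleaving its doorbell write between the two
 * halves.
 */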
355 * Query LKey from a packet buffer for Tx.
358 * Pointer to Tx queue structure.
363 * Searched LKey on success, UINT32_MAX on no match.
365 static __rte_always_inline uint32_t
366 mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
368 struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
369 struct mlx5_txq_ctrl *txq_ctrl =
370 container_of(txq, struct mlx5_txq_ctrl, txq);
371 struct mlx5_priv *priv = txq_ctrl->priv;
373 /* Take slower bottom-half on miss. */
374 return mlx5_mr_mb2mr(priv->sh->cdev, &priv->mp_id, mr_ctrl, mb);
378 * Ring TX queue doorbell and flush the update if requested.
381 * Pointer to TX queue structure.
383 * Pointer to the last WQE posted in the NIC.
385 * Request for write memory barrier after BlueFlame update.
387 static __rte_always_inline void
388 mlx5_tx_dbrec_cond_wmb(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe,
391 uint64_t *dst = mlx5_tx_bfreg(txq);
392 volatile uint64_t *src = ((volatile uint64_t *)wqe);
395 *txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci);
396 /* Ensure ordering between DB record and BF copy. */
398 mlx5_uar_write64_relaxed(*src, dst, txq->uar_lock);
404 * Ring TX queue doorbell and flush the update by write memory barrier.
407 * Pointer to TX queue structure.
409 * Pointer to the last WQE posted in the NIC.
411 static __rte_always_inline void
412 mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe)
414 mlx5_tx_dbrec_cond_wmb(txq, wqe, 1);
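/*
 * Usage sketch (illustrative and simplified, the exact condition in the real
 * burst template differs): after a batch of WQEs has been built, the
 * doorbell is rung once for the whole batch using the last posted WQE:
 *
 *	if (likely(loc.pkts_sent))
 *		mlx5_tx_dbrec_cond_wmb(txq, loc.wqe_last, 0);
 *
 * The doorbell record gets the new wqe_ci and the first 8 bytes of the last
 * WQE are copied to the BlueFlame/UAR register to trigger the send.
 */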
418 * Convert timestamp from mbuf format to linear counter
419 * of Clock Queue completions (24 bits).
422 * Pointer to the device shared context to fetch Tx
423 * packet pacing timestamp and parameters.
425 * Timestamp from mbuf to convert.
427 * positive or zero value - completion ID to wait.
428 * negative value - conversion error.
430 static __rte_always_inline int32_t
431 mlx5_txpp_convert_tx_ts(struct mlx5_dev_ctx_shared *sh, uint64_t mts)
438 * Read the two uint64_t fields atomically and compare the LSB bits.
439 * If there is no match - the timestamp was updated in
440 * the service thread and the data should be re-read.
442 rte_compiler_barrier();
443 ci = __atomic_load_n(&sh->txpp.ts.ci_ts, __ATOMIC_RELAXED);
444 ts = __atomic_load_n(&sh->txpp.ts.ts, __ATOMIC_RELAXED);
445 rte_compiler_barrier();
446 if (!((ts ^ ci) << (64 - MLX5_CQ_INDEX_WIDTH)))
449 /* Perform the skew correction, positive value to send earlier. */
450 mts -= sh->txpp.skew;
452 if (unlikely(mts >= UINT64_MAX / 2)) {
453 /* The delta is negative, mts is in the past. */
454 __atomic_fetch_add(&sh->txpp.err_ts_past,
455 1, __ATOMIC_RELAXED);
458 tick = sh->txpp.tick;
460 /* Convert delta to completions, round up. */
461 mts = (mts + tick - 1) / tick;
462 if (unlikely(mts >= (1 << MLX5_CQ_INDEX_WIDTH) / 2 - 1)) {
463 /* The mts is too far in the future. */
464 __atomic_fetch_add(&sh->txpp.err_ts_future,
465 1, __ATOMIC_RELAXED);
468 mts <<= 64 - MLX5_CQ_INDEX_WIDTH;
470 ci >>= 64 - MLX5_CQ_INDEX_WIDTH;
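/*
 * Explanatory note: the skew-corrected delta is converted to a number of
 * Clock Queue ticks (rounded up) and added to the current Clock Queue
 * completion index modulo 2^MLX5_CQ_INDEX_WIDTH. For example (hypothetical
 * numbers), with tick == 1000 and a delta of 2500 time units the routine
 * returns ci + 3 as the completion index to wait for.
 */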
475 * Set Software Parser flags and offsets in Ethernet Segment of WQE.
476 * Flags must be preliminary initialized to zero.
479 * Pointer to burst routine local context.
481 * Pointer to store Software Parser flags.
483 * Configured Tx offloads mask. It is fully defined at
484 * compile time and may be used for optimization.
487 * Software Parser offsets packed in dword.
488 * Software Parser flags are set by pointer.
490 static __rte_always_inline uint32_t
491 txq_mbuf_to_swp(struct mlx5_txq_local *__rte_restrict loc,
496 unsigned int idx, off;
499 if (!MLX5_TXOFF_CONFIG(SWP))
501 ol = loc->mbuf->ol_flags;
502 tunnel = ol & RTE_MBUF_F_TX_TUNNEL_MASK;
504 * Check whether the Software Parser is required.
505 * Only customized tunnels may ask for it.
507 if (likely(tunnel != RTE_MBUF_F_TX_TUNNEL_UDP && tunnel != RTE_MBUF_F_TX_TUNNEL_IP))
510 * The index should have:
511 * bit[0:1] = RTE_MBUF_F_TX_L4_MASK
512 * bit[4] = RTE_MBUF_F_TX_IPV6
513 * bit[8] = RTE_MBUF_F_TX_OUTER_IPV6
514 * bit[9] = RTE_MBUF_F_TX_OUTER_UDP
516 idx = (ol & (RTE_MBUF_F_TX_L4_MASK | RTE_MBUF_F_TX_IPV6 | RTE_MBUF_F_TX_OUTER_IPV6)) >> 52;
517 idx |= (tunnel == RTE_MBUF_F_TX_TUNNEL_UDP) ? (1 << 9) : 0;
518 *swp_flags = mlx5_swp_types_table[idx];
520 * Set offsets for SW parser. Since ConnectX-5, SW parser just
521 * complements HW parser. SW parser starts to engage only if HW parser
522 * can't reach a header. For the older devices, HW parser will not kick
523 * in if any of SWP offsets is set. Therefore, all of the L3 offsets
524 * should be set regardless of HW offload.
526 off = loc->mbuf->outer_l2_len;
527 if (MLX5_TXOFF_CONFIG(VLAN) && ol & RTE_MBUF_F_TX_VLAN)
528 off += sizeof(struct rte_vlan_hdr);
529 set = (off >> 1) << 8; /* Outer L3 offset. */
530 off += loc->mbuf->outer_l3_len;
531 if (tunnel == RTE_MBUF_F_TX_TUNNEL_UDP)
532 set |= off >> 1; /* Outer L4 offset. */
533 if (ol & (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IPV6)) { /* Inner IP. */
534 const uint64_t csum = ol & RTE_MBUF_F_TX_L4_MASK;
535 off += loc->mbuf->l2_len;
536 set |= (off >> 1) << 24; /* Inner L3 offset. */
537 if (csum == RTE_MBUF_F_TX_TCP_CKSUM ||
538 csum == RTE_MBUF_F_TX_UDP_CKSUM ||
539 (MLX5_TXOFF_CONFIG(TSO) && ol & RTE_MBUF_F_TX_TCP_SEG)) {
540 off += loc->mbuf->l3_len;
541 set |= (off >> 1) << 16; /* Inner L4 offset. */
544 set = rte_cpu_to_le_32(set);
549 * Convert the Checksum offloads to Verbs.
552 * Pointer to the mbuf.
555 * Converted checksum flags.
557 static __rte_always_inline uint8_t
558 txq_ol_cksum_to_cs(struct rte_mbuf *buf)
561 uint8_t is_tunnel = !!(buf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK);
562 const uint64_t ol_flags_mask = RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_L4_MASK |
563 RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_OUTER_IP_CKSUM;
566 * The index should have:
567 * bit[0] = RTE_MBUF_F_TX_TCP_SEG
568 * bit[2:3] = RTE_MBUF_F_TX_UDP_CKSUM, RTE_MBUF_F_TX_TCP_CKSUM
569 * bit[4] = RTE_MBUF_F_TX_IP_CKSUM
570 * bit[8] = RTE_MBUF_F_TX_OUTER_IP_CKSUM
573 idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (!!is_tunnel << 9);
574 return mlx5_cksum_table[idx];
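/*
 * Worked example (illustrative): for a plain TCP packet requesting
 * RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_TCP_CKSUM with no tunnel flags,
 * the masked ol_flags shifted right by 50 yield bit[2:3] = 01b (TCP
 * checksum) and bit[4] = 1 (IP checksum), i.e. idx = 0x14, and
 * mlx5_cksum_table[0x14] holds the matching Ethernet Segment checksum flags.
 */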
578 * Free the mbufs from the linear array of pointers.
581 * Pointer to Tx queue structure.
583 * Pointer to array of packets to be free.
585 * Number of packets to be freed.
587 * Configured Tx offloads mask. It is fully defined at
588 * compile time and may be used for optimization.
590 static __rte_always_inline void
591 mlx5_tx_free_mbuf(struct mlx5_txq_data *__rte_restrict txq,
592 struct rte_mbuf **__rte_restrict pkts,
594 unsigned int olx __rte_unused)
596 struct rte_mempool *pool = NULL;
597 struct rte_mbuf **p_free = NULL;
598 struct rte_mbuf *mbuf;
599 unsigned int n_free = 0;
602 * The implemented algorithm eliminates
603 * copying pointers to temporary array
604 * for rte_mempool_put_bulk() calls.
609 * Free mbufs directly to the pool in bulk
610 * if fast free offload is engaged
612 if (!MLX5_TXOFF_CONFIG(MULTI) && txq->fast_free) {
615 rte_mempool_put_bulk(pool, (void *)pkts, pkts_n);
621 * Decrement mbuf reference counter, detach
622 * indirect and external buffers if needed.
624 mbuf = rte_pktmbuf_prefree_seg(*pkts);
625 if (likely(mbuf != NULL)) {
626 MLX5_ASSERT(mbuf == *pkts);
627 if (likely(n_free != 0)) {
628 if (unlikely(pool != mbuf->pool))
629 /* From different pool. */
632 /* Start new scan array. */
639 if (unlikely(pkts_n == 0)) {
645 * This happens if mbuf is still referenced.
646 * We can't put it back to the pool, skip.
650 if (unlikely(n_free != 0))
651 /* There is some array to free.*/
653 if (unlikely(pkts_n == 0))
654 /* Last mbuf, nothing to free. */
660 * This loop is implemented to avoid multiple
661 * inlining of rte_mempool_put_bulk().
667 * Free the array of pre-freed mbufs
668 * belonging to the same memory pool.
670 rte_mempool_put_bulk(pool, (void *)p_free, n_free);
671 if (unlikely(mbuf != NULL)) {
672 /* There is a request to start a new scan. */
677 if (likely(pkts_n != 0))
680 * This is the last mbuf to be freed.
681 * Do one more loop iteration to complete.
682 * This is the rare case of the last unique mbuf.
687 if (likely(pkts_n == 0))
696 * Non-inlined version of mlx5_tx_free_mbuf() for an optimal call
697 * on tx_burst completion.
699 static __rte_noinline void
700 __mlx5_tx_free_mbuf(struct mlx5_txq_data *__rte_restrict txq,
701 struct rte_mbuf **__rte_restrict pkts,
703 unsigned int olx __rte_unused)
705 mlx5_tx_free_mbuf(txq, pkts, pkts_n, olx);
709 * Free the mbufs from the elts ring buffer up to the new tail.
712 * Pointer to Tx queue structure.
714 * Index in elts to free up to, becomes new elts tail.
716 * Configured Tx offloads mask. It is fully defined at
717 * compile time and may be used for optimization.
719 static __rte_always_inline void
720 mlx5_tx_free_elts(struct mlx5_txq_data *__rte_restrict txq,
722 unsigned int olx __rte_unused)
724 uint16_t n_elts = tail - txq->elts_tail;
727 MLX5_ASSERT(n_elts <= txq->elts_s);
729 * Implement a loop to support ring buffer wraparound
730 * with single inlining of mlx5_tx_free_mbuf().
735 part = txq->elts_s - (txq->elts_tail & txq->elts_m);
736 part = RTE_MIN(part, n_elts);
738 MLX5_ASSERT(part <= txq->elts_s);
739 mlx5_tx_free_mbuf(txq,
740 &txq->elts[txq->elts_tail & txq->elts_m],
742 txq->elts_tail += part;
748 * Store the mbufs being sent into the elts ring buffer.
749 * On Tx completion these mbufs will be freed.
752 * Pointer to Tx queue structure.
754 * Pointer to array of packets to be stored.
756 * Number of packets to be stored.
758 * Configured Tx offloads mask. It is fully defined at
759 * compile time and may be used for optimization.
761 static __rte_always_inline void
762 mlx5_tx_copy_elts(struct mlx5_txq_data *__rte_restrict txq,
763 struct rte_mbuf **__rte_restrict pkts,
765 unsigned int olx __rte_unused)
768 struct rte_mbuf **elts = (struct rte_mbuf **)txq->elts;
772 part = txq->elts_s - (txq->elts_head & txq->elts_m);
774 MLX5_ASSERT(part <= txq->elts_s);
775 /* This code is a good candidate for vectorizing with SIMD. */
776 rte_memcpy((void *)(elts + (txq->elts_head & txq->elts_m)),
778 RTE_MIN(part, pkts_n) * sizeof(struct rte_mbuf *));
779 txq->elts_head += pkts_n;
780 if (unlikely(part < pkts_n))
781 /* The copy is wrapping around the elts array. */
782 rte_memcpy((void *)elts, (void *)(pkts + part),
783 (pkts_n - part) * sizeof(struct rte_mbuf *));
787 * Check if the completion request flag should be set in the last WQE.
788 * Both pushed mbufs and WQEs are monitored and the completion request
789 * flag is set if any of thresholds is reached.
792 * Pointer to TX queue structure.
794 * Pointer to burst routine local context.
796 * Configured Tx offloads mask. It is fully defined at
797 * compile time and may be used for optimization.
799 static __rte_always_inline void
800 mlx5_tx_request_completion(struct mlx5_txq_data *__rte_restrict txq,
801 struct mlx5_txq_local *__rte_restrict loc,
804 uint16_t head = txq->elts_head;
807 part = MLX5_TXOFF_CONFIG(INLINE) ?
808 0 : loc->pkts_sent - loc->pkts_copy;
810 if ((uint16_t)(head - txq->elts_comp) >= MLX5_TX_COMP_THRESH ||
811 (MLX5_TXOFF_CONFIG(INLINE) &&
812 (uint16_t)(txq->wqe_ci - txq->wqe_comp) >= txq->wqe_thres)) {
813 volatile struct mlx5_wqe *last = loc->wqe_last;
816 txq->elts_comp = head;
817 if (MLX5_TXOFF_CONFIG(INLINE))
818 txq->wqe_comp = txq->wqe_ci;
819 /* Request unconditional completion on last WQE. */
820 last->cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
821 MLX5_COMP_MODE_OFFSET);
822 /* Save elts_head in dedicated free on completion queue. */
823 #ifdef RTE_LIBRTE_MLX5_DEBUG
824 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head |
825 (last->cseg.opcode >> 8) << 16;
827 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head;
829 /* A CQE slot must always be available. */
830 MLX5_ASSERT((txq->cq_pi - txq->cq_ci) <= txq->cqe_s);
835 * Build the Control Segment with specified opcode:
837 * - MLX5_OPCODE_ENHANCED_MPSW
841 * Pointer to TX queue structure.
843 * Pointer to burst routine local context.
845 * Pointer to WQE to fill with built Control Segment.
847 * Supposed length of WQE in segments.
849 * SQ WQE opcode to put into Control Segment.
851 * Configured Tx offloads mask. It is fully defined at
852 * compile time and may be used for optimization.
854 static __rte_always_inline void
855 mlx5_tx_cseg_init(struct mlx5_txq_data *__rte_restrict txq,
856 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
857 struct mlx5_wqe *__rte_restrict wqe,
860 unsigned int olx __rte_unused)
862 struct mlx5_wqe_cseg *__rte_restrict cs = &wqe->cseg;
864 /* For legacy MPW replace the EMPW by TSO with modifier. */
865 if (MLX5_TXOFF_CONFIG(MPW) && opcode == MLX5_OPCODE_ENHANCED_MPSW)
866 opcode = MLX5_OPCODE_TSO | MLX5_OPC_MOD_MPW << 24;
867 cs->opcode = rte_cpu_to_be_32((txq->wqe_ci << 8) | opcode);
868 cs->sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
869 cs->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
870 MLX5_COMP_MODE_OFFSET);
871 cs->misc = RTE_BE32(0);
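/*
 * Sizing note (derived from the code): 'ds' counts 16-byte WQE segments
 * (MLX5_WSEG_SIZE). For example, a simple SEND WQE made of one Control
 * Segment, one Ethernet Segment and two pointer Data Segments has ds == 4
 * and occupies exactly one 64-byte WQEBB, which is why the WQE index is
 * advanced by (ds + 3) / 4 after posting.
 */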
875 * Build the Synchronize Queue Segment with specified completion index.
878 * Pointer to TX queue structure.
880 * Pointer to burst routine local context.
882 * Pointer to WQE to fill with the built Synchronize Queue Segment.
884 * Completion index in Clock Queue to wait for.
886 * Configured Tx offloads mask. It is fully defined at
887 * compile time and may be used for optimization.
889 static __rte_always_inline void
890 mlx5_tx_wseg_init(struct mlx5_txq_data *restrict txq,
891 struct mlx5_txq_local *restrict loc __rte_unused,
892 struct mlx5_wqe *restrict wqe,
894 unsigned int olx __rte_unused)
896 struct mlx5_wqe_qseg *qs;
898 qs = RTE_PTR_ADD(wqe, MLX5_WSEG_SIZE);
899 qs->max_index = rte_cpu_to_be_32(wci);
900 qs->qpn_cqn = rte_cpu_to_be_32(txq->sh->txpp.clock_queue.cq_obj.cq->id);
901 qs->reserved0 = RTE_BE32(0);
902 qs->reserved1 = RTE_BE32(0);
906 * Build the Ethernet Segment without inlined data.
907 * Supports Software Parser, Checksums and VLAN insertion Tx offload features.
910 * Pointer to TX queue structure.
912 * Pointer to burst routine local context.
914 * Pointer to WQE to fill with built Ethernet Segment.
916 * Configured Tx offloads mask. It is fully defined at
917 * compile time and may be used for optimization.
919 static __rte_always_inline void
920 mlx5_tx_eseg_none(struct mlx5_txq_data *__rte_restrict txq __rte_unused,
921 struct mlx5_txq_local *__rte_restrict loc,
922 struct mlx5_wqe *__rte_restrict wqe,
925 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
929 * Calculate and set check sum flags first, dword field
930 * in segment may be shared with Software Parser flags.
932 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
933 es->flags = rte_cpu_to_le_32(csum);
935 * Calculate and set Software Parser offsets and flags.
936 * These flags are set for custom UDP and IP tunnel packets.
938 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
939 /* Fill metadata field if needed. */
940 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
941 loc->mbuf->ol_flags & RTE_MBUF_DYNFLAG_TX_METADATA ?
942 rte_cpu_to_be_32(*RTE_FLOW_DYNF_METADATA(loc->mbuf)) :
944 /* Engage VLAN tag insertion feature if requested. */
945 if (MLX5_TXOFF_CONFIG(VLAN) &&
946 loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN) {
948 * We should get here only if the device supports
949 * this feature correctly.
951 MLX5_ASSERT(txq->vlan_en);
952 es->inline_hdr = rte_cpu_to_be_32(MLX5_ETH_WQE_VLAN_INSERT |
953 loc->mbuf->vlan_tci);
955 es->inline_hdr = RTE_BE32(0);
960 * Build the Ethernet Segment with minimal inlined data
961 * of MLX5_ESEG_MIN_INLINE_SIZE bytes length. This is
962 * used to fill the gap in single WQEBB WQEs.
963 * Supports Software Parser, Checksums and VLAN
964 * insertion Tx offload features.
967 * Pointer to TX queue structure.
969 * Pointer to burst routine local context.
971 * Pointer to WQE to fill with built Ethernet Segment.
973 * Length of VLAN tag insertion if any.
975 * Configured Tx offloads mask. It is fully defined at
976 * compile time and may be used for optimization.
978 static __rte_always_inline void
979 mlx5_tx_eseg_dmin(struct mlx5_txq_data *__rte_restrict txq __rte_unused,
980 struct mlx5_txq_local *__rte_restrict loc,
981 struct mlx5_wqe *__rte_restrict wqe,
985 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
987 uint8_t *psrc, *pdst;
990 * Calculate and set check sum flags first, dword field
991 * in segment may be shared with Software Parser flags.
993 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
994 es->flags = rte_cpu_to_le_32(csum);
996 * Calculate and set Software Parser offsets and flags.
997 * These flags are set for custom UDP and IP tunnel packets.
999 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
1000 /* Fill metadata field if needed. */
1001 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
1002 loc->mbuf->ol_flags & RTE_MBUF_DYNFLAG_TX_METADATA ?
1003 rte_cpu_to_be_32(*RTE_FLOW_DYNF_METADATA(loc->mbuf)) :
1005 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
1006 es->inline_hdr_sz = RTE_BE16(MLX5_ESEG_MIN_INLINE_SIZE);
1007 es->inline_data = *(unaligned_uint16_t *)psrc;
1008 psrc += sizeof(uint16_t);
1009 pdst = (uint8_t *)(es + 1);
1010 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
1011 /* Implement VLAN tag insertion as part of the inline data. */
1012 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
1013 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
1014 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
1015 /* Insert VLAN ethertype + VLAN tag. */
1016 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
1017 ((RTE_ETHER_TYPE_VLAN << 16) |
1018 loc->mbuf->vlan_tci);
1019 pdst += sizeof(struct rte_vlan_hdr);
1020 /* Copy the remaining two bytes from the packet data. */
1021 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
1022 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
1024 /* Fill the gap in the title WQEBB with inline data. */
1025 rte_mov16(pdst, psrc);
1030 * Build the Ethernet Segment with entire packet data inlining. Checks the
1031 * boundary of WQEBB and ring buffer wrapping, supports Software Parser,
1032 * Checksums and VLAN insertion Tx offload features.
1035 * Pointer to TX queue structure.
1037 * Pointer to burst routine local context.
1039 * Pointer to WQE to fill with built Ethernet Segment.
1041 * Length of VLAN tag insertion if any.
1043 * Length of data to inline (VLAN included, if any).
1045 * TSO flag, set mss field from the packet.
1047 * Configured Tx offloads mask. It is fully defined at
1048 * compile time and may be used for optimization.
1051 * Pointer to the next Data Segment (aligned and wrapped around).
1053 static __rte_always_inline struct mlx5_wqe_dseg *
1054 mlx5_tx_eseg_data(struct mlx5_txq_data *__rte_restrict txq,
1055 struct mlx5_txq_local *__rte_restrict loc,
1056 struct mlx5_wqe *__rte_restrict wqe,
1062 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
1064 uint8_t *psrc, *pdst;
1068 * Calculate and set check sum flags first, dword field
1069 * in segment may be shared with Software Parser flags.
1071 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
1074 csum |= loc->mbuf->tso_segsz;
1075 es->flags = rte_cpu_to_be_32(csum);
1077 es->flags = rte_cpu_to_le_32(csum);
1080 * Calculate and set Software Parser offsets and flags.
1081 * These flags are set for custom UDP and IP tunnel packets.
1083 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
1084 /* Fill metadata field if needed. */
1085 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
1086 loc->mbuf->ol_flags & RTE_MBUF_DYNFLAG_TX_METADATA ?
1087 rte_cpu_to_be_32(*RTE_FLOW_DYNF_METADATA(loc->mbuf)) :
1089 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
1090 es->inline_hdr_sz = rte_cpu_to_be_16(inlen);
1091 es->inline_data = *(unaligned_uint16_t *)psrc;
1092 psrc += sizeof(uint16_t);
1093 pdst = (uint8_t *)(es + 1);
1094 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
1095 /* Implement VLAN tag insertion as part of the inline data. */
1096 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
1097 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
1098 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
1099 /* Insert VLAN ethertype + VLAN tag. */
1100 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
1101 ((RTE_ETHER_TYPE_VLAN << 16) |
1102 loc->mbuf->vlan_tci);
1103 pdst += sizeof(struct rte_vlan_hdr);
1104 /* Copy the remaining two bytes from the packet data. */
1105 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
1106 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
1107 psrc += sizeof(uint16_t);
1109 /* Fill the gap in the title WQEBB with inline data. */
1110 rte_mov16(pdst, psrc);
1111 psrc += sizeof(rte_v128u32_t);
1113 pdst = (uint8_t *)(es + 2);
1114 MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
1115 MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
1116 inlen -= MLX5_ESEG_MIN_INLINE_SIZE;
1118 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
1119 return (struct mlx5_wqe_dseg *)pdst;
1122 * The WQEBB space availability is checked by caller.
1123 * Here we should be aware of WQE ring buffer wraparound only.
1125 part = (uint8_t *)txq->wqes_end - pdst;
1126 part = RTE_MIN(part, inlen);
1128 rte_memcpy(pdst, psrc, part);
1130 if (likely(!inlen)) {
1132 * If return value is not used by the caller
1133 * the code below will be optimized out.
1136 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
1137 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
1138 pdst = (uint8_t *)txq->wqes;
1139 return (struct mlx5_wqe_dseg *)pdst;
1141 pdst = (uint8_t *)txq->wqes;
1148 * Copy data from a chain of mbufs to the specified linear buffer.
1149 * If the data from some mbuf is copied completely, that mbuf is freed.
1150 * The local structure is used to keep the byte stream state.
1154 * Pointer to the destination linear buffer.
1156 * Pointer to burst routine local context.
1158 * Length of data to be copied.
1160 * Length of data to be copied ignoring no inline hint.
1162 * Configured Tx offloads mask. It is fully defined at
1163 * compile time and may be used for optimization.
1166 * Number of actually copied data bytes. This is always greater than or
1167 * equal to the "must" parameter and might be less than "len" if the
1168 * no-inline hint flag is encountered.
1170 static __rte_always_inline unsigned int
1171 mlx5_tx_mseg_memcpy(uint8_t *pdst,
1172 struct mlx5_txq_local *__rte_restrict loc,
1175 unsigned int olx __rte_unused)
1177 struct rte_mbuf *mbuf;
1178 unsigned int part, dlen, copy = 0;
1182 MLX5_ASSERT(must <= len);
1184 /* Allow zero length packets, must check first. */
1185 dlen = rte_pktmbuf_data_len(loc->mbuf);
1186 if (dlen <= loc->mbuf_off) {
1187 /* Exhausted packet, just free. */
1189 loc->mbuf = mbuf->next;
1190 rte_pktmbuf_free_seg(mbuf);
1192 MLX5_ASSERT(loc->mbuf_nseg > 1);
1193 MLX5_ASSERT(loc->mbuf);
1195 if (loc->mbuf->ol_flags & RTE_MBUF_F_TX_DYNF_NOINLINE) {
1200 * We already copied the minimal
1201 * requested amount of data.
1206 if (diff <= rte_pktmbuf_data_len(loc->mbuf)) {
1208 * Copy only the minimal required
1209 * part of the data buffer.
1216 dlen -= loc->mbuf_off;
1217 psrc = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
1219 part = RTE_MIN(len, dlen);
1220 rte_memcpy(pdst, psrc, part);
1222 loc->mbuf_off += part;
1225 if (loc->mbuf_off >= rte_pktmbuf_data_len(loc->mbuf)) {
1227 /* Exhausted packet, just free. */
1229 loc->mbuf = mbuf->next;
1230 rte_pktmbuf_free_seg(mbuf);
1232 MLX5_ASSERT(loc->mbuf_nseg >= 1);
1242 * Build the Ethernet Segment with inlined data from multi-segment packet.
1243 * Checks the boundary of WQEBB and ring buffer wrapping, supports Software
1244 * Parser, Checksums and VLAN insertion Tx offload features.
1247 * Pointer to TX queue structure.
1249 * Pointer to burst routine local context.
1251 * Pointer to WQE to fill with built Ethernet Segment.
1253 * Length of VLAN tag insertion if any.
1255 * Length of data to inline (VLAN included, if any).
1257 * TSO flag, set mss field from the packet.
1259 * Configured Tx offloads mask. It is fully defined at
1260 * compile time and may be used for optimization.
1263 * Pointer to the next Data Segment (aligned and possibly NOT wrapped
1264 * around - the caller should do the wrapping check on its own).
1266 static __rte_always_inline struct mlx5_wqe_dseg *
1267 mlx5_tx_eseg_mdat(struct mlx5_txq_data *__rte_restrict txq,
1268 struct mlx5_txq_local *__rte_restrict loc,
1269 struct mlx5_wqe *__rte_restrict wqe,
1275 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
1278 unsigned int part, tlen = 0;
1281 * Calculate and set check sum flags first, uint32_t field
1282 * in segment may be shared with Software Parser flags.
1284 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
1287 csum |= loc->mbuf->tso_segsz;
1288 es->flags = rte_cpu_to_be_32(csum);
1290 es->flags = rte_cpu_to_le_32(csum);
1293 * Calculate and set Software Parser offsets and flags.
1294 * These flags are set for custom UDP and IP tunnel packets.
1296 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
1297 /* Fill metadata field if needed. */
1298 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
1299 loc->mbuf->ol_flags & RTE_MBUF_DYNFLAG_TX_METADATA ?
1300 rte_cpu_to_be_32(*RTE_FLOW_DYNF_METADATA(loc->mbuf)) :
1302 MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
1303 pdst = (uint8_t *)&es->inline_data;
1304 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
1305 /* Implement VLAN tag insertion as part of the inline data. */
1306 mlx5_tx_mseg_memcpy(pdst, loc,
1307 2 * RTE_ETHER_ADDR_LEN,
1308 2 * RTE_ETHER_ADDR_LEN, olx);
1309 pdst += 2 * RTE_ETHER_ADDR_LEN;
1310 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
1311 ((RTE_ETHER_TYPE_VLAN << 16) |
1312 loc->mbuf->vlan_tci);
1313 pdst += sizeof(struct rte_vlan_hdr);
1314 tlen += 2 * RTE_ETHER_ADDR_LEN + sizeof(struct rte_vlan_hdr);
1316 MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
1318 * The WQEBB space availability is checked by caller.
1319 * Here we should be aware of WQE ring buffer wraparound only.
1321 part = (uint8_t *)txq->wqes_end - pdst;
1322 part = RTE_MIN(part, inlen - tlen);
1328 * Copying may be interrupted inside the routine
1329 * if it runs into the no-inline hint flag.
1331 copy = tso ? inlen : txq->inlen_mode;
1332 copy = tlen >= copy ? 0 : (copy - tlen);
1333 copy = mlx5_tx_mseg_memcpy(pdst, loc, part, copy, olx);
1335 if (likely(inlen <= tlen) || copy < part) {
1336 es->inline_hdr_sz = rte_cpu_to_be_16(tlen);
1338 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
1339 return (struct mlx5_wqe_dseg *)pdst;
1341 pdst = (uint8_t *)txq->wqes;
1342 part = inlen - tlen;
1347 * Build the Data Segment of pointer type.
1350 * Pointer to TX queue structure.
1352 * Pointer to burst routine local context.
1354 * Pointer to WQE to fill with built Data Segment.
1356 * Data buffer to point to.
1358 * Data buffer length.
1360 * Configured Tx offloads mask. It is fully defined at
1361 * compile time and may be used for optimization.
1363 static __rte_always_inline void
1364 mlx5_tx_dseg_ptr(struct mlx5_txq_data *__rte_restrict txq,
1365 struct mlx5_txq_local *__rte_restrict loc,
1366 struct mlx5_wqe_dseg *__rte_restrict dseg,
1369 unsigned int olx __rte_unused)
1373 dseg->bcount = rte_cpu_to_be_32(len);
1374 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
1375 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
1379 * Build the Data Segment of pointer type, or of inline type if the data
1380 * length fits into the minimal Data Segment buffer size.
1383 * Pointer to TX queue structure.
1385 * Pointer to burst routine local context.
1387 * Pointer to WQE to fill with built Data Segment.
1389 * Data buffer to point to.
1391 * Data buffer length.
1393 * Configured Tx offloads mask. It is fully defined at
1394 * compile time and may be used for optimization.
1396 static __rte_always_inline void
1397 mlx5_tx_dseg_iptr(struct mlx5_txq_data *__rte_restrict txq,
1398 struct mlx5_txq_local *__rte_restrict loc,
1399 struct mlx5_wqe_dseg *__rte_restrict dseg,
1402 unsigned int olx __rte_unused)
1408 if (len > MLX5_DSEG_MIN_INLINE_SIZE) {
1409 dseg->bcount = rte_cpu_to_be_32(len);
1410 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
1411 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
1415 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
1416 /* Unrolled implementation of generic rte_memcpy. */
1417 dst = (uintptr_t)&dseg->inline_data[0];
1418 src = (uintptr_t)buf;
1420 #ifdef RTE_ARCH_STRICT_ALIGN
1421 MLX5_ASSERT(dst == RTE_PTR_ALIGN(dst, sizeof(uint32_t)));
1422 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
1423 dst += sizeof(uint32_t);
1424 src += sizeof(uint32_t);
1425 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
1426 dst += sizeof(uint32_t);
1427 src += sizeof(uint32_t);
1429 *(uint64_t *)dst = *(unaligned_uint64_t *)src;
1430 dst += sizeof(uint64_t);
1431 src += sizeof(uint64_t);
1435 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
1436 dst += sizeof(uint32_t);
1437 src += sizeof(uint32_t);
1440 *(uint16_t *)dst = *(unaligned_uint16_t *)src;
1441 dst += sizeof(uint16_t);
1442 src += sizeof(uint16_t);
1445 *(uint8_t *)dst = *(uint8_t *)src;
1449 * Build the Data Segment of inlined data from single
1450 * segment packet, no VLAN insertion.
1453 * Pointer to TX queue structure.
1455 * Pointer to burst routine local context.
1457 * Pointer to WQE to fill with built Data Segment.
1459 * Data buffer to point to.
1461 * Data buffer length.
1463 * Configured Tx offloads mask. It is fully defined at
1464 * compile time and may be used for optimization.
1467 * Pointer to the next Data Segment after inlined data.
1468 * Ring buffer wraparound check is needed. We do not do it here because it
1469 * may not be needed for the last packet in the eMPW session.
1471 static __rte_always_inline struct mlx5_wqe_dseg *
1472 mlx5_tx_dseg_empw(struct mlx5_txq_data *__rte_restrict txq,
1473 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
1474 struct mlx5_wqe_dseg *__rte_restrict dseg,
1477 unsigned int olx __rte_unused)
1482 if (!MLX5_TXOFF_CONFIG(MPW)) {
1483 /* Store the descriptor byte counter for eMPW sessions. */
1484 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
1485 pdst = &dseg->inline_data[0];
1487 /* The entire legacy MPW session counter is stored on close. */
1488 pdst = (uint8_t *)dseg;
1491 * The WQEBB space availability is checked by caller.
1492 * Here we should be aware of WQE ring buffer wraparound only.
1494 part = (uint8_t *)txq->wqes_end - pdst;
1495 part = RTE_MIN(part, len);
1497 rte_memcpy(pdst, buf, part);
1501 if (!MLX5_TXOFF_CONFIG(MPW))
1502 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
1503 /* Note: no final wraparound check here. */
1504 return (struct mlx5_wqe_dseg *)pdst;
1506 pdst = (uint8_t *)txq->wqes;
1513 * Build the Data Segment of inlined data from single
1514 * segment packet with VLAN insertion.
1517 * Pointer to TX queue structure.
1519 * Pointer to burst routine local context.
1521 * Pointer to the dseg to fill with the built Data Segment.
1523 * Data buffer to point to.
1525 * Data buffer length.
1527 * Configured Tx offloads mask. It is fully defined at
1528 * compile time and may be used for optimization.
1531 * Pointer to the next Data Segment after inlined data.
1532 * Ring buffer wraparound check is needed.
1534 static __rte_always_inline struct mlx5_wqe_dseg *
1535 mlx5_tx_dseg_vlan(struct mlx5_txq_data *__rte_restrict txq,
1536 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
1537 struct mlx5_wqe_dseg *__rte_restrict dseg,
1540 unsigned int olx __rte_unused)
1546 MLX5_ASSERT(len > MLX5_ESEG_MIN_INLINE_SIZE);
1547 if (!MLX5_TXOFF_CONFIG(MPW)) {
1548 /* Store the descriptor byte counter for eMPW sessions. */
1549 dseg->bcount = rte_cpu_to_be_32
1550 ((len + sizeof(struct rte_vlan_hdr)) |
1551 MLX5_ETH_WQE_DATA_INLINE);
1552 pdst = &dseg->inline_data[0];
1554 /* The entire legacy MPW session counter is stored on close. */
1555 pdst = (uint8_t *)dseg;
1557 memcpy(pdst, buf, MLX5_DSEG_MIN_INLINE_SIZE);
1558 buf += MLX5_DSEG_MIN_INLINE_SIZE;
1559 pdst += MLX5_DSEG_MIN_INLINE_SIZE;
1560 len -= MLX5_DSEG_MIN_INLINE_SIZE;
1561 /* Insert VLAN ethertype + VLAN tag. Pointer is aligned. */
1562 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
1563 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
1564 pdst = (uint8_t *)txq->wqes;
1565 *(uint32_t *)pdst = rte_cpu_to_be_32((RTE_ETHER_TYPE_VLAN << 16) |
1566 loc->mbuf->vlan_tci);
1567 pdst += sizeof(struct rte_vlan_hdr);
1569 * The WQEBB space availability is checked by caller.
1570 * Here we should be aware of WQE ring buffer wraparound only.
1572 part = (uint8_t *)txq->wqes_end - pdst;
1573 part = RTE_MIN(part, len);
1575 rte_memcpy(pdst, buf, part);
1579 if (!MLX5_TXOFF_CONFIG(MPW))
1580 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
1581 /* Note: no final wraparound check here. */
1582 return (struct mlx5_wqe_dseg *)pdst;
1584 pdst = (uint8_t *)txq->wqes;
1591 * Build the Ethernet Segment with optionally inlined data with
1592 * VLAN insertion and following Data Segments (if any) from
1593 * multi-segment packet. Used by ordinary send and TSO.
1596 * Pointer to TX queue structure.
1598 * Pointer to burst routine local context.
1600 * Pointer to WQE to fill with built Ethernet/Data Segments.
1602 * Length of VLAN header to insert, 0 means no VLAN insertion.
1604 * Data length to inline. For TSO this parameter specifies the exact value;
1605 * for the ordinary send routine it can be aligned by the caller to provide
1606 * better WQE space saving and data buffer start address alignment.
1607 * This length includes the VLAN header being inserted.
1609 * Zero means ordinary send, inlined data can be extended,
1610 * otherwise this is TSO, inlined data length is fixed.
1612 * Configured Tx offloads mask. It is fully defined at
1613 * compile time and may be used for optimization.
1616 * Actual size of built WQE in segments.
1618 static __rte_always_inline unsigned int
1619 mlx5_tx_mseg_build(struct mlx5_txq_data *__rte_restrict txq,
1620 struct mlx5_txq_local *__rte_restrict loc,
1621 struct mlx5_wqe *__rte_restrict wqe,
1625 unsigned int olx __rte_unused)
1627 struct mlx5_wqe_dseg *__rte_restrict dseg;
1630 MLX5_ASSERT((rte_pktmbuf_pkt_len(loc->mbuf) + vlan) >= inlen);
1631 loc->mbuf_nseg = NB_SEGS(loc->mbuf);
1634 dseg = mlx5_tx_eseg_mdat(txq, loc, wqe, vlan, inlen, tso, olx);
1635 if (!loc->mbuf_nseg)
1638 * There are still some mbufs remaining, not inlined.
1639 * The first mbuf may be partially inlined and we
1640 * must process the possible non-zero data offset.
1642 if (loc->mbuf_off) {
1647 * Exhausted packets must have been dropped before this point.
1648 * A non-zero offset means there is some data
1649 * remaining in the packet.
1651 MLX5_ASSERT(loc->mbuf_off < rte_pktmbuf_data_len(loc->mbuf));
1652 MLX5_ASSERT(rte_pktmbuf_data_len(loc->mbuf));
1653 dptr = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
1655 dlen = rte_pktmbuf_data_len(loc->mbuf) - loc->mbuf_off;
1657 * Build the pointer/minimal Data Segment.
1658 * Do ring buffer wrapping check in advance.
1660 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
1661 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
1662 mlx5_tx_dseg_iptr(txq, loc, dseg, dptr, dlen, olx);
1663 /* Store the mbuf to be freed on completion. */
1664 MLX5_ASSERT(loc->elts_free);
1665 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
1668 if (--loc->mbuf_nseg == 0)
1670 loc->mbuf = loc->mbuf->next;
1674 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
1675 struct rte_mbuf *mbuf;
1677 /* Zero length segment found, just skip. */
1679 loc->mbuf = loc->mbuf->next;
1680 rte_pktmbuf_free_seg(mbuf);
1681 if (--loc->mbuf_nseg == 0)
1684 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
1685 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
1688 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
1689 rte_pktmbuf_data_len(loc->mbuf), olx);
1690 MLX5_ASSERT(loc->elts_free);
1691 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
1694 if (--loc->mbuf_nseg == 0)
1696 loc->mbuf = loc->mbuf->next;
1701 /* Calculate actual segments used from the dseg pointer. */
1702 if ((uintptr_t)wqe < (uintptr_t)dseg)
1703 ds = ((uintptr_t)dseg - (uintptr_t)wqe) / MLX5_WSEG_SIZE;
1705 ds = (((uintptr_t)dseg - (uintptr_t)wqe) +
1706 txq->wqe_s * MLX5_WQE_SIZE) / MLX5_WSEG_SIZE;
1711 * The routine checks the timestamp flag in the current packet,
1712 * and pushes a WAIT WQE into the queue if scheduling is required.
1715 * Pointer to TX queue structure.
1717 * Pointer to burst routine local context.
1719 * Configured Tx offloads mask. It is fully defined at
1720 * compile time and may be used for optimization.
1723 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
1724 * MLX5_TXCMP_CODE_SINGLE - continue processing with the packet.
1725 * MLX5_TXCMP_CODE_MULTI - the WAIT inserted, continue processing.
1726 * Local context variables partially updated.
1728 static __rte_always_inline enum mlx5_txcmp_code
1729 mlx5_tx_schedule_send(struct mlx5_txq_data *restrict txq,
1730 struct mlx5_txq_local *restrict loc,
1733 if (MLX5_TXOFF_CONFIG(TXPP) &&
1734 loc->mbuf->ol_flags & txq->ts_mask) {
1735 struct mlx5_wqe *wqe;
1740 * Estimate the required space quickly and roughly.
1741 * We would like to ensure the packet can be pushed
1742 * to the queue and we won't get an orphan WAIT WQE.
1744 if (loc->wqe_free <= MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE ||
1745 loc->elts_free < NB_SEGS(loc->mbuf))
1746 return MLX5_TXCMP_CODE_EXIT;
1747 /* Convert the timestamp into a completion index to wait for. */
1748 ts = *RTE_MBUF_DYNFIELD(loc->mbuf, txq->ts_offset, uint64_t *);
1749 wci = mlx5_txpp_convert_tx_ts(txq->sh, ts);
1750 if (unlikely(wci < 0))
1751 return MLX5_TXCMP_CODE_SINGLE;
1752 /* Build the WAIT WQE with specified completion. */
1753 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
1754 mlx5_tx_cseg_init(txq, loc, wqe, 2, MLX5_OPCODE_WAIT, olx);
1755 mlx5_tx_wseg_init(txq, loc, wqe, wci, olx);
1758 return MLX5_TXCMP_CODE_MULTI;
1760 return MLX5_TXCMP_CODE_SINGLE;
1764 * Tx one packet function for multi-segment TSO. Supports all
1765 * types of Tx offloads, uses MLX5_OPCODE_TSO to build WQEs,
1766 * sends one packet per WQE.
1768 * This routine is responsible for storing the processed mbuf
1769 * into the elts ring buffer and updating elts_head.
1772 * Pointer to TX queue structure.
1774 * Pointer to burst routine local context.
1776 * Configured Tx offloads mask. It is fully defined at
1777 * compile time and may be used for optimization.
1780 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
1781 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
1782 * Local context variables partially updated.
1784 static __rte_always_inline enum mlx5_txcmp_code
1785 mlx5_tx_packet_multi_tso(struct mlx5_txq_data *__rte_restrict txq,
1786 struct mlx5_txq_local *__rte_restrict loc,
1789 struct mlx5_wqe *__rte_restrict wqe;
1790 unsigned int ds, dlen, inlen, ntcp, vlan = 0;
1792 if (MLX5_TXOFF_CONFIG(TXPP)) {
1793 enum mlx5_txcmp_code wret;
1795 /* Generate WAIT for scheduling if requested. */
1796 wret = mlx5_tx_schedule_send(txq, loc, olx);
1797 if (wret == MLX5_TXCMP_CODE_EXIT)
1798 return MLX5_TXCMP_CODE_EXIT;
1799 if (wret == MLX5_TXCMP_CODE_ERROR)
1800 return MLX5_TXCMP_CODE_ERROR;
1803 * Calculate data length to be inlined to estimate
1804 * the required space in WQE ring buffer.
1806 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
1807 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN)
1808 vlan = sizeof(struct rte_vlan_hdr);
1809 inlen = loc->mbuf->l2_len + vlan +
1810 loc->mbuf->l3_len + loc->mbuf->l4_len;
1811 if (unlikely((!inlen || !loc->mbuf->tso_segsz)))
1812 return MLX5_TXCMP_CODE_ERROR;
1813 if (loc->mbuf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
1814 inlen += loc->mbuf->outer_l2_len + loc->mbuf->outer_l3_len;
1815 /* Packet must contain all TSO headers. */
1816 if (unlikely(inlen > MLX5_MAX_TSO_HEADER ||
1817 inlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
1818 inlen > (dlen + vlan)))
1819 return MLX5_TXCMP_CODE_ERROR;
1820 MLX5_ASSERT(inlen >= txq->inlen_mode);
1822 * Check whether there are enough free WQEBBs:
1824 * - Ethernet Segment
1825 * - First Segment of inlined Ethernet data
1826 * - ... data continued ...
1827 * - Data Segments of pointer/min inline type
1829 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
1830 MLX5_ESEG_MIN_INLINE_SIZE +
1832 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
1833 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
1834 return MLX5_TXCMP_CODE_EXIT;
1835 /* Check for maximal WQE size. */
1836 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
1837 return MLX5_TXCMP_CODE_ERROR;
1838 #ifdef MLX5_PMD_SOFT_COUNTERS
1839 /* Update sent data bytes/packets counters. */
1840 ntcp = (dlen - (inlen - vlan) + loc->mbuf->tso_segsz - 1) /
1841 loc->mbuf->tso_segsz;
1843 * One will be added for mbuf itself at the end of the mlx5_tx_burst
1844 * from loc->pkts_sent field.
1847 txq->stats.opackets += ntcp;
1848 txq->stats.obytes += dlen + vlan + ntcp * inlen;
1850 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
1851 loc->wqe_last = wqe;
1852 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_TSO, olx);
1853 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 1, olx);
1854 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
1855 txq->wqe_ci += (ds + 3) / 4;
1856 loc->wqe_free -= (ds + 3) / 4;
1857 return MLX5_TXCMP_CODE_MULTI;
1861 * Tx one packet function for multi-segment SEND. Supports all types of Tx
1862 * offloads, uses MLX5_OPCODE_SEND to build WQEs, sends one packet per WQE,
1863 * without any data inlining in Ethernet Segment.
1865 * This routine is responsible for storing the processed mbuf
1866 * into the elts ring buffer and updating elts_head.
1869 * Pointer to TX queue structure.
1871 * Pointer to burst routine local context.
1873 * Configured Tx offloads mask. It is fully defined at
1874 * compile time and may be used for optimization.
1877 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
1878 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
1879 * Local context variables partially updated.
1881 static __rte_always_inline enum mlx5_txcmp_code
1882 mlx5_tx_packet_multi_send(struct mlx5_txq_data *__rte_restrict txq,
1883 struct mlx5_txq_local *__rte_restrict loc,
1886 struct mlx5_wqe_dseg *__rte_restrict dseg;
1887 struct mlx5_wqe *__rte_restrict wqe;
1888 unsigned int ds, nseg;
1890 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
1891 if (MLX5_TXOFF_CONFIG(TXPP)) {
1892 enum mlx5_txcmp_code wret;
1894 /* Generate WAIT for scheduling if requested. */
1895 wret = mlx5_tx_schedule_send(txq, loc, olx);
1896 if (wret == MLX5_TXCMP_CODE_EXIT)
1897 return MLX5_TXCMP_CODE_EXIT;
1898 if (wret == MLX5_TXCMP_CODE_ERROR)
1899 return MLX5_TXCMP_CODE_ERROR;
1902 * No inlining at all - saving CPU cycles was prioritized at
1903 * configuration time, so we should not copy any packet data into the WQE.
1905 nseg = NB_SEGS(loc->mbuf);
1907 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
1908 return MLX5_TXCMP_CODE_EXIT;
1909 /* Check for maximal WQE size. */
1910 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
1911 return MLX5_TXCMP_CODE_ERROR;
1913 * Some Tx offloads may cause an error if the packet is not long enough,
1914 * check against assumed minimal length.
1916 if (rte_pktmbuf_pkt_len(loc->mbuf) <= MLX5_ESEG_MIN_INLINE_SIZE)
1917 return MLX5_TXCMP_CODE_ERROR;
1918 #ifdef MLX5_PMD_SOFT_COUNTERS
1919 /* Update sent data bytes counter. */
1920 txq->stats.obytes += rte_pktmbuf_pkt_len(loc->mbuf);
1921 if (MLX5_TXOFF_CONFIG(VLAN) &&
1922 loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN)
1923 txq->stats.obytes += sizeof(struct rte_vlan_hdr);
1926 * SEND WQE, one WQEBB:
1927 * - Control Segment, SEND opcode
1928 * - Ethernet Segment, optional VLAN, no inline
1929 * - Data Segments, pointer only type
1931 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
1932 loc->wqe_last = wqe;
1933 mlx5_tx_cseg_init(txq, loc, wqe, ds, MLX5_OPCODE_SEND, olx);
1934 mlx5_tx_eseg_none(txq, loc, wqe, olx);
1935 dseg = &wqe->dseg[0];
1937 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
1938 struct rte_mbuf *mbuf;
1941 * Zero length segment found, the total size of the WQE
1942 * in segments has to be corrected.
1943 * It is supposed to be a rare occasion, so in the normal
1944 * case (no zero length segments) we avoid an extra
1945 * write to the Control Segment.
1948 wqe->cseg.sq_ds -= RTE_BE32(1);
1950 loc->mbuf = mbuf->next;
1951 rte_pktmbuf_free_seg(mbuf);
1957 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
1958 rte_pktmbuf_data_len(loc->mbuf), olx);
1959 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
1964 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
1965 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
1966 loc->mbuf = loc->mbuf->next;
1969 txq->wqe_ci += (ds + 3) / 4;
1970 loc->wqe_free -= (ds + 3) / 4;
1971 return MLX5_TXCMP_CODE_MULTI;
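/*
 * Illustrative sketch, not part of the transmit datapath: a WQEBB is 64
 * bytes and holds four 16-byte WSEGs, so the routines here convert a
 * segment count (ds) into consumed WQEBBs with a ceiling division by four,
 * as done for wqe_ci/wqe_free above. A hypothetical helper:
 */
static __rte_always_inline unsigned int
mlx5_tx_example_wqebb_from_ds(unsigned int ds)
{
	return (ds + 3) / 4;
}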
1975 * Tx one packet function for multi-segment SEND. Supports all
1976 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
1977 * sends one packet per WQE, with data inlining in
1978 * Ethernet Segment and minimal Data Segments.
1980 * This routine is responsible for storing the processed mbuf
1981 * into the elts ring buffer and updating elts_head.
1984 * Pointer to TX queue structure.
1986 * Pointer to burst routine local context.
1988 * Configured Tx offloads mask. It is fully defined at
1989 * compile time and may be used for optimization.
1992 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
1993 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
1994 * Local context variables partially updated.
1996 static __rte_always_inline enum mlx5_txcmp_code
1997 mlx5_tx_packet_multi_inline(struct mlx5_txq_data *__rte_restrict txq,
1998 struct mlx5_txq_local *__rte_restrict loc,
2001 struct mlx5_wqe *__rte_restrict wqe;
2002 unsigned int ds, inlen, dlen, vlan = 0;
2004 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
2005 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
2006 if (MLX5_TXOFF_CONFIG(TXPP)) {
2007 enum mlx5_txcmp_code wret;
2009 /* Generate WAIT for scheduling if requested. */
2010 wret = mlx5_tx_schedule_send(txq, loc, olx);
2011 if (wret == MLX5_TXCMP_CODE_EXIT)
2012 return MLX5_TXCMP_CODE_EXIT;
2013 if (wret == MLX5_TXCMP_CODE_ERROR)
2014 return MLX5_TXCMP_CODE_ERROR;
2017 * First calculate data length to be inlined
2018 * to estimate the required space for WQE.
2020 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
2021 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN)
2022 vlan = sizeof(struct rte_vlan_hdr);
2023 inlen = dlen + vlan;
2024 /* Check against minimal length. */
2025 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
2026 return MLX5_TXCMP_CODE_ERROR;
2027 MLX5_ASSERT(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
2028 if (inlen > txq->inlen_send ||
2029 loc->mbuf->ol_flags & RTE_MBUF_F_TX_DYNF_NOINLINE) {
2030 struct rte_mbuf *mbuf;
2035 nxlen = rte_pktmbuf_data_len(mbuf);
2037 * Packet length exceeds the allowed inline data length,
2038 * check whether the minimal inlining is required.
2040 if (txq->inlen_mode) {
2041 MLX5_ASSERT(txq->inlen_mode >=
2042 MLX5_ESEG_MIN_INLINE_SIZE);
2043 MLX5_ASSERT(txq->inlen_mode <= txq->inlen_send);
2044 inlen = txq->inlen_mode;
2045 } else if (vlan && !txq->vlan_en) {
2047 * VLAN insertion is requested and the hardware does not
2048 * support the offload, it will be done with software inlining.
2050 inlen = MLX5_ESEG_MIN_INLINE_SIZE;
2051 } else if (mbuf->ol_flags & RTE_MBUF_F_TX_DYNF_NOINLINE ||
2052 nxlen > txq->inlen_send) {
2053 return mlx5_tx_packet_multi_send(txq, loc, olx);
2058 * Now we know the minimal amount of data requested
2059 * to inline. Check whether we should inline the buffers
2060 * from the beginning of the chain to eliminate some mbufs.
2062 if (unlikely(nxlen <= txq->inlen_send)) {
2063 /* We can inline first mbuf at least. */
2064 if (nxlen < inlen) {
2067 /* Scan mbufs till inlen filled. */
2072 nxlen = rte_pktmbuf_data_len(mbuf);
2074 } while (unlikely(nxlen < inlen));
2075 if (unlikely(nxlen > txq->inlen_send)) {
2076 /* We cannot inline entire mbuf. */
2077 smlen = inlen - smlen;
2078 start = rte_pktmbuf_mtod_offset
2079 (mbuf, uintptr_t, smlen);
2087 /* The end of the packet must not be reached here. */
2089 nxlen = inlen + rte_pktmbuf_data_len(mbuf);
2090 } while (unlikely(nxlen < txq->inlen_send));
2092 start = rte_pktmbuf_mtod(mbuf, uintptr_t);
2094 * Check whether we can do inlining to align the start
2095 * address of the data buffer to a cache line.
2098 start = (~start + 1) & (RTE_CACHE_LINE_SIZE - 1);
2099 if (unlikely(start)) {
2101 if (start <= txq->inlen_send)
2106 * Check whether there are enough free WQEBBs:
2108 * - Ethernet Segment
2109 * - First Segment of inlined Ethernet data
2110 * - ... data continued ...
2111 * - Data Segments of pointer/min inline type
2113 * Estimate the number of Data Segments conservatively,
2114 * supposing that no mbufs are freed during inlining.
2116 MLX5_ASSERT(inlen <= txq->inlen_send);
2117 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
2118 MLX5_ESEG_MIN_INLINE_SIZE +
2120 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
2121 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
2122 return MLX5_TXCMP_CODE_EXIT;
2123 /* Check for maximal WQE size. */
2124 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
2125 return MLX5_TXCMP_CODE_ERROR;
2126 #ifdef MLX5_PMD_SOFT_COUNTERS
2127 /* Update sent data bytes/packets counters. */
2128 txq->stats.obytes += dlen + vlan;
2130 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
2131 loc->wqe_last = wqe;
2132 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_SEND, olx);
2133 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 0, olx);
2134 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
2135 txq->wqe_ci += (ds + 3) / 4;
2136 loc->wqe_free -= (ds + 3) / 4;
2137 return MLX5_TXCMP_CODE_MULTI;
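/*
 * Illustrative sketch, not part of the transmit datapath: the alignment
 * check in mlx5_tx_packet_multi_inline() above computes how many bytes
 * separate the data buffer address from the next cache line boundary with
 * a two's complement trick. A hypothetical helper with the same arithmetic:
 */
static __rte_always_inline unsigned int
mlx5_tx_example_pad_to_cacheline(uintptr_t addr)
{
	/* (-addr) & (size - 1) is the distance to the next boundary. */
	return (unsigned int)((~addr + 1) & (RTE_CACHE_LINE_SIZE - 1));
}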
2141 * Tx burst function for multi-segment packets. Supports all
2142 * types of Tx offloads, uses MLX5_OPCODE_SEND/TSO to build WQEs,
2143 * sends one packet per WQE. Function stops sending if it
2144 * encounters the single-segment packet.
2146 * This routine is responsible for storing the processed mbuf
2147 * into the elts ring buffer and updating elts_head.
2150 * Pointer to TX queue structure.
2152 * Packets to transmit.
2154 * Number of packets in array.
2156 * Pointer to burst routine local context.
2158 * Configured Tx offloads mask. It is fully defined at
2159 * compile time and may be used for optimization.
2162 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
2163 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
2164 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
2165 * MLX5_TXCMP_CODE_TSO - TSO single-segment packet encountered.
2166 * Local context variables updated.
2168 static __rte_always_inline enum mlx5_txcmp_code
2169 mlx5_tx_burst_mseg(struct mlx5_txq_data *__rte_restrict txq,
2170 struct rte_mbuf **__rte_restrict pkts,
2171 unsigned int pkts_n,
2172 struct mlx5_txq_local *__rte_restrict loc,
2175 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
2176 MLX5_ASSERT(pkts_n > loc->pkts_sent);
2177 pkts += loc->pkts_sent + 1;
2178 pkts_n -= loc->pkts_sent;
2180 enum mlx5_txcmp_code ret;
2182 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
2184 * Estimate the number of free elts quickly but conservatively.
2185 * Some segment may be fully inlined and freed,
2186 * ignore this here - precise estimation is costly.
2188 if (loc->elts_free < NB_SEGS(loc->mbuf))
2189 return MLX5_TXCMP_CODE_EXIT;
2190 if (MLX5_TXOFF_CONFIG(TSO) &&
2191 unlikely(loc->mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
2192 /* Proceed with multi-segment TSO. */
2193 ret = mlx5_tx_packet_multi_tso(txq, loc, olx);
2194 } else if (MLX5_TXOFF_CONFIG(INLINE)) {
2195 /* Proceed with multi-segment SEND with inlining. */
2196 ret = mlx5_tx_packet_multi_inline(txq, loc, olx);
2198 /* Proceed with multi-segment SEND w/o inlining. */
2199 ret = mlx5_tx_packet_multi_send(txq, loc, olx);
2201 if (ret == MLX5_TXCMP_CODE_EXIT)
2202 return MLX5_TXCMP_CODE_EXIT;
2203 if (ret == MLX5_TXCMP_CODE_ERROR)
2204 return MLX5_TXCMP_CODE_ERROR;
2205 /* WQE is built, go to the next packet. */
2208 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
2209 return MLX5_TXCMP_CODE_EXIT;
2210 loc->mbuf = *pkts++;
2212 rte_prefetch0(*pkts);
2213 if (likely(NB_SEGS(loc->mbuf) > 1))
2215 /* Here ends the series of multi-segment packets. */
2216 if (MLX5_TXOFF_CONFIG(TSO) &&
2217 unlikely(loc->mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG))
2218 return MLX5_TXCMP_CODE_TSO;
2219 return MLX5_TXCMP_CODE_SINGLE;
2225 * Tx burst function for single-segment packets with TSO.
2226 * Supports all types of Tx offloads, except multi-packets.
2227 * Uses MLX5_OPCODE_TSO to build WQEs, sends one packet per WQE.
2228 * Function stops sending if it encounters the multi-segment
2229 * packet or packet without TSO requested.
2231 * The routine is responsible for storing the processed mbuf into the elts ring
2232 * buffer and updating elts_head if the inline offload is requested, due to possible
2233 * early freeing of the inlined mbufs (the pkts array cannot be stored in elts as a batch).
2236 * Pointer to TX queue structure.
2238 * Packets to transmit.
2240 * Number of packets in array.
2242 * Pointer to burst routine local context.
2244 * Configured Tx offloads mask. It is fully defined at
2245 * compile time and may be used for optimization.
2248 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
2249 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
2250 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
2251 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
2252 * Local context variables updated.
2254 static __rte_always_inline enum mlx5_txcmp_code
2255 mlx5_tx_burst_tso(struct mlx5_txq_data *__rte_restrict txq,
2256 struct rte_mbuf **__rte_restrict pkts,
2257 unsigned int pkts_n,
2258 struct mlx5_txq_local *__rte_restrict loc,
2261 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
2262 MLX5_ASSERT(pkts_n > loc->pkts_sent);
2263 pkts += loc->pkts_sent + 1;
2264 pkts_n -= loc->pkts_sent;
2266 struct mlx5_wqe_dseg *__rte_restrict dseg;
2267 struct mlx5_wqe *__rte_restrict wqe;
2268 unsigned int ds, dlen, hlen, ntcp, vlan = 0;
2271 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
2272 if (MLX5_TXOFF_CONFIG(TXPP)) {
2273 enum mlx5_txcmp_code wret;
2275 /* Generate WAIT for scheduling if requested. */
2276 wret = mlx5_tx_schedule_send(txq, loc, olx);
2277 if (wret == MLX5_TXCMP_CODE_EXIT)
2278 return MLX5_TXCMP_CODE_EXIT;
2279 if (wret == MLX5_TXCMP_CODE_ERROR)
2280 return MLX5_TXCMP_CODE_ERROR;
2282 dlen = rte_pktmbuf_data_len(loc->mbuf);
2283 if (MLX5_TXOFF_CONFIG(VLAN) &&
2284 loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN) {
2285 vlan = sizeof(struct rte_vlan_hdr);
2288 * First calculate the WQE size to check
2289 * whether we have enough space in ring buffer.
2291 hlen = loc->mbuf->l2_len + vlan +
2292 loc->mbuf->l3_len + loc->mbuf->l4_len;
2293 if (unlikely((!hlen || !loc->mbuf->tso_segsz)))
2294 return MLX5_TXCMP_CODE_ERROR;
2295 if (loc->mbuf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
2296 hlen += loc->mbuf->outer_l2_len +
2297 loc->mbuf->outer_l3_len;
2298 /* Segment must contain all TSO headers. */
2299 if (unlikely(hlen > MLX5_MAX_TSO_HEADER ||
2300 hlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
2301 hlen > (dlen + vlan)))
2302 return MLX5_TXCMP_CODE_ERROR;
2304 * Check whether there are enough free WQEBBs:
2306 * - Ethernet Segment
2307 * - First Segment of inlined Ethernet data
2308 * - ... data continued ...
2309 * - Finishing Data Segment of pointer type
2311 ds = 4 + (hlen - MLX5_ESEG_MIN_INLINE_SIZE +
2312 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
2313 if (loc->wqe_free < ((ds + 3) / 4))
2314 return MLX5_TXCMP_CODE_EXIT;
2315 #ifdef MLX5_PMD_SOFT_COUNTERS
2316 /* Update sent data bytes/packets counters. */
2317 ntcp = (dlen + vlan - hlen +
2318 loc->mbuf->tso_segsz - 1) /
2319 loc->mbuf->tso_segsz;
2321 * One will be added for mbuf itself at the end
2322 * of the mlx5_tx_burst from loc->pkts_sent field.
2325 txq->stats.opackets += ntcp;
2326 txq->stats.obytes += dlen + vlan + ntcp * hlen;
2329 * Build the TSO WQE:
2331 * - Ethernet Segment with hlen bytes inlined
2332 * - Data Segment of pointer type
2334 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
2335 loc->wqe_last = wqe;
2336 mlx5_tx_cseg_init(txq, loc, wqe, ds,
2337 MLX5_OPCODE_TSO, olx);
2338 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan, hlen, 1, olx);
2339 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) + hlen - vlan;
2340 dlen -= hlen - vlan;
2341 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
2343 * WQE is built, update the loop parameters
2344 * and go to the next packet.
2346 txq->wqe_ci += (ds + 3) / 4;
2347 loc->wqe_free -= (ds + 3) / 4;
2348 if (MLX5_TXOFF_CONFIG(INLINE))
2349 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
2353 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
2354 return MLX5_TXCMP_CODE_EXIT;
2355 loc->mbuf = *pkts++;
2357 rte_prefetch0(*pkts);
2358 if (MLX5_TXOFF_CONFIG(MULTI) &&
2359 unlikely(NB_SEGS(loc->mbuf) > 1))
2360 return MLX5_TXCMP_CODE_MULTI;
2361 if (likely(!(loc->mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)))
2362 return MLX5_TXCMP_CODE_SINGLE;
2363 /* Continue with the next TSO packet. */
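/*
 * Illustrative sketch, not part of the transmit datapath: the TSO routine
 * above inlines all packet headers into the Ethernet Segment. The header
 * length is the sum of the L2/L3/L4 header lengths, plus the VLAN header
 * inserted by the PMD (vlan) and the outer headers for tunnel packets.
 * A hypothetical helper mirroring that sum from the mbuf metadata:
 */
static __rte_always_inline unsigned int
mlx5_tx_example_tso_hlen(const struct rte_mbuf *mbuf, unsigned int vlan)
{
	unsigned int hlen = mbuf->l2_len + vlan + mbuf->l3_len + mbuf->l4_len;

	if (mbuf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
		hlen += mbuf->outer_l2_len + mbuf->outer_l3_len;
	return hlen;
}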
2369 * Analyze the packet and select the best method to send.
2372 * Pointer to TX queue structure.
2374 * Pointer to burst routine local context.
2376 * Configured Tx offloads mask. It is fully defined at
2377 * compile time and may be used for optimization.
2379 * The predefined flag whether to do the complete check for
2380 * multi-segment packets and TSO.
2383 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
2384 * MLX5_TXCMP_CODE_TSO - TSO required, use TSO/LSO.
2385 * MLX5_TXCMP_CODE_SINGLE - single-segment packet, use SEND.
2386 * MLX5_TXCMP_CODE_EMPW - single-segment packet, use MPW.
2388 static __rte_always_inline enum mlx5_txcmp_code
2389 mlx5_tx_able_to_empw(struct mlx5_txq_data *__rte_restrict txq,
2390 struct mlx5_txq_local *__rte_restrict loc,
2394 /* Check for multi-segment packet. */
2396 MLX5_TXOFF_CONFIG(MULTI) &&
2397 unlikely(NB_SEGS(loc->mbuf) > 1))
2398 return MLX5_TXCMP_CODE_MULTI;
2399 /* Check for TSO packet. */
2401 MLX5_TXOFF_CONFIG(TSO) &&
2402 unlikely(loc->mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG))
2403 return MLX5_TXCMP_CODE_TSO;
2404 /* Check if eMPW is enabled at all. */
2405 if (!MLX5_TXOFF_CONFIG(EMPW))
2406 return MLX5_TXCMP_CODE_SINGLE;
2407 /* Check if eMPW can be engaged. */
2408 if (MLX5_TXOFF_CONFIG(VLAN) &&
2409 unlikely(loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN) &&
2410 (!MLX5_TXOFF_CONFIG(INLINE) ||
2411 unlikely((rte_pktmbuf_data_len(loc->mbuf) +
2412 sizeof(struct rte_vlan_hdr)) > txq->inlen_empw))) {
2414 * eMPW does not support the VLAN insertion offload, we would have to
2415 * inline the entire packet, but the packet is too long for inlining.
2417 return MLX5_TXCMP_CODE_SINGLE;
2419 return MLX5_TXCMP_CODE_EMPW;
2423 * Check whether the next packet attributes match the eMPW batch ones.
2424 * In addition, for legacy MPW the packet length is checked as well.
2427 * Pointer to TX queue structure.
2429 * Pointer to Ethernet Segment of eMPW batch.
2431 * Pointer to burst routine local context.
2433 * Length of previous packet in MPW descriptor.
2435 * Configured Tx offloads mask. It is fully defined at
2436 * compile time and may be used for optimization.
2439 * true - packet match with eMPW batch attributes.
2440 * false - no match, eMPW should be restarted.
2442 static __rte_always_inline bool
2443 mlx5_tx_match_empw(struct mlx5_txq_data *__rte_restrict txq,
2444 struct mlx5_wqe_eseg *__rte_restrict es,
2445 struct mlx5_txq_local *__rte_restrict loc,
2449 uint8_t swp_flags = 0;
2451 /* Compare the checksum flags, if any. */
2452 if (MLX5_TXOFF_CONFIG(CSUM) &&
2453 txq_ol_cksum_to_cs(loc->mbuf) != es->cs_flags)
2455 /* Compare the Software Parser offsets and flags. */
2456 if (MLX5_TXOFF_CONFIG(SWP) &&
2457 (es->swp_offs != txq_mbuf_to_swp(loc, &swp_flags, olx) ||
2458 es->swp_flags != swp_flags))
2460 /* Fill metadata field if needed. */
2461 if (MLX5_TXOFF_CONFIG(METADATA) &&
2462 es->metadata != (loc->mbuf->ol_flags & RTE_MBUF_DYNFLAG_TX_METADATA ?
2463 rte_cpu_to_be_32(*RTE_FLOW_DYNF_METADATA(loc->mbuf)) : 0))
2465 /* Legacy MPW can send packets with the same length only. */
2466 if (MLX5_TXOFF_CONFIG(MPW) &&
2467 dlen != rte_pktmbuf_data_len(loc->mbuf))
2469 /* There must be no VLAN packets in eMPW loop. */
2470 if (MLX5_TXOFF_CONFIG(VLAN))
2471 MLX5_ASSERT(!(loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN));
2472 /* Check if the scheduling is requested. */
2473 if (MLX5_TXOFF_CONFIG(TXPP) &&
2474 loc->mbuf->ol_flags & txq->ts_mask)
2480 * Update send loop variables and WQE for eMPW loop without data inlining.
2481 * Number of Data Segments is equal to the number of sent packets.
2484 * Pointer to TX queue structure.
2486 * Pointer to burst routine local context.
2488 * Number of packets (one Data Segment per packet).
2490 * Accumulated statistics, bytes sent.
2492 * Configured Tx offloads mask. It is fully defined at
2493 * compile time and may be used for optimization.
2496 * Nothing is returned; the Tx queue and local context are updated in place.
2499 static __rte_always_inline void
2500 mlx5_tx_sdone_empw(struct mlx5_txq_data *__rte_restrict txq,
2501 struct mlx5_txq_local *__rte_restrict loc,
2504 unsigned int olx __rte_unused)
2506 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
2507 #ifdef MLX5_PMD_SOFT_COUNTERS
2508 /* Update sent data bytes counter. */
2509 txq->stats.obytes += slen;
2513 loc->elts_free -= ds;
2514 loc->pkts_sent += ds;
2516 loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
2517 txq->wqe_ci += (ds + 3) / 4;
2518 loc->wqe_free -= (ds + 3) / 4;
2522 * Update send loop variables and WQE for eMPW loop with data inlining.
2523 * Gets the size of the descriptors and data pushed to the WQE.
2526 * Pointer to TX queue structure.
2528 * Pointer to burst routine local context.
2530 * Total size of descriptor/data in bytes.
2532 * Accumulated statistics, data bytes sent.
2534 * The base WQE for the eMPW/MPW descriptor.
2536 * Configured Tx offloads mask. It is fully defined at
2537 * compile time and may be used for optimization.
2540 * Nothing is returned; the Tx queue and local context are updated in place.
2543 static __rte_always_inline void
2544 mlx5_tx_idone_empw(struct mlx5_txq_data *__rte_restrict txq,
2545 struct mlx5_txq_local *__rte_restrict loc,
2548 struct mlx5_wqe *__rte_restrict wqem,
2549 unsigned int olx __rte_unused)
2551 struct mlx5_wqe_dseg *dseg = &wqem->dseg[0];
2553 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
2554 #ifdef MLX5_PMD_SOFT_COUNTERS
2555 /* Update sent data bytes counter. */
2556 txq->stats.obytes += slen;
2560 if (MLX5_TXOFF_CONFIG(MPW) && dseg->bcount == RTE_BE32(0)) {
2562 * If the legacy MPW session contains inline packets
2563 * we should set the length of the single inline Data Segment
2564 * and align the total length to the segment size.
2566 MLX5_ASSERT(len > sizeof(dseg->bcount));
2567 dseg->bcount = rte_cpu_to_be_32((len - sizeof(dseg->bcount)) |
2568 MLX5_ETH_WQE_DATA_INLINE);
2569 len = (len + MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE + 2;
2572 * The session is not legacy MPW or contains the
2573 * data buffer pointer segments.
2575 MLX5_ASSERT((len % MLX5_WSEG_SIZE) == 0);
2576 len = len / MLX5_WSEG_SIZE + 2;
2578 wqem->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | len);
2579 txq->wqe_ci += (len + 3) / 4;
2580 loc->wqe_free -= (len + 3) / 4;
2581 loc->wqe_last = wqem;
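/*
 * Illustrative sketch, not part of the transmit datapath: as shown above,
 * the accumulated WQE length in bytes is converted into the Data Segment
 * count for the Control Segment - inlined bytes are rounded up to whole
 * 16-byte WSEGs and two more segments account for the Control and Ethernet
 * Segments of the title WQEBB. A hypothetical helper with that arithmetic:
 */
static __rte_always_inline unsigned int
mlx5_tx_example_empw_inline_ds(unsigned int len)
{
	return (len + MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE + 2;
}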
2585 * The set of Tx burst functions for single-segment packets without TSO
2586 * and with Multi-Packet Writing feature support.
2587 * Supports all types of Tx offloads, except multi-packets and TSO.
2589 * Uses MLX5_OPCODE_EMPW to build WQEs if possible and sends as many packets
2590 * per WQE as it can. If eMPW is not configured or the packet cannot be sent with
2591 * eMPW (VLAN insertion) the ordinary SEND opcode is used and only one packet is sent per WQE.
2594 * Functions stop sending if it encounters the multi-segment packet or packet
2595 * with TSO requested.
2597 * The routines are responsible for storing the processed mbuf into the elts ring
2598 * buffer and updating elts_head if the inlining offload is requested. Otherwise the
2599 * copying of mbufs to elts can be postponed and completed at the end of the burst routine.
2602 * Pointer to TX queue structure.
2604 * Packets to transmit.
2606 * Number of packets in array.
2608 * Pointer to burst routine local context.
2610 * Configured Tx offloads mask. It is fully defined at
2611 * compile time and may be used for optimization.
2614 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
2615 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
2616 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
2617 * MLX5_TXCMP_CODE_TSO - TSO packet encountered.
2618 * MLX5_TXCMP_CODE_SINGLE - used inside functions set.
2619 * MLX5_TXCMP_CODE_EMPW - used inside functions set.
2621 * Local context variables updated.
2624 * The routine sends packets with MLX5_OPCODE_EMPW
2625 * without inlining, this is a dedicated optimized branch.
2626 * No VLAN insertion is supported.
2628 static __rte_always_inline enum mlx5_txcmp_code
2629 mlx5_tx_burst_empw_simple(struct mlx5_txq_data *__rte_restrict txq,
2630 struct rte_mbuf **__rte_restrict pkts,
2631 unsigned int pkts_n,
2632 struct mlx5_txq_local *__rte_restrict loc,
2636 * This subroutine is part of mlx5_tx_burst_single() and sends
2637 * single-segment packets with the eMPW opcode without data inlining.
2639 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
2640 MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
2641 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
2642 MLX5_ASSERT(pkts_n > loc->pkts_sent);
2643 pkts += loc->pkts_sent + 1;
2644 pkts_n -= loc->pkts_sent;
2646 struct mlx5_wqe_dseg *__rte_restrict dseg;
2647 struct mlx5_wqe_eseg *__rte_restrict eseg;
2648 enum mlx5_txcmp_code ret;
2649 unsigned int part, loop;
2650 unsigned int slen = 0;
2653 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
2654 if (MLX5_TXOFF_CONFIG(TXPP)) {
2655 enum mlx5_txcmp_code wret;
2657 /* Generate WAIT for scheduling if requested. */
2658 wret = mlx5_tx_schedule_send(txq, loc, olx);
2659 if (wret == MLX5_TXCMP_CODE_EXIT)
2660 return MLX5_TXCMP_CODE_EXIT;
2661 if (wret == MLX5_TXCMP_CODE_ERROR)
2662 return MLX5_TXCMP_CODE_ERROR;
2664 part = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
2665 MLX5_MPW_MAX_PACKETS :
2666 MLX5_EMPW_MAX_PACKETS);
2667 if (unlikely(loc->elts_free < part)) {
2668 /* We do not have enough elts to save all mbufs. */
2669 if (unlikely(loc->elts_free < MLX5_EMPW_MIN_PACKETS))
2670 return MLX5_TXCMP_CODE_EXIT;
2671 /* But we are still able to send at least a minimal eMPW. */
2672 part = loc->elts_free;
2674 /* Check whether we have enough WQEs */
2675 if (unlikely(loc->wqe_free < ((2 + part + 3) / 4))) {
2676 if (unlikely(loc->wqe_free <
2677 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
2678 return MLX5_TXCMP_CODE_EXIT;
2679 part = (loc->wqe_free * 4) - 2;
2681 if (likely(part > 1))
2682 rte_prefetch0(*pkts);
2683 loc->wqe_last = txq->wqes + (txq->wqe_ci & txq->wqe_m);
2685 * Build eMPW title WQEBB:
2686 * - Control Segment, eMPW opcode
2687 * - Ethernet Segment, no inline
2689 mlx5_tx_cseg_init(txq, loc, loc->wqe_last, part + 2,
2690 MLX5_OPCODE_ENHANCED_MPSW, olx);
2691 mlx5_tx_eseg_none(txq, loc, loc->wqe_last,
2692 olx & ~MLX5_TXOFF_CONFIG_VLAN);
2693 eseg = &loc->wqe_last->eseg;
2694 dseg = &loc->wqe_last->dseg[0];
2696 /* Store the packet length for legacy MPW. */
2697 if (MLX5_TXOFF_CONFIG(MPW))
2698 eseg->mss = rte_cpu_to_be_16
2699 (rte_pktmbuf_data_len(loc->mbuf));
2701 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
2702 #ifdef MLX5_PMD_SOFT_COUNTERS
2703 /* Update sent data bytes counter. */
2708 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
2710 if (unlikely(--loop == 0))
2712 loc->mbuf = *pkts++;
2713 if (likely(loop > 1))
2714 rte_prefetch0(*pkts);
2715 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
2717 * Unroll the completion code to avoid
2718 * returning a variable value - it results in
2719 * unoptimized subsequent checking in the caller.
2721 if (ret == MLX5_TXCMP_CODE_MULTI) {
2723 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
2724 if (unlikely(!loc->elts_free ||
2726 return MLX5_TXCMP_CODE_EXIT;
2727 return MLX5_TXCMP_CODE_MULTI;
2729 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
2730 if (ret == MLX5_TXCMP_CODE_TSO) {
2732 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
2733 if (unlikely(!loc->elts_free ||
2735 return MLX5_TXCMP_CODE_EXIT;
2736 return MLX5_TXCMP_CODE_TSO;
2738 if (ret == MLX5_TXCMP_CODE_SINGLE) {
2740 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
2741 if (unlikely(!loc->elts_free ||
2743 return MLX5_TXCMP_CODE_EXIT;
2744 return MLX5_TXCMP_CODE_SINGLE;
2746 if (ret != MLX5_TXCMP_CODE_EMPW) {
2749 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
2750 return MLX5_TXCMP_CODE_ERROR;
2753 * Check whether packet parameters coincide
2754 * within assumed eMPW batch:
2755 * - check sum settings
2757 * - software parser settings
2758 * - packets length (legacy MPW only)
2759 * - scheduling is not required
2761 if (!mlx5_tx_match_empw(txq, eseg, loc, dlen, olx)) {
2764 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
2765 if (unlikely(!loc->elts_free ||
2767 return MLX5_TXCMP_CODE_EXIT;
2771 /* Packet attributes match, continue the same eMPW. */
2773 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
2774 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
2776 /* eMPW is built successfully, update loop parameters. */
2778 MLX5_ASSERT(pkts_n >= part);
2779 #ifdef MLX5_PMD_SOFT_COUNTERS
2780 /* Update sent data bytes counter. */
2781 txq->stats.obytes += slen;
2783 loc->elts_free -= part;
2784 loc->pkts_sent += part;
2785 txq->wqe_ci += (2 + part + 3) / 4;
2786 loc->wqe_free -= (2 + part + 3) / 4;
2788 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
2789 return MLX5_TXCMP_CODE_EXIT;
2790 loc->mbuf = *pkts++;
2791 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
2792 if (unlikely(ret != MLX5_TXCMP_CODE_EMPW))
2794 /* Continue sending eMPW batches. */
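/*
 * Illustrative sketch, not part of the transmit datapath: the non-inline
 * eMPW loop above consumes one Data Segment per packet plus two segments
 * for the title WQEBB (Control + Ethernet), so the batch size is clamped
 * so that (2 + part) segments fit into the free WQEBBs. A hypothetical
 * helper with the same clamping:
 */
static __rte_always_inline unsigned int
mlx5_tx_example_empw_clamp(unsigned int part, unsigned int wqe_free)
{
	/* The caller is assumed to have checked the minimal eMPW size. */
	if (wqe_free < (2 + part + 3) / 4)
		part = wqe_free * 4 - 2;
	return part;
}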
2800 * The routine sends packets with MLX5_OPCODE_EMPW
2801 * with inlining, optionally supports VLAN insertion.
2803 static __rte_always_inline enum mlx5_txcmp_code
2804 mlx5_tx_burst_empw_inline(struct mlx5_txq_data *__rte_restrict txq,
2805 struct rte_mbuf **__rte_restrict pkts,
2806 unsigned int pkts_n,
2807 struct mlx5_txq_local *__rte_restrict loc,
2811 * This subroutine is part of mlx5_tx_burst_single() and sends
2812 * single-segment packets with the eMPW opcode with data inlining.
2814 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
2815 MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
2816 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
2817 MLX5_ASSERT(pkts_n > loc->pkts_sent);
2818 pkts += loc->pkts_sent + 1;
2819 pkts_n -= loc->pkts_sent;
2821 struct mlx5_wqe_dseg *__rte_restrict dseg;
2822 struct mlx5_wqe *__rte_restrict wqem;
2823 enum mlx5_txcmp_code ret;
2824 unsigned int room, part, nlim;
2825 unsigned int slen = 0;
2827 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
2828 if (MLX5_TXOFF_CONFIG(TXPP)) {
2829 enum mlx5_txcmp_code wret;
2831 /* Generate WAIT for scheduling if requested. */
2832 wret = mlx5_tx_schedule_send(txq, loc, olx);
2833 if (wret == MLX5_TXCMP_CODE_EXIT)
2834 return MLX5_TXCMP_CODE_EXIT;
2835 if (wret == MLX5_TXCMP_CODE_ERROR)
2836 return MLX5_TXCMP_CODE_ERROR;
2839 * Limit the number of packets in one WQE
2840 * to improve CQE generation latency.
2842 nlim = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
2843 MLX5_MPW_INLINE_MAX_PACKETS :
2844 MLX5_EMPW_MAX_PACKETS);
2845 /* Check whether we have the minimal amount of WQEs. */
2846 if (unlikely(loc->wqe_free <
2847 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
2848 return MLX5_TXCMP_CODE_EXIT;
2849 if (likely(pkts_n > 1))
2850 rte_prefetch0(*pkts);
2851 wqem = txq->wqes + (txq->wqe_ci & txq->wqe_m);
2853 * Build eMPW title WQEBB:
2854 * - Control Segment, eMPW opcode, zero DS
2855 * - Ethernet Segment, no inline
2857 mlx5_tx_cseg_init(txq, loc, wqem, 0,
2858 MLX5_OPCODE_ENHANCED_MPSW, olx);
2859 mlx5_tx_eseg_none(txq, loc, wqem,
2860 olx & ~MLX5_TXOFF_CONFIG_VLAN);
2861 dseg = &wqem->dseg[0];
2862 /* Store the packet length for legacy MPW. */
2863 if (MLX5_TXOFF_CONFIG(MPW))
2864 wqem->eseg.mss = rte_cpu_to_be_16
2865 (rte_pktmbuf_data_len(loc->mbuf));
2866 room = RTE_MIN(MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE,
2867 loc->wqe_free) * MLX5_WQE_SIZE -
2868 MLX5_WQE_CSEG_SIZE -
2870 /* Limit the room for legacy MPW sessions for performance. */
2871 if (MLX5_TXOFF_CONFIG(MPW))
2872 room = RTE_MIN(room,
2873 RTE_MAX(txq->inlen_empw +
2874 sizeof(dseg->bcount) +
2875 (MLX5_TXOFF_CONFIG(VLAN) ?
2876 sizeof(struct rte_vlan_hdr) : 0),
2877 MLX5_MPW_INLINE_MAX_PACKETS *
2878 MLX5_WQE_DSEG_SIZE));
2879 /* Build WQE till we have space, packets and resources. */
2882 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
2883 uint8_t *dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
2886 MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
2887 MLX5_ASSERT((room % MLX5_WQE_DSEG_SIZE) == 0);
2888 MLX5_ASSERT((uintptr_t)dseg < (uintptr_t)txq->wqes_end);
2890 * Some Tx offloads may cause an error if the packet is not
2891 * long enough, check against the assumed minimal length.
2893 if (unlikely(dlen <= MLX5_ESEG_MIN_INLINE_SIZE)) {
2895 if (unlikely(!part))
2896 return MLX5_TXCMP_CODE_ERROR;
2898 * We have some successfully built
2899 * packet Data Segments to send.
2901 mlx5_tx_idone_empw(txq, loc, part,
2903 return MLX5_TXCMP_CODE_ERROR;
2905 /* Inline or not inline - that's the Question. */
2906 if (dlen > txq->inlen_empw ||
2907 loc->mbuf->ol_flags & RTE_MBUF_F_TX_DYNF_NOINLINE)
2909 if (MLX5_TXOFF_CONFIG(MPW)) {
2910 if (dlen > txq->inlen_send)
2914 /* Open new inline MPW session. */
2915 tlen += sizeof(dseg->bcount);
2916 dseg->bcount = RTE_BE32(0);
2918 (dseg, sizeof(dseg->bcount));
2921 * Do not mix pointer and inline descriptors
2922 * within a legacy MPW session.
2924 if (wqem->dseg[0].bcount)
2928 tlen = sizeof(dseg->bcount) + dlen;
2930 /* Inline entire packet, optional VLAN insertion. */
2931 if (MLX5_TXOFF_CONFIG(VLAN) &&
2932 loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN) {
2934 * The packet length must be checked in
2935 * mlx5_tx_able_to_empw() and the packet
2936 * is guaranteed to fit into the inline length.
2939 sizeof(struct rte_vlan_hdr)) <=
2941 tlen += sizeof(struct rte_vlan_hdr);
2944 dseg = mlx5_tx_dseg_vlan(txq, loc, dseg,
2946 #ifdef MLX5_PMD_SOFT_COUNTERS
2947 /* Update sent data bytes counter. */
2948 slen += sizeof(struct rte_vlan_hdr);
2953 dseg = mlx5_tx_dseg_empw(txq, loc, dseg,
2956 if (!MLX5_TXOFF_CONFIG(MPW))
2957 tlen = RTE_ALIGN(tlen, MLX5_WSEG_SIZE);
2958 MLX5_ASSERT(room >= tlen);
2961 * Packet data are completely inlined,
2962 * we can try to free the packet.
2964 if (likely(loc->pkts_sent == loc->mbuf_free)) {
2966 * All the packets from the beginning of the burst
2967 * are inlined, we can free the mbufs directly
2968 * from the original array on tx_burst exit().
2974 * In order not to call rte_pktmbuf_free_seg() here,
2975 * in the innermost loop (which might be very
2976 * expensive) we just save the mbuf in elts.
2978 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
2983 * Do not mix pointer and inline descriptors
2984 * within a legacy MPW session.
2986 if (MLX5_TXOFF_CONFIG(MPW) &&
2988 wqem->dseg[0].bcount == RTE_BE32(0))
2991 * Non-inlinable VLAN packets are
2992 * processed outside of this routine.
2994 MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
2995 if (MLX5_TXOFF_CONFIG(VLAN))
2996 MLX5_ASSERT(!(loc->mbuf->ol_flags &
2997 RTE_MBUF_F_TX_VLAN));
2998 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
2999 /* We have to store mbuf in elts.*/
3000 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3002 room -= MLX5_WQE_DSEG_SIZE;
3003 /* Ring buffer wraparound is checked at the loop end.*/
3006 #ifdef MLX5_PMD_SOFT_COUNTERS
3007 /* Update sent data bytes counter. */
3012 if (unlikely(!pkts_n || !loc->elts_free)) {
3014 * We have no resources/packets to
3015 * continue building descriptors.
3018 mlx5_tx_idone_empw(txq, loc, part,
3020 return MLX5_TXCMP_CODE_EXIT;
3022 loc->mbuf = *pkts++;
3023 if (likely(pkts_n > 1))
3024 rte_prefetch0(*pkts);
3025 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
3027 * Unroll the completion code to avoid
3028 * returning a variable value - it results in
3029 * unoptimized subsequent checking in the caller.
3031 if (ret == MLX5_TXCMP_CODE_MULTI) {
3033 mlx5_tx_idone_empw(txq, loc, part,
3035 if (unlikely(!loc->elts_free ||
3037 return MLX5_TXCMP_CODE_EXIT;
3038 return MLX5_TXCMP_CODE_MULTI;
3040 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
3041 if (ret == MLX5_TXCMP_CODE_TSO) {
3043 mlx5_tx_idone_empw(txq, loc, part,
3045 if (unlikely(!loc->elts_free ||
3047 return MLX5_TXCMP_CODE_EXIT;
3048 return MLX5_TXCMP_CODE_TSO;
3050 if (ret == MLX5_TXCMP_CODE_SINGLE) {
3052 mlx5_tx_idone_empw(txq, loc, part,
3054 if (unlikely(!loc->elts_free ||
3056 return MLX5_TXCMP_CODE_EXIT;
3057 return MLX5_TXCMP_CODE_SINGLE;
3059 if (ret != MLX5_TXCMP_CODE_EMPW) {
3062 mlx5_tx_idone_empw(txq, loc, part,
3064 return MLX5_TXCMP_CODE_ERROR;
3066 /* Check if we have minimal room left. */
3068 if (unlikely(!nlim || room < MLX5_WQE_DSEG_SIZE))
3071 * Check whether packet parameters coincide
3072 * within assumed eMPW batch:
3073 * - check sum settings
3075 * - software parser settings
3076 * - packets length (legacy MPW only)
3077 * - scheduling is not required
3079 if (!mlx5_tx_match_empw(txq, &wqem->eseg,
3082 /* Packet attributes match, continue the same eMPW. */
3083 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3084 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3087 * We get here to close an existing eMPW
3088 * session and start the new one.
3090 MLX5_ASSERT(pkts_n);
3092 if (unlikely(!part))
3093 return MLX5_TXCMP_CODE_EXIT;
3094 mlx5_tx_idone_empw(txq, loc, part, slen, wqem, olx);
3095 if (unlikely(!loc->elts_free ||
3097 return MLX5_TXCMP_CODE_EXIT;
3098 /* Continue the loop with new eMPW session. */
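/*
 * Illustrative sketch, not part of the transmit datapath: the inline eMPW
 * session above bounds the room available for Data Segments by the smaller
 * of the maximal WQE size and the free WQEBBs, minus the Control and
 * Ethernet Segments of the title WQEBB. A hypothetical helper with the
 * same bound:
 */
static __rte_always_inline unsigned int
mlx5_tx_example_empw_room(unsigned int wqe_free)
{
	return RTE_MIN(MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE, wqe_free) *
	       MLX5_WQE_SIZE - MLX5_WQE_CSEG_SIZE - MLX5_WQE_ESEG_SIZE;
}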
3104 * The routine sends packets with ordinary MLX5_OPCODE_SEND.
3105 * Data inlining and VLAN insertion are supported.
3107 static __rte_always_inline enum mlx5_txcmp_code
3108 mlx5_tx_burst_single_send(struct mlx5_txq_data *__rte_restrict txq,
3109 struct rte_mbuf **__rte_restrict pkts,
3110 unsigned int pkts_n,
3111 struct mlx5_txq_local *__rte_restrict loc,
3115 * This subroutine is part of mlx5_tx_burst_single()
3116 * and sends single-segment packets with the SEND opcode.
3118 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3119 MLX5_ASSERT(pkts_n > loc->pkts_sent);
3120 pkts += loc->pkts_sent + 1;
3121 pkts_n -= loc->pkts_sent;
3123 struct mlx5_wqe *__rte_restrict wqe;
3124 enum mlx5_txcmp_code ret;
3126 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
3127 if (MLX5_TXOFF_CONFIG(TXPP)) {
3128 enum mlx5_txcmp_code wret;
3130 /* Generate WAIT for scheduling if requested. */
3131 wret = mlx5_tx_schedule_send(txq, loc, olx);
3132 if (wret == MLX5_TXCMP_CODE_EXIT)
3133 return MLX5_TXCMP_CODE_EXIT;
3134 if (wret == MLX5_TXCMP_CODE_ERROR)
3135 return MLX5_TXCMP_CODE_ERROR;
3137 if (MLX5_TXOFF_CONFIG(INLINE)) {
3138 unsigned int inlen, vlan = 0;
3140 inlen = rte_pktmbuf_data_len(loc->mbuf);
3141 if (MLX5_TXOFF_CONFIG(VLAN) &&
3142 loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN) {
3143 vlan = sizeof(struct rte_vlan_hdr);
3147 * If inlining is enabled at configuration time
3148 * the limit must not be less than the minimal size.
3149 * Otherwise we would need an extra check for the data
3150 * size to avoid crashes due to length overflow.
3152 MLX5_ASSERT(txq->inlen_send >=
3153 MLX5_ESEG_MIN_INLINE_SIZE);
3154 if (inlen <= txq->inlen_send) {
3155 unsigned int seg_n, wqe_n;
3157 rte_prefetch0(rte_pktmbuf_mtod
3158 (loc->mbuf, uint8_t *));
3159 /* Check against minimal length. */
3160 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
3161 return MLX5_TXCMP_CODE_ERROR;
3162 if (loc->mbuf->ol_flags &
3163 RTE_MBUF_F_TX_DYNF_NOINLINE) {
3165 * The hint flag not to inline packet
3166 * data is set. Check whether we can
3169 if ((!MLX5_TXOFF_CONFIG(EMPW) &&
3171 (MLX5_TXOFF_CONFIG(MPW) &&
3173 if (inlen <= txq->inlen_send)
3176 * The hardware requires the
3177 * minimal inline data header.
3179 goto single_min_inline;
3181 if (MLX5_TXOFF_CONFIG(VLAN) &&
3182 vlan && !txq->vlan_en) {
3184 * We must insert VLAN tag
3185 * by software means.
3187 goto single_part_inline;
3189 goto single_no_inline;
3193 * Completely inlined packet data WQE:
3194 * - Control Segment, SEND opcode
3195 * - Ethernet Segment, no VLAN insertion
3196 * - Data inlined, VLAN optionally inserted
3197 * - Alignment to MLX5_WSEG_SIZE
3198 * Have to estimate amount of WQEBBs
3200 seg_n = (inlen + 3 * MLX5_WSEG_SIZE -
3201 MLX5_ESEG_MIN_INLINE_SIZE +
3202 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3203 /* Check if there are enough WQEBBs. */
3204 wqe_n = (seg_n + 3) / 4;
3205 if (wqe_n > loc->wqe_free)
3206 return MLX5_TXCMP_CODE_EXIT;
3207 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3208 loc->wqe_last = wqe;
3209 mlx5_tx_cseg_init(txq, loc, wqe, seg_n,
3210 MLX5_OPCODE_SEND, olx);
3211 mlx5_tx_eseg_data(txq, loc, wqe,
3212 vlan, inlen, 0, olx);
3213 txq->wqe_ci += wqe_n;
3214 loc->wqe_free -= wqe_n;
3216 * Packet data are completely inlined,
3217 * free the packet immediately.
3219 rte_pktmbuf_free_seg(loc->mbuf);
3220 } else if ((!MLX5_TXOFF_CONFIG(EMPW) ||
3221 MLX5_TXOFF_CONFIG(MPW)) &&
3224 * If minimal inlining is requested the eMPW
3225 * feature should be disabled because the data is
3226 * inlined into the Ethernet Segment, which cannot
3227 * contain inlined data for eMPW since the
3228 * segment is shared for all packets.
3230 struct mlx5_wqe_dseg *__rte_restrict dseg;
3235 * The inline-mode settings require
3236 * inlining the specified amount of
3237 * data bytes into the Ethernet Segment.
3238 * We should check the free space in the
3239 * WQE ring buffer to inline only partially.
3242 MLX5_ASSERT(txq->inlen_send >= txq->inlen_mode);
3243 MLX5_ASSERT(inlen > txq->inlen_mode);
3244 MLX5_ASSERT(txq->inlen_mode >=
3245 MLX5_ESEG_MIN_INLINE_SIZE);
3247 * Check whether there are enough free WQEBBs:
3249 * - Ethernet Segment
3250 * - First Segment of inlined Ethernet data
3251 * - ... data continued ...
3252 * - Finishing Data Segment of pointer type
3254 ds = (MLX5_WQE_CSEG_SIZE +
3255 MLX5_WQE_ESEG_SIZE +
3256 MLX5_WQE_DSEG_SIZE +
3258 MLX5_ESEG_MIN_INLINE_SIZE +
3259 MLX5_WQE_DSEG_SIZE +
3260 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3261 if (loc->wqe_free < ((ds + 3) / 4))
3262 return MLX5_TXCMP_CODE_EXIT;
3264 * Build the ordinary SEND WQE:
3266 * - Ethernet Segment, inline inlen_mode bytes
3267 * - Data Segment of pointer type
3269 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3270 loc->wqe_last = wqe;
3271 mlx5_tx_cseg_init(txq, loc, wqe, ds,
3272 MLX5_OPCODE_SEND, olx);
3273 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan,
3276 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
3277 txq->inlen_mode - vlan;
3278 inlen -= txq->inlen_mode;
3279 mlx5_tx_dseg_ptr(txq, loc, dseg,
3282 * WQE is built, update the loop parameters
3283 * and go to the next packet.
3285 txq->wqe_ci += (ds + 3) / 4;
3286 loc->wqe_free -= (ds + 3) / 4;
3287 /* We have to store mbuf in elts.*/
3288 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
3289 txq->elts[txq->elts_head++ & txq->elts_m] =
3297 * Partially inlined packet data WQE, we have
3298 * some space in the title WQEBB, we can fill it
3299 * with some packet data. It takes one WQEBB,
3300 * which is available, so no extra space check is needed:
3301 * - Control Segment, SEND opcode
3302 * - Ethernet Segment, no VLAN insertion
3303 * - MLX5_ESEG_MIN_INLINE_SIZE bytes of Data
3304 * - Data Segment, pointer type
3306 * We also get here if VLAN insertion is not
3307 * supported by HW but inlining is enabled.
3310 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3311 loc->wqe_last = wqe;
3312 mlx5_tx_cseg_init(txq, loc, wqe, 4,
3313 MLX5_OPCODE_SEND, olx);
3314 mlx5_tx_eseg_dmin(txq, loc, wqe, vlan, olx);
3315 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
3316 MLX5_ESEG_MIN_INLINE_SIZE - vlan;
3318 * The length check is performed above, by
3319 * comparing with txq->inlen_send. We should
3320 * not get overflow here.
3322 MLX5_ASSERT(inlen > MLX5_ESEG_MIN_INLINE_SIZE);
3323 dlen = inlen - MLX5_ESEG_MIN_INLINE_SIZE;
3324 mlx5_tx_dseg_ptr(txq, loc, &wqe->dseg[1],
3328 /* We have to store mbuf in elts.*/
3329 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
3330 txq->elts[txq->elts_head++ & txq->elts_m] =
3334 #ifdef MLX5_PMD_SOFT_COUNTERS
3335 /* Update sent data bytes counter. */
3336 txq->stats.obytes += vlan +
3337 rte_pktmbuf_data_len(loc->mbuf);
3341 * No inlining at all - CPU cycle saving
3342 * is prioritized at configuration time, so we should not
3343 * copy any packet data to the WQE.
3345 * SEND WQE, one WQEBB:
3346 * - Control Segment, SEND opcode
3347 * - Ethernet Segment, optional VLAN, no inline
3348 * - Data Segment, pointer type
3351 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3352 loc->wqe_last = wqe;
3353 mlx5_tx_cseg_init(txq, loc, wqe, 3,
3354 MLX5_OPCODE_SEND, olx);
3355 mlx5_tx_eseg_none(txq, loc, wqe, olx);
3357 (txq, loc, &wqe->dseg[0],
3358 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3359 rte_pktmbuf_data_len(loc->mbuf), olx);
3363 * We should not store the mbuf pointer in elts
3364 * if no inlining is configured, this is done
3365 * by the calling routine in a batch copy.
3367 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
3369 #ifdef MLX5_PMD_SOFT_COUNTERS
3370 /* Update sent data bytes counter. */
3371 txq->stats.obytes += rte_pktmbuf_data_len(loc->mbuf);
3372 if (MLX5_TXOFF_CONFIG(VLAN) &&
3373 loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN)
3374 txq->stats.obytes +=
3375 sizeof(struct rte_vlan_hdr);
3380 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3381 return MLX5_TXCMP_CODE_EXIT;
3382 loc->mbuf = *pkts++;
3384 rte_prefetch0(*pkts);
3385 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
3386 if (unlikely(ret != MLX5_TXCMP_CODE_SINGLE))
3392 static __rte_always_inline enum mlx5_txcmp_code
3393 mlx5_tx_burst_single(struct mlx5_txq_data *__rte_restrict txq,
3394 struct rte_mbuf **__rte_restrict pkts,
3395 unsigned int pkts_n,
3396 struct mlx5_txq_local *__rte_restrict loc,
3399 enum mlx5_txcmp_code ret;
3401 ret = mlx5_tx_able_to_empw(txq, loc, olx, false);
3402 if (ret == MLX5_TXCMP_CODE_SINGLE)
3404 MLX5_ASSERT(ret == MLX5_TXCMP_CODE_EMPW);
3406 /* Optimize for inline/no inline eMPW send. */
3407 ret = (MLX5_TXOFF_CONFIG(INLINE)) ?
3408 mlx5_tx_burst_empw_inline
3409 (txq, pkts, pkts_n, loc, olx) :
3410 mlx5_tx_burst_empw_simple
3411 (txq, pkts, pkts_n, loc, olx);
3412 if (ret != MLX5_TXCMP_CODE_SINGLE)
3414 /* The resources to send one packet should remain. */
3415 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3417 ret = mlx5_tx_burst_single_send(txq, pkts, pkts_n, loc, olx);
3418 MLX5_ASSERT(ret != MLX5_TXCMP_CODE_SINGLE);
3419 if (ret != MLX5_TXCMP_CODE_EMPW)
3421 /* The resources to send one packet should remain. */
3422 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
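/*
 * Illustrative sketch, not part of the transmit datapath: for a completely
 * inlined single-segment SEND in mlx5_tx_burst_single_send() above, the WQE
 * size is estimated from the inlined length - three extra WSEGs cover the
 * Control Segment, the Ethernet Segment and the rounding, minus the part of
 * the data that fits into the Ethernet Segment's minimal inline area, and
 * the WSEG count is then converted to WQEBBs. A hypothetical helper:
 */
static __rte_always_inline unsigned int
mlx5_tx_example_inline_send_wqebbs(unsigned int inlen)
{
	unsigned int seg_n = (inlen + 3 * MLX5_WSEG_SIZE -
			      MLX5_ESEG_MIN_INLINE_SIZE +
			      MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;

	return (seg_n + 3) / 4;
}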
3427 * DPDK Tx callback template. This is the configured template used to generate
3428 * routines optimized for the specified offload setup.
3429 * One of these generated functions is chosen at SQ configuration time.
3432 * Generic pointer to TX queue structure.
3434 * Packets to transmit.
3436 * Number of packets in array.
3438 * Configured offloads mask, presenting the bits of MLX5_TXOFF_CONFIG_xxx
3439 * values. Should be static to take advantage of compile-time static configuration.
3443 * Number of packets successfully transmitted (<= pkts_n).
3445 static __rte_always_inline uint16_t
3446 mlx5_tx_burst_tmpl(struct mlx5_txq_data *__rte_restrict txq,
3447 struct rte_mbuf **__rte_restrict pkts,
3451 struct mlx5_txq_local loc;
3452 enum mlx5_txcmp_code ret;
3455 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
3456 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
3457 if (unlikely(!pkts_n))
3459 if (MLX5_TXOFF_CONFIG(INLINE))
3463 loc.wqe_last = NULL;
3466 loc.pkts_loop = loc.pkts_sent;
3468 * Check if there are some CQEs, if any:
3469 * - process encountered errors
3470 * - process the completed WQEs
3471 * - free related mbufs
3472 * - doorbell the NIC about processed CQEs
3474 rte_prefetch0(*(pkts + loc.pkts_sent));
3475 mlx5_tx_handle_completion(txq, olx);
3477 * Calculate the number of available resources - elts and WQEs.
3478 * There are two possible different scenarios:
3479 * - no data inlining into WQEs, one WQEBB may contain up to
3480 * four packets, in this case elts become the scarce resource
3481 * - data inlining into WQEs, one packet may require multiple
3482 * WQEBBs, the WQEs become the limiting factor.
3484 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
3485 loc.elts_free = txq->elts_s -
3486 (uint16_t)(txq->elts_head - txq->elts_tail);
3487 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
3488 loc.wqe_free = txq->wqe_s -
3489 (uint16_t)(txq->wqe_ci - txq->wqe_pi);
3490 if (unlikely(!loc.elts_free || !loc.wqe_free))
3494 * Fetch the packet from array. Usually this is the first
3495 * packet in series of multi/single segment packets.
3497 loc.mbuf = *(pkts + loc.pkts_sent);
3498 /* Dedicated branch for multi-segment packets. */
3499 if (MLX5_TXOFF_CONFIG(MULTI) &&
3500 unlikely(NB_SEGS(loc.mbuf) > 1)) {
3502 * Multi-segment packet encountered.
3503 * Hardware is able to process it only
3504 * with SEND/TSO opcodes, one packet
3505 * per WQE, do it in dedicated routine.
3508 MLX5_ASSERT(loc.pkts_sent >= loc.pkts_copy);
3509 part = loc.pkts_sent - loc.pkts_copy;
3510 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
3512 * There are some single-segment mbufs not
3513 * stored in elts. The mbufs must be in the
3514 * same order as WQEs, so we must copy the
3515 * mbufs to elts here, before the coming
3516 * multi-segment packet's mbufs are appended.
3518 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy,
3520 loc.pkts_copy = loc.pkts_sent;
3522 MLX5_ASSERT(pkts_n > loc.pkts_sent);
3523 ret = mlx5_tx_burst_mseg(txq, pkts, pkts_n, &loc, olx);
3524 if (!MLX5_TXOFF_CONFIG(INLINE))
3525 loc.pkts_copy = loc.pkts_sent;
3527 * These returned code checks are supposed
3528 * to be optimized out due to routine inlining.
3530 if (ret == MLX5_TXCMP_CODE_EXIT) {
3532 * The routine returns this code when
3533 * all packets are sent or there are not
3534 * enough resources to complete the request.
3538 if (ret == MLX5_TXCMP_CODE_ERROR) {
3540 * The routine returns this code when some error
3541 * in the incoming packet format occurred.
3543 txq->stats.oerrors++;
3546 if (ret == MLX5_TXCMP_CODE_SINGLE) {
3548 * The single-segment packet was encountered
3549 * in the array, try to send it with the
3550 * best optimized way, possible engaging eMPW.
3552 goto enter_send_single;
3554 if (MLX5_TXOFF_CONFIG(TSO) &&
3555 ret == MLX5_TXCMP_CODE_TSO) {
3557 * The single-segment TSO packet was
3558 * encountered in the array.
3560 goto enter_send_tso;
3562 /* We must not get here. Something is going wrong. */
3564 txq->stats.oerrors++;
3567 /* Dedicated branch for single-segment TSO packets. */
3568 if (MLX5_TXOFF_CONFIG(TSO) &&
3569 unlikely(loc.mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
3571 * TSO might require a special way of inlining
3572 * (dedicated parameters) and is sent with the
3573 * MLX5_OPCODE_TSO opcode only, provide this
3574 * in a dedicated branch.
3577 MLX5_ASSERT(NB_SEGS(loc.mbuf) == 1);
3578 MLX5_ASSERT(pkts_n > loc.pkts_sent);
3579 ret = mlx5_tx_burst_tso(txq, pkts, pkts_n, &loc, olx);
3581 * These returned code checks are supposed
3582 * to be optimized out due to routine inlining.
3584 if (ret == MLX5_TXCMP_CODE_EXIT)
3586 if (ret == MLX5_TXCMP_CODE_ERROR) {
3587 txq->stats.oerrors++;
3590 if (ret == MLX5_TXCMP_CODE_SINGLE)
3591 goto enter_send_single;
3592 if (MLX5_TXOFF_CONFIG(MULTI) &&
3593 ret == MLX5_TXCMP_CODE_MULTI) {
3595 * The multi-segment packet was
3596 * encountered in the array.
3598 goto enter_send_multi;
3600 /* We must not get here. Something is going wrong. */
3602 txq->stats.oerrors++;
3606 * The dedicated branch for the single-segment packets
3607 * without TSO. Often these ones can be sent using
3608 * MLX5_OPCODE_EMPW with multiple packets in one WQE.
3609 * The routine builds the WQEs till it encounters
3610 * the TSO or multi-segment packet (in case these
3611 * offloads are requested at SQ configuration time).
3614 MLX5_ASSERT(pkts_n > loc.pkts_sent);
3615 ret = mlx5_tx_burst_single(txq, pkts, pkts_n, &loc, olx);
3617 * These returned code checks are supposed
3618 * to be optimized out due to routine inlining.
3620 if (ret == MLX5_TXCMP_CODE_EXIT)
3622 if (ret == MLX5_TXCMP_CODE_ERROR) {
3623 txq->stats.oerrors++;
3626 if (MLX5_TXOFF_CONFIG(MULTI) &&
3627 ret == MLX5_TXCMP_CODE_MULTI) {
3629 * The multi-segment packet was
3630 * encountered in the array.
3632 goto enter_send_multi;
3634 if (MLX5_TXOFF_CONFIG(TSO) &&
3635 ret == MLX5_TXCMP_CODE_TSO) {
3637 * The single-segment TSO packet was
3638 * encountered in the array.
3640 goto enter_send_tso;
3642 /* We must not get here. Something is going wrong. */
3644 txq->stats.oerrors++;
3648 * Main Tx loop is completed, do the rest:
3649 * - set completion request if thresholds are reached
3650 * - doorbell the hardware
3651 * - copy the rest of mbufs to elts (if any)
3653 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE) ||
3654 loc.pkts_sent >= loc.pkts_copy);
3655 /* Take a shortcut if nothing is sent. */
3656 if (unlikely(loc.pkts_sent == loc.pkts_loop))
3658 /* Request CQE generation if limits are reached. */
3659 mlx5_tx_request_completion(txq, &loc, olx);
3661 * Ring the QP doorbell immediately after WQE building completion
3662 * to improve latency. The purely software-related data treatment
3663 * can be completed after the doorbell. Tx CQEs for this SQ are
3664 * processed in this thread only by polling.
3666 * The rdma core library can map doorbell register in two ways,
3667 * depending on the environment variable "MLX5_SHUT_UP_BF":
3669 * - as regular cached memory, the variable is either missing or
3670 * set to zero. This type of mapping may cause significant
3671 * doorbell register write latency and requires an explicit memory
3672 * write barrier to mitigate this issue and prevent write combining.
3674 * - as non-cached memory, the variable is present and set to a non-zero
3675 * value. This type of mapping may cause a performance impact under
3676 * heavy load conditions but the explicit write memory barrier is
3677 * not required and it may improve core performance.
3679 * - the legacy behaviour (prior to the 19.08 release) was to use some
3680 * heuristics to decide whether the write memory barrier should
3681 * be performed. This behavior is supported by specifying
3682 * tx_db_nc=2; the write barrier is skipped if the application provides
3683 * the full recommended burst of packets, supposing the next
3684 * packets are coming and the write barrier will be issued on
3685 * the next burst (after descriptor writing, at least).
3687 mlx5_tx_dbrec_cond_wmb(txq, loc.wqe_last, !txq->db_nc &&
3688 (!txq->db_heu || pkts_n % MLX5_TX_DEFAULT_BURST));
3689 /* Not all of the mbufs may be stored into elts yet. */
3690 part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent - loc.pkts_copy;
3691 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
3693 * There are some single-segment mbufs not stored in elts.
3694 * This can only happen if the last packet was single-segment.
3695 * The copying is gathered into one place because it is
3696 * a good opportunity to optimize it with SIMD.
3697 * Unfortunately, if inlining is enabled, gaps in the pointer
3698 * array may occur due to early freeing of the inlined mbufs.
3700 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy, part, olx);
3701 loc.pkts_copy = loc.pkts_sent;
3703 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
3704 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
3705 if (pkts_n > loc.pkts_sent) {
3707 * If the burst size is large there might not be enough CQEs
3708 * fetched from the completion queue and not enough resources
3709 * freed to send all the packets.
3714 #ifdef MLX5_PMD_SOFT_COUNTERS
3715 /* Increment sent packets counter. */
3716 txq->stats.opackets += loc.pkts_sent;
3718 if (MLX5_TXOFF_CONFIG(INLINE) && loc.mbuf_free)
3719 __mlx5_tx_free_mbuf(txq, pkts, loc.mbuf_free, olx);
3720 return loc.pkts_sent;
3723 #endif /* RTE_PMD_MLX5_TX_H_ */