1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2021 6WIND S.A.
3 * Copyright 2021 Mellanox Technologies, Ltd
6 #ifndef RTE_PMD_MLX5_TX_H_
7 #define RTE_PMD_MLX5_TX_H_
10 #include <sys/queue.h>
13 #include <rte_mempool.h>
14 #include <rte_common.h>
15 #include <rte_spinlock.h>
17 #include <mlx5_common_mr.h>
20 #include "mlx5_autoconf.h"
22 /* TX burst subroutines return codes. */
23 enum mlx5_txcmp_code {
24 MLX5_TXCMP_CODE_EXIT = 0,
25 MLX5_TXCMP_CODE_ERROR,
26 MLX5_TXCMP_CODE_SINGLE,
27 MLX5_TXCMP_CODE_MULTI,
33 * These defines are used to configure the set of Tx burst routine options
34 * supported at compile time. Options that are not specified are optimized
35 * out because the 'if' conditions can be evaluated at compile time.
36 * Offloads with bigger runtime check overhead (requiring more CPU cycles
37 * to skip) should have the bigger index - this is needed to select the better
38 * matching routine if there is no exact match and some requested offloads
39 * are not actually supported.
40 */
41 #define MLX5_TXOFF_CONFIG_MULTI (1u << 0) /* Multi-segment packets.*/
42 #define MLX5_TXOFF_CONFIG_TSO (1u << 1) /* TCP send offload supported.*/
43 #define MLX5_TXOFF_CONFIG_SWP (1u << 2) /* Tunnels/SW Parser offloads.*/
44 #define MLX5_TXOFF_CONFIG_CSUM (1u << 3) /* Check Sums offloaded. */
45 #define MLX5_TXOFF_CONFIG_INLINE (1u << 4) /* Data inlining supported. */
46 #define MLX5_TXOFF_CONFIG_VLAN (1u << 5) /* VLAN insertion supported.*/
47 #define MLX5_TXOFF_CONFIG_METADATA (1u << 6) /* Flow metadata. */
48 #define MLX5_TXOFF_CONFIG_EMPW (1u << 8) /* Enhanced MPW supported.*/
49 #define MLX5_TXOFF_CONFIG_MPW (1u << 9) /* Legacy MPW supported.*/
50 #define MLX5_TXOFF_CONFIG_TXPP (1u << 10) /* Scheduling on timestamp.*/
52 /* The most common offload groups. */
53 #define MLX5_TXOFF_CONFIG_NONE 0
54 #define MLX5_TXOFF_CONFIG_FULL (MLX5_TXOFF_CONFIG_MULTI | \
55 MLX5_TXOFF_CONFIG_TSO | \
56 MLX5_TXOFF_CONFIG_SWP | \
57 MLX5_TXOFF_CONFIG_CSUM | \
58 MLX5_TXOFF_CONFIG_INLINE | \
59 MLX5_TXOFF_CONFIG_VLAN | \
60 MLX5_TXOFF_CONFIG_METADATA)
62 #define MLX5_TXOFF_CONFIG(mask) (olx & MLX5_TXOFF_CONFIG_##mask)
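/*
 * Usage note: 'olx' is the compile-time constant offload mask passed to the
 * burst routine templates, so checks like
 *     if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN)
 * are resolved by the compiler and unused offload branches are removed.
 */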
64 #define MLX5_TXOFF_PRE_DECL(func) \
65 uint16_t mlx5_tx_burst_##func(void *txq, \
66 struct rte_mbuf **pkts, \
69 #define MLX5_TXOFF_DECL(func, olx) \
70 uint16_t mlx5_tx_burst_##func(void *txq, \
71 struct rte_mbuf **pkts, \
74 return mlx5_tx_burst_tmpl((struct mlx5_txq_data *)txq, \
75 pkts, pkts_n, (olx)); \
78 /* Mbuf dynamic flag mask for the no-inline hint. */
79 extern uint64_t rte_net_mlx5_dynf_inline_mask;
80 #define RTE_MBUF_F_TX_DYNF_NOINLINE rte_net_mlx5_dynf_inline_mask
82 extern uint32_t mlx5_ptype_table[] __rte_cache_aligned;
83 extern uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned;
84 extern uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned;
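/*
 * mlx5_cksum_table and mlx5_swp_types_table are indexed by the mbuf offload
 * flag combinations built in txq_ol_cksum_to_cs() and txq_mbuf_to_swp() below.
 */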
86 struct mlx5_txq_stats {
87 #ifdef MLX5_PMD_SOFT_COUNTERS
88 uint64_t opackets; /**< Total of successfully sent packets. */
89 uint64_t obytes; /**< Total of successfully sent bytes. */
91 uint64_t oerrors; /**< Total number of failed transmitted packets. */
94 /* TX queue send local data. */
96 struct mlx5_txq_local {
97 struct mlx5_wqe *wqe_last; /* last sent WQE pointer. */
98 struct rte_mbuf *mbuf; /* first mbuf to process. */
99 uint16_t pkts_copy; /* packets copied to elts. */
100 uint16_t pkts_sent; /* packets sent. */
101 uint16_t pkts_loop; /* packets sent on loop entry. */
102 uint16_t elts_free; /* available elts remain. */
103 uint16_t wqe_free; /* available wqe remain. */
104 uint16_t mbuf_off; /* data offset in current mbuf. */
105 uint16_t mbuf_nseg; /* number of remaining mbufs. */
106 uint16_t mbuf_free; /* number of inline mbufs to free. */
109 /* TX queue descriptor. */
111 struct mlx5_txq_data {
112 uint16_t elts_head; /* Current counter in (*elts)[]. */
113 uint16_t elts_tail; /* Counter of first element awaiting completion. */
114 uint16_t elts_comp; /* elts index since last completion request. */
115 uint16_t elts_s; /* Number of mbuf elements. */
116 uint16_t elts_m; /* Mask for mbuf elements indices. */
117 /* Fields related to elts mbuf storage. */
118 uint16_t wqe_ci; /* Consumer index for work queue. */
119 uint16_t wqe_pi; /* Producer index for work queue. */
120 uint16_t wqe_s; /* Number of WQ elements. */
121 uint16_t wqe_m; /* Mask for WQ element indices. */
122 uint16_t wqe_comp; /* WQE index since last completion request. */
123 uint16_t wqe_thres; /* WQE threshold to request completion in CQ. */
124 /* WQ related fields. */
125 uint16_t cq_ci; /* Consumer index for completion queue. */
126 uint16_t cq_pi; /* Producer index for completion queue. */
127 uint16_t cqe_s; /* Number of CQ elements. */
128 uint16_t cqe_m; /* Mask for CQ indices. */
129 /* CQ related fields. */
130 uint16_t elts_n:4; /* elts[] length (in log2). */
131 uint16_t cqe_n:4; /* Number of CQ elements (in log2). */
132 uint16_t wqe_n:4; /* Number of WQ elements (in log2). */
133 uint16_t tso_en:1; /* When set hardware TSO is enabled. */
134 uint16_t tunnel_en:1;
135 /* When set, Tx offloads for tunneled packets are supported. */
136 uint16_t swp_en:1; /* Whether SW parser is enabled. */
137 uint16_t vlan_en:1; /* VLAN insertion in WQE is supported. */
138 uint16_t db_nc:1; /* Doorbell mapped to non-cached region. */
139 uint16_t db_heu:1; /* Doorbell heuristic write barrier. */
140 uint16_t fast_free:1; /* mbuf fast free on Tx is enabled. */
141 uint16_t inlen_send; /* Ordinary send data inline size. */
142 uint16_t inlen_empw; /* eMPW max packet size to inline. */
143 uint16_t inlen_mode; /* Minimal data length to inline. */
144 uint32_t qp_num_8s; /* QP number shifted by 8. */
145 uint64_t offloads; /* Offloads for Tx Queue. */
146 struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
147 struct mlx5_wqe *wqes; /* Work queue. */
148 struct mlx5_wqe *wqes_end; /* Work queue array limit. */
149 #ifdef RTE_LIBRTE_MLX5_DEBUG
150 uint32_t *fcqs; /* Free completion queue (debug extended). */
152 uint16_t *fcqs; /* Free completion queue. */
154 volatile struct mlx5_cqe *cqes; /* Completion queue. */
155 volatile uint32_t *qp_db; /* Work queue doorbell. */
156 volatile uint32_t *cq_db; /* Completion queue doorbell. */
157 uint16_t port_id; /* Port ID of device. */
158 uint16_t idx; /* Queue index. */
159 uint64_t ts_mask; /* Timestamp flag dynamic mask. */
160 int32_t ts_offset; /* Timestamp field dynamic offset. */
161 struct mlx5_dev_ctx_shared *sh; /* Shared context. */
162 struct mlx5_txq_stats stats; /* TX queue counters. */
164 rte_spinlock_t *uar_lock;
165 /* UAR access lock required for 32bit implementations */
167 struct rte_mbuf *elts[0];
168 /* Storage for queued packets, must be the last field. */
169 } __rte_cache_aligned;
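/*
 * Note: elts_head/elts_tail and wqe_ci/cq_ci are free-running 16-bit counters;
 * a ring slot is addressed by masking, e.g. txq->elts[head & txq->elts_m],
 * which relies on the ring sizes being powers of two.
 */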
172 MLX5_TXQ_TYPE_STANDARD, /* Standard Tx queue. */
173 MLX5_TXQ_TYPE_HAIRPIN, /* Hairpin Tx queue. */
176 /* TX queue control descriptor. */
177 struct mlx5_txq_ctrl {
178 LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
179 uint32_t refcnt; /* Reference counter. */
180 unsigned int socket; /* CPU socket ID for allocations. */
181 enum mlx5_txq_type type; /* The txq ctrl type. */
182 unsigned int max_inline_data; /* Max inline data. */
183 unsigned int max_tso_header; /* Max TSO header size. */
184 struct mlx5_txq_obj *obj; /* Verbs/DevX queue object. */
185 struct mlx5_priv *priv; /* Back pointer to private data. */
186 off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */
187 void *bf_reg; /* BlueFlame register from Verbs. */
188 uint16_t dump_file_n; /* Number of dump files. */
189 struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */
190 uint32_t hairpin_status; /* Hairpin binding status. */
191 struct mlx5_txq_data txq; /* Data path structure. */
192 /* Must be the last field in the structure, contains elts[]. */
197 int mlx5_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id);
198 int mlx5_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id);
199 int mlx5_tx_queue_start_primary(struct rte_eth_dev *dev, uint16_t queue_id);
200 int mlx5_tx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t queue_id);
201 int mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
202 unsigned int socket, const struct rte_eth_txconf *conf);
203 int mlx5_tx_hairpin_queue_setup
204 (struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
205 const struct rte_eth_hairpin_conf *hairpin_conf);
206 void mlx5_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
207 void txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl);
208 int mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd);
209 void mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev);
210 int mlx5_txq_obj_verify(struct rte_eth_dev *dev);
211 struct mlx5_txq_ctrl *mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx,
212 uint16_t desc, unsigned int socket,
213 const struct rte_eth_txconf *conf);
214 struct mlx5_txq_ctrl *mlx5_txq_hairpin_new
215 (struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
216 const struct rte_eth_hairpin_conf *hairpin_conf);
217 struct mlx5_txq_ctrl *mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx);
218 int mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx);
219 int mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx);
220 int mlx5_txq_verify(struct rte_eth_dev *dev);
221 void txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl);
222 void txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl);
223 uint64_t mlx5_get_tx_port_offloads(struct rte_eth_dev *dev);
224 void mlx5_txq_dynf_timestamp_set(struct rte_eth_dev *dev);
228 uint16_t removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
230 void mlx5_tx_handle_completion(struct mlx5_txq_data *__rte_restrict txq,
231 unsigned int olx __rte_unused);
232 int mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset);
233 void mlx5_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
234 struct rte_eth_txq_info *qinfo);
235 int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
236 struct rte_eth_burst_mode *mode);
240 MLX5_TXOFF_PRE_DECL(full_empw);
241 MLX5_TXOFF_PRE_DECL(none_empw);
242 MLX5_TXOFF_PRE_DECL(md_empw);
243 MLX5_TXOFF_PRE_DECL(mt_empw);
244 MLX5_TXOFF_PRE_DECL(mtsc_empw);
245 MLX5_TXOFF_PRE_DECL(mti_empw);
246 MLX5_TXOFF_PRE_DECL(mtv_empw);
247 MLX5_TXOFF_PRE_DECL(mtiv_empw);
248 MLX5_TXOFF_PRE_DECL(sc_empw);
249 MLX5_TXOFF_PRE_DECL(sci_empw);
250 MLX5_TXOFF_PRE_DECL(scv_empw);
251 MLX5_TXOFF_PRE_DECL(sciv_empw);
252 MLX5_TXOFF_PRE_DECL(i_empw);
253 MLX5_TXOFF_PRE_DECL(v_empw);
254 MLX5_TXOFF_PRE_DECL(iv_empw);
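/*
 * Each of the names above is instantiated in mlx5_tx_empw.c via
 * MLX5_TXOFF_DECL() with the matching offload mask, e.g. (illustrative):
 *     MLX5_TXOFF_DECL(mt_empw, MLX5_TXOFF_CONFIG_MULTI |
 *                              MLX5_TXOFF_CONFIG_TSO |
 *                              MLX5_TXOFF_CONFIG_EMPW)
 * which defines mlx5_tx_burst_mt_empw() forwarding to mlx5_tx_burst_tmpl()
 * with a compile-time constant 'olx'.
 */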
256 /* mlx5_tx_nompw.c */
258 MLX5_TXOFF_PRE_DECL(full);
259 MLX5_TXOFF_PRE_DECL(none);
260 MLX5_TXOFF_PRE_DECL(md);
261 MLX5_TXOFF_PRE_DECL(mt);
262 MLX5_TXOFF_PRE_DECL(mtsc);
263 MLX5_TXOFF_PRE_DECL(mti);
264 MLX5_TXOFF_PRE_DECL(mtv);
265 MLX5_TXOFF_PRE_DECL(mtiv);
266 MLX5_TXOFF_PRE_DECL(sc);
267 MLX5_TXOFF_PRE_DECL(sci);
268 MLX5_TXOFF_PRE_DECL(scv);
269 MLX5_TXOFF_PRE_DECL(sciv);
270 MLX5_TXOFF_PRE_DECL(i);
271 MLX5_TXOFF_PRE_DECL(v);
272 MLX5_TXOFF_PRE_DECL(iv);
276 MLX5_TXOFF_PRE_DECL(full_ts_nompw);
277 MLX5_TXOFF_PRE_DECL(full_ts_nompwi);
278 MLX5_TXOFF_PRE_DECL(full_ts);
279 MLX5_TXOFF_PRE_DECL(full_ts_noi);
280 MLX5_TXOFF_PRE_DECL(none_ts);
281 MLX5_TXOFF_PRE_DECL(mdi_ts);
282 MLX5_TXOFF_PRE_DECL(mti_ts);
283 MLX5_TXOFF_PRE_DECL(mtiv_ts);
287 MLX5_TXOFF_PRE_DECL(none_mpw);
288 MLX5_TXOFF_PRE_DECL(mci_mpw);
289 MLX5_TXOFF_PRE_DECL(mc_mpw);
290 MLX5_TXOFF_PRE_DECL(i_mpw);
292 static __rte_always_inline uint64_t *
293 mlx5_tx_bfreg(struct mlx5_txq_data *txq)
295 return MLX5_PROC_PRIV(txq->port_id)->uar_table[txq->idx];
299 * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
300 * 64bit architectures.
303 * value to write in CPU endian format.
305 * Address to write to.
307 * Address of the lock to use for that UAR access.
309 static __rte_always_inline void
310 __mlx5_uar_write64_relaxed(uint64_t val, void *addr,
311 rte_spinlock_t *lock __rte_unused)
314 *(uint64_t *)addr = val;
315 #else /* !RTE_ARCH_64 */
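/*
 * On 32-bit targets the 64-bit UAR word is written as two 32-bit stores;
 * the spinlock keeps stores from different lcores from interleaving within
 * a single doorbell write.
 */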
316 rte_spinlock_lock(lock);
317 *(uint32_t *)addr = val;
319 *((uint32_t *)addr + 1) = val >> 32;
320 rte_spinlock_unlock(lock);
325 * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
326 * 64bit architectures while guaranteeing the order of execution with the
327 * code being executed.
330 * value to write in CPU endian format.
332 * Address to write to.
334 * Address of the lock to use for that UAR access.
336 static __rte_always_inline void
337 __mlx5_uar_write64(uint64_t val, void *addr, rte_spinlock_t *lock)
340 __mlx5_uar_write64_relaxed(val, addr, lock);
343 /* Assist macros, used instead of directly calling the functions they wrap. */
345 #define mlx5_uar_write64_relaxed(val, dst, lock) \
346 __mlx5_uar_write64_relaxed(val, dst, NULL)
347 #define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, NULL)
349 #define mlx5_uar_write64_relaxed(val, dst, lock) \
350 __mlx5_uar_write64_relaxed(val, dst, lock)
351 #define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, lock)
355 * Ring TX queue doorbell and flush the update if requested.
358 * Pointer to TX queue structure.
360 * Pointer to the last WQE posted in the NIC.
362 * Request for write memory barrier after BlueFlame update.
364 static __rte_always_inline void
365 mlx5_tx_dbrec_cond_wmb(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe,
368 uint64_t *dst = mlx5_tx_bfreg(txq);
369 volatile uint64_t *src = ((volatile uint64_t *)wqe);
372 *txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci);
373 /* Ensure ordering between DB record and BF copy. */
375 mlx5_uar_write64_relaxed(*src, dst, txq->uar_lock);
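/*
 * Writing the first 8 bytes of the WQE to the UAR/BlueFlame register rings
 * the doorbell, the device then fetches the posted WQEs from the send queue.
 */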
381 * Ring TX queue doorbell and flush the update by write memory barrier.
384 * Pointer to TX queue structure.
386 * Pointer to the last WQE posted in the NIC.
388 static __rte_always_inline void
389 mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe)
391 mlx5_tx_dbrec_cond_wmb(txq, wqe, 1);
395 * Convert timestamp from mbuf format to linear counter
396 * of Clock Queue completions (24 bits).
399 * Pointer to the device shared context to fetch Tx
400 * packet pacing timestamp and parameters.
402 * Timestamp from mbuf to convert.
404 * positive or zero value - completion ID to wait.
405 * negative value - conversion error.
407 static __rte_always_inline int32_t
408 mlx5_txpp_convert_tx_ts(struct mlx5_dev_ctx_shared *sh, uint64_t mts)
415 * Atomically read two uint64_t fields and compare their LSB bits.
416 * If there is no match - the timestamp was updated in
417 * the service thread, the data should be re-read.
419 rte_compiler_barrier();
420 ci = __atomic_load_n(&sh->txpp.ts.ci_ts, __ATOMIC_RELAXED);
421 ts = __atomic_load_n(&sh->txpp.ts.ts, __ATOMIC_RELAXED);
422 rte_compiler_barrier();
423 if (!((ts ^ ci) << (64 - MLX5_CQ_INDEX_WIDTH)))
426 /* Perform the skew correction, positive value to send earlier. */
427 mts -= sh->txpp.skew;
429 if (unlikely(mts >= UINT64_MAX / 2)) {
430 /* The delta is negative, mts is in the past. */
431 __atomic_fetch_add(&sh->txpp.err_ts_past,
432 1, __ATOMIC_RELAXED);
435 tick = sh->txpp.tick;
437 /* Convert delta to completions, round up. */
438 mts = (mts + tick - 1) / tick;
439 if (unlikely(mts >= (1 << MLX5_CQ_INDEX_WIDTH) / 2 - 1)) {
440 /* The mts is too far in the future. */
441 __atomic_fetch_add(&sh->txpp.err_ts_future,
442 1, __ATOMIC_RELAXED);
445 mts <<= 64 - MLX5_CQ_INDEX_WIDTH;
447 ci >>= 64 - MLX5_CQ_INDEX_WIDTH;
452 * Set Software Parser flags and offsets in Ethernet Segment of WQE.
453 * Flags must be initialized to zero in advance.
456 * Pointer to burst routine local context.
458 * Pointer to store Software Parser flags.
460 * Configured Tx offloads mask. It is fully defined at
461 * compile time and may be used for optimization.
464 * Software Parser offsets packed in dword.
465 * Software Parser flags are set by pointer.
467 static __rte_always_inline uint32_t
468 txq_mbuf_to_swp(struct mlx5_txq_local *__rte_restrict loc,
473 unsigned int idx, off;
476 if (!MLX5_TXOFF_CONFIG(SWP))
478 ol = loc->mbuf->ol_flags;
479 tunnel = ol & RTE_MBUF_F_TX_TUNNEL_MASK;
481 * Check whether Software Parser is required.
482 * Only customized tunnels may require it.
484 if (likely(tunnel != RTE_MBUF_F_TX_TUNNEL_UDP && tunnel != RTE_MBUF_F_TX_TUNNEL_IP))
487 * The index should have:
488 * bit[0:1] = RTE_MBUF_F_TX_L4_MASK
489 * bit[4] = RTE_MBUF_F_TX_IPV6
490 * bit[8] = RTE_MBUF_F_TX_OUTER_IPV6
491 * bit[9] = RTE_MBUF_F_TX_OUTER_UDP
493 idx = (ol & (RTE_MBUF_F_TX_L4_MASK | RTE_MBUF_F_TX_IPV6 | RTE_MBUF_F_TX_OUTER_IPV6)) >> 52;
494 idx |= (tunnel == RTE_MBUF_F_TX_TUNNEL_UDP) ? (1 << 9) : 0;
495 *swp_flags = mlx5_swp_types_table[idx];
497 * Set offsets for SW parser. Since ConnectX-5, SW parser just
498 * complements HW parser. SW parser starts to engage only if HW parser
499 * can't reach a header. For the older devices, HW parser will not kick
500 * in if any of SWP offsets is set. Therefore, all of the L3 offsets
501 * should be set regardless of HW offload.
503 off = loc->mbuf->outer_l2_len;
504 if (MLX5_TXOFF_CONFIG(VLAN) && ol & RTE_MBUF_F_TX_VLAN)
505 off += sizeof(struct rte_vlan_hdr);
506 set = (off >> 1) << 8; /* Outer L3 offset. */
507 off += loc->mbuf->outer_l3_len;
508 if (tunnel == RTE_MBUF_F_TX_TUNNEL_UDP)
509 set |= off >> 1; /* Outer L4 offset. */
510 if (ol & (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IPV6)) { /* Inner IP. */
511 const uint64_t csum = ol & RTE_MBUF_F_TX_L4_MASK;
512 off += loc->mbuf->l2_len;
513 set |= (off >> 1) << 24; /* Inner L3 offset. */
514 if (csum == RTE_MBUF_F_TX_TCP_CKSUM ||
515 csum == RTE_MBUF_F_TX_UDP_CKSUM ||
516 (MLX5_TXOFF_CONFIG(TSO) && ol & RTE_MBUF_F_TX_TCP_SEG)) {
517 off += loc->mbuf->l3_len;
518 set |= (off >> 1) << 16; /* Inner L4 offset. */
521 set = rte_cpu_to_le_32(set);
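/*
 * Layout of the packed dword built above (offsets in 2-byte units):
 * bits 7..0 - outer L4, bits 15..8 - outer L3,
 * bits 23..16 - inner L4, bits 31..24 - inner L3.
 */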
526 * Convert the Checksum offloads to Verbs.
529 * Pointer to the mbuf.
532 * Converted checksum flags.
534 static __rte_always_inline uint8_t
535 txq_ol_cksum_to_cs(struct rte_mbuf *buf)
538 uint8_t is_tunnel = !!(buf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK);
539 const uint64_t ol_flags_mask = RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_L4_MASK |
540 RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_OUTER_IP_CKSUM;
543 * The index should have:
544 * bit[0] = RTE_MBUF_F_TX_TCP_SEG
545 * bit[2:3] = RTE_MBUF_F_TX_UDP_CKSUM, RTE_MBUF_F_TX_TCP_CKSUM
546 * bit[4] = RTE_MBUF_F_TX_IP_CKSUM
547 * bit[8] = RTE_MBUF_F_TX_OUTER_IP_CKSUM
550 idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (!!is_tunnel << 9);
551 return mlx5_cksum_table[idx];
555 * Free the mbufs from the linear array of pointers.
558 * Pointer to Tx queue structure.
560 * Pointer to array of packets to be freed.
562 * Number of packets to be freed.
564 * Configured Tx offloads mask. It is fully defined at
565 * compile time and may be used for optimization.
567 static __rte_always_inline void
568 mlx5_tx_free_mbuf(struct mlx5_txq_data *__rte_restrict txq,
569 struct rte_mbuf **__rte_restrict pkts,
571 unsigned int olx __rte_unused)
573 struct rte_mempool *pool = NULL;
574 struct rte_mbuf **p_free = NULL;
575 struct rte_mbuf *mbuf;
576 unsigned int n_free = 0;
579 * The implemented algorithm eliminates
580 * copying pointers to temporary array
581 * for rte_mempool_put_bulk() calls.
586 * Free mbufs directly to the pool in bulk
587 * if fast free offload is engaged
589 if (!MLX5_TXOFF_CONFIG(MULTI) && txq->fast_free) {
592 rte_mempool_put_bulk(pool, (void *)pkts, pkts_n);
598 * Decrement mbuf reference counter, detach
599 * indirect and external buffers if needed.
601 mbuf = rte_pktmbuf_prefree_seg(*pkts);
602 if (likely(mbuf != NULL)) {
603 MLX5_ASSERT(mbuf == *pkts);
604 if (likely(n_free != 0)) {
605 if (unlikely(pool != mbuf->pool))
606 /* From different pool. */
609 /* Start new scan array. */
616 if (unlikely(pkts_n == 0)) {
622 * This happens if mbuf is still referenced.
623 * We can't put it back to the pool, skip.
627 if (unlikely(n_free != 0))
628 /* There is some array to free.*/
630 if (unlikely(pkts_n == 0))
631 /* Last mbuf, nothing to free. */
637 * This loop is implemented to avoid multiple
638 * inlining of rte_mempool_put_bulk().
644 * Free the array of pre-freed mbufs
645 * belonging to the same memory pool.
647 rte_mempool_put_bulk(pool, (void *)p_free, n_free);
648 if (unlikely(mbuf != NULL)) {
649 /* There is a request to start a new scan. */
654 if (likely(pkts_n != 0))
657 * This is the last mbuf to be freed.
658 * Do one more loop iteration to complete.
659 * This is a rare case of the last unique mbuf.
664 if (likely(pkts_n == 0))
673 * Not-inlined version of the buffer freeing routine, for an optimal
674 * call on the tx_burst completion path.
676 static __rte_noinline void
677 __mlx5_tx_free_mbuf(struct mlx5_txq_data *__rte_restrict txq,
678 struct rte_mbuf **__rte_restrict pkts,
680 unsigned int olx __rte_unused)
682 mlx5_tx_free_mbuf(txq, pkts, pkts_n, olx);
686 * Free the mbufs from the elts ring buffer up to the new tail.
689 * Pointer to Tx queue structure.
691 * Index in elts to free up to, becomes new elts tail.
693 * Configured Tx offloads mask. It is fully defined at
694 * compile time and may be used for optimization.
696 static __rte_always_inline void
697 mlx5_tx_free_elts(struct mlx5_txq_data *__rte_restrict txq,
699 unsigned int olx __rte_unused)
701 uint16_t n_elts = tail - txq->elts_tail;
704 MLX5_ASSERT(n_elts <= txq->elts_s);
706 * Implement a loop to support ring buffer wraparound
707 * with single inlining of mlx5_tx_free_mbuf().
712 part = txq->elts_s - (txq->elts_tail & txq->elts_m);
713 part = RTE_MIN(part, n_elts);
715 MLX5_ASSERT(part <= txq->elts_s);
716 mlx5_tx_free_mbuf(txq,
717 &txq->elts[txq->elts_tail & txq->elts_m],
719 txq->elts_tail += part;
725 * Store the mbuf being sent into elts ring buffer.
726 * On Tx completion these mbufs will be freed.
729 * Pointer to Tx queue structure.
731 * Pointer to array of packets to be stored.
733 * Number of packets to be stored.
735 * Configured Tx offloads mask. It is fully defined at
736 * compile time and may be used for optimization.
738 static __rte_always_inline void
739 mlx5_tx_copy_elts(struct mlx5_txq_data *__rte_restrict txq,
740 struct rte_mbuf **__rte_restrict pkts,
742 unsigned int olx __rte_unused)
745 struct rte_mbuf **elts = (struct rte_mbuf **)txq->elts;
749 part = txq->elts_s - (txq->elts_head & txq->elts_m);
751 MLX5_ASSERT(part <= txq->elts_s);
752 /* This code is a good candidate for vectorizing with SIMD. */
753 rte_memcpy((void *)(elts + (txq->elts_head & txq->elts_m)),
755 RTE_MIN(part, pkts_n) * sizeof(struct rte_mbuf *));
756 txq->elts_head += pkts_n;
757 if (unlikely(part < pkts_n))
758 /* The copy is wrapping around the elts array. */
759 rte_memcpy((void *)elts, (void *)(pkts + part),
760 (pkts_n - part) * sizeof(struct rte_mbuf *));
764 * Check if the completion request flag should be set in the last WQE.
765 * Both pushed mbufs and WQEs are monitored and the completion request
766 * flag is set if any of the thresholds is reached.
769 * Pointer to TX queue structure.
771 * Pointer to burst routine local context.
773 * Configured Tx offloads mask. It is fully defined at
774 * compile time and may be used for optimization.
776 static __rte_always_inline void
777 mlx5_tx_request_completion(struct mlx5_txq_data *__rte_restrict txq,
778 struct mlx5_txq_local *__rte_restrict loc,
781 uint16_t head = txq->elts_head;
784 part = MLX5_TXOFF_CONFIG(INLINE) ?
785 0 : loc->pkts_sent - loc->pkts_copy;
787 if ((uint16_t)(head - txq->elts_comp) >= MLX5_TX_COMP_THRESH ||
788 (MLX5_TXOFF_CONFIG(INLINE) &&
789 (uint16_t)(txq->wqe_ci - txq->wqe_comp) >= txq->wqe_thres)) {
790 volatile struct mlx5_wqe *last = loc->wqe_last;
793 txq->elts_comp = head;
794 if (MLX5_TXOFF_CONFIG(INLINE))
795 txq->wqe_comp = txq->wqe_ci;
796 /* Request unconditional completion on last WQE. */
797 last->cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
798 MLX5_COMP_MODE_OFFSET);
799 /* Save elts_head in dedicated free on completion queue. */
800 #ifdef RTE_LIBRTE_MLX5_DEBUG
801 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head |
802 (last->cseg.opcode >> 8) << 16;
804 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head;
806 /* A CQE slot must always be available. */
807 MLX5_ASSERT((txq->cq_pi - txq->cq_ci) <= txq->cqe_s);
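/*
 * The saved head values are consumed later by mlx5_tx_handle_completion()
 * (declared above) to advance elts_tail and free the completed mbufs.
 */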
812 * Build the Control Segment with specified opcode:
814 * - MLX5_OPCODE_ENHANCED_MPSW
818 * Pointer to TX queue structure.
820 * Pointer to burst routine local context.
822 * Pointer to WQE to fill with built Control Segment.
824 * Supposed length of WQE in segments.
826 * SQ WQE opcode to put into Control Segment.
828 * Configured Tx offloads mask. It is fully defined at
829 * compile time and may be used for optimization.
831 static __rte_always_inline void
832 mlx5_tx_cseg_init(struct mlx5_txq_data *__rte_restrict txq,
833 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
834 struct mlx5_wqe *__rte_restrict wqe,
837 unsigned int olx __rte_unused)
839 struct mlx5_wqe_cseg *__rte_restrict cs = &wqe->cseg;
841 /* For legacy MPW replace the EMPW by TSO with modifier. */
842 if (MLX5_TXOFF_CONFIG(MPW) && opcode == MLX5_OPCODE_ENHANCED_MPSW)
843 opcode = MLX5_OPCODE_TSO | MLX5_OPC_MOD_MPW << 24;
844 cs->opcode = rte_cpu_to_be_32((txq->wqe_ci << 8) | opcode);
845 cs->sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
846 cs->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
847 MLX5_COMP_MODE_OFFSET);
848 cs->misc = RTE_BE32(0);
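/*
 * 'ds' counts 16-byte WQE segments (MLX5_WSEG_SIZE units); four of them
 * form one WQEBB, hence callers advance wqe_ci by (ds + 3) / 4.
 */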
852 * Build the Synchronize Queue Segment with specified completion index.
855 * Pointer to TX queue structure.
857 * Pointer to burst routine local context.
859 * Pointer to WQE to fill with built Control Segment.
861 * Completion index in Clock Queue to wait.
863 * Configured Tx offloads mask. It is fully defined at
864 * compile time and may be used for optimization.
866 static __rte_always_inline void
867 mlx5_tx_wseg_init(struct mlx5_txq_data *restrict txq,
868 struct mlx5_txq_local *restrict loc __rte_unused,
869 struct mlx5_wqe *restrict wqe,
871 unsigned int olx __rte_unused)
873 struct mlx5_wqe_qseg *qs;
875 qs = RTE_PTR_ADD(wqe, MLX5_WSEG_SIZE);
876 qs->max_index = rte_cpu_to_be_32(wci);
877 qs->qpn_cqn = rte_cpu_to_be_32(txq->sh->txpp.clock_queue.cq_obj.cq->id);
878 qs->reserved0 = RTE_BE32(0);
879 qs->reserved1 = RTE_BE32(0);
883 * Build the Ethernet Segment without inlined data.
884 * Supports Software Parser, Checksums and VLAN insertion Tx offload features.
887 * Pointer to TX queue structure.
889 * Pointer to burst routine local context.
891 * Pointer to WQE to fill with built Ethernet Segment.
893 * Configured Tx offloads mask. It is fully defined at
894 * compile time and may be used for optimization.
896 static __rte_always_inline void
897 mlx5_tx_eseg_none(struct mlx5_txq_data *__rte_restrict txq __rte_unused,
898 struct mlx5_txq_local *__rte_restrict loc,
899 struct mlx5_wqe *__rte_restrict wqe,
902 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
906 * Calculate and set check sum flags first, dword field
907 * in segment may be shared with Software Parser flags.
909 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
910 es->flags = rte_cpu_to_le_32(csum);
912 * Calculate and set Software Parser offsets and flags.
913 * These flags are set for custom UDP and IP tunnel packets.
915 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
916 /* Fill metadata field if needed. */
917 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
918 loc->mbuf->ol_flags & RTE_MBUF_DYNFLAG_TX_METADATA ?
919 rte_cpu_to_be_32(*RTE_FLOW_DYNF_METADATA(loc->mbuf)) :
921 /* Engage VLAN tag insertion feature if requested. */
922 if (MLX5_TXOFF_CONFIG(VLAN) &&
923 loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN) {
925 * We should get here only if the device supports
926 * this feature correctly.
928 MLX5_ASSERT(txq->vlan_en);
929 es->inline_hdr = rte_cpu_to_be_32(MLX5_ETH_WQE_VLAN_INSERT |
930 loc->mbuf->vlan_tci);
932 es->inline_hdr = RTE_BE32(0);
937 * Build the Ethernet Segment with minimal inlined data
938 * of MLX5_ESEG_MIN_INLINE_SIZE bytes length. This is
939 * used to fill the gap in single WQEBB WQEs.
940 * Supports Software Parser, Checksums and VLAN
941 * insertion Tx offload features.
944 * Pointer to TX queue structure.
946 * Pointer to burst routine local context.
948 * Pointer to WQE to fill with built Ethernet Segment.
950 * Length of VLAN tag insertion if any.
952 * Configured Tx offloads mask. It is fully defined at
953 * compile time and may be used for optimization.
955 static __rte_always_inline void
956 mlx5_tx_eseg_dmin(struct mlx5_txq_data *__rte_restrict txq __rte_unused,
957 struct mlx5_txq_local *__rte_restrict loc,
958 struct mlx5_wqe *__rte_restrict wqe,
962 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
964 uint8_t *psrc, *pdst;
967 * Calculate and set check sum flags first, dword field
968 * in segment may be shared with Software Parser flags.
970 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
971 es->flags = rte_cpu_to_le_32(csum);
973 * Calculate and set Software Parser offsets and flags.
974 * These flags are set for custom UDP and IP tunnel packets.
976 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
977 /* Fill metadata field if needed. */
978 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
979 loc->mbuf->ol_flags & RTE_MBUF_DYNFLAG_TX_METADATA ?
980 rte_cpu_to_be_32(*RTE_FLOW_DYNF_METADATA(loc->mbuf)) :
982 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
983 es->inline_hdr_sz = RTE_BE16(MLX5_ESEG_MIN_INLINE_SIZE);
984 es->inline_data = *(unaligned_uint16_t *)psrc;
985 psrc += sizeof(uint16_t);
986 pdst = (uint8_t *)(es + 1);
987 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
988 /* Implement VLAN tag insertion as part of inline data. */
989 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
990 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
991 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
992 /* Insert VLAN ethertype + VLAN tag. */
993 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
994 ((RTE_ETHER_TYPE_VLAN << 16) |
995 loc->mbuf->vlan_tci);
996 pdst += sizeof(struct rte_vlan_hdr);
997 /* Copy the remaining two bytes from the packet data. */
998 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
999 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
1001 /* Fill the gap in the title WQEBB with inline data. */
1002 rte_mov16(pdst, psrc);
1007 * Build the Ethernet Segment with entire packet data inlining. Checks the
1008 * boundary of WQEBB and ring buffer wrapping, supports Software Parser,
1009 * Checksums and VLAN insertion Tx offload features.
1012 * Pointer to TX queue structure.
1014 * Pointer to burst routine local context.
1016 * Pointer to WQE to fill with built Ethernet Segment.
1018 * Length of VLAN tag insertion if any.
1020 * Length of data to inline (VLAN included, if any).
1022 * TSO flag, set mss field from the packet.
1024 * Configured Tx offloads mask. It is fully defined at
1025 * compile time and may be used for optimization.
1028 * Pointer to the next Data Segment (aligned and wrapped around).
1030 static __rte_always_inline struct mlx5_wqe_dseg *
1031 mlx5_tx_eseg_data(struct mlx5_txq_data *__rte_restrict txq,
1032 struct mlx5_txq_local *__rte_restrict loc,
1033 struct mlx5_wqe *__rte_restrict wqe,
1039 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
1041 uint8_t *psrc, *pdst;
1045 * Calculate and set check sum flags first, dword field
1046 * in segment may be shared with Software Parser flags.
1048 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
1051 csum |= loc->mbuf->tso_segsz;
1052 es->flags = rte_cpu_to_be_32(csum);
1054 es->flags = rte_cpu_to_le_32(csum);
1057 * Calculate and set Software Parser offsets and flags.
1058 * These flags are set for custom UDP and IP tunnel packets.
1060 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
1061 /* Fill metadata field if needed. */
1062 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
1063 loc->mbuf->ol_flags & RTE_MBUF_DYNFLAG_TX_METADATA ?
1064 rte_cpu_to_be_32(*RTE_FLOW_DYNF_METADATA(loc->mbuf)) :
1066 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
1067 es->inline_hdr_sz = rte_cpu_to_be_16(inlen);
1068 es->inline_data = *(unaligned_uint16_t *)psrc;
1069 psrc += sizeof(uint16_t);
1070 pdst = (uint8_t *)(es + 1);
1071 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
1072 /* Implement VLAN tag insertion as part of inline data. */
1073 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
1074 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
1075 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
1076 /* Insert VLAN ethertype + VLAN tag. */
1077 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
1078 ((RTE_ETHER_TYPE_VLAN << 16) |
1079 loc->mbuf->vlan_tci);
1080 pdst += sizeof(struct rte_vlan_hdr);
1081 /* Copy the remaining two bytes from the packet data. */
1082 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
1083 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
1084 psrc += sizeof(uint16_t);
1086 /* Fill the gap in the title WQEBB with inline data. */
1087 rte_mov16(pdst, psrc);
1088 psrc += sizeof(rte_v128u32_t);
1090 pdst = (uint8_t *)(es + 2);
1091 MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
1092 MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
1093 inlen -= MLX5_ESEG_MIN_INLINE_SIZE;
1095 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
1096 return (struct mlx5_wqe_dseg *)pdst;
1099 * The WQEBB space availability is checked by caller.
1100 * Here we should be aware of WQE ring buffer wraparound only.
1102 part = (uint8_t *)txq->wqes_end - pdst;
1103 part = RTE_MIN(part, inlen);
1105 rte_memcpy(pdst, psrc, part);
1107 if (likely(!inlen)) {
1109 * If return value is not used by the caller
1110 * the code below will be optimized out.
1113 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
1114 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
1115 pdst = (uint8_t *)txq->wqes;
1116 return (struct mlx5_wqe_dseg *)pdst;
1118 pdst = (uint8_t *)txq->wqes;
1125 * Copy data from a chain of mbufs to the specified linear buffer.
1126 * Supports Checksums and VLAN insertion Tx offload features. If data
1127 * from some mbuf is copied completely, this mbuf is freed. The local
1128 * structure is used to keep the byte stream state.
1131 * Pointer to the destination linear buffer.
1133 * Pointer to burst routine local context.
1135 * Length of data to be copied.
1137 * Length of data to be copied ignoring no inline hint.
1139 * Configured Tx offloads mask. It is fully defined at
1140 * compile time and may be used for optimization.
1143 * Number of actually copied data bytes. This is always greater than or
1144 * equal to the 'must' parameter and might be less than 'len' if the
1145 * no-inline hint flag is encountered.
1147 static __rte_always_inline unsigned int
1148 mlx5_tx_mseg_memcpy(uint8_t *pdst,
1149 struct mlx5_txq_local *__rte_restrict loc,
1152 unsigned int olx __rte_unused)
1154 struct rte_mbuf *mbuf;
1155 unsigned int part, dlen, copy = 0;
1159 MLX5_ASSERT(must <= len);
1161 /* Allow zero length packets, must check first. */
1162 dlen = rte_pktmbuf_data_len(loc->mbuf);
1163 if (dlen <= loc->mbuf_off) {
1164 /* Exhausted packet, just free. */
1166 loc->mbuf = mbuf->next;
1167 rte_pktmbuf_free_seg(mbuf);
1169 MLX5_ASSERT(loc->mbuf_nseg > 1);
1170 MLX5_ASSERT(loc->mbuf);
1172 if (loc->mbuf->ol_flags & RTE_MBUF_F_TX_DYNF_NOINLINE) {
1177 * We already copied the minimal
1178 * requested amount of data.
1183 if (diff <= rte_pktmbuf_data_len(loc->mbuf)) {
1185 * Copy only the minimal required
1186 * part of the data buffer.
1193 dlen -= loc->mbuf_off;
1194 psrc = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
1196 part = RTE_MIN(len, dlen);
1197 rte_memcpy(pdst, psrc, part);
1199 loc->mbuf_off += part;
1202 if (loc->mbuf_off >= rte_pktmbuf_data_len(loc->mbuf)) {
1204 /* Exhausted packet, just free. */
1206 loc->mbuf = mbuf->next;
1207 rte_pktmbuf_free_seg(mbuf);
1209 MLX5_ASSERT(loc->mbuf_nseg >= 1);
1219 * Build the Ethernet Segment with inlined data from multi-segment packet.
1220 * Checks the boundary of WQEBB and ring buffer wrapping, supports Software
1221 * Parser, Checksums and VLAN insertion Tx offload features.
1224 * Pointer to TX queue structure.
1226 * Pointer to burst routine local context.
1228 * Pointer to WQE to fill with built Ethernet Segment.
1230 * Length of VLAN tag insertion if any.
1232 * Length of data to inline (VLAN included, if any).
1234 * TSO flag, set mss field from the packet.
1236 * Configured Tx offloads mask. It is fully defined at
1237 * compile time and may be used for optimization.
1240 * Pointer to the next Data Segment (aligned and possibly NOT wrapped
1241 * around - caller should do wrapping check on its own).
1243 static __rte_always_inline struct mlx5_wqe_dseg *
1244 mlx5_tx_eseg_mdat(struct mlx5_txq_data *__rte_restrict txq,
1245 struct mlx5_txq_local *__rte_restrict loc,
1246 struct mlx5_wqe *__rte_restrict wqe,
1252 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
1255 unsigned int part, tlen = 0;
1258 * Calculate and set check sum flags first, uint32_t field
1259 * in segment may be shared with Software Parser flags.
1261 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
1264 csum |= loc->mbuf->tso_segsz;
1265 es->flags = rte_cpu_to_be_32(csum);
1267 es->flags = rte_cpu_to_le_32(csum);
1270 * Calculate and set Software Parser offsets and flags.
1271 * These flags are set for custom UDP and IP tunnel packets.
1273 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
1274 /* Fill metadata field if needed. */
1275 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
1276 loc->mbuf->ol_flags & RTE_MBUF_DYNFLAG_TX_METADATA ?
1277 rte_cpu_to_be_32(*RTE_FLOW_DYNF_METADATA(loc->mbuf)) :
1279 MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
1280 pdst = (uint8_t *)&es->inline_data;
1281 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
1282 /* Implement VLAN tag insertion as part of inline data. */
1283 mlx5_tx_mseg_memcpy(pdst, loc,
1284 2 * RTE_ETHER_ADDR_LEN,
1285 2 * RTE_ETHER_ADDR_LEN, olx);
1286 pdst += 2 * RTE_ETHER_ADDR_LEN;
1287 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
1288 ((RTE_ETHER_TYPE_VLAN << 16) |
1289 loc->mbuf->vlan_tci);
1290 pdst += sizeof(struct rte_vlan_hdr);
1291 tlen += 2 * RTE_ETHER_ADDR_LEN + sizeof(struct rte_vlan_hdr);
1293 MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
1295 * The WQEBB space availability is checked by caller.
1296 * Here we should be aware of WQE ring buffer wraparound only.
1298 part = (uint8_t *)txq->wqes_end - pdst;
1299 part = RTE_MIN(part, inlen - tlen);
1305 * Copying may be interrupted inside the routine
1306 * if the no-inline hint flag is encountered.
1308 copy = tso ? inlen : txq->inlen_mode;
1309 copy = tlen >= copy ? 0 : (copy - tlen);
1310 copy = mlx5_tx_mseg_memcpy(pdst, loc, part, copy, olx);
1312 if (likely(inlen <= tlen) || copy < part) {
1313 es->inline_hdr_sz = rte_cpu_to_be_16(tlen);
1315 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
1316 return (struct mlx5_wqe_dseg *)pdst;
1318 pdst = (uint8_t *)txq->wqes;
1319 part = inlen - tlen;
1324 * Build the Data Segment of pointer type.
1327 * Pointer to TX queue structure.
1329 * Pointer to burst routine local context.
1331 * Pointer to WQE to fill with built Data Segment.
1333 * Data buffer to point.
1335 * Data buffer length.
1337 * Configured Tx offloads mask. It is fully defined at
1338 * compile time and may be used for optimization.
1340 static __rte_always_inline void
1341 mlx5_tx_dseg_ptr(struct mlx5_txq_data *__rte_restrict txq,
1342 struct mlx5_txq_local *__rte_restrict loc,
1343 struct mlx5_wqe_dseg *__rte_restrict dseg,
1346 unsigned int olx __rte_unused)
1350 dseg->bcount = rte_cpu_to_be_32(len);
1351 dseg->lkey = mlx5_mr_mb2mr(&txq->mr_ctrl, loc->mbuf);
1352 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
1356 * Build the Data Segment of pointer type, or of inline type if the data
1357 * length fits into the minimal Data Segment buffer size.
1360 * Pointer to TX queue structure.
1362 * Pointer to burst routine local context.
1364 * Pointer to WQE to fill with built Data Segment.
1366 * Data buffer to point.
1368 * Data buffer length.
1370 * Configured Tx offloads mask. It is fully defined at
1371 * compile time and may be used for optimization.
1373 static __rte_always_inline void
1374 mlx5_tx_dseg_iptr(struct mlx5_txq_data *__rte_restrict txq,
1375 struct mlx5_txq_local *__rte_restrict loc,
1376 struct mlx5_wqe_dseg *__rte_restrict dseg,
1379 unsigned int olx __rte_unused)
1385 if (len > MLX5_DSEG_MIN_INLINE_SIZE) {
1386 dseg->bcount = rte_cpu_to_be_32(len);
1387 dseg->lkey = mlx5_mr_mb2mr(&txq->mr_ctrl, loc->mbuf);
1388 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
1392 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
1393 /* Unrolled implementation of generic rte_memcpy. */
1394 dst = (uintptr_t)&dseg->inline_data[0];
1395 src = (uintptr_t)buf;
1397 #ifdef RTE_ARCH_STRICT_ALIGN
1398 MLX5_ASSERT(dst == RTE_PTR_ALIGN(dst, sizeof(uint32_t)));
1399 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
1400 dst += sizeof(uint32_t);
1401 src += sizeof(uint32_t);
1402 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
1403 dst += sizeof(uint32_t);
1404 src += sizeof(uint32_t);
1406 *(uint64_t *)dst = *(unaligned_uint64_t *)src;
1407 dst += sizeof(uint64_t);
1408 src += sizeof(uint64_t);
1412 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
1413 dst += sizeof(uint32_t);
1414 src += sizeof(uint32_t);
1417 *(uint16_t *)dst = *(unaligned_uint16_t *)src;
1418 dst += sizeof(uint16_t);
1419 src += sizeof(uint16_t);
1422 *(uint8_t *)dst = *(uint8_t *)src;
1426 * Build the Data Segment of inlined data from single
1427 * segment packet, no VLAN insertion.
1430 * Pointer to TX queue structure.
1432 * Pointer to burst routine local context.
1434 * Pointer to WQE to fill with built Data Segment.
1436 * Data buffer to point.
1438 * Data buffer length.
1440 * Configured Tx offloads mask. It is fully defined at
1441 * compile time and may be used for optimization.
1444 * Pointer to the next Data Segment after inlined data.
1445 * Ring buffer wraparound check is needed. We do not do it here because it
1446 * may not be needed for the last packet in the eMPW session.
1448 static __rte_always_inline struct mlx5_wqe_dseg *
1449 mlx5_tx_dseg_empw(struct mlx5_txq_data *__rte_restrict txq,
1450 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
1451 struct mlx5_wqe_dseg *__rte_restrict dseg,
1454 unsigned int olx __rte_unused)
1459 if (!MLX5_TXOFF_CONFIG(MPW)) {
1460 /* Store the descriptor byte counter for eMPW sessions. */
1461 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
1462 pdst = &dseg->inline_data[0];
1464 /* The entire legacy MPW session counter is stored on close. */
1465 pdst = (uint8_t *)dseg;
1468 * The WQEBB space availability is checked by caller.
1469 * Here we should be aware of WQE ring buffer wraparound only.
1471 part = (uint8_t *)txq->wqes_end - pdst;
1472 part = RTE_MIN(part, len);
1474 rte_memcpy(pdst, buf, part);
1478 if (!MLX5_TXOFF_CONFIG(MPW))
1479 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
1480 /* Note: no final wraparound check here. */
1481 return (struct mlx5_wqe_dseg *)pdst;
1483 pdst = (uint8_t *)txq->wqes;
1490 * Build the Data Segment of inlined data from single
1491 * segment packet with VLAN insertion.
1494 * Pointer to TX queue structure.
1496 * Pointer to burst routine local context.
1498 * Pointer to the dseg to fill with built Data Segment.
1500 * Data buffer to point.
1502 * Data buffer length.
1504 * Configured Tx offloads mask. It is fully defined at
1505 * compile time and may be used for optimization.
1508 * Pointer to the next Data Segment after inlined data.
1509 * Ring buffer wraparound check is needed.
1511 static __rte_always_inline struct mlx5_wqe_dseg *
1512 mlx5_tx_dseg_vlan(struct mlx5_txq_data *__rte_restrict txq,
1513 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
1514 struct mlx5_wqe_dseg *__rte_restrict dseg,
1517 unsigned int olx __rte_unused)
1523 MLX5_ASSERT(len > MLX5_ESEG_MIN_INLINE_SIZE);
1524 if (!MLX5_TXOFF_CONFIG(MPW)) {
1525 /* Store the descriptor byte counter for eMPW sessions. */
1526 dseg->bcount = rte_cpu_to_be_32
1527 ((len + sizeof(struct rte_vlan_hdr)) |
1528 MLX5_ETH_WQE_DATA_INLINE);
1529 pdst = &dseg->inline_data[0];
1531 /* The entire legacy MPW session counter is stored on close. */
1532 pdst = (uint8_t *)dseg;
1534 memcpy(pdst, buf, MLX5_DSEG_MIN_INLINE_SIZE);
1535 buf += MLX5_DSEG_MIN_INLINE_SIZE;
1536 pdst += MLX5_DSEG_MIN_INLINE_SIZE;
1537 len -= MLX5_DSEG_MIN_INLINE_SIZE;
1538 /* Insert VLAN ethertype + VLAN tag. Pointer is aligned. */
1539 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
1540 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
1541 pdst = (uint8_t *)txq->wqes;
1542 *(uint32_t *)pdst = rte_cpu_to_be_32((RTE_ETHER_TYPE_VLAN << 16) |
1543 loc->mbuf->vlan_tci);
1544 pdst += sizeof(struct rte_vlan_hdr);
1546 * The WQEBB space availability is checked by caller.
1547 * Here we should be aware of WQE ring buffer wraparound only.
1549 part = (uint8_t *)txq->wqes_end - pdst;
1550 part = RTE_MIN(part, len);
1552 rte_memcpy(pdst, buf, part);
1556 if (!MLX5_TXOFF_CONFIG(MPW))
1557 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
1558 /* Note: no final wraparound check here. */
1559 return (struct mlx5_wqe_dseg *)pdst;
1561 pdst = (uint8_t *)txq->wqes;
1568 * Build the Ethernet Segment with optionally inlined data with
1569 * VLAN insertion and following Data Segments (if any) from
1570 * multi-segment packet. Used by ordinary send and TSO.
1573 * Pointer to TX queue structure.
1575 * Pointer to burst routine local context.
1577 * Pointer to WQE to fill with built Ethernet/Data Segments.
1579 * Length of VLAN header to insert, 0 means no VLAN insertion.
1581 * Data length to inline. For TSO this parameter specifies exact value,
1582 * for ordinary send routine can be aligned by caller to provide better WQE
1583 * space saving and data buffer start address alignment.
1584 * This length includes VLAN header being inserted.
1586 * Zero means ordinary send, inlined data can be extended,
1587 * otherwise this is TSO, inlined data length is fixed.
1589 * Configured Tx offloads mask. It is fully defined at
1590 * compile time and may be used for optimization.
1593 * Actual size of built WQE in segments.
1595 static __rte_always_inline unsigned int
1596 mlx5_tx_mseg_build(struct mlx5_txq_data *__rte_restrict txq,
1597 struct mlx5_txq_local *__rte_restrict loc,
1598 struct mlx5_wqe *__rte_restrict wqe,
1602 unsigned int olx __rte_unused)
1604 struct mlx5_wqe_dseg *__rte_restrict dseg;
1607 MLX5_ASSERT((rte_pktmbuf_pkt_len(loc->mbuf) + vlan) >= inlen);
1608 loc->mbuf_nseg = NB_SEGS(loc->mbuf);
1611 dseg = mlx5_tx_eseg_mdat(txq, loc, wqe, vlan, inlen, tso, olx);
1612 if (!loc->mbuf_nseg)
1615 * There are still some mbufs remaining, not inlined.
1616 * The first mbuf may be partially inlined and we
1617 * must process the possible non-zero data offset.
1619 if (loc->mbuf_off) {
1624 * Exhausted packets must be dropped before.
1625 * A non-zero offset means there is some data
1626 * remaining in the packet.
1628 MLX5_ASSERT(loc->mbuf_off < rte_pktmbuf_data_len(loc->mbuf));
1629 MLX5_ASSERT(rte_pktmbuf_data_len(loc->mbuf));
1630 dptr = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
1632 dlen = rte_pktmbuf_data_len(loc->mbuf) - loc->mbuf_off;
1634 * Build the pointer/minimal Data Segment.
1635 * Do ring buffer wrapping check in advance.
1637 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
1638 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
1639 mlx5_tx_dseg_iptr(txq, loc, dseg, dptr, dlen, olx);
1640 /* Store the mbuf to be freed on completion. */
1641 MLX5_ASSERT(loc->elts_free);
1642 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
1645 if (--loc->mbuf_nseg == 0)
1647 loc->mbuf = loc->mbuf->next;
1651 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
1652 struct rte_mbuf *mbuf;
1654 /* Zero length segment found, just skip. */
1656 loc->mbuf = loc->mbuf->next;
1657 rte_pktmbuf_free_seg(mbuf);
1658 if (--loc->mbuf_nseg == 0)
1661 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
1662 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
1665 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
1666 rte_pktmbuf_data_len(loc->mbuf), olx);
1667 MLX5_ASSERT(loc->elts_free);
1668 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
1671 if (--loc->mbuf_nseg == 0)
1673 loc->mbuf = loc->mbuf->next;
1678 /* Calculate actual segments used from the dseg pointer. */
1679 if ((uintptr_t)wqe < (uintptr_t)dseg)
1680 ds = ((uintptr_t)dseg - (uintptr_t)wqe) / MLX5_WSEG_SIZE;
1682 ds = (((uintptr_t)dseg - (uintptr_t)wqe) +
1683 txq->wqe_s * MLX5_WQE_SIZE) / MLX5_WSEG_SIZE;
1688 * The routine checks timestamp flag in the current packet,
1689 * and pushes a WAIT WQE into the queue if scheduling is required.
1692 * Pointer to TX queue structure.
1694 * Pointer to burst routine local context.
1696 * Configured Tx offloads mask. It is fully defined at
1697 * compile time and may be used for optimization.
1700 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
1701 * MLX5_TXCMP_CODE_SINGLE - continue processing with the packet.
1702 * MLX5_TXCMP_CODE_MULTI - the WAIT inserted, continue processing.
1703 * Local context variables partially updated.
1705 static __rte_always_inline enum mlx5_txcmp_code
1706 mlx5_tx_schedule_send(struct mlx5_txq_data *restrict txq,
1707 struct mlx5_txq_local *restrict loc,
1710 if (MLX5_TXOFF_CONFIG(TXPP) &&
1711 loc->mbuf->ol_flags & txq->ts_mask) {
1712 struct mlx5_wqe *wqe;
1717 * Estimate the required space quickly and roughly.
1718 * We would like to ensure the packet can be pushed
1719 * to the queue and we won't get the orphan WAIT WQE.
1721 if (loc->wqe_free <= MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE ||
1722 loc->elts_free < NB_SEGS(loc->mbuf))
1723 return MLX5_TXCMP_CODE_EXIT;
1724 /* Convert the timestamp into the completion index to wait for. */
1725 ts = *RTE_MBUF_DYNFIELD(loc->mbuf, txq->ts_offset, uint64_t *);
1726 wci = mlx5_txpp_convert_tx_ts(txq->sh, ts);
1727 if (unlikely(wci < 0))
1728 return MLX5_TXCMP_CODE_SINGLE;
1729 /* Build the WAIT WQE with specified completion. */
1730 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
1731 mlx5_tx_cseg_init(txq, loc, wqe, 2, MLX5_OPCODE_WAIT, olx);
1732 mlx5_tx_wseg_init(txq, loc, wqe, wci, olx);
1735 return MLX5_TXCMP_CODE_MULTI;
1737 return MLX5_TXCMP_CODE_SINGLE;
1741 * Tx one packet function for multi-segment TSO. Supports all
1742 * types of Tx offloads, uses MLX5_OPCODE_TSO to build WQEs,
1743 * sends one packet per WQE.
1745 * This routine is responsible for storing processed mbuf
1746 * into elts ring buffer and update elts_head.
1749 * Pointer to TX queue structure.
1751 * Pointer to burst routine local context.
1753 * Configured Tx offloads mask. It is fully defined at
1754 * compile time and may be used for optimization.
1757 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
1758 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
1759 * Local context variables partially updated.
1761 static __rte_always_inline enum mlx5_txcmp_code
1762 mlx5_tx_packet_multi_tso(struct mlx5_txq_data *__rte_restrict txq,
1763 struct mlx5_txq_local *__rte_restrict loc,
1766 struct mlx5_wqe *__rte_restrict wqe;
1767 unsigned int ds, dlen, inlen, ntcp, vlan = 0;
1769 if (MLX5_TXOFF_CONFIG(TXPP)) {
1770 enum mlx5_txcmp_code wret;
1772 /* Generate WAIT for scheduling if requested. */
1773 wret = mlx5_tx_schedule_send(txq, loc, olx);
1774 if (wret == MLX5_TXCMP_CODE_EXIT)
1775 return MLX5_TXCMP_CODE_EXIT;
1776 if (wret == MLX5_TXCMP_CODE_ERROR)
1777 return MLX5_TXCMP_CODE_ERROR;
1780 * Calculate data length to be inlined to estimate
1781 * the required space in WQE ring buffer.
1783 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
1784 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN)
1785 vlan = sizeof(struct rte_vlan_hdr);
1786 inlen = loc->mbuf->l2_len + vlan +
1787 loc->mbuf->l3_len + loc->mbuf->l4_len;
1788 if (unlikely((!inlen || !loc->mbuf->tso_segsz)))
1789 return MLX5_TXCMP_CODE_ERROR;
1790 if (loc->mbuf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
1791 inlen += loc->mbuf->outer_l2_len + loc->mbuf->outer_l3_len;
1792 /* Packet must contain all TSO headers. */
1793 if (unlikely(inlen > MLX5_MAX_TSO_HEADER ||
1794 inlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
1795 inlen > (dlen + vlan)))
1796 return MLX5_TXCMP_CODE_ERROR;
1797 MLX5_ASSERT(inlen >= txq->inlen_mode);
1799 * Check whether there are enough free WQEBBs:
1801 * - Ethernet Segment
1802 * - First Segment of inlined Ethernet data
1803 * - ... data continued ...
1804 * - Data Segments of pointer/min inline type
1806 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
1807 MLX5_ESEG_MIN_INLINE_SIZE +
1809 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
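/*
 * Rough example (assuming MLX5_ESEG_MIN_INLINE_SIZE = 18 and
 * MLX5_WSEG_SIZE = 16 from mlx5_prm.h): a 3-segment packet with 58 bytes
 * of headers to inline gives ds = 3 + 2 + 3 = 8 segments, i.e.
 * (8 + 3) / 4 = 2 WQEBBs.
 */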
1810 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
1811 return MLX5_TXCMP_CODE_EXIT;
1812 /* Check for maximal WQE size. */
1813 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
1814 return MLX5_TXCMP_CODE_ERROR;
1815 #ifdef MLX5_PMD_SOFT_COUNTERS
1816 /* Update sent data bytes/packets counters. */
1817 ntcp = (dlen - (inlen - vlan) + loc->mbuf->tso_segsz - 1) /
1818 loc->mbuf->tso_segsz;
1820 * One will be added for mbuf itself at the end of the mlx5_tx_burst
1821 * from loc->pkts_sent field.
1824 txq->stats.opackets += ntcp;
1825 txq->stats.obytes += dlen + vlan + ntcp * inlen;
1827 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
1828 loc->wqe_last = wqe;
1829 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_TSO, olx);
1830 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 1, olx);
1831 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
1832 txq->wqe_ci += (ds + 3) / 4;
1833 loc->wqe_free -= (ds + 3) / 4;
1834 return MLX5_TXCMP_CODE_MULTI;
1838 * Tx one packet function for multi-segment SEND. Supports all types of Tx
1839 * offloads, uses MLX5_OPCODE_SEND to build WQEs, sends one packet per WQE,
1840 * without any data inlining in Ethernet Segment.
1842 * This routine is responsible for storing processed mbuf
1843 * into elts ring buffer and update elts_head.
1846 * Pointer to TX queue structure.
1848 * Pointer to burst routine local context.
1850 * Configured Tx offloads mask. It is fully defined at
1851 * compile time and may be used for optimization.
1854 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
1855 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
1856 * Local context variables partially updated.
1858 static __rte_always_inline enum mlx5_txcmp_code
1859 mlx5_tx_packet_multi_send(struct mlx5_txq_data *__rte_restrict txq,
1860 struct mlx5_txq_local *__rte_restrict loc,
1863 struct mlx5_wqe_dseg *__rte_restrict dseg;
1864 struct mlx5_wqe *__rte_restrict wqe;
1865 unsigned int ds, nseg;
1867 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
1868 if (MLX5_TXOFF_CONFIG(TXPP)) {
1869 enum mlx5_txcmp_code wret;
1871 /* Generate WAIT for scheduling if requested. */
1872 wret = mlx5_tx_schedule_send(txq, loc, olx);
1873 if (wret == MLX5_TXCMP_CODE_EXIT)
1874 return MLX5_TXCMP_CODE_EXIT;
1875 if (wret == MLX5_TXCMP_CODE_ERROR)
1876 return MLX5_TXCMP_CODE_ERROR;
1879 * No inlining at all - it means saving CPU cycles was prioritized in the
1880 * configuration, we should not copy any packet data to the WQE.
1882 nseg = NB_SEGS(loc->mbuf);
1884 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
1885 return MLX5_TXCMP_CODE_EXIT;
1886 /* Check for maximal WQE size. */
1887 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
1888 return MLX5_TXCMP_CODE_ERROR;
1890 * Some Tx offloads may cause an error if packet is not long enough,
1891 * check against assumed minimal length.
1893 if (rte_pktmbuf_pkt_len(loc->mbuf) <= MLX5_ESEG_MIN_INLINE_SIZE)
1894 return MLX5_TXCMP_CODE_ERROR;
1895 #ifdef MLX5_PMD_SOFT_COUNTERS
1896 /* Update sent data bytes counter. */
1897 txq->stats.obytes += rte_pktmbuf_pkt_len(loc->mbuf);
1898 if (MLX5_TXOFF_CONFIG(VLAN) &&
1899 loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN)
1900 txq->stats.obytes += sizeof(struct rte_vlan_hdr);
1903 * SEND WQE, one WQEBB:
1904 * - Control Segment, SEND opcode
1905 * - Ethernet Segment, optional VLAN, no inline
1906 * - Data Segments, pointer only type
1908 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
1909 loc->wqe_last = wqe;
1910 mlx5_tx_cseg_init(txq, loc, wqe, ds, MLX5_OPCODE_SEND, olx);
1911 mlx5_tx_eseg_none(txq, loc, wqe, olx);
1912 dseg = &wqe->dseg[0];
1914 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
1915 struct rte_mbuf *mbuf;
1918 * Zero length segment found, have to correct total
1919 * size of WQE in segments.
1920 * It is supposed to be a rare occasion, so in the normal
1921 * case (no zero length segments) we avoid extra
1922 * writing to the Control Segment.
1925 wqe->cseg.sq_ds -= RTE_BE32(1);
1927 loc->mbuf = mbuf->next;
1928 rte_pktmbuf_free_seg(mbuf);
1934 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
1935 rte_pktmbuf_data_len(loc->mbuf), olx);
1936 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
1941 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
1942 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
1943 loc->mbuf = loc->mbuf->next;
1946 txq->wqe_ci += (ds + 3) / 4;
1947 loc->wqe_free -= (ds + 3) / 4;
1948 return MLX5_TXCMP_CODE_MULTI;
1952 * Tx one packet function for multi-segment SEND. Supports all
1953 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
1954 * sends one packet per WQE, with data inlining in
1955 * Ethernet Segment and minimal Data Segments.
1957 * This routine is responsible for storing processed mbuf
1958 * into elts ring buffer and update elts_head.
1961 * Pointer to TX queue structure.
1963 * Pointer to burst routine local context.
1965 * Configured Tx offloads mask. It is fully defined at
1966 * compile time and may be used for optimization.
1969 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
1970 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
1971 * Local context variables partially updated.
1973 static __rte_always_inline enum mlx5_txcmp_code
1974 mlx5_tx_packet_multi_inline(struct mlx5_txq_data *__rte_restrict txq,
1975 struct mlx5_txq_local *__rte_restrict loc,
1978 struct mlx5_wqe *__rte_restrict wqe;
1979 unsigned int ds, inlen, dlen, vlan = 0;
1981 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
1982 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
1983 if (MLX5_TXOFF_CONFIG(TXPP)) {
1984 enum mlx5_txcmp_code wret;
1986 /* Generate WAIT for scheduling if requested. */
1987 wret = mlx5_tx_schedule_send(txq, loc, olx);
1988 if (wret == MLX5_TXCMP_CODE_EXIT)
1989 return MLX5_TXCMP_CODE_EXIT;
1990 if (wret == MLX5_TXCMP_CODE_ERROR)
1991 return MLX5_TXCMP_CODE_ERROR;
1994 * First calculate data length to be inlined
1995 * to estimate the required space for WQE.
1997 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
1998 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN)
1999 vlan = sizeof(struct rte_vlan_hdr);
2000 inlen = dlen + vlan;
2001 /* Check against minimal length. */
2002 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
2003 return MLX5_TXCMP_CODE_ERROR;
2004 MLX5_ASSERT(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
2005 if (inlen > txq->inlen_send ||
2006 loc->mbuf->ol_flags & RTE_MBUF_F_TX_DYNF_NOINLINE) {
2007 struct rte_mbuf *mbuf;
2012 nxlen = rte_pktmbuf_data_len(mbuf);
2014 * Packet length exceeds the allowed inline data length,
2015 * check whether the minimal inlining is required.
2017 if (txq->inlen_mode) {
2018 MLX5_ASSERT(txq->inlen_mode >=
2019 MLX5_ESEG_MIN_INLINE_SIZE);
2020 MLX5_ASSERT(txq->inlen_mode <= txq->inlen_send);
2021 inlen = txq->inlen_mode;
2022 } else if (vlan && !txq->vlan_en) {
2024 * VLAN insertion is requested but the hardware does not
2025 * support the offload, it will be done with software inlining.
2027 inlen = MLX5_ESEG_MIN_INLINE_SIZE;
2028 } else if (mbuf->ol_flags & RTE_MBUF_F_TX_DYNF_NOINLINE ||
2029 nxlen > txq->inlen_send) {
2030 return mlx5_tx_packet_multi_send(txq, loc, olx);
2035 * Now we know the minimal amount of data is requested
2036 * to inline. Check whether we should inline the buffers
2037 * from the chain beginning to eliminate some mbufs.
2039 if (unlikely(nxlen <= txq->inlen_send)) {
2040 /* We can inline first mbuf at least. */
2041 if (nxlen < inlen) {
2044 /* Scan mbufs till inlen filled. */
2049 nxlen = rte_pktmbuf_data_len(mbuf);
2051 } while (unlikely(nxlen < inlen));
2052 if (unlikely(nxlen > txq->inlen_send)) {
2053 /* We cannot inline entire mbuf. */
2054 smlen = inlen - smlen;
2055 start = rte_pktmbuf_mtod_offset
2056 (mbuf, uintptr_t, smlen);
2064 /* The end of the packet must not be reached here. */
2066 nxlen = inlen + rte_pktmbuf_data_len(mbuf);
2067 } while (unlikely(nxlen < txq->inlen_send));
2069 start = rte_pktmbuf_mtod(mbuf, uintptr_t);
2071 * Check whether we can inline to align the start
2072 * address of the data buffer to a cacheline.
2075 start = (~start + 1) & (RTE_CACHE_LINE_SIZE - 1);
2076 if (unlikely(start)) {
2078 if (start <= txq->inlen_send)
2083 * Check whether there are enough free WQEBBs:
2085 * - Ethernet Segment
2086 * - First Segment of inlined Ethernet data
2087 * - ... data continued ...
2088 * - Data Segments of pointer/min inline type
2090 * Estimate the number of Data Segments conservatively,
2091 * assuming no mbufs are freed during inlining.
2093 MLX5_ASSERT(inlen <= txq->inlen_send);
2094 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
2095 MLX5_ESEG_MIN_INLINE_SIZE +
2097 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
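	/*
	 * Illustrative sketch of the estimate above (assuming 16-byte WSEGs
	 * and 64-byte WQEBBs): the Control and Ethernet Segments, one pointer
	 * Data Segment per mbuf segment and the inlined bytes beyond the
	 * minimal inline part rounded up to whole WSEGs are summed into ds;
	 * e.g. ds = 8 segments need (8 + 3) / 4 = 2 free WQEBBs.
	 */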
2098 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
2099 return MLX5_TXCMP_CODE_EXIT;
2100 /* Check for maximal WQE size. */
2101 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
2102 return MLX5_TXCMP_CODE_ERROR;
2103 #ifdef MLX5_PMD_SOFT_COUNTERS
2104 /* Update sent data bytes/packets counters. */
2105 txq->stats.obytes += dlen + vlan;
2107 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
2108 loc->wqe_last = wqe;
2109 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_SEND, olx);
2110 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 0, olx);
2111 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
2112 txq->wqe_ci += (ds + 3) / 4;
2113 loc->wqe_free -= (ds + 3) / 4;
2114 return MLX5_TXCMP_CODE_MULTI;
2118 * Tx burst function for multi-segment packets. Supports all
2119 * types of Tx offloads, uses MLX5_OPCODE_SEND/TSO to build WQEs,
2120 * sends one packet per WQE. Function stops sending if it
2121 * encounters the single-segment packet.
2123 * This routine is responsible for storing processed mbuf
2124 * into elts ring buffer and update elts_head.
2127 * Pointer to TX queue structure.
2129 * Packets to transmit.
2131 * Number of packets in array.
2133 * Pointer to burst routine local context.
2135 * Configured Tx offloads mask. It is fully defined at
2136 * compile time and may be used for optimization.
2139 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
2140 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
2141 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
2142 * MLX5_TXCMP_CODE_TSO - TSO single-segment packet encountered.
2143 * Local context variables updated.
2145 static __rte_always_inline enum mlx5_txcmp_code
2146 mlx5_tx_burst_mseg(struct mlx5_txq_data *__rte_restrict txq,
2147 struct rte_mbuf **__rte_restrict pkts,
2148 unsigned int pkts_n,
2149 struct mlx5_txq_local *__rte_restrict loc,
2152 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
2153 MLX5_ASSERT(pkts_n > loc->pkts_sent);
2154 pkts += loc->pkts_sent + 1;
2155 pkts_n -= loc->pkts_sent;
2157 enum mlx5_txcmp_code ret;
2159 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
2161 * Estimate the number of free elts quickly but conservatively.
2162 * Some segment may be fully inlined and freed,
2163 * ignore this here - precise estimation is costly.
2165 if (loc->elts_free < NB_SEGS(loc->mbuf))
2166 return MLX5_TXCMP_CODE_EXIT;
2167 if (MLX5_TXOFF_CONFIG(TSO) &&
2168 unlikely(loc->mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
2169 /* Proceed with multi-segment TSO. */
2170 ret = mlx5_tx_packet_multi_tso(txq, loc, olx);
2171 } else if (MLX5_TXOFF_CONFIG(INLINE)) {
2172 /* Proceed with multi-segment SEND with inlining. */
2173 ret = mlx5_tx_packet_multi_inline(txq, loc, olx);
2175 /* Proceed with multi-segment SEND w/o inlining. */
2176 ret = mlx5_tx_packet_multi_send(txq, loc, olx);
2178 if (ret == MLX5_TXCMP_CODE_EXIT)
2179 return MLX5_TXCMP_CODE_EXIT;
2180 if (ret == MLX5_TXCMP_CODE_ERROR)
2181 return MLX5_TXCMP_CODE_ERROR;
2182 /* WQE is built, go to the next packet. */
2185 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
2186 return MLX5_TXCMP_CODE_EXIT;
2187 loc->mbuf = *pkts++;
2189 rte_prefetch0(*pkts);
2190 if (likely(NB_SEGS(loc->mbuf) > 1))
2192 /* Here ends the series of multi-segment packets. */
2193 if (MLX5_TXOFF_CONFIG(TSO) &&
2194 unlikely(loc->mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG))
2195 return MLX5_TXCMP_CODE_TSO;
2196 return MLX5_TXCMP_CODE_SINGLE;
2202 * Tx burst function for single-segment packets with TSO.
2203 * Supports all types of Tx offloads, except multi-packets.
2204 * Uses MLX5_OPCODE_TSO to build WQEs, sends one packet per WQE.
2205 * Function stops sending if it encounters the multi-segment
2206 * packet or packet without TSO requested.
2208 * The routine is responsible for storing the processed mbuf into the elts ring
2209 * buffer and updating elts_head if inline offload is requested, due to possible
2210 * early freeing of the inlined mbufs (the pkts array cannot be stored in elts as a batch).
2213 * Pointer to TX queue structure.
2215 * Packets to transmit.
2217 * Number of packets in array.
2219 * Pointer to burst routine local context.
2221 * Configured Tx offloads mask. It is fully defined at
2222 * compile time and may be used for optimization.
2225 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
2226 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
2227 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
2228 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
2229 * Local context variables updated.
2231 static __rte_always_inline enum mlx5_txcmp_code
2232 mlx5_tx_burst_tso(struct mlx5_txq_data *__rte_restrict txq,
2233 struct rte_mbuf **__rte_restrict pkts,
2234 unsigned int pkts_n,
2235 struct mlx5_txq_local *__rte_restrict loc,
2238 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
2239 MLX5_ASSERT(pkts_n > loc->pkts_sent);
2240 pkts += loc->pkts_sent + 1;
2241 pkts_n -= loc->pkts_sent;
2243 struct mlx5_wqe_dseg *__rte_restrict dseg;
2244 struct mlx5_wqe *__rte_restrict wqe;
2245 unsigned int ds, dlen, hlen, ntcp, vlan = 0;
2248 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
2249 if (MLX5_TXOFF_CONFIG(TXPP)) {
2250 enum mlx5_txcmp_code wret;
2252 /* Generate WAIT for scheduling if requested. */
2253 wret = mlx5_tx_schedule_send(txq, loc, olx);
2254 if (wret == MLX5_TXCMP_CODE_EXIT)
2255 return MLX5_TXCMP_CODE_EXIT;
2256 if (wret == MLX5_TXCMP_CODE_ERROR)
2257 return MLX5_TXCMP_CODE_ERROR;
2259 dlen = rte_pktmbuf_data_len(loc->mbuf);
2260 if (MLX5_TXOFF_CONFIG(VLAN) &&
2261 loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN) {
2262 vlan = sizeof(struct rte_vlan_hdr);
2265 * First calculate the WQE size to check
2266 * whether we have enough space in ring buffer.
2268 hlen = loc->mbuf->l2_len + vlan +
2269 loc->mbuf->l3_len + loc->mbuf->l4_len;
2270 if (unlikely((!hlen || !loc->mbuf->tso_segsz)))
2271 return MLX5_TXCMP_CODE_ERROR;
2272 if (loc->mbuf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
2273 hlen += loc->mbuf->outer_l2_len +
2274 loc->mbuf->outer_l3_len;
2275 /* Segment must contain all TSO headers. */
2276 if (unlikely(hlen > MLX5_MAX_TSO_HEADER ||
2277 hlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
2278 hlen > (dlen + vlan)))
2279 return MLX5_TXCMP_CODE_ERROR;
2281 * Check whether there are enough free WQEBBs:
2283 * - Ethernet Segment
2284 * - First Segment of inlined Ethernet data
2285 * - ... data continued ...
2286 * - Finishing Data Segment of pointer type
2288 ds = 4 + (hlen - MLX5_ESEG_MIN_INLINE_SIZE +
2289 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
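	/*
	 * Worked example (illustrative, assuming an 18-byte
	 * MLX5_ESEG_MIN_INLINE_SIZE and a 16-byte MLX5_WSEG_SIZE): plain
	 * Ethernet/IPv4/TCP headers give hlen = 14 + 20 + 20 = 54, so
	 * ds = 4 + (54 - 18 + 15) / 16 = 7 segments, i.e. (7 + 3) / 4 = 2 WQEBBs.
	 */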
2290 if (loc->wqe_free < ((ds + 3) / 4))
2291 return MLX5_TXCMP_CODE_EXIT;
2292 #ifdef MLX5_PMD_SOFT_COUNTERS
2293 /* Update sent data bytes/packets counters. */
2294 ntcp = (dlen + vlan - hlen +
2295 loc->mbuf->tso_segsz - 1) /
2296 loc->mbuf->tso_segsz;
2298 * One more packet will be counted for the mbuf itself at the end
2299 * of mlx5_tx_burst(), via the loc->pkts_sent field.
2302 txq->stats.opackets += ntcp;
2303 txq->stats.obytes += dlen + vlan + ntcp * hlen;
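	/*
	 * Worked example (illustrative figures only): with dlen + vlan = 9000,
	 * hlen = 54 and tso_segsz = 1460 the estimate gives
	 * ntcp = (9000 - 54 + 1459) / 1460 = 7 TCP segments, so obytes grows by
	 * 9000 + 7 * 54 bytes - the extra ntcp * hlen term accounts for the
	 * headers replicated by the NIC for every produced TSO segment.
	 */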
2306 * Build the TSO WQE:
2308 * - Ethernet Segment with hlen bytes inlined
2309 * - Data Segment of pointer type
2311 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
2312 loc->wqe_last = wqe;
2313 mlx5_tx_cseg_init(txq, loc, wqe, ds,
2314 MLX5_OPCODE_TSO, olx);
2315 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan, hlen, 1, olx);
2316 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) + hlen - vlan;
2317 dlen -= hlen - vlan;
2318 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
2320 * WQE is built, update the loop parameters
2321 * and go to the next packet.
2323 txq->wqe_ci += (ds + 3) / 4;
2324 loc->wqe_free -= (ds + 3) / 4;
2325 if (MLX5_TXOFF_CONFIG(INLINE))
2326 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
2330 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
2331 return MLX5_TXCMP_CODE_EXIT;
2332 loc->mbuf = *pkts++;
2334 rte_prefetch0(*pkts);
2335 if (MLX5_TXOFF_CONFIG(MULTI) &&
2336 unlikely(NB_SEGS(loc->mbuf) > 1))
2337 return MLX5_TXCMP_CODE_MULTI;
2338 if (likely(!(loc->mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)))
2339 return MLX5_TXCMP_CODE_SINGLE;
2340 /* Continue with the next TSO packet. */
2346 * Analyze the packet and select the best method to send.
2349 * Pointer to TX queue structure.
2351 * Pointer to burst routine local context.
2353 * Configured Tx offloads mask. It is fully defined at
2354 * compile time and may be used for optimization.
2356 * The predefined flag indicating whether to do the complete check for
2357 * multi-segment packets and TSO.
2360 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
2361 * MLX5_TXCMP_CODE_TSO - TSO required, use TSO/LSO.
2362 * MLX5_TXCMP_CODE_SINGLE - single-segment packet, use SEND.
2363 * MLX5_TXCMP_CODE_EMPW - single-segment packet, use MPW.
2365 static __rte_always_inline enum mlx5_txcmp_code
2366 mlx5_tx_able_to_empw(struct mlx5_txq_data *__rte_restrict txq,
2367 struct mlx5_txq_local *__rte_restrict loc,
2371 /* Check for multi-segment packet. */
2373 MLX5_TXOFF_CONFIG(MULTI) &&
2374 unlikely(NB_SEGS(loc->mbuf) > 1))
2375 return MLX5_TXCMP_CODE_MULTI;
2376 /* Check for TSO packet. */
2378 MLX5_TXOFF_CONFIG(TSO) &&
2379 unlikely(loc->mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG))
2380 return MLX5_TXCMP_CODE_TSO;
2381 /* Check if eMPW is enabled at all. */
2382 if (!MLX5_TXOFF_CONFIG(EMPW))
2383 return MLX5_TXCMP_CODE_SINGLE;
2384 /* Check if eMPW can be engaged. */
2385 if (MLX5_TXOFF_CONFIG(VLAN) &&
2386 unlikely(loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN) &&
2387 (!MLX5_TXOFF_CONFIG(INLINE) ||
2388 unlikely((rte_pktmbuf_data_len(loc->mbuf) +
2389 sizeof(struct rte_vlan_hdr)) > txq->inlen_empw))) {
2391 * eMPW does not support VLAN insertion offload, we have to
2392 * inline the entire packet but packet is too long for inlining.
2394 return MLX5_TXCMP_CODE_SINGLE;
2396 return MLX5_TXCMP_CODE_EMPW;
2400 * Check the next packet attributes to match with the eMPW batch ones.
2401 * In addition, for legacy MPW the packet length is checked as well.
2404 * Pointer to TX queue structure.
2406 * Pointer to Ethernet Segment of eMPW batch.
2408 * Pointer to burst routine local context.
2410 * Length of previous packet in MPW descriptor.
2412 * Configured Tx offloads mask. It is fully defined at
2413 * compile time and may be used for optimization.
2416 * true - packet match with eMPW batch attributes.
2417 * false - no match, eMPW should be restarted.
2419 static __rte_always_inline bool
2420 mlx5_tx_match_empw(struct mlx5_txq_data *__rte_restrict txq,
2421 struct mlx5_wqe_eseg *__rte_restrict es,
2422 struct mlx5_txq_local *__rte_restrict loc,
2426 uint8_t swp_flags = 0;
2428 /* Compare the checksum flags, if any. */
2429 if (MLX5_TXOFF_CONFIG(CSUM) &&
2430 txq_ol_cksum_to_cs(loc->mbuf) != es->cs_flags)
2432 /* Compare the Software Parser offsets and flags. */
2433 if (MLX5_TXOFF_CONFIG(SWP) &&
2434 (es->swp_offs != txq_mbuf_to_swp(loc, &swp_flags, olx) ||
2435 es->swp_flags != swp_flags))
2437 /* Fill metadata field if needed. */
2438 if (MLX5_TXOFF_CONFIG(METADATA) &&
2439 es->metadata != (loc->mbuf->ol_flags & RTE_MBUF_DYNFLAG_TX_METADATA ?
2440 rte_cpu_to_be_32(*RTE_FLOW_DYNF_METADATA(loc->mbuf)) : 0))
2442 /* Legacy MPW can send packets with the same length only. */
2443 if (MLX5_TXOFF_CONFIG(MPW) &&
2444 dlen != rte_pktmbuf_data_len(loc->mbuf))
2446 /* There must be no VLAN packets in eMPW loop. */
2447 if (MLX5_TXOFF_CONFIG(VLAN))
2448 MLX5_ASSERT(!(loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN));
2449 /* Check if the scheduling is requested. */
2450 if (MLX5_TXOFF_CONFIG(TXPP) &&
2451 loc->mbuf->ol_flags & txq->ts_mask)
2457 * Update send loop variables and WQE for eMPW loop without data inlining.
2458 * Number of Data Segments is equal to the number of sent packets.
2461 * Pointer to TX queue structure.
2463 * Pointer to burst routine local context.
2465 * Number of packets (equal to the number of Data Segments).
2467 * Accumulated statistics, bytes sent.
2469 * Configured Tx offloads mask. It is fully defined at
2470 * compile time and may be used for optimization.
2473 * true - packet match with eMPW batch attributes.
2474 * false - no match, eMPW should be restarted.
2476 static __rte_always_inline void
2477 mlx5_tx_sdone_empw(struct mlx5_txq_data *__rte_restrict txq,
2478 struct mlx5_txq_local *__rte_restrict loc,
2481 unsigned int olx __rte_unused)
2483 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
2484 #ifdef MLX5_PMD_SOFT_COUNTERS
2485 /* Update sent data bytes counter. */
2486 txq->stats.obytes += slen;
2490 loc->elts_free -= ds;
2491 loc->pkts_sent += ds;
2493 loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
2494 txq->wqe_ci += (ds + 3) / 4;
2495 loc->wqe_free -= (ds + 3) / 4;
2499 * Update send loop variables and WQE for eMPW loop with data inlining.
2500 * Gets the size of pushed descriptors and data to the WQE.
2503 * Pointer to TX queue structure.
2505 * Pointer to burst routine local context.
2507 * Total size of descriptor/data in bytes.
2509 * Accumulated statistics, data bytes sent.
2511 * The base WQE for the eMPW/MPW descriptor.
2513 * Configured Tx offloads mask. It is fully defined at
2514 * compile time and may be used for optimization.
2517 * true - packet match with eMPW batch attributes.
2518 * false - no match, eMPW should be restarted.
2520 static __rte_always_inline void
2521 mlx5_tx_idone_empw(struct mlx5_txq_data *__rte_restrict txq,
2522 struct mlx5_txq_local *__rte_restrict loc,
2525 struct mlx5_wqe *__rte_restrict wqem,
2526 unsigned int olx __rte_unused)
2528 struct mlx5_wqe_dseg *dseg = &wqem->dseg[0];
2530 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
2531 #ifdef MLX5_PMD_SOFT_COUNTERS
2532 /* Update sent data bytes counter. */
2533 txq->stats.obytes += slen;
2537 if (MLX5_TXOFF_CONFIG(MPW) && dseg->bcount == RTE_BE32(0)) {
2539 * If the legacy MPW session contains inline packets,
2540 * the length of the only inline data segment should be set
2541 * and the total length aligned to the segment size.
2543 MLX5_ASSERT(len > sizeof(dseg->bcount));
2544 dseg->bcount = rte_cpu_to_be_32((len - sizeof(dseg->bcount)) |
2545 MLX5_ETH_WQE_DATA_INLINE);
2546 len = (len + MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE + 2;
2549 * The session is not legacy MPW or contains the
2550 * data buffer pointer segments.
2552 MLX5_ASSERT((len % MLX5_WSEG_SIZE) == 0);
2553 len = len / MLX5_WSEG_SIZE + 2;
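	/*
	 * Worked example (illustrative, assuming 16-byte WSEGs): an eMPW
	 * session that pushed 112 bytes of pointer Data Segments gives
	 * len = 112 / 16 + 2 = 9 DS once the two title segments
	 * (Control + Ethernet) are counted, and wqe_ci below advances by
	 * (9 + 3) / 4 = 3 WQEBBs.
	 */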
2555 wqem->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | len);
2556 txq->wqe_ci += (len + 3) / 4;
2557 loc->wqe_free -= (len + 3) / 4;
2558 loc->wqe_last = wqem;
2562 * The set of Tx burst functions for single-segment packets without TSO
2563 * and with Multi-Packet Writing feature support.
2564 * Supports all types of Tx offloads, except multi-packets and TSO.
2566 * Uses MLX5_OPCODE_EMPW to build WQEs if possible and sends as many packets
2567 * per WQE as it can. If eMPW is not configured or the packet can not be sent with
2568 * eMPW (VLAN insertion) the ordinary SEND opcode is used and only one packet is sent per WQE.
2571 * The functions stop sending if they encounter a multi-segment packet or a packet
2572 * with TSO requested.
2574 * The routines are responsible for storing the processed mbuf into the elts ring
2575 * buffer and updating elts_head if inlining offload is requested. Otherwise copying
2576 * the mbufs to elts can be postponed and completed at the end of the burst routine.
2579 * Pointer to TX queue structure.
2581 * Packets to transmit.
2583 * Number of packets in array.
2585 * Pointer to burst routine local context.
2587 * Configured Tx offloads mask. It is fully defined at
2588 * compile time and may be used for optimization.
2591 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
2592 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
2593 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
2594 * MLX5_TXCMP_CODE_TSO - TSO packet encountered.
2595 * MLX5_TXCMP_CODE_SINGLE - used inside functions set.
2596 * MLX5_TXCMP_CODE_EMPW - used inside functions set.
2598 * Local context variables updated.
2601 * The routine sends packets with MLX5_OPCODE_EMPW
2602 * without inlining, this is a dedicated optimized branch.
2603 * No VLAN insertion is supported.
2605 static __rte_always_inline enum mlx5_txcmp_code
2606 mlx5_tx_burst_empw_simple(struct mlx5_txq_data *__rte_restrict txq,
2607 struct rte_mbuf **__rte_restrict pkts,
2608 unsigned int pkts_n,
2609 struct mlx5_txq_local *__rte_restrict loc,
2613 * The subroutine is part of mlx5_tx_burst_single() and sends
2614 * single-segment packets with the eMPW opcode without data inlining.
2616 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
2617 MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
2618 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
2619 MLX5_ASSERT(pkts_n > loc->pkts_sent);
2620 pkts += loc->pkts_sent + 1;
2621 pkts_n -= loc->pkts_sent;
2623 struct mlx5_wqe_dseg *__rte_restrict dseg;
2624 struct mlx5_wqe_eseg *__rte_restrict eseg;
2625 enum mlx5_txcmp_code ret;
2626 unsigned int part, loop;
2627 unsigned int slen = 0;
2630 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
2631 if (MLX5_TXOFF_CONFIG(TXPP)) {
2632 enum mlx5_txcmp_code wret;
2634 /* Generate WAIT for scheduling if requested. */
2635 wret = mlx5_tx_schedule_send(txq, loc, olx);
2636 if (wret == MLX5_TXCMP_CODE_EXIT)
2637 return MLX5_TXCMP_CODE_EXIT;
2638 if (wret == MLX5_TXCMP_CODE_ERROR)
2639 return MLX5_TXCMP_CODE_ERROR;
2641 part = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
2642 MLX5_MPW_MAX_PACKETS :
2643 MLX5_EMPW_MAX_PACKETS);
2644 if (unlikely(loc->elts_free < part)) {
2645 /* We do not have enough elts to save all mbufs. */
2646 if (unlikely(loc->elts_free < MLX5_EMPW_MIN_PACKETS))
2647 return MLX5_TXCMP_CODE_EXIT;
2648 /* But we are still able to send at least a minimal eMPW. */
2649 part = loc->elts_free;
2651 /* Check whether we have enough WQEs */
2652 if (unlikely(loc->wqe_free < ((2 + part + 3) / 4))) {
2653 if (unlikely(loc->wqe_free <
2654 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
2655 return MLX5_TXCMP_CODE_EXIT;
2656 part = (loc->wqe_free * 4) - 2;
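	/*
	 * Capacity note (illustrative, assuming 64-byte WQEBBs of four 16-byte
	 * segments): a non-inline eMPW session spends two segments on the title
	 * Control + Ethernet Segments plus one Data Segment per packet, hence
	 * wqe_free WQEBBs can carry at most wqe_free * 4 - 2 packets, e.g.
	 * 4 free WQEBBs limit the batch to 14 packets.
	 */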
2658 if (likely(part > 1))
2659 rte_prefetch0(*pkts);
2660 loc->wqe_last = txq->wqes + (txq->wqe_ci & txq->wqe_m);
2662 * Build eMPW title WQEBB:
2663 * - Control Segment, eMPW opcode
2664 * - Ethernet Segment, no inline
2666 mlx5_tx_cseg_init(txq, loc, loc->wqe_last, part + 2,
2667 MLX5_OPCODE_ENHANCED_MPSW, olx);
2668 mlx5_tx_eseg_none(txq, loc, loc->wqe_last,
2669 olx & ~MLX5_TXOFF_CONFIG_VLAN);
2670 eseg = &loc->wqe_last->eseg;
2671 dseg = &loc->wqe_last->dseg[0];
2673 /* Store the packet length for legacy MPW. */
2674 if (MLX5_TXOFF_CONFIG(MPW))
2675 eseg->mss = rte_cpu_to_be_16
2676 (rte_pktmbuf_data_len(loc->mbuf));
2678 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
2679 #ifdef MLX5_PMD_SOFT_COUNTERS
2680 /* Update sent data bytes counter. */
2685 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
2687 if (unlikely(--loop == 0))
2689 loc->mbuf = *pkts++;
2690 if (likely(loop > 1))
2691 rte_prefetch0(*pkts);
2692 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
2694 * Unroll the completion code to avoid
2695 * returning a variable value - it results in
2696 * unoptimized subsequent checking in the caller.
2698 if (ret == MLX5_TXCMP_CODE_MULTI) {
2700 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
2701 if (unlikely(!loc->elts_free ||
2703 return MLX5_TXCMP_CODE_EXIT;
2704 return MLX5_TXCMP_CODE_MULTI;
2706 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
2707 if (ret == MLX5_TXCMP_CODE_TSO) {
2709 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
2710 if (unlikely(!loc->elts_free ||
2712 return MLX5_TXCMP_CODE_EXIT;
2713 return MLX5_TXCMP_CODE_TSO;
2715 if (ret == MLX5_TXCMP_CODE_SINGLE) {
2717 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
2718 if (unlikely(!loc->elts_free ||
2720 return MLX5_TXCMP_CODE_EXIT;
2721 return MLX5_TXCMP_CODE_SINGLE;
2723 if (ret != MLX5_TXCMP_CODE_EMPW) {
2726 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
2727 return MLX5_TXCMP_CODE_ERROR;
2730 * Check whether the packet parameters coincide
2731 * within the assumed eMPW batch:
2732 * - checksum settings
2734 * - software parser settings
2735 * - packet length (legacy MPW only)
2736 * - scheduling is not required
2738 if (!mlx5_tx_match_empw(txq, eseg, loc, dlen, olx)) {
2741 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
2742 if (unlikely(!loc->elts_free ||
2744 return MLX5_TXCMP_CODE_EXIT;
2748 /* Packet attributes match, continue the same eMPW. */
2750 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
2751 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
2753 /* eMPW is built successfully, update loop parameters. */
2755 MLX5_ASSERT(pkts_n >= part);
2756 #ifdef MLX5_PMD_SOFT_COUNTERS
2757 /* Update sent data bytes counter. */
2758 txq->stats.obytes += slen;
2760 loc->elts_free -= part;
2761 loc->pkts_sent += part;
2762 txq->wqe_ci += (2 + part + 3) / 4;
2763 loc->wqe_free -= (2 + part + 3) / 4;
2765 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
2766 return MLX5_TXCMP_CODE_EXIT;
2767 loc->mbuf = *pkts++;
2768 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
2769 if (unlikely(ret != MLX5_TXCMP_CODE_EMPW))
2771 /* Continue sending eMPW batches. */
2777 * The routine sends packets with MLX5_OPCODE_EMPW
2778 * with inlining, optionally supports VLAN insertion.
2780 static __rte_always_inline enum mlx5_txcmp_code
2781 mlx5_tx_burst_empw_inline(struct mlx5_txq_data *__rte_restrict txq,
2782 struct rte_mbuf **__rte_restrict pkts,
2783 unsigned int pkts_n,
2784 struct mlx5_txq_local *__rte_restrict loc,
2788 * The subroutine is part of mlx5_tx_burst_single() and sends
2789 * single-segment packets with the eMPW opcode with data inlining.
2791 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
2792 MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
2793 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
2794 MLX5_ASSERT(pkts_n > loc->pkts_sent);
2795 pkts += loc->pkts_sent + 1;
2796 pkts_n -= loc->pkts_sent;
2798 struct mlx5_wqe_dseg *__rte_restrict dseg;
2799 struct mlx5_wqe *__rte_restrict wqem;
2800 enum mlx5_txcmp_code ret;
2801 unsigned int room, part, nlim;
2802 unsigned int slen = 0;
2804 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
2805 if (MLX5_TXOFF_CONFIG(TXPP)) {
2806 enum mlx5_txcmp_code wret;
2808 /* Generate WAIT for scheduling if requested. */
2809 wret = mlx5_tx_schedule_send(txq, loc, olx);
2810 if (wret == MLX5_TXCMP_CODE_EXIT)
2811 return MLX5_TXCMP_CODE_EXIT;
2812 if (wret == MLX5_TXCMP_CODE_ERROR)
2813 return MLX5_TXCMP_CODE_ERROR;
2816 * Limit the number of packets in one WQE
2817 * to improve CQE generation latency.
2819 nlim = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
2820 MLX5_MPW_INLINE_MAX_PACKETS :
2821 MLX5_EMPW_MAX_PACKETS);
2822 /* Check whether we have the minimal amount of WQEs. */
2823 if (unlikely(loc->wqe_free <
2824 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
2825 return MLX5_TXCMP_CODE_EXIT;
2826 if (likely(pkts_n > 1))
2827 rte_prefetch0(*pkts);
2828 wqem = txq->wqes + (txq->wqe_ci & txq->wqe_m);
2830 * Build eMPW title WQEBB:
2831 * - Control Segment, eMPW opcode, zero DS
2832 * - Ethernet Segment, no inline
2834 mlx5_tx_cseg_init(txq, loc, wqem, 0,
2835 MLX5_OPCODE_ENHANCED_MPSW, olx);
2836 mlx5_tx_eseg_none(txq, loc, wqem,
2837 olx & ~MLX5_TXOFF_CONFIG_VLAN);
2838 dseg = &wqem->dseg[0];
2839 /* Store the packet length for legacy MPW. */
2840 if (MLX5_TXOFF_CONFIG(MPW))
2841 wqem->eseg.mss = rte_cpu_to_be_16
2842 (rte_pktmbuf_data_len(loc->mbuf));
2843 room = RTE_MIN(MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE,
2844 loc->wqe_free) * MLX5_WQE_SIZE -
2845 MLX5_WQE_CSEG_SIZE -
2847 /* Limit the room for legacy MPW sessions for performance. */
2848 if (MLX5_TXOFF_CONFIG(MPW))
2849 room = RTE_MIN(room,
2850 RTE_MAX(txq->inlen_empw +
2851 sizeof(dseg->bcount) +
2852 (MLX5_TXOFF_CONFIG(VLAN) ?
2853 sizeof(struct rte_vlan_hdr) : 0),
2854 MLX5_MPW_INLINE_MAX_PACKETS *
2855 MLX5_WQE_DSEG_SIZE));
2856 /* Build WQE till we have space, packets and resources. */
2859 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
2860 uint8_t *dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
2863 MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
2864 MLX5_ASSERT((room % MLX5_WQE_DSEG_SIZE) == 0);
2865 MLX5_ASSERT((uintptr_t)dseg < (uintptr_t)txq->wqes_end);
2867 * Some Tx offloads may cause an error if the packet is not
2868 * long enough, check against the assumed minimal length.
2870 if (unlikely(dlen <= MLX5_ESEG_MIN_INLINE_SIZE)) {
2872 if (unlikely(!part))
2873 return MLX5_TXCMP_CODE_ERROR;
2875 * We have some successfully built
2876 * packet Data Segments to send.
2878 mlx5_tx_idone_empw(txq, loc, part,
2880 return MLX5_TXCMP_CODE_ERROR;
2882 /* Inline or not inline - that's the Question. */
2883 if (dlen > txq->inlen_empw ||
2884 loc->mbuf->ol_flags & RTE_MBUF_F_TX_DYNF_NOINLINE)
2886 if (MLX5_TXOFF_CONFIG(MPW)) {
2887 if (dlen > txq->inlen_send)
2891 /* Open new inline MPW session. */
2892 tlen += sizeof(dseg->bcount);
2893 dseg->bcount = RTE_BE32(0);
2895 (dseg, sizeof(dseg->bcount));
2898 * Pointer and inline descriptors cannot
2899 * be intermixed in legacy MPW sessions.
2901 if (wqem->dseg[0].bcount)
2905 tlen = sizeof(dseg->bcount) + dlen;
2907 /* Inline entire packet, optional VLAN insertion. */
2908 if (MLX5_TXOFF_CONFIG(VLAN) &&
2909 loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN) {
2911 * The packet length was checked in
2912 * mlx5_tx_able_to_empw(), so the packet is
2913 * guaranteed to fit into the inline length.
2916 sizeof(struct rte_vlan_hdr)) <=
2918 tlen += sizeof(struct rte_vlan_hdr);
2921 dseg = mlx5_tx_dseg_vlan(txq, loc, dseg,
2923 #ifdef MLX5_PMD_SOFT_COUNTERS
2924 /* Update sent data bytes counter. */
2925 slen += sizeof(struct rte_vlan_hdr);
2930 dseg = mlx5_tx_dseg_empw(txq, loc, dseg,
2933 if (!MLX5_TXOFF_CONFIG(MPW))
2934 tlen = RTE_ALIGN(tlen, MLX5_WSEG_SIZE);
2935 MLX5_ASSERT(room >= tlen);
2938 * Packet data are completely inlined,
2939 * we can try to free the packet.
2941 if (likely(loc->pkts_sent == loc->mbuf_free)) {
2943 * All the packets from the burst beginning
2944 * are inlined, so the mbufs can be freed directly
2945 * from the original array on tx_burst() exit.
2951 * In order not to call rte_pktmbuf_free_seg() here,
2952 * in the innermost loop (which might be very
2953 * expensive), we just save the mbuf in elts.
2955 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
2960 * Pointer and inline descriptors cannot
2961 * be intermixed in legacy MPW sessions.
2963 if (MLX5_TXOFF_CONFIG(MPW) &&
2965 wqem->dseg[0].bcount == RTE_BE32(0))
2968 * Non-inlinable VLAN packets are
2969 * processed outside of this routine.
2971 MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
2972 if (MLX5_TXOFF_CONFIG(VLAN))
2973 MLX5_ASSERT(!(loc->mbuf->ol_flags &
2974 RTE_MBUF_F_TX_VLAN));
2975 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
2976 /* We have to store mbuf in elts.*/
2977 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
2979 room -= MLX5_WQE_DSEG_SIZE;
2980 /* Ring buffer wraparound is checked at the loop end.*/
2983 #ifdef MLX5_PMD_SOFT_COUNTERS
2984 /* Update sent data bytes counter. */
2989 if (unlikely(!pkts_n || !loc->elts_free)) {
2991 * We have no resources/packets to
2992 * continue building descriptors.
2995 mlx5_tx_idone_empw(txq, loc, part,
2997 return MLX5_TXCMP_CODE_EXIT;
2999 loc->mbuf = *pkts++;
3000 if (likely(pkts_n > 1))
3001 rte_prefetch0(*pkts);
3002 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
3004 * Unroll the completion code to avoid
3005 * returning a variable value - it results in
3006 * unoptimized subsequent checking in the caller.
3008 if (ret == MLX5_TXCMP_CODE_MULTI) {
3010 mlx5_tx_idone_empw(txq, loc, part,
3012 if (unlikely(!loc->elts_free ||
3014 return MLX5_TXCMP_CODE_EXIT;
3015 return MLX5_TXCMP_CODE_MULTI;
3017 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
3018 if (ret == MLX5_TXCMP_CODE_TSO) {
3020 mlx5_tx_idone_empw(txq, loc, part,
3022 if (unlikely(!loc->elts_free ||
3024 return MLX5_TXCMP_CODE_EXIT;
3025 return MLX5_TXCMP_CODE_TSO;
3027 if (ret == MLX5_TXCMP_CODE_SINGLE) {
3029 mlx5_tx_idone_empw(txq, loc, part,
3031 if (unlikely(!loc->elts_free ||
3033 return MLX5_TXCMP_CODE_EXIT;
3034 return MLX5_TXCMP_CODE_SINGLE;
3036 if (ret != MLX5_TXCMP_CODE_EMPW) {
3039 mlx5_tx_idone_empw(txq, loc, part,
3041 return MLX5_TXCMP_CODE_ERROR;
3043 /* Check if we have minimal room left. */
3045 if (unlikely(!nlim || room < MLX5_WQE_DSEG_SIZE))
3048 * Check whether the packet parameters coincide
3049 * within the assumed eMPW batch:
3050 * - checksum settings
3052 * - software parser settings
3053 * - packet length (legacy MPW only)
3054 * - scheduling is not required
3056 if (!mlx5_tx_match_empw(txq, &wqem->eseg,
3059 /* Packet attributes match, continue the same eMPW. */
3060 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3061 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3064 * We get here to close an existing eMPW
3065 * session and start the new one.
3067 MLX5_ASSERT(pkts_n);
3069 if (unlikely(!part))
3070 return MLX5_TXCMP_CODE_EXIT;
3071 mlx5_tx_idone_empw(txq, loc, part, slen, wqem, olx);
3072 if (unlikely(!loc->elts_free ||
3074 return MLX5_TXCMP_CODE_EXIT;
3075 /* Continue the loop with new eMPW session. */
3081 * The routine sends packets with ordinary MLX5_OPCODE_SEND.
3082 * Data inlining and VLAN insertion are supported.
3084 static __rte_always_inline enum mlx5_txcmp_code
3085 mlx5_tx_burst_single_send(struct mlx5_txq_data *__rte_restrict txq,
3086 struct rte_mbuf **__rte_restrict pkts,
3087 unsigned int pkts_n,
3088 struct mlx5_txq_local *__rte_restrict loc,
3092 * The subroutine is part of mlx5_tx_burst_single()
3093 * and sends single-segment packets with the SEND opcode.
3095 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3096 MLX5_ASSERT(pkts_n > loc->pkts_sent);
3097 pkts += loc->pkts_sent + 1;
3098 pkts_n -= loc->pkts_sent;
3100 struct mlx5_wqe *__rte_restrict wqe;
3101 enum mlx5_txcmp_code ret;
3103 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
3104 if (MLX5_TXOFF_CONFIG(TXPP)) {
3105 enum mlx5_txcmp_code wret;
3107 /* Generate WAIT for scheduling if requested. */
3108 wret = mlx5_tx_schedule_send(txq, loc, olx);
3109 if (wret == MLX5_TXCMP_CODE_EXIT)
3110 return MLX5_TXCMP_CODE_EXIT;
3111 if (wret == MLX5_TXCMP_CODE_ERROR)
3112 return MLX5_TXCMP_CODE_ERROR;
3114 if (MLX5_TXOFF_CONFIG(INLINE)) {
3115 unsigned int inlen, vlan = 0;
3117 inlen = rte_pktmbuf_data_len(loc->mbuf);
3118 if (MLX5_TXOFF_CONFIG(VLAN) &&
3119 loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN) {
3120 vlan = sizeof(struct rte_vlan_hdr);
3124 * If inlining is enabled at configuration time
3125 * the limit must not be less than the minimal size.
3126 * Otherwise we would need an extra check on the data
3127 * size to avoid crashes due to length overflow.
3129 MLX5_ASSERT(txq->inlen_send >=
3130 MLX5_ESEG_MIN_INLINE_SIZE);
3131 if (inlen <= txq->inlen_send) {
3132 unsigned int seg_n, wqe_n;
3134 rte_prefetch0(rte_pktmbuf_mtod
3135 (loc->mbuf, uint8_t *));
3136 /* Check against minimal length. */
3137 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
3138 return MLX5_TXCMP_CODE_ERROR;
3139 if (loc->mbuf->ol_flags &
3140 RTE_MBUF_F_TX_DYNF_NOINLINE) {
3142 * The hint flag not to inline packet
3143 * data is set. Check whether we can follow the hint.
3146 if ((!MLX5_TXOFF_CONFIG(EMPW) &&
3148 (MLX5_TXOFF_CONFIG(MPW) &&
3150 if (inlen <= txq->inlen_send)
3153 * The hardware requires the
3154 * minimal inline data header.
3156 goto single_min_inline;
3158 if (MLX5_TXOFF_CONFIG(VLAN) &&
3159 vlan && !txq->vlan_en) {
3161 * We must insert VLAN tag
3162 * by software means.
3164 goto single_part_inline;
3166 goto single_no_inline;
3170 * Completely inlined packet data WQE:
3171 * - Control Segment, SEND opcode
3172 * - Ethernet Segment, no VLAN insertion
3173 * - Data inlined, VLAN optionally inserted
3174 * - Alignment to MLX5_WSEG_SIZE
3175 * Have to estimate amount of WQEBBs
3177 seg_n = (inlen + 3 * MLX5_WSEG_SIZE -
3178 MLX5_ESEG_MIN_INLINE_SIZE +
3179 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3180 /* Check if there are enough WQEBBs. */
3181 wqe_n = (seg_n + 3) / 4;
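				/*
				 * Worked example (illustrative, assuming
				 * 16-byte WSEGs and an 18-byte minimal inline
				 * part): a fully inlined packet with
				 * inlen = 100 gives
				 * seg_n = (100 + 48 - 18 + 15) / 16 = 9 and
				 * wqe_n = (9 + 3) / 4 = 3 WQEBBs.
				 */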
3182 if (wqe_n > loc->wqe_free)
3183 return MLX5_TXCMP_CODE_EXIT;
3184 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3185 loc->wqe_last = wqe;
3186 mlx5_tx_cseg_init(txq, loc, wqe, seg_n,
3187 MLX5_OPCODE_SEND, olx);
3188 mlx5_tx_eseg_data(txq, loc, wqe,
3189 vlan, inlen, 0, olx);
3190 txq->wqe_ci += wqe_n;
3191 loc->wqe_free -= wqe_n;
3193 * Packet data are completely inlined,
3194 * free the packet immediately.
3196 rte_pktmbuf_free_seg(loc->mbuf);
3197 } else if ((!MLX5_TXOFF_CONFIG(EMPW) ||
3198 MLX5_TXOFF_CONFIG(MPW)) &&
3201 * If minimal inlining is requested the eMPW
3202 * feature should be disabled because data is
3203 * inlined into the Ethernet Segment, which can
3204 * not contain inlined data for eMPW since the
3205 * segment is shared by all packets.
3207 struct mlx5_wqe_dseg *__rte_restrict dseg;
3212 * The inline-mode settings require
3213 * inlining the specified amount of
3214 * data bytes into the Ethernet Segment.
3215 * We should check the free space in the
3216 * WQE ring buffer to inline only partially.
3219 MLX5_ASSERT(txq->inlen_send >= txq->inlen_mode);
3220 MLX5_ASSERT(inlen > txq->inlen_mode);
3221 MLX5_ASSERT(txq->inlen_mode >=
3222 MLX5_ESEG_MIN_INLINE_SIZE);
3224 * Check whether there are enough free WQEBBs:
3226 * - Ethernet Segment
3227 * - First Segment of inlined Ethernet data
3228 * - ... data continued ...
3229 * - Finishing Data Segment of pointer type
3231 ds = (MLX5_WQE_CSEG_SIZE +
3232 MLX5_WQE_ESEG_SIZE +
3233 MLX5_WQE_DSEG_SIZE +
txq->inlen_mode -
3235 MLX5_ESEG_MIN_INLINE_SIZE +
3236 MLX5_WQE_DSEG_SIZE +
3237 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
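			/*
			 * Worked example (illustrative, assuming 16-byte
			 * CSEG/ESEG/DSEG sizes, an 18-byte minimal inline part
			 * and inlen_mode = 128):
			 * ds = (16 + 16 + 16 + 128 - 18 + 16 + 15) / 16 = 11
			 * segments, i.e. (11 + 3) / 4 = 3 free WQEBBs required.
			 */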
3238 if (loc->wqe_free < ((ds + 3) / 4))
3239 return MLX5_TXCMP_CODE_EXIT;
3241 * Build the ordinary SEND WQE:
3243 * - Ethernet Segment, inline inlen_mode bytes
3244 * - Data Segment of pointer type
3246 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3247 loc->wqe_last = wqe;
3248 mlx5_tx_cseg_init(txq, loc, wqe, ds,
3249 MLX5_OPCODE_SEND, olx);
3250 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan,
3253 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
3254 txq->inlen_mode - vlan;
3255 inlen -= txq->inlen_mode;
3256 mlx5_tx_dseg_ptr(txq, loc, dseg,
3259 * WQE is built, update the loop parameters
3260 * and go to the next packet.
3262 txq->wqe_ci += (ds + 3) / 4;
3263 loc->wqe_free -= (ds + 3) / 4;
3264 /* We have to store mbuf in elts.*/
3265 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
3266 txq->elts[txq->elts_head++ & txq->elts_m] =
3274 * Partially inlined packet data WQE, we have
3275 * some space in the title WQEBB, we can fill it
3276 * with some packet data. It takes one WQEBB,
3277 * which is available, so no extra space check is needed:
3278 * - Control Segment, SEND opcode
3279 * - Ethernet Segment, no VLAN insertion
3280 * - MLX5_ESEG_MIN_INLINE_SIZE bytes of Data
3281 * - Data Segment, pointer type
3283 * We also get here if VLAN insertion is not
3284 * supported by HW but the inline is enabled.
3287 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3288 loc->wqe_last = wqe;
3289 mlx5_tx_cseg_init(txq, loc, wqe, 4,
3290 MLX5_OPCODE_SEND, olx);
3291 mlx5_tx_eseg_dmin(txq, loc, wqe, vlan, olx);
3292 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
3293 MLX5_ESEG_MIN_INLINE_SIZE - vlan;
3295 * The length check is performed above, by
3296 * comparing with txq->inlen_send. We should
3297 * not get overflow here.
3299 MLX5_ASSERT(inlen > MLX5_ESEG_MIN_INLINE_SIZE);
3300 dlen = inlen - MLX5_ESEG_MIN_INLINE_SIZE;
3301 mlx5_tx_dseg_ptr(txq, loc, &wqe->dseg[1],
3305 /* We have to store mbuf in elts.*/
3306 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
3307 txq->elts[txq->elts_head++ & txq->elts_m] =
3311 #ifdef MLX5_PMD_SOFT_COUNTERS
3312 /* Update sent data bytes counter. */
3313 txq->stats.obytes += vlan +
3314 rte_pktmbuf_data_len(loc->mbuf);
3318 * No inlining at all, which means saving CPU cycles
3319 * was prioritized at configuration time, so no
3320 * packet data should be copied into the WQE.
3322 * SEND WQE, one WQEBB:
3323 * - Control Segment, SEND opcode
3324 * - Ethernet Segment, optional VLAN, no inline
3325 * - Data Segment, pointer type
3328 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3329 loc->wqe_last = wqe;
3330 mlx5_tx_cseg_init(txq, loc, wqe, 3,
3331 MLX5_OPCODE_SEND, olx);
3332 mlx5_tx_eseg_none(txq, loc, wqe, olx);
3334 (txq, loc, &wqe->dseg[0],
3335 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3336 rte_pktmbuf_data_len(loc->mbuf), olx);
3340 * We should not store the mbuf pointer in elts
3341 * if no inlining is configured, this is done
3342 * by the calling routine in a batch copy.
3344 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
3346 #ifdef MLX5_PMD_SOFT_COUNTERS
3347 /* Update sent data bytes counter. */
3348 txq->stats.obytes += rte_pktmbuf_data_len(loc->mbuf);
3349 if (MLX5_TXOFF_CONFIG(VLAN) &&
3350 loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN)
3351 txq->stats.obytes +=
3352 sizeof(struct rte_vlan_hdr);
3357 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3358 return MLX5_TXCMP_CODE_EXIT;
3359 loc->mbuf = *pkts++;
3361 rte_prefetch0(*pkts);
3362 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
3363 if (unlikely(ret != MLX5_TXCMP_CODE_SINGLE))
3369 static __rte_always_inline enum mlx5_txcmp_code
3370 mlx5_tx_burst_single(struct mlx5_txq_data *__rte_restrict txq,
3371 struct rte_mbuf **__rte_restrict pkts,
3372 unsigned int pkts_n,
3373 struct mlx5_txq_local *__rte_restrict loc,
3376 enum mlx5_txcmp_code ret;
3378 ret = mlx5_tx_able_to_empw(txq, loc, olx, false);
3379 if (ret == MLX5_TXCMP_CODE_SINGLE)
3381 MLX5_ASSERT(ret == MLX5_TXCMP_CODE_EMPW);
3383 /* Optimize for inline/no inline eMPW send. */
3384 ret = (MLX5_TXOFF_CONFIG(INLINE)) ?
3385 mlx5_tx_burst_empw_inline
3386 (txq, pkts, pkts_n, loc, olx) :
3387 mlx5_tx_burst_empw_simple
3388 (txq, pkts, pkts_n, loc, olx);
3389 if (ret != MLX5_TXCMP_CODE_SINGLE)
3391 /* The resources to send one packet should remain. */
3392 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3394 ret = mlx5_tx_burst_single_send(txq, pkts, pkts_n, loc, olx);
3395 MLX5_ASSERT(ret != MLX5_TXCMP_CODE_SINGLE);
3396 if (ret != MLX5_TXCMP_CODE_EMPW)
3398 /* The resources to send one packet should remain. */
3399 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3404 * DPDK Tx callback template. This is a configured template used to generate
3405 * routines optimized for a specified offload setup.
3406 * One of these generated functions is chosen at SQ configuration time.
3409 * Generic pointer to TX queue structure.
3411 * Packets to transmit.
3413 * Number of packets in array.
3415 * Configured offloads mask, presents the bits of MLX5_TXOFF_CONFIG_xxx
3416 * values. Should be static to take advantage of compile-time static configuration.
3420 * Number of packets successfully transmitted (<= pkts_n).
3422 static __rte_always_inline uint16_t
3423 mlx5_tx_burst_tmpl(struct mlx5_txq_data *__rte_restrict txq,
3424 struct rte_mbuf **__rte_restrict pkts,
3428 struct mlx5_txq_local loc;
3429 enum mlx5_txcmp_code ret;
3432 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
3433 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
3434 if (unlikely(!pkts_n))
3436 if (MLX5_TXOFF_CONFIG(INLINE))
3440 loc.wqe_last = NULL;
3443 loc.pkts_loop = loc.pkts_sent;
3445 * Check if there are some CQEs, if any:
3446 * - process encountered errors
3447 * - process the completed WQEs
3448 * - free related mbufs
3449 * - doorbell the NIC about processed CQEs
3451 rte_prefetch0(*(pkts + loc.pkts_sent));
3452 mlx5_tx_handle_completion(txq, olx);
3454 * Calculate the number of available resources - elts and WQEs.
3455 * There are two possible different scenarios:
3456 * - no data inlining into WQEs, one WQEBB may contain up to
3457 * four packets, in this case elts become the scarce resource
3458 * - data inlining into WQEs, one packet may require multiple
3459 * WQEBBs, the WQEs become the limiting factor.
3461 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
3462 loc.elts_free = txq->elts_s -
3463 (uint16_t)(txq->elts_head - txq->elts_tail);
3464 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
3465 loc.wqe_free = txq->wqe_s -
3466 (uint16_t)(txq->wqe_ci - txq->wqe_pi);
3467 if (unlikely(!loc.elts_free || !loc.wqe_free))
3471 * Fetch the packet from array. Usually this is the first
3472 * packet in series of multi/single segment packets.
3474 loc.mbuf = *(pkts + loc.pkts_sent);
3475 /* Dedicated branch for multi-segment packets. */
3476 if (MLX5_TXOFF_CONFIG(MULTI) &&
3477 unlikely(NB_SEGS(loc.mbuf) > 1)) {
3479 * Multi-segment packet encountered.
3480 * Hardware is able to process it only
3481 * with SEND/TSO opcodes, one packet
3482 * per WQE, do it in a dedicated routine.
3485 MLX5_ASSERT(loc.pkts_sent >= loc.pkts_copy);
3486 part = loc.pkts_sent - loc.pkts_copy;
3487 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
3489 * There are some single-segment mbufs not
3490 * stored in elts. The mbufs must be in the
3491 * same order as WQEs, so we must copy the
3492 * mbufs to elts here, before the mbufs of the
3493 * coming multi-segment packet are appended.
3495 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy,
3497 loc.pkts_copy = loc.pkts_sent;
3499 MLX5_ASSERT(pkts_n > loc.pkts_sent);
3500 ret = mlx5_tx_burst_mseg(txq, pkts, pkts_n, &loc, olx);
3501 if (!MLX5_TXOFF_CONFIG(INLINE))
3502 loc.pkts_copy = loc.pkts_sent;
3504 * These return code checks are supposed
3505 * to be optimized out due to routine inlining.
3507 if (ret == MLX5_TXCMP_CODE_EXIT) {
3509 * The routine returns this code when
3510 * all packets are sent or there are not
3511 * enough resources to complete the request.
3515 if (ret == MLX5_TXCMP_CODE_ERROR) {
3517 * The routine returns this code when some error
3518 * occurred in the format of the incoming packets.
3520 txq->stats.oerrors++;
3523 if (ret == MLX5_TXCMP_CODE_SINGLE) {
3525 * The single-segment packet was encountered
3526 * in the array, try to send it in the
3527 * best optimized way, possibly engaging eMPW.
3529 goto enter_send_single;
3531 if (MLX5_TXOFF_CONFIG(TSO) &&
3532 ret == MLX5_TXCMP_CODE_TSO) {
3534 * The single-segment TSO packet was
3535 * encountered in the array.
3537 goto enter_send_tso;
3539 /* We must not get here. Something is going wrong. */
3541 txq->stats.oerrors++;
3544 /* Dedicated branch for single-segment TSO packets. */
3545 if (MLX5_TXOFF_CONFIG(TSO) &&
3546 unlikely(loc.mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
3548 * TSO might require a special way of inlining
3549 * (dedicated parameters) and is sent with
3550 * the MLX5_OPCODE_TSO opcode only, handle this
3551 * in a dedicated branch.
3554 MLX5_ASSERT(NB_SEGS(loc.mbuf) == 1);
3555 MLX5_ASSERT(pkts_n > loc.pkts_sent);
3556 ret = mlx5_tx_burst_tso(txq, pkts, pkts_n, &loc, olx);
3558 * These return code checks are supposed
3559 * to be optimized out due to routine inlining.
3561 if (ret == MLX5_TXCMP_CODE_EXIT)
3563 if (ret == MLX5_TXCMP_CODE_ERROR) {
3564 txq->stats.oerrors++;
3567 if (ret == MLX5_TXCMP_CODE_SINGLE)
3568 goto enter_send_single;
3569 if (MLX5_TXOFF_CONFIG(MULTI) &&
3570 ret == MLX5_TXCMP_CODE_MULTI) {
3572 * The multi-segment packet was
3573 * encountered in the array.
3575 goto enter_send_multi;
3577 /* We must not get here. Something is going wrong. */
3579 txq->stats.oerrors++;
3583 * The dedicated branch for the single-segment packets
3584 * without TSO. Often these can be sent using
3585 * MLX5_OPCODE_EMPW with multiple packets in one WQE.
3586 * The routine builds the WQEs till it encounters
3587 * a TSO or multi-segment packet (in case these
3588 * offloads are requested at SQ configuration time).
3591 MLX5_ASSERT(pkts_n > loc.pkts_sent);
3592 ret = mlx5_tx_burst_single(txq, pkts, pkts_n, &loc, olx);
3594 * These return code checks are supposed
3595 * to be optimized out due to routine inlining.
3597 if (ret == MLX5_TXCMP_CODE_EXIT)
3599 if (ret == MLX5_TXCMP_CODE_ERROR) {
3600 txq->stats.oerrors++;
3603 if (MLX5_TXOFF_CONFIG(MULTI) &&
3604 ret == MLX5_TXCMP_CODE_MULTI) {
3606 * The multi-segment packet was
3607 * encountered in the array.
3609 goto enter_send_multi;
3611 if (MLX5_TXOFF_CONFIG(TSO) &&
3612 ret == MLX5_TXCMP_CODE_TSO) {
3614 * The single-segment TSO packet was
3615 * encountered in the array.
3617 goto enter_send_tso;
3619 /* We must not get here. Something is going wrong. */
3621 txq->stats.oerrors++;
3625 * Main Tx loop is completed, do the rest:
3626 * - set completion request if thresholds are reached
3627 * - doorbell the hardware
3628 * - copy the rest of mbufs to elts (if any)
3630 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE) ||
3631 loc.pkts_sent >= loc.pkts_copy);
3632 /* Take a shortcut if nothing is sent. */
3633 if (unlikely(loc.pkts_sent == loc.pkts_loop))
3635 /* Request CQE generation if limits are reached. */
3636 mlx5_tx_request_completion(txq, &loc, olx);
3638 * Ring QP doorbell immediately after WQE building completion
3639 * to improve latencies. The purely software-related data treatment
3640 * can be completed after the doorbell. Tx CQEs for this SQ are
3641 * processed in this thread only by polling.
3643 * The rdma core library can map the doorbell register in two ways,
3644 * depending on the environment variable "MLX5_SHUT_UP_BF":
3646 * - as regular cached memory, the variable is either missing or
3647 * set to zero. This type of mapping may cause significant
3648 * doorbell register write latency and requires an explicit memory
3649 * write barrier to mitigate this issue and prevent write combining.
3651 * - as non-cached memory, the variable is present and set to a non-"0"
3652 * value. This type of mapping may cause a performance impact under
3653 * heavy load conditions but the explicit write memory barrier is
3654 * not required and it may improve core performance.
3656 * - the legacy behaviour (prior to the 19.08 release) was to use some
3657 * heuristics to decide whether the write memory barrier should
3658 * be performed. This behavior is supported by specifying
3659 * tx_db_nc=2; the write barrier is skipped if the application provides
3660 * the full recommended burst of packets, assuming the next
3661 * packets are coming and the write barrier will be issued on
3662 * the next burst (after descriptor writing, at least).
3664 mlx5_tx_dbrec_cond_wmb(txq, loc.wqe_last, !txq->db_nc &&
3665 (!txq->db_heu || pkts_n % MLX5_TX_DEFAULT_BURST));
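	/*
	 * Usage note (an illustrative assumption, not taken from this header):
	 * the mapping mode is normally chosen with the mlx5 "tx_db_nc" devarg,
	 * e.g. "-a 0000:03:00.0,tx_db_nc=1" requests the non-cached doorbell
	 * mapping, while exporting MLX5_SHUT_UP_BF=1 before startup selects a
	 * comparable mapping through rdma-core itself.
	 */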
3666 /* Not all of the mbufs may be stored into elts yet. */
3667 part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent - loc.pkts_copy;
3668 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
3670 * There are some single-segment mbufs not stored in elts.
3671 * This can only happen if the last packet was single-segment.
3672 * The copying is gathered into one place because it is
3673 * a good opportunity to optimize it with SIMD.
3674 * Unfortunately, if inlining is enabled, gaps in the pointer
3675 * array may appear due to early freeing of the inlined mbufs.
3677 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy, part, olx);
3678 loc.pkts_copy = loc.pkts_sent;
3680 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
3681 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
3682 if (pkts_n > loc.pkts_sent) {
3684 * If the burst size is large there might be not enough CQEs
3685 * fetched from the completion queue and not enough resources
3686 * freed to send all the packets.
3691 #ifdef MLX5_PMD_SOFT_COUNTERS
3692 /* Increment sent packets counter. */
3693 txq->stats.opackets += loc.pkts_sent;
3695 if (MLX5_TXOFF_CONFIG(INLINE) && loc.mbuf_free)
3696 __mlx5_tx_free_mbuf(txq, pkts, loc.mbuf_free, olx);
3697 return loc.pkts_sent;
3700 #endif /* RTE_PMD_MLX5_TX_H_ */