1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2021 6WIND S.A.
3 * Copyright 2021 Mellanox Technologies, Ltd
6 #ifndef RTE_PMD_MLX5_TX_H_
7 #define RTE_PMD_MLX5_TX_H_
10 #include <sys/queue.h>
13 #include <rte_mempool.h>
14 #include <rte_common.h>
15 #include <rte_spinlock.h>
17 #include <mlx5_common_mr.h>
20 #include "mlx5_autoconf.h"
23 /* TX burst subroutines return codes. */
24 enum mlx5_txcmp_code {
25 MLX5_TXCMP_CODE_EXIT = 0,
26 MLX5_TXCMP_CODE_ERROR,
27 MLX5_TXCMP_CODE_SINGLE,
28 MLX5_TXCMP_CODE_MULTI,
34 * These defines are used to configure the set of Tx burst routine options
35 * supported at compile time. Options that are not specified are optimized out,
36 * because the corresponding if conditions can be evaluated at compile time.
37 * The offloads with a bigger runtime check overhead (requiring more CPU cycles
38 * to skip) should have the bigger index - this is needed to select the better
39 * matching routine function if there is no exact match and some offloads are not
42 #define MLX5_TXOFF_CONFIG_MULTI (1u << 0) /* Multi-segment packets. */
43 #define MLX5_TXOFF_CONFIG_TSO (1u << 1) /* TCP send offload supported. */
44 #define MLX5_TXOFF_CONFIG_SWP (1u << 2) /* Tunnels/SW Parser offloads. */
45 #define MLX5_TXOFF_CONFIG_CSUM (1u << 3) /* Check Sums offloaded. */
46 #define MLX5_TXOFF_CONFIG_INLINE (1u << 4) /* Data inlining supported. */
47 #define MLX5_TXOFF_CONFIG_VLAN (1u << 5) /* VLAN insertion supported. */
48 #define MLX5_TXOFF_CONFIG_METADATA (1u << 6) /* Flow metadata. */
49 #define MLX5_TXOFF_CONFIG_EMPW (1u << 8) /* Enhanced MPW supported. */
50 #define MLX5_TXOFF_CONFIG_MPW (1u << 9) /* Legacy MPW supported. */
51 #define MLX5_TXOFF_CONFIG_TXPP (1u << 10) /* Scheduling on timestamp. */
53 /* The most common offloads groups. */
54 #define MLX5_TXOFF_CONFIG_NONE 0
55 #define MLX5_TXOFF_CONFIG_FULL (MLX5_TXOFF_CONFIG_MULTI | \
56 MLX5_TXOFF_CONFIG_TSO | \
57 MLX5_TXOFF_CONFIG_SWP | \
58 MLX5_TXOFF_CONFIG_CSUM | \
59 MLX5_TXOFF_CONFIG_INLINE | \
60 MLX5_TXOFF_CONFIG_VLAN | \
61 MLX5_TXOFF_CONFIG_METADATA)
63 #define MLX5_TXOFF_CONFIG(mask) (olx & MLX5_TXOFF_CONFIG_##mask)
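/*
 * Usage sketch: a specialized burst routine is generated with the
 * MLX5_TXOFF_DECL() helper below, for example
 *
 *   MLX5_TXOFF_DECL(full, MLX5_TXOFF_CONFIG_FULL)
 *
 * and inside the shared template every check like
 *
 *   if (MLX5_TXOFF_CONFIG(TSO)) { ... }
 *
 * resolves to a compile-time constant, so the branches for offloads not
 * present in olx are optimized out.
 */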
65 #define MLX5_TXOFF_PRE_DECL(func) \
66 uint16_t mlx5_tx_burst_##func(void *txq, \
67 struct rte_mbuf **pkts, \
70 #define MLX5_TXOFF_DECL(func, olx) \
71 uint16_t mlx5_tx_burst_##func(void *txq, \
72 struct rte_mbuf **pkts, \
75 return mlx5_tx_burst_tmpl((struct mlx5_txq_data *)txq, \
76 pkts, pkts_n, (olx)); \
79 /* Mbuf dynamic flag offset for inline. */
80 extern uint64_t rte_net_mlx5_dynf_inline_mask;
81 #define PKT_TX_DYNF_NOINLINE rte_net_mlx5_dynf_inline_mask
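/*
 * Usage sketch: an application may request that a particular packet is not
 * inlined by setting this dynamic flag in the mbuf, e.g.
 *
 *   mbuf->ol_flags |= PKT_TX_DYNF_NOINLINE;
 *
 * The inline-capable burst routines below test this bit and limit or skip
 * data inlining for such packets.
 */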
83 extern uint32_t mlx5_ptype_table[] __rte_cache_aligned;
84 extern uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned;
85 extern uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned;
87 struct mlx5_txq_stats {
88 #ifdef MLX5_PMD_SOFT_COUNTERS
89 uint64_t opackets; /**< Total of successfully sent packets. */
90 uint64_t obytes; /**< Total of successfully sent bytes. */
92 uint64_t oerrors; /**< Total number of failed transmitted packets. */
95 /* TX queue send local data. */
97 struct mlx5_txq_local {
98 struct mlx5_wqe *wqe_last; /* last sent WQE pointer. */
99 struct rte_mbuf *mbuf; /* first mbuf to process. */
100 uint16_t pkts_copy; /* packets copied to elts. */
101 uint16_t pkts_sent; /* packets sent. */
102 uint16_t pkts_loop; /* packets sent on loop entry. */
103 uint16_t elts_free; /* available elts remain. */
104 uint16_t wqe_free; /* available wqe remain. */
105 uint16_t mbuf_off; /* data offset in current mbuf. */
106 uint16_t mbuf_nseg; /* number of remaining mbufs. */
107 uint16_t mbuf_free; /* number of inline mbufs to free. */
110 /* TX queue descriptor. */
112 struct mlx5_txq_data {
113 uint16_t elts_head; /* Current counter in (*elts)[]. */
114 uint16_t elts_tail; /* Counter of first element awaiting completion. */
115 uint16_t elts_comp; /* elts index since last completion request. */
116 uint16_t elts_s; /* Number of mbuf elements. */
117 uint16_t elts_m; /* Mask for mbuf elements indices. */
118 /* Fields related to elts mbuf storage. */
119 uint16_t wqe_ci; /* Consumer index for work queue. */
120 uint16_t wqe_pi; /* Producer index for work queue. */
121 uint16_t wqe_s; /* Number of WQ elements. */
122 uint16_t wqe_m; /* Mask for WQ element indices. */
123 uint16_t wqe_comp; /* WQE index since last completion request. */
124 uint16_t wqe_thres; /* WQE threshold to request completion in CQ. */
125 /* WQ related fields. */
126 uint16_t cq_ci; /* Consumer index for completion queue. */
127 uint16_t cq_pi; /* Producer index for completion queue. */
128 uint16_t cqe_s; /* Number of CQ elements. */
129 uint16_t cqe_m; /* Mask for CQ indices. */
130 /* CQ related fields. */
131 uint16_t elts_n:4; /* elts[] length (in log2). */
132 uint16_t cqe_n:4; /* Number of CQ elements (in log2). */
133 uint16_t wqe_n:4; /* Number of WQ elements (in log2). */
134 uint16_t tso_en:1; /* When set hardware TSO is enabled. */
135 uint16_t tunnel_en:1;
136 /* When set, Tx offloads for tunneled packets are supported. */
137 uint16_t swp_en:1; /* Whether SW parser is enabled. */
138 uint16_t vlan_en:1; /* VLAN insertion in WQE is supported. */
139 uint16_t db_nc:1; /* Doorbell mapped to non-cached region. */
140 uint16_t db_heu:1; /* Doorbell heuristic write barrier. */
141 uint16_t fast_free:1; /* mbuf fast free on Tx is enabled. */
142 uint16_t inlen_send; /* Ordinary send data inline size. */
143 uint16_t inlen_empw; /* eMPW max packet size to inline. */
144 uint16_t inlen_mode; /* Minimal data length to inline. */
145 uint32_t qp_num_8s; /* QP number shifted by 8. */
146 uint64_t offloads; /* Offloads for Tx Queue. */
147 struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
148 struct mlx5_wqe *wqes; /* Work queue. */
149 struct mlx5_wqe *wqes_end; /* Work queue array limit. */
150 #ifdef RTE_LIBRTE_MLX5_DEBUG
151 uint32_t *fcqs; /* Free completion queue (debug extended). */
153 uint16_t *fcqs; /* Free completion queue. */
155 volatile struct mlx5_cqe *cqes; /* Completion queue. */
156 volatile uint32_t *qp_db; /* Work queue doorbell. */
157 volatile uint32_t *cq_db; /* Completion queue doorbell. */
158 uint16_t port_id; /* Port ID of device. */
159 uint16_t idx; /* Queue index. */
160 uint64_t ts_mask; /* Timestamp flag dynamic mask. */
161 int32_t ts_offset; /* Timestamp field dynamic offset. */
162 struct mlx5_dev_ctx_shared *sh; /* Shared context. */
163 struct mlx5_txq_stats stats; /* TX queue counters. */
165 rte_spinlock_t *uar_lock;
166 /* UAR access lock required for 32bit implementations */
168 struct rte_mbuf *elts[0];
169 /* Storage for queued packets, must be the last field. */
170 } __rte_cache_aligned;
173 MLX5_TXQ_TYPE_STANDARD, /* Standard Tx queue. */
174 MLX5_TXQ_TYPE_HAIRPIN, /* Hairpin Tx queue. */
177 /* TX queue control descriptor. */
178 struct mlx5_txq_ctrl {
179 LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
180 uint32_t refcnt; /* Reference counter. */
181 unsigned int socket; /* CPU socket ID for allocations. */
182 enum mlx5_txq_type type; /* The txq ctrl type. */
183 unsigned int max_inline_data; /* Max inline data. */
184 unsigned int max_tso_header; /* Max TSO header size. */
185 struct mlx5_txq_obj *obj; /* Verbs/DevX queue object. */
186 struct mlx5_priv *priv; /* Back pointer to private data. */
187 off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */
188 void *bf_reg; /* BlueFlame register from Verbs. */
189 uint16_t dump_file_n; /* Number of dump files. */
190 struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */
191 uint32_t hairpin_status; /* Hairpin binding status. */
192 struct mlx5_txq_data txq; /* Data path structure. */
193 /* Must be the last field in the structure, contains elts[]. */
198 int mlx5_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id);
199 int mlx5_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id);
200 int mlx5_tx_queue_start_primary(struct rte_eth_dev *dev, uint16_t queue_id);
201 int mlx5_tx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t queue_id);
202 int mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
203 unsigned int socket, const struct rte_eth_txconf *conf);
204 int mlx5_tx_hairpin_queue_setup
205 (struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
206 const struct rte_eth_hairpin_conf *hairpin_conf);
207 void mlx5_tx_queue_release(void *dpdk_txq);
208 void txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl);
209 int mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd);
210 void mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev);
211 int mlx5_txq_obj_verify(struct rte_eth_dev *dev);
212 struct mlx5_txq_ctrl *mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx,
213 uint16_t desc, unsigned int socket,
214 const struct rte_eth_txconf *conf);
215 struct mlx5_txq_ctrl *mlx5_txq_hairpin_new
216 (struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
217 const struct rte_eth_hairpin_conf *hairpin_conf);
218 struct mlx5_txq_ctrl *mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx);
219 int mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx);
220 int mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx);
221 int mlx5_txq_verify(struct rte_eth_dev *dev);
222 void txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl);
223 void txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl);
224 uint64_t mlx5_get_tx_port_offloads(struct rte_eth_dev *dev);
225 void mlx5_txq_dynf_timestamp_set(struct rte_eth_dev *dev);
229 uint16_t removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
231 void mlx5_tx_handle_completion(struct mlx5_txq_data *__rte_restrict txq,
232 unsigned int olx __rte_unused);
233 int mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset);
234 void mlx5_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
235 struct rte_eth_txq_info *qinfo);
236 int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
237 struct rte_eth_burst_mode *mode);
241 uint32_t mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, struct rte_mbuf *mb);
242 uint32_t mlx5_tx_update_ext_mp(struct mlx5_txq_data *txq, uintptr_t addr,
243 struct rte_mempool *mp);
247 MLX5_TXOFF_PRE_DECL(full_empw);
248 MLX5_TXOFF_PRE_DECL(none_empw);
249 MLX5_TXOFF_PRE_DECL(md_empw);
250 MLX5_TXOFF_PRE_DECL(mt_empw);
251 MLX5_TXOFF_PRE_DECL(mtsc_empw);
252 MLX5_TXOFF_PRE_DECL(mti_empw);
253 MLX5_TXOFF_PRE_DECL(mtv_empw);
254 MLX5_TXOFF_PRE_DECL(mtiv_empw);
255 MLX5_TXOFF_PRE_DECL(sc_empw);
256 MLX5_TXOFF_PRE_DECL(sci_empw);
257 MLX5_TXOFF_PRE_DECL(scv_empw);
258 MLX5_TXOFF_PRE_DECL(sciv_empw);
259 MLX5_TXOFF_PRE_DECL(i_empw);
260 MLX5_TXOFF_PRE_DECL(v_empw);
261 MLX5_TXOFF_PRE_DECL(iv_empw);
263 MLX5_TXOFF_PRE_DECL(full);
264 MLX5_TXOFF_PRE_DECL(none);
265 MLX5_TXOFF_PRE_DECL(md);
266 MLX5_TXOFF_PRE_DECL(mt);
267 MLX5_TXOFF_PRE_DECL(mtsc);
268 MLX5_TXOFF_PRE_DECL(mti);
269 MLX5_TXOFF_PRE_DECL(mtv);
270 MLX5_TXOFF_PRE_DECL(mtiv);
271 MLX5_TXOFF_PRE_DECL(sc);
272 MLX5_TXOFF_PRE_DECL(sci);
273 MLX5_TXOFF_PRE_DECL(scv);
274 MLX5_TXOFF_PRE_DECL(sciv);
275 MLX5_TXOFF_PRE_DECL(i);
276 MLX5_TXOFF_PRE_DECL(v);
277 MLX5_TXOFF_PRE_DECL(iv);
279 MLX5_TXOFF_PRE_DECL(full_ts_nompw);
280 MLX5_TXOFF_PRE_DECL(full_ts_nompwi);
281 MLX5_TXOFF_PRE_DECL(full_ts);
282 MLX5_TXOFF_PRE_DECL(full_ts_noi);
283 MLX5_TXOFF_PRE_DECL(none_ts);
284 MLX5_TXOFF_PRE_DECL(mdi_ts);
285 MLX5_TXOFF_PRE_DECL(mti_ts);
286 MLX5_TXOFF_PRE_DECL(mtiv_ts);
288 MLX5_TXOFF_PRE_DECL(none_mpw);
289 MLX5_TXOFF_PRE_DECL(mci_mpw);
290 MLX5_TXOFF_PRE_DECL(mc_mpw);
291 MLX5_TXOFF_PRE_DECL(i_mpw);
293 static __rte_always_inline uint64_t *
294 mlx5_tx_bfreg(struct mlx5_txq_data *txq)
296 return MLX5_PROC_PRIV(txq->port_id)->uar_table[txq->idx];
300 * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
301 * 64bit architectures.
304 * value to write in CPU endian format.
306 * Address to write to.
308 * Address of the lock to use for that UAR access.
310 static __rte_always_inline void
311 __mlx5_uar_write64_relaxed(uint64_t val, void *addr,
312 rte_spinlock_t *lock __rte_unused)
315 *(uint64_t *)addr = val;
316 #else /* !RTE_ARCH_64 */
317 rte_spinlock_lock(lock);
318 *(uint32_t *)addr = val;
320 *((uint32_t *)addr + 1) = val >> 32;
321 rte_spinlock_unlock(lock);
326 * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
327 * 64bit architectures while guaranteeing the order of execution with the
328 * code being executed.
331 * value to write in CPU endian format.
333 * Address to write to.
335 * Address of the lock to use for that UAR access.
337 static __rte_always_inline void
338 __mlx5_uar_write64(uint64_t val, void *addr, rte_spinlock_t *lock)
341 __mlx5_uar_write64_relaxed(val, addr, lock);
344 /* Assist macros, used instead of directly calling the functions they wrap. */
346 #define mlx5_uar_write64_relaxed(val, dst, lock) \
347 __mlx5_uar_write64_relaxed(val, dst, NULL)
348 #define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, NULL)
350 #define mlx5_uar_write64_relaxed(val, dst, lock) \
351 __mlx5_uar_write64_relaxed(val, dst, lock)
352 #define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, lock)
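/*
 * Usage sketch: the doorbell/BlueFlame writes below go through these
 * wrappers, e.g.
 *
 *   mlx5_uar_write64_relaxed(*(volatile uint64_t *)wqe,
 *                            mlx5_tx_bfreg(txq), txq->uar_lock);
 *
 * On 64-bit builds the lock argument is discarded by the macro; on 32-bit
 * builds it serializes the two 32-bit stores into the UAR register.
 */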
356 * Query LKey from a packet buffer for Tx. If not found, add the mempool.
359 * Pointer to Tx queue structure.
364 * Searched LKey on success, UINT32_MAX on no match.
366 static __rte_always_inline uint32_t
367 mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
369 struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
370 uintptr_t addr = (uintptr_t)mb->buf_addr;
373 /* Check generation bit to see if there's any change on existing MRs. */
374 if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
375 mlx5_mr_flush_local_cache(mr_ctrl);
376 /* Linear search on MR cache array. */
377 lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
378 MLX5_MR_CACHE_N, addr);
379 if (likely(lkey != UINT32_MAX))
381 /* Take slower bottom-half on miss. */
382 return mlx5_tx_mb2mr_bh(txq, mb);
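/*
 * Usage sketch: the returned LKey is placed directly into a Data Segment,
 * as done by mlx5_tx_dseg_ptr() below, e.g.
 *
 *   dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
 */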
386 * Ring TX queue doorbell and flush the update if requested.
389 * Pointer to TX queue structure.
391 * Pointer to the last WQE posted in the NIC.
393 * Request for write memory barrier after BlueFlame update.
395 static __rte_always_inline void
396 mlx5_tx_dbrec_cond_wmb(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe,
399 uint64_t *dst = mlx5_tx_bfreg(txq);
400 volatile uint64_t *src = ((volatile uint64_t *)wqe);
403 *txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci);
404 /* Ensure ordering between DB record and BF copy. */
406 mlx5_uar_write64_relaxed(*src, dst, txq->uar_lock);
412 * Ring TX queue doorbell and flush the update by write memory barrier.
415 * Pointer to TX queue structure.
417 * Pointer to the last WQE posted in the NIC.
419 static __rte_always_inline void
420 mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe)
422 mlx5_tx_dbrec_cond_wmb(txq, wqe, 1);
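/*
 * Usage sketch: a typical posting sequence builds the WQE, advances
 * wqe_ci and finally rings the doorbell, e.g.
 *
 *   wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
 *   mlx5_tx_cseg_init(txq, loc, wqe, ds, MLX5_OPCODE_SEND, olx);
 *   ...
 *   txq->wqe_ci += (ds + 3) / 4;
 *   mlx5_tx_dbrec(txq, wqe);
 */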
426 * Convert timestamp from mbuf format to linear counter
427 * of Clock Queue completions (24 bits).
430 * Pointer to the device shared context to fetch Tx
431 * packet pacing timestamp and parameters.
433 * Timestamp from mbuf to convert.
435 * positive or zero value - completion ID to wait.
436 * negative value - conversion error.
438 static __rte_always_inline int32_t
439 mlx5_txpp_convert_tx_ts(struct mlx5_dev_ctx_shared *sh, uint64_t mts)
446 * Atomically read the two uint64_t fields and compare their LSB bits.
447 * If there is no match - the timestamp was updated in
448 * the service thread and the data should be re-read.
450 rte_compiler_barrier();
451 ci = __atomic_load_n(&sh->txpp.ts.ci_ts, __ATOMIC_RELAXED);
452 ts = __atomic_load_n(&sh->txpp.ts.ts, __ATOMIC_RELAXED);
453 rte_compiler_barrier();
454 if (!((ts ^ ci) << (64 - MLX5_CQ_INDEX_WIDTH)))
457 /* Perform the skew correction, positive value to send earlier. */
458 mts -= sh->txpp.skew;
460 if (unlikely(mts >= UINT64_MAX / 2)) {
461 /* The delta is negative, mts is in the past. */
462 __atomic_fetch_add(&sh->txpp.err_ts_past,
463 1, __ATOMIC_RELAXED);
466 tick = sh->txpp.tick;
468 /* Convert delta to completions, round up. */
469 mts = (mts + tick - 1) / tick;
470 if (unlikely(mts >= (1 << MLX5_CQ_INDEX_WIDTH) / 2 - 1)) {
471 /* The mts is too far in the future. */
472 __atomic_fetch_add(&sh->txpp.err_ts_future,
473 1, __ATOMIC_RELAXED);
476 mts <<= 64 - MLX5_CQ_INDEX_WIDTH;
478 ci >>= 64 - MLX5_CQ_INDEX_WIDTH;
483 * Set Software Parser flags and offsets in Ethernet Segment of WQE.
484 * Flags must be initialized to zero beforehand.
487 * Pointer to burst routine local context.
489 * Pointer to store Software Parser flags.
491 * Configured Tx offloads mask. It is fully defined at
492 * compile time and may be used for optimization.
495 * Software Parser offsets packed in dword.
496 * Software Parser flags are set by pointer.
498 static __rte_always_inline uint32_t
499 txq_mbuf_to_swp(struct mlx5_txq_local *__rte_restrict loc,
504 unsigned int idx, off;
507 if (!MLX5_TXOFF_CONFIG(SWP))
509 ol = loc->mbuf->ol_flags;
510 tunnel = ol & PKT_TX_TUNNEL_MASK;
512 * Check whether Software Parser is required.
513 * Only custom tunnels may request it.
515 if (likely(tunnel != PKT_TX_TUNNEL_UDP && tunnel != PKT_TX_TUNNEL_IP))
518 * The index should have:
519 * bit[0:1] = PKT_TX_L4_MASK
520 * bit[4] = PKT_TX_IPV6
521 * bit[8] = PKT_TX_OUTER_IPV6
522 * bit[9] = PKT_TX_OUTER_UDP
524 idx = (ol & (PKT_TX_L4_MASK | PKT_TX_IPV6 | PKT_TX_OUTER_IPV6)) >> 52;
525 idx |= (tunnel == PKT_TX_TUNNEL_UDP) ? (1 << 9) : 0;
526 *swp_flags = mlx5_swp_types_table[idx];
528 * Set offsets for SW parser. Since ConnectX-5, SW parser just
529 * complements HW parser. SW parser starts to engage only if HW parser
530 * can't reach a header. For the older devices, HW parser will not kick
531 * in if any of SWP offsets is set. Therefore, all of the L3 offsets
532 * should be set regardless of HW offload.
534 off = loc->mbuf->outer_l2_len;
535 if (MLX5_TXOFF_CONFIG(VLAN) && ol & PKT_TX_VLAN_PKT)
536 off += sizeof(struct rte_vlan_hdr);
537 set = (off >> 1) << 8; /* Outer L3 offset. */
538 off += loc->mbuf->outer_l3_len;
539 if (tunnel == PKT_TX_TUNNEL_UDP)
540 set |= off >> 1; /* Outer L4 offset. */
541 if (ol & (PKT_TX_IPV4 | PKT_TX_IPV6)) { /* Inner IP. */
542 const uint64_t csum = ol & PKT_TX_L4_MASK;
543 off += loc->mbuf->l2_len;
544 set |= (off >> 1) << 24; /* Inner L3 offset. */
545 if (csum == PKT_TX_TCP_CKSUM ||
546 csum == PKT_TX_UDP_CKSUM ||
547 (MLX5_TXOFF_CONFIG(TSO) && ol & PKT_TX_TCP_SEG)) {
548 off += loc->mbuf->l3_len;
549 set |= (off >> 1) << 16; /* Inner L4 offset. */
552 set = rte_cpu_to_le_32(set);
557 * Convert the Checksum offloads to Verbs.
560 * Pointer to the mbuf.
563 * Converted checksum flags.
565 static __rte_always_inline uint8_t
566 txq_ol_cksum_to_cs(struct rte_mbuf *buf)
569 uint8_t is_tunnel = !!(buf->ol_flags & PKT_TX_TUNNEL_MASK);
570 const uint64_t ol_flags_mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK |
571 PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM;
574 * The index should have:
575 * bit[0] = PKT_TX_TCP_SEG
576 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
577 * bit[4] = PKT_TX_IP_CKSUM
578 * bit[8] = PKT_TX_OUTER_IP_CKSUM
581 idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (!!is_tunnel << 9);
582 return mlx5_cksum_table[idx];
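/*
 * Worked example (assuming PKT_TX_TCP_CKSUM is bit 52 and PKT_TX_IP_CKSUM
 * is bit 54, as in the standard mbuf layout): for a plain TCP packet with
 * ol_flags = PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM the masked value shifted
 * right by 50 gives idx = (1 << 4) | (1 << 2) = 0x14, and
 * mlx5_cksum_table[0x14] provides the checksum flags for the WQE.
 */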
586 * Free the mbufs from the linear array of pointers.
589 * Pointer to Tx queue structure.
591 * Pointer to the array of packets to be freed.
593 * Number of packets to be freed.
595 * Configured Tx offloads mask. It is fully defined at
596 * compile time and may be used for optimization.
598 static __rte_always_inline void
599 mlx5_tx_free_mbuf(struct mlx5_txq_data *__rte_restrict txq,
600 struct rte_mbuf **__rte_restrict pkts,
602 unsigned int olx __rte_unused)
604 struct rte_mempool *pool = NULL;
605 struct rte_mbuf **p_free = NULL;
606 struct rte_mbuf *mbuf;
607 unsigned int n_free = 0;
610 * The implemented algorithm eliminates
611 * copying pointers to a temporary array
612 * for rte_mempool_put_bulk() calls.
617 * Free mbufs directly to the pool in bulk
618 * if fast free offload is engaged
620 if (!MLX5_TXOFF_CONFIG(MULTI) && txq->fast_free) {
623 rte_mempool_put_bulk(pool, (void *)pkts, pkts_n);
629 * Decrement mbuf reference counter, detach
630 * indirect and external buffers if needed.
632 mbuf = rte_pktmbuf_prefree_seg(*pkts);
633 if (likely(mbuf != NULL)) {
634 MLX5_ASSERT(mbuf == *pkts);
635 if (likely(n_free != 0)) {
636 if (unlikely(pool != mbuf->pool))
637 /* From different pool. */
640 /* Start new scan array. */
647 if (unlikely(pkts_n == 0)) {
653 * This happens if the mbuf is still referenced.
654 * We can't put it back to the pool, skip it.
658 if (unlikely(n_free != 0))
659 /* There is some array to free. */
661 if (unlikely(pkts_n == 0))
662 /* Last mbuf, nothing to free. */
668 * This loop is implemented to avoid multiple
669 * inlining of rte_mempool_put_bulk().
675 * Free the array of pre-freed mbufs
676 * belonging to the same memory pool.
678 rte_mempool_put_bulk(pool, (void *)p_free, n_free);
679 if (unlikely(mbuf != NULL)) {
680 /* There is a request to start a new scan. */
685 if (likely(pkts_n != 0))
688 * This is the last mbuf to be freed.
689 * Do one more loop iteration to complete.
690 * This is the rare case of the last unique mbuf.
695 if (likely(pkts_n == 0))
704 * Non-inlined version of the mbuf free routine, for an optimal call
705 * on tx_burst completion.
707 static __rte_noinline void
708 __mlx5_tx_free_mbuf(struct mlx5_txq_data *__rte_restrict txq,
709 struct rte_mbuf **__rte_restrict pkts,
711 unsigned int olx __rte_unused)
713 mlx5_tx_free_mbuf(txq, pkts, pkts_n, olx);
717 * Free the mbufs from the elts ring buffer up to the new tail.
720 * Pointer to Tx queue structure.
722 * Index in elts to free up to, becomes new elts tail.
724 * Configured Tx offloads mask. It is fully defined at
725 * compile time and may be used for optimization.
727 static __rte_always_inline void
728 mlx5_tx_free_elts(struct mlx5_txq_data *__rte_restrict txq,
730 unsigned int olx __rte_unused)
732 uint16_t n_elts = tail - txq->elts_tail;
735 MLX5_ASSERT(n_elts <= txq->elts_s);
737 * Implement a loop to support ring buffer wraparound
738 * with single inlining of mlx5_tx_free_mbuf().
743 part = txq->elts_s - (txq->elts_tail & txq->elts_m);
744 part = RTE_MIN(part, n_elts);
746 MLX5_ASSERT(part <= txq->elts_s);
747 mlx5_tx_free_mbuf(txq,
748 &txq->elts[txq->elts_tail & txq->elts_m],
750 txq->elts_tail += part;
756 * Store the mbuf being sent into elts ring buffer.
757 * On Tx completion these mbufs will be freed.
760 * Pointer to Tx queue structure.
762 * Pointer to array of packets to be stored.
764 * Number of packets to be stored.
766 * Configured Tx offloads mask. It is fully defined at
767 * compile time and may be used for optimization.
769 static __rte_always_inline void
770 mlx5_tx_copy_elts(struct mlx5_txq_data *__rte_restrict txq,
771 struct rte_mbuf **__rte_restrict pkts,
773 unsigned int olx __rte_unused)
776 struct rte_mbuf **elts = (struct rte_mbuf **)txq->elts;
780 part = txq->elts_s - (txq->elts_head & txq->elts_m);
782 MLX5_ASSERT(part <= txq->elts_s);
783 /* This code is a good candidate for vectorizing with SIMD. */
784 rte_memcpy((void *)(elts + (txq->elts_head & txq->elts_m)),
786 RTE_MIN(part, pkts_n) * sizeof(struct rte_mbuf *));
787 txq->elts_head += pkts_n;
788 if (unlikely(part < pkts_n))
789 /* The copy is wrapping around the elts array. */
790 rte_memcpy((void *)elts, (void *)(pkts + part),
791 (pkts_n - part) * sizeof(struct rte_mbuf *));
795 * Check if the completion request flag should be set in the last WQE.
796 * Both pushed mbufs and WQEs are monitored and the completion request
797 * flag is set if any of the thresholds is reached.
800 * Pointer to TX queue structure.
802 * Pointer to burst routine local context.
804 * Configured Tx offloads mask. It is fully defined at
805 * compile time and may be used for optimization.
807 static __rte_always_inline void
808 mlx5_tx_request_completion(struct mlx5_txq_data *__rte_restrict txq,
809 struct mlx5_txq_local *__rte_restrict loc,
812 uint16_t head = txq->elts_head;
815 part = MLX5_TXOFF_CONFIG(INLINE) ?
816 0 : loc->pkts_sent - loc->pkts_copy;
818 if ((uint16_t)(head - txq->elts_comp) >= MLX5_TX_COMP_THRESH ||
819 (MLX5_TXOFF_CONFIG(INLINE) &&
820 (uint16_t)(txq->wqe_ci - txq->wqe_comp) >= txq->wqe_thres)) {
821 volatile struct mlx5_wqe *last = loc->wqe_last;
824 txq->elts_comp = head;
825 if (MLX5_TXOFF_CONFIG(INLINE))
826 txq->wqe_comp = txq->wqe_ci;
827 /* Request unconditional completion on last WQE. */
828 last->cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
829 MLX5_COMP_MODE_OFFSET);
830 /* Save elts_head in dedicated free on completion queue. */
831 #ifdef RTE_LIBRTE_MLX5_DEBUG
832 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head |
833 (last->cseg.opcode >> 8) << 16;
835 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head;
837 /* A CQE slot must always be available. */
838 MLX5_ASSERT((txq->cq_pi - txq->cq_ci) <= txq->cqe_s);
843 * Build the Control Segment with specified opcode:
845 * - MLX5_OPCODE_ENHANCED_MPSW
849 * Pointer to TX queue structure.
851 * Pointer to burst routine local context.
853 * Pointer to WQE to fill with built Control Segment.
855 * Supposed length of WQE in segments.
857 * SQ WQE opcode to put into Control Segment.
859 * Configured Tx offloads mask. It is fully defined at
860 * compile time and may be used for optimization.
862 static __rte_always_inline void
863 mlx5_tx_cseg_init(struct mlx5_txq_data *__rte_restrict txq,
864 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
865 struct mlx5_wqe *__rte_restrict wqe,
868 unsigned int olx __rte_unused)
870 struct mlx5_wqe_cseg *__rte_restrict cs = &wqe->cseg;
872 /* For legacy MPW replace the EMPW by TSO with modifier. */
873 if (MLX5_TXOFF_CONFIG(MPW) && opcode == MLX5_OPCODE_ENHANCED_MPSW)
874 opcode = MLX5_OPCODE_TSO | MLX5_OPC_MOD_MPW << 24;
875 cs->opcode = rte_cpu_to_be_32((txq->wqe_ci << 8) | opcode);
876 cs->sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
877 cs->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
878 MLX5_COMP_MODE_OFFSET);
879 cs->misc = RTE_BE32(0);
883 * Build the Synchronize Queue Segment with specified completion index.
886 * Pointer to TX queue structure.
888 * Pointer to burst routine local context.
890 * Pointer to the WQE to fill with the built Synchronize Queue Segment.
892 * Completion index in Clock Queue to wait.
894 * Configured Tx offloads mask. It is fully defined at
895 * compile time and may be used for optimization.
897 static __rte_always_inline void
898 mlx5_tx_wseg_init(struct mlx5_txq_data *restrict txq,
899 struct mlx5_txq_local *restrict loc __rte_unused,
900 struct mlx5_wqe *restrict wqe,
902 unsigned int olx __rte_unused)
904 struct mlx5_wqe_qseg *qs;
906 qs = RTE_PTR_ADD(wqe, MLX5_WSEG_SIZE);
907 qs->max_index = rte_cpu_to_be_32(wci);
908 qs->qpn_cqn = rte_cpu_to_be_32(txq->sh->txpp.clock_queue.cq_obj.cq->id);
909 qs->reserved0 = RTE_BE32(0);
910 qs->reserved1 = RTE_BE32(0);
914 * Build the Ethernet Segment without inlined data.
915 * Supports Software Parser, Checksums and VLAN insertion Tx offload features.
918 * Pointer to TX queue structure.
920 * Pointer to burst routine local context.
922 * Pointer to WQE to fill with built Ethernet Segment.
924 * Configured Tx offloads mask. It is fully defined at
925 * compile time and may be used for optimization.
927 static __rte_always_inline void
928 mlx5_tx_eseg_none(struct mlx5_txq_data *__rte_restrict txq __rte_unused,
929 struct mlx5_txq_local *__rte_restrict loc,
930 struct mlx5_wqe *__rte_restrict wqe,
933 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
937 * Calculate and set check sum flags first, dword field
938 * in segment may be shared with Software Parser flags.
940 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
941 es->flags = rte_cpu_to_le_32(csum);
943 * Calculate and set Software Parser offsets and flags.
944 * These flags are set for custom UDP and IP tunnel packets.
946 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
947 /* Fill metadata field if needed. */
948 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
949 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
950 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
951 /* Engage VLAN tag insertion feature if requested. */
952 if (MLX5_TXOFF_CONFIG(VLAN) &&
953 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
955 * We should get here only if the device supports
956 * this feature correctly.
958 MLX5_ASSERT(txq->vlan_en);
959 es->inline_hdr = rte_cpu_to_be_32(MLX5_ETH_WQE_VLAN_INSERT |
960 loc->mbuf->vlan_tci);
962 es->inline_hdr = RTE_BE32(0);
967 * Build the Ethernet Segment with minimal inlined data
968 * of MLX5_ESEG_MIN_INLINE_SIZE bytes length. This is
969 * used to fill the gap in single WQEBB WQEs.
970 * Supports Software Parser, Checksums and VLAN
971 * insertion Tx offload features.
974 * Pointer to TX queue structure.
976 * Pointer to burst routine local context.
978 * Pointer to WQE to fill with built Ethernet Segment.
980 * Length of VLAN tag insertion if any.
982 * Configured Tx offloads mask. It is fully defined at
983 * compile time and may be used for optimization.
985 static __rte_always_inline void
986 mlx5_tx_eseg_dmin(struct mlx5_txq_data *__rte_restrict txq __rte_unused,
987 struct mlx5_txq_local *__rte_restrict loc,
988 struct mlx5_wqe *__rte_restrict wqe,
992 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
994 uint8_t *psrc, *pdst;
997 * Calculate and set check sum flags first, dword field
998 * in segment may be shared with Software Parser flags.
1000 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
1001 es->flags = rte_cpu_to_le_32(csum);
1003 * Calculate and set Software Parser offsets and flags.
1004 * These flags are set for custom UDP and IP tunnel packets.
1006 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
1007 /* Fill metadata field if needed. */
1008 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
1009 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
1010 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
1011 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
1012 es->inline_hdr_sz = RTE_BE16(MLX5_ESEG_MIN_INLINE_SIZE);
1013 es->inline_data = *(unaligned_uint16_t *)psrc;
1014 psrc += sizeof(uint16_t);
1015 pdst = (uint8_t *)(es + 1);
1016 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
1017 /* Implement VLAN tag insertion as part of the inline data. */
1018 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
1019 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
1020 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
1021 /* Insert VLAN ethertype + VLAN tag. */
1022 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
1023 ((RTE_ETHER_TYPE_VLAN << 16) |
1024 loc->mbuf->vlan_tci);
1025 pdst += sizeof(struct rte_vlan_hdr);
1026 /* Copy the remaining two bytes from the packet data. */
1027 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
1028 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
1030 /* Fill the gap in the title WQEBB with inline data. */
1031 rte_mov16(pdst, psrc);
1036 * Build the Ethernet Segment with entire packet data inlining. Checks the
1037 * boundary of WQEBB and ring buffer wrapping, supports Software Parser,
1038 * Checksums and VLAN insertion Tx offload features.
1041 * Pointer to TX queue structure.
1043 * Pointer to burst routine local context.
1045 * Pointer to WQE to fill with built Ethernet Segment.
1047 * Length of VLAN tag insertion if any.
1049 * Length of data to inline (VLAN included, if any).
1051 * TSO flag, set mss field from the packet.
1053 * Configured Tx offloads mask. It is fully defined at
1054 * compile time and may be used for optimization.
1057 * Pointer to the next Data Segment (aligned and wrapped around).
1059 static __rte_always_inline struct mlx5_wqe_dseg *
1060 mlx5_tx_eseg_data(struct mlx5_txq_data *__rte_restrict txq,
1061 struct mlx5_txq_local *__rte_restrict loc,
1062 struct mlx5_wqe *__rte_restrict wqe,
1068 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
1070 uint8_t *psrc, *pdst;
1074 * Calculate and set check sum flags first, dword field
1075 * in segment may be shared with Software Parser flags.
1077 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
1080 csum |= loc->mbuf->tso_segsz;
1081 es->flags = rte_cpu_to_be_32(csum);
1083 es->flags = rte_cpu_to_le_32(csum);
1086 * Calculate and set Software Parser offsets and flags.
1087 * These flags are set for custom UDP and IP tunnel packets.
1089 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
1090 /* Fill metadata field if needed. */
1091 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
1092 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
1093 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
1094 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
1095 es->inline_hdr_sz = rte_cpu_to_be_16(inlen);
1096 es->inline_data = *(unaligned_uint16_t *)psrc;
1097 psrc += sizeof(uint16_t);
1098 pdst = (uint8_t *)(es + 1);
1099 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
1100 /* Implement VLAN tag insertion as part of the inline data. */
1101 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
1102 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
1103 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
1104 /* Insert VLAN ethertype + VLAN tag. */
1105 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
1106 ((RTE_ETHER_TYPE_VLAN << 16) |
1107 loc->mbuf->vlan_tci);
1108 pdst += sizeof(struct rte_vlan_hdr);
1109 /* Copy the remaining two bytes from the packet data. */
1110 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
1111 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
1112 psrc += sizeof(uint16_t);
1114 /* Fill the gap in the title WQEBB with inline data. */
1115 rte_mov16(pdst, psrc);
1116 psrc += sizeof(rte_v128u32_t);
1118 pdst = (uint8_t *)(es + 2);
1119 MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
1120 MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
1121 inlen -= MLX5_ESEG_MIN_INLINE_SIZE;
1123 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
1124 return (struct mlx5_wqe_dseg *)pdst;
1127 * The WQEBB space availability is checked by caller.
1128 * Here we should be aware of WQE ring buffer wraparound only.
1130 part = (uint8_t *)txq->wqes_end - pdst;
1131 part = RTE_MIN(part, inlen);
1133 rte_memcpy(pdst, psrc, part);
1135 if (likely(!inlen)) {
1137 * If return value is not used by the caller
1138 * the code below will be optimized out.
1141 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
1142 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
1143 pdst = (uint8_t *)txq->wqes;
1144 return (struct mlx5_wqe_dseg *)pdst;
1146 pdst = (uint8_t *)txq->wqes;
1153 * Copy data from a chain of mbufs to the specified linear buffer.
1154 * Supports the Checksums and VLAN insertion Tx offload features. If data
1155 * from some mbuf is copied completely, this mbuf is freed. The local
1156 * structure is used to keep the byte stream state.
1159 * Pointer to the destination linear buffer.
1161 * Pointer to burst routine local context.
1163 * Length of data to be copied.
1165 * Length of data to be copied, ignoring the no-inline hint.
1167 * Configured Tx offloads mask. It is fully defined at
1168 * compile time and may be used for optimization.
1171 * Number of actually copied data bytes. This is always greater than or
1172 * equal to the must parameter and might be less than len if the no-inline
1173 * hint flag is encountered.
1175 static __rte_always_inline unsigned int
1176 mlx5_tx_mseg_memcpy(uint8_t *pdst,
1177 struct mlx5_txq_local *__rte_restrict loc,
1180 unsigned int olx __rte_unused)
1182 struct rte_mbuf *mbuf;
1183 unsigned int part, dlen, copy = 0;
1187 MLX5_ASSERT(must <= len);
1189 /* Allow zero length packets, must check first. */
1190 dlen = rte_pktmbuf_data_len(loc->mbuf);
1191 if (dlen <= loc->mbuf_off) {
1192 /* Exhausted packet, just free. */
1194 loc->mbuf = mbuf->next;
1195 rte_pktmbuf_free_seg(mbuf);
1197 MLX5_ASSERT(loc->mbuf_nseg > 1);
1198 MLX5_ASSERT(loc->mbuf);
1200 if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
1205 * We already copied the minimal
1206 * requested amount of data.
1211 if (diff <= rte_pktmbuf_data_len(loc->mbuf)) {
1213 * Copy only the minimal required
1214 * part of the data buffer.
1221 dlen -= loc->mbuf_off;
1222 psrc = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
1224 part = RTE_MIN(len, dlen);
1225 rte_memcpy(pdst, psrc, part);
1227 loc->mbuf_off += part;
1230 if (loc->mbuf_off >= rte_pktmbuf_data_len(loc->mbuf)) {
1232 /* Exhausted packet, just free. */
1234 loc->mbuf = mbuf->next;
1235 rte_pktmbuf_free_seg(mbuf);
1237 MLX5_ASSERT(loc->mbuf_nseg >= 1);
1247 * Build the Ethernet Segment with inlined data from multi-segment packet.
1248 * Checks the boundary of WQEBB and ring buffer wrapping, supports Software
1249 * Parser, Checksums and VLAN insertion Tx offload features.
1252 * Pointer to TX queue structure.
1254 * Pointer to burst routine local context.
1256 * Pointer to WQE to fill with built Ethernet Segment.
1258 * Length of VLAN tag insertion if any.
1260 * Length of data to inline (VLAN included, if any).
1262 * TSO flag, set mss field from the packet.
1264 * Configured Tx offloads mask. It is fully defined at
1265 * compile time and may be used for optimization.
1268 * Pointer to the next Data Segment (aligned and possibly NOT wrapped
1269 * around - the caller should do the wrapping check on its own).
1271 static __rte_always_inline struct mlx5_wqe_dseg *
1272 mlx5_tx_eseg_mdat(struct mlx5_txq_data *__rte_restrict txq,
1273 struct mlx5_txq_local *__rte_restrict loc,
1274 struct mlx5_wqe *__rte_restrict wqe,
1280 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
1283 unsigned int part, tlen = 0;
1286 * Calculate and set check sum flags first, uint32_t field
1287 * in segment may be shared with Software Parser flags.
1289 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
1292 csum |= loc->mbuf->tso_segsz;
1293 es->flags = rte_cpu_to_be_32(csum);
1295 es->flags = rte_cpu_to_le_32(csum);
1298 * Calculate and set Software Parser offsets and flags.
1299 * These flags are set for custom UDP and IP tunnel packets.
1301 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
1302 /* Fill metadata field if needed. */
1303 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
1304 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
1305 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
1306 MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
1307 pdst = (uint8_t *)&es->inline_data;
1308 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
1309 /* Implement VLAN tag insertion as part of the inline data. */
1310 mlx5_tx_mseg_memcpy(pdst, loc,
1311 2 * RTE_ETHER_ADDR_LEN,
1312 2 * RTE_ETHER_ADDR_LEN, olx);
1313 pdst += 2 * RTE_ETHER_ADDR_LEN;
1314 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
1315 ((RTE_ETHER_TYPE_VLAN << 16) |
1316 loc->mbuf->vlan_tci);
1317 pdst += sizeof(struct rte_vlan_hdr);
1318 tlen += 2 * RTE_ETHER_ADDR_LEN + sizeof(struct rte_vlan_hdr);
1320 MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
1322 * The WQEBB space availability is checked by caller.
1323 * Here we should be aware of WQE ring buffer wraparound only.
1325 part = (uint8_t *)txq->wqes_end - pdst;
1326 part = RTE_MIN(part, inlen - tlen);
1332 * Copying may be interrupted inside the routine
1333 * if the no-inline hint flag is encountered.
1335 copy = tlen >= txq->inlen_mode ? 0 : (txq->inlen_mode - tlen);
1336 copy = mlx5_tx_mseg_memcpy(pdst, loc, part, copy, olx);
1338 if (likely(inlen <= tlen) || copy < part) {
1339 es->inline_hdr_sz = rte_cpu_to_be_16(tlen);
1341 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
1342 return (struct mlx5_wqe_dseg *)pdst;
1344 pdst = (uint8_t *)txq->wqes;
1345 part = inlen - tlen;
1350 * Build the Data Segment of pointer type.
1353 * Pointer to TX queue structure.
1355 * Pointer to burst routine local context.
1357 * Pointer to WQE to fill with built Data Segment.
1359 * Data buffer to point.
1361 * Data buffer length.
1363 * Configured Tx offloads mask. It is fully defined at
1364 * compile time and may be used for optimization.
1366 static __rte_always_inline void
1367 mlx5_tx_dseg_ptr(struct mlx5_txq_data *__rte_restrict txq,
1368 struct mlx5_txq_local *__rte_restrict loc,
1369 struct mlx5_wqe_dseg *__rte_restrict dseg,
1372 unsigned int olx __rte_unused)
1376 dseg->bcount = rte_cpu_to_be_32(len);
1377 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
1378 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
1382 * Build the Data Segment of pointer type, or inline the data if its length
1383 * fits into the minimal Data Segment size.
1386 * Pointer to TX queue structure.
1388 * Pointer to burst routine local context.
1390 * Pointer to WQE to fill with built Data Segment.
1392 * Data buffer to point.
1394 * Data buffer length.
1396 * Configured Tx offloads mask. It is fully defined at
1397 * compile time and may be used for optimization.
1399 static __rte_always_inline void
1400 mlx5_tx_dseg_iptr(struct mlx5_txq_data *__rte_restrict txq,
1401 struct mlx5_txq_local *__rte_restrict loc,
1402 struct mlx5_wqe_dseg *__rte_restrict dseg,
1405 unsigned int olx __rte_unused)
1411 if (len > MLX5_DSEG_MIN_INLINE_SIZE) {
1412 dseg->bcount = rte_cpu_to_be_32(len);
1413 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
1414 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
1418 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
1419 /* Unrolled implementation of generic rte_memcpy. */
1420 dst = (uintptr_t)&dseg->inline_data[0];
1421 src = (uintptr_t)buf;
1423 #ifdef RTE_ARCH_STRICT_ALIGN
1424 MLX5_ASSERT(dst == RTE_PTR_ALIGN(dst, sizeof(uint32_t)));
1425 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
1426 dst += sizeof(uint32_t);
1427 src += sizeof(uint32_t);
1428 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
1429 dst += sizeof(uint32_t);
1430 src += sizeof(uint32_t);
1432 *(uint64_t *)dst = *(unaligned_uint64_t *)src;
1433 dst += sizeof(uint64_t);
1434 src += sizeof(uint64_t);
1438 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
1439 dst += sizeof(uint32_t);
1440 src += sizeof(uint32_t);
1443 *(uint16_t *)dst = *(unaligned_uint16_t *)src;
1444 dst += sizeof(uint16_t);
1445 src += sizeof(uint16_t);
1448 *(uint8_t *)dst = *(uint8_t *)src;
1452 * Build the Data Segment of inlined data from single
1453 * segment packet, no VLAN insertion.
1456 * Pointer to TX queue structure.
1458 * Pointer to burst routine local context.
1460 * Pointer to WQE to fill with built Data Segment.
1462 * Data buffer to point.
1464 * Data buffer length.
1466 * Configured Tx offloads mask. It is fully defined at
1467 * compile time and may be used for optimization.
1470 * Pointer to the next Data Segment after inlined data.
1471 * Ring buffer wraparound check is needed. We do not do it here because it
1472 * may not be needed for the last packet in the eMPW session.
1474 static __rte_always_inline struct mlx5_wqe_dseg *
1475 mlx5_tx_dseg_empw(struct mlx5_txq_data *__rte_restrict txq,
1476 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
1477 struct mlx5_wqe_dseg *__rte_restrict dseg,
1480 unsigned int olx __rte_unused)
1485 if (!MLX5_TXOFF_CONFIG(MPW)) {
1486 /* Store the descriptor byte counter for eMPW sessions. */
1487 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
1488 pdst = &dseg->inline_data[0];
1490 /* The entire legacy MPW session counter is stored on close. */
1491 pdst = (uint8_t *)dseg;
1494 * The WQEBB space availability is checked by caller.
1495 * Here we should be aware of WQE ring buffer wraparound only.
1497 part = (uint8_t *)txq->wqes_end - pdst;
1498 part = RTE_MIN(part, len);
1500 rte_memcpy(pdst, buf, part);
1504 if (!MLX5_TXOFF_CONFIG(MPW))
1505 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
1506 /* Note: no final wraparound check here. */
1507 return (struct mlx5_wqe_dseg *)pdst;
1509 pdst = (uint8_t *)txq->wqes;
1516 * Build the Data Segment of inlined data from single
1517 * segment packet with VLAN insertion.
1520 * Pointer to TX queue structure.
1522 * Pointer to burst routine local context.
1524 * Pointer to the dseg to fill with the built Data Segment.
1526 * Data buffer to point.
1528 * Data buffer length.
1530 * Configured Tx offloads mask. It is fully defined at
1531 * compile time and may be used for optimization.
1534 * Pointer to the next Data Segment after inlined data.
1535 * Ring buffer wraparound check is needed.
1537 static __rte_always_inline struct mlx5_wqe_dseg *
1538 mlx5_tx_dseg_vlan(struct mlx5_txq_data *__rte_restrict txq,
1539 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
1540 struct mlx5_wqe_dseg *__rte_restrict dseg,
1543 unsigned int olx __rte_unused)
1549 MLX5_ASSERT(len > MLX5_ESEG_MIN_INLINE_SIZE);
1550 if (!MLX5_TXOFF_CONFIG(MPW)) {
1551 /* Store the descriptor byte counter for eMPW sessions. */
1552 dseg->bcount = rte_cpu_to_be_32
1553 ((len + sizeof(struct rte_vlan_hdr)) |
1554 MLX5_ETH_WQE_DATA_INLINE);
1555 pdst = &dseg->inline_data[0];
1557 /* The entire legacy MPW session counter is stored on close. */
1558 pdst = (uint8_t *)dseg;
1560 memcpy(pdst, buf, MLX5_DSEG_MIN_INLINE_SIZE);
1561 buf += MLX5_DSEG_MIN_INLINE_SIZE;
1562 pdst += MLX5_DSEG_MIN_INLINE_SIZE;
1563 len -= MLX5_DSEG_MIN_INLINE_SIZE;
1564 /* Insert VLAN ethertype + VLAN tag. Pointer is aligned. */
1565 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
1566 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
1567 pdst = (uint8_t *)txq->wqes;
1568 *(uint32_t *)pdst = rte_cpu_to_be_32((RTE_ETHER_TYPE_VLAN << 16) |
1569 loc->mbuf->vlan_tci);
1570 pdst += sizeof(struct rte_vlan_hdr);
1572 * The WQEBB space availability is checked by caller.
1573 * Here we should be aware of WQE ring buffer wraparound only.
1575 part = (uint8_t *)txq->wqes_end - pdst;
1576 part = RTE_MIN(part, len);
1578 rte_memcpy(pdst, buf, part);
1582 if (!MLX5_TXOFF_CONFIG(MPW))
1583 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
1584 /* Note: no final wraparound check here. */
1585 return (struct mlx5_wqe_dseg *)pdst;
1587 pdst = (uint8_t *)txq->wqes;
1594 * Build the Ethernet Segment with optionally inlined data with
1595 * VLAN insertion and following Data Segments (if any) from
1596 * multi-segment packet. Used by ordinary send and TSO.
1599 * Pointer to TX queue structure.
1601 * Pointer to burst routine local context.
1603 * Pointer to WQE to fill with built Ethernet/Data Segments.
1605 * Length of VLAN header to insert, 0 means no VLAN insertion.
1607 * Data length to inline. For TSO this parameter specifies the exact value;
1608 * for the ordinary send routine it can be aligned by the caller to provide
1609 * better WQE space saving and data buffer start address alignment.
1610 * This length includes the VLAN header being inserted.
1612 * Zero means ordinary send, inlined data can be extended,
1613 * otherwise this is TSO, inlined data length is fixed.
1615 * Configured Tx offloads mask. It is fully defined at
1616 * compile time and may be used for optimization.
1619 * Actual size of built WQE in segments.
1621 static __rte_always_inline unsigned int
1622 mlx5_tx_mseg_build(struct mlx5_txq_data *__rte_restrict txq,
1623 struct mlx5_txq_local *__rte_restrict loc,
1624 struct mlx5_wqe *__rte_restrict wqe,
1628 unsigned int olx __rte_unused)
1630 struct mlx5_wqe_dseg *__rte_restrict dseg;
1633 MLX5_ASSERT((rte_pktmbuf_pkt_len(loc->mbuf) + vlan) >= inlen);
1634 loc->mbuf_nseg = NB_SEGS(loc->mbuf);
1637 dseg = mlx5_tx_eseg_mdat(txq, loc, wqe, vlan, inlen, tso, olx);
1638 if (!loc->mbuf_nseg)
1641 * There are still some mbufs remaining, not inlined.
1642 * The first mbuf may be partially inlined and we
1643 * must process the possible non-zero data offset.
1645 if (loc->mbuf_off) {
1650 * Exhausted packets must have been dropped before.
1651 * A non-zero offset means there is some data
1652 * remaining in the packet.
1654 MLX5_ASSERT(loc->mbuf_off < rte_pktmbuf_data_len(loc->mbuf));
1655 MLX5_ASSERT(rte_pktmbuf_data_len(loc->mbuf));
1656 dptr = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
1658 dlen = rte_pktmbuf_data_len(loc->mbuf) - loc->mbuf_off;
1660 * Build the pointer/minimal Data Segment.
1661 * Do ring buffer wrapping check in advance.
1663 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
1664 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
1665 mlx5_tx_dseg_iptr(txq, loc, dseg, dptr, dlen, olx);
1666 /* Store the mbuf to be freed on completion. */
1667 MLX5_ASSERT(loc->elts_free);
1668 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
1671 if (--loc->mbuf_nseg == 0)
1673 loc->mbuf = loc->mbuf->next;
1677 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
1678 struct rte_mbuf *mbuf;
1680 /* Zero length segment found, just skip. */
1682 loc->mbuf = loc->mbuf->next;
1683 rte_pktmbuf_free_seg(mbuf);
1684 if (--loc->mbuf_nseg == 0)
1687 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
1688 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
1691 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
1692 rte_pktmbuf_data_len(loc->mbuf), olx);
1693 MLX5_ASSERT(loc->elts_free);
1694 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
1697 if (--loc->mbuf_nseg == 0)
1699 loc->mbuf = loc->mbuf->next;
1704 /* Calculate actual segments used from the dseg pointer. */
1705 if ((uintptr_t)wqe < (uintptr_t)dseg)
1706 ds = ((uintptr_t)dseg - (uintptr_t)wqe) / MLX5_WSEG_SIZE;
1708 ds = (((uintptr_t)dseg - (uintptr_t)wqe) +
1709 txq->wqe_s * MLX5_WQE_SIZE) / MLX5_WSEG_SIZE;
1714 * The routine checks the timestamp flag in the current packet,
1715 * and pushes a WAIT WQE into the queue if scheduling is required.
1718 * Pointer to TX queue structure.
1720 * Pointer to burst routine local context.
1722 * Configured Tx offloads mask. It is fully defined at
1723 * compile time and may be used for optimization.
1726 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
1727 * MLX5_TXCMP_CODE_SINGLE - continue processing with the packet.
1728 * MLX5_TXCMP_CODE_MULTI - the WAIT WQE was inserted, continue processing.
1729 * Local context variables partially updated.
1731 static __rte_always_inline enum mlx5_txcmp_code
1732 mlx5_tx_schedule_send(struct mlx5_txq_data *restrict txq,
1733 struct mlx5_txq_local *restrict loc,
1736 if (MLX5_TXOFF_CONFIG(TXPP) &&
1737 loc->mbuf->ol_flags & txq->ts_mask) {
1738 struct mlx5_wqe *wqe;
1743 * Estimate the required space quickly and roughly.
1744 * We would like to ensure the packet can be pushed
1745 * to the queue and we won't get an orphan WAIT WQE.
1747 if (loc->wqe_free <= MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE ||
1748 loc->elts_free < NB_SEGS(loc->mbuf))
1749 return MLX5_TXCMP_CODE_EXIT;
1750 /* Convert the timestamp into a completion index to wait for. */
1751 ts = *RTE_MBUF_DYNFIELD(loc->mbuf, txq->ts_offset, uint64_t *);
1752 wci = mlx5_txpp_convert_tx_ts(txq->sh, ts);
1753 if (unlikely(wci < 0))
1754 return MLX5_TXCMP_CODE_SINGLE;
1755 /* Build the WAIT WQE with specified completion. */
1756 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
1757 mlx5_tx_cseg_init(txq, loc, wqe, 2, MLX5_OPCODE_WAIT, olx);
1758 mlx5_tx_wseg_init(txq, loc, wqe, wci, olx);
1761 return MLX5_TXCMP_CODE_MULTI;
1763 return MLX5_TXCMP_CODE_SINGLE;
1767 * Tx one packet function for multi-segment TSO. Supports all
1768 * types of Tx offloads, uses MLX5_OPCODE_TSO to build WQEs,
1769 * sends one packet per WQE.
1771 * This routine is responsible for storing the processed mbuf
1772 * into the elts ring buffer and updating elts_head.
1775 * Pointer to TX queue structure.
1777 * Pointer to burst routine local context.
1779 * Configured Tx offloads mask. It is fully defined at
1780 * compile time and may be used for optimization.
1783 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
1784 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
1785 * Local context variables partially updated.
1787 static __rte_always_inline enum mlx5_txcmp_code
1788 mlx5_tx_packet_multi_tso(struct mlx5_txq_data *__rte_restrict txq,
1789 struct mlx5_txq_local *__rte_restrict loc,
1792 struct mlx5_wqe *__rte_restrict wqe;
1793 unsigned int ds, dlen, inlen, ntcp, vlan = 0;
1795 if (MLX5_TXOFF_CONFIG(TXPP)) {
1796 enum mlx5_txcmp_code wret;
1798 /* Generate WAIT for scheduling if requested. */
1799 wret = mlx5_tx_schedule_send(txq, loc, olx);
1800 if (wret == MLX5_TXCMP_CODE_EXIT)
1801 return MLX5_TXCMP_CODE_EXIT;
1802 if (wret == MLX5_TXCMP_CODE_ERROR)
1803 return MLX5_TXCMP_CODE_ERROR;
1806 * Calculate data length to be inlined to estimate
1807 * the required space in WQE ring buffer.
1809 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
1810 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
1811 vlan = sizeof(struct rte_vlan_hdr);
1812 inlen = loc->mbuf->l2_len + vlan +
1813 loc->mbuf->l3_len + loc->mbuf->l4_len;
1814 if (unlikely((!inlen || !loc->mbuf->tso_segsz)))
1815 return MLX5_TXCMP_CODE_ERROR;
1816 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
1817 inlen += loc->mbuf->outer_l2_len + loc->mbuf->outer_l3_len;
1818 /* Packet must contain all TSO headers. */
1819 if (unlikely(inlen > MLX5_MAX_TSO_HEADER ||
1820 inlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
1821 inlen > (dlen + vlan)))
1822 return MLX5_TXCMP_CODE_ERROR;
1823 MLX5_ASSERT(inlen >= txq->inlen_mode);
1825 * Check whether there are enough free WQEBBs:
1827 * - Ethernet Segment
1828 * - First Segment of inlined Ethernet data
1829 * - ... data continued ...
1830 * - Data Segments of pointer/min inline type
1832 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
1833 MLX5_ESEG_MIN_INLINE_SIZE +
1835 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
1836 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
1837 return MLX5_TXCMP_CODE_EXIT;
1838 /* Check for maximal WQE size. */
1839 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
1840 return MLX5_TXCMP_CODE_ERROR;
1841 #ifdef MLX5_PMD_SOFT_COUNTERS
1842 /* Update sent data bytes/packets counters. */
1843 ntcp = (dlen - (inlen - vlan) + loc->mbuf->tso_segsz - 1) /
1844 loc->mbuf->tso_segsz;
1846 * One will be added for the mbuf itself at the end of mlx5_tx_burst
1847 * from the loc->pkts_sent field.
1850 txq->stats.opackets += ntcp;
1851 txq->stats.obytes += dlen + vlan + ntcp * inlen;
1853 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
1854 loc->wqe_last = wqe;
1855 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_TSO, olx);
1856 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 1, olx);
1857 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
1858 txq->wqe_ci += (ds + 3) / 4;
1859 loc->wqe_free -= (ds + 3) / 4;
1860 return MLX5_TXCMP_CODE_MULTI;
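/*
 * Worked example for the WQE size estimate above (assuming
 * MLX5_ESEG_MIN_INLINE_SIZE is 18 and MLX5_WSEG_SIZE is 16): a 3-segment
 * TSO packet with 60 bytes of headers to inline gives
 *   ds = 3 + 2 + (60 - 18 + 15) / 16 = 8 segments,
 * which occupies (8 + 3) / 4 = 2 WQEBBs of wqe_free.
 */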
1864 * Tx one packet function for multi-segment SEND. Supports all types of Tx
1865 * offloads, uses MLX5_OPCODE_SEND to build WQEs, sends one packet per WQE,
1866 * without any data inlining in Ethernet Segment.
1868 * This routine is responsible for storing the processed mbuf
1869 * into the elts ring buffer and updating elts_head.
1872 * Pointer to TX queue structure.
1874 * Pointer to burst routine local context.
1876 * Configured Tx offloads mask. It is fully defined at
1877 * compile time and may be used for optimization.
1880 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
1881 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
1882 * Local context variables partially updated.
1884 static __rte_always_inline enum mlx5_txcmp_code
1885 mlx5_tx_packet_multi_send(struct mlx5_txq_data *__rte_restrict txq,
1886 struct mlx5_txq_local *__rte_restrict loc,
1889 struct mlx5_wqe_dseg *__rte_restrict dseg;
1890 struct mlx5_wqe *__rte_restrict wqe;
1891 unsigned int ds, nseg;
1893 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
1894 if (MLX5_TXOFF_CONFIG(TXPP)) {
1895 enum mlx5_txcmp_code wret;
1897 /* Generate WAIT for scheduling if requested. */
1898 wret = mlx5_tx_schedule_send(txq, loc, olx);
1899 if (wret == MLX5_TXCMP_CODE_EXIT)
1900 return MLX5_TXCMP_CODE_EXIT;
1901 if (wret == MLX5_TXCMP_CODE_ERROR)
1902 return MLX5_TXCMP_CODE_ERROR;
1905 * No inlining at all - it means that saving CPU cycles was prioritized
1906 * in the configuration, so we should not copy any packet data to the WQE.
1908 nseg = NB_SEGS(loc->mbuf);
1910 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
1911 return MLX5_TXCMP_CODE_EXIT;
1912 /* Check for maximal WQE size. */
1913 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
1914 return MLX5_TXCMP_CODE_ERROR;
1916 * Some Tx offloads may cause an error if the packet is not long enough,
1917 * check against the assumed minimal length.
1919 if (rte_pktmbuf_pkt_len(loc->mbuf) <= MLX5_ESEG_MIN_INLINE_SIZE)
1920 return MLX5_TXCMP_CODE_ERROR;
1921 #ifdef MLX5_PMD_SOFT_COUNTERS
1922 /* Update sent data bytes counter. */
1923 txq->stats.obytes += rte_pktmbuf_pkt_len(loc->mbuf);
1924 if (MLX5_TXOFF_CONFIG(VLAN) &&
1925 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
1926 txq->stats.obytes += sizeof(struct rte_vlan_hdr);
1929 * SEND WQE, one WQEBB:
1930 * - Control Segment, SEND opcode
1931 * - Ethernet Segment, optional VLAN, no inline
1932 * - Data Segments, pointer only type
1934 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
1935 loc->wqe_last = wqe;
1936 mlx5_tx_cseg_init(txq, loc, wqe, ds, MLX5_OPCODE_SEND, olx);
1937 mlx5_tx_eseg_none(txq, loc, wqe, olx);
1938 dseg = &wqe->dseg[0];
1940 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
1941 struct rte_mbuf *mbuf;
1944 * Zero length segment found, have to correct the total
1945 * size of the WQE in segments.
1946 * It is supposed to be a rare occasion, so in the normal
1947 * case (no zero length segments) we avoid extra
1948 * writing to the Control Segment.
1951 wqe->cseg.sq_ds -= RTE_BE32(1);
1953 loc->mbuf = mbuf->next;
1954 rte_pktmbuf_free_seg(mbuf);
1960 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
1961 rte_pktmbuf_data_len(loc->mbuf), olx);
1962 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
1967 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
1968 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
1969 loc->mbuf = loc->mbuf->next;
1972 txq->wqe_ci += (ds + 3) / 4;
1973 loc->wqe_free -= (ds + 3) / 4;
1974 return MLX5_TXCMP_CODE_MULTI;
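/*
 * Illustration of the accounting above (hypothetical numbers,
 * assuming ds counts the Control and Ethernet segments plus one
 * pointer Data Segment per mbuf segment): a 3-segment mbuf chain
 * gives ds = 2 + 3 = 5, i.e. (5 + 3) / 4 = 2 WQEBBs. If one of the
 * segments turns out to be zero-length, it is freed and sq_ds is
 * decremented by one instead of writing an empty Data Segment.
 */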
1978 * Tx one packet function for multi-segment SEND. Supports all
1979 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
1980 * sends one packet per WQE, with data inlining in
1981 * Ethernet Segment and minimal Data Segments.
1983 * This routine is responsible for storing the processed mbuf
1984 * into the elts ring buffer and updating elts_head.
1987 * Pointer to TX queue structure.
1989 * Pointer to burst routine local context.
1991 * Configured Tx offloads mask. It is fully defined at
1992 * compile time and may be used for optimization.
1995 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
1996 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
1997 * Local context variables partially updated.
1999 static __rte_always_inline enum mlx5_txcmp_code
2000 mlx5_tx_packet_multi_inline(struct mlx5_txq_data *__rte_restrict txq,
2001 struct mlx5_txq_local *__rte_restrict loc,
2004 struct mlx5_wqe *__rte_restrict wqe;
2005 unsigned int ds, inlen, dlen, vlan = 0;
2007 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
2008 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
2009 if (MLX5_TXOFF_CONFIG(TXPP)) {
2010 enum mlx5_txcmp_code wret;
2012 /* Generate WAIT for scheduling if requested. */
2013 wret = mlx5_tx_schedule_send(txq, loc, olx);
2014 if (wret == MLX5_TXCMP_CODE_EXIT)
2015 return MLX5_TXCMP_CODE_EXIT;
2016 if (wret == MLX5_TXCMP_CODE_ERROR)
2017 return MLX5_TXCMP_CODE_ERROR;
2020 * First calculate data length to be inlined
2021 * to estimate the required space for WQE.
2023 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
2024 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
2025 vlan = sizeof(struct rte_vlan_hdr);
2026 inlen = dlen + vlan;
2027 /* Check against minimal length. */
2028 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
2029 return MLX5_TXCMP_CODE_ERROR;
2030 MLX5_ASSERT(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
2031 if (inlen > txq->inlen_send ||
2032 loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
2033 struct rte_mbuf *mbuf;
2038 * Packet length exceeds the allowed inline data length,
2039 * check whether the minimal inlining is required.
2041 if (txq->inlen_mode) {
2042 MLX5_ASSERT(txq->inlen_mode >=
2043 MLX5_ESEG_MIN_INLINE_SIZE);
2044 MLX5_ASSERT(txq->inlen_mode <= txq->inlen_send);
2045 inlen = txq->inlen_mode;
2047 if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE ||
2048 !vlan || txq->vlan_en) {
2050 * VLAN insertion will be done internally by HW.
2051 * It is not the most efficient way - the VLAN flag is
2052 * checked twice, but we must handle the
2053 * inlining length correctly and take into
2054 * account the VLAN header being inserted.
2056 return mlx5_tx_packet_multi_send
2059 inlen = MLX5_ESEG_MIN_INLINE_SIZE;
2062 * Now we know the minimal amount of data required
2063 * to be inlined. Check whether we should inline the buffers
2064 * from the beginning of the chain to eliminate some mbufs.
2067 nxlen = rte_pktmbuf_data_len(mbuf);
2068 if (unlikely(nxlen <= txq->inlen_send)) {
2069 /* We can inline first mbuf at least. */
2070 if (nxlen < inlen) {
2073 /* Scan mbufs till inlen filled. */
2078 nxlen = rte_pktmbuf_data_len(mbuf);
2080 } while (unlikely(nxlen < inlen));
2081 if (unlikely(nxlen > txq->inlen_send)) {
2082 /* We cannot inline entire mbuf. */
2083 smlen = inlen - smlen;
2084 start = rte_pktmbuf_mtod_offset
2085 (mbuf, uintptr_t, smlen);
2092 /* This must not be the end of the packet. */
2094 nxlen = inlen + rte_pktmbuf_data_len(mbuf);
2095 } while (unlikely(nxlen < txq->inlen_send));
2097 start = rte_pktmbuf_mtod(mbuf, uintptr_t);
2099 * Check whether we can extend the inline data to align the start
2100 * address of the data buffer to a cacheline boundary.
2103 start = (~start + 1) & (RTE_CACHE_LINE_SIZE - 1);
2104 if (unlikely(start)) {
2106 if (start <= txq->inlen_send)
2111 * Check whether there are enough free WQEBBs:
2113 * - Ethernet Segment
2114 * - First Segment of inlined Ethernet data
2115 * - ... data continued ...
2116 * - Data Segments of pointer/min inline type
2118 * Estimate the number of Data Segments conservatively,
2119 * supposing that no mbufs are freed during inlining.
2121 MLX5_ASSERT(inlen <= txq->inlen_send);
2122 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
2123 MLX5_ESEG_MIN_INLINE_SIZE +
2125 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
2126 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
2127 return MLX5_TXCMP_CODE_EXIT;
2128 /* Check for maximal WQE size. */
2129 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
2130 return MLX5_TXCMP_CODE_ERROR;
2131 #ifdef MLX5_PMD_SOFT_COUNTERS
2132 /* Update sent data bytes/packets counters. */
2133 txq->stats.obytes += dlen + vlan;
2135 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
2136 loc->wqe_last = wqe;
2137 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_SEND, olx);
2138 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 0, olx);
2139 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
2140 txq->wqe_ci += (ds + 3) / 4;
2141 loc->wqe_free -= (ds + 3) / 4;
2142 return MLX5_TXCMP_CODE_MULTI;
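/*
 * Rough illustration of the conservative Data Segment estimate above
 * (hypothetical numbers, assuming MLX5_ESEG_MIN_INLINE_SIZE is 18 and
 * MLX5_WSEG_SIZE is 16): with NB_SEGS = 4 and inlen = 146 the inline
 * tail beyond the minimal Ethernet inline part occupies about
 * (146 - 18 + 15) / 16 = 8 extra slots, so ds is roughly
 * 4 + 2 + 8 = 14 and the WQE spans (14 + 3) / 4 = 4 WQEBBs.
 */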
2146 * Tx burst function for multi-segment packets. Supports all
2147 * types of Tx offloads, uses MLX5_OPCODE_SEND/TSO to build WQEs,
2148 * sends one packet per WQE. The function stops sending if it
2149 * encounters a single-segment packet.
2151 * This routine is responsible for storing the processed mbuf
2152 * into the elts ring buffer and updating elts_head.
2155 * Pointer to TX queue structure.
2157 * Packets to transmit.
2159 * Number of packets in array.
2161 * Pointer to burst routine local context.
2163 * Configured Tx offloads mask. It is fully defined at
2164 * compile time and may be used for optimization.
2167 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
2168 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
2169 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
2170 * MLX5_TXCMP_CODE_TSO - TSO single-segment packet encountered.
2171 * Local context variables updated.
2173 static __rte_always_inline enum mlx5_txcmp_code
2174 mlx5_tx_burst_mseg(struct mlx5_txq_data *__rte_restrict txq,
2175 struct rte_mbuf **__rte_restrict pkts,
2176 unsigned int pkts_n,
2177 struct mlx5_txq_local *__rte_restrict loc,
2180 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
2181 MLX5_ASSERT(pkts_n > loc->pkts_sent);
2182 pkts += loc->pkts_sent + 1;
2183 pkts_n -= loc->pkts_sent;
2185 enum mlx5_txcmp_code ret;
2187 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
2189 * Estimate the number of free elts quickly but conservatively.
2190 * Some segments may be fully inlined and freed,
2191 * ignore this here - a precise estimation is costly.
2193 if (loc->elts_free < NB_SEGS(loc->mbuf))
2194 return MLX5_TXCMP_CODE_EXIT;
2195 if (MLX5_TXOFF_CONFIG(TSO) &&
2196 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)) {
2197 /* Proceed with multi-segment TSO. */
2198 ret = mlx5_tx_packet_multi_tso(txq, loc, olx);
2199 } else if (MLX5_TXOFF_CONFIG(INLINE)) {
2200 /* Proceed with multi-segment SEND with inlining. */
2201 ret = mlx5_tx_packet_multi_inline(txq, loc, olx);
2203 /* Proceed with multi-segment SEND w/o inlining. */
2204 ret = mlx5_tx_packet_multi_send(txq, loc, olx);
2206 if (ret == MLX5_TXCMP_CODE_EXIT)
2207 return MLX5_TXCMP_CODE_EXIT;
2208 if (ret == MLX5_TXCMP_CODE_ERROR)
2209 return MLX5_TXCMP_CODE_ERROR;
2210 /* WQE is built, go to the next packet. */
2213 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
2214 return MLX5_TXCMP_CODE_EXIT;
2215 loc->mbuf = *pkts++;
2217 rte_prefetch0(*pkts);
2218 if (likely(NB_SEGS(loc->mbuf) > 1))
2220 /* Here ends the series of multi-segment packets. */
2221 if (MLX5_TXOFF_CONFIG(TSO) &&
2222 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
2223 return MLX5_TXCMP_CODE_TSO;
2224 return MLX5_TXCMP_CODE_SINGLE;
2230 * Tx burst function for single-segment packets with TSO.
2231 * Supports all types of Tx offloads, except multi-packets.
2232 * Uses MLX5_OPCODE_TSO to build WQEs, sends one packet per WQE.
2233 * The function stops sending if it encounters a multi-segment
2234 * packet or a packet without TSO requested.
2236 * The routine is responsible for storing the processed mbuf into the elts ring
2237 * buffer and updating elts_head if inline offload is requested, due to possible
2238 * early freeing of the inlined mbufs (the pkts array cannot be stored in elts as a batch).
2241 * Pointer to TX queue structure.
2243 * Packets to transmit.
2245 * Number of packets in array.
2247 * Pointer to burst routine local context.
2249 * Configured Tx offloads mask. It is fully defined at
2250 * compile time and may be used for optimization.
2253 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
2254 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
2255 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
2256 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
2257 * Local context variables updated.
2259 static __rte_always_inline enum mlx5_txcmp_code
2260 mlx5_tx_burst_tso(struct mlx5_txq_data *__rte_restrict txq,
2261 struct rte_mbuf **__rte_restrict pkts,
2262 unsigned int pkts_n,
2263 struct mlx5_txq_local *__rte_restrict loc,
2266 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
2267 MLX5_ASSERT(pkts_n > loc->pkts_sent);
2268 pkts += loc->pkts_sent + 1;
2269 pkts_n -= loc->pkts_sent;
2271 struct mlx5_wqe_dseg *__rte_restrict dseg;
2272 struct mlx5_wqe *__rte_restrict wqe;
2273 unsigned int ds, dlen, hlen, ntcp, vlan = 0;
2276 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
2277 if (MLX5_TXOFF_CONFIG(TXPP)) {
2278 enum mlx5_txcmp_code wret;
2280 /* Generate WAIT for scheduling if requested. */
2281 wret = mlx5_tx_schedule_send(txq, loc, olx);
2282 if (wret == MLX5_TXCMP_CODE_EXIT)
2283 return MLX5_TXCMP_CODE_EXIT;
2284 if (wret == MLX5_TXCMP_CODE_ERROR)
2285 return MLX5_TXCMP_CODE_ERROR;
2287 dlen = rte_pktmbuf_data_len(loc->mbuf);
2288 if (MLX5_TXOFF_CONFIG(VLAN) &&
2289 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
2290 vlan = sizeof(struct rte_vlan_hdr);
2293 * First calculate the WQE size to check
2294 * whether we have enough space in ring buffer.
2296 hlen = loc->mbuf->l2_len + vlan +
2297 loc->mbuf->l3_len + loc->mbuf->l4_len;
2298 if (unlikely((!hlen || !loc->mbuf->tso_segsz)))
2299 return MLX5_TXCMP_CODE_ERROR;
2300 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
2301 hlen += loc->mbuf->outer_l2_len +
2302 loc->mbuf->outer_l3_len;
2303 /* Segment must contain all TSO headers. */
2304 if (unlikely(hlen > MLX5_MAX_TSO_HEADER ||
2305 hlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
2306 hlen > (dlen + vlan)))
2307 return MLX5_TXCMP_CODE_ERROR;
2309 * Check whether there are enough free WQEBBs:
2311 * - Ethernet Segment
2312 * - First Segment of inlined Ethernet data
2313 * - ... data continued ...
2314 * - Finishing Data Segment of pointer type
2316 ds = 4 + (hlen - MLX5_ESEG_MIN_INLINE_SIZE +
2317 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
2318 if (loc->wqe_free < ((ds + 3) / 4))
2319 return MLX5_TXCMP_CODE_EXIT;
2320 #ifdef MLX5_PMD_SOFT_COUNTERS
2321 /* Update sent data bytes/packets counters. */
2322 ntcp = (dlen + vlan - hlen +
2323 loc->mbuf->tso_segsz - 1) /
2324 loc->mbuf->tso_segsz;
2326 * One packet will be added for the mbuf itself at the end
2327 * of mlx5_tx_burst from the loc->pkts_sent field.
2330 txq->stats.opackets += ntcp;
2331 txq->stats.obytes += dlen + vlan + ntcp * hlen;
2334 * Build the TSO WQE:
2336 * - Ethernet Segment with hlen bytes inlined
2337 * - Data Segment of pointer type
2339 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
2340 loc->wqe_last = wqe;
2341 mlx5_tx_cseg_init(txq, loc, wqe, ds,
2342 MLX5_OPCODE_TSO, olx);
2343 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan, hlen, 1, olx);
2344 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) + hlen - vlan;
2345 dlen -= hlen - vlan;
2346 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
2348 * WQE is built, update the loop parameters
2349 * and go to the next packet.
2351 txq->wqe_ci += (ds + 3) / 4;
2352 loc->wqe_free -= (ds + 3) / 4;
2353 if (MLX5_TXOFF_CONFIG(INLINE))
2354 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
2358 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
2359 return MLX5_TXCMP_CODE_EXIT;
2360 loc->mbuf = *pkts++;
2362 rte_prefetch0(*pkts);
2363 if (MLX5_TXOFF_CONFIG(MULTI) &&
2364 unlikely(NB_SEGS(loc->mbuf) > 1))
2365 return MLX5_TXCMP_CODE_MULTI;
2366 if (likely(!(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)))
2367 return MLX5_TXCMP_CODE_SINGLE;
2368 /* Continue with the next TSO packet. */
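/*
 * Illustration of the TSO statistics above (hypothetical numbers):
 * with dlen = 4014, no VLAN, hlen = 54 (Ethernet + IPv4 + TCP) and
 * tso_segsz = 1460 the payload of 3960 bytes is split into
 * ntcp = (3960 + 1459) / 1460 = 3 segments, and obytes grows by
 * dlen + ntcp * hlen = 4014 + 3 * 54 = 4176 since the headers are
 * replicated by the NIC for every produced segment.
 */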
2374 * Analyze the packet and select the best method to send.
2377 * Pointer to TX queue structure.
2379 * Pointer to burst routine local context.
2381 * Configured Tx offloads mask. It is fully defined at
2382 * compile time and may be used for optimization.
2384 * The predefined flag whether to do the complete check for
2385 * multi-segment packets and TSO.
2388 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
2389 * MLX5_TXCMP_CODE_TSO - TSO required, use TSO/LSO.
2390 * MLX5_TXCMP_CODE_SINGLE - single-segment packet, use SEND.
2391 * MLX5_TXCMP_CODE_EMPW - single-segment packet, use MPW.
2393 static __rte_always_inline enum mlx5_txcmp_code
2394 mlx5_tx_able_to_empw(struct mlx5_txq_data *__rte_restrict txq,
2395 struct mlx5_txq_local *__rte_restrict loc,
2399 /* Check for multi-segment packet. */
2401 MLX5_TXOFF_CONFIG(MULTI) &&
2402 unlikely(NB_SEGS(loc->mbuf) > 1))
2403 return MLX5_TXCMP_CODE_MULTI;
2404 /* Check for TSO packet. */
2406 MLX5_TXOFF_CONFIG(TSO) &&
2407 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
2408 return MLX5_TXCMP_CODE_TSO;
2409 /* Check if eMPW is enabled at all. */
2410 if (!MLX5_TXOFF_CONFIG(EMPW))
2411 return MLX5_TXCMP_CODE_SINGLE;
2412 /* Check if eMPW can be engaged. */
2413 if (MLX5_TXOFF_CONFIG(VLAN) &&
2414 unlikely(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) &&
2415 (!MLX5_TXOFF_CONFIG(INLINE) ||
2416 unlikely((rte_pktmbuf_data_len(loc->mbuf) +
2417 sizeof(struct rte_vlan_hdr)) > txq->inlen_empw))) {
2419 * eMPW does not support VLAN insertion offload, we have to
2420 * inline the entire packet, but the packet is too long for inlining.
2422 return MLX5_TXCMP_CODE_SINGLE;
2424 return MLX5_TXCMP_CODE_EMPW;
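/*
 * Illustration of the VLAN/eMPW interaction above (hypothetical
 * numbers): eMPW shares one Ethernet Segment for the whole batch, so
 * HW VLAN insertion cannot be used there. A packet with the VLAN flag
 * and data_len = 1500 would need 1504 bytes inlined; if inlen_empw is
 * only 256 the packet falls back to MLX5_TXCMP_CODE_SINGLE.
 */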
2428 * Check whether the next packet attributes match the eMPW batch ones.
2429 * In addition, for legacy MPW the packet length is checked as well.
2432 * Pointer to TX queue structure.
2434 * Pointer to Ethernet Segment of eMPW batch.
2436 * Pointer to burst routine local context.
2438 * Length of previous packet in MPW descriptor.
2440 * Configured Tx offloads mask. It is fully defined at
2441 * compile time and may be used for optimization.
2444 * true - packet matches the eMPW batch attributes.
2445 * false - no match, eMPW should be restarted.
2447 static __rte_always_inline bool
2448 mlx5_tx_match_empw(struct mlx5_txq_data *__rte_restrict txq,
2449 struct mlx5_wqe_eseg *__rte_restrict es,
2450 struct mlx5_txq_local *__rte_restrict loc,
2454 uint8_t swp_flags = 0;
2456 /* Compare the checksum flags, if any. */
2457 if (MLX5_TXOFF_CONFIG(CSUM) &&
2458 txq_ol_cksum_to_cs(loc->mbuf) != es->cs_flags)
2460 /* Compare the Software Parser offsets and flags. */
2461 if (MLX5_TXOFF_CONFIG(SWP) &&
2462 (es->swp_offs != txq_mbuf_to_swp(loc, &swp_flags, olx) ||
2463 es->swp_flags != swp_flags))
2465 /* Fill metadata field if needed. */
2466 if (MLX5_TXOFF_CONFIG(METADATA) &&
2467 es->metadata != (loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2468 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0))
2470 /* Legacy MPW can send packets with the same length only. */
2471 if (MLX5_TXOFF_CONFIG(MPW) &&
2472 dlen != rte_pktmbuf_data_len(loc->mbuf))
2474 /* There must be no VLAN packets in eMPW loop. */
2475 if (MLX5_TXOFF_CONFIG(VLAN))
2476 MLX5_ASSERT(!(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT));
2477 /* Check if the scheduling is requested. */
2478 if (MLX5_TXOFF_CONFIG(TXPP) &&
2479 loc->mbuf->ol_flags & txq->ts_mask)
2485 * Update send loop variables and WQE for eMPW loop without data inlining.
2486 * Number of Data Segments is equal to the number of sent packets.
2489 * Pointer to TX queue structure.
2491 * Pointer to burst routine local context.
2493 * Number of packets/Data Segments/Packets.
2495 * Accumulated statistics, bytes sent.
2497 * Configured Tx offloads mask. It is fully defined at
2498 * compile time and may be used for optimization.
2501 * No return value; the routine updates the Tx queue and
2502 * burst local context state in place.
2504 static __rte_always_inline void
2505 mlx5_tx_sdone_empw(struct mlx5_txq_data *__rte_restrict txq,
2506 struct mlx5_txq_local *__rte_restrict loc,
2509 unsigned int olx __rte_unused)
2511 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
2512 #ifdef MLX5_PMD_SOFT_COUNTERS
2513 /* Update sent data bytes counter. */
2514 txq->stats.obytes += slen;
2518 loc->elts_free -= ds;
2519 loc->pkts_sent += ds;
2521 loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
2522 txq->wqe_ci += (ds + 3) / 4;
2523 loc->wqe_free -= (ds + 3) / 4;
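/*
 * Illustration of the non-inline eMPW accounting (hypothetical
 * numbers): a batch of 4 packets produces 4 pointer Data Segments;
 * together with the Control and Ethernet segments the WQE holds
 * 6 16-byte slots, i.e. (2 + 4 + 3) / 4 = 2 WQEBBs, which matches the
 * advance applied by the caller after closing the batch.
 */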
2527 * Update send loop variables and WQE for eMPW loop with data inlining.
2528 * Takes the size of the descriptors and data pushed to the WQE.
2531 * Pointer to TX queue structure.
2533 * Pointer to burst routine local context.
2535 * Total size of descriptor/data in bytes.
2537 * Accumulated statistics, data bytes sent.
2539 * The base WQE for the eMPW/MPW descriptor.
2541 * Configured Tx offloads mask. It is fully defined at
2542 * compile time and may be used for optimization.
2545 * No return value; the routine updates the Tx queue and
2546 * burst local context state in place.
2548 static __rte_always_inline void
2549 mlx5_tx_idone_empw(struct mlx5_txq_data *__rte_restrict txq,
2550 struct mlx5_txq_local *__rte_restrict loc,
2553 struct mlx5_wqe *__rte_restrict wqem,
2554 unsigned int olx __rte_unused)
2556 struct mlx5_wqe_dseg *dseg = &wqem->dseg[0];
2558 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
2559 #ifdef MLX5_PMD_SOFT_COUNTERS
2560 /* Update sent data bytes counter. */
2561 txq->stats.obytes += slen;
2565 if (MLX5_TXOFF_CONFIG(MPW) && dseg->bcount == RTE_BE32(0)) {
2567 * If the legacy MPW session contains inline packets
2568 * we should set the length of the single inline data segment
2569 * and align the total length to the segment size.
2571 MLX5_ASSERT(len > sizeof(dseg->bcount));
2572 dseg->bcount = rte_cpu_to_be_32((len - sizeof(dseg->bcount)) |
2573 MLX5_ETH_WQE_DATA_INLINE);
2574 len = (len + MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE + 2;
2577 * The session is not a legacy MPW one or it contains
2578 * data buffer pointer segments.
2580 MLX5_ASSERT((len % MLX5_WSEG_SIZE) == 0);
2581 len = len / MLX5_WSEG_SIZE + 2;
2583 wqem->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | len);
2584 txq->wqe_ci += (len + 3) / 4;
2585 loc->wqe_free -= (len + 3) / 4;
2586 loc->wqe_last = wqem;
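/*
 * Illustration of the legacy MPW inline session closing above
 * (hypothetical numbers): 120 bytes of packet data preceded by the
 * 4-byte bcount field give len = 124, bcount is set to 120 with the
 * MLX5_ETH_WQE_DATA_INLINE bit, and the session is accounted as
 * (124 + 15) / 16 + 2 = 10 slots, i.e. (10 + 3) / 4 = 3 WQEBBs.
 */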
2590 * The set of Tx burst functions for single-segment packets without TSO
2591 * and with Multi-Packet Writing feature support.
2592 * Supports all types of Tx offloads, except multi-packets and TSO.
2594 * Uses MLX5_OPCODE_EMPW to build WQEs if possible and sends as many packets
2595 * per WQE as it can. If eMPW is not configured or the packet can not be sent with
2596 * eMPW (VLAN insertion) the ordinary SEND opcode is used and only one packet
2597 * per WQE is sent.
2599 * The functions stop sending if they encounter a multi-segment packet or a packet
2600 * with TSO requested.
2602 * The routines are responsible for storing the processed mbuf into the elts ring
2603 * buffer and updating elts_head if the inlining offload is requested. Otherwise
2604 * copying the mbufs to elts can be postponed and completed at the end of the burst routine.
2607 * Pointer to TX queue structure.
2609 * Packets to transmit.
2611 * Number of packets in array.
2613 * Pointer to burst routine local context.
2615 * Configured Tx offloads mask. It is fully defined at
2616 * compile time and may be used for optimization.
2619 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
2620 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
2621 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
2622 * MLX5_TXCMP_CODE_TSO - TSO packet encountered.
2623 * MLX5_TXCMP_CODE_SINGLE - used inside functions set.
2624 * MLX5_TXCMP_CODE_EMPW - used inside functions set.
2626 * Local context variables updated.
2629 * The routine sends packets with MLX5_OPCODE_EMPW
2630 * without inlining, this is a dedicated optimized branch.
2631 * No VLAN insertion is supported.
2633 static __rte_always_inline enum mlx5_txcmp_code
2634 mlx5_tx_burst_empw_simple(struct mlx5_txq_data *__rte_restrict txq,
2635 struct rte_mbuf **__rte_restrict pkts,
2636 unsigned int pkts_n,
2637 struct mlx5_txq_local *__rte_restrict loc,
2641 * This subroutine is part of mlx5_tx_burst_single() and sends
2642 * single-segment packets with the eMPW opcode without data inlining.
2644 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
2645 MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
2646 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
2647 MLX5_ASSERT(pkts_n > loc->pkts_sent);
2648 pkts += loc->pkts_sent + 1;
2649 pkts_n -= loc->pkts_sent;
2651 struct mlx5_wqe_dseg *__rte_restrict dseg;
2652 struct mlx5_wqe_eseg *__rte_restrict eseg;
2653 enum mlx5_txcmp_code ret;
2654 unsigned int part, loop;
2655 unsigned int slen = 0;
2658 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
2659 if (MLX5_TXOFF_CONFIG(TXPP)) {
2660 enum mlx5_txcmp_code wret;
2662 /* Generate WAIT for scheduling if requested. */
2663 wret = mlx5_tx_schedule_send(txq, loc, olx);
2664 if (wret == MLX5_TXCMP_CODE_EXIT)
2665 return MLX5_TXCMP_CODE_EXIT;
2666 if (wret == MLX5_TXCMP_CODE_ERROR)
2667 return MLX5_TXCMP_CODE_ERROR;
2669 part = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
2670 MLX5_MPW_MAX_PACKETS :
2671 MLX5_EMPW_MAX_PACKETS);
2672 if (unlikely(loc->elts_free < part)) {
2673 /* We do not have enough elts to save all mbufs. */
2674 if (unlikely(loc->elts_free < MLX5_EMPW_MIN_PACKETS))
2675 return MLX5_TXCMP_CODE_EXIT;
2676 /* But we are still able to send at least a minimal eMPW. */
2677 part = loc->elts_free;
2679 /* Check whether we have enough WQEs */
2680 if (unlikely(loc->wqe_free < ((2 + part + 3) / 4))) {
2681 if (unlikely(loc->wqe_free <
2682 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
2683 return MLX5_TXCMP_CODE_EXIT;
2684 part = (loc->wqe_free * 4) - 2;
2686 if (likely(part > 1))
2687 rte_prefetch0(*pkts);
2688 loc->wqe_last = txq->wqes + (txq->wqe_ci & txq->wqe_m);
2690 * Build eMPW title WQEBB:
2691 * - Control Segment, eMPW opcode
2692 * - Ethernet Segment, no inline
2694 mlx5_tx_cseg_init(txq, loc, loc->wqe_last, part + 2,
2695 MLX5_OPCODE_ENHANCED_MPSW, olx);
2696 mlx5_tx_eseg_none(txq, loc, loc->wqe_last,
2697 olx & ~MLX5_TXOFF_CONFIG_VLAN);
2698 eseg = &loc->wqe_last->eseg;
2699 dseg = &loc->wqe_last->dseg[0];
2701 /* Store the packet length for legacy MPW. */
2702 if (MLX5_TXOFF_CONFIG(MPW))
2703 eseg->mss = rte_cpu_to_be_16
2704 (rte_pktmbuf_data_len(loc->mbuf));
2706 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
2707 #ifdef MLX5_PMD_SOFT_COUNTERS
2708 /* Update sent data bytes counter. */
2713 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
2715 if (unlikely(--loop == 0))
2717 loc->mbuf = *pkts++;
2718 if (likely(loop > 1))
2719 rte_prefetch0(*pkts);
2720 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
2722 * Unroll the completion code to avoid
2723 * returning a variable value - it results in
2724 * unoptimized sequential checking in the caller.
2726 if (ret == MLX5_TXCMP_CODE_MULTI) {
2728 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
2729 if (unlikely(!loc->elts_free ||
2731 return MLX5_TXCMP_CODE_EXIT;
2732 return MLX5_TXCMP_CODE_MULTI;
2734 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
2735 if (ret == MLX5_TXCMP_CODE_TSO) {
2737 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
2738 if (unlikely(!loc->elts_free ||
2740 return MLX5_TXCMP_CODE_EXIT;
2741 return MLX5_TXCMP_CODE_TSO;
2743 if (ret == MLX5_TXCMP_CODE_SINGLE) {
2745 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
2746 if (unlikely(!loc->elts_free ||
2748 return MLX5_TXCMP_CODE_EXIT;
2749 return MLX5_TXCMP_CODE_SINGLE;
2751 if (ret != MLX5_TXCMP_CODE_EMPW) {
2754 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
2755 return MLX5_TXCMP_CODE_ERROR;
2758 * Check whether packet parameters coincide
2759 * within assumed eMPW batch:
2760 * - checksum settings
2762 * - software parser settings
2763 * - packets length (legacy MPW only)
2764 * - scheduling is not required
2766 if (!mlx5_tx_match_empw(txq, eseg, loc, dlen, olx)) {
2769 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
2770 if (unlikely(!loc->elts_free ||
2772 return MLX5_TXCMP_CODE_EXIT;
2776 /* Packet attributes match, continue the same eMPW. */
2778 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
2779 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
2781 /* eMPW is built successfully, update loop parameters. */
2783 MLX5_ASSERT(pkts_n >= part);
2784 #ifdef MLX5_PMD_SOFT_COUNTERS
2785 /* Update sent data bytes counter. */
2786 txq->stats.obytes += slen;
2788 loc->elts_free -= part;
2789 loc->pkts_sent += part;
2790 txq->wqe_ci += (2 + part + 3) / 4;
2791 loc->wqe_free -= (2 + part + 3) / 4;
2793 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
2794 return MLX5_TXCMP_CODE_EXIT;
2795 loc->mbuf = *pkts++;
2796 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
2797 if (unlikely(ret != MLX5_TXCMP_CODE_EMPW))
2799 /* Continue sending eMPW batches. */
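/*
 * Illustration of the batch sizing above (hypothetical numbers): with
 * only wqe_free = 3 WQEBBs left, the eMPW batch is limited to
 * part = 3 * 4 - 2 = 10 packets, so the 10 pointer Data Segments plus
 * the Control and Ethernet segments exactly fill 12 slots, i.e. three
 * 64-byte WQEBBs.
 */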
2805 * The routine sends packets with MLX5_OPCODE_EMPW
2806 * with inlining, optionally supports VLAN insertion.
2808 static __rte_always_inline enum mlx5_txcmp_code
2809 mlx5_tx_burst_empw_inline(struct mlx5_txq_data *__rte_restrict txq,
2810 struct rte_mbuf **__rte_restrict pkts,
2811 unsigned int pkts_n,
2812 struct mlx5_txq_local *__rte_restrict loc,
2816 * This subroutine is part of mlx5_tx_burst_single() and sends
2817 * single-segment packets with the eMPW opcode with data inlining.
2819 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
2820 MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
2821 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
2822 MLX5_ASSERT(pkts_n > loc->pkts_sent);
2823 pkts += loc->pkts_sent + 1;
2824 pkts_n -= loc->pkts_sent;
2826 struct mlx5_wqe_dseg *__rte_restrict dseg;
2827 struct mlx5_wqe *__rte_restrict wqem;
2828 enum mlx5_txcmp_code ret;
2829 unsigned int room, part, nlim;
2830 unsigned int slen = 0;
2832 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
2833 if (MLX5_TXOFF_CONFIG(TXPP)) {
2834 enum mlx5_txcmp_code wret;
2836 /* Generate WAIT for scheduling if requested. */
2837 wret = mlx5_tx_schedule_send(txq, loc, olx);
2838 if (wret == MLX5_TXCMP_CODE_EXIT)
2839 return MLX5_TXCMP_CODE_EXIT;
2840 if (wret == MLX5_TXCMP_CODE_ERROR)
2841 return MLX5_TXCMP_CODE_ERROR;
2844 * Limit the number of packets in one WQE
2845 * to improve CQE generation latency.
2847 nlim = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
2848 MLX5_MPW_INLINE_MAX_PACKETS :
2849 MLX5_EMPW_MAX_PACKETS);
2850 /* Check whether we have the minimal amount of WQEs. */
2851 if (unlikely(loc->wqe_free <
2852 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
2853 return MLX5_TXCMP_CODE_EXIT;
2854 if (likely(pkts_n > 1))
2855 rte_prefetch0(*pkts);
2856 wqem = txq->wqes + (txq->wqe_ci & txq->wqe_m);
2858 * Build eMPW title WQEBB:
2859 * - Control Segment, eMPW opcode, zero DS
2860 * - Ethernet Segment, no inline
2862 mlx5_tx_cseg_init(txq, loc, wqem, 0,
2863 MLX5_OPCODE_ENHANCED_MPSW, olx);
2864 mlx5_tx_eseg_none(txq, loc, wqem,
2865 olx & ~MLX5_TXOFF_CONFIG_VLAN);
2866 dseg = &wqem->dseg[0];
2867 /* Store the packet length for legacy MPW. */
2868 if (MLX5_TXOFF_CONFIG(MPW))
2869 wqem->eseg.mss = rte_cpu_to_be_16
2870 (rte_pktmbuf_data_len(loc->mbuf));
2871 room = RTE_MIN(MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE,
2872 loc->wqe_free) * MLX5_WQE_SIZE -
2873 MLX5_WQE_CSEG_SIZE -
2875 /* Limit the room for legacy MPW sessions for performance. */
2876 if (MLX5_TXOFF_CONFIG(MPW))
2877 room = RTE_MIN(room,
2878 RTE_MAX(txq->inlen_empw +
2879 sizeof(dseg->bcount) +
2880 (MLX5_TXOFF_CONFIG(VLAN) ?
2881 sizeof(struct rte_vlan_hdr) : 0),
2882 MLX5_MPW_INLINE_MAX_PACKETS *
2883 MLX5_WQE_DSEG_SIZE));
2884 /* Build WQE till we have space, packets and resources. */
2887 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
2888 uint8_t *dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
2891 MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
2892 MLX5_ASSERT((room % MLX5_WQE_DSEG_SIZE) == 0);
2893 MLX5_ASSERT((uintptr_t)dseg < (uintptr_t)txq->wqes_end);
2895 * Some Tx offloads may cause an error if the packet is not
2896 * long enough, check against the assumed minimal length.
2898 if (unlikely(dlen <= MLX5_ESEG_MIN_INLINE_SIZE)) {
2900 if (unlikely(!part))
2901 return MLX5_TXCMP_CODE_ERROR;
2903 * We have some successfully built
2904 * packet Data Segments to send.
2906 mlx5_tx_idone_empw(txq, loc, part,
2908 return MLX5_TXCMP_CODE_ERROR;
2910 /* Inline or not inline - that's the Question. */
2911 if (dlen > txq->inlen_empw ||
2912 loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE)
2914 if (MLX5_TXOFF_CONFIG(MPW)) {
2915 if (dlen > txq->inlen_send)
2919 /* Open new inline MPW session. */
2920 tlen += sizeof(dseg->bcount);
2921 dseg->bcount = RTE_BE32(0);
2923 (dseg, sizeof(dseg->bcount));
2926 * Pointer and inline descriptors must not be
2927 * intermixed within a legacy MPW session.
2929 if (wqem->dseg[0].bcount)
2933 tlen = sizeof(dseg->bcount) + dlen;
2935 /* Inline entire packet, optional VLAN insertion. */
2936 if (MLX5_TXOFF_CONFIG(VLAN) &&
2937 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
2939 * The packet length has already been checked in
2940 * mlx5_tx_able_to_empw() and the packet is
2941 * guaranteed to fit into the inline length.
2944 sizeof(struct rte_vlan_hdr)) <=
2946 tlen += sizeof(struct rte_vlan_hdr);
2949 dseg = mlx5_tx_dseg_vlan(txq, loc, dseg,
2951 #ifdef MLX5_PMD_SOFT_COUNTERS
2952 /* Update sent data bytes counter. */
2953 slen += sizeof(struct rte_vlan_hdr);
2958 dseg = mlx5_tx_dseg_empw(txq, loc, dseg,
2961 if (!MLX5_TXOFF_CONFIG(MPW))
2962 tlen = RTE_ALIGN(tlen, MLX5_WSEG_SIZE);
2963 MLX5_ASSERT(room >= tlen);
2966 * Packet data are completely inlined,
2967 * we can try to free the packet.
2969 if (likely(loc->pkts_sent == loc->mbuf_free)) {
2971 * All the packets from the beginning of the burst
2972 * are inlined, we can free the mbufs directly
2973 * from the original array on tx_burst() exit.
2979 * In order not to call rte_pktmbuf_free_seg() here,
2980 * in the innermost loop (which might be very
2981 * expensive), we just save the mbuf in elts.
2983 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
2988 * Pointer and inline descriptors must not be
2989 * intermixed within a legacy MPW session.
2991 if (MLX5_TXOFF_CONFIG(MPW) &&
2993 wqem->dseg[0].bcount == RTE_BE32(0))
2996 * Non-inlinable VLAN packets are
2997 * processed outside of this routine.
2999 MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
3000 if (MLX5_TXOFF_CONFIG(VLAN))
3001 MLX5_ASSERT(!(loc->mbuf->ol_flags &
3003 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
3004 /* We have to store mbuf in elts.*/
3005 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3007 room -= MLX5_WQE_DSEG_SIZE;
3008 /* Ring buffer wraparound is checked at the loop end. */
3011 #ifdef MLX5_PMD_SOFT_COUNTERS
3012 /* Update sent data bytes counter. */
3017 if (unlikely(!pkts_n || !loc->elts_free)) {
3019 * We have no resources/packets to
3020 * continue building descriptors.
3023 mlx5_tx_idone_empw(txq, loc, part,
3025 return MLX5_TXCMP_CODE_EXIT;
3027 loc->mbuf = *pkts++;
3028 if (likely(pkts_n > 1))
3029 rte_prefetch0(*pkts);
3030 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
3032 * Unroll the completion code to avoid
3033 * returning a variable value - it results in
3034 * unoptimized sequential checking in the caller.
3036 if (ret == MLX5_TXCMP_CODE_MULTI) {
3038 mlx5_tx_idone_empw(txq, loc, part,
3040 if (unlikely(!loc->elts_free ||
3042 return MLX5_TXCMP_CODE_EXIT;
3043 return MLX5_TXCMP_CODE_MULTI;
3045 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
3046 if (ret == MLX5_TXCMP_CODE_TSO) {
3048 mlx5_tx_idone_empw(txq, loc, part,
3050 if (unlikely(!loc->elts_free ||
3052 return MLX5_TXCMP_CODE_EXIT;
3053 return MLX5_TXCMP_CODE_TSO;
3055 if (ret == MLX5_TXCMP_CODE_SINGLE) {
3057 mlx5_tx_idone_empw(txq, loc, part,
3059 if (unlikely(!loc->elts_free ||
3061 return MLX5_TXCMP_CODE_EXIT;
3062 return MLX5_TXCMP_CODE_SINGLE;
3064 if (ret != MLX5_TXCMP_CODE_EMPW) {
3067 mlx5_tx_idone_empw(txq, loc, part,
3069 return MLX5_TXCMP_CODE_ERROR;
3071 /* Check if we have minimal room left. */
3073 if (unlikely(!nlim || room < MLX5_WQE_DSEG_SIZE))
3076 * Check whether packet parameters coincide
3077 * within assumed eMPW batch:
3078 * - checksum settings
3080 * - software parser settings
3081 * - packets length (legacy MPW only)
3082 * - scheduling is not required
3084 if (!mlx5_tx_match_empw(txq, &wqem->eseg,
3087 /* Packet attributes match, continue the same eMPW. */
3088 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3089 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3092 * We get here to close an existing eMPW
3093 * session and start a new one.
3095 MLX5_ASSERT(pkts_n);
3097 if (unlikely(!part))
3098 return MLX5_TXCMP_CODE_EXIT;
3099 mlx5_tx_idone_empw(txq, loc, part, slen, wqem, olx);
3100 if (unlikely(!loc->elts_free ||
3102 return MLX5_TXCMP_CODE_EXIT;
3103 /* Continue the loop with new eMPW session. */
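/*
 * Illustration of the room computation above (hypothetical numbers,
 * assuming wqe_free = 4 is the limiting term): the batch may use
 * 4 * 64 - 16 - 16 = 224 bytes after the Control and Ethernet
 * segments, i.e. up to 14 pointer Data Segments or an equivalent mix
 * of pointer and inline descriptors.
 */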
3109 * The routine sends packets with ordinary MLX5_OPCODE_SEND.
3110 * Data inlining and VLAN insertion are supported.
3112 static __rte_always_inline enum mlx5_txcmp_code
3113 mlx5_tx_burst_single_send(struct mlx5_txq_data *__rte_restrict txq,
3114 struct rte_mbuf **__rte_restrict pkts,
3115 unsigned int pkts_n,
3116 struct mlx5_txq_local *__rte_restrict loc,
3120 * This subroutine is part of mlx5_tx_burst_single()
3121 * and sends single-segment packets with the SEND opcode.
3123 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3124 MLX5_ASSERT(pkts_n > loc->pkts_sent);
3125 pkts += loc->pkts_sent + 1;
3126 pkts_n -= loc->pkts_sent;
3128 struct mlx5_wqe *__rte_restrict wqe;
3129 enum mlx5_txcmp_code ret;
3131 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
3132 if (MLX5_TXOFF_CONFIG(TXPP)) {
3133 enum mlx5_txcmp_code wret;
3135 /* Generate WAIT for scheduling if requested. */
3136 wret = mlx5_tx_schedule_send(txq, loc, olx);
3137 if (wret == MLX5_TXCMP_CODE_EXIT)
3138 return MLX5_TXCMP_CODE_EXIT;
3139 if (wret == MLX5_TXCMP_CODE_ERROR)
3140 return MLX5_TXCMP_CODE_ERROR;
3142 if (MLX5_TXOFF_CONFIG(INLINE)) {
3143 unsigned int inlen, vlan = 0;
3145 inlen = rte_pktmbuf_data_len(loc->mbuf);
3146 if (MLX5_TXOFF_CONFIG(VLAN) &&
3147 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
3148 vlan = sizeof(struct rte_vlan_hdr);
3152 * If inlining is enabled at configuration time
3153 * the limit must not be less than the minimal size.
3154 * Otherwise we would have to do an extra check for the data
3155 * size to avoid crashes due to length overflow.
3157 MLX5_ASSERT(txq->inlen_send >=
3158 MLX5_ESEG_MIN_INLINE_SIZE);
3159 if (inlen <= txq->inlen_send) {
3160 unsigned int seg_n, wqe_n;
3162 rte_prefetch0(rte_pktmbuf_mtod
3163 (loc->mbuf, uint8_t *));
3164 /* Check against minimal length. */
3165 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
3166 return MLX5_TXCMP_CODE_ERROR;
3167 if (loc->mbuf->ol_flags &
3168 PKT_TX_DYNF_NOINLINE) {
3170 * The hint flag not to inline packet
3171 * data is set. Check whether we can follow the hint.
3174 if ((!MLX5_TXOFF_CONFIG(EMPW) &&
3176 (MLX5_TXOFF_CONFIG(MPW) &&
3178 if (inlen <= txq->inlen_send)
3181 * The hardware requires the
3182 * minimal inline data header.
3184 goto single_min_inline;
3186 if (MLX5_TXOFF_CONFIG(VLAN) &&
3187 vlan && !txq->vlan_en) {
3189 * We must insert the VLAN tag
3190 * by software.
3192 goto single_part_inline;
3194 goto single_no_inline;
3198 * Completely inlined packet data WQE:
3199 * - Control Segment, SEND opcode
3200 * - Ethernet Segment, no VLAN insertion
3201 * - Data inlined, VLAN optionally inserted
3202 * - Alignment to MLX5_WSEG_SIZE
3203 * Have to estimate the number of WQEBBs needed.
3205 seg_n = (inlen + 3 * MLX5_WSEG_SIZE -
3206 MLX5_ESEG_MIN_INLINE_SIZE +
3207 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3208 /* Check if there are enough WQEBBs. */
3209 wqe_n = (seg_n + 3) / 4;
3210 if (wqe_n > loc->wqe_free)
3211 return MLX5_TXCMP_CODE_EXIT;
3212 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3213 loc->wqe_last = wqe;
3214 mlx5_tx_cseg_init(txq, loc, wqe, seg_n,
3215 MLX5_OPCODE_SEND, olx);
3216 mlx5_tx_eseg_data(txq, loc, wqe,
3217 vlan, inlen, 0, olx);
3218 txq->wqe_ci += wqe_n;
3219 loc->wqe_free -= wqe_n;
3221 * Packet data are completely inlined,
3222 * free the packet immediately.
3224 rte_pktmbuf_free_seg(loc->mbuf);
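/*
 * Illustration of the WQEBB estimate above (hypothetical numbers,
 * assuming MLX5_ESEG_MIN_INLINE_SIZE is 18 and MLX5_WSEG_SIZE is 16):
 * inlen = 82 gives seg_n = (82 + 48 - 18 + 15) / 16 = 7 slots and
 * wqe_n = (7 + 3) / 4 = 2 WQEBBs for the fully inlined SEND WQE.
 */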
3225 } else if ((!MLX5_TXOFF_CONFIG(EMPW) ||
3226 MLX5_TXOFF_CONFIG(MPW)) &&
3229 * If minimal inlining is requested the eMPW
3230 * feature should be disabled because data is
3231 * inlined into the Ethernet Segment, which can
3232 * not contain inlined data for eMPW since the
3233 * segment is shared by all packets.
3235 struct mlx5_wqe_dseg *__rte_restrict dseg;
3240 * The inline-mode settings require
3241 * inlining the specified amount of
3242 * data bytes into the Ethernet Segment.
3243 * We should check the free space in
3244 * the WQE ring buffer to inline only partially.
3247 MLX5_ASSERT(txq->inlen_send >= txq->inlen_mode);
3248 MLX5_ASSERT(inlen > txq->inlen_mode);
3249 MLX5_ASSERT(txq->inlen_mode >=
3250 MLX5_ESEG_MIN_INLINE_SIZE);
3252 * Check whether there are enough free WQEBBs:
3254 * - Ethernet Segment
3255 * - First Segment of inlined Ethernet data
3256 * - ... data continued ...
3257 * - Finishing Data Segment of pointer type
3259 ds = (MLX5_WQE_CSEG_SIZE +
3260 MLX5_WQE_ESEG_SIZE +
3261 MLX5_WQE_DSEG_SIZE +
3263 MLX5_ESEG_MIN_INLINE_SIZE +
3264 MLX5_WQE_DSEG_SIZE +
3265 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3266 if (loc->wqe_free < ((ds + 3) / 4))
3267 return MLX5_TXCMP_CODE_EXIT;
3269 * Build the ordinary SEND WQE:
3271 * - Ethernet Segment, inline inlen_mode bytes
3272 * - Data Segment of pointer type
3274 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3275 loc->wqe_last = wqe;
3276 mlx5_tx_cseg_init(txq, loc, wqe, ds,
3277 MLX5_OPCODE_SEND, olx);
3278 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan,
3281 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
3282 txq->inlen_mode - vlan;
3283 inlen -= txq->inlen_mode;
3284 mlx5_tx_dseg_ptr(txq, loc, dseg,
3287 * WQE is built, update the loop parameters
3288 * and go to the next packet.
3290 txq->wqe_ci += (ds + 3) / 4;
3291 loc->wqe_free -= (ds + 3) / 4;
3292 /* We have to store mbuf in elts.*/
3293 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
3294 txq->elts[txq->elts_head++ & txq->elts_m] =
3302 * Partially inlined packet data WQE, we have
3303 * some space in the title WQEBB, we can fill it
3304 * with some packet data. It takes one WQEBB,
3305 * which is available, so no extra space check is needed:
3306 * - Control Segment, SEND opcode
3307 * - Ethernet Segment, no VLAN insertion
3308 * - MLX5_ESEG_MIN_INLINE_SIZE bytes of Data
3309 * - Data Segment, pointer type
3311 * We also get here if VLAN insertion is not
3312 * supported by HW but inlining is enabled.
3315 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3316 loc->wqe_last = wqe;
3317 mlx5_tx_cseg_init(txq, loc, wqe, 4,
3318 MLX5_OPCODE_SEND, olx);
3319 mlx5_tx_eseg_dmin(txq, loc, wqe, vlan, olx);
3320 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
3321 MLX5_ESEG_MIN_INLINE_SIZE - vlan;
3323 * The length check is performed above, by
3324 * comparing with txq->inlen_send. We should
3325 * not get overflow here.
3327 MLX5_ASSERT(inlen > MLX5_ESEG_MIN_INLINE_SIZE);
3328 dlen = inlen - MLX5_ESEG_MIN_INLINE_SIZE;
3329 mlx5_tx_dseg_ptr(txq, loc, &wqe->dseg[1],
3333 /* We have to store mbuf in elts.*/
3334 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
3335 txq->elts[txq->elts_head++ & txq->elts_m] =
3339 #ifdef MLX5_PMD_SOFT_COUNTERS
3340 /* Update sent data bytes counter. */
3341 txq->stats.obytes += vlan +
3342 rte_pktmbuf_data_len(loc->mbuf);
3346 * No inlining at all - it means that saving CPU cycles
3347 * was prioritized at configuration time, so we should not
3348 * copy any packet data to the WQE.
3350 * SEND WQE, one WQEBB:
3351 * - Control Segment, SEND opcode
3352 * - Ethernet Segment, optional VLAN, no inline
3353 * - Data Segment, pointer type
3356 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3357 loc->wqe_last = wqe;
3358 mlx5_tx_cseg_init(txq, loc, wqe, 3,
3359 MLX5_OPCODE_SEND, olx);
3360 mlx5_tx_eseg_none(txq, loc, wqe, olx);
3362 (txq, loc, &wqe->dseg[0],
3363 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3364 rte_pktmbuf_data_len(loc->mbuf), olx);
3368 * We should not store the mbuf pointer in elts
3369 * if no inlining is configured, this is done
3370 * by the calling routine in a batch copy.
3372 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
3374 #ifdef MLX5_PMD_SOFT_COUNTERS
3375 /* Update sent data bytes counter. */
3376 txq->stats.obytes += rte_pktmbuf_data_len(loc->mbuf);
3377 if (MLX5_TXOFF_CONFIG(VLAN) &&
3378 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3379 txq->stats.obytes +=
3380 sizeof(struct rte_vlan_hdr);
3385 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3386 return MLX5_TXCMP_CODE_EXIT;
3387 loc->mbuf = *pkts++;
3389 rte_prefetch0(*pkts);
3390 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
3391 if (unlikely(ret != MLX5_TXCMP_CODE_SINGLE))
3397 static __rte_always_inline enum mlx5_txcmp_code
3398 mlx5_tx_burst_single(struct mlx5_txq_data *__rte_restrict txq,
3399 struct rte_mbuf **__rte_restrict pkts,
3400 unsigned int pkts_n,
3401 struct mlx5_txq_local *__rte_restrict loc,
3404 enum mlx5_txcmp_code ret;
3406 ret = mlx5_tx_able_to_empw(txq, loc, olx, false);
3407 if (ret == MLX5_TXCMP_CODE_SINGLE)
3409 MLX5_ASSERT(ret == MLX5_TXCMP_CODE_EMPW);
3411 /* Optimize for inline/no inline eMPW send. */
3412 ret = (MLX5_TXOFF_CONFIG(INLINE)) ?
3413 mlx5_tx_burst_empw_inline
3414 (txq, pkts, pkts_n, loc, olx) :
3415 mlx5_tx_burst_empw_simple
3416 (txq, pkts, pkts_n, loc, olx);
3417 if (ret != MLX5_TXCMP_CODE_SINGLE)
3419 /* The resources to send one packet should remain. */
3420 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3422 ret = mlx5_tx_burst_single_send(txq, pkts, pkts_n, loc, olx);
3423 MLX5_ASSERT(ret != MLX5_TXCMP_CODE_SINGLE);
3424 if (ret != MLX5_TXCMP_CODE_EMPW)
3426 /* The resources to send one packet should remain. */
3427 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3432 * DPDK Tx callback template. This is the configured template used to generate
3433 * routines optimized for the specified offload setup.
3434 * One of these generated functions is chosen at SQ configuration time.
3437 * Generic pointer to TX queue structure.
3439 * Packets to transmit.
3441 * Number of packets in array.
3443 * Configured offloads mask, representing the bits of MLX5_TXOFF_CONFIG_xxx
3444 * values. Should be static to take advantage of compile-time static configuration.
3448 * Number of packets successfully transmitted (<= pkts_n).
3450 static __rte_always_inline uint16_t
3451 mlx5_tx_burst_tmpl(struct mlx5_txq_data *__rte_restrict txq,
3452 struct rte_mbuf **__rte_restrict pkts,
3456 struct mlx5_txq_local loc;
3457 enum mlx5_txcmp_code ret;
3460 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
3461 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
3462 if (unlikely(!pkts_n))
3464 if (MLX5_TXOFF_CONFIG(INLINE))
3468 loc.wqe_last = NULL;
3471 loc.pkts_loop = loc.pkts_sent;
3473 * Check if there are some CQEs, if any:
3474 * - process encountered errors
3475 * - process the completed WQEs
3476 * - free related mbufs
3477 * - doorbell the NIC about processed CQEs
3479 rte_prefetch0(*(pkts + loc.pkts_sent));
3480 mlx5_tx_handle_completion(txq, olx);
3482 * Calculate the number of available resources - elts and WQEs.
3483 * There are two possible different scenarios:
3484 * - no data inlining into WQEs, one WQEBB may contain up to
3485 * four packets, in this case elts become the scarce resource
3486 * - data inlining into WQEs, one packet may require multiple
3487 * WQEBBs, the WQEs become the limiting factor.
3489 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
3490 loc.elts_free = txq->elts_s -
3491 (uint16_t)(txq->elts_head - txq->elts_tail);
3492 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
3493 loc.wqe_free = txq->wqe_s -
3494 (uint16_t)(txq->wqe_ci - txq->wqe_pi);
3495 if (unlikely(!loc.elts_free || !loc.wqe_free))
3499 * Fetch the packet from the array. Usually this is the first
3500 * packet in a series of multi/single segment packets.
3502 loc.mbuf = *(pkts + loc.pkts_sent);
3503 /* Dedicated branch for multi-segment packets. */
3504 if (MLX5_TXOFF_CONFIG(MULTI) &&
3505 unlikely(NB_SEGS(loc.mbuf) > 1)) {
3507 * Multi-segment packet encountered.
3508 * Hardware is able to process it only
3509 * with SEND/TSO opcodes, one packet
3510 * per WQE, do it in a dedicated routine.
3513 MLX5_ASSERT(loc.pkts_sent >= loc.pkts_copy);
3514 part = loc.pkts_sent - loc.pkts_copy;
3515 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
3517 * There are some single-segment mbufs not
3518 * stored in elts. The mbufs must be in the
3519 * same order as the WQEs, so we must copy the
3520 * mbufs to elts here, before the mbufs of the
3521 * coming multi-segment packet are appended.
3523 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy,
3525 loc.pkts_copy = loc.pkts_sent;
3527 MLX5_ASSERT(pkts_n > loc.pkts_sent);
3528 ret = mlx5_tx_burst_mseg(txq, pkts, pkts_n, &loc, olx);
3529 if (!MLX5_TXOFF_CONFIG(INLINE))
3530 loc.pkts_copy = loc.pkts_sent;
3532 * These return code checks are supposed
3533 * to be optimized out due to routine inlining.
3535 if (ret == MLX5_TXCMP_CODE_EXIT) {
3537 * The routine returns this code when
3538 * all packets are sent or there are not
3539 * enough resources to complete the request.
3543 if (ret == MLX5_TXCMP_CODE_ERROR) {
3545 * The routine returns this code when some error
3546 * occurred in the incoming packet format.
3548 txq->stats.oerrors++;
3551 if (ret == MLX5_TXCMP_CODE_SINGLE) {
3553 * The single-segment packet was encountered
3554 * in the array, try to send it in the
3555 * most optimized way, possibly engaging eMPW.
3557 goto enter_send_single;
3559 if (MLX5_TXOFF_CONFIG(TSO) &&
3560 ret == MLX5_TXCMP_CODE_TSO) {
3562 * The single-segment TSO packet was
3563 * encountered in the array.
3565 goto enter_send_tso;
3567 /* We must not get here. Something is going wrong. */
3569 txq->stats.oerrors++;
3572 /* Dedicated branch for single-segment TSO packets. */
3573 if (MLX5_TXOFF_CONFIG(TSO) &&
3574 unlikely(loc.mbuf->ol_flags & PKT_TX_TCP_SEG)) {
3576 * TSO might require a special way of inlining
3577 * (dedicated parameters) and is sent with
3578 * the MLX5_OPCODE_TSO opcode only, handle this
3579 * in a dedicated branch.
3582 MLX5_ASSERT(NB_SEGS(loc.mbuf) == 1);
3583 MLX5_ASSERT(pkts_n > loc.pkts_sent);
3584 ret = mlx5_tx_burst_tso(txq, pkts, pkts_n, &loc, olx);
3586 * These return code checks are supposed
3587 * to be optimized out due to routine inlining.
3589 if (ret == MLX5_TXCMP_CODE_EXIT)
3591 if (ret == MLX5_TXCMP_CODE_ERROR) {
3592 txq->stats.oerrors++;
3595 if (ret == MLX5_TXCMP_CODE_SINGLE)
3596 goto enter_send_single;
3597 if (MLX5_TXOFF_CONFIG(MULTI) &&
3598 ret == MLX5_TXCMP_CODE_MULTI) {
3600 * The multi-segment packet was
3601 * encountered in the array.
3603 goto enter_send_multi;
3605 /* We must not get here. Something is going wrong. */
3607 txq->stats.oerrors++;
3611 * The dedicated branch for single-segment packets
3612 * without TSO. Often these can be sent using
3613 * MLX5_OPCODE_EMPW with multiple packets in one WQE.
3614 * The routine builds the WQEs until it encounters
3615 * a TSO or multi-segment packet (in case these
3616 * offloads are requested at SQ configuration time).
3619 MLX5_ASSERT(pkts_n > loc.pkts_sent);
3620 ret = mlx5_tx_burst_single(txq, pkts, pkts_n, &loc, olx);
3622 * These return code checks are supposed
3623 * to be optimized out due to routine inlining.
3625 if (ret == MLX5_TXCMP_CODE_EXIT)
3627 if (ret == MLX5_TXCMP_CODE_ERROR) {
3628 txq->stats.oerrors++;
3631 if (MLX5_TXOFF_CONFIG(MULTI) &&
3632 ret == MLX5_TXCMP_CODE_MULTI) {
3634 * The multi-segment packet was
3635 * encountered in the array.
3637 goto enter_send_multi;
3639 if (MLX5_TXOFF_CONFIG(TSO) &&
3640 ret == MLX5_TXCMP_CODE_TSO) {
3642 * The single-segment TSO packet was
3643 * encountered in the array.
3645 goto enter_send_tso;
3647 /* We must not get here. Something is going wrong. */
3649 txq->stats.oerrors++;
3653 * Main Tx loop is completed, do the rest:
3654 * - set completion request if thresholds are reached
3655 * - doorbell the hardware
3656 * - copy the rest of mbufs to elts (if any)
3658 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE) ||
3659 loc.pkts_sent >= loc.pkts_copy);
3660 /* Take a shortcut if nothing is sent. */
3661 if (unlikely(loc.pkts_sent == loc.pkts_loop))
3663 /* Request CQE generation if limits are reached. */
3664 mlx5_tx_request_completion(txq, &loc, olx);
3666 * Ring the QP doorbell immediately after WQE building completion
3667 * to improve latency. The purely software-related data treatment
3668 * can be completed after the doorbell. Tx CQEs for this SQ are
3669 * processed in this thread only by polling.
3671 * The rdma core library can map the doorbell register in two ways,
3672 * depending on the environment variable "MLX5_SHUT_UP_BF":
3674 * - as regular cached memory, when the variable is either missing or
3675 * set to zero. This type of mapping may cause significant
3676 * doorbell register write latency and requires an explicit memory
3677 * write barrier to mitigate this issue and prevent write combining.
3679 * - as non-cached memory, when the variable is present and set to a
3680 * non-zero value. This type of mapping may cause a performance impact
3681 * under heavy load conditions but the explicit write memory barrier is
3682 * not required and it may improve core performance.
3684 * - the legacy behaviour (prior to the 19.08 release) was to use some
3685 * heuristics to decide whether the write memory barrier should
3686 * be performed. This behavior is supported by specifying
3687 * tx_db_nc=2; the write barrier is skipped if the application provides
3688 * the full recommended burst of packets, as it is assumed the next
3689 * packets are coming and the write barrier will be issued on
3690 * the next burst (after descriptor writing, at least).
3692 mlx5_tx_dbrec_cond_wmb(txq, loc.wqe_last, !txq->db_nc &&
3693 (!txq->db_heu || pkts_n % MLX5_TX_DEFAULT_BURST));
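/*
 * Illustration of the condition above (hypothetical values): with a
 * cached doorbell mapping (db_nc == 0) and the heuristics enabled
 * (db_heu != 0), the write barrier is issued only when pkts_n is not
 * a multiple of MLX5_TX_DEFAULT_BURST; a full recommended burst
 * defers the barrier to the next call.
 */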
3694 /* Not all of the mbufs may be stored into elts yet. */
3695 part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent - loc.pkts_copy;
3696 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
3698 * There are some single-segment mbufs not stored in elts.
3699 * This can only happen if the last packet was single-segment.
3700 * The copying is gathered into one place because it is
3701 * a good opportunity to optimize it with SIMD.
3702 * Unfortunately, if inlining is enabled, gaps in the pointer
3703 * array may happen due to early freeing of the inlined mbufs.
3705 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy, part, olx);
3706 loc.pkts_copy = loc.pkts_sent;
3708 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
3709 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
3710 if (pkts_n > loc.pkts_sent) {
3712 * If the burst size is large there might not be enough CQEs
3713 * fetched from the completion queue and not enough resources
3714 * freed to send all the packets.
3719 #ifdef MLX5_PMD_SOFT_COUNTERS
3720 /* Increment sent packets counter. */
3721 txq->stats.opackets += loc.pkts_sent;
3723 if (MLX5_TXOFF_CONFIG(INLINE) && loc.mbuf_free)
3724 __mlx5_tx_free_mbuf(txq, pkts, loc.mbuf_free, olx);
3725 return loc.pkts_sent;
3728 #endif /* RTE_PMD_MLX5_TX_H_ */