1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2021 6WIND S.A.
3 * Copyright 2021 Mellanox Technologies, Ltd
6 #ifndef RTE_PMD_MLX5_TX_H_
7 #define RTE_PMD_MLX5_TX_H_
10 #include <sys/queue.h>
13 #include <rte_mempool.h>
14 #include <rte_common.h>
15 #include <rte_spinlock.h>
17 #include <mlx5_common_mr.h>
20 #include "mlx5_autoconf.h"
22 /* TX burst subroutines return codes. */
23 enum mlx5_txcmp_code {
24 MLX5_TXCMP_CODE_EXIT = 0,
25 MLX5_TXCMP_CODE_ERROR,
26 MLX5_TXCMP_CODE_SINGLE,
27 MLX5_TXCMP_CODE_MULTI,
33 * These defines are used to configure the Tx burst routine option set
34 * supported at compile time. The options not specified are optimized out,
35 * since the related "if" conditions can be evaluated at compile time.
36 * The offloads with a bigger runtime check overhead (requiring more CPU
37 * cycles to skip) should have the bigger index - this is needed to select
38 * the better matching routine if there is no exact match and some offloads are not
41 #define MLX5_TXOFF_CONFIG_MULTI (1u << 0) /* Multi-segment packets.*/
42 #define MLX5_TXOFF_CONFIG_TSO (1u << 1) /* TCP send offload supported.*/
43 #define MLX5_TXOFF_CONFIG_SWP (1u << 2) /* Tunnels/SW Parser offloads.*/
44 #define MLX5_TXOFF_CONFIG_CSUM (1u << 3) /* Check Sums offloaded. */
45 #define MLX5_TXOFF_CONFIG_INLINE (1u << 4) /* Data inlining supported. */
46 #define MLX5_TXOFF_CONFIG_VLAN (1u << 5) /* VLAN insertion supported.*/
47 #define MLX5_TXOFF_CONFIG_METADATA (1u << 6) /* Flow metadata. */
48 #define MLX5_TXOFF_CONFIG_EMPW (1u << 8) /* Enhanced MPW supported.*/
49 #define MLX5_TXOFF_CONFIG_MPW (1u << 9) /* Legacy MPW supported.*/
50 #define MLX5_TXOFF_CONFIG_TXPP (1u << 10) /* Scheduling on timestamp.*/
52 /* The most common offload groups. */
53 #define MLX5_TXOFF_CONFIG_NONE 0
54 #define MLX5_TXOFF_CONFIG_FULL (MLX5_TXOFF_CONFIG_MULTI | \
55 MLX5_TXOFF_CONFIG_TSO | \
56 MLX5_TXOFF_CONFIG_SWP | \
57 MLX5_TXOFF_CONFIG_CSUM | \
58 MLX5_TXOFF_CONFIG_INLINE | \
59 MLX5_TXOFF_CONFIG_VLAN | \
60 MLX5_TXOFF_CONFIG_METADATA)
62 #define MLX5_TXOFF_CONFIG(mask) (olx & MLX5_TXOFF_CONFIG_##mask)
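/*
 * Usage sketch (illustrative only, the real instantiations live in the
 * mlx5_tx_*.c files): a burst routine generated with, e.g.,
 *   MLX5_TXOFF_DECL(full_empw, MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_EMPW)
 * passes its constant option set as the 'olx' argument, so that checks like
 * 'if (MLX5_TXOFF_CONFIG(TSO))' in the template routines below fold into
 * compile-time constants and the unused branches are optimized out.
 */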
64 #define MLX5_TXOFF_PRE_DECL(func) \
65 uint16_t mlx5_tx_burst_##func(void *txq, \
66 struct rte_mbuf **pkts, \
69 #define MLX5_TXOFF_DECL(func, olx) \
70 uint16_t mlx5_tx_burst_##func(void *txq, \
71 struct rte_mbuf **pkts, \
74 return mlx5_tx_burst_tmpl((struct mlx5_txq_data *)txq, \
75 pkts, pkts_n, (olx)); \
78 /* Mbuf dynamic flag offset for inline. */
79 extern uint64_t rte_net_mlx5_dynf_inline_mask;
80 #define PKT_TX_DYNF_NOINLINE rte_net_mlx5_dynf_inline_mask
82 extern uint32_t mlx5_ptype_table[] __rte_cache_aligned;
83 extern uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned;
84 extern uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned;
86 struct mlx5_txq_stats {
87 #ifdef MLX5_PMD_SOFT_COUNTERS
88 uint64_t opackets; /**< Total of successfully sent packets. */
89 uint64_t obytes; /**< Total of successfully sent bytes. */
91 uint64_t oerrors; /**< Total number of failed transmitted packets. */
94 /* TX queue send local data. */
96 struct mlx5_txq_local {
97 struct mlx5_wqe *wqe_last; /* last sent WQE pointer. */
98 struct rte_mbuf *mbuf; /* first mbuf to process. */
99 uint16_t pkts_copy; /* packets copied to elts. */
100 uint16_t pkts_sent; /* packets sent. */
101 uint16_t pkts_loop; /* packets sent on loop entry. */
102 uint16_t elts_free; /* available elts remain. */
103 uint16_t wqe_free; /* available wqe remain. */
104 uint16_t mbuf_off; /* data offset in current mbuf. */
105 uint16_t mbuf_nseg; /* number of remaining mbuf segments. */
106 uint16_t mbuf_free; /* number of inline mbufs to free. */
109 /* TX queue descriptor. */
111 struct mlx5_txq_data {
112 uint16_t elts_head; /* Current counter in (*elts)[]. */
113 uint16_t elts_tail; /* Counter of first element awaiting completion. */
114 uint16_t elts_comp; /* elts index since last completion request. */
115 uint16_t elts_s; /* Number of mbuf elements. */
116 uint16_t elts_m; /* Mask for mbuf elements indices. */
117 /* Fields related to elts mbuf storage. */
118 uint16_t wqe_ci; /* Consumer index for work queue. */
119 uint16_t wqe_pi; /* Producer index for work queue. */
120 uint16_t wqe_s; /* Number of WQ elements. */
121 uint16_t wqe_m; /* Mask for WQ element indices. */
122 uint16_t wqe_comp; /* WQE index since last completion request. */
123 uint16_t wqe_thres; /* WQE threshold to request completion in CQ. */
124 /* WQ related fields. */
125 uint16_t cq_ci; /* Consumer index for completion queue. */
126 uint16_t cq_pi; /* Producer index for completion queue. */
127 uint16_t cqe_s; /* Number of CQ elements. */
128 uint16_t cqe_m; /* Mask for CQ indices. */
129 /* CQ related fields. */
130 uint16_t elts_n:4; /* elts[] length (in log2). */
131 uint16_t cqe_n:4; /* Number of CQ elements (in log2). */
132 uint16_t wqe_n:4; /* Number of WQ elements (in log2). */
133 uint16_t tso_en:1; /* When set, hardware TSO is enabled. */
134 uint16_t tunnel_en:1;
135 /* When set, Tx offloads for tunneled packets are supported. */
136 uint16_t swp_en:1; /* Whether SW parser is enabled. */
137 uint16_t vlan_en:1; /* VLAN insertion in WQE is supported. */
138 uint16_t db_nc:1; /* Doorbell mapped to non-cached region. */
139 uint16_t db_heu:1; /* Doorbell heuristic write barrier. */
140 uint16_t fast_free:1; /* mbuf fast free on Tx is enabled. */
141 uint16_t inlen_send; /* Ordinary send data inline size. */
142 uint16_t inlen_empw; /* eMPW max packet size to inline. */
143 uint16_t inlen_mode; /* Minimal data length to inline. */
144 uint32_t qp_num_8s; /* QP number shifted by 8. */
145 uint64_t offloads; /* Offloads for Tx Queue. */
146 struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
147 struct mlx5_wqe *wqes; /* Work queue. */
148 struct mlx5_wqe *wqes_end; /* Work queue array limit. */
149 #ifdef RTE_LIBRTE_MLX5_DEBUG
150 uint32_t *fcqs; /* Free completion queue (debug extended). */
152 uint16_t *fcqs; /* Free completion queue. */
154 volatile struct mlx5_cqe *cqes; /* Completion queue. */
155 volatile uint32_t *qp_db; /* Work queue doorbell. */
156 volatile uint32_t *cq_db; /* Completion queue doorbell. */
157 uint16_t port_id; /* Port ID of device. */
158 uint16_t idx; /* Queue index. */
159 uint64_t ts_mask; /* Timestamp flag dynamic mask. */
160 int32_t ts_offset; /* Timestamp field dynamic offset. */
161 struct mlx5_dev_ctx_shared *sh; /* Shared context. */
162 struct mlx5_txq_stats stats; /* TX queue counters. */
164 rte_spinlock_t *uar_lock;
165 /* UAR access lock required for 32bit implementations */
167 struct rte_mbuf *elts[0];
168 /* Storage for queued packets, must be the last field. */
169 } __rte_cache_aligned;
172 MLX5_TXQ_TYPE_STANDARD, /* Standard Tx queue. */
173 MLX5_TXQ_TYPE_HAIRPIN, /* Hairpin Tx queue. */
176 /* TX queue control descriptor. */
177 struct mlx5_txq_ctrl {
178 LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
179 uint32_t refcnt; /* Reference counter. */
180 unsigned int socket; /* CPU socket ID for allocations. */
181 enum mlx5_txq_type type; /* The txq ctrl type. */
182 unsigned int max_inline_data; /* Max inline data. */
183 unsigned int max_tso_header; /* Max TSO header size. */
184 struct mlx5_txq_obj *obj; /* Verbs/DevX queue object. */
185 struct mlx5_priv *priv; /* Back pointer to private data. */
186 off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */
187 void *bf_reg; /* BlueFlame register from Verbs. */
188 uint16_t dump_file_n; /* Number of dump files. */
189 struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */
190 uint32_t hairpin_status; /* Hairpin binding status. */
191 struct mlx5_txq_data txq; /* Data path structure. */
192 /* Must be the last field in the structure, contains elts[]. */
197 int mlx5_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id);
198 int mlx5_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id);
199 int mlx5_tx_queue_start_primary(struct rte_eth_dev *dev, uint16_t queue_id);
200 int mlx5_tx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t queue_id);
201 int mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
202 unsigned int socket, const struct rte_eth_txconf *conf);
203 int mlx5_tx_hairpin_queue_setup
204 (struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
205 const struct rte_eth_hairpin_conf *hairpin_conf);
206 void mlx5_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
207 void txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl);
208 int mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd);
209 void mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev);
210 int mlx5_txq_obj_verify(struct rte_eth_dev *dev);
211 struct mlx5_txq_ctrl *mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx,
212 uint16_t desc, unsigned int socket,
213 const struct rte_eth_txconf *conf);
214 struct mlx5_txq_ctrl *mlx5_txq_hairpin_new
215 (struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
216 const struct rte_eth_hairpin_conf *hairpin_conf);
217 struct mlx5_txq_ctrl *mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx);
218 int mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx);
219 int mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx);
220 int mlx5_txq_verify(struct rte_eth_dev *dev);
221 void txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl);
222 void txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl);
223 uint64_t mlx5_get_tx_port_offloads(struct rte_eth_dev *dev);
224 void mlx5_txq_dynf_timestamp_set(struct rte_eth_dev *dev);
228 uint16_t removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
230 void mlx5_tx_handle_completion(struct mlx5_txq_data *__rte_restrict txq,
231 unsigned int olx __rte_unused);
232 int mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset);
233 void mlx5_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
234 struct rte_eth_txq_info *qinfo);
235 int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
236 struct rte_eth_burst_mode *mode);
240 uint32_t mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, struct rte_mbuf *mb);
244 MLX5_TXOFF_PRE_DECL(full_empw);
245 MLX5_TXOFF_PRE_DECL(none_empw);
246 MLX5_TXOFF_PRE_DECL(md_empw);
247 MLX5_TXOFF_PRE_DECL(mt_empw);
248 MLX5_TXOFF_PRE_DECL(mtsc_empw);
249 MLX5_TXOFF_PRE_DECL(mti_empw);
250 MLX5_TXOFF_PRE_DECL(mtv_empw);
251 MLX5_TXOFF_PRE_DECL(mtiv_empw);
252 MLX5_TXOFF_PRE_DECL(sc_empw);
253 MLX5_TXOFF_PRE_DECL(sci_empw);
254 MLX5_TXOFF_PRE_DECL(scv_empw);
255 MLX5_TXOFF_PRE_DECL(sciv_empw);
256 MLX5_TXOFF_PRE_DECL(i_empw);
257 MLX5_TXOFF_PRE_DECL(v_empw);
258 MLX5_TXOFF_PRE_DECL(iv_empw);
260 /* mlx5_tx_nompw.c */
262 MLX5_TXOFF_PRE_DECL(full);
263 MLX5_TXOFF_PRE_DECL(none);
264 MLX5_TXOFF_PRE_DECL(md);
265 MLX5_TXOFF_PRE_DECL(mt);
266 MLX5_TXOFF_PRE_DECL(mtsc);
267 MLX5_TXOFF_PRE_DECL(mti);
268 MLX5_TXOFF_PRE_DECL(mtv);
269 MLX5_TXOFF_PRE_DECL(mtiv);
270 MLX5_TXOFF_PRE_DECL(sc);
271 MLX5_TXOFF_PRE_DECL(sci);
272 MLX5_TXOFF_PRE_DECL(scv);
273 MLX5_TXOFF_PRE_DECL(sciv);
274 MLX5_TXOFF_PRE_DECL(i);
275 MLX5_TXOFF_PRE_DECL(v);
276 MLX5_TXOFF_PRE_DECL(iv);
280 MLX5_TXOFF_PRE_DECL(full_ts_nompw);
281 MLX5_TXOFF_PRE_DECL(full_ts_nompwi);
282 MLX5_TXOFF_PRE_DECL(full_ts);
283 MLX5_TXOFF_PRE_DECL(full_ts_noi);
284 MLX5_TXOFF_PRE_DECL(none_ts);
285 MLX5_TXOFF_PRE_DECL(mdi_ts);
286 MLX5_TXOFF_PRE_DECL(mti_ts);
287 MLX5_TXOFF_PRE_DECL(mtiv_ts);
291 MLX5_TXOFF_PRE_DECL(none_mpw);
292 MLX5_TXOFF_PRE_DECL(mci_mpw);
293 MLX5_TXOFF_PRE_DECL(mc_mpw);
294 MLX5_TXOFF_PRE_DECL(i_mpw);
296 static __rte_always_inline uint64_t *
297 mlx5_tx_bfreg(struct mlx5_txq_data *txq)
299 return MLX5_PROC_PRIV(txq->port_id)->uar_table[txq->idx];
303 * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
304 * 64bit architectures.
307 * value to write in CPU endian format.
309 * Address to write to.
311 * Address of the lock to use for that UAR access.
313 static __rte_always_inline void
314 __mlx5_uar_write64_relaxed(uint64_t val, void *addr,
315 rte_spinlock_t *lock __rte_unused)
318 *(uint64_t *)addr = val;
319 #else /* !RTE_ARCH_64 */
320 rte_spinlock_lock(lock);
321 *(uint32_t *)addr = val;
323 *((uint32_t *)addr + 1) = val >> 32;
324 rte_spinlock_unlock(lock);
329 * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
330 * 64bit architectures while guaranteeing the order of execution with the
331 * code being executed.
334 * value to write in CPU endian format.
336 * Address to write to.
338 * Address of the lock to use for that UAR access.
340 static __rte_always_inline void
341 __mlx5_uar_write64(uint64_t val, void *addr, rte_spinlock_t *lock)
344 __mlx5_uar_write64_relaxed(val, addr, lock);
347 /* Assist macros, used instead of directly calling the functions they wrap. */
349 #define mlx5_uar_write64_relaxed(val, dst, lock) \
350 __mlx5_uar_write64_relaxed(val, dst, NULL)
351 #define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, NULL)
353 #define mlx5_uar_write64_relaxed(val, dst, lock) \
354 __mlx5_uar_write64_relaxed(val, dst, lock)
355 #define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, lock)
359 * Query LKey from a packet buffer for Tx. If not found, add the mempool.
362 * Pointer to Tx queue structure.
367 * Searched LKey on success, UINT32_MAX on no match.
369 static __rte_always_inline uint32_t
370 mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
372 struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
373 uintptr_t addr = (uintptr_t)mb->buf_addr;
376 /* Check generation bit to see if there's any change on existing MRs. */
377 if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
378 mlx5_mr_flush_local_cache(mr_ctrl);
379 /* Linear search on MR cache array. */
380 lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
381 MLX5_MR_CACHE_N, addr);
382 if (likely(lkey != UINT32_MAX))
384 /* Take slower bottom-half on miss. */
385 return mlx5_tx_mb2mr_bh(txq, mb);
389 * Ring TX queue doorbell and flush the update if requested.
392 * Pointer to TX queue structure.
394 * Pointer to the last WQE posted in the NIC.
396 * Request for write memory barrier after BlueFlame update.
398 static __rte_always_inline void
399 mlx5_tx_dbrec_cond_wmb(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe,
402 uint64_t *dst = mlx5_tx_bfreg(txq);
403 volatile uint64_t *src = ((volatile uint64_t *)wqe);
406 *txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci);
407 /* Ensure ordering between DB record and BF copy. */
409 mlx5_uar_write64_relaxed(*src, dst, txq->uar_lock);
415 * Ring TX queue doorbell and flush the update by write memory barrier.
418 * Pointer to TX queue structure.
420 * Pointer to the last WQE posted in the NIC.
422 static __rte_always_inline void
423 mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe)
425 mlx5_tx_dbrec_cond_wmb(txq, wqe, 1);
429 * Convert timestamp from mbuf format to linear counter
430 * of Clock Queue completions (24 bits).
433 * Pointer to the device shared context to fetch Tx
434 * packet pacing timestamp and parameters.
436 * Timestamp from mbuf to convert.
438 * positive or zero value - completion ID to wait.
439 * negative value - conversion error.
441 static __rte_always_inline int32_t
442 mlx5_txpp_convert_tx_ts(struct mlx5_dev_ctx_shared *sh, uint64_t mts)
449 * Atomically read two uint64_t fields and compare the lsb bits.
450 * If there is no match - the timestamp was updated in
451 * the service thread and the data should be re-read.
453 rte_compiler_barrier();
454 ci = __atomic_load_n(&sh->txpp.ts.ci_ts, __ATOMIC_RELAXED);
455 ts = __atomic_load_n(&sh->txpp.ts.ts, __ATOMIC_RELAXED);
456 rte_compiler_barrier();
457 if (!((ts ^ ci) << (64 - MLX5_CQ_INDEX_WIDTH)))
460 /* Perform the skew correction, positive value to send earlier. */
461 mts -= sh->txpp.skew;
463 if (unlikely(mts >= UINT64_MAX / 2)) {
464 /* The difference is negative, mts is in the past. */
465 __atomic_fetch_add(&sh->txpp.err_ts_past,
466 1, __ATOMIC_RELAXED);
469 tick = sh->txpp.tick;
471 /* Convert delta to completions, round up. */
472 mts = (mts + tick - 1) / tick;
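/*
 * Illustrative example: with tick = 4 time units, a remaining delta of
 * 10 units yields (10 + 4 - 1) / 4 = 3 Clock Queue completions to wait,
 * i.e. the delta is rounded up to whole ticks.
 */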
473 if (unlikely(mts >= (1 << MLX5_CQ_INDEX_WIDTH) / 2 - 1)) {
474 /* The timestamp is too far in the future. */
475 __atomic_fetch_add(&sh->txpp.err_ts_future,
476 1, __ATOMIC_RELAXED);
479 mts <<= 64 - MLX5_CQ_INDEX_WIDTH;
481 ci >>= 64 - MLX5_CQ_INDEX_WIDTH;
486 * Set Software Parser flags and offsets in Ethernet Segment of WQE.
487 * Flags must be initialized to zero beforehand.
490 * Pointer to burst routine local context.
492 * Pointer to store Software Parser flags.
494 * Configured Tx offloads mask. It is fully defined at
495 * compile time and may be used for optimization.
498 * Software Parser offsets packed in dword.
499 * Software Parser flags are set by pointer.
501 static __rte_always_inline uint32_t
502 txq_mbuf_to_swp(struct mlx5_txq_local *__rte_restrict loc,
507 unsigned int idx, off;
510 if (!MLX5_TXOFF_CONFIG(SWP))
512 ol = loc->mbuf->ol_flags;
513 tunnel = ol & PKT_TX_TUNNEL_MASK;
515 * Check whether the Software Parser is required.
516 * Only customized tunnels may ask for it.
518 if (likely(tunnel != PKT_TX_TUNNEL_UDP && tunnel != PKT_TX_TUNNEL_IP))
521 * The index should have:
522 * bit[0:1] = PKT_TX_L4_MASK
523 * bit[4] = PKT_TX_IPV6
524 * bit[8] = PKT_TX_OUTER_IPV6
525 * bit[9] = PKT_TX_OUTER_UDP
527 idx = (ol & (PKT_TX_L4_MASK | PKT_TX_IPV6 | PKT_TX_OUTER_IPV6)) >> 52;
528 idx |= (tunnel == PKT_TX_TUNNEL_UDP) ? (1 << 9) : 0;
529 *swp_flags = mlx5_swp_types_table[idx];
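/*
 * Worked example (assuming the standard rte_mbuf ol_flags bit layout):
 * an inner UDP checksum request (PKT_TX_UDP_CKSUM) over an IPv4-in-IPv4
 * UDP tunnel (PKT_TX_TUNNEL_UDP) gives idx = 3 | (1 << 9) = 0x203.
 */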
531 * Set offsets for SW parser. Since ConnectX-5, SW parser just
532 * complements HW parser. SW parser starts to engage only if HW parser
533 * can't reach a header. For the older devices, HW parser will not kick
534 * in if any of SWP offsets is set. Therefore, all of the L3 offsets
535 * should be set regardless of HW offload.
537 off = loc->mbuf->outer_l2_len;
538 if (MLX5_TXOFF_CONFIG(VLAN) && ol & PKT_TX_VLAN_PKT)
539 off += sizeof(struct rte_vlan_hdr);
540 set = (off >> 1) << 8; /* Outer L3 offset. */
541 off += loc->mbuf->outer_l3_len;
542 if (tunnel == PKT_TX_TUNNEL_UDP)
543 set |= off >> 1; /* Outer L4 offset. */
544 if (ol & (PKT_TX_IPV4 | PKT_TX_IPV6)) { /* Inner IP. */
545 const uint64_t csum = ol & PKT_TX_L4_MASK;
546 off += loc->mbuf->l2_len;
547 set |= (off >> 1) << 24; /* Inner L3 offset. */
548 if (csum == PKT_TX_TCP_CKSUM ||
549 csum == PKT_TX_UDP_CKSUM ||
550 (MLX5_TXOFF_CONFIG(TSO) && ol & PKT_TX_TCP_SEG)) {
551 off += loc->mbuf->l3_len;
552 set |= (off >> 1) << 16; /* Inner L4 offset. */
555 set = rte_cpu_to_le_32(set);
560 * Convert the Checksum offloads to Verbs.
563 * Pointer to the mbuf.
566 * Converted checksum flags.
568 static __rte_always_inline uint8_t
569 txq_ol_cksum_to_cs(struct rte_mbuf *buf)
572 uint8_t is_tunnel = !!(buf->ol_flags & PKT_TX_TUNNEL_MASK);
573 const uint64_t ol_flags_mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK |
574 PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM;
577 * The index should have:
578 * bit[0] = PKT_TX_TCP_SEG
579 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
580 * bit[4] = PKT_TX_IP_CKSUM
581 * bit[8] = PKT_TX_OUTER_IP_CKSUM
584 idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (!!is_tunnel << 9);
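/*
 * Worked example (assuming the standard rte_mbuf ol_flags bit layout):
 * PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM on a plain (non-tunnel) packet
 * gives idx = (1 << 4) | (1 << 2) = 0x14.
 */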
585 return mlx5_cksum_table[idx];
589 * Free the mbufs from the linear array of pointers.
592 * Pointer to Tx queue structure.
594 * Pointer to array of packets to be freed.
596 * Number of packets to be freed.
598 * Configured Tx offloads mask. It is fully defined at
599 * compile time and may be used for optimization.
601 static __rte_always_inline void
602 mlx5_tx_free_mbuf(struct mlx5_txq_data *__rte_restrict txq,
603 struct rte_mbuf **__rte_restrict pkts,
605 unsigned int olx __rte_unused)
607 struct rte_mempool *pool = NULL;
608 struct rte_mbuf **p_free = NULL;
609 struct rte_mbuf *mbuf;
610 unsigned int n_free = 0;
613 * The implemented algorithm eliminates
614 * copying pointers to temporary array
615 * for rte_mempool_put_bulk() calls.
620 * Free mbufs directly to the pool in bulk
621 * if fast free offload is engaged
623 if (!MLX5_TXOFF_CONFIG(MULTI) && txq->fast_free) {
626 rte_mempool_put_bulk(pool, (void *)pkts, pkts_n);
632 * Decrement mbuf reference counter, detach
633 * indirect and external buffers if needed.
635 mbuf = rte_pktmbuf_prefree_seg(*pkts);
636 if (likely(mbuf != NULL)) {
637 MLX5_ASSERT(mbuf == *pkts);
638 if (likely(n_free != 0)) {
639 if (unlikely(pool != mbuf->pool))
640 /* From different pool. */
643 /* Start new scan array. */
650 if (unlikely(pkts_n == 0)) {
656 * This happens if mbuf is still referenced.
657 * We can't put it back to the pool, skip.
661 if (unlikely(n_free != 0))
662 /* There is some array to free.*/
664 if (unlikely(pkts_n == 0))
665 /* Last mbuf, nothing to free. */
671 * This loop is implemented to avoid multiple
672 * inlining of rte_mempool_put_bulk().
678 * Free the array of pre-freed mbufs
679 * belonging to the same memory pool.
681 rte_mempool_put_bulk(pool, (void *)p_free, n_free);
682 if (unlikely(mbuf != NULL)) {
683 /* There is a request to start a new scan. */
688 if (likely(pkts_n != 0))
691 * This is the last mbuf to be freed.
692 * Do one more loop iteration to complete.
693 * This is the rare case of the last unique mbuf.
698 if (likely(pkts_n == 0))
707 * Non-inlined version of the buffer free routine, kept out of line
708 * for an optimal call on tx_burst completion.
710 static __rte_noinline void
711 __mlx5_tx_free_mbuf(struct mlx5_txq_data *__rte_restrict txq,
712 struct rte_mbuf **__rte_restrict pkts,
714 unsigned int olx __rte_unused)
716 mlx5_tx_free_mbuf(txq, pkts, pkts_n, olx);
720 * Free the mbufs from the elts ring buffer up to the new tail.
723 * Pointer to Tx queue structure.
725 * Index in elts to free up to, becomes new elts tail.
727 * Configured Tx offloads mask. It is fully defined at
728 * compile time and may be used for optimization.
730 static __rte_always_inline void
731 mlx5_tx_free_elts(struct mlx5_txq_data *__rte_restrict txq,
733 unsigned int olx __rte_unused)
735 uint16_t n_elts = tail - txq->elts_tail;
738 MLX5_ASSERT(n_elts <= txq->elts_s);
740 * Implement a loop to support ring buffer wraparound
741 * with single inlining of mlx5_tx_free_mbuf().
746 part = txq->elts_s - (txq->elts_tail & txq->elts_m);
747 part = RTE_MIN(part, n_elts);
749 MLX5_ASSERT(part <= txq->elts_s);
750 mlx5_tx_free_mbuf(txq,
751 &txq->elts[txq->elts_tail & txq->elts_m],
753 txq->elts_tail += part;
759 * Store the mbufs being sent into the elts ring buffer.
760 * On Tx completion these mbufs will be freed.
763 * Pointer to Tx queue structure.
765 * Pointer to array of packets to be stored.
767 * Number of packets to be stored.
769 * Configured Tx offloads mask. It is fully defined at
770 * compile time and may be used for optimization.
772 static __rte_always_inline void
773 mlx5_tx_copy_elts(struct mlx5_txq_data *__rte_restrict txq,
774 struct rte_mbuf **__rte_restrict pkts,
776 unsigned int olx __rte_unused)
779 struct rte_mbuf **elts = (struct rte_mbuf **)txq->elts;
783 part = txq->elts_s - (txq->elts_head & txq->elts_m);
785 MLX5_ASSERT(part <= txq->elts_s);
786 /* This code is a good candidate for vectorizing with SIMD. */
787 rte_memcpy((void *)(elts + (txq->elts_head & txq->elts_m)),
789 RTE_MIN(part, pkts_n) * sizeof(struct rte_mbuf *));
790 txq->elts_head += pkts_n;
791 if (unlikely(part < pkts_n))
792 /* The copy is wrapping around the elts array. */
793 rte_memcpy((void *)elts, (void *)(pkts + part),
794 (pkts_n - part) * sizeof(struct rte_mbuf *));
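/*
 * Worked example (illustrative values): with elts_s = 256 and
 * elts_head & elts_m = 250, part = 6, so the first copy stores up to
 * 6 pointers at the ring tail and the wrapping copy above stores the
 * remaining pkts_n - 6 pointers at the ring beginning.
 */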
798 * Check if the completion request flag should be set in the last WQE.
799 * Both pushed mbufs and WQEs are monitored and the completion request
800 * flag is set if any of the thresholds is reached.
803 * Pointer to TX queue structure.
805 * Pointer to burst routine local context.
807 * Configured Tx offloads mask. It is fully defined at
808 * compile time and may be used for optimization.
810 static __rte_always_inline void
811 mlx5_tx_request_completion(struct mlx5_txq_data *__rte_restrict txq,
812 struct mlx5_txq_local *__rte_restrict loc,
815 uint16_t head = txq->elts_head;
818 part = MLX5_TXOFF_CONFIG(INLINE) ?
819 0 : loc->pkts_sent - loc->pkts_copy;
821 if ((uint16_t)(head - txq->elts_comp) >= MLX5_TX_COMP_THRESH ||
822 (MLX5_TXOFF_CONFIG(INLINE) &&
823 (uint16_t)(txq->wqe_ci - txq->wqe_comp) >= txq->wqe_thres)) {
824 volatile struct mlx5_wqe *last = loc->wqe_last;
827 txq->elts_comp = head;
828 if (MLX5_TXOFF_CONFIG(INLINE))
829 txq->wqe_comp = txq->wqe_ci;
830 /* Request unconditional completion on last WQE. */
831 last->cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
832 MLX5_COMP_MODE_OFFSET);
833 /* Save elts_head in dedicated free on completion queue. */
834 #ifdef RTE_LIBRTE_MLX5_DEBUG
835 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head |
836 (last->cseg.opcode >> 8) << 16;
838 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head;
840 /* A CQE slot must always be available. */
841 MLX5_ASSERT((txq->cq_pi - txq->cq_ci) <= txq->cqe_s);
846 * Build the Control Segment with specified opcode:
848 * - MLX5_OPCODE_ENHANCED_MPSW
852 * Pointer to TX queue structure.
854 * Pointer to burst routine local context.
856 * Pointer to WQE to fill with built Control Segment.
858 * Supposed length of WQE in segments.
860 * SQ WQE opcode to put into Control Segment.
862 * Configured Tx offloads mask. It is fully defined at
863 * compile time and may be used for optimization.
865 static __rte_always_inline void
866 mlx5_tx_cseg_init(struct mlx5_txq_data *__rte_restrict txq,
867 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
868 struct mlx5_wqe *__rte_restrict wqe,
871 unsigned int olx __rte_unused)
873 struct mlx5_wqe_cseg *__rte_restrict cs = &wqe->cseg;
875 /* For legacy MPW replace the EMPW by TSO with modifier. */
876 if (MLX5_TXOFF_CONFIG(MPW) && opcode == MLX5_OPCODE_ENHANCED_MPSW)
877 opcode = MLX5_OPCODE_TSO | MLX5_OPC_MOD_MPW << 24;
878 cs->opcode = rte_cpu_to_be_32((txq->wqe_ci << 8) | opcode);
879 cs->sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
880 cs->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
881 MLX5_COMP_MODE_OFFSET);
882 cs->misc = RTE_BE32(0);
886 * Build the Synchronize Queue Segment with specified completion index.
889 * Pointer to TX queue structure.
891 * Pointer to burst routine local context.
893 * Pointer to WQE to fill with built Control Segment.
895 * Completion index in Clock Queue to wait.
897 * Configured Tx offloads mask. It is fully defined at
898 * compile time and may be used for optimization.
900 static __rte_always_inline void
901 mlx5_tx_wseg_init(struct mlx5_txq_data *restrict txq,
902 struct mlx5_txq_local *restrict loc __rte_unused,
903 struct mlx5_wqe *restrict wqe,
905 unsigned int olx __rte_unused)
907 struct mlx5_wqe_qseg *qs;
909 qs = RTE_PTR_ADD(wqe, MLX5_WSEG_SIZE);
910 qs->max_index = rte_cpu_to_be_32(wci);
911 qs->qpn_cqn = rte_cpu_to_be_32(txq->sh->txpp.clock_queue.cq_obj.cq->id);
912 qs->reserved0 = RTE_BE32(0);
913 qs->reserved1 = RTE_BE32(0);
917 * Build the Ethernet Segment without inlined data.
918 * Supports Software Parser, Checksums and VLAN insertion Tx offload features.
921 * Pointer to TX queue structure.
923 * Pointer to burst routine local context.
925 * Pointer to WQE to fill with built Ethernet Segment.
927 * Configured Tx offloads mask. It is fully defined at
928 * compile time and may be used for optimization.
930 static __rte_always_inline void
931 mlx5_tx_eseg_none(struct mlx5_txq_data *__rte_restrict txq __rte_unused,
932 struct mlx5_txq_local *__rte_restrict loc,
933 struct mlx5_wqe *__rte_restrict wqe,
936 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
940 * Calculate and set check sum flags first, dword field
941 * in segment may be shared with Software Parser flags.
943 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
944 es->flags = rte_cpu_to_le_32(csum);
946 * Calculate and set Software Parser offsets and flags.
947 * These flags are set for custom UDP and IP tunnel packets.
949 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
950 /* Fill metadata field if needed. */
951 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
952 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
953 rte_cpu_to_be_32(*RTE_FLOW_DYNF_METADATA(loc->mbuf)) :
955 /* Engage VLAN tag insertion feature if requested. */
956 if (MLX5_TXOFF_CONFIG(VLAN) &&
957 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
959 * We should get here only if the device supports
960 * this feature correctly.
962 MLX5_ASSERT(txq->vlan_en);
963 es->inline_hdr = rte_cpu_to_be_32(MLX5_ETH_WQE_VLAN_INSERT |
964 loc->mbuf->vlan_tci);
966 es->inline_hdr = RTE_BE32(0);
971 * Build the Ethernet Segment with minimal inlined data
972 * of MLX5_ESEG_MIN_INLINE_SIZE bytes length. This is
973 * used to fill the gap in single WQEBB WQEs.
974 * Supports Software Parser, Checksums and VLAN
975 * insertion Tx offload features.
978 * Pointer to TX queue structure.
980 * Pointer to burst routine local context.
982 * Pointer to WQE to fill with built Ethernet Segment.
984 * Length of VLAN tag insertion if any.
986 * Configured Tx offloads mask. It is fully defined at
987 * compile time and may be used for optimization.
989 static __rte_always_inline void
990 mlx5_tx_eseg_dmin(struct mlx5_txq_data *__rte_restrict txq __rte_unused,
991 struct mlx5_txq_local *__rte_restrict loc,
992 struct mlx5_wqe *__rte_restrict wqe,
996 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
998 uint8_t *psrc, *pdst;
1001 * Calculate and set check sum flags first, dword field
1002 * in segment may be shared with Software Parser flags.
1004 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
1005 es->flags = rte_cpu_to_le_32(csum);
1007 * Calculate and set Software Parser offsets and flags.
1008 * These flags are set for custom UDP and IP tunnel packets.
1010 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
1011 /* Fill metadata field if needed. */
1012 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
1013 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
1014 rte_cpu_to_be_32(*RTE_FLOW_DYNF_METADATA(loc->mbuf)) :
1016 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
1017 es->inline_hdr_sz = RTE_BE16(MLX5_ESEG_MIN_INLINE_SIZE);
1018 es->inline_data = *(unaligned_uint16_t *)psrc;
1019 psrc += sizeof(uint16_t);
1020 pdst = (uint8_t *)(es + 1);
1021 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
1022 /* Implement VLAN tag insertion as part of the inline data. */
1023 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
1024 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
1025 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
1026 /* Insert VLAN ethertype + VLAN tag. */
1027 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
1028 ((RTE_ETHER_TYPE_VLAN << 16) |
1029 loc->mbuf->vlan_tci);
1030 pdst += sizeof(struct rte_vlan_hdr);
1031 /* Copy the remaining two bytes from the packet data. */
1032 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
1033 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
1035 /* Fill the gap in the title WQEBB with inline data. */
1036 rte_mov16(pdst, psrc);
1041 * Build the Ethernet Segment with entire packet data inlining. Checks the
1042 * boundary of WQEBB and ring buffer wrapping, supports Software Parser,
1043 * Checksums and VLAN insertion Tx offload features.
1046 * Pointer to TX queue structure.
1048 * Pointer to burst routine local context.
1050 * Pointer to WQE to fill with built Ethernet Segment.
1052 * Length of VLAN tag insertion if any.
1054 * Length of data to inline (VLAN included, if any).
1056 * TSO flag, set mss field from the packet.
1058 * Configured Tx offloads mask. It is fully defined at
1059 * compile time and may be used for optimization.
1062 * Pointer to the next Data Segment (aligned and wrapped around).
1064 static __rte_always_inline struct mlx5_wqe_dseg *
1065 mlx5_tx_eseg_data(struct mlx5_txq_data *__rte_restrict txq,
1066 struct mlx5_txq_local *__rte_restrict loc,
1067 struct mlx5_wqe *__rte_restrict wqe,
1073 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
1075 uint8_t *psrc, *pdst;
1079 * Calculate and set check sum flags first, dword field
1080 * in segment may be shared with Software Parser flags.
1082 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
1085 csum |= loc->mbuf->tso_segsz;
1086 es->flags = rte_cpu_to_be_32(csum);
1088 es->flags = rte_cpu_to_le_32(csum);
1091 * Calculate and set Software Parser offsets and flags.
1092 * These flags are set for custom UDP and IP tunnel packets.
1094 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
1095 /* Fill metadata field if needed. */
1096 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
1097 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
1098 rte_cpu_to_be_32(*RTE_FLOW_DYNF_METADATA(loc->mbuf)) :
1100 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
1101 es->inline_hdr_sz = rte_cpu_to_be_16(inlen);
1102 es->inline_data = *(unaligned_uint16_t *)psrc;
1103 psrc += sizeof(uint16_t);
1104 pdst = (uint8_t *)(es + 1);
1105 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
1106 /* Implement VLAN tag insertion as part of the inline data. */
1107 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
1108 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
1109 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
1110 /* Insert VLAN ethertype + VLAN tag. */
1111 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
1112 ((RTE_ETHER_TYPE_VLAN << 16) |
1113 loc->mbuf->vlan_tci);
1114 pdst += sizeof(struct rte_vlan_hdr);
1115 /* Copy the remaining two bytes from the packet data. */
1116 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
1117 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
1118 psrc += sizeof(uint16_t);
1120 /* Fill the gap in the title WQEBB with inline data. */
1121 rte_mov16(pdst, psrc);
1122 psrc += sizeof(rte_v128u32_t);
1124 pdst = (uint8_t *)(es + 2);
1125 MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
1126 MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
1127 inlen -= MLX5_ESEG_MIN_INLINE_SIZE;
1129 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
1130 return (struct mlx5_wqe_dseg *)pdst;
1133 * The WQEBB space availability is checked by caller.
1134 * Here we should be aware of WQE ring buffer wraparound only.
1136 part = (uint8_t *)txq->wqes_end - pdst;
1137 part = RTE_MIN(part, inlen);
1139 rte_memcpy(pdst, psrc, part);
1141 if (likely(!inlen)) {
1143 * If return value is not used by the caller
1144 * the code below will be optimized out.
1147 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
1148 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
1149 pdst = (uint8_t *)txq->wqes;
1150 return (struct mlx5_wqe_dseg *)pdst;
1152 pdst = (uint8_t *)txq->wqes;
1159 * Copy data from a chain of mbufs to the specified linear buffer.
1160 * Supports Checksums and VLAN insertion Tx offload features. If the data
1161 * from some mbuf is copied completely, this mbuf is freed. The local
1162 * structure is used to keep the byte stream state.
1165 * Pointer to the destination linear buffer.
1167 * Pointer to burst routine local context.
1169 * Length of data to be copied.
1171 * Length of data to be copied ignoring the no-inline hint.
1173 * Configured Tx offloads mask. It is fully defined at
1174 * compile time and may be used for optimization.
1177 * Number of actually copied data bytes. This is always greater than or
1178 * equal to the "must" parameter and might be less than "len" if the
1179 * no-inline hint flag is encountered.
1181 static __rte_always_inline unsigned int
1182 mlx5_tx_mseg_memcpy(uint8_t *pdst,
1183 struct mlx5_txq_local *__rte_restrict loc,
1186 unsigned int olx __rte_unused)
1188 struct rte_mbuf *mbuf;
1189 unsigned int part, dlen, copy = 0;
1193 MLX5_ASSERT(must <= len);
1195 /* Allow zero length packets, must check first. */
1196 dlen = rte_pktmbuf_data_len(loc->mbuf);
1197 if (dlen <= loc->mbuf_off) {
1198 /* Exhausted packet, just free. */
1200 loc->mbuf = mbuf->next;
1201 rte_pktmbuf_free_seg(mbuf);
1203 MLX5_ASSERT(loc->mbuf_nseg > 1);
1204 MLX5_ASSERT(loc->mbuf);
1206 if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
1211 * We already copied the minimal
1212 * requested amount of data.
1217 if (diff <= rte_pktmbuf_data_len(loc->mbuf)) {
1219 * Copy only the minimal required
1220 * part of the data buffer.
1227 dlen -= loc->mbuf_off;
1228 psrc = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
1230 part = RTE_MIN(len, dlen);
1231 rte_memcpy(pdst, psrc, part);
1233 loc->mbuf_off += part;
1236 if (loc->mbuf_off >= rte_pktmbuf_data_len(loc->mbuf)) {
1238 /* Exhausted packet, just free. */
1240 loc->mbuf = mbuf->next;
1241 rte_pktmbuf_free_seg(mbuf);
1243 MLX5_ASSERT(loc->mbuf_nseg >= 1);
1253 * Build the Ethernet Segment with inlined data from multi-segment packet.
1254 * Checks the boundary of WQEBB and ring buffer wrapping, supports Software
1255 * Parser, Checksums and VLAN insertion Tx offload features.
1258 * Pointer to TX queue structure.
1260 * Pointer to burst routine local context.
1262 * Pointer to WQE to fill with built Ethernet Segment.
1264 * Length of VLAN tag insertion if any.
1266 * Length of data to inline (VLAN included, if any).
1268 * TSO flag, set mss field from the packet.
1270 * Configured Tx offloads mask. It is fully defined at
1271 * compile time and may be used for optimization.
1274 * Pointer to the next Data Segment (aligned and possibly NOT wrapped
1275 * around - the caller should do the wrapping check on its own).
1277 static __rte_always_inline struct mlx5_wqe_dseg *
1278 mlx5_tx_eseg_mdat(struct mlx5_txq_data *__rte_restrict txq,
1279 struct mlx5_txq_local *__rte_restrict loc,
1280 struct mlx5_wqe *__rte_restrict wqe,
1286 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
1289 unsigned int part, tlen = 0;
1292 * Calculate and set check sum flags first, uint32_t field
1293 * in segment may be shared with Software Parser flags.
1295 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
1298 csum |= loc->mbuf->tso_segsz;
1299 es->flags = rte_cpu_to_be_32(csum);
1301 es->flags = rte_cpu_to_le_32(csum);
1304 * Calculate and set Software Parser offsets and flags.
1305 * These flags are set for custom UDP and IP tunnel packets.
1307 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
1308 /* Fill metadata field if needed. */
1309 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
1310 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
1311 rte_cpu_to_be_32(*RTE_FLOW_DYNF_METADATA(loc->mbuf)) :
1313 MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
1314 pdst = (uint8_t *)&es->inline_data;
1315 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
1316 /* Implement VLAN tag insertion as part of the inline data. */
1317 mlx5_tx_mseg_memcpy(pdst, loc,
1318 2 * RTE_ETHER_ADDR_LEN,
1319 2 * RTE_ETHER_ADDR_LEN, olx);
1320 pdst += 2 * RTE_ETHER_ADDR_LEN;
1321 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
1322 ((RTE_ETHER_TYPE_VLAN << 16) |
1323 loc->mbuf->vlan_tci);
1324 pdst += sizeof(struct rte_vlan_hdr);
1325 tlen += 2 * RTE_ETHER_ADDR_LEN + sizeof(struct rte_vlan_hdr);
1327 MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
1329 * The WQEBB space availability is checked by caller.
1330 * Here we should be aware of WQE ring buffer wraparound only.
1332 part = (uint8_t *)txq->wqes_end - pdst;
1333 part = RTE_MIN(part, inlen - tlen);
1339 * Copying may be interrupted inside the routine
1340 * if it runs into the no-inline hint flag.
1342 copy = tso ? inlen : txq->inlen_mode;
1343 copy = tlen >= copy ? 0 : (copy - tlen);
1344 copy = mlx5_tx_mseg_memcpy(pdst, loc, part, copy, olx);
1346 if (likely(inlen <= tlen) || copy < part) {
1347 es->inline_hdr_sz = rte_cpu_to_be_16(tlen);
1349 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
1350 return (struct mlx5_wqe_dseg *)pdst;
1352 pdst = (uint8_t *)txq->wqes;
1353 part = inlen - tlen;
1358 * Build the Data Segment of pointer type.
1361 * Pointer to TX queue structure.
1363 * Pointer to burst routine local context.
1365 * Pointer to WQE to fill with built Data Segment.
1367 * Data buffer to point.
1369 * Data buffer length.
1371 * Configured Tx offloads mask. It is fully defined at
1372 * compile time and may be used for optimization.
1374 static __rte_always_inline void
1375 mlx5_tx_dseg_ptr(struct mlx5_txq_data *__rte_restrict txq,
1376 struct mlx5_txq_local *__rte_restrict loc,
1377 struct mlx5_wqe_dseg *__rte_restrict dseg,
1380 unsigned int olx __rte_unused)
1384 dseg->bcount = rte_cpu_to_be_32(len);
1385 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
1386 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
1390 * Build the Data Segment of pointer type, or of inline type if the data
1391 * length fits into the inline buffer of the minimal Data Segment.
1394 * Pointer to TX queue structure.
1396 * Pointer to burst routine local context.
1398 * Pointer to WQE to fill with built Data Segment.
1400 * Data buffer to point.
1402 * Data buffer length.
1404 * Configured Tx offloads mask. It is fully defined at
1405 * compile time and may be used for optimization.
1407 static __rte_always_inline void
1408 mlx5_tx_dseg_iptr(struct mlx5_txq_data *__rte_restrict txq,
1409 struct mlx5_txq_local *__rte_restrict loc,
1410 struct mlx5_wqe_dseg *__rte_restrict dseg,
1413 unsigned int olx __rte_unused)
1419 if (len > MLX5_DSEG_MIN_INLINE_SIZE) {
1420 dseg->bcount = rte_cpu_to_be_32(len);
1421 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
1422 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
1426 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
1427 /* Unrolled implementation of generic rte_memcpy. */
1428 dst = (uintptr_t)&dseg->inline_data[0];
1429 src = (uintptr_t)buf;
1431 #ifdef RTE_ARCH_STRICT_ALIGN
1432 MLX5_ASSERT(dst == RTE_PTR_ALIGN(dst, sizeof(uint32_t)));
1433 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
1434 dst += sizeof(uint32_t);
1435 src += sizeof(uint32_t);
1436 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
1437 dst += sizeof(uint32_t);
1438 src += sizeof(uint32_t);
1440 *(uint64_t *)dst = *(unaligned_uint64_t *)src;
1441 dst += sizeof(uint64_t);
1442 src += sizeof(uint64_t);
1446 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
1447 dst += sizeof(uint32_t);
1448 src += sizeof(uint32_t);
1451 *(uint16_t *)dst = *(unaligned_uint16_t *)src;
1452 dst += sizeof(uint16_t);
1453 src += sizeof(uint16_t);
1456 *(uint8_t *)dst = *(uint8_t *)src;
1460 * Build the Data Segment of inlined data from single
1461 * segment packet, no VLAN insertion.
1464 * Pointer to TX queue structure.
1466 * Pointer to burst routine local context.
1468 * Pointer to WQE to fill with built Data Segment.
1470 * Data buffer to point.
1472 * Data buffer length.
1474 * Configured Tx offloads mask. It is fully defined at
1475 * compile time and may be used for optimization.
1478 * Pointer to the next Data Segment after inlined data.
1479 * Ring buffer wraparound check is needed. We do not do it here because it
1480 * may not be needed for the last packet in the eMPW session.
1482 static __rte_always_inline struct mlx5_wqe_dseg *
1483 mlx5_tx_dseg_empw(struct mlx5_txq_data *__rte_restrict txq,
1484 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
1485 struct mlx5_wqe_dseg *__rte_restrict dseg,
1488 unsigned int olx __rte_unused)
1493 if (!MLX5_TXOFF_CONFIG(MPW)) {
1494 /* Store the descriptor byte counter for eMPW sessions. */
1495 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
1496 pdst = &dseg->inline_data[0];
1498 /* The entire legacy MPW session counter is stored on close. */
1499 pdst = (uint8_t *)dseg;
1502 * The WQEBB space availability is checked by caller.
1503 * Here we should be aware of WQE ring buffer wraparound only.
1505 part = (uint8_t *)txq->wqes_end - pdst;
1506 part = RTE_MIN(part, len);
1508 rte_memcpy(pdst, buf, part);
1512 if (!MLX5_TXOFF_CONFIG(MPW))
1513 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
1514 /* Note: no final wraparound check here. */
1515 return (struct mlx5_wqe_dseg *)pdst;
1517 pdst = (uint8_t *)txq->wqes;
1524 * Build the Data Segment of inlined data from single
1525 * segment packet with VLAN insertion.
1528 * Pointer to TX queue structure.
1530 * Pointer to burst routine local context.
1532 * Pointer to the dseg to fill with the built Data Segment.
1534 * Data buffer to point.
1536 * Data buffer length.
1538 * Configured Tx offloads mask. It is fully defined at
1539 * compile time and may be used for optimization.
1542 * Pointer to the next Data Segment after inlined data.
1543 * Ring buffer wraparound check is needed.
1545 static __rte_always_inline struct mlx5_wqe_dseg *
1546 mlx5_tx_dseg_vlan(struct mlx5_txq_data *__rte_restrict txq,
1547 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
1548 struct mlx5_wqe_dseg *__rte_restrict dseg,
1551 unsigned int olx __rte_unused)
1557 MLX5_ASSERT(len > MLX5_ESEG_MIN_INLINE_SIZE);
1558 if (!MLX5_TXOFF_CONFIG(MPW)) {
1559 /* Store the descriptor byte counter for eMPW sessions. */
1560 dseg->bcount = rte_cpu_to_be_32
1561 ((len + sizeof(struct rte_vlan_hdr)) |
1562 MLX5_ETH_WQE_DATA_INLINE);
1563 pdst = &dseg->inline_data[0];
1565 /* The entire legacy MPW session counter is stored on close. */
1566 pdst = (uint8_t *)dseg;
1568 memcpy(pdst, buf, MLX5_DSEG_MIN_INLINE_SIZE);
1569 buf += MLX5_DSEG_MIN_INLINE_SIZE;
1570 pdst += MLX5_DSEG_MIN_INLINE_SIZE;
1571 len -= MLX5_DSEG_MIN_INLINE_SIZE;
1572 /* Insert VLAN ethertype + VLAN tag. Pointer is aligned. */
1573 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
1574 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
1575 pdst = (uint8_t *)txq->wqes;
1576 *(uint32_t *)pdst = rte_cpu_to_be_32((RTE_ETHER_TYPE_VLAN << 16) |
1577 loc->mbuf->vlan_tci);
1578 pdst += sizeof(struct rte_vlan_hdr);
1580 * The WQEBB space availability is checked by caller.
1581 * Here we should be aware of WQE ring buffer wraparound only.
1583 part = (uint8_t *)txq->wqes_end - pdst;
1584 part = RTE_MIN(part, len);
1586 rte_memcpy(pdst, buf, part);
1590 if (!MLX5_TXOFF_CONFIG(MPW))
1591 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
1592 /* Note: no final wraparound check here. */
1593 return (struct mlx5_wqe_dseg *)pdst;
1595 pdst = (uint8_t *)txq->wqes;
1602 * Build the Ethernet Segment with optionally inlined data with
1603 * VLAN insertion and following Data Segments (if any) from
1604 * multi-segment packet. Used by ordinary send and TSO.
1607 * Pointer to TX queue structure.
1609 * Pointer to burst routine local context.
1611 * Pointer to WQE to fill with built Ethernet/Data Segments.
1613 * Length of VLAN header to insert, 0 means no VLAN insertion.
1615 * Data length to inline. For TSO this parameter specifies the exact value;
1616 * for the ordinary send routine it can be aligned by the caller to provide
1617 * better WQE space saving and data buffer start address alignment.
1618 * This length includes the VLAN header being inserted.
1620 * Zero means ordinary send (the inlined data can be extended),
1621 * otherwise this is TSO (the inlined data length is fixed).
1623 * Configured Tx offloads mask. It is fully defined at
1624 * compile time and may be used for optimization.
1627 * Actual size of built WQE in segments.
1629 static __rte_always_inline unsigned int
1630 mlx5_tx_mseg_build(struct mlx5_txq_data *__rte_restrict txq,
1631 struct mlx5_txq_local *__rte_restrict loc,
1632 struct mlx5_wqe *__rte_restrict wqe,
1636 unsigned int olx __rte_unused)
1638 struct mlx5_wqe_dseg *__rte_restrict dseg;
1641 MLX5_ASSERT((rte_pktmbuf_pkt_len(loc->mbuf) + vlan) >= inlen);
1642 loc->mbuf_nseg = NB_SEGS(loc->mbuf);
1645 dseg = mlx5_tx_eseg_mdat(txq, loc, wqe, vlan, inlen, tso, olx);
1646 if (!loc->mbuf_nseg)
1649 * There are still some mbufs remaining, not inlined.
1650 * The first mbuf may be partially inlined and we
1651 * must process the possible non-zero data offset.
1653 if (loc->mbuf_off) {
1658 * Exhausted packets must be dropped before.
1659 * A non-zero offset means there is some data
1660 * remaining in the packet.
1662 MLX5_ASSERT(loc->mbuf_off < rte_pktmbuf_data_len(loc->mbuf));
1663 MLX5_ASSERT(rte_pktmbuf_data_len(loc->mbuf));
1664 dptr = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
1666 dlen = rte_pktmbuf_data_len(loc->mbuf) - loc->mbuf_off;
1668 * Build the pointer/minimal Data Segment.
1669 * Do ring buffer wrapping check in advance.
1671 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
1672 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
1673 mlx5_tx_dseg_iptr(txq, loc, dseg, dptr, dlen, olx);
1674 /* Store the mbuf to be freed on completion. */
1675 MLX5_ASSERT(loc->elts_free);
1676 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
1679 if (--loc->mbuf_nseg == 0)
1681 loc->mbuf = loc->mbuf->next;
1685 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
1686 struct rte_mbuf *mbuf;
1688 /* Zero length segment found, just skip. */
1690 loc->mbuf = loc->mbuf->next;
1691 rte_pktmbuf_free_seg(mbuf);
1692 if (--loc->mbuf_nseg == 0)
1695 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
1696 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
1699 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
1700 rte_pktmbuf_data_len(loc->mbuf), olx);
1701 MLX5_ASSERT(loc->elts_free);
1702 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
1705 if (--loc->mbuf_nseg == 0)
1707 loc->mbuf = loc->mbuf->next;
1712 /* Calculate actual segments used from the dseg pointer. */
1713 if ((uintptr_t)wqe < (uintptr_t)dseg)
1714 ds = ((uintptr_t)dseg - (uintptr_t)wqe) / MLX5_WSEG_SIZE;
1716 ds = (((uintptr_t)dseg - (uintptr_t)wqe) +
1717 txq->wqe_s * MLX5_WQE_SIZE) / MLX5_WSEG_SIZE;
1722 * The routine checks the timestamp flag in the current packet,
1723 * and pushes a WAIT WQE into the queue if scheduling is required.
1726 * Pointer to TX queue structure.
1728 * Pointer to burst routine local context.
1730 * Configured Tx offloads mask. It is fully defined at
1731 * compile time and may be used for optimization.
1734 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
1735 * MLX5_TXCMP_CODE_SINGLE - continue processing with the packet.
1736 * MLX5_TXCMP_CODE_MULTI - the WAIT inserted, continue processing.
1737 * Local context variables partially updated.
1739 static __rte_always_inline enum mlx5_txcmp_code
1740 mlx5_tx_schedule_send(struct mlx5_txq_data *restrict txq,
1741 struct mlx5_txq_local *restrict loc,
1744 if (MLX5_TXOFF_CONFIG(TXPP) &&
1745 loc->mbuf->ol_flags & txq->ts_mask) {
1746 struct mlx5_wqe *wqe;
1751 * Estimate the required space quickly and roughly.
1752 * We would like to ensure the packet can be pushed
1753 * to the queue and we won't get an orphan WAIT WQE.
1755 if (loc->wqe_free <= MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE ||
1756 loc->elts_free < NB_SEGS(loc->mbuf))
1757 return MLX5_TXCMP_CODE_EXIT;
1758 /* Convert the timestamp into completion to wait. */
1759 ts = *RTE_MBUF_DYNFIELD(loc->mbuf, txq->ts_offset, uint64_t *);
1760 wci = mlx5_txpp_convert_tx_ts(txq->sh, ts);
1761 if (unlikely(wci < 0))
1762 return MLX5_TXCMP_CODE_SINGLE;
1763 /* Build the WAIT WQE with specified completion. */
1764 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
1765 mlx5_tx_cseg_init(txq, loc, wqe, 2, MLX5_OPCODE_WAIT, olx);
1766 mlx5_tx_wseg_init(txq, loc, wqe, wci, olx);
1769 return MLX5_TXCMP_CODE_MULTI;
1771 return MLX5_TXCMP_CODE_SINGLE;
1775 * Tx one packet function for multi-segment TSO. Supports all
1776 * types of Tx offloads, uses MLX5_OPCODE_TSO to build WQEs,
1777 * sends one packet per WQE.
1779 * This routine is responsible for storing the processed mbuf
1780 * into the elts ring buffer and updating elts_head.
1783 * Pointer to TX queue structure.
1785 * Pointer to burst routine local context.
1787 * Configured Tx offloads mask. It is fully defined at
1788 * compile time and may be used for optimization.
1791 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
1792 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
1793 * Local context variables partially updated.
1795 static __rte_always_inline enum mlx5_txcmp_code
1796 mlx5_tx_packet_multi_tso(struct mlx5_txq_data *__rte_restrict txq,
1797 struct mlx5_txq_local *__rte_restrict loc,
1800 struct mlx5_wqe *__rte_restrict wqe;
1801 unsigned int ds, dlen, inlen, ntcp, vlan = 0;
1803 if (MLX5_TXOFF_CONFIG(TXPP)) {
1804 enum mlx5_txcmp_code wret;
1806 /* Generate WAIT for scheduling if requested. */
1807 wret = mlx5_tx_schedule_send(txq, loc, olx);
1808 if (wret == MLX5_TXCMP_CODE_EXIT)
1809 return MLX5_TXCMP_CODE_EXIT;
1810 if (wret == MLX5_TXCMP_CODE_ERROR)
1811 return MLX5_TXCMP_CODE_ERROR;
1814 * Calculate data length to be inlined to estimate
1815 * the required space in WQE ring buffer.
1817 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
1818 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
1819 vlan = sizeof(struct rte_vlan_hdr);
1820 inlen = loc->mbuf->l2_len + vlan +
1821 loc->mbuf->l3_len + loc->mbuf->l4_len;
1822 if (unlikely((!inlen || !loc->mbuf->tso_segsz)))
1823 return MLX5_TXCMP_CODE_ERROR;
1824 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
1825 inlen += loc->mbuf->outer_l2_len + loc->mbuf->outer_l3_len;
1826 /* Packet must contain all TSO headers. */
1827 if (unlikely(inlen > MLX5_MAX_TSO_HEADER ||
1828 inlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
1829 inlen > (dlen + vlan)))
1830 return MLX5_TXCMP_CODE_ERROR;
1831 MLX5_ASSERT(inlen >= txq->inlen_mode);
1833 * Check whether there are enough free WQEBBs:
1835 * - Ethernet Segment
1836 * - First Segment of inlined Ethernet data
1837 * - ... data continued ...
1838 * - Data Segments of pointer/min inline type
1840 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
1841 MLX5_ESEG_MIN_INLINE_SIZE +
1843 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
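/*
 * The estimate above roughly counts the Control and Ethernet Segments,
 * the WSEGs holding the inlined TSO headers (rounded up) and one pointer
 * Data Segment per remaining mbuf segment; (ds + 3) / 4 below converts
 * this WSEG count into WQEBBs.
 */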
1844 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
1845 return MLX5_TXCMP_CODE_EXIT;
1846 /* Check for maximal WQE size. */
1847 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
1848 return MLX5_TXCMP_CODE_ERROR;
1849 #ifdef MLX5_PMD_SOFT_COUNTERS
1850 /* Update sent data bytes/packets counters. */
1851 ntcp = (dlen - (inlen - vlan) + loc->mbuf->tso_segsz - 1) /
1852 loc->mbuf->tso_segsz;
1854 * One will be added for the mbuf itself at the end of mlx5_tx_burst
1855 * from the loc->pkts_sent field.
1858 txq->stats.opackets += ntcp;
1859 txq->stats.obytes += dlen + vlan + ntcp * inlen;
1861 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
1862 loc->wqe_last = wqe;
1863 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_TSO, olx);
1864 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 1, olx);
1865 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
1866 txq->wqe_ci += (ds + 3) / 4;
1867 loc->wqe_free -= (ds + 3) / 4;
1868 return MLX5_TXCMP_CODE_MULTI;
1872 * Tx one packet function for multi-segment SEND. Supports all types of Tx
1873 * offloads, uses MLX5_OPCODE_SEND to build WQEs, sends one packet per WQE,
1874 * without any data inlining in Ethernet Segment.
1876 * This routine is responsible for storing the processed mbuf
1877 * into the elts ring buffer and updating elts_head.
1880 * Pointer to TX queue structure.
1882 * Pointer to burst routine local context.
1884 * Configured Tx offloads mask. It is fully defined at
1885 * compile time and may be used for optimization.
1888 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
1889 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
1890 * Local context variables partially updated.
1892 static __rte_always_inline enum mlx5_txcmp_code
1893 mlx5_tx_packet_multi_send(struct mlx5_txq_data *__rte_restrict txq,
1894 struct mlx5_txq_local *__rte_restrict loc,
1897 struct mlx5_wqe_dseg *__rte_restrict dseg;
1898 struct mlx5_wqe *__rte_restrict wqe;
1899 unsigned int ds, nseg;
1901 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
1902 if (MLX5_TXOFF_CONFIG(TXPP)) {
1903 enum mlx5_txcmp_code wret;
1905 /* Generate WAIT for scheduling if requested. */
1906 wret = mlx5_tx_schedule_send(txq, loc, olx);
1907 if (wret == MLX5_TXCMP_CODE_EXIT)
1908 return MLX5_TXCMP_CODE_EXIT;
1909 if (wret == MLX5_TXCMP_CODE_ERROR)
1910 return MLX5_TXCMP_CODE_ERROR;
1913 * No inlining at all - it means saving CPU cycles was prioritized at
1914 * configuration time, we should not copy any packet data into the WQE.
1916 nseg = NB_SEGS(loc->mbuf);
1918 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
1919 return MLX5_TXCMP_CODE_EXIT;
1920 /* Check for maximal WQE size. */
1921 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
1922 return MLX5_TXCMP_CODE_ERROR;
1924 * Some Tx offloads may cause an error if the packet is not long enough,
1925 * check against the assumed minimal length.
1927 if (rte_pktmbuf_pkt_len(loc->mbuf) <= MLX5_ESEG_MIN_INLINE_SIZE)
1928 return MLX5_TXCMP_CODE_ERROR;
1929 #ifdef MLX5_PMD_SOFT_COUNTERS
1930 /* Update sent data bytes counter. */
1931 txq->stats.obytes += rte_pktmbuf_pkt_len(loc->mbuf);
1932 if (MLX5_TXOFF_CONFIG(VLAN) &&
1933 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
1934 txq->stats.obytes += sizeof(struct rte_vlan_hdr);
1937 * SEND WQE, one WQEBB:
1938 * - Control Segment, SEND opcode
1939 * - Ethernet Segment, optional VLAN, no inline
1940 * - Data Segments, pointer only type
1942 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
1943 loc->wqe_last = wqe;
1944 mlx5_tx_cseg_init(txq, loc, wqe, ds, MLX5_OPCODE_SEND, olx);
1945 mlx5_tx_eseg_none(txq, loc, wqe, olx);
1946 dseg = &wqe->dseg[0];
1948 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
1949 struct rte_mbuf *mbuf;
1952 * Zero length segment found, have to correct the total
1953 * size of the WQE in segments.
1954 * It is supposed to be a rare occasion, so in the normal
1955 * case (no zero length segments) we avoid extra
1956 * writing to the Control Segment.
1959 wqe->cseg.sq_ds -= RTE_BE32(1);
1961 loc->mbuf = mbuf->next;
1962 rte_pktmbuf_free_seg(mbuf);
1968 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
1969 rte_pktmbuf_data_len(loc->mbuf), olx);
1970 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
1975 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
1976 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
1977 loc->mbuf = loc->mbuf->next;
1980 txq->wqe_ci += (ds + 3) / 4;
1981 loc->wqe_free -= (ds + 3) / 4;
1982 return MLX5_TXCMP_CODE_MULTI;
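/*
 * Illustration of the sizing above (a sketch based on the code as shown):
 * without any inlining the WQE consists of the Control Segment, the Ethernet
 * Segment and one pointer Data Segment per mbuf segment, i.e. ds = 2 + nseg.
 * For a 5-segment packet ds = 7, so (ds + 3) / 4 = 2 WQEBBs are consumed and
 * the DS count is encoded into cseg.sq_ds together with txq->qp_num_8s.
 * Each zero-length segment dropped in the loop decrements sq_ds by one.
 */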
1986 * Tx one packet function for multi-segment SEND. Supports all
1987 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
1988 * sends one packet per WQE, with data inlining in
1989 * Ethernet Segment and minimal Data Segments.
1991 * This routine is responsible for storing the processed mbuf
1992 * into the elts ring buffer and updating elts_head.
1995 * Pointer to TX queue structure.
1997 * Pointer to burst routine local context.
1999 * Configured Tx offloads mask. It is fully defined at
2000 * compile time and may be used for optimization.
2003 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
2004 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
2005 * Local context variables partially updated.
2007 static __rte_always_inline enum mlx5_txcmp_code
2008 mlx5_tx_packet_multi_inline(struct mlx5_txq_data *__rte_restrict txq,
2009 struct mlx5_txq_local *__rte_restrict loc,
2012 struct mlx5_wqe *__rte_restrict wqe;
2013 unsigned int ds, inlen, dlen, vlan = 0;
2015 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
2016 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
2017 if (MLX5_TXOFF_CONFIG(TXPP)) {
2018 enum mlx5_txcmp_code wret;
2020 /* Generate WAIT for scheduling if requested. */
2021 wret = mlx5_tx_schedule_send(txq, loc, olx);
2022 if (wret == MLX5_TXCMP_CODE_EXIT)
2023 return MLX5_TXCMP_CODE_EXIT;
2024 if (wret == MLX5_TXCMP_CODE_ERROR)
2025 return MLX5_TXCMP_CODE_ERROR;
2028 * First calculate data length to be inlined
2029 * to estimate the required space for WQE.
2031 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
2032 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
2033 vlan = sizeof(struct rte_vlan_hdr);
2034 inlen = dlen + vlan;
2035 /* Check against minimal length. */
2036 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
2037 return MLX5_TXCMP_CODE_ERROR;
2038 MLX5_ASSERT(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
2039 if (inlen > txq->inlen_send ||
2040 loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
2041 struct rte_mbuf *mbuf;
2046 nxlen = rte_pktmbuf_data_len(mbuf);
2048 * Packet length exceeds the allowed inline data length,
2049 * check whether the minimal inlining is required.
2051 if (txq->inlen_mode) {
2052 MLX5_ASSERT(txq->inlen_mode >=
2053 MLX5_ESEG_MIN_INLINE_SIZE);
2054 MLX5_ASSERT(txq->inlen_mode <= txq->inlen_send);
2055 inlen = txq->inlen_mode;
2056 } else if (vlan && !txq->vlan_en) {
2058 * VLAN insertion is requested and the hardware does not
2059 * support the offload; it will be done with software inlining.
2061 inlen = MLX5_ESEG_MIN_INLINE_SIZE;
2062 } else if (mbuf->ol_flags & PKT_TX_DYNF_NOINLINE ||
2063 nxlen > txq->inlen_send) {
2064 return mlx5_tx_packet_multi_send(txq, loc, olx);
2069 * Now the minimal amount of data to inline is known.
2070 * Check whether we should inline the buffers from the
2071 * beginning of the chain to eliminate some mbufs.
2073 if (unlikely(nxlen <= txq->inlen_send)) {
2074 /* We can inline first mbuf at least. */
2075 if (nxlen < inlen) {
2078 /* Scan mbufs till inlen filled. */
2083 nxlen = rte_pktmbuf_data_len(mbuf);
2085 } while (unlikely(nxlen < inlen));
2086 if (unlikely(nxlen > txq->inlen_send)) {
2087 /* We cannot inline entire mbuf. */
2088 smlen = inlen - smlen;
2089 start = rte_pktmbuf_mtod_offset
2090 (mbuf, uintptr_t, smlen);
2098 /* There should be no end of packet here. */
2100 nxlen = inlen + rte_pktmbuf_data_len(mbuf);
2101 } while (unlikely(nxlen < txq->inlen_send));
2103 start = rte_pktmbuf_mtod(mbuf, uintptr_t);
2105 * Check whether we can inline enough data to align the
2106 * start address of the data buffer to a cacheline.
2109 start = (~start + 1) & (RTE_CACHE_LINE_SIZE - 1);
2110 if (unlikely(start)) {
2112 if (start <= txq->inlen_send)
2117 * Check whether there are enough free WQEBBs:
2119 * - Ethernet Segment
2120 * - First Segment of inlined Ethernet data
2121 * - ... data continued ...
2122 * - Data Segments of pointer/min inline type
2124 * Estimate the number of Data Segments conservatively,
2125 * supposing that no mbufs are freed during inlining.
2127 MLX5_ASSERT(inlen <= txq->inlen_send);
2128 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
2129 MLX5_ESEG_MIN_INLINE_SIZE +
2131 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
2132 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
2133 return MLX5_TXCMP_CODE_EXIT;
2134 /* Check for maximal WQE size. */
2135 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
2136 return MLX5_TXCMP_CODE_ERROR;
2137 #ifdef MLX5_PMD_SOFT_COUNTERS
2138 /* Update sent data bytes/packets counters. */
2139 txq->stats.obytes += dlen + vlan;
2141 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
2142 loc->wqe_last = wqe;
2143 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_SEND, olx);
2144 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 0, olx);
2145 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
2146 txq->wqe_ci += (ds + 3) / 4;
2147 loc->wqe_free -= (ds + 3) / 4;
2148 return MLX5_TXCMP_CODE_MULTI;
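/*
 * Worked example for the conservative estimate above (illustrative, assuming
 * MLX5_WSEG_SIZE is 16 bytes and MLX5_ESEG_MIN_INLINE_SIZE is 18 bytes): for a
 * 3-segment packet with inlen = 210 bytes to inline, the inlined data beyond
 * the minimal Ethernet Segment part takes ceil((210 - 18) / 16) = 12 extra
 * segments, so ds = 3 + 2 + 12 = 17 and (17 + 3) / 4 = 5 WQEBBs are reserved,
 * even if some trailing mbufs end up fully inlined and freed.
 */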
2152 * Tx burst function for multi-segment packets. Supports all
2153 * types of Tx offloads, uses MLX5_OPCODE_SEND/TSO to build WQEs,
2154 * sends one packet per WQE. The function stops sending if it
2155 * encounters a single-segment packet.
2157 * This routine is responsible for storing the processed mbuf
2158 * into the elts ring buffer and updating elts_head.
2161 * Pointer to TX queue structure.
2163 * Packets to transmit.
2165 * Number of packets in array.
2167 * Pointer to burst routine local context.
2169 * Configured Tx offloads mask. It is fully defined at
2170 * compile time and may be used for optimization.
2173 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
2174 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
2175 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
2176 * MLX5_TXCMP_CODE_TSO - TSO single-segment packet encountered.
2177 * Local context variables updated.
2179 static __rte_always_inline enum mlx5_txcmp_code
2180 mlx5_tx_burst_mseg(struct mlx5_txq_data *__rte_restrict txq,
2181 struct rte_mbuf **__rte_restrict pkts,
2182 unsigned int pkts_n,
2183 struct mlx5_txq_local *__rte_restrict loc,
2186 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
2187 MLX5_ASSERT(pkts_n > loc->pkts_sent);
2188 pkts += loc->pkts_sent + 1;
2189 pkts_n -= loc->pkts_sent;
2191 enum mlx5_txcmp_code ret;
2193 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
2195 * Estimate the number of free elts quickly but conservatively.
2196 * Some segments may be fully inlined and freed;
2197 * ignore this here, since precise estimation is costly.
2199 if (loc->elts_free < NB_SEGS(loc->mbuf))
2200 return MLX5_TXCMP_CODE_EXIT;
2201 if (MLX5_TXOFF_CONFIG(TSO) &&
2202 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)) {
2203 /* Proceed with multi-segment TSO. */
2204 ret = mlx5_tx_packet_multi_tso(txq, loc, olx);
2205 } else if (MLX5_TXOFF_CONFIG(INLINE)) {
2206 /* Proceed with multi-segment SEND with inlining. */
2207 ret = mlx5_tx_packet_multi_inline(txq, loc, olx);
2209 /* Proceed with multi-segment SEND w/o inlining. */
2210 ret = mlx5_tx_packet_multi_send(txq, loc, olx);
2212 if (ret == MLX5_TXCMP_CODE_EXIT)
2213 return MLX5_TXCMP_CODE_EXIT;
2214 if (ret == MLX5_TXCMP_CODE_ERROR)
2215 return MLX5_TXCMP_CODE_ERROR;
2216 /* WQE is built, go to the next packet. */
2219 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
2220 return MLX5_TXCMP_CODE_EXIT;
2221 loc->mbuf = *pkts++;
2223 rte_prefetch0(*pkts);
2224 if (likely(NB_SEGS(loc->mbuf) > 1))
2226 /* Here ends the series of multi-segment packets. */
2227 if (MLX5_TXOFF_CONFIG(TSO) &&
2228 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
2229 return MLX5_TXCMP_CODE_TSO;
2230 return MLX5_TXCMP_CODE_SINGLE;
2236 * Tx burst function for single-segment packets with TSO.
2237 * Supports all types of Tx offloads, except multi-packets.
2238 * Uses MLX5_OPCODE_TSO to build WQEs, sends one packet per WQE.
2239 * The function stops sending if it encounters a multi-segment
2240 * packet or a packet without TSO requested.
2242 * The routine is responsible for storing the processed mbuf into the elts
2243 * ring buffer and updating elts_head if the inline offload is requested,
2244 * because the inlined mbufs may be freed early (the pkts array cannot be stored into elts as a batch).
2247 * Pointer to TX queue structure.
2249 * Packets to transmit.
2251 * Number of packets in array.
2253 * Pointer to burst routine local context.
2255 * Configured Tx offloads mask. It is fully defined at
2256 * compile time and may be used for optimization.
2259 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
2260 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
2261 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
2262 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
2263 * Local context variables updated.
2265 static __rte_always_inline enum mlx5_txcmp_code
2266 mlx5_tx_burst_tso(struct mlx5_txq_data *__rte_restrict txq,
2267 struct rte_mbuf **__rte_restrict pkts,
2268 unsigned int pkts_n,
2269 struct mlx5_txq_local *__rte_restrict loc,
2272 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
2273 MLX5_ASSERT(pkts_n > loc->pkts_sent);
2274 pkts += loc->pkts_sent + 1;
2275 pkts_n -= loc->pkts_sent;
2277 struct mlx5_wqe_dseg *__rte_restrict dseg;
2278 struct mlx5_wqe *__rte_restrict wqe;
2279 unsigned int ds, dlen, hlen, ntcp, vlan = 0;
2282 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
2283 if (MLX5_TXOFF_CONFIG(TXPP)) {
2284 enum mlx5_txcmp_code wret;
2286 /* Generate WAIT for scheduling if requested. */
2287 wret = mlx5_tx_schedule_send(txq, loc, olx);
2288 if (wret == MLX5_TXCMP_CODE_EXIT)
2289 return MLX5_TXCMP_CODE_EXIT;
2290 if (wret == MLX5_TXCMP_CODE_ERROR)
2291 return MLX5_TXCMP_CODE_ERROR;
2293 dlen = rte_pktmbuf_data_len(loc->mbuf);
2294 if (MLX5_TXOFF_CONFIG(VLAN) &&
2295 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
2296 vlan = sizeof(struct rte_vlan_hdr);
2299 * First calculate the WQE size to check
2300 * whether we have enough space in ring buffer.
2302 hlen = loc->mbuf->l2_len + vlan +
2303 loc->mbuf->l3_len + loc->mbuf->l4_len;
2304 if (unlikely((!hlen || !loc->mbuf->tso_segsz)))
2305 return MLX5_TXCMP_CODE_ERROR;
2306 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
2307 hlen += loc->mbuf->outer_l2_len +
2308 loc->mbuf->outer_l3_len;
2309 /* Segment must contain all TSO headers. */
2310 if (unlikely(hlen > MLX5_MAX_TSO_HEADER ||
2311 hlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
2312 hlen > (dlen + vlan)))
2313 return MLX5_TXCMP_CODE_ERROR;
2315 * Check whether there are enough free WQEBBs:
2317 * - Ethernet Segment
2318 * - First Segment of inlined Ethernet data
2319 * - ... data continued ...
2320 * - Finishing Data Segment of pointer type
2322 ds = 4 + (hlen - MLX5_ESEG_MIN_INLINE_SIZE +
2323 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
2324 if (loc->wqe_free < ((ds + 3) / 4))
2325 return MLX5_TXCMP_CODE_EXIT;
2326 #ifdef MLX5_PMD_SOFT_COUNTERS
2327 /* Update sent data bytes/packets counters. */
2328 ntcp = (dlen + vlan - hlen +
2329 loc->mbuf->tso_segsz - 1) /
2330 loc->mbuf->tso_segsz;
2332 * One packet will be accounted for the mbuf itself at the end
2333 * of mlx5_tx_burst via the loc->pkts_sent field, so decrement here.
2335 --ntcp;
2336 txq->stats.opackets += ntcp;
2337 txq->stats.obytes += dlen + vlan + ntcp * hlen;
2340 * Build the TSO WQE:
2342 * - Ethernet Segment with hlen bytes inlined
2343 * - Data Segment of pointer type
2345 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
2346 loc->wqe_last = wqe;
2347 mlx5_tx_cseg_init(txq, loc, wqe, ds,
2348 MLX5_OPCODE_TSO, olx);
2349 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan, hlen, 1, olx);
2350 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) + hlen - vlan;
2351 dlen -= hlen - vlan;
2352 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
2354 * WQE is built, update the loop parameters
2355 * and go to the next packet.
2357 txq->wqe_ci += (ds + 3) / 4;
2358 loc->wqe_free -= (ds + 3) / 4;
2359 if (MLX5_TXOFF_CONFIG(INLINE))
2360 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
2364 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
2365 return MLX5_TXCMP_CODE_EXIT;
2366 loc->mbuf = *pkts++;
2368 rte_prefetch0(*pkts);
2369 if (MLX5_TXOFF_CONFIG(MULTI) &&
2370 unlikely(NB_SEGS(loc->mbuf) > 1))
2371 return MLX5_TXCMP_CODE_MULTI;
2372 if (likely(!(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)))
2373 return MLX5_TXCMP_CODE_SINGLE;
2374 /* Continue with the next TSO packet. */
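/*
 * Worked example for the single-packet TSO WQE above (illustrative, assuming
 * 16-byte WQE segments and an 18-byte minimal inline part): with hlen = 54
 * bytes of headers to inline, the Ethernet Segment needs
 * ceil((54 - 18) / 16) = 3 extra segments, so ds = 4 + 3 = 7 and the WQE
 * occupies (7 + 3) / 4 = 2 WQEBBs; the remaining dlen - (hlen - vlan) payload
 * bytes are attached as a single pointer Data Segment.
 */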
2380 * Analyze the packet and select the best method to send.
2383 * Pointer to TX queue structure.
2385 * Pointer to burst routine local context.
2387 * Configured Tx offloads mask. It is fully defined at
2388 * compile time and may be used for optimization.
2390 * The predefined flag whether to do the complete check for
2391 * multi-segment packets and TSO.
2394 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
2395 * MLX5_TXCMP_CODE_TSO - TSO required, use TSO/LSO.
2396 * MLX5_TXCMP_CODE_SINGLE - single-segment packet, use SEND.
2397 * MLX5_TXCMP_CODE_EMPW - single-segment packet, use MPW.
2399 static __rte_always_inline enum mlx5_txcmp_code
2400 mlx5_tx_able_to_empw(struct mlx5_txq_data *__rte_restrict txq,
2401 struct mlx5_txq_local *__rte_restrict loc,
2405 /* Check for multi-segment packet. */
2407 MLX5_TXOFF_CONFIG(MULTI) &&
2408 unlikely(NB_SEGS(loc->mbuf) > 1))
2409 return MLX5_TXCMP_CODE_MULTI;
2410 /* Check for TSO packet. */
2412 MLX5_TXOFF_CONFIG(TSO) &&
2413 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
2414 return MLX5_TXCMP_CODE_TSO;
2415 /* Check if eMPW is enabled at all. */
2416 if (!MLX5_TXOFF_CONFIG(EMPW))
2417 return MLX5_TXCMP_CODE_SINGLE;
2418 /* Check if eMPW can be engaged. */
2419 if (MLX5_TXOFF_CONFIG(VLAN) &&
2420 unlikely(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) &&
2421 (!MLX5_TXOFF_CONFIG(INLINE) ||
2422 unlikely((rte_pktmbuf_data_len(loc->mbuf) +
2423 sizeof(struct rte_vlan_hdr)) > txq->inlen_empw))) {
2425 * eMPW does not support the VLAN insertion offload; we would have to
2426 * inline the entire packet, but the packet is too long for inlining.
2428 return MLX5_TXCMP_CODE_SINGLE;
2430 return MLX5_TXCMP_CODE_EMPW;
2434 * Check whether the next packet attributes match the eMPW batch ones.
2435 * In addition, for legacy MPW the packet length is checked as well.
2438 * Pointer to TX queue structure.
2440 * Pointer to Ethernet Segment of eMPW batch.
2442 * Pointer to burst routine local context.
2444 * Length of previous packet in MPW descriptor.
2446 * Configured Tx offloads mask. It is fully defined at
2447 * compile time and may be used for optimization.
2450 * true - packet match with eMPW batch attributes.
2451 * false - no match, eMPW should be restarted.
2453 static __rte_always_inline bool
2454 mlx5_tx_match_empw(struct mlx5_txq_data *__rte_restrict txq,
2455 struct mlx5_wqe_eseg *__rte_restrict es,
2456 struct mlx5_txq_local *__rte_restrict loc,
2460 uint8_t swp_flags = 0;
2462 /* Compare the checksum flags, if any. */
2463 if (MLX5_TXOFF_CONFIG(CSUM) &&
2464 txq_ol_cksum_to_cs(loc->mbuf) != es->cs_flags)
2466 /* Compare the Software Parser offsets and flags. */
2467 if (MLX5_TXOFF_CONFIG(SWP) &&
2468 (es->swp_offs != txq_mbuf_to_swp(loc, &swp_flags, olx) ||
2469 es->swp_flags != swp_flags))
2471 /* Fill metadata field if needed. */
2472 if (MLX5_TXOFF_CONFIG(METADATA) &&
2473 es->metadata != (loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2474 rte_cpu_to_be_32(*RTE_FLOW_DYNF_METADATA(loc->mbuf)) : 0))
2476 /* Legacy MPW can send packets with the same length only. */
2477 if (MLX5_TXOFF_CONFIG(MPW) &&
2478 dlen != rte_pktmbuf_data_len(loc->mbuf))
2480 /* There must be no VLAN packets in eMPW loop. */
2481 if (MLX5_TXOFF_CONFIG(VLAN))
2482 MLX5_ASSERT(!(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT));
2483 /* Check if the scheduling is requested. */
2484 if (MLX5_TXOFF_CONFIG(TXPP) &&
2485 loc->mbuf->ol_flags & txq->ts_mask)
2491 * Update send loop variables and WQE for eMPW loop without data inlining.
2492 * Number of Data Segments is equal to the number of sent packets.
2495 * Pointer to TX queue structure.
2497 * Pointer to burst routine local context.
2499 * Number of packets/Data Segments/Packets.
2501 * Accumulated statistics, bytes sent.
2503 * Configured Tx offloads mask. It is fully defined at
2504 * compile time and may be used for optimization.
2507 * Nothing is returned; the loop variables and the last WQE
2508 * are updated in place.
2510 static __rte_always_inline void
2511 mlx5_tx_sdone_empw(struct mlx5_txq_data *__rte_restrict txq,
2512 struct mlx5_txq_local *__rte_restrict loc,
2515 unsigned int olx __rte_unused)
2517 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
2518 #ifdef MLX5_PMD_SOFT_COUNTERS
2519 /* Update sent data bytes counter. */
2520 txq->stats.obytes += slen;
2524 loc->elts_free -= ds;
2525 loc->pkts_sent += ds;
2526 ds += 2;
2527 loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
2528 txq->wqe_ci += (ds + 3) / 4;
2529 loc->wqe_free -= (ds + 3) / 4;
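/*
 * Illustration (a sketch based on the code above): if this eMPW pushed
 * part = 6 packets without inlining, the caller passes ds = 6; adding 2 for
 * the Control and Ethernet Segments of the title WQEBB gives 8 segments in
 * total, so cseg.sq_ds carries 8 and wqe_ci advances by (8 + 3) / 4 = 2
 * WQEBBs, matching the (2 + part + 3) / 4 bookkeeping in
 * mlx5_tx_burst_empw_simple().
 */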
2533 * Update send loop variables and WQE for eMPW loop with data inlining.
2534 * Gets the size of pushed descriptors and data to the WQE.
2537 * Pointer to TX queue structure.
2539 * Pointer to burst routine local context.
2541 * Total size of descriptor/data in bytes.
2543 * Accumulated statistics, data bytes sent.
2545 * The base WQE for the eMPW/MPW descriptor.
2547 * Configured Tx offloads mask. It is fully defined at
2548 * compile time and may be used for optimization.
2551 * Nothing is returned; the loop variables and the last WQE
2552 * are updated in place.
2554 static __rte_always_inline void
2555 mlx5_tx_idone_empw(struct mlx5_txq_data *__rte_restrict txq,
2556 struct mlx5_txq_local *__rte_restrict loc,
2559 struct mlx5_wqe *__rte_restrict wqem,
2560 unsigned int olx __rte_unused)
2562 struct mlx5_wqe_dseg *dseg = &wqem->dseg[0];
2564 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
2565 #ifdef MLX5_PMD_SOFT_COUNTERS
2566 /* Update sent data bytes counter. */
2567 txq->stats.obytes += slen;
2571 if (MLX5_TXOFF_CONFIG(MPW) && dseg->bcount == RTE_BE32(0)) {
2573 * If the legacy MPW session contains inline packets,
2574 * we should set the length of the only inline data segment
2575 * and align the total length to the segment size.
2577 MLX5_ASSERT(len > sizeof(dseg->bcount));
2578 dseg->bcount = rte_cpu_to_be_32((len - sizeof(dseg->bcount)) |
2579 MLX5_ETH_WQE_DATA_INLINE);
2580 len = (len + MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE + 2;
2583 * The session is not legacy MPW or contains the
2584 * data buffer pointer segments.
2586 MLX5_ASSERT((len % MLX5_WSEG_SIZE) == 0);
2587 len = len / MLX5_WSEG_SIZE + 2;
2589 wqem->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | len);
2590 txq->wqe_ci += (len + 3) / 4;
2591 loc->wqe_free -= (len + 3) / 4;
2592 loc->wqe_last = wqem;
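/*
 * Illustration of the DS accounting above (a sketch, assuming a 16-byte WQE
 * segment unit and a 4-byte bcount field): for a legacy MPW session that
 * inlined len = 100 bytes in total (including bcount), the inline Data Segment
 * length becomes 100 - 4 = 96 with MLX5_ETH_WQE_DATA_INLINE set, and len is
 * converted to ceil(100 / 16) + 2 = 9 segments. For pointer sessions len is
 * already a multiple of the segment size, giving len / 16 + 2 segments. In
 * both cases wqe_ci advances by (len + 3) / 4 WQEBBs.
 */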
2596 * The set of Tx burst functions for single-segment packets without TSO
2597 * and with Multi-Packet Writing feature support.
2598 * Supports all types of Tx offloads, except multi-segment packets and TSO.
2600 * Uses MLX5_OPCODE_EMPW to build WQEs if possible and sends as many packets
2601 * per WQE as it can. If eMPW is not configured or the packet cannot be sent
2602 * with eMPW (VLAN insertion), the ordinary SEND opcode is used and only one packet is sent per WQE.
2605 * The functions stop sending when they encounter a multi-segment packet or a
2606 * packet with TSO requested.
2608 * The routines are responsible for storing processed mbufs into the elts ring
2609 * buffer and updating elts_head if the inlining offload is requested. Otherwise
2610 * copying mbufs to elts can be postponed and completed at the end of the burst routine.
2613 * Pointer to TX queue structure.
2615 * Packets to transmit.
2617 * Number of packets in array.
2619 * Pointer to burst routine local context.
2621 * Configured Tx offloads mask. It is fully defined at
2622 * compile time and may be used for optimization.
2625 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
2626 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
2627 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
2628 * MLX5_TXCMP_CODE_TSO - TSO packet encountered.
2629 * MLX5_TXCMP_CODE_SINGLE - used inside functions set.
2630 * MLX5_TXCMP_CODE_EMPW - used inside functions set.
2632 * Local context variables updated.
2635 * The routine sends packets with MLX5_OPCODE_EMPW
2636 * without inlining; this is a dedicated optimized branch.
2637 * No VLAN insertion is supported.
2639 static __rte_always_inline enum mlx5_txcmp_code
2640 mlx5_tx_burst_empw_simple(struct mlx5_txq_data *__rte_restrict txq,
2641 struct rte_mbuf **__rte_restrict pkts,
2642 unsigned int pkts_n,
2643 struct mlx5_txq_local *__rte_restrict loc,
2647 * This subroutine is part of mlx5_tx_burst_single() and sends
2648 * single-segment packets with the eMPW opcode without data inlining.
2650 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
2651 MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
2652 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
2653 MLX5_ASSERT(pkts_n > loc->pkts_sent);
2654 pkts += loc->pkts_sent + 1;
2655 pkts_n -= loc->pkts_sent;
2657 struct mlx5_wqe_dseg *__rte_restrict dseg;
2658 struct mlx5_wqe_eseg *__rte_restrict eseg;
2659 enum mlx5_txcmp_code ret;
2660 unsigned int part, loop;
2661 unsigned int slen = 0;
2664 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
2665 if (MLX5_TXOFF_CONFIG(TXPP)) {
2666 enum mlx5_txcmp_code wret;
2668 /* Generate WAIT for scheduling if requested. */
2669 wret = mlx5_tx_schedule_send(txq, loc, olx);
2670 if (wret == MLX5_TXCMP_CODE_EXIT)
2671 return MLX5_TXCMP_CODE_EXIT;
2672 if (wret == MLX5_TXCMP_CODE_ERROR)
2673 return MLX5_TXCMP_CODE_ERROR;
2675 part = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
2676 MLX5_MPW_MAX_PACKETS :
2677 MLX5_EMPW_MAX_PACKETS);
2678 if (unlikely(loc->elts_free < part)) {
2679 /* We do not have enough elts to save all mbufs. */
2680 if (unlikely(loc->elts_free < MLX5_EMPW_MIN_PACKETS))
2681 return MLX5_TXCMP_CODE_EXIT;
2682 /* But we are still able to send at least a minimal eMPW. */
2683 part = loc->elts_free;
2685 /* Check whether we have enough WQEs */
2686 if (unlikely(loc->wqe_free < ((2 + part + 3) / 4))) {
2687 if (unlikely(loc->wqe_free <
2688 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
2689 return MLX5_TXCMP_CODE_EXIT;
2690 part = (loc->wqe_free * 4) - 2;
2692 if (likely(part > 1))
2693 rte_prefetch0(*pkts);
2694 loc->wqe_last = txq->wqes + (txq->wqe_ci & txq->wqe_m);
2696 * Build eMPW title WQEBB:
2697 * - Control Segment, eMPW opcode
2698 * - Ethernet Segment, no inline
2700 mlx5_tx_cseg_init(txq, loc, loc->wqe_last, part + 2,
2701 MLX5_OPCODE_ENHANCED_MPSW, olx);
2702 mlx5_tx_eseg_none(txq, loc, loc->wqe_last,
2703 olx & ~MLX5_TXOFF_CONFIG_VLAN);
2704 eseg = &loc->wqe_last->eseg;
2705 dseg = &loc->wqe_last->dseg[0];
2707 /* Store the packet length for legacy MPW. */
2708 if (MLX5_TXOFF_CONFIG(MPW))
2709 eseg->mss = rte_cpu_to_be_16
2710 (rte_pktmbuf_data_len(loc->mbuf));
2712 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
2713 #ifdef MLX5_PMD_SOFT_COUNTERS
2714 /* Update sent data bytes counter. */
2719 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
2721 if (unlikely(--loop == 0))
2723 loc->mbuf = *pkts++;
2724 if (likely(loop > 1))
2725 rte_prefetch0(*pkts);
2726 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
2728 * Unroll the completion code to avoid
2729 * returning a variable value; otherwise it results in
2730 * unoptimized sequential checks in the caller.
2732 if (ret == MLX5_TXCMP_CODE_MULTI) {
2734 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
2735 if (unlikely(!loc->elts_free ||
2737 return MLX5_TXCMP_CODE_EXIT;
2738 return MLX5_TXCMP_CODE_MULTI;
2740 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
2741 if (ret == MLX5_TXCMP_CODE_TSO) {
2743 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
2744 if (unlikely(!loc->elts_free ||
2746 return MLX5_TXCMP_CODE_EXIT;
2747 return MLX5_TXCMP_CODE_TSO;
2749 if (ret == MLX5_TXCMP_CODE_SINGLE) {
2751 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
2752 if (unlikely(!loc->elts_free ||
2754 return MLX5_TXCMP_CODE_EXIT;
2755 return MLX5_TXCMP_CODE_SINGLE;
2757 if (ret != MLX5_TXCMP_CODE_EMPW) {
2760 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
2761 return MLX5_TXCMP_CODE_ERROR;
2764 * Check whether the packet parameters coincide
2765 * within the assumed eMPW batch:
2766 * - checksum settings
2768 * - software parser settings
2769 * - packet length (legacy MPW only)
2770 * - scheduling is not required
2772 if (!mlx5_tx_match_empw(txq, eseg, loc, dlen, olx)) {
2775 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
2776 if (unlikely(!loc->elts_free ||
2778 return MLX5_TXCMP_CODE_EXIT;
2782 /* Packet attributes match, continue the same eMPW. */
2784 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
2785 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
2787 /* eMPW is built successfully, update loop parameters. */
2789 MLX5_ASSERT(pkts_n >= part);
2790 #ifdef MLX5_PMD_SOFT_COUNTERS
2791 /* Update sent data bytes counter. */
2792 txq->stats.obytes += slen;
2794 loc->elts_free -= part;
2795 loc->pkts_sent += part;
2796 txq->wqe_ci += (2 + part + 3) / 4;
2797 loc->wqe_free -= (2 + part + 3) / 4;
2799 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
2800 return MLX5_TXCMP_CODE_EXIT;
2801 loc->mbuf = *pkts++;
2802 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
2803 if (unlikely(ret != MLX5_TXCMP_CODE_EMPW))
2805 /* Continue sending eMPW batches. */
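/*
 * Capacity sketch for the non-inline eMPW loop above (illustrative): one eMPW
 * holds the title Control/Ethernet Segments plus one pointer Data Segment per
 * packet, so "part" packets consume (2 + part + 3) / 4 WQEBBs. Conversely,
 * with loc->wqe_free WQEBBs available at most loc->wqe_free * 4 - 2 packets
 * fit, e.g. 8 free WQEBBs allow up to 30 packets in a single eMPW (further
 * capped by MLX5_EMPW_MAX_PACKETS or MLX5_MPW_MAX_PACKETS and by the free elts).
 */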
2811 * The routine sends packets with MLX5_OPCODE_EMPW
2812 * with inlining; VLAN insertion is optionally supported.
2814 static __rte_always_inline enum mlx5_txcmp_code
2815 mlx5_tx_burst_empw_inline(struct mlx5_txq_data *__rte_restrict txq,
2816 struct rte_mbuf **__rte_restrict pkts,
2817 unsigned int pkts_n,
2818 struct mlx5_txq_local *__rte_restrict loc,
2822 * This subroutine is part of mlx5_tx_burst_single() and sends
2823 * single-segment packets with the eMPW opcode with data inlining.
2825 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
2826 MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
2827 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
2828 MLX5_ASSERT(pkts_n > loc->pkts_sent);
2829 pkts += loc->pkts_sent + 1;
2830 pkts_n -= loc->pkts_sent;
2832 struct mlx5_wqe_dseg *__rte_restrict dseg;
2833 struct mlx5_wqe *__rte_restrict wqem;
2834 enum mlx5_txcmp_code ret;
2835 unsigned int room, part, nlim;
2836 unsigned int slen = 0;
2838 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
2839 if (MLX5_TXOFF_CONFIG(TXPP)) {
2840 enum mlx5_txcmp_code wret;
2842 /* Generate WAIT for scheduling if requested. */
2843 wret = mlx5_tx_schedule_send(txq, loc, olx);
2844 if (wret == MLX5_TXCMP_CODE_EXIT)
2845 return MLX5_TXCMP_CODE_EXIT;
2846 if (wret == MLX5_TXCMP_CODE_ERROR)
2847 return MLX5_TXCMP_CODE_ERROR;
2850 * Limit the number of packets in one WQE
2851 * to improve CQE generation latency.
2853 nlim = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
2854 MLX5_MPW_INLINE_MAX_PACKETS :
2855 MLX5_EMPW_MAX_PACKETS);
2856 /* Check whether we have the minimal amount of WQEs. */
2857 if (unlikely(loc->wqe_free <
2858 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
2859 return MLX5_TXCMP_CODE_EXIT;
2860 if (likely(pkts_n > 1))
2861 rte_prefetch0(*pkts);
2862 wqem = txq->wqes + (txq->wqe_ci & txq->wqe_m);
2864 * Build eMPW title WQEBB:
2865 * - Control Segment, eMPW opcode, zero DS
2866 * - Ethernet Segment, no inline
2868 mlx5_tx_cseg_init(txq, loc, wqem, 0,
2869 MLX5_OPCODE_ENHANCED_MPSW, olx);
2870 mlx5_tx_eseg_none(txq, loc, wqem,
2871 olx & ~MLX5_TXOFF_CONFIG_VLAN);
2872 dseg = &wqem->dseg[0];
2873 /* Store the packet length for legacy MPW. */
2874 if (MLX5_TXOFF_CONFIG(MPW))
2875 wqem->eseg.mss = rte_cpu_to_be_16
2876 (rte_pktmbuf_data_len(loc->mbuf));
2877 room = RTE_MIN(MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE,
2878 loc->wqe_free) * MLX5_WQE_SIZE -
2879 MLX5_WQE_CSEG_SIZE -
2881 /* Limit the room for legacy MPW sessions for performance. */
2882 if (MLX5_TXOFF_CONFIG(MPW))
2883 room = RTE_MIN(room,
2884 RTE_MAX(txq->inlen_empw +
2885 sizeof(dseg->bcount) +
2886 (MLX5_TXOFF_CONFIG(VLAN) ?
2887 sizeof(struct rte_vlan_hdr) : 0),
2888 MLX5_MPW_INLINE_MAX_PACKETS *
2889 MLX5_WQE_DSEG_SIZE));
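/*
 * Worked example for the room computation above (illustrative, assuming a
 * 64-byte WQEBB, 16-byte Control and Ethernet Segments, and that the
 * MLX5_WQE_SIZE_MAX cap does not limit the session): with loc->wqe_free = 4
 * the session may span 4 WQEBBs, so room = 4 * 64 - 16 - 16 = 224 bytes are
 * left for Data Segments and inlined data. For legacy MPW the room is further
 * clamped as shown above to keep the session reasonably short.
 */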
2890 /* Build WQE till we have space, packets and resources. */
2893 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
2894 uint8_t *dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
2897 MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
2898 MLX5_ASSERT((room % MLX5_WQE_DSEG_SIZE) == 0);
2899 MLX5_ASSERT((uintptr_t)dseg < (uintptr_t)txq->wqes_end);
2901 * Some Tx offloads may cause an error if the packet is not
2902 * long enough; check against the assumed minimal length.
2904 if (unlikely(dlen <= MLX5_ESEG_MIN_INLINE_SIZE)) {
2906 if (unlikely(!part))
2907 return MLX5_TXCMP_CODE_ERROR;
2909 * We have some successfully built
2910 * packet Data Segments to send.
2912 mlx5_tx_idone_empw(txq, loc, part,
2914 return MLX5_TXCMP_CODE_ERROR;
2916 /* Inline or not inline - that's the Question. */
2917 if (dlen > txq->inlen_empw ||
2918 loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE)
2920 if (MLX5_TXOFF_CONFIG(MPW)) {
2921 if (dlen > txq->inlen_send)
2925 /* Open new inline MPW session. */
2926 tlen += sizeof(dseg->bcount);
2927 dseg->bcount = RTE_BE32(0);
2929 (dseg, sizeof(dseg->bcount));
2932 * Pointer and inline descriptors cannot be
2933 * intermixed within a legacy MPW session.
2935 if (wqem->dseg[0].bcount)
2939 tlen = sizeof(dseg->bcount) + dlen;
2941 /* Inline entire packet, optional VLAN insertion. */
2942 if (MLX5_TXOFF_CONFIG(VLAN) &&
2943 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
2945 * The packet length was already checked in
2946 * mlx5_tx_able_to_empw(), so the packet is
2947 * guaranteed to fit into the inline length.
2950 sizeof(struct rte_vlan_hdr)) <=
2952 tlen += sizeof(struct rte_vlan_hdr);
2955 dseg = mlx5_tx_dseg_vlan(txq, loc, dseg,
2957 #ifdef MLX5_PMD_SOFT_COUNTERS
2958 /* Update sent data bytes counter. */
2959 slen += sizeof(struct rte_vlan_hdr);
2964 dseg = mlx5_tx_dseg_empw(txq, loc, dseg,
2967 if (!MLX5_TXOFF_CONFIG(MPW))
2968 tlen = RTE_ALIGN(tlen, MLX5_WSEG_SIZE);
2969 MLX5_ASSERT(room >= tlen);
2972 * Packet data are completely inlined;
2973 * we can try to free the packet.
2975 if (likely(loc->pkts_sent == loc->mbuf_free)) {
2977 * All packets from the beginning of the burst
2978 * are inlined; the mbufs can be freed directly
2979 * from the original array on tx_burst() exit.
2985 * In order not to call rte_pktmbuf_free_seg() here,
2986 * in the innermost loop (which might be very
2987 * expensive), we just save the mbuf in elts.
2989 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
2994 * Pointer and inline descriptors cannot be
2995 * intermixed within a legacy MPW session.
2997 if (MLX5_TXOFF_CONFIG(MPW) &&
2999 wqem->dseg[0].bcount == RTE_BE32(0))
3002 * Non-inlinable VLAN packets are
3003 * processed outside of this routine.
3005 MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
3006 if (MLX5_TXOFF_CONFIG(VLAN))
3007 MLX5_ASSERT(!(loc->mbuf->ol_flags &
3009 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
3010 /* We have to store mbuf in elts.*/
3011 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3013 room -= MLX5_WQE_DSEG_SIZE;
3014 /* Ring buffer wraparound is checked at the loop end.*/
3017 #ifdef MLX5_PMD_SOFT_COUNTERS
3018 /* Update sent data bytes counter. */
3023 if (unlikely(!pkts_n || !loc->elts_free)) {
3025 * We have no resources/packets to
3026 * continue building descriptors.
3029 mlx5_tx_idone_empw(txq, loc, part,
3031 return MLX5_TXCMP_CODE_EXIT;
3033 loc->mbuf = *pkts++;
3034 if (likely(pkts_n > 1))
3035 rte_prefetch0(*pkts);
3036 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
3038 * Unroll the completion code to avoid
3039 * returning a variable value; otherwise it results in
3040 * unoptimized sequential checks in the caller.
3042 if (ret == MLX5_TXCMP_CODE_MULTI) {
3044 mlx5_tx_idone_empw(txq, loc, part,
3046 if (unlikely(!loc->elts_free ||
3048 return MLX5_TXCMP_CODE_EXIT;
3049 return MLX5_TXCMP_CODE_MULTI;
3051 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
3052 if (ret == MLX5_TXCMP_CODE_TSO) {
3054 mlx5_tx_idone_empw(txq, loc, part,
3056 if (unlikely(!loc->elts_free ||
3058 return MLX5_TXCMP_CODE_EXIT;
3059 return MLX5_TXCMP_CODE_TSO;
3061 if (ret == MLX5_TXCMP_CODE_SINGLE) {
3063 mlx5_tx_idone_empw(txq, loc, part,
3065 if (unlikely(!loc->elts_free ||
3067 return MLX5_TXCMP_CODE_EXIT;
3068 return MLX5_TXCMP_CODE_SINGLE;
3070 if (ret != MLX5_TXCMP_CODE_EMPW) {
3073 mlx5_tx_idone_empw(txq, loc, part,
3075 return MLX5_TXCMP_CODE_ERROR;
3077 /* Check if we have minimal room left. */
3079 if (unlikely(!nlim || room < MLX5_WQE_DSEG_SIZE))
3082 * Check whether the packet parameters coincide
3083 * within the assumed eMPW batch:
3084 * - checksum settings
3086 * - software parser settings
3087 * - packet length (legacy MPW only)
3088 * - scheduling is not required
3090 if (!mlx5_tx_match_empw(txq, &wqem->eseg,
3093 /* Packet attributes match, continue the same eMPW. */
3094 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3095 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3098 * We get here to close an existing eMPW
3099 * session and start a new one.
3101 MLX5_ASSERT(pkts_n);
3103 if (unlikely(!part))
3104 return MLX5_TXCMP_CODE_EXIT;
3105 mlx5_tx_idone_empw(txq, loc, part, slen, wqem, olx);
3106 if (unlikely(!loc->elts_free ||
3108 return MLX5_TXCMP_CODE_EXIT;
3109 /* Continue the loop with new eMPW session. */
3115 * The routine sends packets with ordinary MLX5_OPCODE_SEND.
3116 * Data inlining and VLAN insertion are supported.
3118 static __rte_always_inline enum mlx5_txcmp_code
3119 mlx5_tx_burst_single_send(struct mlx5_txq_data *__rte_restrict txq,
3120 struct rte_mbuf **__rte_restrict pkts,
3121 unsigned int pkts_n,
3122 struct mlx5_txq_local *__rte_restrict loc,
3126 * This subroutine is part of mlx5_tx_burst_single()
3127 * and sends single-segment packets with the SEND opcode.
3129 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3130 MLX5_ASSERT(pkts_n > loc->pkts_sent);
3131 pkts += loc->pkts_sent + 1;
3132 pkts_n -= loc->pkts_sent;
3134 struct mlx5_wqe *__rte_restrict wqe;
3135 enum mlx5_txcmp_code ret;
3137 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
3138 if (MLX5_TXOFF_CONFIG(TXPP)) {
3139 enum mlx5_txcmp_code wret;
3141 /* Generate WAIT for scheduling if requested. */
3142 wret = mlx5_tx_schedule_send(txq, loc, olx);
3143 if (wret == MLX5_TXCMP_CODE_EXIT)
3144 return MLX5_TXCMP_CODE_EXIT;
3145 if (wret == MLX5_TXCMP_CODE_ERROR)
3146 return MLX5_TXCMP_CODE_ERROR;
3148 if (MLX5_TXOFF_CONFIG(INLINE)) {
3149 unsigned int inlen, vlan = 0;
3151 inlen = rte_pktmbuf_data_len(loc->mbuf);
3152 if (MLX5_TXOFF_CONFIG(VLAN) &&
3153 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
3154 vlan = sizeof(struct rte_vlan_hdr);
3158 * If inlining is enabled at configuration time,
3159 * the limit must not be less than the minimal size.
3160 * Otherwise we would need an extra check of the data
3161 * size to avoid crashes due to length overflow.
3163 MLX5_ASSERT(txq->inlen_send >=
3164 MLX5_ESEG_MIN_INLINE_SIZE);
3165 if (inlen <= txq->inlen_send) {
3166 unsigned int seg_n, wqe_n;
3168 rte_prefetch0(rte_pktmbuf_mtod
3169 (loc->mbuf, uint8_t *));
3170 /* Check against minimal length. */
3171 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
3172 return MLX5_TXCMP_CODE_ERROR;
3173 if (loc->mbuf->ol_flags &
3174 PKT_TX_DYNF_NOINLINE) {
3176 * The hint flag not to inline packet
3177 * data is set. Check whether we can follow the hint.
3180 if ((!MLX5_TXOFF_CONFIG(EMPW) &&
3182 (MLX5_TXOFF_CONFIG(MPW) &&
3184 if (inlen <= txq->inlen_send)
3187 * The hardware requires the
3188 * minimal inline data header.
3190 goto single_min_inline;
3192 if (MLX5_TXOFF_CONFIG(VLAN) &&
3193 vlan && !txq->vlan_en) {
3195 * We must insert VLAN tag
3196 * by software means.
3198 goto single_part_inline;
3200 goto single_no_inline;
3204 * Completely inlined packet data WQE:
3205 * - Control Segment, SEND opcode
3206 * - Ethernet Segment, no VLAN insertion
3207 * - Data inlined, VLAN optionally inserted
3208 * - Alignment to MLX5_WSEG_SIZE
3209 * Have to estimate amount of WQEBBs
3211 seg_n = (inlen + 3 * MLX5_WSEG_SIZE -
3212 MLX5_ESEG_MIN_INLINE_SIZE +
3213 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3214 /* Check if there are enough WQEBBs. */
3215 wqe_n = (seg_n + 3) / 4;
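/*
 * Worked example for the estimate above (illustrative, assuming 16-byte WQE
 * segments and an 18-byte minimal inline part): for inlen = 128 bytes (VLAN
 * included, if any), seg_n = (128 + 48 - 18 + 15) / 16 = 10 segments and
 * wqe_n = (10 + 3) / 4 = 3 WQEBBs, which is then checked against
 * loc->wqe_free before the WQE is built.
 */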
3216 if (wqe_n > loc->wqe_free)
3217 return MLX5_TXCMP_CODE_EXIT;
3218 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3219 loc->wqe_last = wqe;
3220 mlx5_tx_cseg_init(txq, loc, wqe, seg_n,
3221 MLX5_OPCODE_SEND, olx);
3222 mlx5_tx_eseg_data(txq, loc, wqe,
3223 vlan, inlen, 0, olx);
3224 txq->wqe_ci += wqe_n;
3225 loc->wqe_free -= wqe_n;
3227 * Packet data are completely inlined,
3228 * free the packet immediately.
3230 rte_pktmbuf_free_seg(loc->mbuf);
3231 } else if ((!MLX5_TXOFF_CONFIG(EMPW) ||
3232 MLX5_TXOFF_CONFIG(MPW)) &&
3235 * If minimal inlining is requested, the eMPW
3236 * feature should be disabled, because data is
3237 * inlined into the Ethernet Segment, which
3238 * cannot carry inlined data for eMPW since
3239 * that segment is shared by all packets.
3241 struct mlx5_wqe_dseg *__rte_restrict dseg;
3246 * The inline-mode settings require
3247 * inlining the specified amount of
3248 * data bytes into the Ethernet Segment.
3249 * We should check the free space in the
3250 * WQE ring buffer to inline only partially.
3253 MLX5_ASSERT(txq->inlen_send >= txq->inlen_mode);
3254 MLX5_ASSERT(inlen > txq->inlen_mode);
3255 MLX5_ASSERT(txq->inlen_mode >=
3256 MLX5_ESEG_MIN_INLINE_SIZE);
3258 * Check whether there are enough free WQEBBs:
3260 * - Ethernet Segment
3261 * - First Segment of inlined Ethernet data
3262 * - ... data continued ...
3263 * - Finishing Data Segment of pointer type
3265 ds = (MLX5_WQE_CSEG_SIZE +
3266 MLX5_WQE_ESEG_SIZE +
3267 MLX5_WQE_DSEG_SIZE +
3268 txq->inlen_mode -
3269 MLX5_ESEG_MIN_INLINE_SIZE +
3270 MLX5_WQE_DSEG_SIZE +
3271 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
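/*
 * Worked example for the partial-inline sizing above (illustrative, assuming
 * 16-byte Control/Ethernet/Data Segments, a 16-byte WQE segment unit and an
 * 18-byte minimal inline part): with txq->inlen_mode = 128,
 * ds = (16 + 16 + 16 + 128 - 18 + 16 + 15) / 16 = 11 segments, i.e.
 * (11 + 3) / 4 = 3 WQEBBs for the SEND WQE with inlen_mode bytes inlined and
 * the rest of the packet attached by pointer.
 */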
3272 if (loc->wqe_free < ((ds + 3) / 4))
3273 return MLX5_TXCMP_CODE_EXIT;
3275 * Build the ordinary SEND WQE:
3277 * - Ethernet Segment, inline inlen_mode bytes
3278 * - Data Segment of pointer type
3280 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3281 loc->wqe_last = wqe;
3282 mlx5_tx_cseg_init(txq, loc, wqe, ds,
3283 MLX5_OPCODE_SEND, olx);
3284 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan,
3287 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
3288 txq->inlen_mode - vlan;
3289 inlen -= txq->inlen_mode;
3290 mlx5_tx_dseg_ptr(txq, loc, dseg,
3293 * WQE is built, update the loop parameters
3294 * and go to the next packet.
3296 txq->wqe_ci += (ds + 3) / 4;
3297 loc->wqe_free -= (ds + 3) / 4;
3298 /* We have to store mbuf in elts.*/
3299 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
3300 txq->elts[txq->elts_head++ & txq->elts_m] =
3308 * Partially inlined packet data WQE: we have
3309 * some space in the title WQEBB and can fill it
3310 * with some packet data. It takes one WQEBB,
3311 * which is available, so no extra space check:
3312 * - Control Segment, SEND opcode
3313 * - Ethernet Segment, no VLAN insertion
3314 * - MLX5_ESEG_MIN_INLINE_SIZE bytes of Data
3315 * - Data Segment, pointer type
3317 * We also get here if VLAN insertion is not
3318 * supported by HW and inlining is enabled.
3321 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3322 loc->wqe_last = wqe;
3323 mlx5_tx_cseg_init(txq, loc, wqe, 4,
3324 MLX5_OPCODE_SEND, olx);
3325 mlx5_tx_eseg_dmin(txq, loc, wqe, vlan, olx);
3326 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
3327 MLX5_ESEG_MIN_INLINE_SIZE - vlan;
3329 * The length check is performed above, by
3330 * comparing with txq->inlen_send. We should
3331 * not get overflow here.
3333 MLX5_ASSERT(inlen > MLX5_ESEG_MIN_INLINE_SIZE);
3334 dlen = inlen - MLX5_ESEG_MIN_INLINE_SIZE;
3335 mlx5_tx_dseg_ptr(txq, loc, &wqe->dseg[1],
3339 /* We have to store mbuf in elts.*/
3340 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
3341 txq->elts[txq->elts_head++ & txq->elts_m] =
3345 #ifdef MLX5_PMD_SOFT_COUNTERS
3346 /* Update sent data bytes counter. */
3347 txq->stats.obytes += vlan +
3348 rte_pktmbuf_data_len(loc->mbuf);
3352 * No inlining at all: saving CPU cycles was
3353 * prioritized at configuration time, so we should not
3354 * copy any packet data into the WQE.
3356 * SEND WQE, one WQEBB:
3357 * - Control Segment, SEND opcode
3358 * - Ethernet Segment, optional VLAN, no inline
3359 * - Data Segment, pointer type
3362 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3363 loc->wqe_last = wqe;
3364 mlx5_tx_cseg_init(txq, loc, wqe, 3,
3365 MLX5_OPCODE_SEND, olx);
3366 mlx5_tx_eseg_none(txq, loc, wqe, olx);
3368 (txq, loc, &wqe->dseg[0],
3369 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3370 rte_pktmbuf_data_len(loc->mbuf), olx);
3374 * We should not store the mbuf pointer in elts
3375 * if no inlining is configured; this is done
3376 * by the calling routine in a batch copy.
3378 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
3380 #ifdef MLX5_PMD_SOFT_COUNTERS
3381 /* Update sent data bytes counter. */
3382 txq->stats.obytes += rte_pktmbuf_data_len(loc->mbuf);
3383 if (MLX5_TXOFF_CONFIG(VLAN) &&
3384 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3385 txq->stats.obytes +=
3386 sizeof(struct rte_vlan_hdr);
3391 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3392 return MLX5_TXCMP_CODE_EXIT;
3393 loc->mbuf = *pkts++;
3395 rte_prefetch0(*pkts);
3396 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
3397 if (unlikely(ret != MLX5_TXCMP_CODE_SINGLE))
3403 static __rte_always_inline enum mlx5_txcmp_code
3404 mlx5_tx_burst_single(struct mlx5_txq_data *__rte_restrict txq,
3405 struct rte_mbuf **__rte_restrict pkts,
3406 unsigned int pkts_n,
3407 struct mlx5_txq_local *__rte_restrict loc,
3410 enum mlx5_txcmp_code ret;
3412 ret = mlx5_tx_able_to_empw(txq, loc, olx, false);
3413 if (ret == MLX5_TXCMP_CODE_SINGLE)
3415 MLX5_ASSERT(ret == MLX5_TXCMP_CODE_EMPW);
3417 /* Optimize for inline/no inline eMPW send. */
3418 ret = (MLX5_TXOFF_CONFIG(INLINE)) ?
3419 mlx5_tx_burst_empw_inline
3420 (txq, pkts, pkts_n, loc, olx) :
3421 mlx5_tx_burst_empw_simple
3422 (txq, pkts, pkts_n, loc, olx);
3423 if (ret != MLX5_TXCMP_CODE_SINGLE)
3425 /* The resources to send one packet should remain. */
3426 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3428 ret = mlx5_tx_burst_single_send(txq, pkts, pkts_n, loc, olx);
3429 MLX5_ASSERT(ret != MLX5_TXCMP_CODE_SINGLE);
3430 if (ret != MLX5_TXCMP_CODE_EMPW)
3432 /* The resources to send one packet should remain. */
3433 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3438 * DPDK Tx callback template. This is a configured template used to generate
3439 * routines optimized for a specified offload setup.
3440 * One of the generated functions is chosen at SQ configuration time.
3443 * Generic pointer to TX queue structure.
3445 * Packets to transmit.
3447 * Number of packets in array.
3449 * Configured offloads mask, presenting the bits of MLX5_TXOFF_CONFIG_xxx
3450 * values. Should be static to take advantage of compile-time static configuration.
3454 * Number of packets successfully transmitted (<= pkts_n).
3456 static __rte_always_inline uint16_t
3457 mlx5_tx_burst_tmpl(struct mlx5_txq_data *__rte_restrict txq,
3458 struct rte_mbuf **__rte_restrict pkts,
3462 struct mlx5_txq_local loc;
3463 enum mlx5_txcmp_code ret;
3466 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
3467 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
3468 if (unlikely(!pkts_n))
3470 if (MLX5_TXOFF_CONFIG(INLINE))
3474 loc.wqe_last = NULL;
3477 loc.pkts_loop = loc.pkts_sent;
3479 * Check if there are some CQEs and, if any:
3480 * - process encountered errors
3481 * - process the completed WQEs
3482 * - free related mbufs
3483 * - doorbell the NIC about processed CQEs
3485 rte_prefetch0(*(pkts + loc.pkts_sent));
3486 mlx5_tx_handle_completion(txq, olx);
3488 * Calculate the number of available resources - elts and WQEs.
3489 * There are two possible different scenarios:
3490 * - no data inlining into WQEs, one WQEBB may contain up to
3491 * four packets, in this case elts become the scarce resource
3492 * - data inlining into WQEs, one packet may require multiple
3493 * WQEBBs, the WQEs become the limiting factor.
3495 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
3496 loc.elts_free = txq->elts_s -
3497 (uint16_t)(txq->elts_head - txq->elts_tail);
3498 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
3499 loc.wqe_free = txq->wqe_s -
3500 (uint16_t)(txq->wqe_ci - txq->wqe_pi);
3501 if (unlikely(!loc.elts_free || !loc.wqe_free))
3505 * Fetch the packet from the array. Usually this is the first
3506 * packet in a series of multi/single-segment packets.
3508 loc.mbuf = *(pkts + loc.pkts_sent);
3509 /* Dedicated branch for multi-segment packets. */
3510 if (MLX5_TXOFF_CONFIG(MULTI) &&
3511 unlikely(NB_SEGS(loc.mbuf) > 1)) {
3513 * Multi-segment packet encountered.
3514 * Hardware is able to process it only
3515 * with SEND/TSO opcodes, one packet
3516 * per WQE, do it in dedicated routine.
3519 MLX5_ASSERT(loc.pkts_sent >= loc.pkts_copy);
3520 part = loc.pkts_sent - loc.pkts_copy;
3521 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
3523 * There are some single-segment mbufs not
3524 * stored in elts. The mbufs must be in the
3525 * same order as WQEs, so we must copy the
3526 * mbufs to elts here, before the mbufs of the
3527 * coming multi-segment packet are appended.
3529 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy,
3531 loc.pkts_copy = loc.pkts_sent;
3533 MLX5_ASSERT(pkts_n > loc.pkts_sent);
3534 ret = mlx5_tx_burst_mseg(txq, pkts, pkts_n, &loc, olx);
3535 if (!MLX5_TXOFF_CONFIG(INLINE))
3536 loc.pkts_copy = loc.pkts_sent;
3538 * These return code checks are supposed
3539 * to be optimized out due to routine inlining.
3541 if (ret == MLX5_TXCMP_CODE_EXIT) {
3543 * The routine returns this code when
3544 * all packets are sent or there are not
3545 * enough resources to complete the request.
3549 if (ret == MLX5_TXCMP_CODE_ERROR) {
3551 * The routine returns this code when some error
3552 * occurred in the format of an incoming packet.
3554 txq->stats.oerrors++;
3557 if (ret == MLX5_TXCMP_CODE_SINGLE) {
3559 * A single-segment packet was encountered
3560 * in the array; try to send it in the
3561 * best optimized way, possibly engaging eMPW.
3563 goto enter_send_single;
3565 if (MLX5_TXOFF_CONFIG(TSO) &&
3566 ret == MLX5_TXCMP_CODE_TSO) {
3568 * The single-segment TSO packet was
3569 * encountered in the array.
3571 goto enter_send_tso;
3573 /* We must not get here. Something is going wrong. */
3575 txq->stats.oerrors++;
3578 /* Dedicated branch for single-segment TSO packets. */
3579 if (MLX5_TXOFF_CONFIG(TSO) &&
3580 unlikely(loc.mbuf->ol_flags & PKT_TX_TCP_SEG)) {
3582 * TSO might require a special way of inlining
3583 * (dedicated parameters) and is sent with the
3584 * MLX5_OPCODE_TSO opcode only; handle this
3585 * in a dedicated branch.
3588 MLX5_ASSERT(NB_SEGS(loc.mbuf) == 1);
3589 MLX5_ASSERT(pkts_n > loc.pkts_sent);
3590 ret = mlx5_tx_burst_tso(txq, pkts, pkts_n, &loc, olx);
3592 * These return code checks are supposed
3593 * to be optimized out due to routine inlining.
3595 if (ret == MLX5_TXCMP_CODE_EXIT)
3597 if (ret == MLX5_TXCMP_CODE_ERROR) {
3598 txq->stats.oerrors++;
3601 if (ret == MLX5_TXCMP_CODE_SINGLE)
3602 goto enter_send_single;
3603 if (MLX5_TXOFF_CONFIG(MULTI) &&
3604 ret == MLX5_TXCMP_CODE_MULTI) {
3606 * The multi-segment packet was
3607 * encountered in the array.
3609 goto enter_send_multi;
3611 /* We must not get here. Something is going wrong. */
3613 txq->stats.oerrors++;
3617 * The dedicated branch for single-segment packets
3618 * without TSO. Often these can be sent using
3619 * MLX5_OPCODE_EMPW with multiple packets in one WQE.
3620 * The routine builds the WQEs until it encounters
3621 * a TSO or multi-segment packet (in case these
3622 * offloads are requested at SQ configuration time).
3625 MLX5_ASSERT(pkts_n > loc.pkts_sent);
3626 ret = mlx5_tx_burst_single(txq, pkts, pkts_n, &loc, olx);
3628 * These return code checks are supposed
3629 * to be optimized out due to routine inlining.
3631 if (ret == MLX5_TXCMP_CODE_EXIT)
3633 if (ret == MLX5_TXCMP_CODE_ERROR) {
3634 txq->stats.oerrors++;
3637 if (MLX5_TXOFF_CONFIG(MULTI) &&
3638 ret == MLX5_TXCMP_CODE_MULTI) {
3640 * The multi-segment packet was
3641 * encountered in the array.
3643 goto enter_send_multi;
3645 if (MLX5_TXOFF_CONFIG(TSO) &&
3646 ret == MLX5_TXCMP_CODE_TSO) {
3648 * The single-segment TSO packet was
3649 * encountered in the array.
3651 goto enter_send_tso;
3653 /* We must not get here. Something is going wrong. */
3655 txq->stats.oerrors++;
3659 * Main Tx loop is completed, do the rest:
3660 * - set completion request if thresholds are reached
3661 * - doorbell the hardware
3662 * - copy the rest of mbufs to elts (if any)
3664 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE) ||
3665 loc.pkts_sent >= loc.pkts_copy);
3666 /* Take a shortcut if nothing is sent. */
3667 if (unlikely(loc.pkts_sent == loc.pkts_loop))
3669 /* Request CQE generation if limits are reached. */
3670 mlx5_tx_request_completion(txq, &loc, olx);
3672 * Ring the QP doorbell immediately after WQE building completion
3673 * to improve latency. Pure software-related data treatment
3674 * can be completed after the doorbell. Tx CQEs for this SQ are
3675 * processed in this thread only, by polling.
3677 * The rdma-core library can map the doorbell register in two ways,
3678 * depending on the environment variable "MLX5_SHUT_UP_BF":
3680 * - as regular cached memory, when the variable is either missing or
3681 * set to zero. This type of mapping may cause significant
3682 * doorbell register write latency and requires an explicit memory
3683 * write barrier to mitigate this issue and prevent write combining.
3685 * - as non-cached memory, when the variable is present and set to a
3686 * non-zero value. This type of mapping may cause a performance impact
3687 * under heavy load, but the explicit write memory barrier is
3688 * not required and core performance may improve.
3690 * - the legacy behaviour (prior to the 19.08 release) was to use some
3691 * heuristics to decide whether the write memory barrier should
3692 * be performed. This behaviour is supported by specifying
3693 * tx_db_nc=2; the write barrier is skipped if the application provides
3694 * the full recommended burst of packets, assuming the next
3695 * packets are coming and the write barrier will be issued on
3696 * the next burst (after descriptor writing, at least).
3698 mlx5_tx_dbrec_cond_wmb(txq, loc.wqe_last, !txq->db_nc &&
3699 (!txq->db_heu || pkts_n % MLX5_TX_DEFAULT_BURST));
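/*
 * Note on the condition above (a reading of the expression as written, not a
 * normative statement): the write memory barrier is requested only when the
 * doorbell is mapped as cached memory (txq->db_nc == 0) and either the
 * heuristic mode is off (txq->db_heu == 0) or the burst is not an exact
 * multiple of MLX5_TX_DEFAULT_BURST, which matches the mapping description
 * given above.
 */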
3700 /* Not all of the mbufs may be stored into elts yet. */
3701 part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent - loc.pkts_copy;
3702 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
3704 * There are some single-segment mbufs not stored in elts.
3705 * This can only happen if the last packet was single-segment.
3706 * The copying is gathered into one place because it is
3707 * a good opportunity to optimize it with SIMD.
3708 * Unfortunately, if inlining is enabled, gaps in the pointer
3709 * array may appear due to early freeing of the inlined mbufs.
3711 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy, part, olx);
3712 loc.pkts_copy = loc.pkts_sent;
3714 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
3715 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
3716 if (pkts_n > loc.pkts_sent) {
3718 * If the burst size is large, there might not be enough CQEs
3719 * fetched from the completion queue and not enough resources
3720 * freed to send all the packets.
3725 #ifdef MLX5_PMD_SOFT_COUNTERS
3726 /* Increment sent packets counter. */
3727 txq->stats.opackets += loc.pkts_sent;
3729 if (MLX5_TXOFF_CONFIG(INLINE) && loc.mbuf_free)
3730 __mlx5_tx_free_mbuf(txq, pkts, loc.mbuf_free, olx);
3731 return loc.pkts_sent;
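/*
 * Usage sketch (hypothetical instantiation, for illustration only): the
 * specialized burst routines are typically generated from this template with
 * the MLX5_TXOFF_DECL() macro and a compile-time constant offload mask, e.g.
 * a declaration like
 *
 *   MLX5_TXOFF_DECL(example_empw,
 *                   MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
 *                   MLX5_TXOFF_CONFIG_EMPW)
 *
 * would produce mlx5_tx_burst_example_empw() calling mlx5_tx_burst_tmpl()
 * with that constant olx, letting the compiler drop every branch guarded by
 * an unrelated MLX5_TXOFF_CONFIG(...) check.
 */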
3734 #endif /* RTE_PMD_MLX5_TX_H_ */