1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2021 6WIND S.A.
3 * Copyright 2021 Mellanox Technologies, Ltd
6 #ifndef RTE_PMD_MLX5_TX_H_
7 #define RTE_PMD_MLX5_TX_H_
10 #include <sys/queue.h>
13 #include <rte_mempool.h>
14 #include <rte_common.h>
15 #include <rte_spinlock.h>
17 #include <mlx5_common_mr.h>
20 #include "mlx5_autoconf.h"
23 /* TX burst subroutines return codes. */
24 enum mlx5_txcmp_code {
25 MLX5_TXCMP_CODE_EXIT = 0,
26 MLX5_TXCMP_CODE_ERROR,
27 MLX5_TXCMP_CODE_SINGLE,
28 MLX5_TXCMP_CODE_MULTI,
34 * These defines are used to configure the set of Tx burst routine options
35 * supported at compile time. Options that are not specified are optimized
36 * out, since the corresponding if conditions can be evaluated at compile time.
37 * The offloads with a bigger runtime check overhead (requiring more CPU
38 * cycles to skip) should have a bigger index - this is needed to select the
39 * better matching routine when there is no exact match and some offloads are not
42 #define MLX5_TXOFF_CONFIG_MULTI (1u << 0) /* Multi-segment packets.*/
43 #define MLX5_TXOFF_CONFIG_TSO (1u << 1) /* TCP send offload supported.*/
44 #define MLX5_TXOFF_CONFIG_SWP (1u << 2) /* Tunnels/SW Parser offloads.*/
45 #define MLX5_TXOFF_CONFIG_CSUM (1u << 3) /* Check Sums offloaded. */
46 #define MLX5_TXOFF_CONFIG_INLINE (1u << 4) /* Data inlining supported. */
47 #define MLX5_TXOFF_CONFIG_VLAN (1u << 5) /* VLAN insertion supported.*/
48 #define MLX5_TXOFF_CONFIG_METADATA (1u << 6) /* Flow metadata. */
49 #define MLX5_TXOFF_CONFIG_EMPW (1u << 8) /* Enhanced MPW supported.*/
50 #define MLX5_TXOFF_CONFIG_MPW (1u << 9) /* Legacy MPW supported.*/
51 #define MLX5_TXOFF_CONFIG_TXPP (1u << 10) /* Scheduling on timestamp.*/
53 /* The most common offload groups. */
54 #define MLX5_TXOFF_CONFIG_NONE 0
55 #define MLX5_TXOFF_CONFIG_FULL (MLX5_TXOFF_CONFIG_MULTI | \
56 MLX5_TXOFF_CONFIG_TSO | \
57 MLX5_TXOFF_CONFIG_SWP | \
58 MLX5_TXOFF_CONFIG_CSUM | \
59 MLX5_TXOFF_CONFIG_INLINE | \
60 MLX5_TXOFF_CONFIG_VLAN | \
61 MLX5_TXOFF_CONFIG_METADATA)
63 #define MLX5_TXOFF_CONFIG(mask) (olx & MLX5_TXOFF_CONFIG_##mask)
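/*
 * Illustrative example (not part of the original code): "olx" is a
 * compile-time constant in every specialized burst routine, so a check like
 *
 *	if (MLX5_TXOFF_CONFIG(TSO)) {
 *		... TSO-specific handling ...
 *	}
 *
 * is resolved by the compiler and the branch is emitted only in the routines
 * declared with MLX5_TXOFF_CONFIG_TSO in their option set (see
 * MLX5_TXOFF_DECL() below).
 */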
65 #define MLX5_TXOFF_PRE_DECL(func) \
66 uint16_t mlx5_tx_burst_##func(void *txq, \
67 struct rte_mbuf **pkts, \
70 #define MLX5_TXOFF_DECL(func, olx) \
71 uint16_t mlx5_tx_burst_##func(void *txq, \
72 struct rte_mbuf **pkts, \
75 return mlx5_tx_burst_tmpl((struct mlx5_txq_data *)txq, \
76 pkts, pkts_n, (olx)); \
79 /* Mbuf dynamic flag mask for the no-inline hint. */
80 extern uint64_t rte_net_mlx5_dynf_inline_mask;
81 #define PKT_TX_DYNF_NOINLINE rte_net_mlx5_dynf_inline_mask
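/*
 * A packet with this flag set in ol_flags requests that its data is not
 * inlined into the WQE (see the checks in mlx5_tx_mseg_memcpy() and
 * mlx5_tx_packet_multi_inline() below).
 */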
83 extern uint32_t mlx5_ptype_table[] __rte_cache_aligned;
84 extern uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned;
85 extern uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned;
87 struct mlx5_txq_stats {
88 #ifdef MLX5_PMD_SOFT_COUNTERS
89 uint64_t opackets; /**< Total of successfully sent packets. */
90 uint64_t obytes; /**< Total of successfully sent bytes. */
92 uint64_t oerrors; /**< Total number of failed transmitted packets. */
95 /* TX queue send local data. */
97 struct mlx5_txq_local {
98 struct mlx5_wqe *wqe_last; /* last sent WQE pointer. */
99 struct rte_mbuf *mbuf; /* first mbuf to process. */
100 uint16_t pkts_copy; /* packets copied to elts. */
101 uint16_t pkts_sent; /* packets sent. */
102 uint16_t pkts_loop; /* packets sent on loop entry. */
103 uint16_t elts_free; /* available elts remain. */
104 uint16_t wqe_free; /* available wqe remain. */
105 uint16_t mbuf_off; /* data offset in current mbuf. */
106 uint16_t mbuf_nseg; /* number of remaining mbufs. */
107 uint16_t mbuf_free; /* number of inline mbufs to free. */
110 /* TX queue descriptor. */
112 struct mlx5_txq_data {
113 uint16_t elts_head; /* Current counter in (*elts)[]. */
114 uint16_t elts_tail; /* Counter of first element awaiting completion. */
115 uint16_t elts_comp; /* elts index since last completion request. */
116 uint16_t elts_s; /* Number of mbuf elements. */
117 uint16_t elts_m; /* Mask for mbuf elements indices. */
118 /* Fields related to elts mbuf storage. */
119 uint16_t wqe_ci; /* Consumer index for work queue. */
120 uint16_t wqe_pi; /* Producer index for work queue. */
121 uint16_t wqe_s; /* Number of WQ elements. */
122 uint16_t wqe_m; /* Mask for WQ element indices. */
123 uint16_t wqe_comp; /* WQE index since last completion request. */
124 uint16_t wqe_thres; /* WQE threshold to request completion in CQ. */
125 /* WQ related fields. */
126 uint16_t cq_ci; /* Consumer index for completion queue. */
127 uint16_t cq_pi; /* Producer index for completion queue. */
128 uint16_t cqe_s; /* Number of CQ elements. */
129 uint16_t cqe_m; /* Mask for CQ indices. */
130 /* CQ related fields. */
131 uint16_t elts_n:4; /* elts[] length (in log2). */
132 uint16_t cqe_n:4; /* Number of CQ elements (in log2). */
133 uint16_t wqe_n:4; /* Number of WQ elements (in log2). */
134 uint16_t tso_en:1; /* When set hardware TSO is enabled. */
135 uint16_t tunnel_en:1;
136 /* When set, TX offloads for tunneled packets are supported. */
137 uint16_t swp_en:1; /* Whether SW parser is enabled. */
138 uint16_t vlan_en:1; /* VLAN insertion in WQE is supported. */
139 uint16_t db_nc:1; /* Doorbell mapped to non-cached region. */
140 uint16_t db_heu:1; /* Doorbell heuristic write barrier. */
141 uint16_t fast_free:1; /* mbuf fast free on Tx is enabled. */
142 uint16_t inlen_send; /* Ordinary send data inline size. */
143 uint16_t inlen_empw; /* eMPW max packet size to inline. */
144 uint16_t inlen_mode; /* Minimal data length to inline. */
145 uint32_t qp_num_8s; /* QP number shifted by 8. */
146 uint64_t offloads; /* Offloads for Tx Queue. */
147 struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
148 struct mlx5_wqe *wqes; /* Work queue. */
149 struct mlx5_wqe *wqes_end; /* Work queue array limit. */
150 #ifdef RTE_LIBRTE_MLX5_DEBUG
151 uint32_t *fcqs; /* Free completion queue (debug extended). */
153 uint16_t *fcqs; /* Free completion queue. */
155 volatile struct mlx5_cqe *cqes; /* Completion queue. */
156 volatile uint32_t *qp_db; /* Work queue doorbell. */
157 volatile uint32_t *cq_db; /* Completion queue doorbell. */
158 uint16_t port_id; /* Port ID of device. */
159 uint16_t idx; /* Queue index. */
160 uint64_t ts_mask; /* Timestamp flag dynamic mask. */
161 int32_t ts_offset; /* Timestamp field dynamic offset. */
162 struct mlx5_dev_ctx_shared *sh; /* Shared context. */
163 struct mlx5_txq_stats stats; /* TX queue counters. */
165 rte_spinlock_t *uar_lock;
166 /* UAR access lock required for 32bit implementations */
168 struct rte_mbuf *elts[0];
169 /* Storage for queued packets, must be the last field. */
170 } __rte_cache_aligned;
173 MLX5_TXQ_TYPE_STANDARD, /* Standard Tx queue. */
174 MLX5_TXQ_TYPE_HAIRPIN, /* Hairpin Tx queue. */
177 /* TX queue control descriptor. */
178 struct mlx5_txq_ctrl {
179 LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
180 uint32_t refcnt; /* Reference counter. */
181 unsigned int socket; /* CPU socket ID for allocations. */
182 enum mlx5_txq_type type; /* The txq ctrl type. */
183 unsigned int max_inline_data; /* Max inline data. */
184 unsigned int max_tso_header; /* Max TSO header size. */
185 struct mlx5_txq_obj *obj; /* Verbs/DevX queue object. */
186 struct mlx5_priv *priv; /* Back pointer to private data. */
187 off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */
188 void *bf_reg; /* BlueFlame register from Verbs. */
189 uint16_t dump_file_n; /* Number of dump files. */
190 struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */
191 uint32_t hairpin_status; /* Hairpin binding status. */
192 struct mlx5_txq_data txq; /* Data path structure. */
193 /* Must be the last field in the structure, contains elts[]. */
198 int mlx5_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id);
199 int mlx5_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id);
200 int mlx5_tx_queue_start_primary(struct rte_eth_dev *dev, uint16_t queue_id);
201 int mlx5_tx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t queue_id);
202 int mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
203 unsigned int socket, const struct rte_eth_txconf *conf);
204 int mlx5_tx_hairpin_queue_setup
205 (struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
206 const struct rte_eth_hairpin_conf *hairpin_conf);
207 void mlx5_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
208 void txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl);
209 int mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd);
210 void mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev);
211 int mlx5_txq_obj_verify(struct rte_eth_dev *dev);
212 struct mlx5_txq_ctrl *mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx,
213 uint16_t desc, unsigned int socket,
214 const struct rte_eth_txconf *conf);
215 struct mlx5_txq_ctrl *mlx5_txq_hairpin_new
216 (struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
217 const struct rte_eth_hairpin_conf *hairpin_conf);
218 struct mlx5_txq_ctrl *mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx);
219 int mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx);
220 int mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx);
221 int mlx5_txq_verify(struct rte_eth_dev *dev);
222 void txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl);
223 void txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl);
224 uint64_t mlx5_get_tx_port_offloads(struct rte_eth_dev *dev);
225 void mlx5_txq_dynf_timestamp_set(struct rte_eth_dev *dev);
229 uint16_t removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
231 void mlx5_tx_handle_completion(struct mlx5_txq_data *__rte_restrict txq,
232 unsigned int olx __rte_unused);
233 int mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset);
234 void mlx5_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
235 struct rte_eth_txq_info *qinfo);
236 int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
237 struct rte_eth_burst_mode *mode);
241 uint32_t mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, struct rte_mbuf *mb);
245 MLX5_TXOFF_PRE_DECL(full_empw);
246 MLX5_TXOFF_PRE_DECL(none_empw);
247 MLX5_TXOFF_PRE_DECL(md_empw);
248 MLX5_TXOFF_PRE_DECL(mt_empw);
249 MLX5_TXOFF_PRE_DECL(mtsc_empw);
250 MLX5_TXOFF_PRE_DECL(mti_empw);
251 MLX5_TXOFF_PRE_DECL(mtv_empw);
252 MLX5_TXOFF_PRE_DECL(mtiv_empw);
253 MLX5_TXOFF_PRE_DECL(sc_empw);
254 MLX5_TXOFF_PRE_DECL(sci_empw);
255 MLX5_TXOFF_PRE_DECL(scv_empw);
256 MLX5_TXOFF_PRE_DECL(sciv_empw);
257 MLX5_TXOFF_PRE_DECL(i_empw);
258 MLX5_TXOFF_PRE_DECL(v_empw);
259 MLX5_TXOFF_PRE_DECL(iv_empw);
261 /* mlx5_tx_nompw.c */
263 MLX5_TXOFF_PRE_DECL(full);
264 MLX5_TXOFF_PRE_DECL(none);
265 MLX5_TXOFF_PRE_DECL(md);
266 MLX5_TXOFF_PRE_DECL(mt);
267 MLX5_TXOFF_PRE_DECL(mtsc);
268 MLX5_TXOFF_PRE_DECL(mti);
269 MLX5_TXOFF_PRE_DECL(mtv);
270 MLX5_TXOFF_PRE_DECL(mtiv);
271 MLX5_TXOFF_PRE_DECL(sc);
272 MLX5_TXOFF_PRE_DECL(sci);
273 MLX5_TXOFF_PRE_DECL(scv);
274 MLX5_TXOFF_PRE_DECL(sciv);
275 MLX5_TXOFF_PRE_DECL(i);
276 MLX5_TXOFF_PRE_DECL(v);
277 MLX5_TXOFF_PRE_DECL(iv);
281 MLX5_TXOFF_PRE_DECL(full_ts_nompw);
282 MLX5_TXOFF_PRE_DECL(full_ts_nompwi);
283 MLX5_TXOFF_PRE_DECL(full_ts);
284 MLX5_TXOFF_PRE_DECL(full_ts_noi);
285 MLX5_TXOFF_PRE_DECL(none_ts);
286 MLX5_TXOFF_PRE_DECL(mdi_ts);
287 MLX5_TXOFF_PRE_DECL(mti_ts);
288 MLX5_TXOFF_PRE_DECL(mtiv_ts);
292 MLX5_TXOFF_PRE_DECL(none_mpw);
293 MLX5_TXOFF_PRE_DECL(mci_mpw);
294 MLX5_TXOFF_PRE_DECL(mc_mpw);
295 MLX5_TXOFF_PRE_DECL(i_mpw);
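/*
 * The routine name suffixes appear to encode the compile-time offload set a
 * routine is built with: "m" multi-segment, "t" TSO, "s" SW parser, "c"
 * checksum, "i" inline, "v" VLAN and "md" metadata, while "_empw", "_mpw"
 * and "_ts" denote enhanced MPW, legacy MPW and scheduling on timestamp.
 * This mapping is an inference for the reader's convenience; the
 * authoritative definitions are the MLX5_TXOFF_DECL() instances in the
 * corresponding mlx5_tx_*.c files.
 */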
297 static __rte_always_inline uint64_t *
298 mlx5_tx_bfreg(struct mlx5_txq_data *txq)
300 return MLX5_PROC_PRIV(txq->port_id)->uar_table[txq->idx];
304 * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
305 * 64bit architectures.
308 * value to write in CPU endian format.
310 * Address to write to.
312 * Address of the lock to use for that UAR access.
314 static __rte_always_inline void
315 __mlx5_uar_write64_relaxed(uint64_t val, void *addr,
316 rte_spinlock_t *lock __rte_unused)
319 *(uint64_t *)addr = val;
320 #else /* !RTE_ARCH_64 */
321 rte_spinlock_lock(lock);
322 *(uint32_t *)addr = val;
324 *((uint32_t *)addr + 1) = val >> 32;
325 rte_spinlock_unlock(lock);
330 * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
331 * 64bit architectures while guaranteeing the order of execution with the
332 * code being executed.
335 * value to write in CPU endian format.
337 * Address to write to.
339 * Address of the lock to use for that UAR access.
341 static __rte_always_inline void
342 __mlx5_uar_write64(uint64_t val, void *addr, rte_spinlock_t *lock)
345 __mlx5_uar_write64_relaxed(val, addr, lock);
348 /* Assist macros, used instead of directly calling the functions they wrap. */
350 #define mlx5_uar_write64_relaxed(val, dst, lock) \
351 __mlx5_uar_write64_relaxed(val, dst, NULL)
352 #define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, NULL)
354 #define mlx5_uar_write64_relaxed(val, dst, lock) \
355 __mlx5_uar_write64_relaxed(val, dst, lock)
356 #define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, lock)
360 * Query LKey from a packet buffer for Tx. If not found, add the mempool.
363 * Pointer to Tx queue structure.
368 * Searched LKey on success, UINT32_MAX on no match.
370 static __rte_always_inline uint32_t
371 mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
373 struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
374 uintptr_t addr = (uintptr_t)mb->buf_addr;
377 /* Check generation bit to see if there's any change on existing MRs. */
378 if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
379 mlx5_mr_flush_local_cache(mr_ctrl);
380 /* Linear search on MR cache array. */
381 lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
382 MLX5_MR_CACHE_N, addr);
383 if (likely(lkey != UINT32_MAX))
385 /* Take slower bottom-half on miss. */
386 return mlx5_tx_mb2mr_bh(txq, mb);
390 * Ring TX queue doorbell and flush the update if requested.
393 * Pointer to TX queue structure.
395 * Pointer to the last WQE posted in the NIC.
397 * Request for write memory barrier after BlueFlame update.
399 static __rte_always_inline void
400 mlx5_tx_dbrec_cond_wmb(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe,
403 uint64_t *dst = mlx5_tx_bfreg(txq);
404 volatile uint64_t *src = ((volatile uint64_t *)wqe);
407 *txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci);
408 /* Ensure ordering between DB record and BF copy. */
410 mlx5_uar_write64_relaxed(*src, dst, txq->uar_lock);
416 * Ring TX queue doorbell and flush the update by write memory barrier.
419 * Pointer to TX queue structure.
421 * Pointer to the last WQE posted in the NIC.
423 static __rte_always_inline void
424 mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe)
426 mlx5_tx_dbrec_cond_wmb(txq, wqe, 1);
430 * Convert timestamp from mbuf format to linear counter
431 * of Clock Queue completions (24 bits).
434 * Pointer to the device shared context to fetch Tx
435 * packet pacing timestamp and parameters.
437 * Timestamp from mbuf to convert.
439 * positive or zero value - completion ID to wait.
440 * negative value - conversion error.
442 static __rte_always_inline int32_t
443 mlx5_txpp_convert_tx_ts(struct mlx5_dev_ctx_shared *sh, uint64_t mts)
450 * Atomically read the two uint64_t fields and compare their LSB bits.
451 * If they do not match, the timestamp was updated by
452 * the service thread and the data should be re-read.
454 rte_compiler_barrier();
455 ci = __atomic_load_n(&sh->txpp.ts.ci_ts, __ATOMIC_RELAXED);
456 ts = __atomic_load_n(&sh->txpp.ts.ts, __ATOMIC_RELAXED);
457 rte_compiler_barrier();
458 if (!((ts ^ ci) << (64 - MLX5_CQ_INDEX_WIDTH)))
461 /* Perform the skew correction, positive value to send earlier. */
462 mts -= sh->txpp.skew;
464 if (unlikely(mts >= UINT64_MAX / 2)) {
465 /* The difference is negative, mts is in the past. */
466 __atomic_fetch_add(&sh->txpp.err_ts_past,
467 1, __ATOMIC_RELAXED);
470 tick = sh->txpp.tick;
472 /* Convert delta to completions, round up. */
473 mts = (mts + tick - 1) / tick;
474 if (unlikely(mts >= (1 << MLX5_CQ_INDEX_WIDTH) / 2 - 1)) {
475 /* mts is too far in the future. */
476 __atomic_fetch_add(&sh->txpp.err_ts_future,
477 1, __ATOMIC_RELAXED);
480 mts <<= 64 - MLX5_CQ_INDEX_WIDTH;
482 ci >>= 64 - MLX5_CQ_INDEX_WIDTH;
487 * Set Software Parser flags and offsets in Ethernet Segment of WQE.
488 * Flags must be initialized to zero beforehand.
491 * Pointer to burst routine local context.
493 * Pointer to store Software Parser flags.
495 * Configured Tx offloads mask. It is fully defined at
496 * compile time and may be used for optimization.
499 * Software Parser offsets packed in dword.
500 * Software Parser flags are set by pointer.
502 static __rte_always_inline uint32_t
503 txq_mbuf_to_swp(struct mlx5_txq_local *__rte_restrict loc,
508 unsigned int idx, off;
511 if (!MLX5_TXOFF_CONFIG(SWP))
513 ol = loc->mbuf->ol_flags;
514 tunnel = ol & PKT_TX_TUNNEL_MASK;
516 * Check whether Software Parser is required.
517 * Only customized tunnels may request it.
519 if (likely(tunnel != PKT_TX_TUNNEL_UDP && tunnel != PKT_TX_TUNNEL_IP))
522 * The index should have:
523 * bit[0:1] = PKT_TX_L4_MASK
524 * bit[4] = PKT_TX_IPV6
525 * bit[8] = PKT_TX_OUTER_IPV6
526 * bit[9] = PKT_TX_OUTER_UDP
528 idx = (ol & (PKT_TX_L4_MASK | PKT_TX_IPV6 | PKT_TX_OUTER_IPV6)) >> 52;
529 idx |= (tunnel == PKT_TX_TUNNEL_UDP) ? (1 << 9) : 0;
530 *swp_flags = mlx5_swp_types_table[idx];
532 * Set offsets for SW parser. Since ConnectX-5, SW parser just
533 * complements HW parser. SW parser starts to engage only if HW parser
534 * can't reach a header. For the older devices, HW parser will not kick
535 * in if any of SWP offsets is set. Therefore, all of the L3 offsets
536 * should be set regardless of HW offload.
538 off = loc->mbuf->outer_l2_len;
539 if (MLX5_TXOFF_CONFIG(VLAN) && ol & PKT_TX_VLAN_PKT)
540 off += sizeof(struct rte_vlan_hdr);
541 set = (off >> 1) << 8; /* Outer L3 offset. */
542 off += loc->mbuf->outer_l3_len;
543 if (tunnel == PKT_TX_TUNNEL_UDP)
544 set |= off >> 1; /* Outer L4 offset. */
545 if (ol & (PKT_TX_IPV4 | PKT_TX_IPV6)) { /* Inner IP. */
546 const uint64_t csum = ol & PKT_TX_L4_MASK;
547 off += loc->mbuf->l2_len;
548 set |= (off >> 1) << 24; /* Inner L3 offset. */
549 if (csum == PKT_TX_TCP_CKSUM ||
550 csum == PKT_TX_UDP_CKSUM ||
551 (MLX5_TXOFF_CONFIG(TSO) && ol & PKT_TX_TCP_SEG)) {
552 off += loc->mbuf->l3_len;
553 set |= (off >> 1) << 16; /* Inner L4 offset. */
556 set = rte_cpu_to_le_32(set);
561 * Convert the Checksum offloads to Verbs.
564 * Pointer to the mbuf.
567 * Converted checksum flags.
569 static __rte_always_inline uint8_t
570 txq_ol_cksum_to_cs(struct rte_mbuf *buf)
573 uint8_t is_tunnel = !!(buf->ol_flags & PKT_TX_TUNNEL_MASK);
574 const uint64_t ol_flags_mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK |
575 PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM;
578 * The index should have:
579 * bit[0] = PKT_TX_TCP_SEG
580 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
581 * bit[4] = PKT_TX_IP_CKSUM
582 * bit[8] = PKT_TX_OUTER_IP_CKSUM
585 idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (!!is_tunnel << 9);
586 return mlx5_cksum_table[idx];
590 * Free the mbufs from the linear array of pointers.
593 * Pointer to Tx queue structure.
595 * Pointer to the array of packets to be freed.
597 * Number of packets to be freed.
599 * Configured Tx offloads mask. It is fully defined at
600 * compile time and may be used for optimization.
602 static __rte_always_inline void
603 mlx5_tx_free_mbuf(struct mlx5_txq_data *__rte_restrict txq,
604 struct rte_mbuf **__rte_restrict pkts,
606 unsigned int olx __rte_unused)
608 struct rte_mempool *pool = NULL;
609 struct rte_mbuf **p_free = NULL;
610 struct rte_mbuf *mbuf;
611 unsigned int n_free = 0;
614 * The implemented algorithm eliminates
615 * copying pointers to a temporary array
616 * for rte_mempool_put_bulk() calls.
621 * Free mbufs directly to the pool in bulk
622 * if fast free offload is engaged
624 if (!MLX5_TXOFF_CONFIG(MULTI) && txq->fast_free) {
627 rte_mempool_put_bulk(pool, (void *)pkts, pkts_n);
633 * Decrement mbuf reference counter, detach
634 * indirect and external buffers if needed.
636 mbuf = rte_pktmbuf_prefree_seg(*pkts);
637 if (likely(mbuf != NULL)) {
638 MLX5_ASSERT(mbuf == *pkts);
639 if (likely(n_free != 0)) {
640 if (unlikely(pool != mbuf->pool))
641 /* From different pool. */
644 /* Start new scan array. */
651 if (unlikely(pkts_n == 0)) {
657 * This happens if mbuf is still referenced.
658 * We can't put it back to the pool, skip.
662 if (unlikely(n_free != 0))
663 /* There is some array to free.*/
665 if (unlikely(pkts_n == 0))
666 /* Last mbuf, nothing to free. */
672 * This loop is implemented to avoid multiple
673 * inlining of rte_mempool_put_bulk().
679 * Free the array of pre-freed mbufs
680 * belonging to the same memory pool.
682 rte_mempool_put_bulk(pool, (void *)p_free, n_free);
683 if (unlikely(mbuf != NULL)) {
684 /* There is a request to start a new scan. */
689 if (likely(pkts_n != 0))
692 * This is the last mbuf to be freed.
693 * Do one more loop iteration to complete.
694 * This is a rare case of the last unique mbuf.
699 if (likely(pkts_n == 0))
708 * Not inlined version of the buffer free routine, provided for an
709 * optimal call on the tx_burst completion.
711 static __rte_noinline void
712 __mlx5_tx_free_mbuf(struct mlx5_txq_data *__rte_restrict txq,
713 struct rte_mbuf **__rte_restrict pkts,
715 unsigned int olx __rte_unused)
717 mlx5_tx_free_mbuf(txq, pkts, pkts_n, olx);
721 * Free the mbufs from the elts ring buffer up to the new tail.
724 * Pointer to Tx queue structure.
726 * Index in elts to free up to, becomes new elts tail.
728 * Configured Tx offloads mask. It is fully defined at
729 * compile time and may be used for optimization.
731 static __rte_always_inline void
732 mlx5_tx_free_elts(struct mlx5_txq_data *__rte_restrict txq,
734 unsigned int olx __rte_unused)
736 uint16_t n_elts = tail - txq->elts_tail;
739 MLX5_ASSERT(n_elts <= txq->elts_s);
741 * Implement a loop to support ring buffer wraparound
742 * with single inlining of mlx5_tx_free_mbuf().
747 part = txq->elts_s - (txq->elts_tail & txq->elts_m);
748 part = RTE_MIN(part, n_elts);
750 MLX5_ASSERT(part <= txq->elts_s);
751 mlx5_tx_free_mbuf(txq,
752 &txq->elts[txq->elts_tail & txq->elts_m],
754 txq->elts_tail += part;
760 * Store the mbufs being sent into the elts ring buffer.
761 * On Tx completion these mbufs will be freed.
764 * Pointer to Tx queue structure.
766 * Pointer to array of packets to be stored.
768 * Number of packets to be stored.
770 * Configured Tx offloads mask. It is fully defined at
771 * compile time and may be used for optimization.
773 static __rte_always_inline void
774 mlx5_tx_copy_elts(struct mlx5_txq_data *__rte_restrict txq,
775 struct rte_mbuf **__rte_restrict pkts,
777 unsigned int olx __rte_unused)
780 struct rte_mbuf **elts = (struct rte_mbuf **)txq->elts;
784 part = txq->elts_s - (txq->elts_head & txq->elts_m);
786 MLX5_ASSERT(part <= txq->elts_s);
787 /* This code is a good candidate for vectorizing with SIMD. */
788 rte_memcpy((void *)(elts + (txq->elts_head & txq->elts_m)),
790 RTE_MIN(part, pkts_n) * sizeof(struct rte_mbuf *));
791 txq->elts_head += pkts_n;
792 if (unlikely(part < pkts_n))
793 /* The copy is wrapping around the elts array. */
794 rte_memcpy((void *)elts, (void *)(pkts + part),
795 (pkts_n - part) * sizeof(struct rte_mbuf *));
799 * Check if the completion request flag should be set in the last WQE.
800 * Both pushed mbufs and WQEs are monitored and the completion request
801 * flag is set if any of the thresholds is reached.
804 * Pointer to TX queue structure.
806 * Pointer to burst routine local context.
808 * Configured Tx offloads mask. It is fully defined at
809 * compile time and may be used for optimization.
811 static __rte_always_inline void
812 mlx5_tx_request_completion(struct mlx5_txq_data *__rte_restrict txq,
813 struct mlx5_txq_local *__rte_restrict loc,
816 uint16_t head = txq->elts_head;
819 part = MLX5_TXOFF_CONFIG(INLINE) ?
820 0 : loc->pkts_sent - loc->pkts_copy;
822 if ((uint16_t)(head - txq->elts_comp) >= MLX5_TX_COMP_THRESH ||
823 (MLX5_TXOFF_CONFIG(INLINE) &&
824 (uint16_t)(txq->wqe_ci - txq->wqe_comp) >= txq->wqe_thres)) {
825 volatile struct mlx5_wqe *last = loc->wqe_last;
828 txq->elts_comp = head;
829 if (MLX5_TXOFF_CONFIG(INLINE))
830 txq->wqe_comp = txq->wqe_ci;
831 /* Request unconditional completion on last WQE. */
832 last->cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
833 MLX5_COMP_MODE_OFFSET);
834 /* Save elts_head in dedicated free on completion queue. */
835 #ifdef RTE_LIBRTE_MLX5_DEBUG
836 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head |
837 (last->cseg.opcode >> 8) << 16;
839 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head;
841 /* A CQE slot must always be available. */
842 MLX5_ASSERT((txq->cq_pi - txq->cq_ci) <= txq->cqe_s);
847 * Build the Control Segment with specified opcode:
849 * - MLX5_OPCODE_ENHANCED_MPSW
853 * Pointer to TX queue structure.
855 * Pointer to burst routine local context.
857 * Pointer to WQE to fill with built Control Segment.
859 * Supposed length of WQE in segments.
861 * SQ WQE opcode to put into Control Segment.
863 * Configured Tx offloads mask. It is fully defined at
864 * compile time and may be used for optimization.
866 static __rte_always_inline void
867 mlx5_tx_cseg_init(struct mlx5_txq_data *__rte_restrict txq,
868 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
869 struct mlx5_wqe *__rte_restrict wqe,
872 unsigned int olx __rte_unused)
874 struct mlx5_wqe_cseg *__rte_restrict cs = &wqe->cseg;
876 /* For legacy MPW replace the EMPW by TSO with modifier. */
877 if (MLX5_TXOFF_CONFIG(MPW) && opcode == MLX5_OPCODE_ENHANCED_MPSW)
878 opcode = MLX5_OPCODE_TSO | MLX5_OPC_MOD_MPW << 24;
879 cs->opcode = rte_cpu_to_be_32((txq->wqe_ci << 8) | opcode);
880 cs->sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
881 cs->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
882 MLX5_COMP_MODE_OFFSET);
883 cs->misc = RTE_BE32(0);
887 * Build the Synchronize Queue Segment with specified completion index.
890 * Pointer to TX queue structure.
892 * Pointer to burst routine local context.
894 * Pointer to WQE to fill with built Synchronize Queue Segment.
896 * Completion index in Clock Queue to wait.
898 * Configured Tx offloads mask. It is fully defined at
899 * compile time and may be used for optimization.
901 static __rte_always_inline void
902 mlx5_tx_wseg_init(struct mlx5_txq_data *restrict txq,
903 struct mlx5_txq_local *restrict loc __rte_unused,
904 struct mlx5_wqe *restrict wqe,
906 unsigned int olx __rte_unused)
908 struct mlx5_wqe_qseg *qs;
910 qs = RTE_PTR_ADD(wqe, MLX5_WSEG_SIZE);
911 qs->max_index = rte_cpu_to_be_32(wci);
912 qs->qpn_cqn = rte_cpu_to_be_32(txq->sh->txpp.clock_queue.cq_obj.cq->id);
913 qs->reserved0 = RTE_BE32(0);
914 qs->reserved1 = RTE_BE32(0);
918 * Build the Ethernet Segment without inlined data.
919 * Supports Software Parser, Checksums and VLAN insertion Tx offload features.
922 * Pointer to TX queue structure.
924 * Pointer to burst routine local context.
926 * Pointer to WQE to fill with built Ethernet Segment.
928 * Configured Tx offloads mask. It is fully defined at
929 * compile time and may be used for optimization.
931 static __rte_always_inline void
932 mlx5_tx_eseg_none(struct mlx5_txq_data *__rte_restrict txq __rte_unused,
933 struct mlx5_txq_local *__rte_restrict loc,
934 struct mlx5_wqe *__rte_restrict wqe,
937 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
941 * Calculate and set check sum flags first, dword field
942 * in segment may be shared with Software Parser flags.
944 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
945 es->flags = rte_cpu_to_le_32(csum);
947 * Calculate and set Software Parser offsets and flags.
948 * These flags are set for custom UDP and IP tunnel packets.
950 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
951 /* Fill metadata field if needed. */
952 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
953 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
954 rte_cpu_to_be_32(*RTE_FLOW_DYNF_METADATA(loc->mbuf)) :
956 /* Engage VLAN tag insertion feature if requested. */
957 if (MLX5_TXOFF_CONFIG(VLAN) &&
958 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
960 * We should get here only if the device supports
961 * this feature correctly.
963 MLX5_ASSERT(txq->vlan_en);
964 es->inline_hdr = rte_cpu_to_be_32(MLX5_ETH_WQE_VLAN_INSERT |
965 loc->mbuf->vlan_tci);
967 es->inline_hdr = RTE_BE32(0);
972 * Build the Ethernet Segment with minimal inlined data
973 * of MLX5_ESEG_MIN_INLINE_SIZE bytes length. This is
974 * used to fill the gap in single WQEBB WQEs.
975 * Supports Software Parser, Checksums and VLAN
976 * insertion Tx offload features.
979 * Pointer to TX queue structure.
981 * Pointer to burst routine local context.
983 * Pointer to WQE to fill with built Ethernet Segment.
985 * Length of VLAN tag insertion if any.
987 * Configured Tx offloads mask. It is fully defined at
988 * compile time and may be used for optimization.
990 static __rte_always_inline void
991 mlx5_tx_eseg_dmin(struct mlx5_txq_data *__rte_restrict txq __rte_unused,
992 struct mlx5_txq_local *__rte_restrict loc,
993 struct mlx5_wqe *__rte_restrict wqe,
997 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
999 uint8_t *psrc, *pdst;
1002 * Calculate and set check sum flags first, dword field
1003 * in segment may be shared with Software Parser flags.
1005 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
1006 es->flags = rte_cpu_to_le_32(csum);
1008 * Calculate and set Software Parser offsets and flags.
1009 * These flags are set for custom UDP and IP tunnel packets.
1011 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
1012 /* Fill metadata field if needed. */
1013 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
1014 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
1015 rte_cpu_to_be_32(*RTE_FLOW_DYNF_METADATA(loc->mbuf)) :
1017 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
1018 es->inline_hdr_sz = RTE_BE16(MLX5_ESEG_MIN_INLINE_SIZE);
1019 es->inline_data = *(unaligned_uint16_t *)psrc;
1020 psrc += sizeof(uint16_t);
1021 pdst = (uint8_t *)(es + 1);
1022 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
1023 /* Implement VLAN tag insertion as part of inline data. */
1024 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
1025 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
1026 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
1027 /* Insert VLAN ethertype + VLAN tag. */
1028 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
1029 ((RTE_ETHER_TYPE_VLAN << 16) |
1030 loc->mbuf->vlan_tci);
1031 pdst += sizeof(struct rte_vlan_hdr);
1032 /* Copy the remaining two bytes from the packet data. */
1033 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
1034 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
1036 /* Fill the gap in the title WQEBB with inline data. */
1037 rte_mov16(pdst, psrc);
1042 * Build the Ethernet Segment with entire packet data inlining. Checks the
1043 * boundary of WQEBB and ring buffer wrapping, supports Software Parser,
1044 * Checksums and VLAN insertion Tx offload features.
1047 * Pointer to TX queue structure.
1049 * Pointer to burst routine local context.
1051 * Pointer to WQE to fill with built Ethernet Segment.
1053 * Length of VLAN tag insertion if any.
1055 * Length of data to inline (VLAN included, if any).
1057 * TSO flag, set mss field from the packet.
1059 * Configured Tx offloads mask. It is fully defined at
1060 * compile time and may be used for optimization.
1063 * Pointer to the next Data Segment (aligned and wrapped around).
1065 static __rte_always_inline struct mlx5_wqe_dseg *
1066 mlx5_tx_eseg_data(struct mlx5_txq_data *__rte_restrict txq,
1067 struct mlx5_txq_local *__rte_restrict loc,
1068 struct mlx5_wqe *__rte_restrict wqe,
1074 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
1076 uint8_t *psrc, *pdst;
1080 * Calculate and set check sum flags first, dword field
1081 * in segment may be shared with Software Parser flags.
1083 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
1086 csum |= loc->mbuf->tso_segsz;
1087 es->flags = rte_cpu_to_be_32(csum);
1089 es->flags = rte_cpu_to_le_32(csum);
1092 * Calculate and set Software Parser offsets and flags.
1093 * These flags are set for custom UDP and IP tunnel packets.
1095 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
1096 /* Fill metadata field if needed. */
1097 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
1098 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
1099 rte_cpu_to_be_32(*RTE_FLOW_DYNF_METADATA(loc->mbuf)) :
1101 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
1102 es->inline_hdr_sz = rte_cpu_to_be_16(inlen);
1103 es->inline_data = *(unaligned_uint16_t *)psrc;
1104 psrc += sizeof(uint16_t);
1105 pdst = (uint8_t *)(es + 1);
1106 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
1107 /* Implement VLAN tag insertion as part of inline data. */
1108 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
1109 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
1110 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
1111 /* Insert VLAN ethertype + VLAN tag. */
1112 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
1113 ((RTE_ETHER_TYPE_VLAN << 16) |
1114 loc->mbuf->vlan_tci);
1115 pdst += sizeof(struct rte_vlan_hdr);
1116 /* Copy the remaining two bytes from the packet data. */
1117 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
1118 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
1119 psrc += sizeof(uint16_t);
1121 /* Fill the gap in the title WQEBB with inline data. */
1122 rte_mov16(pdst, psrc);
1123 psrc += sizeof(rte_v128u32_t);
1125 pdst = (uint8_t *)(es + 2);
1126 MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
1127 MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
1128 inlen -= MLX5_ESEG_MIN_INLINE_SIZE;
1130 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
1131 return (struct mlx5_wqe_dseg *)pdst;
1134 * The WQEBB space availability is checked by caller.
1135 * Here we should be aware of WQE ring buffer wraparound only.
1137 part = (uint8_t *)txq->wqes_end - pdst;
1138 part = RTE_MIN(part, inlen);
1140 rte_memcpy(pdst, psrc, part);
1142 if (likely(!inlen)) {
1144 * If return value is not used by the caller
1145 * the code below will be optimized out.
1148 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
1149 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
1150 pdst = (uint8_t *)txq->wqes;
1151 return (struct mlx5_wqe_dseg *)pdst;
1153 pdst = (uint8_t *)txq->wqes;
1160 * Copy data from a chain of mbufs to the specified linear buffer,
1161 * used by the builders supporting the Checksums and VLAN insertion
1162 * Tx offload features. If the data from some mbuf is copied completely,
1163 * this mbuf is freed. A local structure is used to keep the byte stream state.
1166 * Pointer to the destination linear buffer.
1168 * Pointer to burst routine local context.
1170 * Length of data to be copied.
1172 * Length of data to be copied, ignoring the no-inline hint.
1174 * Configured Tx offloads mask. It is fully defined at
1175 * compile time and may be used for optimization.
1178 * Number of actually copied data bytes. This is always greater than or
1179 * equal to the "must" parameter and might be less than "len" if the
1180 * no-inline hint flag is encountered.
1182 static __rte_always_inline unsigned int
1183 mlx5_tx_mseg_memcpy(uint8_t *pdst,
1184 struct mlx5_txq_local *__rte_restrict loc,
1187 unsigned int olx __rte_unused)
1189 struct rte_mbuf *mbuf;
1190 unsigned int part, dlen, copy = 0;
1194 MLX5_ASSERT(must <= len);
1196 /* Allow zero length packets, must check first. */
1197 dlen = rte_pktmbuf_data_len(loc->mbuf);
1198 if (dlen <= loc->mbuf_off) {
1199 /* Exhausted packet, just free. */
1201 loc->mbuf = mbuf->next;
1202 rte_pktmbuf_free_seg(mbuf);
1204 MLX5_ASSERT(loc->mbuf_nseg > 1);
1205 MLX5_ASSERT(loc->mbuf);
1207 if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
1212 * We already copied the minimal
1213 * requested amount of data.
1218 if (diff <= rte_pktmbuf_data_len(loc->mbuf)) {
1220 * Copy only the minimal required
1221 * part of the data buffer.
1228 dlen -= loc->mbuf_off;
1229 psrc = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
1231 part = RTE_MIN(len, dlen);
1232 rte_memcpy(pdst, psrc, part);
1234 loc->mbuf_off += part;
1237 if (loc->mbuf_off >= rte_pktmbuf_data_len(loc->mbuf)) {
1239 /* Exhausted packet, just free. */
1241 loc->mbuf = mbuf->next;
1242 rte_pktmbuf_free_seg(mbuf);
1244 MLX5_ASSERT(loc->mbuf_nseg >= 1);
1254 * Build the Ethernet Segment with inlined data from multi-segment packet.
1255 * Checks the boundary of WQEBB and ring buffer wrapping, supports Software
1256 * Parser, Checksums and VLAN insertion Tx offload features.
1259 * Pointer to TX queue structure.
1261 * Pointer to burst routine local context.
1263 * Pointer to WQE to fill with built Ethernet Segment.
1265 * Length of VLAN tag insertion if any.
1267 * Length of data to inline (VLAN included, if any).
1269 * TSO flag, set mss field from the packet.
1271 * Configured Tx offloads mask. It is fully defined at
1272 * compile time and may be used for optimization.
1275 * Pointer to the next Data Segment (aligned and possibly NOT wrapped
1276 * around - caller should do wrapping check on its own).
1278 static __rte_always_inline struct mlx5_wqe_dseg *
1279 mlx5_tx_eseg_mdat(struct mlx5_txq_data *__rte_restrict txq,
1280 struct mlx5_txq_local *__rte_restrict loc,
1281 struct mlx5_wqe *__rte_restrict wqe,
1287 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
1290 unsigned int part, tlen = 0;
1293 * Calculate and set check sum flags first, uint32_t field
1294 * in segment may be shared with Software Parser flags.
1296 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
1299 csum |= loc->mbuf->tso_segsz;
1300 es->flags = rte_cpu_to_be_32(csum);
1302 es->flags = rte_cpu_to_le_32(csum);
1305 * Calculate and set Software Parser offsets and flags.
1306 * These flags are set for custom UDP and IP tunnel packets.
1308 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
1309 /* Fill metadata field if needed. */
1310 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
1311 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
1312 rte_cpu_to_be_32(*RTE_FLOW_DYNF_METADATA(loc->mbuf)) :
1314 MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
1315 pdst = (uint8_t *)&es->inline_data;
1316 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
1317 /* Implement VLAN tag insertion as part of inline data. */
1318 mlx5_tx_mseg_memcpy(pdst, loc,
1319 2 * RTE_ETHER_ADDR_LEN,
1320 2 * RTE_ETHER_ADDR_LEN, olx);
1321 pdst += 2 * RTE_ETHER_ADDR_LEN;
1322 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
1323 ((RTE_ETHER_TYPE_VLAN << 16) |
1324 loc->mbuf->vlan_tci);
1325 pdst += sizeof(struct rte_vlan_hdr);
1326 tlen += 2 * RTE_ETHER_ADDR_LEN + sizeof(struct rte_vlan_hdr);
1328 MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
1330 * The WQEBB space availability is checked by caller.
1331 * Here we should be aware of WQE ring buffer wraparound only.
1333 part = (uint8_t *)txq->wqes_end - pdst;
1334 part = RTE_MIN(part, inlen - tlen);
1340 * Copying may be interrupted inside the routine
1341 * if the no-inline hint flag is encountered.
1343 copy = tso ? inlen : txq->inlen_mode;
1344 copy = tlen >= copy ? 0 : (copy - tlen);
1345 copy = mlx5_tx_mseg_memcpy(pdst, loc, part, copy, olx);
1347 if (likely(inlen <= tlen) || copy < part) {
1348 es->inline_hdr_sz = rte_cpu_to_be_16(tlen);
1350 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
1351 return (struct mlx5_wqe_dseg *)pdst;
1353 pdst = (uint8_t *)txq->wqes;
1354 part = inlen - tlen;
1359 * Build the Data Segment of pointer type.
1362 * Pointer to TX queue structure.
1364 * Pointer to burst routine local context.
1366 * Pointer to WQE to fill with built Data Segment.
1368 * Data buffer to point.
1370 * Data buffer length.
1372 * Configured Tx offloads mask. It is fully defined at
1373 * compile time and may be used for optimization.
1375 static __rte_always_inline void
1376 mlx5_tx_dseg_ptr(struct mlx5_txq_data *__rte_restrict txq,
1377 struct mlx5_txq_local *__rte_restrict loc,
1378 struct mlx5_wqe_dseg *__rte_restrict dseg,
1381 unsigned int olx __rte_unused)
1385 dseg->bcount = rte_cpu_to_be_32(len);
1386 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
1387 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
1391 * Build the Data Segment of pointer type, or of inline type if the data
1392 * length fits into the minimal Data Segment size.
1395 * Pointer to TX queue structure.
1397 * Pointer to burst routine local context.
1399 * Pointer to WQE to fill with built Data Segment.
1401 * Data buffer to point.
1403 * Data buffer length.
1405 * Configured Tx offloads mask. It is fully defined at
1406 * compile time and may be used for optimization.
1408 static __rte_always_inline void
1409 mlx5_tx_dseg_iptr(struct mlx5_txq_data *__rte_restrict txq,
1410 struct mlx5_txq_local *__rte_restrict loc,
1411 struct mlx5_wqe_dseg *__rte_restrict dseg,
1414 unsigned int olx __rte_unused)
1420 if (len > MLX5_DSEG_MIN_INLINE_SIZE) {
1421 dseg->bcount = rte_cpu_to_be_32(len);
1422 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
1423 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
1427 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
1428 /* Unrolled implementation of generic rte_memcpy. */
1429 dst = (uintptr_t)&dseg->inline_data[0];
1430 src = (uintptr_t)buf;
1432 #ifdef RTE_ARCH_STRICT_ALIGN
1433 MLX5_ASSERT(dst == RTE_PTR_ALIGN(dst, sizeof(uint32_t)));
1434 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
1435 dst += sizeof(uint32_t);
1436 src += sizeof(uint32_t);
1437 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
1438 dst += sizeof(uint32_t);
1439 src += sizeof(uint32_t);
1441 *(uint64_t *)dst = *(unaligned_uint64_t *)src;
1442 dst += sizeof(uint64_t);
1443 src += sizeof(uint64_t);
1447 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
1448 dst += sizeof(uint32_t);
1449 src += sizeof(uint32_t);
1452 *(uint16_t *)dst = *(unaligned_uint16_t *)src;
1453 dst += sizeof(uint16_t);
1454 src += sizeof(uint16_t);
1457 *(uint8_t *)dst = *(uint8_t *)src;
1461 * Build the Data Segment of inlined data from single
1462 * segment packet, no VLAN insertion.
1465 * Pointer to TX queue structure.
1467 * Pointer to burst routine local context.
1469 * Pointer to WQE to fill with built Data Segment.
1471 * Data buffer to point.
1473 * Data buffer length.
1475 * Configured Tx offloads mask. It is fully defined at
1476 * compile time and may be used for optimization.
1479 * Pointer to the next Data Segment after inlined data.
1480 * Ring buffer wraparound check is needed. We do not do it here because it
1481 * may not be needed for the last packet in the eMPW session.
1483 static __rte_always_inline struct mlx5_wqe_dseg *
1484 mlx5_tx_dseg_empw(struct mlx5_txq_data *__rte_restrict txq,
1485 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
1486 struct mlx5_wqe_dseg *__rte_restrict dseg,
1489 unsigned int olx __rte_unused)
1494 if (!MLX5_TXOFF_CONFIG(MPW)) {
1495 /* Store the descriptor byte counter for eMPW sessions. */
1496 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
1497 pdst = &dseg->inline_data[0];
1499 /* The entire legacy MPW session counter is stored on close. */
1500 pdst = (uint8_t *)dseg;
1503 * The WQEBB space availability is checked by caller.
1504 * Here we should be aware of WQE ring buffer wraparound only.
1506 part = (uint8_t *)txq->wqes_end - pdst;
1507 part = RTE_MIN(part, len);
1509 rte_memcpy(pdst, buf, part);
1513 if (!MLX5_TXOFF_CONFIG(MPW))
1514 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
1515 /* Note: no final wraparound check here. */
1516 return (struct mlx5_wqe_dseg *)pdst;
1518 pdst = (uint8_t *)txq->wqes;
1525 * Build the Data Segment of inlined data from single
1526 * segment packet with VLAN insertion.
1529 * Pointer to TX queue structure.
1531 * Pointer to burst routine local context.
1533 * Pointer to the dseg to fill with the built Data Segment.
1535 * Data buffer to point.
1537 * Data buffer length.
1539 * Configured Tx offloads mask. It is fully defined at
1540 * compile time and may be used for optimization.
1543 * Pointer to the next Data Segment after inlined data.
1544 * Ring buffer wraparound check is needed.
1546 static __rte_always_inline struct mlx5_wqe_dseg *
1547 mlx5_tx_dseg_vlan(struct mlx5_txq_data *__rte_restrict txq,
1548 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
1549 struct mlx5_wqe_dseg *__rte_restrict dseg,
1552 unsigned int olx __rte_unused)
1558 MLX5_ASSERT(len > MLX5_ESEG_MIN_INLINE_SIZE);
1559 if (!MLX5_TXOFF_CONFIG(MPW)) {
1560 /* Store the descriptor byte counter for eMPW sessions. */
1561 dseg->bcount = rte_cpu_to_be_32
1562 ((len + sizeof(struct rte_vlan_hdr)) |
1563 MLX5_ETH_WQE_DATA_INLINE);
1564 pdst = &dseg->inline_data[0];
1566 /* The entire legacy MPW session counter is stored on close. */
1567 pdst = (uint8_t *)dseg;
1569 memcpy(pdst, buf, MLX5_DSEG_MIN_INLINE_SIZE);
1570 buf += MLX5_DSEG_MIN_INLINE_SIZE;
1571 pdst += MLX5_DSEG_MIN_INLINE_SIZE;
1572 len -= MLX5_DSEG_MIN_INLINE_SIZE;
1573 /* Insert VLAN ethertype + VLAN tag. Pointer is aligned. */
1574 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
1575 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
1576 pdst = (uint8_t *)txq->wqes;
1577 *(uint32_t *)pdst = rte_cpu_to_be_32((RTE_ETHER_TYPE_VLAN << 16) |
1578 loc->mbuf->vlan_tci);
1579 pdst += sizeof(struct rte_vlan_hdr);
1581 * The WQEBB space availability is checked by caller.
1582 * Here we should be aware of WQE ring buffer wraparound only.
1584 part = (uint8_t *)txq->wqes_end - pdst;
1585 part = RTE_MIN(part, len);
1587 rte_memcpy(pdst, buf, part);
1591 if (!MLX5_TXOFF_CONFIG(MPW))
1592 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
1593 /* Note: no final wraparound check here. */
1594 return (struct mlx5_wqe_dseg *)pdst;
1596 pdst = (uint8_t *)txq->wqes;
1603 * Build the Ethernet Segment with optionally inlined data with
1604 * VLAN insertion and following Data Segments (if any) from
1605 * multi-segment packet. Used by ordinary send and TSO.
1608 * Pointer to TX queue structure.
1610 * Pointer to burst routine local context.
1612 * Pointer to WQE to fill with built Ethernet/Data Segments.
1614 * Length of VLAN header to insert, 0 means no VLAN insertion.
1616 * Data length to inline. For TSO this parameter specifies the exact value,
1617 * for the ordinary send routine it can be aligned by the caller to provide
1618 * better WQE space saving and data buffer start address alignment.
1619 * This length includes the VLAN header being inserted.
1621 * Zero means ordinary send, inlined data can be extended,
1622 * otherwise this is TSO, inlined data length is fixed.
1624 * Configured Tx offloads mask. It is fully defined at
1625 * compile time and may be used for optimization.
1628 * Actual size of built WQE in segments.
1630 static __rte_always_inline unsigned int
1631 mlx5_tx_mseg_build(struct mlx5_txq_data *__rte_restrict txq,
1632 struct mlx5_txq_local *__rte_restrict loc,
1633 struct mlx5_wqe *__rte_restrict wqe,
1637 unsigned int olx __rte_unused)
1639 struct mlx5_wqe_dseg *__rte_restrict dseg;
1642 MLX5_ASSERT((rte_pktmbuf_pkt_len(loc->mbuf) + vlan) >= inlen);
1643 loc->mbuf_nseg = NB_SEGS(loc->mbuf);
1646 dseg = mlx5_tx_eseg_mdat(txq, loc, wqe, vlan, inlen, tso, olx);
1647 if (!loc->mbuf_nseg)
1650 * There are still some mbufs remaining, not inlined.
1651 * The first mbuf may be partially inlined and we
1652 * must process the possible non-zero data offset.
1654 if (loc->mbuf_off) {
1659 * Exhausted packets must have been dropped before.
1660 * A non-zero offset means some data remained
1661 * in the packet.
1663 MLX5_ASSERT(loc->mbuf_off < rte_pktmbuf_data_len(loc->mbuf));
1664 MLX5_ASSERT(rte_pktmbuf_data_len(loc->mbuf));
1665 dptr = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
1667 dlen = rte_pktmbuf_data_len(loc->mbuf) - loc->mbuf_off;
1669 * Build the pointer/minimal Data Segment.
1670 * Do ring buffer wrapping check in advance.
1672 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
1673 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
1674 mlx5_tx_dseg_iptr(txq, loc, dseg, dptr, dlen, olx);
1675 /* Store the mbuf to be freed on completion. */
1676 MLX5_ASSERT(loc->elts_free);
1677 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
1680 if (--loc->mbuf_nseg == 0)
1682 loc->mbuf = loc->mbuf->next;
1686 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
1687 struct rte_mbuf *mbuf;
1689 /* Zero length segment found, just skip. */
1691 loc->mbuf = loc->mbuf->next;
1692 rte_pktmbuf_free_seg(mbuf);
1693 if (--loc->mbuf_nseg == 0)
1696 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
1697 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
1700 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
1701 rte_pktmbuf_data_len(loc->mbuf), olx);
1702 MLX5_ASSERT(loc->elts_free);
1703 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
1706 if (--loc->mbuf_nseg == 0)
1708 loc->mbuf = loc->mbuf->next;
1713 /* Calculate actual segments used from the dseg pointer. */
1714 if ((uintptr_t)wqe < (uintptr_t)dseg)
1715 ds = ((uintptr_t)dseg - (uintptr_t)wqe) / MLX5_WSEG_SIZE;
1717 ds = (((uintptr_t)dseg - (uintptr_t)wqe) +
1718 txq->wqe_s * MLX5_WQE_SIZE) / MLX5_WSEG_SIZE;
1723 * The routine checks the timestamp flag in the current packet,
1724 * and pushes a WAIT WQE into the queue if scheduling is required.
1727 * Pointer to TX queue structure.
1729 * Pointer to burst routine local context.
1731 * Configured Tx offloads mask. It is fully defined at
1732 * compile time and may be used for optimization.
1735 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
1736 * MLX5_TXCMP_CODE_SINGLE - continue processing with the packet.
1737 * MLX5_TXCMP_CODE_MULTI - the WAIT inserted, continue processing.
1738 * Local context variables partially updated.
1740 static __rte_always_inline enum mlx5_txcmp_code
1741 mlx5_tx_schedule_send(struct mlx5_txq_data *restrict txq,
1742 struct mlx5_txq_local *restrict loc,
1745 if (MLX5_TXOFF_CONFIG(TXPP) &&
1746 loc->mbuf->ol_flags & txq->ts_mask) {
1747 struct mlx5_wqe *wqe;
1752 * Estimate the required space quickly and roughly.
1753 * We would like to ensure the packet can be pushed
1754 * to the queue and we won't get the orphan WAIT WQE.
1756 if (loc->wqe_free <= MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE ||
1757 loc->elts_free < NB_SEGS(loc->mbuf))
1758 return MLX5_TXCMP_CODE_EXIT;
1759 /* Convert the timestamp into completion to wait. */
1760 ts = *RTE_MBUF_DYNFIELD(loc->mbuf, txq->ts_offset, uint64_t *);
1761 wci = mlx5_txpp_convert_tx_ts(txq->sh, ts);
1762 if (unlikely(wci < 0))
1763 return MLX5_TXCMP_CODE_SINGLE;
1764 /* Build the WAIT WQE with specified completion. */
1765 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
1766 mlx5_tx_cseg_init(txq, loc, wqe, 2, MLX5_OPCODE_WAIT, olx);
1767 mlx5_tx_wseg_init(txq, loc, wqe, wci, olx);
1770 return MLX5_TXCMP_CODE_MULTI;
1772 return MLX5_TXCMP_CODE_SINGLE;
1776 * Tx one packet function for multi-segment TSO. Supports all
1777 * types of Tx offloads, uses MLX5_OPCODE_TSO to build WQEs,
1778 * sends one packet per WQE.
1780 * This routine is responsible for storing the processed mbuf
1781 * into the elts ring buffer and updating elts_head.
1784 * Pointer to TX queue structure.
1786 * Pointer to burst routine local context.
1788 * Configured Tx offloads mask. It is fully defined at
1789 * compile time and may be used for optimization.
1792 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
1793 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
1794 * Local context variables partially updated.
1796 static __rte_always_inline enum mlx5_txcmp_code
1797 mlx5_tx_packet_multi_tso(struct mlx5_txq_data *__rte_restrict txq,
1798 struct mlx5_txq_local *__rte_restrict loc,
1801 struct mlx5_wqe *__rte_restrict wqe;
1802 unsigned int ds, dlen, inlen, ntcp, vlan = 0;
1804 if (MLX5_TXOFF_CONFIG(TXPP)) {
1805 enum mlx5_txcmp_code wret;
1807 /* Generate WAIT for scheduling if requested. */
1808 wret = mlx5_tx_schedule_send(txq, loc, olx);
1809 if (wret == MLX5_TXCMP_CODE_EXIT)
1810 return MLX5_TXCMP_CODE_EXIT;
1811 if (wret == MLX5_TXCMP_CODE_ERROR)
1812 return MLX5_TXCMP_CODE_ERROR;
1815 * Calculate data length to be inlined to estimate
1816 * the required space in WQE ring buffer.
1818 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
1819 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
1820 vlan = sizeof(struct rte_vlan_hdr);
1821 inlen = loc->mbuf->l2_len + vlan +
1822 loc->mbuf->l3_len + loc->mbuf->l4_len;
1823 if (unlikely((!inlen || !loc->mbuf->tso_segsz)))
1824 return MLX5_TXCMP_CODE_ERROR;
1825 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
1826 inlen += loc->mbuf->outer_l2_len + loc->mbuf->outer_l3_len;
1827 /* Packet must contain all TSO headers. */
1828 if (unlikely(inlen > MLX5_MAX_TSO_HEADER ||
1829 inlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
1830 inlen > (dlen + vlan)))
1831 return MLX5_TXCMP_CODE_ERROR;
1832 MLX5_ASSERT(inlen >= txq->inlen_mode);
1834 * Check whether there are enough free WQEBBs:
1836 * - Ethernet Segment
1837 * - First Segment of inlined Ethernet data
1838 * - ... data continued ...
1839 * - Data Segments of pointer/min inline type
1841 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
1842 MLX5_ESEG_MIN_INLINE_SIZE +
1844 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
1845 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
1846 return MLX5_TXCMP_CODE_EXIT;
1847 /* Check for maximal WQE size. */
1848 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
1849 return MLX5_TXCMP_CODE_ERROR;
1850 #ifdef MLX5_PMD_SOFT_COUNTERS
1851 /* Update sent data bytes/packets counters. */
1852 ntcp = (dlen - (inlen - vlan) + loc->mbuf->tso_segsz - 1) /
1853 loc->mbuf->tso_segsz;
1855 * One will be added for mbuf itself at the end of the mlx5_tx_burst
1856 * from loc->pkts_sent field.
1859 txq->stats.opackets += ntcp;
1860 txq->stats.obytes += dlen + vlan + ntcp * inlen;
1862 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
1863 loc->wqe_last = wqe;
1864 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_TSO, olx);
1865 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 1, olx);
1866 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
1867 txq->wqe_ci += (ds + 3) / 4;
1868 loc->wqe_free -= (ds + 3) / 4;
1869 return MLX5_TXCMP_CODE_MULTI;
1873 * Tx one packet function for multi-segment SEND. Supports all types of Tx
1874 * offloads, uses MLX5_OPCODE_SEND to build WQEs, sends one packet per WQE,
1875 * without any data inlining in Ethernet Segment.
1877 * This routine is responsible for storing the processed mbuf
1878 * into the elts ring buffer and updating elts_head.
1881 * Pointer to TX queue structure.
1883 * Pointer to burst routine local context.
1885 * Configured Tx offloads mask. It is fully defined at
1886 * compile time and may be used for optimization.
1889 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
1890 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
1891 * Local context variables partially updated.
1893 static __rte_always_inline enum mlx5_txcmp_code
1894 mlx5_tx_packet_multi_send(struct mlx5_txq_data *__rte_restrict txq,
1895 struct mlx5_txq_local *__rte_restrict loc,
1898 struct mlx5_wqe_dseg *__rte_restrict dseg;
1899 struct mlx5_wqe *__rte_restrict wqe;
1900 unsigned int ds, nseg;
1902 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
1903 if (MLX5_TXOFF_CONFIG(TXPP)) {
1904 enum mlx5_txcmp_code wret;
1906 /* Generate WAIT for scheduling if requested. */
1907 wret = mlx5_tx_schedule_send(txq, loc, olx);
1908 if (wret == MLX5_TXCMP_CODE_EXIT)
1909 return MLX5_TXCMP_CODE_EXIT;
1910 if (wret == MLX5_TXCMP_CODE_ERROR)
1911 return MLX5_TXCMP_CODE_ERROR;
1914 * No inlining at all: it means saving CPU cycles was prioritized at
1915 * configuration time, so we should not copy any packet data to the WQE.
1917 nseg = NB_SEGS(loc->mbuf);
1919 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
1920 return MLX5_TXCMP_CODE_EXIT;
1921 /* Check for maximal WQE size. */
1922 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
1923 return MLX5_TXCMP_CODE_ERROR;
1925 * Some Tx offloads may cause an error if packet is not long enough,
1926 * check against assumed minimal length.
1928 if (rte_pktmbuf_pkt_len(loc->mbuf) <= MLX5_ESEG_MIN_INLINE_SIZE)
1929 return MLX5_TXCMP_CODE_ERROR;
1930 #ifdef MLX5_PMD_SOFT_COUNTERS
1931 /* Update sent data bytes counter. */
1932 txq->stats.obytes += rte_pktmbuf_pkt_len(loc->mbuf);
1933 if (MLX5_TXOFF_CONFIG(VLAN) &&
1934 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
1935 txq->stats.obytes += sizeof(struct rte_vlan_hdr);
1938 * SEND WQE, one WQEBB:
1939 * - Control Segment, SEND opcode
1940 * - Ethernet Segment, optional VLAN, no inline
1941 * - Data Segments, pointer only type
1943 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
1944 loc->wqe_last = wqe;
1945 mlx5_tx_cseg_init(txq, loc, wqe, ds, MLX5_OPCODE_SEND, olx);
1946 mlx5_tx_eseg_none(txq, loc, wqe, olx);
1947 dseg = &wqe->dseg[0];
1949 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
1950 struct rte_mbuf *mbuf;
1953 * Zero length segment found, we have to correct the total
1954 * size of the WQE in segments.
1955 * It is supposed to be a rare occasion, so in the normal
1956 * case (no zero length segments) we avoid the extra
1957 * write to the Control Segment.
1960 wqe->cseg.sq_ds -= RTE_BE32(1);
1962 loc->mbuf = mbuf->next;
1963 rte_pktmbuf_free_seg(mbuf);
1969 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
1970 rte_pktmbuf_data_len(loc->mbuf), olx);
1971 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
1976 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
1977 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
1978 loc->mbuf = loc->mbuf->next;
1981 txq->wqe_ci += (ds + 3) / 4;
1982 loc->wqe_free -= (ds + 3) / 4;
1983 return MLX5_TXCMP_CODE_MULTI;
1987 * Tx one packet function for multi-segment SEND. Supports all
1988 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
1989 * sends one packet per WQE, with data inlining in
1990 * Ethernet Segment and minimal Data Segments.
1992 * This routine is responsible for storing the processed mbuf
1993 * into the elts ring buffer and updating elts_head.
1996 * Pointer to TX queue structure.
1998 * Pointer to burst routine local context.
2000 * Configured Tx offloads mask. It is fully defined at
2001 * compile time and may be used for optimization.
2004 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
2005 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
2006 * Local context variables partially updated.
2008 static __rte_always_inline enum mlx5_txcmp_code
2009 mlx5_tx_packet_multi_inline(struct mlx5_txq_data *__rte_restrict txq,
2010 struct mlx5_txq_local *__rte_restrict loc,
2013 struct mlx5_wqe *__rte_restrict wqe;
2014 unsigned int ds, inlen, dlen, vlan = 0;
2016 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
2017 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
2018 if (MLX5_TXOFF_CONFIG(TXPP)) {
2019 enum mlx5_txcmp_code wret;
2021 /* Generate WAIT for scheduling if requested. */
2022 wret = mlx5_tx_schedule_send(txq, loc, olx);
2023 if (wret == MLX5_TXCMP_CODE_EXIT)
2024 return MLX5_TXCMP_CODE_EXIT;
2025 if (wret == MLX5_TXCMP_CODE_ERROR)
2026 return MLX5_TXCMP_CODE_ERROR;
2029 * First calculate data length to be inlined
2030 * to estimate the required space for WQE.
2032 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
2033 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
2034 vlan = sizeof(struct rte_vlan_hdr);
2035 inlen = dlen + vlan;
2036 /* Check against minimal length. */
2037 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
2038 return MLX5_TXCMP_CODE_ERROR;
2039 MLX5_ASSERT(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
2040 if (inlen > txq->inlen_send ||
2041 loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
2042 struct rte_mbuf *mbuf;
2047 nxlen = rte_pktmbuf_data_len(mbuf);
2049 * Packet length exceeds the allowed inline data length,
2050 * check whether the minimal inlining is required.
2052 if (txq->inlen_mode) {
2053 MLX5_ASSERT(txq->inlen_mode >=
2054 MLX5_ESEG_MIN_INLINE_SIZE);
2055 MLX5_ASSERT(txq->inlen_mode <= txq->inlen_send);
2056 inlen = txq->inlen_mode;
2057 } else if (vlan && !txq->vlan_en) {
2059 * VLAN insertion is requested but the hardware does not
2060 * support the offload, so it will be done with software inlining.
2062 inlen = MLX5_ESEG_MIN_INLINE_SIZE;
2063 } else if (mbuf->ol_flags & PKT_TX_DYNF_NOINLINE ||
2064 nxlen > txq->inlen_send) {
2065 return mlx5_tx_packet_multi_send(txq, loc, olx);
2070 * Now we know the minimal amount of data requested to be inlined.
2071 * Check whether we should inline the buffers from the beginning
2072 * of the chain to eliminate some mbufs.
2074 if (unlikely(nxlen <= txq->inlen_send)) {
2075 /* We can inline first mbuf at least. */
2076 if (nxlen < inlen) {
2079 /* Scan mbufs till inlen filled. */
2084 nxlen = rte_pktmbuf_data_len(mbuf);
2086 } while (unlikely(nxlen < inlen));
2087 if (unlikely(nxlen > txq->inlen_send)) {
2088 /* We cannot inline entire mbuf. */
2089 smlen = inlen - smlen;
2090 start = rte_pktmbuf_mtod_offset
2091 (mbuf, uintptr_t, smlen);
2099 /* This should not be the end of the packet. */
2101 nxlen = inlen + rte_pktmbuf_data_len(mbuf);
2102 } while (unlikely(nxlen < txq->inlen_send));
2104 start = rte_pktmbuf_mtod(mbuf, uintptr_t);
2106 * Check whether we can inline data to align the start
2107 * address of the data buffer to a cacheline.
2110 start = (~start + 1) & (RTE_CACHE_LINE_SIZE - 1);
2111 if (unlikely(start)) {
2113 if (start <= txq->inlen_send)
2118 * Check whether there are enough free WQEBBs:
2120 * - Ethernet Segment
2121 * - First Segment of inlined Ethernet data
2122 * - ... data continued ...
2123 * - Data Segments of pointer/min inline type
2125 * Estimate the number of Data Segments conservatively,
2126 * supposing that no mbufs are freed during inlining.
2128 MLX5_ASSERT(inlen <= txq->inlen_send);
2129 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
2130 MLX5_ESEG_MIN_INLINE_SIZE +
2132 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
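/*
 * Hedged note: the estimate counts one Data Segment per mbuf segment, two
 * title segments (Control + Ethernet) and the data inlined beyond the
 * Ethernet Segment's minimal inline part, rounded up to 16-byte WQE
 * segment units; (ds + 3) / 4 below converts the total into WQEBBs.
 * As stated above, the estimate is deliberately conservative.
 */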
2133 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
2134 return MLX5_TXCMP_CODE_EXIT;
2135 /* Check for maximal WQE size. */
2136 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
2137 return MLX5_TXCMP_CODE_ERROR;
2138 #ifdef MLX5_PMD_SOFT_COUNTERS
2139 /* Update sent data bytes/packets counters. */
2140 txq->stats.obytes += dlen + vlan;
2142 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
2143 loc->wqe_last = wqe;
2144 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_SEND, olx);
2145 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 0, olx);
2146 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
2147 txq->wqe_ci += (ds + 3) / 4;
2148 loc->wqe_free -= (ds + 3) / 4;
2149 return MLX5_TXCMP_CODE_MULTI;
2153 * Tx burst function for multi-segment packets. Supports all
2154 * types of Tx offloads, uses MLX5_OPCODE_SEND/TSO to build WQEs,
2155 * sends one packet per WQE. Function stops sending if it
2156 * encounters the single-segment packet.
2158 * This routine is responsible for storing the processed mbuf
2159 * into the elts ring buffer and updating elts_head.
2162 * Pointer to TX queue structure.
2164 * Packets to transmit.
2166 * Number of packets in array.
2168 * Pointer to burst routine local context.
2170 * Configured Tx offloads mask. It is fully defined at
2171 * compile time and may be used for optimization.
2174 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
2175 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
2176 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
2177 * MLX5_TXCMP_CODE_TSO - TSO single-segment packet encountered.
2178 * Local context variables updated.
2180 static __rte_always_inline enum mlx5_txcmp_code
2181 mlx5_tx_burst_mseg(struct mlx5_txq_data *__rte_restrict txq,
2182 struct rte_mbuf **__rte_restrict pkts,
2183 unsigned int pkts_n,
2184 struct mlx5_txq_local *__rte_restrict loc,
2187 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
2188 MLX5_ASSERT(pkts_n > loc->pkts_sent);
2189 pkts += loc->pkts_sent + 1;
2190 pkts_n -= loc->pkts_sent;
2192 enum mlx5_txcmp_code ret;
2194 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
2196 * Estimate the number of free elts quickly but conservatively.
2197 * Some segments may be fully inlined and freed;
2198 * ignore this here - precise estimation is costly.
2200 if (loc->elts_free < NB_SEGS(loc->mbuf))
2201 return MLX5_TXCMP_CODE_EXIT;
2202 if (MLX5_TXOFF_CONFIG(TSO) &&
2203 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)) {
2204 /* Proceed with multi-segment TSO. */
2205 ret = mlx5_tx_packet_multi_tso(txq, loc, olx);
2206 } else if (MLX5_TXOFF_CONFIG(INLINE)) {
2207 /* Proceed with multi-segment SEND with inlining. */
2208 ret = mlx5_tx_packet_multi_inline(txq, loc, olx);
2210 /* Proceed with multi-segment SEND w/o inlining. */
2211 ret = mlx5_tx_packet_multi_send(txq, loc, olx);
2213 if (ret == MLX5_TXCMP_CODE_EXIT)
2214 return MLX5_TXCMP_CODE_EXIT;
2215 if (ret == MLX5_TXCMP_CODE_ERROR)
2216 return MLX5_TXCMP_CODE_ERROR;
2217 /* WQE is built, go to the next packet. */
2220 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
2221 return MLX5_TXCMP_CODE_EXIT;
2222 loc->mbuf = *pkts++;
2224 rte_prefetch0(*pkts);
2225 if (likely(NB_SEGS(loc->mbuf) > 1))
2227 /* Here ends the series of multi-segment packets. */
2228 if (MLX5_TXOFF_CONFIG(TSO) &&
2229 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
2230 return MLX5_TXCMP_CODE_TSO;
2231 return MLX5_TXCMP_CODE_SINGLE;
2237 * Tx burst function for single-segment packets with TSO.
2238 * Supports all types of Tx offloads, except multi-packets.
2239 * Uses MLX5_OPCODE_TSO to build WQEs, sends one packet per WQE.
2240 * Function stops sending if it encounters the multi-segment
2241 * packet or packet without TSO requested.
2243 * The routine is responsible for storing the processed mbuf into the elts ring
2244 * buffer and updating elts_head if the inline offload is requested, due to possible
2245 * early freeing of the inlined mbufs (the pkts array cannot be stored in elts as a batch).
2248 * Pointer to TX queue structure.
2250 * Packets to transmit.
2252 * Number of packets in array.
2254 * Pointer to burst routine local context.
2256 * Configured Tx offloads mask. It is fully defined at
2257 * compile time and may be used for optimization.
2260 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
2261 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
2262 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
2263 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
2264 * Local context variables updated.
2266 static __rte_always_inline enum mlx5_txcmp_code
2267 mlx5_tx_burst_tso(struct mlx5_txq_data *__rte_restrict txq,
2268 struct rte_mbuf **__rte_restrict pkts,
2269 unsigned int pkts_n,
2270 struct mlx5_txq_local *__rte_restrict loc,
2273 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
2274 MLX5_ASSERT(pkts_n > loc->pkts_sent);
2275 pkts += loc->pkts_sent + 1;
2276 pkts_n -= loc->pkts_sent;
2278 struct mlx5_wqe_dseg *__rte_restrict dseg;
2279 struct mlx5_wqe *__rte_restrict wqe;
2280 unsigned int ds, dlen, hlen, ntcp, vlan = 0;
2283 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
2284 if (MLX5_TXOFF_CONFIG(TXPP)) {
2285 enum mlx5_txcmp_code wret;
2287 /* Generate WAIT for scheduling if requested. */
2288 wret = mlx5_tx_schedule_send(txq, loc, olx);
2289 if (wret == MLX5_TXCMP_CODE_EXIT)
2290 return MLX5_TXCMP_CODE_EXIT;
2291 if (wret == MLX5_TXCMP_CODE_ERROR)
2292 return MLX5_TXCMP_CODE_ERROR;
2294 dlen = rte_pktmbuf_data_len(loc->mbuf);
2295 if (MLX5_TXOFF_CONFIG(VLAN) &&
2296 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
2297 vlan = sizeof(struct rte_vlan_hdr);
2300 * First calculate the WQE size to check
2301 * whether we have enough space in ring buffer.
2303 hlen = loc->mbuf->l2_len + vlan +
2304 loc->mbuf->l3_len + loc->mbuf->l4_len;
2305 if (unlikely((!hlen || !loc->mbuf->tso_segsz)))
2306 return MLX5_TXCMP_CODE_ERROR;
2307 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
2308 hlen += loc->mbuf->outer_l2_len +
2309 loc->mbuf->outer_l3_len;
2310 /* Segment must contain all TSO headers. */
2311 if (unlikely(hlen > MLX5_MAX_TSO_HEADER ||
2312 hlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
2313 hlen > (dlen + vlan)))
2314 return MLX5_TXCMP_CODE_ERROR;
2316 * Check whether there are enough free WQEBBs:
2318 * - Ethernet Segment
2319 * - First Segment of inlined Ethernet data
2320 * - ... data continued ...
2321 * - Finishing Data Segment of pointer type
2323 ds = 4 + (hlen - MLX5_ESEG_MIN_INLINE_SIZE +
2324 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
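/*
 * Hedged illustration (assumed values): with hlen = 54 bytes of TSO headers
 * to inline (Ethernet + IPv4 + TCP, no VLAN), MLX5_ESEG_MIN_INLINE_SIZE = 18
 * and MLX5_WSEG_SIZE = 16, ds = 4 + ceil((54 - 18) / 16) = 7 segments,
 * i.e. two WQEBBs.
 */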
2325 if (loc->wqe_free < ((ds + 3) / 4))
2326 return MLX5_TXCMP_CODE_EXIT;
2327 #ifdef MLX5_PMD_SOFT_COUNTERS
2328 /* Update sent data bytes/packets counters. */
2329 ntcp = (dlen + vlan - hlen +
2330 loc->mbuf->tso_segsz - 1) /
2331 loc->mbuf->tso_segsz;
2333 * One will be added for the mbuf itself at the end
2334 * of mlx5_tx_burst from the loc->pkts_sent field.
2337 txq->stats.opackets += ntcp;
2338 txq->stats.obytes += dlen + vlan + ntcp * hlen;
2341 * Build the TSO WQE:
2343 * - Ethernet Segment with hlen bytes inlined
2344 * - Data Segment of pointer type
2346 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
2347 loc->wqe_last = wqe;
2348 mlx5_tx_cseg_init(txq, loc, wqe, ds,
2349 MLX5_OPCODE_TSO, olx);
2350 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan, hlen, 1, olx);
2351 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) + hlen - vlan;
2352 dlen -= hlen - vlan;
2353 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
2355 * WQE is built, update the loop parameters
2356 * and go to the next packet.
2358 txq->wqe_ci += (ds + 3) / 4;
2359 loc->wqe_free -= (ds + 3) / 4;
2360 if (MLX5_TXOFF_CONFIG(INLINE))
2361 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
2365 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
2366 return MLX5_TXCMP_CODE_EXIT;
2367 loc->mbuf = *pkts++;
2369 rte_prefetch0(*pkts);
2370 if (MLX5_TXOFF_CONFIG(MULTI) &&
2371 unlikely(NB_SEGS(loc->mbuf) > 1))
2372 return MLX5_TXCMP_CODE_MULTI;
2373 if (likely(!(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)))
2374 return MLX5_TXCMP_CODE_SINGLE;
2375 /* Continue with the next TSO packet. */
2381 * Analyze the packet and select the best method to send.
2384 * Pointer to TX queue structure.
2386 * Pointer to burst routine local context.
2388 * Configured Tx offloads mask. It is fully defined at
2389 * compile time and may be used for optimization.
2391 * The predefined flag telling whether to do the complete check
2392 * for multi-segment packets and TSO.
2395 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
2396 * MLX5_TXCMP_CODE_TSO - TSO required, use TSO/LSO.
2397 * MLX5_TXCMP_CODE_SINGLE - single-segment packet, use SEND.
2398 * MLX5_TXCMP_CODE_EMPW - single-segment packet, use MPW.
2400 static __rte_always_inline enum mlx5_txcmp_code
2401 mlx5_tx_able_to_empw(struct mlx5_txq_data *__rte_restrict txq,
2402 struct mlx5_txq_local *__rte_restrict loc,
2406 /* Check for multi-segment packet. */
2408 MLX5_TXOFF_CONFIG(MULTI) &&
2409 unlikely(NB_SEGS(loc->mbuf) > 1))
2410 return MLX5_TXCMP_CODE_MULTI;
2411 /* Check for TSO packet. */
2413 MLX5_TXOFF_CONFIG(TSO) &&
2414 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
2415 return MLX5_TXCMP_CODE_TSO;
2416 /* Check if eMPW is enabled at all. */
2417 if (!MLX5_TXOFF_CONFIG(EMPW))
2418 return MLX5_TXCMP_CODE_SINGLE;
2419 /* Check if eMPW can be engaged. */
2420 if (MLX5_TXOFF_CONFIG(VLAN) &&
2421 unlikely(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) &&
2422 (!MLX5_TXOFF_CONFIG(INLINE) ||
2423 unlikely((rte_pktmbuf_data_len(loc->mbuf) +
2424 sizeof(struct rte_vlan_hdr)) > txq->inlen_empw))) {
2426 * eMPW does not support the VLAN insertion offload, so we would have to
2427 * inline the entire packet, but it is too long for inlining.
2429 return MLX5_TXCMP_CODE_SINGLE;
2431 return MLX5_TXCMP_CODE_EMPW;
2435 * Check whether the next packet attributes match the eMPW batch ones.
2436 * In addition, for legacy MPW the packet length is checked as well.
2439 * Pointer to TX queue structure.
2441 * Pointer to Ethernet Segment of eMPW batch.
2443 * Pointer to burst routine local context.
2445 * Length of previous packet in MPW descriptor.
2447 * Configured Tx offloads mask. It is fully defined at
2448 * compile time and may be used for optimization.
2451 * true - packet match with eMPW batch attributes.
2452 * false - no match, eMPW should be restarted.
2454 static __rte_always_inline bool
2455 mlx5_tx_match_empw(struct mlx5_txq_data *__rte_restrict txq,
2456 struct mlx5_wqe_eseg *__rte_restrict es,
2457 struct mlx5_txq_local *__rte_restrict loc,
2461 uint8_t swp_flags = 0;
2463 /* Compare the checksum flags, if any. */
2464 if (MLX5_TXOFF_CONFIG(CSUM) &&
2465 txq_ol_cksum_to_cs(loc->mbuf) != es->cs_flags)
2467 /* Compare the Software Parser offsets and flags. */
2468 if (MLX5_TXOFF_CONFIG(SWP) &&
2469 (es->swp_offs != txq_mbuf_to_swp(loc, &swp_flags, olx) ||
2470 es->swp_flags != swp_flags))
2472 /* Fill metadata field if needed. */
2473 if (MLX5_TXOFF_CONFIG(METADATA) &&
2474 es->metadata != (loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2475 rte_cpu_to_be_32(*RTE_FLOW_DYNF_METADATA(loc->mbuf)) : 0))
2477 /* Legacy MPW can send packets with the same length only. */
2478 if (MLX5_TXOFF_CONFIG(MPW) &&
2479 dlen != rte_pktmbuf_data_len(loc->mbuf))
2481 /* There must be no VLAN packets in eMPW loop. */
2482 if (MLX5_TXOFF_CONFIG(VLAN))
2483 MLX5_ASSERT(!(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT));
2484 /* Check if the scheduling is requested. */
2485 if (MLX5_TXOFF_CONFIG(TXPP) &&
2486 loc->mbuf->ol_flags & txq->ts_mask)
2492 * Update send loop variables and WQE for eMPW loop without data inlining.
2493 * Number of Data Segments is equal to the number of sent packets.
2496 * Pointer to TX queue structure.
2498 * Pointer to burst routine local context.
2500 * Number of packets (which equals the number of Data Segments).
2502 * Accumulated statistics, bytes sent.
2504 * Configured Tx offloads mask. It is fully defined at
2505 * compile time and may be used for optimization.
2508 * Nothing is returned; the routine updates the WQE Data Segment
2509 * count and the send loop variables in place.
2511 static __rte_always_inline void
2512 mlx5_tx_sdone_empw(struct mlx5_txq_data *__rte_restrict txq,
2513 struct mlx5_txq_local *__rte_restrict loc,
2516 unsigned int olx __rte_unused)
2518 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
2519 #ifdef MLX5_PMD_SOFT_COUNTERS
2520 /* Update sent data bytes counter. */
2521 txq->stats.obytes += slen;
2525 loc->elts_free -= ds;
2526 loc->pkts_sent += ds;
2528 loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
2529 txq->wqe_ci += (ds + 3) / 4;
2530 loc->wqe_free -= (ds + 3) / 4;
2534 * Update send loop variables and WQE for eMPW loop with data inlining.
2535 * Takes the total size of the descriptors and data pushed to the WQE.
2538 * Pointer to TX queue structure.
2540 * Pointer to burst routine local context.
2542 * Total size of descriptor/data in bytes.
2544 * Accumulated statistics, data bytes sent.
2546 * The base WQE for the eMPW/MPW descriptor.
2548 * Configured Tx offloads mask. It is fully defined at
2549 * compile time and may be used for optimization.
2552 * Nothing is returned; the routine updates the WQE Data Segment
2553 * count and the send loop variables in place.
2555 static __rte_always_inline void
2556 mlx5_tx_idone_empw(struct mlx5_txq_data *__rte_restrict txq,
2557 struct mlx5_txq_local *__rte_restrict loc,
2560 struct mlx5_wqe *__rte_restrict wqem,
2561 unsigned int olx __rte_unused)
2563 struct mlx5_wqe_dseg *dseg = &wqem->dseg[0];
2565 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
2566 #ifdef MLX5_PMD_SOFT_COUNTERS
2567 /* Update sent data bytes counter. */
2568 txq->stats.obytes += slen;
2572 if (MLX5_TXOFF_CONFIG(MPW) && dseg->bcount == RTE_BE32(0)) {
2574 * If the legacy MPW session contains inline packets,
2575 * we must set the length of the only inline data segment
2576 * and align the total length to the segment size.
2578 MLX5_ASSERT(len > sizeof(dseg->bcount));
2579 dseg->bcount = rte_cpu_to_be_32((len - sizeof(dseg->bcount)) |
2580 MLX5_ETH_WQE_DATA_INLINE);
2581 len = (len + MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE + 2;
2584 * The session is not legacy MPW, or it contains
2585 * data buffer pointer segments.
2587 MLX5_ASSERT((len % MLX5_WSEG_SIZE) == 0);
2588 len = len / MLX5_WSEG_SIZE + 2;
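/*
 * Hedged note: in both branches above len ends up expressed in 16-byte WQE
 * segments plus two segments for the Control and Ethernet Segments of the
 * title WQEBB; the (len + 3) / 4 conversion below then turns this into
 * whole WQEBBs.
 */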
2590 wqem->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | len);
2591 txq->wqe_ci += (len + 3) / 4;
2592 loc->wqe_free -= (len + 3) / 4;
2593 loc->wqe_last = wqem;
2597 * The set of Tx burst functions for single-segment packets without TSO
2598 * and with Multi-Packet Writing feature support.
2599 * Supports all types of Tx offloads, except multi-packets and TSO.
2601 * Uses MLX5_OPCODE_EMPW to build WQEs if possible and sends as many packets
2602 * per WQE as it can. If eMPW is not configured or the packet cannot be sent with
2603 * eMPW (VLAN insertion), the ordinary SEND opcode is used and only one packet is sent per WQE.
2606 * The functions stop sending if they encounter a multi-segment packet or a packet
2607 * with TSO requested.
2609 * The routines are responsible for storing the processed mbuf into the elts ring buffer
2610 * and updating elts_head if the inlining offload is requested. Otherwise copying the
2611 * mbufs to elts can be postponed and completed at the end of the burst routine.
2614 * Pointer to TX queue structure.
2616 * Packets to transmit.
2618 * Number of packets in array.
2620 * Pointer to burst routine local context.
2622 * Configured Tx offloads mask. It is fully defined at
2623 * compile time and may be used for optimization.
2626 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
2627 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
2628 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
2629 * MLX5_TXCMP_CODE_TSO - TSO packet encountered.
2630 * MLX5_TXCMP_CODE_SINGLE - used inside functions set.
2631 * MLX5_TXCMP_CODE_EMPW - used inside functions set.
2633 * Local context variables updated.
2636 * The routine sends packets with MLX5_OPCODE_EMPW
2637 * without inlining, this is dedicated optimized branch.
2638 * No VLAN insertion is supported.
2640 static __rte_always_inline enum mlx5_txcmp_code
2641 mlx5_tx_burst_empw_simple(struct mlx5_txq_data *__rte_restrict txq,
2642 struct rte_mbuf **__rte_restrict pkts,
2643 unsigned int pkts_n,
2644 struct mlx5_txq_local *__rte_restrict loc,
2648 * This subroutine is part of mlx5_tx_burst_single() and sends
2649 * single-segment packets with the eMPW opcode without data inlining.
2651 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
2652 MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
2653 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
2654 MLX5_ASSERT(pkts_n > loc->pkts_sent);
2655 pkts += loc->pkts_sent + 1;
2656 pkts_n -= loc->pkts_sent;
2658 struct mlx5_wqe_dseg *__rte_restrict dseg;
2659 struct mlx5_wqe_eseg *__rte_restrict eseg;
2660 enum mlx5_txcmp_code ret;
2661 unsigned int part, loop;
2662 unsigned int slen = 0;
2665 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
2666 if (MLX5_TXOFF_CONFIG(TXPP)) {
2667 enum mlx5_txcmp_code wret;
2669 /* Generate WAIT for scheduling if requested. */
2670 wret = mlx5_tx_schedule_send(txq, loc, olx);
2671 if (wret == MLX5_TXCMP_CODE_EXIT)
2672 return MLX5_TXCMP_CODE_EXIT;
2673 if (wret == MLX5_TXCMP_CODE_ERROR)
2674 return MLX5_TXCMP_CODE_ERROR;
2676 part = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
2677 MLX5_MPW_MAX_PACKETS :
2678 MLX5_EMPW_MAX_PACKETS);
2679 if (unlikely(loc->elts_free < part)) {
2680 /* We do not have enough elts to save all mbufs. */
2681 if (unlikely(loc->elts_free < MLX5_EMPW_MIN_PACKETS))
2682 return MLX5_TXCMP_CODE_EXIT;
2683 /* But we are still able to send at least a minimal eMPW. */
2684 part = loc->elts_free;
2686 /* Check whether we have enough WQEs */
2687 if (unlikely(loc->wqe_free < ((2 + part + 3) / 4))) {
2688 if (unlikely(loc->wqe_free <
2689 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
2690 return MLX5_TXCMP_CODE_EXIT;
2691 part = (loc->wqe_free * 4) - 2;
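/*
 * Hedged note: each free WQEBB provides room for four 16-byte segments and
 * the eMPW title takes two of them (Control + Ethernet Segments), so at
 * most loc->wqe_free * 4 - 2 pointer Data Segments, one per packet, can
 * still be placed.
 */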
2693 if (likely(part > 1))
2694 rte_prefetch0(*pkts);
2695 loc->wqe_last = txq->wqes + (txq->wqe_ci & txq->wqe_m);
2697 * Build eMPW title WQEBB:
2698 * - Control Segment, eMPW opcode
2699 * - Ethernet Segment, no inline
2701 mlx5_tx_cseg_init(txq, loc, loc->wqe_last, part + 2,
2702 MLX5_OPCODE_ENHANCED_MPSW, olx);
2703 mlx5_tx_eseg_none(txq, loc, loc->wqe_last,
2704 olx & ~MLX5_TXOFF_CONFIG_VLAN);
2705 eseg = &loc->wqe_last->eseg;
2706 dseg = &loc->wqe_last->dseg[0];
2708 /* Store the packet length for legacy MPW. */
2709 if (MLX5_TXOFF_CONFIG(MPW))
2710 eseg->mss = rte_cpu_to_be_16
2711 (rte_pktmbuf_data_len(loc->mbuf));
2713 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
2714 #ifdef MLX5_PMD_SOFT_COUNTERS
2715 /* Update sent data bytes counter. */
2720 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
2722 if (unlikely(--loop == 0))
2724 loc->mbuf = *pkts++;
2725 if (likely(loop > 1))
2726 rte_prefetch0(*pkts);
2727 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
2729 * Unroll the completion code to avoid
2730 * returning a variable value - it results in
2731 * unoptimized subsequent checks in the caller.
2733 if (ret == MLX5_TXCMP_CODE_MULTI) {
2735 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
2736 if (unlikely(!loc->elts_free ||
2738 return MLX5_TXCMP_CODE_EXIT;
2739 return MLX5_TXCMP_CODE_MULTI;
2741 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
2742 if (ret == MLX5_TXCMP_CODE_TSO) {
2744 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
2745 if (unlikely(!loc->elts_free ||
2747 return MLX5_TXCMP_CODE_EXIT;
2748 return MLX5_TXCMP_CODE_TSO;
2750 if (ret == MLX5_TXCMP_CODE_SINGLE) {
2752 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
2753 if (unlikely(!loc->elts_free ||
2755 return MLX5_TXCMP_CODE_EXIT;
2756 return MLX5_TXCMP_CODE_SINGLE;
2758 if (ret != MLX5_TXCMP_CODE_EMPW) {
2761 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
2762 return MLX5_TXCMP_CODE_ERROR;
2765 * Check whether the packet parameters coincide
2766 * within the assumed eMPW batch:
2767 * - checksum settings
2769 * - software parser settings
2770 * - packet length (legacy MPW only)
2771 * - scheduling is not required
2773 if (!mlx5_tx_match_empw(txq, eseg, loc, dlen, olx)) {
2776 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
2777 if (unlikely(!loc->elts_free ||
2779 return MLX5_TXCMP_CODE_EXIT;
2783 /* Packet attributes match, continue the same eMPW. */
2785 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
2786 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
2788 /* eMPW is built successfully, update loop parameters. */
2790 MLX5_ASSERT(pkts_n >= part);
2791 #ifdef MLX5_PMD_SOFT_COUNTERS
2792 /* Update sent data bytes counter. */
2793 txq->stats.obytes += slen;
2795 loc->elts_free -= part;
2796 loc->pkts_sent += part;
2797 txq->wqe_ci += (2 + part + 3) / 4;
2798 loc->wqe_free -= (2 + part + 3) / 4;
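/*
 * Hedged note: the finished eMPW consumed 2 title segments (Control +
 * Ethernet) plus one pointer Data Segment per sent packet, hence the
 * ceil((2 + part) / 4) WQEBB accounting above.
 */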
2800 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
2801 return MLX5_TXCMP_CODE_EXIT;
2802 loc->mbuf = *pkts++;
2803 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
2804 if (unlikely(ret != MLX5_TXCMP_CODE_EMPW))
2806 /* Continue sending eMPW batches. */
2812 * The routine sends packets with MLX5_OPCODE_EMPW
2813 * with inlining, optionally supports VLAN insertion.
2815 static __rte_always_inline enum mlx5_txcmp_code
2816 mlx5_tx_burst_empw_inline(struct mlx5_txq_data *__rte_restrict txq,
2817 struct rte_mbuf **__rte_restrict pkts,
2818 unsigned int pkts_n,
2819 struct mlx5_txq_local *__rte_restrict loc,
2823 * This subroutine is part of mlx5_tx_burst_single() and sends
2824 * single-segment packets with the eMPW opcode with data inlining.
2826 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
2827 MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
2828 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
2829 MLX5_ASSERT(pkts_n > loc->pkts_sent);
2830 pkts += loc->pkts_sent + 1;
2831 pkts_n -= loc->pkts_sent;
2833 struct mlx5_wqe_dseg *__rte_restrict dseg;
2834 struct mlx5_wqe *__rte_restrict wqem;
2835 enum mlx5_txcmp_code ret;
2836 unsigned int room, part, nlim;
2837 unsigned int slen = 0;
2839 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
2840 if (MLX5_TXOFF_CONFIG(TXPP)) {
2841 enum mlx5_txcmp_code wret;
2843 /* Generate WAIT for scheduling if requested. */
2844 wret = mlx5_tx_schedule_send(txq, loc, olx);
2845 if (wret == MLX5_TXCMP_CODE_EXIT)
2846 return MLX5_TXCMP_CODE_EXIT;
2847 if (wret == MLX5_TXCMP_CODE_ERROR)
2848 return MLX5_TXCMP_CODE_ERROR;
2851 * Limit the number of packets in one WQE
2852 * to improve CQE generation latency.
2854 nlim = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
2855 MLX5_MPW_INLINE_MAX_PACKETS :
2856 MLX5_EMPW_MAX_PACKETS);
2857 /* Check whether we have the minimal amount of WQEs. */
2858 if (unlikely(loc->wqe_free <
2859 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
2860 return MLX5_TXCMP_CODE_EXIT;
2861 if (likely(pkts_n > 1))
2862 rte_prefetch0(*pkts);
2863 wqem = txq->wqes + (txq->wqe_ci & txq->wqe_m);
2865 * Build eMPW title WQEBB:
2866 * - Control Segment, eMPW opcode, zero DS
2867 * - Ethernet Segment, no inline
2869 mlx5_tx_cseg_init(txq, loc, wqem, 0,
2870 MLX5_OPCODE_ENHANCED_MPSW, olx);
2871 mlx5_tx_eseg_none(txq, loc, wqem,
2872 olx & ~MLX5_TXOFF_CONFIG_VLAN);
2873 dseg = &wqem->dseg[0];
2874 /* Store the packet length for legacy MPW. */
2875 if (MLX5_TXOFF_CONFIG(MPW))
2876 wqem->eseg.mss = rte_cpu_to_be_16
2877 (rte_pktmbuf_data_len(loc->mbuf));
2878 room = RTE_MIN(MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE,
2879 loc->wqe_free) * MLX5_WQE_SIZE -
2880 MLX5_WQE_CSEG_SIZE -
2882 /* Limit the room for legacy MPW sessions for performance. */
2883 if (MLX5_TXOFF_CONFIG(MPW))
2884 room = RTE_MIN(room,
2885 RTE_MAX(txq->inlen_empw +
2886 sizeof(dseg->bcount) +
2887 (MLX5_TXOFF_CONFIG(VLAN) ?
2888 sizeof(struct rte_vlan_hdr) : 0),
2889 MLX5_MPW_INLINE_MAX_PACKETS *
2890 MLX5_WQE_DSEG_SIZE));
2891 /* Build the WQE while we have space, packets, and resources. */
2894 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
2895 uint8_t *dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
2898 MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
2899 MLX5_ASSERT((room % MLX5_WQE_DSEG_SIZE) == 0);
2900 MLX5_ASSERT((uintptr_t)dseg < (uintptr_t)txq->wqes_end);
2902 * Some Tx offloads may cause an error if the packet is not
2903 * long enough, check against the assumed minimal length.
2905 if (unlikely(dlen <= MLX5_ESEG_MIN_INLINE_SIZE)) {
2907 if (unlikely(!part))
2908 return MLX5_TXCMP_CODE_ERROR;
2910 * We have some successfully built
2911 * packet Data Segments to send.
2913 mlx5_tx_idone_empw(txq, loc, part,
2915 return MLX5_TXCMP_CODE_ERROR;
2917 /* Inline or not inline - that's the Question. */
2918 if (dlen > txq->inlen_empw ||
2919 loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE)
2921 if (MLX5_TXOFF_CONFIG(MPW)) {
2922 if (dlen > txq->inlen_send)
2926 /* Open new inline MPW session. */
2927 tlen += sizeof(dseg->bcount);
2928 dseg->bcount = RTE_BE32(0);
2930 (dseg, sizeof(dseg->bcount));
2933 * Do not intermix pointer and inline Data Segments
2934 * within a legacy MPW session.
2936 if (wqem->dseg[0].bcount)
2940 tlen = sizeof(dseg->bcount) + dlen;
2942 /* Inline entire packet, optional VLAN insertion. */
2943 if (MLX5_TXOFF_CONFIG(VLAN) &&
2944 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
2946 * The packet length must have been checked in
2947 * mlx5_tx_able_to_empw() and the packet is
2948 * guaranteed to fit into the inline length.
2951 sizeof(struct rte_vlan_hdr)) <=
2953 tlen += sizeof(struct rte_vlan_hdr);
2956 dseg = mlx5_tx_dseg_vlan(txq, loc, dseg,
2958 #ifdef MLX5_PMD_SOFT_COUNTERS
2959 /* Update sent data bytes counter. */
2960 slen += sizeof(struct rte_vlan_hdr);
2965 dseg = mlx5_tx_dseg_empw(txq, loc, dseg,
2968 if (!MLX5_TXOFF_CONFIG(MPW))
2969 tlen = RTE_ALIGN(tlen, MLX5_WSEG_SIZE);
2970 MLX5_ASSERT(room >= tlen);
2973 * Packet data are completely inlined,
2974 * we can try to free the packet.
2976 if (likely(loc->pkts_sent == loc->mbuf_free)) {
2978 * All the packets from the beginning of the burst
2979 * are inlined, so we can free the mbufs directly
2980 * from the original array on tx_burst exit().
2986 * In order not to call rte_pktmbuf_free_seg() here,
2987 * in the innermost loop (which might be very
2988 * expensive), we just save the mbuf in elts.
2990 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
2995 * Do not intermix pointer and inline Data Segments
2996 * within a legacy MPW session.
2998 if (MLX5_TXOFF_CONFIG(MPW) &&
3000 wqem->dseg[0].bcount == RTE_BE32(0))
3003 * Non-inlinable VLAN packets are
3004 * processed outside of this routine.
3006 MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
3007 if (MLX5_TXOFF_CONFIG(VLAN))
3008 MLX5_ASSERT(!(loc->mbuf->ol_flags &
3010 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
3011 /* We have to store mbuf in elts.*/
3012 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3014 room -= MLX5_WQE_DSEG_SIZE;
3015 /* Ring buffer wraparound is checked at the loop end.*/
3018 #ifdef MLX5_PMD_SOFT_COUNTERS
3019 /* Update sent data bytes counter. */
3024 if (unlikely(!pkts_n || !loc->elts_free)) {
3026 * We have no resources/packets to
3027 * continue building descriptors.
3030 mlx5_tx_idone_empw(txq, loc, part,
3032 return MLX5_TXCMP_CODE_EXIT;
3034 loc->mbuf = *pkts++;
3035 if (likely(pkts_n > 1))
3036 rte_prefetch0(*pkts);
3037 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
3039 * Unroll the completion code to avoid
3040 * returning a variable value - it results in
3041 * unoptimized subsequent checks in the caller.
3043 if (ret == MLX5_TXCMP_CODE_MULTI) {
3045 mlx5_tx_idone_empw(txq, loc, part,
3047 if (unlikely(!loc->elts_free ||
3049 return MLX5_TXCMP_CODE_EXIT;
3050 return MLX5_TXCMP_CODE_MULTI;
3052 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
3053 if (ret == MLX5_TXCMP_CODE_TSO) {
3055 mlx5_tx_idone_empw(txq, loc, part,
3057 if (unlikely(!loc->elts_free ||
3059 return MLX5_TXCMP_CODE_EXIT;
3060 return MLX5_TXCMP_CODE_TSO;
3062 if (ret == MLX5_TXCMP_CODE_SINGLE) {
3064 mlx5_tx_idone_empw(txq, loc, part,
3066 if (unlikely(!loc->elts_free ||
3068 return MLX5_TXCMP_CODE_EXIT;
3069 return MLX5_TXCMP_CODE_SINGLE;
3071 if (ret != MLX5_TXCMP_CODE_EMPW) {
3074 mlx5_tx_idone_empw(txq, loc, part,
3076 return MLX5_TXCMP_CODE_ERROR;
3078 /* Check if we have minimal room left. */
3080 if (unlikely(!nlim || room < MLX5_WQE_DSEG_SIZE))
3083 * Check whether the packet parameters coincide
3084 * within the assumed eMPW batch:
3085 * - checksum settings
3087 * - software parser settings
3088 * - packet length (legacy MPW only)
3089 * - scheduling is not required
3091 if (!mlx5_tx_match_empw(txq, &wqem->eseg,
3094 /* Packet attributes match, continue the same eMPW. */
3095 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3096 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3099 * We get here to close an existing eMPW
3100 * session and start the new one.
3102 MLX5_ASSERT(pkts_n);
3104 if (unlikely(!part))
3105 return MLX5_TXCMP_CODE_EXIT;
3106 mlx5_tx_idone_empw(txq, loc, part, slen, wqem, olx);
3107 if (unlikely(!loc->elts_free ||
3109 return MLX5_TXCMP_CODE_EXIT;
3110 /* Continue the loop with new eMPW session. */
3116 * The routine sends packets with ordinary MLX5_OPCODE_SEND.
3117 * Data inlining and VLAN insertion are supported.
3119 static __rte_always_inline enum mlx5_txcmp_code
3120 mlx5_tx_burst_single_send(struct mlx5_txq_data *__rte_restrict txq,
3121 struct rte_mbuf **__rte_restrict pkts,
3122 unsigned int pkts_n,
3123 struct mlx5_txq_local *__rte_restrict loc,
3127 * This subroutine is part of mlx5_tx_burst_single()
3128 * and sends single-segment packets with the SEND opcode.
3130 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3131 MLX5_ASSERT(pkts_n > loc->pkts_sent);
3132 pkts += loc->pkts_sent + 1;
3133 pkts_n -= loc->pkts_sent;
3135 struct mlx5_wqe *__rte_restrict wqe;
3136 enum mlx5_txcmp_code ret;
3138 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
3139 if (MLX5_TXOFF_CONFIG(TXPP)) {
3140 enum mlx5_txcmp_code wret;
3142 /* Generate WAIT for scheduling if requested. */
3143 wret = mlx5_tx_schedule_send(txq, loc, olx);
3144 if (wret == MLX5_TXCMP_CODE_EXIT)
3145 return MLX5_TXCMP_CODE_EXIT;
3146 if (wret == MLX5_TXCMP_CODE_ERROR)
3147 return MLX5_TXCMP_CODE_ERROR;
3149 if (MLX5_TXOFF_CONFIG(INLINE)) {
3150 unsigned int inlen, vlan = 0;
3152 inlen = rte_pktmbuf_data_len(loc->mbuf);
3153 if (MLX5_TXOFF_CONFIG(VLAN) &&
3154 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
3155 vlan = sizeof(struct rte_vlan_hdr);
3159 * If inlining is enabled at configuration time,
3160 * the limit must not be less than the minimal size.
3161 * Otherwise we would need an extra check of the data
3162 * size to avoid crashes due to length overflow.
3164 MLX5_ASSERT(txq->inlen_send >=
3165 MLX5_ESEG_MIN_INLINE_SIZE);
3166 if (inlen <= txq->inlen_send) {
3167 unsigned int seg_n, wqe_n;
3169 rte_prefetch0(rte_pktmbuf_mtod
3170 (loc->mbuf, uint8_t *));
3171 /* Check against minimal length. */
3172 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
3173 return MLX5_TXCMP_CODE_ERROR;
3174 if (loc->mbuf->ol_flags &
3175 PKT_TX_DYNF_NOINLINE) {
3177 * The hint flag not to inline packet
3178 * data is set. Check whether we can
3181 if ((!MLX5_TXOFF_CONFIG(EMPW) &&
3183 (MLX5_TXOFF_CONFIG(MPW) &&
3185 if (inlen <= txq->inlen_send)
3188 * The hardware requires the
3189 * minimal inline data header.
3191 goto single_min_inline;
3193 if (MLX5_TXOFF_CONFIG(VLAN) &&
3194 vlan && !txq->vlan_en) {
3196 * We must insert VLAN tag
3197 * by software means.
3199 goto single_part_inline;
3201 goto single_no_inline;
3205 * Completely inlined packet data WQE:
3206 * - Control Segment, SEND opcode
3207 * - Ethernet Segment, no VLAN insertion
3208 * - Data inlined, VLAN optionally inserted
3209 * - Alignment to MLX5_WSEG_SIZE
3210 * Have to estimate amount of WQEBBs
3212 seg_n = (inlen + 3 * MLX5_WSEG_SIZE -
3213 MLX5_ESEG_MIN_INLINE_SIZE +
3214 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3215 /* Check if there are enough WQEBBs. */
3216 wqe_n = (seg_n + 3) / 4;
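/*
 * Hedged illustration (assumed values): fully inlining inlen = 200 bytes
 * (VLAN included) with MLX5_WSEG_SIZE = 16 and MLX5_ESEG_MIN_INLINE_SIZE = 18
 * gives seg_n = (200 + 3 * 16 - 18 + 15) / 16 = 15 segments and
 * wqe_n = (15 + 3) / 4 = 4 WQEBBs for the whole SEND WQE.
 */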
3217 if (wqe_n > loc->wqe_free)
3218 return MLX5_TXCMP_CODE_EXIT;
3219 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3220 loc->wqe_last = wqe;
3221 mlx5_tx_cseg_init(txq, loc, wqe, seg_n,
3222 MLX5_OPCODE_SEND, olx);
3223 mlx5_tx_eseg_data(txq, loc, wqe,
3224 vlan, inlen, 0, olx);
3225 txq->wqe_ci += wqe_n;
3226 loc->wqe_free -= wqe_n;
3228 * Packet data are completely inlined,
3229 * free the packet immediately.
3231 rte_pktmbuf_free_seg(loc->mbuf);
3232 } else if ((!MLX5_TXOFF_CONFIG(EMPW) ||
3233 MLX5_TXOFF_CONFIG(MPW)) &&
3236 * If minimal inlining is requested, the eMPW
3237 * feature should be disabled because data is
3238 * inlined into the Ethernet Segment, which
3239 * cannot carry inlined data for eMPW since
3240 * the segment is shared by all packets.
3242 struct mlx5_wqe_dseg *__rte_restrict dseg;
3247 * The inline-mode settings require inlining
3248 * the specified amount of data bytes into the
3249 * Ethernet Segment. We should check the free
3250 * space in the WQE ring buffer to inline
3251 * only partially.
3254 MLX5_ASSERT(txq->inlen_send >= txq->inlen_mode);
3255 MLX5_ASSERT(inlen > txq->inlen_mode);
3256 MLX5_ASSERT(txq->inlen_mode >=
3257 MLX5_ESEG_MIN_INLINE_SIZE);
3259 * Check whether there are enough free WQEBBs:
3261 * - Ethernet Segment
3262 * - First Segment of inlined Ethernet data
3263 * - ... data continued ...
3264 * - Finishing Data Segment of pointer type
3266 ds = (MLX5_WQE_CSEG_SIZE +
3267 MLX5_WQE_ESEG_SIZE +
3268 MLX5_WQE_DSEG_SIZE +
3270 MLX5_ESEG_MIN_INLINE_SIZE +
3271 MLX5_WQE_DSEG_SIZE +
3272 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
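/*
 * Hedged note: the byte total of this WQE (all segments plus the data
 * inlined beyond the Ethernet Segment's minimal inline part) is rounded
 * up to 16-byte WQE segment units; (ds + 3) / 4 below converts that
 * into WQEBBs.
 */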
3273 if (loc->wqe_free < ((ds + 3) / 4))
3274 return MLX5_TXCMP_CODE_EXIT;
3276 * Build the ordinary SEND WQE:
3278 * - Ethernet Segment, inline inlen_mode bytes
3279 * - Data Segment of pointer type
3281 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3282 loc->wqe_last = wqe;
3283 mlx5_tx_cseg_init(txq, loc, wqe, ds,
3284 MLX5_OPCODE_SEND, olx);
3285 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan,
3288 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
3289 txq->inlen_mode - vlan;
3290 inlen -= txq->inlen_mode;
3291 mlx5_tx_dseg_ptr(txq, loc, dseg,
3294 * WQE is built, update the loop parameters
3295 * and go to the next packet.
3297 txq->wqe_ci += (ds + 3) / 4;
3298 loc->wqe_free -= (ds + 3) / 4;
3299 /* We have to store mbuf in elts.*/
3300 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
3301 txq->elts[txq->elts_head++ & txq->elts_m] =
3309 * Partially inlined packet data WQE, we have
3310 * some space in the title WQEBB that we can fill
3311 * with some packet data. It takes one WQEBB,
3312 * which is available, no extra space check:
3313 * - Control Segment, SEND opcode
3314 * - Ethernet Segment, no VLAN insertion
3315 * - MLX5_ESEG_MIN_INLINE_SIZE bytes of Data
3316 * - Data Segment, pointer type
3318 * We also get here if VLAN insertion is not
3319 * supported by HW but inlining is enabled.
3322 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3323 loc->wqe_last = wqe;
3324 mlx5_tx_cseg_init(txq, loc, wqe, 4,
3325 MLX5_OPCODE_SEND, olx);
3326 mlx5_tx_eseg_dmin(txq, loc, wqe, vlan, olx);
3327 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
3328 MLX5_ESEG_MIN_INLINE_SIZE - vlan;
3330 * The length check is performed above, by
3331 * comparing with txq->inlen_send. We should
3332 * not get overflow here.
3334 MLX5_ASSERT(inlen > MLX5_ESEG_MIN_INLINE_SIZE);
3335 dlen = inlen - MLX5_ESEG_MIN_INLINE_SIZE;
3336 mlx5_tx_dseg_ptr(txq, loc, &wqe->dseg[1],
3340 /* We have to store mbuf in elts.*/
3341 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
3342 txq->elts[txq->elts_head++ & txq->elts_m] =
3346 #ifdef MLX5_PMD_SOFT_COUNTERS
3347 /* Update sent data bytes counter. */
3348 txq->stats.obytes += vlan +
3349 rte_pktmbuf_data_len(loc->mbuf);
3353 * No inlining at all: saving CPU cycles was
3354 * prioritized at configuration time, so we should
3355 * not copy any packet data into the WQE.
3357 * SEND WQE, one WQEBB:
3358 * - Control Segment, SEND opcode
3359 * - Ethernet Segment, optional VLAN, no inline
3360 * - Data Segment, pointer type
3363 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3364 loc->wqe_last = wqe;
3365 mlx5_tx_cseg_init(txq, loc, wqe, 3,
3366 MLX5_OPCODE_SEND, olx);
3367 mlx5_tx_eseg_none(txq, loc, wqe, olx);
3369 (txq, loc, &wqe->dseg[0],
3370 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3371 rte_pktmbuf_data_len(loc->mbuf), olx);
3375 * We should not store the mbuf pointer in elts
3376 * if no inlining is configured; this is done
3377 * by the calling routine in a batch copy.
3379 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
3381 #ifdef MLX5_PMD_SOFT_COUNTERS
3382 /* Update sent data bytes counter. */
3383 txq->stats.obytes += rte_pktmbuf_data_len(loc->mbuf);
3384 if (MLX5_TXOFF_CONFIG(VLAN) &&
3385 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3386 txq->stats.obytes +=
3387 sizeof(struct rte_vlan_hdr);
3392 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3393 return MLX5_TXCMP_CODE_EXIT;
3394 loc->mbuf = *pkts++;
3396 rte_prefetch0(*pkts);
3397 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
3398 if (unlikely(ret != MLX5_TXCMP_CODE_SINGLE))
3404 static __rte_always_inline enum mlx5_txcmp_code
3405 mlx5_tx_burst_single(struct mlx5_txq_data *__rte_restrict txq,
3406 struct rte_mbuf **__rte_restrict pkts,
3407 unsigned int pkts_n,
3408 struct mlx5_txq_local *__rte_restrict loc,
3411 enum mlx5_txcmp_code ret;
3413 ret = mlx5_tx_able_to_empw(txq, loc, olx, false);
3414 if (ret == MLX5_TXCMP_CODE_SINGLE)
3416 MLX5_ASSERT(ret == MLX5_TXCMP_CODE_EMPW);
3418 /* Optimize for inline/no inline eMPW send. */
3419 ret = (MLX5_TXOFF_CONFIG(INLINE)) ?
3420 mlx5_tx_burst_empw_inline
3421 (txq, pkts, pkts_n, loc, olx) :
3422 mlx5_tx_burst_empw_simple
3423 (txq, pkts, pkts_n, loc, olx);
3424 if (ret != MLX5_TXCMP_CODE_SINGLE)
3426 /* The resources to send one packet should remain. */
3427 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3429 ret = mlx5_tx_burst_single_send(txq, pkts, pkts_n, loc, olx);
3430 MLX5_ASSERT(ret != MLX5_TXCMP_CODE_SINGLE);
3431 if (ret != MLX5_TXCMP_CODE_EMPW)
3433 /* The resources to send one packet should remain. */
3434 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3439 * DPDK Tx callback template. This is the configured template used to generate
3440 * routines optimized for the specified offload setup.
3441 * One of these generated functions is chosen at SQ configuration time.
3444 * Generic pointer to TX queue structure.
3446 * Packets to transmit.
3448 * Number of packets in array.
3450 * Configured offloads mask, presenting the bits of the MLX5_TXOFF_CONFIG_xxx
3451 * values. Should be static to take advantage of the compile-time static configuration.
3455 * Number of packets successfully transmitted (<= pkts_n).
3457 static __rte_always_inline uint16_t
3458 mlx5_tx_burst_tmpl(struct mlx5_txq_data *__rte_restrict txq,
3459 struct rte_mbuf **__rte_restrict pkts,
3463 struct mlx5_txq_local loc;
3464 enum mlx5_txcmp_code ret;
3467 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
3468 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
3469 if (unlikely(!pkts_n))
3471 if (MLX5_TXOFF_CONFIG(INLINE))
3475 loc.wqe_last = NULL;
3478 loc.pkts_loop = loc.pkts_sent;
3480 * Check if there are some CQEs and, if any:
3481 * - process encountered errors
3482 * - process the completed WQEs
3483 * - free related mbufs
3484 * - doorbell the NIC about processed CQEs
3486 rte_prefetch0(*(pkts + loc.pkts_sent));
3487 mlx5_tx_handle_completion(txq, olx);
3489 * Calculate the number of available resources - elts and WQEs.
3490 * There are two possible different scenarios:
3491 * - no data inlining into WQEs, one WQEBB may contain up to
3492 * four packets, in this case elts become the scarce resource
3493 * - data inlining into WQEs, one packet may require multiple
3494 * WQEBBs, the WQEs become the limiting factor.
3496 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
3497 loc.elts_free = txq->elts_s -
3498 (uint16_t)(txq->elts_head - txq->elts_tail);
3499 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
3500 loc.wqe_free = txq->wqe_s -
3501 (uint16_t)(txq->wqe_ci - txq->wqe_pi);
3502 if (unlikely(!loc.elts_free || !loc.wqe_free))
3506 * Fetch the packet from array. Usually this is the first
3507 * packet in series of multi/single segment packets.
3509 loc.mbuf = *(pkts + loc.pkts_sent);
3510 /* Dedicated branch for multi-segment packets. */
3511 if (MLX5_TXOFF_CONFIG(MULTI) &&
3512 unlikely(NB_SEGS(loc.mbuf) > 1)) {
3514 * Multi-segment packet encountered.
3515 * Hardware is able to process it only
3516 * with SEND/TSO opcodes, one packet
3517 * per WQE; do it in a dedicated routine.
3520 MLX5_ASSERT(loc.pkts_sent >= loc.pkts_copy);
3521 part = loc.pkts_sent - loc.pkts_copy;
3522 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
3524 * There are some single-segment mbufs not
3525 * stored in elts. The mbufs must be in the
3526 * same order as WQEs, so we must copy the
3527 * mbufs to elts here, before the mbufs of the
3528 * coming multi-segment packet are appended.
3530 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy,
3532 loc.pkts_copy = loc.pkts_sent;
3534 MLX5_ASSERT(pkts_n > loc.pkts_sent);
3535 ret = mlx5_tx_burst_mseg(txq, pkts, pkts_n, &loc, olx);
3536 if (!MLX5_TXOFF_CONFIG(INLINE))
3537 loc.pkts_copy = loc.pkts_sent;
3539 * These return code checks are supposed
3540 * to be optimized out due to routine inlining.
3542 if (ret == MLX5_TXCMP_CODE_EXIT) {
3544 * The routine returns this code when
3545 * all packets are sent or there are not
3546 * enough resources to complete the request.
3550 if (ret == MLX5_TXCMP_CODE_ERROR) {
3552 * The routine returns this code when some error
3553 * in the incoming packet format occurred.
3555 txq->stats.oerrors++;
3558 if (ret == MLX5_TXCMP_CODE_SINGLE) {
3560 * A single-segment packet was encountered
3561 * in the array; try to send it in the
3562 * most optimized way, possibly engaging eMPW.
3564 goto enter_send_single;
3566 if (MLX5_TXOFF_CONFIG(TSO) &&
3567 ret == MLX5_TXCMP_CODE_TSO) {
3569 * The single-segment TSO packet was
3570 * encountered in the array.
3572 goto enter_send_tso;
3574 /* We must not get here. Something has gone wrong. */
3576 txq->stats.oerrors++;
3579 /* Dedicated branch for single-segment TSO packets. */
3580 if (MLX5_TXOFF_CONFIG(TSO) &&
3581 unlikely(loc.mbuf->ol_flags & PKT_TX_TCP_SEG)) {
3583 * TSO might require a special way of inlining
3584 * (dedicated parameters) and is sent with the
3585 * MLX5_OPCODE_TSO opcode only; handle this
3586 * in a dedicated branch.
3589 MLX5_ASSERT(NB_SEGS(loc.mbuf) == 1);
3590 MLX5_ASSERT(pkts_n > loc.pkts_sent);
3591 ret = mlx5_tx_burst_tso(txq, pkts, pkts_n, &loc, olx);
3593 * These return code checks are supposed
3594 * to be optimized out due to routine inlining.
3596 if (ret == MLX5_TXCMP_CODE_EXIT)
3598 if (ret == MLX5_TXCMP_CODE_ERROR) {
3599 txq->stats.oerrors++;
3602 if (ret == MLX5_TXCMP_CODE_SINGLE)
3603 goto enter_send_single;
3604 if (MLX5_TXOFF_CONFIG(MULTI) &&
3605 ret == MLX5_TXCMP_CODE_MULTI) {
3607 * The multi-segment packet was
3608 * encountered in the array.
3610 goto enter_send_multi;
3612 /* We must not get here. Something has gone wrong. */
3614 txq->stats.oerrors++;
3618 * The dedicated branch for the single-segment packets
3619 * without TSO. Often these can be sent using
3620 * MLX5_OPCODE_EMPW with multiple packets in one WQE.
3621 * The routine builds the WQEs until it encounters
3622 * a TSO or multi-segment packet (if these
3623 * offloads are requested at SQ configuration time).
3626 MLX5_ASSERT(pkts_n > loc.pkts_sent);
3627 ret = mlx5_tx_burst_single(txq, pkts, pkts_n, &loc, olx);
3629 * These return code checks are supposed
3630 * to be optimized out due to routine inlining.
3632 if (ret == MLX5_TXCMP_CODE_EXIT)
3634 if (ret == MLX5_TXCMP_CODE_ERROR) {
3635 txq->stats.oerrors++;
3638 if (MLX5_TXOFF_CONFIG(MULTI) &&
3639 ret == MLX5_TXCMP_CODE_MULTI) {
3641 * The multi-segment packet was
3642 * encountered in the array.
3644 goto enter_send_multi;
3646 if (MLX5_TXOFF_CONFIG(TSO) &&
3647 ret == MLX5_TXCMP_CODE_TSO) {
3649 * The single-segment TSO packet was
3650 * encountered in the array.
3652 goto enter_send_tso;
3654 /* We must not get here. Something has gone wrong. */
3656 txq->stats.oerrors++;
3660 * Main Tx loop is completed, do the rest:
3661 * - set completion request if thresholds are reached
3662 * - doorbell the hardware
3663 * - copy the rest of mbufs to elts (if any)
3665 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE) ||
3666 loc.pkts_sent >= loc.pkts_copy);
3667 /* Take a shortcut if nothing is sent. */
3668 if (unlikely(loc.pkts_sent == loc.pkts_loop))
3670 /* Request CQE generation if limits are reached. */
3671 mlx5_tx_request_completion(txq, &loc, olx);
3673 * Ring QP doorbell immediately after WQE building completion
3674 * to improve latencies. The purely software-related data treatment
3675 * can be completed after the doorbell. Tx CQEs for this SQ are
3676 * processed in this thread only by polling.
3678 * The rdma core library can map doorbell register in two ways,
3679 * depending on the environment variable "MLX5_SHUT_UP_BF":
3681 * - as regular cached memory, when the variable is either missing or
3682 * set to zero. This type of mapping may cause significant
3683 * doorbell register write latency and requires an explicit memory
3684 * write barrier to mitigate this issue and prevent write combining.
3686 * - as non-cached memory, when the variable is present and set to a non-zero
3687 * value. This type of mapping may impact performance under
3688 * heavy load conditions, but the explicit write memory barrier is
3689 * not required, which may improve core performance.
3691 * - the legacy behaviour (prior to the 19.08 release) was to use some
3692 * heuristics to decide whether a write memory barrier should
3693 * be performed. This behaviour is supported by specifying
3694 * tx_db_nc=2; the write barrier is skipped if the application provides
3695 * the full recommended burst of packets, on the assumption that more
3696 * packets are coming and the write barrier will be issued on
3697 * the next burst (after descriptor writing, at least).
3699 mlx5_tx_dbrec_cond_wmb(txq, loc.wqe_last, !txq->db_nc &&
3700 (!txq->db_heu || pkts_n % MLX5_TX_DEFAULT_BURST));
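/*
 * Hedged reading of the condition above: the write memory barrier is
 * requested only when the doorbell register is mapped as cached memory
 * (txq->db_nc == 0) and either the heuristics are disabled
 * (txq->db_heu == 0) or the burst is not a full multiple of
 * MLX5_TX_DEFAULT_BURST, i.e. no immediate follow-up burst is expected.
 */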
3701 /* Not all of the mbufs may be stored into elts yet. */
3702 part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent - loc.pkts_copy;
3703 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
3705 * There are some single-segment mbufs not stored in elts.
3706 * This can only happen if the last packet was single-segment.
3707 * The copying is gathered into one place because it is
3708 * a good opportunity to optimize it with SIMD.
3709 * Unfortunately, if inlining is enabled, gaps in the pointer
3710 * array may happen due to early freeing of the inlined mbufs.
3712 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy, part, olx);
3713 loc.pkts_copy = loc.pkts_sent;
3715 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
3716 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
3717 if (pkts_n > loc.pkts_sent) {
3719 * If the burst size is large there might not be enough CQEs
3720 * fetched from the completion queue and not enough resources
3721 * freed to send all the packets.
3726 #ifdef MLX5_PMD_SOFT_COUNTERS
3727 /* Increment sent packets counter. */
3728 txq->stats.opackets += loc.pkts_sent;
3730 if (MLX5_TXOFF_CONFIG(INLINE) && loc.mbuf_free)
3731 __mlx5_tx_free_mbuf(txq, pkts, loc.mbuf_free, olx);
3732 return loc.pkts_sent;
3735 #endif /* RTE_PMD_MLX5_TX_H_ */