1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2021 6WIND S.A.
3 * Copyright 2021 Mellanox Technologies, Ltd
6 #ifndef RTE_PMD_MLX5_TX_H_
7 #define RTE_PMD_MLX5_TX_H_
10 #include <sys/queue.h>
13 #include <rte_mempool.h>
14 #include <rte_common.h>
15 #include <rte_spinlock.h>
17 #include <mlx5_common.h>
18 #include <mlx5_common_mr.h>
21 #include "mlx5_autoconf.h"
23 /* TX burst subroutines return codes. */
24 enum mlx5_txcmp_code {
25 MLX5_TXCMP_CODE_EXIT = 0,
26 MLX5_TXCMP_CODE_ERROR,
27 MLX5_TXCMP_CODE_SINGLE,
28 MLX5_TXCMP_CODE_MULTI,
34 * These defines are used to configure the Tx burst routine option set
35 * supported at compile time. Options that are not specified are optimized
36 * out, because the if conditions can be evaluated at compile time.
37 * The offloads with bigger runtime check overhead (requiring more CPU
38 * cycles to skip) should have the bigger index - this is needed to select
39 * the better matching routine if there is no exact match and some offloads are not requested.
42 #define MLX5_TXOFF_CONFIG_MULTI (1u << 0) /* Multi-segment packets.*/
43 #define MLX5_TXOFF_CONFIG_TSO (1u << 1) /* TCP send offload supported.*/
44 #define MLX5_TXOFF_CONFIG_SWP (1u << 2) /* Tunnels/SW Parser offloads.*/
45 #define MLX5_TXOFF_CONFIG_CSUM (1u << 3) /* Check Sums offloaded. */
46 #define MLX5_TXOFF_CONFIG_INLINE (1u << 4) /* Data inlining supported. */
47 #define MLX5_TXOFF_CONFIG_VLAN (1u << 5) /* VLAN insertion supported.*/
48 #define MLX5_TXOFF_CONFIG_METADATA (1u << 6) /* Flow metadata. */
49 #define MLX5_TXOFF_CONFIG_EMPW (1u << 8) /* Enhanced MPW supported.*/
50 #define MLX5_TXOFF_CONFIG_MPW (1u << 9) /* Legacy MPW supported.*/
51 #define MLX5_TXOFF_CONFIG_TXPP (1u << 10) /* Scheduling on timestamp.*/
53 /* The most common offload groups. */
54 #define MLX5_TXOFF_CONFIG_NONE 0
55 #define MLX5_TXOFF_CONFIG_FULL (MLX5_TXOFF_CONFIG_MULTI | \
56 MLX5_TXOFF_CONFIG_TSO | \
57 MLX5_TXOFF_CONFIG_SWP | \
58 MLX5_TXOFF_CONFIG_CSUM | \
59 MLX5_TXOFF_CONFIG_INLINE | \
60 MLX5_TXOFF_CONFIG_VLAN | \
61 MLX5_TXOFF_CONFIG_METADATA)
63 #define MLX5_TXOFF_CONFIG(mask) (olx & MLX5_TXOFF_CONFIG_##mask)
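/*
 * Illustrative usage sketch (not a definition from this header): a template
 * burst routine receives the compile-time constant 'olx' mask, so every
 * MLX5_TXOFF_CONFIG(mask) check folds to a constant and the compiler drops
 * the branches of options that are not selected, e.g.:
 *
 *	if (MLX5_TXOFF_CONFIG(TSO) &&
 *	    loc->mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
 *		... build a TSO WQE ...
 *	}
 *
 * Specialized routines are then instantiated with the MLX5_TXOFF_DECL()
 * macro defined below, e.g. MLX5_TXOFF_DECL(full_empw,
 * MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_EMPW).
 */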
65 #define MLX5_TXOFF_PRE_DECL(func) \
66 uint16_t mlx5_tx_burst_##func(void *txq, \
67 struct rte_mbuf **pkts, \
70 #define MLX5_TXOFF_DECL(func, olx) \
71 uint16_t mlx5_tx_burst_##func(void *txq, \
72 struct rte_mbuf **pkts, \
75 return mlx5_tx_burst_tmpl((struct mlx5_txq_data *)txq, \
76 pkts, pkts_n, (olx)); \
79 /* Mbuf dynamic flag offset for inline. */
80 extern uint64_t rte_net_mlx5_dynf_inline_mask;
81 #define RTE_MBUF_F_TX_DYNF_NOINLINE rte_net_mlx5_dynf_inline_mask
83 extern uint32_t mlx5_ptype_table[] __rte_cache_aligned;
84 extern uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned;
85 extern uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned;
87 struct mlx5_txq_stats {
88 #ifdef MLX5_PMD_SOFT_COUNTERS
89 uint64_t opackets; /**< Total of successfully sent packets. */
90 uint64_t obytes; /**< Total of successfully sent bytes. */
92 uint64_t oerrors; /**< Total number of failed transmitted packets. */
95 /* TX queue send local data. */
97 struct mlx5_txq_local {
98 struct mlx5_wqe *wqe_last; /* last sent WQE pointer. */
99 struct rte_mbuf *mbuf; /* first mbuf to process. */
100 uint16_t pkts_copy; /* packets copied to elts. */
101 uint16_t pkts_sent; /* packets sent. */
102 uint16_t pkts_loop; /* packets sent on loop entry. */
103 uint16_t elts_free; /* available elts remain. */
104 uint16_t wqe_free; /* available wqe remain. */
105 uint16_t mbuf_off; /* data offset in current mbuf. */
106 uint16_t mbuf_nseg; /* number of remaining mbuf. */
107 uint16_t mbuf_free; /* number of inline mbufs to free. */
110 /* TX queue descriptor. */
112 struct mlx5_txq_data {
113 uint16_t elts_head; /* Current counter in (*elts)[]. */
114 uint16_t elts_tail; /* Counter of first element awaiting completion. */
115 uint16_t elts_comp; /* elts index since last completion request. */
116 uint16_t elts_s; /* Number of mbuf elements. */
117 uint16_t elts_m; /* Mask for mbuf elements indices. */
118 /* Fields related to elts mbuf storage. */
119 uint16_t wqe_ci; /* Consumer index for work queue. */
120 uint16_t wqe_pi; /* Producer index for work queue. */
121 uint16_t wqe_s; /* Number of WQ elements. */
122 uint16_t wqe_m; /* Mask for WQ element indices. */
123 uint16_t wqe_comp; /* WQE index since last completion request. */
124 uint16_t wqe_thres; /* WQE threshold to request completion in CQ. */
125 /* WQ related fields. */
126 uint16_t cq_ci; /* Consumer index for completion queue. */
127 uint16_t cq_pi; /* Production index for completion queue. */
128 uint16_t cqe_s; /* Number of CQ elements. */
129 uint16_t cqe_m; /* Mask for CQ indices. */
130 /* CQ related fields. */
131 uint16_t elts_n:4; /* elts[] length (in log2). */
132 uint16_t cqe_n:4; /* Number of CQ elements (in log2). */
133 uint16_t wqe_n:4; /* Number of WQ elements (in log2). */
134 uint16_t tso_en:1; /* When set hardware TSO is enabled. */
135 uint16_t tunnel_en:1;
136 /* When set, Tx offloads for tunneled packets are supported. */
137 uint16_t swp_en:1; /* Whether SW parser is enabled. */
138 uint16_t vlan_en:1; /* VLAN insertion in WQE is supported. */
139 uint16_t db_nc:1; /* Doorbell mapped to non-cached region. */
140 uint16_t db_heu:1; /* Doorbell heuristic write barrier. */
141 uint16_t fast_free:1; /* mbuf fast free on Tx is enabled. */
142 uint16_t inlen_send; /* Ordinary send data inline size. */
143 uint16_t inlen_empw; /* eMPW max packet size to inline. */
144 uint16_t inlen_mode; /* Minimal data length to inline. */
145 uint32_t qp_num_8s; /* QP number shifted by 8. */
146 uint64_t offloads; /* Offloads for Tx Queue. */
147 struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
148 struct mlx5_wqe *wqes; /* Work queue. */
149 struct mlx5_wqe *wqes_end; /* Work queue array limit. */
150 #ifdef RTE_LIBRTE_MLX5_DEBUG
151 uint32_t *fcqs; /* Free completion queue (debug extended). */
153 uint16_t *fcqs; /* Free completion queue. */
155 volatile struct mlx5_cqe *cqes; /* Completion queue. */
156 volatile uint32_t *qp_db; /* Work queue doorbell. */
157 volatile uint32_t *cq_db; /* Completion queue doorbell. */
158 uint16_t port_id; /* Port ID of device. */
159 uint16_t idx; /* Queue index. */
160 uint64_t ts_mask; /* Timestamp flag dynamic mask. */
161 int32_t ts_offset; /* Timestamp field dynamic offset. */
162 struct mlx5_dev_ctx_shared *sh; /* Shared context. */
163 struct mlx5_txq_stats stats; /* TX queue counters. */
164 struct mlx5_uar_data uar_data;
165 struct rte_mbuf *elts[0];
166 /* Storage for queued packets, must be the last field. */
167 } __rte_cache_aligned;
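/*
 * Illustrative note (derived from the inline helpers below): elts_s and
 * wqe_s are powers of two, so ring entries are addressed with the masks,
 * e.g. txq->elts[txq->elts_head & txq->elts_m] for the next mbuf slot and
 * txq->wqes + (txq->wqe_ci & txq->wqe_m) for the next WQE, while the
 * head/tail/ci counters themselves are free-running 16-bit values.
 */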
170 MLX5_TXQ_TYPE_STANDARD, /* Standard Tx queue. */
171 MLX5_TXQ_TYPE_HAIRPIN, /* Hairpin Tx queue. */
174 /* TX queue control descriptor. */
175 struct mlx5_txq_ctrl {
176 LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
177 uint32_t refcnt; /* Reference counter. */
178 unsigned int socket; /* CPU socket ID for allocations. */
179 enum mlx5_txq_type type; /* The txq ctrl type. */
180 unsigned int max_inline_data; /* Max inline data. */
181 unsigned int max_tso_header; /* Max TSO header size. */
182 struct mlx5_txq_obj *obj; /* Verbs/DevX queue object. */
183 struct mlx5_priv *priv; /* Back pointer to private data. */
184 off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */
185 uint16_t dump_file_n; /* Number of dump files. */
186 struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */
187 uint32_t hairpin_status; /* Hairpin binding status. */
188 struct mlx5_txq_data txq; /* Data path structure. */
189 /* Must be the last field in the structure, contains elts[]. */
194 int mlx5_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id);
195 int mlx5_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id);
196 int mlx5_tx_queue_start_primary(struct rte_eth_dev *dev, uint16_t queue_id);
197 int mlx5_tx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t queue_id);
198 int mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
199 unsigned int socket, const struct rte_eth_txconf *conf);
200 int mlx5_tx_hairpin_queue_setup
201 (struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
202 const struct rte_eth_hairpin_conf *hairpin_conf);
203 void mlx5_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
204 int mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd);
205 void mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev);
206 int mlx5_txq_obj_verify(struct rte_eth_dev *dev);
207 struct mlx5_txq_ctrl *mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx,
208 uint16_t desc, unsigned int socket,
209 const struct rte_eth_txconf *conf);
210 struct mlx5_txq_ctrl *mlx5_txq_hairpin_new
211 (struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
212 const struct rte_eth_hairpin_conf *hairpin_conf);
213 struct mlx5_txq_ctrl *mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx);
214 int mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx);
215 int mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx);
216 int mlx5_txq_verify(struct rte_eth_dev *dev);
217 void txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl);
218 void txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl);
219 uint64_t mlx5_get_tx_port_offloads(struct rte_eth_dev *dev);
220 void mlx5_txq_dynf_timestamp_set(struct rte_eth_dev *dev);
224 uint16_t removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
226 void mlx5_tx_handle_completion(struct mlx5_txq_data *__rte_restrict txq,
227 unsigned int olx __rte_unused);
228 int mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset);
229 void mlx5_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
230 struct rte_eth_txq_info *qinfo);
231 int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
232 struct rte_eth_burst_mode *mode);
236 MLX5_TXOFF_PRE_DECL(full_empw);
237 MLX5_TXOFF_PRE_DECL(none_empw);
238 MLX5_TXOFF_PRE_DECL(md_empw);
239 MLX5_TXOFF_PRE_DECL(mt_empw);
240 MLX5_TXOFF_PRE_DECL(mtsc_empw);
241 MLX5_TXOFF_PRE_DECL(mti_empw);
242 MLX5_TXOFF_PRE_DECL(mtv_empw);
243 MLX5_TXOFF_PRE_DECL(mtiv_empw);
244 MLX5_TXOFF_PRE_DECL(sc_empw);
245 MLX5_TXOFF_PRE_DECL(sci_empw);
246 MLX5_TXOFF_PRE_DECL(scv_empw);
247 MLX5_TXOFF_PRE_DECL(sciv_empw);
248 MLX5_TXOFF_PRE_DECL(i_empw);
249 MLX5_TXOFF_PRE_DECL(v_empw);
250 MLX5_TXOFF_PRE_DECL(iv_empw);
252 /* mlx5_tx_nompw.c */
254 MLX5_TXOFF_PRE_DECL(full);
255 MLX5_TXOFF_PRE_DECL(none);
256 MLX5_TXOFF_PRE_DECL(md);
257 MLX5_TXOFF_PRE_DECL(mt);
258 MLX5_TXOFF_PRE_DECL(mtsc);
259 MLX5_TXOFF_PRE_DECL(mti);
260 MLX5_TXOFF_PRE_DECL(mtv);
261 MLX5_TXOFF_PRE_DECL(mtiv);
262 MLX5_TXOFF_PRE_DECL(sc);
263 MLX5_TXOFF_PRE_DECL(sci);
264 MLX5_TXOFF_PRE_DECL(scv);
265 MLX5_TXOFF_PRE_DECL(sciv);
266 MLX5_TXOFF_PRE_DECL(i);
267 MLX5_TXOFF_PRE_DECL(v);
268 MLX5_TXOFF_PRE_DECL(iv);
272 MLX5_TXOFF_PRE_DECL(full_ts_nompw);
273 MLX5_TXOFF_PRE_DECL(full_ts_nompwi);
274 MLX5_TXOFF_PRE_DECL(full_ts);
275 MLX5_TXOFF_PRE_DECL(full_ts_noi);
276 MLX5_TXOFF_PRE_DECL(none_ts);
277 MLX5_TXOFF_PRE_DECL(mdi_ts);
278 MLX5_TXOFF_PRE_DECL(mti_ts);
279 MLX5_TXOFF_PRE_DECL(mtiv_ts);
283 MLX5_TXOFF_PRE_DECL(none_mpw);
284 MLX5_TXOFF_PRE_DECL(mci_mpw);
285 MLX5_TXOFF_PRE_DECL(mc_mpw);
286 MLX5_TXOFF_PRE_DECL(i_mpw);
288 static __rte_always_inline struct mlx5_uar_data *
289 mlx5_tx_bfreg(struct mlx5_txq_data *txq)
291 return &MLX5_PROC_PRIV(txq->port_id)->uar_table[txq->idx];
295 * Ring TX queue doorbell and flush the update by write memory barrier.
298 * Pointer to TX queue structure.
300 * Pointer to the last WQE posted in the NIC.
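 *
 * Illustrative usage (an assumption, not a fixed calling convention): after
 * a burst has built its WQEs and advanced wqe_ci, the caller may finish
 * with mlx5_tx_dbrec(txq, loc.wqe_last) so the new producer index becomes
 * visible to the hardware.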
302 static __rte_always_inline void
303 mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe)
305 mlx5_doorbell_ring(mlx5_tx_bfreg(txq), *(volatile uint64_t *)wqe,
306 txq->wqe_ci, txq->qp_db, 1);
310 * Convert timestamp from mbuf format to linear counter
311 * of Clock Queue completions (24 bits).
314 * Pointer to the device shared context to fetch Tx
315 * packet pacing timestamp and parameters.
317 * Timestamp from mbuf to convert.
319 * positive or zero value - completion ID to wait.
320 * negative value - conversion error.
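 *
 * For example (illustrative numbers): if the skew-corrected delta between
 * mts and the last Clock Queue timestamp is 2.5 ticks, it is rounded up to
 * 3 completions and the routine returns the current Clock Queue CI plus 3,
 * truncated to MLX5_CQ_INDEX_WIDTH bits.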
322 static __rte_always_inline int32_t
323 mlx5_txpp_convert_tx_ts(struct mlx5_dev_ctx_shared *sh, uint64_t mts)
330 * Read atomically two uint64_t fields and compare lsb bits.
331 * If there is no match - the timestamp was updated in
332 * the service thread, data should be re-read.
334 rte_compiler_barrier();
335 ci = __atomic_load_n(&sh->txpp.ts.ci_ts, __ATOMIC_RELAXED);
336 ts = __atomic_load_n(&sh->txpp.ts.ts, __ATOMIC_RELAXED);
337 rte_compiler_barrier();
338 if (!((ts ^ ci) << (64 - MLX5_CQ_INDEX_WIDTH)))
341 /* Perform the skew correction, positive value to send earlier. */
342 mts -= sh->txpp.skew;
344 if (unlikely(mts >= UINT64_MAX / 2)) {
345 /* We have a negative value, mts is in the past. */
346 __atomic_fetch_add(&sh->txpp.err_ts_past,
347 1, __ATOMIC_RELAXED);
350 tick = sh->txpp.tick;
352 /* Convert delta to completions, round up. */
353 mts = (mts + tick - 1) / tick;
354 if (unlikely(mts >= (1 << MLX5_CQ_INDEX_WIDTH) / 2 - 1)) {
355 /* The mts is too far in the future. */
356 __atomic_fetch_add(&sh->txpp.err_ts_future,
357 1, __ATOMIC_RELAXED);
360 mts <<= 64 - MLX5_CQ_INDEX_WIDTH;
362 ci >>= 64 - MLX5_CQ_INDEX_WIDTH;
367 * Set Software Parser flags and offsets in Ethernet Segment of WQE.
368 * Flags must be initialized to zero beforehand.
371 * Pointer to burst routine local context.
373 * Pointer to store Software Parser flags.
375 * Configured Tx offloads mask. It is fully defined at
376 * compile time and may be used for optimization.
379 * Software Parser offsets packed in dword.
380 * Software Parser flags are set by pointer.
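 *
 * Layout sketch (derived from the code below): the returned dword keeps the
 * offsets in 2-byte units - byte 0 outer L4, byte 1 outer L3, byte 2 inner
 * L4, byte 3 inner L3. E.g. with outer_l2_len == 14 and no VLAN the outer
 * L3 offset byte holds 7.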
382 static __rte_always_inline uint32_t
383 txq_mbuf_to_swp(struct mlx5_txq_local *__rte_restrict loc,
388 unsigned int idx, off;
391 if (!MLX5_TXOFF_CONFIG(SWP))
393 ol = loc->mbuf->ol_flags;
394 tunnel = ol & RTE_MBUF_F_TX_TUNNEL_MASK;
396 * Check whether Software Parser is required.
397 * Only customized tunnels may ask for it.
399 if (likely(tunnel != RTE_MBUF_F_TX_TUNNEL_UDP && tunnel != RTE_MBUF_F_TX_TUNNEL_IP))
402 * The index should have:
403 * bit[0:1] = RTE_MBUF_F_TX_L4_MASK
404 * bit[4] = RTE_MBUF_F_TX_IPV6
405 * bit[8] = RTE_MBUF_F_TX_OUTER_IPV6
406 * bit[9] = RTE_MBUF_F_TX_OUTER_UDP
408 idx = (ol & (RTE_MBUF_F_TX_L4_MASK | RTE_MBUF_F_TX_IPV6 | RTE_MBUF_F_TX_OUTER_IPV6)) >> 52;
409 idx |= (tunnel == RTE_MBUF_F_TX_TUNNEL_UDP) ? (1 << 9) : 0;
410 *swp_flags = mlx5_swp_types_table[idx];
412 * Set offsets for SW parser. Since ConnectX-5, SW parser just
413 * complements HW parser. SW parser starts to engage only if HW parser
414 * can't reach a header. For the older devices, HW parser will not kick
415 * in if any of SWP offsets is set. Therefore, all of the L3 offsets
416 * should be set regardless of HW offload.
418 off = loc->mbuf->outer_l2_len;
419 if (MLX5_TXOFF_CONFIG(VLAN) && ol & RTE_MBUF_F_TX_VLAN)
420 off += sizeof(struct rte_vlan_hdr);
421 set = (off >> 1) << 8; /* Outer L3 offset. */
422 off += loc->mbuf->outer_l3_len;
423 if (tunnel == RTE_MBUF_F_TX_TUNNEL_UDP)
424 set |= off >> 1; /* Outer L4 offset. */
425 if (ol & (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IPV6)) { /* Inner IP. */
426 const uint64_t csum = ol & RTE_MBUF_F_TX_L4_MASK;
427 off += loc->mbuf->l2_len;
428 set |= (off >> 1) << 24; /* Inner L3 offset. */
429 if (csum == RTE_MBUF_F_TX_TCP_CKSUM ||
430 csum == RTE_MBUF_F_TX_UDP_CKSUM ||
431 (MLX5_TXOFF_CONFIG(TSO) && ol & RTE_MBUF_F_TX_TCP_SEG)) {
432 off += loc->mbuf->l3_len;
433 set |= (off >> 1) << 16; /* Inner L4 offset. */
436 set = rte_cpu_to_le_32(set);
441 * Convert the Checksum offloads to Verbs.
444 * Pointer to the mbuf.
447 * Converted checksum flags.
449 static __rte_always_inline uint8_t
450 txq_ol_cksum_to_cs(struct rte_mbuf *buf)
453 uint8_t is_tunnel = !!(buf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK);
454 const uint64_t ol_flags_mask = RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_L4_MASK |
455 RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_OUTER_IP_CKSUM;
458 * The index should have:
459 * bit[0] = RTE_MBUF_F_TX_TCP_SEG
460 * bit[2:3] = RTE_MBUF_F_TX_UDP_CKSUM, RTE_MBUF_F_TX_TCP_CKSUM
461 * bit[4] = RTE_MBUF_F_TX_IP_CKSUM
462 * bit[8] = RTE_MBUF_F_TX_OUTER_IP_CKSUM
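 * E.g. RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_TCP_CKSUM on a plain
 * (non-tunnel) packet sets only bit[4] and one of bits[2:3].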
465 idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (!!is_tunnel << 9);
466 return mlx5_cksum_table[idx];
470 * Free the mbufs from the linear array of pointers.
473 * Pointer to Tx queue structure.
475 * Pointer to array of packets to be freed.
477 * Number of packets to be freed.
479 * Configured Tx offloads mask. It is fully defined at
480 * compile time and may be used for optimization.
482 static __rte_always_inline void
483 mlx5_tx_free_mbuf(struct mlx5_txq_data *__rte_restrict txq,
484 struct rte_mbuf **__rte_restrict pkts,
486 unsigned int olx __rte_unused)
488 struct rte_mempool *pool = NULL;
489 struct rte_mbuf **p_free = NULL;
490 struct rte_mbuf *mbuf;
491 unsigned int n_free = 0;
494 * The implemented algorithm eliminates
495 * copying pointers to a temporary array
496 * for rte_mempool_put_bulk() calls.
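 * For example, a run of consecutive packets coming from the same mempool
 * is returned with a single rte_mempool_put_bulk() call directly on the
 * pkts[] slice; a new run is started whenever a packet from a different
 * mempool is encountered.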
501 * Free mbufs directly to the pool in bulk
502 * if fast free offload is engaged
504 if (!MLX5_TXOFF_CONFIG(MULTI) && txq->fast_free) {
507 rte_mempool_put_bulk(pool, (void *)pkts, pkts_n);
513 * Decrement mbuf reference counter, detach
514 * indirect and external buffers if needed.
516 mbuf = rte_pktmbuf_prefree_seg(*pkts);
517 if (likely(mbuf != NULL)) {
518 MLX5_ASSERT(mbuf == *pkts);
519 if (likely(n_free != 0)) {
520 if (unlikely(pool != mbuf->pool))
521 /* From different pool. */
524 /* Start new scan array. */
531 if (unlikely(pkts_n == 0)) {
537 * This happens if mbuf is still referenced.
538 * We can't put it back to the pool, skip.
542 if (unlikely(n_free != 0))
543 /* There is some array to free.*/
545 if (unlikely(pkts_n == 0))
546 /* Last mbuf, nothing to free. */
552 * This loop is implemented to avoid multiple
553 * inlining of rte_mempool_put_bulk().
559 * Free the array of pre-freed mbufs
560 * belonging to the same memory pool.
562 rte_mempool_put_bulk(pool, (void *)p_free, n_free);
563 if (unlikely(mbuf != NULL)) {
564 /* There is a request to start a new scan. */
569 if (likely(pkts_n != 0))
572 * This is the last mbuf to be freed.
573 * Do one more loop iteration to complete.
574 * This is rare case of the last unique mbuf.
579 if (likely(pkts_n == 0))
588 * Not inlined version of the mbuf free routine, for an optimal call
589 * on tx_burst completion.
591 static __rte_noinline void
592 __mlx5_tx_free_mbuf(struct mlx5_txq_data *__rte_restrict txq,
593 struct rte_mbuf **__rte_restrict pkts,
595 unsigned int olx __rte_unused)
597 mlx5_tx_free_mbuf(txq, pkts, pkts_n, olx);
601 * Free the mbufs from the elts ring buffer up to the new tail.
604 * Pointer to Tx queue structure.
606 * Index in elts to free up to, becomes new elts tail.
608 * Configured Tx offloads mask. It is fully defined at
609 * compile time and may be used for optimization.
611 static __rte_always_inline void
612 mlx5_tx_free_elts(struct mlx5_txq_data *__rte_restrict txq,
614 unsigned int olx __rte_unused)
616 uint16_t n_elts = tail - txq->elts_tail;
619 MLX5_ASSERT(n_elts <= txq->elts_s);
621 * Implement a loop to support ring buffer wraparound
622 * with single inlining of mlx5_tx_free_mbuf().
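 * For example (illustrative numbers), with elts_s == 256, elts_tail == 250
 * and tail == 266 there are 16 mbufs to free: the first iteration frees the
 * 6 entries up to the end of the ring, the second frees the remaining 10
 * from the ring beginning.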
627 part = txq->elts_s - (txq->elts_tail & txq->elts_m);
628 part = RTE_MIN(part, n_elts);
630 MLX5_ASSERT(part <= txq->elts_s);
631 mlx5_tx_free_mbuf(txq,
632 &txq->elts[txq->elts_tail & txq->elts_m],
634 txq->elts_tail += part;
640 * Store the mbuf being sent into elts ring buffer.
641 * On Tx completion these mbufs will be freed.
644 * Pointer to Tx queue structure.
646 * Pointer to array of packets to be stored.
648 * Number of packets to be stored.
650 * Configured Tx offloads mask. It is fully defined at
651 * compile time and may be used for optimization.
653 static __rte_always_inline void
654 mlx5_tx_copy_elts(struct mlx5_txq_data *__rte_restrict txq,
655 struct rte_mbuf **__rte_restrict pkts,
657 unsigned int olx __rte_unused)
660 struct rte_mbuf **elts = (struct rte_mbuf **)txq->elts;
664 part = txq->elts_s - (txq->elts_head & txq->elts_m);
666 MLX5_ASSERT(part <= txq->elts_s);
667 /* This code is a good candidate for vectorizing with SIMD. */
668 rte_memcpy((void *)(elts + (txq->elts_head & txq->elts_m)),
670 RTE_MIN(part, pkts_n) * sizeof(struct rte_mbuf *));
671 txq->elts_head += pkts_n;
672 if (unlikely(part < pkts_n))
673 /* The copy is wrapping around the elts array. */
674 rte_memcpy((void *)elts, (void *)(pkts + part),
675 (pkts_n - part) * sizeof(struct rte_mbuf *));
679 * Check if the completion request flag should be set in the last WQE.
680 * Both pushed mbufs and WQEs are monitored and the completion request
681 * flag is set if any of thresholds is reached.
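 * For example, a completion is requested either when at least
 * MLX5_TX_COMP_THRESH mbufs have been pushed since the last request or,
 * when inlining is enabled, when wqe_ci has advanced by at least
 * wqe_thres WQEBBs since the last request.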
684 * Pointer to TX queue structure.
686 * Pointer to burst routine local context.
688 * Configured Tx offloads mask. It is fully defined at
689 * compile time and may be used for optimization.
691 static __rte_always_inline void
692 mlx5_tx_request_completion(struct mlx5_txq_data *__rte_restrict txq,
693 struct mlx5_txq_local *__rte_restrict loc,
696 uint16_t head = txq->elts_head;
699 part = MLX5_TXOFF_CONFIG(INLINE) ?
700 0 : loc->pkts_sent - loc->pkts_copy;
702 if ((uint16_t)(head - txq->elts_comp) >= MLX5_TX_COMP_THRESH ||
703 (MLX5_TXOFF_CONFIG(INLINE) &&
704 (uint16_t)(txq->wqe_ci - txq->wqe_comp) >= txq->wqe_thres)) {
705 volatile struct mlx5_wqe *last = loc->wqe_last;
708 txq->elts_comp = head;
709 if (MLX5_TXOFF_CONFIG(INLINE))
710 txq->wqe_comp = txq->wqe_ci;
711 /* Request unconditional completion on last WQE. */
712 last->cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
713 MLX5_COMP_MODE_OFFSET);
714 /* Save elts_head in dedicated free on completion queue. */
715 #ifdef RTE_LIBRTE_MLX5_DEBUG
716 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head |
717 (last->cseg.opcode >> 8) << 16;
719 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head;
721 /* A CQE slot must always be available. */
722 MLX5_ASSERT((txq->cq_pi - txq->cq_ci) <= txq->cqe_s);
727 * Build the Control Segment with specified opcode:
729 * - MLX5_OPCODE_ENHANCED_MPSW
733 * Pointer to TX queue structure.
735 * Pointer to burst routine local context.
737 * Pointer to WQE to fill with built Control Segment.
739 * Supposed length of WQE in segments.
741 * SQ WQE opcode to put into Control Segment.
743 * Configured Tx offloads mask. It is fully defined at
744 * compile time and may be used for optimization.
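 *
 * Illustrative result (derived from the code below): for a SEND WQE of
 * 4 segments the routine stores (wqe_ci << 8) | MLX5_OPCODE_SEND into
 * cs->opcode and (qp_num_8s | 4) into cs->sq_ds, both converted to
 * big-endian.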
746 static __rte_always_inline void
747 mlx5_tx_cseg_init(struct mlx5_txq_data *__rte_restrict txq,
748 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
749 struct mlx5_wqe *__rte_restrict wqe,
752 unsigned int olx __rte_unused)
754 struct mlx5_wqe_cseg *__rte_restrict cs = &wqe->cseg;
756 /* For legacy MPW replace the EMPW by TSO with modifier. */
757 if (MLX5_TXOFF_CONFIG(MPW) && opcode == MLX5_OPCODE_ENHANCED_MPSW)
758 opcode = MLX5_OPCODE_TSO | MLX5_OPC_MOD_MPW << 24;
759 cs->opcode = rte_cpu_to_be_32((txq->wqe_ci << 8) | opcode);
760 cs->sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
761 cs->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
762 MLX5_COMP_MODE_OFFSET);
763 cs->misc = RTE_BE32(0);
767 * Build the Synchronize Queue Segment with specified completion index.
770 * Pointer to TX queue structure.
772 * Pointer to burst routine local context.
774 * Pointer to WQE to fill with built Control Segment.
776 * Completion index in Clock Queue to wait.
778 * Configured Tx offloads mask. It is fully defined at
779 * compile time and may be used for optimization.
781 static __rte_always_inline void
782 mlx5_tx_wseg_init(struct mlx5_txq_data *restrict txq,
783 struct mlx5_txq_local *restrict loc __rte_unused,
784 struct mlx5_wqe *restrict wqe,
786 unsigned int olx __rte_unused)
788 struct mlx5_wqe_qseg *qs;
790 qs = RTE_PTR_ADD(wqe, MLX5_WSEG_SIZE);
791 qs->max_index = rte_cpu_to_be_32(wci);
792 qs->qpn_cqn = rte_cpu_to_be_32(txq->sh->txpp.clock_queue.cq_obj.cq->id);
793 qs->reserved0 = RTE_BE32(0);
794 qs->reserved1 = RTE_BE32(0);
798 * Build the Ethernet Segment without inlined data.
799 * Supports Software Parser, Checksums and VLAN insertion Tx offload features.
802 * Pointer to TX queue structure.
804 * Pointer to burst routine local context.
806 * Pointer to WQE to fill with built Ethernet Segment.
808 * Configured Tx offloads mask. It is fully defined at
809 * compile time and may be used for optimization.
811 static __rte_always_inline void
812 mlx5_tx_eseg_none(struct mlx5_txq_data *__rte_restrict txq __rte_unused,
813 struct mlx5_txq_local *__rte_restrict loc,
814 struct mlx5_wqe *__rte_restrict wqe,
817 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
821 * Calculate and set check sum flags first, dword field
822 * in segment may be shared with Software Parser flags.
824 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
825 es->flags = rte_cpu_to_le_32(csum);
827 * Calculate and set Software Parser offsets and flags.
828 * These flags are set for custom UDP and IP tunnel packets.
830 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
831 /* Fill metadata field if needed. */
832 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
833 loc->mbuf->ol_flags & RTE_MBUF_DYNFLAG_TX_METADATA ?
834 rte_cpu_to_be_32(*RTE_FLOW_DYNF_METADATA(loc->mbuf)) :
836 /* Engage VLAN tag insertion feature if requested. */
837 if (MLX5_TXOFF_CONFIG(VLAN) &&
838 loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN) {
840 * We should get here only if the device supports
841 * this feature correctly.
843 MLX5_ASSERT(txq->vlan_en);
844 es->inline_hdr = rte_cpu_to_be_32(MLX5_ETH_WQE_VLAN_INSERT |
845 loc->mbuf->vlan_tci);
847 es->inline_hdr = RTE_BE32(0);
852 * Build the Ethernet Segment with minimal inlined data
853 * of MLX5_ESEG_MIN_INLINE_SIZE bytes length. This is
854 * used to fill the gap in single WQEBB WQEs.
855 * Supports Software Parser, Checksums and VLAN
856 * insertion Tx offload features.
859 * Pointer to TX queue structure.
861 * Pointer to burst routine local context.
863 * Pointer to WQE to fill with built Ethernet Segment.
865 * Length of VLAN tag insertion if any.
867 * Configured Tx offloads mask. It is fully defined at
868 * compile time and may be used for optimization.
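 *
 * Illustrative layout (assuming MLX5_ESEG_MIN_INLINE_SIZE is 18 bytes):
 * when VLAN insertion is requested the inlined bytes are the 12 bytes of
 * destination/source MAC addresses taken from the packet, the inserted
 * 4-byte VLAN ethertype + TCI, and the original 2-byte ethertype.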
870 static __rte_always_inline void
871 mlx5_tx_eseg_dmin(struct mlx5_txq_data *__rte_restrict txq __rte_unused,
872 struct mlx5_txq_local *__rte_restrict loc,
873 struct mlx5_wqe *__rte_restrict wqe,
877 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
879 uint8_t *psrc, *pdst;
882 * Calculate and set check sum flags first, dword field
883 * in segment may be shared with Software Parser flags.
885 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
886 es->flags = rte_cpu_to_le_32(csum);
888 * Calculate and set Software Parser offsets and flags.
889 * These flags are set for custom UDP and IP tunnel packets.
891 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
892 /* Fill metadata field if needed. */
893 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
894 loc->mbuf->ol_flags & RTE_MBUF_DYNFLAG_TX_METADATA ?
895 rte_cpu_to_be_32(*RTE_FLOW_DYNF_METADATA(loc->mbuf)) :
897 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
898 es->inline_hdr_sz = RTE_BE16(MLX5_ESEG_MIN_INLINE_SIZE);
899 es->inline_data = *(unaligned_uint16_t *)psrc;
900 psrc += sizeof(uint16_t);
901 pdst = (uint8_t *)(es + 1);
902 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
903 /* Implement VLAN tag insertion as part of inline data. */
904 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
905 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
906 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
907 /* Insert VLAN ethertype + VLAN tag. */
908 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
909 ((RTE_ETHER_TYPE_VLAN << 16) |
910 loc->mbuf->vlan_tci);
911 pdst += sizeof(struct rte_vlan_hdr);
912 /* Copy the remaining two bytes from packet data. */
913 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
914 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
916 /* Fill the gap in the title WQEBB with inline data. */
917 rte_mov16(pdst, psrc);
922 * Build the Ethernet Segment with entire packet data inlining. Checks the
923 * boundary of WQEBB and ring buffer wrapping, supports Software Parser,
924 * Checksums and VLAN insertion Tx offload features.
927 * Pointer to TX queue structure.
929 * Pointer to burst routine local context.
931 * Pointer to WQE to fill with built Ethernet Segment.
933 * Length of VLAN tag insertion if any.
935 * Length of data to inline (VLAN included, if any).
937 * TSO flag, set mss field from the packet.
939 * Configured Tx offloads mask. It is fully defined at
940 * compile time and may be used for optimization.
943 * Pointer to the next Data Segment (aligned and wrapped around).
945 static __rte_always_inline struct mlx5_wqe_dseg *
946 mlx5_tx_eseg_data(struct mlx5_txq_data *__rte_restrict txq,
947 struct mlx5_txq_local *__rte_restrict loc,
948 struct mlx5_wqe *__rte_restrict wqe,
954 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
956 uint8_t *psrc, *pdst;
960 * Calculate and set check sum flags first, dword field
961 * in segment may be shared with Software Parser flags.
963 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
966 csum |= loc->mbuf->tso_segsz;
967 es->flags = rte_cpu_to_be_32(csum);
969 es->flags = rte_cpu_to_le_32(csum);
972 * Calculate and set Software Parser offsets and flags.
973 * These flags are set for custom UDP and IP tunnel packets.
975 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
976 /* Fill metadata field if needed. */
977 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
978 loc->mbuf->ol_flags & RTE_MBUF_DYNFLAG_TX_METADATA ?
979 rte_cpu_to_be_32(*RTE_FLOW_DYNF_METADATA(loc->mbuf)) :
981 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
982 es->inline_hdr_sz = rte_cpu_to_be_16(inlen);
983 es->inline_data = *(unaligned_uint16_t *)psrc;
984 psrc += sizeof(uint16_t);
985 pdst = (uint8_t *)(es + 1);
986 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
987 /* Implement VLAN tag insertion as part of inline data. */
988 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
989 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
990 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
991 /* Insert VLAN ethertype + VLAN tag. */
992 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
993 ((RTE_ETHER_TYPE_VLAN << 16) |
994 loc->mbuf->vlan_tci);
995 pdst += sizeof(struct rte_vlan_hdr);
996 /* Copy the remaining two bytes from packet data. */
997 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
998 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
999 psrc += sizeof(uint16_t);
1001 /* Fill the gap in the title WQEBB with inline data. */
1002 rte_mov16(pdst, psrc);
1003 psrc += sizeof(rte_v128u32_t);
1005 pdst = (uint8_t *)(es + 2);
1006 MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
1007 MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
1008 inlen -= MLX5_ESEG_MIN_INLINE_SIZE;
1010 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
1011 return (struct mlx5_wqe_dseg *)pdst;
1014 * The WQEBB space availability is checked by caller.
1015 * Here we should be aware of WQE ring buffer wraparound only.
1017 part = (uint8_t *)txq->wqes_end - pdst;
1018 part = RTE_MIN(part, inlen);
1020 rte_memcpy(pdst, psrc, part);
1022 if (likely(!inlen)) {
1024 * If return value is not used by the caller
1025 * the code below will be optimized out.
1028 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
1029 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
1030 pdst = (uint8_t *)txq->wqes;
1031 return (struct mlx5_wqe_dseg *)pdst;
1033 pdst = (uint8_t *)txq->wqes;
1040 * Copy data from the mbuf chain to the specified linear buffer.
1041 * Supports Checksums and VLAN insertion Tx offload features. If data
1042 * from some mbuf is copied completely, that mbuf is freed. The local
1043 * structure is used to keep the byte stream state.
1046 * Pointer to the destination linear buffer.
1048 * Pointer to burst routine local context.
1050 * Length of data to be copied.
1052 * Length of data to be copied ignoring no inline hint.
1054 * Configured Tx offloads mask. It is fully defined at
1055 * compile time and may be used for optimization.
1058 * Number of actually copied data bytes. This is always greater than or
1059 * equal to the must parameter and might be less than len if the no-inline
1060 * hint flag is encountered.
1062 static __rte_always_inline unsigned int
1063 mlx5_tx_mseg_memcpy(uint8_t *pdst,
1064 struct mlx5_txq_local *__rte_restrict loc,
1067 unsigned int olx __rte_unused)
1069 struct rte_mbuf *mbuf;
1070 unsigned int part, dlen, copy = 0;
1075 /* Allow zero length packets, must check first. */
1076 dlen = rte_pktmbuf_data_len(loc->mbuf);
1077 if (dlen <= loc->mbuf_off) {
1078 /* Exhausted packet, just free. */
1080 loc->mbuf = mbuf->next;
1081 rte_pktmbuf_free_seg(mbuf);
1083 MLX5_ASSERT(loc->mbuf_nseg > 1);
1084 MLX5_ASSERT(loc->mbuf);
1086 if (loc->mbuf->ol_flags & RTE_MBUF_F_TX_DYNF_NOINLINE) {
1091 * We already copied the minimal
1092 * requested amount of data.
1097 if (diff <= rte_pktmbuf_data_len(loc->mbuf)) {
1099 * Copy only the minimal required
1100 * part of the data buffer. Limit amount
1101 * of data to be copied to the length of
1104 len = RTE_MIN(len, diff);
1109 dlen -= loc->mbuf_off;
1110 psrc = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
1112 part = RTE_MIN(len, dlen);
1113 rte_memcpy(pdst, psrc, part);
1115 loc->mbuf_off += part;
1118 if (loc->mbuf_off >= rte_pktmbuf_data_len(loc->mbuf)) {
1120 /* Exhausted packet, just free. */
1122 loc->mbuf = mbuf->next;
1123 rte_pktmbuf_free_seg(mbuf);
1125 MLX5_ASSERT(loc->mbuf_nseg >= 1);
1135 * Build the Ethernet Segment with inlined data from multi-segment packet.
1136 * Checks the boundary of WQEBB and ring buffer wrapping, supports Software
1137 * Parser, Checksums and VLAN insertion Tx offload features.
1140 * Pointer to TX queue structure.
1142 * Pointer to burst routine local context.
1144 * Pointer to WQE to fill with built Ethernet Segment.
1146 * Length of VLAN tag insertion if any.
1148 * Length of data to inline (VLAN included, if any).
1150 * TSO flag, set mss field from the packet.
1152 * Configured Tx offloads mask. It is fully defined at
1153 * compile time and may be used for optimization.
1156 * Pointer to the next Data Segment (aligned and possibly NOT wrapped
1157 * around - caller should do wrapping check on its own).
1159 static __rte_always_inline struct mlx5_wqe_dseg *
1160 mlx5_tx_eseg_mdat(struct mlx5_txq_data *__rte_restrict txq,
1161 struct mlx5_txq_local *__rte_restrict loc,
1162 struct mlx5_wqe *__rte_restrict wqe,
1168 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
1171 unsigned int part, tlen = 0;
1174 * Calculate and set check sum flags first, uint32_t field
1175 * in segment may be shared with Software Parser flags.
1177 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
1180 csum |= loc->mbuf->tso_segsz;
1181 es->flags = rte_cpu_to_be_32(csum);
1183 es->flags = rte_cpu_to_le_32(csum);
1186 * Calculate and set Software Parser offsets and flags.
1187 * These flags are set for custom UDP and IP tunnel packets.
1189 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
1190 /* Fill metadata field if needed. */
1191 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
1192 loc->mbuf->ol_flags & RTE_MBUF_DYNFLAG_TX_METADATA ?
1193 rte_cpu_to_be_32(*RTE_FLOW_DYNF_METADATA(loc->mbuf)) :
1195 MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
1196 pdst = (uint8_t *)&es->inline_data;
1197 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
1198 /* Implement VLAN tag insertion as part of inline data. */
1199 mlx5_tx_mseg_memcpy(pdst, loc,
1200 2 * RTE_ETHER_ADDR_LEN,
1201 2 * RTE_ETHER_ADDR_LEN, olx);
1202 pdst += 2 * RTE_ETHER_ADDR_LEN;
1203 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
1204 ((RTE_ETHER_TYPE_VLAN << 16) |
1205 loc->mbuf->vlan_tci);
1206 pdst += sizeof(struct rte_vlan_hdr);
1207 tlen += 2 * RTE_ETHER_ADDR_LEN + sizeof(struct rte_vlan_hdr);
1209 MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
1211 * The WQEBB space availability is checked by caller.
1212 * Here we should be aware of WQE ring buffer wraparound only.
1214 part = (uint8_t *)txq->wqes_end - pdst;
1215 part = RTE_MIN(part, inlen - tlen);
1221 * Copying may be interrupted inside the routine
1222 * if it runs into the no-inline hint flag.
1224 copy = tso ? inlen : txq->inlen_mode;
1225 copy = tlen >= copy ? 0 : (copy - tlen);
1226 copy = mlx5_tx_mseg_memcpy(pdst, loc, part, copy, olx);
1228 if (likely(inlen <= tlen) || copy < part) {
1229 es->inline_hdr_sz = rte_cpu_to_be_16(tlen);
1231 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
1232 return (struct mlx5_wqe_dseg *)pdst;
1234 pdst = (uint8_t *)txq->wqes;
1235 part = inlen - tlen;
1240 * Build the Data Segment of pointer type.
1243 * Pointer to TX queue structure.
1245 * Pointer to burst routine local context.
1247 * Pointer to WQE to fill with built Data Segment.
1249 * Data buffer to point.
1251 * Data buffer length.
1253 * Configured Tx offloads mask. It is fully defined at
1254 * compile time and may be used for optimization.
1256 static __rte_always_inline void
1257 mlx5_tx_dseg_ptr(struct mlx5_txq_data *__rte_restrict txq,
1258 struct mlx5_txq_local *__rte_restrict loc,
1259 struct mlx5_wqe_dseg *__rte_restrict dseg,
1262 unsigned int olx __rte_unused)
1266 dseg->bcount = rte_cpu_to_be_32(len);
1267 dseg->lkey = mlx5_mr_mb2mr(&txq->mr_ctrl, loc->mbuf);
1268 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
1272 * Build the Data Segment of pointer type, or inline the data if its length
1273 * fits into the minimal Data Segment size.
1276 * Pointer to TX queue structure.
1278 * Pointer to burst routine local context.
1280 * Pointer to WQE to fill with built Data Segment.
1282 * Data buffer to point.
1284 * Data buffer length.
1286 * Configured Tx offloads mask. It is fully defined at
1287 * compile time and may be used for optimization.
1289 static __rte_always_inline void
1290 mlx5_tx_dseg_iptr(struct mlx5_txq_data *__rte_restrict txq,
1291 struct mlx5_txq_local *__rte_restrict loc,
1292 struct mlx5_wqe_dseg *__rte_restrict dseg,
1295 unsigned int olx __rte_unused)
1301 if (len > MLX5_DSEG_MIN_INLINE_SIZE) {
1302 dseg->bcount = rte_cpu_to_be_32(len);
1303 dseg->lkey = mlx5_mr_mb2mr(&txq->mr_ctrl, loc->mbuf);
1304 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
1308 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
1309 /* Unrolled implementation of generic rte_memcpy. */
1310 dst = (uintptr_t)&dseg->inline_data[0];
1311 src = (uintptr_t)buf;
1313 #ifdef RTE_ARCH_STRICT_ALIGN
1314 MLX5_ASSERT(dst == RTE_PTR_ALIGN(dst, sizeof(uint32_t)));
1315 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
1316 dst += sizeof(uint32_t);
1317 src += sizeof(uint32_t);
1318 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
1319 dst += sizeof(uint32_t);
1320 src += sizeof(uint32_t);
1322 *(uint64_t *)dst = *(unaligned_uint64_t *)src;
1323 dst += sizeof(uint64_t);
1324 src += sizeof(uint64_t);
1328 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
1329 dst += sizeof(uint32_t);
1330 src += sizeof(uint32_t);
1333 *(uint16_t *)dst = *(unaligned_uint16_t *)src;
1334 dst += sizeof(uint16_t);
1335 src += sizeof(uint16_t);
1338 *(uint8_t *)dst = *(uint8_t *)src;
1342 * Build the Data Segment of inlined data from single
1343 * segment packet, no VLAN insertion.
1346 * Pointer to TX queue structure.
1348 * Pointer to burst routine local context.
1350 * Pointer to WQE to fill with built Data Segment.
1352 * Data buffer to point.
1354 * Data buffer length.
1356 * Configured Tx offloads mask. It is fully defined at
1357 * compile time and may be used for optimization.
1360 * Pointer to the next Data Segment after inlined data.
1361 * Ring buffer wraparound check is needed. We do not do it here because it
1362 * may not be needed for the last packet in the eMPW session.
1364 static __rte_always_inline struct mlx5_wqe_dseg *
1365 mlx5_tx_dseg_empw(struct mlx5_txq_data *__rte_restrict txq,
1366 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
1367 struct mlx5_wqe_dseg *__rte_restrict dseg,
1370 unsigned int olx __rte_unused)
1375 if (!MLX5_TXOFF_CONFIG(MPW)) {
1376 /* Store the descriptor byte counter for eMPW sessions. */
1377 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
1378 pdst = &dseg->inline_data[0];
1380 /* The entire legacy MPW session counter is stored on close. */
1381 pdst = (uint8_t *)dseg;
1384 * The WQEBB space availability is checked by caller.
1385 * Here we should be aware of WQE ring buffer wraparound only.
1387 part = (uint8_t *)txq->wqes_end - pdst;
1388 part = RTE_MIN(part, len);
1390 rte_memcpy(pdst, buf, part);
1394 if (!MLX5_TXOFF_CONFIG(MPW))
1395 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
1396 /* Note: no final wraparound check here. */
1397 return (struct mlx5_wqe_dseg *)pdst;
1399 pdst = (uint8_t *)txq->wqes;
1406 * Build the Data Segment of inlined data from single
1407 * segment packet with VLAN insertion.
1410 * Pointer to TX queue structure.
1412 * Pointer to burst routine local context.
1414 * Pointer to the dseg to fill with the built Data Segment.
1416 * Data buffer to point.
1418 * Data buffer length.
1420 * Configured Tx offloads mask. It is fully defined at
1421 * compile time and may be used for optimization.
1424 * Pointer to the next Data Segment after inlined data.
1425 * Ring buffer wraparound check is needed.
1427 static __rte_always_inline struct mlx5_wqe_dseg *
1428 mlx5_tx_dseg_vlan(struct mlx5_txq_data *__rte_restrict txq,
1429 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
1430 struct mlx5_wqe_dseg *__rte_restrict dseg,
1433 unsigned int olx __rte_unused)
1439 MLX5_ASSERT(len > MLX5_ESEG_MIN_INLINE_SIZE);
1440 if (!MLX5_TXOFF_CONFIG(MPW)) {
1441 /* Store the descriptor byte counter for eMPW sessions. */
1442 dseg->bcount = rte_cpu_to_be_32
1443 ((len + sizeof(struct rte_vlan_hdr)) |
1444 MLX5_ETH_WQE_DATA_INLINE);
1445 pdst = &dseg->inline_data[0];
1447 /* The entire legacy MPW session counter is stored on close. */
1448 pdst = (uint8_t *)dseg;
1450 memcpy(pdst, buf, MLX5_DSEG_MIN_INLINE_SIZE);
1451 buf += MLX5_DSEG_MIN_INLINE_SIZE;
1452 pdst += MLX5_DSEG_MIN_INLINE_SIZE;
1453 len -= MLX5_DSEG_MIN_INLINE_SIZE;
1454 /* Insert VLAN ethertype + VLAN tag. Pointer is aligned. */
1455 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
1456 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
1457 pdst = (uint8_t *)txq->wqes;
1458 *(uint32_t *)pdst = rte_cpu_to_be_32((RTE_ETHER_TYPE_VLAN << 16) |
1459 loc->mbuf->vlan_tci);
1460 pdst += sizeof(struct rte_vlan_hdr);
1462 * The WQEBB space availability is checked by caller.
1463 * Here we should be aware of WQE ring buffer wraparound only.
1465 part = (uint8_t *)txq->wqes_end - pdst;
1466 part = RTE_MIN(part, len);
1468 rte_memcpy(pdst, buf, part);
1472 if (!MLX5_TXOFF_CONFIG(MPW))
1473 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
1474 /* Note: no final wraparound check here. */
1475 return (struct mlx5_wqe_dseg *)pdst;
1477 pdst = (uint8_t *)txq->wqes;
1484 * Build the Ethernet Segment with optionally inlined data with
1485 * VLAN insertion and following Data Segments (if any) from
1486 * multi-segment packet. Used by ordinary send and TSO.
1489 * Pointer to TX queue structure.
1491 * Pointer to burst routine local context.
1493 * Pointer to WQE to fill with built Ethernet/Data Segments.
1495 * Length of VLAN header to insert, 0 means no VLAN insertion.
1497 * Data length to inline. For TSO this parameter specifies the exact value;
1498 * for the ordinary send routine it can be aligned by the caller to provide
1499 * better WQE space saving and data buffer start address alignment.
1500 * This length includes VLAN header being inserted.
1502 * Zero means ordinary send, inlined data can be extended,
1503 * otherwise this is TSO, inlined data length is fixed.
1505 * Configured Tx offloads mask. It is fully defined at
1506 * compile time and may be used for optimization.
1509 * Actual size of built WQE in segments.
1511 static __rte_always_inline unsigned int
1512 mlx5_tx_mseg_build(struct mlx5_txq_data *__rte_restrict txq,
1513 struct mlx5_txq_local *__rte_restrict loc,
1514 struct mlx5_wqe *__rte_restrict wqe,
1518 unsigned int olx __rte_unused)
1520 struct mlx5_wqe_dseg *__rte_restrict dseg;
1523 MLX5_ASSERT((rte_pktmbuf_pkt_len(loc->mbuf) + vlan) >= inlen);
1524 loc->mbuf_nseg = NB_SEGS(loc->mbuf);
1527 dseg = mlx5_tx_eseg_mdat(txq, loc, wqe, vlan, inlen, tso, olx);
1528 if (!loc->mbuf_nseg)
1531 * There are still some mbufs remaining, not inlined.
1532 * The first mbuf may be partially inlined and we
1533 * must process the possible non-zero data offset.
1535 if (loc->mbuf_off) {
1540 * Exhausted packets must be dropped before.
1541 * Non-zero offset means there is some data
1542 * remaining in the packet.
1544 MLX5_ASSERT(loc->mbuf_off < rte_pktmbuf_data_len(loc->mbuf));
1545 MLX5_ASSERT(rte_pktmbuf_data_len(loc->mbuf));
1546 dptr = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
1548 dlen = rte_pktmbuf_data_len(loc->mbuf) - loc->mbuf_off;
1550 * Build the pointer/minimal Data Segment.
1551 * Do ring buffer wrapping check in advance.
1553 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
1554 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
1555 mlx5_tx_dseg_iptr(txq, loc, dseg, dptr, dlen, olx);
1556 /* Store the mbuf to be freed on completion. */
1557 MLX5_ASSERT(loc->elts_free);
1558 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
1561 if (--loc->mbuf_nseg == 0)
1563 loc->mbuf = loc->mbuf->next;
1567 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
1568 struct rte_mbuf *mbuf;
1570 /* Zero length segment found, just skip. */
1572 loc->mbuf = loc->mbuf->next;
1573 rte_pktmbuf_free_seg(mbuf);
1574 if (--loc->mbuf_nseg == 0)
1577 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
1578 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
1581 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
1582 rte_pktmbuf_data_len(loc->mbuf), olx);
1583 MLX5_ASSERT(loc->elts_free);
1584 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
1587 if (--loc->mbuf_nseg == 0)
1589 loc->mbuf = loc->mbuf->next;
1594 /* Calculate actual segments used from the dseg pointer. */
1595 if ((uintptr_t)wqe < (uintptr_t)dseg)
1596 ds = ((uintptr_t)dseg - (uintptr_t)wqe) / MLX5_WSEG_SIZE;
1598 ds = (((uintptr_t)dseg - (uintptr_t)wqe) +
1599 txq->wqe_s * MLX5_WQE_SIZE) / MLX5_WSEG_SIZE;
1604 * The routine checks the timestamp flag in the current packet,
1605 * and pushes a WAIT WQE into the queue if scheduling is required.
1608 * Pointer to TX queue structure.
1610 * Pointer to burst routine local context.
1612 * Configured Tx offloads mask. It is fully defined at
1613 * compile time and may be used for optimization.
1616 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
1617 * MLX5_TXCMP_CODE_SINGLE - continue processing with the packet.
1618 * MLX5_TXCMP_CODE_MULTI - the WAIT inserted, continue processing.
1619 * Local context variables partially updated.
1621 static __rte_always_inline enum mlx5_txcmp_code
1622 mlx5_tx_schedule_send(struct mlx5_txq_data *restrict txq,
1623 struct mlx5_txq_local *restrict loc,
1626 if (MLX5_TXOFF_CONFIG(TXPP) &&
1627 loc->mbuf->ol_flags & txq->ts_mask) {
1628 struct mlx5_wqe *wqe;
1633 * Estimate the required space quickly and roughly.
1634 * We would like to ensure the packet can be pushed
1635 * to the queue and we won't get the orphan WAIT WQE.
1637 if (loc->wqe_free <= MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE ||
1638 loc->elts_free < NB_SEGS(loc->mbuf))
1639 return MLX5_TXCMP_CODE_EXIT;
1640 /* Convert the timestamp into completion to wait. */
1641 ts = *RTE_MBUF_DYNFIELD(loc->mbuf, txq->ts_offset, uint64_t *);
1642 wci = mlx5_txpp_convert_tx_ts(txq->sh, ts);
1643 if (unlikely(wci < 0))
1644 return MLX5_TXCMP_CODE_SINGLE;
1645 /* Build the WAIT WQE with specified completion. */
1646 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
1647 mlx5_tx_cseg_init(txq, loc, wqe, 2, MLX5_OPCODE_WAIT, olx);
1648 mlx5_tx_wseg_init(txq, loc, wqe, wci, olx);
1651 return MLX5_TXCMP_CODE_MULTI;
1653 return MLX5_TXCMP_CODE_SINGLE;
1657 * Tx one packet function for multi-segment TSO. Supports all
1658 * types of Tx offloads, uses MLX5_OPCODE_TSO to build WQEs,
1659 * sends one packet per WQE.
1661 * This routine is responsible for storing the processed mbuf
1662 * into the elts ring buffer and updating elts_head.
1665 * Pointer to TX queue structure.
1667 * Pointer to burst routine local context.
1669 * Configured Tx offloads mask. It is fully defined at
1670 * compile time and may be used for optimization.
1673 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
1674 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
1675 * Local context variables partially updated.
1677 static __rte_always_inline enum mlx5_txcmp_code
1678 mlx5_tx_packet_multi_tso(struct mlx5_txq_data *__rte_restrict txq,
1679 struct mlx5_txq_local *__rte_restrict loc,
1682 struct mlx5_wqe *__rte_restrict wqe;
1683 unsigned int ds, dlen, inlen, ntcp, vlan = 0;
1685 if (MLX5_TXOFF_CONFIG(TXPP)) {
1686 enum mlx5_txcmp_code wret;
1688 /* Generate WAIT for scheduling if requested. */
1689 wret = mlx5_tx_schedule_send(txq, loc, olx);
1690 if (wret == MLX5_TXCMP_CODE_EXIT)
1691 return MLX5_TXCMP_CODE_EXIT;
1692 if (wret == MLX5_TXCMP_CODE_ERROR)
1693 return MLX5_TXCMP_CODE_ERROR;
1696 * Calculate data length to be inlined to estimate
1697 * the required space in WQE ring buffer.
1699 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
1700 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN)
1701 vlan = sizeof(struct rte_vlan_hdr);
1702 inlen = loc->mbuf->l2_len + vlan +
1703 loc->mbuf->l3_len + loc->mbuf->l4_len;
1704 if (unlikely((!inlen || !loc->mbuf->tso_segsz)))
1705 return MLX5_TXCMP_CODE_ERROR;
1706 if (loc->mbuf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
1707 inlen += loc->mbuf->outer_l2_len + loc->mbuf->outer_l3_len;
1708 /* Packet must contain all TSO headers. */
1709 if (unlikely(inlen > MLX5_MAX_TSO_HEADER ||
1710 inlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
1711 inlen > (dlen + vlan)))
1712 return MLX5_TXCMP_CODE_ERROR;
1713 MLX5_ASSERT(inlen >= txq->inlen_mode);
1715 * Check whether there are enough free WQEBBs:
1717 * - Ethernet Segment
1718 * - First Segment of inlined Ethernet data
1719 * - ... data continued ...
1720 * - Data Segments of pointer/min inline type
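 * For example (illustrative numbers, assuming MLX5_ESEG_MIN_INLINE_SIZE is
 * 18 bytes and MLX5_WSEG_SIZE is 16): a 3-segment packet with inlen == 54
 * gives ds = 3 + 2 + (54 - 18 + 15) / 16 = 8, i.e. (8 + 3) / 4 = 2 WQEBBs.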
1722 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
1723 MLX5_ESEG_MIN_INLINE_SIZE +
1725 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
1726 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
1727 return MLX5_TXCMP_CODE_EXIT;
1728 /* Check for maximal WQE size. */
1729 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
1730 return MLX5_TXCMP_CODE_ERROR;
1731 #ifdef MLX5_PMD_SOFT_COUNTERS
1732 /* Update sent data bytes/packets counters. */
1733 ntcp = (dlen - (inlen - vlan) + loc->mbuf->tso_segsz - 1) /
1734 loc->mbuf->tso_segsz;
1736 * One will be added for mbuf itself at the end of the mlx5_tx_burst
1737 * from loc->pkts_sent field.
1740 txq->stats.opackets += ntcp;
1741 txq->stats.obytes += dlen + vlan + ntcp * inlen;
1743 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
1744 loc->wqe_last = wqe;
1745 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_TSO, olx);
1746 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 1, olx);
1747 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
1748 txq->wqe_ci += (ds + 3) / 4;
1749 loc->wqe_free -= (ds + 3) / 4;
1750 return MLX5_TXCMP_CODE_MULTI;
1754 * Tx one packet function for multi-segment SEND. Supports all types of Tx
1755 * offloads, uses MLX5_OPCODE_SEND to build WQEs, sends one packet per WQE,
1756 * without any data inlining in Ethernet Segment.
1758 * This routine is responsible for storing the processed mbuf
1759 * into the elts ring buffer and updating elts_head.
1762 * Pointer to TX queue structure.
1764 * Pointer to burst routine local context.
1766 * Configured Tx offloads mask. It is fully defined at
1767 * compile time and may be used for optimization.
1770 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
1771 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
1772 * Local context variables partially updated.
1774 static __rte_always_inline enum mlx5_txcmp_code
1775 mlx5_tx_packet_multi_send(struct mlx5_txq_data *__rte_restrict txq,
1776 struct mlx5_txq_local *__rte_restrict loc,
1779 struct mlx5_wqe_dseg *__rte_restrict dseg;
1780 struct mlx5_wqe *__rte_restrict wqe;
1781 unsigned int ds, nseg;
1783 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
1784 if (MLX5_TXOFF_CONFIG(TXPP)) {
1785 enum mlx5_txcmp_code wret;
1787 /* Generate WAIT for scheduling if requested. */
1788 wret = mlx5_tx_schedule_send(txq, loc, olx);
1789 if (wret == MLX5_TXCMP_CODE_EXIT)
1790 return MLX5_TXCMP_CODE_EXIT;
1791 if (wret == MLX5_TXCMP_CODE_ERROR)
1792 return MLX5_TXCMP_CODE_ERROR;
1795 * No inlining at all: saving CPU cycles was prioritized in the
1796 * configuration, so we should not copy any packet data to the WQE.
1798 nseg = NB_SEGS(loc->mbuf);
1800 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
1801 return MLX5_TXCMP_CODE_EXIT;
1802 /* Check for maximal WQE size. */
1803 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
1804 return MLX5_TXCMP_CODE_ERROR;
1806 * Some Tx offloads may cause an error if packet is not long enough,
1807 * check against assumed minimal length.
1809 if (rte_pktmbuf_pkt_len(loc->mbuf) <= MLX5_ESEG_MIN_INLINE_SIZE)
1810 return MLX5_TXCMP_CODE_ERROR;
1811 #ifdef MLX5_PMD_SOFT_COUNTERS
1812 /* Update sent data bytes counter. */
1813 txq->stats.obytes += rte_pktmbuf_pkt_len(loc->mbuf);
1814 if (MLX5_TXOFF_CONFIG(VLAN) &&
1815 loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN)
1816 txq->stats.obytes += sizeof(struct rte_vlan_hdr);
1819 * SEND WQE, one WQEBB:
1820 * - Control Segment, SEND opcode
1821 * - Ethernet Segment, optional VLAN, no inline
1822 * - Data Segments, pointer only type
1824 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
1825 loc->wqe_last = wqe;
1826 mlx5_tx_cseg_init(txq, loc, wqe, ds, MLX5_OPCODE_SEND, olx);
1827 mlx5_tx_eseg_none(txq, loc, wqe, olx);
1828 dseg = &wqe->dseg[0];
1830 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
1831 struct rte_mbuf *mbuf;
1834 * Zero length segment found, have to correct total
1835 * size of WQE in segments.
1836 * It is supposed to be a rare occasion, so in the normal
1837 * case (no zero length segments) we avoid extra
1838 * writing to the Control Segment.
1841 wqe->cseg.sq_ds -= RTE_BE32(1);
1843 loc->mbuf = mbuf->next;
1844 rte_pktmbuf_free_seg(mbuf);
1850 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
1851 rte_pktmbuf_data_len(loc->mbuf), olx);
1852 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
1857 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
1858 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
1859 loc->mbuf = loc->mbuf->next;
1862 txq->wqe_ci += (ds + 3) / 4;
1863 loc->wqe_free -= (ds + 3) / 4;
1864 return MLX5_TXCMP_CODE_MULTI;
1868 * Tx one packet function for multi-segment SEND. Supports all
1869 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
1870 * sends one packet per WQE, with data inlining in
1871 * Ethernet Segment and minimal Data Segments.
1873 * This routine is responsible for storing the processed mbuf
1874 * into the elts ring buffer and updating elts_head.
1877 * Pointer to TX queue structure.
1879 * Pointer to burst routine local context.
1881 * Configured Tx offloads mask. It is fully defined at
1882 * compile time and may be used for optimization.
1885 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
1886 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
1887 * Local context variables partially updated.
1889 static __rte_always_inline enum mlx5_txcmp_code
1890 mlx5_tx_packet_multi_inline(struct mlx5_txq_data *__rte_restrict txq,
1891 struct mlx5_txq_local *__rte_restrict loc,
1894 struct mlx5_wqe *__rte_restrict wqe;
1895 unsigned int ds, inlen, dlen, vlan = 0;
1897 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
1898 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
1899 if (MLX5_TXOFF_CONFIG(TXPP)) {
1900 enum mlx5_txcmp_code wret;
1902 /* Generate WAIT for scheduling if requested. */
1903 wret = mlx5_tx_schedule_send(txq, loc, olx);
1904 if (wret == MLX5_TXCMP_CODE_EXIT)
1905 return MLX5_TXCMP_CODE_EXIT;
1906 if (wret == MLX5_TXCMP_CODE_ERROR)
1907 return MLX5_TXCMP_CODE_ERROR;
1910 * First calculate data length to be inlined
1911 * to estimate the required space for WQE.
1913 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
1914 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN)
1915 vlan = sizeof(struct rte_vlan_hdr);
1916 inlen = dlen + vlan;
1917 /* Check against minimal length. */
1918 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
1919 return MLX5_TXCMP_CODE_ERROR;
1920 MLX5_ASSERT(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
1921 if (inlen > txq->inlen_send ||
1922 loc->mbuf->ol_flags & RTE_MBUF_F_TX_DYNF_NOINLINE) {
1923 struct rte_mbuf *mbuf;
1928 nxlen = rte_pktmbuf_data_len(mbuf);
1930 * Packet length exceeds the allowed inline data length,
1931 * check whether the minimal inlining is required.
1933 if (txq->inlen_mode) {
1934 MLX5_ASSERT(txq->inlen_mode >=
1935 MLX5_ESEG_MIN_INLINE_SIZE);
1936 MLX5_ASSERT(txq->inlen_mode <= txq->inlen_send);
1937 inlen = RTE_MIN(txq->inlen_mode, inlen);
1938 } else if (vlan && !txq->vlan_en) {
1940 * VLAN insertion is requested and the hardware does not
1941 * support the offload, it will be done with software inlining.
1943 inlen = MLX5_ESEG_MIN_INLINE_SIZE;
1944 } else if (mbuf->ol_flags & RTE_MBUF_F_TX_DYNF_NOINLINE ||
1945 nxlen > txq->inlen_send) {
1946 return mlx5_tx_packet_multi_send(txq, loc, olx);
1950 if (mbuf->ol_flags & RTE_MBUF_F_TX_DYNF_NOINLINE)
1953 * Now we know the minimal amount of data requested
1954 * to be inlined. Check whether we should inline the buffers
1955 * from the beginning of the chain to eliminate some mbufs.
1957 if (unlikely(nxlen <= txq->inlen_send)) {
1958 /* We can inline first mbuf at least. */
1959 if (nxlen < inlen) {
1962 /* Scan mbufs till inlen filled. */
1967 nxlen = rte_pktmbuf_data_len(mbuf);
1969 } while (unlikely(nxlen < inlen));
1970 if (unlikely(nxlen > txq->inlen_send)) {
1971 /* We cannot inline entire mbuf. */
1972 smlen = inlen - smlen;
1973 start = rte_pktmbuf_mtod_offset
1974 (mbuf, uintptr_t, smlen);
1982 /* There should be no end of the packet here. */
1984 if (mbuf->ol_flags & RTE_MBUF_F_TX_DYNF_NOINLINE)
1986 nxlen = inlen + rte_pktmbuf_data_len(mbuf);
1987 } while (unlikely(nxlen < txq->inlen_send));
1989 start = rte_pktmbuf_mtod(mbuf, uintptr_t);
1991 * Check whether we can do inline to align start
1992 * address of data buffer to cacheline.
1995 start = (~start + 1) & (RTE_CACHE_LINE_SIZE - 1);
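/*
 * "start" now holds the number of bytes up to the next cacheline boundary
 * (0 if the address is already aligned); e.g. with 64-byte cachelines a
 * data address ending in 0x38 gives (~0x38 + 1) & 0x3f = 8 bytes.
 */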
1996 if (unlikely(start)) {
1998 if (start <= txq->inlen_send)
2003 * Check whether there are enough free WQEBBs:
2005 * - Ethernet Segment
2006 * - First Segment of inlined Ethernet data
2007 * - ... data continued ...
2008 * - Data Segments of pointer/min inline type
2010 * Estimate the number of Data Segments conservatively,
2011 * supposing that no mbufs are freed during inlining.
2014 MLX5_ASSERT(inlen <= txq->inlen_send);
2015 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
2016 MLX5_ESEG_MIN_INLINE_SIZE +
2018 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
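/*
 * "ds" is counted in 16-byte WSEGs: the Control and Ethernet Segments
 * (the "+ 2"), one pointer Data Segment per mbuf segment in the worst
 * case, plus the WSEGs holding the inlined data that does not fit into
 * the Ethernet Segment.
 */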
2019 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
2020 return MLX5_TXCMP_CODE_EXIT;
2021 /* Check for maximal WQE size. */
2022 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
2023 return MLX5_TXCMP_CODE_ERROR;
2024 #ifdef MLX5_PMD_SOFT_COUNTERS
2025 /* Update sent data bytes/packets counters. */
2026 txq->stats.obytes += dlen + vlan;
2028 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
2029 loc->wqe_last = wqe;
2030 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_SEND, olx);
2031 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 0, olx);
2032 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
2033 txq->wqe_ci += (ds + 3) / 4;
2034 loc->wqe_free -= (ds + 3) / 4;
2035 return MLX5_TXCMP_CODE_MULTI;
2039 * Tx burst function for multi-segment packets. Supports all
2040 * types of Tx offloads, uses MLX5_OPCODE_SEND/TSO to build WQEs,
2041 * sends one packet per WQE. Function stops sending if it
2042 * encounters a single-segment packet.
2044 * This routine is responsible for storing the processed mbuf
2045 * into the elts ring buffer and updating elts_head.
2048 * Pointer to TX queue structure.
2050 * Packets to transmit.
2052 * Number of packets in array.
2054 * Pointer to burst routine local context.
2056 * Configured Tx offloads mask. It is fully defined at
2057 * compile time and may be used for optimization.
2060 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
2061 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
2062 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
2063 * MLX5_TXCMP_CODE_TSO - TSO single-segment packet encountered.
2064 * Local context variables updated.
2066 static __rte_always_inline enum mlx5_txcmp_code
2067 mlx5_tx_burst_mseg(struct mlx5_txq_data *__rte_restrict txq,
2068 struct rte_mbuf **__rte_restrict pkts,
2069 unsigned int pkts_n,
2070 struct mlx5_txq_local *__rte_restrict loc,
2073 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
2074 MLX5_ASSERT(pkts_n > loc->pkts_sent);
2075 pkts += loc->pkts_sent + 1;
2076 pkts_n -= loc->pkts_sent;
2078 enum mlx5_txcmp_code ret;
2080 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
2082 * Estimate the number of free elts quickly but conservatively.
2083 * Some segment may be fully inlined and freed,
2084 * ignore this here - precise estimation is costly.
2086 if (loc->elts_free < NB_SEGS(loc->mbuf))
2087 return MLX5_TXCMP_CODE_EXIT;
2088 if (MLX5_TXOFF_CONFIG(TSO) &&
2089 unlikely(loc->mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
2090 /* Proceed with multi-segment TSO. */
2091 ret = mlx5_tx_packet_multi_tso(txq, loc, olx);
2092 } else if (MLX5_TXOFF_CONFIG(INLINE)) {
2093 /* Proceed with multi-segment SEND with inlining. */
2094 ret = mlx5_tx_packet_multi_inline(txq, loc, olx);
2096 /* Proceed with multi-segment SEND w/o inlining. */
2097 ret = mlx5_tx_packet_multi_send(txq, loc, olx);
2099 if (ret == MLX5_TXCMP_CODE_EXIT)
2100 return MLX5_TXCMP_CODE_EXIT;
2101 if (ret == MLX5_TXCMP_CODE_ERROR)
2102 return MLX5_TXCMP_CODE_ERROR;
2103 /* WQE is built, go to the next packet. */
2106 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
2107 return MLX5_TXCMP_CODE_EXIT;
2108 loc->mbuf = *pkts++;
2110 rte_prefetch0(*pkts);
2111 if (likely(NB_SEGS(loc->mbuf) > 1))
2113 /* Here ends the series of multi-segment packets. */
2114 if (MLX5_TXOFF_CONFIG(TSO) &&
2115 unlikely(loc->mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG))
2116 return MLX5_TXCMP_CODE_TSO;
2117 return MLX5_TXCMP_CODE_SINGLE;
2123 * Tx burst function for single-segment packets with TSO.
2124 * Supports all types of Tx offloads, except multi-packets.
2125 * Uses MLX5_OPCODE_TSO to build WQEs, sends one packet per WQE.
2126 * Function stops sending if it encounters a multi-segment
2127 * packet or a packet without TSO requested.
2129 * The routine is responsible for storing the processed mbuf into the elts ring
2130 * buffer and updating elts_head if the inline offload is requested, due to possible
2131 * early freeing of the inlined mbufs (the pkts array can not be stored in elts as a batch).
2134 * Pointer to TX queue structure.
2136 * Packets to transmit.
2138 * Number of packets in array.
2140 * Pointer to burst routine local context.
2142 * Configured Tx offloads mask. It is fully defined at
2143 * compile time and may be used for optimization.
2146 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
2147 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
2148 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
2149 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
2150 * Local context variables updated.
2152 static __rte_always_inline enum mlx5_txcmp_code
2153 mlx5_tx_burst_tso(struct mlx5_txq_data *__rte_restrict txq,
2154 struct rte_mbuf **__rte_restrict pkts,
2155 unsigned int pkts_n,
2156 struct mlx5_txq_local *__rte_restrict loc,
2159 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
2160 MLX5_ASSERT(pkts_n > loc->pkts_sent);
2161 pkts += loc->pkts_sent + 1;
2162 pkts_n -= loc->pkts_sent;
2164 struct mlx5_wqe_dseg *__rte_restrict dseg;
2165 struct mlx5_wqe *__rte_restrict wqe;
2166 unsigned int ds, dlen, hlen, ntcp, vlan = 0;
2167 uint8_t *dptr;
2169 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
2170 if (MLX5_TXOFF_CONFIG(TXPP)) {
2171 enum mlx5_txcmp_code wret;
2173 /* Generate WAIT for scheduling if requested. */
2174 wret = mlx5_tx_schedule_send(txq, loc, olx);
2175 if (wret == MLX5_TXCMP_CODE_EXIT)
2176 return MLX5_TXCMP_CODE_EXIT;
2177 if (wret == MLX5_TXCMP_CODE_ERROR)
2178 return MLX5_TXCMP_CODE_ERROR;
2180 dlen = rte_pktmbuf_data_len(loc->mbuf);
2181 if (MLX5_TXOFF_CONFIG(VLAN) &&
2182 loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN) {
2183 vlan = sizeof(struct rte_vlan_hdr);
2186 * First calculate the WQE size to check
2187 * whether we have enough space in ring buffer.
2189 hlen = loc->mbuf->l2_len + vlan +
2190 loc->mbuf->l3_len + loc->mbuf->l4_len;
2191 if (unlikely((!hlen || !loc->mbuf->tso_segsz)))
2192 return MLX5_TXCMP_CODE_ERROR;
2193 if (loc->mbuf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
2194 hlen += loc->mbuf->outer_l2_len +
2195 loc->mbuf->outer_l3_len;
2196 /* Segment must contain all TSO headers. */
2197 if (unlikely(hlen > MLX5_MAX_TSO_HEADER ||
2198 hlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
2199 hlen > (dlen + vlan)))
2200 return MLX5_TXCMP_CODE_ERROR;
2202 * Check whether there are enough free WQEBBs:
2204 * - Ethernet Segment
2205 * - First Segment of inlined Ethernet data
2206 * - ... data continued ...
2207 * - Finishing Data Segment of pointer type
2209 ds = 4 + (hlen - MLX5_ESEG_MIN_INLINE_SIZE +
2210 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
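/*
 * Illustration (assuming MLX5_ESEG_MIN_INLINE_SIZE is 18 and
 * MLX5_WSEG_SIZE is 16): hlen = 54 (Eth 14 + IPv4 20 + TCP 20)
 * gives ds = 4 + (54 - 18 + 15) / 16 = 7 WSEGs, i.e. 2 WQEBBs.
 */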
2211 if (loc->wqe_free < ((ds + 3) / 4))
2212 return MLX5_TXCMP_CODE_EXIT;
2213 #ifdef MLX5_PMD_SOFT_COUNTERS
2214 /* Update sent data bytes/packets counters. */
2215 ntcp = (dlen + vlan - hlen +
2216 loc->mbuf->tso_segsz - 1) /
2217 loc->mbuf->tso_segsz;
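/*
 * For example, 2900 bytes of TCP payload (dlen + vlan - hlen) with
 * tso_segsz = 1460 yields ntcp = 2 TCP segments on the wire.
 */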
2219 * One will be added for the mbuf itself at the end
2220 * of mlx5_tx_burst from the loc->pkts_sent field.
2222 --ntcp;
2223 txq->stats.opackets += ntcp;
2224 txq->stats.obytes += dlen + vlan + ntcp * hlen;
2227 * Build the TSO WQE:
2229 * - Ethernet Segment with hlen bytes inlined
2230 * - Data Segment of pointer type
2232 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
2233 loc->wqe_last = wqe;
2234 mlx5_tx_cseg_init(txq, loc, wqe, ds,
2235 MLX5_OPCODE_TSO, olx);
2236 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan, hlen, 1, olx);
2237 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) + hlen - vlan;
2238 dlen -= hlen - vlan;
2239 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
2241 * WQE is built, update the loop parameters
2242 * and go to the next packet.
2244 txq->wqe_ci += (ds + 3) / 4;
2245 loc->wqe_free -= (ds + 3) / 4;
2246 if (MLX5_TXOFF_CONFIG(INLINE))
2247 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
2251 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
2252 return MLX5_TXCMP_CODE_EXIT;
2253 loc->mbuf = *pkts++;
2255 rte_prefetch0(*pkts);
2256 if (MLX5_TXOFF_CONFIG(MULTI) &&
2257 unlikely(NB_SEGS(loc->mbuf) > 1))
2258 return MLX5_TXCMP_CODE_MULTI;
2259 if (likely(!(loc->mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)))
2260 return MLX5_TXCMP_CODE_SINGLE;
2261 /* Continue with the next TSO packet. */
2267 * Analyze the packet and select the best method to send.
2270 * Pointer to TX queue structure.
2272 * Pointer to burst routine local context.
2274 * Configured Tx offloads mask. It is fully defined at
2275 * compile time and may be used for optimization.
2277 * The predefined flag whether to do the complete check for
2278 * multi-segment packets and TSO.
2281 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
2282 * MLX5_TXCMP_CODE_TSO - TSO required, use TSO/LSO.
2283 * MLX5_TXCMP_CODE_SINGLE - single-segment packet, use SEND.
2284 * MLX5_TXCMP_CODE_EMPW - single-segment packet, use MPW.
2286 static __rte_always_inline enum mlx5_txcmp_code
2287 mlx5_tx_able_to_empw(struct mlx5_txq_data *__rte_restrict txq,
2288 struct mlx5_txq_local *__rte_restrict loc,
2292 /* Check for multi-segment packet. */
2294 MLX5_TXOFF_CONFIG(MULTI) &&
2295 unlikely(NB_SEGS(loc->mbuf) > 1))
2296 return MLX5_TXCMP_CODE_MULTI;
2297 /* Check for TSO packet. */
2299 MLX5_TXOFF_CONFIG(TSO) &&
2300 unlikely(loc->mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG))
2301 return MLX5_TXCMP_CODE_TSO;
2302 /* Check if eMPW is enabled at all. */
2303 if (!MLX5_TXOFF_CONFIG(EMPW))
2304 return MLX5_TXCMP_CODE_SINGLE;
2305 /* Check if eMPW can be engaged. */
2306 if (MLX5_TXOFF_CONFIG(VLAN) &&
2307 unlikely(loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN) &&
2308 (!MLX5_TXOFF_CONFIG(INLINE) ||
2309 unlikely((rte_pktmbuf_data_len(loc->mbuf) +
2310 sizeof(struct rte_vlan_hdr)) > txq->inlen_empw))) {
2312 * eMPW does not support the VLAN insertion offload, we would have to
2313 * inline the entire packet, but the packet is too long for inlining.
2315 return MLX5_TXCMP_CODE_SINGLE;
2317 return MLX5_TXCMP_CODE_EMPW;
2321 * Check the next packet attributes to match with the eMPW batch ones.
2322 * In addition, for legacy MPW the packet length is checked as well.
2325 * Pointer to TX queue structure.
2327 * Pointer to Ethernet Segment of eMPW batch.
2329 * Pointer to burst routine local context.
2331 * Length of previous packet in MPW descriptor.
2333 * Configured Tx offloads mask. It is fully defined at
2334 * compile time and may be used for optimization.
2337 * true - packet match with eMPW batch attributes.
2338 * false - no match, eMPW should be restarted.
2340 static __rte_always_inline bool
2341 mlx5_tx_match_empw(struct mlx5_txq_data *__rte_restrict txq,
2342 struct mlx5_wqe_eseg *__rte_restrict es,
2343 struct mlx5_txq_local *__rte_restrict loc,
2347 uint8_t swp_flags = 0;
2349 /* Compare the checksum flags, if any. */
2350 if (MLX5_TXOFF_CONFIG(CSUM) &&
2351 txq_ol_cksum_to_cs(loc->mbuf) != es->cs_flags)
2353 /* Compare the Software Parser offsets and flags. */
2354 if (MLX5_TXOFF_CONFIG(SWP) &&
2355 (es->swp_offs != txq_mbuf_to_swp(loc, &swp_flags, olx) ||
2356 es->swp_flags != swp_flags))
2358 /* Fill metadata field if needed. */
2359 if (MLX5_TXOFF_CONFIG(METADATA) &&
2360 es->metadata != (loc->mbuf->ol_flags & RTE_MBUF_DYNFLAG_TX_METADATA ?
2361 rte_cpu_to_be_32(*RTE_FLOW_DYNF_METADATA(loc->mbuf)) : 0))
2363 /* Legacy MPW can send packets with the same length only. */
2364 if (MLX5_TXOFF_CONFIG(MPW) &&
2365 dlen != rte_pktmbuf_data_len(loc->mbuf))
2367 /* There must be no VLAN packets in eMPW loop. */
2368 if (MLX5_TXOFF_CONFIG(VLAN))
2369 MLX5_ASSERT(!(loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN));
2370 /* Check if the scheduling is requested. */
2371 if (MLX5_TXOFF_CONFIG(TXPP) &&
2372 loc->mbuf->ol_flags & txq->ts_mask)
2378 * Update send loop variables and WQE for eMPW loop without data inlining.
2379 * Number of Data Segments is equal to the number of sent packets.
2382 * Pointer to TX queue structure.
2384 * Pointer to burst routine local context.
2386 * Number of packets (one Data Segment is built per packet).
2388 * Accumulated statistics, bytes sent.
2390 * Configured Tx offloads mask. It is fully defined at
2391 * compile time and may be used for optimization.
2394 * Nothing is returned; the Control Segment of the last WQE and
2395 * the loop counters (elts/WQE) are updated in place.
2397 static __rte_always_inline void
2398 mlx5_tx_sdone_empw(struct mlx5_txq_data *__rte_restrict txq,
2399 struct mlx5_txq_local *__rte_restrict loc,
2402 unsigned int olx __rte_unused)
2404 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
2405 #ifdef MLX5_PMD_SOFT_COUNTERS
2406 /* Update sent data bytes counter. */
2407 txq->stats.obytes += slen;
2411 loc->elts_free -= ds;
2412 loc->pkts_sent += ds;
2414 loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
2415 txq->wqe_ci += (ds + 3) / 4;
2416 loc->wqe_free -= (ds + 3) / 4;
2420 * Update send loop variables and WQE for eMPW loop with data inlining.
2421 * Gets the size of pushed descriptors and data to the WQE.
2424 * Pointer to TX queue structure.
2426 * Pointer to burst routine local context.
2428 * Total size of descriptor/data in bytes.
2430 * Accumulated statistics, data bytes sent.
2432 * The base WQE for the eMPW/MPW descriptor.
2434 * Configured Tx offloads mask. It is fully defined at
2435 * compile time and may be used for optimization.
2438 * Nothing is returned; the Control Segment, the loop counters and
2439 * loc->wqe_last are updated in place.
2441 static __rte_always_inline void
2442 mlx5_tx_idone_empw(struct mlx5_txq_data *__rte_restrict txq,
2443 struct mlx5_txq_local *__rte_restrict loc,
2446 struct mlx5_wqe *__rte_restrict wqem,
2447 unsigned int olx __rte_unused)
2449 struct mlx5_wqe_dseg *dseg = &wqem->dseg[0];
2451 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
2452 #ifdef MLX5_PMD_SOFT_COUNTERS
2453 /* Update sent data bytes counter. */
2454 txq->stats.obytes += slen;
2458 if (MLX5_TXOFF_CONFIG(MPW) && dseg->bcount == RTE_BE32(0)) {
2460 * If the legacy MPW session contains inline packets,
2461 * we should set the length of the single inline data segment
2462 * and align the total length to the segment size.
2464 MLX5_ASSERT(len > sizeof(dseg->bcount));
2465 dseg->bcount = rte_cpu_to_be_32((len - sizeof(dseg->bcount)) |
2466 MLX5_ETH_WQE_DATA_INLINE);
2467 len = (len + MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE + 2;
2470 * The session is not legacy MPW or contains the
2471 * data buffer pointer segments.
2473 MLX5_ASSERT((len % MLX5_WSEG_SIZE) == 0);
2474 len = len / MLX5_WSEG_SIZE + 2;
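/*
 * The "+ 2" accounts for the Control and Ethernet Segments of the
 * eMPW title in the sq_ds segment count.
 */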
2476 wqem->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | len);
2477 txq->wqe_ci += (len + 3) / 4;
2478 loc->wqe_free -= (len + 3) / 4;
2479 loc->wqe_last = wqem;
2483 * The set of Tx burst functions for single-segment packets without TSO
2484 * and with Multi-Packet Writing feature support.
2485 * Supports all types of Tx offloads, except multi-packets and TSO.
2487 * Uses MLX5_OPCODE_EMPW to build WQEs if possible and sends as many packets
2488 * per WQE as it can. If eMPW is not configured or the packet can not be sent with
2489 * eMPW (VLAN insertion) the ordinary SEND opcode is used and only one packet
2490 * is sent per WQE.
2492 * Functions stop sending if they encounter a multi-segment packet or a packet
2493 * with TSO requested.
2495 * The routines are responsible for storing the processed mbuf into the elts ring
2496 * buffer and updating elts_head if the inlining offload is requested. Otherwise the
2497 * copying of mbufs to elts can be postponed and completed at the end of the burst routine.
2500 * Pointer to TX queue structure.
2502 * Packets to transmit.
2504 * Number of packets in array.
2506 * Pointer to burst routine local context.
2508 * Configured Tx offloads mask. It is fully defined at
2509 * compile time and may be used for optimization.
2512 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
2513 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
2514 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
2515 * MLX5_TXCMP_CODE_TSO - TSO packet encountered.
2516 * MLX5_TXCMP_CODE_SINGLE - used inside functions set.
2517 * MLX5_TXCMP_CODE_EMPW - used inside functions set.
2519 * Local context variables updated.
2522 * The routine sends packets with MLX5_OPCODE_EMPW
2523 * without inlining, this is a dedicated optimized branch.
2524 * No VLAN insertion is supported.
2526 static __rte_always_inline enum mlx5_txcmp_code
2527 mlx5_tx_burst_empw_simple(struct mlx5_txq_data *__rte_restrict txq,
2528 struct rte_mbuf **__rte_restrict pkts,
2529 unsigned int pkts_n,
2530 struct mlx5_txq_local *__rte_restrict loc,
2534 * Subroutine is part of mlx5_tx_burst_single() and sends
2535 * single-segment packets with the eMPW opcode without data inlining.
2537 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
2538 MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
2539 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
2540 MLX5_ASSERT(pkts_n > loc->pkts_sent);
2541 pkts += loc->pkts_sent + 1;
2542 pkts_n -= loc->pkts_sent;
2544 struct mlx5_wqe_dseg *__rte_restrict dseg;
2545 struct mlx5_wqe_eseg *__rte_restrict eseg;
2546 enum mlx5_txcmp_code ret;
2547 unsigned int part, loop;
2548 unsigned int slen = 0;
2551 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
2552 if (MLX5_TXOFF_CONFIG(TXPP)) {
2553 enum mlx5_txcmp_code wret;
2555 /* Generate WAIT for scheduling if requested. */
2556 wret = mlx5_tx_schedule_send(txq, loc, olx);
2557 if (wret == MLX5_TXCMP_CODE_EXIT)
2558 return MLX5_TXCMP_CODE_EXIT;
2559 if (wret == MLX5_TXCMP_CODE_ERROR)
2560 return MLX5_TXCMP_CODE_ERROR;
2562 part = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
2563 MLX5_MPW_MAX_PACKETS :
2564 MLX5_EMPW_MAX_PACKETS);
2565 if (unlikely(loc->elts_free < part)) {
2566 /* We do not have enough elts to store all the mbufs. */
2567 if (unlikely(loc->elts_free < MLX5_EMPW_MIN_PACKETS))
2568 return MLX5_TXCMP_CODE_EXIT;
2569 /* But we are still able to send at least a minimal eMPW. */
2570 part = loc->elts_free;
2572 /* Check whether we have enough WQEs */
2573 if (unlikely(loc->wqe_free < ((2 + part + 3) / 4))) {
2574 if (unlikely(loc->wqe_free <
2575 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
2576 return MLX5_TXCMP_CODE_EXIT;
2577 part = (loc->wqe_free * 4) - 2;
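/*
 * The eMPW title (Control + Ethernet Segments) takes 2 WSEGs and each
 * packet adds one pointer Data Segment, hence the "2 + part" WSEGs
 * above, rounded up to WQEBBs of four WSEGs each.
 */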
2579 if (likely(part > 1))
2580 rte_prefetch0(*pkts);
2581 loc->wqe_last = txq->wqes + (txq->wqe_ci & txq->wqe_m);
2583 * Build eMPW title WQEBB:
2584 * - Control Segment, eMPW opcode
2585 * - Ethernet Segment, no inline
2587 mlx5_tx_cseg_init(txq, loc, loc->wqe_last, part + 2,
2588 MLX5_OPCODE_ENHANCED_MPSW, olx);
2589 mlx5_tx_eseg_none(txq, loc, loc->wqe_last,
2590 olx & ~MLX5_TXOFF_CONFIG_VLAN);
2591 eseg = &loc->wqe_last->eseg;
2592 dseg = &loc->wqe_last->dseg[0];
2593 loop = part;
2594 /* Store the packet length for legacy MPW. */
2595 if (MLX5_TXOFF_CONFIG(MPW))
2596 eseg->mss = rte_cpu_to_be_16
2597 (rte_pktmbuf_data_len(loc->mbuf));
2599 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
2600 #ifdef MLX5_PMD_SOFT_COUNTERS
2601 /* Update sent data bytes counter. */
2606 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
2608 if (unlikely(--loop == 0))
2610 loc->mbuf = *pkts++;
2611 if (likely(loop > 1))
2612 rte_prefetch0(*pkts);
2613 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
2615 * Unroll the completion code to avoid
2616 * returning a variable value - it results in
2617 * unoptimized subsequent checking in the caller.
2619 if (ret == MLX5_TXCMP_CODE_MULTI) {
2621 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
2622 if (unlikely(!loc->elts_free ||
2624 return MLX5_TXCMP_CODE_EXIT;
2625 return MLX5_TXCMP_CODE_MULTI;
2627 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
2628 if (ret == MLX5_TXCMP_CODE_TSO) {
2630 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
2631 if (unlikely(!loc->elts_free ||
2633 return MLX5_TXCMP_CODE_EXIT;
2634 return MLX5_TXCMP_CODE_TSO;
2636 if (ret == MLX5_TXCMP_CODE_SINGLE) {
2638 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
2639 if (unlikely(!loc->elts_free ||
2641 return MLX5_TXCMP_CODE_EXIT;
2642 return MLX5_TXCMP_CODE_SINGLE;
2644 if (ret != MLX5_TXCMP_CODE_EMPW) {
2647 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
2648 return MLX5_TXCMP_CODE_ERROR;
2651 * Check whether packet parameters coincide
2652 * within assumed eMPW batch:
2653 * - checksum settings
2654 * - metadata value
2655 * - software parser settings
2656 * - packet length (legacy MPW only)
2657 * - scheduling is not required
2659 if (!mlx5_tx_match_empw(txq, eseg, loc, dlen, olx)) {
2662 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
2663 if (unlikely(!loc->elts_free ||
2665 return MLX5_TXCMP_CODE_EXIT;
2669 /* Packet attributes match, continue the same eMPW. */
2671 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
2672 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
2674 /* eMPW is built successfully, update loop parameters. */
2676 MLX5_ASSERT(pkts_n >= part);
2677 #ifdef MLX5_PMD_SOFT_COUNTERS
2678 /* Update sent data bytes counter. */
2679 txq->stats.obytes += slen;
2681 loc->elts_free -= part;
2682 loc->pkts_sent += part;
2683 txq->wqe_ci += (2 + part + 3) / 4;
2684 loc->wqe_free -= (2 + part + 3) / 4;
2686 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
2687 return MLX5_TXCMP_CODE_EXIT;
2688 loc->mbuf = *pkts++;
2689 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
2690 if (unlikely(ret != MLX5_TXCMP_CODE_EMPW))
2692 /* Continue sending eMPW batches. */
2698 * The routine sends packets with MLX5_OPCODE_EMPW
2699 * with inlining, optionally supports VLAN insertion.
2701 static __rte_always_inline enum mlx5_txcmp_code
2702 mlx5_tx_burst_empw_inline(struct mlx5_txq_data *__rte_restrict txq,
2703 struct rte_mbuf **__rte_restrict pkts,
2704 unsigned int pkts_n,
2705 struct mlx5_txq_local *__rte_restrict loc,
2709 * Subroutine is part of mlx5_tx_burst_single() and sends
2710 * single-segment packets with the eMPW opcode with data inlining.
2712 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
2713 MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
2714 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
2715 MLX5_ASSERT(pkts_n > loc->pkts_sent);
2716 pkts += loc->pkts_sent + 1;
2717 pkts_n -= loc->pkts_sent;
2719 struct mlx5_wqe_dseg *__rte_restrict dseg;
2720 struct mlx5_wqe *__rte_restrict wqem;
2721 enum mlx5_txcmp_code ret;
2722 unsigned int room, part, nlim;
2723 unsigned int slen = 0;
2725 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
2726 if (MLX5_TXOFF_CONFIG(TXPP)) {
2727 enum mlx5_txcmp_code wret;
2729 /* Generate WAIT for scheduling if requested. */
2730 wret = mlx5_tx_schedule_send(txq, loc, olx);
2731 if (wret == MLX5_TXCMP_CODE_EXIT)
2732 return MLX5_TXCMP_CODE_EXIT;
2733 if (wret == MLX5_TXCMP_CODE_ERROR)
2734 return MLX5_TXCMP_CODE_ERROR;
2737 * Limit the number of packets in one WQE
2738 * to improve CQE generation latency.
2740 nlim = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
2741 MLX5_MPW_INLINE_MAX_PACKETS :
2742 MLX5_EMPW_MAX_PACKETS);
2743 /* Check whether we have the minimal amount of WQEs. */
2744 if (unlikely(loc->wqe_free <
2745 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
2746 return MLX5_TXCMP_CODE_EXIT;
2747 if (likely(pkts_n > 1))
2748 rte_prefetch0(*pkts);
2749 wqem = txq->wqes + (txq->wqe_ci & txq->wqe_m);
2751 * Build eMPW title WQEBB:
2752 * - Control Segment, eMPW opcode, zero DS
2753 * - Ethernet Segment, no inline
2755 mlx5_tx_cseg_init(txq, loc, wqem, 0,
2756 MLX5_OPCODE_ENHANCED_MPSW, olx);
2757 mlx5_tx_eseg_none(txq, loc, wqem,
2758 olx & ~MLX5_TXOFF_CONFIG_VLAN);
2759 dseg = &wqem->dseg[0];
2760 /* Store the packet length for legacy MPW. */
2761 if (MLX5_TXOFF_CONFIG(MPW))
2762 wqem->eseg.mss = rte_cpu_to_be_16
2763 (rte_pktmbuf_data_len(loc->mbuf));
2764 room = RTE_MIN(MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE,
2765 loc->wqe_free) * MLX5_WQE_SIZE -
2766 MLX5_WQE_CSEG_SIZE -
2767 MLX5_WQE_ESEG_SIZE;
2768 /* Limit the room for legacy MPW sessions for performance. */
2769 if (MLX5_TXOFF_CONFIG(MPW))
2770 room = RTE_MIN(room,
2771 RTE_MAX(txq->inlen_empw +
2772 sizeof(dseg->bcount) +
2773 (MLX5_TXOFF_CONFIG(VLAN) ?
2774 sizeof(struct rte_vlan_hdr) : 0),
2775 MLX5_MPW_INLINE_MAX_PACKETS *
2776 MLX5_WQE_DSEG_SIZE));
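/*
 * From here "room" is the remaining byte budget for Data Segments
 * (inline or pointer type) within the current eMPW session WQE.
 */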
2777 /* Build WQE till we have space, packets and resources. */
2780 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
2781 uint8_t *dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
2784 MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
2785 MLX5_ASSERT((room % MLX5_WQE_DSEG_SIZE) == 0);
2786 MLX5_ASSERT((uintptr_t)dseg < (uintptr_t)txq->wqes_end);
2788 * Some Tx offloads may cause an error if packet is not
2789 * long enough, check against assumed minimal length.
2791 if (unlikely(dlen <= MLX5_ESEG_MIN_INLINE_SIZE)) {
2793 if (unlikely(!part))
2794 return MLX5_TXCMP_CODE_ERROR;
2796 * We have some successfully built
2797 * packet Data Segments to send.
2799 mlx5_tx_idone_empw(txq, loc, part,
2801 return MLX5_TXCMP_CODE_ERROR;
2803 /* Inline or not inline - that's the Question. */
2804 if (dlen > txq->inlen_empw ||
2805 loc->mbuf->ol_flags & RTE_MBUF_F_TX_DYNF_NOINLINE)
2807 if (MLX5_TXOFF_CONFIG(MPW)) {
2808 if (dlen > txq->inlen_send)
2812 /* Open new inline MPW session. */
2813 tlen += sizeof(dseg->bcount);
2814 dseg->bcount = RTE_BE32(0);
2816 (dseg, sizeof(dseg->bcount));
2819 * No pointer and inline descriptor
2820 * intermix for legacy MPW sessions.
2822 if (wqem->dseg[0].bcount)
2826 tlen = sizeof(dseg->bcount) + dlen;
2828 /* Inline entire packet, optional VLAN insertion. */
2829 if (MLX5_TXOFF_CONFIG(VLAN) &&
2830 loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN) {
2832 * The packet length must be checked in
2833 * mlx5_tx_able_to_empw() and the packet
2834 * is guaranteed to fit into the inline length.
2837 sizeof(struct rte_vlan_hdr)) <=
2839 tlen += sizeof(struct rte_vlan_hdr);
2842 dseg = mlx5_tx_dseg_vlan(txq, loc, dseg,
2844 #ifdef MLX5_PMD_SOFT_COUNTERS
2845 /* Update sent data bytes counter. */
2846 slen += sizeof(struct rte_vlan_hdr);
2851 dseg = mlx5_tx_dseg_empw(txq, loc, dseg,
2854 if (!MLX5_TXOFF_CONFIG(MPW))
2855 tlen = RTE_ALIGN(tlen, MLX5_WSEG_SIZE);
2856 MLX5_ASSERT(room >= tlen);
2859 * Packet data are completely inlined,
2860 * we can try to free the packet.
2862 if (likely(loc->pkts_sent == loc->mbuf_free)) {
2864 * All the packets from the burst beginning
2865 * are inlined, we can free the mbufs directly
2866 * from the original array on tx_burst exit().
2872 * In order not to call rte_pktmbuf_free_seg() here,
2873 * in the innermost loop (which might be very
2874 * expensive), we just save the mbuf in elts.
2876 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
2881 * No pointer and inline descriptor
2882 * intermix for legacy MPW sessions.
2884 if (MLX5_TXOFF_CONFIG(MPW) &&
2886 wqem->dseg[0].bcount == RTE_BE32(0))
2889 * Non-inlinable VLAN packets are
2890 * processed outside of this routine.
2892 MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
2893 if (MLX5_TXOFF_CONFIG(VLAN))
2894 MLX5_ASSERT(!(loc->mbuf->ol_flags &
2895 RTE_MBUF_F_TX_VLAN));
2896 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
2897 /* We have to store mbuf in elts.*/
2898 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
2900 room -= MLX5_WQE_DSEG_SIZE;
2901 /* Ring buffer wraparound is checked at the loop end.*/
2904 #ifdef MLX5_PMD_SOFT_COUNTERS
2905 /* Update sent data bytes counter. */
2910 if (unlikely(!pkts_n || !loc->elts_free)) {
2912 * We have no resources/packets to
2913 * continue building descriptors.
2916 mlx5_tx_idone_empw(txq, loc, part,
2918 return MLX5_TXCMP_CODE_EXIT;
2920 loc->mbuf = *pkts++;
2921 if (likely(pkts_n > 1))
2922 rte_prefetch0(*pkts);
2923 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
2925 * Unroll the completion code to avoid
2926 * returning a variable value - it results in
2927 * unoptimized subsequent checking in the caller.
2929 if (ret == MLX5_TXCMP_CODE_MULTI) {
2931 mlx5_tx_idone_empw(txq, loc, part,
2933 if (unlikely(!loc->elts_free ||
2935 return MLX5_TXCMP_CODE_EXIT;
2936 return MLX5_TXCMP_CODE_MULTI;
2938 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
2939 if (ret == MLX5_TXCMP_CODE_TSO) {
2941 mlx5_tx_idone_empw(txq, loc, part,
2943 if (unlikely(!loc->elts_free ||
2945 return MLX5_TXCMP_CODE_EXIT;
2946 return MLX5_TXCMP_CODE_TSO;
2948 if (ret == MLX5_TXCMP_CODE_SINGLE) {
2950 mlx5_tx_idone_empw(txq, loc, part,
2952 if (unlikely(!loc->elts_free ||
2954 return MLX5_TXCMP_CODE_EXIT;
2955 return MLX5_TXCMP_CODE_SINGLE;
2957 if (ret != MLX5_TXCMP_CODE_EMPW) {
2960 mlx5_tx_idone_empw(txq, loc, part,
2962 return MLX5_TXCMP_CODE_ERROR;
2964 /* Check if we have minimal room left. */
2966 if (unlikely(!nlim || room < MLX5_WQE_DSEG_SIZE))
2969 * Check whether packet parameters coincide
2970 * within assumed eMPW batch:
2971 * - checksum settings
2972 * - metadata value
2973 * - software parser settings
2974 * - packet length (legacy MPW only)
2975 * - scheduling is not required
2977 if (!mlx5_tx_match_empw(txq, &wqem->eseg,
2980 /* Packet attributes match, continue the same eMPW. */
2981 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
2982 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
2985 * We get here to close an existing eMPW
2986 * session and start the new one.
2988 MLX5_ASSERT(pkts_n);
2990 if (unlikely(!part))
2991 return MLX5_TXCMP_CODE_EXIT;
2992 mlx5_tx_idone_empw(txq, loc, part, slen, wqem, olx);
2993 if (unlikely(!loc->elts_free ||
2995 return MLX5_TXCMP_CODE_EXIT;
2996 /* Continue the loop with new eMPW session. */
3002 * The routine sends packets with ordinary MLX5_OPCODE_SEND.
3003 * Data inlining and VLAN insertion are supported.
3005 static __rte_always_inline enum mlx5_txcmp_code
3006 mlx5_tx_burst_single_send(struct mlx5_txq_data *__rte_restrict txq,
3007 struct rte_mbuf **__rte_restrict pkts,
3008 unsigned int pkts_n,
3009 struct mlx5_txq_local *__rte_restrict loc,
3013 * Subroutine is part of mlx5_tx_burst_single()
3014 * and sends single-segment packets with the SEND opcode.
3016 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3017 MLX5_ASSERT(pkts_n > loc->pkts_sent);
3018 pkts += loc->pkts_sent + 1;
3019 pkts_n -= loc->pkts_sent;
3021 struct mlx5_wqe *__rte_restrict wqe;
3022 enum mlx5_txcmp_code ret;
3024 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
3025 if (MLX5_TXOFF_CONFIG(TXPP)) {
3026 enum mlx5_txcmp_code wret;
3028 /* Generate WAIT for scheduling if requested. */
3029 wret = mlx5_tx_schedule_send(txq, loc, olx);
3030 if (wret == MLX5_TXCMP_CODE_EXIT)
3031 return MLX5_TXCMP_CODE_EXIT;
3032 if (wret == MLX5_TXCMP_CODE_ERROR)
3033 return MLX5_TXCMP_CODE_ERROR;
3035 if (MLX5_TXOFF_CONFIG(INLINE)) {
3036 unsigned int inlen, vlan = 0;
3038 inlen = rte_pktmbuf_data_len(loc->mbuf);
3039 if (MLX5_TXOFF_CONFIG(VLAN) &&
3040 loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN) {
3041 vlan = sizeof(struct rte_vlan_hdr);
3045 * If inlining is enabled at configuration time
3046 * the limit must be not less than the minimal size.
3047 * Otherwise we would need an extra check for the data
3048 * size to avoid crashes due to length overflow.
3050 MLX5_ASSERT(txq->inlen_send >=
3051 MLX5_ESEG_MIN_INLINE_SIZE);
3052 if (inlen <= txq->inlen_send) {
3053 unsigned int seg_n, wqe_n;
3055 rte_prefetch0(rte_pktmbuf_mtod
3056 (loc->mbuf, uint8_t *));
3057 /* Check against minimal length. */
3058 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
3059 return MLX5_TXCMP_CODE_ERROR;
3060 if (loc->mbuf->ol_flags &
3061 RTE_MBUF_F_TX_DYNF_NOINLINE) {
3063 * The hint flag not to inline packet
3064 * data is set. Check whether we can
3065 * follow the hint.
3067 if ((!MLX5_TXOFF_CONFIG(EMPW) &&
3069 (MLX5_TXOFF_CONFIG(MPW) &&
3071 if (inlen <= txq->inlen_send)
3074 * The hardware requires the
3075 * minimal inline data header.
3077 goto single_min_inline;
3079 if (MLX5_TXOFF_CONFIG(VLAN) &&
3080 vlan && !txq->vlan_en) {
3082 * We must insert VLAN tag
3083 * by software means.
3085 goto single_part_inline;
3087 goto single_no_inline;
3091 * Completely inlined packet data WQE:
3092 * - Control Segment, SEND opcode
3093 * - Ethernet Segment, no VLAN insertion
3094 * - Data inlined, VLAN optionally inserted
3095 * - Alignment to MLX5_WSEG_SIZE
3096 * Have to estimate amount of WQEBBs
3098 seg_n = (inlen + 3 * MLX5_WSEG_SIZE -
3099 MLX5_ESEG_MIN_INLINE_SIZE +
3100 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3101 /* Check if there are enough WQEBBs. */
3102 wqe_n = (seg_n + 3) / 4;
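/*
 * Illustration (assuming MLX5_ESEG_MIN_INLINE_SIZE is 18 and
 * MLX5_WSEG_SIZE is 16): inlen = 128 gives
 * seg_n = (128 + 48 - 18 + 15) / 16 = 10 WSEGs and wqe_n = 3 WQEBBs.
 */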
3103 if (wqe_n > loc->wqe_free)
3104 return MLX5_TXCMP_CODE_EXIT;
3105 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3106 loc->wqe_last = wqe;
3107 mlx5_tx_cseg_init(txq, loc, wqe, seg_n,
3108 MLX5_OPCODE_SEND, olx);
3109 mlx5_tx_eseg_data(txq, loc, wqe,
3110 vlan, inlen, 0, olx);
3111 txq->wqe_ci += wqe_n;
3112 loc->wqe_free -= wqe_n;
3114 * Packet data are completely inlined,
3115 * free the packet immediately.
3117 rte_pktmbuf_free_seg(loc->mbuf);
3118 } else if ((!MLX5_TXOFF_CONFIG(EMPW) ||
3119 MLX5_TXOFF_CONFIG(MPW)) &&
3122 * If minimal inlining is requested the eMPW
3123 * feature should be disabled because the data is
3124 * inlined into the Ethernet Segment, which can
3125 * not contain inlined data for eMPW because the
3126 * segment is shared by all packets.
3128 struct mlx5_wqe_dseg *__rte_restrict dseg;
3133 * The inline-mode settings require
3134 * inlining the specified amount of
3135 * data bytes into the Ethernet Segment.
3136 * We should check the free space in
3137 * the WQE ring buffer to inline partially.
3140 MLX5_ASSERT(txq->inlen_send >= txq->inlen_mode);
3141 MLX5_ASSERT(inlen > txq->inlen_mode);
3142 MLX5_ASSERT(txq->inlen_mode >=
3143 MLX5_ESEG_MIN_INLINE_SIZE);
3145 * Check whether there are enough free WQEBBs:
3147 * - Ethernet Segment
3148 * - First Segment of inlined Ethernet data
3149 * - ... data continued ...
3150 * - Finishing Data Segment of pointer type
3152 ds = (MLX5_WQE_CSEG_SIZE +
3153 MLX5_WQE_ESEG_SIZE +
3154 MLX5_WQE_DSEG_SIZE +
3155 txq->inlen_mode -
3156 MLX5_ESEG_MIN_INLINE_SIZE +
3157 MLX5_WQE_DSEG_SIZE +
3158 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3159 if (loc->wqe_free < ((ds + 3) / 4))
3160 return MLX5_TXCMP_CODE_EXIT;
3162 * Build the ordinary SEND WQE:
3164 * - Ethernet Segment, inline inlen_mode bytes
3165 * - Data Segment of pointer type
3167 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3168 loc->wqe_last = wqe;
3169 mlx5_tx_cseg_init(txq, loc, wqe, ds,
3170 MLX5_OPCODE_SEND, olx);
3171 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan,
3174 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
3175 txq->inlen_mode - vlan;
3176 inlen -= txq->inlen_mode;
3177 mlx5_tx_dseg_ptr(txq, loc, dseg,
3180 * WQE is built, update the loop parameters
3181 * and go to the next packet.
3183 txq->wqe_ci += (ds + 3) / 4;
3184 loc->wqe_free -= (ds + 3) / 4;
3185 /* We have to store mbuf in elts.*/
3186 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
3187 txq->elts[txq->elts_head++ & txq->elts_m] =
3195 * Partially inlined packet data WQE, we have
3196 * some space in the title WQEBB, we can fill it
3197 * with some packet data. It takes one WQEBB,
3198 * which is available, no extra space check needed:
3199 * - Control Segment, SEND opcode
3200 * - Ethernet Segment, no VLAN insertion
3201 * - MLX5_ESEG_MIN_INLINE_SIZE bytes of Data
3202 * - Data Segment, pointer type
3204 * We also get here if VLAN insertion is not
3205 * supported by the HW and inlining is enabled.
3208 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3209 loc->wqe_last = wqe;
3210 mlx5_tx_cseg_init(txq, loc, wqe, 4,
3211 MLX5_OPCODE_SEND, olx);
3212 mlx5_tx_eseg_dmin(txq, loc, wqe, vlan, olx);
3213 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
3214 MLX5_ESEG_MIN_INLINE_SIZE - vlan;
3216 * The length check is performed above, by
3217 * comparing with txq->inlen_send. We should
3218 * not get overflow here.
3220 MLX5_ASSERT(inlen > MLX5_ESEG_MIN_INLINE_SIZE);
3221 dlen = inlen - MLX5_ESEG_MIN_INLINE_SIZE;
3222 mlx5_tx_dseg_ptr(txq, loc, &wqe->dseg[1],
3226 /* We have to store mbuf in elts.*/
3227 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
3228 txq->elts[txq->elts_head++ & txq->elts_m] =
3232 #ifdef MLX5_PMD_SOFT_COUNTERS
3233 /* Update sent data bytes counter. */
3234 txq->stats.obytes += vlan +
3235 rte_pktmbuf_data_len(loc->mbuf);
3239 * No inlining at all, it means that saving CPU cycles
3240 * was prioritized at configuration time, we should not
3241 * copy any packet data into the WQE.
3243 * SEND WQE, one WQEBB:
3244 * - Control Segment, SEND opcode
3245 * - Ethernet Segment, optional VLAN, no inline
3246 * - Data Segment, pointer type
3249 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3250 loc->wqe_last = wqe;
3251 mlx5_tx_cseg_init(txq, loc, wqe, 3,
3252 MLX5_OPCODE_SEND, olx);
3253 mlx5_tx_eseg_none(txq, loc, wqe, olx);
3254 mlx5_tx_dseg_ptr
3255 (txq, loc, &wqe->dseg[0],
3256 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3257 rte_pktmbuf_data_len(loc->mbuf), olx);
3261 * We should not store the mbuf pointer in elts
3262 * if no inlining is configured, this is done
3263 * by the calling routine in a batch copy.
3265 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
3267 #ifdef MLX5_PMD_SOFT_COUNTERS
3268 /* Update sent data bytes counter. */
3269 txq->stats.obytes += rte_pktmbuf_data_len(loc->mbuf);
3270 if (MLX5_TXOFF_CONFIG(VLAN) &&
3271 loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN)
3272 txq->stats.obytes +=
3273 sizeof(struct rte_vlan_hdr);
3278 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3279 return MLX5_TXCMP_CODE_EXIT;
3280 loc->mbuf = *pkts++;
3282 rte_prefetch0(*pkts);
3283 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
3284 if (unlikely(ret != MLX5_TXCMP_CODE_SINGLE))
3290 static __rte_always_inline enum mlx5_txcmp_code
3291 mlx5_tx_burst_single(struct mlx5_txq_data *__rte_restrict txq,
3292 struct rte_mbuf **__rte_restrict pkts,
3293 unsigned int pkts_n,
3294 struct mlx5_txq_local *__rte_restrict loc,
3297 enum mlx5_txcmp_code ret;
3299 ret = mlx5_tx_able_to_empw(txq, loc, olx, false);
3300 if (ret == MLX5_TXCMP_CODE_SINGLE)
3302 MLX5_ASSERT(ret == MLX5_TXCMP_CODE_EMPW);
3304 /* Optimize for inline/no inline eMPW send. */
3305 ret = (MLX5_TXOFF_CONFIG(INLINE)) ?
3306 mlx5_tx_burst_empw_inline
3307 (txq, pkts, pkts_n, loc, olx) :
3308 mlx5_tx_burst_empw_simple
3309 (txq, pkts, pkts_n, loc, olx);
3310 if (ret != MLX5_TXCMP_CODE_SINGLE)
3312 /* The resources to send one packet should remain. */
3313 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3315 ret = mlx5_tx_burst_single_send(txq, pkts, pkts_n, loc, olx);
3316 MLX5_ASSERT(ret != MLX5_TXCMP_CODE_SINGLE);
3317 if (ret != MLX5_TXCMP_CODE_EMPW)
3319 /* The resources to send one packet should remain. */
3320 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3325 * DPDK Tx callback template. This is the configured template used to generate
3326 * routines optimized for the specified offload setup.
3327 * One of these generated functions is chosen at SQ configuration time.
3330 * Generic pointer to TX queue structure.
3332 * Packets to transmit.
3334 * Number of packets in array.
3336 * Configured offloads mask, presents the bits of MLX5_TXOFF_CONFIG_xxx
3337 * values. Should be static to take compile time static configuration
3338 * advantages.
3341 * Number of packets successfully transmitted (<= pkts_n).
3343 static __rte_always_inline uint16_t
3344 mlx5_tx_burst_tmpl(struct mlx5_txq_data *__rte_restrict txq,
3345 struct rte_mbuf **__rte_restrict pkts,
3349 struct mlx5_txq_local loc;
3350 enum mlx5_txcmp_code ret;
3353 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
3354 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
3355 if (unlikely(!pkts_n))
3357 if (MLX5_TXOFF_CONFIG(INLINE))
3361 loc.wqe_last = NULL;
3364 loc.pkts_loop = loc.pkts_sent;
3366 * Check if there are some CQEs, if any:
3367 * - process encountered errors
3368 * - process the completed WQEs
3369 * - free related mbufs
3370 * - doorbell the NIC about processed CQEs
3372 rte_prefetch0(*(pkts + loc.pkts_sent));
3373 mlx5_tx_handle_completion(txq, olx);
3375 * Calculate the number of available resources - elts and WQEs.
3376 * There are two possible different scenarios:
3377 * - no data inlining into WQEs, one WQEBB may contain up to
3378 * four packets, in this case elts become the scarce resource
3379 * - data inlining into WQEs, one packet may require multiple
3380 * WQEBBs, the WQEs become the limiting factor.
3382 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
3383 loc.elts_free = txq->elts_s -
3384 (uint16_t)(txq->elts_head - txq->elts_tail);
3385 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
3386 loc.wqe_free = txq->wqe_s -
3387 (uint16_t)(txq->wqe_ci - txq->wqe_pi);
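/*
 * Illustration: with eMPW and no inlining a single WQEBB may carry up
 * to four packet Data Segments, so elts (one entry per mbuf) are
 * consumed roughly four times faster than WQEBBs; with inlining one
 * packet may span several WQEBBs and wqe_free becomes the tighter limit.
 */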
3388 if (unlikely(!loc.elts_free || !loc.wqe_free))
3392 * Fetch the packet from array. Usually this is the first
3393 * packet in series of multi/single segment packets.
3395 loc.mbuf = *(pkts + loc.pkts_sent);
3396 /* Dedicated branch for multi-segment packets. */
3397 if (MLX5_TXOFF_CONFIG(MULTI) &&
3398 unlikely(NB_SEGS(loc.mbuf) > 1)) {
3400 * Multi-segment packet encountered.
3401 * Hardware is able to process it only
3402 * with SEND/TSO opcodes, one packet
3403 * per WQE, do it in dedicated routine.
3406 MLX5_ASSERT(loc.pkts_sent >= loc.pkts_copy);
3407 part = loc.pkts_sent - loc.pkts_copy;
3408 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
3410 * There are some single-segment mbufs not
3411 * stored in elts. The mbufs must be in the
3412 * same order as WQEs, so we must copy the
3413 * mbufs to elts here, before the mbufs of the
3414 * coming multi-segment packet are appended.
3416 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy,
3418 loc.pkts_copy = loc.pkts_sent;
3420 MLX5_ASSERT(pkts_n > loc.pkts_sent);
3421 ret = mlx5_tx_burst_mseg(txq, pkts, pkts_n, &loc, olx);
3422 if (!MLX5_TXOFF_CONFIG(INLINE))
3423 loc.pkts_copy = loc.pkts_sent;
3425 * These returned code checks are supposed
3426 * to be optimized out due to routine inlining.
3428 if (ret == MLX5_TXCMP_CODE_EXIT) {
3430 * The routine returns this code when
3431 * all packets are sent or there are not
3432 * enough resources to complete the request.
3436 if (ret == MLX5_TXCMP_CODE_ERROR) {
3438 * The routine returns this code when some error
3439 * in the incoming packet format occurred.
3441 txq->stats.oerrors++;
3444 if (ret == MLX5_TXCMP_CODE_SINGLE) {
3446 * The single-segment packet was encountered
3447 * in the array, try to send it in the
3448 * most optimized way, possibly engaging eMPW.
3450 goto enter_send_single;
3452 if (MLX5_TXOFF_CONFIG(TSO) &&
3453 ret == MLX5_TXCMP_CODE_TSO) {
3455 * The single-segment TSO packet was
3456 * encountered in the array.
3458 goto enter_send_tso;
3460 /* We must not get here. Something is going wrong. */
3462 txq->stats.oerrors++;
3465 /* Dedicated branch for single-segment TSO packets. */
3466 if (MLX5_TXOFF_CONFIG(TSO) &&
3467 unlikely(loc.mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
3469 * TSO might require a special way of inlining
3470 * (dedicated parameters) and is sent with
3471 * the MLX5_OPCODE_TSO opcode only, handle it
3472 * in a dedicated branch.
3475 MLX5_ASSERT(NB_SEGS(loc.mbuf) == 1);
3476 MLX5_ASSERT(pkts_n > loc.pkts_sent);
3477 ret = mlx5_tx_burst_tso(txq, pkts, pkts_n, &loc, olx);
3479 * These returned code checks are supposed
3480 * to be optimized out due to routine inlining.
3482 if (ret == MLX5_TXCMP_CODE_EXIT)
3484 if (ret == MLX5_TXCMP_CODE_ERROR) {
3485 txq->stats.oerrors++;
3488 if (ret == MLX5_TXCMP_CODE_SINGLE)
3489 goto enter_send_single;
3490 if (MLX5_TXOFF_CONFIG(MULTI) &&
3491 ret == MLX5_TXCMP_CODE_MULTI) {
3493 * The multi-segment packet was
3494 * encountered in the array.
3496 goto enter_send_multi;
3498 /* We must not get here. Something is going wrong. */
3500 txq->stats.oerrors++;
3504 * The dedicated branch for the single-segment packets
3505 * without TSO. Often these ones can be sent using
3506 * MLX5_OPCODE_EMPW with multiple packets in one WQE.
3507 * The routine builds the WQEs till it encounters
3508 * a TSO or multi-segment packet (in case these
3509 * offloads are requested at SQ configuration time).
3512 MLX5_ASSERT(pkts_n > loc.pkts_sent);
3513 ret = mlx5_tx_burst_single(txq, pkts, pkts_n, &loc, olx);
3515 * These returned code checks are supposed
3516 * to be optimized out due to routine inlining.
3518 if (ret == MLX5_TXCMP_CODE_EXIT)
3520 if (ret == MLX5_TXCMP_CODE_ERROR) {
3521 txq->stats.oerrors++;
3524 if (MLX5_TXOFF_CONFIG(MULTI) &&
3525 ret == MLX5_TXCMP_CODE_MULTI) {
3527 * The multi-segment packet was
3528 * encountered in the array.
3530 goto enter_send_multi;
3532 if (MLX5_TXOFF_CONFIG(TSO) &&
3533 ret == MLX5_TXCMP_CODE_TSO) {
3535 * The single-segment TSO packet was
3536 * encountered in the array.
3538 goto enter_send_tso;
3540 /* We must not get here. Something is going wrong. */
3542 txq->stats.oerrors++;
3546 * Main Tx loop is completed, do the rest:
3547 * - set completion request if thresholds are reached
3548 * - doorbell the hardware
3549 * - copy the rest of mbufs to elts (if any)
3551 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE) ||
3552 loc.pkts_sent >= loc.pkts_copy);
3553 /* Take a shortcut if nothing is sent. */
3554 if (unlikely(loc.pkts_sent == loc.pkts_loop))
3556 /* Request CQE generation if limits are reached. */
3557 mlx5_tx_request_completion(txq, &loc, olx);
3559 * Ring the QP doorbell immediately after WQE building completion
3560 * to improve latencies. The purely software-related data treatment
3561 * can be completed after the doorbell. Tx CQEs for this SQ are
3562 * processed in this thread only, by polling.
3564 * The rdma core library can map doorbell register in two ways,
3565 * depending on the environment variable "MLX5_SHUT_UP_BF":
3567 * - as regular cached memory, the variable is either missing or
3568 * set to zero. This type of mapping may cause significant
3569 * doorbell register write latency and requires an explicit memory
3570 * write barrier to mitigate this issue and prevent write combining.
3572 * - as non-cached memory, the variable is present and set to a non-zero
3573 * value. This type of mapping may cause a performance impact under
3574 * heavy load conditions but the explicit write memory barrier is
3575 * not required and it may improve core performance.
3577 * - the legacy behaviour (prior to the 19.08 release) was to use some
3578 * heuristics to decide whether a write memory barrier should
3579 * be performed. This behavior is supported by specifying
3580 * tx_db_nc=2; the write barrier is skipped if the application provides
3581 * the full recommended burst of packets, assuming the next
3582 * packets are coming and the write barrier will be issued on
3583 * the next burst (after descriptor writing, at least).
3585 mlx5_doorbell_ring(mlx5_tx_bfreg(txq),
3586 *(volatile uint64_t *)loc.wqe_last, txq->wqe_ci,
3587 txq->qp_db, !txq->db_nc &&
3588 (!txq->db_heu || pkts_n % MLX5_TX_DEFAULT_BURST));
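/*
 * The last argument requests the memory write barrier: it is needed
 * only for the cached doorbell mapping (db_nc == 0) and, when the
 * db_heu heuristics are enabled, only for bursts that are not a full
 * multiple of MLX5_TX_DEFAULT_BURST.
 */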
3589 /* Not all of the mbufs may be stored into elts yet. */
3590 part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent - loc.pkts_copy;
3591 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
3593 * There are some single-segment mbufs not stored in elts.
3594 * This can only happen if the last packet was single-segment.
3595 * The copying is gathered into one place because it is
3596 * a good opportunity to optimize it with SIMD.
3597 * Unfortunately, if inlining is enabled, gaps in the pointer
3598 * array may happen due to early freeing of the inlined mbufs.
3600 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy, part, olx);
3601 loc.pkts_copy = loc.pkts_sent;
3603 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
3604 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
3605 if (pkts_n > loc.pkts_sent) {
3607 * If the burst size is large there might be not enough CQEs
3608 * fetched from the completion queue and not enough resources
3609 * freed to send all the packets.
3614 #ifdef MLX5_PMD_SOFT_COUNTERS
3615 /* Increment sent packets counter. */
3616 txq->stats.opackets += loc.pkts_sent;
3618 if (MLX5_TXOFF_CONFIG(INLINE) && loc.mbuf_free)
3619 __mlx5_tx_free_mbuf(txq, pkts, loc.mbuf_free, olx);
3620 return loc.pkts_sent;
3623 #endif /* RTE_PMD_MLX5_TX_H_ */