/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015-2019 Mellanox Technologies, Ltd
 */

#include <rte_mempool.h>
#include <rte_prefetch.h>
#include <rte_common.h>
#include <rte_branch_prediction.h>
#include <rte_ether.h>
#include <rte_cycles.h>

#include <mlx5_common.h>

#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
/* TX burst subroutines return codes. */
enum mlx5_txcmp_code {
	MLX5_TXCMP_CODE_EXIT = 0,
	MLX5_TXCMP_CODE_ERROR,
	MLX5_TXCMP_CODE_SINGLE,
	MLX5_TXCMP_CODE_MULTI,
};
/*
 * These defines are used to configure the set of Tx burst routine options
 * supported at compile time. Options that are not specified are optimized
 * out, because the corresponding if conditions can be evaluated at compile
 * time. Offloads with a larger runtime check overhead (more CPU cycles to
 * skip) should have a larger index - this is needed to select the better
 * matching routine when there is no exact match and some offloads are not
 * actually requested.
 */
#define MLX5_TXOFF_CONFIG_MULTI (1u << 0) /* Multi-segment packets. */
#define MLX5_TXOFF_CONFIG_TSO (1u << 1) /* TCP send offload supported. */
#define MLX5_TXOFF_CONFIG_SWP (1u << 2) /* Tunnels/SW Parser offloads. */
#define MLX5_TXOFF_CONFIG_CSUM (1u << 3) /* Check Sums offloaded. */
#define MLX5_TXOFF_CONFIG_INLINE (1u << 4) /* Data inlining supported. */
#define MLX5_TXOFF_CONFIG_VLAN (1u << 5) /* VLAN insertion supported. */
#define MLX5_TXOFF_CONFIG_METADATA (1u << 6) /* Flow metadata. */
#define MLX5_TXOFF_CONFIG_EMPW (1u << 8) /* Enhanced MPW supported. */
#define MLX5_TXOFF_CONFIG_MPW (1u << 9) /* Legacy MPW supported. */
#define MLX5_TXOFF_CONFIG_TXPP (1u << 10) /* Scheduling on timestamp. */
/* The most common offloads groups. */
#define MLX5_TXOFF_CONFIG_NONE 0
#define MLX5_TXOFF_CONFIG_FULL (MLX5_TXOFF_CONFIG_MULTI | \
				MLX5_TXOFF_CONFIG_TSO | \
				MLX5_TXOFF_CONFIG_SWP | \
				MLX5_TXOFF_CONFIG_CSUM | \
				MLX5_TXOFF_CONFIG_INLINE | \
				MLX5_TXOFF_CONFIG_VLAN | \
				MLX5_TXOFF_CONFIG_METADATA)
#define MLX5_TXOFF_CONFIG(mask) (olx & MLX5_TXOFF_CONFIG_##mask)

#define MLX5_TXOFF_DECL(func, olx) \
static uint16_t mlx5_tx_burst_##func(void *txq, \
				     struct rte_mbuf **pkts, \
				     uint16_t pkts_n) \
{ \
	return mlx5_tx_burst_tmpl((struct mlx5_txq_data *)txq, \
				  pkts, pkts_n, (olx)); \
}

#define MLX5_TXOFF_INFO(func, olx) {mlx5_tx_burst_##func, olx},
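/*
 * Illustrative use of the helpers above (the "csum_vlan" name is
 * hypothetical): declare a burst routine specialized at compile time for
 * checksum and VLAN insertion only, then register it in the table of
 * available routines. All other offload branches inside
 * mlx5_tx_burst_tmpl() are compiled out for this instance:
 *
 *   MLX5_TXOFF_DECL(csum_vlan, MLX5_TXOFF_CONFIG_CSUM |
 *			       MLX5_TXOFF_CONFIG_VLAN)
 *
 *   MLX5_TXOFF_INFO(csum_vlan, MLX5_TXOFF_CONFIG_CSUM |
 *			       MLX5_TXOFF_CONFIG_VLAN)
 */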
static_assert(MLX5_CQE_STATUS_HW_OWN < 0, "Must be negative value");
static_assert(MLX5_CQE_STATUS_SW_OWN < 0, "Must be negative value");
static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
	      (sizeof(uint16_t) +
	       sizeof(rte_v128u32_t)),
	      "invalid Ethernet Segment data size");
static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
	      (sizeof(uint16_t) +
	       sizeof(struct rte_vlan_hdr) +
	       2 * RTE_ETHER_ADDR_LEN),
	      "invalid Ethernet Segment data size");
static_assert(MLX5_DSEG_MIN_INLINE_SIZE ==
	      (2 * RTE_ETHER_ADDR_LEN),
	      "invalid Data Segment data size");
static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
static_assert((sizeof(struct rte_vlan_hdr) +
	       sizeof(struct rte_ether_hdr)) ==
	      MLX5_ESEG_MIN_INLINE_SIZE,
	      "invalid min inline data size");
static_assert(MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE <=
	      MLX5_DSEG_MAX, "invalid WQE max size");
static_assert(MLX5_WQE_CSEG_SIZE == MLX5_WSEG_SIZE,
	      "invalid WQE Control Segment size");
static_assert(MLX5_WQE_ESEG_SIZE == MLX5_WSEG_SIZE,
	      "invalid WQE Ethernet Segment size");
static_assert(MLX5_WQE_DSEG_SIZE == MLX5_WSEG_SIZE,
	      "invalid WQE Data Segment size");
static_assert(MLX5_WQE_SIZE == 4 * MLX5_WSEG_SIZE,
	      "invalid WQE size");
uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
	[0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
};

uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned;
uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned;

uint64_t rte_net_mlx5_dynf_inline_mask;
#define PKT_TX_DYNF_NOINLINE rte_net_mlx5_dynf_inline_mask
/**
 * Build a table to translate Rx completion flags to packet type.
 *
 * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
 */
void
mlx5_set_ptype_table(void)
{
	unsigned int i;
	uint32_t (*p)[RTE_DIM(mlx5_ptype_table)] = &mlx5_ptype_table;

	/* Last entry must not be overwritten, reserved for errored packet. */
	for (i = 0; i < RTE_DIM(mlx5_ptype_table) - 1; ++i)
		(*p)[i] = RTE_PTYPE_UNKNOWN;
	/*
	 * The index to the array should have:
	 * bit[1:0] = l3_hdr_type
	 * bit[4:2] = l4_hdr_type
	 * bit[5] = ip_frag
	 * bit[6] = tunneled
	 * bit[7] = outer_l3_type
	 */
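	/*
	 * Worked example (illustrative): index 0x45 = 0b01000101 has the
	 * tunneled bit (bit 6) set, l4_hdr_type = 1 (TCP) and
	 * l3_hdr_type = 1 (inner IPv6) with outer_l3_type = 0 (IPv4),
	 * matching the (*p)[0x45] entry below: outer IPv4, inner IPv6,
	 * inner TCP.
	 */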
	/* L2 */
	(*p)[0x00] = RTE_PTYPE_L2_ETHER;
	/* L3 */
	(*p)[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_L4_NONFRAG;
	(*p)[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_L4_NONFRAG;
	/* Fragmented */
	(*p)[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_L4_FRAG;
	(*p)[0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_L4_FRAG;
	/* TCP */
	(*p)[0x05] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_L4_TCP;
	(*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_L4_TCP;
	(*p)[0x0d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_L4_TCP;
	(*p)[0x0e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_L4_TCP;
	(*p)[0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_L4_TCP;
	(*p)[0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_L4_TCP;
	/* UDP */
	(*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_L4_UDP;
	(*p)[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_L4_UDP;
	/* Repeat with outer_l3_type being set. Just in case. */
	(*p)[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_L4_NONFRAG;
	(*p)[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_L4_NONFRAG;
	(*p)[0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_L4_FRAG;
	(*p)[0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_L4_FRAG;
	(*p)[0x85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_L4_TCP;
	(*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_L4_TCP;
	(*p)[0x8d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_L4_TCP;
	(*p)[0x8e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_L4_TCP;
	(*p)[0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_L4_TCP;
	(*p)[0x92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_L4_TCP;
	(*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_L4_UDP;
	(*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_L4_UDP;
	/* Tunneled - L3 */
	(*p)[0x40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
	(*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_NONFRAG;
	(*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_NONFRAG;
	(*p)[0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
	(*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_NONFRAG;
	(*p)[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_NONFRAG;
	/* Tunneled - Fragmented */
	(*p)[0x61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_FRAG;
	(*p)[0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_FRAG;
	(*p)[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_FRAG;
	(*p)[0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_FRAG;
	/* Tunneled - TCP */
	(*p)[0x45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_TCP;
	(*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_TCP;
	(*p)[0x4d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_TCP;
	(*p)[0x4e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_TCP;
	(*p)[0x51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_TCP;
	(*p)[0x52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_TCP;
	(*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_TCP;
	(*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_TCP;
	(*p)[0xcd] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_TCP;
	(*p)[0xce] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_TCP;
	(*p)[0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_TCP;
	(*p)[0xd2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_TCP;
	/* Tunneled - UDP */
	(*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_UDP;
	(*p)[0x4a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_UDP;
	(*p)[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_UDP;
	(*p)[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_UDP;
}
/**
 * Build a table to translate packet checksum offload requests to the
 * checksum type of Verbs.
 */
void
mlx5_set_cksum_table(void)
{
	unsigned int i;
	uint8_t v;

	/*
	 * The index should have:
	 * bit[0] = PKT_TX_TCP_SEG
	 * bit[3:2] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
	 * bit[4] = PKT_TX_IP_CKSUM
	 * bit[8] = PKT_TX_OUTER_IP_CKSUM
	 * bit[9] = tunneled packet
	 */
	for (i = 0; i < RTE_DIM(mlx5_cksum_table); ++i) {
		v = 0;
		if (i & (1 << 9)) {
			/* Tunneled packet. */
			if (i & (1 << 8)) /* Outer IP. */
				v |= MLX5_ETH_WQE_L3_CSUM;
			if (i & (1 << 4)) /* Inner IP. */
				v |= MLX5_ETH_WQE_L3_INNER_CSUM;
			if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
				v |= MLX5_ETH_WQE_L4_INNER_CSUM;
		} else {
			/* No tunnel. */
			if (i & (1 << 4)) /* IP. */
				v |= MLX5_ETH_WQE_L3_CSUM;
			if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
				v |= MLX5_ETH_WQE_L4_CSUM;
		}
		mlx5_cksum_table[i] = v;
	}
}
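/*
 * Worked example (illustrative): a plain TCP packet requesting
 * PKT_TX_IP_CKSUM and PKT_TX_TCP_CKSUM maps to index 0b10100 (bit[4]
 * and bit[2] set, no tunnel bit), so the table entry holds
 * MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM.
 */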
/**
 * Build a table to translate the packet type of an mbuf to the SWP type
 * of Verbs.
 */
void
mlx5_set_swp_types_table(void)
{
	unsigned int i;
	uint8_t v;

	/*
	 * The index should have:
	 * bit[1:0] = PKT_TX_L4_MASK
	 * bit[4] = PKT_TX_IPV6
	 * bit[8] = PKT_TX_OUTER_IPV6
	 * bit[9] = PKT_TX_OUTER_UDP
	 */
	for (i = 0; i < RTE_DIM(mlx5_swp_types_table); ++i) {
		v = 0;
		if (i & (1 << 8))
			v |= MLX5_ETH_WQE_L3_OUTER_IPV6;
		if (i & (1 << 9))
			v |= MLX5_ETH_WQE_L4_OUTER_UDP;
		if (i & (1 << 4))
			v |= MLX5_ETH_WQE_L3_INNER_IPV6;
		if ((i & 3) == (PKT_TX_UDP_CKSUM >> 52))
			v |= MLX5_ETH_WQE_L4_INNER_UDP;
		mlx5_swp_types_table[i] = v;
	}
}
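/*
 * Worked example (illustrative): an IPv4-in-UDP tunnel packet with an
 * inner UDP checksum request has bit[9] set (UDP tunnel) and
 * bit[1:0] == PKT_TX_UDP_CKSUM >> 52, so its entry combines
 * MLX5_ETH_WQE_L4_OUTER_UDP | MLX5_ETH_WQE_L4_INNER_UDP.
 */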
/**
 * Set Software Parser flags and offsets in the Ethernet Segment of WQE.
 * Flags must be preliminarily initialized to zero.
 *
 * @param loc
 *   Pointer to burst routine local context.
 * @param swp_flags
 *   Pointer to store Software Parser flags.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 *
 * @return
 *   Software Parser offsets packed in dword.
 *   Software Parser flags are set by pointer.
 */
static __rte_always_inline uint32_t
txq_mbuf_to_swp(struct mlx5_txq_local *__rte_restrict loc,
		uint8_t *swp_flags,
		unsigned int olx)
{
	uint64_t ol, tunnel;
	unsigned int idx, off;
	uint32_t set;

	if (!MLX5_TXOFF_CONFIG(SWP))
		return 0;
	ol = loc->mbuf->ol_flags;
	tunnel = ol & PKT_TX_TUNNEL_MASK;
	/*
	 * Check whether Software Parser is required.
	 * Only custom (UDP and IP) tunnels may request it.
	 */
	if (likely(tunnel != PKT_TX_TUNNEL_UDP && tunnel != PKT_TX_TUNNEL_IP))
		return 0;
	/*
	 * The index should have:
	 * bit[1:0] = PKT_TX_L4_MASK
	 * bit[4] = PKT_TX_IPV6
	 * bit[8] = PKT_TX_OUTER_IPV6
	 * bit[9] = PKT_TX_OUTER_UDP
	 */
	idx = (ol & (PKT_TX_L4_MASK | PKT_TX_IPV6 | PKT_TX_OUTER_IPV6)) >> 52;
	idx |= (tunnel == PKT_TX_TUNNEL_UDP) ? (1 << 9) : 0;
	*swp_flags = mlx5_swp_types_table[idx];
	/*
	 * Set offsets for SW parser. Since ConnectX-5, SW parser just
	 * complements HW parser. SW parser starts to engage only if HW parser
	 * can't reach a header. For the older devices, HW parser will not kick
	 * in if any of SWP offsets is set. Therefore, all of the L3 offsets
	 * should be set regardless of HW offload.
	 */
	off = loc->mbuf->outer_l2_len;
	if (MLX5_TXOFF_CONFIG(VLAN) && ol & PKT_TX_VLAN_PKT)
		off += sizeof(struct rte_vlan_hdr);
	set = (off >> 1) << 8; /* Outer L3 offset. */
	off += loc->mbuf->outer_l3_len;
	if (tunnel == PKT_TX_TUNNEL_UDP)
		set |= off >> 1; /* Outer L4 offset. */
	if (ol & (PKT_TX_IPV4 | PKT_TX_IPV6)) { /* Inner IP. */
		const uint64_t csum = ol & PKT_TX_L4_MASK;

		off += loc->mbuf->l2_len;
		set |= (off >> 1) << 24; /* Inner L3 offset. */
		if (csum == PKT_TX_TCP_CKSUM ||
		    csum == PKT_TX_UDP_CKSUM ||
		    (MLX5_TXOFF_CONFIG(TSO) && ol & PKT_TX_TCP_SEG)) {
			off += loc->mbuf->l3_len;
			set |= (off >> 1) << 16; /* Inner L4 offset. */
		}
	}
	set = rte_cpu_to_le_32(set);
	return set;
}
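/*
 * Layout note (illustrative): the returned dword packs four offsets,
 * each expressed in 2-byte units: bits 7:0 outer L4, bits 15:8 outer L3,
 * bits 23:16 inner L4 and bits 31:24 inner L3. E.g. a plain Ethernet
 * outer header (outer_l2_len == 14) yields an outer L3 offset field of
 * 14 / 2 = 7 in bits 15:8.
 */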
/**
 * Convert the Checksum offloads to Verbs.
 *
 * @param buf
 *   Pointer to the mbuf.
 *
 * @return
 *   Converted checksum flags.
 */
453 txq_ol_cksum_to_cs(struct rte_mbuf *buf)
456 uint8_t is_tunnel = !!(buf->ol_flags & PKT_TX_TUNNEL_MASK);
457 const uint64_t ol_flags_mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK |
458 PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM;
461 * The index should have:
462 * bit[0] = PKT_TX_TCP_SEG
463 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
464 * bit[4] = PKT_TX_IP_CKSUM
465 * bit[8] = PKT_TX_OUTER_IP_CKSUM
468 idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (!!is_tunnel << 9);
469 return mlx5_cksum_table[idx];
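/*
 * Shift note (illustrative): PKT_TX_TCP_SEG is mbuf ol_flags bit 50, so
 * the >> 50 above aligns it to index bit[0]; PKT_TX_L4_MASK (bits 53:52)
 * lands on bit[3:2], PKT_TX_IP_CKSUM (bit 54) on bit[4] and
 * PKT_TX_OUTER_IP_CKSUM (bit 58) on bit[8].
 */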
#define MLX5_SYSTEM_LOG_DIR "/var/log"
/**
 * Dump debug information to a log file.
 *
 * @param fname
 *   The file name.
 * @param hex_title
 *   If not NULL this string is printed as a header to the output
 *   and the output will be in hexadecimal view.
 * @param buf
 *   This is the buffer address to print out.
 * @param hex_len
 *   The number of bytes to dump out.
 */
void
mlx5_dump_debug_information(const char *fname, const char *hex_title,
			    const void *buf, unsigned int hex_len)
{
	FILE *fd;

	MKSTR(path, "%s/%s", MLX5_SYSTEM_LOG_DIR, fname);
	fd = fopen(path, "a+");
	if (fd == NULL) {
		DRV_LOG(WARNING, "cannot open %s for debug dump", path);
		MKSTR(path2, "./%s", fname);
		fd = fopen(path2, "a+");
		if (fd == NULL) {
			DRV_LOG(ERR, "cannot open %s for debug dump", path2);
			return;
		}
		DRV_LOG(INFO, "New debug dump in file %s", path2);
	} else {
		DRV_LOG(INFO, "New debug dump in file %s", path);
	}
	if (hex_title)
		rte_hexdump(fd, hex_title, buf, hex_len);
	else
		fprintf(fd, "%s", (const char *)buf);
	fprintf(fd, "\n\n\n");
	fclose(fd);
}
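/*
 * Usage sketch (illustrative; name, err_str, cq_buf and cq_len are
 * placeholders): dump a text report, then a titled hex snapshot, into
 * the same /var/log file, falling back to the current directory when
 * /var/log is not writable:
 *
 *   mlx5_dump_debug_information(name, NULL, err_str, 0);
 *   mlx5_dump_debug_information(name, "MLX5 Error CQ:", cq_buf, cq_len);
 */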
/**
 * Move QP from error state to running state and initialize indexes.
 *
 * @param txq_ctrl
 *   Pointer to TX queue control structure.
 *
 * @return
 *   0 on success, else -1.
 */
static int
tx_recover_qp(struct mlx5_txq_ctrl *txq_ctrl)
{
	struct mlx5_mp_arg_queue_state_modify sm = {
		.is_wq = 0,
		.queue_id = txq_ctrl->txq.idx,
	};

	if (mlx5_queue_state_modify(ETH_DEV(txq_ctrl->priv), &sm))
		return -1;
	txq_ctrl->txq.wqe_ci = 0;
	txq_ctrl->txq.wqe_pi = 0;
	txq_ctrl->txq.elts_comp = 0;
	return 0;
}
/* Return 1 if the error CQE is already signed; otherwise sign it and return 0. */
static int
check_err_cqe_seen(volatile struct mlx5_err_cqe *err_cqe)
{
	static const uint8_t magic[] = "seen";
	int ret = 1;
	unsigned int i;

	for (i = 0; i < sizeof(magic); ++i)
		if (!ret || err_cqe->rsvd1[i] != magic[i]) {
			ret = 0;
			err_cqe->rsvd1[i] = magic[i];
		}
	return ret;
}
/**
 * Handle an error CQE.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param err_cqe
 *   Pointer to the error CQE.
 *
 * @return
 *   Negative value if queue recovery failed, otherwise
 *   the error completion entry is handled successfully.
 */
int
mlx5_tx_error_cqe_handle(struct mlx5_txq_data *__rte_restrict txq,
			 volatile struct mlx5_err_cqe *err_cqe)
{
	if (err_cqe->syndrome != MLX5_CQE_SYNDROME_WR_FLUSH_ERR) {
		const uint16_t wqe_m = ((1 << txq->wqe_n) - 1);
		struct mlx5_txq_ctrl *txq_ctrl =
			container_of(txq, struct mlx5_txq_ctrl, txq);
		uint16_t new_wqe_pi = rte_be_to_cpu_16(err_cqe->wqe_counter);
		int seen = check_err_cqe_seen(err_cqe);

		if (!seen && txq_ctrl->dump_file_n <
		    txq_ctrl->priv->config.max_dump_files_num) {
			MKSTR(err_str, "Unexpected CQE error syndrome "
			      "0x%02x CQN = %u SQN = %u wqe_counter = %u "
			      "wq_ci = %u cq_ci = %u", err_cqe->syndrome,
			      txq->cqe_s, txq->qp_num_8s >> 8,
			      rte_be_to_cpu_16(err_cqe->wqe_counter),
			      txq->wqe_ci, txq->cq_ci);
			MKSTR(name, "dpdk_mlx5_port_%u_txq_%u_index_%u_%u",
			      PORT_ID(txq_ctrl->priv), txq->idx,
			      txq_ctrl->dump_file_n, (uint32_t)rte_rdtsc());
			mlx5_dump_debug_information(name, NULL, err_str, 0);
			mlx5_dump_debug_information(name, "MLX5 Error CQ:",
						    (const void *)((uintptr_t)
						    txq->cqes),
						    sizeof(*err_cqe) *
						    (1 << txq->cqe_n));
			mlx5_dump_debug_information(name, "MLX5 Error SQ:",
						    (const void *)((uintptr_t)
						    txq->wqes),
						    MLX5_WQE_SIZE *
						    (1 << txq->wqe_n));
			txq_ctrl->dump_file_n++;
		}
		/*
		 * Count errors in WQE units.
		 * Later it can be improved to count error packets,
		 * for example, by SQ parsing to find out how many packets
		 * should be counted for each WQE.
		 */
		txq->stats.oerrors += ((txq->wqe_ci & wqe_m) -
					new_wqe_pi) & wqe_m;
		if (tx_recover_qp(txq_ctrl)) {
			/* Recovering failed - retry later on the same WQE. */
			return -1;
		}
		/* Release all the remaining buffers. */
		txq_free_elts(txq_ctrl);
	}
	return 0;
}
/**
 * Modify a Verbs/DevX queue state.
 * This must be called from the primary process.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param sm
 *   State modify request parameters.
 *
 * @return
 *   0 in case of success else non-zero value and rte_errno is set.
 */
int
mlx5_queue_state_modify_primary(struct rte_eth_dev *dev,
			const struct mlx5_mp_arg_queue_state_modify *sm)
{
	int ret;
	struct mlx5_priv *priv = dev->data->dev_private;

	if (sm->is_wq) {
		struct mlx5_rxq_data *rxq = (*priv->rxqs)[sm->queue_id];
		struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of(rxq, struct mlx5_rxq_ctrl, rxq);

		ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, sm->state);
		if (ret) {
			DRV_LOG(ERR, "Cannot change Rx WQ state to %u - %s",
				sm->state, strerror(errno));
			rte_errno = errno;
			return ret;
		}
	} else {
		struct mlx5_txq_data *txq = (*priv->txqs)[sm->queue_id];
		struct mlx5_txq_ctrl *txq_ctrl =
			container_of(txq, struct mlx5_txq_ctrl, txq);

		ret = priv->obj_ops.txq_obj_modify(txq_ctrl->obj,
						   MLX5_TXQ_MOD_ERR2RDY,
						   (uint8_t)priv->dev_port);
		if (ret)
			return ret;
	}
	return 0;
}
/**
 * Modify a Verbs queue state.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param sm
 *   State modify request parameters.
 *
 * @return
 *   0 in case of success else non-zero value.
 */
static int
mlx5_queue_state_modify(struct rte_eth_dev *dev,
			struct mlx5_mp_arg_queue_state_modify *sm)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int ret = 0;

	switch (rte_eal_process_type()) {
	case RTE_PROC_PRIMARY:
		ret = mlx5_queue_state_modify_primary(dev, sm);
		break;
	case RTE_PROC_SECONDARY:
		ret = mlx5_mp_req_queue_state_modify(&priv->mp_id, sm);
		break;
	default:
		break;
	}
	return ret;
}
/**
 * Dummy DPDK callback for TX.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *
 * @param dpdk_txq
 *   Generic pointer to TX queue structure.
 * @param[in] pkts
 *   Packets to transmit.
 * @param pkts_n
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
uint16_t
removed_tx_burst(void *dpdk_txq __rte_unused,
		 struct rte_mbuf **pkts __rte_unused,
		 uint16_t pkts_n __rte_unused)
{
	rte_mb();
	return 0;
}
/**
 * Free the mbufs from the linear array of pointers.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param pkts
 *   Pointer to array of packets to be freed.
 * @param pkts_n
 *   Number of packets to be freed.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 */
static __rte_always_inline void
mlx5_tx_free_mbuf(struct mlx5_txq_data *__rte_restrict txq,
		  struct rte_mbuf **__rte_restrict pkts,
		  unsigned int pkts_n,
		  unsigned int olx __rte_unused)
{
	struct rte_mempool *pool = NULL;
	struct rte_mbuf **p_free = NULL;
	struct rte_mbuf *mbuf;
	unsigned int n_free = 0;

	/*
	 * The implemented algorithm eliminates
	 * copying pointers to temporary array
	 * for rte_mempool_put_bulk() calls.
	 */
	MLX5_ASSERT(pkts);
	MLX5_ASSERT(pkts_n);
	/*
	 * Free mbufs directly to the pool in bulk
	 * if fast free offload is engaged.
	 */
	if (!MLX5_TXOFF_CONFIG(MULTI) && txq->fast_free) {
		mbuf = *pkts;
		pool = mbuf->pool;
		rte_mempool_put_bulk(pool, (void *)pkts, pkts_n);
		return;
	}
	for (;;) {
		for (;;) {
			/*
			 * Decrement mbuf reference counter, detach
			 * indirect and external buffers if needed.
			 */
			mbuf = rte_pktmbuf_prefree_seg(*pkts);
			if (likely(mbuf != NULL)) {
				MLX5_ASSERT(mbuf == *pkts);
				if (likely(n_free != 0)) {
					if (unlikely(pool != mbuf->pool))
						/* From different pool. */
						break;
				} else {
					/* Start new scan array. */
					pool = mbuf->pool;
					p_free = pkts;
				}
				++n_free;
				++pkts;
				--pkts_n;
				if (unlikely(pkts_n == 0)) {
					mbuf = NULL;
					break;
				}
			} else {
				/*
				 * This happens if mbuf is still referenced.
				 * We can't put it back to the pool, skip.
				 */
				++pkts;
				--pkts_n;
				if (unlikely(n_free != 0))
					/* There is some array to free. */
					break;
				if (unlikely(pkts_n == 0))
					/* Last mbuf, nothing to free. */
					return;
			}
		}
		for (;;) {
			/*
			 * This loop is implemented to avoid multiple
			 * inlining of rte_mempool_put_bulk().
			 */
			MLX5_ASSERT(pool);
			MLX5_ASSERT(p_free);
			MLX5_ASSERT(n_free);
			/*
			 * Free the array of pre-freed mbufs
			 * belonging to the same memory pool.
			 */
			rte_mempool_put_bulk(pool, (void *)p_free, n_free);
			if (unlikely(mbuf != NULL)) {
				/* There is the request to start new scan. */
				pool = mbuf->pool;
				p_free = pkts++;
				n_free = 1;
				--pkts_n;
				if (likely(pkts_n != 0))
					break;
				/*
				 * This is the last mbuf to be freed.
				 * Do one more loop iteration to complete.
				 * This is rare case of the last unique mbuf.
				 */
				mbuf = NULL;
				continue;
			}
			if (likely(pkts_n == 0))
				return;
			n_free = 0;
			break;
		}
	}
}
/*
 * Not-inlined version of mlx5_tx_free_mbuf() to keep the call site
 * on the tx_burst completion path small and fast.
 */
static __rte_noinline void
__mlx5_tx_free_mbuf(struct mlx5_txq_data *__rte_restrict txq,
		    struct rte_mbuf **__rte_restrict pkts,
		    unsigned int pkts_n,
		    unsigned int olx __rte_unused)
{
	mlx5_tx_free_mbuf(txq, pkts, pkts_n, olx);
}
/**
 * Free the mbufs from the elts ring buffer up to the new tail.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param tail
 *   Index in elts to free up to, becomes the new elts tail.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 */
static __rte_always_inline void
mlx5_tx_free_elts(struct mlx5_txq_data *__rte_restrict txq,
		  uint16_t tail,
		  unsigned int olx __rte_unused)
{
	uint16_t n_elts = tail - txq->elts_tail;

	MLX5_ASSERT(n_elts);
	MLX5_ASSERT(n_elts <= txq->elts_s);
	/*
	 * Implement a loop to support ring buffer wraparound
	 * with single inlining of mlx5_tx_free_mbuf().
	 */
	do {
		unsigned int part;

		part = txq->elts_s - (txq->elts_tail & txq->elts_m);
		part = RTE_MIN(part, n_elts);
		MLX5_ASSERT(part);
		MLX5_ASSERT(part <= txq->elts_s);
		mlx5_tx_free_mbuf(txq,
				  &txq->elts[txq->elts_tail & txq->elts_m],
				  part, olx);
		txq->elts_tail += part;
		n_elts -= part;
	} while (n_elts);
}
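/*
 * Worked example of the wraparound loop above (illustrative numbers):
 * with elts_s = 256, elts_tail = 250 and n_elts = 10, the first pass
 * frees the 6 trailing entries (indices 250..255) and the second pass
 * the 4 entries that wrapped to the beginning of the ring (0..3).
 */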
/**
 * Store the mbufs being sent into the elts ring buffer.
 * On Tx completion these mbufs will be freed.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param pkts
 *   Pointer to array of packets to be stored.
 * @param pkts_n
 *   Number of packets to be stored.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 */
static __rte_always_inline void
mlx5_tx_copy_elts(struct mlx5_txq_data *__rte_restrict txq,
		  struct rte_mbuf **__rte_restrict pkts,
		  unsigned int pkts_n,
		  unsigned int olx __rte_unused)
{
	unsigned int part;
	struct rte_mbuf **elts = (struct rte_mbuf **)txq->elts;

	MLX5_ASSERT(pkts);
	MLX5_ASSERT(pkts_n);
	part = txq->elts_s - (txq->elts_head & txq->elts_m);
	MLX5_ASSERT(part);
	MLX5_ASSERT(part <= txq->elts_s);
	/* This code is a good candidate for vectorizing with SIMD. */
	rte_memcpy((void *)(elts + (txq->elts_head & txq->elts_m)),
		   (void *)pkts,
		   RTE_MIN(part, pkts_n) * sizeof(struct rte_mbuf *));
	txq->elts_head += pkts_n;
	if (unlikely(part < pkts_n))
		/* The copy is wrapping around the elts array. */
		rte_memcpy((void *)elts, (void *)(pkts + part),
			   (pkts_n - part) * sizeof(struct rte_mbuf *));
}
/**
 * Update completion queue consuming index via doorbell
 * and flush the completed data buffers.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param last_cqe
 *   Valid CQE pointer; if not NULL update txq->wqe_pi and flush the buffers.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 */
static __rte_always_inline void
mlx5_tx_comp_flush(struct mlx5_txq_data *__rte_restrict txq,
		   volatile struct mlx5_cqe *last_cqe,
		   unsigned int olx __rte_unused)
{
	if (likely(last_cqe != NULL)) {
		uint16_t tail;

		txq->wqe_pi = rte_be_to_cpu_16(last_cqe->wqe_counter);
		tail = txq->fcqs[(txq->cq_ci - 1) & txq->cqe_m];
		if (likely(tail != txq->elts_tail)) {
			mlx5_tx_free_elts(txq, tail, olx);
			MLX5_ASSERT(tail == txq->elts_tail);
		}
	}
}
/**
 * Manage TX completions. This routine checks the CQ for
 * arrived CQEs, deduces the last accomplished WQE in SQ,
 * updates SQ producing index and frees all completed mbufs.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 *
 * NOTE: not inlined intentionally, it makes the tx_burst
 * routine smaller, simpler and faster - from experiments.
 */
static void
mlx5_tx_handle_completion(struct mlx5_txq_data *__rte_restrict txq,
			  unsigned int olx __rte_unused)
{
	unsigned int count = MLX5_TX_COMP_MAX_CQE;
	volatile struct mlx5_cqe *last_cqe = NULL;
	bool ring_doorbell = false;
	int ret;

	do {
		volatile struct mlx5_cqe *cqe;

		cqe = &txq->cqes[txq->cq_ci & txq->cqe_m];
		ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci);
		if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
			if (likely(ret != MLX5_CQE_STATUS_ERR)) {
				/* No new CQEs in completion queue. */
				MLX5_ASSERT(ret == MLX5_CQE_STATUS_HW_OWN);
				break;
			}
			/*
			 * Some error occurred, try to restart.
			 * We have no barrier after WQE related Doorbell
			 * written, make sure all writes are completed
			 * here, before we might perform SQ reset.
			 */
			rte_wmb();
			ret = mlx5_tx_error_cqe_handle
				(txq, (volatile struct mlx5_err_cqe *)cqe);
			if (unlikely(ret < 0)) {
				/*
				 * Some error occurred on queue error
				 * handling, we do not advance the index
				 * here, allowing to retry on next call.
				 */
				return;
			}
			/*
			 * We are going to fetch all entries with
			 * MLX5_CQE_SYNDROME_WR_FLUSH_ERR status.
			 * The send queue is supposed to be empty.
			 */
			ring_doorbell = true;
			++txq->cq_ci;
			txq->cq_pi = txq->cq_ci;
			last_cqe = NULL;
			continue;
		}
		/* Normal transmit completion. */
		MLX5_ASSERT(txq->cq_ci != txq->cq_pi);
#ifdef RTE_LIBRTE_MLX5_DEBUG
		MLX5_ASSERT((txq->fcqs[txq->cq_ci & txq->cqe_m] >> 16) ==
			    cqe->wqe_counter);
#endif
		ring_doorbell = true;
		++txq->cq_ci;
		last_cqe = cqe;
		/*
		 * We have to restrict the amount of processed CQEs
		 * in one tx_burst routine call. The CQ may be large
		 * and many CQEs may be updated by the NIC in one
		 * transaction. Buffers freeing is time consuming,
		 * multiple iterations may introduce significant
		 * latency.
		 */
		if (likely(--count == 0))
			break;
	} while (true);
	if (likely(ring_doorbell)) {
		/* Ring doorbell to notify hardware. */
		rte_compiler_barrier();
		*txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
		mlx5_tx_comp_flush(txq, last_cqe, olx);
	}
}
/**
 * Check if the completion request flag should be set in the last WQE.
 * Both pushed mbufs and WQEs are monitored and the completion request
 * flag is set if either of the thresholds is reached.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param loc
 *   Pointer to burst routine local context.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 */
static __rte_always_inline void
mlx5_tx_request_completion(struct mlx5_txq_data *__rte_restrict txq,
			   struct mlx5_txq_local *__rte_restrict loc,
			   unsigned int olx)
{
	uint16_t head = txq->elts_head;
	unsigned int part;

	part = MLX5_TXOFF_CONFIG(INLINE) ?
	       0 : loc->pkts_sent - loc->pkts_copy;
	head += part;
	if ((uint16_t)(head - txq->elts_comp) >= MLX5_TX_COMP_THRESH ||
	    (MLX5_TXOFF_CONFIG(INLINE) &&
	     (uint16_t)(txq->wqe_ci - txq->wqe_comp) >= txq->wqe_thres)) {
		volatile struct mlx5_wqe *last = loc->wqe_last;

		MLX5_ASSERT(last);
		txq->elts_comp = head;
		if (MLX5_TXOFF_CONFIG(INLINE))
			txq->wqe_comp = txq->wqe_ci;
		/* Request unconditional completion on last WQE. */
		last->cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
					    MLX5_COMP_MODE_OFFSET);
		/* Save elts_head in dedicated free on completion queue. */
#ifdef RTE_LIBRTE_MLX5_DEBUG
		txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head |
			  (last->cseg.opcode >> 8) << 16;
#else
		txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head;
#endif
		/* A CQE slot must always be available. */
		MLX5_ASSERT((txq->cq_pi - txq->cq_ci) <= txq->cqe_s);
	}
}
/**
 * DPDK callback to check the status of a Tx descriptor.
 *
 * @param tx_queue
 *   The Tx queue.
 * @param[in] offset
 *   The index of the descriptor in the ring.
 *
 * @return
 *   The status of the Tx descriptor.
 */
int
mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
	struct mlx5_txq_data *__rte_restrict txq = tx_queue;
	uint16_t used;

	mlx5_tx_handle_completion(txq, 0);
	used = txq->elts_head - txq->elts_tail;
	if (offset < used)
		return RTE_ETH_TX_DESC_FULL;
	return RTE_ETH_TX_DESC_DONE;
}
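/*
 * Usage sketch (illustrative, from the application side): this callback
 * backs the generic ethdev API, e.g. polling whether the descriptor at
 * a given ring offset has already been transmitted:
 *
 *   if (rte_eth_tx_descriptor_status(port_id, queue_id, offset) ==
 *       RTE_ETH_TX_DESC_DONE)
 *           ...the slot is free, more packets can be enqueued...
 */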
/**
 * Build the Control Segment with specified opcode:
 * - MLX5_OPCODE_SEND
 * - MLX5_OPCODE_ENHANCED_MPSW
 * - MLX5_OPCODE_TSO
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param loc
 *   Pointer to burst routine local context.
 * @param wqe
 *   Pointer to WQE to fill with built Control Segment.
 * @param ds
 *   Supposed length of WQE in segments.
 * @param opcode
 *   SQ WQE opcode to put into Control Segment.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 */
static __rte_always_inline void
mlx5_tx_cseg_init(struct mlx5_txq_data *__rte_restrict txq,
		  struct mlx5_txq_local *__rte_restrict loc __rte_unused,
		  struct mlx5_wqe *__rte_restrict wqe,
		  unsigned int ds,
		  unsigned int opcode,
		  unsigned int olx __rte_unused)
{
	struct mlx5_wqe_cseg *__rte_restrict cs = &wqe->cseg;

	/* For legacy MPW replace the EMPW by TSO with modifier. */
	if (MLX5_TXOFF_CONFIG(MPW) && opcode == MLX5_OPCODE_ENHANCED_MPSW)
		opcode = MLX5_OPCODE_TSO | MLX5_OPC_MOD_MPW << 24;
	cs->opcode = rte_cpu_to_be_32((txq->wqe_ci << 8) | opcode);
	cs->sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
	cs->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
			     MLX5_COMP_MODE_OFFSET);
	cs->misc = RTE_BE32(0);
}
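/*
 * Worked example (illustrative): with wqe_ci == 5 and MLX5_OPCODE_SEND
 * the opcode dword above becomes rte_cpu_to_be_32((5 << 8) | opcode),
 * i.e. the low byte carries the opcode and the next bytes carry the WQE
 * index that hardware uses to track the producer position.
 */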
/**
 * Build the Synchronize Queue Segment with specified completion index.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param loc
 *   Pointer to burst routine local context.
 * @param wqe
 *   Pointer to WQE to fill with built Synchronize Queue Segment.
 * @param wci
 *   Completion index in Clock Queue to wait.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 */
static __rte_always_inline void
mlx5_tx_wseg_init(struct mlx5_txq_data *restrict txq,
		  struct mlx5_txq_local *restrict loc __rte_unused,
		  struct mlx5_wqe *restrict wqe,
		  unsigned int wci,
		  unsigned int olx __rte_unused)
{
	struct mlx5_wqe_qseg *qs;

	qs = RTE_PTR_ADD(wqe, MLX5_WSEG_SIZE);
	qs->max_index = rte_cpu_to_be_32(wci);
	qs->qpn_cqn = rte_cpu_to_be_32(txq->sh->txpp.clock_queue.cq_obj.cq->id);
	qs->reserved0 = RTE_BE32(0);
	qs->reserved1 = RTE_BE32(0);
}
/**
 * Build the Ethernet Segment without inlined data.
 * Supports Software Parser, Checksums and VLAN
 * insertion Tx offload features.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param loc
 *   Pointer to burst routine local context.
 * @param wqe
 *   Pointer to WQE to fill with built Ethernet Segment.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 */
static __rte_always_inline void
mlx5_tx_eseg_none(struct mlx5_txq_data *__rte_restrict txq __rte_unused,
		  struct mlx5_txq_local *__rte_restrict loc,
		  struct mlx5_wqe *__rte_restrict wqe,
		  unsigned int olx)
{
	struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
	uint32_t csum;

	/*
	 * Calculate and set check sum flags first, dword field
	 * in segment may be shared with Software Parser flags.
	 */
	csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
	es->flags = rte_cpu_to_le_32(csum);
	/*
	 * Calculate and set Software Parser offsets and flags.
	 * These flags are set for custom UDP and IP tunnel packets.
	 */
	es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
	/* Fill metadata field if needed. */
	es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
		       loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
		       *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
	/* Engage VLAN tag insertion feature if requested. */
	if (MLX5_TXOFF_CONFIG(VLAN) &&
	    loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
		/*
		 * We should get here only if the device supports
		 * this feature correctly.
		 */
		MLX5_ASSERT(txq->vlan_en);
		es->inline_hdr = rte_cpu_to_be_32(MLX5_ETH_WQE_VLAN_INSERT |
						  loc->mbuf->vlan_tci);
	} else {
		es->inline_hdr = RTE_BE32(0);
	}
}
/**
 * Build the Ethernet Segment with minimal inlined data
 * of MLX5_ESEG_MIN_INLINE_SIZE bytes length. This is
 * used to fill the gap in single WQEBB WQEs.
 * Supports Software Parser, Checksums and VLAN
 * insertion Tx offload features.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param loc
 *   Pointer to burst routine local context.
 * @param wqe
 *   Pointer to WQE to fill with built Ethernet Segment.
 * @param vlan
 *   Length of VLAN tag insertion if any.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 */
static __rte_always_inline void
mlx5_tx_eseg_dmin(struct mlx5_txq_data *__rte_restrict txq __rte_unused,
		  struct mlx5_txq_local *__rte_restrict loc,
		  struct mlx5_wqe *__rte_restrict wqe,
		  unsigned int vlan,
		  unsigned int olx)
{
	struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
	uint32_t csum;
	uint8_t *psrc, *pdst;

	/*
	 * Calculate and set check sum flags first, dword field
	 * in segment may be shared with Software Parser flags.
	 */
	csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
	es->flags = rte_cpu_to_le_32(csum);
	/*
	 * Calculate and set Software Parser offsets and flags.
	 * These flags are set for custom UDP and IP tunnel packets.
	 */
	es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
	/* Fill metadata field if needed. */
	es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
		       loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
		       *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
	psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
	es->inline_hdr_sz = RTE_BE16(MLX5_ESEG_MIN_INLINE_SIZE);
	es->inline_data = *(unaligned_uint16_t *)psrc;
	psrc += sizeof(uint16_t);
	pdst = (uint8_t *)(es + 1);
	if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
		/* Implement VLAN tag insertion as part of the inline data. */
		memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
		pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
		psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
		/* Insert VLAN ethertype + VLAN tag. */
		*(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
						((RTE_ETHER_TYPE_VLAN << 16) |
						 loc->mbuf->vlan_tci);
		pdst += sizeof(struct rte_vlan_hdr);
		/* Copy the remaining two bytes from packet data. */
		MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
		*(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
	} else {
		/* Fill the gap in the title WQEBB with inline data. */
		rte_mov16(pdst, psrc);
	}
}
/**
 * Build the Ethernet Segment with entire packet
 * data inlining. Checks the boundary of WQEBB and
 * ring buffer wrapping, supports Software Parser,
 * Checksums and VLAN insertion Tx offload features.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param loc
 *   Pointer to burst routine local context.
 * @param wqe
 *   Pointer to WQE to fill with built Ethernet Segment.
 * @param vlan
 *   Length of VLAN tag insertion if any.
 * @param inlen
 *   Length of data to inline (VLAN included, if any).
 * @param tso
 *   TSO flag, set mss field from the packet.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 *
 * @return
 *   Pointer to the next Data Segment (aligned and wrapped around).
 */
static __rte_always_inline struct mlx5_wqe_dseg *
mlx5_tx_eseg_data(struct mlx5_txq_data *__rte_restrict txq,
		  struct mlx5_txq_local *__rte_restrict loc,
		  struct mlx5_wqe *__rte_restrict wqe,
		  unsigned int vlan,
		  unsigned int inlen,
		  unsigned int tso,
		  unsigned int olx)
{
	struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
	uint32_t csum;
	uint8_t *psrc, *pdst;
	unsigned int part;

	/*
	 * Calculate and set check sum flags first, dword field
	 * in segment may be shared with Software Parser flags.
	 */
	csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
	if (tso) {
		csum <<= 24;
		csum |= loc->mbuf->tso_segsz;
		es->flags = rte_cpu_to_be_32(csum);
	} else {
		es->flags = rte_cpu_to_le_32(csum);
	}
	/*
	 * Calculate and set Software Parser offsets and flags.
	 * These flags are set for custom UDP and IP tunnel packets.
	 */
	es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
	/* Fill metadata field if needed. */
	es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
		       loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
		       *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
	psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
	es->inline_hdr_sz = rte_cpu_to_be_16(inlen);
	es->inline_data = *(unaligned_uint16_t *)psrc;
	psrc += sizeof(uint16_t);
	pdst = (uint8_t *)(es + 1);
	if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
		/* Implement VLAN tag insertion as part of the inline data. */
		memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
		pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
		psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
		/* Insert VLAN ethertype + VLAN tag. */
		*(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
						((RTE_ETHER_TYPE_VLAN << 16) |
						 loc->mbuf->vlan_tci);
		pdst += sizeof(struct rte_vlan_hdr);
		/* Copy the remaining two bytes from packet data. */
		MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
		*(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
		psrc += sizeof(uint16_t);
	} else {
		/* Fill the gap in the title WQEBB with inline data. */
		rte_mov16(pdst, psrc);
		psrc += sizeof(rte_v128u32_t);
	}
	pdst = (uint8_t *)(es + 2);
	MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
	MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
	inlen -= MLX5_ESEG_MIN_INLINE_SIZE;
	if (!inlen) {
		MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
		return (struct mlx5_wqe_dseg *)pdst;
	}
	/*
	 * The WQEBB space availability is checked by caller.
	 * Here we should be aware of WQE ring buffer wraparound only.
	 */
	part = (uint8_t *)txq->wqes_end - pdst;
	part = RTE_MIN(part, inlen);
	do {
		rte_memcpy(pdst, psrc, part);
		inlen -= part;
		if (likely(!inlen)) {
			/*
			 * If return value is not used by the caller
			 * the code below will be optimized out.
			 */
			pdst += part;
			pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
			if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
				pdst = (uint8_t *)txq->wqes;
			return (struct mlx5_wqe_dseg *)pdst;
		}
		pdst = (uint8_t *)txq->wqes;
		psrc += part;
		part = inlen;
	} while (true);
}
/**
 * Copy data from a chain of mbufs to the specified linear buffer.
 * If the data from some mbuf is copied completely, this mbuf is freed.
 * The local structure is used to keep the byte stream state.
 *
 * @param pdst
 *   Pointer to the destination linear buffer.
 * @param loc
 *   Pointer to burst routine local context.
 * @param len
 *   Length of data to be copied.
 * @param must
 *   Length of data to be copied ignoring the no inline hint.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 *
 * @return
 *   Number of actually copied data bytes. This is always greater than or
 *   equal to the must parameter and might be lesser than len if the no
 *   inline hint flag is encountered.
 */
static __rte_always_inline unsigned int
mlx5_tx_mseg_memcpy(uint8_t *pdst,
		    struct mlx5_txq_local *__rte_restrict loc,
		    unsigned int len,
		    unsigned int must,
		    unsigned int olx __rte_unused)
{
	struct rte_mbuf *mbuf;
	unsigned int part, dlen, copy = 0;
	uint8_t *psrc;

	MLX5_ASSERT(len);
	MLX5_ASSERT(must <= len);
	do {
		/* Allow zero length packets, must check first. */
		dlen = rte_pktmbuf_data_len(loc->mbuf);
		if (dlen <= loc->mbuf_off) {
			/* Exhausted packet, just free. */
			mbuf = loc->mbuf;
			loc->mbuf = mbuf->next;
			rte_pktmbuf_free_seg(mbuf);
			loc->mbuf_off = 0;
			MLX5_ASSERT(loc->mbuf_nseg > 1);
			MLX5_ASSERT(loc->mbuf);
			--loc->mbuf_nseg;
			if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
				unsigned int diff;

				if (copy >= must) {
					/*
					 * We already copied the minimal
					 * requested amount of data.
					 */
					return copy;
				}
				diff = must - copy;
				if (diff <= rte_pktmbuf_data_len(loc->mbuf)) {
					/*
					 * Copy only the minimal required
					 * part of the data buffer.
					 */
					len = diff;
				}
			}
			continue;
		}
		dlen -= loc->mbuf_off;
		psrc = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
					       loc->mbuf_off);
		part = RTE_MIN(len, dlen);
		rte_memcpy(pdst, psrc, part);
		copy += part;
		loc->mbuf_off += part;
		len -= part;
		if (!len) {
			if (loc->mbuf_off >= rte_pktmbuf_data_len(loc->mbuf)) {
				loc->mbuf_off = 0;
				/* Exhausted packet, just free. */
				mbuf = loc->mbuf;
				loc->mbuf = mbuf->next;
				rte_pktmbuf_free_seg(mbuf);
				MLX5_ASSERT(loc->mbuf_nseg >= 1);
				--loc->mbuf_nseg;
			}
			return copy;
		}
		pdst += part;
	} while (true);
}
/**
 * Build the Ethernet Segment with inlined data from a
 * multi-segment packet. Checks the boundary of WQEBB
 * and ring buffer wrapping, supports Software Parser,
 * Checksums and VLAN insertion Tx offload features.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param loc
 *   Pointer to burst routine local context.
 * @param wqe
 *   Pointer to WQE to fill with built Ethernet Segment.
 * @param vlan
 *   Length of VLAN tag insertion if any.
 * @param inlen
 *   Length of data to inline (VLAN included, if any).
 * @param tso
 *   TSO flag, set mss field from the packet.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 *
 * @return
 *   Pointer to the next Data Segment (aligned and
 *   possibly NOT wrapped around - caller should do
 *   wrapping check on its own).
 */
static __rte_always_inline struct mlx5_wqe_dseg *
mlx5_tx_eseg_mdat(struct mlx5_txq_data *__rte_restrict txq,
		  struct mlx5_txq_local *__rte_restrict loc,
		  struct mlx5_wqe *__rte_restrict wqe,
		  unsigned int vlan,
		  unsigned int inlen,
		  unsigned int tso,
		  unsigned int olx)
{
	struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
	uint32_t csum;
	uint8_t *pdst;
	unsigned int part, tlen = 0;

	/*
	 * Calculate and set check sum flags first, uint32_t field
	 * in segment may be shared with Software Parser flags.
	 */
	csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
	if (tso) {
		csum <<= 24;
		csum |= loc->mbuf->tso_segsz;
		es->flags = rte_cpu_to_be_32(csum);
	} else {
		es->flags = rte_cpu_to_le_32(csum);
	}
	/*
	 * Calculate and set Software Parser offsets and flags.
	 * These flags are set for custom UDP and IP tunnel packets.
	 */
	es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
	/* Fill metadata field if needed. */
	es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
		       loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
		       *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
	MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
	pdst = (uint8_t *)&es->inline_data;
	if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
		/* Implement VLAN tag insertion as part of the inline data. */
		mlx5_tx_mseg_memcpy(pdst, loc,
				    2 * RTE_ETHER_ADDR_LEN,
				    2 * RTE_ETHER_ADDR_LEN, olx);
		pdst += 2 * RTE_ETHER_ADDR_LEN;
		*(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
						((RTE_ETHER_TYPE_VLAN << 16) |
						 loc->mbuf->vlan_tci);
		pdst += sizeof(struct rte_vlan_hdr);
		tlen += 2 * RTE_ETHER_ADDR_LEN + sizeof(struct rte_vlan_hdr);
	}
	MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
	/*
	 * The WQEBB space availability is checked by caller.
	 * Here we should be aware of WQE ring buffer wraparound only.
	 */
	part = (uint8_t *)txq->wqes_end - pdst;
	part = RTE_MIN(part, inlen - tlen);
	MLX5_ASSERT(part);
	do {
		unsigned int copy;

		/*
		 * Copying may be interrupted inside the routine
		 * if run into no inline hint flag.
		 */
		copy = tlen >= txq->inlen_mode ? 0 : (txq->inlen_mode - tlen);
		copy = mlx5_tx_mseg_memcpy(pdst, loc, part, copy, olx);
		tlen += copy;
		if (likely(inlen <= tlen) || copy < part) {
			es->inline_hdr_sz = rte_cpu_to_be_16(tlen);
			pdst += copy;
			pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
			return (struct mlx5_wqe_dseg *)pdst;
		}
		pdst = (uint8_t *)txq->wqes;
		part = inlen - tlen;
	} while (true);
}
/**
 * Build the Data Segment of pointer type.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param loc
 *   Pointer to burst routine local context.
 * @param dseg
 *   Pointer to WQE to fill with built Data Segment.
 * @param buf
 *   Data buffer to point.
 * @param len
 *   Data buffer length.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 */
static __rte_always_inline void
mlx5_tx_dseg_ptr(struct mlx5_txq_data *__rte_restrict txq,
		 struct mlx5_txq_local *__rte_restrict loc,
		 struct mlx5_wqe_dseg *__rte_restrict dseg,
		 uint8_t *buf,
		 unsigned int len,
		 unsigned int olx __rte_unused)
{
	MLX5_ASSERT(len);
	dseg->bcount = rte_cpu_to_be_32(len);
	dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
	dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
}
/**
 * Build the Data Segment of pointer type, or inline the data
 * if its length does not exceed the minimal Data Segment size.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param loc
 *   Pointer to burst routine local context.
 * @param dseg
 *   Pointer to WQE to fill with built Data Segment.
 * @param buf
 *   Data buffer to point.
 * @param len
 *   Data buffer length.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 */
static __rte_always_inline void
mlx5_tx_dseg_iptr(struct mlx5_txq_data *__rte_restrict txq,
		  struct mlx5_txq_local *__rte_restrict loc,
		  struct mlx5_wqe_dseg *__rte_restrict dseg,
		  uint8_t *buf,
		  unsigned int len,
		  unsigned int olx __rte_unused)
{
	uintptr_t dst, src;

	MLX5_ASSERT(len);
	if (len > MLX5_DSEG_MIN_INLINE_SIZE) {
		dseg->bcount = rte_cpu_to_be_32(len);
		dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
		dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
		return;
	}
	dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
	/* Unrolled implementation of generic rte_memcpy. */
	dst = (uintptr_t)&dseg->inline_data[0];
	src = (uintptr_t)buf;
	if (len & 0x08) {
#ifdef RTE_ARCH_STRICT_ALIGN
		MLX5_ASSERT(dst == RTE_PTR_ALIGN(dst, sizeof(uint32_t)));
		*(uint32_t *)dst = *(unaligned_uint32_t *)src;
		dst += sizeof(uint32_t);
		src += sizeof(uint32_t);
		*(uint32_t *)dst = *(unaligned_uint32_t *)src;
		dst += sizeof(uint32_t);
		src += sizeof(uint32_t);
#else
		*(uint64_t *)dst = *(unaligned_uint64_t *)src;
		dst += sizeof(uint64_t);
		src += sizeof(uint64_t);
#endif
	}
	if (len & 0x04) {
		*(uint32_t *)dst = *(unaligned_uint32_t *)src;
		dst += sizeof(uint32_t);
		src += sizeof(uint32_t);
	}
	if (len & 0x02) {
		*(uint16_t *)dst = *(unaligned_uint16_t *)src;
		dst += sizeof(uint16_t);
		src += sizeof(uint16_t);
	}
	if (len & 0x01)
		*(uint8_t *)dst = *(uint8_t *)src;
}
/**
 * Build the Data Segment of inlined data from a single
 * segment packet, no VLAN insertion.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param loc
 *   Pointer to burst routine local context.
 * @param dseg
 *   Pointer to WQE to fill with built Data Segment.
 * @param buf
 *   Data buffer to point.
 * @param len
 *   Data buffer length.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 *
 * @return
 *   Pointer to the next Data Segment after inlined data.
 *   Ring buffer wraparound check is needed. We do not
 *   do it here because it may not be needed for the
 *   last packet in the eMPW session.
 */
static __rte_always_inline struct mlx5_wqe_dseg *
mlx5_tx_dseg_empw(struct mlx5_txq_data *__rte_restrict txq,
		  struct mlx5_txq_local *__rte_restrict loc __rte_unused,
		  struct mlx5_wqe_dseg *__rte_restrict dseg,
		  uint8_t *buf,
		  unsigned int len,
		  unsigned int olx __rte_unused)
{
	unsigned int part;
	uint8_t *pdst;

	if (!MLX5_TXOFF_CONFIG(MPW)) {
		/* Store the descriptor byte counter for eMPW sessions. */
		dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
		pdst = &dseg->inline_data[0];
	} else {
		/* The entire legacy MPW session counter is stored on close. */
		pdst = (uint8_t *)dseg;
	}
	/*
	 * The WQEBB space availability is checked by caller.
	 * Here we should be aware of WQE ring buffer wraparound only.
	 */
	part = (uint8_t *)txq->wqes_end - pdst;
	part = RTE_MIN(part, len);
	do {
		rte_memcpy(pdst, buf, part);
		len -= part;
		if (likely(!len)) {
			pdst += part;
			if (!MLX5_TXOFF_CONFIG(MPW))
				pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
			/* Note: no final wraparound check here. */
			return (struct mlx5_wqe_dseg *)pdst;
		}
		pdst = (uint8_t *)txq->wqes;
		buf += part;
		part = len;
	} while (true);
}
/**
 * Build the Data Segment of inlined data from a single
 * segment packet with VLAN insertion.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param loc
 *   Pointer to burst routine local context.
 * @param dseg
 *   Pointer to the dseg to fill with built Data Segment.
 * @param buf
 *   Data buffer to point.
 * @param len
 *   Data buffer length.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 *
 * @return
 *   Pointer to the next Data Segment after inlined data.
 *   Ring buffer wraparound check is needed.
 */
static __rte_always_inline struct mlx5_wqe_dseg *
mlx5_tx_dseg_vlan(struct mlx5_txq_data *__rte_restrict txq,
		  struct mlx5_txq_local *__rte_restrict loc __rte_unused,
		  struct mlx5_wqe_dseg *__rte_restrict dseg,
		  uint8_t *buf,
		  unsigned int len,
		  unsigned int olx __rte_unused)
{
	unsigned int part;
	uint8_t *pdst;

	MLX5_ASSERT(len > MLX5_ESEG_MIN_INLINE_SIZE);
	if (!MLX5_TXOFF_CONFIG(MPW)) {
		/* Store the descriptor byte counter for eMPW sessions. */
		dseg->bcount = rte_cpu_to_be_32
				((len + sizeof(struct rte_vlan_hdr)) |
				 MLX5_ETH_WQE_DATA_INLINE);
		pdst = &dseg->inline_data[0];
	} else {
		/* The entire legacy MPW session counter is stored on close. */
		pdst = (uint8_t *)dseg;
	}
	memcpy(pdst, buf, MLX5_DSEG_MIN_INLINE_SIZE);
	buf += MLX5_DSEG_MIN_INLINE_SIZE;
	pdst += MLX5_DSEG_MIN_INLINE_SIZE;
	len -= MLX5_DSEG_MIN_INLINE_SIZE;
	/* Insert VLAN ethertype + VLAN tag. Pointer is aligned. */
	MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
	if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
		pdst = (uint8_t *)txq->wqes;
	*(uint32_t *)pdst = rte_cpu_to_be_32((RTE_ETHER_TYPE_VLAN << 16) |
					     loc->mbuf->vlan_tci);
	pdst += sizeof(struct rte_vlan_hdr);
	/*
	 * The WQEBB space availability is checked by caller.
	 * Here we should be aware of WQE ring buffer wraparound only.
	 */
	part = (uint8_t *)txq->wqes_end - pdst;
	part = RTE_MIN(part, len);
	do {
		rte_memcpy(pdst, buf, part);
		len -= part;
		if (likely(!len)) {
			pdst += part;
			if (!MLX5_TXOFF_CONFIG(MPW))
				pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
			/* Note: no final wraparound check here. */
			return (struct mlx5_wqe_dseg *)pdst;
		}
		pdst = (uint8_t *)txq->wqes;
		buf += part;
		part = len;
	} while (true);
}
/**
 * Build the Ethernet Segment with optionally inlined data with
 * VLAN insertion and following Data Segments (if any) from a
 * multi-segment packet. Used by ordinary send and TSO.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param loc
 *   Pointer to burst routine local context.
 * @param wqe
 *   Pointer to WQE to fill with built Ethernet/Data Segments.
 * @param vlan
 *   Length of VLAN header to insert, 0 means no VLAN insertion.
 * @param inlen
 *   Data length to inline. For TSO this parameter specifies the
 *   exact value, for ordinary send it can be aligned by the caller
 *   to provide better WQE space saving and data buffer start address
 *   alignment. This length includes the VLAN header being inserted.
 * @param tso
 *   Zero means ordinary send, inlined data can be extended,
 *   otherwise this is TSO, inlined data length is fixed.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 *
 * @return
 *   Actual size of built WQE in segments.
 */
static __rte_always_inline unsigned int
mlx5_tx_mseg_build(struct mlx5_txq_data *__rte_restrict txq,
		   struct mlx5_txq_local *__rte_restrict loc,
		   struct mlx5_wqe *__rte_restrict wqe,
		   unsigned int vlan,
		   unsigned int inlen,
		   unsigned int tso,
		   unsigned int olx __rte_unused)
{
	struct mlx5_wqe_dseg *__rte_restrict dseg;
	unsigned int ds;

	MLX5_ASSERT((rte_pktmbuf_pkt_len(loc->mbuf) + vlan) >= inlen);
	loc->mbuf_nseg = NB_SEGS(loc->mbuf);
	loc->mbuf_off = 0;

	dseg = mlx5_tx_eseg_mdat(txq, loc, wqe, vlan, inlen, tso, olx);
	if (!loc->mbuf_nseg)
		goto dseg_done;
	/*
	 * There are still some mbuf remaining, not inlined.
	 * The first mbuf may be partially inlined and we
	 * must process the possible non-zero data offset.
	 */
	if (loc->mbuf_off) {
		unsigned int dlen;
		uint8_t *dptr;

		/*
		 * Exhausted packets must be dropped before.
		 * Non-zero offset means there are some data
		 * remained in the packet.
		 */
		MLX5_ASSERT(loc->mbuf_off < rte_pktmbuf_data_len(loc->mbuf));
		MLX5_ASSERT(rte_pktmbuf_data_len(loc->mbuf));
		dptr = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
					       loc->mbuf_off);
		dlen = rte_pktmbuf_data_len(loc->mbuf) - loc->mbuf_off;
		/*
		 * Build the pointer/minimal Data Segment.
		 * Do ring buffer wrapping check in advance.
		 */
		if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
			dseg = (struct mlx5_wqe_dseg *)txq->wqes;
		mlx5_tx_dseg_iptr(txq, loc, dseg, dptr, dlen, olx);
		/* Store the mbuf to be freed on completion. */
		MLX5_ASSERT(loc->elts_free);
		txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
		--loc->elts_free;
		++dseg;
		if (--loc->mbuf_nseg == 0)
			goto dseg_done;
		loc->mbuf = loc->mbuf->next;
		loc->mbuf_off = 0;
	}
	do {
		if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
			struct rte_mbuf *mbuf;

			/* Zero length segment found, just skip. */
			mbuf = loc->mbuf;
			loc->mbuf = loc->mbuf->next;
			rte_pktmbuf_free_seg(mbuf);
			if (--loc->mbuf_nseg == 0)
				break;
		} else {
			if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
				dseg = (struct mlx5_wqe_dseg *)txq->wqes;
			mlx5_tx_dseg_iptr
				(txq, loc, dseg,
				 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
				 rte_pktmbuf_data_len(loc->mbuf), olx);
			MLX5_ASSERT(loc->elts_free);
			txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
			--loc->elts_free;
			++dseg;
			if (--loc->mbuf_nseg == 0)
				break;
			loc->mbuf = loc->mbuf->next;
		}
	} while (true);

dseg_done:
	/* Calculate actual segments used from the dseg pointer. */
	if ((uintptr_t)wqe < (uintptr_t)dseg)
		ds = ((uintptr_t)dseg - (uintptr_t)wqe) / MLX5_WSEG_SIZE;
	else
		ds = (((uintptr_t)dseg - (uintptr_t)wqe) +
		      txq->wqe_s * MLX5_WQE_SIZE) / MLX5_WSEG_SIZE;
	return ds;
}
/**
 * The routine checks the timestamp flag in the current packet,
 * and pushes a WAIT WQE into the queue if scheduling is required.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param loc
 *   Pointer to burst routine local context.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 *
 * @return
 *   MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
 *   MLX5_TXCMP_CODE_SINGLE - continue processing with the packet.
 *   MLX5_TXCMP_CODE_MULTI - the WAIT inserted, continue processing.
 * Local context variables partially updated.
 */
static __rte_always_inline enum mlx5_txcmp_code
mlx5_tx_schedule_send(struct mlx5_txq_data *restrict txq,
		      struct mlx5_txq_local *restrict loc,
		      unsigned int olx)
{
	if (MLX5_TXOFF_CONFIG(TXPP) &&
	    loc->mbuf->ol_flags & txq->ts_mask) {
		struct mlx5_wqe *wqe;
		uint64_t ts;
		int32_t wci;

		/*
		 * Estimate the required space quickly and roughly.
		 * We would like to ensure the packet can be pushed
		 * to the queue and we won't get the orphan WAIT WQE.
		 */
		if (loc->wqe_free <= MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE ||
		    loc->elts_free < NB_SEGS(loc->mbuf))
			return MLX5_TXCMP_CODE_EXIT;
		/* Convert the timestamp into completion to wait. */
		ts = *RTE_MBUF_DYNFIELD(loc->mbuf, txq->ts_offset, uint64_t *);
		wci = mlx5_txpp_convert_tx_ts(txq->sh, ts);
		if (unlikely(wci < 0))
			return MLX5_TXCMP_CODE_SINGLE;
		/* Build the WAIT WQE with specified completion. */
		wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
		mlx5_tx_cseg_init(txq, loc, wqe, 2, MLX5_OPCODE_WAIT, olx);
		mlx5_tx_wseg_init(txq, loc, wqe, wci, olx);
		++txq->wqe_ci;
		--loc->wqe_free;
		return MLX5_TXCMP_CODE_MULTI;
	}
	return MLX5_TXCMP_CODE_SINGLE;
}
/**
 * Tx one packet function for multi-segment TSO. Supports all
 * types of Tx offloads, uses MLX5_OPCODE_TSO to build WQEs,
 * sends one packet per WQE.
 *
 * This routine is responsible for storing the processed mbuf
 * into the elts ring buffer and for updating elts_head.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param loc
 *   Pointer to burst routine local context.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 *
 * @return
 *   MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
 *   MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
 * Local context variables partially updated.
 */
static __rte_always_inline enum mlx5_txcmp_code
mlx5_tx_packet_multi_tso(struct mlx5_txq_data *__rte_restrict txq,
			 struct mlx5_txq_local *__rte_restrict loc,
			 unsigned int olx)
{
	struct mlx5_wqe *__rte_restrict wqe;
	unsigned int ds, dlen, inlen, ntcp, vlan = 0;

	if (MLX5_TXOFF_CONFIG(TXPP)) {
		enum mlx5_txcmp_code wret;

		/* Generate WAIT for scheduling if requested. */
		wret = mlx5_tx_schedule_send(txq, loc, olx);
		if (wret == MLX5_TXCMP_CODE_EXIT)
			return MLX5_TXCMP_CODE_EXIT;
		if (wret == MLX5_TXCMP_CODE_ERROR)
			return MLX5_TXCMP_CODE_ERROR;
	}
	/*
	 * Calculate data length to be inlined to estimate
	 * the required space in WQE ring buffer.
	 */
	dlen = rte_pktmbuf_pkt_len(loc->mbuf);
	if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
		vlan = sizeof(struct rte_vlan_hdr);
	inlen = loc->mbuf->l2_len + vlan +
		loc->mbuf->l3_len + loc->mbuf->l4_len;
	if (unlikely((!inlen || !loc->mbuf->tso_segsz)))
		return MLX5_TXCMP_CODE_ERROR;
	if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
		inlen += loc->mbuf->outer_l2_len + loc->mbuf->outer_l3_len;
	/* Packet must contain all TSO headers. */
	if (unlikely(inlen > MLX5_MAX_TSO_HEADER ||
		     inlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
		     inlen > (dlen + vlan)))
		return MLX5_TXCMP_CODE_ERROR;
	MLX5_ASSERT(inlen >= txq->inlen_mode);
2111 * Check whether there are enough free WQEBBs:
2113 * - Ethernet Segment
2114 * - First Segment of inlined Ethernet data
2115 * - ... data continued ...
2116 * - Data Segments of pointer/min inline type
2118 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
2119 MLX5_ESEG_MIN_INLINE_SIZE +
2121 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
2122 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
2123 return MLX5_TXCMP_CODE_EXIT;
2124 /* Check for maximal WQE size. */
2125 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
2126 return MLX5_TXCMP_CODE_ERROR;
2127 #ifdef MLX5_PMD_SOFT_COUNTERS
2128 /* Update sent data bytes/packets counters. */
2129 ntcp = (dlen - (inlen - vlan) + loc->mbuf->tso_segsz - 1) /
2130 loc->mbuf->tso_segsz;
2132 * One packet is added for the mbuf itself
2133 * at the end of mlx5_tx_burst from the
2134 * loc->pkts_sent field.
2137 txq->stats.opackets += ntcp;
2138 txq->stats.obytes += dlen + vlan + ntcp * inlen;
2140 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
2141 loc->wqe_last = wqe;
2142 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_TSO, olx);
2143 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 1, olx);
2144 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
2145 txq->wqe_ci += (ds + 3) / 4;
2146 loc->wqe_free -= (ds + 3) / 4;
2147 return MLX5_TXCMP_CODE_MULTI;
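/*
 * An illustrative sketch (not part of the driver): the two estimates made
 * above. A TSO WQE needs the Control and Ethernet Segments (+2), one Data
 * Segment per mbuf, plus extra segments for the inlined headers beyond
 * the minimal inline part; WQEBBs are the DS count rounded up to fours.
 * The helpers assume inlen > MLX5_ESEG_MIN_INLINE_SIZE and mss != 0,
 * which the routine has already validated.
 */
static inline unsigned int
example_tso_wqebb_count(unsigned int nb_segs, unsigned int inlen)
{
	unsigned int ds = nb_segs + 2 +
			  (inlen - MLX5_ESEG_MIN_INLINE_SIZE +
			   MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;

	return (ds + 3) / 4; /* Four 16B segments per 64B WQEBB. */
}

static inline unsigned int
example_tso_packet_count(unsigned int dlen, unsigned int hdr, unsigned int mss)
{
	return (dlen - hdr + mss - 1) / mss; /* Payload / MSS, rounded up. */
}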
2151 * Tx one packet function for multi-segment SEND. Supports all
2152 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
2153 * sends one packet per WQE, without any data inlining in the Ethernet Segment.
2156 * This routine is responsible for storing the processed mbuf
2157 * into the elts ring buffer and updating elts_head.
2160 * Pointer to TX queue structure.
2162 * Pointer to burst routine local context.
2164 * Configured Tx offloads mask. It is fully defined at
2165 * compile time and may be used for optimization.
2168 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
2169 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
2170 * Local context variables partially updated.
2172 static __rte_always_inline enum mlx5_txcmp_code
2173 mlx5_tx_packet_multi_send(struct mlx5_txq_data *__rte_restrict txq,
2174 struct mlx5_txq_local *__rte_restrict loc,
2177 struct mlx5_wqe_dseg *__rte_restrict dseg;
2178 struct mlx5_wqe *__rte_restrict wqe;
2179 unsigned int ds, nseg;
2181 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
2182 if (MLX5_TXOFF_CONFIG(TXPP)) {
2183 enum mlx5_txcmp_code wret;
2185 /* Generate WAIT for scheduling if requested. */
2186 wret = mlx5_tx_schedule_send(txq, loc, olx);
2187 if (wret == MLX5_TXCMP_CODE_EXIT)
2188 return MLX5_TXCMP_CODE_EXIT;
2189 if (wret == MLX5_TXCMP_CODE_ERROR)
2190 return MLX5_TXCMP_CODE_ERROR;
2193 * No inlining at all - CPU cycle saving was
2194 * prioritized at configuration time, so we should not
2195 * copy any packet data into the WQE.
2197 nseg = NB_SEGS(loc->mbuf);
2199 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
2200 return MLX5_TXCMP_CODE_EXIT;
2201 /* Check for maximal WQE size. */
2202 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
2203 return MLX5_TXCMP_CODE_ERROR;
2205 * Some Tx offloads may cause an error if the
2206 * packet is not long enough; check against the
2207 * assumed minimal length.
2209 if (rte_pktmbuf_pkt_len(loc->mbuf) <= MLX5_ESEG_MIN_INLINE_SIZE)
2210 return MLX5_TXCMP_CODE_ERROR;
2211 #ifdef MLX5_PMD_SOFT_COUNTERS
2212 /* Update sent data bytes counter. */
2213 txq->stats.obytes += rte_pktmbuf_pkt_len(loc->mbuf);
2214 if (MLX5_TXOFF_CONFIG(VLAN) &&
2215 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
2216 txq->stats.obytes += sizeof(struct rte_vlan_hdr);
2219 * SEND WQE, one WQEBB:
2220 * - Control Segment, SEND opcode
2221 * - Ethernet Segment, optional VLAN, no inline
2222 * - Data Segments, pointer only type
2224 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
2225 loc->wqe_last = wqe;
2226 mlx5_tx_cseg_init(txq, loc, wqe, ds, MLX5_OPCODE_SEND, olx);
2227 mlx5_tx_eseg_none(txq, loc, wqe, olx);
2228 dseg = &wqe->dseg[0];
2230 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
2231 struct rte_mbuf *mbuf;
2234 * Zero length segment found, we have to
2235 * correct the total size of the WQE in segments.
2236 * It is supposed to be a rare occasion, so
2237 * in the normal case (no zero length segments)
2238 * we avoid extra writes to the Control Segment.
2242 wqe->cseg.sq_ds -= RTE_BE32(1);
2244 loc->mbuf = mbuf->next;
2245 rte_pktmbuf_free_seg(mbuf);
2251 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
2252 rte_pktmbuf_data_len(loc->mbuf), olx);
2253 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
2258 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
2259 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
2260 loc->mbuf = loc->mbuf->next;
2263 txq->wqe_ci += (ds + 3) / 4;
2264 loc->wqe_free -= (ds + 3) / 4;
2265 return MLX5_TXCMP_CODE_MULTI;
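/*
 * An illustrative sketch (not part of the driver): storing an mbuf into a
 * power-of-two ring, as done above with txq->elts. The head is a
 * free-running counter masked on every access, so no explicit wraparound
 * handling is needed; the caller must ensure a free slot exists.
 */
static inline void
example_elts_store(struct rte_mbuf **elts, uint16_t *head, uint16_t mask,
		   struct rte_mbuf *mb)
{
	elts[(*head)++ & mask] = mb;
}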
2269 * Tx one packet function for multi-segment SEND. Supports all
2270 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
2271 * sends one packet per WQE, with data inlining in
2272 * Ethernet Segment and minimal Data Segments.
2274 * This routine is responsible for storing the processed mbuf
2275 * into the elts ring buffer and updating elts_head.
2278 * Pointer to TX queue structure.
2280 * Pointer to burst routine local context.
2282 * Configured Tx offloads mask. It is fully defined at
2283 * compile time and may be used for optimization.
2286 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
2287 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
2288 * Local context variables partially updated.
2290 static __rte_always_inline enum mlx5_txcmp_code
2291 mlx5_tx_packet_multi_inline(struct mlx5_txq_data *__rte_restrict txq,
2292 struct mlx5_txq_local *__rte_restrict loc,
2295 struct mlx5_wqe *__rte_restrict wqe;
2296 unsigned int ds, inlen, dlen, vlan = 0;
2298 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
2299 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
2300 if (MLX5_TXOFF_CONFIG(TXPP)) {
2301 enum mlx5_txcmp_code wret;
2303 /* Generate WAIT for scheduling if requested. */
2304 wret = mlx5_tx_schedule_send(txq, loc, olx);
2305 if (wret == MLX5_TXCMP_CODE_EXIT)
2306 return MLX5_TXCMP_CODE_EXIT;
2307 if (wret == MLX5_TXCMP_CODE_ERROR)
2308 return MLX5_TXCMP_CODE_ERROR;
2311 * First calculate data length to be inlined
2312 * to estimate the required space for WQE.
2314 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
2315 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
2316 vlan = sizeof(struct rte_vlan_hdr);
2317 inlen = dlen + vlan;
2318 /* Check against minimal length. */
2319 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
2320 return MLX5_TXCMP_CODE_ERROR;
2321 MLX5_ASSERT(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
2322 if (inlen > txq->inlen_send ||
2323 loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
2324 struct rte_mbuf *mbuf;
2329 * Packet length exceeds the allowed inline
2330 * data length, check whether the minimal
2331 * inlining is required.
2333 if (txq->inlen_mode) {
2334 MLX5_ASSERT(txq->inlen_mode >=
2335 MLX5_ESEG_MIN_INLINE_SIZE);
2336 MLX5_ASSERT(txq->inlen_mode <= txq->inlen_send);
2337 inlen = txq->inlen_mode;
2339 if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE ||
2340 !vlan || txq->vlan_en) {
2342 * VLAN insertion will be done by the HW.
2343 * It is not the most efficient way - the VLAN
2344 * flag is checked twice - but we must compute the
2345 * inlining length correctly and take into
2346 * account the VLAN header being inserted.
2348 return mlx5_tx_packet_multi_send
2351 inlen = MLX5_ESEG_MIN_INLINE_SIZE;
2354 * Now we know the minimal amount of data requested
2355 * to be inlined. Check whether we should inline the buffers
2356 * from the beginning of the chain to eliminate some mbufs.
2359 nxlen = rte_pktmbuf_data_len(mbuf);
2360 if (unlikely(nxlen <= txq->inlen_send)) {
2361 /* We can inline at least the first mbuf. */
2362 if (nxlen < inlen) {
2365 /* Scan mbufs till inlen filled. */
2370 nxlen = rte_pktmbuf_data_len(mbuf);
2372 } while (unlikely(nxlen < inlen));
2373 if (unlikely(nxlen > txq->inlen_send)) {
2374 /* We cannot inline the entire mbuf. */
2375 smlen = inlen - smlen;
2376 start = rte_pktmbuf_mtod_offset
2377 (mbuf, uintptr_t, smlen);
2384 /* We must not reach the end of the packet here. */
2386 nxlen = inlen + rte_pktmbuf_data_len(mbuf);
2387 } while (unlikely(nxlen < txq->inlen_send));
2389 start = rte_pktmbuf_mtod(mbuf, uintptr_t);
2391 * Check whether we can inline more data to align the start
2392 * address of the data buffer to a cache line.
2395 start = (~start + 1) & (RTE_CACHE_LINE_SIZE - 1);
2396 if (unlikely(start)) {
2398 if (start <= txq->inlen_send)
2403 * Check whether there are enough free WQEBBs:
2405 * - Ethernet Segment
2406 * - First Segment of inlined Ethernet data
2407 * - ... data continued ...
2408 * - Data Segments of pointer/min inline type
2410 * Estimate the number of Data Segments conservatively,
2411 * assuming no mbufs are freed during inlining.
2413 MLX5_ASSERT(inlen <= txq->inlen_send);
2414 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
2415 MLX5_ESEG_MIN_INLINE_SIZE +
2417 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
2418 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
2419 return MLX5_TXCMP_CODE_EXIT;
2420 /* Check for maximal WQE size. */
2421 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
2422 return MLX5_TXCMP_CODE_ERROR;
2423 #ifdef MLX5_PMD_SOFT_COUNTERS
2424 /* Update sent data bytes/packets counters. */
2425 txq->stats.obytes += dlen + vlan;
2427 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
2428 loc->wqe_last = wqe;
2429 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_SEND, olx);
2430 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 0, olx);
2431 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
2432 txq->wqe_ci += (ds + 3) / 4;
2433 loc->wqe_free -= (ds + 3) / 4;
2434 return MLX5_TXCMP_CODE_MULTI;
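/*
 * An illustrative sketch (not part of the driver): the alignment trick
 * used above. (~addr + 1) is -addr in two's complement, so masking it
 * with (align - 1) yields the number of bytes from addr up to the next
 * align-byte boundary (zero when already aligned). align must be a
 * power of two, e.g. RTE_CACHE_LINE_SIZE.
 */
static inline uintptr_t
example_bytes_to_next_boundary(uintptr_t addr, uintptr_t align)
{
	return (~addr + 1) & (align - 1);
}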
2438 * Tx burst function for multi-segment packets. Supports all
2439 * types of Tx offloads, uses MLX5_OPCODE_SEND/TSO to build WQEs,
2440 * sends one packet per WQE. Function stops sending if it
2441 * encounters a single-segment packet.
2443 * This routine is responsible for storing the processed mbuf
2444 * into the elts ring buffer and updating elts_head.
2447 * Pointer to TX queue structure.
2449 * Packets to transmit.
2451 * Number of packets in array.
2453 * Pointer to burst routine local context.
2455 * Configured Tx offloads mask. It is fully defined at
2456 * compile time and may be used for optimization.
2459 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
2460 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
2461 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
2462 * MLX5_TXCMP_CODE_TSO - TSO single-segment packet encountered.
2463 * Local context variables updated.
2465 static __rte_always_inline enum mlx5_txcmp_code
2466 mlx5_tx_burst_mseg(struct mlx5_txq_data *__rte_restrict txq,
2467 struct rte_mbuf **__rte_restrict pkts,
2468 unsigned int pkts_n,
2469 struct mlx5_txq_local *__rte_restrict loc,
2472 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
2473 MLX5_ASSERT(pkts_n > loc->pkts_sent);
2474 pkts += loc->pkts_sent + 1;
2475 pkts_n -= loc->pkts_sent;
2477 enum mlx5_txcmp_code ret;
2479 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
2481 * Estimate the number of free elts quickly but
2482 * conservatively. Some segments may be fully inlined
2483 * and freed; ignore this here - precise estimation is costly.
2486 if (loc->elts_free < NB_SEGS(loc->mbuf))
2487 return MLX5_TXCMP_CODE_EXIT;
2488 if (MLX5_TXOFF_CONFIG(TSO) &&
2489 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)) {
2490 /* Proceed with multi-segment TSO. */
2491 ret = mlx5_tx_packet_multi_tso(txq, loc, olx);
2492 } else if (MLX5_TXOFF_CONFIG(INLINE)) {
2493 /* Proceed with multi-segment SEND with inlining. */
2494 ret = mlx5_tx_packet_multi_inline(txq, loc, olx);
2496 /* Proceed with multi-segment SEND w/o inlining. */
2497 ret = mlx5_tx_packet_multi_send(txq, loc, olx);
2499 if (ret == MLX5_TXCMP_CODE_EXIT)
2500 return MLX5_TXCMP_CODE_EXIT;
2501 if (ret == MLX5_TXCMP_CODE_ERROR)
2502 return MLX5_TXCMP_CODE_ERROR;
2503 /* WQE is built, go to the next packet. */
2506 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
2507 return MLX5_TXCMP_CODE_EXIT;
2508 loc->mbuf = *pkts++;
2510 rte_prefetch0(*pkts);
2511 if (likely(NB_SEGS(loc->mbuf) > 1))
2513 /* Here ends the series of multi-segment packets. */
2514 if (MLX5_TXOFF_CONFIG(TSO) &&
2515 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
2516 return MLX5_TXCMP_CODE_TSO;
2517 return MLX5_TXCMP_CODE_SINGLE;
2523 * Tx burst function for single-segment packets with TSO.
2524 * Supports all types of Tx offloads, except multi-packets.
2525 * Uses MLX5_OPCODE_TSO to build WQEs, sends one packet per WQE.
2526 * Function stops sending if it encounters a multi-segment
2527 * packet or a packet without TSO requested.
2529 * The routine is responsible for storing the processed mbuf
2530 * into the elts ring buffer and updating elts_head if the inline
2531 * offload is requested, due to possible early freeing of the
2532 * inlined mbufs (we cannot store the pkts array in elts as a batch).
2536 * Pointer to TX queue structure.
2538 * Packets to transmit.
2540 * Number of packets in array.
2542 * Pointer to burst routine local context.
2544 * Configured Tx offloads mask. It is fully defined at
2545 * compile time and may be used for optimization.
2548 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
2549 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
2550 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
2551 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
2552 * Local context variables updated.
2554 static __rte_always_inline enum mlx5_txcmp_code
2555 mlx5_tx_burst_tso(struct mlx5_txq_data *__rte_restrict txq,
2556 struct rte_mbuf **__rte_restrict pkts,
2557 unsigned int pkts_n,
2558 struct mlx5_txq_local *__rte_restrict loc,
2561 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
2562 MLX5_ASSERT(pkts_n > loc->pkts_sent);
2563 pkts += loc->pkts_sent + 1;
2564 pkts_n -= loc->pkts_sent;
2566 struct mlx5_wqe_dseg *__rte_restrict dseg;
2567 struct mlx5_wqe *__rte_restrict wqe;
2568 unsigned int ds, dlen, hlen, ntcp, vlan = 0;
2571 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
2572 if (MLX5_TXOFF_CONFIG(TXPP)) {
2573 enum mlx5_txcmp_code wret;
2575 /* Generate WAIT for scheduling if requested. */
2576 wret = mlx5_tx_schedule_send(txq, loc, olx);
2577 if (wret == MLX5_TXCMP_CODE_EXIT)
2578 return MLX5_TXCMP_CODE_EXIT;
2579 if (wret == MLX5_TXCMP_CODE_ERROR)
2580 return MLX5_TXCMP_CODE_ERROR;
2582 dlen = rte_pktmbuf_data_len(loc->mbuf);
2583 if (MLX5_TXOFF_CONFIG(VLAN) &&
2584 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
2585 vlan = sizeof(struct rte_vlan_hdr);
2588 * First calculate the WQE size to check
2589 * whether we have enough space in ring buffer.
2591 hlen = loc->mbuf->l2_len + vlan +
2592 loc->mbuf->l3_len + loc->mbuf->l4_len;
2593 if (unlikely((!hlen || !loc->mbuf->tso_segsz)))
2594 return MLX5_TXCMP_CODE_ERROR;
2595 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
2596 hlen += loc->mbuf->outer_l2_len +
2597 loc->mbuf->outer_l3_len;
2598 /* Segment must contain all TSO headers. */
2599 if (unlikely(hlen > MLX5_MAX_TSO_HEADER ||
2600 hlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
2601 hlen > (dlen + vlan)))
2602 return MLX5_TXCMP_CODE_ERROR;
2604 * Check whether there are enough free WQEBBs:
2606 * - Ethernet Segment
2607 * - First Segment of inlined Ethernet data
2608 * - ... data continued ...
2609 * - Finishing Data Segment of pointer type
2611 ds = 4 + (hlen - MLX5_ESEG_MIN_INLINE_SIZE +
2612 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
2613 if (loc->wqe_free < ((ds + 3) / 4))
2614 return MLX5_TXCMP_CODE_EXIT;
2615 #ifdef MLX5_PMD_SOFT_COUNTERS
2616 /* Update sent data bytes/packets counters. */
2617 ntcp = (dlen + vlan - hlen +
2618 loc->mbuf->tso_segsz - 1) /
2619 loc->mbuf->tso_segsz;
2621 * One packet is added for the mbuf itself at the end
2622 * of mlx5_tx_burst from the loc->pkts_sent field.
2625 txq->stats.opackets += ntcp;
2626 txq->stats.obytes += dlen + vlan + ntcp * hlen;
2629 * Build the TSO WQE:
2631 * - Ethernet Segment with hlen bytes inlined
2632 * - Data Segment of pointer type
2634 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
2635 loc->wqe_last = wqe;
2636 mlx5_tx_cseg_init(txq, loc, wqe, ds,
2637 MLX5_OPCODE_TSO, olx);
2638 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan, hlen, 1, olx);
2639 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) + hlen - vlan;
2640 dlen -= hlen - vlan;
2641 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
2643 * WQE is built, update the loop parameters
2644 * and go to the next packet.
2646 txq->wqe_ci += (ds + 3) / 4;
2647 loc->wqe_free -= (ds + 3) / 4;
2648 if (MLX5_TXOFF_CONFIG(INLINE))
2649 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
2653 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
2654 return MLX5_TXCMP_CODE_EXIT;
2655 loc->mbuf = *pkts++;
2657 rte_prefetch0(*pkts);
2658 if (MLX5_TXOFF_CONFIG(MULTI) &&
2659 unlikely(NB_SEGS(loc->mbuf) > 1))
2660 return MLX5_TXCMP_CODE_MULTI;
2661 if (likely(!(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)))
2662 return MLX5_TXCMP_CODE_SINGLE;
2663 /* Continue with the next TSO packet. */
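/*
 * An illustrative sketch (not part of the driver): the TSO header length
 * computed above. Everything up to and including the L4 header must be
 * inlined into the WQE; for tunnel packets the outer headers are counted
 * as well. vlan_len is sizeof(struct rte_vlan_hdr) when a VLAN tag is to
 * be inserted, zero otherwise.
 */
static inline unsigned int
example_tso_header_len(const struct rte_mbuf *mb, unsigned int vlan_len,
		       int tunnel)
{
	unsigned int hlen = mb->l2_len + vlan_len + mb->l3_len + mb->l4_len;

	if (tunnel)
		hlen += mb->outer_l2_len + mb->outer_l3_len;
	return hlen;
}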
2669 * Analyze the packet and select the best method to send.
2672 * Pointer to TX queue structure.
2674 * Pointer to burst routine local context.
2676 * Configured Tx offloads mask. It is fully defined at
2677 * compile time and may be used for optimization.
2679 * The predefined flag whether to do the complete check for
2680 * multi-segment packets and TSO.
2683 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
2684 * MLX5_TXCMP_CODE_TSO - TSO required, use TSO/LSO.
2685 * MLX5_TXCMP_CODE_SINGLE - single-segment packet, use SEND.
2686 * MLX5_TXCMP_CODE_EMPW - single-segment packet, use MPW.
2688 static __rte_always_inline enum mlx5_txcmp_code
2689 mlx5_tx_able_to_empw(struct mlx5_txq_data *__rte_restrict txq,
2690 struct mlx5_txq_local *__rte_restrict loc,
2694 /* Check for multi-segment packet. */
2696 MLX5_TXOFF_CONFIG(MULTI) &&
2697 unlikely(NB_SEGS(loc->mbuf) > 1))
2698 return MLX5_TXCMP_CODE_MULTI;
2699 /* Check for TSO packet. */
2701 MLX5_TXOFF_CONFIG(TSO) &&
2702 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
2703 return MLX5_TXCMP_CODE_TSO;
2704 /* Check if eMPW is enabled at all. */
2705 if (!MLX5_TXOFF_CONFIG(EMPW))
2706 return MLX5_TXCMP_CODE_SINGLE;
2707 /* Check if eMPW can be engaged. */
2708 if (MLX5_TXOFF_CONFIG(VLAN) &&
2709 unlikely(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) &&
2710 (!MLX5_TXOFF_CONFIG(INLINE) ||
2711 unlikely((rte_pktmbuf_data_len(loc->mbuf) +
2712 sizeof(struct rte_vlan_hdr)) > txq->inlen_empw))) {
2714 * eMPW does not support the VLAN insertion offload;
2715 * we would have to inline the entire packet, but
2716 * the packet is too long for inlining.
2718 return MLX5_TXCMP_CODE_SINGLE;
2720 return MLX5_TXCMP_CODE_EMPW;
2724 * Check whether the next packet attributes match the eMPW batch ones.
2725 * In addition, for legacy MPW the packet length is checked as well.
2728 * Pointer to TX queue structure.
2730 * Pointer to Ethernet Segment of eMPW batch.
2732 * Pointer to burst routine local context.
2734 * Length of previous packet in MPW descriptor.
2736 * Configured Tx offloads mask. It is fully defined at
2737 * compile time and may be used for optimization.
2740 * true - packet match with eMPW batch attributes.
2741 * false - no match, eMPW should be restarted.
2743 static __rte_always_inline bool
2744 mlx5_tx_match_empw(struct mlx5_txq_data *__rte_restrict txq,
2745 struct mlx5_wqe_eseg *__rte_restrict es,
2746 struct mlx5_txq_local *__rte_restrict loc,
2750 uint8_t swp_flags = 0;
2752 /* Compare the checksum flags, if any. */
2753 if (MLX5_TXOFF_CONFIG(CSUM) &&
2754 txq_ol_cksum_to_cs(loc->mbuf) != es->cs_flags)
2756 /* Compare the Software Parser offsets and flags. */
2757 if (MLX5_TXOFF_CONFIG(SWP) &&
2758 (es->swp_offs != txq_mbuf_to_swp(loc, &swp_flags, olx) ||
2759 es->swp_flags != swp_flags))
2761 /* Fill metadata field if needed. */
2762 if (MLX5_TXOFF_CONFIG(METADATA) &&
2763 es->metadata != (loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2764 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0))
2766 /* Legacy MPW can send packets with the same length only. */
2767 if (MLX5_TXOFF_CONFIG(MPW) &&
2768 dlen != rte_pktmbuf_data_len(loc->mbuf))
2770 /* There must be no VLAN packets in eMPW loop. */
2771 if (MLX5_TXOFF_CONFIG(VLAN))
2772 MLX5_ASSERT(!(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT));
2773 /* Check if the scheduling is requested. */
2774 if (MLX5_TXOFF_CONFIG(TXPP) &&
2775 loc->mbuf->ol_flags & txq->ts_mask)
2781 * Update send loop variables and WQE for eMPW loop
2782 * without data inlining. Number of Data Segments is
2783 * equal to the number of sent packets.
2786 * Pointer to TX queue structure.
2788 * Pointer to burst routine local context.
2790 * Number of packets (one Data Segment is built per packet).
2792 * Accumulated statistics, bytes sent.
2794 * Configured Tx offloads mask. It is fully defined at
2795 * compile time and may be used for optimization.
2801 static __rte_always_inline void
2802 mlx5_tx_sdone_empw(struct mlx5_txq_data *__rte_restrict txq,
2803 struct mlx5_txq_local *__rte_restrict loc,
2806 unsigned int olx __rte_unused)
2808 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
2809 #ifdef MLX5_PMD_SOFT_COUNTERS
2810 /* Update sent data bytes counter. */
2811 txq->stats.obytes += slen;
2815 loc->elts_free -= ds;
2816 loc->pkts_sent += ds;
2818 loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
2819 txq->wqe_ci += (ds + 3) / 4;
2820 loc->wqe_free -= (ds + 3) / 4;
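/*
 * An illustrative sketch (not part of the driver): the Control Segment
 * DS field written above. qp_num_8s is the QP number pre-shifted left by
 * 8 bits; the low 6 bits of the big-endian word carry the number of
 * 16-byte segments in the WQE. The mask is shown for clarity - the
 * driver guarantees ds already fits into the field.
 */
static inline uint32_t
example_cseg_sq_ds(uint32_t qp_num_8s, uint32_t ds)
{
	return rte_cpu_to_be_32(qp_num_8s | (ds & 0x3f));
}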
2824 * Update send loop variables and WQE for eMPW loop
2825 * with data inlining. Gets the size of pushed descriptors
2826 * and data to the WQE.
2829 * Pointer to TX queue structure.
2831 * Pointer to burst routine local context.
2833 * Total size of descriptor/data in bytes.
2835 * Accumulated statistics, data bytes sent.
2837 * The base WQE for the eMPW/MPW descriptor.
2839 * Configured Tx offloads mask. It is fully defined at
2840 * compile time and may be used for optimization.
2846 static __rte_always_inline void
2847 mlx5_tx_idone_empw(struct mlx5_txq_data *__rte_restrict txq,
2848 struct mlx5_txq_local *__rte_restrict loc,
2851 struct mlx5_wqe *__rte_restrict wqem,
2852 unsigned int olx __rte_unused)
2854 struct mlx5_wqe_dseg *dseg = &wqem->dseg[0];
2856 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
2857 #ifdef MLX5_PMD_SOFT_COUNTERS
2858 /* Update sent data bytes counter. */
2859 txq->stats.obytes += slen;
2863 if (MLX5_TXOFF_CONFIG(MPW) && dseg->bcount == RTE_BE32(0)) {
2865 * If the legacy MPW session contains the inline packets
2866 * we should set the only inline data segment length
2867 * and align the total length to the segment size.
2869 MLX5_ASSERT(len > sizeof(dseg->bcount));
2870 dseg->bcount = rte_cpu_to_be_32((len - sizeof(dseg->bcount)) |
2871 MLX5_ETH_WQE_DATA_INLINE);
2872 len = (len + MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE + 2;
2875 * The session is not legacy MPW or contains the
2876 * data buffer pointer segments.
2878 MLX5_ASSERT((len % MLX5_WSEG_SIZE) == 0);
2879 len = len / MLX5_WSEG_SIZE + 2;
2881 wqem->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | len);
2882 txq->wqe_ci += (len + 3) / 4;
2883 loc->wqe_free -= (len + 3) / 4;
2884 loc->wqe_last = wqem;
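/*
 * An illustrative sketch (not part of the driver): converting the
 * accumulated eMPW byte length into a DS count, as done above - round up
 * to whole 16-byte segments and add two for the Control and Ethernet
 * Segments of the title WQEBB.
 */
static inline unsigned int
example_empw_len_to_ds(unsigned int len_bytes)
{
	return (len_bytes + MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE + 2;
}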
2888 * The set of Tx burst functions for single-segment packets
2889 * without TSO and with Multi-Packet Writing feature support.
2890 * Supports all types of Tx offloads, except multi-packets
2893 * Uses MLX5_OPCODE_EMPW to build WQEs if possible and sends
2894 * as many packet per WQE as it can. If eMPW is not configured
2895 * or packet can not be sent with eMPW (VLAN insertion) the
2896 * ordinary SEND opcode is used and only one packet placed
2899 * Functions stop sending if it encounters the multi-segment
2900 * packet or packet with TSO requested.
2902 * The routines are responsible for storing the processed mbuf
2903 * into the elts ring buffer and updating elts_head if the inlining
2904 * offload is requested. Otherwise copying mbufs to elts
2905 * can be postponed and completed at the end of the burst routine.
2908 * Pointer to TX queue structure.
2910 * Packets to transmit.
2912 * Number of packets in array.
2914 * Pointer to burst routine local context.
2916 * Configured Tx offloads mask. It is fully defined at
2917 * compile time and may be used for optimization.
2920 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
2921 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
2922 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
2923 * MLX5_TXCMP_CODE_TSO - TSO packet encountered.
2924 * MLX5_TXCMP_CODE_SINGLE - used inside functions set.
2925 * MLX5_TXCMP_CODE_EMPW - used inside functions set.
2927 * Local context variables updated.
2930 * The routine sends packets with MLX5_OPCODE_EMPW
2931 * without inlining; this is a dedicated optimized branch.
2932 * No VLAN insertion is supported.
2934 static __rte_always_inline enum mlx5_txcmp_code
2935 mlx5_tx_burst_empw_simple(struct mlx5_txq_data *__rte_restrict txq,
2936 struct rte_mbuf **__rte_restrict pkts,
2937 unsigned int pkts_n,
2938 struct mlx5_txq_local *__rte_restrict loc,
2942 * This subroutine is part of mlx5_tx_burst_single()
2943 * and sends single-segment packets with the eMPW opcode
2944 * without data inlining.
2946 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
2947 MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
2948 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
2949 MLX5_ASSERT(pkts_n > loc->pkts_sent);
2950 pkts += loc->pkts_sent + 1;
2951 pkts_n -= loc->pkts_sent;
2953 struct mlx5_wqe_dseg *__rte_restrict dseg;
2954 struct mlx5_wqe_eseg *__rte_restrict eseg;
2955 enum mlx5_txcmp_code ret;
2956 unsigned int part, loop;
2957 unsigned int slen = 0;
2960 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
2961 if (MLX5_TXOFF_CONFIG(TXPP)) {
2962 enum mlx5_txcmp_code wret;
2964 /* Generate WAIT for scheduling if requested. */
2965 wret = mlx5_tx_schedule_send(txq, loc, olx);
2966 if (wret == MLX5_TXCMP_CODE_EXIT)
2967 return MLX5_TXCMP_CODE_EXIT;
2968 if (wret == MLX5_TXCMP_CODE_ERROR)
2969 return MLX5_TXCMP_CODE_ERROR;
2971 part = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
2972 MLX5_MPW_MAX_PACKETS :
2973 MLX5_EMPW_MAX_PACKETS);
2974 if (unlikely(loc->elts_free < part)) {
2975 /* We do not have enough elts to store all mbufs. */
2976 if (unlikely(loc->elts_free < MLX5_EMPW_MIN_PACKETS))
2977 return MLX5_TXCMP_CODE_EXIT;
2978 /* But we are still able to send at least a minimal eMPW. */
2979 part = loc->elts_free;
2981 /* Check whether we have enough WQEs */
2982 if (unlikely(loc->wqe_free < ((2 + part + 3) / 4))) {
2983 if (unlikely(loc->wqe_free <
2984 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
2985 return MLX5_TXCMP_CODE_EXIT;
2986 part = (loc->wqe_free * 4) - 2;
2988 if (likely(part > 1))
2989 rte_prefetch0(*pkts);
2990 loc->wqe_last = txq->wqes + (txq->wqe_ci & txq->wqe_m);
2992 * Build eMPW title WQEBB:
2993 * - Control Segment, eMPW opcode
2994 * - Ethernet Segment, no inline
2996 mlx5_tx_cseg_init(txq, loc, loc->wqe_last, part + 2,
2997 MLX5_OPCODE_ENHANCED_MPSW, olx);
2998 mlx5_tx_eseg_none(txq, loc, loc->wqe_last,
2999 olx & ~MLX5_TXOFF_CONFIG_VLAN);
3000 eseg = &loc->wqe_last->eseg;
3001 dseg = &loc->wqe_last->dseg[0];
3003 /* Store the packet length for legacy MPW. */
3004 if (MLX5_TXOFF_CONFIG(MPW))
3005 eseg->mss = rte_cpu_to_be_16
3006 (rte_pktmbuf_data_len(loc->mbuf));
3008 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
3009 #ifdef MLX5_PMD_SOFT_COUNTERS
3010 /* Update sent data bytes counter. */
3015 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3017 if (unlikely(--loop == 0))
3019 loc->mbuf = *pkts++;
3020 if (likely(loop > 1))
3021 rte_prefetch0(*pkts);
3022 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
3024 * Unroll the completion code to avoid
3025 * returning a variable value - it results in
3026 * unoptimized sequential checks in the caller.
3028 if (ret == MLX5_TXCMP_CODE_MULTI) {
3030 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3031 if (unlikely(!loc->elts_free ||
3033 return MLX5_TXCMP_CODE_EXIT;
3034 return MLX5_TXCMP_CODE_MULTI;
3036 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
3037 if (ret == MLX5_TXCMP_CODE_TSO) {
3039 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3040 if (unlikely(!loc->elts_free ||
3042 return MLX5_TXCMP_CODE_EXIT;
3043 return MLX5_TXCMP_CODE_TSO;
3045 if (ret == MLX5_TXCMP_CODE_SINGLE) {
3047 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3048 if (unlikely(!loc->elts_free ||
3050 return MLX5_TXCMP_CODE_EXIT;
3051 return MLX5_TXCMP_CODE_SINGLE;
3053 if (ret != MLX5_TXCMP_CODE_EMPW) {
3056 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3057 return MLX5_TXCMP_CODE_ERROR;
3060 * Check whether packet parameters coincide
3061 * within assumed eMPW batch:
3062 * - checksum settings
3064 * - software parser settings
3065 * - packets length (legacy MPW only)
3066 * - scheduling is not required
3068 if (!mlx5_tx_match_empw(txq, eseg, loc, dlen, olx)) {
3071 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3072 if (unlikely(!loc->elts_free ||
3074 return MLX5_TXCMP_CODE_EXIT;
3078 /* Packet attributes match, continue the same eMPW. */
3080 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3081 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3083 /* eMPW is built successfully, update loop parameters. */
3085 MLX5_ASSERT(pkts_n >= part);
3086 #ifdef MLX5_PMD_SOFT_COUNTERS
3087 /* Update sent data bytes counter. */
3088 txq->stats.obytes += slen;
3090 loc->elts_free -= part;
3091 loc->pkts_sent += part;
3092 txq->wqe_ci += (2 + part + 3) / 4;
3093 loc->wqe_free -= (2 + part + 3) / 4;
3095 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3096 return MLX5_TXCMP_CODE_EXIT;
3097 loc->mbuf = *pkts++;
3098 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
3099 if (unlikely(ret != MLX5_TXCMP_CODE_EMPW))
3101 /* Continue sending eMPW batches. */
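/*
 * An illustrative sketch (not part of the driver): the eMPW batch bound
 * used above. A batch of n packets occupies 2 + n Data Segments, i.e.
 * (2 + n + 3) / 4 WQEBBs, so wqe_free WQEBBs can host at most
 * 4 * wqe_free - 2 packets. The caller has already checked that at least
 * MLX5_EMPW_MIN_PACKETS fit.
 */
static inline unsigned int
example_empw_max_packets(unsigned int wqe_free)
{
	return wqe_free * 4 - 2;
}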
3107 * The routine sends packets with MLX5_OPCODE_EMPW
3108 * with inlining, optionally supports VLAN insertion.
3110 static __rte_always_inline enum mlx5_txcmp_code
3111 mlx5_tx_burst_empw_inline(struct mlx5_txq_data *__rte_restrict txq,
3112 struct rte_mbuf **__rte_restrict pkts,
3113 unsigned int pkts_n,
3114 struct mlx5_txq_local *__rte_restrict loc,
3118 * This subroutine is part of mlx5_tx_burst_single()
3119 * and sends single-segment packets with the eMPW opcode
3120 * with data inlining.
3122 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
3123 MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
3124 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3125 MLX5_ASSERT(pkts_n > loc->pkts_sent);
3126 pkts += loc->pkts_sent + 1;
3127 pkts_n -= loc->pkts_sent;
3129 struct mlx5_wqe_dseg *__rte_restrict dseg;
3130 struct mlx5_wqe *__rte_restrict wqem;
3131 enum mlx5_txcmp_code ret;
3132 unsigned int room, part, nlim;
3133 unsigned int slen = 0;
3135 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
3136 if (MLX5_TXOFF_CONFIG(TXPP)) {
3137 enum mlx5_txcmp_code wret;
3139 /* Generate WAIT for scheduling if requested. */
3140 wret = mlx5_tx_schedule_send(txq, loc, olx);
3141 if (wret == MLX5_TXCMP_CODE_EXIT)
3142 return MLX5_TXCMP_CODE_EXIT;
3143 if (wret == MLX5_TXCMP_CODE_ERROR)
3144 return MLX5_TXCMP_CODE_ERROR;
3147 * Limit the number of packets in one WQE
3148 * to improve CQE generation latency.
3150 nlim = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
3151 MLX5_MPW_INLINE_MAX_PACKETS :
3152 MLX5_EMPW_MAX_PACKETS);
3153 /* Check whether we have the minimal amount of WQEs. */
3154 if (unlikely(loc->wqe_free <
3155 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
3156 return MLX5_TXCMP_CODE_EXIT;
3157 if (likely(pkts_n > 1))
3158 rte_prefetch0(*pkts);
3159 wqem = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3161 * Build eMPW title WQEBB:
3162 * - Control Segment, eMPW opcode, zero DS
3163 * - Ethernet Segment, no inline
3165 mlx5_tx_cseg_init(txq, loc, wqem, 0,
3166 MLX5_OPCODE_ENHANCED_MPSW, olx);
3167 mlx5_tx_eseg_none(txq, loc, wqem,
3168 olx & ~MLX5_TXOFF_CONFIG_VLAN);
3169 dseg = &wqem->dseg[0];
3170 /* Store the packet length for legacy MPW. */
3171 if (MLX5_TXOFF_CONFIG(MPW))
3172 wqem->eseg.mss = rte_cpu_to_be_16
3173 (rte_pktmbuf_data_len(loc->mbuf));
3174 room = RTE_MIN(MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE,
3175 loc->wqe_free) * MLX5_WQE_SIZE -
3176 MLX5_WQE_CSEG_SIZE -
3178 /* Limit the room for legacy MPW sessions for performance. */
3179 if (MLX5_TXOFF_CONFIG(MPW))
3180 room = RTE_MIN(room,
3181 RTE_MAX(txq->inlen_empw +
3182 sizeof(dseg->bcount) +
3183 (MLX5_TXOFF_CONFIG(VLAN) ?
3184 sizeof(struct rte_vlan_hdr) : 0),
3185 MLX5_MPW_INLINE_MAX_PACKETS *
3186 MLX5_WQE_DSEG_SIZE));
3187 /* Build WQEs while we have space, packets and resources. */
3190 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
3191 uint8_t *dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
3194 MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
3195 MLX5_ASSERT((room % MLX5_WQE_DSEG_SIZE) == 0);
3196 MLX5_ASSERT((uintptr_t)dseg < (uintptr_t)txq->wqes_end);
3198 * Some Tx offloads may cause an error if the
3199 * packet is not long enough; check against the
3200 * assumed minimal length.
3202 if (unlikely(dlen <= MLX5_ESEG_MIN_INLINE_SIZE)) {
3204 if (unlikely(!part))
3205 return MLX5_TXCMP_CODE_ERROR;
3207 * We have some successfully built
3208 * packet Data Segments to send.
3210 mlx5_tx_idone_empw(txq, loc, part,
3212 return MLX5_TXCMP_CODE_ERROR;
3214 /* Inline or not inline - that's the Question. */
3215 if (dlen > txq->inlen_empw ||
3216 loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE)
3218 if (MLX5_TXOFF_CONFIG(MPW)) {
3219 if (dlen > txq->inlen_send)
3223 /* Open new inline MPW session. */
3224 tlen += sizeof(dseg->bcount);
3225 dseg->bcount = RTE_BE32(0);
3227 (dseg, sizeof(dseg->bcount));
3230 * Pointer and inline descriptors must not
3231 * be mixed in legacy MPW sessions.
3233 if (wqem->dseg[0].bcount)
3237 tlen = sizeof(dseg->bcount) + dlen;
3239 /* Inline entire packet, optional VLAN insertion. */
3240 if (MLX5_TXOFF_CONFIG(VLAN) &&
3241 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
3243 * The packet length must be checked in
3244 * mlx5_tx_able_to_empw() and the packet
3245 * is guaranteed to fit into the inline length.
3248 sizeof(struct rte_vlan_hdr)) <=
3250 tlen += sizeof(struct rte_vlan_hdr);
3253 dseg = mlx5_tx_dseg_vlan(txq, loc, dseg,
3255 #ifdef MLX5_PMD_SOFT_COUNTERS
3256 /* Update sent data bytes counter. */
3257 slen += sizeof(struct rte_vlan_hdr);
3262 dseg = mlx5_tx_dseg_empw(txq, loc, dseg,
3265 if (!MLX5_TXOFF_CONFIG(MPW))
3266 tlen = RTE_ALIGN(tlen, MLX5_WSEG_SIZE);
3267 MLX5_ASSERT(room >= tlen);
3270 * Packet data are completely inlined,
3271 * so we can try to free the packet.
3273 if (likely(loc->pkts_sent == loc->mbuf_free)) {
3275 * All the packets from the beginning of the burst
3276 * are inlined, so we can free the mbufs directly
3277 * from the original array on tx_burst exit().
3283 * In order not to call rte_pktmbuf_free_seg() here,
3284 * in the innermost loop (it might be very
3285 * expensive), we just save the mbuf in elts.
3287 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3292 * Pointer and inline descriptors must not
3293 * be mixed in legacy MPW sessions.
3295 if (MLX5_TXOFF_CONFIG(MPW) &&
3297 wqem->dseg[0].bcount == RTE_BE32(0))
3300 * Non-inlinable VLAN packets are
3301 * processed outside of this routine.
3303 MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
3304 if (MLX5_TXOFF_CONFIG(VLAN))
3305 MLX5_ASSERT(!(loc->mbuf->ol_flags &
3307 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
3308 /* We have to store mbuf in elts.*/
3309 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3311 room -= MLX5_WQE_DSEG_SIZE;
3312 /* Ring buffer wraparound is checked at the loop end.*/
3315 #ifdef MLX5_PMD_SOFT_COUNTERS
3316 /* Update sent data bytes counter. */
3321 if (unlikely(!pkts_n || !loc->elts_free)) {
3323 * We have no resources/packets to
3324 * continue building descriptors.
3327 mlx5_tx_idone_empw(txq, loc, part,
3329 return MLX5_TXCMP_CODE_EXIT;
3331 loc->mbuf = *pkts++;
3332 if (likely(pkts_n > 1))
3333 rte_prefetch0(*pkts);
3334 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
3336 * Unroll the completion code to avoid
3337 * returning a variable value - it results in
3338 * unoptimized sequential checks in the caller.
3340 if (ret == MLX5_TXCMP_CODE_MULTI) {
3342 mlx5_tx_idone_empw(txq, loc, part,
3344 if (unlikely(!loc->elts_free ||
3346 return MLX5_TXCMP_CODE_EXIT;
3347 return MLX5_TXCMP_CODE_MULTI;
3349 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
3350 if (ret == MLX5_TXCMP_CODE_TSO) {
3352 mlx5_tx_idone_empw(txq, loc, part,
3354 if (unlikely(!loc->elts_free ||
3356 return MLX5_TXCMP_CODE_EXIT;
3357 return MLX5_TXCMP_CODE_TSO;
3359 if (ret == MLX5_TXCMP_CODE_SINGLE) {
3361 mlx5_tx_idone_empw(txq, loc, part,
3363 if (unlikely(!loc->elts_free ||
3365 return MLX5_TXCMP_CODE_EXIT;
3366 return MLX5_TXCMP_CODE_SINGLE;
3368 if (ret != MLX5_TXCMP_CODE_EMPW) {
3371 mlx5_tx_idone_empw(txq, loc, part,
3373 return MLX5_TXCMP_CODE_ERROR;
3375 /* Check if we have minimal room left. */
3377 if (unlikely(!nlim || room < MLX5_WQE_DSEG_SIZE))
3380 * Check whether packet parameters coincide
3381 * within assumed eMPW batch:
3382 * - checksum settings
3384 * - software parser settings
3385 * - packets length (legacy MPW only)
3386 * - scheduling is not required
3388 if (!mlx5_tx_match_empw(txq, &wqem->eseg,
3391 /* Packet attributes match, continue the same eMPW. */
3392 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3393 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3396 * We get here to close an existing eMPW
3397 * session and start a new one.
3399 MLX5_ASSERT(pkts_n);
3401 if (unlikely(!part))
3402 return MLX5_TXCMP_CODE_EXIT;
3403 mlx5_tx_idone_empw(txq, loc, part, slen, wqem, olx);
3404 if (unlikely(!loc->elts_free ||
3406 return MLX5_TXCMP_CODE_EXIT;
3407 /* Continue the loop with new eMPW session. */
3413 * The routine sends packets with ordinary MLX5_OPCODE_SEND.
3414 * Data inlining and VLAN insertion are supported.
3416 static __rte_always_inline enum mlx5_txcmp_code
3417 mlx5_tx_burst_single_send(struct mlx5_txq_data *__rte_restrict txq,
3418 struct rte_mbuf **__rte_restrict pkts,
3419 unsigned int pkts_n,
3420 struct mlx5_txq_local *__rte_restrict loc,
3424 * This subroutine is part of mlx5_tx_burst_single()
3425 * and sends single-segment packets with the SEND opcode.
3427 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3428 MLX5_ASSERT(pkts_n > loc->pkts_sent);
3429 pkts += loc->pkts_sent + 1;
3430 pkts_n -= loc->pkts_sent;
3432 struct mlx5_wqe *__rte_restrict wqe;
3433 enum mlx5_txcmp_code ret;
3435 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
3436 if (MLX5_TXOFF_CONFIG(TXPP)) {
3437 enum mlx5_txcmp_code wret;
3439 /* Generate WAIT for scheduling if requested. */
3440 wret = mlx5_tx_schedule_send(txq, loc, olx);
3441 if (wret == MLX5_TXCMP_CODE_EXIT)
3442 return MLX5_TXCMP_CODE_EXIT;
3443 if (wret == MLX5_TXCMP_CODE_ERROR)
3444 return MLX5_TXCMP_CODE_ERROR;
3446 if (MLX5_TXOFF_CONFIG(INLINE)) {
3447 unsigned int inlen, vlan = 0;
3449 inlen = rte_pktmbuf_data_len(loc->mbuf);
3450 if (MLX5_TXOFF_CONFIG(VLAN) &&
3451 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
3452 vlan = sizeof(struct rte_vlan_hdr);
3456 * If inlining is enabled at configuration time,
3457 * the limit must be no less than the minimal size.
3458 * Otherwise we would need an extra data size check
3459 * to avoid crashes due to length overflow.
3461 MLX5_ASSERT(txq->inlen_send >=
3462 MLX5_ESEG_MIN_INLINE_SIZE);
3463 if (inlen <= txq->inlen_send) {
3464 unsigned int seg_n, wqe_n;
3466 rte_prefetch0(rte_pktmbuf_mtod
3467 (loc->mbuf, uint8_t *));
3468 /* Check against minimal length. */
3469 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
3470 return MLX5_TXCMP_CODE_ERROR;
3471 if (loc->mbuf->ol_flags &
3472 PKT_TX_DYNF_NOINLINE) {
3474 * The hint flag not to inline packet
3475 * data is set. Check whether we can follow the hint.
3478 if ((!MLX5_TXOFF_CONFIG(EMPW) &&
3480 (MLX5_TXOFF_CONFIG(MPW) &&
3482 if (inlen <= txq->inlen_send)
3485 * The hardware requires the
3486 * minimal inline data header.
3488 goto single_min_inline;
3490 if (MLX5_TXOFF_CONFIG(VLAN) &&
3491 vlan && !txq->vlan_en) {
3493 * We must insert VLAN tag
3494 * by software means.
3496 goto single_part_inline;
3498 goto single_no_inline;
3502 * Completely inlined packet data WQE:
3503 * - Control Segment, SEND opcode
3504 * - Ethernet Segment, no VLAN insertion
3505 * - Data inlined, VLAN optionally inserted
3506 * - Alignment to MLX5_WSEG_SIZE
3507 * We have to estimate the amount of WQEBBs
3509 seg_n = (inlen + 3 * MLX5_WSEG_SIZE -
3510 MLX5_ESEG_MIN_INLINE_SIZE +
3511 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3512 /* Check if there are enough WQEBBs. */
3513 wqe_n = (seg_n + 3) / 4;
3514 if (wqe_n > loc->wqe_free)
3515 return MLX5_TXCMP_CODE_EXIT;
3516 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3517 loc->wqe_last = wqe;
3518 mlx5_tx_cseg_init(txq, loc, wqe, seg_n,
3519 MLX5_OPCODE_SEND, olx);
3520 mlx5_tx_eseg_data(txq, loc, wqe,
3521 vlan, inlen, 0, olx);
3522 txq->wqe_ci += wqe_n;
3523 loc->wqe_free -= wqe_n;
3525 * Packet data are completely inlined,
3526 * free the packet immediately.
3528 rte_pktmbuf_free_seg(loc->mbuf);
3529 } else if ((!MLX5_TXOFF_CONFIG(EMPW) ||
3530 MLX5_TXOFF_CONFIG(MPW)) &&
3533 * If minimal inlining is requested, the eMPW
3534 * feature should be disabled because the data is
3535 * inlined into the Ethernet Segment, which cannot
3536 * contain inlined data for eMPW since the
3537 * segment is shared by all packets.
3539 struct mlx5_wqe_dseg *__rte_restrict dseg;
3544 * The inline-mode settings require
3545 * inlining the specified amount of
3546 * data bytes into the Ethernet Segment.
3547 * We should check the free space in
3548 * the WQE ring buffer to inline partially.
3551 MLX5_ASSERT(txq->inlen_send >= txq->inlen_mode);
3552 MLX5_ASSERT(inlen > txq->inlen_mode);
3553 MLX5_ASSERT(txq->inlen_mode >=
3554 MLX5_ESEG_MIN_INLINE_SIZE);
3556 * Check whether there are enough free WQEBBs:
3558 * - Ethernet Segment
3559 * - First Segment of inlined Ethernet data
3560 * - ... data continued ...
3561 * - Finishing Data Segment of pointer type
3563 ds = (MLX5_WQE_CSEG_SIZE +
3564 MLX5_WQE_ESEG_SIZE +
3565 MLX5_WQE_DSEG_SIZE +
3567 MLX5_ESEG_MIN_INLINE_SIZE +
3568 MLX5_WQE_DSEG_SIZE +
3569 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3570 if (loc->wqe_free < ((ds + 3) / 4))
3571 return MLX5_TXCMP_CODE_EXIT;
3573 * Build the ordinary SEND WQE:
3575 * - Ethernet Segment, inline inlen_mode bytes
3576 * - Data Segment of pointer type
3578 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3579 loc->wqe_last = wqe;
3580 mlx5_tx_cseg_init(txq, loc, wqe, ds,
3581 MLX5_OPCODE_SEND, olx);
3582 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan,
3585 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
3586 txq->inlen_mode - vlan;
3587 inlen -= txq->inlen_mode;
3588 mlx5_tx_dseg_ptr(txq, loc, dseg,
3591 * WQE is built, update the loop parameters
3592 * and go to the next packet.
3594 txq->wqe_ci += (ds + 3) / 4;
3595 loc->wqe_free -= (ds + 3) / 4;
3596 /* We have to store mbuf in elts.*/
3597 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
3598 txq->elts[txq->elts_head++ & txq->elts_m] =
3606 * Partially inlined packet data WQE: we have
3607 * some space in the title WQEBB, so we can fill it
3608 * with some packet data. It takes one WQEBB,
3609 * which is available - no extra space check:
3610 * - Control Segment, SEND opcode
3611 * - Ethernet Segment, no VLAN insertion
3612 * - MLX5_ESEG_MIN_INLINE_SIZE bytes of Data
3613 * - Data Segment, pointer type
3615 * We also get here if VLAN insertion is not
3616 * supported by HW but inlining is enabled.
3619 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3620 loc->wqe_last = wqe;
3621 mlx5_tx_cseg_init(txq, loc, wqe, 4,
3622 MLX5_OPCODE_SEND, olx);
3623 mlx5_tx_eseg_dmin(txq, loc, wqe, vlan, olx);
3624 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
3625 MLX5_ESEG_MIN_INLINE_SIZE - vlan;
3627 * The length check is performed above, by
3628 * comparing with txq->inlen_send. We should
3629 * not get overflow here.
3631 MLX5_ASSERT(inlen > MLX5_ESEG_MIN_INLINE_SIZE);
3632 dlen = inlen - MLX5_ESEG_MIN_INLINE_SIZE;
3633 mlx5_tx_dseg_ptr(txq, loc, &wqe->dseg[1],
3637 /* We have to store mbuf in elts.*/
3638 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
3639 txq->elts[txq->elts_head++ & txq->elts_m] =
3643 #ifdef MLX5_PMD_SOFT_COUNTERS
3644 /* Update sent data bytes counter. */
3645 txq->stats.obytes += vlan +
3646 rte_pktmbuf_data_len(loc->mbuf);
3650 * No inlining at all - CPU cycle saving was
3651 * prioritized at configuration time, so we should not
3652 * copy any packet data into the WQE.
3654 * SEND WQE, one WQEBB:
3655 * - Control Segment, SEND opcode
3656 * - Ethernet Segment, optional VLAN, no inline
3657 * - Data Segment, pointer type
3660 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3661 loc->wqe_last = wqe;
3662 mlx5_tx_cseg_init(txq, loc, wqe, 3,
3663 MLX5_OPCODE_SEND, olx);
3664 mlx5_tx_eseg_none(txq, loc, wqe, olx);
3666 (txq, loc, &wqe->dseg[0],
3667 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3668 rte_pktmbuf_data_len(loc->mbuf), olx);
3672 * We should not store the mbuf pointer in elts
3673 * if no inlining is configured; this is done
3674 * by the calling routine in a batch copy.
3676 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
3678 #ifdef MLX5_PMD_SOFT_COUNTERS
3679 /* Update sent data bytes counter. */
3680 txq->stats.obytes += rte_pktmbuf_data_len(loc->mbuf);
3681 if (MLX5_TXOFF_CONFIG(VLAN) &&
3682 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3683 txq->stats.obytes +=
3684 sizeof(struct rte_vlan_hdr);
3689 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3690 return MLX5_TXCMP_CODE_EXIT;
3691 loc->mbuf = *pkts++;
3693 rte_prefetch0(*pkts);
3694 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
3695 if (unlikely(ret != MLX5_TXCMP_CODE_SINGLE))
3701 static __rte_always_inline enum mlx5_txcmp_code
3702 mlx5_tx_burst_single(struct mlx5_txq_data *__rte_restrict txq,
3703 struct rte_mbuf **__rte_restrict pkts,
3704 unsigned int pkts_n,
3705 struct mlx5_txq_local *__rte_restrict loc,
3708 enum mlx5_txcmp_code ret;
3710 ret = mlx5_tx_able_to_empw(txq, loc, olx, false);
3711 if (ret == MLX5_TXCMP_CODE_SINGLE)
3713 MLX5_ASSERT(ret == MLX5_TXCMP_CODE_EMPW);
3715 /* Optimize for inline/no inline eMPW send. */
3716 ret = (MLX5_TXOFF_CONFIG(INLINE)) ?
3717 mlx5_tx_burst_empw_inline
3718 (txq, pkts, pkts_n, loc, olx) :
3719 mlx5_tx_burst_empw_simple
3720 (txq, pkts, pkts_n, loc, olx);
3721 if (ret != MLX5_TXCMP_CODE_SINGLE)
3723 /* The resources to send one packet should remain. */
3724 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3726 ret = mlx5_tx_burst_single_send(txq, pkts, pkts_n, loc, olx);
3727 MLX5_ASSERT(ret != MLX5_TXCMP_CODE_SINGLE);
3728 if (ret != MLX5_TXCMP_CODE_EMPW)
3730 /* The resources to send one packet should remain. */
3731 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3736 * DPDK Tx callback template. This is a configured template
3737 * used to generate routines optimized for a specified offload setup.
3738 * One of these generated functions is chosen at SQ configuration time.
3742 * Generic pointer to TX queue structure.
3744 * Packets to transmit.
3746 * Number of packets in array.
3748 * Configured offloads mask, presenting the bits of MLX5_TXOFF_CONFIG_xxx
3749 * values. Should be static to take advantage of compile-time static configuration.
3753 * Number of packets successfully transmitted (<= pkts_n).
3755 static __rte_always_inline uint16_t
3756 mlx5_tx_burst_tmpl(struct mlx5_txq_data *__rte_restrict txq,
3757 struct rte_mbuf **__rte_restrict pkts,
3761 struct mlx5_txq_local loc;
3762 enum mlx5_txcmp_code ret;
3765 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
3766 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
3767 if (unlikely(!pkts_n))
3769 if (MLX5_TXOFF_CONFIG(INLINE))
3773 loc.wqe_last = NULL;
3776 loc.pkts_loop = loc.pkts_sent;
3778 * Check if there are some CQEs; if any:
3779 * - process encountered errors
3780 * - process the completed WQEs
3781 * - free related mbufs
3782 * - doorbell the NIC about processed CQEs
3784 rte_prefetch0(*(pkts + loc.pkts_sent));
3785 mlx5_tx_handle_completion(txq, olx);
3787 * Calculate the number of available resources - elts and WQEs.
3788 * There are two possible different scenarios:
3789 * - no data inlining into WQEs, one WQEBB may contain up to
3790 * four packets, in this case elts become scarce resource
3791 * - data inlining into WQEs, one packet may require multiple
3792 * WQEBBs, the WQEs become the limiting factor.
3794 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
3795 loc.elts_free = txq->elts_s -
3796 (uint16_t)(txq->elts_head - txq->elts_tail);
3797 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
3798 loc.wqe_free = txq->wqe_s -
3799 (uint16_t)(txq->wqe_ci - txq->wqe_pi);
3800 if (unlikely(!loc.elts_free || !loc.wqe_free))
3804 * Fetch the packet from array. Usually this is
3805 * the first packet in a series of multi/single-segment packets.
3808 loc.mbuf = *(pkts + loc.pkts_sent);
3809 /* Dedicated branch for multi-segment packets. */
3810 if (MLX5_TXOFF_CONFIG(MULTI) &&
3811 unlikely(NB_SEGS(loc.mbuf) > 1)) {
3813 * Multi-segment packet encountered.
3814 * Hardware is able to process it only
3815 * with SEND/TSO opcodes, one packet
3816 * per WQE; do it in a dedicated routine.
3819 MLX5_ASSERT(loc.pkts_sent >= loc.pkts_copy);
3820 part = loc.pkts_sent - loc.pkts_copy;
3821 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
3823 * There are some single-segment mbufs not
3824 * stored in elts. The mbufs must be in the
3825 * same order as WQEs, so we must copy the
3826 * mbufs to elts here, before the coming
3827 * multi-segment packet mbufs are appended.
3829 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy,
3831 loc.pkts_copy = loc.pkts_sent;
3833 MLX5_ASSERT(pkts_n > loc.pkts_sent);
3834 ret = mlx5_tx_burst_mseg(txq, pkts, pkts_n, &loc, olx);
3835 if (!MLX5_TXOFF_CONFIG(INLINE))
3836 loc.pkts_copy = loc.pkts_sent;
3838 * These return code checks are supposed
3839 * to be optimized out due to routine inlining.
3841 if (ret == MLX5_TXCMP_CODE_EXIT) {
3843 * The routine returns this code when
3844 * all packets are sent or there are not
3845 * enough resources to complete the request.
3849 if (ret == MLX5_TXCMP_CODE_ERROR) {
3851 * The routine returns this code when
3852 * some error in the incoming packet format occurred.
3855 txq->stats.oerrors++;
3858 if (ret == MLX5_TXCMP_CODE_SINGLE) {
3860 * A single-segment packet was encountered
3861 * in the array; try to send it in the
3862 * best optimized way, possibly engaging eMPW.
3864 goto enter_send_single;
3866 if (MLX5_TXOFF_CONFIG(TSO) &&
3867 ret == MLX5_TXCMP_CODE_TSO) {
3869 * The single-segment TSO packet was
3870 * encountered in the array.
3872 goto enter_send_tso;
3874 /* We must not get here. Something went wrong. */
3876 txq->stats.oerrors++;
3879 /* Dedicated branch for single-segment TSO packets. */
3880 if (MLX5_TXOFF_CONFIG(TSO) &&
3881 unlikely(loc.mbuf->ol_flags & PKT_TX_TCP_SEG)) {
3883 * TSO might require a special way of inlining
3884 * (dedicated parameters) and is sent with the
3885 * MLX5_OPCODE_TSO opcode only; handle this
3886 * in a dedicated branch.
3889 MLX5_ASSERT(NB_SEGS(loc.mbuf) == 1);
3890 MLX5_ASSERT(pkts_n > loc.pkts_sent);
3891 ret = mlx5_tx_burst_tso(txq, pkts, pkts_n, &loc, olx);
3893 * These return code checks are supposed
3894 * to be optimized out due to routine inlining.
3896 if (ret == MLX5_TXCMP_CODE_EXIT)
3898 if (ret == MLX5_TXCMP_CODE_ERROR) {
3899 txq->stats.oerrors++;
3902 if (ret == MLX5_TXCMP_CODE_SINGLE)
3903 goto enter_send_single;
3904 if (MLX5_TXOFF_CONFIG(MULTI) &&
3905 ret == MLX5_TXCMP_CODE_MULTI) {
3907 * The multi-segment packet was
3908 * encountered in the array.
3910 goto enter_send_multi;
3912 /* We must not get here. Something went wrong. */
3914 txq->stats.oerrors++;
3918 * The dedicated branch for the single-segment packets
3919 * without TSO. Often these can be sent using
3920 * MLX5_OPCODE_EMPW with multiple packets in one WQE.
3921 * The routine builds the WQEs till it encounters
3922 * a TSO or multi-segment packet (if these
3923 * offloads are requested at SQ configuration time).
3926 MLX5_ASSERT(pkts_n > loc.pkts_sent);
3927 ret = mlx5_tx_burst_single(txq, pkts, pkts_n, &loc, olx);
3929 * These return code checks are supposed
3930 * to be optimized out due to routine inlining.
3932 if (ret == MLX5_TXCMP_CODE_EXIT)
3934 if (ret == MLX5_TXCMP_CODE_ERROR) {
3935 txq->stats.oerrors++;
3938 if (MLX5_TXOFF_CONFIG(MULTI) &&
3939 ret == MLX5_TXCMP_CODE_MULTI) {
3941 * The multi-segment packet was
3942 * encountered in the array.
3944 goto enter_send_multi;
3946 if (MLX5_TXOFF_CONFIG(TSO) &&
3947 ret == MLX5_TXCMP_CODE_TSO) {
3949 * The single-segment TSO packet was
3950 * encountered in the array.
3952 goto enter_send_tso;
3954 /* We must not get here. Something went wrong. */
3956 txq->stats.oerrors++;
3960 * Main Tx loop is completed, do the rest:
3961 * - set completion request if thresholds are reached
3962 * - doorbell the hardware
3963 * - copy the rest of mbufs to elts (if any)
3965 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE) ||
3966 loc.pkts_sent >= loc.pkts_copy);
3967 /* Take a shortcut if nothing is sent. */
3968 if (unlikely(loc.pkts_sent == loc.pkts_loop))
3970 /* Request CQE generation if limits are reached. */
3971 mlx5_tx_request_completion(txq, &loc, olx);
3973 * Ring the QP doorbell immediately after WQE building completion
3974 * to improve latency. The purely software-related data treatment
3975 * can be completed after the doorbell. Tx CQEs for this SQ are
3976 * processed in this thread only, by polling.
3978 * The rdma core library can map doorbell register in two ways,
3979 * depending on the environment variable "MLX5_SHUT_UP_BF":
3981 * - as regular cached memory, the variable is either missing or
3982 * set to zero. This type of mapping may cause significant
3983 * doorbell register write latency and requires an explicit
3984 * memory write barrier to mitigate this issue and prevent write combining.
3987 * - as non-cached memory, the variable is present and set to
3988 * a non-zero value. This type of mapping may cause a performance
3989 * impact under heavy load conditions but the explicit write
3990 * memory barrier is not required and it may improve core performance.
3993 * - the legacy behaviour (prior to the 19.08 release) was to use
3994 * heuristics to decide whether a write memory barrier should
3995 * be performed. This behavior is supported by specifying
3996 * tx_db_nc=2; the write barrier is skipped if the application
3997 * provides the full recommended burst of packets - it
3998 * assumes the next packets are coming and the write barrier
3999 * will be issued on the next burst (after descriptor writing,
4002 mlx5_tx_dbrec_cond_wmb(txq, loc.wqe_last, !txq->db_nc &&
4003 (!txq->db_heu || pkts_n % MLX5_TX_DEFAULT_BURST));
4004 /* Not all of the mbufs may be stored into elts yet. */
4005 part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent - loc.pkts_copy;
4006 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
4008 * There are some single-segment mbufs not stored in elts.
4009 * This can only be the case if the last packet was single-segment.
4010 * The copying is gathered into one place because it is
4011 * a good opportunity to optimize it with SIMD.
4012 * Unfortunately, if inlining is enabled, gaps in the
4013 * pointer array may appear due to early freeing of the inlined mbufs.
4016 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy, part, olx);
4017 loc.pkts_copy = loc.pkts_sent;
4019 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
4020 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
4021 if (pkts_n > loc.pkts_sent) {
4023 * If the burst size is large there might not be enough CQEs
4024 * fetched from the completion queue and not enough resources
4025 * freed to send all the packets.
4030 #ifdef MLX5_PMD_SOFT_COUNTERS
4031 /* Increment sent packets counter. */
4032 txq->stats.opackets += loc.pkts_sent;
4034 if (MLX5_TXOFF_CONFIG(INLINE) && loc.mbuf_free)
4035 __mlx5_tx_free_mbuf(txq, pkts, loc.mbuf_free, olx);
4036 return loc.pkts_sent;
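/*
 * An illustrative sketch (not part of the driver): free-slot accounting
 * with free-running 16-bit counters, as used in mlx5_tx_burst_tmpl()
 * above for both elts and WQEs. The narrowing cast keeps the subtraction
 * correct across counter wraparound as long as head - tail never exceeds
 * the ring size.
 */
static inline uint16_t
example_free_slots(uint16_t size, uint16_t head, uint16_t tail)
{
	return size - (uint16_t)(head - tail);
}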
4039 /* Generate routines with Enhanced Multi-Packet Write support. */
4040 MLX5_TXOFF_DECL(full_empw,
4041 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_EMPW)
MLX5_TXOFF_DECL(none_empw,
		MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_DECL(md_empw,
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_DECL(mt_empw,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_DECL(mtsc_empw,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_DECL(mti_empw,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_INLINE |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_DECL(mtv_empw,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_DECL(mtiv_empw,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_DECL(sc_empw,
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_DECL(sci_empw,
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_INLINE |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_DECL(scv_empw,
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_DECL(sciv_empw,
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_DECL(i_empw,
		MLX5_TXOFF_CONFIG_INLINE |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_DECL(v_empw,
		MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_DECL(iv_empw,
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
/* Generate routines without Enhanced Multi-Packet Write support. */
MLX5_TXOFF_DECL(full,
		MLX5_TXOFF_CONFIG_FULL)

MLX5_TXOFF_DECL(none,
		MLX5_TXOFF_CONFIG_NONE)

MLX5_TXOFF_DECL(md,
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_DECL(mt,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_DECL(mtsc,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_DECL(mti,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_INLINE |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_DECL(mtv,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_DECL(mtiv,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_DECL(sc,
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_DECL(sci,
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_INLINE |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_DECL(scv,
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_DECL(sciv,
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_DECL(i,
		MLX5_TXOFF_CONFIG_INLINE |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_DECL(v,
		MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_DECL(iv,
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA)
/* Generate routines with timestamp scheduling. */
MLX5_TXOFF_DECL(full_ts_nompw,
		MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_TXPP)

MLX5_TXOFF_DECL(full_ts_nompwi,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_VLAN | MLX5_TXOFF_CONFIG_METADATA |
		MLX5_TXOFF_CONFIG_TXPP)

MLX5_TXOFF_DECL(full_ts,
		MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_TXPP |
		MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_DECL(full_ts_noi,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_VLAN | MLX5_TXOFF_CONFIG_METADATA |
		MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_DECL(none_ts,
		MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_TXPP |
		MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_DECL(mdi_ts,
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_METADATA |
		MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_DECL(mti_ts,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_METADATA |
		MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_DECL(mtiv_ts,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_TXPP |
		MLX5_TXOFF_CONFIG_EMPW)
/*
 * Generate routines with Legacy Multi-Packet Write support.
 * This mode is supported by ConnectX-4 Lx only and imposes
 * offload limitations; the following are not supported:
 *   - ACL/Flows (metadata are becoming meaningless)
 *   - WQE Inline headers
 *   - SRIOV (E-Switch offloads)
 *   - VLAN insertion
 *   - tunnel encapsulation/decapsulation
 *   - TSO
 */
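/*
 * Editor's note: as the declarations below show, the legacy MPW
 * variants are consequently limited to the "none" set and to
 * combinations of the multi-segment, checksum and inline offloads.
 */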
MLX5_TXOFF_DECL(none_mpw,
		MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW |
		MLX5_TXOFF_CONFIG_MPW)

MLX5_TXOFF_DECL(mci_mpw,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
		MLX5_TXOFF_CONFIG_MPW)

MLX5_TXOFF_DECL(mc_mpw,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW)

MLX5_TXOFF_DECL(i_mpw,
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
		MLX5_TXOFF_CONFIG_MPW)
/*
 * Array of declared and compiled Tx burst function and corresponding
 * supported offloads set. The array is used to select the Tx burst
 * function for the offloads set specified at Tx queue configuration
 * time.
 */
const struct {
	eth_tx_burst_t func;
	unsigned int olx;
} txoff_func[] = {
MLX5_TXOFF_INFO(full_empw,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(none_empw,
		MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(md_empw,
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(mt_empw,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(mtsc_empw,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(mti_empw,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_INLINE |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(mtv_empw,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(mtiv_empw,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(sc_empw,
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(sci_empw,
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_INLINE |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(scv_empw,
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(sciv_empw,
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(i_empw,
		MLX5_TXOFF_CONFIG_INLINE |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(v_empw,
		MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(iv_empw,
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(full_ts_nompw,
		MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_TXPP)

MLX5_TXOFF_INFO(full_ts_nompwi,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_VLAN | MLX5_TXOFF_CONFIG_METADATA |
		MLX5_TXOFF_CONFIG_TXPP)

MLX5_TXOFF_INFO(full_ts,
		MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_TXPP |
		MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(full_ts_noi,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_VLAN | MLX5_TXOFF_CONFIG_METADATA |
		MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(none_ts,
		MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_TXPP |
		MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(mdi_ts,
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_METADATA |
		MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(mti_ts,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_METADATA |
		MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(mtiv_ts,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_TXPP |
		MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(full,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(none,
		MLX5_TXOFF_CONFIG_NONE)
MLX5_TXOFF_INFO(md,
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(mt,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(mtsc,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(mti,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_INLINE |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(mtv,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(mtiv,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(sc,
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(sci,
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_INLINE |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(scv,
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(sciv,
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(i,
		MLX5_TXOFF_CONFIG_INLINE |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(v,
		MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(iv,
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA)
MLX5_TXOFF_INFO(none_mpw,
		MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW |
		MLX5_TXOFF_CONFIG_MPW)

MLX5_TXOFF_INFO(mci_mpw,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
		MLX5_TXOFF_CONFIG_MPW)

MLX5_TXOFF_INFO(mc_mpw,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW)

MLX5_TXOFF_INFO(i_mpw,
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
		MLX5_TXOFF_CONFIG_MPW)
};
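/*
 * For reference (editor's sketch): each MLX5_TXOFF_INFO(func, olx)
 * entry above expands to an initializer pairing the generated routine
 * with its offload set, e.g. the first entry becomes:
 *
 *	{mlx5_tx_burst_full_empw,
 *	 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
 *	 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
 *	 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
 *	 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW},
 */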
/**
 * Configure the Tx function to use. The routine checks configured
 * Tx offloads for the device and selects the appropriate Tx burst
 * routine. There are multiple Tx burst routines compiled from
 * the same template in the most optimal way for the dedicated
 * CPU.
 *
 * @param dev
 *   Pointer to the Ethernet device structure.
 *
 * @return
 *   Pointer to selected Tx burst function.
 */
eth_tx_burst_t
mlx5_select_tx_function(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
	unsigned int diff = 0, olx = 0, i, m;
	if (tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
		/* We should support Multi-Segment Packets. */
		olx |= MLX5_TXOFF_CONFIG_MULTI;
	}
	if (tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
			   DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
			   DEV_TX_OFFLOAD_GRE_TNL_TSO |
			   DEV_TX_OFFLOAD_IP_TNL_TSO |
			   DEV_TX_OFFLOAD_UDP_TNL_TSO)) {
		/* We should support TCP Send Offload. */
		olx |= MLX5_TXOFF_CONFIG_TSO;
	}
	if (tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO |
			   DEV_TX_OFFLOAD_UDP_TNL_TSO |
			   DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
		/* We should support Software Parser for Tunnels. */
		olx |= MLX5_TXOFF_CONFIG_SWP;
	}
	if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
			   DEV_TX_OFFLOAD_UDP_CKSUM |
			   DEV_TX_OFFLOAD_TCP_CKSUM |
			   DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
		/* We should support IP/TCP/UDP Checksums. */
		olx |= MLX5_TXOFF_CONFIG_CSUM;
	}
	if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT) {
		/* We should support VLAN insertion. */
		olx |= MLX5_TXOFF_CONFIG_VLAN;
	}
	if (tx_offloads & DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP &&
	    rte_mbuf_dynflag_lookup
			(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL) >= 0 &&
	    rte_mbuf_dynfield_lookup
			(RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL) >= 0) {
		/* Offload configured, dynamic entities registered. */
		olx |= MLX5_TXOFF_CONFIG_TXPP;
	}
	if (priv->txqs_n && (*priv->txqs)[0]) {
		struct mlx5_txq_data *txd = (*priv->txqs)[0];

		if (txd->inlen_send) {
			/*
			 * Check the data inline requirements. Data inline
			 * is enabled on a per device basis, we can check
			 * the first Tx queue only.
			 *
			 * If device does not support VLAN insertion in WQE
			 * and some queues are requested to perform VLAN
			 * insertion offload then inline must be enabled.
			 */
			olx |= MLX5_TXOFF_CONFIG_INLINE;
		}
	}
	if (config->mps == MLX5_MPW_ENHANCED &&
	    config->txq_inline_min <= 0) {
		/*
		 * The NIC supports Enhanced Multi-Packet Write
		 * and does not require minimal inline data.
		 */
		olx |= MLX5_TXOFF_CONFIG_EMPW;
	}
	if (rte_flow_dynf_metadata_avail()) {
		/* We should support Flow metadata. */
		olx |= MLX5_TXOFF_CONFIG_METADATA;
	}
	if (config->mps == MLX5_MPW) {
		/*
		 * The NIC supports Legacy Multi-Packet Write.
		 * The MLX5_TXOFF_CONFIG_MPW controls the descriptor
		 * building method in combination with
		 * MLX5_TXOFF_CONFIG_EMPW.
		 */
		if (!(olx & (MLX5_TXOFF_CONFIG_TSO |
			     MLX5_TXOFF_CONFIG_SWP |
			     MLX5_TXOFF_CONFIG_VLAN |
			     MLX5_TXOFF_CONFIG_METADATA)))
			olx |= MLX5_TXOFF_CONFIG_EMPW |
			       MLX5_TXOFF_CONFIG_MPW;
	}
	/*
	 * Scan the routines table to find the minimal
	 * satisfying routine with requested offloads.
	 */
	m = RTE_DIM(txoff_func);
	for (i = 0; i < RTE_DIM(txoff_func); i++) {
		unsigned int tmp;

		tmp = txoff_func[i].olx;
		if (tmp == olx) {
			/* Meets requested offloads exactly. */
			m = i;
			break;
		}
		if ((tmp & olx) != olx) {
			/* Does not meet requested offloads at all. */
			continue;
		}
		if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_MPW)
			/* Do not enable legacy MPW if not configured. */
			continue;
		if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_EMPW)
			/* Do not enable eMPW if not configured. */
			continue;
		if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_INLINE)
			/* Do not enable inlining if not configured. */
			continue;
		if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_TXPP)
			/* Do not enable scheduling if not configured. */
			continue;
		/*
		 * Some routine meets the requirements.
		 * Check whether it has minimal amount
		 * of not requested offloads.
		 */
		tmp = __builtin_popcountl(tmp & ~olx);
		if (m >= RTE_DIM(txoff_func) || tmp < diff) {
			/* First or better match, save and continue. */
			m = i;
			diff = tmp;
			continue;
		}
		if (tmp == diff) {
			tmp = txoff_func[i].olx ^ txoff_func[m].olx;
			if (__builtin_ffsl(txoff_func[i].olx & ~tmp) <
			    __builtin_ffsl(txoff_func[m].olx & ~tmp)) {
				/* Lighter not requested offload. */
				m = i;
			}
		}
	}
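	/*
	 * Worked example (editor's illustration): if only checksum and
	 * metadata are requested (olx = CSUM | METADATA), no table entry
	 * matches exactly. All *_empw and *_ts entries are skipped
	 * because their EMPW/TXPP bits differ from the request, and
	 * entries carrying INLINE are skipped likewise. Among the
	 * remaining supersets, "sc" (SWP | CSUM | METADATA) carries a
	 * single extra offload (SWP, popcount 1) and beats e.g. "mtsc"
	 * (popcount 3), so mlx5_tx_burst_sc would be selected.
	 */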
	if (m >= RTE_DIM(txoff_func)) {
		DRV_LOG(DEBUG, "port %u has no selected Tx function"
			       " for requested offloads %04X",
				dev->data->port_id, olx);
		return NULL;
	}
	DRV_LOG(DEBUG, "port %u has selected Tx function"
		       " supporting offloads %04X/%04X",
			dev->data->port_id, olx, txoff_func[m].olx);
	if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MULTI)
		DRV_LOG(DEBUG, "\tMULTI (multi segment)");
	if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_TSO)
		DRV_LOG(DEBUG, "\tTSO   (TCP send offload)");
	if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_SWP)
		DRV_LOG(DEBUG, "\tSWP   (software parser)");
	if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_CSUM)
		DRV_LOG(DEBUG, "\tCSUM  (checksum offload)");
	if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_INLINE)
		DRV_LOG(DEBUG, "\tINLIN (inline data)");
	if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_VLAN)
		DRV_LOG(DEBUG, "\tVLANI (VLAN insertion)");
	if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_METADATA)
		DRV_LOG(DEBUG, "\tMETAD (tx Flow metadata)");
	if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_TXPP)
		DRV_LOG(DEBUG, "\tTXPP  (tx Scheduling)");
	if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_EMPW) {
		if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MPW)
			DRV_LOG(DEBUG, "\tMPW   (Legacy MPW)");
		else
			DRV_LOG(DEBUG, "\tEMPW  (Enhanced MPW)");
	}
	return txoff_func[m].func;
}
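/*
 * Usage sketch (editor's note): the selected routine is typically
 * installed as the device Tx burst callback when the port starts,
 * e.g.:
 *
 *	dev->tx_pkt_burst = mlx5_select_tx_function(dev);
 *	if (dev->tx_pkt_burst == NULL)
 *		return -ENOTSUP;  // hypothetical error handling
 */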
/**
 * DPDK callback to get the TX queue information.
 *
 * @param dev
 *   Pointer to the device structure.
 *
 * @param tx_queue_id
 *   Tx queue identifier.
 *
 * @param qinfo
 *   Pointer to the TX queue information structure.
 *
 * @return
 *   None.
 */
void
mlx5_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		  struct rte_eth_txq_info *qinfo)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq = (*priv->txqs)[tx_queue_id];
	struct mlx5_txq_ctrl *txq_ctrl =
			container_of(txq, struct mlx5_txq_ctrl, txq);

	if (!txq)
		return;
	qinfo->nb_desc = txq->elts_s;
	qinfo->conf.tx_thresh.pthresh = 0;
	qinfo->conf.tx_thresh.hthresh = 0;
	qinfo->conf.tx_thresh.wthresh = 0;
	qinfo->conf.tx_rs_thresh = 0;
	qinfo->conf.tx_free_thresh = 0;
	qinfo->conf.tx_deferred_start = txq_ctrl ? 0 : 1;
	qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
}
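/*
 * Usage sketch (editor's note): an application reaches this callback
 * through the generic ethdev API, e.g.:
 *
 *	struct rte_eth_txq_info qinfo;
 *
 *	if (rte_eth_tx_queue_info_get(port_id, 0, &qinfo) == 0)
 *		printf("Tx queue 0: %u descriptors\n", qinfo.nb_desc);
 */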
/**
 * DPDK callback to get the TX packet burst mode information.
 *
 * @param dev
 *   Pointer to the device structure.
 *
 * @param tx_queue_id
 *   Tx queue identifier.
 *
 * @param mode
 *   Pointer to the burst mode information.
 *
 * @return
 *   0 on success, -EINVAL on failure.
 */
int
mlx5_tx_burst_mode_get(struct rte_eth_dev *dev,
		       uint16_t tx_queue_id,
		       struct rte_eth_burst_mode *mode)
{
	eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq = (*priv->txqs)[tx_queue_id];
	unsigned int i, olx;
	for (i = 0; i < RTE_DIM(txoff_func); i++) {
		if (pkt_burst == txoff_func[i].func) {
			olx = txoff_func[i].olx;
			snprintf(mode->info, sizeof(mode->info),
				 "%s%s%s%s%s%s%s%s%s%s",
				 (olx & MLX5_TXOFF_CONFIG_EMPW) ?
				 ((olx & MLX5_TXOFF_CONFIG_MPW) ?
				 "Legacy MPW" : "Enhanced MPW") : "No MPW",
				 (olx & MLX5_TXOFF_CONFIG_MULTI) ?
				 " + MULTI" : "",
				 (olx & MLX5_TXOFF_CONFIG_TSO) ?
				 " + TSO" : "",
				 (olx & MLX5_TXOFF_CONFIG_SWP) ?
				 " + SWP" : "",
				 (olx & MLX5_TXOFF_CONFIG_CSUM) ?
				 " + CSUM" : "",
				 (olx & MLX5_TXOFF_CONFIG_INLINE) ?
				 " + INLINE" : "",
				 (olx & MLX5_TXOFF_CONFIG_VLAN) ?
				 " + VLAN" : "",
				 (olx & MLX5_TXOFF_CONFIG_METADATA) ?
				 " + METADATA" : "",
				 (olx & MLX5_TXOFF_CONFIG_TXPP) ?
				 " + TXPP" : "",
				 (txq && txq->fast_free) ?
				 " + Fast Free" : "");
			return 0;
		}
	}
	return -EINVAL;
}
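/*
 * Usage sketch (editor's note): the burst mode string can be queried
 * through the generic ethdev API, e.g.:
 *
 *	struct rte_eth_burst_mode mode;
 *
 *	if (rte_eth_tx_burst_mode_get(port_id, 0, &mode) == 0)
 *		printf("Tx burst mode: %s\n", mode.info);
 */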