1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015-2019 Mellanox Technologies, Ltd
11 #include <rte_mempool.h>
12 #include <rte_prefetch.h>
13 #include <rte_common.h>
14 #include <rte_branch_prediction.h>
15 #include <rte_ether.h>
16 #include <rte_cycles.h>
20 #include <mlx5_common.h>
22 #include "mlx5_autoconf.h"
23 #include "mlx5_defs.h"
26 #include "mlx5_utils.h"
27 #include "mlx5_rxtx.h"
30 /* TX burst subroutines return codes. */
31 enum mlx5_txcmp_code {
32 MLX5_TXCMP_CODE_EXIT = 0,
33 MLX5_TXCMP_CODE_ERROR,
34 MLX5_TXCMP_CODE_SINGLE,
35 MLX5_TXCMP_CODE_MULTI,
41 * These defines are used to configure Tx burst routine option set
42 * supported at compile time. The not specified options are optimized
43 * out, because the related if conditions can be evaluated at compile time.
44 * The offloads with the bigger runtime check overhead (requiring more
45 * CPU cycles to skip) should have the bigger index - this is needed to
46 * select the better matching routine if there is no exact match and
47 * some offloads are not actually requested.
49 #define MLX5_TXOFF_CONFIG_MULTI (1u << 0) /* Multi-segment packets.*/
50 #define MLX5_TXOFF_CONFIG_TSO (1u << 1) /* TCP send offload supported.*/
51 #define MLX5_TXOFF_CONFIG_SWP (1u << 2) /* Tunnels/SW Parser offloads.*/
52 #define MLX5_TXOFF_CONFIG_CSUM (1u << 3) /* Check Sums offloaded. */
53 #define MLX5_TXOFF_CONFIG_INLINE (1u << 4) /* Data inlining supported. */
54 #define MLX5_TXOFF_CONFIG_VLAN (1u << 5) /* VLAN insertion supported.*/
55 #define MLX5_TXOFF_CONFIG_METADATA (1u << 6) /* Flow metadata. */
56 #define MLX5_TXOFF_CONFIG_EMPW (1u << 8) /* Enhanced MPW supported.*/
57 #define MLX5_TXOFF_CONFIG_MPW (1u << 9) /* Legacy MPW supported.*/
58 #define MLX5_TXOFF_CONFIG_TXPP (1u << 10) /* Scheduling on timestamp.*/
60 /* The most common offload groups. */
61 #define MLX5_TXOFF_CONFIG_NONE 0
62 #define MLX5_TXOFF_CONFIG_FULL (MLX5_TXOFF_CONFIG_MULTI | \
63 MLX5_TXOFF_CONFIG_TSO | \
64 MLX5_TXOFF_CONFIG_SWP | \
65 MLX5_TXOFF_CONFIG_CSUM | \
66 MLX5_TXOFF_CONFIG_INLINE | \
67 MLX5_TXOFF_CONFIG_VLAN | \
68 MLX5_TXOFF_CONFIG_METADATA)
70 #define MLX5_TXOFF_CONFIG(mask) (olx & MLX5_TXOFF_CONFIG_##mask)
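/*
 * Illustrative note: inside a burst routine compiled with a constant "olx",
 * a check such as
 *     if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
 * folds into a compile-time constant, so branches for offloads that are not
 * configured are removed entirely by the compiler (see the routines below).
 */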
72 #define MLX5_TXOFF_DECL(func, olx) \
73 static uint16_t mlx5_tx_burst_##func(void *txq, \
74 struct rte_mbuf **pkts, \
77 return mlx5_tx_burst_tmpl((struct mlx5_txq_data *)txq, \
78 pkts, pkts_n, (olx)); \
81 #define MLX5_TXOFF_INFO(func, olx) {mlx5_tx_burst_##func, olx},
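/*
 * Illustrative pairing (routine name chosen for the example only):
 * MLX5_TXOFF_DECL(full, MLX5_TXOFF_CONFIG_FULL) generates a wrapper
 * mlx5_tx_burst_full() that calls the template with a constant offload
 * mask, and MLX5_TXOFF_INFO(full, MLX5_TXOFF_CONFIG_FULL) adds the
 * {routine, mask} pair to the table used to select the best match.
 */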
84 static_assert(MLX5_CQE_STATUS_HW_OWN < 0, "Must be negative value");
85 static_assert(MLX5_CQE_STATUS_SW_OWN < 0, "Must be negative value");
86 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
88 sizeof(rte_v128u32_t)),
89 "invalid Ethernet Segment data size");
90 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
92 sizeof(struct rte_vlan_hdr) +
93 2 * RTE_ETHER_ADDR_LEN),
94 "invalid Ethernet Segment data size");
95 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
97 sizeof(rte_v128u32_t)),
98 "invalid Ethernet Segment data size");
99 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
101 sizeof(struct rte_vlan_hdr) +
102 2 * RTE_ETHER_ADDR_LEN),
103 "invalid Ethernet Segment data size");
104 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
106 sizeof(rte_v128u32_t)),
107 "invalid Ethernet Segment data size");
108 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
110 sizeof(struct rte_vlan_hdr) +
111 2 * RTE_ETHER_ADDR_LEN),
112 "invalid Ethernet Segment data size");
113 static_assert(MLX5_DSEG_MIN_INLINE_SIZE ==
114 (2 * RTE_ETHER_ADDR_LEN),
115 "invalid Data Segment data size");
116 static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
117 static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
118 static_assert((sizeof(struct rte_vlan_hdr) +
119 sizeof(struct rte_ether_hdr)) ==
120 MLX5_ESEG_MIN_INLINE_SIZE,
121 "invalid min inline data size");
122 static_assert(MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE <=
123 MLX5_DSEG_MAX, "invalid WQE max size");
124 static_assert(MLX5_WQE_CSEG_SIZE == MLX5_WSEG_SIZE,
125 "invalid WQE Control Segment size");
126 static_assert(MLX5_WQE_ESEG_SIZE == MLX5_WSEG_SIZE,
127 "invalid WQE Ethernet Segment size");
128 static_assert(MLX5_WQE_DSEG_SIZE == MLX5_WSEG_SIZE,
129 "invalid WQE Data Segment size");
130 static_assert(MLX5_WQE_SIZE == 4 * MLX5_WSEG_SIZE,
133 uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
134 [0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
137 uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned;
138 uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned;
140 uint64_t rte_net_mlx5_dynf_inline_mask;
141 #define PKT_TX_DYNF_NOINLINE rte_net_mlx5_dynf_inline_mask
144 * Build a table to translate Rx completion flags to packet type.
146 * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
149 mlx5_set_ptype_table(void)
152 uint32_t (*p)[RTE_DIM(mlx5_ptype_table)] = &mlx5_ptype_table;
154 /* Last entry must not be overwritten, reserved for errored packet. */
155 for (i = 0; i < RTE_DIM(mlx5_ptype_table) - 1; ++i)
156 (*p)[i] = RTE_PTYPE_UNKNOWN;
158 * The index to the array should have:
159 * bit[1:0] = l3_hdr_type
160 * bit[4:2] = l4_hdr_type
163 * bit[7] = outer_l3_type
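* For example, index 0x46 combines bit[6] set (tunneled, cf. the 0x4x/0xcx
* entries), bit[4:2] = 1 and bit[1:0] = 2; the entry below resolves it to
* an outer IPv4 / inner IPv4 / inner TCP packet type.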
166 (*p)[0x00] = RTE_PTYPE_L2_ETHER;
168 (*p)[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
169 RTE_PTYPE_L4_NONFRAG;
170 (*p)[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
171 RTE_PTYPE_L4_NONFRAG;
173 (*p)[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
175 (*p)[0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
178 (*p)[0x05] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
180 (*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
182 (*p)[0x0d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
184 (*p)[0x0e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
186 (*p)[0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
188 (*p)[0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
191 (*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
193 (*p)[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
195 /* Repeat with outer_l3_type being set. Just in case. */
196 (*p)[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
197 RTE_PTYPE_L4_NONFRAG;
198 (*p)[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
199 RTE_PTYPE_L4_NONFRAG;
200 (*p)[0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
202 (*p)[0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
204 (*p)[0x85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
206 (*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
208 (*p)[0x8d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
210 (*p)[0x8e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
212 (*p)[0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
214 (*p)[0x92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
216 (*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
218 (*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
221 (*p)[0x40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
222 (*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
223 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
224 RTE_PTYPE_INNER_L4_NONFRAG;
225 (*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
226 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
227 RTE_PTYPE_INNER_L4_NONFRAG;
228 (*p)[0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
229 (*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
230 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
231 RTE_PTYPE_INNER_L4_NONFRAG;
232 (*p)[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
233 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
234 RTE_PTYPE_INNER_L4_NONFRAG;
235 /* Tunneled - Fragmented */
236 (*p)[0x61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
237 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
238 RTE_PTYPE_INNER_L4_FRAG;
239 (*p)[0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
240 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
241 RTE_PTYPE_INNER_L4_FRAG;
242 (*p)[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
243 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
244 RTE_PTYPE_INNER_L4_FRAG;
245 (*p)[0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
246 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
247 RTE_PTYPE_INNER_L4_FRAG;
249 (*p)[0x45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
250 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
251 RTE_PTYPE_INNER_L4_TCP;
252 (*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
253 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
254 RTE_PTYPE_INNER_L4_TCP;
255 (*p)[0x4d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
256 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
257 RTE_PTYPE_INNER_L4_TCP;
258 (*p)[0x4e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
259 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
260 RTE_PTYPE_INNER_L4_TCP;
261 (*p)[0x51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
262 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
263 RTE_PTYPE_INNER_L4_TCP;
264 (*p)[0x52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
265 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
266 RTE_PTYPE_INNER_L4_TCP;
267 (*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
268 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
269 RTE_PTYPE_INNER_L4_TCP;
270 (*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
271 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
272 RTE_PTYPE_INNER_L4_TCP;
273 (*p)[0xcd] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
274 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
275 RTE_PTYPE_INNER_L4_TCP;
276 (*p)[0xce] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
277 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
278 RTE_PTYPE_INNER_L4_TCP;
279 (*p)[0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
280 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
281 RTE_PTYPE_INNER_L4_TCP;
282 (*p)[0xd2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
283 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
284 RTE_PTYPE_INNER_L4_TCP;
286 (*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
287 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
288 RTE_PTYPE_INNER_L4_UDP;
289 (*p)[0x4a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
290 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
291 RTE_PTYPE_INNER_L4_UDP;
292 (*p)[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
293 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
294 RTE_PTYPE_INNER_L4_UDP;
295 (*p)[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
296 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
297 RTE_PTYPE_INNER_L4_UDP;
301 * Build a table to translate packet checksum offload flags to Verbs checksum types.
304 mlx5_set_cksum_table(void)
310 * The index should have:
311 * bit[0] = PKT_TX_TCP_SEG
312 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
313 * bit[4] = PKT_TX_IP_CKSUM
314 * bit[8] = PKT_TX_OUTER_IP_CKSUM
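* For example, a plain (non-tunneled) packet requesting both IP and TCP
* checksum offloads sets bit[4] and one of bits[3:2]; the loop below then
* stores MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM for that index.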
317 for (i = 0; i < RTE_DIM(mlx5_cksum_table); ++i) {
320 /* Tunneled packet. */
321 if (i & (1 << 8)) /* Outer IP. */
322 v |= MLX5_ETH_WQE_L3_CSUM;
323 if (i & (1 << 4)) /* Inner IP. */
324 v |= MLX5_ETH_WQE_L3_INNER_CSUM;
325 if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
326 v |= MLX5_ETH_WQE_L4_INNER_CSUM;
329 if (i & (1 << 4)) /* IP. */
330 v |= MLX5_ETH_WQE_L3_CSUM;
331 if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
332 v |= MLX5_ETH_WQE_L4_CSUM;
334 mlx5_cksum_table[i] = v;
339 * Build a table to translate packet type of mbuf to SWP type of Verbs.
342 mlx5_set_swp_types_table(void)
348 * The index should have:
349 * bit[0:1] = PKT_TX_L4_MASK
350 * bit[4] = PKT_TX_IPV6
351 * bit[8] = PKT_TX_OUTER_IPV6
352 * bit[9] = PKT_TX_OUTER_UDP
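* For example, a UDP tunnel over outer IPv6 carrying an inner IPv6/UDP
* packet sets bit[9], bit[8], bit[4] and the UDP value in bit[0:1]; the
* loop below translates it to MLX5_ETH_WQE_L3_OUTER_IPV6 |
* MLX5_ETH_WQE_L4_OUTER_UDP | MLX5_ETH_WQE_L3_INNER_IPV6 |
* MLX5_ETH_WQE_L4_INNER_UDP.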
354 for (i = 0; i < RTE_DIM(mlx5_swp_types_table); ++i) {
357 v |= MLX5_ETH_WQE_L3_OUTER_IPV6;
359 v |= MLX5_ETH_WQE_L4_OUTER_UDP;
361 v |= MLX5_ETH_WQE_L3_INNER_IPV6;
362 if ((i & 3) == (PKT_TX_UDP_CKSUM >> 52))
363 v |= MLX5_ETH_WQE_L4_INNER_UDP;
364 mlx5_swp_types_table[i] = v;
369 * Set Software Parser flags and offsets in Ethernet Segment of WQE.
370 * Flags must be initialized to zero beforehand.
373 * Pointer to burst routine local context.
375 * Pointer to store Software Parser flags
377 * Configured Tx offloads mask. It is fully defined at
378 * compile time and may be used for optimization.
381 * Software Parser offsets packed in dword.
382 * Software Parser flags are set by pointer.
384 static __rte_always_inline uint32_t
385 txq_mbuf_to_swp(struct mlx5_txq_local *__rte_restrict loc,
390 unsigned int idx, off;
393 if (!MLX5_TXOFF_CONFIG(SWP))
395 ol = loc->mbuf->ol_flags;
396 tunnel = ol & PKT_TX_TUNNEL_MASK;
398 * Check whether Software Parser is required.
399 * Only customized tunnels may require it.
401 if (likely(tunnel != PKT_TX_TUNNEL_UDP && tunnel != PKT_TX_TUNNEL_IP))
404 * The index should have:
405 * bit[0:1] = PKT_TX_L4_MASK
406 * bit[4] = PKT_TX_IPV6
407 * bit[8] = PKT_TX_OUTER_IPV6
408 * bit[9] = PKT_TX_OUTER_UDP
410 idx = (ol & (PKT_TX_L4_MASK | PKT_TX_IPV6 | PKT_TX_OUTER_IPV6)) >> 52;
411 idx |= (tunnel == PKT_TX_TUNNEL_UDP) ? (1 << 9) : 0;
412 *swp_flags = mlx5_swp_types_table[idx];
414 * Set offsets for SW parser. Since ConnectX-5, SW parser just
415 * complements HW parser. SW parser starts to engage only if HW parser
416 * can't reach a header. For the older devices, HW parser will not kick
417 * in if any of SWP offsets is set. Therefore, all of the L3 offsets
418 * should be set regardless of HW offload.
420 off = loc->mbuf->outer_l2_len;
421 if (MLX5_TXOFF_CONFIG(VLAN) && ol & PKT_TX_VLAN_PKT)
422 off += sizeof(struct rte_vlan_hdr);
423 set = (off >> 1) << 8; /* Outer L3 offset. */
424 off += loc->mbuf->outer_l3_len;
425 if (tunnel == PKT_TX_TUNNEL_UDP)
426 set |= off >> 1; /* Outer L4 offset. */
427 if (ol & (PKT_TX_IPV4 | PKT_TX_IPV6)) { /* Inner IP. */
428 const uint64_t csum = ol & PKT_TX_L4_MASK;
429 off += loc->mbuf->l2_len;
430 set |= (off >> 1) << 24; /* Inner L3 offset. */
431 if (csum == PKT_TX_TCP_CKSUM ||
432 csum == PKT_TX_UDP_CKSUM ||
433 (MLX5_TXOFF_CONFIG(TSO) && ol & PKT_TX_TCP_SEG)) {
434 off += loc->mbuf->l3_len;
435 set |= (off >> 1) << 16; /* Inner L4 offset. */
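/*
 * The resulting dword packs the offsets in 2-byte units:
 * byte 0 - outer L4, byte 1 - outer L3, byte 2 - inner L4,
 * byte 3 - inner L3 (summary of the assignments above).
 */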
438 set = rte_cpu_to_le_32(set);
443 * Convert the Checksum offloads to Verbs.
446 * Pointer to the mbuf.
449 * Converted checksum flags.
451 static __rte_always_inline uint8_t
452 txq_ol_cksum_to_cs(struct rte_mbuf *buf)
455 uint8_t is_tunnel = !!(buf->ol_flags & PKT_TX_TUNNEL_MASK);
456 const uint64_t ol_flags_mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK |
457 PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM;
460 * The index should have:
461 * bit[0] = PKT_TX_TCP_SEG
462 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
463 * bit[4] = PKT_TX_IP_CKSUM
464 * bit[8] = PKT_TX_OUTER_IP_CKSUM
467 idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (!!is_tunnel << 9);
468 return mlx5_cksum_table[idx];
471 #define MLX5_SYSTEM_LOG_DIR "/var/log"
473 * Dump debug information to log file.
478 * If not NULL this string is printed as a header to the output
479 * and the output will be in hexadecimal view.
481 * This is the buffer address to print out.
483 * The number of bytes to dump out.
486 mlx5_dump_debug_information(const char *fname, const char *hex_title,
487 const void *buf, unsigned int hex_len)
491 MKSTR(path, "%s/%s", MLX5_SYSTEM_LOG_DIR, fname);
492 fd = fopen(path, "a+");
494 DRV_LOG(WARNING, "cannot open %s for debug dump", path);
495 MKSTR(path2, "./%s", fname);
496 fd = fopen(path2, "a+");
498 DRV_LOG(ERR, "cannot open %s for debug dump", path2);
501 DRV_LOG(INFO, "New debug dump in file %s", path2);
503 DRV_LOG(INFO, "New debug dump in file %s", path);
506 rte_hexdump(fd, hex_title, buf, hex_len);
508 fprintf(fd, "%s", (const char *)buf);
509 fprintf(fd, "\n\n\n");
514 * Move QP from error state to running state and initialize indexes.
517 * Pointer to TX queue control structure.
520 * 0 on success, else -1.
523 tx_recover_qp(struct mlx5_txq_ctrl *txq_ctrl)
525 struct mlx5_mp_arg_queue_state_modify sm = {
527 .queue_id = txq_ctrl->txq.idx,
530 if (mlx5_queue_state_modify(ETH_DEV(txq_ctrl->priv), &sm))
532 txq_ctrl->txq.wqe_ci = 0;
533 txq_ctrl->txq.wqe_pi = 0;
534 txq_ctrl->txq.elts_comp = 0;
538 /* Return 1 if the error CQE is already marked as seen, otherwise mark it and return 0. */
540 check_err_cqe_seen(volatile struct mlx5_err_cqe *err_cqe)
542 static const uint8_t magic[] = "seen";
546 for (i = 0; i < sizeof(magic); ++i)
547 if (!ret || err_cqe->rsvd1[i] != magic[i]) {
549 err_cqe->rsvd1[i] = magic[i];
558 * Pointer to TX queue structure.
560 * Pointer to the error CQE.
563 * Negative value if queue recovery failed, otherwise
564 * the error completion entry is handled successfully.
567 mlx5_tx_error_cqe_handle(struct mlx5_txq_data *__rte_restrict txq,
568 volatile struct mlx5_err_cqe *err_cqe)
570 if (err_cqe->syndrome != MLX5_CQE_SYNDROME_WR_FLUSH_ERR) {
571 const uint16_t wqe_m = ((1 << txq->wqe_n) - 1);
572 struct mlx5_txq_ctrl *txq_ctrl =
573 container_of(txq, struct mlx5_txq_ctrl, txq);
574 uint16_t new_wqe_pi = rte_be_to_cpu_16(err_cqe->wqe_counter);
575 int seen = check_err_cqe_seen(err_cqe);
577 if (!seen && txq_ctrl->dump_file_n <
578 txq_ctrl->priv->config.max_dump_files_num) {
579 MKSTR(err_str, "Unexpected CQE error syndrome "
580 "0x%02x CQN = %u SQN = %u wqe_counter = %u "
581 "wq_ci = %u cq_ci = %u", err_cqe->syndrome,
582 txq->cqe_s, txq->qp_num_8s >> 8,
583 rte_be_to_cpu_16(err_cqe->wqe_counter),
584 txq->wqe_ci, txq->cq_ci);
585 MKSTR(name, "dpdk_mlx5_port_%u_txq_%u_index_%u_%u",
586 PORT_ID(txq_ctrl->priv), txq->idx,
587 txq_ctrl->dump_file_n, (uint32_t)rte_rdtsc());
588 mlx5_dump_debug_information(name, NULL, err_str, 0);
589 mlx5_dump_debug_information(name, "MLX5 Error CQ:",
590 (const void *)((uintptr_t)
594 mlx5_dump_debug_information(name, "MLX5 Error SQ:",
595 (const void *)((uintptr_t)
599 txq_ctrl->dump_file_n++;
603 * Count errors in WQE units.
604 * Later it can be improved to count error packets,
605 * for example, by SQ parsing to find how many packets
606 * should be counted for each WQE.
608 txq->stats.oerrors += ((txq->wqe_ci & wqe_m) -
610 if (tx_recover_qp(txq_ctrl)) {
611 /* Recovering failed - retry later on the same WQE. */
614 /* Release all the remaining buffers. */
615 txq_free_elts(txq_ctrl);
621 * Modify a Verbs/DevX queue state.
622 * This must be called from the primary process.
625 * Pointer to Ethernet device.
627 * State modify request parameters.
630 * 0 in case of success else non-zero value and rte_errno is set.
633 mlx5_queue_state_modify_primary(struct rte_eth_dev *dev,
634 const struct mlx5_mp_arg_queue_state_modify *sm)
637 struct mlx5_priv *priv = dev->data->dev_private;
640 struct mlx5_rxq_data *rxq = (*priv->rxqs)[sm->queue_id];
641 struct mlx5_rxq_ctrl *rxq_ctrl =
642 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
644 ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, sm->state);
646 DRV_LOG(ERR, "Cannot change Rx WQ state to %u - %s",
647 sm->state, strerror(errno));
652 struct mlx5_txq_data *txq = (*priv->txqs)[sm->queue_id];
653 struct mlx5_txq_ctrl *txq_ctrl =
654 container_of(txq, struct mlx5_txq_ctrl, txq);
656 ret = priv->obj_ops.txq_obj_modify(txq_ctrl->obj,
657 MLX5_TXQ_MOD_ERR2RDY,
658 (uint8_t)priv->dev_port);
666 * Modify a Verbs queue state.
669 * Pointer to Ethernet device.
671 * State modify request parameters.
674 * 0 in case of success else non-zero value.
677 mlx5_queue_state_modify(struct rte_eth_dev *dev,
678 struct mlx5_mp_arg_queue_state_modify *sm)
680 struct mlx5_priv *priv = dev->data->dev_private;
683 switch (rte_eal_process_type()) {
684 case RTE_PROC_PRIMARY:
685 ret = mlx5_queue_state_modify_primary(dev, sm);
687 case RTE_PROC_SECONDARY:
688 ret = mlx5_mp_req_queue_state_modify(&priv->mp_id, sm);
697 * Dummy DPDK callback for TX.
699 * This function is used to temporarily replace the real callback during
700 * unsafe control operations on the queue, or in case of error.
703 * Generic pointer to TX queue structure.
705 * Packets to transmit.
707 * Number of packets in array.
710 * Number of packets successfully transmitted (<= pkts_n).
713 removed_tx_burst(void *dpdk_txq __rte_unused,
714 struct rte_mbuf **pkts __rte_unused,
715 uint16_t pkts_n __rte_unused)
722 * Free the mbufs from the linear array of pointers.
725 * Pointer to Tx queue structure.
727 * Pointer to the array of packets to be freed.
729 * Number of packets to be freed.
731 * Configured Tx offloads mask. It is fully defined at
732 * compile time and may be used for optimization.
734 static __rte_always_inline void
735 mlx5_tx_free_mbuf(struct mlx5_txq_data *__rte_restrict txq,
736 struct rte_mbuf **__rte_restrict pkts,
738 unsigned int olx __rte_unused)
740 struct rte_mempool *pool = NULL;
741 struct rte_mbuf **p_free = NULL;
742 struct rte_mbuf *mbuf;
743 unsigned int n_free = 0;
746 * The implemented algorithm eliminates
747 * copying pointers to a temporary array
748 * for the rte_mempool_put_bulk() calls.
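* Instead, runs of consecutive mbufs belonging to the same mempool are
* detected directly in the pkts[] array and each run is freed with a
* single rte_mempool_put_bulk() call on that sub-array.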
753 * Free mbufs directly to the pool in bulk
754 * if the fast free offload is engaged.
756 if (!MLX5_TXOFF_CONFIG(MULTI) && txq->fast_free) {
759 rte_mempool_put_bulk(pool, (void *)pkts, pkts_n);
765 * Decrement mbuf reference counter, detach
766 * indirect and external buffers if needed.
768 mbuf = rte_pktmbuf_prefree_seg(*pkts);
769 if (likely(mbuf != NULL)) {
770 MLX5_ASSERT(mbuf == *pkts);
771 if (likely(n_free != 0)) {
772 if (unlikely(pool != mbuf->pool))
773 /* From different pool. */
776 /* Start new scan array. */
783 if (unlikely(pkts_n == 0)) {
789 * This happens if mbuf is still referenced.
790 * We can't put it back to the pool, skip.
794 if (unlikely(n_free != 0))
795 /* There is some array to free. */
797 if (unlikely(pkts_n == 0))
798 /* Last mbuf, nothing to free. */
804 * This loop is implemented to avoid multiple
805 * inlining of rte_mempool_put_bulk().
811 * Free the array of pre-freed mbufs
812 * belonging to the same memory pool.
814 rte_mempool_put_bulk(pool, (void *)p_free, n_free);
815 if (unlikely(mbuf != NULL)) {
816 /* There is the request to start new scan. */
821 if (likely(pkts_n != 0))
824 * This is the last mbuf to be freed.
825 * Do one more loop iteration to complete.
826 * This is a rare case of the last unique mbuf.
831 if (likely(pkts_n == 0))
839 * Not-inlined version of the mbuf free routine, for an optimal call
840 * on the tx_burst completion.
842 static __rte_noinline void
843 __mlx5_tx_free_mbuf(struct mlx5_txq_data *__rte_restrict txq,
844 struct rte_mbuf **__rte_restrict pkts,
846 unsigned int olx __rte_unused)
848 mlx5_tx_free_mbuf(txq, pkts, pkts_n, olx);
852 * Free the mbufs from the elts ring buffer up to the new tail.
855 * Pointer to Tx queue structure.
857 * Index in elts to free up to, becomes new elts tail.
859 * Configured Tx offloads mask. It is fully defined at
860 * compile time and may be used for optimization.
862 static __rte_always_inline void
863 mlx5_tx_free_elts(struct mlx5_txq_data *__rte_restrict txq,
865 unsigned int olx __rte_unused)
867 uint16_t n_elts = tail - txq->elts_tail;
870 MLX5_ASSERT(n_elts <= txq->elts_s);
872 * Implement a loop to support ring buffer wraparound
873 * with single inlining of mlx5_tx_free_mbuf().
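* For example, if the tail is 8 entries before the end of the ring and
* 20 mbufs have to be freed, the first iteration frees the 8 entries up
* to the ring end and the second one frees the remaining 12 from the
* ring beginning.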
878 part = txq->elts_s - (txq->elts_tail & txq->elts_m);
879 part = RTE_MIN(part, n_elts);
881 MLX5_ASSERT(part <= txq->elts_s);
882 mlx5_tx_free_mbuf(txq,
883 &txq->elts[txq->elts_tail & txq->elts_m],
885 txq->elts_tail += part;
891 * Store the mbufs being sent into the elts ring buffer.
892 * On Tx completion these mbufs will be freed.
895 * Pointer to Tx queue structure.
897 * Pointer to array of packets to be stored.
899 * Number of packets to be stored.
901 * Configured Tx offloads mask. It is fully defined at
902 * compile time and may be used for optimization.
904 static __rte_always_inline void
905 mlx5_tx_copy_elts(struct mlx5_txq_data *__rte_restrict txq,
906 struct rte_mbuf **__rte_restrict pkts,
908 unsigned int olx __rte_unused)
911 struct rte_mbuf **elts = (struct rte_mbuf **)txq->elts;
915 part = txq->elts_s - (txq->elts_head & txq->elts_m);
917 MLX5_ASSERT(part <= txq->elts_s);
918 /* This code is a good candidate for vectorizing with SIMD. */
919 rte_memcpy((void *)(elts + (txq->elts_head & txq->elts_m)),
921 RTE_MIN(part, pkts_n) * sizeof(struct rte_mbuf *));
922 txq->elts_head += pkts_n;
923 if (unlikely(part < pkts_n))
924 /* The copy is wrapping around the elts array. */
925 rte_memcpy((void *)elts, (void *)(pkts + part),
926 (pkts_n - part) * sizeof(struct rte_mbuf *));
930 * Update completion queue consuming index via doorbell
931 * and flush the completed data buffers.
934 * Pointer to TX queue structure.
935 * @param last_cqe
936 * Valid CQE pointer; if not NULL, update txq->wqe_pi and flush the buffers.
938 * Configured Tx offloads mask. It is fully defined at
939 * compile time and may be used for optimization.
941 static __rte_always_inline void
942 mlx5_tx_comp_flush(struct mlx5_txq_data *__rte_restrict txq,
943 volatile struct mlx5_cqe *last_cqe,
944 unsigned int olx __rte_unused)
946 if (likely(last_cqe != NULL)) {
949 txq->wqe_pi = rte_be_to_cpu_16(last_cqe->wqe_counter);
950 tail = txq->fcqs[(txq->cq_ci - 1) & txq->cqe_m];
951 if (likely(tail != txq->elts_tail)) {
952 mlx5_tx_free_elts(txq, tail, olx);
953 MLX5_ASSERT(tail == txq->elts_tail);
959 * Manage TX completions. This routine checks the CQ for
960 * arrived CQEs, deduces the last completed WQE in the SQ,
961 * updates the SQ producer index and frees all completed mbufs.
964 * Pointer to TX queue structure.
966 * Configured Tx offloads mask. It is fully defined at
967 * compile time and may be used for optimization.
969 * NOTE: not inlined intentionally, it makes tx_burst
970 * routine smaller, simpler and faster - confirmed by experiments.
973 mlx5_tx_handle_completion(struct mlx5_txq_data *__rte_restrict txq,
974 unsigned int olx __rte_unused)
976 unsigned int count = MLX5_TX_COMP_MAX_CQE;
977 volatile struct mlx5_cqe *last_cqe = NULL;
978 bool ring_doorbell = false;
982 volatile struct mlx5_cqe *cqe;
984 cqe = &txq->cqes[txq->cq_ci & txq->cqe_m];
985 ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci);
986 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
987 if (likely(ret != MLX5_CQE_STATUS_ERR)) {
988 /* No new CQEs in completion queue. */
989 MLX5_ASSERT(ret == MLX5_CQE_STATUS_HW_OWN);
993 * Some error occurred, try to restart.
994 * We have no barrier after the WQE related Doorbell
995 * is written, so make sure all writes are completed
996 * here, before we might perform the SQ reset.
999 ret = mlx5_tx_error_cqe_handle
1000 (txq, (volatile struct mlx5_err_cqe *)cqe);
1001 if (unlikely(ret < 0)) {
1003 * Some error occurred on queue error
1004 * handling, we do not advance the index
1005 * here, allowing to retry on next call.
1010 * We are going to fetch all entries with
1011 * MLX5_CQE_SYNDROME_WR_FLUSH_ERR status.
1012 * The send queue is supposed to be empty.
1014 ring_doorbell = true;
1016 txq->cq_pi = txq->cq_ci;
1020 /* Normal transmit completion. */
1021 MLX5_ASSERT(txq->cq_ci != txq->cq_pi);
1022 #ifdef RTE_LIBRTE_MLX5_DEBUG
1023 MLX5_ASSERT((txq->fcqs[txq->cq_ci & txq->cqe_m] >> 16) ==
1026 ring_doorbell = true;
1030 * We have to restrict the number of processed CQEs
1031 * in one tx_burst routine call. The CQ may be large
1032 * and many CQEs may be updated by the NIC in one
1033 * transaction. Freeing buffers is time consuming,
1034 * multiple iterations may introduce significant
1037 if (likely(--count == 0))
1040 if (likely(ring_doorbell)) {
1041 /* Ring doorbell to notify hardware. */
1042 rte_compiler_barrier();
1043 *txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
1044 mlx5_tx_comp_flush(txq, last_cqe, olx);
1049 * Check if the completion request flag should be set in the last WQE.
1050 * Both pushed mbufs and WQEs are monitored and the completion request
1051 * flag is set if any of the thresholds is reached.
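* The thresholds are MLX5_TX_COMP_THRESH pushed mbufs since the last
* request and, when data inlining is configured, txq->wqe_thres WQEBBs
* since the last request (see the checks below).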
1054 * Pointer to TX queue structure.
1056 * Pointer to burst routine local context.
1058 * Configured Tx offloads mask. It is fully defined at
1059 * compile time and may be used for optimization.
1061 static __rte_always_inline void
1062 mlx5_tx_request_completion(struct mlx5_txq_data *__rte_restrict txq,
1063 struct mlx5_txq_local *__rte_restrict loc,
1066 uint16_t head = txq->elts_head;
1069 part = MLX5_TXOFF_CONFIG(INLINE) ?
1070 0 : loc->pkts_sent - loc->pkts_copy;
1072 if ((uint16_t)(head - txq->elts_comp) >= MLX5_TX_COMP_THRESH ||
1073 (MLX5_TXOFF_CONFIG(INLINE) &&
1074 (uint16_t)(txq->wqe_ci - txq->wqe_comp) >= txq->wqe_thres)) {
1075 volatile struct mlx5_wqe *last = loc->wqe_last;
1078 txq->elts_comp = head;
1079 if (MLX5_TXOFF_CONFIG(INLINE))
1080 txq->wqe_comp = txq->wqe_ci;
1081 /* Request unconditional completion on last WQE. */
1082 last->cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
1083 MLX5_COMP_MODE_OFFSET);
1084 /* Save elts_head in dedicated free on completion queue. */
1085 #ifdef RTE_LIBRTE_MLX5_DEBUG
1086 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head |
1087 (last->cseg.opcode >> 8) << 16;
1089 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head;
1091 /* A CQE slot must always be available. */
1092 MLX5_ASSERT((txq->cq_pi - txq->cq_ci) <= txq->cqe_s);
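/*
 * Each requested completion thus carries a snapshot of elts_head in
 * txq->fcqs[]; when the matching CQE arrives, mlx5_tx_comp_flush()
 * reads it back to learn up to which index the mbufs can be freed.
 */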
1097 * DPDK callback to check the status of a tx descriptor.
1102 * The index of the descriptor in the ring.
1105 * The status of the tx descriptor.
1108 mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
1110 struct mlx5_txq_data *__rte_restrict txq = tx_queue;
1113 mlx5_tx_handle_completion(txq, 0);
1114 used = txq->elts_head - txq->elts_tail;
1116 return RTE_ETH_TX_DESC_FULL;
1117 return RTE_ETH_TX_DESC_DONE;
1121 * Build the Control Segment with specified opcode:
1122 * - MLX5_OPCODE_SEND
1123 * - MLX5_OPCODE_ENHANCED_MPSW
1127 * Pointer to TX queue structure.
1129 * Pointer to burst routine local context.
1131 * Pointer to WQE to fill with built Control Segment.
1133 * Supposed length of WQE in segments.
1135 * SQ WQE opcode to put into Control Segment.
1137 * Configured Tx offloads mask. It is fully defined at
1138 * compile time and may be used for optimization.
1140 static __rte_always_inline void
1141 mlx5_tx_cseg_init(struct mlx5_txq_data *__rte_restrict txq,
1142 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
1143 struct mlx5_wqe *__rte_restrict wqe,
1145 unsigned int opcode,
1146 unsigned int olx __rte_unused)
1148 struct mlx5_wqe_cseg *__rte_restrict cs = &wqe->cseg;
1150 /* For legacy MPW replace the EMPW by TSO with modifier. */
1151 if (MLX5_TXOFF_CONFIG(MPW) && opcode == MLX5_OPCODE_ENHANCED_MPSW)
1152 opcode = MLX5_OPCODE_TSO | MLX5_OPC_MOD_MPW << 24;
1153 cs->opcode = rte_cpu_to_be_32((txq->wqe_ci << 8) | opcode);
1154 cs->sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
1155 cs->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
1156 MLX5_COMP_MODE_OFFSET);
1157 cs->misc = RTE_BE32(0);
1161 * Build the Synchronize Queue Segment with specified completion index.
1164 * Pointer to TX queue structure.
1166 * Pointer to burst routine local context.
1168 * Pointer to WQE to fill with built Control Segment.
1170 * Completion index in Clock Queue to wait.
1172 * Configured Tx offloads mask. It is fully defined at
1173 * compile time and may be used for optimization.
1175 static __rte_always_inline void
1176 mlx5_tx_wseg_init(struct mlx5_txq_data *restrict txq,
1177 struct mlx5_txq_local *restrict loc __rte_unused,
1178 struct mlx5_wqe *restrict wqe,
1180 unsigned int olx __rte_unused)
1182 struct mlx5_wqe_qseg *qs;
1184 qs = RTE_PTR_ADD(wqe, MLX5_WSEG_SIZE);
1185 qs->max_index = rte_cpu_to_be_32(wci);
1186 qs->qpn_cqn = rte_cpu_to_be_32(txq->sh->txpp.clock_queue.cq_obj.cq->id);
1187 qs->reserved0 = RTE_BE32(0);
1188 qs->reserved1 = RTE_BE32(0);
1192 * Build the Ethernet Segment without inlined data.
1193 * Supports Software Parser, Checksums and VLAN
1194 * insertion Tx offload features.
1197 * Pointer to TX queue structure.
1199 * Pointer to burst routine local context.
1201 * Pointer to WQE to fill with built Ethernet Segment.
1203 * Configured Tx offloads mask. It is fully defined at
1204 * compile time and may be used for optimization.
1206 static __rte_always_inline void
1207 mlx5_tx_eseg_none(struct mlx5_txq_data *__rte_restrict txq __rte_unused,
1208 struct mlx5_txq_local *__rte_restrict loc,
1209 struct mlx5_wqe *__rte_restrict wqe,
1212 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
1216 * Calculate and set the checksum flags first, as the dword field
1217 * in the segment may be shared with the Software Parser flags.
1219 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
1220 es->flags = rte_cpu_to_le_32(csum);
1222 * Calculate and set Software Parser offsets and flags.
1223 * These flags are set for custom UDP and IP tunnel packets.
1225 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
1226 /* Fill metadata field if needed. */
1227 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
1228 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
1229 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
1230 /* Engage VLAN tag insertion feature if requested. */
1231 if (MLX5_TXOFF_CONFIG(VLAN) &&
1232 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
1234 * We should get here only if the device supports
1235 * this feature correctly.
1237 MLX5_ASSERT(txq->vlan_en);
1238 es->inline_hdr = rte_cpu_to_be_32(MLX5_ETH_WQE_VLAN_INSERT |
1239 loc->mbuf->vlan_tci);
1241 es->inline_hdr = RTE_BE32(0);
1246 * Build the Ethernet Segment with minimal inlined data
1247 * of MLX5_ESEG_MIN_INLINE_SIZE bytes length. This is
1248 * used to fill the gap in single WQEBB WQEs.
1249 * Supports Software Parser, Checksums and VLAN
1250 * insertion Tx offload features.
1253 * Pointer to TX queue structure.
1255 * Pointer to burst routine local context.
1257 * Pointer to WQE to fill with built Ethernet Segment.
1259 * Length of VLAN tag insertion if any.
1261 * Configured Tx offloads mask. It is fully defined at
1262 * compile time and may be used for optimization.
1264 static __rte_always_inline void
1265 mlx5_tx_eseg_dmin(struct mlx5_txq_data *__rte_restrict txq __rte_unused,
1266 struct mlx5_txq_local *__rte_restrict loc,
1267 struct mlx5_wqe *__rte_restrict wqe,
1271 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
1273 uint8_t *psrc, *pdst;
1276 * Calculate and set the checksum flags first, as the dword field
1277 * in the segment may be shared with the Software Parser flags.
1279 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
1280 es->flags = rte_cpu_to_le_32(csum);
1282 * Calculate and set Software Parser offsets and flags.
1283 * These flags are set for custom UDP and IP tunnel packets.
1285 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
1286 /* Fill metadata field if needed. */
1287 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
1288 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
1289 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
1290 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
1291 es->inline_hdr_sz = RTE_BE16(MLX5_ESEG_MIN_INLINE_SIZE);
1292 es->inline_data = *(unaligned_uint16_t *)psrc;
1293 psrc += sizeof(uint16_t);
1294 pdst = (uint8_t *)(es + 1);
1295 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
1296 /* Implement VLAN tag insertion as part of the inline data. */
1297 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
1298 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
1299 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
1300 /* Insert VLAN ethertype + VLAN tag. */
1301 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
1302 ((RTE_ETHER_TYPE_VLAN << 16) |
1303 loc->mbuf->vlan_tci);
1304 pdst += sizeof(struct rte_vlan_hdr);
1305 /* Copy the remaining two bytes from the packet data. */
1306 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
1307 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
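/*
 * The MLX5_ESEG_MIN_INLINE_SIZE inline bytes now hold the destination
 * and source MAC addresses (split between es->inline_data and the bytes
 * copied above), the inserted VLAN ethertype + TCI and the original
 * ethertype.
 */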
1309 /* Fill the gap in the title WQEBB with inline data. */
1310 rte_mov16(pdst, psrc);
1315 * Build the Ethernet Segment with entire packet
1316 * data inlining. Checks the boundary of WQEBB and
1317 * ring buffer wrapping, supports Software Parser,
1318 * Checksums and VLAN insertion Tx offload features.
1321 * Pointer to TX queue structure.
1323 * Pointer to burst routine local context.
1325 * Pointer to WQE to fill with built Ethernet Segment.
1327 * Length of VLAN tag insertion if any.
1329 * Length of data to inline (VLAN included, if any).
1331 * TSO flag, set mss field from the packet.
1333 * Configured Tx offloads mask. It is fully defined at
1334 * compile time and may be used for optimization.
1337 * Pointer to the next Data Segment (aligned and wrapped around).
1339 static __rte_always_inline struct mlx5_wqe_dseg *
1340 mlx5_tx_eseg_data(struct mlx5_txq_data *__rte_restrict txq,
1341 struct mlx5_txq_local *__rte_restrict loc,
1342 struct mlx5_wqe *__rte_restrict wqe,
1348 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
1350 uint8_t *psrc, *pdst;
1354 * Calculate and set the checksum flags first, as the dword field
1355 * in the segment may be shared with the Software Parser flags.
1357 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
1360 csum |= loc->mbuf->tso_segsz;
1361 es->flags = rte_cpu_to_be_32(csum);
1363 es->flags = rte_cpu_to_le_32(csum);
1366 * Calculate and set Software Parser offsets and flags.
1367 * These flags are set for custom UDP and IP tunnel packets.
1369 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
1370 /* Fill metadata field if needed. */
1371 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
1372 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
1373 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
1374 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
1375 es->inline_hdr_sz = rte_cpu_to_be_16(inlen);
1376 es->inline_data = *(unaligned_uint16_t *)psrc;
1377 psrc += sizeof(uint16_t);
1378 pdst = (uint8_t *)(es + 1);
1379 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
1380 /* Implement VLAN tag insertion as part of the inline data. */
1381 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
1382 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
1383 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
1384 /* Insert VLAN ethertype + VLAN tag. */
1385 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
1386 ((RTE_ETHER_TYPE_VLAN << 16) |
1387 loc->mbuf->vlan_tci);
1388 pdst += sizeof(struct rte_vlan_hdr);
1389 /* Copy the remaining two bytes from the packet data. */
1390 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
1391 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
1392 psrc += sizeof(uint16_t);
1394 /* Fill the gap in the title WQEBB with inline data. */
1395 rte_mov16(pdst, psrc);
1396 psrc += sizeof(rte_v128u32_t);
1398 pdst = (uint8_t *)(es + 2);
1399 MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
1400 MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
1401 inlen -= MLX5_ESEG_MIN_INLINE_SIZE;
1403 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
1404 return (struct mlx5_wqe_dseg *)pdst;
1407 * The WQEBB space availability is checked by caller.
1408 * Here we should be aware of WQE ring buffer wraparound only.
1410 part = (uint8_t *)txq->wqes_end - pdst;
1411 part = RTE_MIN(part, inlen);
1413 rte_memcpy(pdst, psrc, part);
1415 if (likely(!inlen)) {
1417 * If return value is not used by the caller
1418 * the code below will be optimized out.
1421 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
1422 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
1423 pdst = (uint8_t *)txq->wqes;
1424 return (struct mlx5_wqe_dseg *)pdst;
1426 pdst = (uint8_t *)txq->wqes;
1433 * Copy data from a chain of mbufs to the specified linear buffer.
1434 * If the data from some mbuf is copied completely, this mbuf is freed.
1435 * The local structure is used to keep the byte stream state.
1439 * Pointer to the destination linear buffer.
1441 * Pointer to burst routine local context.
1443 * Length of data to be copied.
1445 * Length of data to be copied ignoring no inline hint.
1447 * Configured Tx offloads mask. It is fully defined at
1448 * compile time and may be used for optimization.
1451 * Number of actually copied data bytes. This is always greater than
1452 * or equal to the "must" parameter and might be less than "len" if
1453 * the no-inline hint flag is encountered.
1455 static __rte_always_inline unsigned int
1456 mlx5_tx_mseg_memcpy(uint8_t *pdst,
1457 struct mlx5_txq_local *__rte_restrict loc,
1460 unsigned int olx __rte_unused)
1462 struct rte_mbuf *mbuf;
1463 unsigned int part, dlen, copy = 0;
1467 MLX5_ASSERT(must <= len);
1469 /* Allow zero length packets, must check first. */
1470 dlen = rte_pktmbuf_data_len(loc->mbuf);
1471 if (dlen <= loc->mbuf_off) {
1472 /* Exhausted packet, just free. */
1474 loc->mbuf = mbuf->next;
1475 rte_pktmbuf_free_seg(mbuf);
1477 MLX5_ASSERT(loc->mbuf_nseg > 1);
1478 MLX5_ASSERT(loc->mbuf);
1480 if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
1485 * We already copied the minimal
1486 * requested amount of data.
1491 if (diff <= rte_pktmbuf_data_len(loc->mbuf)) {
1493 * Copy only the minimal required
1494 * part of the data buffer.
1501 dlen -= loc->mbuf_off;
1502 psrc = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
1504 part = RTE_MIN(len, dlen);
1505 rte_memcpy(pdst, psrc, part);
1507 loc->mbuf_off += part;
1510 if (loc->mbuf_off >= rte_pktmbuf_data_len(loc->mbuf)) {
1512 /* Exhausted packet, just free. */
1514 loc->mbuf = mbuf->next;
1515 rte_pktmbuf_free_seg(mbuf);
1517 MLX5_ASSERT(loc->mbuf_nseg >= 1);
1527 * Build the Ethernet Segment with inlined data from
1528 * multi-segment packet. Checks the boundary of WQEBB
1529 * and ring buffer wrapping, supports Software Parser,
1530 * Checksums and VLAN insertion Tx offload features.
1533 * Pointer to TX queue structure.
1535 * Pointer to burst routine local context.
1537 * Pointer to WQE to fill with built Ethernet Segment.
1539 * Length of VLAN tag insertion if any.
1541 * Length of data to inline (VLAN included, if any).
1543 * TSO flag, set mss field from the packet.
1545 * Configured Tx offloads mask. It is fully defined at
1546 * compile time and may be used for optimization.
1549 * Pointer to the next Data Segment (aligned and
1550 * possibly NOT wrapped around - the caller should do
1551 * wrapping check on its own).
1553 static __rte_always_inline struct mlx5_wqe_dseg *
1554 mlx5_tx_eseg_mdat(struct mlx5_txq_data *__rte_restrict txq,
1555 struct mlx5_txq_local *__rte_restrict loc,
1556 struct mlx5_wqe *__rte_restrict wqe,
1562 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
1565 unsigned int part, tlen = 0;
1568 * Calculate and set the checksum flags first, as the uint32_t field
1569 * in the segment may be shared with the Software Parser flags.
1571 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
1574 csum |= loc->mbuf->tso_segsz;
1575 es->flags = rte_cpu_to_be_32(csum);
1577 es->flags = rte_cpu_to_le_32(csum);
1580 * Calculate and set Software Parser offsets and flags.
1581 * These flags are set for custom UDP and IP tunnel packets.
1583 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
1584 /* Fill metadata field if needed. */
1585 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
1586 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
1587 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
1588 MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
1589 pdst = (uint8_t *)&es->inline_data;
1590 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
1591 /* Implement VLAN tag insertion as part of the inline data. */
1592 mlx5_tx_mseg_memcpy(pdst, loc,
1593 2 * RTE_ETHER_ADDR_LEN,
1594 2 * RTE_ETHER_ADDR_LEN, olx);
1595 pdst += 2 * RTE_ETHER_ADDR_LEN;
1596 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
1597 ((RTE_ETHER_TYPE_VLAN << 16) |
1598 loc->mbuf->vlan_tci);
1599 pdst += sizeof(struct rte_vlan_hdr);
1600 tlen += 2 * RTE_ETHER_ADDR_LEN + sizeof(struct rte_vlan_hdr);
1602 MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
1604 * The WQEBB space availability is checked by caller.
1605 * Here we should be aware of WQE ring buffer wraparound only.
1607 part = (uint8_t *)txq->wqes_end - pdst;
1608 part = RTE_MIN(part, inlen - tlen);
1614 * Copying may be interrupted inside the routine
1615 * if the no-inline hint flag is encountered.
1617 copy = tlen >= txq->inlen_mode ? 0 : (txq->inlen_mode - tlen);
1618 copy = mlx5_tx_mseg_memcpy(pdst, loc, part, copy, olx);
1620 if (likely(inlen <= tlen) || copy < part) {
1621 es->inline_hdr_sz = rte_cpu_to_be_16(tlen);
1623 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
1624 return (struct mlx5_wqe_dseg *)pdst;
1626 pdst = (uint8_t *)txq->wqes;
1627 part = inlen - tlen;
1632 * Build the Data Segment of pointer type.
1635 * Pointer to TX queue structure.
1637 * Pointer to burst routine local context.
1639 * Pointer to WQE to fill with built Data Segment.
1641 * Data buffer to point.
1643 * Data buffer length.
1645 * Configured Tx offloads mask. It is fully defined at
1646 * compile time and may be used for optimization.
1648 static __rte_always_inline void
1649 mlx5_tx_dseg_ptr(struct mlx5_txq_data *__rte_restrict txq,
1650 struct mlx5_txq_local *__rte_restrict loc,
1651 struct mlx5_wqe_dseg *__rte_restrict dseg,
1654 unsigned int olx __rte_unused)
1658 dseg->bcount = rte_cpu_to_be_32(len);
1659 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
1660 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
1664 * Build the Data Segment of pointer type, or inline
1665 * the data if its length fits into the minimal
1666 * Data Segment inline size.
1669 * Pointer to TX queue structure.
1671 * Pointer to burst routine local context.
1673 * Pointer to WQE to fill with built Data Segment.
1675 * Data buffer to point.
1677 * Data buffer length.
1679 * Configured Tx offloads mask. It is fully defined at
1680 * compile time and may be used for optimization.
1682 static __rte_always_inline void
1683 mlx5_tx_dseg_iptr(struct mlx5_txq_data *__rte_restrict txq,
1684 struct mlx5_txq_local *__rte_restrict loc,
1685 struct mlx5_wqe_dseg *__rte_restrict dseg,
1688 unsigned int olx __rte_unused)
1694 if (len > MLX5_DSEG_MIN_INLINE_SIZE) {
1695 dseg->bcount = rte_cpu_to_be_32(len);
1696 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
1697 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
1701 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
1702 /* Unrolled implementation of generic rte_memcpy. */
1703 dst = (uintptr_t)&dseg->inline_data[0];
1704 src = (uintptr_t)buf;
1706 #ifdef RTE_ARCH_STRICT_ALIGN
1707 MLX5_ASSERT(dst == RTE_PTR_ALIGN(dst, sizeof(uint32_t)));
1708 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
1709 dst += sizeof(uint32_t);
1710 src += sizeof(uint32_t);
1711 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
1712 dst += sizeof(uint32_t);
1713 src += sizeof(uint32_t);
1715 *(uint64_t *)dst = *(unaligned_uint64_t *)src;
1716 dst += sizeof(uint64_t);
1717 src += sizeof(uint64_t);
1721 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
1722 dst += sizeof(uint32_t);
1723 src += sizeof(uint32_t);
1726 *(uint16_t *)dst = *(unaligned_uint16_t *)src;
1727 dst += sizeof(uint16_t);
1728 src += sizeof(uint16_t);
1731 *(uint8_t *)dst = *(uint8_t *)src;
1735 * Build the Data Segment of inlined data from single
1736 * segment packet, no VLAN insertion.
1739 * Pointer to TX queue structure.
1741 * Pointer to burst routine local context.
1743 * Pointer to WQE to fill with built Data Segment.
1745 * Data buffer to point.
1747 * Data buffer length.
1749 * Configured Tx offloads mask. It is fully defined at
1750 * compile time and may be used for optimization.
1753 * Pointer to the next Data Segment after inlined data.
1754 * Ring buffer wraparound check is needed. We do not
1755 * do it here because it may not be needed for the
1756 * last packet in the eMPW session.
1758 static __rte_always_inline struct mlx5_wqe_dseg *
1759 mlx5_tx_dseg_empw(struct mlx5_txq_data *__rte_restrict txq,
1760 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
1761 struct mlx5_wqe_dseg *__rte_restrict dseg,
1764 unsigned int olx __rte_unused)
1769 if (!MLX5_TXOFF_CONFIG(MPW)) {
1770 /* Store the descriptor byte counter for eMPW sessions. */
1771 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
1772 pdst = &dseg->inline_data[0];
1774 /* The entire legacy MPW session counter is stored on close. */
1775 pdst = (uint8_t *)dseg;
1778 * The WQEBB space availability is checked by caller.
1779 * Here we should be aware of WQE ring buffer wraparound only.
1781 part = (uint8_t *)txq->wqes_end - pdst;
1782 part = RTE_MIN(part, len);
1784 rte_memcpy(pdst, buf, part);
1788 if (!MLX5_TXOFF_CONFIG(MPW))
1789 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
1790 /* Note: no final wraparound check here. */
1791 return (struct mlx5_wqe_dseg *)pdst;
1793 pdst = (uint8_t *)txq->wqes;
1800 * Build the Data Segment of inlined data from single
1801 * segment packet with VLAN insertion.
1804 * Pointer to TX queue structure.
1806 * Pointer to burst routine local context.
1808 * Pointer to the dseg to fill with the built Data Segment.
1810 * Data buffer to point.
1812 * Data buffer length.
1814 * Configured Tx offloads mask. It is fully defined at
1815 * compile time and may be used for optimization.
1818 * Pointer to the next Data Segment after inlined data.
1819 * Ring buffer wraparound check is needed.
1821 static __rte_always_inline struct mlx5_wqe_dseg *
1822 mlx5_tx_dseg_vlan(struct mlx5_txq_data *__rte_restrict txq,
1823 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
1824 struct mlx5_wqe_dseg *__rte_restrict dseg,
1827 unsigned int olx __rte_unused)
1833 MLX5_ASSERT(len > MLX5_ESEG_MIN_INLINE_SIZE);
1834 if (!MLX5_TXOFF_CONFIG(MPW)) {
1835 /* Store the descriptor byte counter for eMPW sessions. */
1836 dseg->bcount = rte_cpu_to_be_32
1837 ((len + sizeof(struct rte_vlan_hdr)) |
1838 MLX5_ETH_WQE_DATA_INLINE);
1839 pdst = &dseg->inline_data[0];
1841 /* The entire legacy MPW session counter is stored on close. */
1842 pdst = (uint8_t *)dseg;
1844 memcpy(pdst, buf, MLX5_DSEG_MIN_INLINE_SIZE);
1845 buf += MLX5_DSEG_MIN_INLINE_SIZE;
1846 pdst += MLX5_DSEG_MIN_INLINE_SIZE;
1847 len -= MLX5_DSEG_MIN_INLINE_SIZE;
1848 /* Insert VLAN ethertype + VLAN tag. Pointer is aligned. */
1849 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
1850 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
1851 pdst = (uint8_t *)txq->wqes;
1852 *(uint32_t *)pdst = rte_cpu_to_be_32((RTE_ETHER_TYPE_VLAN << 16) |
1853 loc->mbuf->vlan_tci);
1854 pdst += sizeof(struct rte_vlan_hdr);
1856 * The WQEBB space availability is checked by caller.
1857 * Here we should be aware of WQE ring buffer wraparound only.
1859 part = (uint8_t *)txq->wqes_end - pdst;
1860 part = RTE_MIN(part, len);
1862 rte_memcpy(pdst, buf, part);
1866 if (!MLX5_TXOFF_CONFIG(MPW))
1867 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
1868 /* Note: no final wraparound check here. */
1869 return (struct mlx5_wqe_dseg *)pdst;
1871 pdst = (uint8_t *)txq->wqes;
1878 * Build the Ethernet Segment with optionally inlined data with
1879 * VLAN insertion and following Data Segments (if any) from
1880 * multi-segment packet. Used by ordinary send and TSO.
1883 * Pointer to TX queue structure.
1885 * Pointer to burst routine local context.
1887 * Pointer to WQE to fill with built Ethernet/Data Segments.
1889 * Length of VLAN header to insert, 0 means no VLAN insertion.
1891 * Data length to inline. For TSO this parameter specifies
1892 * the exact value; for the ordinary send routine it can be aligned
1893 * by the caller to provide better WQE space saving and data buffer
1894 * start address alignment. This length includes the VLAN header
1897 * Zero means ordinary send, inlined data can be extended,
1898 * otherwise this is TSO, inlined data length is fixed.
1900 * Configured Tx offloads mask. It is fully defined at
1901 * compile time and may be used for optimization.
1904 * Actual size of built WQE in segments.
1906 static __rte_always_inline unsigned int
1907 mlx5_tx_mseg_build(struct mlx5_txq_data *__rte_restrict txq,
1908 struct mlx5_txq_local *__rte_restrict loc,
1909 struct mlx5_wqe *__rte_restrict wqe,
1913 unsigned int olx __rte_unused)
1915 struct mlx5_wqe_dseg *__rte_restrict dseg;
1918 MLX5_ASSERT((rte_pktmbuf_pkt_len(loc->mbuf) + vlan) >= inlen);
1919 loc->mbuf_nseg = NB_SEGS(loc->mbuf);
1922 dseg = mlx5_tx_eseg_mdat(txq, loc, wqe, vlan, inlen, tso, olx);
1923 if (!loc->mbuf_nseg)
1926 * There are still some mbufs remaining, not inlined.
1927 * The first mbuf may be partially inlined and we
1928 * must process the possible non-zero data offset.
1930 if (loc->mbuf_off) {
1935 * Exhausted packets must have been dropped before.
1936 * A non-zero offset means there is some data
1937 * remaining in the packet.
1939 MLX5_ASSERT(loc->mbuf_off < rte_pktmbuf_data_len(loc->mbuf));
1940 MLX5_ASSERT(rte_pktmbuf_data_len(loc->mbuf));
1941 dptr = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
1943 dlen = rte_pktmbuf_data_len(loc->mbuf) - loc->mbuf_off;
1945 * Build the pointer/minimal data Data Segment.
1946 * Do ring buffer wrapping check in advance.
1948 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
1949 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
1950 mlx5_tx_dseg_iptr(txq, loc, dseg, dptr, dlen, olx);
1951 /* Store the mbuf to be freed on completion. */
1952 MLX5_ASSERT(loc->elts_free);
1953 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
1956 if (--loc->mbuf_nseg == 0)
1958 loc->mbuf = loc->mbuf->next;
1962 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
1963 struct rte_mbuf *mbuf;
1965 /* Zero length segment found, just skip. */
1967 loc->mbuf = loc->mbuf->next;
1968 rte_pktmbuf_free_seg(mbuf);
1969 if (--loc->mbuf_nseg == 0)
1972 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
1973 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
1976 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
1977 rte_pktmbuf_data_len(loc->mbuf), olx);
1978 MLX5_ASSERT(loc->elts_free);
1979 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
1982 if (--loc->mbuf_nseg == 0)
1984 loc->mbuf = loc->mbuf->next;
1989 /* Calculate actual segments used from the dseg pointer. */
1990 if ((uintptr_t)wqe < (uintptr_t)dseg)
1991 ds = ((uintptr_t)dseg - (uintptr_t)wqe) / MLX5_WSEG_SIZE;
1993 ds = (((uintptr_t)dseg - (uintptr_t)wqe) +
1994 txq->wqe_s * MLX5_WQE_SIZE) / MLX5_WSEG_SIZE;
1999 * The routine checks the timestamp flag in the current packet,
2000 * and pushes a WAIT WQE into the queue if scheduling is required.
2003 * Pointer to TX queue structure.
2005 * Pointer to burst routine local context.
2007 * Configured Tx offloads mask. It is fully defined at
2008 * compile time and may be used for optimization.
2011 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
2012 * MLX5_TXCMP_CODE_SINGLE - continue processing with the packet.
2013 * MLX5_TXCMP_CODE_MULTI - the WAIT inserted, continue processing.
2014 * Local context variables partially updated.
2016 static __rte_always_inline enum mlx5_txcmp_code
2017 mlx5_tx_schedule_send(struct mlx5_txq_data *restrict txq,
2018 struct mlx5_txq_local *restrict loc,
2021 if (MLX5_TXOFF_CONFIG(TXPP) &&
2022 loc->mbuf->ol_flags & txq->ts_mask) {
2023 struct mlx5_wqe *wqe;
2028 * Estimate the required space quickly and roughly.
2029 * We would like to ensure the packet can be pushed
2030 * to the queue and we won't get an orphan WAIT WQE.
2032 if (loc->wqe_free <= MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE ||
2033 loc->elts_free < NB_SEGS(loc->mbuf))
2034 return MLX5_TXCMP_CODE_EXIT;
2035 /* Convert the timestamp into completion to wait. */
2036 ts = *RTE_MBUF_DYNFIELD(loc->mbuf, txq->ts_offset, uint64_t *);
2037 wci = mlx5_txpp_convert_tx_ts(txq->sh, ts);
2038 if (unlikely(wci < 0))
2039 return MLX5_TXCMP_CODE_SINGLE;
2040 /* Build the WAIT WQE with specified completion. */
2041 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
2042 mlx5_tx_cseg_init(txq, loc, wqe, 2, MLX5_OPCODE_WAIT, olx);
2043 mlx5_tx_wseg_init(txq, loc, wqe, wci, olx);
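/*
 * The WAIT WQE built above consists of a Control Segment with the
 * MLX5_OPCODE_WAIT opcode and a Queue Segment carrying the Clock Queue
 * completion index (wci) the hardware has to wait for.
 */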
2046 return MLX5_TXCMP_CODE_MULTI;
2048 return MLX5_TXCMP_CODE_SINGLE;
2052 * Tx one packet function for multi-segment TSO. Supports all
2053 * types of Tx offloads, uses MLX5_OPCODE_TSO to build WQEs,
2054 * sends one packet per WQE.
2056 * This routine is responsible for storing the processed mbuf
2057 * into the elts ring buffer and updating elts_head.
2060 * Pointer to TX queue structure.
2062 * Pointer to burst routine local context.
2064 * Configured Tx offloads mask. It is fully defined at
2065 * compile time and may be used for optimization.
2068 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
2069 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
2070 * Local context variables partially updated.
2072 static __rte_always_inline enum mlx5_txcmp_code
2073 mlx5_tx_packet_multi_tso(struct mlx5_txq_data *__rte_restrict txq,
2074 struct mlx5_txq_local *__rte_restrict loc,
2077 struct mlx5_wqe *__rte_restrict wqe;
2078 unsigned int ds, dlen, inlen, ntcp, vlan = 0;
2080 if (MLX5_TXOFF_CONFIG(TXPP)) {
2081 enum mlx5_txcmp_code wret;
2083 /* Generate WAIT for scheduling if requested. */
2084 wret = mlx5_tx_schedule_send(txq, loc, olx);
2085 if (wret == MLX5_TXCMP_CODE_EXIT)
2086 return MLX5_TXCMP_CODE_EXIT;
2087 if (wret == MLX5_TXCMP_CODE_ERROR)
2088 return MLX5_TXCMP_CODE_ERROR;
2091 * Calculate data length to be inlined to estimate
2092 * the required space in WQE ring buffer.
2094 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
2095 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
2096 vlan = sizeof(struct rte_vlan_hdr);
2097 inlen = loc->mbuf->l2_len + vlan +
2098 loc->mbuf->l3_len + loc->mbuf->l4_len;
2099 if (unlikely((!inlen || !loc->mbuf->tso_segsz)))
2100 return MLX5_TXCMP_CODE_ERROR;
2101 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
2102 inlen += loc->mbuf->outer_l2_len + loc->mbuf->outer_l3_len;
2103 /* Packet must contain all TSO headers. */
2104 if (unlikely(inlen > MLX5_MAX_TSO_HEADER ||
2105 inlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
2106 inlen > (dlen + vlan)))
2107 return MLX5_TXCMP_CODE_ERROR;
2108 MLX5_ASSERT(inlen >= txq->inlen_mode);
2110 * Check whether there are enough free WQEBBs:
2112 * - Ethernet Segment
2113 * - First Segment of inlined Ethernet data
2114 * - ... data continued ...
2115 * - Data Segments of pointer/min inline type
2117 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
2118 MLX5_ESEG_MIN_INLINE_SIZE +
2120 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
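/*
 * ds counts 16-byte WSEGs: the Control Segment, the Ethernet
 * Segment (already carrying MLX5_ESEG_MIN_INLINE_SIZE bytes of
 * the inlined headers), the remaining inlined header bytes
 * rounded up to whole WSEGs, and one pointer Data Segment per
 * mbuf segment. For example, a 2-segment mbuf with 54 bytes of
 * TSO headers needs 2 + 2 + (54 - 18 + 15) / 16 = 7 WSEGs,
 * i.e. two WQEBBs after the (ds + 3) / 4 conversion below.
 */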
2121 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
2122 return MLX5_TXCMP_CODE_EXIT;
2123 /* Check for maximal WQE size. */
2124 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
2125 return MLX5_TXCMP_CODE_ERROR;
2126 #ifdef MLX5_PMD_SOFT_COUNTERS
2127 /* Update sent data bytes/packets counters. */
2128 ntcp = (dlen - (inlen - vlan) + loc->mbuf->tso_segsz - 1) /
2129 loc->mbuf->tso_segsz;
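/*
 * ntcp is the number of TCP segments the NIC will produce for
 * this packet: the payload length (total length minus the TSO
 * headers) divided by tso_segsz (the MSS), rounded up. The byte
 * counter below adds the header bytes once per produced segment
 * (ntcp * inlen).
 */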
2131 * One will be added for mbuf itself
2132 * at the end of the mlx5_tx_burst from
2133 * loc->pkts_sent field.
2136 txq->stats.opackets += ntcp;
2137 txq->stats.obytes += dlen + vlan + ntcp * inlen;
2139 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
2140 loc->wqe_last = wqe;
2141 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_TSO, olx);
2142 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 1, olx);
2143 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
2144 txq->wqe_ci += (ds + 3) / 4;
2145 loc->wqe_free -= (ds + 3) / 4;
2146 return MLX5_TXCMP_CODE_MULTI;
2150 * Tx one packet function for multi-segment SEND. Supports all
2151 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
2152 * sends one packet per WQE, without any data inlining in the Ethernet Segment.
2155 * This routine is responsible for storing the processed mbuf
2156 * into the elts ring buffer and updating elts_head.
2159 * Pointer to TX queue structure.
2161 * Pointer to burst routine local context.
2163 * Configured Tx offloads mask. It is fully defined at
2164 * compile time and may be used for optimization.
2167 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
2168 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
2169 * Local context variables partially updated.
2171 static __rte_always_inline enum mlx5_txcmp_code
2172 mlx5_tx_packet_multi_send(struct mlx5_txq_data *__rte_restrict txq,
2173 struct mlx5_txq_local *__rte_restrict loc,
2176 struct mlx5_wqe_dseg *__rte_restrict dseg;
2177 struct mlx5_wqe *__rte_restrict wqe;
2178 unsigned int ds, nseg;
2180 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
2181 if (MLX5_TXOFF_CONFIG(TXPP)) {
2182 enum mlx5_txcmp_code wret;
2184 /* Generate WAIT for scheduling if requested. */
2185 wret = mlx5_tx_schedule_send(txq, loc, olx);
2186 if (wret == MLX5_TXCMP_CODE_EXIT)
2187 return MLX5_TXCMP_CODE_EXIT;
2188 if (wret == MLX5_TXCMP_CODE_ERROR)
2189 return MLX5_TXCMP_CODE_ERROR;
2192 * No inlining at all: saving CPU cycles is prioritized
2193 * by the configuration, so we should not copy any
2194 * packet data into the WQE.
2196 nseg = NB_SEGS(loc->mbuf);
2198 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
2199 return MLX5_TXCMP_CODE_EXIT;
2200 /* Check for maximal WQE size. */
2201 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
2202 return MLX5_TXCMP_CODE_ERROR;
2204 * Some Tx offloads may cause an error if
2205 * packet is not long enough, check against
2206 * assumed minimal length.
2208 if (rte_pktmbuf_pkt_len(loc->mbuf) <= MLX5_ESEG_MIN_INLINE_SIZE)
2209 return MLX5_TXCMP_CODE_ERROR;
2210 #ifdef MLX5_PMD_SOFT_COUNTERS
2211 /* Update sent data bytes counter. */
2212 txq->stats.obytes += rte_pktmbuf_pkt_len(loc->mbuf);
2213 if (MLX5_TXOFF_CONFIG(VLAN) &&
2214 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
2215 txq->stats.obytes += sizeof(struct rte_vlan_hdr);
2218 * SEND WQE, one WQEBB:
2219 * - Control Segment, SEND opcode
2220 * - Ethernet Segment, optional VLAN, no inline
2221 * - Data Segments, pointer only type
2223 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
2224 loc->wqe_last = wqe;
2225 mlx5_tx_cseg_init(txq, loc, wqe, ds, MLX5_OPCODE_SEND, olx);
2226 mlx5_tx_eseg_none(txq, loc, wqe, olx);
2227 dseg = &wqe->dseg[0];
2229 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
2230 struct rte_mbuf *mbuf;
2233 * Zero-length segment found, we have to correct
2234 * the total size of the WQE in segments.
2235 * This is supposed to be a rare occasion, so in
2236 * the normal case (no zero-length segments) we
2237 * avoid the extra write to the Control Segment.
2241 wqe->cseg.sq_ds -= RTE_BE32(1);
2243 loc->mbuf = mbuf->next;
2244 rte_pktmbuf_free_seg(mbuf);
2250 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
2251 rte_pktmbuf_data_len(loc->mbuf), olx);
2252 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
2257 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
2258 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
2259 loc->mbuf = loc->mbuf->next;
2262 txq->wqe_ci += (ds + 3) / 4;
2263 loc->wqe_free -= (ds + 3) / 4;
2264 return MLX5_TXCMP_CODE_MULTI;
2268 * Tx one packet function for multi-segment SEND. Supports all
2269 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
2270 * sends one packet per WQE, with data inlining in
2271 * Ethernet Segment and minimal Data Segments.
2273 * This routine is responsible for storing the processed mbuf
2274 * into the elts ring buffer and updating elts_head.
2277 * Pointer to TX queue structure.
2279 * Pointer to burst routine local context.
2281 * Configured Tx offloads mask. It is fully defined at
2282 * compile time and may be used for optimization.
2285 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
2286 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
2287 * Local context variables partially updated.
2289 static __rte_always_inline enum mlx5_txcmp_code
2290 mlx5_tx_packet_multi_inline(struct mlx5_txq_data *__rte_restrict txq,
2291 struct mlx5_txq_local *__rte_restrict loc,
2294 struct mlx5_wqe *__rte_restrict wqe;
2295 unsigned int ds, inlen, dlen, vlan = 0;
2297 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
2298 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
2299 if (MLX5_TXOFF_CONFIG(TXPP)) {
2300 enum mlx5_txcmp_code wret;
2302 /* Generate WAIT for scheduling if requested. */
2303 wret = mlx5_tx_schedule_send(txq, loc, olx);
2304 if (wret == MLX5_TXCMP_CODE_EXIT)
2305 return MLX5_TXCMP_CODE_EXIT;
2306 if (wret == MLX5_TXCMP_CODE_ERROR)
2307 return MLX5_TXCMP_CODE_ERROR;
2310 * First calculate data length to be inlined
2311 * to estimate the required space for WQE.
2313 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
2314 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
2315 vlan = sizeof(struct rte_vlan_hdr);
2316 inlen = dlen + vlan;
2317 /* Check against minimal length. */
2318 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
2319 return MLX5_TXCMP_CODE_ERROR;
2320 MLX5_ASSERT(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
2321 if (inlen > txq->inlen_send ||
2322 loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
2323 struct rte_mbuf *mbuf;
2328 * Packet length exceeds the allowed inline
2329 * data length, check whether the minimal
2330 * inlining is required.
2332 if (txq->inlen_mode) {
2333 MLX5_ASSERT(txq->inlen_mode >=
2334 MLX5_ESEG_MIN_INLINE_SIZE);
2335 MLX5_ASSERT(txq->inlen_mode <= txq->inlen_send);
2336 inlen = txq->inlen_mode;
2338 if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE ||
2339 !vlan || txq->vlan_en) {
2341 * VLAN insertion will be done by the HW.
2342 * It is not the most efficient path - the VLAN
2343 * flag is checked twice, but we have to compute
2344 * the inlining length correctly and take into
2345 * account the VLAN header being inserted.
2347 return mlx5_tx_packet_multi_send
2350 inlen = MLX5_ESEG_MIN_INLINE_SIZE;
2353 * Now we know the minimal amount of data requested
2354 * to be inlined. Check whether we should inline the
2355 * buffers from the beginning of the chain to eliminate some mbufs.
2358 nxlen = rte_pktmbuf_data_len(mbuf);
2359 if (unlikely(nxlen <= txq->inlen_send)) {
2360 /* We can inline first mbuf at least. */
2361 if (nxlen < inlen) {
2364 /* Scan mbufs till inlen filled. */
2369 nxlen = rte_pktmbuf_data_len(mbuf);
2371 } while (unlikely(nxlen < inlen));
2372 if (unlikely(nxlen > txq->inlen_send)) {
2373 /* We cannot inline entire mbuf. */
2374 smlen = inlen - smlen;
2375 start = rte_pktmbuf_mtod_offset
2376 (mbuf, uintptr_t, smlen);
2383 /* There should be no end of the packet here. */
2385 nxlen = inlen + rte_pktmbuf_data_len(mbuf);
2386 } while (unlikely(nxlen < txq->inlen_send));
2388 start = rte_pktmbuf_mtod(mbuf, uintptr_t);
2390 * Check whether we can inline enough data to align the start
2391 * address of the data buffer to a cacheline.
2394 start = (~start + 1) & (RTE_CACHE_LINE_SIZE - 1);
2395 if (unlikely(start)) {
2397 if (start <= txq->inlen_send)
2402 * Check whether there are enough free WQEBBs:
2404 * - Ethernet Segment
2405 * - First Segment of inlined Ethernet data
2406 * - ... data continued ...
2407 * - Data Segments of pointer/min inline type
2409 * Estimate the number of Data Segments conservatively,
2410 * supposing no mbufs are freed during inlining.
2412 MLX5_ASSERT(inlen <= txq->inlen_send);
2413 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
2414 MLX5_ESEG_MIN_INLINE_SIZE +
2416 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
2417 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
2418 return MLX5_TXCMP_CODE_EXIT;
2419 /* Check for maximal WQE size. */
2420 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
2421 return MLX5_TXCMP_CODE_ERROR;
2422 #ifdef MLX5_PMD_SOFT_COUNTERS
2423 /* Update sent data bytes/packets counters. */
2424 txq->stats.obytes += dlen + vlan;
2426 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
2427 loc->wqe_last = wqe;
2428 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_SEND, olx);
2429 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 0, olx);
2430 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
2431 txq->wqe_ci += (ds + 3) / 4;
2432 loc->wqe_free -= (ds + 3) / 4;
2433 return MLX5_TXCMP_CODE_MULTI;
2437 * Tx burst function for multi-segment packets. Supports all
2438 * types of Tx offloads, uses MLX5_OPCODE_SEND/TSO to build WQEs,
2439 * sends one packet per WQE. Function stops sending if it
2440 * encounters a single-segment packet.
2442 * This routine is responsible for storing the processed mbuf
2443 * into the elts ring buffer and updating elts_head.
2446 * Pointer to TX queue structure.
2448 * Packets to transmit.
2450 * Number of packets in array.
2452 * Pointer to burst routine local context.
2454 * Configured Tx offloads mask. It is fully defined at
2455 * compile time and may be used for optimization.
2458 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
2459 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
2460 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
2461 * MLX5_TXCMP_CODE_TSO - TSO single-segment packet encountered.
2462 * Local context variables updated.
2464 static __rte_always_inline enum mlx5_txcmp_code
2465 mlx5_tx_burst_mseg(struct mlx5_txq_data *__rte_restrict txq,
2466 struct rte_mbuf **__rte_restrict pkts,
2467 unsigned int pkts_n,
2468 struct mlx5_txq_local *__rte_restrict loc,
2471 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
2472 MLX5_ASSERT(pkts_n > loc->pkts_sent);
2473 pkts += loc->pkts_sent + 1;
2474 pkts_n -= loc->pkts_sent;
2476 enum mlx5_txcmp_code ret;
2478 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
2480 * Estimate the number of free elts quickly but
2481 * conservatively. Some segment may be fully inlined
2482 * and freed, ignore this here - precise estimation is costly.
2485 if (loc->elts_free < NB_SEGS(loc->mbuf))
2486 return MLX5_TXCMP_CODE_EXIT;
2487 if (MLX5_TXOFF_CONFIG(TSO) &&
2488 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)) {
2489 /* Proceed with multi-segment TSO. */
2490 ret = mlx5_tx_packet_multi_tso(txq, loc, olx);
2491 } else if (MLX5_TXOFF_CONFIG(INLINE)) {
2492 /* Proceed with multi-segment SEND with inlining. */
2493 ret = mlx5_tx_packet_multi_inline(txq, loc, olx);
2495 /* Proceed with multi-segment SEND w/o inlining. */
2496 ret = mlx5_tx_packet_multi_send(txq, loc, olx);
2498 if (ret == MLX5_TXCMP_CODE_EXIT)
2499 return MLX5_TXCMP_CODE_EXIT;
2500 if (ret == MLX5_TXCMP_CODE_ERROR)
2501 return MLX5_TXCMP_CODE_ERROR;
2502 /* WQE is built, go to the next packet. */
2505 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
2506 return MLX5_TXCMP_CODE_EXIT;
2507 loc->mbuf = *pkts++;
2509 rte_prefetch0(*pkts);
2510 if (likely(NB_SEGS(loc->mbuf) > 1))
2512 /* Here ends the series of multi-segment packets. */
2513 if (MLX5_TXOFF_CONFIG(TSO) &&
2514 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
2515 return MLX5_TXCMP_CODE_TSO;
2516 return MLX5_TXCMP_CODE_SINGLE;
2522 * Tx burst function for single-segment packets with TSO.
2523 * Supports all types of Tx offloads, except multi-packets.
2524 * Uses MLX5_OPCODE_TSO to build WQEs, sends one packet per WQE.
2525 * Function stops sending if it encounters a multi-segment
2526 * packet or a packet without TSO requested.
2528 * The routine is responsible for storing the processed mbuf
2529 * into the elts ring buffer and updating elts_head if inline
2530 * offload is requested, due to possible early freeing
2531 * of the inlined mbufs (the pkts array cannot be stored in elts as a batch).
2535 * Pointer to TX queue structure.
2537 * Packets to transmit.
2539 * Number of packets in array.
2541 * Pointer to burst routine local context.
2543 * Configured Tx offloads mask. It is fully defined at
2544 * compile time and may be used for optimization.
2547 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
2548 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
2549 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
2550 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
2551 * Local context variables updated.
2553 static __rte_always_inline enum mlx5_txcmp_code
2554 mlx5_tx_burst_tso(struct mlx5_txq_data *__rte_restrict txq,
2555 struct rte_mbuf **__rte_restrict pkts,
2556 unsigned int pkts_n,
2557 struct mlx5_txq_local *__rte_restrict loc,
2560 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
2561 MLX5_ASSERT(pkts_n > loc->pkts_sent);
2562 pkts += loc->pkts_sent + 1;
2563 pkts_n -= loc->pkts_sent;
2565 struct mlx5_wqe_dseg *__rte_restrict dseg;
2566 struct mlx5_wqe *__rte_restrict wqe;
2567 unsigned int ds, dlen, hlen, ntcp, vlan = 0;
2570 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
2571 if (MLX5_TXOFF_CONFIG(TXPP)) {
2572 enum mlx5_txcmp_code wret;
2574 /* Generate WAIT for scheduling if requested. */
2575 wret = mlx5_tx_schedule_send(txq, loc, olx);
2576 if (wret == MLX5_TXCMP_CODE_EXIT)
2577 return MLX5_TXCMP_CODE_EXIT;
2578 if (wret == MLX5_TXCMP_CODE_ERROR)
2579 return MLX5_TXCMP_CODE_ERROR;
2581 dlen = rte_pktmbuf_data_len(loc->mbuf);
2582 if (MLX5_TXOFF_CONFIG(VLAN) &&
2583 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
2584 vlan = sizeof(struct rte_vlan_hdr);
2587 * First calculate the WQE size to check
2588 * whether we have enough space in ring buffer.
2590 hlen = loc->mbuf->l2_len + vlan +
2591 loc->mbuf->l3_len + loc->mbuf->l4_len;
2592 if (unlikely((!hlen || !loc->mbuf->tso_segsz)))
2593 return MLX5_TXCMP_CODE_ERROR;
2594 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
2595 hlen += loc->mbuf->outer_l2_len +
2596 loc->mbuf->outer_l3_len;
2597 /* Segment must contain all TSO headers. */
2598 if (unlikely(hlen > MLX5_MAX_TSO_HEADER ||
2599 hlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
2600 hlen > (dlen + vlan)))
2601 return MLX5_TXCMP_CODE_ERROR;
2603 * Check whether there are enough free WQEBBs:
2605 * - Ethernet Segment
2606 * - First Segment of inlined Ethernet data
2607 * - ... data continued ...
2608 * - Finishing Data Segment of pointer type
2610 ds = 4 + (hlen - MLX5_ESEG_MIN_INLINE_SIZE +
2611 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
2612 if (loc->wqe_free < ((ds + 3) / 4))
2613 return MLX5_TXCMP_CODE_EXIT;
2614 #ifdef MLX5_PMD_SOFT_COUNTERS
2615 /* Update sent data bytes/packets counters. */
2616 ntcp = (dlen + vlan - hlen +
2617 loc->mbuf->tso_segsz - 1) /
2618 loc->mbuf->tso_segsz;
2620 * One will be added for mbuf itself at the end
2621 * of the mlx5_tx_burst from loc->pkts_sent field.
2624 txq->stats.opackets += ntcp;
2625 txq->stats.obytes += dlen + vlan + ntcp * hlen;
2628 * Build the TSO WQE:
2630 * - Ethernet Segment with hlen bytes inlined
2631 * - Data Segment of pointer type
2633 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
2634 loc->wqe_last = wqe;
2635 mlx5_tx_cseg_init(txq, loc, wqe, ds,
2636 MLX5_OPCODE_TSO, olx);
2637 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan, hlen, 1, olx);
2638 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) + hlen - vlan;
2639 dlen -= hlen - vlan;
2640 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
2642 * WQE is built, update the loop parameters
2643 * and go to the next packet.
2645 txq->wqe_ci += (ds + 3) / 4;
2646 loc->wqe_free -= (ds + 3) / 4;
2647 if (MLX5_TXOFF_CONFIG(INLINE))
2648 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
2652 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
2653 return MLX5_TXCMP_CODE_EXIT;
2654 loc->mbuf = *pkts++;
2656 rte_prefetch0(*pkts);
2657 if (MLX5_TXOFF_CONFIG(MULTI) &&
2658 unlikely(NB_SEGS(loc->mbuf) > 1))
2659 return MLX5_TXCMP_CODE_MULTI;
2660 if (likely(!(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)))
2661 return MLX5_TXCMP_CODE_SINGLE;
2662 /* Continue with the next TSO packet. */
2668 * Analyze the packet and select the best method to send.
2671 * Pointer to TX queue structure.
2673 * Pointer to burst routine local context.
2675 * Configured Tx offloads mask. It is fully defined at
2676 * compile time and may be used for optimization.
2678 * The predefined flag whether to do a complete check for
2679 * multi-segment packets and TSO.
2682 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
2683 * MLX5_TXCMP_CODE_TSO - TSO required, use TSO/LSO.
2684 * MLX5_TXCMP_CODE_SINGLE - single-segment packet, use SEND.
2685 * MLX5_TXCMP_CODE_EMPW - single-segment packet, use MPW.
2687 static __rte_always_inline enum mlx5_txcmp_code
2688 mlx5_tx_able_to_empw(struct mlx5_txq_data *__rte_restrict txq,
2689 struct mlx5_txq_local *__rte_restrict loc,
2693 /* Check for multi-segment packet. */
2695 MLX5_TXOFF_CONFIG(MULTI) &&
2696 unlikely(NB_SEGS(loc->mbuf) > 1))
2697 return MLX5_TXCMP_CODE_MULTI;
2698 /* Check for TSO packet. */
2700 MLX5_TXOFF_CONFIG(TSO) &&
2701 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
2702 return MLX5_TXCMP_CODE_TSO;
2703 /* Check if eMPW is enabled at all. */
2704 if (!MLX5_TXOFF_CONFIG(EMPW))
2705 return MLX5_TXCMP_CODE_SINGLE;
2706 /* Check if eMPW can be engaged. */
2707 if (MLX5_TXOFF_CONFIG(VLAN) &&
2708 unlikely(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) &&
2709 (!MLX5_TXOFF_CONFIG(INLINE) ||
2710 unlikely((rte_pktmbuf_data_len(loc->mbuf) +
2711 sizeof(struct rte_vlan_hdr)) > txq->inlen_empw))) {
2713 * eMPW does not support VLAN insertion offload,
2714 * we would have to inline the entire packet, but
2715 * the packet is too long for inlining.
2717 return MLX5_TXCMP_CODE_SINGLE;
2719 return MLX5_TXCMP_CODE_EMPW;
2723 * Check the next packet attributes to match with the eMPW batch ones.
2724 * In addition, for legacy MPW the packet length is checked as well.
2727 * Pointer to TX queue structure.
2729 * Pointer to Ethernet Segment of eMPW batch.
2731 * Pointer to burst routine local context.
2733 * Length of previous packet in MPW descriptor.
2735 * Configured Tx offloads mask. It is fully defined at
2736 * compile time and may be used for optimization.
2739 * true - packet match with eMPW batch attributes.
2740 * false - no match, eMPW should be restarted.
2742 static __rte_always_inline bool
2743 mlx5_tx_match_empw(struct mlx5_txq_data *__rte_restrict txq,
2744 struct mlx5_wqe_eseg *__rte_restrict es,
2745 struct mlx5_txq_local *__rte_restrict loc,
2749 uint8_t swp_flags = 0;
2751 /* Compare the checksum flags, if any. */
2752 if (MLX5_TXOFF_CONFIG(CSUM) &&
2753 txq_ol_cksum_to_cs(loc->mbuf) != es->cs_flags)
2755 /* Compare the Software Parser offsets and flags. */
2756 if (MLX5_TXOFF_CONFIG(SWP) &&
2757 (es->swp_offs != txq_mbuf_to_swp(loc, &swp_flags, olx) ||
2758 es->swp_flags != swp_flags))
2760 /* Fill metadata field if needed. */
2761 if (MLX5_TXOFF_CONFIG(METADATA) &&
2762 es->metadata != (loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2763 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0))
2765 /* Legacy MPW can only send packets with the same length. */
2766 if (MLX5_TXOFF_CONFIG(MPW) &&
2767 dlen != rte_pktmbuf_data_len(loc->mbuf))
2769 /* There must be no VLAN packets in eMPW loop. */
2770 if (MLX5_TXOFF_CONFIG(VLAN))
2771 MLX5_ASSERT(!(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT));
2772 /* Check if the scheduling is requested. */
2773 if (MLX5_TXOFF_CONFIG(TXPP) &&
2774 loc->mbuf->ol_flags & txq->ts_mask)
2780 * Update send loop variables and WQE for eMPW loop
2781 * without data inlining. Number of Data Segments is
2782 * equal to the number of sent packets.
2785 * Pointer to TX queue structure.
2787 * Pointer to burst routine local context.
2789 * Number of packets (one Data Segment per packet).
2791 * Accumulated statistics, bytes sent.
2793 * Configured Tx offloads mask. It is fully defined at
2794 * compile time and may be used for optimization.
2797 * No return value; the routine updates the WQE and local context in place.
2800 static __rte_always_inline void
2801 mlx5_tx_sdone_empw(struct mlx5_txq_data *__rte_restrict txq,
2802 struct mlx5_txq_local *__rte_restrict loc,
2805 unsigned int olx __rte_unused)
2807 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
2808 #ifdef MLX5_PMD_SOFT_COUNTERS
2809 /* Update sent data bytes counter. */
2810 txq->stats.obytes += slen;
2814 loc->elts_free -= ds;
2815 loc->pkts_sent += ds;
2817 loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
2818 txq->wqe_ci += (ds + 3) / 4;
2819 loc->wqe_free -= (ds + 3) / 4;
2823 * Update send loop variables and WQE for eMPW loop
2824 * with data inlining. Gets the size of pushed descriptors
2825 * and data to the WQE.
2828 * Pointer to TX queue structure.
2830 * Pointer to burst routine local context.
2832 * Total size of descriptor/data in bytes.
2834 * Accumulated statistics, data bytes sent.
2836 * The base WQE for the eMPW/MPW descriptor.
2838 * Configured Tx offloads mask. It is fully defined at
2839 * compile time and may be used for optimization.
2842 * No return value; the routine updates the WQE and local context in place.
2845 static __rte_always_inline void
2846 mlx5_tx_idone_empw(struct mlx5_txq_data *__rte_restrict txq,
2847 struct mlx5_txq_local *__rte_restrict loc,
2850 struct mlx5_wqe *__rte_restrict wqem,
2851 unsigned int olx __rte_unused)
2853 struct mlx5_wqe_dseg *dseg = &wqem->dseg[0];
2855 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
2856 #ifdef MLX5_PMD_SOFT_COUNTERS
2857 /* Update sent data bytes counter. */
2858 txq->stats.obytes += slen;
2862 if (MLX5_TXOFF_CONFIG(MPW) && dseg->bcount == RTE_BE32(0)) {
2864 * If the legacy MPW session contains the inline packets
2865 * we should set the only inline data segment length
2866 * and align the total length to the segment size.
2868 MLX5_ASSERT(len > sizeof(dseg->bcount));
2869 dseg->bcount = rte_cpu_to_be_32((len - sizeof(dseg->bcount)) |
2870 MLX5_ETH_WQE_DATA_INLINE);
2871 len = (len + MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE + 2;
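/*
 * For an inline legacy MPW session the single Data Segment length
 * (including the bcount field) is rounded up to whole 16-byte
 * WSEGs, and two more WSEGs are added for the Control and
 * Ethernet Segments to get the total descriptor count.
 */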
2874 * The session is not legacy MPW or contains the
2875 * data buffer pointer segments.
2877 MLX5_ASSERT((len % MLX5_WSEG_SIZE) == 0);
2878 len = len / MLX5_WSEG_SIZE + 2;
2880 wqem->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | len);
2881 txq->wqe_ci += (len + 3) / 4;
2882 loc->wqe_free -= (len + 3) / 4;
2883 loc->wqe_last = wqem;
2887 * The set of Tx burst functions for single-segment packets
2888 * without TSO and with Multi-Packet Writing feature support.
2889 * Supports all types of Tx offloads, except multi-packets
2892 * Uses MLX5_OPCODE_EMPW to build WQEs if possible and sends
2893 * as many packets per WQE as it can. If eMPW is not configured
2894 * or the packet cannot be sent with eMPW (VLAN insertion), the
2895 * ordinary SEND opcode is used and only one packet is placed per WQE.
2898 * Functions stop sending if they encounter a multi-segment
2899 * packet or a packet with TSO requested.
2901 * The routines are responsible for storing the processed mbuf
2902 * into the elts ring buffer and updating elts_head if inlining
2903 * offload is requested. Otherwise copying mbufs to elts
2904 * can be postponed and completed at the end of the burst routine.
2907 * Pointer to TX queue structure.
2909 * Packets to transmit.
2911 * Number of packets in array.
2913 * Pointer to burst routine local context.
2915 * Configured Tx offloads mask. It is fully defined at
2916 * compile time and may be used for optimization.
2919 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
2920 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
2921 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
2922 * MLX5_TXCMP_CODE_TSO - TSO packet encountered.
2923 * MLX5_TXCMP_CODE_SINGLE - used inside functions set.
2924 * MLX5_TXCMP_CODE_EMPW - used inside functions set.
2926 * Local context variables updated.
2929 * The routine sends packets with MLX5_OPCODE_EMPW
2930 * without inlining, this is a dedicated optimized branch.
2931 * No VLAN insertion is supported.
2933 static __rte_always_inline enum mlx5_txcmp_code
2934 mlx5_tx_burst_empw_simple(struct mlx5_txq_data *__rte_restrict txq,
2935 struct rte_mbuf **__rte_restrict pkts,
2936 unsigned int pkts_n,
2937 struct mlx5_txq_local *__rte_restrict loc,
2941 * Subroutine is part of mlx5_tx_burst_single()
2942 * and sends single-segment packet with eMPW opcode
2943 * without data inlining.
2945 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
2946 MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
2947 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
2948 MLX5_ASSERT(pkts_n > loc->pkts_sent);
2949 pkts += loc->pkts_sent + 1;
2950 pkts_n -= loc->pkts_sent;
2952 struct mlx5_wqe_dseg *__rte_restrict dseg;
2953 struct mlx5_wqe_eseg *__rte_restrict eseg;
2954 enum mlx5_txcmp_code ret;
2955 unsigned int part, loop;
2956 unsigned int slen = 0;
2959 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
2960 if (MLX5_TXOFF_CONFIG(TXPP)) {
2961 enum mlx5_txcmp_code wret;
2963 /* Generate WAIT for scheduling if requested. */
2964 wret = mlx5_tx_schedule_send(txq, loc, olx);
2965 if (wret == MLX5_TXCMP_CODE_EXIT)
2966 return MLX5_TXCMP_CODE_EXIT;
2967 if (wret == MLX5_TXCMP_CODE_ERROR)
2968 return MLX5_TXCMP_CODE_ERROR;
2970 part = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
2971 MLX5_MPW_MAX_PACKETS :
2972 MLX5_EMPW_MAX_PACKETS);
2973 if (unlikely(loc->elts_free < part)) {
2974 /* We do not have enough elts to store all mbufs. */
2975 if (unlikely(loc->elts_free < MLX5_EMPW_MIN_PACKETS))
2976 return MLX5_TXCMP_CODE_EXIT;
2977 /* But we are still able to send at least a minimal eMPW. */
2978 part = loc->elts_free;
2980 /* Check whether we have enough WQEs */
2981 if (unlikely(loc->wqe_free < ((2 + part + 3) / 4))) {
2982 if (unlikely(loc->wqe_free <
2983 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
2984 return MLX5_TXCMP_CODE_EXIT;
2985 part = (loc->wqe_free * 4) - 2;
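/*
 * Each free WQEBB provides room for four 16-byte segments; two
 * of them are taken by the Control and Ethernet Segments of the
 * eMPW, and every remaining segment can hold one pointer Data
 * Segment, i.e. one packet.
 */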
2987 if (likely(part > 1))
2988 rte_prefetch0(*pkts);
2989 loc->wqe_last = txq->wqes + (txq->wqe_ci & txq->wqe_m);
2991 * Build eMPW title WQEBB:
2992 * - Control Segment, eMPW opcode
2993 * - Ethernet Segment, no inline
2995 mlx5_tx_cseg_init(txq, loc, loc->wqe_last, part + 2,
2996 MLX5_OPCODE_ENHANCED_MPSW, olx);
2997 mlx5_tx_eseg_none(txq, loc, loc->wqe_last,
2998 olx & ~MLX5_TXOFF_CONFIG_VLAN);
2999 eseg = &loc->wqe_last->eseg;
3000 dseg = &loc->wqe_last->dseg[0];
3002 /* Store the packet length for legacy MPW. */
3003 if (MLX5_TXOFF_CONFIG(MPW))
3004 eseg->mss = rte_cpu_to_be_16
3005 (rte_pktmbuf_data_len(loc->mbuf));
3007 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
3008 #ifdef MLX5_PMD_SOFT_COUNTERS
3009 /* Update sent data bytes counter. */
3014 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3016 if (unlikely(--loop == 0))
3018 loc->mbuf = *pkts++;
3019 if (likely(loop > 1))
3020 rte_prefetch0(*pkts);
3021 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
3023 * Unroll the completion code to avoid
3024 * returning a variable value - it results in
3025 * unoptimized sequential checking in the caller.
3027 if (ret == MLX5_TXCMP_CODE_MULTI) {
3029 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3030 if (unlikely(!loc->elts_free ||
3032 return MLX5_TXCMP_CODE_EXIT;
3033 return MLX5_TXCMP_CODE_MULTI;
3035 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
3036 if (ret == MLX5_TXCMP_CODE_TSO) {
3038 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3039 if (unlikely(!loc->elts_free ||
3041 return MLX5_TXCMP_CODE_EXIT;
3042 return MLX5_TXCMP_CODE_TSO;
3044 if (ret == MLX5_TXCMP_CODE_SINGLE) {
3046 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3047 if (unlikely(!loc->elts_free ||
3049 return MLX5_TXCMP_CODE_EXIT;
3050 return MLX5_TXCMP_CODE_SINGLE;
3052 if (ret != MLX5_TXCMP_CODE_EMPW) {
3055 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3056 return MLX5_TXCMP_CODE_ERROR;
3059 * Check whether packet parameters coincide
3060 * within assumed eMPW batch:
3061 * - checksum settings
3063 * - software parser settings
3064 * - packets length (legacy MPW only)
3065 * - scheduling is not required
3067 if (!mlx5_tx_match_empw(txq, eseg, loc, dlen, olx)) {
3070 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3071 if (unlikely(!loc->elts_free ||
3073 return MLX5_TXCMP_CODE_EXIT;
3077 /* Packet attributes match, continue the same eMPW. */
3079 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3080 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3082 /* eMPW is built successfully, update loop parameters. */
3084 MLX5_ASSERT(pkts_n >= part);
3085 #ifdef MLX5_PMD_SOFT_COUNTERS
3086 /* Update sent data bytes counter. */
3087 txq->stats.obytes += slen;
3089 loc->elts_free -= part;
3090 loc->pkts_sent += part;
3091 txq->wqe_ci += (2 + part + 3) / 4;
3092 loc->wqe_free -= (2 + part + 3) / 4;
3094 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3095 return MLX5_TXCMP_CODE_EXIT;
3096 loc->mbuf = *pkts++;
3097 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
3098 if (unlikely(ret != MLX5_TXCMP_CODE_EMPW))
3100 /* Continue sending eMPW batches. */
3106 * The routine sends packets with MLX5_OPCODE_EMPW
3107 * with inlining, optionally supports VLAN insertion.
3109 static __rte_always_inline enum mlx5_txcmp_code
3110 mlx5_tx_burst_empw_inline(struct mlx5_txq_data *__rte_restrict txq,
3111 struct rte_mbuf **__rte_restrict pkts,
3112 unsigned int pkts_n,
3113 struct mlx5_txq_local *__rte_restrict loc,
3117 * Subroutine is part of mlx5_tx_burst_single()
3118 * and sends single-segment packet with eMPW opcode
3119 * with data inlining.
3121 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
3122 MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
3123 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3124 MLX5_ASSERT(pkts_n > loc->pkts_sent);
3125 pkts += loc->pkts_sent + 1;
3126 pkts_n -= loc->pkts_sent;
3128 struct mlx5_wqe_dseg *__rte_restrict dseg;
3129 struct mlx5_wqe *__rte_restrict wqem;
3130 enum mlx5_txcmp_code ret;
3131 unsigned int room, part, nlim;
3132 unsigned int slen = 0;
3134 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
3135 if (MLX5_TXOFF_CONFIG(TXPP)) {
3136 enum mlx5_txcmp_code wret;
3138 /* Generate WAIT for scheduling if requested. */
3139 wret = mlx5_tx_schedule_send(txq, loc, olx);
3140 if (wret == MLX5_TXCMP_CODE_EXIT)
3141 return MLX5_TXCMP_CODE_EXIT;
3142 if (wret == MLX5_TXCMP_CODE_ERROR)
3143 return MLX5_TXCMP_CODE_ERROR;
3146 * Limit the number of packets in one WQE
3147 * to improve CQE generation latency.
3149 nlim = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
3150 MLX5_MPW_INLINE_MAX_PACKETS :
3151 MLX5_EMPW_MAX_PACKETS);
3152 /* Check whether we have the minimal amount of WQEs. */
3153 if (unlikely(loc->wqe_free <
3154 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
3155 return MLX5_TXCMP_CODE_EXIT;
3156 if (likely(pkts_n > 1))
3157 rte_prefetch0(*pkts);
3158 wqem = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3160 * Build eMPW title WQEBB:
3161 * - Control Segment, eMPW opcode, zero DS
3162 * - Ethernet Segment, no inline
3164 mlx5_tx_cseg_init(txq, loc, wqem, 0,
3165 MLX5_OPCODE_ENHANCED_MPSW, olx);
3166 mlx5_tx_eseg_none(txq, loc, wqem,
3167 olx & ~MLX5_TXOFF_CONFIG_VLAN);
3168 dseg = &wqem->dseg[0];
3169 /* Store the packet length for legacy MPW. */
3170 if (MLX5_TXOFF_CONFIG(MPW))
3171 wqem->eseg.mss = rte_cpu_to_be_16
3172 (rte_pktmbuf_data_len(loc->mbuf));
3173 room = RTE_MIN(MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE,
3174 loc->wqe_free) * MLX5_WQE_SIZE -
3175 MLX5_WQE_CSEG_SIZE -
3177 /* Limit the room for legacy MPW sessions for performance. */
3178 if (MLX5_TXOFF_CONFIG(MPW))
3179 room = RTE_MIN(room,
3180 RTE_MAX(txq->inlen_empw +
3181 sizeof(dseg->bcount) +
3182 (MLX5_TXOFF_CONFIG(VLAN) ?
3183 sizeof(struct rte_vlan_hdr) : 0),
3184 MLX5_MPW_INLINE_MAX_PACKETS *
3185 MLX5_WQE_DSEG_SIZE));
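/*
 * "room" is the byte budget left for Data Segments in this eMPW:
 * the smaller of the maximal WQE size and the free WQEBBs, minus
 * the Control and Ethernet Segments. For legacy MPW it is further
 * capped so a single inline session does not exceed inlen_empw
 * (plus the bcount field and an optional VLAN header) or the
 * pointer-descriptor budget for MLX5_MPW_INLINE_MAX_PACKETS
 * packets, whichever is larger.
 */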
3186 /* Build WQE till we have space, packets and resources. */
3189 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
3190 uint8_t *dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
3193 MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
3194 MLX5_ASSERT((room % MLX5_WQE_DSEG_SIZE) == 0);
3195 MLX5_ASSERT((uintptr_t)dseg < (uintptr_t)txq->wqes_end);
3197 * Some Tx offloads may cause an error if
3198 * packet is not long enough, check against
3199 * assumed minimal length.
3201 if (unlikely(dlen <= MLX5_ESEG_MIN_INLINE_SIZE)) {
3203 if (unlikely(!part))
3204 return MLX5_TXCMP_CODE_ERROR;
3206 * We have some successfully built
3207 * packet Data Segments to send.
3209 mlx5_tx_idone_empw(txq, loc, part,
3211 return MLX5_TXCMP_CODE_ERROR;
3213 /* Inline or not inline - that's the Question. */
3214 if (dlen > txq->inlen_empw ||
3215 loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE)
3217 if (MLX5_TXOFF_CONFIG(MPW)) {
3218 if (dlen > txq->inlen_send)
3222 /* Open new inline MPW session. */
3223 tlen += sizeof(dseg->bcount);
3224 dseg->bcount = RTE_BE32(0);
3226 (dseg, sizeof(dseg->bcount));
3229 * No pointer and inline descriptor
3230 * intermix for legacy MPW sessions.
3232 if (wqem->dseg[0].bcount)
3236 tlen = sizeof(dseg->bcount) + dlen;
3238 /* Inline entire packet, optional VLAN insertion. */
3239 if (MLX5_TXOFF_CONFIG(VLAN) &&
3240 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
3242 * The packet length must be checked in
3243 * mlx5_tx_able_to_empw() and the packet is
3244 * guaranteed to fit into the inline length.
3247 sizeof(struct rte_vlan_hdr)) <=
3249 tlen += sizeof(struct rte_vlan_hdr);
3252 dseg = mlx5_tx_dseg_vlan(txq, loc, dseg,
3254 #ifdef MLX5_PMD_SOFT_COUNTERS
3255 /* Update sent data bytes counter. */
3256 slen += sizeof(struct rte_vlan_hdr);
3261 dseg = mlx5_tx_dseg_empw(txq, loc, dseg,
3264 if (!MLX5_TXOFF_CONFIG(MPW))
3265 tlen = RTE_ALIGN(tlen, MLX5_WSEG_SIZE);
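/*
 * For enhanced MPW every inline Data Segment must be padded to a
 * 16-byte WSEG boundary. Legacy MPW keeps one running inline
 * segment instead, so the rounding is applied only once, when the
 * session is closed in mlx5_tx_idone_empw().
 */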
3266 MLX5_ASSERT(room >= tlen);
3269 * Packet data are completely inlined,
3270 * we can try to free the packet.
3272 if (likely(loc->pkts_sent == loc->mbuf_free)) {
3274 * All the packets from the beginning of the burst
3275 * are inlined, we can free the mbufs directly
3276 * from the original array on tx_burst exit.
3282 * In order not to call rte_pktmbuf_free_seg() here,
3283 * in the innermost loop (that might be very
3284 * expensive), we just save the mbuf in elts.
3286 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3291 * No pointer and inline descriptor
3292 * intermix for legacy MPW sessions.
3294 if (MLX5_TXOFF_CONFIG(MPW) &&
3296 wqem->dseg[0].bcount == RTE_BE32(0))
3299 * Non-inlinable VLAN packets are
3300 * processed outside of this routine.
3302 MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
3303 if (MLX5_TXOFF_CONFIG(VLAN))
3304 MLX5_ASSERT(!(loc->mbuf->ol_flags &
3306 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
3307 /* We have to store mbuf in elts.*/
3308 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3310 room -= MLX5_WQE_DSEG_SIZE;
3311 /* Ring buffer wraparound is checked at the loop end.*/
3314 #ifdef MLX5_PMD_SOFT_COUNTERS
3315 /* Update sent data bytes counter. */
3320 if (unlikely(!pkts_n || !loc->elts_free)) {
3322 * We have no resources/packets to
3323 * continue building descriptors.
3326 mlx5_tx_idone_empw(txq, loc, part,
3328 return MLX5_TXCMP_CODE_EXIT;
3330 loc->mbuf = *pkts++;
3331 if (likely(pkts_n > 1))
3332 rte_prefetch0(*pkts);
3333 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
3335 * Unroll the completion code to avoid
3336 * returning a variable value - it results in
3337 * unoptimized sequential checking in the caller.
3339 if (ret == MLX5_TXCMP_CODE_MULTI) {
3341 mlx5_tx_idone_empw(txq, loc, part,
3343 if (unlikely(!loc->elts_free ||
3345 return MLX5_TXCMP_CODE_EXIT;
3346 return MLX5_TXCMP_CODE_MULTI;
3348 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
3349 if (ret == MLX5_TXCMP_CODE_TSO) {
3351 mlx5_tx_idone_empw(txq, loc, part,
3353 if (unlikely(!loc->elts_free ||
3355 return MLX5_TXCMP_CODE_EXIT;
3356 return MLX5_TXCMP_CODE_TSO;
3358 if (ret == MLX5_TXCMP_CODE_SINGLE) {
3360 mlx5_tx_idone_empw(txq, loc, part,
3362 if (unlikely(!loc->elts_free ||
3364 return MLX5_TXCMP_CODE_EXIT;
3365 return MLX5_TXCMP_CODE_SINGLE;
3367 if (ret != MLX5_TXCMP_CODE_EMPW) {
3370 mlx5_tx_idone_empw(txq, loc, part,
3372 return MLX5_TXCMP_CODE_ERROR;
3374 /* Check if we have minimal room left. */
3376 if (unlikely(!nlim || room < MLX5_WQE_DSEG_SIZE))
3379 * Check whether packet parameters coincide
3380 * within assumed eMPW batch:
3381 * - checksum settings
3383 * - software parser settings
3384 * - packets length (legacy MPW only)
3385 * - scheduling is not required
3387 if (!mlx5_tx_match_empw(txq, &wqem->eseg,
3390 /* Packet attributes match, continue the same eMPW. */
3391 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3392 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3395 * We get here to close an existing eMPW
3396 * session and start the new one.
3398 MLX5_ASSERT(pkts_n);
3400 if (unlikely(!part))
3401 return MLX5_TXCMP_CODE_EXIT;
3402 mlx5_tx_idone_empw(txq, loc, part, slen, wqem, olx);
3403 if (unlikely(!loc->elts_free ||
3405 return MLX5_TXCMP_CODE_EXIT;
3406 /* Continue the loop with new eMPW session. */
3412 * The routine sends packets with ordinary MLX5_OPCODE_SEND.
3413 * Data inlining and VLAN insertion are supported.
3415 static __rte_always_inline enum mlx5_txcmp_code
3416 mlx5_tx_burst_single_send(struct mlx5_txq_data *__rte_restrict txq,
3417 struct rte_mbuf **__rte_restrict pkts,
3418 unsigned int pkts_n,
3419 struct mlx5_txq_local *__rte_restrict loc,
3423 * Subroutine is part of mlx5_tx_burst_single()
3424 * and sends single-segment packet with SEND opcode.
3426 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3427 MLX5_ASSERT(pkts_n > loc->pkts_sent);
3428 pkts += loc->pkts_sent + 1;
3429 pkts_n -= loc->pkts_sent;
3431 struct mlx5_wqe *__rte_restrict wqe;
3432 enum mlx5_txcmp_code ret;
3434 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
3435 if (MLX5_TXOFF_CONFIG(TXPP)) {
3436 enum mlx5_txcmp_code wret;
3438 /* Generate WAIT for scheduling if requested. */
3439 wret = mlx5_tx_schedule_send(txq, loc, olx);
3440 if (wret == MLX5_TXCMP_CODE_EXIT)
3441 return MLX5_TXCMP_CODE_EXIT;
3442 if (wret == MLX5_TXCMP_CODE_ERROR)
3443 return MLX5_TXCMP_CODE_ERROR;
3445 if (MLX5_TXOFF_CONFIG(INLINE)) {
3446 unsigned int inlen, vlan = 0;
3448 inlen = rte_pktmbuf_data_len(loc->mbuf);
3449 if (MLX5_TXOFF_CONFIG(VLAN) &&
3450 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
3451 vlan = sizeof(struct rte_vlan_hdr);
3455 * If inlining is enabled at configuration time,
3456 * the limit must not be less than the minimal size.
3457 * Otherwise we would need an extra check of the data
3458 * size to avoid crashes due to length overflow.
3460 MLX5_ASSERT(txq->inlen_send >=
3461 MLX5_ESEG_MIN_INLINE_SIZE);
3462 if (inlen <= txq->inlen_send) {
3463 unsigned int seg_n, wqe_n;
3465 rte_prefetch0(rte_pktmbuf_mtod
3466 (loc->mbuf, uint8_t *));
3467 /* Check against minimal length. */
3468 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
3469 return MLX5_TXCMP_CODE_ERROR;
3470 if (loc->mbuf->ol_flags &
3471 PKT_TX_DYNF_NOINLINE) {
3473 * The hint flag not to inline the packet
3474 * data is set. Check whether we can follow the hint.
3477 if ((!MLX5_TXOFF_CONFIG(EMPW) &&
3479 (MLX5_TXOFF_CONFIG(MPW) &&
3481 if (inlen <= txq->inlen_send)
3484 * The hardware requires the
3485 * minimal inline data header.
3487 goto single_min_inline;
3489 if (MLX5_TXOFF_CONFIG(VLAN) &&
3490 vlan && !txq->vlan_en) {
3492 * We must insert VLAN tag
3493 * by software means.
3495 goto single_part_inline;
3497 goto single_no_inline;
3501 * Completely inlined packet data WQE:
3502 * - Control Segment, SEND opcode
3503 * - Ethernet Segment, no VLAN insertion
3504 * - Data inlined, VLAN optionally inserted
3505 * - Alignment to MLX5_WSEG_SIZE
3506 * Have to estimate amount of WQEBBs
3508 seg_n = (inlen + 3 * MLX5_WSEG_SIZE -
3509 MLX5_ESEG_MIN_INLINE_SIZE +
3510 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3511 /* Check if there are enough WQEBBs. */
3512 wqe_n = (seg_n + 3) / 4;
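/*
 * seg_n estimates the WQE size in 16-byte WSEGs: the Control and
 * Ethernet Segments plus the completely inlined packet data (the
 * minimal inline part is accounted within the Ethernet Segment),
 * rounded up. wqe_n then converts WSEGs into whole WQEBBs of four
 * WSEGs each.
 */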
3513 if (wqe_n > loc->wqe_free)
3514 return MLX5_TXCMP_CODE_EXIT;
3515 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3516 loc->wqe_last = wqe;
3517 mlx5_tx_cseg_init(txq, loc, wqe, seg_n,
3518 MLX5_OPCODE_SEND, olx);
3519 mlx5_tx_eseg_data(txq, loc, wqe,
3520 vlan, inlen, 0, olx);
3521 txq->wqe_ci += wqe_n;
3522 loc->wqe_free -= wqe_n;
3524 * Packet data are completely inlined,
3525 * free the packet immediately.
3527 rte_pktmbuf_free_seg(loc->mbuf);
3528 } else if ((!MLX5_TXOFF_CONFIG(EMPW) ||
3529 MLX5_TXOFF_CONFIG(MPW)) &&
3532 * If minimal inlining is requested, the eMPW
3533 * feature should be disabled because the data is
3534 * inlined into the Ethernet Segment, which cannot
3535 * contain inlined data for eMPW since the
3536 * segment is shared by all packets.
3538 struct mlx5_wqe_dseg *__rte_restrict dseg;
3543 * The inline-mode settings require
3544 * inlining the specified amount of
3545 * data bytes into the Ethernet Segment.
3546 * We should check the free space in the
3547 * WQE ring buffer to inline only partially.
3550 MLX5_ASSERT(txq->inlen_send >= txq->inlen_mode);
3551 MLX5_ASSERT(inlen > txq->inlen_mode);
3552 MLX5_ASSERT(txq->inlen_mode >=
3553 MLX5_ESEG_MIN_INLINE_SIZE);
3555 * Check whether there are enough free WQEBBs:
3557 * - Ethernet Segment
3558 * - First Segment of inlined Ethernet data
3559 * - ... data continued ...
3560 * - Finishing Data Segment of pointer type
3562 ds = (MLX5_WQE_CSEG_SIZE +
3563 MLX5_WQE_ESEG_SIZE +
3564 MLX5_WQE_DSEG_SIZE +
3566 MLX5_ESEG_MIN_INLINE_SIZE +
3567 MLX5_WQE_DSEG_SIZE +
3568 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
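/*
 * ds is the WQE size in 16-byte WSEGs for the partially inlined
 * case: the Control, Ethernet and first Data Segments, the
 * inlen_mode bytes of inlined data beyond the minimal part, and
 * the trailing pointer Data Segment, rounded up.
 */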
3569 if (loc->wqe_free < ((ds + 3) / 4))
3570 return MLX5_TXCMP_CODE_EXIT;
3572 * Build the ordinary SEND WQE:
3574 * - Ethernet Segment, inline inlen_mode bytes
3575 * - Data Segment of pointer type
3577 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3578 loc->wqe_last = wqe;
3579 mlx5_tx_cseg_init(txq, loc, wqe, ds,
3580 MLX5_OPCODE_SEND, olx);
3581 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan,
3584 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
3585 txq->inlen_mode - vlan;
3586 inlen -= txq->inlen_mode;
3587 mlx5_tx_dseg_ptr(txq, loc, dseg,
3590 * WQE is built, update the loop parameters
3591 * and go to the next packet.
3593 txq->wqe_ci += (ds + 3) / 4;
3594 loc->wqe_free -= (ds + 3) / 4;
3595 /* We have to store mbuf in elts.*/
3596 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
3597 txq->elts[txq->elts_head++ & txq->elts_m] =
3605 * Partially inlined packet data WQE, we have
3606 * some space in the title WQEBB, we can fill it
3607 * with some packet data. It takes one WQEBB,
3608 * which is available, no extra space check:
3609 * - Control Segment, SEND opcode
3610 * - Ethernet Segment, no VLAN insertion
3611 * - MLX5_ESEG_MIN_INLINE_SIZE bytes of Data
3612 * - Data Segment, pointer type
3614 * We also get here if VLAN insertion is not
3615 * supported by HW and inlining is enabled.
3618 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3619 loc->wqe_last = wqe;
3620 mlx5_tx_cseg_init(txq, loc, wqe, 4,
3621 MLX5_OPCODE_SEND, olx);
3622 mlx5_tx_eseg_dmin(txq, loc, wqe, vlan, olx);
3623 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
3624 MLX5_ESEG_MIN_INLINE_SIZE - vlan;
3626 * The length check is performed above, by
3627 * comparing with txq->inlen_send. We should
3628 * not get overflow here.
3630 MLX5_ASSERT(inlen > MLX5_ESEG_MIN_INLINE_SIZE);
3631 dlen = inlen - MLX5_ESEG_MIN_INLINE_SIZE;
3632 mlx5_tx_dseg_ptr(txq, loc, &wqe->dseg[1],
3636 /* We have to store mbuf in elts.*/
3637 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
3638 txq->elts[txq->elts_head++ & txq->elts_m] =
3642 #ifdef MLX5_PMD_SOFT_COUNTERS
3643 /* Update sent data bytes counter. */
3644 txq->stats.obytes += vlan +
3645 rte_pktmbuf_data_len(loc->mbuf);
3649 * No inline at all, it means the CPU cycles saving
3650 * is prioritized at configuration, we should not
3651 * copy any packet data to WQE.
3653 * SEND WQE, one WQEBB:
3654 * - Control Segment, SEND opcode
3655 * - Ethernet Segment, optional VLAN, no inline
3656 * - Data Segment, pointer type
3659 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3660 loc->wqe_last = wqe;
3661 mlx5_tx_cseg_init(txq, loc, wqe, 3,
3662 MLX5_OPCODE_SEND, olx);
3663 mlx5_tx_eseg_none(txq, loc, wqe, olx);
3665 (txq, loc, &wqe->dseg[0],
3666 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3667 rte_pktmbuf_data_len(loc->mbuf), olx);
3671 * We should not store the mbuf pointer in elts
3672 * if no inlining is configured; this is done
3673 * by the calling routine in a batch copy.
3675 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
3677 #ifdef MLX5_PMD_SOFT_COUNTERS
3678 /* Update sent data bytes counter. */
3679 txq->stats.obytes += rte_pktmbuf_data_len(loc->mbuf);
3680 if (MLX5_TXOFF_CONFIG(VLAN) &&
3681 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3682 txq->stats.obytes +=
3683 sizeof(struct rte_vlan_hdr);
3688 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3689 return MLX5_TXCMP_CODE_EXIT;
3690 loc->mbuf = *pkts++;
3692 rte_prefetch0(*pkts);
3693 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
3694 if (unlikely(ret != MLX5_TXCMP_CODE_SINGLE))
3700 static __rte_always_inline enum mlx5_txcmp_code
3701 mlx5_tx_burst_single(struct mlx5_txq_data *__rte_restrict txq,
3702 struct rte_mbuf **__rte_restrict pkts,
3703 unsigned int pkts_n,
3704 struct mlx5_txq_local *__rte_restrict loc,
3707 enum mlx5_txcmp_code ret;
3709 ret = mlx5_tx_able_to_empw(txq, loc, olx, false);
3710 if (ret == MLX5_TXCMP_CODE_SINGLE)
3712 MLX5_ASSERT(ret == MLX5_TXCMP_CODE_EMPW);
3714 /* Optimize for inline/no inline eMPW send. */
3715 ret = (MLX5_TXOFF_CONFIG(INLINE)) ?
3716 mlx5_tx_burst_empw_inline
3717 (txq, pkts, pkts_n, loc, olx) :
3718 mlx5_tx_burst_empw_simple
3719 (txq, pkts, pkts_n, loc, olx);
3720 if (ret != MLX5_TXCMP_CODE_SINGLE)
3722 /* The resources to send one packet should remain. */
3723 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3725 ret = mlx5_tx_burst_single_send(txq, pkts, pkts_n, loc, olx);
3726 MLX5_ASSERT(ret != MLX5_TXCMP_CODE_SINGLE);
3727 if (ret != MLX5_TXCMP_CODE_EMPW)
3729 /* The resources to send one packet should remain. */
3730 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3735 * DPDK Tx callback template. This configured template is
3736 * used to generate routines optimized for the specified offload setup.
3737 * One of these generated functions is chosen at SQ configuration time.
3741 * Generic pointer to TX queue structure.
3743 * Packets to transmit.
3745 * Number of packets in array.
3747 * Configured offloads mask, representing the bits of MLX5_TXOFF_CONFIG_xxx
3748 * values. Should be static to take advantage of compile-time static configuration.
3754 static __rte_always_inline uint16_t
3755 mlx5_tx_burst_tmpl(struct mlx5_txq_data *__rte_restrict txq,
3756 struct rte_mbuf **__rte_restrict pkts,
3760 struct mlx5_txq_local loc;
3761 enum mlx5_txcmp_code ret;
3764 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
3765 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
3766 if (unlikely(!pkts_n))
3768 if (MLX5_TXOFF_CONFIG(INLINE))
3772 loc.wqe_last = NULL;
3775 loc.pkts_loop = loc.pkts_sent;
3777 * Check if there are some CQEs and, if any:
3778 * - process encountered errors
3779 * - process the completed WQEs
3780 * - free related mbufs
3781 * - doorbell the NIC about processed CQEs
3783 rte_prefetch0(*(pkts + loc.pkts_sent));
3784 mlx5_tx_handle_completion(txq, olx);
3786 * Calculate the number of available resources - elts and WQEs.
3787 * There are two possible different scenarios:
3788 * - no data inlining into WQEs, one WQEBB may contain up to
3789 * four packets, in this case elts become the scarce resource
3790 * - data inlining into WQEs, one packet may require multiple
3791 * WQEBBs, the WQEs become the limiting factor.
3793 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
3794 loc.elts_free = txq->elts_s -
3795 (uint16_t)(txq->elts_head - txq->elts_tail);
3796 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
3797 loc.wqe_free = txq->wqe_s -
3798 (uint16_t)(txq->wqe_ci - txq->wqe_pi);
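/*
 * Both differences are taken modulo 2^16 via the uint16_t cast,
 * so the free-resource calculation remains correct when the
 * head/ci counters wrap around while the tail/pi counters lag.
 */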
3799 if (unlikely(!loc.elts_free || !loc.wqe_free))
3803 * Fetch the packet from the array. Usually this is
3804 * the first packet in a series of multi/single-segment packets.
3807 loc.mbuf = *(pkts + loc.pkts_sent);
3808 /* Dedicated branch for multi-segment packets. */
3809 if (MLX5_TXOFF_CONFIG(MULTI) &&
3810 unlikely(NB_SEGS(loc.mbuf) > 1)) {
3812 * Multi-segment packet encountered.
3813 * Hardware is able to process it only
3814 * with SEND/TSO opcodes, one packet
3815 * per WQE, do it in dedicated routine.
3818 MLX5_ASSERT(loc.pkts_sent >= loc.pkts_copy);
3819 part = loc.pkts_sent - loc.pkts_copy;
3820 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
3822 * There are some single-segment mbufs not
3823 * stored in elts. The mbufs must be in the
3824 * same order as WQEs, so we must copy the
3825 * mbufs to elts here, before the coming
3826 * multi-segment packet mbufs are appended.
3828 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy,
3830 loc.pkts_copy = loc.pkts_sent;
3832 MLX5_ASSERT(pkts_n > loc.pkts_sent);
3833 ret = mlx5_tx_burst_mseg(txq, pkts, pkts_n, &loc, olx);
3834 if (!MLX5_TXOFF_CONFIG(INLINE))
3835 loc.pkts_copy = loc.pkts_sent;
3837 * These return code checks are supposed
3838 * to be optimized out due to routine inlining.
3840 if (ret == MLX5_TXCMP_CODE_EXIT) {
3842 * The routine returns this code when
3843 * all packets are sent or there are not
3844 * enough resources to complete the request.
3848 if (ret == MLX5_TXCMP_CODE_ERROR) {
3850 * The routine returns this code when some
3851 * error was detected in the incoming packets.
3854 txq->stats.oerrors++;
3857 if (ret == MLX5_TXCMP_CODE_SINGLE) {
3859 * The single-segment packet was encountered
3860 * in the array, try to send it in the
3861 * best optimized way, possibly engaging eMPW.
3863 goto enter_send_single;
3865 if (MLX5_TXOFF_CONFIG(TSO) &&
3866 ret == MLX5_TXCMP_CODE_TSO) {
3868 * The single-segment TSO packet was
3869 * encountered in the array.
3871 goto enter_send_tso;
3873 /* We must not get here. Something has gone wrong. */
3875 txq->stats.oerrors++;
3878 /* Dedicated branch for single-segment TSO packets. */
3879 if (MLX5_TXOFF_CONFIG(TSO) &&
3880 unlikely(loc.mbuf->ol_flags & PKT_TX_TCP_SEG)) {
3882 * TSO might require a special way of inlining
3883 * (dedicated parameters) and is sent with
3884 * the MLX5_OPCODE_TSO opcode only, provide this
3885 * in a dedicated branch.
3888 MLX5_ASSERT(NB_SEGS(loc.mbuf) == 1);
3889 MLX5_ASSERT(pkts_n > loc.pkts_sent);
3890 ret = mlx5_tx_burst_tso(txq, pkts, pkts_n, &loc, olx);
3892 * These return code checks are supposed
3893 * to be optimized out due to routine inlining.
3895 if (ret == MLX5_TXCMP_CODE_EXIT)
3897 if (ret == MLX5_TXCMP_CODE_ERROR) {
3898 txq->stats.oerrors++;
3901 if (ret == MLX5_TXCMP_CODE_SINGLE)
3902 goto enter_send_single;
3903 if (MLX5_TXOFF_CONFIG(MULTI) &&
3904 ret == MLX5_TXCMP_CODE_MULTI) {
3906 * The multi-segment packet was
3907 * encountered in the array.
3909 goto enter_send_multi;
3911 /* We must not get here. Something has gone wrong. */
3913 txq->stats.oerrors++;
3917 * The dedicated branch for the single-segment packets
3918 * without TSO. Often these can be sent using
3919 * MLX5_OPCODE_EMPW with multiple packets in one WQE.
3920 * The routine builds the WQEs till it encounters
3921 * a TSO or multi-segment packet (if these
3922 * offloads are requested at SQ configuration time).
3925 MLX5_ASSERT(pkts_n > loc.pkts_sent);
3926 ret = mlx5_tx_burst_single(txq, pkts, pkts_n, &loc, olx);
3928 * These return code checks are supposed
3929 * to be optimized out due to routine inlining.
3931 if (ret == MLX5_TXCMP_CODE_EXIT)
3933 if (ret == MLX5_TXCMP_CODE_ERROR) {
3934 txq->stats.oerrors++;
3937 if (MLX5_TXOFF_CONFIG(MULTI) &&
3938 ret == MLX5_TXCMP_CODE_MULTI) {
3940 * The multi-segment packet was
3941 * encountered in the array.
3943 goto enter_send_multi;
3945 if (MLX5_TXOFF_CONFIG(TSO) &&
3946 ret == MLX5_TXCMP_CODE_TSO) {
3948 * The single-segment TSO packet was
3949 * encountered in the array.
3951 goto enter_send_tso;
3953 /* We must not get here. Something has gone wrong. */
3955 txq->stats.oerrors++;
3959 * Main Tx loop is completed, do the rest:
3960 * - set completion request if thresholds are reached
3961 * - doorbell the hardware
3962 * - copy the rest of mbufs to elts (if any)
3964 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE) ||
3965 loc.pkts_sent >= loc.pkts_copy);
3966 /* Take a shortcut if nothing is sent. */
3967 if (unlikely(loc.pkts_sent == loc.pkts_loop))
3969 /* Request CQE generation if limits are reached. */
3970 mlx5_tx_request_completion(txq, &loc, olx);
3972 * Ring QP doorbell immediately after WQE building completion
3973 * to improve latencies. The pure software related data treatment
3974 * can be completed after doorbell. Tx CQEs for this SQ are
3975 * processed in this thread only by the polling.
3977 * The rdma core library can map doorbell register in two ways,
3978 * depending on the environment variable "MLX5_SHUT_UP_BF":
3980 * - as regular cached memory, the variable is either missing or
3981 * set to zero. This type of mapping may cause significant
3982 * doorbell register write latency and requires an explicit
3983 * memory write barrier to mitigate this issue and prevent
3986 * - as non-cached memory, the variable is present and set to
3987 * not "0" value. This type of mapping may cause performance
3988 * impact under heavy loading conditions but the explicit write
3989 * memory barrier is not required and it may improve core
3992 * - the legacy behaviour (prior to the 19.08 release) was to use some
3993 * heuristics to decide whether a write memory barrier should
3994 * be performed. This behavior is supported by specifying
3995 * tx_db_nc=2; the write barrier is skipped if the application
3996 * provides the full recommended burst of packets, as it
3997 * supposes the next packets are coming and the write barrier
3998 * will be issued on the next burst (after descriptor writing,
4001 mlx5_tx_dbrec_cond_wmb(txq, loc.wqe_last, !txq->db_nc &&
4002 (!txq->db_heu || pkts_n % MLX5_TX_DEFAULT_BURST));
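/*
 * The condition above follows the modes described in the comment:
 * the write barrier is requested only for the cached doorbell
 * mapping (db_nc == 0) and, with the heuristics enabled (db_heu),
 * it is skipped when the application supplied a full
 * MLX5_TX_DEFAULT_BURST of packets.
 */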
4003 /* Not all of the mbufs may be stored into elts yet. */
4004 part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent - loc.pkts_copy;
4005 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
4007 * There are some single-segment mbufs not stored in elts.
4008 * This can happen only if the last packet was single-segment.
4009 * The copying is gathered into one place because it is
4010 * a good opportunity to optimize it with SIMD.
4011 * Unfortunately, if inlining is enabled, gaps in the
4012 * pointer array may appear due to early freeing of the inlined mbufs.
4015 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy, part, olx);
4016 loc.pkts_copy = loc.pkts_sent;
4018 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
4019 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
4020 if (pkts_n > loc.pkts_sent) {
4022 * If the burst size is large there might be not enough CQEs
4023 * fetched from the completion queue and not enough resources
4024 * freed to send all the packets.
4029 #ifdef MLX5_PMD_SOFT_COUNTERS
4030 /* Increment sent packets counter. */
4031 txq->stats.opackets += loc.pkts_sent;
4033 if (MLX5_TXOFF_CONFIG(INLINE) && loc.mbuf_free)
4034 __mlx5_tx_free_mbuf(txq, pkts, loc.mbuf_free, olx);
4035 return loc.pkts_sent;
4038 /* Generate routines with Enhanced Multi-Packet Write support. */
4039 MLX5_TXOFF_DECL(full_empw,
4040 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_EMPW)
4042 MLX5_TXOFF_DECL(none_empw,
4043 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW)
4045 MLX5_TXOFF_DECL(md_empw,
4046 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4048 MLX5_TXOFF_DECL(mt_empw,
4049 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4050 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4052 MLX5_TXOFF_DECL(mtsc_empw,
4053 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4054 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4055 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4057 MLX5_TXOFF_DECL(mti_empw,
4058 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4059 MLX5_TXOFF_CONFIG_INLINE |
4060 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4062 MLX5_TXOFF_DECL(mtv_empw,
4063 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4064 MLX5_TXOFF_CONFIG_VLAN |
4065 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4067 MLX5_TXOFF_DECL(mtiv_empw,
4068 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4069 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4070 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4072 MLX5_TXOFF_DECL(sc_empw,
4073 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4074 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4076 MLX5_TXOFF_DECL(sci_empw,
4077 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4078 MLX5_TXOFF_CONFIG_INLINE |
4079 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4081 MLX5_TXOFF_DECL(scv_empw,
4082 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4083 MLX5_TXOFF_CONFIG_VLAN |
4084 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4086 MLX5_TXOFF_DECL(sciv_empw,
4087 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4088 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4089 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4091 MLX5_TXOFF_DECL(i_empw,
4092 MLX5_TXOFF_CONFIG_INLINE |
4093 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4095 MLX5_TXOFF_DECL(v_empw,
4096 MLX5_TXOFF_CONFIG_VLAN |
4097 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4099 MLX5_TXOFF_DECL(iv_empw,
4100 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4101 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4103 /* Generate routines without Enhanced Multi-Packet Write support. */
4104 MLX5_TXOFF_DECL(full,
4105 MLX5_TXOFF_CONFIG_FULL)
4107 MLX5_TXOFF_DECL(none,
4108 MLX5_TXOFF_CONFIG_NONE)
4110 MLX5_TXOFF_DECL(md,
4111 MLX5_TXOFF_CONFIG_METADATA)
4113 MLX5_TXOFF_DECL(mt,
4114 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4115 MLX5_TXOFF_CONFIG_METADATA)
4117 MLX5_TXOFF_DECL(mtsc,
4118 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4119 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4120 MLX5_TXOFF_CONFIG_METADATA)
4122 MLX5_TXOFF_DECL(mti,
4123 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4124 MLX5_TXOFF_CONFIG_INLINE |
4125 MLX5_TXOFF_CONFIG_METADATA)
4128 MLX5_TXOFF_DECL(mtv,
4129 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4130 MLX5_TXOFF_CONFIG_VLAN |
4131 MLX5_TXOFF_CONFIG_METADATA)
4134 MLX5_TXOFF_DECL(mtiv,
4135 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4136 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4137 MLX5_TXOFF_CONFIG_METADATA)
4139 MLX5_TXOFF_DECL(sc,
4140 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4141 MLX5_TXOFF_CONFIG_METADATA)
4143 MLX5_TXOFF_DECL(sci,
4144 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4145 MLX5_TXOFF_CONFIG_INLINE |
4146 MLX5_TXOFF_CONFIG_METADATA)
4149 MLX5_TXOFF_DECL(scv,
4150 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4151 MLX5_TXOFF_CONFIG_VLAN |
4152 MLX5_TXOFF_CONFIG_METADATA)
4155 MLX5_TXOFF_DECL(sciv,
4156 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4157 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4158 MLX5_TXOFF_CONFIG_METADATA)
4160 MLX5_TXOFF_DECL(i,
4161 MLX5_TXOFF_CONFIG_INLINE |
4162 MLX5_TXOFF_CONFIG_METADATA)
4164 MLX5_TXOFF_DECL(v,
4165 MLX5_TXOFF_CONFIG_VLAN |
4166 MLX5_TXOFF_CONFIG_METADATA)
4168 MLX5_TXOFF_DECL(iv,
4169 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4170 MLX5_TXOFF_CONFIG_METADATA)
4172 /* Generate routines with timestamp scheduling. */
4173 MLX5_TXOFF_DECL(full_ts_nompw,
4174 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_TXPP)
4176 MLX5_TXOFF_DECL(full_ts_nompwi,
4177 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4178 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4179 MLX5_TXOFF_CONFIG_VLAN | MLX5_TXOFF_CONFIG_METADATA |
4180 MLX5_TXOFF_CONFIG_TXPP)
4182 MLX5_TXOFF_DECL(full_ts,
4183 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_TXPP |
4184 MLX5_TXOFF_CONFIG_EMPW)
4186 MLX5_TXOFF_DECL(full_ts_noi,
4187 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4188 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4189 MLX5_TXOFF_CONFIG_VLAN | MLX5_TXOFF_CONFIG_METADATA |
4190 MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)
4192 MLX5_TXOFF_DECL(none_ts,
4193 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_TXPP |
4194 MLX5_TXOFF_CONFIG_EMPW)
4196 MLX5_TXOFF_DECL(mdi_ts,
4197 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_METADATA |
4198 MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)
4200 MLX5_TXOFF_DECL(mti_ts,
4201 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4202 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_METADATA |
4203 MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)
4205 MLX5_TXOFF_DECL(mtiv_ts,
4206 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4207 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4208 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_TXPP |
4209 MLX5_TXOFF_CONFIG_EMPW)
4212 * Generate routines with Legacy Multi-Packet Write support.
4213 * This mode is supported by ConnectX-4 Lx only and imposes
4214 * offload limitations; the following are not supported:
4215 * - ACL/Flows (metadata becomes meaningless)
4216 * - WQE Inline headers
4217 * - SRIOV (E-Switch offloads)
4219 * - tunnel encapsulation/decapsulation
4222 MLX5_TXOFF_DECL(none_mpw,
4223 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW |
4224 MLX5_TXOFF_CONFIG_MPW)
4226 MLX5_TXOFF_DECL(mci_mpw,
4227 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
4228 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
4229 MLX5_TXOFF_CONFIG_MPW)
4231 MLX5_TXOFF_DECL(mc_mpw,
4232 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
4233 MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW)
4235 MLX5_TXOFF_DECL(i_mpw,
4236 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
4237 MLX5_TXOFF_CONFIG_MPW)
4240 * Array of declared and compiled Tx burst functions and the corresponding
4241 * supported offload sets. The array is used to select the Tx burst
4242 * function for the offload set specified at Tx queue configuration time.
4244 static struct {
4245 eth_tx_burst_t func;
4246 unsigned int olx;
4247 } txoff_func[] = {
4248 MLX5_TXOFF_INFO(full_empw,
4249 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4250 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4251 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4252 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4254 MLX5_TXOFF_INFO(none_empw,
4255 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW)
4257 MLX5_TXOFF_INFO(md_empw,
4258 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4260 MLX5_TXOFF_INFO(mt_empw,
4261 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4262 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4264 MLX5_TXOFF_INFO(mtsc_empw,
4265 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4266 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4267 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4269 MLX5_TXOFF_INFO(mti_empw,
4270 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4271 MLX5_TXOFF_CONFIG_INLINE |
4272 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4274 MLX5_TXOFF_INFO(mtv_empw,
4275 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4276 MLX5_TXOFF_CONFIG_VLAN |
4277 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4279 MLX5_TXOFF_INFO(mtiv_empw,
4280 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4281 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4282 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4284 MLX5_TXOFF_INFO(sc_empw,
4285 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4286 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4288 MLX5_TXOFF_INFO(sci_empw,
4289 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4290 MLX5_TXOFF_CONFIG_INLINE |
4291 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4293 MLX5_TXOFF_INFO(scv_empw,
4294 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4295 MLX5_TXOFF_CONFIG_VLAN |
4296 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4298 MLX5_TXOFF_INFO(sciv_empw,
4299 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4300 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4301 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4303 MLX5_TXOFF_INFO(i_empw,
4304 MLX5_TXOFF_CONFIG_INLINE |
4305 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4307 MLX5_TXOFF_INFO(v_empw,
4308 MLX5_TXOFF_CONFIG_VLAN |
4309 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4311 MLX5_TXOFF_INFO(iv_empw,
4312 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4313 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4315 MLX5_TXOFF_INFO(full_ts_nompw,
4316 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_TXPP)
4318 MLX5_TXOFF_INFO(full_ts_nompwi,
4319 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4320 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4321 MLX5_TXOFF_CONFIG_VLAN | MLX5_TXOFF_CONFIG_METADATA |
4322 MLX5_TXOFF_CONFIG_TXPP)
4324 MLX5_TXOFF_INFO(full_ts,
4325 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_TXPP |
4326 MLX5_TXOFF_CONFIG_EMPW)
4328 MLX5_TXOFF_INFO(full_ts_noi,
4329 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4330 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4331 MLX5_TXOFF_CONFIG_VLAN | MLX5_TXOFF_CONFIG_METADATA |
4332 MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)
4334 MLX5_TXOFF_INFO(none_ts,
4335 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_TXPP |
4336 MLX5_TXOFF_CONFIG_EMPW)
4338 MLX5_TXOFF_INFO(mdi_ts,
4339 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_METADATA |
4340 MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)
4342 MLX5_TXOFF_INFO(mti_ts,
4343 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4344 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_METADATA |
4345 MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)
4347 MLX5_TXOFF_INFO(mtiv_ts,
4348 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4349 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4350 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_TXPP |
4351 MLX5_TXOFF_CONFIG_EMPW)
4353 MLX5_TXOFF_INFO(full,
4354 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4355 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4356 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4357 MLX5_TXOFF_CONFIG_METADATA)
4359 MLX5_TXOFF_INFO(none,
4360 MLX5_TXOFF_CONFIG_NONE)
4362 MLX5_TXOFF_INFO(md,
4363 MLX5_TXOFF_CONFIG_METADATA)
4365 MLX5_TXOFF_INFO(mt,
4366 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4367 MLX5_TXOFF_CONFIG_METADATA)
4369 MLX5_TXOFF_INFO(mtsc,
4370 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4371 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4372 MLX5_TXOFF_CONFIG_METADATA)
4374 MLX5_TXOFF_INFO(mti,
4375 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4376 MLX5_TXOFF_CONFIG_INLINE |
4377 MLX5_TXOFF_CONFIG_METADATA)
4379 MLX5_TXOFF_INFO(mtv,
4380 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4381 MLX5_TXOFF_CONFIG_VLAN |
4382 MLX5_TXOFF_CONFIG_METADATA)
4384 MLX5_TXOFF_INFO(mtiv,
4385 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4386 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4387 MLX5_TXOFF_CONFIG_METADATA)
4389 MLX5_TXOFF_INFO(sc,
4390 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4391 MLX5_TXOFF_CONFIG_METADATA)
4393 MLX5_TXOFF_INFO(sci,
4394 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4395 MLX5_TXOFF_CONFIG_INLINE |
4396 MLX5_TXOFF_CONFIG_METADATA)
4398 MLX5_TXOFF_INFO(scv,
4399 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4400 MLX5_TXOFF_CONFIG_VLAN |
4401 MLX5_TXOFF_CONFIG_METADATA)
4403 MLX5_TXOFF_INFO(sciv,
4404 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4405 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4406 MLX5_TXOFF_CONFIG_METADATA)
4408 MLX5_TXOFF_INFO(i,
4409 MLX5_TXOFF_CONFIG_INLINE |
4410 MLX5_TXOFF_CONFIG_METADATA)
4412 MLX5_TXOFF_INFO(v,
4413 MLX5_TXOFF_CONFIG_VLAN |
4414 MLX5_TXOFF_CONFIG_METADATA)
4416 MLX5_TXOFF_INFO(iv,
4417 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4418 MLX5_TXOFF_CONFIG_METADATA)
4420 MLX5_TXOFF_INFO(none_mpw,
4421 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW |
4422 MLX5_TXOFF_CONFIG_MPW)
4424 MLX5_TXOFF_INFO(mci_mpw,
4425 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
4426 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
4427 MLX5_TXOFF_CONFIG_MPW)
4429 MLX5_TXOFF_INFO(mc_mpw,
4430 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
4431 MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW)
4433 MLX5_TXOFF_INFO(i_mpw,
4434 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
4435 MLX5_TXOFF_CONFIG_MPW)
4439 * Configure the Tx function to use. The routine checks the configured
4440 * Tx offloads for the device and selects the appropriate Tx burst
4441 * routine. There are multiple Tx burst routines compiled from
4442 * the same template, each optimized for a dedicated offload set.
4445 * @param dev
4446 * Pointer to the device structure.
4448 * @return
4449 * Pointer to the selected Tx burst function.
4452 mlx5_select_tx_function(struct rte_eth_dev *dev)
4454 struct mlx5_priv *priv = dev->data->dev_private;
4455 struct mlx5_dev_config *config = &priv->config;
4456 uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
4457 unsigned int diff = 0, olx = 0, i, m;
4460 if (tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
4461 /* We should support Multi-Segment Packets. */
4462 olx |= MLX5_TXOFF_CONFIG_MULTI;
4464 if (tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
4465 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
4466 DEV_TX_OFFLOAD_GRE_TNL_TSO |
4467 DEV_TX_OFFLOAD_IP_TNL_TSO |
4468 DEV_TX_OFFLOAD_UDP_TNL_TSO)) {
4469 /* We should support TCP Send Offload. */
4470 olx |= MLX5_TXOFF_CONFIG_TSO;
4472 if (tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO |
4473 DEV_TX_OFFLOAD_UDP_TNL_TSO |
4474 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
4475 /* We should support Software Parser for Tunnels. */
4476 olx |= MLX5_TXOFF_CONFIG_SWP;
4478 if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
4479 DEV_TX_OFFLOAD_UDP_CKSUM |
4480 DEV_TX_OFFLOAD_TCP_CKSUM |
4481 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
4482 /* We should support IP/TCP/UDP Checksums. */
4483 olx |= MLX5_TXOFF_CONFIG_CSUM;
4485 if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT) {
4486 /* We should support VLAN insertion. */
4487 olx |= MLX5_TXOFF_CONFIG_VLAN;
4489 if (tx_offloads & DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP &&
4490 rte_mbuf_dynflag_lookup
4491 (RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL) >= 0 &&
4492 rte_mbuf_dynfield_lookup
4493 (RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL) >= 0) {
4494 /* Offload configured, dynamic entities registered. */
4495 olx |= MLX5_TXOFF_CONFIG_TXPP;
4497 if (priv->txqs_n && (*priv->txqs)[0]) {
4498 struct mlx5_txq_data *txd = (*priv->txqs)[0];
4500 if (txd->inlen_send) {
4502 * Check the data inline requirements. Data inlining
4503 * is enabled on a per-device basis, so checking
4504 * the first Tx queue only is sufficient.
4506 * If the device does not support VLAN insertion in the WQE
4507 * and some queues are requested to perform VLAN
4508 * insertion offload, then inlining must be enabled.
4510 olx |= MLX5_TXOFF_CONFIG_INLINE;
4513 if (config->mps == MLX5_MPW_ENHANCED &&
4514 config->txq_inline_min <= 0) {
4516 * The NIC supports Enhanced Multi-Packet Write
4517 * and does not require minimal inline data.
4519 olx |= MLX5_TXOFF_CONFIG_EMPW;
4521 if (rte_flow_dynf_metadata_avail()) {
4522 /* We should support Flow metadata. */
4523 olx |= MLX5_TXOFF_CONFIG_METADATA;
4525 if (config->mps == MLX5_MPW) {
4527 * The NIC supports Legacy Multi-Packet Write.
4528 * The MLX5_TXOFF_CONFIG_MPW controls the
4529 * descriptor building method in combination
4530 * with MLX5_TXOFF_CONFIG_EMPW.
4532 if (!(olx & (MLX5_TXOFF_CONFIG_TSO |
4533 MLX5_TXOFF_CONFIG_SWP |
4534 MLX5_TXOFF_CONFIG_VLAN |
4535 MLX5_TXOFF_CONFIG_METADATA)))
4536 olx |= MLX5_TXOFF_CONFIG_EMPW |
4537 MLX5_TXOFF_CONFIG_MPW;
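/*
 * Worked example (illustrative): with DEV_TX_OFFLOAD_MULTI_SEGS,
 * DEV_TX_OFFLOAD_TCP_TSO and DEV_TX_OFFLOAD_TCP_CKSUM configured, flow
 * metadata registered, no data inlining and Enhanced MPW available, olx
 * becomes MULTI | TSO | CSUM | METADATA | EMPW. No table entry matches
 * this set exactly, so the scan below picks the superset entry with the
 * fewest extra offloads - mtsc_empw, which adds only SWP.
 */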
4540 * Scan the routines table to find the minimal
4541 * satisfying routine with requested offloads.
4543 m = RTE_DIM(txoff_func);
4544 for (i = 0; i < RTE_DIM(txoff_func); i++) {
4547 tmp = txoff_func[i].olx;
4548 if (tmp == olx) {
4549 /* Meets requested offloads exactly. */
4550 m = i;
4551 break;
4553 if ((tmp & olx) != olx) {
4554 /* Does not meet requested offloads at all. */
4555 continue;
4557 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_MPW)
4558 /* Do not enable legacy MPW if not configured. */
4559 continue;
4560 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_EMPW)
4561 /* Do not enable eMPW if not configured. */
4562 continue;
4563 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_INLINE)
4564 /* Do not enable inlining if not configured. */
4565 continue;
4566 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_TXPP)
4567 /* Do not enable scheduling if not configured. */
4568 continue;
4570 * Some routine meets the requirements.
4571 * Check whether it has minimal amount
4572 * of not requested offloads.
4574 tmp = __builtin_popcountl(tmp & ~olx);
4575 if (m >= RTE_DIM(txoff_func) || tmp < diff) {
4576 /* First or better match, save and continue. */
4577 m = i;
4578 diff = tmp;
4579 continue;
4581 if (tmp == diff) {
4582 tmp = txoff_func[i].olx ^ txoff_func[m].olx;
4583 if (__builtin_ffsl(txoff_func[i].olx & ~tmp) <
4584 __builtin_ffsl(txoff_func[m].olx & ~tmp)) {
4585 /* Lighter not requested offload. */
4586 m = i;
4590 if (m >= RTE_DIM(txoff_func)) {
4591 DRV_LOG(DEBUG, "port %u has no selected Tx function"
4592 " for requested offloads %04X",
4593 dev->data->port_id, olx);
4596 DRV_LOG(DEBUG, "port %u has selected Tx function"
4597 " supporting offloads %04X/%04X",
4598 dev->data->port_id, olx, txoff_func[m].olx);
4599 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MULTI)
4600 DRV_LOG(DEBUG, "\tMULTI (multi segment)");
4601 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_TSO)
4602 DRV_LOG(DEBUG, "\tTSO (TCP send offload)");
4603 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_SWP)
4604 DRV_LOG(DEBUG, "\tSWP (software parser)");
4605 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_CSUM)
4606 DRV_LOG(DEBUG, "\tCSUM (checksum offload)");
4607 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_INLINE)
4608 DRV_LOG(DEBUG, "\tINLIN (inline data)");
4609 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_VLAN)
4610 DRV_LOG(DEBUG, "\tVLANI (VLAN insertion)");
4611 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_METADATA)
4612 DRV_LOG(DEBUG, "\tMETAD (tx Flow metadata)");
4613 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_TXPP)
4614 DRV_LOG(DEBUG, "\tTSTMP (tx Scheduling)");
4615 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_EMPW) {
4616 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MPW)
4617 DRV_LOG(DEBUG, "\tMPW (Legacy MPW)");
4618 else
4619 DRV_LOG(DEBUG, "\tEMPW (Enhanced MPW)");
4621 return txoff_func[m].func;
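/*
 * Note (assumed usage): elsewhere in the driver the selected routine is
 * typically installed as the ethdev burst callback at device start,
 * roughly:
 *
 *   dev->tx_pkt_burst = mlx5_select_tx_function(dev);
 *
 * so every rte_eth_tx_burst() call on the port dispatches directly to
 * the specialized routine chosen above.
 */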
4625 * DPDK callback to get the TX queue information.
4627 * @param dev
4628 * Pointer to the device structure.
4630 * @param tx_queue_id
4631 * Tx queue identifier.
4633 * @param qinfo
4634 * Pointer to the TX queue information structure.
4641 mlx5_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
4642 struct rte_eth_txq_info *qinfo)
4644 struct mlx5_priv *priv = dev->data->dev_private;
4645 struct mlx5_txq_data *txq = (*priv->txqs)[tx_queue_id];
4646 struct mlx5_txq_ctrl *txq_ctrl =
4647 container_of(txq, struct mlx5_txq_ctrl, txq);
4649 if (!txq)
4650 return;
4651 qinfo->nb_desc = txq->elts_s;
4652 qinfo->conf.tx_thresh.pthresh = 0;
4653 qinfo->conf.tx_thresh.hthresh = 0;
4654 qinfo->conf.tx_thresh.wthresh = 0;
4655 qinfo->conf.tx_rs_thresh = 0;
4656 qinfo->conf.tx_free_thresh = 0;
4657 qinfo->conf.tx_deferred_start = txq_ctrl ? 0 : 1;
4658 qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
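/*
 * Minimal usage sketch (application side; port_id and queue 0 are
 * hypothetical, shown only to illustrate how this callback is reached
 * through the generic ethdev API):
 *
 *   struct rte_eth_txq_info info;
 *   if (rte_eth_tx_queue_info_get(port_id, 0, &info) == 0)
 *       printf("txq0: %u descriptors\n", info.nb_desc);
 */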
4662 * DPDK callback to get the TX packet burst mode information.
4664 * @param dev
4665 * Pointer to the device structure.
4667 * @param tx_queue_id
4668 * Tx queue identifier.
4670 * @param mode
4671 * Pointer to the burst mode information.
4673 * @return
4674 * 0 on success, -EINVAL on failure.
4678 mlx5_tx_burst_mode_get(struct rte_eth_dev *dev,
4679 uint16_t tx_queue_id,
4680 struct rte_eth_burst_mode *mode)
4682 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
4683 struct mlx5_priv *priv = dev->data->dev_private;
4684 struct mlx5_txq_data *txq = (*priv->txqs)[tx_queue_id];
4685 unsigned int i, olx;
4687 for (i = 0; i < RTE_DIM(txoff_func); i++) {
4688 if (pkt_burst == txoff_func[i].func) {
4689 olx = txoff_func[i].olx;
4690 snprintf(mode->info, sizeof(mode->info),
4691 "%s%s%s%s%s%s%s%s%s%s",
4692 (olx & MLX5_TXOFF_CONFIG_EMPW) ?
4693 ((olx & MLX5_TXOFF_CONFIG_MPW) ?
4694 "Legacy MPW" : "Enhanced MPW") : "No MPW",
4695 (olx & MLX5_TXOFF_CONFIG_MULTI) ?
4696 " + MULTI" : "",
4697 (olx & MLX5_TXOFF_CONFIG_TSO) ?
4698 " + TSO" : "",
4699 (olx & MLX5_TXOFF_CONFIG_SWP) ?
4700 " + SWP" : "",
4701 (olx & MLX5_TXOFF_CONFIG_CSUM) ?
4702 " + CSUM" : "",
4703 (olx & MLX5_TXOFF_CONFIG_INLINE) ?
4704 " + INLINE" : "",
4705 (olx & MLX5_TXOFF_CONFIG_VLAN) ?
4706 " + VLAN" : "",
4707 (olx & MLX5_TXOFF_CONFIG_METADATA) ?
4708 " + METADATA" : "",
4709 (olx & MLX5_TXOFF_CONFIG_TXPP) ?
4710 " + TXPP" : "",
4711 (txq && txq->fast_free) ?
4712 " + Fast Free" : "");
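/*
 * Minimal usage sketch (application side; port_id and queue 0 are
 * hypothetical):
 *
 *   struct rte_eth_burst_mode bm;
 *   if (rte_eth_tx_burst_mode_get(port_id, 0, &bm) == 0)
 *       printf("Tx burst mode: %s\n", bm.info);
 *
 * which might print something like "Enhanced MPW + TSO + CSUM + METADATA".
 */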