1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015-2019 Mellanox Technologies, Ltd
11 #include <rte_mempool.h>
12 #include <rte_prefetch.h>
13 #include <rte_common.h>
14 #include <rte_branch_prediction.h>
15 #include <rte_ether.h>
16 #include <rte_cycles.h>
20 #include <mlx5_common.h>
22 #include "mlx5_autoconf.h"
23 #include "mlx5_defs.h"
26 #include "mlx5_utils.h"
27 #include "mlx5_rxtx.h"
29 /* TX burst subroutines return codes. */
30 enum mlx5_txcmp_code {
31 MLX5_TXCMP_CODE_EXIT = 0,
32 MLX5_TXCMP_CODE_ERROR,
33 MLX5_TXCMP_CODE_SINGLE,
34 MLX5_TXCMP_CODE_MULTI,
40 * These defines are used to configure the set of Tx burst routine options
41 * supported at compile time. Options that are not specified are optimized
42 * out because the related if conditions can be evaluated at compile time.
43 * The offloads with the bigger runtime check overhead (requiring more CPU
44 * cycles to skip) should have the bigger index - this is needed to select
45 * the better matching routine if there is no exact match and some offloads
46 * are not actually requested.
48 #define MLX5_TXOFF_CONFIG_MULTI (1u << 0) /* Multi-segment packets.*/
49 #define MLX5_TXOFF_CONFIG_TSO (1u << 1) /* TCP send offload supported.*/
50 #define MLX5_TXOFF_CONFIG_SWP (1u << 2) /* Tunnels/SW Parser offloads.*/
51 #define MLX5_TXOFF_CONFIG_CSUM (1u << 3) /* Check Sums offloaded. */
52 #define MLX5_TXOFF_CONFIG_INLINE (1u << 4) /* Data inlining supported. */
53 #define MLX5_TXOFF_CONFIG_VLAN (1u << 5) /* VLAN insertion supported.*/
54 #define MLX5_TXOFF_CONFIG_METADATA (1u << 6) /* Flow metadata. */
55 #define MLX5_TXOFF_CONFIG_EMPW (1u << 8) /* Enhanced MPW supported.*/
56 #define MLX5_TXOFF_CONFIG_MPW (1u << 9) /* Legacy MPW supported.*/
57 #define MLX5_TXOFF_CONFIG_TXPP (1u << 10) /* Scheduling on timestamp.*/
59 /* The most common offloads groups. */
60 #define MLX5_TXOFF_CONFIG_NONE 0
61 #define MLX5_TXOFF_CONFIG_FULL (MLX5_TXOFF_CONFIG_MULTI | \
62 MLX5_TXOFF_CONFIG_TSO | \
63 MLX5_TXOFF_CONFIG_SWP | \
64 MLX5_TXOFF_CONFIG_CSUM | \
65 MLX5_TXOFF_CONFIG_INLINE | \
66 MLX5_TXOFF_CONFIG_VLAN | \
67 MLX5_TXOFF_CONFIG_METADATA)
69 #define MLX5_TXOFF_CONFIG(mask) (olx & MLX5_TXOFF_CONFIG_##mask)
71 #define MLX5_TXOFF_DECL(func, olx) \
72 static uint16_t mlx5_tx_burst_##func(void *txq, \
73 struct rte_mbuf **pkts, \
76 return mlx5_tx_burst_tmpl((struct mlx5_txq_data *)txq, \
77 pkts, pkts_n, (olx)); \
80 #define MLX5_TXOFF_INFO(func, olx) {mlx5_tx_burst_##func, olx},
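/*
 * Illustrative sketch (not part of the original source): the "olx" argument
 * passed through MLX5_TXOFF_DECL() is a compile-time constant, so every
 * branch guarded by MLX5_TXOFF_CONFIG() can be resolved by the compiler and
 * the unused offload paths are removed from the generated routine, e.g.:
 *
 *   MLX5_TXOFF_DECL(full_empw, MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_EMPW)
 *
 *   // inside mlx5_tx_burst_tmpl() and its helpers:
 *   if (MLX5_TXOFF_CONFIG(TSO)) {
 *           // kept only when MLX5_TXOFF_CONFIG_TSO is present in olx
 *   }
 *
 * The exact routine names generated elsewhere in this file may differ; this
 * only shows how the option mask drives dead-code elimination.
 */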
83 static_assert(MLX5_CQE_STATUS_HW_OWN < 0, "Must be negative value");
84 static_assert(MLX5_CQE_STATUS_SW_OWN < 0, "Must be negative value");
85 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
87 sizeof(rte_v128u32_t)),
88 "invalid Ethernet Segment data size");
89 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
91 sizeof(struct rte_vlan_hdr) +
92 2 * RTE_ETHER_ADDR_LEN),
93 "invalid Ethernet Segment data size");
94 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
96 sizeof(rte_v128u32_t)),
97 "invalid Ethernet Segment data size");
98 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
100 sizeof(struct rte_vlan_hdr) +
101 2 * RTE_ETHER_ADDR_LEN),
102 "invalid Ethernet Segment data size");
103 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
105 sizeof(rte_v128u32_t)),
106 "invalid Ethernet Segment data size");
107 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
109 sizeof(struct rte_vlan_hdr) +
110 2 * RTE_ETHER_ADDR_LEN),
111 "invalid Ethernet Segment data size");
112 static_assert(MLX5_DSEG_MIN_INLINE_SIZE ==
113 (2 * RTE_ETHER_ADDR_LEN),
114 "invalid Data Segment data size");
115 static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
117 static_assert((sizeof(struct rte_vlan_hdr) +
118 sizeof(struct rte_ether_hdr)) ==
119 MLX5_ESEG_MIN_INLINE_SIZE,
120 "invalid min inline data size");
121 static_assert(MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE <=
122 MLX5_DSEG_MAX, "invalid WQE max size");
123 static_assert(MLX5_WQE_CSEG_SIZE == MLX5_WSEG_SIZE,
124 "invalid WQE Control Segment size");
125 static_assert(MLX5_WQE_ESEG_SIZE == MLX5_WSEG_SIZE,
126 "invalid WQE Ethernet Segment size");
127 static_assert(MLX5_WQE_DSEG_SIZE == MLX5_WSEG_SIZE,
128 "invalid WQE Data Segment size");
129 static_assert(MLX5_WQE_SIZE == 4 * MLX5_WSEG_SIZE,
132 static __rte_always_inline uint32_t
133 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
134 volatile struct mlx5_mini_cqe8 *mcqe);
136 static __rte_always_inline int
137 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
138 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe);
140 static __rte_always_inline uint32_t
141 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe);
143 static __rte_always_inline void
144 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
145 volatile struct mlx5_cqe *cqe,
146 volatile struct mlx5_mini_cqe8 *mcqe);
149 mlx5_queue_state_modify(struct rte_eth_dev *dev,
150 struct mlx5_mp_arg_queue_state_modify *sm);
153 mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *__rte_restrict tcp,
154 volatile struct mlx5_cqe *__rte_restrict cqe,
155 uint32_t phcsum, uint8_t l4_type);
158 mlx5_lro_update_hdr(uint8_t *__rte_restrict padd,
159 volatile struct mlx5_cqe *__rte_restrict cqe,
160 volatile struct mlx5_mini_cqe8 *mcqe,
161 struct mlx5_rxq_data *rxq, uint32_t len);
163 uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
164 [0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
167 uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned;
168 uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned;
170 uint64_t rte_net_mlx5_dynf_inline_mask;
171 #define PKT_TX_DYNF_NOINLINE rte_net_mlx5_dynf_inline_mask
174 * Build a table to translate Rx completion flags to packet type.
176 * @note: update mlx5_dev_supported_ptypes_get() if anything is changed here.
179 mlx5_set_ptype_table(void)
182 uint32_t (*p)[RTE_DIM(mlx5_ptype_table)] = &mlx5_ptype_table;
184 /* Last entry must not be overwritten, reserved for errored packet. */
185 for (i = 0; i < RTE_DIM(mlx5_ptype_table) - 1; ++i)
186 (*p)[i] = RTE_PTYPE_UNKNOWN;
188 * The index to the array should have:
189 * bit[1:0] = l3_hdr_type
190 * bit[4:2] = l4_hdr_type
193 * bit[7] = outer_l3_type
196 (*p)[0x00] = RTE_PTYPE_L2_ETHER;
198 (*p)[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
199 RTE_PTYPE_L4_NONFRAG;
200 (*p)[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
201 RTE_PTYPE_L4_NONFRAG;
203 (*p)[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
205 (*p)[0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
208 (*p)[0x05] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
210 (*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
212 (*p)[0x0d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
214 (*p)[0x0e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
216 (*p)[0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
218 (*p)[0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
221 (*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
223 (*p)[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
225 /* Repeat with outer_l3_type being set. Just in case. */
226 (*p)[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
227 RTE_PTYPE_L4_NONFRAG;
228 (*p)[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
229 RTE_PTYPE_L4_NONFRAG;
230 (*p)[0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
232 (*p)[0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
234 (*p)[0x85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
236 (*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
238 (*p)[0x8d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
240 (*p)[0x8e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
242 (*p)[0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
244 (*p)[0x92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
246 (*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
248 (*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
251 (*p)[0x40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
252 (*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
253 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
254 RTE_PTYPE_INNER_L4_NONFRAG;
255 (*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
256 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
257 RTE_PTYPE_INNER_L4_NONFRAG;
258 (*p)[0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
259 (*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
260 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
261 RTE_PTYPE_INNER_L4_NONFRAG;
262 (*p)[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
263 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
264 RTE_PTYPE_INNER_L4_NONFRAG;
265 /* Tunneled - Fragmented */
266 (*p)[0x61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
267 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
268 RTE_PTYPE_INNER_L4_FRAG;
269 (*p)[0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
270 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
271 RTE_PTYPE_INNER_L4_FRAG;
272 (*p)[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
273 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
274 RTE_PTYPE_INNER_L4_FRAG;
275 (*p)[0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
276 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
277 RTE_PTYPE_INNER_L4_FRAG;
279 (*p)[0x45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
280 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
281 RTE_PTYPE_INNER_L4_TCP;
282 (*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
283 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
284 RTE_PTYPE_INNER_L4_TCP;
285 (*p)[0x4d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
286 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
287 RTE_PTYPE_INNER_L4_TCP;
288 (*p)[0x4e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
289 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
290 RTE_PTYPE_INNER_L4_TCP;
291 (*p)[0x51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
292 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
293 RTE_PTYPE_INNER_L4_TCP;
294 (*p)[0x52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
295 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
296 RTE_PTYPE_INNER_L4_TCP;
297 (*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
298 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
299 RTE_PTYPE_INNER_L4_TCP;
300 (*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
301 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
302 RTE_PTYPE_INNER_L4_TCP;
303 (*p)[0xcd] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
304 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
305 RTE_PTYPE_INNER_L4_TCP;
306 (*p)[0xce] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
307 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
308 RTE_PTYPE_INNER_L4_TCP;
309 (*p)[0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
310 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
311 RTE_PTYPE_INNER_L4_TCP;
312 (*p)[0xd2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
313 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
314 RTE_PTYPE_INNER_L4_TCP;
316 (*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
317 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
318 RTE_PTYPE_INNER_L4_UDP;
319 (*p)[0x4a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
320 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
321 RTE_PTYPE_INNER_L4_UDP;
322 (*p)[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
323 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
324 RTE_PTYPE_INNER_L4_UDP;
325 (*p)[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
326 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
327 RTE_PTYPE_INNER_L4_UDP;
331 * Build a table to translate packet checksum offload flags to the Verbs checksum type.
334 mlx5_set_cksum_table(void)
340 * The index should have:
341 * bit[0] = PKT_TX_TCP_SEG
342 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
343 * bit[4] = PKT_TX_IP_CKSUM
344 * bit[8] = PKT_TX_OUTER_IP_CKSUM
347 for (i = 0; i < RTE_DIM(mlx5_cksum_table); ++i) {
350 /* Tunneled packet. */
351 if (i & (1 << 8)) /* Outer IP. */
352 v |= MLX5_ETH_WQE_L3_CSUM;
353 if (i & (1 << 4)) /* Inner IP. */
354 v |= MLX5_ETH_WQE_L3_INNER_CSUM;
355 if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
356 v |= MLX5_ETH_WQE_L4_INNER_CSUM;
359 if (i & (1 << 4)) /* IP. */
360 v |= MLX5_ETH_WQE_L3_CSUM;
361 if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
362 v |= MLX5_ETH_WQE_L4_CSUM;
364 mlx5_cksum_table[i] = v;
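/*
 * Worked example (illustrative, not from the original source): for a plain
 * TCP packet with IP and TCP checksum offloads requested, the index built
 * from the layout above is i = (1 << 4) | (1 << 2) = 0x14 (no TSO, no outer
 * IP checksum, not tunneled), so the non-tunneled branch yields
 * mlx5_cksum_table[0x14] = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM.
 */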
369 * Build a table to translate mbuf packet type flags to the Verbs SWP type.
372 mlx5_set_swp_types_table(void)
378 * The index should have:
379 * bit[0:1] = PKT_TX_L4_MASK
380 * bit[4] = PKT_TX_IPV6
381 * bit[8] = PKT_TX_OUTER_IPV6
382 * bit[9] = PKT_TX_OUTER_UDP
384 for (i = 0; i < RTE_DIM(mlx5_swp_types_table); ++i) {
387 v |= MLX5_ETH_WQE_L3_OUTER_IPV6;
389 v |= MLX5_ETH_WQE_L4_OUTER_UDP;
391 v |= MLX5_ETH_WQE_L3_INNER_IPV6;
392 if ((i & 3) == (PKT_TX_UDP_CKSUM >> 52))
393 v |= MLX5_ETH_WQE_L4_INNER_UDP;
394 mlx5_swp_types_table[i] = v;
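/*
 * Worked example (illustrative, not from the original source): a UDP tunnel
 * packet with an inner UDP checksum request maps to
 * i = (PKT_TX_UDP_CKSUM >> 52) | (1 << 9) = 0x203, and the table entry is
 * MLX5_ETH_WQE_L4_OUTER_UDP | MLX5_ETH_WQE_L4_INNER_UDP (IPv4 is assumed on
 * both levels, so neither IPv6 bit is set).
 */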
399 * Set Software Parser flags and offsets in Ethernet Segment of WQE.
400 * Flags must be initialized to zero beforehand.
403 * Pointer to burst routine local context.
405 * Pointer to store Software Parser flags
407 * Configured Tx offloads mask. It is fully defined at
408 * compile time and may be used for optimization.
411 * Software Parser offsets packed in dword.
412 * Software Parser flags are set by pointer.
414 static __rte_always_inline uint32_t
415 txq_mbuf_to_swp(struct mlx5_txq_local *__rte_restrict loc,
420 unsigned int idx, off;
423 if (!MLX5_TXOFF_CONFIG(SWP))
425 ol = loc->mbuf->ol_flags;
426 tunnel = ol & PKT_TX_TUNNEL_MASK;
428 * Check whether Software Parser is required.
429 * Only customized tunnels may require it.
431 if (likely(tunnel != PKT_TX_TUNNEL_UDP && tunnel != PKT_TX_TUNNEL_IP))
434 * The index should have:
435 * bit[0:1] = PKT_TX_L4_MASK
436 * bit[4] = PKT_TX_IPV6
437 * bit[8] = PKT_TX_OUTER_IPV6
438 * bit[9] = PKT_TX_OUTER_UDP
440 idx = (ol & (PKT_TX_L4_MASK | PKT_TX_IPV6 | PKT_TX_OUTER_IPV6)) >> 52;
441 idx |= (tunnel == PKT_TX_TUNNEL_UDP) ? (1 << 9) : 0;
442 *swp_flags = mlx5_swp_types_table[idx];
444 * Set offsets for SW parser. Since ConnectX-5, SW parser just
445 * complements HW parser. SW parser starts to engage only if HW parser
446 * can't reach a header. For the older devices, HW parser will not kick
447 * in if any of SWP offsets is set. Therefore, all of the L3 offsets
448 * should be set regardless of HW offload.
450 off = loc->mbuf->outer_l2_len;
451 if (MLX5_TXOFF_CONFIG(VLAN) && ol & PKT_TX_VLAN_PKT)
452 off += sizeof(struct rte_vlan_hdr);
453 set = (off >> 1) << 8; /* Outer L3 offset. */
454 off += loc->mbuf->outer_l3_len;
455 if (tunnel == PKT_TX_TUNNEL_UDP)
456 set |= off >> 1; /* Outer L4 offset. */
457 if (ol & (PKT_TX_IPV4 | PKT_TX_IPV6)) { /* Inner IP. */
458 const uint64_t csum = ol & PKT_TX_L4_MASK;
459 off += loc->mbuf->l2_len;
460 set |= (off >> 1) << 24; /* Inner L3 offset. */
461 if (csum == PKT_TX_TCP_CKSUM ||
462 csum == PKT_TX_UDP_CKSUM ||
463 (MLX5_TXOFF_CONFIG(TSO) && ol & PKT_TX_TCP_SEG)) {
464 off += loc->mbuf->l3_len;
465 set |= (off >> 1) << 16; /* Inner L4 offset. */
468 set = rte_cpu_to_le_32(set);
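/*
 * Illustrative example (not from the original source): the offsets above are
 * stored in 2-byte units, packed as outer L4 in bits 0-7, outer L3 in bits
 * 8-15, inner L4 in bits 16-23 and inner L3 in bits 24-31. For a VXLAN-like
 * UDP tunnel with outer_l2_len = 14, outer_l3_len = 20, l2_len = 30 (UDP +
 * tunnel header + inner Ethernet) and l3_len = 20, the offsets become
 * 14, 34, 64 and 84 bytes, i.e. set = (42 << 16) | (32 << 24) | (7 << 8) | 17
 * before the little-endian conversion.
 */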
473 * Convert the Checksum offloads to Verbs.
476 * Pointer to the mbuf.
479 * Converted checksum flags.
481 static __rte_always_inline uint8_t
482 txq_ol_cksum_to_cs(struct rte_mbuf *buf)
485 uint8_t is_tunnel = !!(buf->ol_flags & PKT_TX_TUNNEL_MASK);
486 const uint64_t ol_flags_mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK |
487 PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM;
490 * The index should have:
491 * bit[0] = PKT_TX_TCP_SEG
492 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
493 * bit[4] = PKT_TX_IP_CKSUM
494 * bit[8] = PKT_TX_OUTER_IP_CKSUM
497 idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (!!is_tunnel << 9);
498 return mlx5_cksum_table[idx];
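/*
 * Illustrative note (not from the original source): the right shift by 50
 * works because PKT_TX_TCP_SEG is bit 50 of ol_flags, so after the shift the
 * relevant flags land exactly on the table index layout above. For example,
 * PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM on a non-tunneled packet gives
 * idx = 0x14 and the same table entry as in the mlx5_set_cksum_table()
 * example.
 */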
502 * Internal function to compute the number of used descriptors in an RX queue
508 * The number of used Rx descriptors.
511 rx_queue_count(struct mlx5_rxq_data *rxq)
513 struct rxq_zip *zip = &rxq->zip;
514 volatile struct mlx5_cqe *cqe;
515 const unsigned int cqe_n = (1 << rxq->cqe_n);
516 const unsigned int sges_n = (1 << rxq->sges_n);
517 const unsigned int elts_n = (1 << rxq->elts_n);
518 const unsigned int strd_n = (1 << rxq->strd_num_n);
519 const unsigned int cqe_cnt = cqe_n - 1;
520 unsigned int cq_ci, used;
522 /* If we are processing a compressed CQE. */
524 used = zip->cqe_cnt - zip->ai;
530 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
531 while (check_cqe(cqe, cqe_n, cq_ci) != MLX5_CQE_STATUS_HW_OWN) {
535 op_own = cqe->op_own;
536 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
537 n = rte_be_to_cpu_32(cqe->byte_cnt);
542 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
544 used = RTE_MIN(used * sges_n, elts_n * strd_n);
549 * DPDK callback to check the status of an Rx descriptor.
554 * The index of the descriptor in the ring.
557 * The status of the Rx descriptor.
560 mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
562 struct mlx5_rxq_data *rxq = rx_queue;
563 struct mlx5_rxq_ctrl *rxq_ctrl =
564 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
565 struct rte_eth_dev *dev = ETH_DEV(rxq_ctrl->priv);
567 if (dev->rx_pkt_burst == NULL ||
568 dev->rx_pkt_burst == removed_rx_burst) {
572 if (offset >= (1 << rxq->cqe_n)) {
576 if (offset < rx_queue_count(rxq))
577 return RTE_ETH_RX_DESC_DONE;
578 return RTE_ETH_RX_DESC_AVAIL;
582 * DPDK callback to get the RX queue information
585 * Pointer to the device structure.
588 * Rx queue identifier.
591 * Pointer to the RX queue information structure.
598 mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
599 struct rte_eth_rxq_info *qinfo)
601 struct mlx5_priv *priv = dev->data->dev_private;
602 struct mlx5_rxq_data *rxq = (*priv->rxqs)[rx_queue_id];
603 struct mlx5_rxq_ctrl *rxq_ctrl =
604 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
608 qinfo->mp = mlx5_rxq_mprq_enabled(rxq) ?
609 rxq->mprq_mp : rxq->mp;
610 qinfo->conf.rx_thresh.pthresh = 0;
611 qinfo->conf.rx_thresh.hthresh = 0;
612 qinfo->conf.rx_thresh.wthresh = 0;
613 qinfo->conf.rx_free_thresh = rxq->rq_repl_thresh;
614 qinfo->conf.rx_drop_en = 1;
615 qinfo->conf.rx_deferred_start = rxq_ctrl ? 0 : 1;
616 qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
617 qinfo->scattered_rx = dev->data->scattered_rx;
618 qinfo->nb_desc = mlx5_rxq_mprq_enabled(rxq) ?
619 (1 << rxq->elts_n) * (1 << rxq->strd_num_n) :
624 * DPDK callback to get the RX packet burst mode information
627 * Pointer to the device structure.
630 * Rx queue identifier.
633 * Pointer to the burst mode information.
636 * 0 on success, -EINVAL on failure.
640 mlx5_rx_burst_mode_get(struct rte_eth_dev *dev,
641 uint16_t rx_queue_id __rte_unused,
642 struct rte_eth_burst_mode *mode)
644 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
645 struct mlx5_priv *priv = dev->data->dev_private;
646 struct mlx5_rxq_data *rxq;
648 rxq = (*priv->rxqs)[rx_queue_id];
653 if (pkt_burst == mlx5_rx_burst) {
654 snprintf(mode->info, sizeof(mode->info), "%s", "Scalar");
655 } else if (pkt_burst == mlx5_rx_burst_mprq) {
656 snprintf(mode->info, sizeof(mode->info), "%s", "Multi-Packet RQ");
657 } else if (pkt_burst == mlx5_rx_burst_vec) {
658 #if defined RTE_ARCH_X86_64
659 snprintf(mode->info, sizeof(mode->info), "%s", "Vector SSE");
660 #elif defined RTE_ARCH_ARM64
661 snprintf(mode->info, sizeof(mode->info), "%s", "Vector Neon");
662 #elif defined RTE_ARCH_PPC_64
663 snprintf(mode->info, sizeof(mode->info), "%s", "Vector AltiVec");
667 } else if (pkt_burst == mlx5_rx_burst_mprq_vec) {
668 #if defined RTE_ARCH_X86_64
669 snprintf(mode->info, sizeof(mode->info), "%s", "MPRQ Vector SSE");
670 #elif defined RTE_ARCH_ARM64
671 snprintf(mode->info, sizeof(mode->info), "%s", "MPRQ Vector Neon");
672 #elif defined RTE_ARCH_PPC_64
673 snprintf(mode->info, sizeof(mode->info), "%s", "MPRQ Vector AltiVec");
684 * DPDK callback to get the number of used descriptors in an Rx queue.
687 * Pointer to the device structure.
693 * The number of used Rx descriptors.
694 * -EINVAL if the queue is invalid.
697 mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
699 struct mlx5_priv *priv = dev->data->dev_private;
700 struct mlx5_rxq_data *rxq;
702 if (dev->rx_pkt_burst == NULL ||
703 dev->rx_pkt_burst == removed_rx_burst) {
707 rxq = (*priv->rxqs)[rx_queue_id];
712 return rx_queue_count(rxq);
715 #define MLX5_SYSTEM_LOG_DIR "/var/log"
717 * Dump debug information to log file.
722 * If not NULL, this string is printed as a header to the output
723 * and the output is dumped in hexadecimal view.
725 * This is the buffer address to print out.
727 * The number of bytes to dump out.
730 mlx5_dump_debug_information(const char *fname, const char *hex_title,
731 const void *buf, unsigned int hex_len)
735 MKSTR(path, "%s/%s", MLX5_SYSTEM_LOG_DIR, fname);
736 fd = fopen(path, "a+");
738 DRV_LOG(WARNING, "cannot open %s for debug dump", path);
739 MKSTR(path2, "./%s", fname);
740 fd = fopen(path2, "a+");
742 DRV_LOG(ERR, "cannot open %s for debug dump", path2);
745 DRV_LOG(INFO, "New debug dump in file %s", path2);
747 DRV_LOG(INFO, "New debug dump in file %s", path);
750 rte_hexdump(fd, hex_title, buf, hex_len);
752 fprintf(fd, "%s", (const char *)buf);
753 fprintf(fd, "\n\n\n");
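/*
 * Usage sketch (illustrative, mirrors the calls made from the error handlers
 * below): a NULL hex_title dumps the buffer as plain text, a non-NULL title
 * switches to a hex dump of hex_len bytes, e.g.:
 *
 *   mlx5_dump_debug_information("dpdk_mlx5_dump", NULL, err_str, 0);
 *   mlx5_dump_debug_information("dpdk_mlx5_dump", "MLX5 Error CQ:", cq_buf, cq_len);
 *
 * The file name "dpdk_mlx5_dump" and the buffers are hypothetical here.
 */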
758 * Move QP from error state to running state and initialize indexes.
761 * Pointer to TX queue control structure.
764 * 0 on success, else -1.
767 tx_recover_qp(struct mlx5_txq_ctrl *txq_ctrl)
769 struct mlx5_mp_arg_queue_state_modify sm = {
771 .queue_id = txq_ctrl->txq.idx,
774 if (mlx5_queue_state_modify(ETH_DEV(txq_ctrl->priv), &sm))
776 txq_ctrl->txq.wqe_ci = 0;
777 txq_ctrl->txq.wqe_pi = 0;
778 txq_ctrl->txq.elts_comp = 0;
782 /* Return 1 if the error CQE is signed, otherwise sign it and return 0. */
784 check_err_cqe_seen(volatile struct mlx5_err_cqe *err_cqe)
786 static const uint8_t magic[] = "seen";
790 for (i = 0; i < sizeof(magic); ++i)
791 if (!ret || err_cqe->rsvd1[i] != magic[i]) {
793 err_cqe->rsvd1[i] = magic[i];
802 * Pointer to TX queue structure.
804 * Pointer to the error CQE.
807 * Negative value if queue recovery failed, otherwise
808 * the error completion entry is handled successfully.
811 mlx5_tx_error_cqe_handle(struct mlx5_txq_data *__rte_restrict txq,
812 volatile struct mlx5_err_cqe *err_cqe)
814 if (err_cqe->syndrome != MLX5_CQE_SYNDROME_WR_FLUSH_ERR) {
815 const uint16_t wqe_m = ((1 << txq->wqe_n) - 1);
816 struct mlx5_txq_ctrl *txq_ctrl =
817 container_of(txq, struct mlx5_txq_ctrl, txq);
818 uint16_t new_wqe_pi = rte_be_to_cpu_16(err_cqe->wqe_counter);
819 int seen = check_err_cqe_seen(err_cqe);
821 if (!seen && txq_ctrl->dump_file_n <
822 txq_ctrl->priv->config.max_dump_files_num) {
823 MKSTR(err_str, "Unexpected CQE error syndrome "
824 "0x%02x CQN = %u SQN = %u wqe_counter = %u "
825 "wq_ci = %u cq_ci = %u", err_cqe->syndrome,
826 txq->cqe_s, txq->qp_num_8s >> 8,
827 rte_be_to_cpu_16(err_cqe->wqe_counter),
828 txq->wqe_ci, txq->cq_ci);
829 MKSTR(name, "dpdk_mlx5_port_%u_txq_%u_index_%u_%u",
830 PORT_ID(txq_ctrl->priv), txq->idx,
831 txq_ctrl->dump_file_n, (uint32_t)rte_rdtsc());
832 mlx5_dump_debug_information(name, NULL, err_str, 0);
833 mlx5_dump_debug_information(name, "MLX5 Error CQ:",
834 (const void *)((uintptr_t)
838 mlx5_dump_debug_information(name, "MLX5 Error SQ:",
839 (const void *)((uintptr_t)
843 txq_ctrl->dump_file_n++;
847 * Count errors in WQEs units.
848 * Later it can be improved to count error packets,
849 * for example, by SQ parsing to find how many packets
850 * should be counted for each WQE.
852 txq->stats.oerrors += ((txq->wqe_ci & wqe_m) -
854 if (tx_recover_qp(txq_ctrl)) {
855 /* Recovering failed - retry later on the same WQE. */
858 /* Release all the remaining buffers. */
859 txq_free_elts(txq_ctrl);
865 * Translate RX completion flags to packet type.
868 * Pointer to RX queue structure.
872 * @note: update mlx5_dev_supported_ptypes_get() if anything is changed here.
875 * Packet type for struct rte_mbuf.
877 static inline uint32_t
878 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
879 volatile struct mlx5_mini_cqe8 *mcqe)
883 uint8_t pinfo = (cqe->pkt_info & 0x3) << 6;
885 /* Get L3/L4 header from the mini-CQE in case of L3/L4 format. */
887 rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_L34H_STRIDX)
888 ptype = (cqe->hdr_type_etc & 0xfc00) >> 10;
890 ptype = mcqe->hdr_type >> 2;
892 * The index to the array should have:
893 * bit[1:0] = l3_hdr_type
894 * bit[4:2] = l4_hdr_type
897 * bit[7] = outer_l3_type
900 return mlx5_ptype_table[idx] | rxq->tunnel * !!(idx & (1 << 6));
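/*
 * Worked example (illustrative, not from the original source): for a plain
 * non-fragmented IPv6 packet the CQE bits give idx = 0x01, so the lookup
 * returns RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
 * RTE_PTYPE_L4_NONFRAG as initialized in mlx5_set_ptype_table(). Bit 6 of
 * the index marks a tunneled completion, so the tunnel ptype is OR-ed in
 * only for such entries and only when rxq->tunnel is configured.
 */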
904 * Initialize Rx WQ and indexes.
907 * Pointer to RX queue structure.
910 mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
912 const unsigned int wqe_n = 1 << rxq->elts_n;
915 for (i = 0; (i != wqe_n); ++i) {
916 volatile struct mlx5_wqe_data_seg *scat;
920 if (mlx5_rxq_mprq_enabled(rxq)) {
921 struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[i];
923 scat = &((volatile struct mlx5_wqe_mprq *)
925 addr = (uintptr_t)mlx5_mprq_buf_addr(buf,
926 1 << rxq->strd_num_n);
927 byte_count = (1 << rxq->strd_sz_n) *
928 (1 << rxq->strd_num_n);
930 struct rte_mbuf *buf = (*rxq->elts)[i];
932 scat = &((volatile struct mlx5_wqe_data_seg *)
934 addr = rte_pktmbuf_mtod(buf, uintptr_t);
935 byte_count = DATA_LEN(buf);
937 /* scat->addr must be able to store a pointer. */
938 MLX5_ASSERT(sizeof(scat->addr) >= sizeof(uintptr_t));
939 *scat = (struct mlx5_wqe_data_seg){
940 .addr = rte_cpu_to_be_64(addr),
941 .byte_count = rte_cpu_to_be_32(byte_count),
942 .lkey = mlx5_rx_addr2mr(rxq, addr),
945 rxq->consumed_strd = 0;
946 rxq->decompressed = 0;
948 rxq->zip = (struct rxq_zip){
951 rxq->elts_ci = mlx5_rxq_mprq_enabled(rxq) ?
952 (wqe_n >> rxq->sges_n) * (1 << rxq->strd_num_n) : 0;
953 /* Update doorbell counter. */
954 rxq->rq_ci = wqe_n >> rxq->sges_n;
956 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
960 * Modify a Verbs/DevX queue state.
961 * This must be called from the primary process.
964 * Pointer to Ethernet device.
966 * State modify request parameters.
969 * 0 in case of success else non-zero value and rte_errno is set.
972 mlx5_queue_state_modify_primary(struct rte_eth_dev *dev,
973 const struct mlx5_mp_arg_queue_state_modify *sm)
976 struct mlx5_priv *priv = dev->data->dev_private;
979 struct mlx5_rxq_data *rxq = (*priv->rxqs)[sm->queue_id];
980 struct mlx5_rxq_ctrl *rxq_ctrl =
981 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
983 ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, sm->state);
985 DRV_LOG(ERR, "Cannot change Rx WQ state to %u - %s",
986 sm->state, strerror(errno));
991 struct mlx5_txq_data *txq = (*priv->txqs)[sm->queue_id];
992 struct mlx5_txq_ctrl *txq_ctrl =
993 container_of(txq, struct mlx5_txq_ctrl, txq);
995 ret = priv->obj_ops.txq_obj_modify(txq_ctrl->obj,
996 MLX5_TXQ_MOD_ERR2RDY,
997 (uint8_t)priv->dev_port);
1005 * Modify a Verbs queue state.
1008 * Pointer to Ethernet device.
1010 * State modify request parameters.
1013 * 0 in case of success else non-zero value.
1016 mlx5_queue_state_modify(struct rte_eth_dev *dev,
1017 struct mlx5_mp_arg_queue_state_modify *sm)
1019 struct mlx5_priv *priv = dev->data->dev_private;
1022 switch (rte_eal_process_type()) {
1023 case RTE_PROC_PRIMARY:
1024 ret = mlx5_queue_state_modify_primary(dev, sm);
1026 case RTE_PROC_SECONDARY:
1027 ret = mlx5_mp_req_queue_state_modify(&priv->mp_id, sm);
1036 * Handle a Rx error.
1037 * The function moves the RQ to the reset state when the first error CQE is
1038 * seen, then the CQ is drained by the caller's loop. When the CQ is empty,
1039 * it moves the RQ state to ready and reinitializes the RQ.
1040 * Identifying the next CQE and counting errors are the caller's responsibility.
1043 * Pointer to RX queue structure.
1045 * 1 when called from vectorized Rx burst, need to prepare mbufs for the RQ.
1046 * 0 when called from non-vectorized Rx burst.
1049 * -1 in case of recovery error, otherwise the CQE status.
1052 mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
1054 const uint16_t cqe_n = 1 << rxq->cqe_n;
1055 const uint16_t cqe_mask = cqe_n - 1;
1056 const uint16_t wqe_n = 1 << rxq->elts_n;
1057 const uint16_t strd_n = 1 << rxq->strd_num_n;
1058 struct mlx5_rxq_ctrl *rxq_ctrl =
1059 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
1061 volatile struct mlx5_cqe *cqe;
1062 volatile struct mlx5_err_cqe *err_cqe;
1064 .cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask],
1066 struct mlx5_mp_arg_queue_state_modify sm;
1069 switch (rxq->err_state) {
1070 case MLX5_RXQ_ERR_STATE_NO_ERROR:
1071 rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_RESET;
1073 case MLX5_RXQ_ERR_STATE_NEED_RESET:
1075 sm.queue_id = rxq->idx;
1076 sm.state = IBV_WQS_RESET;
1077 if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv), &sm))
1079 if (rxq_ctrl->dump_file_n <
1080 rxq_ctrl->priv->config.max_dump_files_num) {
1081 MKSTR(err_str, "Unexpected CQE error syndrome "
1082 "0x%02x CQN = %u RQN = %u wqe_counter = %u"
1083 " rq_ci = %u cq_ci = %u", u.err_cqe->syndrome,
1084 rxq->cqn, rxq_ctrl->wqn,
1085 rte_be_to_cpu_16(u.err_cqe->wqe_counter),
1086 rxq->rq_ci << rxq->sges_n, rxq->cq_ci);
1087 MKSTR(name, "dpdk_mlx5_port_%u_rxq_%u_%u",
1088 rxq->port_id, rxq->idx, (uint32_t)rte_rdtsc());
1089 mlx5_dump_debug_information(name, NULL, err_str, 0);
1090 mlx5_dump_debug_information(name, "MLX5 Error CQ:",
1091 (const void *)((uintptr_t)
1093 sizeof(*u.cqe) * cqe_n);
1094 mlx5_dump_debug_information(name, "MLX5 Error RQ:",
1095 (const void *)((uintptr_t)
1098 rxq_ctrl->dump_file_n++;
1100 rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_READY;
1102 case MLX5_RXQ_ERR_STATE_NEED_READY:
1103 ret = check_cqe(u.cqe, cqe_n, rxq->cq_ci);
1104 if (ret == MLX5_CQE_STATUS_HW_OWN) {
1106 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1109 * The RQ consumer index must be zeroed while moving
1110 * from RESET state to RDY state.
1112 *rxq->rq_db = rte_cpu_to_be_32(0);
1115 sm.queue_id = rxq->idx;
1116 sm.state = IBV_WQS_RDY;
1117 if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv),
1121 const uint32_t elts_n =
1122 mlx5_rxq_mprq_enabled(rxq) ?
1123 wqe_n * strd_n : wqe_n;
1124 const uint32_t e_mask = elts_n - 1;
1126 mlx5_rxq_mprq_enabled(rxq) ?
1127 rxq->elts_ci : rxq->rq_ci;
1129 struct rte_mbuf **elt;
1131 unsigned int n = elts_n - (elts_ci -
1134 for (i = 0; i < (int)n; ++i) {
1135 elt_idx = (elts_ci + i) & e_mask;
1136 elt = &(*rxq->elts)[elt_idx];
1137 *elt = rte_mbuf_raw_alloc(rxq->mp);
1139 for (i--; i >= 0; --i) {
1140 elt_idx = (elts_ci +
1144 rte_pktmbuf_free_seg
1150 for (i = 0; i < (int)elts_n; ++i) {
1151 elt = &(*rxq->elts)[i];
1153 (uint16_t)((*elt)->buf_len -
1154 rte_pktmbuf_headroom(*elt));
1156 /* Padding with a fake mbuf for vec Rx. */
1157 for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
1158 (*rxq->elts)[elts_n + i] =
1161 mlx5_rxq_initialize(rxq);
1162 rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
1171 * Get size of the next packet for a given CQE. For compressed CQEs, the
1172 * consumer index is updated only once all packets of the current one have been processed.
1176 * Pointer to RX queue.
1180 * Store pointer to mini-CQE if compressed. Otherwise, the pointer is not written.
1184 * 0 in case of empty CQE, otherwise the packet size in bytes.
1187 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
1188 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe)
1190 struct rxq_zip *zip = &rxq->zip;
1191 uint16_t cqe_n = cqe_cnt + 1;
1197 /* Process compressed data in the CQE and mini arrays. */
1199 volatile struct mlx5_mini_cqe8 (*mc)[8] =
1200 (volatile struct mlx5_mini_cqe8 (*)[8])
1201 (uintptr_t)(&(*rxq->cqes)[zip->ca &
1203 len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt &
1205 *mcqe = &(*mc)[zip->ai & 7];
1206 if ((++zip->ai & 7) == 0) {
1207 /* Invalidate consumed CQEs */
1210 while (idx != end) {
1211 (*rxq->cqes)[idx & cqe_cnt].op_own =
1212 MLX5_CQE_INVALIDATE;
1216 * Increment consumer index to skip the number
1217 * of CQEs consumed. Hardware leaves holes in
1218 * the CQ ring for software use.
1223 if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) {
1224 /* Invalidate the rest */
1228 while (idx != end) {
1229 (*rxq->cqes)[idx & cqe_cnt].op_own =
1230 MLX5_CQE_INVALIDATE;
1233 rxq->cq_ci = zip->cq_ci;
1237 * No compressed data, get next CQE and verify if it is
1245 ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
1246 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
1247 if (unlikely(ret == MLX5_CQE_STATUS_ERR ||
1249 ret = mlx5_rx_err_handle(rxq, 0);
1250 if (ret == MLX5_CQE_STATUS_HW_OWN ||
1258 * Use a local variable so that the queue cq_ci
1259 * index in the queue structure always stays consistent with
1260 * the actual CQE boundary (never pointing into the middle
1261 * of a compressed CQE session).
1263 cq_ci = rxq->cq_ci + 1;
1264 op_own = cqe->op_own;
1265 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) {
1266 volatile struct mlx5_mini_cqe8 (*mc)[8] =
1267 (volatile struct mlx5_mini_cqe8 (*)[8])
1268 (uintptr_t)(&(*rxq->cqes)
1269 [cq_ci & cqe_cnt].pkt_info);
1271 /* Fix endianness. */
1272 zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
1274 * Current mini array position is the one
1275 * returned by check_cqe64().
1277 * If completion comprises several mini arrays,
1278 * as a special case the second one is located
1279 * 7 CQEs after the initial CQE instead of 8
1280 * for subsequent ones.
1283 zip->na = zip->ca + 7;
1284 /* Compute the next non compressed CQE. */
1285 zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
1286 /* Get packet size to return. */
1287 len = rte_be_to_cpu_32((*mc)[0].byte_cnt &
1291 /* Prefetch all to be invalidated */
1294 while (idx != end) {
1295 rte_prefetch0(&(*rxq->cqes)[(idx) &
1301 len = rte_be_to_cpu_32(cqe->byte_cnt);
1304 if (unlikely(rxq->err_state)) {
1305 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1306 ++rxq->stats.idropped;
1314 * Translate RX completion flags to offload flags.
1320 * Offload flags (ol_flags) for struct rte_mbuf.
1322 static inline uint32_t
1323 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe)
1325 uint32_t ol_flags = 0;
1326 uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);
1330 MLX5_CQE_RX_L3_HDR_VALID,
1331 PKT_RX_IP_CKSUM_GOOD) |
1333 MLX5_CQE_RX_L4_HDR_VALID,
1334 PKT_RX_L4_CKSUM_GOOD);
1339 * Fill in mbuf fields from RX completion flags.
1340 * Note that pkt->ol_flags should be initialized outside of this function.
1343 * Pointer to RX queue.
1348 * @param mcqe
1349 * Pointer to the mini-CQE, may be NULL.
1352 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
1353 volatile struct mlx5_cqe *cqe,
1354 volatile struct mlx5_mini_cqe8 *mcqe)
1356 /* Update packet information. */
1357 pkt->packet_type = rxq_cq_to_pkt_type(rxq, cqe, mcqe);
1359 if (rxq->rss_hash) {
1360 uint32_t rss_hash_res = 0;
1362 /* If compressed, take hash result from mini-CQE. */
1364 rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_HASH)
1365 rss_hash_res = rte_be_to_cpu_32(cqe->rx_hash_res);
1367 rss_hash_res = rte_be_to_cpu_32(mcqe->rx_hash_result);
1369 pkt->hash.rss = rss_hash_res;
1370 pkt->ol_flags |= PKT_RX_RSS_HASH;
1376 /* If compressed, take flow tag from mini-CQE. */
1378 rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_FTAG_STRIDX)
1379 mark = cqe->sop_drop_qpn;
1381 mark = ((mcqe->byte_cnt_flow & 0xff) << 8) |
1382 (mcqe->flow_tag_high << 16);
1383 if (MLX5_FLOW_MARK_IS_VALID(mark)) {
1384 pkt->ol_flags |= PKT_RX_FDIR;
1385 if (mark != RTE_BE32(MLX5_FLOW_MARK_DEFAULT)) {
1386 pkt->ol_flags |= PKT_RX_FDIR_ID;
1387 pkt->hash.fdir.hi = mlx5_flow_mark_get(mark);
1391 if (rxq->dynf_meta && cqe->flow_table_metadata) {
1392 pkt->ol_flags |= rxq->flow_meta_mask;
1393 *RTE_MBUF_DYNFIELD(pkt, rxq->flow_meta_offset, uint32_t *) =
1394 cqe->flow_table_metadata;
1397 pkt->ol_flags |= rxq_cq_to_ol_flags(cqe);
1398 if (rxq->vlan_strip) {
1402 rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_L34H_STRIDX)
1403 vlan_strip = cqe->hdr_type_etc &
1404 RTE_BE16(MLX5_CQE_VLAN_STRIPPED);
1406 vlan_strip = mcqe->hdr_type &
1407 RTE_BE16(MLX5_CQE_VLAN_STRIPPED);
1409 pkt->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1410 pkt->vlan_tci = rte_be_to_cpu_16(cqe->vlan_info);
1413 if (rxq->hw_timestamp) {
1414 uint64_t ts = rte_be_to_cpu_64(cqe->timestamp);
1416 if (rxq->rt_timestamp)
1417 ts = mlx5_txpp_convert_rx_ts(rxq->sh, ts);
1418 mlx5_timestamp_set(pkt, rxq->timestamp_offset, ts);
1419 pkt->ol_flags |= rxq->timestamp_rx_flag;
1424 * DPDK callback for RX.
1427 * Generic pointer to RX queue structure.
1429 * Array to store received packets.
1431 * Maximum number of packets in array.
1434 * Number of packets successfully received (<= pkts_n).
1437 mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1439 struct mlx5_rxq_data *rxq = dpdk_rxq;
1440 const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1;
1441 const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1;
1442 const unsigned int sges_n = rxq->sges_n;
1443 struct rte_mbuf *pkt = NULL;
1444 struct rte_mbuf *seg = NULL;
1445 volatile struct mlx5_cqe *cqe =
1446 &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1448 unsigned int rq_ci = rxq->rq_ci << sges_n;
1449 int len = 0; /* keep its value across iterations. */
1452 unsigned int idx = rq_ci & wqe_cnt;
1453 volatile struct mlx5_wqe_data_seg *wqe =
1454 &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx];
1455 struct rte_mbuf *rep = (*rxq->elts)[idx];
1456 volatile struct mlx5_mini_cqe8 *mcqe = NULL;
1464 /* Allocate the buf from the same pool. */
1465 rep = rte_mbuf_raw_alloc(seg->pool);
1466 if (unlikely(rep == NULL)) {
1467 ++rxq->stats.rx_nombuf;
1470 * no buffers before we even started,
1471 * bail out silently.
1475 while (pkt != seg) {
1476 MLX5_ASSERT(pkt != (*rxq->elts)[idx]);
1480 rte_mbuf_raw_free(pkt);
1489 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1490 len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe);
1492 rte_mbuf_raw_free(rep);
1496 MLX5_ASSERT(len >= (rxq->crc_present << 2));
1497 pkt->ol_flags &= EXT_ATTACHED_MBUF;
1498 rxq_cq_to_mbuf(rxq, pkt, cqe, mcqe);
1499 if (rxq->crc_present)
1500 len -= RTE_ETHER_CRC_LEN;
1502 if (cqe->lro_num_seg > 1) {
1504 (rte_pktmbuf_mtod(pkt, uint8_t *), cqe,
1506 pkt->ol_flags |= PKT_RX_LRO;
1507 pkt->tso_segsz = len / cqe->lro_num_seg;
1510 DATA_LEN(rep) = DATA_LEN(seg);
1511 PKT_LEN(rep) = PKT_LEN(seg);
1512 SET_DATA_OFF(rep, DATA_OFF(seg));
1513 PORT(rep) = PORT(seg);
1514 (*rxq->elts)[idx] = rep;
1516 * Fill NIC descriptor with the new buffer. The lkey and size
1517 * of the buffers are already known, only the buffer address
1520 wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
1521 /* If there's only one MR, no need to replace LKey in WQE. */
1522 if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
1523 wqe->lkey = mlx5_rx_mb2mr(rxq, rep);
1524 if (len > DATA_LEN(seg)) {
1525 len -= DATA_LEN(seg);
1530 DATA_LEN(seg) = len;
1531 #ifdef MLX5_PMD_SOFT_COUNTERS
1532 /* Increment bytes counter. */
1533 rxq->stats.ibytes += PKT_LEN(pkt);
1535 /* Return packet. */
1540 /* Align consumer index to the next stride. */
1545 if (unlikely((i == 0) && ((rq_ci >> sges_n) == rxq->rq_ci)))
1547 /* Update the consumer index. */
1548 rxq->rq_ci = rq_ci >> sges_n;
1550 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1552 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
1553 #ifdef MLX5_PMD_SOFT_COUNTERS
1554 /* Increment packets counter. */
1555 rxq->stats.ipackets += i;
1561 * Update LRO packet TCP header.
1562 * The HW LRO feature doesn't update the TCP header after coalescing the
1563 * TCP segments but supplies information in the CQE so SW can fill it in.
1566 * Pointer to the TCP header.
1568 * Pointer to the completion entry.
1570 * The L3 pseudo-header checksum.
1573 mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *__rte_restrict tcp,
1574 volatile struct mlx5_cqe *__rte_restrict cqe,
1575 uint32_t phcsum, uint8_t l4_type)
1578 * The HW calculates only the TCP payload checksum, need to complete
1579 * the TCP header checksum and the L3 pseudo-header checksum.
1581 uint32_t csum = phcsum + cqe->csum;
1583 if (l4_type == MLX5_L4_HDR_TYPE_TCP_EMPTY_ACK ||
1584 l4_type == MLX5_L4_HDR_TYPE_TCP_WITH_ACL) {
1585 tcp->tcp_flags |= RTE_TCP_ACK_FLAG;
1586 tcp->recv_ack = cqe->lro_ack_seq_num;
1587 tcp->rx_win = cqe->lro_tcp_win;
1589 if (cqe->lro_tcppsh_abort_dupack & MLX5_CQE_LRO_PUSH_MASK)
1590 tcp->tcp_flags |= RTE_TCP_PSH_FLAG;
1592 csum += rte_raw_cksum(tcp, (tcp->data_off >> 4) * 4);
1593 csum = ((csum & 0xffff0000) >> 16) + (csum & 0xffff);
1594 csum = (~csum) & 0xffff;
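/*
 * Illustrative note (not from the original source): the fold above reduces
 * the 32-bit running sum to 16 bits with end-around carry before taking the
 * one's complement, e.g. a sum of 0x35ab2 folds to 0x3 + 0x5ab2 = 0x5ab5 and
 * the stored checksum becomes (~0x5ab5) & 0xffff = 0xa54a.
 */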
1601 * Update LRO packet headers.
1602 * The HW LRO feature doesn't update the L3/TCP headers after coalescing the
1603 * TCP segments but supplies information in the CQE so SW can fill them in.
1606 * The packet address.
1608 * Pointer to the completion entry.
1610 * The packet length.
1613 mlx5_lro_update_hdr(uint8_t *__rte_restrict padd,
1614 volatile struct mlx5_cqe *__rte_restrict cqe,
1615 volatile struct mlx5_mini_cqe8 *mcqe,
1616 struct mlx5_rxq_data *rxq, uint32_t len)
1619 struct rte_ether_hdr *eth;
1620 struct rte_vlan_hdr *vlan;
1621 struct rte_ipv4_hdr *ipv4;
1622 struct rte_ipv6_hdr *ipv6;
1623 struct rte_tcp_hdr *tcp;
1628 uint16_t proto = h.eth->ether_type;
1633 while (proto == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
1634 proto == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
1635 proto = h.vlan->eth_proto;
1638 if (proto == RTE_BE16(RTE_ETHER_TYPE_IPV4)) {
1639 h.ipv4->time_to_live = cqe->lro_min_ttl;
1640 h.ipv4->total_length = rte_cpu_to_be_16(len - (h.hdr - padd));
1641 h.ipv4->hdr_checksum = 0;
1642 h.ipv4->hdr_checksum = rte_ipv4_cksum(h.ipv4);
1643 phcsum = rte_ipv4_phdr_cksum(h.ipv4, 0);
1646 h.ipv6->hop_limits = cqe->lro_min_ttl;
1647 h.ipv6->payload_len = rte_cpu_to_be_16(len - (h.hdr - padd) -
1649 phcsum = rte_ipv6_phdr_cksum(h.ipv6, 0);
1653 rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_L34H_STRIDX)
1654 l4_type = (rte_be_to_cpu_16(cqe->hdr_type_etc) &
1655 MLX5_CQE_L4_TYPE_MASK) >> MLX5_CQE_L4_TYPE_SHIFT;
1657 l4_type = (rte_be_to_cpu_16(mcqe->hdr_type) &
1658 MLX5_CQE_L4_TYPE_MASK) >> MLX5_CQE_L4_TYPE_SHIFT;
1659 mlx5_lro_update_tcp_hdr(h.tcp, cqe, phcsum, l4_type);
1663 mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
1665 struct mlx5_mprq_buf *buf = opaque;
1667 if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) == 1) {
1668 rte_mempool_put(buf->mp, buf);
1669 } else if (unlikely(__atomic_sub_fetch(&buf->refcnt, 1,
1670 __ATOMIC_RELAXED) == 0)) {
1671 __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
1672 rte_mempool_put(buf->mp, buf);
1677 mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf)
1679 mlx5_mprq_buf_free_cb(NULL, buf);
1683 * DPDK callback for RX with Multi-Packet RQ support.
1686 * Generic pointer to RX queue structure.
1688 * Array to store received packets.
1690 * Maximum number of packets in array.
1693 * Number of packets successfully received (<= pkts_n).
1696 mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1698 struct mlx5_rxq_data *rxq = dpdk_rxq;
1699 const uint32_t strd_n = 1 << rxq->strd_num_n;
1700 const uint32_t strd_sz = 1 << rxq->strd_sz_n;
1701 const uint32_t cq_mask = (1 << rxq->cqe_n) - 1;
1702 const uint32_t wq_mask = (1 << rxq->elts_n) - 1;
1703 volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
1705 uint32_t rq_ci = rxq->rq_ci;
1706 uint16_t consumed_strd = rxq->consumed_strd;
1707 struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
1709 while (i < pkts_n) {
1710 struct rte_mbuf *pkt;
1716 volatile struct mlx5_mini_cqe8 *mcqe = NULL;
1717 enum mlx5_rqx_code rxq_code;
1719 if (consumed_strd == strd_n) {
1720 /* Replace WQE if the buffer is still in use. */
1721 mprq_buf_replace(rxq, rq_ci & wq_mask);
1722 /* Advance to the next WQE. */
1725 buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
1727 cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
1728 ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe);
1732 len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
1733 MLX5_ASSERT((int)len >= (rxq->crc_present << 2));
1734 if (rxq->crc_present)
1735 len -= RTE_ETHER_CRC_LEN;
1737 rxq->mcqe_format == MLX5_CQE_RESP_FORMAT_FTAG_STRIDX)
1738 strd_cnt = (len / strd_sz) + !!(len % strd_sz);
1740 strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
1741 MLX5_MPRQ_STRIDE_NUM_SHIFT;
1742 MLX5_ASSERT(strd_cnt);
1743 consumed_strd += strd_cnt;
1744 if (byte_cnt & MLX5_MPRQ_FILLER_MASK)
1746 strd_idx = rte_be_to_cpu_16(mcqe == NULL ?
1749 MLX5_ASSERT(strd_idx < strd_n);
1750 MLX5_ASSERT(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) &
1752 pkt = rte_pktmbuf_alloc(rxq->mp);
1753 if (unlikely(pkt == NULL)) {
1754 ++rxq->stats.rx_nombuf;
1757 len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
1758 MLX5_ASSERT((int)len >= (rxq->crc_present << 2));
1759 if (rxq->crc_present)
1760 len -= RTE_ETHER_CRC_LEN;
1761 rxq_code = mprq_buf_to_pkt(rxq, pkt, len, buf,
1762 strd_idx, strd_cnt);
1763 if (unlikely(rxq_code != MLX5_RXQ_CODE_EXIT)) {
1764 rte_pktmbuf_free_seg(pkt);
1765 if (rxq_code == MLX5_RXQ_CODE_DROPPED) {
1766 ++rxq->stats.idropped;
1769 if (rxq_code == MLX5_RXQ_CODE_NOMBUF) {
1770 ++rxq->stats.rx_nombuf;
1774 rxq_cq_to_mbuf(rxq, pkt, cqe, mcqe);
1775 if (cqe->lro_num_seg > 1) {
1776 mlx5_lro_update_hdr(rte_pktmbuf_mtod(pkt, uint8_t *),
1777 cqe, mcqe, rxq, len);
1778 pkt->ol_flags |= PKT_RX_LRO;
1779 pkt->tso_segsz = len / cqe->lro_num_seg;
1782 PORT(pkt) = rxq->port_id;
1783 #ifdef MLX5_PMD_SOFT_COUNTERS
1784 /* Increment bytes counter. */
1785 rxq->stats.ibytes += PKT_LEN(pkt);
1787 /* Return packet. */
1791 /* Update the consumer indexes. */
1792 rxq->consumed_strd = consumed_strd;
1794 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1795 if (rq_ci != rxq->rq_ci) {
1798 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
1800 #ifdef MLX5_PMD_SOFT_COUNTERS
1801 /* Increment packets counter. */
1802 rxq->stats.ipackets += i;
1808 * Dummy DPDK callback for TX.
1810 * This function is used to temporarily replace the real callback during
1811 * unsafe control operations on the queue, or in case of error.
1814 * Generic pointer to TX queue structure.
1816 * Packets to transmit.
1818 * Number of packets in array.
1821 * Number of packets successfully transmitted (<= pkts_n).
1824 removed_tx_burst(void *dpdk_txq __rte_unused,
1825 struct rte_mbuf **pkts __rte_unused,
1826 uint16_t pkts_n __rte_unused)
1833 * Dummy DPDK callback for RX.
1835 * This function is used to temporarily replace the real callback during
1836 * unsafe control operations on the queue, or in case of error.
1839 * Generic pointer to RX queue structure.
1841 * Array to store received packets.
1843 * Maximum number of packets in array.
1846 * Number of packets successfully received (<= pkts_n).
1849 removed_rx_burst(void *dpdk_txq __rte_unused,
1850 struct rte_mbuf **pkts __rte_unused,
1851 uint16_t pkts_n __rte_unused)
1858 * Vectorized Rx/Tx routines are not compiled in when required vector
1859 * instructions are not supported on a target architecture. The following null
1860 * stubs are needed for linkage when those are not included outside of this file
1861 * (e.g. mlx5_rxtx_vec_sse.c for x86).
1865 mlx5_rx_burst_vec(void *dpdk_txq __rte_unused,
1866 struct rte_mbuf **pkts __rte_unused,
1867 uint16_t pkts_n __rte_unused)
1873 mlx5_rx_burst_mprq_vec(void *dpdk_txq __rte_unused,
1874 struct rte_mbuf **pkts __rte_unused,
1875 uint16_t pkts_n __rte_unused)
1881 mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)
1887 mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused)
1893 * Free the mbufs from the linear array of pointers.
1896 * Pointer to Tx queue structure.
1898 * Pointer to array of packets to be freed.
1900 * Number of packets to be freed.
1902 * Configured Tx offloads mask. It is fully defined at
1903 * compile time and may be used for optimization.
1905 static __rte_always_inline void
1906 mlx5_tx_free_mbuf(struct mlx5_txq_data *__rte_restrict txq,
1907 struct rte_mbuf **__rte_restrict pkts,
1908 unsigned int pkts_n,
1909 unsigned int olx __rte_unused)
1911 struct rte_mempool *pool = NULL;
1912 struct rte_mbuf **p_free = NULL;
1913 struct rte_mbuf *mbuf;
1914 unsigned int n_free = 0;
1917 * The implemented algorithm eliminates
1918 * copying pointers to temporary array
1919 * for rte_mempool_put_bulk() calls.
1922 MLX5_ASSERT(pkts_n);
1924 * Free mbufs directly to the pool in bulk
1925 * if fast free offload is engaged
1927 if (!MLX5_TXOFF_CONFIG(MULTI) && txq->fast_free) {
1930 rte_mempool_put_bulk(pool, (void *)pkts, pkts_n);
1936 * Decrement mbuf reference counter, detach
1937 * indirect and external buffers if needed.
1939 mbuf = rte_pktmbuf_prefree_seg(*pkts);
1940 if (likely(mbuf != NULL)) {
1941 MLX5_ASSERT(mbuf == *pkts);
1942 if (likely(n_free != 0)) {
1943 if (unlikely(pool != mbuf->pool))
1944 /* From different pool. */
1947 /* Start new scan array. */
1954 if (unlikely(pkts_n == 0)) {
1960 * This happens if mbuf is still referenced.
1961 * We can't put it back to the pool, skip.
1965 if (unlikely(n_free != 0))
1966 /* There is an array to free. */
1968 if (unlikely(pkts_n == 0))
1969 /* Last mbuf, nothing to free. */
1975 * This loop is implemented to avoid multiple
1976 * inlining of rte_mempool_put_bulk().
1979 MLX5_ASSERT(p_free);
1980 MLX5_ASSERT(n_free);
1982 * Free the array of pre-freed mbufs
1983 * belonging to the same memory pool.
1985 rte_mempool_put_bulk(pool, (void *)p_free, n_free);
1986 if (unlikely(mbuf != NULL)) {
1987 /* There is a request to start a new scan. */
1992 if (likely(pkts_n != 0))
1995 * This is the last mbuf to be freed.
1996 * Do one more loop iteration to complete.
1997 * This is a rare case of the last unique mbuf.
2002 if (likely(pkts_n == 0))
2010 * Not inlined version of the buffer freeing routine, for an optimal
2011 * call on tx_burst completion.
2013 static __rte_noinline void
2014 __mlx5_tx_free_mbuf(struct mlx5_txq_data *__rte_restrict txq,
2015 struct rte_mbuf **__rte_restrict pkts,
2016 unsigned int pkts_n,
2017 unsigned int olx __rte_unused)
2019 mlx5_tx_free_mbuf(txq, pkts, pkts_n, olx);
2023 * Free the mbufs from the elts ring buffer up to the new tail.
2026 * Pointer to Tx queue structure.
2028 * Index in elts to free up to, becomes new elts tail.
2030 * Configured Tx offloads mask. It is fully defined at
2031 * compile time and may be used for optimization.
2033 static __rte_always_inline void
2034 mlx5_tx_free_elts(struct mlx5_txq_data *__rte_restrict txq,
2036 unsigned int olx __rte_unused)
2038 uint16_t n_elts = tail - txq->elts_tail;
2040 MLX5_ASSERT(n_elts);
2041 MLX5_ASSERT(n_elts <= txq->elts_s);
2043 * Implement a loop to support ring buffer wraparound
2044 * with single inlining of mlx5_tx_free_mbuf().
2049 part = txq->elts_s - (txq->elts_tail & txq->elts_m);
2050 part = RTE_MIN(part, n_elts);
2052 MLX5_ASSERT(part <= txq->elts_s);
2053 mlx5_tx_free_mbuf(txq,
2054 &txq->elts[txq->elts_tail & txq->elts_m],
2056 txq->elts_tail += part;
2062 * Store the mbufs being sent into the elts ring buffer.
2063 * On Tx completion these mbufs will be freed.
2066 * Pointer to Tx queue structure.
2068 * Pointer to array of packets to be stored.
2070 * Number of packets to be stored.
2072 * Configured Tx offloads mask. It is fully defined at
2073 * compile time and may be used for optimization.
2075 static __rte_always_inline void
2076 mlx5_tx_copy_elts(struct mlx5_txq_data *__rte_restrict txq,
2077 struct rte_mbuf **__rte_restrict pkts,
2078 unsigned int pkts_n,
2079 unsigned int olx __rte_unused)
2082 struct rte_mbuf **elts = (struct rte_mbuf **)txq->elts;
2085 MLX5_ASSERT(pkts_n);
2086 part = txq->elts_s - (txq->elts_head & txq->elts_m);
2088 MLX5_ASSERT(part <= txq->elts_s);
2089 /* This code is a good candidate for vectorizing with SIMD. */
2090 rte_memcpy((void *)(elts + (txq->elts_head & txq->elts_m)),
2092 RTE_MIN(part, pkts_n) * sizeof(struct rte_mbuf *));
2093 txq->elts_head += pkts_n;
2094 if (unlikely(part < pkts_n))
2095 /* The copy is wrapping around the elts array. */
2096 rte_memcpy((void *)elts, (void *)(pkts + part),
2097 (pkts_n - part) * sizeof(struct rte_mbuf *));
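/*
 * Illustrative example (not from the original source): with elts_s = 256 and
 * elts_head & elts_m = 250, storing 10 mbufs copies the first 6 pointers to
 * the tail of the ring and the remaining 4 to its beginning, which is what
 * the two rte_memcpy() calls above implement.
 */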
2101 * Update completion queue consuming index via doorbell
2102 * and flush the completed data buffers.
2105 * Pointer to TX queue structure.
2106 * @param last_cqe
2107 * Pointer to the last valid CQE; if not NULL, update txq->wqe_pi and flush the buffers.
2109 * Configured Tx offloads mask. It is fully defined at
2110 * compile time and may be used for optimization.
2112 static __rte_always_inline void
2113 mlx5_tx_comp_flush(struct mlx5_txq_data *__rte_restrict txq,
2114 volatile struct mlx5_cqe *last_cqe,
2115 unsigned int olx __rte_unused)
2117 if (likely(last_cqe != NULL)) {
2120 txq->wqe_pi = rte_be_to_cpu_16(last_cqe->wqe_counter);
2121 tail = txq->fcqs[(txq->cq_ci - 1) & txq->cqe_m];
2122 if (likely(tail != txq->elts_tail)) {
2123 mlx5_tx_free_elts(txq, tail, olx);
2124 MLX5_ASSERT(tail == txq->elts_tail);
2130 * Manage Tx completions. This routine checks the CQ for
2131 * arrived CQEs, deduces the last completed WQE in the SQ,
2132 * updates the SQ producer index and frees all completed mbufs.
2135 * Pointer to TX queue structure.
2137 * Configured Tx offloads mask. It is fully defined at
2138 * compile time and may be used for optimization.
2140 * NOTE: not inlined intentionally, it makes the tx_burst
2141 * routine smaller, simpler and faster - from experiments.
2144 mlx5_tx_handle_completion(struct mlx5_txq_data *__rte_restrict txq,
2145 unsigned int olx __rte_unused)
2147 unsigned int count = MLX5_TX_COMP_MAX_CQE;
2148 volatile struct mlx5_cqe *last_cqe = NULL;
2149 bool ring_doorbell = false;
2153 volatile struct mlx5_cqe *cqe;
2155 cqe = &txq->cqes[txq->cq_ci & txq->cqe_m];
2156 ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci);
2157 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
2158 if (likely(ret != MLX5_CQE_STATUS_ERR)) {
2159 /* No new CQEs in completion queue. */
2160 MLX5_ASSERT(ret == MLX5_CQE_STATUS_HW_OWN);
2164 * Some error occurred, try to restart.
2165 * We have no barrier after the WQE related Doorbell
2166 * is written, make sure all writes are completed
2167 * here, before we might perform an SQ reset.
2170 ret = mlx5_tx_error_cqe_handle
2171 (txq, (volatile struct mlx5_err_cqe *)cqe);
2172 if (unlikely(ret < 0)) {
2174 * Some error occurred during queue error
2175 * handling, we do not advance the index
2176 * here, allowing a retry on the next call.
2181 * We are going to fetch all entries with
2182 * MLX5_CQE_SYNDROME_WR_FLUSH_ERR status.
2183 * The send queue is supposed to be empty.
2185 ring_doorbell = true;
2187 txq->cq_pi = txq->cq_ci;
2191 /* Normal transmit completion. */
2192 MLX5_ASSERT(txq->cq_ci != txq->cq_pi);
2193 #ifdef RTE_LIBRTE_MLX5_DEBUG
2194 MLX5_ASSERT((txq->fcqs[txq->cq_ci & txq->cqe_m] >> 16) ==
2197 ring_doorbell = true;
2201 * We have to restrict the amount of processed CQEs
2202 * in one tx_burst routine call. The CQ may be large
2203 * and many CQEs may be updated by the NIC in one
2204 * transaction. Buffers freeing is time consuming,
2205 * multiple iterations may introduce significant
2208 if (likely(--count == 0))
2211 if (likely(ring_doorbell)) {
2212 /* Ring doorbell to notify hardware. */
2213 rte_compiler_barrier();
2214 *txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
2215 mlx5_tx_comp_flush(txq, last_cqe, olx);
2220 * Check if the completion request flag should be set in the last WQE.
2221 * Both pushed mbufs and WQEs are monitored and the completion request
2222 * flag is set if any of the thresholds is reached.
2225 * Pointer to TX queue structure.
2227 * Pointer to burst routine local context.
2229 * Configured Tx offloads mask. It is fully defined at
2230 * compile time and may be used for optimization.
2232 static __rte_always_inline void
2233 mlx5_tx_request_completion(struct mlx5_txq_data *__rte_restrict txq,
2234 struct mlx5_txq_local *__rte_restrict loc,
2237 uint16_t head = txq->elts_head;
2240 part = MLX5_TXOFF_CONFIG(INLINE) ?
2241 0 : loc->pkts_sent - loc->pkts_copy;
2243 if ((uint16_t)(head - txq->elts_comp) >= MLX5_TX_COMP_THRESH ||
2244 (MLX5_TXOFF_CONFIG(INLINE) &&
2245 (uint16_t)(txq->wqe_ci - txq->wqe_comp) >= txq->wqe_thres)) {
2246 volatile struct mlx5_wqe *last = loc->wqe_last;
2249 txq->elts_comp = head;
2250 if (MLX5_TXOFF_CONFIG(INLINE))
2251 txq->wqe_comp = txq->wqe_ci;
2252 /* Request unconditional completion on last WQE. */
2253 last->cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
2254 MLX5_COMP_MODE_OFFSET);
2255 /* Save elts_head in dedicated free on completion queue. */
2256 #ifdef RTE_LIBRTE_MLX5_DEBUG
2257 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head |
2258 (last->cseg.opcode >> 8) << 16;
2260 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head;
2262 /* A CQE slot must always be available. */
2263 MLX5_ASSERT((txq->cq_pi - txq->cq_ci) <= txq->cqe_s);
2268 * DPDK callback to check the status of a tx descriptor.
2273 * The index of the descriptor in the ring.
2276 * The status of the tx descriptor.
2279 mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
2281 struct mlx5_txq_data *__rte_restrict txq = tx_queue;
2284 mlx5_tx_handle_completion(txq, 0);
2285 used = txq->elts_head - txq->elts_tail;
2287 return RTE_ETH_TX_DESC_FULL;
2288 return RTE_ETH_TX_DESC_DONE;
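/*
 * Illustrative example (assumed semantics, not from the original source):
 * with elts_head = 100 and elts_tail = 90 there are 10 descriptors still in
 * flight, so an offset below 10 reports RTE_ETH_TX_DESC_FULL and a larger
 * offset reports RTE_ETH_TX_DESC_DONE.
 */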
2292 * Build the Control Segment with specified opcode:
2293 * - MLX5_OPCODE_SEND
2294 * - MLX5_OPCODE_ENHANCED_MPSW
2298 * Pointer to TX queue structure.
2300 * Pointer to burst routine local context.
2302 * Pointer to WQE to fill with built Control Segment.
2304 * Supposed length of WQE in segments.
2306 * SQ WQE opcode to put into Control Segment.
2308 * Configured Tx offloads mask. It is fully defined at
2309 * compile time and may be used for optimization.
2311 static __rte_always_inline void
2312 mlx5_tx_cseg_init(struct mlx5_txq_data *__rte_restrict txq,
2313 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
2314 struct mlx5_wqe *__rte_restrict wqe,
2316 unsigned int opcode,
2317 unsigned int olx __rte_unused)
2319 struct mlx5_wqe_cseg *__rte_restrict cs = &wqe->cseg;
2321 /* For legacy MPW replace the EMPW by TSO with modifier. */
2322 if (MLX5_TXOFF_CONFIG(MPW) && opcode == MLX5_OPCODE_ENHANCED_MPSW)
2323 opcode = MLX5_OPCODE_TSO | MLX5_OPC_MOD_MPW << 24;
2324 cs->opcode = rte_cpu_to_be_32((txq->wqe_ci << 8) | opcode);
2325 cs->sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
2326 cs->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
2327 MLX5_COMP_MODE_OFFSET);
2328 cs->misc = RTE_BE32(0);
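/*
 * Layout sketch of the Control Segment built above, shown in CPU
 * order before the big-endian conversion:
 *
 *   opcode dword : (opc_mod << 24) | (wqe_ci << 8) | opcode
 *   sq_ds dword  : qp_num_8s | ds   // SQ number in the upper bits,
 *                                   // ds = WQE size in 16-byte segments
 *
 * For legacy MPW the Enhanced MPSW opcode is replaced by
 * MLX5_OPCODE_TSO with the MLX5_OPC_MOD_MPW modifier, exactly as the
 * first branch does.
 */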
2332 * Build the Synchronize Queue Segment with specified completion index.
2335 * Pointer to TX queue structure.
2337 * Pointer to burst routine local context.
2339 * Pointer to WQE to fill with built Control Segment.
2341 * Completion index in Clock Queue to wait.
2343 * Configured Tx offloads mask. It is fully defined at
2344 * compile time and may be used for optimization.
2346 static __rte_always_inline void
2347 mlx5_tx_wseg_init(struct mlx5_txq_data *restrict txq,
2348 struct mlx5_txq_local *restrict loc __rte_unused,
2349 struct mlx5_wqe *restrict wqe,
2351 unsigned int olx __rte_unused)
2353 struct mlx5_wqe_qseg *qs;
2355 qs = RTE_PTR_ADD(wqe, MLX5_WSEG_SIZE);
2356 qs->max_index = rte_cpu_to_be_32(wci);
2357 qs->qpn_cqn = rte_cpu_to_be_32(txq->sh->txpp.clock_queue.cq_obj.cq->id);
2358 qs->reserved0 = RTE_BE32(0);
2359 qs->reserved1 = RTE_BE32(0);
2363 * Build the Ethernet Segment without inlined data.
2364 * Supports Software Parser, Checksums and VLAN
2365 * insertion Tx offload features.
2368 * Pointer to TX queue structure.
2370 * Pointer to burst routine local context.
2372 * Pointer to WQE to fill with built Ethernet Segment.
2374 * Configured Tx offloads mask. It is fully defined at
2375 * compile time and may be used for optimization.
2377 static __rte_always_inline void
2378 mlx5_tx_eseg_none(struct mlx5_txq_data *__rte_restrict txq __rte_unused,
2379 struct mlx5_txq_local *__rte_restrict loc,
2380 struct mlx5_wqe *__rte_restrict wqe,
2383 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
2387 * Calculate and set check sum flags first, dword field
2388 * in segment may be shared with Software Parser flags.
2390 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2391 es->flags = rte_cpu_to_le_32(csum);
2393 * Calculate and set Software Parser offsets and flags.
2394	 * These flags are set for custom UDP and IP tunnel packets.
2396 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2397 /* Fill metadata field if needed. */
2398 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2399 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2400 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2401 /* Engage VLAN tag insertion feature if requested. */
2402 if (MLX5_TXOFF_CONFIG(VLAN) &&
2403 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
2405		 * We should get here only if the device supports
2406		 * this feature correctly.
2408 MLX5_ASSERT(txq->vlan_en);
2409 es->inline_hdr = rte_cpu_to_be_32(MLX5_ETH_WQE_VLAN_INSERT |
2410 loc->mbuf->vlan_tci);
2412 es->inline_hdr = RTE_BE32(0);
2417 * Build the Ethernet Segment with minimal inlined data
2418 * of MLX5_ESEG_MIN_INLINE_SIZE bytes length. This is
2419 * used to fill the gap in single WQEBB WQEs.
2420 * Supports Software Parser, Checksums and VLAN
2421 * insertion Tx offload features.
2424 * Pointer to TX queue structure.
2426 * Pointer to burst routine local context.
2428 * Pointer to WQE to fill with built Ethernet Segment.
2430 * Length of VLAN tag insertion if any.
2432 * Configured Tx offloads mask. It is fully defined at
2433 * compile time and may be used for optimization.
2435 static __rte_always_inline void
2436 mlx5_tx_eseg_dmin(struct mlx5_txq_data *__rte_restrict txq __rte_unused,
2437 struct mlx5_txq_local *__rte_restrict loc,
2438 struct mlx5_wqe *__rte_restrict wqe,
2442 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
2444 uint8_t *psrc, *pdst;
2447 * Calculate and set check sum flags first, dword field
2448 * in segment may be shared with Software Parser flags.
2450 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2451 es->flags = rte_cpu_to_le_32(csum);
2453 * Calculate and set Software Parser offsets and flags.
2454	 * These flags are set for custom UDP and IP tunnel packets.
2456 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2457 /* Fill metadata field if needed. */
2458 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2459 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2460 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2461 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
2462 es->inline_hdr_sz = RTE_BE16(MLX5_ESEG_MIN_INLINE_SIZE);
2463 es->inline_data = *(unaligned_uint16_t *)psrc;
2464 psrc += sizeof(uint16_t);
2465 pdst = (uint8_t *)(es + 1);
2466 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2467		/* Implement VLAN tag insertion as part of the inline data. */
2468 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
2469 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2470 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2471 /* Insert VLAN ethertype + VLAN tag. */
2472 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2473 ((RTE_ETHER_TYPE_VLAN << 16) |
2474 loc->mbuf->vlan_tci);
2475 pdst += sizeof(struct rte_vlan_hdr);
2476		/* Copy the remaining two bytes from the packet data. */
2477 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
2478 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
2480 /* Fill the gap in the title WQEBB with inline data. */
2481 rte_mov16(pdst, psrc);
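/*
 * Worked example of the minimal inline built above, with
 * MLX5_ESEG_MIN_INLINE_SIZE equal to 18 bytes (see the static_asserts
 * at the top of the file):
 *
 *   no VLAN:   dst MAC(6) + src MAC(6) + ethertype(2) + payload(4)
 *   with VLAN: dst MAC(6) + src MAC(6) + TPID/TCI(4) + ethertype(2)
 *
 * The first two bytes go into es->inline_data inside the Ethernet
 * Segment, the remaining sixteen fill the gap up to the WQEBB end.
 */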
2486 * Build the Ethernet Segment with entire packet
2487 * data inlining. Checks the boundary of WQEBB and
2488 * ring buffer wrapping, supports Software Parser,
2489 * Checksums and VLAN insertion Tx offload features.
2492 * Pointer to TX queue structure.
2494 * Pointer to burst routine local context.
2496 * Pointer to WQE to fill with built Ethernet Segment.
2498 * Length of VLAN tag insertion if any.
2500 * Length of data to inline (VLAN included, if any).
2502 * TSO flag, set mss field from the packet.
2504 * Configured Tx offloads mask. It is fully defined at
2505 * compile time and may be used for optimization.
2508 * Pointer to the next Data Segment (aligned and wrapped around).
2510 static __rte_always_inline struct mlx5_wqe_dseg *
2511 mlx5_tx_eseg_data(struct mlx5_txq_data *__rte_restrict txq,
2512 struct mlx5_txq_local *__rte_restrict loc,
2513 struct mlx5_wqe *__rte_restrict wqe,
2519 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
2521 uint8_t *psrc, *pdst;
2525 * Calculate and set check sum flags first, dword field
2526 * in segment may be shared with Software Parser flags.
2528 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2531 csum |= loc->mbuf->tso_segsz;
2532 es->flags = rte_cpu_to_be_32(csum);
2534 es->flags = rte_cpu_to_le_32(csum);
2537 * Calculate and set Software Parser offsets and flags.
2538	 * These flags are set for custom UDP and IP tunnel packets.
2540 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2541 /* Fill metadata field if needed. */
2542 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2543 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2544 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2545 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
2546 es->inline_hdr_sz = rte_cpu_to_be_16(inlen);
2547 es->inline_data = *(unaligned_uint16_t *)psrc;
2548 psrc += sizeof(uint16_t);
2549 pdst = (uint8_t *)(es + 1);
2550 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2551		/* Implement VLAN tag insertion as part of the inline data. */
2552 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
2553 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2554 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2555 /* Insert VLAN ethertype + VLAN tag. */
2556 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2557 ((RTE_ETHER_TYPE_VLAN << 16) |
2558 loc->mbuf->vlan_tci);
2559 pdst += sizeof(struct rte_vlan_hdr);
2560		/* Copy the remaining two bytes from the packet data. */
2561 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
2562 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
2563 psrc += sizeof(uint16_t);
2565 /* Fill the gap in the title WQEBB with inline data. */
2566 rte_mov16(pdst, psrc);
2567 psrc += sizeof(rte_v128u32_t);
2569 pdst = (uint8_t *)(es + 2);
2570 MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
2571 MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
2572 inlen -= MLX5_ESEG_MIN_INLINE_SIZE;
2574 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
2575 return (struct mlx5_wqe_dseg *)pdst;
2578 * The WQEBB space availability is checked by caller.
2579 * Here we should be aware of WQE ring buffer wraparound only.
2581 part = (uint8_t *)txq->wqes_end - pdst;
2582 part = RTE_MIN(part, inlen);
2584 rte_memcpy(pdst, psrc, part);
2586 if (likely(!inlen)) {
2588 * If return value is not used by the caller
2589 * the code below will be optimized out.
2592 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2593 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
2594 pdst = (uint8_t *)txq->wqes;
2595 return (struct mlx5_wqe_dseg *)pdst;
2597 pdst = (uint8_t *)txq->wqes;
2604 * Copy data from a chain of mbufs to the specified linear buffer.
2605 * Supports Checksums and VLAN insertion Tx offload features. If data
2606 * from some mbuf is copied completely, this mbuf is freed. A local
2607 * structure is used to keep the byte stream state.
2610 * Pointer to the destination linear buffer.
2612 * Pointer to burst routine local context.
2614 * Length of data to be copied.
2616 * Length of data to be copied ignoring no inline hint.
2618 * Configured Tx offloads mask. It is fully defined at
2619 * compile time and may be used for optimization.
2622 * Number of actual copied data bytes. This is always greater than or
2623 *   equal to the must parameter and might be less than len if the
2624 *   no-inline hint flag is encountered.
2626 static __rte_always_inline unsigned int
2627 mlx5_tx_mseg_memcpy(uint8_t *pdst,
2628 struct mlx5_txq_local *__rte_restrict loc,
2631 unsigned int olx __rte_unused)
2633 struct rte_mbuf *mbuf;
2634 unsigned int part, dlen, copy = 0;
2638 MLX5_ASSERT(must <= len);
2640 /* Allow zero length packets, must check first. */
2641 dlen = rte_pktmbuf_data_len(loc->mbuf);
2642 if (dlen <= loc->mbuf_off) {
2643 /* Exhausted packet, just free. */
2645 loc->mbuf = mbuf->next;
2646 rte_pktmbuf_free_seg(mbuf);
2648 MLX5_ASSERT(loc->mbuf_nseg > 1);
2649 MLX5_ASSERT(loc->mbuf);
2651 if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
2656 * We already copied the minimal
2657 * requested amount of data.
2662 if (diff <= rte_pktmbuf_data_len(loc->mbuf)) {
2664 * Copy only the minimal required
2665 * part of the data buffer.
2672 dlen -= loc->mbuf_off;
2673 psrc = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
2675 part = RTE_MIN(len, dlen);
2676 rte_memcpy(pdst, psrc, part);
2678 loc->mbuf_off += part;
2681 if (loc->mbuf_off >= rte_pktmbuf_data_len(loc->mbuf)) {
2683 /* Exhausted packet, just free. */
2685 loc->mbuf = mbuf->next;
2686 rte_pktmbuf_free_seg(mbuf);
2688 MLX5_ASSERT(loc->mbuf_nseg >= 1);
2698 * Build the Ethernet Segment with inlined data from
2699 * multi-segment packet. Checks the boundary of WQEBB
2700 * and ring buffer wrapping, supports Software Parser,
2701 * Checksums and VLAN insertion Tx offload features.
2704 * Pointer to TX queue structure.
2706 * Pointer to burst routine local context.
2708 * Pointer to WQE to fill with built Ethernet Segment.
2710 * Length of VLAN tag insertion if any.
2712 * Length of data to inline (VLAN included, if any).
2714 * TSO flag, set mss field from the packet.
2716 * Configured Tx offloads mask. It is fully defined at
2717 * compile time and may be used for optimization.
2720 * Pointer to the next Data Segment (aligned and
2721 *   possibly NOT wrapped around - the caller should do
2722 * wrapping check on its own).
2724 static __rte_always_inline struct mlx5_wqe_dseg *
2725 mlx5_tx_eseg_mdat(struct mlx5_txq_data *__rte_restrict txq,
2726 struct mlx5_txq_local *__rte_restrict loc,
2727 struct mlx5_wqe *__rte_restrict wqe,
2733 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
2736 unsigned int part, tlen = 0;
2739 * Calculate and set check sum flags first, uint32_t field
2740 * in segment may be shared with Software Parser flags.
2742 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2745 csum |= loc->mbuf->tso_segsz;
2746 es->flags = rte_cpu_to_be_32(csum);
2748 es->flags = rte_cpu_to_le_32(csum);
2751 * Calculate and set Software Parser offsets and flags.
2752	 * These flags are set for custom UDP and IP tunnel packets.
2754 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2755 /* Fill metadata field if needed. */
2756 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2757 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2758 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2759 MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
2760 pdst = (uint8_t *)&es->inline_data;
2761 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2762		/* Implement VLAN tag insertion as part of the inline data. */
2763 mlx5_tx_mseg_memcpy(pdst, loc,
2764 2 * RTE_ETHER_ADDR_LEN,
2765 2 * RTE_ETHER_ADDR_LEN, olx);
2766 pdst += 2 * RTE_ETHER_ADDR_LEN;
2767 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2768 ((RTE_ETHER_TYPE_VLAN << 16) |
2769 loc->mbuf->vlan_tci);
2770 pdst += sizeof(struct rte_vlan_hdr);
2771 tlen += 2 * RTE_ETHER_ADDR_LEN + sizeof(struct rte_vlan_hdr);
2773 MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
2775 * The WQEBB space availability is checked by caller.
2776 * Here we should be aware of WQE ring buffer wraparound only.
2778 part = (uint8_t *)txq->wqes_end - pdst;
2779 part = RTE_MIN(part, inlen - tlen);
2785 * Copying may be interrupted inside the routine
2786		 * if it runs into the no-inline hint flag.
2788 copy = tlen >= txq->inlen_mode ? 0 : (txq->inlen_mode - tlen);
2789 copy = mlx5_tx_mseg_memcpy(pdst, loc, part, copy, olx);
2791 if (likely(inlen <= tlen) || copy < part) {
2792 es->inline_hdr_sz = rte_cpu_to_be_16(tlen);
2794 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2795 return (struct mlx5_wqe_dseg *)pdst;
2797 pdst = (uint8_t *)txq->wqes;
2798 part = inlen - tlen;
2803 * Build the Data Segment of pointer type.
2806 * Pointer to TX queue structure.
2808 * Pointer to burst routine local context.
2810 * Pointer to WQE to fill with built Data Segment.
2812 * Data buffer to point.
2814 * Data buffer length.
2816 * Configured Tx offloads mask. It is fully defined at
2817 * compile time and may be used for optimization.
2819 static __rte_always_inline void
2820 mlx5_tx_dseg_ptr(struct mlx5_txq_data *__rte_restrict txq,
2821 struct mlx5_txq_local *__rte_restrict loc,
2822 struct mlx5_wqe_dseg *__rte_restrict dseg,
2825 unsigned int olx __rte_unused)
2829 dseg->bcount = rte_cpu_to_be_32(len);
2830 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
2831 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
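/*
 * The pointer-type Data Segment above is the standard 16-byte mlx5
 * scatter entry; a sketch of its layout:
 *
 *   bcount : byte count, 32 bit big endian
 *   lkey   : memory region key resolved from the mbuf by
 *            mlx5_tx_mb2mr()
 *   pbuf   : 64-bit buffer address covered by that memory region
 *
 * One such entry occupies exactly one MLX5_WSEG_SIZE segment, which
 * is why WQE sizes are accounted in 16-byte units throughout.
 */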
2835 * Build the Data Segment of pointer type or inline
2836 * if data length is less than buffer in minimal
2837 * Data Segment size.
2840 * Pointer to TX queue structure.
2842 * Pointer to burst routine local context.
2844 * Pointer to WQE to fill with built Data Segment.
2846 * Data buffer to point.
2848 * Data buffer length.
2850 * Configured Tx offloads mask. It is fully defined at
2851 * compile time and may be used for optimization.
2853 static __rte_always_inline void
2854 mlx5_tx_dseg_iptr(struct mlx5_txq_data *__rte_restrict txq,
2855 struct mlx5_txq_local *__rte_restrict loc,
2856 struct mlx5_wqe_dseg *__rte_restrict dseg,
2859 unsigned int olx __rte_unused)
2865 if (len > MLX5_DSEG_MIN_INLINE_SIZE) {
2866 dseg->bcount = rte_cpu_to_be_32(len);
2867 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
2868 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
2872 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
2873 /* Unrolled implementation of generic rte_memcpy. */
2874 dst = (uintptr_t)&dseg->inline_data[0];
2875 src = (uintptr_t)buf;
2877 #ifdef RTE_ARCH_STRICT_ALIGN
2878 MLX5_ASSERT(dst == RTE_PTR_ALIGN(dst, sizeof(uint32_t)));
2879 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
2880 dst += sizeof(uint32_t);
2881 src += sizeof(uint32_t);
2882 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
2883 dst += sizeof(uint32_t);
2884 src += sizeof(uint32_t);
2886 *(uint64_t *)dst = *(unaligned_uint64_t *)src;
2887 dst += sizeof(uint64_t);
2888 src += sizeof(uint64_t);
2892 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
2893 dst += sizeof(uint32_t);
2894 src += sizeof(uint32_t);
2897 *(uint16_t *)dst = *(unaligned_uint16_t *)src;
2898 dst += sizeof(uint16_t);
2899 src += sizeof(uint16_t);
2902 *(uint8_t *)dst = *(uint8_t *)src;
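/*
 * Note on the inline branch above (a sketch): remainders of at most
 * MLX5_DSEG_MIN_INLINE_SIZE (12) bytes are copied straight into the
 * Data Segment with MLX5_ETH_WQE_DATA_INLINE set in bcount, so no
 * lkey lookup or extra pointer segment is needed for tiny tails. The
 * unrolled copy handles the 8/4/2/1 byte pieces explicitly to keep
 * strict-alignment targets happy.
 */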
2906 * Build the Data Segment of inlined data from single
2907 * segment packet, no VLAN insertion.
2910 * Pointer to TX queue structure.
2912 * Pointer to burst routine local context.
2914 * Pointer to WQE to fill with built Data Segment.
2916 * Data buffer to point.
2918 * Data buffer length.
2920 * Configured Tx offloads mask. It is fully defined at
2921 * compile time and may be used for optimization.
2924 * Pointer to the next Data Segment after inlined data.
2925 * Ring buffer wraparound check is needed. We do not
2926 * do it here because it may not be needed for the
2927 * last packet in the eMPW session.
2929 static __rte_always_inline struct mlx5_wqe_dseg *
2930 mlx5_tx_dseg_empw(struct mlx5_txq_data *__rte_restrict txq,
2931 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
2932 struct mlx5_wqe_dseg *__rte_restrict dseg,
2935 unsigned int olx __rte_unused)
2940 if (!MLX5_TXOFF_CONFIG(MPW)) {
2941 /* Store the descriptor byte counter for eMPW sessions. */
2942 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
2943 pdst = &dseg->inline_data[0];
2945 /* The entire legacy MPW session counter is stored on close. */
2946 pdst = (uint8_t *)dseg;
2949 * The WQEBB space availability is checked by caller.
2950 * Here we should be aware of WQE ring buffer wraparound only.
2952 part = (uint8_t *)txq->wqes_end - pdst;
2953 part = RTE_MIN(part, len);
2955 rte_memcpy(pdst, buf, part);
2959 if (!MLX5_TXOFF_CONFIG(MPW))
2960 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2961 /* Note: no final wraparound check here. */
2962 return (struct mlx5_wqe_dseg *)pdst;
2964 pdst = (uint8_t *)txq->wqes;
2971 * Build the Data Segment of inlined data from single
2972 * segment packet with VLAN insertion.
2975 * Pointer to TX queue structure.
2977 * Pointer to burst routine local context.
2979 * Pointer to the dseg fill with built Data Segment.
2981 * Data buffer to point.
2983 * Data buffer length.
2985 * Configured Tx offloads mask. It is fully defined at
2986 * compile time and may be used for optimization.
2989 * Pointer to the next Data Segment after inlined data.
2990 * Ring buffer wraparound check is needed.
2992 static __rte_always_inline struct mlx5_wqe_dseg *
2993 mlx5_tx_dseg_vlan(struct mlx5_txq_data *__rte_restrict txq,
2994 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
2995 struct mlx5_wqe_dseg *__rte_restrict dseg,
2998 unsigned int olx __rte_unused)
3004 MLX5_ASSERT(len > MLX5_ESEG_MIN_INLINE_SIZE);
3005 if (!MLX5_TXOFF_CONFIG(MPW)) {
3006 /* Store the descriptor byte counter for eMPW sessions. */
3007 dseg->bcount = rte_cpu_to_be_32
3008 ((len + sizeof(struct rte_vlan_hdr)) |
3009 MLX5_ETH_WQE_DATA_INLINE);
3010 pdst = &dseg->inline_data[0];
3012 /* The entire legacy MPW session counter is stored on close. */
3013 pdst = (uint8_t *)dseg;
3015 memcpy(pdst, buf, MLX5_DSEG_MIN_INLINE_SIZE);
3016 buf += MLX5_DSEG_MIN_INLINE_SIZE;
3017 pdst += MLX5_DSEG_MIN_INLINE_SIZE;
3018 len -= MLX5_DSEG_MIN_INLINE_SIZE;
3019 /* Insert VLAN ethertype + VLAN tag. Pointer is aligned. */
3020 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
3021 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
3022 pdst = (uint8_t *)txq->wqes;
3023 *(uint32_t *)pdst = rte_cpu_to_be_32((RTE_ETHER_TYPE_VLAN << 16) |
3024 loc->mbuf->vlan_tci);
3025 pdst += sizeof(struct rte_vlan_hdr);
3027 * The WQEBB space availability is checked by caller.
3028 * Here we should be aware of WQE ring buffer wraparound only.
3030 part = (uint8_t *)txq->wqes_end - pdst;
3031 part = RTE_MIN(part, len);
3033 rte_memcpy(pdst, buf, part);
3037 if (!MLX5_TXOFF_CONFIG(MPW))
3038 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
3039 /* Note: no final wraparound check here. */
3040 return (struct mlx5_wqe_dseg *)pdst;
3042 pdst = (uint8_t *)txq->wqes;
3049 * Build the Ethernet Segment with optionally inlined data with
3050 * VLAN insertion and following Data Segments (if any) from
3051 * multi-segment packet. Used by ordinary send and TSO.
3054 * Pointer to TX queue structure.
3056 * Pointer to burst routine local context.
3058 * Pointer to WQE to fill with built Ethernet/Data Segments.
3060 * Length of VLAN header to insert, 0 means no VLAN insertion.
3062 *   Data length to inline. For TSO this parameter specifies the
3063 *   exact value; for an ordinary send it can be aligned up by the
3064 * caller to provide better WQE space saving and data buffer
3065 * start address alignment. This length includes VLAN header
3068 * Zero means ordinary send, inlined data can be extended,
3069 * otherwise this is TSO, inlined data length is fixed.
3071 * Configured Tx offloads mask. It is fully defined at
3072 * compile time and may be used for optimization.
3075 * Actual size of built WQE in segments.
3077 static __rte_always_inline unsigned int
3078 mlx5_tx_mseg_build(struct mlx5_txq_data *__rte_restrict txq,
3079 struct mlx5_txq_local *__rte_restrict loc,
3080 struct mlx5_wqe *__rte_restrict wqe,
3084 unsigned int olx __rte_unused)
3086 struct mlx5_wqe_dseg *__rte_restrict dseg;
3089 MLX5_ASSERT((rte_pktmbuf_pkt_len(loc->mbuf) + vlan) >= inlen);
3090 loc->mbuf_nseg = NB_SEGS(loc->mbuf);
3093 dseg = mlx5_tx_eseg_mdat(txq, loc, wqe, vlan, inlen, tso, olx);
3094 if (!loc->mbuf_nseg)
3097	 * There are still some mbufs remaining, not inlined.
3098 * The first mbuf may be partially inlined and we
3099 * must process the possible non-zero data offset.
3101 if (loc->mbuf_off) {
3106 * Exhausted packets must be dropped before.
3107		 * A non-zero offset means there is some data
3108		 * remaining in the packet.
3110 MLX5_ASSERT(loc->mbuf_off < rte_pktmbuf_data_len(loc->mbuf));
3111 MLX5_ASSERT(rte_pktmbuf_data_len(loc->mbuf));
3112 dptr = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
3114 dlen = rte_pktmbuf_data_len(loc->mbuf) - loc->mbuf_off;
3116 * Build the pointer/minimal data Data Segment.
3117 * Do ring buffer wrapping check in advance.
3119 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3120 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3121 mlx5_tx_dseg_iptr(txq, loc, dseg, dptr, dlen, olx);
3122 /* Store the mbuf to be freed on completion. */
3123 MLX5_ASSERT(loc->elts_free);
3124 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3127 if (--loc->mbuf_nseg == 0)
3129 loc->mbuf = loc->mbuf->next;
3133 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
3134 struct rte_mbuf *mbuf;
3136 /* Zero length segment found, just skip. */
3138 loc->mbuf = loc->mbuf->next;
3139 rte_pktmbuf_free_seg(mbuf);
3140 if (--loc->mbuf_nseg == 0)
3143 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3144 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3147 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3148 rte_pktmbuf_data_len(loc->mbuf), olx);
3149 MLX5_ASSERT(loc->elts_free);
3150 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3153 if (--loc->mbuf_nseg == 0)
3155 loc->mbuf = loc->mbuf->next;
3160 /* Calculate actual segments used from the dseg pointer. */
3161 if ((uintptr_t)wqe < (uintptr_t)dseg)
3162 ds = ((uintptr_t)dseg - (uintptr_t)wqe) / MLX5_WSEG_SIZE;
3164 ds = (((uintptr_t)dseg - (uintptr_t)wqe) +
3165 txq->wqe_s * MLX5_WQE_SIZE) / MLX5_WSEG_SIZE;
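/*
 * Worked example of the segment accounting above (a sketch): with
 * MLX5_WSEG_SIZE = 16 and MLX5_WQE_SIZE = 64, a WQE that starts at
 * "wqe" and whose next free dseg pointer is one WQEBB ahead yields
 *
 *   ds = (dseg - wqe) / 16 = 64 / 16 = 4
 *
 * If dseg wrapped past wqes_end, the else branch adds the whole ring
 * size (wqe_s * MLX5_WQE_SIZE) back before dividing, so ds is always
 * the true WQE length in 16-byte segments.
 */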
3170 * The routine checks the timestamp flag in the current packet
3171 * and pushes a WAIT WQE into the queue if scheduling is required.
3174 * Pointer to TX queue structure.
3176 * Pointer to burst routine local context.
3178 * Configured Tx offloads mask. It is fully defined at
3179 * compile time and may be used for optimization.
3182 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3183 * MLX5_TXCMP_CODE_SINGLE - continue processing with the packet.
3184 * MLX5_TXCMP_CODE_MULTI - the WAIT inserted, continue processing.
3185 * Local context variables partially updated.
3187 static __rte_always_inline enum mlx5_txcmp_code
3188 mlx5_tx_schedule_send(struct mlx5_txq_data *restrict txq,
3189 struct mlx5_txq_local *restrict loc,
3192 if (MLX5_TXOFF_CONFIG(TXPP) &&
3193 loc->mbuf->ol_flags & txq->ts_mask) {
3194 struct mlx5_wqe *wqe;
3199 * Estimate the required space quickly and roughly.
3200 * We would like to ensure the packet can be pushed
3201		 * to the queue and we won't get an orphan WAIT WQE.
3203 if (loc->wqe_free <= MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE ||
3204 loc->elts_free < NB_SEGS(loc->mbuf))
3205 return MLX5_TXCMP_CODE_EXIT;
3206 /* Convert the timestamp into completion to wait. */
3207 ts = *RTE_MBUF_DYNFIELD(loc->mbuf, txq->ts_offset, uint64_t *);
3208 wci = mlx5_txpp_convert_tx_ts(txq->sh, ts);
3209 if (unlikely(wci < 0))
3210 return MLX5_TXCMP_CODE_SINGLE;
3211 /* Build the WAIT WQE with specified completion. */
3212 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3213 mlx5_tx_cseg_init(txq, loc, wqe, 2, MLX5_OPCODE_WAIT, olx);
3214 mlx5_tx_wseg_init(txq, loc, wqe, wci, olx);
3217 return MLX5_TXCMP_CODE_MULTI;
3219 return MLX5_TXCMP_CODE_SINGLE;
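/*
 * Application-side sketch for hitting the scheduling path above. The
 * dynamic field/flag names and the RTE_BIT64 helper are assumptions
 * based on the standard mbuf Tx timestamp definitions, not something
 * this file defines:
 *
 *   int ts_off = rte_mbuf_dynfield_lookup(
 *                        RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL);
 *   int ts_bit = rte_mbuf_dynflag_lookup(
 *                        RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
 *   *RTE_MBUF_DYNFIELD(mbuf, ts_off, uint64_t *) = when_to_send;
 *   mbuf->ol_flags |= RTE_BIT64(ts_bit);
 *
 * The PMD converts the timestamp to a Clock Queue completion index
 * and emits a two-segment WAIT WQE (Control + Synchronize Queue
 * Segment) in front of the data WQE.
 */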
3223 * Tx one packet function for multi-segment TSO. Supports all
3224 * types of Tx offloads, uses MLX5_OPCODE_TSO to build WQEs,
3225 * sends one packet per WQE.
3227 * This routine is responsible for storing processed mbuf
3228 * into the elts ring buffer and updating elts_head.
3231 * Pointer to TX queue structure.
3233 * Pointer to burst routine local context.
3235 * Configured Tx offloads mask. It is fully defined at
3236 * compile time and may be used for optimization.
3239 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3240 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3241 * Local context variables partially updated.
3243 static __rte_always_inline enum mlx5_txcmp_code
3244 mlx5_tx_packet_multi_tso(struct mlx5_txq_data *__rte_restrict txq,
3245 struct mlx5_txq_local *__rte_restrict loc,
3248 struct mlx5_wqe *__rte_restrict wqe;
3249 unsigned int ds, dlen, inlen, ntcp, vlan = 0;
3251 if (MLX5_TXOFF_CONFIG(TXPP)) {
3252 enum mlx5_txcmp_code wret;
3254 /* Generate WAIT for scheduling if requested. */
3255 wret = mlx5_tx_schedule_send(txq, loc, olx);
3256 if (wret == MLX5_TXCMP_CODE_EXIT)
3257 return MLX5_TXCMP_CODE_EXIT;
3258 if (wret == MLX5_TXCMP_CODE_ERROR)
3259 return MLX5_TXCMP_CODE_ERROR;
3262 * Calculate data length to be inlined to estimate
3263 * the required space in WQE ring buffer.
3265 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
3266 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3267 vlan = sizeof(struct rte_vlan_hdr);
3268 inlen = loc->mbuf->l2_len + vlan +
3269 loc->mbuf->l3_len + loc->mbuf->l4_len;
3270 if (unlikely((!inlen || !loc->mbuf->tso_segsz)))
3271 return MLX5_TXCMP_CODE_ERROR;
3272 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
3273 inlen += loc->mbuf->outer_l2_len + loc->mbuf->outer_l3_len;
3274 /* Packet must contain all TSO headers. */
3275 if (unlikely(inlen > MLX5_MAX_TSO_HEADER ||
3276 inlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
3277 inlen > (dlen + vlan)))
3278 return MLX5_TXCMP_CODE_ERROR;
3279 MLX5_ASSERT(inlen >= txq->inlen_mode);
3281 * Check whether there are enough free WQEBBs:
3283 * - Ethernet Segment
3284 * - First Segment of inlined Ethernet data
3285 * - ... data continued ...
3286 * - Data Segments of pointer/min inline type
3288 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
3289 MLX5_ESEG_MIN_INLINE_SIZE +
3291 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3292 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3293 return MLX5_TXCMP_CODE_EXIT;
3294 /* Check for maximal WQE size. */
3295 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3296 return MLX5_TXCMP_CODE_ERROR;
3297 #ifdef MLX5_PMD_SOFT_COUNTERS
3298 /* Update sent data bytes/packets counters. */
3299 ntcp = (dlen - (inlen - vlan) + loc->mbuf->tso_segsz - 1) /
3300 loc->mbuf->tso_segsz;
3302 * One will be added for mbuf itself
3303 * at the end of the mlx5_tx_burst from
3304 * loc->pkts_sent field.
3307 txq->stats.opackets += ntcp;
3308 txq->stats.obytes += dlen + vlan + ntcp * inlen;
3310 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3311 loc->wqe_last = wqe;
3312 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_TSO, olx);
3313 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 1, olx);
3314 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3315 txq->wqe_ci += (ds + 3) / 4;
3316 loc->wqe_free -= (ds + 3) / 4;
3317 return MLX5_TXCMP_CODE_MULTI;
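/*
 * Worked example of the TSO WQE sizing above (a sketch): a 3-segment
 * mbuf with 58 bytes of headers to inline gives
 *
 *   ds     = 3 + 2 + (58 - 18 + 15) / 16 = 8
 *   WQEBBs = (8 + 3) / 4 = 2
 *
 * i.e. two 64-byte WQEBBs holding the Control Segment, the Ethernet
 * Segment with the inlined headers, and one pointer Data Segment per
 * mbuf segment.
 */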
3321 * Tx one packet function for multi-segment SEND. Supports all
3322 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
3323 * sends one packet per WQE, without any data inlining in
3326 * This routine is responsible for storing processed mbuf
3327 * into the elts ring buffer and updating elts_head.
3330 * Pointer to TX queue structure.
3332 * Pointer to burst routine local context.
3334 * Configured Tx offloads mask. It is fully defined at
3335 * compile time and may be used for optimization.
3338 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3339 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3340 * Local context variables partially updated.
3342 static __rte_always_inline enum mlx5_txcmp_code
3343 mlx5_tx_packet_multi_send(struct mlx5_txq_data *__rte_restrict txq,
3344 struct mlx5_txq_local *__rte_restrict loc,
3347 struct mlx5_wqe_dseg *__rte_restrict dseg;
3348 struct mlx5_wqe *__rte_restrict wqe;
3349 unsigned int ds, nseg;
3351 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
3352 if (MLX5_TXOFF_CONFIG(TXPP)) {
3353 enum mlx5_txcmp_code wret;
3355 /* Generate WAIT for scheduling if requested. */
3356 wret = mlx5_tx_schedule_send(txq, loc, olx);
3357 if (wret == MLX5_TXCMP_CODE_EXIT)
3358 return MLX5_TXCMP_CODE_EXIT;
3359 if (wret == MLX5_TXCMP_CODE_ERROR)
3360 return MLX5_TXCMP_CODE_ERROR;
3363	 * No inlining at all; this means that saving CPU cycles
3364	 * was prioritized at configuration time, so we should not
3365	 * copy any packet data to the WQE.
3367 nseg = NB_SEGS(loc->mbuf);
3369 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3370 return MLX5_TXCMP_CODE_EXIT;
3371 /* Check for maximal WQE size. */
3372 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3373 return MLX5_TXCMP_CODE_ERROR;
3375 * Some Tx offloads may cause an error if
3376	 * the packet is not long enough; check against the
3377 * assumed minimal length.
3379 if (rte_pktmbuf_pkt_len(loc->mbuf) <= MLX5_ESEG_MIN_INLINE_SIZE)
3380 return MLX5_TXCMP_CODE_ERROR;
3381 #ifdef MLX5_PMD_SOFT_COUNTERS
3382 /* Update sent data bytes counter. */
3383 txq->stats.obytes += rte_pktmbuf_pkt_len(loc->mbuf);
3384 if (MLX5_TXOFF_CONFIG(VLAN) &&
3385 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3386 txq->stats.obytes += sizeof(struct rte_vlan_hdr);
3389 * SEND WQE, one WQEBB:
3390 * - Control Segment, SEND opcode
3391 * - Ethernet Segment, optional VLAN, no inline
3392 * - Data Segments, pointer only type
3394 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3395 loc->wqe_last = wqe;
3396 mlx5_tx_cseg_init(txq, loc, wqe, ds, MLX5_OPCODE_SEND, olx);
3397 mlx5_tx_eseg_none(txq, loc, wqe, olx);
3398 dseg = &wqe->dseg[0];
3400 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
3401 struct rte_mbuf *mbuf;
3404 * Zero length segment found, have to
3405			 * correct the total size of the WQE in segments.
3406			 * It is supposed to be a rare occasion, so
3407			 * in the normal case (no zero length segments)
3408			 * we avoid extra writes to the Control
3412 wqe->cseg.sq_ds -= RTE_BE32(1);
3414 loc->mbuf = mbuf->next;
3415 rte_pktmbuf_free_seg(mbuf);
3421 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3422 rte_pktmbuf_data_len(loc->mbuf), olx);
3423 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3428 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3429 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3430 loc->mbuf = loc->mbuf->next;
3433 txq->wqe_ci += (ds + 3) / 4;
3434 loc->wqe_free -= (ds + 3) / 4;
3435 return MLX5_TXCMP_CODE_MULTI;
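/*
 * Worked example of the non-inline SEND WQE above (a sketch, assuming
 * ds counts the Control and Ethernet Segments plus one pointer Data
 * Segment per mbuf segment): a 4-segment packet gives
 *
 *   ds     = 2 + 4 = 6
 *   WQEBBs = (6 + 3) / 4 = 2
 *
 * Zero-length segments are freed on the fly and sq_ds is decremented
 * accordingly, as the loop above does.
 */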
3439 * Tx one packet function for multi-segment SEND. Supports all
3440 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
3441 * sends one packet per WQE, with data inlining in
3442 * Ethernet Segment and minimal Data Segments.
3444 * This routine is responsible for storing processed mbuf
3445 * into the elts ring buffer and updating elts_head.
3448 * Pointer to TX queue structure.
3450 * Pointer to burst routine local context.
3452 * Configured Tx offloads mask. It is fully defined at
3453 * compile time and may be used for optimization.
3456 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3457 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3458 * Local context variables partially updated.
3460 static __rte_always_inline enum mlx5_txcmp_code
3461 mlx5_tx_packet_multi_inline(struct mlx5_txq_data *__rte_restrict txq,
3462 struct mlx5_txq_local *__rte_restrict loc,
3465 struct mlx5_wqe *__rte_restrict wqe;
3466 unsigned int ds, inlen, dlen, vlan = 0;
3468 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
3469 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
3470 if (MLX5_TXOFF_CONFIG(TXPP)) {
3471 enum mlx5_txcmp_code wret;
3473 /* Generate WAIT for scheduling if requested. */
3474 wret = mlx5_tx_schedule_send(txq, loc, olx);
3475 if (wret == MLX5_TXCMP_CODE_EXIT)
3476 return MLX5_TXCMP_CODE_EXIT;
3477 if (wret == MLX5_TXCMP_CODE_ERROR)
3478 return MLX5_TXCMP_CODE_ERROR;
3481 * First calculate data length to be inlined
3482 * to estimate the required space for WQE.
3484 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
3485 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3486 vlan = sizeof(struct rte_vlan_hdr);
3487 inlen = dlen + vlan;
3488 /* Check against minimal length. */
3489 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
3490 return MLX5_TXCMP_CODE_ERROR;
3491 MLX5_ASSERT(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
3492 if (inlen > txq->inlen_send ||
3493 loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
3494 struct rte_mbuf *mbuf;
3499 * Packet length exceeds the allowed inline
3500 * data length, check whether the minimal
3501 * inlining is required.
3503 if (txq->inlen_mode) {
3504 MLX5_ASSERT(txq->inlen_mode >=
3505 MLX5_ESEG_MIN_INLINE_SIZE);
3506 MLX5_ASSERT(txq->inlen_mode <= txq->inlen_send);
3507 inlen = txq->inlen_mode;
3509 if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE ||
3510 !vlan || txq->vlan_en) {
3512 * VLAN insertion will be done inside by HW.
3513			 * It is not the most efficient way - the VLAN flag is
3514			 * checked twice, but we must compute the
3515			 * inlining length correctly and take into
3516 * account the VLAN header being inserted.
3518 return mlx5_tx_packet_multi_send
3521 inlen = MLX5_ESEG_MIN_INLINE_SIZE;
3524		 * Now we know the minimal amount of data that is requested
3525 * to inline. Check whether we should inline the buffers
3526 * from the chain beginning to eliminate some mbufs.
3529 nxlen = rte_pktmbuf_data_len(mbuf);
3530 if (unlikely(nxlen <= txq->inlen_send)) {
3531 /* We can inline first mbuf at least. */
3532 if (nxlen < inlen) {
3535 /* Scan mbufs till inlen filled. */
3540 nxlen = rte_pktmbuf_data_len(mbuf);
3542 } while (unlikely(nxlen < inlen));
3543 if (unlikely(nxlen > txq->inlen_send)) {
3544 /* We cannot inline entire mbuf. */
3545 smlen = inlen - smlen;
3546 start = rte_pktmbuf_mtod_offset
3547 (mbuf, uintptr_t, smlen);
3554				/* This should not be the end of the packet. */
3556 nxlen = inlen + rte_pktmbuf_data_len(mbuf);
3557 } while (unlikely(nxlen < txq->inlen_send));
3559 start = rte_pktmbuf_mtod(mbuf, uintptr_t);
3561		 * Check whether we can extend the inline length to align the
3562		 * start address of the data buffer to a cacheline.
3565 start = (~start + 1) & (RTE_CACHE_LINE_SIZE - 1);
3566 if (unlikely(start)) {
3568 if (start <= txq->inlen_send)
3573 * Check whether there are enough free WQEBBs:
3575 * - Ethernet Segment
3576 * - First Segment of inlined Ethernet data
3577 * - ... data continued ...
3578 * - Data Segments of pointer/min inline type
3580 * Estimate the number of Data Segments conservatively,
3581	 * supposing that no mbufs are being freed during inlining.
3583 MLX5_ASSERT(inlen <= txq->inlen_send);
3584 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
3585 MLX5_ESEG_MIN_INLINE_SIZE +
3587 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3588 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3589 return MLX5_TXCMP_CODE_EXIT;
3590 /* Check for maximal WQE size. */
3591 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3592 return MLX5_TXCMP_CODE_ERROR;
3593 #ifdef MLX5_PMD_SOFT_COUNTERS
3594 /* Update sent data bytes/packets counters. */
3595 txq->stats.obytes += dlen + vlan;
3597 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3598 loc->wqe_last = wqe;
3599 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_SEND, olx);
3600 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 0, olx);
3601 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3602 txq->wqe_ci += (ds + 3) / 4;
3603 loc->wqe_free -= (ds + 3) / 4;
3604 return MLX5_TXCMP_CODE_MULTI;
3608 * Tx burst function for multi-segment packets. Supports all
3609 * types of Tx offloads, uses MLX5_OPCODE_SEND/TSO to build WQEs,
3610 * sends one packet per WQE. The function stops sending if it
3611 * encounters a single-segment packet.
3613 * This routine is responsible for storing processed mbuf
3614 * into the elts ring buffer and updating elts_head.
3617 * Pointer to TX queue structure.
3619 * Packets to transmit.
3621 * Number of packets in array.
3623 * Pointer to burst routine local context.
3625 * Configured Tx offloads mask. It is fully defined at
3626 * compile time and may be used for optimization.
3629 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3630 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3631 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
3632 * MLX5_TXCMP_CODE_TSO - TSO single-segment packet encountered.
3633 * Local context variables updated.
3635 static __rte_always_inline enum mlx5_txcmp_code
3636 mlx5_tx_burst_mseg(struct mlx5_txq_data *__rte_restrict txq,
3637 struct rte_mbuf **__rte_restrict pkts,
3638 unsigned int pkts_n,
3639 struct mlx5_txq_local *__rte_restrict loc,
3642 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3643 MLX5_ASSERT(pkts_n > loc->pkts_sent);
3644 pkts += loc->pkts_sent + 1;
3645 pkts_n -= loc->pkts_sent;
3647 enum mlx5_txcmp_code ret;
3649 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
3651 * Estimate the number of free elts quickly but
3652		 * conservatively. Some segments may be fully inlined
3653		 * and freed; ignore this here - precise estimation
3656 if (loc->elts_free < NB_SEGS(loc->mbuf))
3657 return MLX5_TXCMP_CODE_EXIT;
3658 if (MLX5_TXOFF_CONFIG(TSO) &&
3659 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)) {
3660 /* Proceed with multi-segment TSO. */
3661 ret = mlx5_tx_packet_multi_tso(txq, loc, olx);
3662 } else if (MLX5_TXOFF_CONFIG(INLINE)) {
3663 /* Proceed with multi-segment SEND with inlining. */
3664 ret = mlx5_tx_packet_multi_inline(txq, loc, olx);
3666 /* Proceed with multi-segment SEND w/o inlining. */
3667 ret = mlx5_tx_packet_multi_send(txq, loc, olx);
3669 if (ret == MLX5_TXCMP_CODE_EXIT)
3670 return MLX5_TXCMP_CODE_EXIT;
3671 if (ret == MLX5_TXCMP_CODE_ERROR)
3672 return MLX5_TXCMP_CODE_ERROR;
3673 /* WQE is built, go to the next packet. */
3676 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3677 return MLX5_TXCMP_CODE_EXIT;
3678 loc->mbuf = *pkts++;
3680 rte_prefetch0(*pkts);
3681 if (likely(NB_SEGS(loc->mbuf) > 1))
3683 /* Here ends the series of multi-segment packets. */
3684 if (MLX5_TXOFF_CONFIG(TSO) &&
3685 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
3686 return MLX5_TXCMP_CODE_TSO;
3687 return MLX5_TXCMP_CODE_SINGLE;
3693 * Tx burst function for single-segment packets with TSO.
3694 * Supports all types of Tx offloads, except multi-packets.
3695 * Uses MLX5_OPCODE_TSO to build WQEs, sends one packet per WQE.
3696 * The function stops sending if it encounters a multi-segment
3697 * packet or a packet without TSO requested.
3699 * The routine is responsible for storing processed mbuf
3700 * into the elts ring buffer and updating elts_head if the inline
3701 * offload is requested, due to possible early freeing
3702 * of the inlined mbufs (we cannot store the pkts array in elts
3706 * Pointer to TX queue structure.
3708 * Packets to transmit.
3710 * Number of packets in array.
3712 * Pointer to burst routine local context.
3714 * Configured Tx offloads mask. It is fully defined at
3715 * compile time and may be used for optimization.
3718 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3719 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3720 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
3721 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
3722 * Local context variables updated.
3724 static __rte_always_inline enum mlx5_txcmp_code
3725 mlx5_tx_burst_tso(struct mlx5_txq_data *__rte_restrict txq,
3726 struct rte_mbuf **__rte_restrict pkts,
3727 unsigned int pkts_n,
3728 struct mlx5_txq_local *__rte_restrict loc,
3731 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3732 MLX5_ASSERT(pkts_n > loc->pkts_sent);
3733 pkts += loc->pkts_sent + 1;
3734 pkts_n -= loc->pkts_sent;
3736 struct mlx5_wqe_dseg *__rte_restrict dseg;
3737 struct mlx5_wqe *__rte_restrict wqe;
3738 unsigned int ds, dlen, hlen, ntcp, vlan = 0;
3741 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
3742 if (MLX5_TXOFF_CONFIG(TXPP)) {
3743 enum mlx5_txcmp_code wret;
3745 /* Generate WAIT for scheduling if requested. */
3746 wret = mlx5_tx_schedule_send(txq, loc, olx);
3747 if (wret == MLX5_TXCMP_CODE_EXIT)
3748 return MLX5_TXCMP_CODE_EXIT;
3749 if (wret == MLX5_TXCMP_CODE_ERROR)
3750 return MLX5_TXCMP_CODE_ERROR;
3752 dlen = rte_pktmbuf_data_len(loc->mbuf);
3753 if (MLX5_TXOFF_CONFIG(VLAN) &&
3754 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
3755 vlan = sizeof(struct rte_vlan_hdr);
3758 * First calculate the WQE size to check
3759 * whether we have enough space in ring buffer.
3761 hlen = loc->mbuf->l2_len + vlan +
3762 loc->mbuf->l3_len + loc->mbuf->l4_len;
3763 if (unlikely((!hlen || !loc->mbuf->tso_segsz)))
3764 return MLX5_TXCMP_CODE_ERROR;
3765 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
3766 hlen += loc->mbuf->outer_l2_len +
3767 loc->mbuf->outer_l3_len;
3768 /* Segment must contain all TSO headers. */
3769 if (unlikely(hlen > MLX5_MAX_TSO_HEADER ||
3770 hlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
3771 hlen > (dlen + vlan)))
3772 return MLX5_TXCMP_CODE_ERROR;
3774 * Check whether there are enough free WQEBBs:
3776 * - Ethernet Segment
3777 * - First Segment of inlined Ethernet data
3778 * - ... data continued ...
3779 * - Finishing Data Segment of pointer type
3781 ds = 4 + (hlen - MLX5_ESEG_MIN_INLINE_SIZE +
3782 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3783 if (loc->wqe_free < ((ds + 3) / 4))
3784 return MLX5_TXCMP_CODE_EXIT;
3785 #ifdef MLX5_PMD_SOFT_COUNTERS
3786 /* Update sent data bytes/packets counters. */
3787 ntcp = (dlen + vlan - hlen +
3788 loc->mbuf->tso_segsz - 1) /
3789 loc->mbuf->tso_segsz;
3791 * One will be added for mbuf itself at the end
3792 * of the mlx5_tx_burst from loc->pkts_sent field.
3795 txq->stats.opackets += ntcp;
3796 txq->stats.obytes += dlen + vlan + ntcp * hlen;
3799 * Build the TSO WQE:
3801 * - Ethernet Segment with hlen bytes inlined
3802 * - Data Segment of pointer type
3804 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3805 loc->wqe_last = wqe;
3806 mlx5_tx_cseg_init(txq, loc, wqe, ds,
3807 MLX5_OPCODE_TSO, olx);
3808 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan, hlen, 1, olx);
3809 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) + hlen - vlan;
3810 dlen -= hlen - vlan;
3811 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
3813 * WQE is built, update the loop parameters
3814 * and go to the next packet.
3816 txq->wqe_ci += (ds + 3) / 4;
3817 loc->wqe_free -= (ds + 3) / 4;
3818 if (MLX5_TXOFF_CONFIG(INLINE))
3819 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3823 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3824 return MLX5_TXCMP_CODE_EXIT;
3825 loc->mbuf = *pkts++;
3827 rte_prefetch0(*pkts);
3828 if (MLX5_TXOFF_CONFIG(MULTI) &&
3829 unlikely(NB_SEGS(loc->mbuf) > 1))
3830 return MLX5_TXCMP_CODE_MULTI;
3831 if (likely(!(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)))
3832 return MLX5_TXCMP_CODE_SINGLE;
3833 /* Continue with the next TSO packet. */
3839 * Analyze the packet and select the best method to send.
3842 * Pointer to TX queue structure.
3844 * Pointer to burst routine local context.
3846 * Configured Tx offloads mask. It is fully defined at
3847 * compile time and may be used for optimization.
3849 *   The predefined flag controlling whether to do the complete check for
3850 * multi-segment packets and TSO.
3853 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
3854 * MLX5_TXCMP_CODE_TSO - TSO required, use TSO/LSO.
3855 * MLX5_TXCMP_CODE_SINGLE - single-segment packet, use SEND.
3856 * MLX5_TXCMP_CODE_EMPW - single-segment packet, use MPW.
3858 static __rte_always_inline enum mlx5_txcmp_code
3859 mlx5_tx_able_to_empw(struct mlx5_txq_data *__rte_restrict txq,
3860 struct mlx5_txq_local *__rte_restrict loc,
3864 /* Check for multi-segment packet. */
3866 MLX5_TXOFF_CONFIG(MULTI) &&
3867 unlikely(NB_SEGS(loc->mbuf) > 1))
3868 return MLX5_TXCMP_CODE_MULTI;
3869 /* Check for TSO packet. */
3871 MLX5_TXOFF_CONFIG(TSO) &&
3872 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
3873 return MLX5_TXCMP_CODE_TSO;
3874 /* Check if eMPW is enabled at all. */
3875 if (!MLX5_TXOFF_CONFIG(EMPW))
3876 return MLX5_TXCMP_CODE_SINGLE;
3877 /* Check if eMPW can be engaged. */
3878 if (MLX5_TXOFF_CONFIG(VLAN) &&
3879 unlikely(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) &&
3880 (!MLX5_TXOFF_CONFIG(INLINE) ||
3881 unlikely((rte_pktmbuf_data_len(loc->mbuf) +
3882 sizeof(struct rte_vlan_hdr)) > txq->inlen_empw))) {
3884 * eMPW does not support VLAN insertion offload,
3885		 * we would have to inline the entire packet, but
3886		 * the packet is too long for inlining.
3888 return MLX5_TXCMP_CODE_SINGLE;
3890 return MLX5_TXCMP_CODE_EMPW;
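/*
 * Decision summary for the classifier above (a sketch; the
 * multi-segment and TSO checks may be skipped when the caller already
 * classified the packet):
 *
 *   NB_SEGS > 1                              -> MLX5_TXCMP_CODE_MULTI
 *   PKT_TX_TCP_SEG set                       -> MLX5_TXCMP_CODE_TSO
 *   eMPW not configured                      -> MLX5_TXCMP_CODE_SINGLE
 *   VLAN requested and cannot be inlined
 *   within inlen_empw                        -> MLX5_TXCMP_CODE_SINGLE
 *   otherwise                                -> MLX5_TXCMP_CODE_EMPW
 */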
3894 * Check the next packet attributes to match with the eMPW batch ones.
3895 * In addition, for legacy MPW the packet length is checked as well.
3898 * Pointer to TX queue structure.
3900 * Pointer to Ethernet Segment of eMPW batch.
3902 * Pointer to burst routine local context.
3904 * Length of previous packet in MPW descriptor.
3906 * Configured Tx offloads mask. It is fully defined at
3907 * compile time and may be used for optimization.
3910 * true - packet match with eMPW batch attributes.
3911 * false - no match, eMPW should be restarted.
3913 static __rte_always_inline bool
3914 mlx5_tx_match_empw(struct mlx5_txq_data *__rte_restrict txq,
3915 struct mlx5_wqe_eseg *__rte_restrict es,
3916 struct mlx5_txq_local *__rte_restrict loc,
3920 uint8_t swp_flags = 0;
3922 /* Compare the checksum flags, if any. */
3923 if (MLX5_TXOFF_CONFIG(CSUM) &&
3924 txq_ol_cksum_to_cs(loc->mbuf) != es->cs_flags)
3926 /* Compare the Software Parser offsets and flags. */
3927 if (MLX5_TXOFF_CONFIG(SWP) &&
3928 (es->swp_offs != txq_mbuf_to_swp(loc, &swp_flags, olx) ||
3929 es->swp_flags != swp_flags))
3931 /* Fill metadata field if needed. */
3932 if (MLX5_TXOFF_CONFIG(METADATA) &&
3933 es->metadata != (loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
3934 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0))
3936	/* Legacy MPW can send packets with the same length only. */
3937 if (MLX5_TXOFF_CONFIG(MPW) &&
3938 dlen != rte_pktmbuf_data_len(loc->mbuf))
3940 /* There must be no VLAN packets in eMPW loop. */
3941 if (MLX5_TXOFF_CONFIG(VLAN))
3942 MLX5_ASSERT(!(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT));
3943 /* Check if the scheduling is requested. */
3944 if (MLX5_TXOFF_CONFIG(TXPP) &&
3945 loc->mbuf->ol_flags & txq->ts_mask)
3951 * Update send loop variables and WQE for eMPW loop
3952 * without data inlining. Number of Data Segments is
3953 * equal to the number of sent packets.
3956 * Pointer to TX queue structure.
3958 * Pointer to burst routine local context.
3960 *   Number of packets (one Data Segment per packet).
3962 * Accumulated statistics, bytes sent
3964 * Configured Tx offloads mask. It is fully defined at
3965 * compile time and may be used for optimization.
3968 *   No return value; the loop counters and the last WQE size are
3969 *   updated in place.
3971 static __rte_always_inline void
3972 mlx5_tx_sdone_empw(struct mlx5_txq_data *__rte_restrict txq,
3973 struct mlx5_txq_local *__rte_restrict loc,
3976 unsigned int olx __rte_unused)
3978 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
3979 #ifdef MLX5_PMD_SOFT_COUNTERS
3980 /* Update sent data bytes counter. */
3981 txq->stats.obytes += slen;
3985 loc->elts_free -= ds;
3986 loc->pkts_sent += ds;
3988 loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3989 txq->wqe_ci += (ds + 3) / 4;
3990 loc->wqe_free -= (ds + 3) / 4;
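/*
 * Worked example of the bookkeeping above (a sketch): an eMPW session
 * that accumulated 14 packets without inlining carries one pointer
 * Data Segment per packet, so the whole WQE is 14 + 2 = 16 sixteen-byte
 * segments (Control + Ethernet + 14 Data Segments) and consumes
 * (16 + 3) / 4 = 4 WQEBBs.
 */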
3994 * Update send loop variables and WQE for eMPW loop
3995 * with data inlining. Takes the total size of the descriptors
3996 * and data pushed to the WQE.
3999 * Pointer to TX queue structure.
4001 * Pointer to burst routine local context.
4003 * Total size of descriptor/data in bytes.
4005 * Accumulated statistics, data bytes sent.
4007 * The base WQE for the eMPW/MPW descriptor.
4009 * Configured Tx offloads mask. It is fully defined at
4010 * compile time and may be used for optimization.
4013 *   No return value; the WQE size and the loop counters are
4014 *   updated in place.
4016 static __rte_always_inline void
4017 mlx5_tx_idone_empw(struct mlx5_txq_data *__rte_restrict txq,
4018 struct mlx5_txq_local *__rte_restrict loc,
4021 struct mlx5_wqe *__rte_restrict wqem,
4022 unsigned int olx __rte_unused)
4024 struct mlx5_wqe_dseg *dseg = &wqem->dseg[0];
4026 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
4027 #ifdef MLX5_PMD_SOFT_COUNTERS
4028 /* Update sent data bytes counter. */
4029 txq->stats.obytes += slen;
4033 if (MLX5_TXOFF_CONFIG(MPW) && dseg->bcount == RTE_BE32(0)) {
4035		 * If the legacy MPW session contains inline packets,
4036		 * we should set the single inline Data Segment length
4037		 * and align the total length to the segment size.
4039 MLX5_ASSERT(len > sizeof(dseg->bcount));
4040 dseg->bcount = rte_cpu_to_be_32((len - sizeof(dseg->bcount)) |
4041 MLX5_ETH_WQE_DATA_INLINE);
4042 len = (len + MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE + 2;
4045 * The session is not legacy MPW or contains the
4046 * data buffer pointer segments.
4048 MLX5_ASSERT((len % MLX5_WSEG_SIZE) == 0);
4049 len = len / MLX5_WSEG_SIZE + 2;
4051 wqem->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | len);
4052 txq->wqe_ci += (len + 3) / 4;
4053 loc->wqe_free -= (len + 3) / 4;
4054 loc->wqe_last = wqem;
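/*
 * Worked example of the legacy MPW inline close above (a sketch): if
 * the session inlined 120 bytes of packet data, "len" on entry is
 * 124 counting the bcount header, so the single inline Data Segment
 * advertises 120 | MLX5_ETH_WQE_DATA_INLINE and the WQE length becomes
 * (124 + 15) / 16 + 2 = 10 sixteen-byte segments, i.e. (10 + 3) / 4 = 3
 * WQEBBs.
 */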
4058 * The set of Tx burst functions for single-segment packets
4059 * without TSO and with Multi-Packet Writing feature support.
4060 * Supports all types of Tx offloads, except multi-packets
4063 * Uses MLX5_OPCODE_EMPW to build WQEs if possible and sends
4064 * as many packets per WQE as it can. If eMPW is not configured
4065 * or the packet cannot be sent with eMPW (VLAN insertion), the
4066 * ordinary SEND opcode is used and only one packet is placed
4069 * The functions stop sending if they encounter a multi-segment
4070 * packet or a packet with TSO requested.
4072 * The routines are responsible for storing the processed mbuf
4073 * into the elts ring buffer and updating elts_head if the inlining
4074 * offload is requested. Otherwise copying the mbufs to elts
4075 * can be postponed and completed at the end of the burst routine.
4078 * Pointer to TX queue structure.
4080 * Packets to transmit.
4082 * Number of packets in array.
4084 * Pointer to burst routine local context.
4086 * Configured Tx offloads mask. It is fully defined at
4087 * compile time and may be used for optimization.
4090 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
4091 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
4092 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
4093 * MLX5_TXCMP_CODE_TSO - TSO packet encountered.
4094 * MLX5_TXCMP_CODE_SINGLE - used inside functions set.
4095 * MLX5_TXCMP_CODE_EMPW - used inside functions set.
4097 * Local context variables updated.
4100 * The routine sends packets with MLX5_OPCODE_EMPW
4101 * without inlining; this is a dedicated optimized branch.
4102 * No VLAN insertion is supported.
4104 static __rte_always_inline enum mlx5_txcmp_code
4105 mlx5_tx_burst_empw_simple(struct mlx5_txq_data *__rte_restrict txq,
4106 struct rte_mbuf **__rte_restrict pkts,
4107 unsigned int pkts_n,
4108 struct mlx5_txq_local *__rte_restrict loc,
4112	 * This subroutine is part of mlx5_tx_burst_single()
4113	 * and sends single-segment packets with the eMPW opcode
4114	 * without data inlining.
4116 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
4117 MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
4118 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
4119 MLX5_ASSERT(pkts_n > loc->pkts_sent);
4120 pkts += loc->pkts_sent + 1;
4121 pkts_n -= loc->pkts_sent;
4123 struct mlx5_wqe_dseg *__rte_restrict dseg;
4124 struct mlx5_wqe_eseg *__rte_restrict eseg;
4125 enum mlx5_txcmp_code ret;
4126 unsigned int part, loop;
4127 unsigned int slen = 0;
4130 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
4131 if (MLX5_TXOFF_CONFIG(TXPP)) {
4132 enum mlx5_txcmp_code wret;
4134 /* Generate WAIT for scheduling if requested. */
4135 wret = mlx5_tx_schedule_send(txq, loc, olx);
4136 if (wret == MLX5_TXCMP_CODE_EXIT)
4137 return MLX5_TXCMP_CODE_EXIT;
4138 if (wret == MLX5_TXCMP_CODE_ERROR)
4139 return MLX5_TXCMP_CODE_ERROR;
4141 part = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
4142 MLX5_MPW_MAX_PACKETS :
4143 MLX5_EMPW_MAX_PACKETS);
4144 if (unlikely(loc->elts_free < part)) {
4145			/* We do not have enough elts to save all mbufs. */
4146 if (unlikely(loc->elts_free < MLX5_EMPW_MIN_PACKETS))
4147 return MLX5_TXCMP_CODE_EXIT;
4148			/* But we are still able to send at least a minimal eMPW. */
4149 part = loc->elts_free;
4151 /* Check whether we have enough WQEs */
4152 if (unlikely(loc->wqe_free < ((2 + part + 3) / 4))) {
4153 if (unlikely(loc->wqe_free <
4154 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
4155 return MLX5_TXCMP_CODE_EXIT;
4156 part = (loc->wqe_free * 4) - 2;
4158 if (likely(part > 1))
4159 rte_prefetch0(*pkts);
4160 loc->wqe_last = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4162 * Build eMPW title WQEBB:
4163 * - Control Segment, eMPW opcode
4164 * - Ethernet Segment, no inline
4166 mlx5_tx_cseg_init(txq, loc, loc->wqe_last, part + 2,
4167 MLX5_OPCODE_ENHANCED_MPSW, olx);
4168 mlx5_tx_eseg_none(txq, loc, loc->wqe_last,
4169 olx & ~MLX5_TXOFF_CONFIG_VLAN);
4170 eseg = &loc->wqe_last->eseg;
4171 dseg = &loc->wqe_last->dseg[0];
4173 /* Store the packet length for legacy MPW. */
4174 if (MLX5_TXOFF_CONFIG(MPW))
4175 eseg->mss = rte_cpu_to_be_16
4176 (rte_pktmbuf_data_len(loc->mbuf));
4178 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
4179 #ifdef MLX5_PMD_SOFT_COUNTERS
4180 /* Update sent data bytes counter. */
4185 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
4187 if (unlikely(--loop == 0))
4189 loc->mbuf = *pkts++;
4190 if (likely(loop > 1))
4191 rte_prefetch0(*pkts);
4192 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4194 * Unroll the completion code to avoid
4195			 * returning a variable value - it results in
4196			 * unoptimized sequential checking in the caller.
4198 if (ret == MLX5_TXCMP_CODE_MULTI) {
4200 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4201 if (unlikely(!loc->elts_free ||
4203 return MLX5_TXCMP_CODE_EXIT;
4204 return MLX5_TXCMP_CODE_MULTI;
4206 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
4207 if (ret == MLX5_TXCMP_CODE_TSO) {
4209 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4210 if (unlikely(!loc->elts_free ||
4212 return MLX5_TXCMP_CODE_EXIT;
4213 return MLX5_TXCMP_CODE_TSO;
4215 if (ret == MLX5_TXCMP_CODE_SINGLE) {
4217 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4218 if (unlikely(!loc->elts_free ||
4220 return MLX5_TXCMP_CODE_EXIT;
4221 return MLX5_TXCMP_CODE_SINGLE;
4223 if (ret != MLX5_TXCMP_CODE_EMPW) {
4226 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4227 return MLX5_TXCMP_CODE_ERROR;
4230			 * Check whether the packet parameters coincide
4231			 * with those of the assumed eMPW batch:
4232 * - check sum settings
4234 * - software parser settings
4235 * - packets length (legacy MPW only)
4236 * - scheduling is not required
4238 if (!mlx5_tx_match_empw(txq, eseg, loc, dlen, olx)) {
4241 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4242 if (unlikely(!loc->elts_free ||
4244 return MLX5_TXCMP_CODE_EXIT;
4248 /* Packet attributes match, continue the same eMPW. */
4250 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
4251 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
4253 /* eMPW is built successfully, update loop parameters. */
4255 MLX5_ASSERT(pkts_n >= part);
4256 #ifdef MLX5_PMD_SOFT_COUNTERS
4257 /* Update sent data bytes counter. */
4258 txq->stats.obytes += slen;
4260 loc->elts_free -= part;
4261 loc->pkts_sent += part;
4262 txq->wqe_ci += (2 + part + 3) / 4;
4263 loc->wqe_free -= (2 + part + 3) / 4;
4265 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
4266 return MLX5_TXCMP_CODE_EXIT;
4267 loc->mbuf = *pkts++;
4268 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4269 if (unlikely(ret != MLX5_TXCMP_CODE_EMPW))
4271 /* Continue sending eMPW batches. */
4277 * The routine sends packets with MLX5_OPCODE_EMPW
4278 * with inlining, optionally supports VLAN insertion.
4280 static __rte_always_inline enum mlx5_txcmp_code
4281 mlx5_tx_burst_empw_inline(struct mlx5_txq_data *__rte_restrict txq,
4282 struct rte_mbuf **__rte_restrict pkts,
4283 unsigned int pkts_n,
4284 struct mlx5_txq_local *__rte_restrict loc,
4288 * The subroutine is part of mlx5_tx_burst_single()
4289 * and sends single-segment packets with the eMPW opcode
4290 * with data inlining.
4292 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
4293 MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
4294 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
4295 MLX5_ASSERT(pkts_n > loc->pkts_sent);
4296 pkts += loc->pkts_sent + 1;
4297 pkts_n -= loc->pkts_sent;
4299 struct mlx5_wqe_dseg *__rte_restrict dseg;
4300 struct mlx5_wqe *__rte_restrict wqem;
4301 enum mlx5_txcmp_code ret;
4302 unsigned int room, part, nlim;
4303 unsigned int slen = 0;
4305 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
4306 if (MLX5_TXOFF_CONFIG(TXPP)) {
4307 enum mlx5_txcmp_code wret;
4309 /* Generate WAIT for scheduling if requested. */
4310 wret = mlx5_tx_schedule_send(txq, loc, olx);
4311 if (wret == MLX5_TXCMP_CODE_EXIT)
4312 return MLX5_TXCMP_CODE_EXIT;
4313 if (wret == MLX5_TXCMP_CODE_ERROR)
4314 return MLX5_TXCMP_CODE_ERROR;
4317 * Limit the number of packets in one WQE
4318 * to improve CQE generation latency.
4320 nlim = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
4321 MLX5_MPW_INLINE_MAX_PACKETS :
4322 MLX5_EMPW_MAX_PACKETS);
4323 /* Check whether we have the minimal amount of WQEs. */
4324 if (unlikely(loc->wqe_free <
4325 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
4326 return MLX5_TXCMP_CODE_EXIT;
4327 if (likely(pkts_n > 1))
4328 rte_prefetch0(*pkts);
4329 wqem = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4331 * Build eMPW title WQEBB:
4332 * - Control Segment, eMPW opcode, zero DS
4333 * - Ethernet Segment, no inline
4335 mlx5_tx_cseg_init(txq, loc, wqem, 0,
4336 MLX5_OPCODE_ENHANCED_MPSW, olx);
4337 mlx5_tx_eseg_none(txq, loc, wqem,
4338 olx & ~MLX5_TXOFF_CONFIG_VLAN);
4339 dseg = &wqem->dseg[0];
4340 /* Store the packet length for legacy MPW. */
4341 if (MLX5_TXOFF_CONFIG(MPW))
4342 wqem->eseg.mss = rte_cpu_to_be_16
4343 (rte_pktmbuf_data_len(loc->mbuf));
4344 room = RTE_MIN(MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE,
4345 loc->wqe_free) * MLX5_WQE_SIZE -
4346 MLX5_WQE_CSEG_SIZE -
4348 /* Limit the room for legacy MPW sessions for performance. */
4349 if (MLX5_TXOFF_CONFIG(MPW))
4350 room = RTE_MIN(room,
4351 RTE_MAX(txq->inlen_empw +
4352 sizeof(dseg->bcount) +
4353 (MLX5_TXOFF_CONFIG(VLAN) ?
4354 sizeof(struct rte_vlan_hdr) : 0),
4355 MLX5_MPW_INLINE_MAX_PACKETS *
4356 MLX5_WQE_DSEG_SIZE));
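/*
 * A rough sketch of the clamp above: for legacy MPW the byte budget
 * ("room") is limited to the larger of the configured inline limit
 * (txq->inlen_empw plus the bcount field and an optional VLAN
 * header) and MLX5_MPW_INLINE_MAX_PACKETS pointer Data Segments, so
 * a single legacy MPW session stays short for performance reasons.
 */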
4357 /* Build WQE till we have space, packets and resources. */
4360 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
4361 uint8_t *dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
4364 MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
4365 MLX5_ASSERT((room % MLX5_WQE_DSEG_SIZE) == 0);
4366 MLX5_ASSERT((uintptr_t)dseg < (uintptr_t)txq->wqes_end);
4368 * Some Tx offloads may cause an error if the
4369 * packet is not long enough; check against the
4370 * assumed minimal length.
4372 if (unlikely(dlen <= MLX5_ESEG_MIN_INLINE_SIZE)) {
4374 if (unlikely(!part))
4375 return MLX5_TXCMP_CODE_ERROR;
4377 * We have some successfully built
4378 * packet Data Segments to send.
4380 mlx5_tx_idone_empw(txq, loc, part,
4382 return MLX5_TXCMP_CODE_ERROR;
4384 /* Inline or not inline - that's the Question. */
4385 if (dlen > txq->inlen_empw ||
4386 loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE)
4388 if (MLX5_TXOFF_CONFIG(MPW)) {
4389 if (dlen > txq->inlen_send)
4393 /* Open new inline MPW session. */
4394 tlen += sizeof(dseg->bcount);
4395 dseg->bcount = RTE_BE32(0);
4397 (dseg, sizeof(dseg->bcount));
4400 * No pointer and inline descriptor
4401 * intermix for legacy MPW sessions.
4403 if (wqem->dseg[0].bcount)
4407 tlen = sizeof(dseg->bcount) + dlen;
4409 /* Inline entire packet, optional VLAN insertion. */
4410 if (MLX5_TXOFF_CONFIG(VLAN) &&
4411 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
4413 * The packet length must have been checked in
4414 * mlx5_tx_able_to_empw(), so the packet is
4415 * guaranteed to fit into the inline length.
4418 sizeof(struct rte_vlan_hdr)) <=
4420 tlen += sizeof(struct rte_vlan_hdr);
4423 dseg = mlx5_tx_dseg_vlan(txq, loc, dseg,
4425 #ifdef MLX5_PMD_SOFT_COUNTERS
4426 /* Update sent data bytes counter. */
4427 slen += sizeof(struct rte_vlan_hdr);
4432 dseg = mlx5_tx_dseg_empw(txq, loc, dseg,
4435 if (!MLX5_TXOFF_CONFIG(MPW))
4436 tlen = RTE_ALIGN(tlen, MLX5_WSEG_SIZE);
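/*
 * Illustrative example of the inline entry size, assuming a 4-byte
 * bcount field and 16B MLX5_WSEG_SIZE: a 60-byte frame gives
 * tlen = 4 + 60 = 64 bytes (already aligned), a 61-byte frame gives
 * tlen = 65, padded to 80 bytes by the alignment above. The padding
 * is applied for enhanced MPW only, legacy MPW keeps the exact
 * length.
 */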
4437 MLX5_ASSERT(room >= tlen);
4440 * Packet data are completely inline,
4441 * we can try to free the packet.
4443 if (likely(loc->pkts_sent == loc->mbuf_free)) {
4445 * All the packets from the beginning of the burst
4446 * are inlined, so the mbufs can be freed directly
4447 * from the original array on tx_burst() exit.
4453 * In order not to call rte_pktmbuf_free_seg() here,
4454 * in the innermost loop (which might be very
4455 * expensive), we just save the mbuf in elts.
4457 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
4462 * No pointer and inline descriptor
4463 * intermix for legacy MPW sessions.
4465 if (MLX5_TXOFF_CONFIG(MPW) &&
4467 wqem->dseg[0].bcount == RTE_BE32(0))
4470 * Non-inlinable VLAN packets are
4471 * processed outside of this routine.
4473 MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
4474 if (MLX5_TXOFF_CONFIG(VLAN))
4475 MLX5_ASSERT(!(loc->mbuf->ol_flags &
4477 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
4478 /* We have to store mbuf in elts.*/
4479 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
4481 room -= MLX5_WQE_DSEG_SIZE;
4482 /* Ring buffer wraparound is checked at the loop end.*/
4485 #ifdef MLX5_PMD_SOFT_COUNTERS
4486 /* Update sent data bytes counter. */
4491 if (unlikely(!pkts_n || !loc->elts_free)) {
4493 * We have no resources/packets to
4494 * continue building descriptors.
4497 mlx5_tx_idone_empw(txq, loc, part,
4499 return MLX5_TXCMP_CODE_EXIT;
4501 loc->mbuf = *pkts++;
4502 if (likely(pkts_n > 1))
4503 rte_prefetch0(*pkts);
4504 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4506 * Unroll the completion code to avoid
4507 * returning a variable value - it results in
4508 * unoptimized sequential checks in the caller.
4510 if (ret == MLX5_TXCMP_CODE_MULTI) {
4512 mlx5_tx_idone_empw(txq, loc, part,
4514 if (unlikely(!loc->elts_free ||
4516 return MLX5_TXCMP_CODE_EXIT;
4517 return MLX5_TXCMP_CODE_MULTI;
4519 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
4520 if (ret == MLX5_TXCMP_CODE_TSO) {
4522 mlx5_tx_idone_empw(txq, loc, part,
4524 if (unlikely(!loc->elts_free ||
4526 return MLX5_TXCMP_CODE_EXIT;
4527 return MLX5_TXCMP_CODE_TSO;
4529 if (ret == MLX5_TXCMP_CODE_SINGLE) {
4531 mlx5_tx_idone_empw(txq, loc, part,
4533 if (unlikely(!loc->elts_free ||
4535 return MLX5_TXCMP_CODE_EXIT;
4536 return MLX5_TXCMP_CODE_SINGLE;
4538 if (ret != MLX5_TXCMP_CODE_EMPW) {
4541 mlx5_tx_idone_empw(txq, loc, part,
4543 return MLX5_TXCMP_CODE_ERROR;
4545 /* Check if we have minimal room left. */
4547 if (unlikely(!nlim || room < MLX5_WQE_DSEG_SIZE))
4550 * Check whether packet parameters coincide
4551 * within the assumed eMPW batch:
4552 * - checksum settings
4554 * - software parser settings
4555 * - packet length (legacy MPW only)
4556 * - scheduling is not required
4558 if (!mlx5_tx_match_empw(txq, &wqem->eseg,
4561 /* Packet attributes match, continue the same eMPW. */
4562 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
4563 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
4566 * We get here to close an existing eMPW
4567 * session and start a new one.
4569 MLX5_ASSERT(pkts_n);
4571 if (unlikely(!part))
4572 return MLX5_TXCMP_CODE_EXIT;
4573 mlx5_tx_idone_empw(txq, loc, part, slen, wqem, olx);
4574 if (unlikely(!loc->elts_free ||
4576 return MLX5_TXCMP_CODE_EXIT;
4577 /* Continue the loop with new eMPW session. */
4583 * The routine sends packets with ordinary MLX5_OPCODE_SEND.
4584 * Data inlining and VLAN insertion are supported.
4586 static __rte_always_inline enum mlx5_txcmp_code
4587 mlx5_tx_burst_single_send(struct mlx5_txq_data *__rte_restrict txq,
4588 struct rte_mbuf **__rte_restrict pkts,
4589 unsigned int pkts_n,
4590 struct mlx5_txq_local *__rte_restrict loc,
4594 * The subroutine is part of mlx5_tx_burst_single()
4595 * and sends single-segment packets with the SEND opcode.
4597 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
4598 MLX5_ASSERT(pkts_n > loc->pkts_sent);
4599 pkts += loc->pkts_sent + 1;
4600 pkts_n -= loc->pkts_sent;
4602 struct mlx5_wqe *__rte_restrict wqe;
4603 enum mlx5_txcmp_code ret;
4605 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
4606 if (MLX5_TXOFF_CONFIG(TXPP)) {
4607 enum mlx5_txcmp_code wret;
4609 /* Generate WAIT for scheduling if requested. */
4610 wret = mlx5_tx_schedule_send(txq, loc, olx);
4611 if (wret == MLX5_TXCMP_CODE_EXIT)
4612 return MLX5_TXCMP_CODE_EXIT;
4613 if (wret == MLX5_TXCMP_CODE_ERROR)
4614 return MLX5_TXCMP_CODE_ERROR;
4616 if (MLX5_TXOFF_CONFIG(INLINE)) {
4617 unsigned int inlen, vlan = 0;
4619 inlen = rte_pktmbuf_data_len(loc->mbuf);
4620 if (MLX5_TXOFF_CONFIG(VLAN) &&
4621 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
4622 vlan = sizeof(struct rte_vlan_hdr);
4626 * If inlining is enabled at configuration time,
4627 * the limit must not be less than the minimal size.
4628 * Otherwise we would need an extra check of the data
4629 * size to avoid crashes due to length overflow.
4631 MLX5_ASSERT(txq->inlen_send >=
4632 MLX5_ESEG_MIN_INLINE_SIZE);
4633 if (inlen <= txq->inlen_send) {
4634 unsigned int seg_n, wqe_n;
4636 rte_prefetch0(rte_pktmbuf_mtod
4637 (loc->mbuf, uint8_t *));
4638 /* Check against minimal length. */
4639 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
4640 return MLX5_TXCMP_CODE_ERROR;
4641 if (loc->mbuf->ol_flags &
4642 PKT_TX_DYNF_NOINLINE) {
4644 * The hint flag not to inline packet
4645 * data is set. Check whether we can
4648 if ((!MLX5_TXOFF_CONFIG(EMPW) &&
4650 (MLX5_TXOFF_CONFIG(MPW) &&
4652 if (inlen <= txq->inlen_send)
4655 * The hardware requires the
4656 * minimal inline data header.
4658 goto single_min_inline;
4660 if (MLX5_TXOFF_CONFIG(VLAN) &&
4661 vlan && !txq->vlan_en) {
4663 * We must insert the VLAN tag
4664 * by software means.
4666 goto single_part_inline;
4668 goto single_no_inline;
4672 * Completely inlined packet data WQE:
4673 * - Control Segment, SEND opcode
4674 * - Ethernet Segment, no VLAN insertion
4675 * - Data inlined, VLAN optionally inserted
4676 * - Alignment to MLX5_WSEG_SIZE
4677 * Have to estimate the number of WQEBBs
4679 seg_n = (inlen + 3 * MLX5_WSEG_SIZE -
4680 MLX5_ESEG_MIN_INLINE_SIZE +
4681 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
4682 /* Check if there are enough WQEBBs. */
4683 wqe_n = (seg_n + 3) / 4;
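/*
 * Worked example, assuming 16B MLX5_WSEG_SIZE and an 18-byte
 * MLX5_ESEG_MIN_INLINE_SIZE: for inlen = 128 bytes the estimate is
 * seg_n = (128 + 3 * 16 - 18 + 15) / 16 = 10 WSEGs and
 * wqe_n = (10 + 3) / 4 = 3 WQEBBs, i.e. the Control and Ethernet
 * Segments plus the fully inlined data rounded up to whole WQEBBs.
 */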
4684 if (wqe_n > loc->wqe_free)
4685 return MLX5_TXCMP_CODE_EXIT;
4686 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4687 loc->wqe_last = wqe;
4688 mlx5_tx_cseg_init(txq, loc, wqe, seg_n,
4689 MLX5_OPCODE_SEND, olx);
4690 mlx5_tx_eseg_data(txq, loc, wqe,
4691 vlan, inlen, 0, olx);
4692 txq->wqe_ci += wqe_n;
4693 loc->wqe_free -= wqe_n;
4695 * Packet data are completely inlined,
4696 * free the packet immediately.
4698 rte_pktmbuf_free_seg(loc->mbuf);
4699 } else if ((!MLX5_TXOFF_CONFIG(EMPW) ||
4700 MLX5_TXOFF_CONFIG(MPW)) &&
4703 * If minimal inlining is requested, the eMPW
4704 * feature should be disabled because data is
4705 * inlined into the Ethernet Segment, which
4706 * cannot contain inlined data for eMPW since
4707 * the segment is shared by all packets.
4709 struct mlx5_wqe_dseg *__rte_restrict dseg;
4714 * The inline-mode settings require
4715 * inlining the specified amount of
4716 * data bytes into the Ethernet Segment.
4717 * We should check the free space in the
4718 * WQE ring buffer to inline partially.
4721 MLX5_ASSERT(txq->inlen_send >= txq->inlen_mode);
4722 MLX5_ASSERT(inlen > txq->inlen_mode);
4723 MLX5_ASSERT(txq->inlen_mode >=
4724 MLX5_ESEG_MIN_INLINE_SIZE);
4726 * Check whether there are enough free WQEBBs:
4728 * - Ethernet Segment
4729 * - First Segment of inlined Ethernet data
4730 * - ... data continued ...
4731 * - Finishing Data Segment of pointer type
4733 ds = (MLX5_WQE_CSEG_SIZE +
4734 MLX5_WQE_ESEG_SIZE +
4735 MLX5_WQE_DSEG_SIZE +
4737 MLX5_ESEG_MIN_INLINE_SIZE +
4738 MLX5_WQE_DSEG_SIZE +
4739 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
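/*
 * Note: "ds" counts 16-byte WSEGs (under the usual segment sizes),
 * and (ds + 3) / 4 below converts it to whole WQEBBs, e.g. ds = 7
 * WSEGs occupy (7 + 3) / 4 = 2 WQEBBs.
 */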
4740 if (loc->wqe_free < ((ds + 3) / 4))
4741 return MLX5_TXCMP_CODE_EXIT;
4743 * Build the ordinary SEND WQE:
4745 * - Ethernet Segment, inline inlen_mode bytes
4746 * - Data Segment of pointer type
4748 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4749 loc->wqe_last = wqe;
4750 mlx5_tx_cseg_init(txq, loc, wqe, ds,
4751 MLX5_OPCODE_SEND, olx);
4752 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan,
4755 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
4756 txq->inlen_mode - vlan;
4757 inlen -= txq->inlen_mode;
4758 mlx5_tx_dseg_ptr(txq, loc, dseg,
4761 * The WQE is built, update the loop parameters
4762 * and go to the next packet.
4764 txq->wqe_ci += (ds + 3) / 4;
4765 loc->wqe_free -= (ds + 3) / 4;
4766 /* We have to store mbuf in elts.*/
4767 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
4768 txq->elts[txq->elts_head++ & txq->elts_m] =
4776 * Partially inlined packet data WQE: we have
4777 * some space in the title WQEBB, so we can fill it
4778 * with some packet data. It takes one WQEBB,
4779 * which is available, so there is no extra space check:
4780 * - Control Segment, SEND opcode
4781 * - Ethernet Segment, no VLAN insertion
4782 * - MLX5_ESEG_MIN_INLINE_SIZE bytes of Data
4783 * - Data Segment, pointer type
4785 * We also get here if VLAN insertion is not
4786 * supported by HW but inlining is enabled.
4789 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4790 loc->wqe_last = wqe;
4791 mlx5_tx_cseg_init(txq, loc, wqe, 4,
4792 MLX5_OPCODE_SEND, olx);
4793 mlx5_tx_eseg_dmin(txq, loc, wqe, vlan, olx);
4794 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
4795 MLX5_ESEG_MIN_INLINE_SIZE - vlan;
4797 * The length check is performed above, by
4798 * comparing with txq->inlen_send. We should
4799 * not get an overflow here.
4801 MLX5_ASSERT(inlen > MLX5_ESEG_MIN_INLINE_SIZE);
4802 dlen = inlen - MLX5_ESEG_MIN_INLINE_SIZE;
4803 mlx5_tx_dseg_ptr(txq, loc, &wqe->dseg[1],
4807 /* We have to store mbuf in elts.*/
4808 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
4809 txq->elts[txq->elts_head++ & txq->elts_m] =
4813 #ifdef MLX5_PMD_SOFT_COUNTERS
4814 /* Update sent data bytes counter. */
4815 txq->stats.obytes += vlan +
4816 rte_pktmbuf_data_len(loc->mbuf);
4820 * No inlining at all; it means that saving CPU cycles
4821 * was prioritized at configuration time, so we should not
4822 * copy any packet data into the WQE.
4824 * SEND WQE, one WQEBB:
4825 * - Control Segment, SEND opcode
4826 * - Ethernet Segment, optional VLAN, no inline
4827 * - Data Segment, pointer type
4830 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4831 loc->wqe_last = wqe;
4832 mlx5_tx_cseg_init(txq, loc, wqe, 3,
4833 MLX5_OPCODE_SEND, olx);
4834 mlx5_tx_eseg_none(txq, loc, wqe, olx);
4836 (txq, loc, &wqe->dseg[0],
4837 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
4838 rte_pktmbuf_data_len(loc->mbuf), olx);
4842 * We should not store the mbuf pointer in elts
4843 * if no inlining is configured; this is done
4844 * by the calling routine as a batch copy.
4846 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
4848 #ifdef MLX5_PMD_SOFT_COUNTERS
4849 /* Update sent data bytes counter. */
4850 txq->stats.obytes += rte_pktmbuf_data_len(loc->mbuf);
4851 if (MLX5_TXOFF_CONFIG(VLAN) &&
4852 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
4853 txq->stats.obytes +=
4854 sizeof(struct rte_vlan_hdr);
4859 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
4860 return MLX5_TXCMP_CODE_EXIT;
4861 loc->mbuf = *pkts++;
4863 rte_prefetch0(*pkts);
4864 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4865 if (unlikely(ret != MLX5_TXCMP_CODE_SINGLE))
4871 static __rte_always_inline enum mlx5_txcmp_code
4872 mlx5_tx_burst_single(struct mlx5_txq_data *__rte_restrict txq,
4873 struct rte_mbuf **__rte_restrict pkts,
4874 unsigned int pkts_n,
4875 struct mlx5_txq_local *__rte_restrict loc,
4878 enum mlx5_txcmp_code ret;
4880 ret = mlx5_tx_able_to_empw(txq, loc, olx, false);
4881 if (ret == MLX5_TXCMP_CODE_SINGLE)
4883 MLX5_ASSERT(ret == MLX5_TXCMP_CODE_EMPW);
4885 /* Optimize for inline/no inline eMPW send. */
4886 ret = (MLX5_TXOFF_CONFIG(INLINE)) ?
4887 mlx5_tx_burst_empw_inline
4888 (txq, pkts, pkts_n, loc, olx) :
4889 mlx5_tx_burst_empw_simple
4890 (txq, pkts, pkts_n, loc, olx);
4891 if (ret != MLX5_TXCMP_CODE_SINGLE)
4893 /* The resources to send one packet should remain. */
4894 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
4896 ret = mlx5_tx_burst_single_send(txq, pkts, pkts_n, loc, olx);
4897 MLX5_ASSERT(ret != MLX5_TXCMP_CODE_SINGLE);
4898 if (ret != MLX5_TXCMP_CODE_EMPW)
4900 /* The resources to send one packet should remain. */
4901 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
4906 * DPDK Tx callback template. This is the configured template
4907 * used to generate routines optimized for the specified offload setup.
4908 * One of these generated functions is chosen at SQ configuration
4912 * Generic pointer to TX queue structure.
4914 * Packets to transmit.
4916 * Number of packets in array.
4918 * Configured offloads mask, represents the bits of MLX5_TXOFF_CONFIG_xxx
4919 * values. Should be static to take compile-time static configuration
4923 * Number of packets successfully transmitted (<= pkts_n).
4925 static __rte_always_inline uint16_t
4926 mlx5_tx_burst_tmpl(struct mlx5_txq_data *__rte_restrict txq,
4927 struct rte_mbuf **__rte_restrict pkts,
4931 struct mlx5_txq_local loc;
4932 enum mlx5_txcmp_code ret;
4935 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
4936 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
4937 if (unlikely(!pkts_n))
4939 if (MLX5_TXOFF_CONFIG(INLINE))
4943 loc.wqe_last = NULL;
4946 loc.pkts_loop = loc.pkts_sent;
4948 * Check if there are some CQEs, and if so:
4949 * - process encountered errors
4950 * - process the completed WQEs
4951 * - free related mbufs
4952 * - doorbell the NIC about processed CQEs
4954 rte_prefetch0(*(pkts + loc.pkts_sent));
4955 mlx5_tx_handle_completion(txq, olx);
4957 * Calculate the number of available resources - elts and WQEs.
4958 * There are two possible scenarios:
4959 * - no data inlining into WQEs, one WQEBB may contain up to
4960 * four packets, in this case elts become the scarce resource
4961 * - data inlining into WQEs, one packet may require multiple
4962 * WQEBBs, the WQEs become the limiting factor.
4964 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
4965 loc.elts_free = txq->elts_s -
4966 (uint16_t)(txq->elts_head - txq->elts_tail);
4967 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
4968 loc.wqe_free = txq->wqe_s -
4969 (uint16_t)(txq->wqe_ci - txq->wqe_pi);
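/*
 * Illustrative example of the free resource math: with elts_s = 512,
 * elts_head = 1030 and elts_tail = 900 there are 130 mbufs in
 * flight, so elts_free = 512 - 130 = 382. The uint16_t subtraction
 * keeps the in-flight count correct across index wraparound, and the
 * same scheme applies to the wqe_ci/wqe_pi indices.
 */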
4970 if (unlikely(!loc.elts_free || !loc.wqe_free))
4974 * Fetch the packet from the array. Usually this is
4975 * the first packet in a series of multi/single
4978 loc.mbuf = *(pkts + loc.pkts_sent);
4979 /* Dedicated branch for multi-segment packets. */
4980 if (MLX5_TXOFF_CONFIG(MULTI) &&
4981 unlikely(NB_SEGS(loc.mbuf) > 1)) {
4983 * Multi-segment packet encountered.
4984 * Hardware is able to process it only
4985 * with SEND/TSO opcodes, one packet
4986 * per WQE, do it in dedicated routine.
4989 MLX5_ASSERT(loc.pkts_sent >= loc.pkts_copy);
4990 part = loc.pkts_sent - loc.pkts_copy;
4991 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
4993 * There are some single-segment mbufs not
4994 * stored in elts. The mbufs must be in the
4995 * same order as the WQEs, so we must copy the
4996 * mbufs to elts here, before the mbufs of the
4997 * coming multi-segment packet are appended.
4999 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy,
5001 loc.pkts_copy = loc.pkts_sent;
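/*
 * For example, with pkts_sent = 10 and pkts_copy = 3 at this point
 * the 7 single-segment mbuf pointers sent since the last copy are
 * appended to elts in WQE order before the mbufs of the coming
 * multi-segment packet are stored by mlx5_tx_burst_mseg() below.
 */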
5003 MLX5_ASSERT(pkts_n > loc.pkts_sent);
5004 ret = mlx5_tx_burst_mseg(txq, pkts, pkts_n, &loc, olx);
5005 if (!MLX5_TXOFF_CONFIG(INLINE))
5006 loc.pkts_copy = loc.pkts_sent;
5008 * These return code checks are supposed
5009 * to be optimized out due to routine inlining.
5011 if (ret == MLX5_TXCMP_CODE_EXIT) {
5013 * The routine returns this code when
5014 * all packets are sent or there are not
5015 * enough resources to complete the request.
5019 if (ret == MLX5_TXCMP_CODE_ERROR) {
5021 * The routine returns this code when
5022 * some error in the incoming packets
5025 txq->stats.oerrors++;
5028 if (ret == MLX5_TXCMP_CODE_SINGLE) {
5030 * The single-segment packet was encountered
5031 * in the array, try to send it in the
5032 * most optimized way, possibly engaging eMPW.
5034 goto enter_send_single;
5036 if (MLX5_TXOFF_CONFIG(TSO) &&
5037 ret == MLX5_TXCMP_CODE_TSO) {
5039 * The single-segment TSO packet was
5040 * encountered in the array.
5042 goto enter_send_tso;
5044 /* We must not get here. Something is going wrong. */
5046 txq->stats.oerrors++;
5049 /* Dedicated branch for single-segment TSO packets. */
5050 if (MLX5_TXOFF_CONFIG(TSO) &&
5051 unlikely(loc.mbuf->ol_flags & PKT_TX_TCP_SEG)) {
5053 * TSO might require a special way of inlining
5054 * (dedicated parameters) and is sent with the
5055 * MLX5_OPCODE_TSO opcode only, so handle this
5056 * in a dedicated branch.
5059 MLX5_ASSERT(NB_SEGS(loc.mbuf) == 1);
5060 MLX5_ASSERT(pkts_n > loc.pkts_sent);
5061 ret = mlx5_tx_burst_tso(txq, pkts, pkts_n, &loc, olx);
5063 * These return code checks are supposed
5064 * to be optimized out due to routine inlining.
5066 if (ret == MLX5_TXCMP_CODE_EXIT)
5068 if (ret == MLX5_TXCMP_CODE_ERROR) {
5069 txq->stats.oerrors++;
5072 if (ret == MLX5_TXCMP_CODE_SINGLE)
5073 goto enter_send_single;
5074 if (MLX5_TXOFF_CONFIG(MULTI) &&
5075 ret == MLX5_TXCMP_CODE_MULTI) {
5077 * The multi-segment packet was
5078 * encountered in the array.
5080 goto enter_send_multi;
5082 /* We must not get here. Something is going wrong. */
5084 txq->stats.oerrors++;
5088 * The dedicated branch for single-segment packets
5089 * without TSO. Often these can be sent using
5090 * MLX5_OPCODE_EMPW with multiple packets in one WQE.
5091 * The routine builds the WQEs till it encounters
5092 * a TSO or multi-segment packet (if these
5093 * offloads are requested at SQ configuration time).
5096 MLX5_ASSERT(pkts_n > loc.pkts_sent);
5097 ret = mlx5_tx_burst_single(txq, pkts, pkts_n, &loc, olx);
5099 * These return code checks are supposed
5100 * to be optimized out due to routine inlining.
5102 if (ret == MLX5_TXCMP_CODE_EXIT)
5104 if (ret == MLX5_TXCMP_CODE_ERROR) {
5105 txq->stats.oerrors++;
5108 if (MLX5_TXOFF_CONFIG(MULTI) &&
5109 ret == MLX5_TXCMP_CODE_MULTI) {
5111 * The multi-segment packet was
5112 * encountered in the array.
5114 goto enter_send_multi;
5116 if (MLX5_TXOFF_CONFIG(TSO) &&
5117 ret == MLX5_TXCMP_CODE_TSO) {
5119 * The single-segment TSO packet was
5120 * encountered in the array.
5122 goto enter_send_tso;
5124 /* We must not get here. Something is going wrong. */
5126 txq->stats.oerrors++;
5130 * Main Tx loop is completed, do the rest:
5131 * - set completion request if thresholds are reached
5132 * - doorbell the hardware
5133 * - copy the rest of mbufs to elts (if any)
5135 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE) ||
5136 loc.pkts_sent >= loc.pkts_copy);
5137 /* Take a shortcut if nothing is sent. */
5138 if (unlikely(loc.pkts_sent == loc.pkts_loop))
5140 /* Request CQE generation if limits are reached. */
5141 mlx5_tx_request_completion(txq, &loc, olx);
5143 * Ring QP doorbell immediately after WQE building completion
5144 * to improve latencies. The purely software-related data treatment
5145 * can be completed after the doorbell. Tx CQEs for this SQ are
5146 * processed in this thread only by polling.
5148 * The rdma core library can map the doorbell register in two ways,
5149 * depending on the environment variable "MLX5_SHUT_UP_BF":
5151 * - as regular cached memory, when the variable is either missing
5152 * or set to zero. This type of mapping may cause significant
5153 * doorbell register write latency and requires an explicit
5154 * memory write barrier to mitigate this issue and prevent
5157 * - as non-cached memory, when the variable is present and set to
5158 * a non-zero value. This type of mapping may cause a performance
5159 * impact under heavy load conditions but the explicit write
5160 * memory barrier is not required and it may improve core
5163 * - the legacy behaviour (prior to the 19.08 release) was to use
5164 * heuristics to decide whether the write memory barrier should
5165 * be performed. This behavior is supported by specifying
5166 * tx_db_nc=2; the write barrier is skipped if the application
5167 * provides the full recommended burst of packets, it is
5168 * assumed the next packets are coming and the write barrier
5169 * will be issued on the next burst (after descriptor writing,
5172 mlx5_tx_dbrec_cond_wmb(txq, loc.wqe_last, !txq->db_nc &&
5173 (!txq->db_heu || pkts_n % MLX5_TX_DEFAULT_BURST));
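/*
 * Per the condition above, the explicit write barrier is issued only
 * when the doorbell is mapped as cached memory (tx_db_nc == 0) and
 * either the heuristic mode is off or the burst size is not a whole
 * multiple of MLX5_TX_DEFAULT_BURST; in all other cases the barrier
 * is skipped as described in the comment above.
 */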
5174 /* Not all of the mbufs may be stored into elts yet. */
5175 part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent - loc.pkts_copy;
5176 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
5178 * There are some single-segment mbufs not stored in elts.
5179 * This can only happen if the last packet was single-segment.
5180 * The copying is gathered into one place because it is
5181 * a good opportunity to optimize that with SIMD.
5182 * Unfortunately, if inlining is enabled, gaps in the
5183 * pointer array may happen due to early freeing of the
5186 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy, part, olx);
5187 loc.pkts_copy = loc.pkts_sent;
5189 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
5190 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
5191 if (pkts_n > loc.pkts_sent) {
5193 * If the burst size is large there might not be enough CQEs
5194 * fetched from the completion queue and not enough resources
5195 * freed to send all the packets.
5200 #ifdef MLX5_PMD_SOFT_COUNTERS
5201 /* Increment sent packets counter. */
5202 txq->stats.opackets += loc.pkts_sent;
5204 if (MLX5_TXOFF_CONFIG(INLINE) && loc.mbuf_free)
5205 __mlx5_tx_free_mbuf(txq, pkts, loc.mbuf_free, olx);
5206 return loc.pkts_sent;
5209 /* Generate routines with Enhanced Multi-Packet Write support. */
5210 MLX5_TXOFF_DECL(full_empw,
5211 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_EMPW)
5213 MLX5_TXOFF_DECL(none_empw,
5214 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW)
5216 MLX5_TXOFF_DECL(md_empw,
5217 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5219 MLX5_TXOFF_DECL(mt_empw,
5220 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5221 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5223 MLX5_TXOFF_DECL(mtsc_empw,
5224 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5225 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5226 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5228 MLX5_TXOFF_DECL(mti_empw,
5229 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5230 MLX5_TXOFF_CONFIG_INLINE |
5231 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5233 MLX5_TXOFF_DECL(mtv_empw,
5234 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5235 MLX5_TXOFF_CONFIG_VLAN |
5236 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5238 MLX5_TXOFF_DECL(mtiv_empw,
5239 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5240 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5241 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5243 MLX5_TXOFF_DECL(sc_empw,
5244 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5245 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5247 MLX5_TXOFF_DECL(sci_empw,
5248 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5249 MLX5_TXOFF_CONFIG_INLINE |
5250 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5252 MLX5_TXOFF_DECL(scv_empw,
5253 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5254 MLX5_TXOFF_CONFIG_VLAN |
5255 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5257 MLX5_TXOFF_DECL(sciv_empw,
5258 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5259 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5260 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5262 MLX5_TXOFF_DECL(i_empw,
5263 MLX5_TXOFF_CONFIG_INLINE |
5264 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5266 MLX5_TXOFF_DECL(v_empw,
5267 MLX5_TXOFF_CONFIG_VLAN |
5268 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5270 MLX5_TXOFF_DECL(iv_empw,
5271 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5272 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5274 /* Generate routines without Enhanced Multi-Packet Write support. */
5275 MLX5_TXOFF_DECL(full,
5276 MLX5_TXOFF_CONFIG_FULL)
5278 MLX5_TXOFF_DECL(none,
5279 MLX5_TXOFF_CONFIG_NONE)
5282 MLX5_TXOFF_CONFIG_METADATA)
5285 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5286 MLX5_TXOFF_CONFIG_METADATA)
5288 MLX5_TXOFF_DECL(mtsc,
5289 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5290 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5291 MLX5_TXOFF_CONFIG_METADATA)
5293 MLX5_TXOFF_DECL(mti,
5294 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5295 MLX5_TXOFF_CONFIG_INLINE |
5296 MLX5_TXOFF_CONFIG_METADATA)
5299 MLX5_TXOFF_DECL(mtv,
5300 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5301 MLX5_TXOFF_CONFIG_VLAN |
5302 MLX5_TXOFF_CONFIG_METADATA)
5305 MLX5_TXOFF_DECL(mtiv,
5306 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5307 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5308 MLX5_TXOFF_CONFIG_METADATA)
5311 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5312 MLX5_TXOFF_CONFIG_METADATA)
5314 MLX5_TXOFF_DECL(sci,
5315 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5316 MLX5_TXOFF_CONFIG_INLINE |
5317 MLX5_TXOFF_CONFIG_METADATA)
5320 MLX5_TXOFF_DECL(scv,
5321 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5322 MLX5_TXOFF_CONFIG_VLAN |
5323 MLX5_TXOFF_CONFIG_METADATA)
5326 MLX5_TXOFF_DECL(sciv,
5327 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5328 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5329 MLX5_TXOFF_CONFIG_METADATA)
5332 MLX5_TXOFF_CONFIG_INLINE |
5333 MLX5_TXOFF_CONFIG_METADATA)
5336 MLX5_TXOFF_CONFIG_VLAN |
5337 MLX5_TXOFF_CONFIG_METADATA)
5340 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5341 MLX5_TXOFF_CONFIG_METADATA)
5343 /* Generate routines with timestamp scheduling. */
5344 MLX5_TXOFF_DECL(full_ts_nompw,
5345 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_TXPP)
5347 MLX5_TXOFF_DECL(full_ts_nompwi,
5348 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5349 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5350 MLX5_TXOFF_CONFIG_VLAN | MLX5_TXOFF_CONFIG_METADATA |
5351 MLX5_TXOFF_CONFIG_TXPP)
5353 MLX5_TXOFF_DECL(full_ts,
5354 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_TXPP |
5355 MLX5_TXOFF_CONFIG_EMPW)
5357 MLX5_TXOFF_DECL(full_ts_noi,
5358 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5359 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5360 MLX5_TXOFF_CONFIG_VLAN | MLX5_TXOFF_CONFIG_METADATA |
5361 MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)
5363 MLX5_TXOFF_DECL(none_ts,
5364 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_TXPP |
5365 MLX5_TXOFF_CONFIG_EMPW)
5367 MLX5_TXOFF_DECL(mdi_ts,
5368 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_METADATA |
5369 MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)
5371 MLX5_TXOFF_DECL(mti_ts,
5372 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5373 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_METADATA |
5374 MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)
5376 MLX5_TXOFF_DECL(mtiv_ts,
5377 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5378 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5379 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_TXPP |
5380 MLX5_TXOFF_CONFIG_EMPW)
5383 * Generate routines with Legacy Multi-Packet Write support.
5384 * This mode is supported by ConnectX-4 Lx only and imposes
5385 * offload limitations; the following are not supported:
5386 * - ACL/Flows (metadata becomes meaningless)
5387 * - WQE Inline headers
5388 * - SRIOV (E-Switch offloads)
5390 * - tunnel encapsulation/decapsulation
5393 MLX5_TXOFF_DECL(none_mpw,
5394 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW |
5395 MLX5_TXOFF_CONFIG_MPW)
5397 MLX5_TXOFF_DECL(mci_mpw,
5398 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5399 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5400 MLX5_TXOFF_CONFIG_MPW)
5402 MLX5_TXOFF_DECL(mc_mpw,
5403 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5404 MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW)
5406 MLX5_TXOFF_DECL(i_mpw,
5407 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5408 MLX5_TXOFF_CONFIG_MPW)
5411 * Array of declared and compiled Tx burst functions and the corresponding
5412 * supported offload sets. The array is used to select the Tx burst
5413 * function for the specified offload set at Tx queue configuration time.
5416 eth_tx_burst_t func;
5419 MLX5_TXOFF_INFO(full_empw,
5420 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5421 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5422 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5423 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5425 MLX5_TXOFF_INFO(none_empw,
5426 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW)
5428 MLX5_TXOFF_INFO(md_empw,
5429 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5431 MLX5_TXOFF_INFO(mt_empw,
5432 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5433 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5435 MLX5_TXOFF_INFO(mtsc_empw,
5436 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5437 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5438 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5440 MLX5_TXOFF_INFO(mti_empw,
5441 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5442 MLX5_TXOFF_CONFIG_INLINE |
5443 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5445 MLX5_TXOFF_INFO(mtv_empw,
5446 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5447 MLX5_TXOFF_CONFIG_VLAN |
5448 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5450 MLX5_TXOFF_INFO(mtiv_empw,
5451 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5452 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5453 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5455 MLX5_TXOFF_INFO(sc_empw,
5456 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5457 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5459 MLX5_TXOFF_INFO(sci_empw,
5460 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5461 MLX5_TXOFF_CONFIG_INLINE |
5462 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5464 MLX5_TXOFF_INFO(scv_empw,
5465 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5466 MLX5_TXOFF_CONFIG_VLAN |
5467 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5469 MLX5_TXOFF_INFO(sciv_empw,
5470 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5471 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5472 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5474 MLX5_TXOFF_INFO(i_empw,
5475 MLX5_TXOFF_CONFIG_INLINE |
5476 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5478 MLX5_TXOFF_INFO(v_empw,
5479 MLX5_TXOFF_CONFIG_VLAN |
5480 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5482 MLX5_TXOFF_INFO(iv_empw,
5483 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5484 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5486 MLX5_TXOFF_INFO(full_ts_nompw,
5487 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_TXPP)
5489 MLX5_TXOFF_INFO(full_ts_nompwi,
5490 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5491 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5492 MLX5_TXOFF_CONFIG_VLAN | MLX5_TXOFF_CONFIG_METADATA |
5493 MLX5_TXOFF_CONFIG_TXPP)
5495 MLX5_TXOFF_INFO(full_ts,
5496 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_TXPP |
5497 MLX5_TXOFF_CONFIG_EMPW)
5499 MLX5_TXOFF_INFO(full_ts_noi,
5500 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5501 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5502 MLX5_TXOFF_CONFIG_VLAN | MLX5_TXOFF_CONFIG_METADATA |
5503 MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)
5505 MLX5_TXOFF_INFO(none_ts,
5506 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_TXPP |
5507 MLX5_TXOFF_CONFIG_EMPW)
5509 MLX5_TXOFF_INFO(mdi_ts,
5510 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_METADATA |
5511 MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)
5513 MLX5_TXOFF_INFO(mti_ts,
5514 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5515 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_METADATA |
5516 MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)
5518 MLX5_TXOFF_INFO(mtiv_ts,
5519 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5520 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5521 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_TXPP |
5522 MLX5_TXOFF_CONFIG_EMPW)
5524 MLX5_TXOFF_INFO(full,
5525 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5526 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5527 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5528 MLX5_TXOFF_CONFIG_METADATA)
5530 MLX5_TXOFF_INFO(none,
5531 MLX5_TXOFF_CONFIG_NONE)
5534 MLX5_TXOFF_CONFIG_METADATA)
5537 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5538 MLX5_TXOFF_CONFIG_METADATA)
5540 MLX5_TXOFF_INFO(mtsc,
5541 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5542 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5543 MLX5_TXOFF_CONFIG_METADATA)
5545 MLX5_TXOFF_INFO(mti,
5546 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5547 MLX5_TXOFF_CONFIG_INLINE |
5548 MLX5_TXOFF_CONFIG_METADATA)
5550 MLX5_TXOFF_INFO(mtv,
5551 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5552 MLX5_TXOFF_CONFIG_VLAN |
5553 MLX5_TXOFF_CONFIG_METADATA)
5555 MLX5_TXOFF_INFO(mtiv,
5556 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5557 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5558 MLX5_TXOFF_CONFIG_METADATA)
5561 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5562 MLX5_TXOFF_CONFIG_METADATA)
5564 MLX5_TXOFF_INFO(sci,
5565 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5566 MLX5_TXOFF_CONFIG_INLINE |
5567 MLX5_TXOFF_CONFIG_METADATA)
5569 MLX5_TXOFF_INFO(scv,
5570 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5571 MLX5_TXOFF_CONFIG_VLAN |
5572 MLX5_TXOFF_CONFIG_METADATA)
5574 MLX5_TXOFF_INFO(sciv,
5575 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5576 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5577 MLX5_TXOFF_CONFIG_METADATA)
5580 MLX5_TXOFF_CONFIG_INLINE |
5581 MLX5_TXOFF_CONFIG_METADATA)
5584 MLX5_TXOFF_CONFIG_VLAN |
5585 MLX5_TXOFF_CONFIG_METADATA)
5588 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5589 MLX5_TXOFF_CONFIG_METADATA)
5591 MLX5_TXOFF_INFO(none_mpw,
5592 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW |
5593 MLX5_TXOFF_CONFIG_MPW)
5595 MLX5_TXOFF_INFO(mci_mpw,
5596 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5597 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5598 MLX5_TXOFF_CONFIG_MPW)
5600 MLX5_TXOFF_INFO(mc_mpw,
5601 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5602 MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW)
5604 MLX5_TXOFF_INFO(i_mpw,
5605 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5606 MLX5_TXOFF_CONFIG_MPW)
5610 * Configure the Tx function to use. The routine checks the configured
5611 * Tx offloads for the device and selects the appropriate Tx burst
5612 * routine. There are multiple Tx burst routines compiled from
5613 * the same template in the most optimal way for the dedicated
5617 * Pointer to private data structure.
5620 * Pointer to selected Tx burst function.
5623 mlx5_select_tx_function(struct rte_eth_dev *dev)
5625 struct mlx5_priv *priv = dev->data->dev_private;
5626 struct mlx5_dev_config *config = &priv->config;
5627 uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
5628 unsigned int diff = 0, olx = 0, i, m;
5631 if (tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
5632 /* We should support Multi-Segment Packets. */
5633 olx |= MLX5_TXOFF_CONFIG_MULTI;
5635 if (tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
5636 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
5637 DEV_TX_OFFLOAD_GRE_TNL_TSO |
5638 DEV_TX_OFFLOAD_IP_TNL_TSO |
5639 DEV_TX_OFFLOAD_UDP_TNL_TSO)) {
5640 /* We should support TCP Send Offload. */
5641 olx |= MLX5_TXOFF_CONFIG_TSO;
5643 if (tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO |
5644 DEV_TX_OFFLOAD_UDP_TNL_TSO |
5645 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
5646 /* We should support Software Parser for Tunnels. */
5647 olx |= MLX5_TXOFF_CONFIG_SWP;
5649 if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
5650 DEV_TX_OFFLOAD_UDP_CKSUM |
5651 DEV_TX_OFFLOAD_TCP_CKSUM |
5652 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
5653 /* We should support IP/TCP/UDP Checksums. */
5654 olx |= MLX5_TXOFF_CONFIG_CSUM;
5656 if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT) {
5657 /* We should support VLAN insertion. */
5658 olx |= MLX5_TXOFF_CONFIG_VLAN;
5660 if (tx_offloads & DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP &&
5661 rte_mbuf_dynflag_lookup
5662 (RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL) >= 0 &&
5663 rte_mbuf_dynfield_lookup
5664 (RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL) >= 0) {
5665 /* Offload configured, dynamic entities registered. */
5666 olx |= MLX5_TXOFF_CONFIG_TXPP;
5668 if (priv->txqs_n && (*priv->txqs)[0]) {
5669 struct mlx5_txq_data *txd = (*priv->txqs)[0];
5671 if (txd->inlen_send) {
5673 * Check the data inline requirements. Data inlining
5674 * is enabled on a per-device basis, so we can check
5675 * the first Tx queue only.
5677 * If the device does not support VLAN insertion in the WQE
5678 * and some queues are requested to perform VLAN
5679 * insertion offload then inlining must be enabled.
5681 olx |= MLX5_TXOFF_CONFIG_INLINE;
5684 if (config->mps == MLX5_MPW_ENHANCED &&
5685 config->txq_inline_min <= 0) {
5687 * The NIC supports Enhanced Multi-Packet Write
5688 * and does not require minimal inline data.
5690 olx |= MLX5_TXOFF_CONFIG_EMPW;
5692 if (rte_flow_dynf_metadata_avail()) {
5693 /* We should support Flow metadata. */
5694 olx |= MLX5_TXOFF_CONFIG_METADATA;
5696 if (config->mps == MLX5_MPW) {
5698 * The NIC supports Legacy Multi-Packet Write.
5699 * The MLX5_TXOFF_CONFIG_MPW controls the
5700 * descriptor building method in combination
5701 * with MLX5_TXOFF_CONFIG_EMPW.
5703 if (!(olx & (MLX5_TXOFF_CONFIG_TSO |
5704 MLX5_TXOFF_CONFIG_SWP |
5705 MLX5_TXOFF_CONFIG_VLAN |
5706 MLX5_TXOFF_CONFIG_METADATA)))
5707 olx |= MLX5_TXOFF_CONFIG_EMPW |
5708 MLX5_TXOFF_CONFIG_MPW;
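/*
 * Illustrative selection example (hypothetical offload set): suppose
 * only checksum offload is enabled on an eMPW capable port and flow
 * metadata is registered, so olx ends up as CSUM | METADATA | EMPW.
 * No table entry matches exactly, therefore the scan below keeps the
 * qualifying candidate with the fewest extra bits: sc_empw
 * (SWP | CSUM | METADATA | EMPW) carries a single not-requested
 * offload (SWP) and wins over broader entries such as mtsc_empw.
 */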
5711 * Scan the routines table to find the routine that
5712 * minimally satisfies the requested offloads.
5714 m = RTE_DIM(txoff_func);
5715 for (i = 0; i < RTE_DIM(txoff_func); i++) {
5718 tmp = txoff_func[i].olx;
5720 /* Meets requested offloads exactly.*/
5724 if ((tmp & olx) != olx) {
5725 /* Does not meet requested offloads at all. */
5728 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_MPW)
5729 /* Do not enable legacy MPW if not configured. */
5731 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_EMPW)
5732 /* Do not enable eMPW if not configured. */
5734 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_INLINE)
5735 /* Do not enable inlining if not configured. */
5737 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_TXPP)
5738 /* Do not enable scheduling if not configured. */
5741 * Some routine meets the requirements.
5742 * Check whether it has the minimal amount
5743 * of not-requested offloads.
5745 tmp = __builtin_popcountl(tmp & ~olx);
5746 if (m >= RTE_DIM(txoff_func) || tmp < diff) {
5747 /* First or better match, save and continue. */
5753 tmp = txoff_func[i].olx ^ txoff_func[m].olx;
5754 if (__builtin_ffsl(txoff_func[i].olx & ~tmp) <
5755 __builtin_ffsl(txoff_func[m].olx & ~tmp)) {
5756 /* The lighter not-requested offload wins. */
5761 if (m >= RTE_DIM(txoff_func)) {
5762 DRV_LOG(DEBUG, "port %u has no selected Tx function"
5763 " for requested offloads %04X",
5764 dev->data->port_id, olx);
5767 DRV_LOG(DEBUG, "port %u has selected Tx function"
5768 " supporting offloads %04X/%04X",
5769 dev->data->port_id, olx, txoff_func[m].olx);
5770 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MULTI)
5771 DRV_LOG(DEBUG, "\tMULTI (multi segment)");
5772 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_TSO)
5773 DRV_LOG(DEBUG, "\tTSO (TCP send offload)");
5774 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_SWP)
5775 DRV_LOG(DEBUG, "\tSWP (software parser)");
5776 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_CSUM)
5777 DRV_LOG(DEBUG, "\tCSUM (checksum offload)");
5778 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_INLINE)
5779 DRV_LOG(DEBUG, "\tINLIN (inline data)");
5780 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_VLAN)
5781 DRV_LOG(DEBUG, "\tVLANI (VLAN insertion)");
5782 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_METADATA)
5783 DRV_LOG(DEBUG, "\tMETAD (tx Flow metadata)");
5784 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_TXPP)
5785 DRV_LOG(DEBUG, "\tTXPP (tx Scheduling)");
5786 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_EMPW) {
5787 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MPW)
5788 DRV_LOG(DEBUG, "\tMPW (Legacy MPW)");
5790 DRV_LOG(DEBUG, "\tEMPW (Enhanced MPW)");
5792 return txoff_func[m].func;
5796 * DPDK callback to get the TX queue information
5799 * Pointer to the device structure.
5801 * @param tx_queue_id
5802 * Tx queue identifier.
5805 * Pointer to the TX queue information structure.
5812 mlx5_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
5813 struct rte_eth_txq_info *qinfo)
5815 struct mlx5_priv *priv = dev->data->dev_private;
5816 struct mlx5_txq_data *txq = (*priv->txqs)[tx_queue_id];
5817 struct mlx5_txq_ctrl *txq_ctrl =
5818 container_of(txq, struct mlx5_txq_ctrl, txq);
5822 qinfo->nb_desc = txq->elts_s;
5823 qinfo->conf.tx_thresh.pthresh = 0;
5824 qinfo->conf.tx_thresh.hthresh = 0;
5825 qinfo->conf.tx_thresh.wthresh = 0;
5826 qinfo->conf.tx_rs_thresh = 0;
5827 qinfo->conf.tx_free_thresh = 0;
5828 qinfo->conf.tx_deferred_start = txq_ctrl ? 0 : 1;
5829 qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
5833 * DPDK callback to get the TX packet burst mode information
5836 * Pointer to the device structure.
5838 * @param tx_queue_id
5839 * Tx queue identifier.
5842 * Pointer to the burst mode information.
5845 * 0 on success, -EINVAL on failure.
5849 mlx5_tx_burst_mode_get(struct rte_eth_dev *dev,
5850 uint16_t tx_queue_id,
5851 struct rte_eth_burst_mode *mode)
5853 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
5854 struct mlx5_priv *priv = dev->data->dev_private;
5855 struct mlx5_txq_data *txq = (*priv->txqs)[tx_queue_id];
5856 unsigned int i, olx;
5858 for (i = 0; i < RTE_DIM(txoff_func); i++) {
5859 if (pkt_burst == txoff_func[i].func) {
5860 olx = txoff_func[i].olx;
5861 snprintf(mode->info, sizeof(mode->info),
5862 "%s%s%s%s%s%s%s%s%s%s",
5863 (olx & MLX5_TXOFF_CONFIG_EMPW) ?
5864 ((olx & MLX5_TXOFF_CONFIG_MPW) ?
5865 "Legacy MPW" : "Enhanced MPW") : "No MPW",
5866 (olx & MLX5_TXOFF_CONFIG_MULTI) ?
5868 (olx & MLX5_TXOFF_CONFIG_TSO) ?
5870 (olx & MLX5_TXOFF_CONFIG_SWP) ?
5872 (olx & MLX5_TXOFF_CONFIG_CSUM) ?
5874 (olx & MLX5_TXOFF_CONFIG_INLINE) ?
5876 (olx & MLX5_TXOFF_CONFIG_VLAN) ?
5878 (olx & MLX5_TXOFF_CONFIG_METADATA) ?
5880 (olx & MLX5_TXOFF_CONFIG_TXPP) ?
5882 (txq && txq->fast_free) ?
5883 " + Fast Free" : "");