1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015-2019 Mellanox Technologies, Ltd
11 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
13 #pragma GCC diagnostic ignored "-Wpedantic"
15 #include <infiniband/verbs.h>
16 #include <infiniband/mlx5dv.h>
18 #pragma GCC diagnostic error "-Wpedantic"
22 #include <rte_mempool.h>
23 #include <rte_prefetch.h>
24 #include <rte_common.h>
25 #include <rte_branch_prediction.h>
26 #include <rte_ether.h>
27 #include <rte_cycles.h>
30 #include <mlx5_devx_cmds.h>
32 #include <mlx5_common.h>
34 #include "mlx5_defs.h"
37 #include "mlx5_utils.h"
38 #include "mlx5_rxtx.h"
39 #include "mlx5_autoconf.h"
41 /* TX burst subroutines return codes. */
42 enum mlx5_txcmp_code {
43 MLX5_TXCMP_CODE_EXIT = 0,
44 MLX5_TXCMP_CODE_ERROR,
45 MLX5_TXCMP_CODE_SINGLE,
46 MLX5_TXCMP_CODE_MULTI,
52 * These defines are used to configure Tx burst routine option set
53 * supported at compile time. Options that are not specified are optimized out
54 * because the corresponding if conditions can be evaluated at compile time.
55 * The offloads with a bigger runtime check overhead (i.e. that require more
56 * CPU cycles to skip) should have a bigger index - this is needed to select
57 * the best matching routine when there is no exact match and some offloads
58 * are not actually requested.
60 #define MLX5_TXOFF_CONFIG_MULTI (1u << 0) /* Multi-segment packets.*/
61 #define MLX5_TXOFF_CONFIG_TSO (1u << 1) /* TCP send offload supported.*/
62 #define MLX5_TXOFF_CONFIG_SWP (1u << 2) /* Tunnels/SW Parser offloads.*/
63 #define MLX5_TXOFF_CONFIG_CSUM (1u << 3) /* Check Sums offloaded. */
64 #define MLX5_TXOFF_CONFIG_INLINE (1u << 4) /* Data inlining supported. */
65 #define MLX5_TXOFF_CONFIG_VLAN (1u << 5) /* VLAN insertion supported.*/
66 #define MLX5_TXOFF_CONFIG_METADATA (1u << 6) /* Flow metadata. */
67 #define MLX5_TXOFF_CONFIG_EMPW (1u << 8) /* Enhanced MPW supported.*/
68 #define MLX5_TXOFF_CONFIG_MPW (1u << 9) /* Legacy MPW supported.*/
69 #define MLX5_TXOFF_CONFIG_TXPP (1u << 10) /* Scheduling on timestamp.*/
71 /* The most common offloads groups. */
72 #define MLX5_TXOFF_CONFIG_NONE 0
73 #define MLX5_TXOFF_CONFIG_FULL (MLX5_TXOFF_CONFIG_MULTI | \
74 MLX5_TXOFF_CONFIG_TSO | \
75 MLX5_TXOFF_CONFIG_SWP | \
76 MLX5_TXOFF_CONFIG_CSUM | \
77 MLX5_TXOFF_CONFIG_INLINE | \
78 MLX5_TXOFF_CONFIG_VLAN | \
79 MLX5_TXOFF_CONFIG_METADATA)
81 #define MLX5_TXOFF_CONFIG(mask) (olx & MLX5_TXOFF_CONFIG_##mask)
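/*
 * Illustrative sketch (not a definitive part of the template routines):
 * because "olx" is a compile-time constant inside the specialized burst
 * functions, tests built with MLX5_TXOFF_CONFIG() are resolved by the
 * compiler and the untaken branches are optimized away, e.g.:
 *
 *	if (MLX5_TXOFF_CONFIG(TSO) &&
 *	    (loc->mbuf->ol_flags & PKT_TX_TCP_SEG)) {
 *		... TSO-specific WQE building, compiled in only when the
 *		... routine was declared with the TSO bit set in olx ...
 *	}
 */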
83 #define MLX5_TXOFF_DECL(func, olx) \
84 static uint16_t mlx5_tx_burst_##func(void *txq, \
85 struct rte_mbuf **pkts, \
88 return mlx5_tx_burst_tmpl((struct mlx5_txq_data *)txq, \
89 pkts, pkts_n, (olx)); \
92 #define MLX5_TXOFF_INFO(func, olx) {mlx5_tx_burst_##func, olx},
94 static __rte_always_inline uint32_t
95 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe);
97 static __rte_always_inline int
98 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
99 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe);
101 static __rte_always_inline uint32_t
102 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe);
104 static __rte_always_inline void
105 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
106 volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res);
108 static __rte_always_inline void
109 mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx,
110 const unsigned int strd_n);
113 mlx5_queue_state_modify(struct rte_eth_dev *dev,
114 struct mlx5_mp_arg_queue_state_modify *sm);
117 mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *__rte_restrict tcp,
118 volatile struct mlx5_cqe *__rte_restrict cqe,
122 mlx5_lro_update_hdr(uint8_t *__rte_restrict padd,
123 volatile struct mlx5_cqe *__rte_restrict cqe,
126 uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
127 [0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
130 uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned;
131 uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned;
133 uint64_t rte_net_mlx5_dynf_inline_mask;
134 #define PKT_TX_DYNF_NOINLINE rte_net_mlx5_dynf_inline_mask
137 * Build a table to translate Rx completion flags to packet type.
139 * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
142 mlx5_set_ptype_table(void)
145 uint32_t (*p)[RTE_DIM(mlx5_ptype_table)] = &mlx5_ptype_table;
147 /* Last entry must not be overwritten, reserved for errored packet. */
148 for (i = 0; i < RTE_DIM(mlx5_ptype_table) - 1; ++i)
149 (*p)[i] = RTE_PTYPE_UNKNOWN;
151 * The index to the array should have:
152 * bit[1:0] = l3_hdr_type
153 * bit[4:2] = l4_hdr_type
154 * bit[5] = ip_frag
155 * bit[6] = tunneled
156 * bit[7] = outer_l3_type
159 (*p)[0x00] = RTE_PTYPE_L2_ETHER;
161 (*p)[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
162 RTE_PTYPE_L4_NONFRAG;
163 (*p)[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
164 RTE_PTYPE_L4_NONFRAG;
166 (*p)[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
168 (*p)[0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
171 (*p)[0x05] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
173 (*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
175 (*p)[0x0d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
177 (*p)[0x0e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
179 (*p)[0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
181 (*p)[0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
184 (*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
186 (*p)[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
188 /* Repeat with outer_l3_type being set. Just in case. */
189 (*p)[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
190 RTE_PTYPE_L4_NONFRAG;
191 (*p)[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
192 RTE_PTYPE_L4_NONFRAG;
193 (*p)[0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
195 (*p)[0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
197 (*p)[0x85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
199 (*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
201 (*p)[0x8d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
203 (*p)[0x8e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
205 (*p)[0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
207 (*p)[0x92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
209 (*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
211 (*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
214 (*p)[0x40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
215 (*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
216 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
217 RTE_PTYPE_INNER_L4_NONFRAG;
218 (*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
219 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
220 RTE_PTYPE_INNER_L4_NONFRAG;
221 (*p)[0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
222 (*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
223 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
224 RTE_PTYPE_INNER_L4_NONFRAG;
225 (*p)[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
226 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
227 RTE_PTYPE_INNER_L4_NONFRAG;
228 /* Tunneled - Fragmented */
229 (*p)[0x61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
230 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
231 RTE_PTYPE_INNER_L4_FRAG;
232 (*p)[0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
233 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
234 RTE_PTYPE_INNER_L4_FRAG;
235 (*p)[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
236 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
237 RTE_PTYPE_INNER_L4_FRAG;
238 (*p)[0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
239 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
240 RTE_PTYPE_INNER_L4_FRAG;
242 (*p)[0x45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
243 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
244 RTE_PTYPE_INNER_L4_TCP;
245 (*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
246 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
247 RTE_PTYPE_INNER_L4_TCP;
248 (*p)[0x4d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
249 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
250 RTE_PTYPE_INNER_L4_TCP;
251 (*p)[0x4e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
252 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
253 RTE_PTYPE_INNER_L4_TCP;
254 (*p)[0x51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
255 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
256 RTE_PTYPE_INNER_L4_TCP;
257 (*p)[0x52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
258 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
259 RTE_PTYPE_INNER_L4_TCP;
260 (*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
261 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
262 RTE_PTYPE_INNER_L4_TCP;
263 (*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
264 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
265 RTE_PTYPE_INNER_L4_TCP;
266 (*p)[0xcd] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
267 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
268 RTE_PTYPE_INNER_L4_TCP;
269 (*p)[0xce] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
270 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
271 RTE_PTYPE_INNER_L4_TCP;
272 (*p)[0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
273 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
274 RTE_PTYPE_INNER_L4_TCP;
275 (*p)[0xd2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
276 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
277 RTE_PTYPE_INNER_L4_TCP;
279 (*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
280 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
281 RTE_PTYPE_INNER_L4_UDP;
282 (*p)[0x4a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
283 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
284 RTE_PTYPE_INNER_L4_UDP;
285 (*p)[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
286 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
287 RTE_PTYPE_INNER_L4_UDP;
288 (*p)[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
289 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
290 RTE_PTYPE_INNER_L4_UDP;
294 * Build a table to translate the packet checksum offload requests to the Verbs checksum type.
297 mlx5_set_cksum_table(void)
303 * The index should have:
304 * bit[0] = PKT_TX_TCP_SEG
305 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
306 * bit[4] = PKT_TX_IP_CKSUM
307 * bit[8] = PKT_TX_OUTER_IP_CKSUM
310 for (i = 0; i < RTE_DIM(mlx5_cksum_table); ++i) {
313 /* Tunneled packet. */
314 if (i & (1 << 8)) /* Outer IP. */
315 v |= MLX5_ETH_WQE_L3_CSUM;
316 if (i & (1 << 4)) /* Inner IP. */
317 v |= MLX5_ETH_WQE_L3_INNER_CSUM;
318 if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
319 v |= MLX5_ETH_WQE_L4_INNER_CSUM;
322 if (i & (1 << 4)) /* IP. */
323 v |= MLX5_ETH_WQE_L3_CSUM;
324 if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
325 v |= MLX5_ETH_WQE_L4_CSUM;
327 mlx5_cksum_table[i] = v;
332 * Build a table to translate the mbuf packet type to the Verbs SWP type.
335 mlx5_set_swp_types_table(void)
341 * The index should have:
342 * bit[0:1] = PKT_TX_L4_MASK
343 * bit[4] = PKT_TX_IPV6
344 * bit[8] = PKT_TX_OUTER_IPV6
345 * bit[9] = PKT_TX_OUTER_UDP
347 for (i = 0; i < RTE_DIM(mlx5_swp_types_table); ++i) {
350 v |= MLX5_ETH_WQE_L3_OUTER_IPV6;
352 v |= MLX5_ETH_WQE_L4_OUTER_UDP;
354 v |= MLX5_ETH_WQE_L3_INNER_IPV6;
355 if ((i & 3) == (PKT_TX_UDP_CKSUM >> 52))
356 v |= MLX5_ETH_WQE_L4_INNER_UDP;
357 mlx5_swp_types_table[i] = v;
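/*
 * Worked example (assuming the elided conditions above follow the index
 * layout documented in the comment): a packet with PKT_TX_OUTER_IPV6, a UDP
 * tunnel (outer UDP) and an inner IPv4/UDP header requesting PKT_TX_UDP_CKSUM
 * maps to an index with bit[9], bit[8] and bit[1:0] = 3 set, so its table
 * entry becomes:
 *
 *	MLX5_ETH_WQE_L4_OUTER_UDP | MLX5_ETH_WQE_L3_OUTER_IPV6 |
 *	MLX5_ETH_WQE_L4_INNER_UDP
 */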
362 * Set Software Parser flags and offsets in Ethernet Segment of WQE.
363 * Flags must be initialized to zero beforehand.
366 * Pointer to burst routine local context.
368 * Pointer to store Software Parser flags
370 * Configured Tx offloads mask. It is fully defined at
371 * compile time and may be used for optimization.
374 * Software Parser offsets packed in dword.
375 * Software Parser flags are set by pointer.
377 static __rte_always_inline uint32_t
378 txq_mbuf_to_swp(struct mlx5_txq_local *__rte_restrict loc,
383 unsigned int idx, off;
386 if (!MLX5_TXOFF_CONFIG(SWP))
388 ol = loc->mbuf->ol_flags;
389 tunnel = ol & PKT_TX_TUNNEL_MASK;
391 * Check whether Software Parser is required.
392 * Only customized tunnels may ask for it.
394 if (likely(tunnel != PKT_TX_TUNNEL_UDP && tunnel != PKT_TX_TUNNEL_IP))
397 * The index should have:
398 * bit[0:1] = PKT_TX_L4_MASK
399 * bit[4] = PKT_TX_IPV6
400 * bit[8] = PKT_TX_OUTER_IPV6
401 * bit[9] = PKT_TX_OUTER_UDP
403 idx = (ol & (PKT_TX_L4_MASK | PKT_TX_IPV6 | PKT_TX_OUTER_IPV6)) >> 52;
404 idx |= (tunnel == PKT_TX_TUNNEL_UDP) ? (1 << 9) : 0;
405 *swp_flags = mlx5_swp_types_table[idx];
407 * Set offsets for SW parser. Since ConnectX-5, SW parser just
408 * complements HW parser. SW parser starts to engage only if HW parser
409 * can't reach a header. For the older devices, HW parser will not kick
410 * in if any of SWP offsets is set. Therefore, all of the L3 offsets
411 * should be set regardless of HW offload.
413 off = loc->mbuf->outer_l2_len;
414 if (MLX5_TXOFF_CONFIG(VLAN) && ol & PKT_TX_VLAN_PKT)
415 off += sizeof(struct rte_vlan_hdr);
416 set = (off >> 1) << 8; /* Outer L3 offset. */
417 off += loc->mbuf->outer_l3_len;
418 if (tunnel == PKT_TX_TUNNEL_UDP)
419 set |= off >> 1; /* Outer L4 offset. */
420 if (ol & (PKT_TX_IPV4 | PKT_TX_IPV6)) { /* Inner IP. */
421 const uint64_t csum = ol & PKT_TX_L4_MASK;
422 off += loc->mbuf->l2_len;
423 set |= (off >> 1) << 24; /* Inner L3 offset. */
424 if (csum == PKT_TX_TCP_CKSUM ||
425 csum == PKT_TX_UDP_CKSUM ||
426 (MLX5_TXOFF_CONFIG(TSO) && ol & PKT_TX_TCP_SEG)) {
427 off += loc->mbuf->l3_len;
428 set |= (off >> 1) << 16; /* Inner L4 offset. */
431 set = rte_cpu_to_le_32(set);
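/*
 * Layout of the packed offsets dword built above, each field is an offset
 * in 2-byte words from the start of the packet:
 *	bits [7:0]   - outer L4 offset (filled for UDP tunnels only)
 *	bits [15:8]  - outer L3 offset
 *	bits [23:16] - inner L4 offset
 *	bits [31:24] - inner L3 offset
 * Hypothetical example: outer_l2_len = 14 and outer_l3_len = 20 with a UDP
 * tunnel give 14 / 2 = 7 in bits [15:8] and 34 / 2 = 17 in bits [7:0].
 */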
436 * Convert the Checksum offloads to Verbs.
439 * Pointer to the mbuf.
442 * Converted checksum flags.
444 static __rte_always_inline uint8_t
445 txq_ol_cksum_to_cs(struct rte_mbuf *buf)
448 uint8_t is_tunnel = !!(buf->ol_flags & PKT_TX_TUNNEL_MASK);
449 const uint64_t ol_flags_mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK |
450 PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM;
453 * The index should have:
454 * bit[0] = PKT_TX_TCP_SEG
455 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
456 * bit[4] = PKT_TX_IP_CKSUM
457 * bit[8] = PKT_TX_OUTER_IP_CKSUM
460 idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (!!is_tunnel << 9);
461 return mlx5_cksum_table[idx];
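/*
 * Worked example (assuming the standard DPDK mbuf flag bit positions):
 * ol_flags = PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM on a non-tunneled packet
 * gives idx = ((1ULL << 54 | 1ULL << 52) >> 50) = 0x14, and
 * mlx5_cksum_table[0x14] was built above as
 * MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM.
 */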
465 * Internal function to compute the number of used descriptors in an RX queue
471 * The number of used Rx descriptors.
474 rx_queue_count(struct mlx5_rxq_data *rxq)
476 struct rxq_zip *zip = &rxq->zip;
477 volatile struct mlx5_cqe *cqe;
478 const unsigned int cqe_n = (1 << rxq->cqe_n);
479 const unsigned int cqe_cnt = cqe_n - 1;
483 /* if we are processing a compressed cqe */
485 used = zip->cqe_cnt - zip->ca;
491 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
492 while (check_cqe(cqe, cqe_n, cq_ci) != MLX5_CQE_STATUS_HW_OWN) {
496 op_own = cqe->op_own;
497 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
498 n = rte_be_to_cpu_32(cqe->byte_cnt);
503 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
505 used = RTE_MIN(used, (1U << rxq->elts_n) - 1);
510 * DPDK callback to check the status of an Rx descriptor.
515 * The index of the descriptor in the ring.
518 * The status of the Rx descriptor.
521 mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
523 struct mlx5_rxq_data *rxq = rx_queue;
524 struct mlx5_rxq_ctrl *rxq_ctrl =
525 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
526 struct rte_eth_dev *dev = ETH_DEV(rxq_ctrl->priv);
528 if (dev->rx_pkt_burst != mlx5_rx_burst) {
532 if (offset >= (1 << rxq->elts_n)) {
536 if (offset < rx_queue_count(rxq))
537 return RTE_ETH_RX_DESC_DONE;
538 return RTE_ETH_RX_DESC_AVAIL;
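/*
 * Usage sketch from the application side (generic ethdev API, the port,
 * queue and offset values below are arbitrary placeholders):
 *
 *	int st = rte_eth_rx_descriptor_status(port_id, queue_id, 0);
 *
 *	if (st == RTE_ETH_RX_DESC_DONE)
 *		... a received packet is waiting at this position ...
 *	else if (st == RTE_ETH_RX_DESC_AVAIL)
 *		... the descriptor is still available for the HW to fill ...
 */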
542 * DPDK callback to get the RX queue information
545 * Pointer to the device structure.
548 * Rx queue identifier.
551 * Pointer to the RX queue information structure.
558 mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
559 struct rte_eth_rxq_info *qinfo)
561 struct mlx5_priv *priv = dev->data->dev_private;
562 struct mlx5_rxq_data *rxq = (*priv->rxqs)[rx_queue_id];
563 struct mlx5_rxq_ctrl *rxq_ctrl =
564 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
568 qinfo->mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
569 rxq->mprq_mp : rxq->mp;
570 qinfo->conf.rx_thresh.pthresh = 0;
571 qinfo->conf.rx_thresh.hthresh = 0;
572 qinfo->conf.rx_thresh.wthresh = 0;
573 qinfo->conf.rx_free_thresh = rxq->rq_repl_thresh;
574 qinfo->conf.rx_drop_en = 1;
575 qinfo->conf.rx_deferred_start = rxq_ctrl ? 0 : 1;
576 qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
577 qinfo->scattered_rx = dev->data->scattered_rx;
578 qinfo->nb_desc = 1 << rxq->elts_n;
582 * DPDK callback to get the RX packet burst mode information
585 * Pointer to the device structure.
588 * Rx queue identifier.
591 * Pointer to the burst mode information.
594 * 0 on success, -EINVAL on failure.
598 mlx5_rx_burst_mode_get(struct rte_eth_dev *dev,
599 uint16_t rx_queue_id __rte_unused,
600 struct rte_eth_burst_mode *mode)
602 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
604 if (pkt_burst == mlx5_rx_burst) {
605 snprintf(mode->info, sizeof(mode->info), "%s", "Scalar");
606 } else if (pkt_burst == mlx5_rx_burst_mprq) {
607 snprintf(mode->info, sizeof(mode->info), "%s", "Multi-Packet RQ");
608 } else if (pkt_burst == mlx5_rx_burst_vec) {
609 #if defined RTE_ARCH_X86_64
610 snprintf(mode->info, sizeof(mode->info), "%s", "Vector SSE");
611 #elif defined RTE_ARCH_ARM64
612 snprintf(mode->info, sizeof(mode->info), "%s", "Vector Neon");
613 #elif defined RTE_ARCH_PPC_64
614 snprintf(mode->info, sizeof(mode->info), "%s", "Vector AltiVec");
625 * DPDK callback to get the number of used descriptors in a RX queue
628 * Pointer to the device structure.
634 * The number of used Rx descriptors on success,
635 * -EINVAL if the queue is invalid.
638 mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
640 struct mlx5_priv *priv = dev->data->dev_private;
641 struct mlx5_rxq_data *rxq;
643 if (dev->rx_pkt_burst != mlx5_rx_burst) {
647 rxq = (*priv->rxqs)[rx_queue_id];
652 return rx_queue_count(rxq);
655 #define MLX5_SYSTEM_LOG_DIR "/var/log"
657 * Dump debug information to log file.
662 * If not NULL this string is printed as a header to the output
663 * and the output will be in hexadecimal view.
665 * This is the buffer address to print out.
667 * The number of bytes to dump out.
670 mlx5_dump_debug_information(const char *fname, const char *hex_title,
671 const void *buf, unsigned int hex_len)
675 MKSTR(path, "%s/%s", MLX5_SYSTEM_LOG_DIR, fname);
676 fd = fopen(path, "a+");
678 DRV_LOG(WARNING, "cannot open %s for debug dump", path);
679 MKSTR(path2, "./%s", fname);
680 fd = fopen(path2, "a+");
682 DRV_LOG(ERR, "cannot open %s for debug dump", path2);
685 DRV_LOG(INFO, "New debug dump in file %s", path2);
687 DRV_LOG(INFO, "New debug dump in file %s", path);
690 rte_hexdump(fd, hex_title, buf, hex_len);
692 fprintf(fd, "%s", (const char *)buf);
693 fprintf(fd, "\n\n\n");
698 * Move QP from error state to running state and initialize indexes.
701 * Pointer to TX queue control structure.
704 * 0 on success, else -1.
707 tx_recover_qp(struct mlx5_txq_ctrl *txq_ctrl)
709 struct mlx5_mp_arg_queue_state_modify sm = {
711 .queue_id = txq_ctrl->txq.idx,
714 if (mlx5_queue_state_modify(ETH_DEV(txq_ctrl->priv), &sm))
716 txq_ctrl->txq.wqe_ci = 0;
717 txq_ctrl->txq.wqe_pi = 0;
718 txq_ctrl->txq.elts_comp = 0;
722 /* Return 1 if the error CQE has already been marked as seen, otherwise mark it and return 0. */
724 check_err_cqe_seen(volatile struct mlx5_err_cqe *err_cqe)
726 static const uint8_t magic[] = "seen";
730 for (i = 0; i < sizeof(magic); ++i)
731 if (!ret || err_cqe->rsvd1[i] != magic[i]) {
733 err_cqe->rsvd1[i] = magic[i];
742 * Pointer to TX queue structure.
744 * Pointer to the error CQE.
747 * Negative value if queue recovery failed, otherwise zero,
748 * meaning the error completion entry was handled successfully.
751 mlx5_tx_error_cqe_handle(struct mlx5_txq_data *__rte_restrict txq,
752 volatile struct mlx5_err_cqe *err_cqe)
754 if (err_cqe->syndrome != MLX5_CQE_SYNDROME_WR_FLUSH_ERR) {
755 const uint16_t wqe_m = ((1 << txq->wqe_n) - 1);
756 struct mlx5_txq_ctrl *txq_ctrl =
757 container_of(txq, struct mlx5_txq_ctrl, txq);
758 uint16_t new_wqe_pi = rte_be_to_cpu_16(err_cqe->wqe_counter);
759 int seen = check_err_cqe_seen(err_cqe);
761 if (!seen && txq_ctrl->dump_file_n <
762 txq_ctrl->priv->config.max_dump_files_num) {
763 MKSTR(err_str, "Unexpected CQE error syndrome "
764 "0x%02x CQN = %u SQN = %u wqe_counter = %u "
765 "wq_ci = %u cq_ci = %u", err_cqe->syndrome,
766 txq->cqe_s, txq->qp_num_8s >> 8,
767 rte_be_to_cpu_16(err_cqe->wqe_counter),
768 txq->wqe_ci, txq->cq_ci);
769 MKSTR(name, "dpdk_mlx5_port_%u_txq_%u_index_%u_%u",
770 PORT_ID(txq_ctrl->priv), txq->idx,
771 txq_ctrl->dump_file_n, (uint32_t)rte_rdtsc());
772 mlx5_dump_debug_information(name, NULL, err_str, 0);
773 mlx5_dump_debug_information(name, "MLX5 Error CQ:",
774 (const void *)((uintptr_t)
778 mlx5_dump_debug_information(name, "MLX5 Error SQ:",
779 (const void *)((uintptr_t)
783 txq_ctrl->dump_file_n++;
787 * Count errors in WQE units.
788 * Later it can be improved to count error packets,
789 * for example, by SQ parsing to find how many packets
790 * should be counted for each WQE.
792 txq->stats.oerrors += ((txq->wqe_ci & wqe_m) -
794 if (tx_recover_qp(txq_ctrl)) {
795 /* Recovering failed - retry later on the same WQE. */
798 /* Release all the remaining buffers. */
799 txq_free_elts(txq_ctrl);
805 * Translate RX completion flags to packet type.
808 * Pointer to RX queue structure.
812 * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
815 * Packet type for struct rte_mbuf.
817 static inline uint32_t
818 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
821 uint8_t pinfo = cqe->pkt_info;
822 uint16_t ptype = cqe->hdr_type_etc;
825 * The index to the array should have:
826 * bit[1:0] = l3_hdr_type
827 * bit[4:2] = l4_hdr_type
828 * bit[5] = ip_frag
829 * bit[6] = tunneled
830 * bit[7] = outer_l3_type
832 idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10);
833 return mlx5_ptype_table[idx] | rxq->tunnel * !!(idx & (1 << 6));
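/*
 * Worked example based on the computation above: if pkt_info is 0 and the
 * ptype value read from the CQE is 0x0800 (bits [11:10] = 2), then
 * idx = ((0 & 0x3) << 6) | ((0x0800 & 0xfc00) >> 10) = 0x02, selecting
 * RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG
 * from the table built in mlx5_set_ptype_table(); bit 6 is clear, so no
 * tunnel type is OR-ed in. The CQE field encoding itself comes from the
 * device PRM, the numbers here are illustrative only.
 */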
837 * Initialize Rx WQ and indexes.
840 * Pointer to RX queue structure.
843 mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
845 const unsigned int wqe_n = 1 << rxq->elts_n;
848 for (i = 0; (i != wqe_n); ++i) {
849 volatile struct mlx5_wqe_data_seg *scat;
853 if (mlx5_rxq_mprq_enabled(rxq)) {
854 struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[i];
856 scat = &((volatile struct mlx5_wqe_mprq *)
858 addr = (uintptr_t)mlx5_mprq_buf_addr(buf,
859 1 << rxq->strd_num_n);
860 byte_count = (1 << rxq->strd_sz_n) *
861 (1 << rxq->strd_num_n);
863 struct rte_mbuf *buf = (*rxq->elts)[i];
865 scat = &((volatile struct mlx5_wqe_data_seg *)
867 addr = rte_pktmbuf_mtod(buf, uintptr_t);
868 byte_count = DATA_LEN(buf);
870 /* scat->addr must be able to store a pointer. */
871 MLX5_ASSERT(sizeof(scat->addr) >= sizeof(uintptr_t));
872 *scat = (struct mlx5_wqe_data_seg){
873 .addr = rte_cpu_to_be_64(addr),
874 .byte_count = rte_cpu_to_be_32(byte_count),
875 .lkey = mlx5_rx_addr2mr(rxq, addr),
878 rxq->consumed_strd = 0;
879 rxq->decompressed = 0;
881 rxq->zip = (struct rxq_zip){
884 /* Update doorbell counter. */
885 rxq->rq_ci = wqe_n >> rxq->sges_n;
887 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
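/*
 * Example of the doorbell arithmetic above (illustrative sizes only): with
 * elts_n = 9 (512 elements) and sges_n = 2 (4 segments per packet) the RQ
 * is initialized with rq_ci = 512 >> 2 = 128 posted WQEs.
 */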
891 * Modify a Verbs/DevX queue state.
892 * This must be called from the primary process.
895 * Pointer to Ethernet device.
897 * State modify request parameters.
900 * 0 in case of success else non-zero value and rte_errno is set.
903 mlx5_queue_state_modify_primary(struct rte_eth_dev *dev,
904 const struct mlx5_mp_arg_queue_state_modify *sm)
907 struct mlx5_priv *priv = dev->data->dev_private;
910 struct mlx5_rxq_data *rxq = (*priv->rxqs)[sm->queue_id];
911 struct mlx5_rxq_ctrl *rxq_ctrl =
912 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
914 if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV) {
915 struct ibv_wq_attr mod = {
916 .attr_mask = IBV_WQ_ATTR_STATE,
917 .wq_state = sm->state,
920 ret = mlx5_glue->modify_wq(rxq_ctrl->obj->wq, &mod);
921 } else { /* rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ. */
922 struct mlx5_devx_modify_rq_attr rq_attr;
924 memset(&rq_attr, 0, sizeof(rq_attr));
925 if (sm->state == IBV_WQS_RESET) {
926 rq_attr.rq_state = MLX5_RQC_STATE_ERR;
927 rq_attr.state = MLX5_RQC_STATE_RST;
928 } else if (sm->state == IBV_WQS_RDY) {
929 rq_attr.rq_state = MLX5_RQC_STATE_RST;
930 rq_attr.state = MLX5_RQC_STATE_RDY;
931 } else if (sm->state == IBV_WQS_ERR) {
932 rq_attr.rq_state = MLX5_RQC_STATE_RDY;
933 rq_attr.state = MLX5_RQC_STATE_ERR;
935 ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq,
939 DRV_LOG(ERR, "Cannot change Rx WQ state to %u - %s",
940 sm->state, strerror(errno));
945 struct mlx5_txq_data *txq = (*priv->txqs)[sm->queue_id];
946 struct mlx5_txq_ctrl *txq_ctrl =
947 container_of(txq, struct mlx5_txq_ctrl, txq);
949 if (txq_ctrl->obj->type == MLX5_TXQ_OBJ_TYPE_DEVX_SQ) {
950 struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
952 /* Change queue state to reset. */
953 msq_attr.sq_state = MLX5_SQC_STATE_ERR;
954 msq_attr.state = MLX5_SQC_STATE_RST;
955 ret = mlx5_devx_cmd_modify_sq(txq_ctrl->obj->sq_devx,
958 DRV_LOG(ERR, "Cannot change the "
959 "Tx QP state to RESET %s",
964 /* Change queue state to ready. */
965 msq_attr.sq_state = MLX5_SQC_STATE_RST;
966 msq_attr.state = MLX5_SQC_STATE_RDY;
967 ret = mlx5_devx_cmd_modify_sq(txq_ctrl->obj->sq_devx,
970 DRV_LOG(ERR, "Cannot change the "
971 "Tx QP state to READY %s",
977 struct ibv_qp_attr mod = {
978 .qp_state = IBV_QPS_RESET,
979 .port_num = (uint8_t)priv->dev_port,
981 struct ibv_qp *qp = txq_ctrl->obj->qp;
984 (txq_ctrl->obj->type == MLX5_TXQ_OBJ_TYPE_IBV);
986 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
988 DRV_LOG(ERR, "Cannot change the "
989 "Tx QP state to RESET %s",
994 mod.qp_state = IBV_QPS_INIT;
995 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
997 DRV_LOG(ERR, "Cannot change the "
998 "Tx QP state to INIT %s",
1003 mod.qp_state = IBV_QPS_RTR;
1004 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
1006 DRV_LOG(ERR, "Cannot change the "
1007 "Tx QP state to RTR %s",
1012 mod.qp_state = IBV_QPS_RTS;
1013 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
1015 DRV_LOG(ERR, "Cannot change the "
1016 "Tx QP state to RTS %s",
1027 * Modify a Verbs queue state.
1030 * Pointer to Ethernet device.
1032 * State modify request parameters.
1035 * 0 in case of success else non-zero value.
1038 mlx5_queue_state_modify(struct rte_eth_dev *dev,
1039 struct mlx5_mp_arg_queue_state_modify *sm)
1041 struct mlx5_priv *priv = dev->data->dev_private;
1044 switch (rte_eal_process_type()) {
1045 case RTE_PROC_PRIMARY:
1046 ret = mlx5_queue_state_modify_primary(dev, sm);
1048 case RTE_PROC_SECONDARY:
1049 ret = mlx5_mp_req_queue_state_modify(&priv->mp_id, sm);
1058 * Handle a Rx error.
1059 * The function moves the RQ state to RESET when the first error CQE is
1060 * seen, then the caller's loop drains the CQ. When the CQ is empty,
1061 * it moves the RQ state to READY and re-initializes the RQ.
1062 * Identifying the next CQE and counting errors are the caller's responsibility.
1065 * Pointer to RX queue structure.
1067 * 1 when called from vectorized Rx burst, need to prepare mbufs for the RQ.
1068 * 0 when called from non-vectorized Rx burst.
1071 * -1 in case of recovery error, otherwise the CQE status.
1074 mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
1076 const uint16_t cqe_n = 1 << rxq->cqe_n;
1077 const uint16_t cqe_mask = cqe_n - 1;
1078 const unsigned int wqe_n = 1 << rxq->elts_n;
1079 struct mlx5_rxq_ctrl *rxq_ctrl =
1080 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
1082 volatile struct mlx5_cqe *cqe;
1083 volatile struct mlx5_err_cqe *err_cqe;
1085 .cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask],
1087 struct mlx5_mp_arg_queue_state_modify sm;
1090 switch (rxq->err_state) {
1091 case MLX5_RXQ_ERR_STATE_NO_ERROR:
1092 rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_RESET;
1094 case MLX5_RXQ_ERR_STATE_NEED_RESET:
1096 sm.queue_id = rxq->idx;
1097 sm.state = IBV_WQS_RESET;
1098 if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv), &sm))
1100 if (rxq_ctrl->dump_file_n <
1101 rxq_ctrl->priv->config.max_dump_files_num) {
1102 MKSTR(err_str, "Unexpected CQE error syndrome "
1103 "0x%02x CQN = %u RQN = %u wqe_counter = %u"
1104 " rq_ci = %u cq_ci = %u", u.err_cqe->syndrome,
1105 rxq->cqn, rxq_ctrl->wqn,
1106 rte_be_to_cpu_16(u.err_cqe->wqe_counter),
1107 rxq->rq_ci << rxq->sges_n, rxq->cq_ci);
1108 MKSTR(name, "dpdk_mlx5_port_%u_rxq_%u_%u",
1109 rxq->port_id, rxq->idx, (uint32_t)rte_rdtsc());
1110 mlx5_dump_debug_information(name, NULL, err_str, 0);
1111 mlx5_dump_debug_information(name, "MLX5 Error CQ:",
1112 (const void *)((uintptr_t)
1114 sizeof(*u.cqe) * cqe_n);
1115 mlx5_dump_debug_information(name, "MLX5 Error RQ:",
1116 (const void *)((uintptr_t)
1119 rxq_ctrl->dump_file_n++;
1121 rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_READY;
1123 case MLX5_RXQ_ERR_STATE_NEED_READY:
1124 ret = check_cqe(u.cqe, cqe_n, rxq->cq_ci);
1125 if (ret == MLX5_CQE_STATUS_HW_OWN) {
1127 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1130 * The RQ consumer index must be zeroed while moving
1131 * from RESET state to RDY state.
1133 *rxq->rq_db = rte_cpu_to_be_32(0);
1136 sm.queue_id = rxq->idx;
1137 sm.state = IBV_WQS_RDY;
1138 if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv),
1142 const uint16_t q_mask = wqe_n - 1;
1144 struct rte_mbuf **elt;
1146 unsigned int n = wqe_n - (rxq->rq_ci -
1149 for (i = 0; i < (int)n; ++i) {
1150 elt_idx = (rxq->rq_ci + i) & q_mask;
1151 elt = &(*rxq->elts)[elt_idx];
1152 *elt = rte_mbuf_raw_alloc(rxq->mp);
1154 for (i--; i >= 0; --i) {
1155 elt_idx = (rxq->rq_ci +
1159 rte_pktmbuf_free_seg
1165 for (i = 0; i < (int)wqe_n; ++i) {
1166 elt = &(*rxq->elts)[i];
1168 (uint16_t)((*elt)->buf_len -
1169 rte_pktmbuf_headroom(*elt));
1171 /* Padding with a fake mbuf for vec Rx. */
1172 for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
1173 (*rxq->elts)[wqe_n + i] =
1176 mlx5_rxq_initialize(rxq);
1177 rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
1186 * Get size of the next packet for a given CQE. For compressed CQEs, the
1187 * consumer index is updated only once all packets of the current one have
1191 * Pointer to RX queue.
1195 * Store pointer to mini-CQE if compressed. Otherwise, the pointer is not
1199 * 0 in case of empty CQE, otherwise the packet size in bytes.
1202 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
1203 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe)
1205 struct rxq_zip *zip = &rxq->zip;
1206 uint16_t cqe_n = cqe_cnt + 1;
1212 /* Process compressed data in the CQE and mini arrays. */
1214 volatile struct mlx5_mini_cqe8 (*mc)[8] =
1215 (volatile struct mlx5_mini_cqe8 (*)[8])
1216 (uintptr_t)(&(*rxq->cqes)[zip->ca &
1219 len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt);
1220 *mcqe = &(*mc)[zip->ai & 7];
1221 if ((++zip->ai & 7) == 0) {
1222 /* Invalidate consumed CQEs */
1225 while (idx != end) {
1226 (*rxq->cqes)[idx & cqe_cnt].op_own =
1227 MLX5_CQE_INVALIDATE;
1231 * Increment consumer index to skip the number
1232 * of CQEs consumed. Hardware leaves holes in
1233 * the CQ ring for software use.
1238 if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) {
1239 /* Invalidate the rest */
1243 while (idx != end) {
1244 (*rxq->cqes)[idx & cqe_cnt].op_own =
1245 MLX5_CQE_INVALIDATE;
1248 rxq->cq_ci = zip->cq_ci;
1252 * No compressed data, get next CQE and verify if it is
1259 ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
1260 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
1261 if (unlikely(ret == MLX5_CQE_STATUS_ERR ||
1263 ret = mlx5_rx_err_handle(rxq, 0);
1264 if (ret == MLX5_CQE_STATUS_HW_OWN ||
1272 op_own = cqe->op_own;
1273 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) {
1274 volatile struct mlx5_mini_cqe8 (*mc)[8] =
1275 (volatile struct mlx5_mini_cqe8 (*)[8])
1276 (uintptr_t)(&(*rxq->cqes)
1280 /* Fix endianness. */
1281 zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
1283 * Current mini array position is the one
1284 * returned by check_cqe().
1286 * If completion comprises several mini arrays,
1287 * as a special case the second one is located
1288 * 7 CQEs after the initial CQE instead of 8
1289 * for subsequent ones.
1291 zip->ca = rxq->cq_ci;
1292 zip->na = zip->ca + 7;
1293 /* Compute the next non compressed CQE. */
1295 zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
1296 /* Get packet size to return. */
1297 len = rte_be_to_cpu_32((*mc)[0].byte_cnt);
1300 /* Prefetch all to be invalidated */
1303 while (idx != end) {
1304 rte_prefetch0(&(*rxq->cqes)[(idx) &
1309 len = rte_be_to_cpu_32(cqe->byte_cnt);
1312 if (unlikely(rxq->err_state)) {
1313 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1314 ++rxq->stats.idropped;
1322 * Translate RX completion flags to offload flags.
1328 * Offload flags (ol_flags) for struct rte_mbuf.
1330 static inline uint32_t
1331 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe)
1333 uint32_t ol_flags = 0;
1334 uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);
1338 MLX5_CQE_RX_L3_HDR_VALID,
1339 PKT_RX_IP_CKSUM_GOOD) |
1341 MLX5_CQE_RX_L4_HDR_VALID,
1342 PKT_RX_L4_CKSUM_GOOD);
1347 * Fill in mbuf fields from RX completion flags.
1348 * Note that pkt->ol_flags should be initialized outside of this function.
1351 * Pointer to RX queue.
1356 * @param rss_hash_res
1357 * Packet RSS Hash result.
1360 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
1361 volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res)
1363 /* Update packet information. */
1364 pkt->packet_type = rxq_cq_to_pkt_type(rxq, cqe);
1365 if (rss_hash_res && rxq->rss_hash) {
1366 pkt->hash.rss = rss_hash_res;
1367 pkt->ol_flags |= PKT_RX_RSS_HASH;
1369 if (rxq->mark && MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) {
1370 pkt->ol_flags |= PKT_RX_FDIR;
1371 if (cqe->sop_drop_qpn !=
1372 rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) {
1373 uint32_t mark = cqe->sop_drop_qpn;
1375 pkt->ol_flags |= PKT_RX_FDIR_ID;
1376 pkt->hash.fdir.hi = mlx5_flow_mark_get(mark);
1379 if (rxq->dynf_meta && cqe->flow_table_metadata) {
1380 pkt->ol_flags |= rxq->flow_meta_mask;
1381 *RTE_MBUF_DYNFIELD(pkt, rxq->flow_meta_offset, uint32_t *) =
1382 cqe->flow_table_metadata;
1385 pkt->ol_flags |= rxq_cq_to_ol_flags(cqe);
1386 if (rxq->vlan_strip &&
1387 (cqe->hdr_type_etc & rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) {
1388 pkt->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1389 pkt->vlan_tci = rte_be_to_cpu_16(cqe->vlan_info);
1391 if (rxq->hw_timestamp) {
1392 uint64_t ts = rte_be_to_cpu_64(cqe->timestamp);
1394 if (rxq->rt_timestamp)
1395 ts = mlx5_txpp_convert_rx_ts(rxq->sh, ts);
1396 pkt->timestamp = ts;
1397 pkt->ol_flags |= PKT_RX_TIMESTAMP;
1402 * DPDK callback for RX.
1405 * Generic pointer to RX queue structure.
1407 * Array to store received packets.
1409 * Maximum number of packets in array.
1412 * Number of packets successfully received (<= pkts_n).
1415 mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1417 struct mlx5_rxq_data *rxq = dpdk_rxq;
1418 const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1;
1419 const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1;
1420 const unsigned int sges_n = rxq->sges_n;
1421 struct rte_mbuf *pkt = NULL;
1422 struct rte_mbuf *seg = NULL;
1423 volatile struct mlx5_cqe *cqe =
1424 &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1426 unsigned int rq_ci = rxq->rq_ci << sges_n;
1427 int len = 0; /* keep its value across iterations. */
1430 unsigned int idx = rq_ci & wqe_cnt;
1431 volatile struct mlx5_wqe_data_seg *wqe =
1432 &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx];
1433 struct rte_mbuf *rep = (*rxq->elts)[idx];
1434 volatile struct mlx5_mini_cqe8 *mcqe = NULL;
1435 uint32_t rss_hash_res;
1443 rep = rte_mbuf_raw_alloc(rxq->mp);
1444 if (unlikely(rep == NULL)) {
1445 ++rxq->stats.rx_nombuf;
1448 * no buffers before we even started,
1449 * bail out silently.
1453 while (pkt != seg) {
1454 MLX5_ASSERT(pkt != (*rxq->elts)[idx]);
1458 rte_mbuf_raw_free(pkt);
1464 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1465 len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe);
1467 rte_mbuf_raw_free(rep);
1471 MLX5_ASSERT(len >= (rxq->crc_present << 2));
1472 pkt->ol_flags &= EXT_ATTACHED_MBUF;
1473 /* If compressed, take hash result from mini-CQE. */
1474 rss_hash_res = rte_be_to_cpu_32(mcqe == NULL ?
1476 mcqe->rx_hash_result);
1477 rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
1478 if (rxq->crc_present)
1479 len -= RTE_ETHER_CRC_LEN;
1481 if (cqe->lro_num_seg > 1) {
1483 (rte_pktmbuf_mtod(pkt, uint8_t *), cqe,
1485 pkt->ol_flags |= PKT_RX_LRO;
1486 pkt->tso_segsz = len / cqe->lro_num_seg;
1489 DATA_LEN(rep) = DATA_LEN(seg);
1490 PKT_LEN(rep) = PKT_LEN(seg);
1491 SET_DATA_OFF(rep, DATA_OFF(seg));
1492 PORT(rep) = PORT(seg);
1493 (*rxq->elts)[idx] = rep;
1495 * Fill NIC descriptor with the new buffer. The lkey and size
1496 * of the buffers are already known, only the buffer address
1499 wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
1500 /* If there's only one MR, no need to replace LKey in WQE. */
1501 if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
1502 wqe->lkey = mlx5_rx_mb2mr(rxq, rep);
1503 if (len > DATA_LEN(seg)) {
1504 len -= DATA_LEN(seg);
1509 DATA_LEN(seg) = len;
1510 #ifdef MLX5_PMD_SOFT_COUNTERS
1511 /* Increment bytes counter. */
1512 rxq->stats.ibytes += PKT_LEN(pkt);
1514 /* Return packet. */
1519 /* Align consumer index to the next stride. */
1524 if (unlikely((i == 0) && ((rq_ci >> sges_n) == rxq->rq_ci)))
1526 /* Update the consumer index. */
1527 rxq->rq_ci = rq_ci >> sges_n;
1529 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1531 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
1532 #ifdef MLX5_PMD_SOFT_COUNTERS
1533 /* Increment packets counter. */
1534 rxq->stats.ipackets += i;
1540 * Update LRO packet TCP header.
1541 * The HW LRO feature doesn't update the TCP header after coalescing the
1542 * TCP segments but supplies information in the CQE so that SW can fill it in.
1545 * Pointer to the TCP header.
1547 * Pointer to the completion entry.
1549 * The L3 pseudo-header checksum.
1552 mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *__rte_restrict tcp,
1553 volatile struct mlx5_cqe *__rte_restrict cqe,
1556 uint8_t l4_type = (rte_be_to_cpu_16(cqe->hdr_type_etc) &
1557 MLX5_CQE_L4_TYPE_MASK) >> MLX5_CQE_L4_TYPE_SHIFT;
1559 * The HW calculates only the TCP payload checksum, need to complete
1560 * the TCP header checksum and the L3 pseudo-header checksum.
1562 uint32_t csum = phcsum + cqe->csum;
1564 if (l4_type == MLX5_L4_HDR_TYPE_TCP_EMPTY_ACK ||
1565 l4_type == MLX5_L4_HDR_TYPE_TCP_WITH_ACL) {
1566 tcp->tcp_flags |= RTE_TCP_ACK_FLAG;
1567 tcp->recv_ack = cqe->lro_ack_seq_num;
1568 tcp->rx_win = cqe->lro_tcp_win;
1570 if (cqe->lro_tcppsh_abort_dupack & MLX5_CQE_LRO_PUSH_MASK)
1571 tcp->tcp_flags |= RTE_TCP_PSH_FLAG;
1573 csum += rte_raw_cksum(tcp, (tcp->data_off >> 4) * 4);
1574 csum = ((csum & 0xffff0000) >> 16) + (csum & 0xffff);
1575 csum = (~csum) & 0xffff;
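/*
 * Folding example for the two lines above (16-bit ones' complement
 * arithmetic): csum = 0x1A0F3 folds to 0x1 + 0xA0F3 = 0xA0F4 and
 * ~0xA0F4 & 0xffff = 0x5F0B is the final TCP checksum value.
 */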
1582 * Update LRO packet headers.
1583 * The HW LRO feature doesn't update the L3/TCP headers after coalescing the
1584 * TCP segments but supplies information in the CQE so that SW can fill them in.
1587 * The packet address.
1589 * Pointer to the completion entry.
1591 * The packet length.
1594 mlx5_lro_update_hdr(uint8_t *__rte_restrict padd,
1595 volatile struct mlx5_cqe *__rte_restrict cqe,
1599 struct rte_ether_hdr *eth;
1600 struct rte_vlan_hdr *vlan;
1601 struct rte_ipv4_hdr *ipv4;
1602 struct rte_ipv6_hdr *ipv6;
1603 struct rte_tcp_hdr *tcp;
1608 uint16_t proto = h.eth->ether_type;
1612 while (proto == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
1613 proto == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
1614 proto = h.vlan->eth_proto;
1617 if (proto == RTE_BE16(RTE_ETHER_TYPE_IPV4)) {
1618 h.ipv4->time_to_live = cqe->lro_min_ttl;
1619 h.ipv4->total_length = rte_cpu_to_be_16(len - (h.hdr - padd));
1620 h.ipv4->hdr_checksum = 0;
1621 h.ipv4->hdr_checksum = rte_ipv4_cksum(h.ipv4);
1622 phcsum = rte_ipv4_phdr_cksum(h.ipv4, 0);
1625 h.ipv6->hop_limits = cqe->lro_min_ttl;
1626 h.ipv6->payload_len = rte_cpu_to_be_16(len - (h.hdr - padd) -
1628 phcsum = rte_ipv6_phdr_cksum(h.ipv6, 0);
1631 mlx5_lro_update_tcp_hdr(h.tcp, cqe, phcsum);
1635 mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
1637 struct mlx5_mprq_buf *buf = opaque;
1639 if (rte_atomic16_read(&buf->refcnt) == 1) {
1640 rte_mempool_put(buf->mp, buf);
1641 } else if (rte_atomic16_add_return(&buf->refcnt, -1) == 0) {
1642 rte_atomic16_set(&buf->refcnt, 1);
1643 rte_mempool_put(buf->mp, buf);
1648 mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf)
1650 mlx5_mprq_buf_free_cb(NULL, buf);
1654 mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx,
1655 const unsigned int strd_n)
1657 struct mlx5_mprq_buf *rep = rxq->mprq_repl;
1658 volatile struct mlx5_wqe_data_seg *wqe =
1659 &((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg;
1662 MLX5_ASSERT(rep != NULL);
1663 /* Replace MPRQ buf. */
1664 (*rxq->mprq_bufs)[rq_idx] = rep;
1666 addr = mlx5_mprq_buf_addr(rep, strd_n);
1667 wqe->addr = rte_cpu_to_be_64((uintptr_t)addr);
1668 /* If there's only one MR, no need to replace LKey in WQE. */
1669 if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
1670 wqe->lkey = mlx5_rx_addr2mr(rxq, (uintptr_t)addr);
1671 /* Stash a mbuf for next replacement. */
1672 if (likely(!rte_mempool_get(rxq->mprq_mp, (void **)&rep)))
1673 rxq->mprq_repl = rep;
1675 rxq->mprq_repl = NULL;
1679 * DPDK callback for RX with Multi-Packet RQ support.
1682 * Generic pointer to RX queue structure.
1684 * Array to store received packets.
1686 * Maximum number of packets in array.
1689 * Number of packets successfully received (<= pkts_n).
1692 mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1694 struct mlx5_rxq_data *rxq = dpdk_rxq;
1695 const unsigned int strd_n = 1 << rxq->strd_num_n;
1696 const unsigned int strd_sz = 1 << rxq->strd_sz_n;
1697 const unsigned int strd_shift =
1698 MLX5_MPRQ_STRIDE_SHIFT_BYTE * rxq->strd_shift_en;
1699 const unsigned int cq_mask = (1 << rxq->cqe_n) - 1;
1700 const unsigned int wq_mask = (1 << rxq->elts_n) - 1;
1701 volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
1703 uint32_t rq_ci = rxq->rq_ci;
1704 uint16_t consumed_strd = rxq->consumed_strd;
1705 struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
1707 while (i < pkts_n) {
1708 struct rte_mbuf *pkt;
1716 int32_t hdrm_overlap;
1717 volatile struct mlx5_mini_cqe8 *mcqe = NULL;
1718 uint32_t rss_hash_res = 0;
1720 if (consumed_strd == strd_n) {
1721 /* Replace WQE only if the buffer is still in use. */
1722 if (rte_atomic16_read(&buf->refcnt) > 1) {
1723 mprq_buf_replace(rxq, rq_ci & wq_mask, strd_n);
1724 /* Release the old buffer. */
1725 mlx5_mprq_buf_free(buf);
1726 } else if (unlikely(rxq->mprq_repl == NULL)) {
1727 struct mlx5_mprq_buf *rep;
1730 * Currently, the MPRQ mempool is out of buffers
1731 * and memcpy is done regardless of the Rx
1732 * packet size. Retry allocation to get back to
1735 if (!rte_mempool_get(rxq->mprq_mp,
1737 rxq->mprq_repl = rep;
1739 /* Advance to the next WQE. */
1742 buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
1744 cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
1745 ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe);
1749 strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
1750 MLX5_MPRQ_STRIDE_NUM_SHIFT;
1751 MLX5_ASSERT(strd_cnt);
1752 consumed_strd += strd_cnt;
1753 if (byte_cnt & MLX5_MPRQ_FILLER_MASK)
1756 rss_hash_res = rte_be_to_cpu_32(cqe->rx_hash_res);
1757 strd_idx = rte_be_to_cpu_16(cqe->wqe_counter);
1759 /* mini-CQE for MPRQ doesn't have hash result. */
1760 strd_idx = rte_be_to_cpu_16(mcqe->stride_idx);
1762 MLX5_ASSERT(strd_idx < strd_n);
1763 MLX5_ASSERT(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) &
1765 pkt = rte_pktmbuf_alloc(rxq->mp);
1766 if (unlikely(pkt == NULL)) {
1767 ++rxq->stats.rx_nombuf;
1770 len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
1771 MLX5_ASSERT((int)len >= (rxq->crc_present << 2));
1772 if (rxq->crc_present)
1773 len -= RTE_ETHER_CRC_LEN;
1774 offset = strd_idx * strd_sz + strd_shift;
1775 addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf, strd_n), offset);
1776 hdrm_overlap = len + RTE_PKTMBUF_HEADROOM - strd_cnt * strd_sz;
1778 * Memcpy packets to the target mbuf if:
1779 * - The size of the packet is smaller than mprq_max_memcpy_len.
1780 * - The mempool for Multi-Packet RQ is out of buffers.
1781 * - The packet's stride overlaps the next packet's headroom and scatter is off.
1783 if (len <= rxq->mprq_max_memcpy_len ||
1784 rxq->mprq_repl == NULL ||
1785 (hdrm_overlap > 0 && !rxq->strd_scatter_en)) {
1786 if (likely(rte_pktmbuf_tailroom(pkt) >= len)) {
1787 rte_memcpy(rte_pktmbuf_mtod(pkt, void *),
1789 DATA_LEN(pkt) = len;
1790 } else if (rxq->strd_scatter_en) {
1791 struct rte_mbuf *prev = pkt;
1793 RTE_MIN(rte_pktmbuf_tailroom(pkt), len);
1794 uint32_t rem_len = len - seg_len;
1796 rte_memcpy(rte_pktmbuf_mtod(pkt, void *),
1798 DATA_LEN(pkt) = seg_len;
1800 struct rte_mbuf *next =
1801 rte_pktmbuf_alloc(rxq->mp);
1803 if (unlikely(next == NULL)) {
1804 rte_pktmbuf_free(pkt);
1805 ++rxq->stats.rx_nombuf;
1809 SET_DATA_OFF(next, 0);
1810 addr = RTE_PTR_ADD(addr, seg_len);
1812 (rte_pktmbuf_tailroom(next),
1815 (rte_pktmbuf_mtod(next, void *),
1817 DATA_LEN(next) = seg_len;
1823 rte_pktmbuf_free_seg(pkt);
1824 ++rxq->stats.idropped;
1828 rte_iova_t buf_iova;
1829 struct rte_mbuf_ext_shared_info *shinfo;
1830 uint16_t buf_len = strd_cnt * strd_sz;
1833 /* Increment the refcnt of the whole chunk. */
1834 rte_atomic16_add_return(&buf->refcnt, 1);
1835 MLX5_ASSERT((uint16_t)rte_atomic16_read(&buf->refcnt) <=
1837 buf_addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM);
1839 * MLX5 device doesn't use iova but it is necessary in a
1840 * case where the Rx packet is transmitted via a
1843 buf_iova = rte_mempool_virt2iova(buf) +
1844 RTE_PTR_DIFF(buf_addr, buf);
1845 shinfo = &buf->shinfos[strd_idx];
1846 rte_mbuf_ext_refcnt_set(shinfo, 1);
1848 * EXT_ATTACHED_MBUF will be set to pkt->ol_flags when
1849 * attaching the stride to mbuf and more offload flags
1850 * will be added below by calling rxq_cq_to_mbuf().
1851 * Other fields will be overwritten.
1853 rte_pktmbuf_attach_extbuf(pkt, buf_addr, buf_iova,
1855 /* Set mbuf head-room. */
1856 SET_DATA_OFF(pkt, RTE_PKTMBUF_HEADROOM);
1857 MLX5_ASSERT(pkt->ol_flags == EXT_ATTACHED_MBUF);
1858 MLX5_ASSERT(rte_pktmbuf_tailroom(pkt) >=
1859 len - (hdrm_overlap > 0 ? hdrm_overlap : 0));
1860 DATA_LEN(pkt) = len;
1862 * Copy the last fragment of a packet (up to headroom
1863 * size bytes) in case there is a stride overlap with
1864 * a next packet's headroom. Allocate a separate mbuf
1865 * to store this fragment and link it. Scatter is on.
1867 if (hdrm_overlap > 0) {
1868 MLX5_ASSERT(rxq->strd_scatter_en);
1869 struct rte_mbuf *seg =
1870 rte_pktmbuf_alloc(rxq->mp);
1872 if (unlikely(seg == NULL)) {
1873 rte_pktmbuf_free_seg(pkt);
1874 ++rxq->stats.rx_nombuf;
1877 SET_DATA_OFF(seg, 0);
1878 rte_memcpy(rte_pktmbuf_mtod(seg, void *),
1879 RTE_PTR_ADD(addr, len - hdrm_overlap),
1881 DATA_LEN(seg) = hdrm_overlap;
1882 DATA_LEN(pkt) = len - hdrm_overlap;
1887 rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
1888 if (cqe->lro_num_seg > 1) {
1889 mlx5_lro_update_hdr(addr, cqe, len);
1890 pkt->ol_flags |= PKT_RX_LRO;
1891 pkt->tso_segsz = len / cqe->lro_num_seg;
1894 PORT(pkt) = rxq->port_id;
1895 #ifdef MLX5_PMD_SOFT_COUNTERS
1896 /* Increment bytes counter. */
1897 rxq->stats.ibytes += PKT_LEN(pkt);
1899 /* Return packet. */
1904 /* Update the consumer indexes. */
1905 rxq->consumed_strd = consumed_strd;
1907 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1908 if (rq_ci != rxq->rq_ci) {
1911 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
1913 #ifdef MLX5_PMD_SOFT_COUNTERS
1914 /* Increment packets counter. */
1915 rxq->stats.ipackets += i;
1921 * Dummy DPDK callback for TX.
1923 * This function is used to temporarily replace the real callback during
1924 * unsafe control operations on the queue, or in case of error.
1927 * Generic pointer to TX queue structure.
1929 * Packets to transmit.
1931 * Number of packets in array.
1934 * Number of packets successfully transmitted (<= pkts_n).
1937 removed_tx_burst(void *dpdk_txq __rte_unused,
1938 struct rte_mbuf **pkts __rte_unused,
1939 uint16_t pkts_n __rte_unused)
1946 * Dummy DPDK callback for RX.
1948 * This function is used to temporarily replace the real callback during
1949 * unsafe control operations on the queue, or in case of error.
1952 * Generic pointer to RX queue structure.
1954 * Array to store received packets.
1956 * Maximum number of packets in array.
1959 * Number of packets successfully received (<= pkts_n).
1962 removed_rx_burst(void *dpdk_txq __rte_unused,
1963 struct rte_mbuf **pkts __rte_unused,
1964 uint16_t pkts_n __rte_unused)
1971 * Vectorized Rx/Tx routines are not compiled in when the required vector
1972 * instructions are not supported on the target architecture. The following
1973 * null stubs are needed for linkage when the vectorized implementations are
1974 * not built from the per-architecture files (e.g. mlx5_rxtx_vec_sse.c for x86).
1978 mlx5_rx_burst_vec(void *dpdk_txq __rte_unused,
1979 struct rte_mbuf **pkts __rte_unused,
1980 uint16_t pkts_n __rte_unused)
1986 mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)
1992 mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused)
1998 * Free the mbufs from the linear array of pointers.
2001 * Pointer to array of packets to be freed.
2003 * Number of packets to be freed.
2005 * Configured Tx offloads mask. It is fully defined at
2006 * compile time and may be used for optimization.
2008 static __rte_always_inline void
2009 mlx5_tx_free_mbuf(struct rte_mbuf **__rte_restrict pkts,
2010 unsigned int pkts_n,
2011 unsigned int olx __rte_unused)
2013 struct rte_mempool *pool = NULL;
2014 struct rte_mbuf **p_free = NULL;
2015 struct rte_mbuf *mbuf;
2016 unsigned int n_free = 0;
2019 * The implemented algorithm eliminates
2020 * copying pointers to temporary array
2021 * for rte_mempool_put_bulk() calls.
2024 MLX5_ASSERT(pkts_n);
2028 * Decrement mbuf reference counter, detach
2029 * indirect and external buffers if needed.
2031 mbuf = rte_pktmbuf_prefree_seg(*pkts);
2032 if (likely(mbuf != NULL)) {
2033 MLX5_ASSERT(mbuf == *pkts);
2034 if (likely(n_free != 0)) {
2035 if (unlikely(pool != mbuf->pool))
2036 /* From different pool. */
2039 /* Start new scan array. */
2046 if (unlikely(pkts_n == 0)) {
2052 * This happens if mbuf is still referenced.
2053 * We can't put it back to the pool, skip.
2057 if (unlikely(n_free != 0))
2058 /* There is some array to free.*/
2060 if (unlikely(pkts_n == 0))
2061 /* Last mbuf, nothing to free. */
2067 * This loop is implemented to avoid multiple
2068 * inlining of rte_mempool_put_bulk().
2071 MLX5_ASSERT(p_free);
2072 MLX5_ASSERT(n_free);
2074 * Free the array of pre-freed mbufs
2075 * belonging to the same memory pool.
2077 rte_mempool_put_bulk(pool, (void *)p_free, n_free);
2078 if (unlikely(mbuf != NULL)) {
2079 /* There is the request to start new scan. */
2084 if (likely(pkts_n != 0))
2087 * This is the last mbuf to be freed.
2088 * Do one more loop iteration to complete.
2089 * This is rare case of the last unique mbuf.
2094 if (likely(pkts_n == 0))
2103 * Free the mbufs from the elts ring buffer up to the new tail.
2106 * Pointer to Tx queue structure.
2108 * Index in elts to free up to, becomes new elts tail.
2110 * Configured Tx offloads mask. It is fully defined at
2111 * compile time and may be used for optimization.
2113 static __rte_always_inline void
2114 mlx5_tx_free_elts(struct mlx5_txq_data *__rte_restrict txq,
2116 unsigned int olx __rte_unused)
2118 uint16_t n_elts = tail - txq->elts_tail;
2120 MLX5_ASSERT(n_elts);
2121 MLX5_ASSERT(n_elts <= txq->elts_s);
2123 * Implement a loop to support ring buffer wraparound
2124 * with single inlining of mlx5_tx_free_mbuf().
2129 part = txq->elts_s - (txq->elts_tail & txq->elts_m);
2130 part = RTE_MIN(part, n_elts);
2132 MLX5_ASSERT(part <= txq->elts_s);
2133 mlx5_tx_free_mbuf(&txq->elts[txq->elts_tail & txq->elts_m],
2135 txq->elts_tail += part;
2141 * Store the mbufs being sent into the elts ring buffer.
2142 * On Tx completion these mbufs will be freed.
2145 * Pointer to Tx queue structure.
2147 * Pointer to array of packets to be stored.
2149 * Number of packets to be stored.
2151 * Configured Tx offloads mask. It is fully defined at
2152 * compile time and may be used for optimization.
2154 static __rte_always_inline void
2155 mlx5_tx_copy_elts(struct mlx5_txq_data *__rte_restrict txq,
2156 struct rte_mbuf **__rte_restrict pkts,
2157 unsigned int pkts_n,
2158 unsigned int olx __rte_unused)
2161 struct rte_mbuf **elts = (struct rte_mbuf **)txq->elts;
2164 MLX5_ASSERT(pkts_n);
2165 part = txq->elts_s - (txq->elts_head & txq->elts_m);
2167 MLX5_ASSERT(part <= txq->elts_s);
2168 /* This code is a good candidate for vectorizing with SIMD. */
2169 rte_memcpy((void *)(elts + (txq->elts_head & txq->elts_m)),
2171 RTE_MIN(part, pkts_n) * sizeof(struct rte_mbuf *));
2172 txq->elts_head += pkts_n;
2173 if (unlikely(part < pkts_n))
2174 /* The copy is wrapping around the elts array. */
2175 rte_memcpy((void *)elts, (void *)(pkts + part),
2176 (pkts_n - part) * sizeof(struct rte_mbuf *));
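/*
 * Wrap-around example for the copy above (illustrative sizes only): with
 * elts_s = 256, elts_head = 250 and pkts_n = 10, part = 256 - 250 = 6, so
 * six pointers are copied to the tail of elts and the remaining four are
 * copied to its beginning by the second rte_memcpy().
 */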
2180 * Update completion queue consuming index via doorbell
2181 * and flush the completed data buffers.
2184 * Pointer to TX queue structure.
2185 * @param last_cqe
2186 * Valid CQE pointer, if not NULL update txq->wqe_pi and flush the buffers.
2188 * Configured Tx offloads mask. It is fully defined at
2189 * compile time and may be used for optimization.
2191 static __rte_always_inline void
2192 mlx5_tx_comp_flush(struct mlx5_txq_data *__rte_restrict txq,
2193 volatile struct mlx5_cqe *last_cqe,
2194 unsigned int olx __rte_unused)
2196 if (likely(last_cqe != NULL)) {
2199 txq->wqe_pi = rte_be_to_cpu_16(last_cqe->wqe_counter);
2200 tail = txq->fcqs[(txq->cq_ci - 1) & txq->cqe_m];
2201 if (likely(tail != txq->elts_tail)) {
2202 mlx5_tx_free_elts(txq, tail, olx);
2203 MLX5_ASSERT(tail == txq->elts_tail);
2209 * Manage TX completions. This routine checks the CQ for
2210 * arrived CQEs, deduces the last accomplished WQE in SQ,
2211 * updates SQ producing index and frees all completed mbufs.
2214 * Pointer to TX queue structure.
2216 * Configured Tx offloads mask. It is fully defined at
2217 * compile time and may be used for optimization.
2219 * NOTE: not inlined intentionally, it makes the tx_burst
2220 * routine smaller, simpler and faster - from experiments.
2223 mlx5_tx_handle_completion(struct mlx5_txq_data *__rte_restrict txq,
2224 unsigned int olx __rte_unused)
2226 unsigned int count = MLX5_TX_COMP_MAX_CQE;
2227 volatile struct mlx5_cqe *last_cqe = NULL;
2228 bool ring_doorbell = false;
2231 static_assert(MLX5_CQE_STATUS_HW_OWN < 0, "Must be negative value");
2232 static_assert(MLX5_CQE_STATUS_SW_OWN < 0, "Must be negative value");
2234 volatile struct mlx5_cqe *cqe;
2236 cqe = &txq->cqes[txq->cq_ci & txq->cqe_m];
2237 ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci);
2238 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
2239 if (likely(ret != MLX5_CQE_STATUS_ERR)) {
2240 /* No new CQEs in completion queue. */
2241 MLX5_ASSERT(ret == MLX5_CQE_STATUS_HW_OWN);
2245 * Some error occurred, try to restart.
2246 * We have no barrier after WQE related Doorbell
2247 * written, make sure all writes are completed
2248 * here, before we might perform SQ reset.
2251 ret = mlx5_tx_error_cqe_handle
2252 (txq, (volatile struct mlx5_err_cqe *)cqe);
2253 if (unlikely(ret < 0)) {
2255 * Some error occurred on queue error
2256 * handling, we do not advance the index
2257 * here, allowing to retry on next call.
2262 * We are going to fetch all entries with
2263 * MLX5_CQE_SYNDROME_WR_FLUSH_ERR status.
2264 * The send queue is supposed to be empty.
2266 ring_doorbell = true;
2268 txq->cq_pi = txq->cq_ci;
2272 /* Normal transmit completion. */
2273 MLX5_ASSERT(txq->cq_ci != txq->cq_pi);
2274 MLX5_ASSERT((txq->fcqs[txq->cq_ci & txq->cqe_m] >> 16) ==
2276 ring_doorbell = true;
2280 * We have to restrict the amount of processed CQEs
2281 * in one tx_burst routine call. The CQ may be large
2282 * and many CQEs may be updated by the NIC in one
2283 * transaction. Buffers freeing is time consuming,
2284 * multiple iterations may introduce significant
2287 if (likely(--count == 0))
2290 if (likely(ring_doorbell)) {
2291 /* Ring doorbell to notify hardware. */
2292 rte_compiler_barrier();
2293 *txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
2294 mlx5_tx_comp_flush(txq, last_cqe, olx);
2299 * Check if the completion request flag should be set in the last WQE.
2300 * Both pushed mbufs and WQEs are monitored and the completion request
2301 * flag is set if any of the thresholds is reached.
2304 * Pointer to TX queue structure.
2306 * Pointer to burst routine local context.
2308 * Configured Tx offloads mask. It is fully defined at
2309 * compile time and may be used for optimization.
2311 static __rte_always_inline void
2312 mlx5_tx_request_completion(struct mlx5_txq_data *__rte_restrict txq,
2313 struct mlx5_txq_local *__rte_restrict loc,
2316 uint16_t head = txq->elts_head;
2319 part = MLX5_TXOFF_CONFIG(INLINE) ?
2320 0 : loc->pkts_sent - loc->pkts_copy;
2322 if ((uint16_t)(head - txq->elts_comp) >= MLX5_TX_COMP_THRESH ||
2323 (MLX5_TXOFF_CONFIG(INLINE) &&
2324 (uint16_t)(txq->wqe_ci - txq->wqe_comp) >= txq->wqe_thres)) {
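		/*
		 * Completion is requested either after MLX5_TX_COMP_THRESH
		 * packets or, with inlining enabled, after wqe_thres WQEBBs,
		 * since inlined packets may consume many WQEBBs while
		 * counting as only a few packets.
		 */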
2325 volatile struct mlx5_wqe *last = loc->wqe_last;
2328 txq->elts_comp = head;
2329 if (MLX5_TXOFF_CONFIG(INLINE))
2330 txq->wqe_comp = txq->wqe_ci;
2331 /* Request unconditional completion on last WQE. */
2332 last->cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
2333 MLX5_COMP_MODE_OFFSET);
2334 /* Save elts_head in dedicated free on completion queue. */
2335 #ifdef RTE_LIBRTE_MLX5_DEBUG
2336 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head |
2337 (last->cseg.opcode >> 8) << 16;
2339 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head;
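		/*
		 * In debug builds the WQE index (taken from the Control
		 * Segment opcode field) is kept in the upper 16 bits of
		 * fcqs[] so the completion handler can cross-check it
		 * against the reported CQE.
		 */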
2341 /* A CQE slot must always be available. */
2342 MLX5_ASSERT((txq->cq_pi - txq->cq_ci) <= txq->cqe_s);
2347 * DPDK callback to check the status of a tx descriptor.
2352 * The index of the descriptor in the ring.
2355 * The status of the tx descriptor.
2358 mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
2360 struct mlx5_txq_data *__rte_restrict txq = tx_queue;
2363 mlx5_tx_handle_completion(txq, 0);
2364 used = txq->elts_head - txq->elts_tail;
2366 return RTE_ETH_TX_DESC_FULL;
2367 return RTE_ETH_TX_DESC_DONE;
2371 * Build the Control Segment with specified opcode:
2372 * - MLX5_OPCODE_SEND
2373 * - MLX5_OPCODE_ENHANCED_MPSW
2377 * Pointer to TX queue structure.
2379 * Pointer to burst routine local context.
2381 * Pointer to WQE to fill with built Control Segment.
2383 * Supposed length of WQE in segments.
2385 * SQ WQE opcode to put into Control Segment.
2387 * Configured Tx offloads mask. It is fully defined at
2388 * compile time and may be used for optimization.
2390 static __rte_always_inline void
2391 mlx5_tx_cseg_init(struct mlx5_txq_data *__rte_restrict txq,
2392 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
2393 struct mlx5_wqe *__rte_restrict wqe,
2395 unsigned int opcode,
2396 unsigned int olx __rte_unused)
2398 struct mlx5_wqe_cseg *__rte_restrict cs = &wqe->cseg;
2400 /* For legacy MPW replace the EMPW by TSO with modifier. */
2401 if (MLX5_TXOFF_CONFIG(MPW) && opcode == MLX5_OPCODE_ENHANCED_MPSW)
2402 opcode = MLX5_OPCODE_TSO | MLX5_OPC_MOD_MPW << 24;
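	/*
	 * The opcode dword carries the WQE opcode in the low byte and the
	 * WQE index above it; sq_ds combines the SQ number (qp_num_8s is
	 * pre-shifted by 8) with the WQE size in 16-byte segments.
	 */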
2403 cs->opcode = rte_cpu_to_be_32((txq->wqe_ci << 8) | opcode);
2404 cs->sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
2405 cs->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
2406 MLX5_COMP_MODE_OFFSET);
2407 cs->misc = RTE_BE32(0);
2411 * Build the Synchronize Queue Segment with specified completion index.
2414 * Pointer to TX queue structure.
2416 * Pointer to burst routine local context.
2418 * Pointer to WQE to fill with built Control Segment.
2420 * Completion index in Clock Queue to wait.
2422 * Configured Tx offloads mask. It is fully defined at
2423 * compile time and may be used for optimization.
2425 static __rte_always_inline void
2426 mlx5_tx_wseg_init(struct mlx5_txq_data *restrict txq,
2427 struct mlx5_txq_local *restrict loc __rte_unused,
2428 struct mlx5_wqe *restrict wqe,
2430 unsigned int olx __rte_unused)
2432 struct mlx5_wqe_qseg *qs;
2434 qs = RTE_PTR_ADD(wqe, MLX5_WSEG_SIZE);
2435 qs->max_index = rte_cpu_to_be_32(wci);
2436 qs->qpn_cqn = rte_cpu_to_be_32(txq->sh->txpp.clock_queue.cq->id);
2437 qs->reserved0 = RTE_BE32(0);
2438 qs->reserved1 = RTE_BE32(0);
2442 * Build the Ethernet Segment without inlined data.
2443 * Supports Software Parser, Checksums and VLAN
2444 * insertion Tx offload features.
2447 * Pointer to TX queue structure.
2449 * Pointer to burst routine local context.
2451 * Pointer to WQE to fill with built Ethernet Segment.
2453 * Configured Tx offloads mask. It is fully defined at
2454 * compile time and may be used for optimization.
2456 static __rte_always_inline void
2457 mlx5_tx_eseg_none(struct mlx5_txq_data *__rte_restrict txq __rte_unused,
2458 struct mlx5_txq_local *__rte_restrict loc,
2459 struct mlx5_wqe *__rte_restrict wqe,
2462 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
2466 * Calculate and set check sum flags first, dword field
2467 * in segment may be shared with Software Parser flags.
2469 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2470 es->flags = rte_cpu_to_le_32(csum);
2472 * Calculate and set Software Parser offsets and flags.
2473 * These flags are set for custom UDP and IP tunnel packets.
2475 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2476 /* Fill metadata field if needed. */
2477 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2478 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2479 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
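	/*
	 * The nested conditional above writes the mbuf dynamic metadata
	 * only when the METADATA offload is compiled in and the mbuf
	 * carries PKT_TX_DYNF_METADATA, otherwise zero is stored.
	 */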
2480 /* Engage VLAN tag insertion feature if requested. */
2481 if (MLX5_TXOFF_CONFIG(VLAN) &&
2482 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
2484 * We should get here only if the device supports
2485 * this feature correctly.
2487 MLX5_ASSERT(txq->vlan_en);
2488 es->inline_hdr = rte_cpu_to_be_32(MLX5_ETH_WQE_VLAN_INSERT |
2489 loc->mbuf->vlan_tci);
2491 es->inline_hdr = RTE_BE32(0);
2496 * Build the Ethernet Segment with minimal inlined data
2497 * of MLX5_ESEG_MIN_INLINE_SIZE bytes in length. This is
2498 * used to fill the gap in single WQEBB WQEs.
2499 * Supports Software Parser, Checksums and VLAN
2500 * insertion Tx offload features.
2503 * Pointer to TX queue structure.
2505 * Pointer to burst routine local context.
2507 * Pointer to WQE to fill with built Ethernet Segment.
2509 * Length of VLAN tag insertion if any.
2511 * Configured Tx offloads mask. It is fully defined at
2512 * compile time and may be used for optimization.
2514 static __rte_always_inline void
2515 mlx5_tx_eseg_dmin(struct mlx5_txq_data *__rte_restrict txq __rte_unused,
2516 struct mlx5_txq_local *__rte_restrict loc,
2517 struct mlx5_wqe *__rte_restrict wqe,
2521 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
2523 uint8_t *psrc, *pdst;
2526 * Calculate and set check sum flags first, dword field
2527 * in segment may be shared with Software Parser flags.
2529 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2530 es->flags = rte_cpu_to_le_32(csum);
2532 * Calculate and set Software Parser offsets and flags.
2533 * These flags are set for custom UDP and IP tunnel packets.
2535 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2536 /* Fill metadata field if needed. */
2537 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2538 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2539 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2540 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2542 sizeof(rte_v128u32_t)),
2543 "invalid Ethernet Segment data size");
2544 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2546 sizeof(struct rte_vlan_hdr) +
2547 2 * RTE_ETHER_ADDR_LEN),
2548 "invalid Ethernet Segment data size");
2549 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
2550 es->inline_hdr_sz = RTE_BE16(MLX5_ESEG_MIN_INLINE_SIZE);
2551 es->inline_data = *(unaligned_uint16_t *)psrc;
2552 psrc += sizeof(uint16_t);
2553 pdst = (uint8_t *)(es + 1);
2554 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2555 /* Implement VLAN tag insertion as part of the inline data. */
2556 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
2557 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2558 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2559 /* Insert VLAN ethertype + VLAN tag. */
2560 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2561 ((RTE_ETHER_TYPE_VLAN << 16) |
2562 loc->mbuf->vlan_tci);
2563 pdst += sizeof(struct rte_vlan_hdr);
2564 /* Copy the remaining two bytes from the packet data. */
2565 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
2566 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
2568 /* Fill the gap in the title WQEBB with inline data. */
2569 rte_mov16(pdst, psrc);
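	/*
	 * Two bytes went into es->inline_data above and rte_mov16() adds
	 * 16 more, together making up the MLX5_ESEG_MIN_INLINE_SIZE bytes
	 * filling the title WQEBB.
	 */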
2574 * Build the Ethernet Segment with entire packet
2575 * data inlining. Checks the boundary of WQEBB and
2576 * ring buffer wrapping, supports Software Parser,
2577 * Checksums and VLAN insertion Tx offload features.
2580 * Pointer to TX queue structure.
2582 * Pointer to burst routine local context.
2584 * Pointer to WQE to fill with built Ethernet Segment.
2586 * Length of VLAN tag insertion if any.
2588 * Length of data to inline (VLAN included, if any).
2590 * TSO flag, set mss field from the packet.
2592 * Configured Tx offloads mask. It is fully defined at
2593 * compile time and may be used for optimization.
2596 * Pointer to the next Data Segment (aligned and wrapped around).
2598 static __rte_always_inline struct mlx5_wqe_dseg *
2599 mlx5_tx_eseg_data(struct mlx5_txq_data *__rte_restrict txq,
2600 struct mlx5_txq_local *__rte_restrict loc,
2601 struct mlx5_wqe *__rte_restrict wqe,
2607 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
2609 uint8_t *psrc, *pdst;
2613 * Calculate and set check sum flags first, dword field
2614 * in segment may be shared with Software Parser flags.
2616 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2619 csum |= loc->mbuf->tso_segsz;
2620 es->flags = rte_cpu_to_be_32(csum);
2622 es->flags = rte_cpu_to_le_32(csum);
2625 * Calculate and set Software Parser offsets and flags.
2626 * These flags are set for custom UDP and IP tunnel packets.
2628 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2629 /* Fill metadata field if needed. */
2630 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2631 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2632 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2633 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2635 sizeof(rte_v128u32_t)),
2636 "invalid Ethernet Segment data size");
2637 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2639 sizeof(struct rte_vlan_hdr) +
2640 2 * RTE_ETHER_ADDR_LEN),
2641 "invalid Ethernet Segment data size");
2642 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
2643 es->inline_hdr_sz = rte_cpu_to_be_16(inlen);
2644 es->inline_data = *(unaligned_uint16_t *)psrc;
2645 psrc += sizeof(uint16_t);
2646 pdst = (uint8_t *)(es + 1);
2647 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2648 /* Implement VLAN tag insertion as part of the inline data. */
2649 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
2650 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2651 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2652 /* Insert VLAN ethertype + VLAN tag. */
2653 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2654 ((RTE_ETHER_TYPE_VLAN << 16) |
2655 loc->mbuf->vlan_tci);
2656 pdst += sizeof(struct rte_vlan_hdr);
2657 /* Copy the remaining two bytes from the packet data. */
2658 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
2659 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
2660 psrc += sizeof(uint16_t);
2662 /* Fill the gap in the title WQEBB with inline data. */
2663 rte_mov16(pdst, psrc);
2664 psrc += sizeof(rte_v128u32_t);
2666 pdst = (uint8_t *)(es + 2);
2667 MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
2668 MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
2669 inlen -= MLX5_ESEG_MIN_INLINE_SIZE;
2671 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
2672 return (struct mlx5_wqe_dseg *)pdst;
2675 * The WQEBB space availability is checked by caller.
2676 * Here we should be aware of WQE ring buffer wraparound only.
2678 part = (uint8_t *)txq->wqes_end - pdst;
2679 part = RTE_MIN(part, inlen);
2681 rte_memcpy(pdst, psrc, part);
2683 if (likely(!inlen)) {
2685 * If the return value is not used by the caller,
2686 * the code below will be optimized out.
2689 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2690 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
2691 pdst = (uint8_t *)txq->wqes;
2692 return (struct mlx5_wqe_dseg *)pdst;
2694 pdst = (uint8_t *)txq->wqes;
2701 * Copy data from a chain of mbufs to the specified linear buffer.
2702 * If data from some mbuf is copied completely, this mbuf is freed.
2703 * The local structure is used to keep the byte stream state.
2707 * Pointer to the destination linear buffer.
2709 * Pointer to burst routine local context.
2711 * Length of data to be copied.
2713 * Length of data to be copied, ignoring the no-inline hint.
2715 * Configured Tx offloads mask. It is fully defined at
2716 * compile time and may be used for optimization.
2719 * Number of actually copied data bytes. This is always greater than or
2720 * equal to the "must" parameter and might be less than "len" if the
2721 * no-inline hint flag is encountered.
2723 static __rte_always_inline unsigned int
2724 mlx5_tx_mseg_memcpy(uint8_t *pdst,
2725 struct mlx5_txq_local *__rte_restrict loc,
2728 unsigned int olx __rte_unused)
2730 struct rte_mbuf *mbuf;
2731 unsigned int part, dlen, copy = 0;
2735 MLX5_ASSERT(must <= len);
2737 /* Allow zero length packets, must check first. */
2738 dlen = rte_pktmbuf_data_len(loc->mbuf);
2739 if (dlen <= loc->mbuf_off) {
2740 /* Exhausted packet, just free. */
2742 loc->mbuf = mbuf->next;
2743 rte_pktmbuf_free_seg(mbuf);
2745 MLX5_ASSERT(loc->mbuf_nseg > 1);
2746 MLX5_ASSERT(loc->mbuf);
2748 if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
2753 * We already copied the minimal
2754 * requested amount of data.
2759 if (diff <= rte_pktmbuf_data_len(loc->mbuf)) {
2761 * Copy only the minimal required
2762 * part of the data buffer.
2769 dlen -= loc->mbuf_off;
2770 psrc = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
2772 part = RTE_MIN(len, dlen);
2773 rte_memcpy(pdst, psrc, part);
2775 loc->mbuf_off += part;
2778 if (loc->mbuf_off >= rte_pktmbuf_data_len(loc->mbuf)) {
2780 /* Exhausted packet, just free. */
2782 loc->mbuf = mbuf->next;
2783 rte_pktmbuf_free_seg(mbuf);
2785 MLX5_ASSERT(loc->mbuf_nseg >= 1);
2795 * Build the Ethernet Segment with inlined data from
2796 * multi-segment packet. Checks the boundary of WQEBB
2797 * and ring buffer wrapping, supports Software Parser,
2798 * Checksums and VLAN insertion Tx offload features.
2801 * Pointer to TX queue structure.
2803 * Pointer to burst routine local context.
2805 * Pointer to WQE to fill with built Ethernet Segment.
2807 * Length of VLAN tag insertion if any.
2809 * Length of data to inline (VLAN included, if any).
2811 * TSO flag, set mss field from the packet.
2813 * Configured Tx offloads mask. It is fully defined at
2814 * compile time and may be used for optimization.
2817 * Pointer to the next Data Segment (aligned and
2818 * possibly NOT wrapped around - the caller should do
2819 * the wrapping check on its own).
2821 static __rte_always_inline struct mlx5_wqe_dseg *
2822 mlx5_tx_eseg_mdat(struct mlx5_txq_data *__rte_restrict txq,
2823 struct mlx5_txq_local *__rte_restrict loc,
2824 struct mlx5_wqe *__rte_restrict wqe,
2830 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
2833 unsigned int part, tlen = 0;
2836 * Calculate and set check sum flags first, uint32_t field
2837 * in segment may be shared with Software Parser flags.
2839 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2842 csum |= loc->mbuf->tso_segsz;
2843 es->flags = rte_cpu_to_be_32(csum);
2845 es->flags = rte_cpu_to_le_32(csum);
2848 * Calculate and set Software Parser offsets and flags.
2849 * These flags are set for custom UDP and IP tunnel packets.
2851 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2852 /* Fill metadata field if needed. */
2853 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2854 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2855 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2856 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2858 sizeof(rte_v128u32_t)),
2859 "invalid Ethernet Segment data size");
2860 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2862 sizeof(struct rte_vlan_hdr) +
2863 2 * RTE_ETHER_ADDR_LEN),
2864 "invalid Ethernet Segment data size");
2865 MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
2866 pdst = (uint8_t *)&es->inline_data;
2867 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2868 /* Implement VLAN tag insertion as part of the inline data. */
2869 mlx5_tx_mseg_memcpy(pdst, loc,
2870 2 * RTE_ETHER_ADDR_LEN,
2871 2 * RTE_ETHER_ADDR_LEN, olx);
2872 pdst += 2 * RTE_ETHER_ADDR_LEN;
2873 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2874 ((RTE_ETHER_TYPE_VLAN << 16) |
2875 loc->mbuf->vlan_tci);
2876 pdst += sizeof(struct rte_vlan_hdr);
2877 tlen += 2 * RTE_ETHER_ADDR_LEN + sizeof(struct rte_vlan_hdr);
2879 MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
2881 * The WQEBB space availability is checked by caller.
2882 * Here we should be aware of WQE ring buffer wraparound only.
2884 part = (uint8_t *)txq->wqes_end - pdst;
2885 part = RTE_MIN(part, inlen - tlen);
2891 * Copying may be interrupted inside the routine
2892 * if it runs into the no-inline hint flag.
2894 copy = tlen >= txq->inlen_mode ? 0 : (txq->inlen_mode - tlen);
2895 copy = mlx5_tx_mseg_memcpy(pdst, loc, part, copy, olx);
2897 if (likely(inlen <= tlen) || copy < part) {
2898 es->inline_hdr_sz = rte_cpu_to_be_16(tlen);
2900 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2901 return (struct mlx5_wqe_dseg *)pdst;
2903 pdst = (uint8_t *)txq->wqes;
2904 part = inlen - tlen;
2909 * Build the Data Segment of pointer type.
2912 * Pointer to TX queue structure.
2914 * Pointer to burst routine local context.
2916 * Pointer to WQE to fill with built Data Segment.
2918 * Data buffer to point.
2920 * Data buffer length.
2922 * Configured Tx offloads mask. It is fully defined at
2923 * compile time and may be used for optimization.
2925 static __rte_always_inline void
2926 mlx5_tx_dseg_ptr(struct mlx5_txq_data *__rte_restrict txq,
2927 struct mlx5_txq_local *__rte_restrict loc,
2928 struct mlx5_wqe_dseg *__rte_restrict dseg,
2931 unsigned int olx __rte_unused)
2935 dseg->bcount = rte_cpu_to_be_32(len);
2936 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
2937 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
2941 * Build the Data Segment of pointer type, or inline
2942 * the data if its length does not exceed the minimal
2943 * Data Segment size.
2946 * Pointer to TX queue structure.
2948 * Pointer to burst routine local context.
2950 * Pointer to WQE to fill with built Data Segment.
2952 * Data buffer to point.
2954 * Data buffer length.
2956 * Configured Tx offloads mask. It is fully defined at
2957 * compile time and may be used for optimization.
2959 static __rte_always_inline void
2960 mlx5_tx_dseg_iptr(struct mlx5_txq_data *__rte_restrict txq,
2961 struct mlx5_txq_local *__rte_restrict loc,
2962 struct mlx5_wqe_dseg *__rte_restrict dseg,
2965 unsigned int olx __rte_unused)
2971 if (len > MLX5_DSEG_MIN_INLINE_SIZE) {
2972 dseg->bcount = rte_cpu_to_be_32(len);
2973 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
2974 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
2978 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
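	/*
	 * MLX5_ETH_WQE_DATA_INLINE in the byte count marks this Data
	 * Segment as carrying inline data instead of an lkey/pointer pair.
	 */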
2979 /* Unrolled implementation of generic rte_memcpy. */
2980 dst = (uintptr_t)&dseg->inline_data[0];
2981 src = (uintptr_t)buf;
2983 #ifdef RTE_ARCH_STRICT_ALIGN
2984 MLX5_ASSERT(dst == RTE_PTR_ALIGN(dst, sizeof(uint32_t)));
2985 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
2986 dst += sizeof(uint32_t);
2987 src += sizeof(uint32_t);
2988 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
2989 dst += sizeof(uint32_t);
2990 src += sizeof(uint32_t);
2992 *(uint64_t *)dst = *(unaligned_uint64_t *)src;
2993 dst += sizeof(uint64_t);
2994 src += sizeof(uint64_t);
2998 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
2999 dst += sizeof(uint32_t);
3000 src += sizeof(uint32_t);
3003 *(uint16_t *)dst = *(unaligned_uint16_t *)src;
3004 dst += sizeof(uint16_t);
3005 src += sizeof(uint16_t);
3008 *(uint8_t *)dst = *(uint8_t *)src;
3012 * Build the Data Segment of inlined data from single
3013 * segment packet, no VLAN insertion.
3016 * Pointer to TX queue structure.
3018 * Pointer to burst routine local context.
3020 * Pointer to WQE to fill with built Data Segment.
3022 * Data buffer to point.
3024 * Data buffer length.
3026 * Configured Tx offloads mask. It is fully defined at
3027 * compile time and may be used for optimization.
3030 * Pointer to the next Data Segment after inlined data.
3031 * Ring buffer wraparound check is needed. We do not
3032 * do it here because it may not be needed for the
3033 * last packet in the eMPW session.
3035 static __rte_always_inline struct mlx5_wqe_dseg *
3036 mlx5_tx_dseg_empw(struct mlx5_txq_data *__rte_restrict txq,
3037 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
3038 struct mlx5_wqe_dseg *__rte_restrict dseg,
3041 unsigned int olx __rte_unused)
3046 if (!MLX5_TXOFF_CONFIG(MPW)) {
3047 /* Store the descriptor byte counter for eMPW sessions. */
3048 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
3049 pdst = &dseg->inline_data[0];
3051 /* The entire legacy MPW session counter is stored on close. */
3052 pdst = (uint8_t *)dseg;
3055 * The WQEBB space availability is checked by caller.
3056 * Here we should be aware of WQE ring buffer wraparound only.
3058 part = (uint8_t *)txq->wqes_end - pdst;
3059 part = RTE_MIN(part, len);
3061 rte_memcpy(pdst, buf, part);
3065 if (!MLX5_TXOFF_CONFIG(MPW))
3066 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
3067 /* Note: no final wraparound check here. */
3068 return (struct mlx5_wqe_dseg *)pdst;
3070 pdst = (uint8_t *)txq->wqes;
3077 * Build the Data Segment of inlined data from single
3078 * segment packet with VLAN insertion.
3081 * Pointer to TX queue structure.
3083 * Pointer to burst routine local context.
3085 * Pointer to the dseg to fill with the built Data Segment.
3087 * Data buffer to point.
3089 * Data buffer length.
3091 * Configured Tx offloads mask. It is fully defined at
3092 * compile time and may be used for optimization.
3095 * Pointer to the next Data Segment after inlined data.
3096 * Ring buffer wraparound check is needed.
3098 static __rte_always_inline struct mlx5_wqe_dseg *
3099 mlx5_tx_dseg_vlan(struct mlx5_txq_data *__rte_restrict txq,
3100 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
3101 struct mlx5_wqe_dseg *__rte_restrict dseg,
3104 unsigned int olx __rte_unused)
3110 MLX5_ASSERT(len > MLX5_ESEG_MIN_INLINE_SIZE);
3111 static_assert(MLX5_DSEG_MIN_INLINE_SIZE ==
3112 (2 * RTE_ETHER_ADDR_LEN),
3113 "invalid Data Segment data size");
3114 if (!MLX5_TXOFF_CONFIG(MPW)) {
3115 /* Store the descriptor byte counter for eMPW sessions. */
3116 dseg->bcount = rte_cpu_to_be_32
3117 ((len + sizeof(struct rte_vlan_hdr)) |
3118 MLX5_ETH_WQE_DATA_INLINE);
3119 pdst = &dseg->inline_data[0];
3121 /* The entire legacy MPW session counter is stored on close. */
3122 pdst = (uint8_t *)dseg;
3124 memcpy(pdst, buf, MLX5_DSEG_MIN_INLINE_SIZE);
3125 buf += MLX5_DSEG_MIN_INLINE_SIZE;
3126 pdst += MLX5_DSEG_MIN_INLINE_SIZE;
3127 len -= MLX5_DSEG_MIN_INLINE_SIZE;
3128 /* Insert VLAN ethertype + VLAN tag. Pointer is aligned. */
3129 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
3130 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
3131 pdst = (uint8_t *)txq->wqes;
3132 *(uint32_t *)pdst = rte_cpu_to_be_32((RTE_ETHER_TYPE_VLAN << 16) |
3133 loc->mbuf->vlan_tci);
3134 pdst += sizeof(struct rte_vlan_hdr);
3136 * The WQEBB space availability is checked by caller.
3137 * Here we should be aware of WQE ring buffer wraparound only.
3139 part = (uint8_t *)txq->wqes_end - pdst;
3140 part = RTE_MIN(part, len);
3142 rte_memcpy(pdst, buf, part);
3146 if (!MLX5_TXOFF_CONFIG(MPW))
3147 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
3148 /* Note: no final wraparound check here. */
3149 return (struct mlx5_wqe_dseg *)pdst;
3151 pdst = (uint8_t *)txq->wqes;
3158 * Build the Ethernet Segment with optionally inlined data with
3159 * VLAN insertion and following Data Segments (if any) from
3160 * multi-segment packet. Used by ordinary send and TSO.
3163 * Pointer to TX queue structure.
3165 * Pointer to burst routine local context.
3167 * Pointer to WQE to fill with built Ethernet/Data Segments.
3169 * Length of VLAN header to insert, 0 means no VLAN insertion.
3171 * Data length to inline. For TSO this parameter specifies the
3172 * exact value, for ordinary send routine it can be aligned by
3173 * the caller to provide better WQE space saving and data buffer
3174 * start address alignment. This length includes the VLAN header being inserted.
3177 * Zero means ordinary send and the inlined data can be extended,
3178 * otherwise this is TSO and the inlined data length is fixed.
3180 * Configured Tx offloads mask. It is fully defined at
3181 * compile time and may be used for optimization.
3184 * Actual size of built WQE in segments.
3186 static __rte_always_inline unsigned int
3187 mlx5_tx_mseg_build(struct mlx5_txq_data *__rte_restrict txq,
3188 struct mlx5_txq_local *__rte_restrict loc,
3189 struct mlx5_wqe *__rte_restrict wqe,
3193 unsigned int olx __rte_unused)
3195 struct mlx5_wqe_dseg *__rte_restrict dseg;
3198 MLX5_ASSERT((rte_pktmbuf_pkt_len(loc->mbuf) + vlan) >= inlen);
3199 loc->mbuf_nseg = NB_SEGS(loc->mbuf);
3202 dseg = mlx5_tx_eseg_mdat(txq, loc, wqe, vlan, inlen, tso, olx);
3203 if (!loc->mbuf_nseg)
3206 * There are still some mbufs remaining, not inlined.
3207 * The first mbuf may be partially inlined and we
3208 * must process the possible non-zero data offset.
3210 if (loc->mbuf_off) {
3215 * Exhausted packets must have been dropped before.
3216 * A non-zero offset means there is some data
3217 * remaining in the packet.
3219 MLX5_ASSERT(loc->mbuf_off < rte_pktmbuf_data_len(loc->mbuf));
3220 MLX5_ASSERT(rte_pktmbuf_data_len(loc->mbuf));
3221 dptr = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
3223 dlen = rte_pktmbuf_data_len(loc->mbuf) - loc->mbuf_off;
3225 * Build the pointer/minimal data Data Segment.
3226 * Do ring buffer wrapping check in advance.
3228 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3229 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3230 mlx5_tx_dseg_iptr(txq, loc, dseg, dptr, dlen, olx);
3231 /* Store the mbuf to be freed on completion. */
3232 MLX5_ASSERT(loc->elts_free);
3233 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3236 if (--loc->mbuf_nseg == 0)
3238 loc->mbuf = loc->mbuf->next;
3242 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
3243 struct rte_mbuf *mbuf;
3245 /* Zero length segment found, just skip. */
3247 loc->mbuf = loc->mbuf->next;
3248 rte_pktmbuf_free_seg(mbuf);
3249 if (--loc->mbuf_nseg == 0)
3252 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3253 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3256 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3257 rte_pktmbuf_data_len(loc->mbuf), olx);
3258 MLX5_ASSERT(loc->elts_free);
3259 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3262 if (--loc->mbuf_nseg == 0)
3264 loc->mbuf = loc->mbuf->next;
3269 /* Calculate actual segments used from the dseg pointer. */
3270 if ((uintptr_t)wqe < (uintptr_t)dseg)
3271 ds = ((uintptr_t)dseg - (uintptr_t)wqe) / MLX5_WSEG_SIZE;
3273 ds = (((uintptr_t)dseg - (uintptr_t)wqe) +
3274 txq->wqe_s * MLX5_WQE_SIZE) / MLX5_WSEG_SIZE;
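	/*
	 * If the final dseg pointer has wrapped around to the beginning of
	 * the ring, the whole SQ size is added so the computed segment
	 * count stays positive.
	 */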
3279 * The routine checks the timestamp flag in the current packet,
3280 * and pushes a WAIT WQE into the queue if scheduling is required.
3283 * Pointer to TX queue structure.
3285 * Pointer to burst routine local context.
3287 * Configured Tx offloads mask. It is fully defined at
3288 * compile time and may be used for optimization.
3291 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3292 * MLX5_TXCMP_CODE_SINGLE - continue processing with the packet.
3293 * MLX5_TXCMP_CODE_MULTI - the WAIT inserted, continue processing.
3294 * Local context variables partially updated.
3296 static __rte_always_inline enum mlx5_txcmp_code
3297 mlx5_tx_schedule_send(struct mlx5_txq_data *restrict txq,
3298 struct mlx5_txq_local *restrict loc,
3301 if (MLX5_TXOFF_CONFIG(TXPP) &&
3302 loc->mbuf->ol_flags & txq->ts_mask) {
3303 struct mlx5_wqe *wqe;
3308 * Estimate the required space quickly and roughly.
3309 * We would like to ensure the packet can be pushed
3310 * to the queue and we won't get an orphan WAIT WQE.
3312 if (loc->wqe_free <= MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE ||
3313 loc->elts_free < NB_SEGS(loc->mbuf))
3314 return MLX5_TXCMP_CODE_EXIT;
3315 /* Convert the timestamp into completion to wait. */
3316 ts = *RTE_MBUF_DYNFIELD(loc->mbuf, txq->ts_offset, uint64_t *);
3317 wci = mlx5_txpp_convert_tx_ts(txq->sh, ts);
3318 if (unlikely(wci < 0))
3319 return MLX5_TXCMP_CODE_SINGLE;
3320 /* Build the WAIT WQE with specified completion. */
3321 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3322 mlx5_tx_cseg_init(txq, loc, wqe, 2, MLX5_OPCODE_WAIT, olx);
3323 mlx5_tx_wseg_init(txq, loc, wqe, wci, olx);
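		/*
		 * The WAIT WQE built above is two segments long - a Control
		 * Segment plus the QSEG referencing the Clock Queue
		 * completion index to wait for - and fits into one WQEBB.
		 */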
3326 return MLX5_TXCMP_CODE_MULTI;
3328 return MLX5_TXCMP_CODE_SINGLE;
3332 * Tx one packet function for multi-segment TSO. Supports all
3333 * types of Tx offloads, uses MLX5_OPCODE_TSO to build WQEs,
3334 * sends one packet per WQE.
3336 * This routine is responsible for storing the processed mbuf
3337 * into the elts ring buffer and updating elts_head.
3340 * Pointer to TX queue structure.
3342 * Pointer to burst routine local context.
3344 * Configured Tx offloads mask. It is fully defined at
3345 * compile time and may be used for optimization.
3348 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3349 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3350 * Local context variables partially updated.
3352 static __rte_always_inline enum mlx5_txcmp_code
3353 mlx5_tx_packet_multi_tso(struct mlx5_txq_data *__rte_restrict txq,
3354 struct mlx5_txq_local *__rte_restrict loc,
3357 struct mlx5_wqe *__rte_restrict wqe;
3358 unsigned int ds, dlen, inlen, ntcp, vlan = 0;
3360 if (MLX5_TXOFF_CONFIG(TXPP)) {
3361 enum mlx5_txcmp_code wret;
3363 /* Generate WAIT for scheduling if requested. */
3364 wret = mlx5_tx_schedule_send(txq, loc, olx);
3365 if (wret == MLX5_TXCMP_CODE_EXIT)
3366 return MLX5_TXCMP_CODE_EXIT;
3367 if (wret == MLX5_TXCMP_CODE_ERROR)
3368 return MLX5_TXCMP_CODE_ERROR;
3371 * Calculate data length to be inlined to estimate
3372 * the required space in WQE ring buffer.
3374 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
3375 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3376 vlan = sizeof(struct rte_vlan_hdr);
3377 inlen = loc->mbuf->l2_len + vlan +
3378 loc->mbuf->l3_len + loc->mbuf->l4_len;
3379 if (unlikely((!inlen || !loc->mbuf->tso_segsz)))
3380 return MLX5_TXCMP_CODE_ERROR;
3381 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
3382 inlen += loc->mbuf->outer_l2_len + loc->mbuf->outer_l3_len;
3383 /* Packet must contain all TSO headers. */
3384 if (unlikely(inlen > MLX5_MAX_TSO_HEADER ||
3385 inlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
3386 inlen > (dlen + vlan)))
3387 return MLX5_TXCMP_CODE_ERROR;
3388 MLX5_ASSERT(inlen >= txq->inlen_mode);
3390 * Check whether there are enough free WQEBBs:
3392 * - Ethernet Segment
3393 * - First Segment of inlined Ethernet data
3394 * - ... data continued ...
3395 * - Data Segments of pointer/min inline type
3397 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
3398 MLX5_ESEG_MIN_INLINE_SIZE +
3400 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3401 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3402 return MLX5_TXCMP_CODE_EXIT;
3403 /* Check for maximal WQE size. */
3404 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3405 return MLX5_TXCMP_CODE_ERROR;
3406 #ifdef MLX5_PMD_SOFT_COUNTERS
3407 /* Update sent data bytes/packets counters. */
3408 ntcp = (dlen - (inlen - vlan) + loc->mbuf->tso_segsz - 1) /
3409 loc->mbuf->tso_segsz;
3411 * One packet will be added for the mbuf itself
3412 * at the end of mlx5_tx_burst, accounted
3413 * via the loc->pkts_sent field.
3416 txq->stats.opackets += ntcp;
3417 txq->stats.obytes += dlen + vlan + ntcp * inlen;
3419 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3420 loc->wqe_last = wqe;
3421 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_TSO, olx);
3422 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 1, olx);
3423 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3424 txq->wqe_ci += (ds + 3) / 4;
3425 loc->wqe_free -= (ds + 3) / 4;
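	/*
	 * A WQEBB holds four 16-byte segments, hence the (ds + 3) / 4
	 * rounding when advancing the WQE index and the free counter.
	 */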
3426 return MLX5_TXCMP_CODE_MULTI;
3430 * Tx one packet function for multi-segment SEND. Supports all
3431 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
3432 * sends one packet per WQE, without any data inlining in the Ethernet Segment.
3435 * This routine is responsible for storing the processed mbuf
3436 * into the elts ring buffer and updating elts_head.
3439 * Pointer to TX queue structure.
3441 * Pointer to burst routine local context.
3443 * Configured Tx offloads mask. It is fully defined at
3444 * compile time and may be used for optimization.
3447 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3448 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3449 * Local context variables partially updated.
3451 static __rte_always_inline enum mlx5_txcmp_code
3452 mlx5_tx_packet_multi_send(struct mlx5_txq_data *__rte_restrict txq,
3453 struct mlx5_txq_local *__rte_restrict loc,
3456 struct mlx5_wqe_dseg *__rte_restrict dseg;
3457 struct mlx5_wqe *__rte_restrict wqe;
3458 unsigned int ds, nseg;
3460 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
3461 if (MLX5_TXOFF_CONFIG(TXPP)) {
3462 enum mlx5_txcmp_code wret;
3464 /* Generate WAIT for scheduling if requested. */
3465 wret = mlx5_tx_schedule_send(txq, loc, olx);
3466 if (wret == MLX5_TXCMP_CODE_EXIT)
3467 return MLX5_TXCMP_CODE_EXIT;
3468 if (wret == MLX5_TXCMP_CODE_ERROR)
3469 return MLX5_TXCMP_CODE_ERROR;
3472 * No inlining at all, it means that CPU cycle saving
3473 * is prioritized at configuration time, we should not
3474 * copy any packet data to the WQE.
3476 nseg = NB_SEGS(loc->mbuf);
3478 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3479 return MLX5_TXCMP_CODE_EXIT;
3480 /* Check for maximal WQE size. */
3481 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3482 return MLX5_TXCMP_CODE_ERROR;
3484 * Some Tx offloads may cause an error if the
3485 * packet is not long enough, check against the
3486 * assumed minimal length.
3488 if (rte_pktmbuf_pkt_len(loc->mbuf) <= MLX5_ESEG_MIN_INLINE_SIZE)
3489 return MLX5_TXCMP_CODE_ERROR;
3490 #ifdef MLX5_PMD_SOFT_COUNTERS
3491 /* Update sent data bytes counter. */
3492 txq->stats.obytes += rte_pktmbuf_pkt_len(loc->mbuf);
3493 if (MLX5_TXOFF_CONFIG(VLAN) &&
3494 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3495 txq->stats.obytes += sizeof(struct rte_vlan_hdr);
3498 * SEND WQE, one WQEBB:
3499 * - Control Segment, SEND opcode
3500 * - Ethernet Segment, optional VLAN, no inline
3501 * - Data Segments, pointer only type
3503 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3504 loc->wqe_last = wqe;
3505 mlx5_tx_cseg_init(txq, loc, wqe, ds, MLX5_OPCODE_SEND, olx);
3506 mlx5_tx_eseg_none(txq, loc, wqe, olx);
3507 dseg = &wqe->dseg[0];
3509 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
3510 struct rte_mbuf *mbuf;
3513 * Zero length segment found, have to
3514 * correct total size of WQE in segments.
3515 * It is supposed to be rare occasion, so
3516 * in normal case (no zero length segments)
3517 * we avoid extra writing to the Control Segment.
3521 wqe->cseg.sq_ds -= RTE_BE32(1);
3523 loc->mbuf = mbuf->next;
3524 rte_pktmbuf_free_seg(mbuf);
3530 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3531 rte_pktmbuf_data_len(loc->mbuf), olx);
3532 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3537 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3538 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3539 loc->mbuf = loc->mbuf->next;
3542 txq->wqe_ci += (ds + 3) / 4;
3543 loc->wqe_free -= (ds + 3) / 4;
3544 return MLX5_TXCMP_CODE_MULTI;
3548 * Tx one packet function for multi-segment SEND. Supports all
3549 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
3550 * sends one packet per WQE, with data inlining in
3551 * Ethernet Segment and minimal Data Segments.
3553 * This routine is responsible for storing the processed mbuf
3554 * into the elts ring buffer and updating elts_head.
3557 * Pointer to TX queue structure.
3559 * Pointer to burst routine local context.
3561 * Configured Tx offloads mask. It is fully defined at
3562 * compile time and may be used for optimization.
3565 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3566 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3567 * Local context variables partially updated.
3569 static __rte_always_inline enum mlx5_txcmp_code
3570 mlx5_tx_packet_multi_inline(struct mlx5_txq_data *__rte_restrict txq,
3571 struct mlx5_txq_local *__rte_restrict loc,
3574 struct mlx5_wqe *__rte_restrict wqe;
3575 unsigned int ds, inlen, dlen, vlan = 0;
3577 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
3578 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
3579 if (MLX5_TXOFF_CONFIG(TXPP)) {
3580 enum mlx5_txcmp_code wret;
3582 /* Generate WAIT for scheduling if requested. */
3583 wret = mlx5_tx_schedule_send(txq, loc, olx);
3584 if (wret == MLX5_TXCMP_CODE_EXIT)
3585 return MLX5_TXCMP_CODE_EXIT;
3586 if (wret == MLX5_TXCMP_CODE_ERROR)
3587 return MLX5_TXCMP_CODE_ERROR;
3590 * First calculate data length to be inlined
3591 * to estimate the required space for WQE.
3593 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
3594 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3595 vlan = sizeof(struct rte_vlan_hdr);
3596 inlen = dlen + vlan;
3597 /* Check against minimal length. */
3598 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
3599 return MLX5_TXCMP_CODE_ERROR;
3600 MLX5_ASSERT(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
3601 if (inlen > txq->inlen_send ||
3602 loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
3603 struct rte_mbuf *mbuf;
3608 * Packet length exceeds the allowed inline
3609 * data length, check whether the minimal
3610 * inlining is required.
3612 if (txq->inlen_mode) {
3613 MLX5_ASSERT(txq->inlen_mode >=
3614 MLX5_ESEG_MIN_INLINE_SIZE);
3615 MLX5_ASSERT(txq->inlen_mode <= txq->inlen_send);
3616 inlen = txq->inlen_mode;
3618 if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE ||
3619 !vlan || txq->vlan_en) {
3621 * VLAN insertion will be done inside by HW.
3622 * It is not the most efficient - the VLAN flag is
3623 * checked twice, but we should process the
3624 * inlining length correctly and take into
3625 * account the VLAN header being inserted.
3627 return mlx5_tx_packet_multi_send
3630 inlen = MLX5_ESEG_MIN_INLINE_SIZE;
3633 * Now we know the minimal amount of data requested
3634 * to inline. Check whether we should inline the buffers
3635 * from the beginning of the chain to eliminate some mbufs.
3638 nxlen = rte_pktmbuf_data_len(mbuf);
3639 if (unlikely(nxlen <= txq->inlen_send)) {
3640 /* We can inline first mbuf at least. */
3641 if (nxlen < inlen) {
3644 /* Scan mbufs till inlen filled. */
3649 nxlen = rte_pktmbuf_data_len(mbuf);
3651 } while (unlikely(nxlen < inlen));
3652 if (unlikely(nxlen > txq->inlen_send)) {
3653 /* We cannot inline entire mbuf. */
3654 smlen = inlen - smlen;
3655 start = rte_pktmbuf_mtod_offset
3656 (mbuf, uintptr_t, smlen);
3663 /* There should be no end of packet here. */
3665 nxlen = inlen + rte_pktmbuf_data_len(mbuf);
3666 } while (unlikely(nxlen < txq->inlen_send));
3668 start = rte_pktmbuf_mtod(mbuf, uintptr_t);
3670 * Check whether we can extend the inline data to align
3671 * the start address of the data buffer to a cacheline.
3674 start = (~start + 1) & (RTE_CACHE_LINE_SIZE - 1);
3675 if (unlikely(start)) {
3677 if (start <= txq->inlen_send)
3682 * Check whether there are enough free WQEBBs:
3684 * - Ethernet Segment
3685 * - First Segment of inlined Ethernet data
3686 * - ... data continued ...
3687 * - Data Segments of pointer/min inline type
3689 * Estimate the number of Data Segments conservatively,
3690 * supposing no mbufs are being freed during inlining.
3692 MLX5_ASSERT(inlen <= txq->inlen_send);
3693 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
3694 MLX5_ESEG_MIN_INLINE_SIZE +
3696 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3697 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3698 return MLX5_TXCMP_CODE_EXIT;
3699 /* Check for maximal WQE size. */
3700 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3701 return MLX5_TXCMP_CODE_ERROR;
3702 #ifdef MLX5_PMD_SOFT_COUNTERS
3703 /* Update sent data bytes/packets counters. */
3704 txq->stats.obytes += dlen + vlan;
3706 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3707 loc->wqe_last = wqe;
3708 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_SEND, olx);
3709 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 0, olx);
3710 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3711 txq->wqe_ci += (ds + 3) / 4;
3712 loc->wqe_free -= (ds + 3) / 4;
3713 return MLX5_TXCMP_CODE_MULTI;
3717 * Tx burst function for multi-segment packets. Supports all
3718 * types of Tx offloads, uses MLX5_OPCODE_SEND/TSO to build WQEs,
3719 * sends one packet per WQE. Function stops sending if it
3720 * encounters the single-segment packet.
3722 * This routine is responsible for storing the processed mbuf
3723 * into the elts ring buffer and updating elts_head.
3726 * Pointer to TX queue structure.
3728 * Packets to transmit.
3730 * Number of packets in array.
3732 * Pointer to burst routine local context.
3734 * Configured Tx offloads mask. It is fully defined at
3735 * compile time and may be used for optimization.
3738 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3739 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3740 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
3741 * MLX5_TXCMP_CODE_TSO - TSO single-segment packet encountered.
3742 * Local context variables updated.
3744 static __rte_always_inline enum mlx5_txcmp_code
3745 mlx5_tx_burst_mseg(struct mlx5_txq_data *__rte_restrict txq,
3746 struct rte_mbuf **__rte_restrict pkts,
3747 unsigned int pkts_n,
3748 struct mlx5_txq_local *__rte_restrict loc,
3751 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3752 MLX5_ASSERT(pkts_n > loc->pkts_sent);
3753 pkts += loc->pkts_sent + 1;
3754 pkts_n -= loc->pkts_sent;
3756 enum mlx5_txcmp_code ret;
3758 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
3760 * Estimate the number of free elts quickly but
3761 * conservatively. Some segment may be fully inlined
3762 * and freed, ignore this here - precise estimation is costly.
3765 if (loc->elts_free < NB_SEGS(loc->mbuf))
3766 return MLX5_TXCMP_CODE_EXIT;
3767 if (MLX5_TXOFF_CONFIG(TSO) &&
3768 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)) {
3769 /* Proceed with multi-segment TSO. */
3770 ret = mlx5_tx_packet_multi_tso(txq, loc, olx);
3771 } else if (MLX5_TXOFF_CONFIG(INLINE)) {
3772 /* Proceed with multi-segment SEND with inlining. */
3773 ret = mlx5_tx_packet_multi_inline(txq, loc, olx);
3775 /* Proceed with multi-segment SEND w/o inlining. */
3776 ret = mlx5_tx_packet_multi_send(txq, loc, olx);
3778 if (ret == MLX5_TXCMP_CODE_EXIT)
3779 return MLX5_TXCMP_CODE_EXIT;
3780 if (ret == MLX5_TXCMP_CODE_ERROR)
3781 return MLX5_TXCMP_CODE_ERROR;
3782 /* WQE is built, go to the next packet. */
3785 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3786 return MLX5_TXCMP_CODE_EXIT;
3787 loc->mbuf = *pkts++;
3789 rte_prefetch0(*pkts);
3790 if (likely(NB_SEGS(loc->mbuf) > 1))
3792 /* Here ends the series of multi-segment packets. */
3793 if (MLX5_TXOFF_CONFIG(TSO) &&
3794 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
3795 return MLX5_TXCMP_CODE_TSO;
3796 return MLX5_TXCMP_CODE_SINGLE;
3802 * Tx burst function for single-segment packets with TSO.
3803 * Supports all types of Tx offloads, except multi-packets.
3804 * Uses MLX5_OPCODE_TSO to build WQEs, sends one packet per WQE.
3805 * Function stops sending if it encounters the multi-segment
3806 * packet or packet without TSO requested.
3808 * The routine is responsible for storing the processed mbuf
3809 * into the elts ring buffer and updating elts_head if inline
3810 * offload is requested, due to the possible early freeing
3811 * of the inlined mbufs (can not store the pkts array in elts as a batch).
3815 * Pointer to TX queue structure.
3817 * Packets to transmit.
3819 * Number of packets in array.
3821 * Pointer to burst routine local context.
3823 * Configured Tx offloads mask. It is fully defined at
3824 * compile time and may be used for optimization.
3827 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3828 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3829 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
3830 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
3831 * Local context variables updated.
3833 static __rte_always_inline enum mlx5_txcmp_code
3834 mlx5_tx_burst_tso(struct mlx5_txq_data *__rte_restrict txq,
3835 struct rte_mbuf **__rte_restrict pkts,
3836 unsigned int pkts_n,
3837 struct mlx5_txq_local *__rte_restrict loc,
3840 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3841 MLX5_ASSERT(pkts_n > loc->pkts_sent);
3842 pkts += loc->pkts_sent + 1;
3843 pkts_n -= loc->pkts_sent;
3845 struct mlx5_wqe_dseg *__rte_restrict dseg;
3846 struct mlx5_wqe *__rte_restrict wqe;
3847 unsigned int ds, dlen, hlen, ntcp, vlan = 0;
3850 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
3851 if (MLX5_TXOFF_CONFIG(TXPP)) {
3852 enum mlx5_txcmp_code wret;
3854 /* Generate WAIT for scheduling if requested. */
3855 wret = mlx5_tx_schedule_send(txq, loc, olx);
3856 if (wret == MLX5_TXCMP_CODE_EXIT)
3857 return MLX5_TXCMP_CODE_EXIT;
3858 if (wret == MLX5_TXCMP_CODE_ERROR)
3859 return MLX5_TXCMP_CODE_ERROR;
3861 dlen = rte_pktmbuf_data_len(loc->mbuf);
3862 if (MLX5_TXOFF_CONFIG(VLAN) &&
3863 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
3864 vlan = sizeof(struct rte_vlan_hdr);
3867 * First calculate the WQE size to check
3868 * whether we have enough space in ring buffer.
3870 hlen = loc->mbuf->l2_len + vlan +
3871 loc->mbuf->l3_len + loc->mbuf->l4_len;
3872 if (unlikely((!hlen || !loc->mbuf->tso_segsz)))
3873 return MLX5_TXCMP_CODE_ERROR;
3874 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
3875 hlen += loc->mbuf->outer_l2_len +
3876 loc->mbuf->outer_l3_len;
3877 /* Segment must contain all TSO headers. */
3878 if (unlikely(hlen > MLX5_MAX_TSO_HEADER ||
3879 hlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
3880 hlen > (dlen + vlan)))
3881 return MLX5_TXCMP_CODE_ERROR;
3883 * Check whether there are enough free WQEBBs:
3885 * - Ethernet Segment
3886 * - First Segment of inlined Ethernet data
3887 * - ... data continued ...
3888 * - Finishing Data Segment of pointer type
3890 ds = 4 + (hlen - MLX5_ESEG_MIN_INLINE_SIZE +
3891 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
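		/*
		 * The base of 4 segments covers the Control Segment, the
		 * Ethernet Segment with the first MLX5_ESEG_MIN_INLINE_SIZE
		 * inlined bytes (two segments) and the trailing pointer Data
		 * Segment; the remaining inlined header bytes are rounded up
		 * to whole 16-byte segments.
		 */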
3892 if (loc->wqe_free < ((ds + 3) / 4))
3893 return MLX5_TXCMP_CODE_EXIT;
3894 #ifdef MLX5_PMD_SOFT_COUNTERS
3895 /* Update sent data bytes/packets counters. */
3896 ntcp = (dlen + vlan - hlen +
3897 loc->mbuf->tso_segsz - 1) /
3898 loc->mbuf->tso_segsz;
3900 * One packet will be added for the mbuf itself at the end
3901 * of mlx5_tx_burst, accounted via the loc->pkts_sent field.
3904 txq->stats.opackets += ntcp;
3905 txq->stats.obytes += dlen + vlan + ntcp * hlen;
3908 * Build the TSO WQE:
3910 * - Ethernet Segment with hlen bytes inlined
3911 * - Data Segment of pointer type
3913 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3914 loc->wqe_last = wqe;
3915 mlx5_tx_cseg_init(txq, loc, wqe, ds,
3916 MLX5_OPCODE_TSO, olx);
3917 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan, hlen, 1, olx);
3918 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) + hlen - vlan;
3919 dlen -= hlen - vlan;
3920 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
3922 * WQE is built, update the loop parameters
3923 * and go to the next packet.
3925 txq->wqe_ci += (ds + 3) / 4;
3926 loc->wqe_free -= (ds + 3) / 4;
3927 if (MLX5_TXOFF_CONFIG(INLINE))
3928 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3932 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3933 return MLX5_TXCMP_CODE_EXIT;
3934 loc->mbuf = *pkts++;
3936 rte_prefetch0(*pkts);
3937 if (MLX5_TXOFF_CONFIG(MULTI) &&
3938 unlikely(NB_SEGS(loc->mbuf) > 1))
3939 return MLX5_TXCMP_CODE_MULTI;
3940 if (likely(!(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)))
3941 return MLX5_TXCMP_CODE_SINGLE;
3942 /* Continue with the next TSO packet. */
3948 * Analyze the packet and select the best method to send.
3951 * Pointer to TX queue structure.
3953 * Pointer to burst routine local context.
3955 * Configured Tx offloads mask. It is fully defined at
3956 * compile time and may be used for optimization.
3958 * The predefined flag whether to do the complete check for
3959 * multi-segment packets and TSO.
3962 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
3963 * MLX5_TXCMP_CODE_TSO - TSO required, use TSO/LSO.
3964 * MLX5_TXCMP_CODE_SINGLE - single-segment packet, use SEND.
3965 * MLX5_TXCMP_CODE_EMPW - single-segment packet, use MPW.
3967 static __rte_always_inline enum mlx5_txcmp_code
3968 mlx5_tx_able_to_empw(struct mlx5_txq_data *__rte_restrict txq,
3969 struct mlx5_txq_local *__rte_restrict loc,
3973 /* Check for multi-segment packet. */
3975 MLX5_TXOFF_CONFIG(MULTI) &&
3976 unlikely(NB_SEGS(loc->mbuf) > 1))
3977 return MLX5_TXCMP_CODE_MULTI;
3978 /* Check for TSO packet. */
3980 MLX5_TXOFF_CONFIG(TSO) &&
3981 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
3982 return MLX5_TXCMP_CODE_TSO;
3983 /* Check if eMPW is enabled at all. */
3984 if (!MLX5_TXOFF_CONFIG(EMPW))
3985 return MLX5_TXCMP_CODE_SINGLE;
3986 /* Check if eMPW can be engaged. */
3987 if (MLX5_TXOFF_CONFIG(VLAN) &&
3988 unlikely(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) &&
3989 (!MLX5_TXOFF_CONFIG(INLINE) ||
3990 unlikely((rte_pktmbuf_data_len(loc->mbuf) +
3991 sizeof(struct rte_vlan_hdr)) > txq->inlen_empw))) {
3993 * eMPW does not support VLAN insertion offload,
3994 * we would have to inline the entire packet, but the
3995 * packet is too long for inlining.
3997 return MLX5_TXCMP_CODE_SINGLE;
3999 return MLX5_TXCMP_CODE_EMPW;
4003 * Check whether the next packet attributes match the eMPW batch ones.
4004 * In addition, for legacy MPW the packet length is checked as well.
4007 * Pointer to TX queue structure.
4009 * Pointer to Ethernet Segment of eMPW batch.
4011 * Pointer to burst routine local context.
4013 * Length of previous packet in MPW descriptor.
4015 * Configured Tx offloads mask. It is fully defined at
4016 * compile time and may be used for optimization.
4019 * true - packet match with eMPW batch attributes.
4020 * false - no match, eMPW should be restarted.
4022 static __rte_always_inline bool
4023 mlx5_tx_match_empw(struct mlx5_txq_data *__rte_restrict txq,
4024 struct mlx5_wqe_eseg *__rte_restrict es,
4025 struct mlx5_txq_local *__rte_restrict loc,
4029 uint8_t swp_flags = 0;
4031 /* Compare the checksum flags, if any. */
4032 if (MLX5_TXOFF_CONFIG(CSUM) &&
4033 txq_ol_cksum_to_cs(loc->mbuf) != es->cs_flags)
4035 /* Compare the Software Parser offsets and flags. */
4036 if (MLX5_TXOFF_CONFIG(SWP) &&
4037 (es->swp_offs != txq_mbuf_to_swp(loc, &swp_flags, olx) ||
4038 es->swp_flags != swp_flags))
4040 /* Compare the metadata field, if needed. */
4041 if (MLX5_TXOFF_CONFIG(METADATA) &&
4042 es->metadata != (loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
4043 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0))
4045 /* Legacy MPW can send packets with the same length only. */
4046 if (MLX5_TXOFF_CONFIG(MPW) &&
4047 dlen != rte_pktmbuf_data_len(loc->mbuf))
4049 /* There must be no VLAN packets in eMPW loop. */
4050 if (MLX5_TXOFF_CONFIG(VLAN))
4051 MLX5_ASSERT(!(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT));
4052 /* Check if the scheduling is requested. */
4053 if (MLX5_TXOFF_CONFIG(TXPP) &&
4054 loc->mbuf->ol_flags & txq->ts_mask)
4060 * Update send loop variables and WQE for eMPW loop
4061 * without data inlining. Number of Data Segments is
4062 * equal to the number of sent packets.
4065 * Pointer to TX queue structure.
4067 * Pointer to burst routine local context.
4069 * Number of packets (equal to the number of Data Segments).
4071 * Accumulated statistics, number of bytes sent.
4073 * Configured Tx offloads mask. It is fully defined at
4074 * compile time and may be used for optimization.
4080 static __rte_always_inline void
4081 mlx5_tx_sdone_empw(struct mlx5_txq_data *__rte_restrict txq,
4082 struct mlx5_txq_local *__rte_restrict loc,
4085 unsigned int olx __rte_unused)
4087 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
4088 #ifdef MLX5_PMD_SOFT_COUNTERS
4089 /* Update sent data bytes counter. */
4090 txq->stats.obytes += slen;
4094 loc->elts_free -= ds;
4095 loc->pkts_sent += ds;
4097 loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
4098 txq->wqe_ci += (ds + 3) / 4;
4099 loc->wqe_free -= (ds + 3) / 4;
4103 * Update send loop variables and WQE for eMPW loop
4104 * with data inlining. Gets the size of the descriptors
4105 * and data pushed to the WQE.
4108 * Pointer to TX queue structure.
4110 * Pointer to burst routine local context.
4112 * Total size of descriptor/data in bytes.
4114 * Accumulated statistics, data bytes sent.
4116 * The base WQE for the eMPW/MPW descriptor.
4118 * Configured Tx offloads mask. It is fully defined at
4119 * compile time and may be used for optimization.
4125 static __rte_always_inline void
4126 mlx5_tx_idone_empw(struct mlx5_txq_data *__rte_restrict txq,
4127 struct mlx5_txq_local *__rte_restrict loc,
4130 struct mlx5_wqe *__rte_restrict wqem,
4131 unsigned int olx __rte_unused)
4133 struct mlx5_wqe_dseg *dseg = &wqem->dseg[0];
4135 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
4136 #ifdef MLX5_PMD_SOFT_COUNTERS
4137 /* Update sent data bytes counter. */
4138 txq->stats.obytes += slen;
4142 if (MLX5_TXOFF_CONFIG(MPW) && dseg->bcount == RTE_BE32(0)) {
4144 * If the legacy MPW session contains the inline packets,
4145 * we should set the length of the single inline data segment
4146 * and align the total length to the segment size.
4148 MLX5_ASSERT(len > sizeof(dseg->bcount));
4149 dseg->bcount = rte_cpu_to_be_32((len - sizeof(dseg->bcount)) |
4150 MLX5_ETH_WQE_DATA_INLINE);
4151 len = (len + MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE + 2;
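		/*
		 * Convert the accumulated inline byte count into 16-byte
		 * segments and add 2 for the Control and Ethernet Segments
		 * of the MPW WQE.
		 */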
4154 * The session is not legacy MPW or contains the
4155 * data buffer pointer segments.
4157 MLX5_ASSERT((len % MLX5_WSEG_SIZE) == 0);
4158 len = len / MLX5_WSEG_SIZE + 2;
4160 wqem->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | len);
4161 txq->wqe_ci += (len + 3) / 4;
4162 loc->wqe_free -= (len + 3) / 4;
4163 loc->wqe_last = wqem;
4167 * The set of Tx burst functions for single-segment packets
4168 * without TSO and with Multi-Packet Writing feature support.
4169 * Supports all types of Tx offloads, except multi-packets and TSO.
4172 * Uses MLX5_OPCODE_EMPW to build WQEs if possible and sends
4173 * as many packets per WQE as it can. If eMPW is not configured
4174 * or the packet can not be sent with eMPW (VLAN insertion) the
4175 * ordinary SEND opcode is used and only one packet is placed in the WQE.
4178 * Functions stop sending if they encounter a multi-segment
4179 * packet or a packet with TSO requested.
4181 * The routines are responsible for storing the processed mbuf
4182 * into the elts ring buffer and updating elts_head if the inlining
4183 * offload is requested. Otherwise the copying of mbufs to elts
4184 * can be postponed and completed at the end of the burst routine.
4187 * Pointer to TX queue structure.
4189 * Packets to transmit.
4191 * Number of packets in array.
4193 * Pointer to burst routine local context.
4195 * Configured Tx offloads mask. It is fully defined at
4196 * compile time and may be used for optimization.
4199 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
4200 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
4201 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
4202 * MLX5_TXCMP_CODE_TSO - TSO packet encountered.
4203 * MLX5_TXCMP_CODE_SINGLE - used inside functions set.
4204 * MLX5_TXCMP_CODE_EMPW - used inside functions set.
4206 * Local context variables updated.
4209 * The routine sends packets with MLX5_OPCODE_EMPW
4210 * without inlining, this is a dedicated optimized branch.
4211 * No VLAN insertion is supported.
4213 static __rte_always_inline enum mlx5_txcmp_code
4214 mlx5_tx_burst_empw_simple(struct mlx5_txq_data *__rte_restrict txq,
4215 struct rte_mbuf **__rte_restrict pkts,
4216 unsigned int pkts_n,
4217 struct mlx5_txq_local *__rte_restrict loc,
4221 * The subroutine is a part of mlx5_tx_burst_single()
4222 * and sends single-segment packets with the eMPW opcode
4223 * without data inlining.
4225 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
4226 MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
4227 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
4228 MLX5_ASSERT(pkts_n > loc->pkts_sent);
4229 static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
4230 pkts += loc->pkts_sent + 1;
4231 pkts_n -= loc->pkts_sent;
4233 struct mlx5_wqe_dseg *__rte_restrict dseg;
4234 struct mlx5_wqe_eseg *__rte_restrict eseg;
4235 enum mlx5_txcmp_code ret;
4236 unsigned int part, loop;
4237 unsigned int slen = 0;
4240 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
4241 if (MLX5_TXOFF_CONFIG(TXPP)) {
4242 enum mlx5_txcmp_code wret;
4244 /* Generate WAIT for scheduling if requested. */
4245 wret = mlx5_tx_schedule_send(txq, loc, olx);
4246 if (wret == MLX5_TXCMP_CODE_EXIT)
4247 return MLX5_TXCMP_CODE_EXIT;
4248 if (wret == MLX5_TXCMP_CODE_ERROR)
4249 return MLX5_TXCMP_CODE_ERROR;
4251 part = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
4252 MLX5_MPW_MAX_PACKETS :
4253 MLX5_EMPW_MAX_PACKETS);
4254 if (unlikely(loc->elts_free < part)) {
4255 /* We do not have enough elts to store all mbufs. */
4256 if (unlikely(loc->elts_free < MLX5_EMPW_MIN_PACKETS))
4257 return MLX5_TXCMP_CODE_EXIT;
4258 /* But we are still able to send at least a minimal eMPW. */
4259 part = loc->elts_free;
4261 /* Check whether we have enough WQEs */
4262 if (unlikely(loc->wqe_free < ((2 + part + 3) / 4))) {
4263 if (unlikely(loc->wqe_free <
4264 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
4265 return MLX5_TXCMP_CODE_EXIT;
4266 part = (loc->wqe_free * 4) - 2;
4268 if (likely(part > 1))
4269 rte_prefetch0(*pkts);
4270 loc->wqe_last = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4272 * Build eMPW title WQEBB:
4273 * - Control Segment, eMPW opcode
4274 * - Ethernet Segment, no inline
4276 mlx5_tx_cseg_init(txq, loc, loc->wqe_last, part + 2,
4277 MLX5_OPCODE_ENHANCED_MPSW, olx);
4278 mlx5_tx_eseg_none(txq, loc, loc->wqe_last,
4279 olx & ~MLX5_TXOFF_CONFIG_VLAN);
4280 eseg = &loc->wqe_last->eseg;
4281 dseg = &loc->wqe_last->dseg[0];
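/*
 * Layout sketch of the non-inline eMPW WQE being built (assuming
 * 16-byte WSEGs): the title WQEBB carries the Control and Ethernet
 * Segments plus the first two pointer Data Segments; every following
 * WQEBB carries up to four more Data Segments, one per packet. This is
 * why the DS count passed above is "part + 2" and the ring advance at
 * the end of the loop is (2 + part + 3) / 4 WQEBBs.
 */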
4283 /* Store the packet length for legacy MPW. */
4284 if (MLX5_TXOFF_CONFIG(MPW))
4285 eseg->mss = rte_cpu_to_be_16
4286 (rte_pktmbuf_data_len(loc->mbuf));
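/*
 * For legacy MPW all packets of a session must have the same size:
 * the session packet length is kept in the mss field filled here,
 * and a differing length is one of the attributes that makes
 * mlx5_tx_match_empw() close the session (see the batch checks below).
 */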
4288 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
4289 #ifdef MLX5_PMD_SOFT_COUNTERS
4290 /* Update sent data bytes counter. */
4295 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
4297 if (unlikely(--loop == 0))
4299 loc->mbuf = *pkts++;
4300 if (likely(loop > 1))
4301 rte_prefetch0(*pkts);
4302 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4304 * Unroll the completion code to avoid
4305 * returning a variable value - it results in
4306 * unoptimized subsequent checks in the caller.
4308 if (ret == MLX5_TXCMP_CODE_MULTI) {
4310 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4311 if (unlikely(!loc->elts_free ||
4313 return MLX5_TXCMP_CODE_EXIT;
4314 return MLX5_TXCMP_CODE_MULTI;
4316 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
4317 if (ret == MLX5_TXCMP_CODE_TSO) {
4319 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4320 if (unlikely(!loc->elts_free ||
4322 return MLX5_TXCMP_CODE_EXIT;
4323 return MLX5_TXCMP_CODE_TSO;
4325 if (ret == MLX5_TXCMP_CODE_SINGLE) {
4327 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4328 if (unlikely(!loc->elts_free ||
4330 return MLX5_TXCMP_CODE_EXIT;
4331 return MLX5_TXCMP_CODE_SINGLE;
4333 if (ret != MLX5_TXCMP_CODE_EMPW) {
4336 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4337 return MLX5_TXCMP_CODE_ERROR;
4340 * Check whether packet parameters coincide
4341 * within the assumed eMPW batch:
4342 * - checksum settings
4344 * - software parser settings
4345 * - packet length (legacy MPW only)
4346 * - scheduling is not required
4348 if (!mlx5_tx_match_empw(txq, eseg, loc, dlen, olx)) {
4351 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4352 if (unlikely(!loc->elts_free ||
4354 return MLX5_TXCMP_CODE_EXIT;
4358 /* Packet attributes match, continue the same eMPW. */
4360 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
4361 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
4363 /* eMPW is built successfully, update loop parameters. */
4365 MLX5_ASSERT(pkts_n >= part);
4366 #ifdef MLX5_PMD_SOFT_COUNTERS
4367 /* Update sent data bytes counter. */
4368 txq->stats.obytes += slen;
4370 loc->elts_free -= part;
4371 loc->pkts_sent += part;
4372 txq->wqe_ci += (2 + part + 3) / 4;
4373 loc->wqe_free -= (2 + part + 3) / 4;
4375 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
4376 return MLX5_TXCMP_CODE_EXIT;
4377 loc->mbuf = *pkts++;
4378 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4379 if (unlikely(ret != MLX5_TXCMP_CODE_EMPW))
4381 /* Continue sending eMPW batches. */
4387 * The routine sends packets with MLX5_OPCODE_EMPW
4388 * with inlining; VLAN insertion is optionally supported.
4390 static __rte_always_inline enum mlx5_txcmp_code
4391 mlx5_tx_burst_empw_inline(struct mlx5_txq_data *__rte_restrict txq,
4392 struct rte_mbuf **__rte_restrict pkts,
4393 unsigned int pkts_n,
4394 struct mlx5_txq_local *__rte_restrict loc,
4398 * This subroutine is part of mlx5_tx_burst_single()
4399 * and sends single-segment packets with the eMPW opcode
4400 * with data inlining.
4402 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
4403 MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
4404 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
4405 MLX5_ASSERT(pkts_n > loc->pkts_sent);
4406 static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
4407 pkts += loc->pkts_sent + 1;
4408 pkts_n -= loc->pkts_sent;
4410 struct mlx5_wqe_dseg *__rte_restrict dseg;
4411 struct mlx5_wqe *__rte_restrict wqem;
4412 enum mlx5_txcmp_code ret;
4413 unsigned int room, part, nlim;
4414 unsigned int slen = 0;
4416 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
4417 if (MLX5_TXOFF_CONFIG(TXPP)) {
4418 enum mlx5_txcmp_code wret;
4420 /* Generate WAIT for scheduling if requested. */
4421 wret = mlx5_tx_schedule_send(txq, loc, olx);
4422 if (wret == MLX5_TXCMP_CODE_EXIT)
4423 return MLX5_TXCMP_CODE_EXIT;
4424 if (wret == MLX5_TXCMP_CODE_ERROR)
4425 return MLX5_TXCMP_CODE_ERROR;
4428 * Limit the number of packets in one WQE
4429 * to improve the latency of CQE generation.
4431 nlim = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
4432 MLX5_MPW_INLINE_MAX_PACKETS :
4433 MLX5_EMPW_MAX_PACKETS);
4434 /* Check whether we have the minimal amount of WQEs. */
4435 if (unlikely(loc->wqe_free <
4436 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
4437 return MLX5_TXCMP_CODE_EXIT;
4438 if (likely(pkts_n > 1))
4439 rte_prefetch0(*pkts);
4440 wqem = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4442 * Build eMPW title WQEBB:
4443 * - Control Segment, eMPW opcode, zero DS
4444 * - Ethernet Segment, no inline
4446 mlx5_tx_cseg_init(txq, loc, wqem, 0,
4447 MLX5_OPCODE_ENHANCED_MPSW, olx);
4448 mlx5_tx_eseg_none(txq, loc, wqem,
4449 olx & ~MLX5_TXOFF_CONFIG_VLAN);
4450 dseg = &wqem->dseg[0];
4451 /* Store the packet length for legacy MPW. */
4452 if (MLX5_TXOFF_CONFIG(MPW))
4453 wqem->eseg.mss = rte_cpu_to_be_16
4454 (rte_pktmbuf_data_len(loc->mbuf));
4455 room = RTE_MIN(MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE,
4456 loc->wqe_free) * MLX5_WQE_SIZE -
4457 MLX5_WQE_CSEG_SIZE -
4459 /* Limit the room for legacy MPW sessions for performance. */
4460 if (MLX5_TXOFF_CONFIG(MPW))
4461 room = RTE_MIN(room,
4462 RTE_MAX(txq->inlen_empw +
4463 sizeof(dseg->bcount) +
4464 (MLX5_TXOFF_CONFIG(VLAN) ?
4465 sizeof(struct rte_vlan_hdr) : 0),
4466 MLX5_MPW_INLINE_MAX_PACKETS *
4467 MLX5_WQE_DSEG_SIZE));
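/*
 * "room" is the byte budget left for Data Segments in this session:
 * the number of free WQEBBs (capped by the maximal WQE size supported
 * by the device) times MLX5_WQE_SIZE, minus the Control and Ethernet
 * Segments of the title WQEBB. For legacy MPW it is further clamped to
 * the larger of the inline budget (inlen_empw plus the bcount word and
 * an optional VLAN header) and the space needed for
 * MLX5_MPW_INLINE_MAX_PACKETS pointer Data Segments.
 */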
4468 /* Build WQE till we have space, packets and resources. */
4471 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
4472 uint8_t *dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
4475 MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
4476 MLX5_ASSERT((room % MLX5_WQE_DSEG_SIZE) == 0);
4477 MLX5_ASSERT((uintptr_t)dseg < (uintptr_t)txq->wqes_end);
4479 * Some Tx offloads may cause an error if
4480 * the packet is not long enough; check against
4481 * the assumed minimal length.
4483 if (unlikely(dlen <= MLX5_ESEG_MIN_INLINE_SIZE)) {
4485 if (unlikely(!part))
4486 return MLX5_TXCMP_CODE_ERROR;
4488 * We have some successfully built
4489 * packet Data Segments to send.
4491 mlx5_tx_idone_empw(txq, loc, part,
4493 return MLX5_TXCMP_CODE_ERROR;
4495 /* Inline or not inline - that's the Question. */
4496 if (dlen > txq->inlen_empw ||
4497 loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE)
4499 if (MLX5_TXOFF_CONFIG(MPW)) {
4500 if (dlen > txq->inlen_send)
4504 /* Open new inline MPW session. */
4505 tlen += sizeof(dseg->bcount);
4506 dseg->bcount = RTE_BE32(0);
4508 (dseg, sizeof(dseg->bcount));
4511 * No pointer and inline descriptor
4512 * intermix for legacy MPW sessions.
4514 if (wqem->dseg[0].bcount)
4518 tlen = sizeof(dseg->bcount) + dlen;
4520 /* Inline entire packet, optional VLAN insertion. */
4521 if (MLX5_TXOFF_CONFIG(VLAN) &&
4522 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
4524 * The packet length must be checked in
4525 * mlx5_tx_able_to_empw() and the packet is
4526 * guaranteed to fit into the inline length.
4529 sizeof(struct rte_vlan_hdr)) <=
4531 tlen += sizeof(struct rte_vlan_hdr);
4534 dseg = mlx5_tx_dseg_vlan(txq, loc, dseg,
4536 #ifdef MLX5_PMD_SOFT_COUNTERS
4537 /* Update sent data bytes counter. */
4538 slen += sizeof(struct rte_vlan_hdr);
4543 dseg = mlx5_tx_dseg_empw(txq, loc, dseg,
4546 if (!MLX5_TXOFF_CONFIG(MPW))
4547 tlen = RTE_ALIGN(tlen, MLX5_WSEG_SIZE);
4548 MLX5_ASSERT(room >= tlen);
4551 * Packet data are completely inlined,
4552 * free the packet immediately.
4554 rte_pktmbuf_free_seg(loc->mbuf);
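/*
 * Note on the alignment above: with Enhanced MPW every inline Data
 * Segment starts on a WSEG boundary, so tlen is rounded up before
 * being charged against "room". A legacy MPW inline session packs the
 * packets back to back instead and is aligned only once, when it is
 * closed in mlx5_tx_idone_empw().
 */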
4558 * No pointer and inline descriptor
4559 * intermix for legacy MPW sessions.
4561 if (MLX5_TXOFF_CONFIG(MPW) &&
4563 wqem->dseg[0].bcount == RTE_BE32(0))
4566 * Non-inlinable VLAN packets are
4567 * processed outside of this routine.
4569 MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
4570 if (MLX5_TXOFF_CONFIG(VLAN))
4571 MLX5_ASSERT(!(loc->mbuf->ol_flags &
4573 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
4574 /* We have to store mbuf in elts.*/
4575 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
4576 room -= MLX5_WQE_DSEG_SIZE;
4577 /* Ring buffer wraparound is checked at the loop end.*/
4580 #ifdef MLX5_PMD_SOFT_COUNTERS
4581 /* Update sent data bytes counter. */
4587 if (unlikely(!pkts_n || !loc->elts_free)) {
4589 * We have no resources/packets to
4590 * continue building descriptors.
4593 mlx5_tx_idone_empw(txq, loc, part,
4595 return MLX5_TXCMP_CODE_EXIT;
4597 loc->mbuf = *pkts++;
4598 if (likely(pkts_n > 1))
4599 rte_prefetch0(*pkts);
4600 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4602 * Unroll the completion code to avoid
4603 * returning a variable value - it results in
4604 * unoptimized subsequent checks in the caller.
4606 if (ret == MLX5_TXCMP_CODE_MULTI) {
4608 mlx5_tx_idone_empw(txq, loc, part,
4610 if (unlikely(!loc->elts_free ||
4612 return MLX5_TXCMP_CODE_EXIT;
4613 return MLX5_TXCMP_CODE_MULTI;
4615 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
4616 if (ret == MLX5_TXCMP_CODE_TSO) {
4618 mlx5_tx_idone_empw(txq, loc, part,
4620 if (unlikely(!loc->elts_free ||
4622 return MLX5_TXCMP_CODE_EXIT;
4623 return MLX5_TXCMP_CODE_TSO;
4625 if (ret == MLX5_TXCMP_CODE_SINGLE) {
4627 mlx5_tx_idone_empw(txq, loc, part,
4629 if (unlikely(!loc->elts_free ||
4631 return MLX5_TXCMP_CODE_EXIT;
4632 return MLX5_TXCMP_CODE_SINGLE;
4634 if (ret != MLX5_TXCMP_CODE_EMPW) {
4637 mlx5_tx_idone_empw(txq, loc, part,
4639 return MLX5_TXCMP_CODE_ERROR;
4641 /* Check if we have minimal room left. */
4643 if (unlikely(!nlim || room < MLX5_WQE_DSEG_SIZE))
4646 * Check whether packet parameters coincide
4647 * within the assumed eMPW batch:
4648 * - checksum settings
4650 * - software parser settings
4651 * - packet length (legacy MPW only)
4652 * - scheduling is not required
4654 if (!mlx5_tx_match_empw(txq, &wqem->eseg,
4657 /* Packet attributes match, continue the same eMPW. */
4658 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
4659 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
4662 * We get here to close an existing eMPW
4663 * session and start a new one.
4665 MLX5_ASSERT(pkts_n);
4667 if (unlikely(!part))
4668 return MLX5_TXCMP_CODE_EXIT;
4669 mlx5_tx_idone_empw(txq, loc, part, slen, wqem, olx);
4670 if (unlikely(!loc->elts_free ||
4672 return MLX5_TXCMP_CODE_EXIT;
4673 /* Continue the loop with a new eMPW session. */
4679 * The routine sends packets with ordinary MLX5_OPCODE_SEND.
4680 * Data inlining and VLAN insertion are supported.
4682 static __rte_always_inline enum mlx5_txcmp_code
4683 mlx5_tx_burst_single_send(struct mlx5_txq_data *__rte_restrict txq,
4684 struct rte_mbuf **__rte_restrict pkts,
4685 unsigned int pkts_n,
4686 struct mlx5_txq_local *__rte_restrict loc,
4690 * This subroutine is part of mlx5_tx_burst_single()
4691 * and sends single-segment packets with the SEND opcode.
4693 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
4694 MLX5_ASSERT(pkts_n > loc->pkts_sent);
4695 pkts += loc->pkts_sent + 1;
4696 pkts_n -= loc->pkts_sent;
4698 struct mlx5_wqe *__rte_restrict wqe;
4699 enum mlx5_txcmp_code ret;
4701 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
4702 if (MLX5_TXOFF_CONFIG(TXPP)) {
4703 enum mlx5_txcmp_code wret;
4705 /* Generate WAIT for scheduling if requested. */
4706 wret = mlx5_tx_schedule_send(txq, loc, olx);
4707 if (wret == MLX5_TXCMP_CODE_EXIT)
4708 return MLX5_TXCMP_CODE_EXIT;
4709 if (wret == MLX5_TXCMP_CODE_ERROR)
4710 return MLX5_TXCMP_CODE_ERROR;
4712 if (MLX5_TXOFF_CONFIG(INLINE)) {
4713 unsigned int inlen, vlan = 0;
4715 inlen = rte_pktmbuf_data_len(loc->mbuf);
4716 if (MLX5_TXOFF_CONFIG(VLAN) &&
4717 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
4718 vlan = sizeof(struct rte_vlan_hdr);
4720 static_assert((sizeof(struct rte_vlan_hdr) +
4721 sizeof(struct rte_ether_hdr)) ==
4722 MLX5_ESEG_MIN_INLINE_SIZE,
4723 "invalid min inline data size");
4726 * If inlining is enabled at configuration time
4727 * the limit must not be less than the minimal size.
4728 * Otherwise we would need an extra check on the data
4729 * size to avoid crashes due to length overflow.
4731 MLX5_ASSERT(txq->inlen_send >=
4732 MLX5_ESEG_MIN_INLINE_SIZE);
4733 if (inlen <= txq->inlen_send) {
4734 unsigned int seg_n, wqe_n;
4736 rte_prefetch0(rte_pktmbuf_mtod
4737 (loc->mbuf, uint8_t *));
4738 /* Check against minimal length. */
4739 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
4740 return MLX5_TXCMP_CODE_ERROR;
4741 if (loc->mbuf->ol_flags &
4742 PKT_TX_DYNF_NOINLINE) {
4744 * The hint flag not to inline packet
4745 * data is set. Check whether we can
4746 * follow the hint.
4748 if ((!MLX5_TXOFF_CONFIG(EMPW) &&
4750 (MLX5_TXOFF_CONFIG(MPW) &&
4753 * The hardware requires the
4754 * minimal inline data header.
4756 goto single_min_inline;
4758 if (MLX5_TXOFF_CONFIG(VLAN) &&
4759 vlan && !txq->vlan_en) {
4761 * We must insert VLAN tag
4762 * by software means.
4764 goto single_part_inline;
4766 goto single_no_inline;
4769 * Completely inlined packet data WQE:
4770 * - Control Segment, SEND opcode
4771 * - Ethernet Segment, no VLAN insertion
4772 * - Data inlined, VLAN optionally inserted
4773 * - Alignment to MLX5_WSEG_SIZE
4774 * Have to estimate amount of WQEBBs
4776 seg_n = (inlen + 3 * MLX5_WSEG_SIZE -
4777 MLX5_ESEG_MIN_INLINE_SIZE +
4778 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
4779 /* Check if there are enough WQEBBs. */
4780 wqe_n = (seg_n + 3) / 4;
4781 if (wqe_n > loc->wqe_free)
4782 return MLX5_TXCMP_CODE_EXIT;
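/*
 * Illustration of the estimate above, assuming 16-byte WSEGs and the
 * 18-byte MLX5_ESEG_MIN_INLINE_SIZE implied by the static_assert
 * earlier in this routine: for inlen = 128 bytes,
 * seg_n = (128 + 48 - 18 + 15) / 16 = 10 WSEGs and
 * wqe_n = (10 + 3) / 4 = 3 WQEBBs, i.e. the fully inlined packet
 * consumes three entries of the WQE ring.
 */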
4783 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4784 loc->wqe_last = wqe;
4785 mlx5_tx_cseg_init(txq, loc, wqe, seg_n,
4786 MLX5_OPCODE_SEND, olx);
4787 mlx5_tx_eseg_data(txq, loc, wqe,
4788 vlan, inlen, 0, olx);
4789 txq->wqe_ci += wqe_n;
4790 loc->wqe_free -= wqe_n;
4792 * Packet data are completely inlined,
4793 * free the packet immediately.
4795 rte_pktmbuf_free_seg(loc->mbuf);
4796 } else if ((!MLX5_TXOFF_CONFIG(EMPW) ||
4797 MLX5_TXOFF_CONFIG(MPW)) &&
4800 * If minimal inlining is requested the eMPW
4801 * feature should be disabled because data is
4802 * inlined into the Ethernet Segment, which
4803 * cannot contain inlined data for eMPW since
4804 * the segment is shared by all packets.
4806 struct mlx5_wqe_dseg *__rte_restrict dseg;
4811 * The inline-mode settings require
4812 * inlining the specified amount of
4813 * data bytes into the Ethernet Segment.
4814 * We should check the free space in
4815 * the WQE ring buffer to inline partially.
4818 MLX5_ASSERT(txq->inlen_send >= txq->inlen_mode);
4819 MLX5_ASSERT(inlen > txq->inlen_mode);
4820 MLX5_ASSERT(txq->inlen_mode >=
4821 MLX5_ESEG_MIN_INLINE_SIZE);
4823 * Check whether there are enough free WQEBBs:
4825 * - Ethernet Segment
4826 * - First Segment of inlined Ethernet data
4827 * - ... data continued ...
4828 * - Finishing Data Segment of pointer type
4830 ds = (MLX5_WQE_CSEG_SIZE +
4831 MLX5_WQE_ESEG_SIZE +
4832 MLX5_WQE_DSEG_SIZE +
4833 txq->inlen_mode -
4834 MLX5_ESEG_MIN_INLINE_SIZE +
4835 MLX5_WQE_DSEG_SIZE +
4836 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
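/*
 * Illustration with a hypothetical inlen_mode of 64 bytes and 16-byte
 * WSEGs (both assumptions): ds = (16 + 16 + 16 + 64 - 18 + 16 + 15) /
 * 16 = 7 WSEGs, so this partially inlined WQE occupies (7 + 3) / 4 = 2
 * WQEBBs - the inlined 64 bytes plus a trailing pointer Data Segment
 * referencing the rest of the packet.
 */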
4837 if (loc->wqe_free < ((ds + 3) / 4))
4838 return MLX5_TXCMP_CODE_EXIT;
4840 * Build the ordinary SEND WQE:
4842 * - Ethernet Segment, inline inlen_mode bytes
4843 * - Data Segment of pointer type
4845 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4846 loc->wqe_last = wqe;
4847 mlx5_tx_cseg_init(txq, loc, wqe, ds,
4848 MLX5_OPCODE_SEND, olx);
4849 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan,
4852 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
4853 txq->inlen_mode - vlan;
4854 inlen -= txq->inlen_mode;
4855 mlx5_tx_dseg_ptr(txq, loc, dseg,
4858 * WQE is built, update the loop parameters
4859 * and go to the next packet.
4861 txq->wqe_ci += (ds + 3) / 4;
4862 loc->wqe_free -= (ds + 3) / 4;
4863 /* We have to store mbuf in elts.*/
4864 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
4865 txq->elts[txq->elts_head++ & txq->elts_m] =
4873 * Partially inlined packet data WQE: we have
4874 * some space in the title WQEBB that we can fill
4875 * with packet data. It takes one WQEBB,
4876 * which is available; no extra space check:
4877 * - Control Segment, SEND opcode
4878 * - Ethernet Segment, no VLAN insertion
4879 * - MLX5_ESEG_MIN_INLINE_SIZE bytes of Data
4880 * - Data Segment, pointer type
4882 * We also get here if VLAN insertion is not
4883 * supported by HW but inlining is enabled.
4886 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4887 loc->wqe_last = wqe;
4888 mlx5_tx_cseg_init(txq, loc, wqe, 4,
4889 MLX5_OPCODE_SEND, olx);
4890 mlx5_tx_eseg_dmin(txq, loc, wqe, vlan, olx);
4891 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
4892 MLX5_ESEG_MIN_INLINE_SIZE - vlan;
4894 * The length check is performed above, by
4895 * comparing with txq->inlen_send. We should
4896 * not get overflow here.
4898 MLX5_ASSERT(inlen > MLX5_ESEG_MIN_INLINE_SIZE);
4899 dlen = inlen - MLX5_ESEG_MIN_INLINE_SIZE;
4900 mlx5_tx_dseg_ptr(txq, loc, &wqe->dseg[1],
4904 /* We have to store mbuf in elts.*/
4905 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
4906 txq->elts[txq->elts_head++ & txq->elts_m] =
4910 #ifdef MLX5_PMD_SOFT_COUNTERS
4911 /* Update sent data bytes counter. */
4912 txq->stats.obytes += vlan +
4913 rte_pktmbuf_data_len(loc->mbuf);
4917 * No inlining at all; it means saving CPU cycles
4918 * was prioritized at configuration time, we should not
4919 * copy any packet data to the WQE.
4921 * SEND WQE, one WQEBB:
4922 * - Control Segment, SEND opcode
4923 * - Ethernet Segment, optional VLAN, no inline
4924 * - Data Segment, pointer type
4927 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4928 loc->wqe_last = wqe;
4929 mlx5_tx_cseg_init(txq, loc, wqe, 3,
4930 MLX5_OPCODE_SEND, olx);
4931 mlx5_tx_eseg_none(txq, loc, wqe, olx);
4933 (txq, loc, &wqe->dseg[0],
4934 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
4935 rte_pktmbuf_data_len(loc->mbuf), olx);
4939 * We should not store the mbuf pointer in elts
4940 * if no inlining is configured; this is done
4941 * by the calling routine as a batch copy.
4943 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
4945 #ifdef MLX5_PMD_SOFT_COUNTERS
4946 /* Update sent data bytes counter. */
4947 txq->stats.obytes += rte_pktmbuf_data_len(loc->mbuf);
4948 if (MLX5_TXOFF_CONFIG(VLAN) &&
4949 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
4950 txq->stats.obytes +=
4951 sizeof(struct rte_vlan_hdr);
4956 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
4957 return MLX5_TXCMP_CODE_EXIT;
4958 loc->mbuf = *pkts++;
4960 rte_prefetch0(*pkts);
4961 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4962 if (unlikely(ret != MLX5_TXCMP_CODE_SINGLE))
4968 static __rte_always_inline enum mlx5_txcmp_code
4969 mlx5_tx_burst_single(struct mlx5_txq_data *__rte_restrict txq,
4970 struct rte_mbuf **__rte_restrict pkts,
4971 unsigned int pkts_n,
4972 struct mlx5_txq_local *__rte_restrict loc,
4975 enum mlx5_txcmp_code ret;
4977 ret = mlx5_tx_able_to_empw(txq, loc, olx, false);
4978 if (ret == MLX5_TXCMP_CODE_SINGLE)
4980 MLX5_ASSERT(ret == MLX5_TXCMP_CODE_EMPW);
4982 /* Optimize for inline/no inline eMPW send. */
4983 ret = (MLX5_TXOFF_CONFIG(INLINE)) ?
4984 mlx5_tx_burst_empw_inline
4985 (txq, pkts, pkts_n, loc, olx) :
4986 mlx5_tx_burst_empw_simple
4987 (txq, pkts, pkts_n, loc, olx);
4988 if (ret != MLX5_TXCMP_CODE_SINGLE)
4990 /* The resources to send one packet should remain. */
4991 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
4993 ret = mlx5_tx_burst_single_send(txq, pkts, pkts_n, loc, olx);
4994 MLX5_ASSERT(ret != MLX5_TXCMP_CODE_SINGLE);
4995 if (ret != MLX5_TXCMP_CODE_EMPW)
4997 /* The resources to send one packet should remain. */
4998 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
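/*
 * The dispatcher above bounces between the two paths: the eMPW
 * subroutines return MLX5_TXCMP_CODE_SINGLE when they meet a packet
 * that must go out as an ordinary SEND, and mlx5_tx_burst_single_send()
 * returns MLX5_TXCMP_CODE_EMPW when the next packet can be batched
 * again, so a mixed burst alternates between them until packets or
 * resources are exhausted.
 */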
5003 * DPDK Tx callback template. This is a configured template
5004 * used to generate routines optimized for the specified offload setup.
5005 * One of these generated functions is chosen at SQ configuration time.
5009 * Generic pointer to TX queue structure.
5011 * Packets to transmit.
5013 * Number of packets in array.
5015 * Configured offloads mask, presents the bits of MLX5_TXOFF_CONFIG_xxx
5016 * values. Should be static to take compile-time static configuration
5017 * advantages.
5020 * Number of packets successfully transmitted (<= pkts_n).
5022 static __rte_always_inline uint16_t
5023 mlx5_tx_burst_tmpl(struct mlx5_txq_data *__rte_restrict txq,
5024 struct rte_mbuf **__rte_restrict pkts,
5028 struct mlx5_txq_local loc;
5029 enum mlx5_txcmp_code ret;
5032 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
5033 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
5034 if (unlikely(!pkts_n))
5038 loc.wqe_last = NULL;
5041 loc.pkts_loop = loc.pkts_sent;
5043 * Check if there are some CQEs, if any:
5044 * - process encountered errors
5045 * - process the completed WQEs
5046 * - free related mbufs
5047 * - doorbell the NIC about processed CQEs
5049 rte_prefetch0(*(pkts + loc.pkts_sent));
5050 mlx5_tx_handle_completion(txq, olx);
5052 * Calculate the number of available resources - elts and WQEs.
5053 * There are two possible different scenarios:
5054 * - no data inlining into WQEs, one WQEBB may contain up to
5055 * four packets, in this case elts become the scarce resource
5056 * - data inlining into WQEs, one packet may require multiple
5057 * WQEBBs, the WQEs become the limiting factor.
5059 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
5060 loc.elts_free = txq->elts_s -
5061 (uint16_t)(txq->elts_head - txq->elts_tail);
5062 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
5063 loc.wqe_free = txq->wqe_s -
5064 (uint16_t)(txq->wqe_ci - txq->wqe_pi);
5065 if (unlikely(!loc.elts_free || !loc.wqe_free))
5069 * Fetch the packet from array. Usually this is
5070 * the first packet in a series of multi/single-
5071 * segment packets.
5073 loc.mbuf = *(pkts + loc.pkts_sent);
5074 /* Dedicated branch for multi-segment packets. */
5075 if (MLX5_TXOFF_CONFIG(MULTI) &&
5076 unlikely(NB_SEGS(loc.mbuf) > 1)) {
5078 * Multi-segment packet encountered.
5079 * Hardware is able to process it only
5080 * with SEND/TSO opcodes, one packet
5081 * per WQE, do it in a dedicated routine.
5084 MLX5_ASSERT(loc.pkts_sent >= loc.pkts_copy);
5085 part = loc.pkts_sent - loc.pkts_copy;
5086 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
5088 * There are some single-segment mbufs not
5089 * stored in elts. The mbufs must be in the
5090 * same order as WQEs, so we must copy the
5091 * mbufs to elts here, before the coming
5092 * multi-segment packet mbufs are appended.
5094 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy,
5096 loc.pkts_copy = loc.pkts_sent;
5098 MLX5_ASSERT(pkts_n > loc.pkts_sent);
5099 ret = mlx5_tx_burst_mseg(txq, pkts, pkts_n, &loc, olx);
5100 if (!MLX5_TXOFF_CONFIG(INLINE))
5101 loc.pkts_copy = loc.pkts_sent;
5103 * These return code checks are supposed
5104 * to be optimized out due to routine inlining.
5106 if (ret == MLX5_TXCMP_CODE_EXIT) {
5108 * The routine returns this code when
5109 * all packets are sent or there are not
5110 * enough resources to complete the request.
5114 if (ret == MLX5_TXCMP_CODE_ERROR) {
5116 * The routine returns this code when
5117 * some error in the incoming packets
5118 * format occurred.
5120 txq->stats.oerrors++;
5123 if (ret == MLX5_TXCMP_CODE_SINGLE) {
5125 * The single-segment packet was encountered
5126 * in the array, try to send it in the
5127 * best optimized way, possibly engaging eMPW.
5129 goto enter_send_single;
5131 if (MLX5_TXOFF_CONFIG(TSO) &&
5132 ret == MLX5_TXCMP_CODE_TSO) {
5134 * The single-segment TSO packet was
5135 * encountered in the array.
5137 goto enter_send_tso;
5139 /* We must not get here. Something is going wrong. */
5141 txq->stats.oerrors++;
5144 /* Dedicated branch for single-segment TSO packets. */
5145 if (MLX5_TXOFF_CONFIG(TSO) &&
5146 unlikely(loc.mbuf->ol_flags & PKT_TX_TCP_SEG)) {
5148 * TSO might require a special way of inlining
5149 * (dedicated parameters) and is sent with
5150 * the MLX5_OPCODE_TSO opcode only, provide this
5151 * in a dedicated branch.
5154 MLX5_ASSERT(NB_SEGS(loc.mbuf) == 1);
5155 MLX5_ASSERT(pkts_n > loc.pkts_sent);
5156 ret = mlx5_tx_burst_tso(txq, pkts, pkts_n, &loc, olx);
5158 * These return code checks are supposed
5159 * to be optimized out due to routine inlining.
5161 if (ret == MLX5_TXCMP_CODE_EXIT)
5163 if (ret == MLX5_TXCMP_CODE_ERROR) {
5164 txq->stats.oerrors++;
5167 if (ret == MLX5_TXCMP_CODE_SINGLE)
5168 goto enter_send_single;
5169 if (MLX5_TXOFF_CONFIG(MULTI) &&
5170 ret == MLX5_TXCMP_CODE_MULTI) {
5172 * The multi-segment packet was
5173 * encountered in the array.
5175 goto enter_send_multi;
5177 /* We must not get here. Something is going wrong. */
5179 txq->stats.oerrors++;
5183 * The dedicated branch for single-segment packets
5184 * without TSO. Often these can be sent using
5185 * MLX5_OPCODE_EMPW with multiple packets in one WQE.
5186 * The routine builds the WQEs till it encounters
5187 * a TSO or multi-segment packet (in case these
5188 * offloads are requested at SQ configuration time).
5191 MLX5_ASSERT(pkts_n > loc.pkts_sent);
5192 ret = mlx5_tx_burst_single(txq, pkts, pkts_n, &loc, olx);
5194 * These return code checks are supposed
5195 * to be optimized out due to routine inlining.
5197 if (ret == MLX5_TXCMP_CODE_EXIT)
5199 if (ret == MLX5_TXCMP_CODE_ERROR) {
5200 txq->stats.oerrors++;
5203 if (MLX5_TXOFF_CONFIG(MULTI) &&
5204 ret == MLX5_TXCMP_CODE_MULTI) {
5206 * The multi-segment packet was
5207 * encountered in the array.
5209 goto enter_send_multi;
5211 if (MLX5_TXOFF_CONFIG(TSO) &&
5212 ret == MLX5_TXCMP_CODE_TSO) {
5214 * The single-segment TSO packet was
5215 * encountered in the array.
5217 goto enter_send_tso;
5219 /* We must not get here. Something is going wrong. */
5221 txq->stats.oerrors++;
5225 * Main Tx loop is completed, do the rest:
5226 * - set completion request if thresholds are reached
5227 * - doorbell the hardware
5228 * - copy the rest of mbufs to elts (if any)
5230 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE) ||
5231 loc.pkts_sent >= loc.pkts_copy);
5232 /* Take a shortcut if nothing is sent. */
5233 if (unlikely(loc.pkts_sent == loc.pkts_loop))
5235 /* Request CQE generation if limits are reached. */
5236 mlx5_tx_request_completion(txq, &loc, olx);
5238 * Ring QP doorbell immediately after WQE building completion
5239 * to improve latencies. The purely software-related data treatment
5240 * can be completed after the doorbell. Tx CQEs for this SQ are
5241 * processed in this thread only by polling.
5243 * The rdma core library can map the doorbell register in two ways,
5244 * depending on the environment variable "MLX5_SHUT_UP_BF":
5246 * - as regular cached memory, the variable is either missing or
5247 * set to zero. This type of mapping may cause significant
5248 * doorbell register writing latency and requires an explicit
5249 * memory write barrier to mitigate this issue and prevent
5250 * write combining.
5252 * - as non-cached memory, the variable is present and set to
5253 * a non-zero value. This type of mapping may cause a performance
5254 * impact under heavy loading conditions but the explicit write
5255 * memory barrier is not required and it may improve core
5256 * performance.
5258 * - the legacy behaviour (prior to the 19.08 release) was to use
5259 * heuristics to decide whether a write memory barrier should
5260 * be performed. This behavior is supported by specifying
5261 * tx_db_nc=2; the write barrier is skipped if the application
5262 * provides the full recommended burst of packets, as it
5263 * supposes the next packets are coming and the write barrier
5264 * will be issued on the next burst (after descriptor writing,
5265 * at least).
5267 mlx5_tx_dbrec_cond_wmb(txq, loc.wqe_last, !txq->db_nc &&
5268 (!txq->db_heu || pkts_n % MLX5_TX_DEFAULT_BURST));
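/*
 * The condition above requests the write barrier only when the
 * doorbell register is mapped as cached memory (db_nc is zero) and
 * either the heuristics are disabled or the burst size is not a
 * multiple of MLX5_TX_DEFAULT_BURST, matching the tx_db_nc modes
 * described in the comment above.
 */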
5269 /* Not all of the mbufs may be stored into elts yet. */
5270 part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent - loc.pkts_copy;
5271 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
5273 * There are some single-segment mbufs not stored in elts.
5274 * This can only happen if the last packet was single-segment.
5275 * The copying is gathered into one place because it is
5276 * a good opportunity to optimize that with SIMD.
5277 * Unfortunately, if inlining is enabled, gaps in the
5278 * pointer array may happen due to early freeing of the
5279 * inlined mbufs.
5281 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy, part, olx);
5282 loc.pkts_copy = loc.pkts_sent;
5284 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
5285 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
5286 if (pkts_n > loc.pkts_sent) {
5288 * If the burst size is large there might not be enough CQEs
5289 * fetched from the completion queue and not enough resources
5290 * freed to send all the packets.
5295 #ifdef MLX5_PMD_SOFT_COUNTERS
5296 /* Increment sent packets counter. */
5297 txq->stats.opackets += loc.pkts_sent;
5299 return loc.pkts_sent;
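/*
 * The MLX5_TXOFF_DECL() instantiations below expand this template with
 * compile-time constant offload masks, so the MLX5_TXOFF_CONFIG()
 * branches above collapse at build time and each generated routine
 * keeps only the code needed for its own offload set.
 */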
5302 /* Generate routines with Enhanced Multi-Packet Write support. */
5303 MLX5_TXOFF_DECL(full_empw,
5304 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_EMPW)
5306 MLX5_TXOFF_DECL(none_empw,
5307 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW)
5309 MLX5_TXOFF_DECL(md_empw,
5310 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5312 MLX5_TXOFF_DECL(mt_empw,
5313 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5314 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5316 MLX5_TXOFF_DECL(mtsc_empw,
5317 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5318 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5319 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5321 MLX5_TXOFF_DECL(mti_empw,
5322 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5323 MLX5_TXOFF_CONFIG_INLINE |
5324 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5326 MLX5_TXOFF_DECL(mtv_empw,
5327 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5328 MLX5_TXOFF_CONFIG_VLAN |
5329 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5331 MLX5_TXOFF_DECL(mtiv_empw,
5332 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5333 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5334 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5336 MLX5_TXOFF_DECL(sc_empw,
5337 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5338 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5340 MLX5_TXOFF_DECL(sci_empw,
5341 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5342 MLX5_TXOFF_CONFIG_INLINE |
5343 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5345 MLX5_TXOFF_DECL(scv_empw,
5346 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5347 MLX5_TXOFF_CONFIG_VLAN |
5348 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5350 MLX5_TXOFF_DECL(sciv_empw,
5351 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5352 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5353 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5355 MLX5_TXOFF_DECL(i_empw,
5356 MLX5_TXOFF_CONFIG_INLINE |
5357 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5359 MLX5_TXOFF_DECL(v_empw,
5360 MLX5_TXOFF_CONFIG_VLAN |
5361 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5363 MLX5_TXOFF_DECL(iv_empw,
5364 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5365 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5367 /* Generate routines without Enhanced Multi-Packet Write support. */
5368 MLX5_TXOFF_DECL(full,
5369 MLX5_TXOFF_CONFIG_FULL)
5371 MLX5_TXOFF_DECL(none,
5372 MLX5_TXOFF_CONFIG_NONE)
5375 MLX5_TXOFF_CONFIG_METADATA)
5378 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5379 MLX5_TXOFF_CONFIG_METADATA)
5381 MLX5_TXOFF_DECL(mtsc,
5382 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5383 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5384 MLX5_TXOFF_CONFIG_METADATA)
5386 MLX5_TXOFF_DECL(mti,
5387 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5388 MLX5_TXOFF_CONFIG_INLINE |
5389 MLX5_TXOFF_CONFIG_METADATA)
5392 MLX5_TXOFF_DECL(mtv,
5393 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5394 MLX5_TXOFF_CONFIG_VLAN |
5395 MLX5_TXOFF_CONFIG_METADATA)
5398 MLX5_TXOFF_DECL(mtiv,
5399 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5400 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5401 MLX5_TXOFF_CONFIG_METADATA)
5404 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5405 MLX5_TXOFF_CONFIG_METADATA)
5407 MLX5_TXOFF_DECL(sci,
5408 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5409 MLX5_TXOFF_CONFIG_INLINE |
5410 MLX5_TXOFF_CONFIG_METADATA)
5413 MLX5_TXOFF_DECL(scv,
5414 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5415 MLX5_TXOFF_CONFIG_VLAN |
5416 MLX5_TXOFF_CONFIG_METADATA)
5419 MLX5_TXOFF_DECL(sciv,
5420 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5421 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5422 MLX5_TXOFF_CONFIG_METADATA)
5425 MLX5_TXOFF_CONFIG_INLINE |
5426 MLX5_TXOFF_CONFIG_METADATA)
5429 MLX5_TXOFF_CONFIG_VLAN |
5430 MLX5_TXOFF_CONFIG_METADATA)
5433 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5434 MLX5_TXOFF_CONFIG_METADATA)
5436 /* Generate routines with timestamp scheduling. */
5437 MLX5_TXOFF_DECL(full_ts_nompw,
5438 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_TXPP)
5440 MLX5_TXOFF_DECL(full_ts_nompwi,
5441 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5442 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5443 MLX5_TXOFF_CONFIG_VLAN | MLX5_TXOFF_CONFIG_METADATA |
5444 MLX5_TXOFF_CONFIG_TXPP)
5446 MLX5_TXOFF_DECL(full_ts,
5447 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_TXPP |
5448 MLX5_TXOFF_CONFIG_EMPW)
5450 MLX5_TXOFF_DECL(full_ts_noi,
5451 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5452 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5453 MLX5_TXOFF_CONFIG_VLAN | MLX5_TXOFF_CONFIG_METADATA |
5454 MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)
5456 MLX5_TXOFF_DECL(none_ts,
5457 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_TXPP |
5458 MLX5_TXOFF_CONFIG_EMPW)
5460 MLX5_TXOFF_DECL(mdi_ts,
5461 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_METADATA |
5462 MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)
5464 MLX5_TXOFF_DECL(mti_ts,
5465 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5466 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_METADATA |
5467 MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)
5469 MLX5_TXOFF_DECL(mtiv_ts,
5470 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5471 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5472 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_TXPP |
5473 MLX5_TXOFF_CONFIG_EMPW)
5476 * Generate routines with Legacy Multi-Packet Write support.
5477 * This mode is supported by ConnectX-4 Lx only and imposes
5478 * offload limitations; the following are not supported:
5479 * - ACL/Flows (metadata become meaningless)
5480 * - WQE Inline headers
5481 * - SRIOV (E-Switch offloads)
5483 * - tunnel encapsulation/decapsulation
5486 MLX5_TXOFF_DECL(none_mpw,
5487 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW |
5488 MLX5_TXOFF_CONFIG_MPW)
5490 MLX5_TXOFF_DECL(mci_mpw,
5491 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5492 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5493 MLX5_TXOFF_CONFIG_MPW)
5495 MLX5_TXOFF_DECL(mc_mpw,
5496 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5497 MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW)
5499 MLX5_TXOFF_DECL(i_mpw,
5500 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5501 MLX5_TXOFF_CONFIG_MPW)
5504 * Array of declared and compiled Tx burst functions and the corresponding
5505 * supported offload sets. The array is used to select the Tx burst
5506 * function for the specified offload set at Tx queue configuration time.
5509 eth_tx_burst_t func;
5512 MLX5_TXOFF_INFO(full_empw,
5513 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5514 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5515 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5516 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5518 MLX5_TXOFF_INFO(none_empw,
5519 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW)
5521 MLX5_TXOFF_INFO(md_empw,
5522 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5524 MLX5_TXOFF_INFO(mt_empw,
5525 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5526 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5528 MLX5_TXOFF_INFO(mtsc_empw,
5529 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5530 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5531 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5533 MLX5_TXOFF_INFO(mti_empw,
5534 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5535 MLX5_TXOFF_CONFIG_INLINE |
5536 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5538 MLX5_TXOFF_INFO(mtv_empw,
5539 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5540 MLX5_TXOFF_CONFIG_VLAN |
5541 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5543 MLX5_TXOFF_INFO(mtiv_empw,
5544 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5545 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5546 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5548 MLX5_TXOFF_INFO(sc_empw,
5549 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5550 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5552 MLX5_TXOFF_INFO(sci_empw,
5553 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5554 MLX5_TXOFF_CONFIG_INLINE |
5555 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5557 MLX5_TXOFF_INFO(scv_empw,
5558 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5559 MLX5_TXOFF_CONFIG_VLAN |
5560 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5562 MLX5_TXOFF_INFO(sciv_empw,
5563 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5564 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5565 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5567 MLX5_TXOFF_INFO(i_empw,
5568 MLX5_TXOFF_CONFIG_INLINE |
5569 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5571 MLX5_TXOFF_INFO(v_empw,
5572 MLX5_TXOFF_CONFIG_VLAN |
5573 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5575 MLX5_TXOFF_INFO(iv_empw,
5576 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5577 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5579 MLX5_TXOFF_INFO(full_ts_nompw,
5580 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_TXPP)
5582 MLX5_TXOFF_INFO(full_ts_nompwi,
5583 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5584 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5585 MLX5_TXOFF_CONFIG_VLAN | MLX5_TXOFF_CONFIG_METADATA |
5586 MLX5_TXOFF_CONFIG_TXPP)
5588 MLX5_TXOFF_INFO(full_ts,
5589 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_TXPP |
5590 MLX5_TXOFF_CONFIG_EMPW)
5592 MLX5_TXOFF_INFO(full_ts_noi,
5593 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5594 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5595 MLX5_TXOFF_CONFIG_VLAN | MLX5_TXOFF_CONFIG_METADATA |
5596 MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)
5598 MLX5_TXOFF_INFO(none_ts,
5599 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_TXPP |
5600 MLX5_TXOFF_CONFIG_EMPW)
5602 MLX5_TXOFF_INFO(mdi_ts,
5603 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_METADATA |
5604 MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)
5606 MLX5_TXOFF_INFO(mti_ts,
5607 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5608 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_METADATA |
5609 MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)
5611 MLX5_TXOFF_INFO(mtiv_ts,
5612 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5613 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5614 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_TXPP |
5615 MLX5_TXOFF_CONFIG_EMPW)
5617 MLX5_TXOFF_INFO(full,
5618 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5619 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5620 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5621 MLX5_TXOFF_CONFIG_METADATA)
5623 MLX5_TXOFF_INFO(none,
5624 MLX5_TXOFF_CONFIG_NONE)
5627 MLX5_TXOFF_CONFIG_METADATA)
5630 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5631 MLX5_TXOFF_CONFIG_METADATA)
5633 MLX5_TXOFF_INFO(mtsc,
5634 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5635 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5636 MLX5_TXOFF_CONFIG_METADATA)
5638 MLX5_TXOFF_INFO(mti,
5639 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5640 MLX5_TXOFF_CONFIG_INLINE |
5641 MLX5_TXOFF_CONFIG_METADATA)
5643 MLX5_TXOFF_INFO(mtv,
5644 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5645 MLX5_TXOFF_CONFIG_VLAN |
5646 MLX5_TXOFF_CONFIG_METADATA)
5648 MLX5_TXOFF_INFO(mtiv,
5649 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5650 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5651 MLX5_TXOFF_CONFIG_METADATA)
5654 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5655 MLX5_TXOFF_CONFIG_METADATA)
5657 MLX5_TXOFF_INFO(sci,
5658 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5659 MLX5_TXOFF_CONFIG_INLINE |
5660 MLX5_TXOFF_CONFIG_METADATA)
5662 MLX5_TXOFF_INFO(scv,
5663 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5664 MLX5_TXOFF_CONFIG_VLAN |
5665 MLX5_TXOFF_CONFIG_METADATA)
5667 MLX5_TXOFF_INFO(sciv,
5668 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5669 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5670 MLX5_TXOFF_CONFIG_METADATA)
5673 MLX5_TXOFF_CONFIG_INLINE |
5674 MLX5_TXOFF_CONFIG_METADATA)
5677 MLX5_TXOFF_CONFIG_VLAN |
5678 MLX5_TXOFF_CONFIG_METADATA)
5681 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5682 MLX5_TXOFF_CONFIG_METADATA)
5684 MLX5_TXOFF_INFO(none_mpw,
5685 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW |
5686 MLX5_TXOFF_CONFIG_MPW)
5688 MLX5_TXOFF_INFO(mci_mpw,
5689 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5690 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5691 MLX5_TXOFF_CONFIG_MPW)
5693 MLX5_TXOFF_INFO(mc_mpw,
5694 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5695 MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW)
5697 MLX5_TXOFF_INFO(i_mpw,
5698 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5699 MLX5_TXOFF_CONFIG_MPW)
5703 * Configure the Tx function to use. The routine checks the configured
5704 * Tx offloads for the device and selects the appropriate Tx burst
5705 * routine. There are multiple Tx burst routines compiled from
5706 * the same template in the most optimal way for the dedicated
5707 * Tx queue offload set.
5710 * Pointer to private data structure.
5713 * Pointer to selected Tx burst function.
5716 mlx5_select_tx_function(struct rte_eth_dev *dev)
5718 struct mlx5_priv *priv = dev->data->dev_private;
5719 struct mlx5_dev_config *config = &priv->config;
5720 uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
5721 unsigned int diff = 0, olx = 0, i, m;
5723 static_assert(MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE <=
5724 MLX5_DSEG_MAX, "invalid WQE max size");
5725 static_assert(MLX5_WQE_CSEG_SIZE == MLX5_WSEG_SIZE,
5726 "invalid WQE Control Segment size");
5727 static_assert(MLX5_WQE_ESEG_SIZE == MLX5_WSEG_SIZE,
5728 "invalid WQE Ethernet Segment size");
5729 static_assert(MLX5_WQE_DSEG_SIZE == MLX5_WSEG_SIZE,
5730 "invalid WQE Data Segment size");
5731 static_assert(MLX5_WQE_SIZE == 4 * MLX5_WSEG_SIZE,
5732 "invalid WQE size");
5734 if (tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
5735 /* We should support Multi-Segment Packets. */
5736 olx |= MLX5_TXOFF_CONFIG_MULTI;
5738 if (tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
5739 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
5740 DEV_TX_OFFLOAD_GRE_TNL_TSO |
5741 DEV_TX_OFFLOAD_IP_TNL_TSO |
5742 DEV_TX_OFFLOAD_UDP_TNL_TSO)) {
5743 /* We should support TCP Send Offload. */
5744 olx |= MLX5_TXOFF_CONFIG_TSO;
5746 if (tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO |
5747 DEV_TX_OFFLOAD_UDP_TNL_TSO |
5748 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
5749 /* We should support Software Parser for Tunnels. */
5750 olx |= MLX5_TXOFF_CONFIG_SWP;
5752 if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
5753 DEV_TX_OFFLOAD_UDP_CKSUM |
5754 DEV_TX_OFFLOAD_TCP_CKSUM |
5755 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
5756 /* We should support IP/TCP/UDP Checksums. */
5757 olx |= MLX5_TXOFF_CONFIG_CSUM;
5759 if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT) {
5760 /* We should support VLAN insertion. */
5761 olx |= MLX5_TXOFF_CONFIG_VLAN;
5763 if (tx_offloads & DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP &&
5764 rte_mbuf_dynflag_lookup
5765 (RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL) > 0 &&
5766 rte_mbuf_dynfield_lookup
5767 (RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL) > 0) {
5768 /* Offload configured, dynamic entities registered. */
5769 olx |= MLX5_TXOFF_CONFIG_TXPP;
5771 if (priv->txqs_n && (*priv->txqs)[0]) {
5772 struct mlx5_txq_data *txd = (*priv->txqs)[0];
5774 if (txd->inlen_send) {
5776 * Check the data inline requirements. Data inlining
5777 * is enabled on a per-device basis, so we can check
5778 * the first Tx queue only.
5780 * If the device does not support VLAN insertion in the WQE
5781 * and some queues are requested to perform VLAN
5782 * insertion offload then inlining must be enabled.
5784 olx |= MLX5_TXOFF_CONFIG_INLINE;
5787 if (config->mps == MLX5_MPW_ENHANCED &&
5788 config->txq_inline_min <= 0) {
5790 * The NIC supports Enhanced Multi-Packet Write
5791 * and does not require minimal inline data.
5793 olx |= MLX5_TXOFF_CONFIG_EMPW;
5795 if (rte_flow_dynf_metadata_avail()) {
5796 /* We should support Flow metadata. */
5797 olx |= MLX5_TXOFF_CONFIG_METADATA;
5799 if (config->mps == MLX5_MPW) {
5801 * The NIC supports Legacy Multi-Packet Write.
5802 * The MLX5_TXOFF_CONFIG_MPW controls the
5803 * descriptor building method in combination
5804 * with MLX5_TXOFF_CONFIG_EMPW.
5806 if (!(olx & (MLX5_TXOFF_CONFIG_TSO |
5807 MLX5_TXOFF_CONFIG_SWP |
5808 MLX5_TXOFF_CONFIG_VLAN |
5809 MLX5_TXOFF_CONFIG_METADATA)))
5810 olx |= MLX5_TXOFF_CONFIG_EMPW |
5811 MLX5_TXOFF_CONFIG_MPW;
5814 * Scan the routine table to find the minimal
5815 * routine satisfying the requested offloads.
5817 m = RTE_DIM(txoff_func);
5818 for (i = 0; i < RTE_DIM(txoff_func); i++) {
5821 tmp = txoff_func[i].olx;
5823 /* Meets requested offloads exactly.*/
5827 if ((tmp & olx) != olx) {
5828 /* Does not meet requested offloads at all. */
5831 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_MPW)
5832 /* Do not enable legacy MPW if not configured. */
5834 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_EMPW)
5835 /* Do not enable eMPW if not configured. */
5837 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_INLINE)
5838 /* Do not enable inlining if not configured. */
5840 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_TXPP)
5841 /* Do not enable scheduling if not configured. */
5844 * Some routine meets the requirements.
5845 * Check whether it has the minimal amount
5846 * of not-requested offloads.
5848 tmp = __builtin_popcountl(tmp & ~olx);
5849 if (m >= RTE_DIM(txoff_func) || tmp < diff) {
5850 /* First or better match, save and continue. */
5856 tmp = txoff_func[i].olx ^ txoff_func[m].olx;
5857 if (__builtin_ffsl(tmp & txoff_func[i].olx) <
5858 __builtin_ffsl(tmp & txoff_func[m].olx)) {
5859 /* Lighter (lower-order bit) not requested offload wins. */
5864 if (m >= RTE_DIM(txoff_func)) {
5865 DRV_LOG(DEBUG, "port %u has no selected Tx function"
5866 " for requested offloads %04X",
5867 dev->data->port_id, olx);
5870 DRV_LOG(DEBUG, "port %u has selected Tx function"
5871 " supporting offloads %04X/%04X",
5872 dev->data->port_id, olx, txoff_func[m].olx);
5873 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MULTI)
5874 DRV_LOG(DEBUG, "\tMULTI (multi segment)");
5875 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_TSO)
5876 DRV_LOG(DEBUG, "\tTSO (TCP send offload)");
5877 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_SWP)
5878 DRV_LOG(DEBUG, "\tSWP (software parser)");
5879 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_CSUM)
5880 DRV_LOG(DEBUG, "\tCSUM (checksum offload)");
5881 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_INLINE)
5882 DRV_LOG(DEBUG, "\tINLIN (inline data)");
5883 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_VLAN)
5884 DRV_LOG(DEBUG, "\tVLANI (VLAN insertion)");
5885 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_METADATA)
5886 DRV_LOG(DEBUG, "\tMETAD (tx Flow metadata)");
5887 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_TXPP)
5888 DRV_LOG(DEBUG, "\tTXPP (tx Scheduling)");
5889 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_EMPW) {
5890 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MPW)
5891 DRV_LOG(DEBUG, "\tMPW (Legacy MPW)");
5893 DRV_LOG(DEBUG, "\tEMPW (Enhanced MPW)");
5895 return txoff_func[m].func;
5899 * DPDK callback to get the TX queue information
5902 * Pointer to the device structure.
5904 * @param tx_queue_id
5905 * Tx queue identifier.
5908 * Pointer to the TX queue information structure.
5915 mlx5_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
5916 struct rte_eth_txq_info *qinfo)
5918 struct mlx5_priv *priv = dev->data->dev_private;
5919 struct mlx5_txq_data *txq = (*priv->txqs)[tx_queue_id];
5920 struct mlx5_txq_ctrl *txq_ctrl =
5921 container_of(txq, struct mlx5_txq_ctrl, txq);
5925 qinfo->nb_desc = txq->elts_s;
5926 qinfo->conf.tx_thresh.pthresh = 0;
5927 qinfo->conf.tx_thresh.hthresh = 0;
5928 qinfo->conf.tx_thresh.wthresh = 0;
5929 qinfo->conf.tx_rs_thresh = 0;
5930 qinfo->conf.tx_free_thresh = 0;
5931 qinfo->conf.tx_deferred_start = txq_ctrl ? 0 : 1;
5932 qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
5936 * DPDK callback to get the TX packet burst mode information
5939 * Pointer to the device structure.
5941 * @param tx_queue_id
5942 * Tx queue identifier.
5945 * Pointer to the burst mode information.
5948 * 0 on success, -EINVAL on failure.
5952 mlx5_tx_burst_mode_get(struct rte_eth_dev *dev,
5953 uint16_t tx_queue_id __rte_unused,
5954 struct rte_eth_burst_mode *mode)
5956 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
5957 unsigned int i, olx;
5959 for (i = 0; i < RTE_DIM(txoff_func); i++) {
5960 if (pkt_burst == txoff_func[i].func) {
5961 olx = txoff_func[i].olx;
5962 snprintf(mode->info, sizeof(mode->info),
5963 "%s%s%s%s%s%s%s%s%s",
5964 (olx & MLX5_TXOFF_CONFIG_EMPW) ?
5965 ((olx & MLX5_TXOFF_CONFIG_MPW) ?
5966 "Legacy MPW" : "Enhanced MPW") : "No MPW",
5967 (olx & MLX5_TXOFF_CONFIG_MULTI) ?
5969 (olx & MLX5_TXOFF_CONFIG_TSO) ?
5971 (olx & MLX5_TXOFF_CONFIG_SWP) ?
5973 (olx & MLX5_TXOFF_CONFIG_CSUM) ?
5975 (olx & MLX5_TXOFF_CONFIG_INLINE) ?
5977 (olx & MLX5_TXOFF_CONFIG_VLAN) ?
5979 (olx & MLX5_TXOFF_CONFIG_METADATA) ?
5981 (olx & MLX5_TXOFF_CONFIG_TXPP) ?