1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015-2019 Mellanox Technologies, Ltd
12 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
14 #pragma GCC diagnostic ignored "-Wpedantic"
16 #include <infiniband/verbs.h>
17 #include <infiniband/mlx5dv.h>
19 #pragma GCC diagnostic error "-Wpedantic"
23 #include <rte_mempool.h>
24 #include <rte_prefetch.h>
25 #include <rte_common.h>
26 #include <rte_branch_prediction.h>
27 #include <rte_ether.h>
28 #include <rte_cycles.h>
31 #include "mlx5_utils.h"
32 #include "mlx5_rxtx.h"
33 #include "mlx5_autoconf.h"
34 #include "mlx5_defs.h"
37 /* TX burst subroutines return codes. */
38 enum mlx5_txcmp_code {
39 MLX5_TXCMP_CODE_EXIT = 0,
40 MLX5_TXCMP_CODE_ERROR,
41 MLX5_TXCMP_CODE_SINGLE,
MLX5_TXCMP_CODE_MULTI,
MLX5_TXCMP_CODE_TSO,
MLX5_TXCMP_CODE_EMPW,
};
* These defines are used to configure the Tx burst routine option set
* supported at compile time. Options that are not specified are
* optimized out because the if conditions can be evaluated at compile
* time. Offloads with a bigger runtime check overhead (more CPU cycles
* to skip) should have a bigger index - this is needed to select the
* best matching routine when there is no exact match and some offloads
* are not actually requested.
56 #define MLX5_TXOFF_CONFIG_MULTI (1u << 0) /* Multi-segment packets.*/
57 #define MLX5_TXOFF_CONFIG_TSO (1u << 1) /* TCP send offload supported.*/
58 #define MLX5_TXOFF_CONFIG_SWP (1u << 2) /* Tunnels/SW Parser offloads.*/
59 #define MLX5_TXOFF_CONFIG_CSUM (1u << 3) /* Check Sums offloaded. */
60 #define MLX5_TXOFF_CONFIG_INLINE (1u << 4) /* Data inlining supported. */
61 #define MLX5_TXOFF_CONFIG_VLAN (1u << 5) /* VLAN insertion supported.*/
62 #define MLX5_TXOFF_CONFIG_METADATA (1u << 6) /* Flow metadata. */
63 #define MLX5_TXOFF_CONFIG_EMPW (1u << 8) /* Enhanced MPW supported.*/
/* The most common offload groups. */
66 #define MLX5_TXOFF_CONFIG_NONE 0
67 #define MLX5_TXOFF_CONFIG_FULL (MLX5_TXOFF_CONFIG_MULTI | \
68 MLX5_TXOFF_CONFIG_TSO | \
69 MLX5_TXOFF_CONFIG_SWP | \
70 MLX5_TXOFF_CONFIG_CSUM | \
71 MLX5_TXOFF_CONFIG_INLINE | \
72 MLX5_TXOFF_CONFIG_VLAN | \
73 MLX5_TXOFF_CONFIG_METADATA)
75 #define MLX5_TXOFF_CONFIG(mask) (olx & MLX5_TXOFF_CONFIG_##mask)
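/*
* Illustrative sketch (not part of the driver): in a burst routine
* compiled with olx == MLX5_TXOFF_CONFIG_FULL the check below folds to
* a compile-time constant, so the untaken branch is optimized away:
*
*	if (MLX5_TXOFF_CONFIG(TSO) &&
*	    loc->mbuf->ol_flags & PKT_TX_TCP_SEG)
*		... take the TSO path ...
*/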
#define MLX5_TXOFF_DECL(func, olx) \
static uint16_t mlx5_tx_burst_##func(void *txq, \
struct rte_mbuf **pkts, \
uint16_t pkts_n) \
{ \
return mlx5_tx_burst_tmpl((struct mlx5_txq_data *)txq, \
pkts, pkts_n, (olx)); \
}
86 #define MLX5_TXOFF_INFO(func, olx) {mlx5_tx_burst_##func, olx},
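/*
* Usage sketch (hypothetical option set): the pair below would emit a
* specialized mlx5_tx_burst_full() routine and provide its descriptor
* for the burst routine selection table:
*
*	MLX5_TXOFF_DECL(full, MLX5_TXOFF_CONFIG_FULL)
*	MLX5_TXOFF_INFO(full, MLX5_TXOFF_CONFIG_FULL)
*/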
88 static __rte_always_inline uint32_t
89 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe);
91 static __rte_always_inline int
92 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
93 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe);
95 static __rte_always_inline uint32_t
96 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe);
98 static __rte_always_inline void
99 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
100 volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res);
102 static __rte_always_inline void
103 mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx,
104 const unsigned int strd_n);
static int
mlx5_queue_state_modify(struct rte_eth_dev *dev,
108 struct mlx5_mp_arg_queue_state_modify *sm);
static __rte_always_inline void
mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *restrict tcp,
volatile struct mlx5_cqe *restrict cqe,
uint32_t phcsum);

static __rte_always_inline void
mlx5_lro_update_hdr(uint8_t *restrict padd,
volatile struct mlx5_cqe *restrict cqe,
uint32_t len);
120 uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
[0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
};
124 uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned;
125 uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned;
128 * Build a table to translate Rx completion flags to packet type.
130 * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
133 mlx5_set_ptype_table(void)
136 uint32_t (*p)[RTE_DIM(mlx5_ptype_table)] = &mlx5_ptype_table;
138 /* Last entry must not be overwritten, reserved for errored packet. */
139 for (i = 0; i < RTE_DIM(mlx5_ptype_table) - 1; ++i)
140 (*p)[i] = RTE_PTYPE_UNKNOWN;
* The index to the array should have:
* bit[1:0] = l3_hdr_type
* bit[4:2] = l4_hdr_type
* bit[5] = ip_frag
* bit[6] = tunneled
* bit[7] = outer_l3_type
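* For example (illustrative decode): index 0x45 means l3_hdr_type = 1
* (IPv6), l4_hdr_type = 1 (TCP), the tunneled bit set and an outer
* IPv4 header - matching the tunneled TCP entry for 0x45 below.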
150 (*p)[0x00] = RTE_PTYPE_L2_ETHER;
152 (*p)[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
153 RTE_PTYPE_L4_NONFRAG;
154 (*p)[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
155 RTE_PTYPE_L4_NONFRAG;
/* Fragmented */
(*p)[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L4_FRAG;
(*p)[0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_FRAG;
/* TCP */
(*p)[0x05] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L4_TCP;
(*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_TCP;
(*p)[0x0d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L4_TCP;
(*p)[0x0e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_TCP;
(*p)[0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L4_TCP;
(*p)[0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_TCP;
/* UDP */
(*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L4_UDP;
(*p)[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_UDP;
179 /* Repeat with outer_l3_type being set. Just in case. */
180 (*p)[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
181 RTE_PTYPE_L4_NONFRAG;
182 (*p)[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
183 RTE_PTYPE_L4_NONFRAG;
(*p)[0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L4_FRAG;
(*p)[0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_FRAG;
(*p)[0x85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L4_TCP;
(*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_TCP;
(*p)[0x8d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L4_TCP;
(*p)[0x8e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_TCP;
(*p)[0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L4_TCP;
(*p)[0x92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_TCP;
(*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L4_UDP;
(*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_UDP;
205 (*p)[0x40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
206 (*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
207 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
208 RTE_PTYPE_INNER_L4_NONFRAG;
209 (*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
210 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
211 RTE_PTYPE_INNER_L4_NONFRAG;
212 (*p)[0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
213 (*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
214 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
215 RTE_PTYPE_INNER_L4_NONFRAG;
216 (*p)[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
217 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
218 RTE_PTYPE_INNER_L4_NONFRAG;
219 /* Tunneled - Fragmented */
220 (*p)[0x61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
221 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
222 RTE_PTYPE_INNER_L4_FRAG;
223 (*p)[0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
224 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
225 RTE_PTYPE_INNER_L4_FRAG;
226 (*p)[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
227 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
228 RTE_PTYPE_INNER_L4_FRAG;
229 (*p)[0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
230 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
231 RTE_PTYPE_INNER_L4_FRAG;
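/* Tunneled - TCP */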
233 (*p)[0x45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
234 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
235 RTE_PTYPE_INNER_L4_TCP;
236 (*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
237 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
238 RTE_PTYPE_INNER_L4_TCP;
239 (*p)[0x4d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
240 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
241 RTE_PTYPE_INNER_L4_TCP;
242 (*p)[0x4e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
243 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
244 RTE_PTYPE_INNER_L4_TCP;
245 (*p)[0x51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
246 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
247 RTE_PTYPE_INNER_L4_TCP;
248 (*p)[0x52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
249 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
250 RTE_PTYPE_INNER_L4_TCP;
251 (*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
252 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
253 RTE_PTYPE_INNER_L4_TCP;
254 (*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
255 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
256 RTE_PTYPE_INNER_L4_TCP;
257 (*p)[0xcd] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
258 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
259 RTE_PTYPE_INNER_L4_TCP;
260 (*p)[0xce] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
261 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
262 RTE_PTYPE_INNER_L4_TCP;
263 (*p)[0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
264 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
265 RTE_PTYPE_INNER_L4_TCP;
266 (*p)[0xd2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
267 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
268 RTE_PTYPE_INNER_L4_TCP;
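/* Tunneled - UDP */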
270 (*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
271 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
272 RTE_PTYPE_INNER_L4_UDP;
273 (*p)[0x4a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
274 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
275 RTE_PTYPE_INNER_L4_UDP;
276 (*p)[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
277 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
278 RTE_PTYPE_INNER_L4_UDP;
279 (*p)[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
280 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
281 RTE_PTYPE_INNER_L4_UDP;
* Build a table to translate the mbuf checksum request flags to the
* Verbs checksum type.
288 mlx5_set_cksum_table(void)
294 * The index should have:
295 * bit[0] = PKT_TX_TCP_SEG
296 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
297 * bit[4] = PKT_TX_IP_CKSUM
* bit[8] = PKT_TX_OUTER_IP_CKSUM
* bit[9] = tunnel
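* For example (illustrative): a tunneled TSO packet with inner and
* outer IP checksum requested sets bits 0, 4, 8 and 9 (idx 0x311) and
* maps to the inner L3/L4 plus outer L3 checksum enables.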
301 for (i = 0; i < RTE_DIM(mlx5_cksum_table); ++i) {
304 /* Tunneled packet. */
305 if (i & (1 << 8)) /* Outer IP. */
306 v |= MLX5_ETH_WQE_L3_CSUM;
307 if (i & (1 << 4)) /* Inner IP. */
308 v |= MLX5_ETH_WQE_L3_INNER_CSUM;
309 if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
310 v |= MLX5_ETH_WQE_L4_INNER_CSUM;
313 if (i & (1 << 4)) /* IP. */
314 v |= MLX5_ETH_WQE_L3_CSUM;
315 if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
316 v |= MLX5_ETH_WQE_L4_CSUM;
318 mlx5_cksum_table[i] = v;
* Build a table to translate the packet type of the mbuf to the SWP
* type of Verbs.
326 mlx5_set_swp_types_table(void)
332 * The index should have:
333 * bit[0:1] = PKT_TX_L4_MASK
334 * bit[4] = PKT_TX_IPV6
335 * bit[8] = PKT_TX_OUTER_IPV6
336 * bit[9] = PKT_TX_OUTER_UDP
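* For instance (illustrative), an inner-IPv6 packet with inner UDP
* checksum requested yields idx 0x13 (bits [1:0] = 3, bit[4] set) and
* is assigned the inner IPv6 and inner UDP SWP flags.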
338 for (i = 0; i < RTE_DIM(mlx5_swp_types_table); ++i) {
341 v |= MLX5_ETH_WQE_L3_OUTER_IPV6;
343 v |= MLX5_ETH_WQE_L4_OUTER_UDP;
345 v |= MLX5_ETH_WQE_L3_INNER_IPV6;
346 if ((i & 3) == (PKT_TX_UDP_CKSUM >> 52))
347 v |= MLX5_ETH_WQE_L4_INNER_UDP;
348 mlx5_swp_types_table[i] = v;
* Set Software Parser flags and offsets in the Ethernet Segment of the WQE.
* The flags must be preliminarily initialized to zero.
357 * Pointer to burst routine local context.
359 * Pointer to store Software Parser flags
361 * Configured Tx offloads mask. It is fully defined at
362 * compile time and may be used for optimization.
365 * Software Parser offsets packed in dword.
366 * Software Parser flags are set by pointer.
368 static __rte_always_inline uint32_t
369 txq_mbuf_to_swp(struct mlx5_txq_local *restrict loc,
374 unsigned int idx, off;
377 if (!MLX5_TXOFF_CONFIG(SWP))
379 ol = loc->mbuf->ol_flags;
380 tunnel = ol & PKT_TX_TUNNEL_MASK;
382 * Check whether Software Parser is required.
* Only customized tunnels may ask for it.
385 if (likely(tunnel != PKT_TX_TUNNEL_UDP && tunnel != PKT_TX_TUNNEL_IP))
388 * The index should have:
389 * bit[0:1] = PKT_TX_L4_MASK
390 * bit[4] = PKT_TX_IPV6
391 * bit[8] = PKT_TX_OUTER_IPV6
392 * bit[9] = PKT_TX_OUTER_UDP
394 idx = (ol & (PKT_TX_L4_MASK | PKT_TX_IPV6 | PKT_TX_OUTER_IPV6)) >> 52;
395 idx |= (tunnel == PKT_TX_TUNNEL_UDP) ? (1 << 9) : 0;
396 *swp_flags = mlx5_swp_types_table[idx];
398 * Set offsets for SW parser. Since ConnectX-5, SW parser just
399 * complements HW parser. SW parser starts to engage only if HW parser
400 * can't reach a header. For the older devices, HW parser will not kick
401 * in if any of SWP offsets is set. Therefore, all of the L3 offsets
402 * should be set regardless of HW offload.
404 off = loc->mbuf->outer_l2_len;
405 if (MLX5_TXOFF_CONFIG(VLAN) && ol & PKT_TX_VLAN_PKT)
406 off += sizeof(struct rte_vlan_hdr);
407 set = (off >> 1) << 8; /* Outer L3 offset. */
408 off += loc->mbuf->outer_l3_len;
409 if (tunnel == PKT_TX_TUNNEL_UDP)
410 set |= off >> 1; /* Outer L4 offset. */
411 if (ol & (PKT_TX_IPV4 | PKT_TX_IPV6)) { /* Inner IP. */
412 const uint64_t csum = ol & PKT_TX_L4_MASK;
413 off += loc->mbuf->l2_len;
414 set |= (off >> 1) << 24; /* Inner L3 offset. */
415 if (csum == PKT_TX_TCP_CKSUM ||
416 csum == PKT_TX_UDP_CKSUM ||
417 (MLX5_TXOFF_CONFIG(TSO) && ol & PKT_TX_TCP_SEG)) {
418 off += loc->mbuf->l3_len;
419 set |= (off >> 1) << 16; /* Inner L4 offset. */
422 set = rte_cpu_to_le_32(set);
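/*
* The returned dword packs the offsets in 2-byte units:
* bits [7:0] - outer L4, bits [15:8] - outer L3,
* bits [23:16] - inner L4, bits [31:24] - inner L3.
*/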
427 * Convert the Checksum offloads to Verbs.
430 * Pointer to the mbuf.
433 * Converted checksum flags.
435 static __rte_always_inline uint8_t
436 txq_ol_cksum_to_cs(struct rte_mbuf *buf)
439 uint8_t is_tunnel = !!(buf->ol_flags & PKT_TX_TUNNEL_MASK);
440 const uint64_t ol_flags_mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK |
441 PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM;
444 * The index should have:
445 * bit[0] = PKT_TX_TCP_SEG
446 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
447 * bit[4] = PKT_TX_IP_CKSUM
448 * bit[8] = PKT_TX_OUTER_IP_CKSUM
451 idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (!!is_tunnel << 9);
452 return mlx5_cksum_table[idx];
456 * Internal function to compute the number of used descriptors in an RX queue
* The number of used Rx descriptors.
465 rx_queue_count(struct mlx5_rxq_data *rxq)
467 struct rxq_zip *zip = &rxq->zip;
468 volatile struct mlx5_cqe *cqe;
469 const unsigned int cqe_n = (1 << rxq->cqe_n);
470 const unsigned int cqe_cnt = cqe_n - 1;
474 /* if we are processing a compressed cqe */
476 used = zip->cqe_cnt - zip->ca;
482 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
483 while (check_cqe(cqe, cqe_n, cq_ci) != MLX5_CQE_STATUS_HW_OWN) {
487 op_own = cqe->op_own;
488 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
489 n = rte_be_to_cpu_32(cqe->byte_cnt);
494 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
496 used = RTE_MIN(used, (1U << rxq->elts_n) - 1);
501 * DPDK callback to check the status of a rx descriptor.
506 * The index of the descriptor in the ring.
* The status of the Rx descriptor.
512 mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
514 struct mlx5_rxq_data *rxq = rx_queue;
515 struct mlx5_rxq_ctrl *rxq_ctrl =
516 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
517 struct rte_eth_dev *dev = ETH_DEV(rxq_ctrl->priv);
519 if (dev->rx_pkt_burst != mlx5_rx_burst) {
523 if (offset >= (1 << rxq->elts_n)) {
527 if (offset < rx_queue_count(rxq))
528 return RTE_ETH_RX_DESC_DONE;
529 return RTE_ETH_RX_DESC_AVAIL;
533 * DPDK callback to get the number of used descriptors in a RX queue
536 * Pointer to the device structure.
* The number of used Rx descriptors.
* -EINVAL if the queue is invalid.
546 mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
548 struct mlx5_priv *priv = dev->data->dev_private;
549 struct mlx5_rxq_data *rxq;
551 if (dev->rx_pkt_burst != mlx5_rx_burst) {
555 rxq = (*priv->rxqs)[rx_queue_id];
560 return rx_queue_count(rxq);
563 #define MLX5_SYSTEM_LOG_DIR "/var/log"
565 * Dump debug information to log file.
570 * If not NULL this string is printed as a header to the output
571 * and the output will be in hexadecimal view.
573 * This is the buffer address to print out.
575 * The number of bytes to dump out.
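*
* Usage sketch (illustrative, the file name and cqe variable are
* hypothetical): dump a buffer in hexadecimal view under a header -
*
*	mlx5_dump_debug_information("txq_err", "MLX5 Error CQ:",
*				    cqe, sizeof(*cqe));
*
* Passing a NULL hex_title prints the buffer as plain text instead.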
578 mlx5_dump_debug_information(const char *fname, const char *hex_title,
579 const void *buf, unsigned int hex_len)
583 MKSTR(path, "%s/%s", MLX5_SYSTEM_LOG_DIR, fname);
584 fd = fopen(path, "a+");
586 DRV_LOG(WARNING, "cannot open %s for debug dump", path);
587 MKSTR(path2, "./%s", fname);
588 fd = fopen(path2, "a+");
590 DRV_LOG(ERR, "cannot open %s for debug dump", path2);
593 DRV_LOG(INFO, "New debug dump in file %s", path2);
595 DRV_LOG(INFO, "New debug dump in file %s", path);
598 rte_hexdump(fd, hex_title, buf, hex_len);
600 fprintf(fd, "%s", (const char *)buf);
601 fprintf(fd, "\n\n\n");
606 * Move QP from error state to running state and initialize indexes.
609 * Pointer to TX queue control structure.
612 * 0 on success, else -1.
615 tx_recover_qp(struct mlx5_txq_ctrl *txq_ctrl)
struct mlx5_mp_arg_queue_state_modify sm = {
.is_wq = 0,
.queue_id = txq_ctrl->txq.idx,
};
622 if (mlx5_queue_state_modify(ETH_DEV(txq_ctrl->priv), &sm))
624 txq_ctrl->txq.wqe_ci = 0;
625 txq_ctrl->txq.wqe_pi = 0;
626 txq_ctrl->txq.elts_comp = 0;
/*
* Return 1 if the error CQE has already been marked as seen,
* otherwise mark it and return 0.
*/
632 check_err_cqe_seen(volatile struct mlx5_err_cqe *err_cqe)
634 static const uint8_t magic[] = "seen";
638 for (i = 0; i < sizeof(magic); ++i)
639 if (!ret || err_cqe->rsvd1[i] != magic[i]) {
641 err_cqe->rsvd1[i] = magic[i];
650 * Pointer to TX queue structure.
652 * Pointer to the error CQE.
655 * Negative value if queue recovery failed,
656 * the last Tx buffer element to free otherwise.
659 mlx5_tx_error_cqe_handle(struct mlx5_txq_data *restrict txq,
660 volatile struct mlx5_err_cqe *err_cqe)
662 if (err_cqe->syndrome != MLX5_CQE_SYNDROME_WR_FLUSH_ERR) {
663 const uint16_t wqe_m = ((1 << txq->wqe_n) - 1);
664 struct mlx5_txq_ctrl *txq_ctrl =
665 container_of(txq, struct mlx5_txq_ctrl, txq);
666 uint16_t new_wqe_pi = rte_be_to_cpu_16(err_cqe->wqe_counter);
667 int seen = check_err_cqe_seen(err_cqe);
669 if (!seen && txq_ctrl->dump_file_n <
670 txq_ctrl->priv->config.max_dump_files_num) {
671 MKSTR(err_str, "Unexpected CQE error syndrome "
672 "0x%02x CQN = %u SQN = %u wqe_counter = %u "
673 "wq_ci = %u cq_ci = %u", err_cqe->syndrome,
674 txq->cqe_s, txq->qp_num_8s >> 8,
675 rte_be_to_cpu_16(err_cqe->wqe_counter),
676 txq->wqe_ci, txq->cq_ci);
677 MKSTR(name, "dpdk_mlx5_port_%u_txq_%u_index_%u_%u",
678 PORT_ID(txq_ctrl->priv), txq->idx,
679 txq_ctrl->dump_file_n, (uint32_t)rte_rdtsc());
680 mlx5_dump_debug_information(name, NULL, err_str, 0);
mlx5_dump_debug_information(name, "MLX5 Error CQ:",
(const void *)((uintptr_t)
txq->cqes),
sizeof(*txq->cqes) * txq->cqe_s);
mlx5_dump_debug_information(name, "MLX5 Error SQ:",
(const void *)((uintptr_t)
txq->wqes),
MLX5_WQE_SIZE * (1 << txq->wqe_n));
691 txq_ctrl->dump_file_n++;
* Count errors in WQE units.
* Later it can be improved to count error packets,
* for example, by parsing the SQ to find how many packets
* should be counted for each WQE.
txq->stats.oerrors += ((txq->wqe_ci & wqe_m) -
new_wqe_pi) & wqe_m;
702 if (tx_recover_qp(txq_ctrl) == 0) {
704 /* Release all the remaining buffers. */
705 return txq->elts_head;
707 /* Recovering failed - try again later on the same WQE. */
712 /* Do not release buffers. */
713 return txq->elts_tail;
717 * Translate RX completion flags to packet type.
720 * Pointer to RX queue structure.
724 * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
727 * Packet type for struct rte_mbuf.
729 static inline uint32_t
730 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
733 uint8_t pinfo = cqe->pkt_info;
734 uint16_t ptype = cqe->hdr_type_etc;
* The index to the array should have:
* bit[1:0] = l3_hdr_type
* bit[4:2] = l4_hdr_type
* bit[5] = ip_frag
* bit[6] = tunneled
* bit[7] = outer_l3_type
744 idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10);
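/*
* The lookup below also ORs in rxq->tunnel when the tunneled bit
* (1 << 6) is set, refining the type for the configured tunnel.
*/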
745 return mlx5_ptype_table[idx] | rxq->tunnel * !!(idx & (1 << 6));
749 * Initialize Rx WQ and indexes.
752 * Pointer to RX queue structure.
755 mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
757 const unsigned int wqe_n = 1 << rxq->elts_n;
760 for (i = 0; (i != wqe_n); ++i) {
761 volatile struct mlx5_wqe_data_seg *scat;
765 if (mlx5_rxq_mprq_enabled(rxq)) {
766 struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[i];
scat = &((volatile struct mlx5_wqe_mprq *)
rxq->wqes)[i].dseg;
770 addr = (uintptr_t)mlx5_mprq_buf_addr(buf,
771 1 << rxq->strd_num_n);
772 byte_count = (1 << rxq->strd_sz_n) *
773 (1 << rxq->strd_num_n);
775 struct rte_mbuf *buf = (*rxq->elts)[i];
scat = &((volatile struct mlx5_wqe_data_seg *)
rxq->wqes)[i];
779 addr = rte_pktmbuf_mtod(buf, uintptr_t);
780 byte_count = DATA_LEN(buf);
782 /* scat->addr must be able to store a pointer. */
783 assert(sizeof(scat->addr) >= sizeof(uintptr_t));
784 *scat = (struct mlx5_wqe_data_seg){
785 .addr = rte_cpu_to_be_64(addr),
786 .byte_count = rte_cpu_to_be_32(byte_count),
787 .lkey = mlx5_rx_addr2mr(rxq, addr),
790 rxq->consumed_strd = 0;
791 rxq->decompressed = 0;
rxq->zip = (struct rxq_zip){
.ai = 0,
};
796 /* Update doorbell counter. */
797 rxq->rq_ci = wqe_n >> rxq->sges_n;
799 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
803 * Modify a Verbs/DevX queue state.
804 * This must be called from the primary process.
807 * Pointer to Ethernet device.
809 * State modify request parameters.
812 * 0 in case of success else non-zero value and rte_errno is set.
815 mlx5_queue_state_modify_primary(struct rte_eth_dev *dev,
816 const struct mlx5_mp_arg_queue_state_modify *sm)
819 struct mlx5_priv *priv = dev->data->dev_private;
822 struct mlx5_rxq_data *rxq = (*priv->rxqs)[sm->queue_id];
823 struct mlx5_rxq_ctrl *rxq_ctrl =
824 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
826 if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV) {
827 struct ibv_wq_attr mod = {
828 .attr_mask = IBV_WQ_ATTR_STATE,
829 .wq_state = sm->state,
832 ret = mlx5_glue->modify_wq(rxq_ctrl->obj->wq, &mod);
833 } else { /* rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ. */
834 struct mlx5_devx_modify_rq_attr rq_attr;
836 memset(&rq_attr, 0, sizeof(rq_attr));
837 if (sm->state == IBV_WQS_RESET) {
838 rq_attr.rq_state = MLX5_RQC_STATE_ERR;
839 rq_attr.state = MLX5_RQC_STATE_RST;
840 } else if (sm->state == IBV_WQS_RDY) {
841 rq_attr.rq_state = MLX5_RQC_STATE_RST;
842 rq_attr.state = MLX5_RQC_STATE_RDY;
843 } else if (sm->state == IBV_WQS_ERR) {
844 rq_attr.rq_state = MLX5_RQC_STATE_RDY;
845 rq_attr.state = MLX5_RQC_STATE_ERR;
ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq,
&rq_attr);
851 DRV_LOG(ERR, "Cannot change Rx WQ state to %u - %s",
852 sm->state, strerror(errno));
857 struct mlx5_txq_data *txq = (*priv->txqs)[sm->queue_id];
858 struct mlx5_txq_ctrl *txq_ctrl =
859 container_of(txq, struct mlx5_txq_ctrl, txq);
860 struct ibv_qp_attr mod = {
861 .qp_state = IBV_QPS_RESET,
862 .port_num = (uint8_t)priv->ibv_port,
864 struct ibv_qp *qp = txq_ctrl->obj->qp;
866 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
868 DRV_LOG(ERR, "Cannot change the Tx QP state to RESET "
869 "%s", strerror(errno));
873 mod.qp_state = IBV_QPS_INIT;
874 ret = mlx5_glue->modify_qp(qp, &mod,
875 (IBV_QP_STATE | IBV_QP_PORT));
877 DRV_LOG(ERR, "Cannot change Tx QP state to INIT %s",
882 mod.qp_state = IBV_QPS_RTR;
883 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
885 DRV_LOG(ERR, "Cannot change Tx QP state to RTR %s",
890 mod.qp_state = IBV_QPS_RTS;
891 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
893 DRV_LOG(ERR, "Cannot change Tx QP state to RTS %s",
903 * Modify a Verbs queue state.
906 * Pointer to Ethernet device.
908 * State modify request parameters.
911 * 0 in case of success else non-zero value.
914 mlx5_queue_state_modify(struct rte_eth_dev *dev,
915 struct mlx5_mp_arg_queue_state_modify *sm)
919 switch (rte_eal_process_type()) {
920 case RTE_PROC_PRIMARY:
921 ret = mlx5_queue_state_modify_primary(dev, sm);
923 case RTE_PROC_SECONDARY:
924 ret = mlx5_mp_req_queue_state_modify(dev, sm);
* The function moves the RQ state to RESET when the first error CQE is
* seen, then the caller's loop drains the CQ. When the CQ is empty, the
* function moves the RQ state to READY and reinitializes the RQ.
* Identifying the next CQEs and counting errors remain the caller's
* responsibility.
940 * Pointer to RX queue structure.
942 * 1 when called from vectorized Rx burst, need to prepare mbufs for the RQ.
943 * 0 when called from non-vectorized Rx burst.
946 * -1 in case of recovery error, otherwise the CQE status.
949 mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
951 const uint16_t cqe_n = 1 << rxq->cqe_n;
952 const uint16_t cqe_mask = cqe_n - 1;
953 const unsigned int wqe_n = 1 << rxq->elts_n;
954 struct mlx5_rxq_ctrl *rxq_ctrl =
955 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
957 volatile struct mlx5_cqe *cqe;
958 volatile struct mlx5_err_cqe *err_cqe;
960 .cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask],
962 struct mlx5_mp_arg_queue_state_modify sm;
965 switch (rxq->err_state) {
966 case MLX5_RXQ_ERR_STATE_NO_ERROR:
967 rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_RESET;
969 case MLX5_RXQ_ERR_STATE_NEED_RESET:
sm.is_wq = 1;
sm.queue_id = rxq->idx;
972 sm.state = IBV_WQS_RESET;
973 if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv), &sm))
975 if (rxq_ctrl->dump_file_n <
976 rxq_ctrl->priv->config.max_dump_files_num) {
977 MKSTR(err_str, "Unexpected CQE error syndrome "
978 "0x%02x CQN = %u RQN = %u wqe_counter = %u"
979 " rq_ci = %u cq_ci = %u", u.err_cqe->syndrome,
980 rxq->cqn, rxq_ctrl->wqn,
981 rte_be_to_cpu_16(u.err_cqe->wqe_counter),
982 rxq->rq_ci << rxq->sges_n, rxq->cq_ci);
983 MKSTR(name, "dpdk_mlx5_port_%u_rxq_%u_%u",
984 rxq->port_id, rxq->idx, (uint32_t)rte_rdtsc());
985 mlx5_dump_debug_information(name, NULL, err_str, 0);
986 mlx5_dump_debug_information(name, "MLX5 Error CQ:",
(const void *)((uintptr_t)
u.cqe),
sizeof(*u.cqe) * cqe_n);
990 mlx5_dump_debug_information(name, "MLX5 Error RQ:",
(const void *)((uintptr_t)
rxq->wqes),
sizeof(struct mlx5_wqe_data_seg) * wqe_n);
994 rxq_ctrl->dump_file_n++;
996 rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_READY;
998 case MLX5_RXQ_ERR_STATE_NEED_READY:
999 ret = check_cqe(u.cqe, cqe_n, rxq->cq_ci);
1000 if (ret == MLX5_CQE_STATUS_HW_OWN) {
1002 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1005 * The RQ consumer index must be zeroed while moving
1006 * from RESET state to RDY state.
1008 *rxq->rq_db = rte_cpu_to_be_32(0);
sm.is_wq = 1;
sm.queue_id = rxq->idx;
1012 sm.state = IBV_WQS_RDY;
1013 if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv),
1017 const uint16_t q_mask = wqe_n - 1;
1019 struct rte_mbuf **elt;
unsigned int n = wqe_n - (rxq->rq_ci -
rxq->rq_pi);
1024 for (i = 0; i < (int)n; ++i) {
1025 elt_idx = (rxq->rq_ci + i) & q_mask;
1026 elt = &(*rxq->elts)[elt_idx];
1027 *elt = rte_mbuf_raw_alloc(rxq->mp);
if (!*elt) {
for (i--; i >= 0; --i) {
elt_idx = (rxq->rq_ci +
i) & q_mask;
elt = &(*rxq->elts)[elt_idx];
rte_pktmbuf_free_seg
(*elt);
}
return -1;
}
1040 for (i = 0; i < (int)wqe_n; ++i) {
1041 elt = &(*rxq->elts)[i];
DATA_LEN(*elt) =
(uint16_t)((*elt)->buf_len -
1044 rte_pktmbuf_headroom(*elt));
1046 /* Padding with a fake mbuf for vec Rx. */
1047 for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
(*rxq->elts)[wqe_n + i] =
&rxq->fake_mbuf;
1051 mlx5_rxq_initialize(rxq);
1052 rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
1061 * Get size of the next packet for a given CQE. For compressed CQEs, the
* consumer index is updated only once all packets of the current one
* have been processed.
1066 * Pointer to RX queue.
* Store pointer to mini-CQE if compressed. Otherwise, the pointer is not
* written.
1074 * 0 in case of empty CQE, otherwise the packet size in bytes.
1077 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
1078 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe)
1080 struct rxq_zip *zip = &rxq->zip;
1081 uint16_t cqe_n = cqe_cnt + 1;
1087 /* Process compressed data in the CQE and mini arrays. */
1089 volatile struct mlx5_mini_cqe8 (*mc)[8] =
1090 (volatile struct mlx5_mini_cqe8 (*)[8])
(uintptr_t)(&(*rxq->cqes)[zip->ca &
cqe_cnt].pkt_info);
1094 len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt);
1095 *mcqe = &(*mc)[zip->ai & 7];
1096 if ((++zip->ai & 7) == 0) {
/* Invalidate consumed CQEs */
idx = zip->ca;
end = zip->na;
1100 while (idx != end) {
1101 (*rxq->cqes)[idx & cqe_cnt].op_own =
1102 MLX5_CQE_INVALIDATE;
1106 * Increment consumer index to skip the number
1107 * of CQEs consumed. Hardware leaves holes in
1108 * the CQ ring for software use.
1113 if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) {
/* Invalidate the rest */
idx = zip->ca;
end = zip->cq_ci;
1118 while (idx != end) {
1119 (*rxq->cqes)[idx & cqe_cnt].op_own =
1120 MLX5_CQE_INVALIDATE;
1123 rxq->cq_ci = zip->cq_ci;
1127 * No compressed data, get next CQE and verify if it is
1134 ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
1135 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
if (unlikely(ret == MLX5_CQE_STATUS_ERR ||
rxq->err_state)) {
ret = mlx5_rx_err_handle(rxq, 0);
if (ret == MLX5_CQE_STATUS_HW_OWN ||
ret == -1)
return 0;
1147 op_own = cqe->op_own;
1148 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) {
1149 volatile struct mlx5_mini_cqe8 (*mc)[8] =
1150 (volatile struct mlx5_mini_cqe8 (*)[8])
(uintptr_t)(&(*rxq->cqes)
[rxq->cq_ci & cqe_cnt].pkt_info);
1155 /* Fix endianness. */
1156 zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
1158 * Current mini array position is the one
* returned by check_cqe().
1161 * If completion comprises several mini arrays,
1162 * as a special case the second one is located
1163 * 7 CQEs after the initial CQE instead of 8
1164 * for subsequent ones.
1166 zip->ca = rxq->cq_ci;
1167 zip->na = zip->ca + 7;
1168 /* Compute the next non compressed CQE. */
1170 zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
1171 /* Get packet size to return. */
1172 len = rte_be_to_cpu_32((*mc)[0].byte_cnt);
/* Prefetch all CQEs to be invalidated */
idx = zip->ca;
end = zip->cq_ci;
while (idx != end) {
rte_prefetch0(&(*rxq->cqes)[(idx) &
cqe_cnt]);
++idx;
}
1184 len = rte_be_to_cpu_32(cqe->byte_cnt);
1187 if (unlikely(rxq->err_state)) {
1188 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1189 ++rxq->stats.idropped;
1197 * Translate RX completion flags to offload flags.
1203 * Offload flags (ol_flags) for struct rte_mbuf.
1205 static inline uint32_t
1206 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe)
1208 uint32_t ol_flags = 0;
1209 uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);
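/*
* TRANSPOSE() (from mlx5_utils.h) shifts each validity bit from its
* CQE position to the corresponding mbuf ol_flags position.
*/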
ol_flags =
TRANSPOSE(flags,
MLX5_CQE_RX_L3_HDR_VALID,
PKT_RX_IP_CKSUM_GOOD) |
TRANSPOSE(flags,
MLX5_CQE_RX_L4_HDR_VALID,
PKT_RX_L4_CKSUM_GOOD);
return ol_flags;
1222 * Fill in mbuf fields from RX completion flags.
1223 * Note that pkt->ol_flags should be initialized outside of this function.
1226 * Pointer to RX queue.
1231 * @param rss_hash_res
1232 * Packet RSS Hash result.
1235 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
1236 volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res)
1238 /* Update packet information. */
1239 pkt->packet_type = rxq_cq_to_pkt_type(rxq, cqe);
1240 if (rss_hash_res && rxq->rss_hash) {
1241 pkt->hash.rss = rss_hash_res;
1242 pkt->ol_flags |= PKT_RX_RSS_HASH;
1244 if (rxq->mark && MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) {
1245 pkt->ol_flags |= PKT_RX_FDIR;
1246 if (cqe->sop_drop_qpn !=
1247 rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) {
1248 uint32_t mark = cqe->sop_drop_qpn;
1250 pkt->ol_flags |= PKT_RX_FDIR_ID;
1251 pkt->hash.fdir.hi = mlx5_flow_mark_get(mark);
1255 pkt->ol_flags |= rxq_cq_to_ol_flags(cqe);
1256 if (rxq->vlan_strip &&
1257 (cqe->hdr_type_etc & rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) {
1258 pkt->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1259 pkt->vlan_tci = rte_be_to_cpu_16(cqe->vlan_info);
1261 if (rxq->hw_timestamp) {
1262 pkt->timestamp = rte_be_to_cpu_64(cqe->timestamp);
1263 pkt->ol_flags |= PKT_RX_TIMESTAMP;
1268 * DPDK callback for RX.
1271 * Generic pointer to RX queue structure.
1273 * Array to store received packets.
1275 * Maximum number of packets in array.
1278 * Number of packets successfully received (<= pkts_n).
1281 mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1283 struct mlx5_rxq_data *rxq = dpdk_rxq;
1284 const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1;
1285 const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1;
1286 const unsigned int sges_n = rxq->sges_n;
1287 struct rte_mbuf *pkt = NULL;
1288 struct rte_mbuf *seg = NULL;
1289 volatile struct mlx5_cqe *cqe =
1290 &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1292 unsigned int rq_ci = rxq->rq_ci << sges_n;
1293 int len = 0; /* keep its value across iterations. */
1296 unsigned int idx = rq_ci & wqe_cnt;
1297 volatile struct mlx5_wqe_data_seg *wqe =
1298 &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx];
1299 struct rte_mbuf *rep = (*rxq->elts)[idx];
1300 volatile struct mlx5_mini_cqe8 *mcqe = NULL;
1301 uint32_t rss_hash_res;
1309 rep = rte_mbuf_raw_alloc(rxq->mp);
1310 if (unlikely(rep == NULL)) {
1311 ++rxq->stats.rx_nombuf;
1314 * no buffers before we even started,
1315 * bail out silently.
1319 while (pkt != seg) {
1320 assert(pkt != (*rxq->elts)[idx]);
1324 rte_mbuf_raw_free(pkt);
1330 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1331 len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe);
1333 rte_mbuf_raw_free(rep);
1337 assert(len >= (rxq->crc_present << 2));
1339 /* If compressed, take hash result from mini-CQE. */
rss_hash_res = rte_be_to_cpu_32(mcqe == NULL ?
cqe->rx_hash_res :
mcqe->rx_hash_result);
1343 rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
1344 if (rxq->crc_present)
1345 len -= RTE_ETHER_CRC_LEN;
1347 if (cqe->lro_num_seg > 1) {
mlx5_lro_update_hdr
(rte_pktmbuf_mtod(pkt, uint8_t *), cqe,
len);
1351 pkt->ol_flags |= PKT_RX_LRO;
1352 pkt->tso_segsz = len / cqe->lro_num_seg;
1355 DATA_LEN(rep) = DATA_LEN(seg);
1356 PKT_LEN(rep) = PKT_LEN(seg);
1357 SET_DATA_OFF(rep, DATA_OFF(seg));
1358 PORT(rep) = PORT(seg);
1359 (*rxq->elts)[idx] = rep;
1361 * Fill NIC descriptor with the new buffer. The lkey and size
1362 * of the buffers are already known, only the buffer address
1365 wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
1366 /* If there's only one MR, no need to replace LKey in WQE. */
1367 if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
1368 wqe->lkey = mlx5_rx_mb2mr(rxq, rep);
1369 if (len > DATA_LEN(seg)) {
1370 len -= DATA_LEN(seg);
1375 DATA_LEN(seg) = len;
1376 #ifdef MLX5_PMD_SOFT_COUNTERS
1377 /* Increment bytes counter. */
1378 rxq->stats.ibytes += PKT_LEN(pkt);
1380 /* Return packet. */
/* Align consumer index to the next stride. */
rq_ci >>= sges_n;
++rq_ci;
rq_ci <<= sges_n;
}
1390 if (unlikely((i == 0) && ((rq_ci >> sges_n) == rxq->rq_ci)))
1392 /* Update the consumer index. */
1393 rxq->rq_ci = rq_ci >> sges_n;
1395 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1397 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
1398 #ifdef MLX5_PMD_SOFT_COUNTERS
1399 /* Increment packets counter. */
1400 rxq->stats.ipackets += i;
1406 * Update LRO packet TCP header.
1407 * The HW LRO feature doesn't update the TCP header after coalescing the
* TCP segments but supplies the information in the CQE for SW to fill it in.
1411 * Pointer to the TCP header.
* Pointer to the completion entry.
1415 * The L3 pseudo-header checksum.
1418 mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *restrict tcp,
1419 volatile struct mlx5_cqe *restrict cqe,
1422 uint8_t l4_type = (rte_be_to_cpu_16(cqe->hdr_type_etc) &
1423 MLX5_CQE_L4_TYPE_MASK) >> MLX5_CQE_L4_TYPE_SHIFT;
1425 * The HW calculates only the TCP payload checksum, need to complete
1426 * the TCP header checksum and the L3 pseudo-header checksum.
1428 uint32_t csum = phcsum + cqe->csum;
1430 if (l4_type == MLX5_L4_HDR_TYPE_TCP_EMPTY_ACK ||
1431 l4_type == MLX5_L4_HDR_TYPE_TCP_WITH_ACL) {
1432 tcp->tcp_flags |= RTE_TCP_ACK_FLAG;
1433 tcp->recv_ack = cqe->lro_ack_seq_num;
1434 tcp->rx_win = cqe->lro_tcp_win;
1436 if (cqe->lro_tcppsh_abort_dupack & MLX5_CQE_LRO_PUSH_MASK)
1437 tcp->tcp_flags |= RTE_TCP_PSH_FLAG;
1439 csum += rte_raw_cksum(tcp, (tcp->data_off & 0xF) * 4);
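/* Fold the 32-bit sum to 16 bits and take the one's complement (RFC 1071). */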
1440 csum = ((csum & 0xffff0000) >> 16) + (csum & 0xffff);
1441 csum = (~csum) & 0xffff;
1448 * Update LRO packet headers.
1449 * The HW LRO feature doesn't update the L3/TCP headers after coalescing the
* TCP segments but supplies the information in the CQE for SW to fill them in.
1453 * The packet address.
* Pointer to the completion entry.
1457 * The packet length.
1460 mlx5_lro_update_hdr(uint8_t *restrict padd,
1461 volatile struct mlx5_cqe *restrict cqe,
1465 struct rte_ether_hdr *eth;
1466 struct rte_vlan_hdr *vlan;
1467 struct rte_ipv4_hdr *ipv4;
1468 struct rte_ipv6_hdr *ipv6;
1469 struct rte_tcp_hdr *tcp;
1474 uint16_t proto = h.eth->ether_type;
1478 while (proto == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
1479 proto == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
1480 proto = h.vlan->eth_proto;
1483 if (proto == RTE_BE16(RTE_ETHER_TYPE_IPV4)) {
1484 h.ipv4->time_to_live = cqe->lro_min_ttl;
1485 h.ipv4->total_length = rte_cpu_to_be_16(len - (h.hdr - padd));
1486 h.ipv4->hdr_checksum = 0;
1487 h.ipv4->hdr_checksum = rte_ipv4_cksum(h.ipv4);
1488 phcsum = rte_ipv4_phdr_cksum(h.ipv4, 0);
1491 h.ipv6->hop_limits = cqe->lro_min_ttl;
h.ipv6->payload_len = rte_cpu_to_be_16(len - (h.hdr - padd) -
sizeof(*h.ipv6));
1494 phcsum = rte_ipv6_phdr_cksum(h.ipv6, 0);
1497 mlx5_lro_update_tcp_hdr(h.tcp, cqe, phcsum);
1501 mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
1503 struct mlx5_mprq_buf *buf = opaque;
1505 if (rte_atomic16_read(&buf->refcnt) == 1) {
1506 rte_mempool_put(buf->mp, buf);
1507 } else if (rte_atomic16_add_return(&buf->refcnt, -1) == 0) {
1508 rte_atomic16_set(&buf->refcnt, 1);
1509 rte_mempool_put(buf->mp, buf);
1514 mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf)
1516 mlx5_mprq_buf_free_cb(NULL, buf);
1520 mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx,
1521 const unsigned int strd_n)
1523 struct mlx5_mprq_buf *rep = rxq->mprq_repl;
1524 volatile struct mlx5_wqe_data_seg *wqe =
1525 &((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg;
1528 assert(rep != NULL);
1529 /* Replace MPRQ buf. */
1530 (*rxq->mprq_bufs)[rq_idx] = rep;
1532 addr = mlx5_mprq_buf_addr(rep, strd_n);
1533 wqe->addr = rte_cpu_to_be_64((uintptr_t)addr);
1534 /* If there's only one MR, no need to replace LKey in WQE. */
1535 if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
1536 wqe->lkey = mlx5_rx_addr2mr(rxq, (uintptr_t)addr);
1537 /* Stash a mbuf for next replacement. */
1538 if (likely(!rte_mempool_get(rxq->mprq_mp, (void **)&rep)))
1539 rxq->mprq_repl = rep;
1541 rxq->mprq_repl = NULL;
1545 * DPDK callback for RX with Multi-Packet RQ support.
1548 * Generic pointer to RX queue structure.
1550 * Array to store received packets.
1552 * Maximum number of packets in array.
1555 * Number of packets successfully received (<= pkts_n).
1558 mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1560 struct mlx5_rxq_data *rxq = dpdk_rxq;
1561 const unsigned int strd_n = 1 << rxq->strd_num_n;
1562 const unsigned int strd_sz = 1 << rxq->strd_sz_n;
1563 const unsigned int strd_shift =
1564 MLX5_MPRQ_STRIDE_SHIFT_BYTE * rxq->strd_shift_en;
1565 const unsigned int cq_mask = (1 << rxq->cqe_n) - 1;
1566 const unsigned int wq_mask = (1 << rxq->elts_n) - 1;
1567 volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
1569 uint32_t rq_ci = rxq->rq_ci;
1570 uint16_t consumed_strd = rxq->consumed_strd;
1571 uint16_t headroom_sz = rxq->strd_headroom_en * RTE_PKTMBUF_HEADROOM;
1572 struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
1574 while (i < pkts_n) {
1575 struct rte_mbuf *pkt;
1583 volatile struct mlx5_mini_cqe8 *mcqe = NULL;
1584 uint32_t rss_hash_res = 0;
1585 uint8_t lro_num_seg;
1587 if (consumed_strd == strd_n) {
1588 /* Replace WQE only if the buffer is still in use. */
1589 if (rte_atomic16_read(&buf->refcnt) > 1) {
1590 mprq_buf_replace(rxq, rq_ci & wq_mask, strd_n);
1591 /* Release the old buffer. */
1592 mlx5_mprq_buf_free(buf);
1593 } else if (unlikely(rxq->mprq_repl == NULL)) {
1594 struct mlx5_mprq_buf *rep;
* Currently, the MPRQ mempool is out of buffers
* and memcpy is done regardless of the size of the Rx
* packet. Retry allocation to get back to
* normal.
if (!rte_mempool_get(rxq->mprq_mp,
(void **)&rep))
1604 rxq->mprq_repl = rep;
1606 /* Advance to the next WQE. */
1609 buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
1611 cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
1612 ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe);
1616 strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
1617 MLX5_MPRQ_STRIDE_NUM_SHIFT;
1619 consumed_strd += strd_cnt;
1620 if (byte_cnt & MLX5_MPRQ_FILLER_MASK)
1623 rss_hash_res = rte_be_to_cpu_32(cqe->rx_hash_res);
1624 strd_idx = rte_be_to_cpu_16(cqe->wqe_counter);
1626 /* mini-CQE for MPRQ doesn't have hash result. */
1627 strd_idx = rte_be_to_cpu_16(mcqe->stride_idx);
1629 assert(strd_idx < strd_n);
1630 assert(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) & wq_mask));
1631 lro_num_seg = cqe->lro_num_seg;
* Currently configured to receive a packet per stride. But if
* MTU is adjusted through the kernel interface, the device could
* consume multiple strides without raising an error. In this
* case, the packet should be dropped because it is bigger than
* the max_rx_pkt_len.
1639 if (unlikely(!lro_num_seg && strd_cnt > 1)) {
1640 ++rxq->stats.idropped;
1643 pkt = rte_pktmbuf_alloc(rxq->mp);
1644 if (unlikely(pkt == NULL)) {
1645 ++rxq->stats.rx_nombuf;
1648 len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
1649 assert((int)len >= (rxq->crc_present << 2));
1650 if (rxq->crc_present)
1651 len -= RTE_ETHER_CRC_LEN;
1652 offset = strd_idx * strd_sz + strd_shift;
1653 addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf, strd_n), offset);
1655 * Memcpy packets to the target mbuf if:
1656 * - The size of packet is smaller than mprq_max_memcpy_len.
1657 * - Out of buffer in the Mempool for Multi-Packet RQ.
1659 if (len <= rxq->mprq_max_memcpy_len || rxq->mprq_repl == NULL) {
1661 * When memcpy'ing packet due to out-of-buffer, the
1662 * packet must be smaller than the target mbuf.
1664 if (unlikely(rte_pktmbuf_tailroom(pkt) < len)) {
1665 rte_pktmbuf_free_seg(pkt);
1666 ++rxq->stats.idropped;
1669 rte_memcpy(rte_pktmbuf_mtod(pkt, void *), addr, len);
1670 DATA_LEN(pkt) = len;
1672 rte_iova_t buf_iova;
1673 struct rte_mbuf_ext_shared_info *shinfo;
1674 uint16_t buf_len = strd_cnt * strd_sz;
1677 /* Increment the refcnt of the whole chunk. */
1678 rte_atomic16_add_return(&buf->refcnt, 1);
assert((uint16_t)rte_atomic16_read(&buf->refcnt) <=
strd_n + 1);
1681 buf_addr = RTE_PTR_SUB(addr, headroom_sz);
* The MLX5 device doesn't use iova but it is necessary in a
* case where the Rx packet is transmitted via a
* different PMD.
1687 buf_iova = rte_mempool_virt2iova(buf) +
1688 RTE_PTR_DIFF(buf_addr, buf);
1689 shinfo = &buf->shinfos[strd_idx];
1690 rte_mbuf_ext_refcnt_set(shinfo, 1);
1692 * EXT_ATTACHED_MBUF will be set to pkt->ol_flags when
1693 * attaching the stride to mbuf and more offload flags
1694 * will be added below by calling rxq_cq_to_mbuf().
1695 * Other fields will be overwritten.
rte_pktmbuf_attach_extbuf(pkt, buf_addr, buf_iova,
buf_len, shinfo);
1699 /* Set mbuf head-room. */
1700 pkt->data_off = headroom_sz;
1701 assert(pkt->ol_flags == EXT_ATTACHED_MBUF);
1703 * Prevent potential overflow due to MTU change through
1706 if (unlikely(rte_pktmbuf_tailroom(pkt) < len)) {
1707 rte_pktmbuf_free_seg(pkt);
1708 ++rxq->stats.idropped;
1711 DATA_LEN(pkt) = len;
* An LRO packet may consume all the stride memory; in this
* case the packet head-room space is not guaranteed, so an
* empty mbuf must be added for the head-room.
1717 if (!rxq->strd_headroom_en) {
1718 struct rte_mbuf *headroom_mbuf =
1719 rte_pktmbuf_alloc(rxq->mp);
1721 if (unlikely(headroom_mbuf == NULL)) {
1722 rte_pktmbuf_free_seg(pkt);
1723 ++rxq->stats.rx_nombuf;
1726 PORT(pkt) = rxq->port_id;
1727 NEXT(headroom_mbuf) = pkt;
1728 pkt = headroom_mbuf;
1732 rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
1733 if (lro_num_seg > 1) {
1734 mlx5_lro_update_hdr(addr, cqe, len);
1735 pkt->ol_flags |= PKT_RX_LRO;
1736 pkt->tso_segsz = strd_sz;
1739 PORT(pkt) = rxq->port_id;
1740 #ifdef MLX5_PMD_SOFT_COUNTERS
1741 /* Increment bytes counter. */
1742 rxq->stats.ibytes += PKT_LEN(pkt);
1744 /* Return packet. */
1748 /* Update the consumer indexes. */
1749 rxq->consumed_strd = consumed_strd;
1751 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1752 if (rq_ci != rxq->rq_ci) {
1755 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
1757 #ifdef MLX5_PMD_SOFT_COUNTERS
1758 /* Increment packets counter. */
1759 rxq->stats.ipackets += i;
1765 * Dummy DPDK callback for TX.
1767 * This function is used to temporarily replace the real callback during
1768 * unsafe control operations on the queue, or in case of error.
1771 * Generic pointer to TX queue structure.
1773 * Packets to transmit.
1775 * Number of packets in array.
1778 * Number of packets successfully transmitted (<= pkts_n).
1781 removed_tx_burst(void *dpdk_txq __rte_unused,
1782 struct rte_mbuf **pkts __rte_unused,
1783 uint16_t pkts_n __rte_unused)
1790 * Dummy DPDK callback for RX.
1792 * This function is used to temporarily replace the real callback during
1793 * unsafe control operations on the queue, or in case of error.
1796 * Generic pointer to RX queue structure.
1798 * Array to store received packets.
1800 * Maximum number of packets in array.
1803 * Number of packets successfully received (<= pkts_n).
removed_rx_burst(void *dpdk_rxq __rte_unused,
1807 struct rte_mbuf **pkts __rte_unused,
1808 uint16_t pkts_n __rte_unused)
1815 * Vectorized Rx/Tx routines are not compiled in when required vector
1816 * instructions are not supported on a target architecture. The following null
1817 * stubs are needed for linkage when those are not included outside of this file
1818 * (e.g. mlx5_rxtx_vec_sse.c for x86).
mlx5_rx_burst_vec(void *dpdk_rxq __rte_unused,
1823 struct rte_mbuf **pkts __rte_unused,
1824 uint16_t pkts_n __rte_unused)
1830 mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)
1836 mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused)
1842 * Free the mbufs from the linear array of pointers.
1845 * Pointer to array of packets to be free.
1847 * Number of packets to be freed.
1849 * Configured Tx offloads mask. It is fully defined at
1850 * compile time and may be used for optimization.
1852 static __rte_always_inline void
1853 mlx5_tx_free_mbuf(struct rte_mbuf **restrict pkts,
1854 unsigned int pkts_n,
1855 unsigned int olx __rte_unused)
1857 struct rte_mempool *pool = NULL;
1858 struct rte_mbuf **p_free = NULL;
1859 struct rte_mbuf *mbuf;
1860 unsigned int n_free = 0;
* The implemented algorithm eliminates copying pointers to a
* temporary array for the rte_mempool_put_bulk() calls.
1872 * Decrement mbuf reference counter, detach
1873 * indirect and external buffers if needed.
1875 mbuf = rte_pktmbuf_prefree_seg(*pkts);
1876 if (likely(mbuf != NULL)) {
1877 assert(mbuf == *pkts);
1878 if (likely(n_free != 0)) {
1879 if (unlikely(pool != mbuf->pool))
1880 /* From different pool. */
1883 /* Start new scan array. */
1890 if (unlikely(pkts_n == 0)) {
1896 * This happens if mbuf is still referenced.
1897 * We can't put it back to the pool, skip.
1901 if (unlikely(n_free != 0))
1902 /* There is some array to free.*/
1904 if (unlikely(pkts_n == 0))
1905 /* Last mbuf, nothing to free. */
1911 * This loop is implemented to avoid multiple
1912 * inlining of rte_mempool_put_bulk().
1918 * Free the array of pre-freed mbufs
1919 * belonging to the same memory pool.
1921 rte_mempool_put_bulk(pool, (void *)p_free, n_free);
1922 if (unlikely(mbuf != NULL)) {
1923 /* There is the request to start new scan. */
1928 if (likely(pkts_n != 0))
1931 * This is the last mbuf to be freed.
1932 * Do one more loop iteration to complete.
1933 * This is rare case of the last unique mbuf.
1938 if (likely(pkts_n == 0))
* Free the mbufs from the elts ring buffer up to the new tail.
1950 * Pointer to Tx queue structure.
1952 * Index in elts to free up to, becomes new elts tail.
1954 * Configured Tx offloads mask. It is fully defined at
1955 * compile time and may be used for optimization.
1957 static __rte_always_inline void
1958 mlx5_tx_free_elts(struct mlx5_txq_data *restrict txq,
1960 unsigned int olx __rte_unused)
1962 uint16_t n_elts = tail - txq->elts_tail;
1965 assert(n_elts <= txq->elts_s);
1967 * Implement a loop to support ring buffer wraparound
1968 * with single inlining of mlx5_tx_free_mbuf().
1973 part = txq->elts_s - (txq->elts_tail & txq->elts_m);
1974 part = RTE_MIN(part, n_elts);
1976 assert(part <= txq->elts_s);
mlx5_tx_free_mbuf(&txq->elts[txq->elts_tail & txq->elts_m],
part, olx);
1979 txq->elts_tail += part;
1985 * Store the mbuf being sent into elts ring buffer.
1986 * On Tx completion these mbufs will be freed.
1989 * Pointer to Tx queue structure.
1991 * Pointer to array of packets to be stored.
1993 * Number of packets to be stored.
1995 * Configured Tx offloads mask. It is fully defined at
1996 * compile time and may be used for optimization.
1998 static __rte_always_inline void
1999 mlx5_tx_copy_elts(struct mlx5_txq_data *restrict txq,
2000 struct rte_mbuf **restrict pkts,
2001 unsigned int pkts_n,
2002 unsigned int olx __rte_unused)
2005 struct rte_mbuf **elts = (struct rte_mbuf **)txq->elts;
2009 part = txq->elts_s - (txq->elts_head & txq->elts_m);
2011 assert(part <= txq->elts_s);
2012 /* This code is a good candidate for vectorizing with SIMD. */
rte_memcpy((void *)(elts + (txq->elts_head & txq->elts_m)),
(void *)pkts,
2015 RTE_MIN(part, pkts_n) * sizeof(struct rte_mbuf *));
2016 txq->elts_head += pkts_n;
2017 if (unlikely(part < pkts_n))
2018 /* The copy is wrapping around the elts array. */
2019 rte_memcpy((void *)elts, (void *)(pkts + part),
2020 (pkts_n - part) * sizeof(struct rte_mbuf *));
2024 * Update completion queue consuming index via doorbell
2025 * and flush the completed data buffers.
2028 * Pointer to TX queue structure.
* @param last_cqe
* Valid CQE pointer, if not NULL update txq->wqe_pi and flush the buffers.
* @param itail
* If not negative, flush the buffers up to this index.
2034 * Configured Tx offloads mask. It is fully defined at
2035 * compile time and may be used for optimization.
2037 static __rte_always_inline void
2038 mlx5_tx_comp_flush(struct mlx5_txq_data *restrict txq,
2039 volatile struct mlx5_cqe *last_cqe,
2041 unsigned int olx __rte_unused)
2045 if (likely(last_cqe != NULL)) {
2046 txq->wqe_pi = rte_be_to_cpu_16(last_cqe->wqe_counter);
2047 tail = ((volatile struct mlx5_wqe_cseg *)
2048 (txq->wqes + (txq->wqe_pi & txq->wqe_m)))->misc;
2049 } else if (itail >= 0) {
2050 tail = (uint16_t)itail;
2054 rte_compiler_barrier();
2055 *txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
2056 if (likely(tail != txq->elts_tail)) {
2057 mlx5_tx_free_elts(txq, tail, olx);
2058 assert(tail == txq->elts_tail);
2063 * Manage TX completions. This routine checks the CQ for
2064 * arrived CQEs, deduces the last accomplished WQE in SQ,
2065 * updates SQ producing index and frees all completed mbufs.
2068 * Pointer to TX queue structure.
2070 * Configured Tx offloads mask. It is fully defined at
2071 * compile time and may be used for optimization.
* NOTE: not inlined intentionally, it makes the tx_burst
* routine smaller, simpler and faster - from experiments.
2077 mlx5_tx_handle_completion(struct mlx5_txq_data *restrict txq,
2078 unsigned int olx __rte_unused)
2080 unsigned int count = MLX5_TX_COMP_MAX_CQE;
2081 volatile struct mlx5_cqe *last_cqe = NULL;
2084 static_assert(MLX5_CQE_STATUS_HW_OWN < 0, "Must be negative value");
2085 static_assert(MLX5_CQE_STATUS_SW_OWN < 0, "Must be negative value");
2087 volatile struct mlx5_cqe *cqe;
2089 cqe = &txq->cqes[txq->cq_ci & txq->cqe_m];
2090 ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci);
2091 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
2092 if (likely(ret != MLX5_CQE_STATUS_ERR)) {
2093 /* No new CQEs in completion queue. */
2094 assert(ret == MLX5_CQE_STATUS_HW_OWN);
2098 * Some error occurred, try to restart.
2099 * We have no barrier after WQE related Doorbell
2100 * written, make sure all writes are completed
2101 * here, before we might perform SQ reset.
2104 ret = mlx5_tx_error_cqe_handle
2105 (txq, (volatile struct mlx5_err_cqe *)cqe);
2107 * Flush buffers, update consuming index
2108 * if recovery succeeded. Otherwise
2109 * just try to recover later.
2114 /* Normal transmit completion. */
* We have to restrict the amount of processed CQEs
* in one tx_burst routine call. The CQ may be large
* and many CQEs may be updated by the NIC in one
* transaction. Buffer freeing is time consuming,
* multiple iterations may introduce significant
* latency.
2130 mlx5_tx_comp_flush(txq, last_cqe, ret, olx);
2134 * Check if the completion request flag should be set in the last WQE.
2135 * Both pushed mbufs and WQEs are monitored and the completion request
2136 * flag is set if any of thresholds is reached.
2139 * Pointer to TX queue structure.
2141 * Pointer to burst routine local context.
2143 * Routine is called from multi-segment sending loop,
2144 * do not correct the elts_head according to the pkts_copy.
2146 * Configured Tx offloads mask. It is fully defined at
2147 * compile time and may be used for optimization.
2149 static __rte_always_inline void
2150 mlx5_tx_request_completion(struct mlx5_txq_data *restrict txq,
struct mlx5_txq_local *restrict loc,
bool multi,
unsigned int olx __rte_unused)
{
2155 uint16_t head = txq->elts_head;
2158 part = (MLX5_TXOFF_CONFIG(INLINE) || multi) ?
2159 0 : loc->pkts_sent - loc->pkts_copy;
2161 if ((uint16_t)(head - txq->elts_comp) >= MLX5_TX_COMP_THRESH ||
2162 (MLX5_TXOFF_CONFIG(INLINE) &&
2163 (uint16_t)(txq->wqe_ci - txq->wqe_comp) >= txq->wqe_thres)) {
2164 volatile struct mlx5_wqe *last = loc->wqe_last;
2166 txq->elts_comp = head;
2167 if (MLX5_TXOFF_CONFIG(INLINE))
2168 txq->wqe_comp = txq->wqe_ci;
2169 /* Request unconditional completion on last WQE. */
2170 last->cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
2171 MLX5_COMP_MODE_OFFSET);
2172 /* Save elts_head in unused "immediate" field of WQE. */
2173 last->cseg.misc = head;
* A CQE slot must always be available. Count the
* issued CQE "always" requests instead of the production
* index, because there can be CQEs with errors and the
* difference with cq_ci may become inconsistent.
2180 assert(txq->cqe_s > ++txq->cq_pi);
2185 * DPDK callback to check the status of a tx descriptor.
2190 * The index of the descriptor in the ring.
* The status of the Tx descriptor.
2196 mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
2198 struct mlx5_txq_data *restrict txq = tx_queue;
2201 mlx5_tx_handle_completion(txq, 0);
2202 used = txq->elts_head - txq->elts_tail;
2204 return RTE_ETH_TX_DESC_FULL;
2205 return RTE_ETH_TX_DESC_DONE;
2209 * Build the Control Segment with specified opcode:
2210 * - MLX5_OPCODE_SEND
2211 * - MLX5_OPCODE_ENHANCED_MPSW
2215 * Pointer to TX queue structure.
2217 * Pointer to burst routine local context.
2219 * Pointer to WQE to fill with built Control Segment.
2221 * Supposed length of WQE in segments.
2223 * SQ WQE opcode to put into Control Segment.
2225 * Configured Tx offloads mask. It is fully defined at
2226 * compile time and may be used for optimization.
2228 static __rte_always_inline void
2229 mlx5_tx_cseg_init(struct mlx5_txq_data *restrict txq,
2230 struct mlx5_txq_local *restrict loc __rte_unused,
2231 struct mlx5_wqe *restrict wqe,
2233 unsigned int opcode,
2234 unsigned int olx __rte_unused)
2236 struct mlx5_wqe_cseg *restrict cs = &wqe->cseg;
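/* The low byte carries the SQ opcode, the upper bits the WQE index. */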
2238 cs->opcode = rte_cpu_to_be_32((txq->wqe_ci << 8) | opcode);
2239 cs->sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
2240 cs->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
2241 MLX5_COMP_MODE_OFFSET);
2242 cs->misc = RTE_BE32(0);
2246 * Build the Ethernet Segment without inlined data.
2247 * Supports Software Parser, Checksums and VLAN
2248 * insertion Tx offload features.
2251 * Pointer to TX queue structure.
2253 * Pointer to burst routine local context.
2255 * Pointer to WQE to fill with built Ethernet Segment.
2257 * Configured Tx offloads mask. It is fully defined at
2258 * compile time and may be used for optimization.
2260 static __rte_always_inline void
2261 mlx5_tx_eseg_none(struct mlx5_txq_data *restrict txq __rte_unused,
2262 struct mlx5_txq_local *restrict loc,
2263 struct mlx5_wqe *restrict wqe,
2266 struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
2270 * Calculate and set check sum flags first, dword field
2271 * in segment may be shared with Software Parser flags.
2273 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2274 es->flags = rte_cpu_to_le_32(csum);
2276 * Calculate and set Software Parser offsets and flags.
* These flags are set for custom UDP and IP tunnel packets.
2279 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2280 /* Fill metadata field if needed. */
2281 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2282 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2283 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2284 /* Engage VLAN tag insertion feature if requested. */
2285 if (MLX5_TXOFF_CONFIG(VLAN) &&
2286 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
2288 * We should get here only if device support
2289 * this feature correctly.
2291 assert(txq->vlan_en);
2292 es->inline_hdr = rte_cpu_to_be_32(MLX5_ETH_WQE_VLAN_INSERT |
2293 loc->mbuf->vlan_tci);
2295 es->inline_hdr = RTE_BE32(0);
2300 * Build the Ethernet Segment with minimal inlined data
2301 * of MLX5_ESEG_MIN_INLINE_SIZE bytes length. This is
2302 * used to fill the gap in single WQEBB WQEs.
2303 * Supports Software Parser, Checksums and VLAN
2304 * insertion Tx offload features.
2307 * Pointer to TX queue structure.
2309 * Pointer to burst routine local context.
2311 * Pointer to WQE to fill with built Ethernet Segment.
2313 * Length of VLAN tag insertion if any.
2315 * Configured Tx offloads mask. It is fully defined at
2316 * compile time and may be used for optimization.
2318 static __rte_always_inline void
2319 mlx5_tx_eseg_dmin(struct mlx5_txq_data *restrict txq __rte_unused,
2320 struct mlx5_txq_local *restrict loc,
2321 struct mlx5_wqe *restrict wqe,
2322 unsigned int vlan,
2323 unsigned int olx)
2325 struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
2327 uint8_t *psrc, *pdst;
2330 * Calculate and set check sum flags first, dword field
2331 * in segment may be shared with Software Parser flags.
2333 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2334 es->flags = rte_cpu_to_le_32(csum);
2336 * Calculate and set Software Parser offsets and flags.
2337 * These flags are set for custom UDP and IP tunnel packets.
2339 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2340 /* Fill metadata field if needed. */
2341 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2342 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2343 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2344 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2346 sizeof(rte_v128u32_t)),
2347 "invalid Ethernet Segment data size");
2348 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2350 sizeof(struct rte_vlan_hdr) +
2351 2 * RTE_ETHER_ADDR_LEN),
2352 "invalid Ethernet Segment data size");
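/*
 * Size arithmetic behind the asserts above (a sketch):
 * MLX5_ESEG_MIN_INLINE_SIZE is 18 bytes and both decompositions hold:
 *   2 (inline_data field) + 16 (one rte_v128u32_t)          == 18
 *   2 (ethertype) + 4 (VLAN header) + 2 * 6 (MAC addresses) == 18
 */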
2353 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
2354 es->inline_hdr_sz = RTE_BE16(MLX5_ESEG_MIN_INLINE_SIZE);
2355 es->inline_data = *(unaligned_uint16_t *)psrc;
2356 psrc += sizeof(uint16_t);
2357 pdst = (uint8_t *)(es + 1);
2358 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2359 /* Implement VLAN tag insertion as part of the inline data. */
2360 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
2361 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2362 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2363 /* Insert VLAN ethertype + VLAN tag. */
2364 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2365 ((RTE_ETHER_TYPE_VLAN << 16) |
2366 loc->mbuf->vlan_tci);
2367 pdst += sizeof(struct rte_vlan_hdr);
2368 /* Copy the remaining two bytes from the packet data. */
2369 assert(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
2370 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
2372 /* Fill the gap in the title WQEBB with inline data. */
2373 rte_mov16(pdst, psrc);
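/*
 * Resulting layout of the 18 inlined bytes when VLAN insertion is
 * requested (offsets shown for illustration only):
 *   bytes  0..1  - first two bytes of DMAC, kept in es->inline_data
 *   bytes  2..11 - remaining DMAC/SMAC bytes, copied above
 *   bytes 12..15 - 0x8100 ethertype plus vlan_tci, built on the fly
 *   bytes 16..17 - original ethertype taken from the packet data
 */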
2378 * Build the Ethernet Segment with entire packet
2379 * data inlining. Checks the boundary of WQEBB and
2380 * ring buffer wrapping, supports Software Parser,
2381 * Checksums and VLAN insertion Tx offload features.
2384 * Pointer to TX queue structure.
2386 * Pointer to burst routine local context.
2388 * Pointer to WQE to fill with built Ethernet Segment.
2390 * Length of VLAN tag insertion if any.
2392 * Length of data to inline (VLAN included, if any).
2394 * TSO flag, set mss field from the packet.
2396 * Configured Tx offloads mask. It is fully defined at
2397 * compile time and may be used for optimization.
2400 * Pointer to the next Data Segment (aligned and wrapped around).
2402 static __rte_always_inline struct mlx5_wqe_dseg *
2403 mlx5_tx_eseg_data(struct mlx5_txq_data *restrict txq,
2404 struct mlx5_txq_local *restrict loc,
2405 struct mlx5_wqe *restrict wqe,
2406 unsigned int vlan,
2407 unsigned int inlen,
2408 unsigned int tso,
2409 unsigned int olx)
2411 struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
2413 uint8_t *psrc, *pdst;
2417 * Calculate and set check sum flags first, dword field
2418 * in segment may be shared with Software Parser flags.
2420 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2421 if (tso) {
2422 csum <<= 24;
2423 csum |= loc->mbuf->tso_segsz;
2424 es->flags = rte_cpu_to_be_32(csum);
2425 } else {
2426 es->flags = rte_cpu_to_le_32(csum);
2427 }
2429 * Calculate and set Software Parser offsets and flags.
2430 * These flags are set for custom UDP and IP tunnel packets.
2432 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2433 /* Fill metadata field if needed. */
2434 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2435 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2436 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2437 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2439 sizeof(rte_v128u32_t)),
2440 "invalid Ethernet Segment data size");
2441 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2443 sizeof(struct rte_vlan_hdr) +
2444 2 * RTE_ETHER_ADDR_LEN),
2445 "invalid Ethernet Segment data size");
2446 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
2447 es->inline_hdr_sz = rte_cpu_to_be_16(inlen);
2448 es->inline_data = *(unaligned_uint16_t *)psrc;
2449 psrc += sizeof(uint16_t);
2450 pdst = (uint8_t *)(es + 1);
2451 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2452 /* Implement VLAN tag insertion as part of the inline data. */
2453 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
2454 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2455 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2456 /* Insert VLAN ethertype + VLAN tag. */
2457 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2458 ((RTE_ETHER_TYPE_VLAN << 16) |
2459 loc->mbuf->vlan_tci);
2460 pdst += sizeof(struct rte_vlan_hdr);
2461 /* Copy the remaining two bytes from the packet data. */
2462 assert(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
2463 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
2464 psrc += sizeof(uint16_t);
2466 /* Fill the gap in the title WQEBB with inline data. */
2467 rte_mov16(pdst, psrc);
2468 psrc += sizeof(rte_v128u32_t);
2470 pdst = (uint8_t *)(es + 2);
2471 assert(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
2472 assert(pdst < (uint8_t *)txq->wqes_end);
2473 inlen -= MLX5_ESEG_MIN_INLINE_SIZE;
2474 if (!inlen) {
2475 assert(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
2476 return (struct mlx5_wqe_dseg *)pdst;
2477 }
2479 * The WQEBB space availability is checked by caller.
2480 * Here we should be aware of WQE ring buffer wraparound only.
2482 part = (uint8_t *)txq->wqes_end - pdst;
2483 part = RTE_MIN(part, inlen);
2485 rte_memcpy(pdst, psrc, part);
2486 inlen -= part;
2487 if (likely(!inlen)) {
2489 * If return value is not used by the caller
2490 * the code below will be optimized out.
2492 pdst += part;
2493 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2494 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
2495 pdst = (uint8_t *)txq->wqes;
2496 return (struct mlx5_wqe_dseg *)pdst;
2498 pdst = (uint8_t *)txq->wqes;
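/*
 * Wraparound sketch (values assumed): if pdst stands 32 bytes before
 * txq->wqes_end and inlen == 80, the first pass copies
 * part = min(32, 80) = 32 bytes, then pdst restarts at txq->wqes and
 * the remaining 48 bytes are copied from the ring beginning.
 */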
2505 * Copy data from a chain of mbufs to the specified linear buffer.
2506 * If the data from some mbuf is copied completely, that mbuf is
2507 * freed. A local structure is used to keep the byte stream
2508 * state.
2511 * Pointer to the destination linear buffer.
2513 * Pointer to burst routine local context.
2515 * Length of data to be copied.
2517 * Configured Tx offloads mask. It is fully defined at
2518 * compile time and may be used for optimization.
2520 static __rte_always_inline void
2521 mlx5_tx_mseg_memcpy(uint8_t *pdst,
2522 struct mlx5_txq_local *restrict loc,
2523 unsigned int len,
2524 unsigned int olx __rte_unused)
2526 struct rte_mbuf *mbuf;
2527 unsigned int part, dlen;
2528 uint8_t *psrc;
2532 /* Allow zero length packets, must check first. */
2533 dlen = rte_pktmbuf_data_len(loc->mbuf);
2534 if (dlen <= loc->mbuf_off) {
2535 /* Exhausted packet, just free. */
2536 mbuf = loc->mbuf;
2537 loc->mbuf = mbuf->next;
2538 rte_pktmbuf_free_seg(mbuf);
2540 assert(loc->mbuf_nseg > 1);
2545 dlen -= loc->mbuf_off;
2546 psrc = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
2548 part = RTE_MIN(len, dlen);
2549 rte_memcpy(pdst, psrc, part);
2550 loc->mbuf_off += part;
2553 if (loc->mbuf_off >= rte_pktmbuf_data_len(loc->mbuf)) {
2555 /* Exhausted packet, just free. */
2556 mbuf = loc->mbuf;
2557 loc->mbuf = mbuf->next;
2558 rte_pktmbuf_free_seg(mbuf);
2560 assert(loc->mbuf_nseg >= 1);
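/*
 * Byte-stream state sketch (assumed values): for a chain of 10- and
 * 50-byte segments and len == 40, one call consumes and frees the
 * 10-byte mbuf, copies 30 bytes from the second one and leaves
 * loc->mbuf_off == 30; a subsequent call with len == 20 resumes from
 * that offset.
 */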
2570 * Build the Ethernet Segment with inlined data from
2571 * multi-segment packet. Checks the boundary of WQEBB
2572 * and ring buffer wrapping, supports Software Parser,
2573 * Checksums and VLAN insertion Tx offload features.
2576 * Pointer to TX queue structure.
2578 * Pointer to burst routine local context.
2580 * Pointer to WQE to fill with built Ethernet Segment.
2582 * Length of VLAN tag insertion if any.
2584 * Length of data to inline (VLAN included, if any).
2586 * TSO flag, set mss field from the packet.
2588 * Configured Tx offloads mask. It is fully defined at
2589 * compile time and may be used for optimization.
2592 * Pointer to the next Data Segment (aligned and
2593 * possibly NOT wrapped around - the caller should do the
2594 * wrapping check on its own).
2596 static __rte_always_inline struct mlx5_wqe_dseg *
2597 mlx5_tx_eseg_mdat(struct mlx5_txq_data *restrict txq,
2598 struct mlx5_txq_local *restrict loc,
2599 struct mlx5_wqe *restrict wqe,
2600 unsigned int vlan,
2601 unsigned int inlen,
2602 unsigned int tso,
2603 unsigned int olx)
2605 struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
2611 * Calculate and set check sum flags first, uint32_t field
2612 * in segment may be shared with Software Parser flags.
2614 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2615 if (tso) {
2616 csum <<= 24;
2617 csum |= loc->mbuf->tso_segsz;
2618 es->flags = rte_cpu_to_be_32(csum);
2619 } else {
2620 es->flags = rte_cpu_to_le_32(csum);
2621 }
2623 * Calculate and set Software Parser offsets and flags.
2624 * These flags are set for custom UDP and IP tunnel packets.
2626 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2627 /* Fill metadata field if needed. */
2628 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2629 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2630 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2631 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2633 sizeof(rte_v128u32_t)),
2634 "invalid Ethernet Segment data size");
2635 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2637 sizeof(struct rte_vlan_hdr) +
2638 2 * RTE_ETHER_ADDR_LEN),
2639 "invalid Ethernet Segment data size");
2640 assert(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
2641 es->inline_hdr_sz = rte_cpu_to_be_16(inlen);
2642 pdst = (uint8_t *)&es->inline_data;
2643 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2644 /* Implement VLAN tag insertion as part of the inline data. */
2645 mlx5_tx_mseg_memcpy(pdst, loc, 2 * RTE_ETHER_ADDR_LEN, olx);
2646 pdst += 2 * RTE_ETHER_ADDR_LEN;
2647 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2648 ((RTE_ETHER_TYPE_VLAN << 16) |
2649 loc->mbuf->vlan_tci);
2650 pdst += sizeof(struct rte_vlan_hdr);
2651 inlen -= 2 * RTE_ETHER_ADDR_LEN + sizeof(struct rte_vlan_hdr);
2653 assert(pdst < (uint8_t *)txq->wqes_end);
2655 * The WQEBB space availability is checked by caller.
2656 * Here we should be aware of WQE ring buffer wraparound only.
2658 part = (uint8_t *)txq->wqes_end - pdst;
2659 part = RTE_MIN(part, inlen);
2662 mlx5_tx_mseg_memcpy(pdst, loc, part, olx);
2664 if (likely(!inlen)) {
2665 pdst += part;
2666 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2667 return (struct mlx5_wqe_dseg *)pdst;
2668 }
2669 pdst = (uint8_t *)txq->wqes;
2675 * Build the Data Segment of pointer type.
2678 * Pointer to TX queue structure.
2680 * Pointer to burst routine local context.
2682 * Pointer to WQE to fill with built Data Segment.
2684 * Data buffer to point.
2686 * Data buffer length.
2688 * Configured Tx offloads mask. It is fully defined at
2689 * compile time and may be used for optimization.
2691 static __rte_always_inline void
2692 mlx5_tx_dseg_ptr(struct mlx5_txq_data *restrict txq,
2693 struct mlx5_txq_local *restrict loc,
2694 struct mlx5_wqe_dseg *restrict dseg,
2695 uint8_t *buf,
2696 unsigned int len,
2697 unsigned int olx __rte_unused)
2701 dseg->bcount = rte_cpu_to_be_32(len);
2702 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
2703 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
2707 * Build the Data Segment of pointer type, or inline
2708 * the data if its length does not exceed the minimal
2709 * Data Segment size.
2712 * Pointer to TX queue structure.
2714 * Pointer to burst routine local context.
2716 * Pointer to WQE to fill with built Data Segment.
2718 * Data buffer to point.
2720 * Data buffer length.
2722 * Configured Tx offloads mask. It is fully defined at
2723 * compile time and may be used for optimization.
2725 static __rte_always_inline void
2726 mlx5_tx_dseg_iptr(struct mlx5_txq_data *restrict txq,
2727 struct mlx5_txq_local *restrict loc,
2728 struct mlx5_wqe_dseg *restrict dseg,
2729 uint8_t *buf,
2730 unsigned int len,
2731 unsigned int olx __rte_unused)
2737 if (len > MLX5_DSEG_MIN_INLINE_SIZE) {
2738 dseg->bcount = rte_cpu_to_be_32(len);
2739 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
2740 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
2744 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
2745 /* Unrolled implementation of generic rte_memcpy. */
2746 dst = (uintptr_t)&dseg->inline_data[0];
2747 src = (uintptr_t)buf;
2749 #ifdef RTE_ARCH_STRICT_ALIGN
2750 assert(dst == RTE_PTR_ALIGN(dst, sizeof(uint32_t)));
2751 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
2752 dst += sizeof(uint32_t);
2753 src += sizeof(uint32_t);
2754 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
2755 dst += sizeof(uint32_t);
2756 src += sizeof(uint32_t);
2758 *(uint64_t *)dst = *(unaligned_uint64_t *)src;
2759 dst += sizeof(uint64_t);
2760 src += sizeof(uint64_t);
2764 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
2765 dst += sizeof(uint32_t);
2766 src += sizeof(uint32_t);
2769 *(uint16_t *)dst = *(unaligned_uint16_t *)src;
2770 dst += sizeof(uint16_t);
2771 src += sizeof(uint16_t);
2774 *(uint8_t *)dst = *(uint8_t *)src;
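/*
 * Example of the unrolled copy above (a sketch): for len == 11 the
 * set bits of len (0x8 | 0x2 | 0x1) select an 8-byte, a 2-byte and a
 * final 1-byte copy; no residual loop is needed because len can not
 * exceed the minimal Data Segment size of 12 bytes.
 */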
2778 * Build the Data Segment of inlined data from single
2779 * segment packet, no VLAN insertion.
2782 * Pointer to TX queue structure.
2784 * Pointer to burst routine local context.
2786 * Pointer to WQE to fill with built Data Segment.
2788 * Data buffer to point.
2790 * Data buffer length.
2792 * Configured Tx offloads mask. It is fully defined at
2793 * compile time and may be used for optimization.
2796 * Pointer to the next Data Segment after inlined data.
2797 * Ring buffer wraparound check is needed. We do not
2798 * do it here because it may not be needed for the
2799 * last packet in the eMPW session.
2801 static __rte_always_inline struct mlx5_wqe_dseg *
2802 mlx5_tx_dseg_empw(struct mlx5_txq_data *restrict txq,
2803 struct mlx5_txq_local *restrict loc __rte_unused,
2804 struct mlx5_wqe_dseg *restrict dseg,
2805 uint8_t *buf,
2806 unsigned int len,
2807 unsigned int olx __rte_unused)
2812 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
2813 pdst = &dseg->inline_data[0];
2815 * The WQEBB space availability is checked by caller.
2816 * Here we should be aware of WQE ring buffer wraparound only.
2818 part = (uint8_t *)txq->wqes_end - pdst;
2819 part = RTE_MIN(part, len);
2821 rte_memcpy(pdst, buf, part);
2825 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2826 /* Note: no final wraparound check here. */
2827 return (struct mlx5_wqe_dseg *)pdst;
2829 pdst = (uint8_t *)txq->wqes;
2836 * Build the Data Segment of inlined data from single
2837 * segment packet with VLAN insertion.
2840 * Pointer to TX queue structure.
2842 * Pointer to burst routine local context.
2844 * Pointer to the dseg to be filled with the built Data Segment.
2846 * Data buffer to point.
2848 * Data buffer length.
2850 * Configured Tx offloads mask. It is fully defined at
2851 * compile time and may be used for optimization.
2854 * Pointer to the next Data Segment after inlined data.
2855 * Ring buffer wraparound check is needed.
2857 static __rte_always_inline struct mlx5_wqe_dseg *
2858 mlx5_tx_dseg_vlan(struct mlx5_txq_data *restrict txq,
2859 struct mlx5_txq_local *restrict loc __rte_unused,
2860 struct mlx5_wqe_dseg *restrict dseg,
2861 uint8_t *buf,
2862 unsigned int len,
2863 unsigned int olx __rte_unused)
2869 assert(len > MLX5_ESEG_MIN_INLINE_SIZE);
2870 static_assert(MLX5_DSEG_MIN_INLINE_SIZE ==
2871 (2 * RTE_ETHER_ADDR_LEN),
2872 "invalid Data Segment data size");
2873 dseg->bcount = rte_cpu_to_be_32((len + sizeof(struct rte_vlan_hdr)) |
2874 MLX5_ETH_WQE_DATA_INLINE);
2875 pdst = &dseg->inline_data[0];
2876 memcpy(pdst, buf, MLX5_DSEG_MIN_INLINE_SIZE);
2877 buf += MLX5_DSEG_MIN_INLINE_SIZE;
2878 pdst += MLX5_DSEG_MIN_INLINE_SIZE;
2879 len -= MLX5_DSEG_MIN_INLINE_SIZE;
2880 /* Insert VLAN ethertype + VLAN tag. Pointer is aligned. */
2881 assert(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
2882 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
2883 pdst = (uint8_t *)txq->wqes;
2884 *(uint32_t *)pdst = rte_cpu_to_be_32((RTE_ETHER_TYPE_VLAN << 16) |
2885 loc->mbuf->vlan_tci);
2886 pdst += sizeof(struct rte_vlan_hdr);
2888 * The WQEBB space availability is checked by caller.
2889 * Here we should be aware of WQE ring buffer wraparound only.
2891 part = (uint8_t *)txq->wqes_end - pdst;
2892 part = RTE_MIN(part, len);
2894 rte_memcpy(pdst, buf, part);
2898 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2899 /* Note: no final wraparound check here. */
2900 return (struct mlx5_wqe_dseg *)pdst;
2902 pdst = (uint8_t *)txq->wqes;
2909 * Build the Ethernet Segment with optionally inlined data with
2910 * VLAN insertion and following Data Segments (if any) from
2911 * multi-segment packet. Used by ordinary send and TSO.
2914 * Pointer to TX queue structure.
2916 * Pointer to burst routine local context.
2918 * Pointer to WQE to fill with built Ethernet/Data Segments.
2920 * Length of VLAN header to insert, 0 means no VLAN insertion.
2922 * Data length to inline. For TSO this parameter specifies
2923 * the exact value; for the ordinary send routine it can be
2924 * aligned by the caller for better WQE space saving and
2925 * data buffer start address alignment. This length includes
2926 * the VLAN header being inserted.
2928 * Zero means ordinary send, inlined data can be extended,
2929 * otherwise this is TSO, inlined data length is fixed.
2931 * Configured Tx offloads mask. It is fully defined at
2932 * compile time and may be used for optimization.
2935 * Actual size of built WQE in segments.
2937 static __rte_always_inline unsigned int
2938 mlx5_tx_mseg_build(struct mlx5_txq_data *restrict txq,
2939 struct mlx5_txq_local *restrict loc,
2940 struct mlx5_wqe *restrict wqe,
2941 unsigned int vlan,
2942 unsigned int inlen,
2943 unsigned int tso,
2944 unsigned int olx __rte_unused)
2946 struct mlx5_wqe_dseg *restrict dseg;
2949 assert((rte_pktmbuf_pkt_len(loc->mbuf) + vlan) >= inlen);
2950 loc->mbuf_nseg = NB_SEGS(loc->mbuf);
2953 dseg = mlx5_tx_eseg_mdat(txq, loc, wqe, vlan, inlen, tso, olx);
2954 if (!loc->mbuf_nseg)
2957 * There are still some mbufs remaining, not inlined.
2958 * The first mbuf may be partially inlined and we
2959 * must process the possible non-zero data offset.
2961 if (loc->mbuf_off) {
2966 * Exhausted packets must have been dropped before.
2967 * A non-zero offset means there is some data
2968 * remaining in the packet.
2970 assert(loc->mbuf_off < rte_pktmbuf_data_len(loc->mbuf));
2971 assert(rte_pktmbuf_data_len(loc->mbuf));
2972 dptr = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
2974 dlen = rte_pktmbuf_data_len(loc->mbuf) - loc->mbuf_off;
2976 * Build the pointer/minimal data Data Segment.
2977 * Do ring buffer wrapping check in advance.
2979 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
2980 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
2981 mlx5_tx_dseg_iptr(txq, loc, dseg, dptr, dlen, olx);
2982 /* Store the mbuf to be freed on completion. */
2983 assert(loc->elts_free);
2984 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
2987 if (--loc->mbuf_nseg == 0)
2989 loc->mbuf = loc->mbuf->next;
2993 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
2994 struct rte_mbuf *mbuf;
2996 /* Zero length segment found, just skip. */
2997 mbuf = loc->mbuf;
2998 loc->mbuf = loc->mbuf->next;
2999 rte_pktmbuf_free_seg(mbuf);
3000 if (--loc->mbuf_nseg == 0)
3003 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3004 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3007 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3008 rte_pktmbuf_data_len(loc->mbuf), olx);
3009 assert(loc->elts_free);
3010 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3013 if (--loc->mbuf_nseg == 0)
3015 loc->mbuf = loc->mbuf->next;
3020 /* Calculate actual segments used from the dseg pointer. */
3021 if ((uintptr_t)wqe < (uintptr_t)dseg)
3022 ds = ((uintptr_t)dseg - (uintptr_t)wqe) / MLX5_WSEG_SIZE;
3024 ds = (((uintptr_t)dseg - (uintptr_t)wqe) +
3025 txq->wqe_s * MLX5_WQE_SIZE) / MLX5_WSEG_SIZE;
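/*
 * Worked example (values assumed): if the WQE starts 32 bytes before
 * the ring end and dseg wrapped to txq->wqes + 16, the ring size in
 * bytes (wqe_s WQEBBs of MLX5_WQE_SIZE) is added once so the byte
 * distance becomes 32 + 16 == 48, i.e. ds == 48 / 16 == 3 segments.
 */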
3030 * Tx one packet function for multi-segment TSO. Supports all
3031 * types of Tx offloads, uses MLX5_OPCODE_TSO to build WQEs,
3032 * sends one packet per WQE.
3034 * This routine is responsible for storing the processed mbuf
3035 * into the elts ring buffer and updating elts_head.
3038 * Pointer to TX queue structure.
3040 * Pointer to burst routine local context.
3042 * Configured Tx offloads mask. It is fully defined at
3043 * compile time and may be used for optimization.
3046 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3047 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3048 * Local context variables partially updated.
3050 static __rte_always_inline enum mlx5_txcmp_code
3051 mlx5_tx_packet_multi_tso(struct mlx5_txq_data *restrict txq,
3052 struct mlx5_txq_local *restrict loc,
3053 unsigned int olx)
3055 struct mlx5_wqe *restrict wqe;
3056 unsigned int ds, dlen, inlen, ntcp, vlan = 0;
3059 * Calculate data length to be inlined to estimate
3060 * the required space in WQE ring buffer.
3062 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
3063 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3064 vlan = sizeof(struct rte_vlan_hdr);
3065 inlen = loc->mbuf->l2_len + vlan +
3066 loc->mbuf->l3_len + loc->mbuf->l4_len;
3067 if (unlikely((!inlen || !loc->mbuf->tso_segsz)))
3068 return MLX5_TXCMP_CODE_ERROR;
3069 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
3070 inlen += loc->mbuf->outer_l2_len + loc->mbuf->outer_l3_len;
3071 /* Packet must contain all TSO headers. */
3072 if (unlikely(inlen > MLX5_MAX_TSO_HEADER ||
3073 inlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
3074 inlen > (dlen + vlan)))
3075 return MLX5_TXCMP_CODE_ERROR;
3076 assert(inlen >= txq->inlen_mode);
3078 * Check whether there are enough free WQEBBs:
3080 * - Ethernet Segment
3081 * - First Segment of inlined Ethernet data
3082 * - ... data continued ...
3083 * - Data Segments of pointer/min inline type
3085 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
3086 MLX5_ESEG_MIN_INLINE_SIZE +
3087 MLX5_WSEG_SIZE +
3088 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
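/*
 * Worked estimate (a sketch, values assumed): for NB_SEGS == 3 and
 * inlen == 60 the inlined data beyond the built-in 18 bytes takes
 * (60 - 18 + 16 + 15) / 16 == 4 extra 16-byte segments, hence
 * ds = 3 + 2 + 4 == 9 and (9 + 3) / 4 == 3 WQEBBs must be free.
 */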
3089 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3090 return MLX5_TXCMP_CODE_EXIT;
3091 /* Check for maximal WQE size. */
3092 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3093 return MLX5_TXCMP_CODE_ERROR;
3094 #ifdef MLX5_PMD_SOFT_COUNTERS
3095 /* Update sent data bytes/packets counters. */
3096 ntcp = (dlen - (inlen - vlan) + loc->mbuf->tso_segsz - 1) /
3097 loc->mbuf->tso_segsz;
3099 * One will be added for the mbuf itself at the
3100 * end of mlx5_tx_burst, from the loc->pkts_sent
3101 * field.
3103 --ntcp;
3104 txq->stats.opackets += ntcp;
3105 txq->stats.obytes += dlen + vlan + ntcp * inlen;
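/*
 * Worked counter example (values assumed): dlen == 4014, no VLAN,
 * TSO header inlen == 54 and tso_segsz == 1980 give two wire
 * packets; after the decrement above opackets grows by one here (the
 * second one is counted from loc->pkts_sent at burst end) and obytes
 * grows by 4014 + 1 * 54 == 4068, the payload plus two TSO headers.
 */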
3107 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3108 loc->wqe_last = wqe;
3109 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_TSO, olx);
3110 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 1, olx);
3111 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3112 txq->wqe_ci += (ds + 3) / 4;
3113 loc->wqe_free -= (ds + 3) / 4;
3114 /* Request CQE generation if limits are reached. */
3115 mlx5_tx_request_completion(txq, loc, true, olx);
3116 return MLX5_TXCMP_CODE_MULTI;
3120 * Tx one packet function for multi-segment SEND. Supports all
3121 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
3122 * sends one packet per WQE, without any data inlining in
3123 * the Ethernet Segment.
3125 * This routine is responsible for storing the processed mbuf
3126 * into the elts ring buffer and updating elts_head.
3129 * Pointer to TX queue structure.
3131 * Pointer to burst routine local context.
3133 * Configured Tx offloads mask. It is fully defined at
3134 * compile time and may be used for optimization.
3137 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3138 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3139 * Local context variables partially updated.
3141 static __rte_always_inline enum mlx5_txcmp_code
3142 mlx5_tx_packet_multi_send(struct mlx5_txq_data *restrict txq,
3143 struct mlx5_txq_local *restrict loc,
3144 unsigned int olx)
3146 struct mlx5_wqe_dseg *restrict dseg;
3147 struct mlx5_wqe *restrict wqe;
3148 unsigned int ds, nseg;
3150 assert(NB_SEGS(loc->mbuf) > 1);
3152 * No inlining at all: this means that saving CPU cycles
3153 * was prioritized at configuration time, so we should not
3154 * copy any packet data into the WQE.
3156 nseg = NB_SEGS(loc->mbuf);
3157 ds = 2 + nseg;
3158 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3159 return MLX5_TXCMP_CODE_EXIT;
3160 /* Check for maximal WQE size. */
3161 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3162 return MLX5_TXCMP_CODE_ERROR;
3164 * Some Tx offloads may cause an error if the
3165 * packet is not long enough; check against the
3166 * assumed minimal length.
3168 if (rte_pktmbuf_pkt_len(loc->mbuf) <= MLX5_ESEG_MIN_INLINE_SIZE)
3169 return MLX5_TXCMP_CODE_ERROR;
3170 #ifdef MLX5_PMD_SOFT_COUNTERS
3171 /* Update sent data bytes counter. */
3172 txq->stats.obytes += rte_pktmbuf_pkt_len(loc->mbuf);
3173 if (MLX5_TXOFF_CONFIG(VLAN) &&
3174 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3175 txq->stats.obytes += sizeof(struct rte_vlan_hdr);
3178 * SEND WQE, one WQEBB:
3179 * - Control Segment, SEND opcode
3180 * - Ethernet Segment, optional VLAN, no inline
3181 * - Data Segments, pointer only type
3183 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3184 loc->wqe_last = wqe;
3185 mlx5_tx_cseg_init(txq, loc, wqe, ds, MLX5_OPCODE_SEND, olx);
3186 mlx5_tx_eseg_none(txq, loc, wqe, olx);
3187 dseg = &wqe->dseg[0];
3189 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
3190 struct rte_mbuf *mbuf;
3193 * Zero length segment found, have to
3194 * correct the total size of the WQE in segments.
3195 * It is supposed to be a rare occasion, so
3196 * in the normal case (no zero length segments)
3197 * we avoid extra writing to the Control Segment.
3201 wqe->cseg.sq_ds -= RTE_BE32(1);
3202 mbuf = loc->mbuf;
3203 loc->mbuf = mbuf->next;
3204 rte_pktmbuf_free_seg(mbuf);
3210 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3211 rte_pktmbuf_data_len(loc->mbuf), olx);
3212 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3217 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3218 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3219 loc->mbuf = loc->mbuf->next;
3222 txq->wqe_ci += (ds + 3) / 4;
3223 loc->wqe_free -= (ds + 3) / 4;
3224 /* Request CQE generation if limits are reached. */
3225 mlx5_tx_request_completion(txq, loc, true, olx);
3226 return MLX5_TXCMP_CODE_MULTI;
3230 * Tx one packet function for multi-segment SEND. Supports all
3231 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
3232 * sends one packet per WQE, with data inlining in
3233 * Ethernet Segment and minimal Data Segments.
3235 * This routine is responsible for storing the processed mbuf
3236 * into the elts ring buffer and updating elts_head.
3239 * Pointer to TX queue structure.
3241 * Pointer to burst routine local context.
3243 * Configured Tx offloads mask. It is fully defined at
3244 * compile time and may be used for optimization.
3247 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3248 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3249 * Local context variables partially updated.
3251 static __rte_always_inline enum mlx5_txcmp_code
3252 mlx5_tx_packet_multi_inline(struct mlx5_txq_data *restrict txq,
3253 struct mlx5_txq_local *restrict loc,
3254 unsigned int olx)
3256 struct mlx5_wqe *restrict wqe;
3257 unsigned int ds, inlen, dlen, vlan = 0;
3259 assert(MLX5_TXOFF_CONFIG(INLINE));
3260 assert(NB_SEGS(loc->mbuf) > 1);
3262 * First calculate data length to be inlined
3263 * to estimate the required space for WQE.
3265 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
3266 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3267 vlan = sizeof(struct rte_vlan_hdr);
3268 inlen = dlen + vlan;
3269 /* Check against minimal length. */
3270 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
3271 return MLX5_TXCMP_CODE_ERROR;
3272 assert(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
3273 if (inlen > txq->inlen_send) {
3274 struct rte_mbuf *mbuf;
3279 * Packet length exceeds the allowed inline
3280 * data length, check whether the minimal
3281 * inlining is required.
3283 if (txq->inlen_mode) {
3284 assert(txq->inlen_mode >= MLX5_ESEG_MIN_INLINE_SIZE);
3285 assert(txq->inlen_mode <= txq->inlen_send);
3286 inlen = txq->inlen_mode;
3288 if (!vlan || txq->vlan_en) {
3290 * VLAN insertion will be done inside by HW.
3291 * It is not the most efficient way - the VLAN flag is
3292 * checked twice - but we must compute the
3293 * inlining length correctly and take into
3294 * account the VLAN header being inserted.
3296 return mlx5_tx_packet_multi_send
3299 inlen = MLX5_ESEG_MIN_INLINE_SIZE;
3302 * Now we know the minimal amount of data requested
3303 * to inline. Check whether we should inline the buffers
3304 * from the beginning of the chain to eliminate some mbufs.
3307 nxlen = rte_pktmbuf_data_len(mbuf);
3308 if (unlikely(nxlen <= txq->inlen_send)) {
3309 /* We can inline first mbuf at least. */
3310 if (nxlen < inlen) {
3313 /* Scan mbufs till inlen filled. */
3318 nxlen = rte_pktmbuf_data_len(mbuf);
3320 } while (unlikely(nxlen < inlen));
3321 if (unlikely(nxlen > txq->inlen_send)) {
3322 /* We cannot inline entire mbuf. */
3323 smlen = inlen - smlen;
3324 start = rte_pktmbuf_mtod_offset
3325 (mbuf, uintptr_t, smlen);
3332 /* There should be no end of packet here. */
3334 nxlen = inlen + rte_pktmbuf_data_len(mbuf);
3335 } while (unlikely(nxlen < txq->inlen_send));
3337 start = rte_pktmbuf_mtod(mbuf, uintptr_t);
3339 * Check whether we can do inline to align start
3340 * address of data buffer to cacheline.
3343 start = (~start + 1) & (RTE_CACHE_LINE_SIZE - 1);
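/*
 * The two's-complement trick: (~start + 1) equals -start, so the
 * masked value is the distance to the next cacheline boundary. E.g.
 * (assumed) start == 0x...1038 with 64-byte lines yields
 * (-0x38) & 0x3F == 8 more bytes to inline for alignment.
 */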
3344 if (unlikely(start)) {
3346 if (start <= txq->inlen_send)
3351 * Check whether there are enough free WQEBBs:
3353 * - Ethernet Segment
3354 * - First Segment of inlined Ethernet data
3355 * - ... data continued ...
3356 * - Data Segments of pointer/min inline type
3358 * Estimate the number of Data Segments conservatively,
3359 * supposing that no mbufs are freed during inlining.
3361 assert(inlen <= txq->inlen_send);
3362 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
3363 MLX5_ESEG_MIN_INLINE_SIZE +
3364 MLX5_WSEG_SIZE +
3365 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3366 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3367 return MLX5_TXCMP_CODE_EXIT;
3368 /* Check for maximal WQE size. */
3369 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3370 return MLX5_TXCMP_CODE_ERROR;
3371 #ifdef MLX5_PMD_SOFT_COUNTERS
3372 /* Update sent data bytes/packets counters. */
3373 txq->stats.obytes += dlen + vlan;
3375 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3376 loc->wqe_last = wqe;
3377 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_SEND, olx);
3378 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 0, olx);
3379 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3380 txq->wqe_ci += (ds + 3) / 4;
3381 loc->wqe_free -= (ds + 3) / 4;
3382 /* Request CQE generation if limits are reached. */
3383 mlx5_tx_request_completion(txq, loc, true, olx);
3384 return MLX5_TXCMP_CODE_MULTI;
3388 * Tx burst function for multi-segment packets. Supports all
3389 * types of Tx offloads, uses MLX5_OPCODE_SEND/TSO to build WQEs,
3390 * sends one packet per WQE. The function stops sending if it
3391 * encounters a single-segment packet.
3393 * This routine is responsible for storing the processed mbuf
3394 * into the elts ring buffer and updating elts_head.
3397 * Pointer to TX queue structure.
3399 * Packets to transmit.
3401 * Number of packets in array.
3403 * Pointer to burst routine local context.
3405 * Configured Tx offloads mask. It is fully defined at
3406 * compile time and may be used for optimization.
3409 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3410 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3411 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
3412 * MLX5_TXCMP_CODE_TSO - TSO single-segment packet encountered.
3413 * Local context variables updated.
3415 static __rte_always_inline enum mlx5_txcmp_code
3416 mlx5_tx_burst_mseg(struct mlx5_txq_data *restrict txq,
3417 struct rte_mbuf **restrict pkts,
3418 unsigned int pkts_n,
3419 struct mlx5_txq_local *restrict loc,
3420 unsigned int olx)
3422 assert(loc->elts_free && loc->wqe_free);
3423 assert(pkts_n > loc->pkts_sent);
3424 pkts += loc->pkts_sent + 1;
3425 pkts_n -= loc->pkts_sent;
3427 enum mlx5_txcmp_code ret;
3429 assert(NB_SEGS(loc->mbuf) > 1);
3431 * Estimate the number of free elts quickly but
3432 * conservatively. Some segment may be fully inlined
3433 * and freed; ignore this here - precise estimation
3434 * is costly.
3436 if (loc->elts_free < NB_SEGS(loc->mbuf))
3437 return MLX5_TXCMP_CODE_EXIT;
3438 if (MLX5_TXOFF_CONFIG(TSO) &&
3439 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)) {
3440 /* Proceed with multi-segment TSO. */
3441 ret = mlx5_tx_packet_multi_tso(txq, loc, olx);
3442 } else if (MLX5_TXOFF_CONFIG(INLINE)) {
3443 /* Proceed with multi-segment SEND with inlining. */
3444 ret = mlx5_tx_packet_multi_inline(txq, loc, olx);
3446 /* Proceed with multi-segment SEND w/o inlining. */
3447 ret = mlx5_tx_packet_multi_send(txq, loc, olx);
3449 if (ret == MLX5_TXCMP_CODE_EXIT)
3450 return MLX5_TXCMP_CODE_EXIT;
3451 if (ret == MLX5_TXCMP_CODE_ERROR)
3452 return MLX5_TXCMP_CODE_ERROR;
3453 /* WQE is built, go to the next packet. */
3456 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3457 return MLX5_TXCMP_CODE_EXIT;
3458 loc->mbuf = *pkts++;
3460 rte_prefetch0(*pkts);
3461 if (likely(NB_SEGS(loc->mbuf) > 1))
3463 /* Here ends the series of multi-segment packets. */
3464 if (MLX5_TXOFF_CONFIG(TSO) &&
3465 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
3466 return MLX5_TXCMP_CODE_TSO;
3467 return MLX5_TXCMP_CODE_SINGLE;
3473 * Tx burst function for single-segment packets with TSO.
3474 * Supports all types of Tx offloads, except multi-packets.
3475 * Uses MLX5_OPCODE_TSO to build WQEs, sends one packet per WQE.
3476 * Function stops sending if it encounters the multi-segment
3477 * packet or packet without TSO requested.
3479 * The routine is responsible for storing the processed mbuf
3480 * into the elts ring buffer and updating elts_head if inline
3481 * offload is requested, due to the possible early freeing
3482 * of the inlined mbufs (the pkts array can not be stored
3483 * in elts as a batch).
3486 * Pointer to TX queue structure.
3488 * Packets to transmit.
3490 * Number of packets in array.
3492 * Pointer to burst routine local context.
3494 * Configured Tx offloads mask. It is fully defined at
3495 * compile time and may be used for optimization.
3498 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3499 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3500 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
3501 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
3502 * Local context variables updated.
3504 static __rte_always_inline enum mlx5_txcmp_code
3505 mlx5_tx_burst_tso(struct mlx5_txq_data *restrict txq,
3506 struct rte_mbuf **restrict pkts,
3507 unsigned int pkts_n,
3508 struct mlx5_txq_local *restrict loc,
3509 unsigned int olx)
3511 assert(loc->elts_free && loc->wqe_free);
3512 assert(pkts_n > loc->pkts_sent);
3513 pkts += loc->pkts_sent + 1;
3514 pkts_n -= loc->pkts_sent;
3516 struct mlx5_wqe_dseg *restrict dseg;
3517 struct mlx5_wqe *restrict wqe;
3518 unsigned int ds, dlen, hlen, ntcp, vlan = 0;
3521 assert(NB_SEGS(loc->mbuf) == 1);
3522 dlen = rte_pktmbuf_data_len(loc->mbuf);
3523 if (MLX5_TXOFF_CONFIG(VLAN) &&
3524 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
3525 vlan = sizeof(struct rte_vlan_hdr);
3528 * First calculate the WQE size to check
3529 * whether we have enough space in ring buffer.
3531 hlen = loc->mbuf->l2_len + vlan +
3532 loc->mbuf->l3_len + loc->mbuf->l4_len;
3533 if (unlikely((!hlen || !loc->mbuf->tso_segsz)))
3534 return MLX5_TXCMP_CODE_ERROR;
3535 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
3536 hlen += loc->mbuf->outer_l2_len +
3537 loc->mbuf->outer_l3_len;
3538 /* Segment must contain all TSO headers. */
3539 if (unlikely(hlen > MLX5_MAX_TSO_HEADER ||
3540 hlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
3541 hlen > (dlen + vlan)))
3542 return MLX5_TXCMP_CODE_ERROR;
3544 * Check whether there are enough free WQEBBs:
3546 * - Ethernet Segment
3547 * - First Segment of inlined Ethernet data
3548 * - ... data continued ...
3549 * - Finishing Data Segment of pointer type
3551 ds = 4 + (hlen - MLX5_ESEG_MIN_INLINE_SIZE +
3552 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3553 if (loc->wqe_free < ((ds + 3) / 4))
3554 return MLX5_TXCMP_CODE_EXIT;
3555 #ifdef MLX5_PMD_SOFT_COUNTERS
3556 /* Update sent data bytes/packets counters. */
3557 ntcp = (dlen + vlan - hlen +
3558 loc->mbuf->tso_segsz - 1) /
3559 loc->mbuf->tso_segsz;
3561 * One will be added for the mbuf itself at the end
3562 * of mlx5_tx_burst, from the loc->pkts_sent field.
3564 --ntcp;
3565 txq->stats.opackets += ntcp;
3566 txq->stats.obytes += dlen + vlan + ntcp * hlen;
3569 * Build the TSO WQE:
3571 * - Ethernet Segment with hlen bytes inlined
3572 * - Data Segment of pointer type
3574 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3575 loc->wqe_last = wqe;
3576 mlx5_tx_cseg_init(txq, loc, wqe, ds,
3577 MLX5_OPCODE_TSO, olx);
3578 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan, hlen, 1, olx);
3579 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) + hlen - vlan;
3580 dlen -= hlen - vlan;
3581 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
3583 * WQE is built, update the loop parameters
3584 * and go to the next packet.
3586 txq->wqe_ci += (ds + 3) / 4;
3587 loc->wqe_free -= (ds + 3) / 4;
3588 if (MLX5_TXOFF_CONFIG(INLINE))
3589 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3593 /* Request CQE generation if limits are reached. */
3594 mlx5_tx_request_completion(txq, loc, false, olx);
3595 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3596 return MLX5_TXCMP_CODE_EXIT;
3597 loc->mbuf = *pkts++;
3599 rte_prefetch0(*pkts);
3600 if (MLX5_TXOFF_CONFIG(MULTI) &&
3601 unlikely(NB_SEGS(loc->mbuf) > 1))
3602 return MLX5_TXCMP_CODE_MULTI;
3603 if (likely(!(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)))
3604 return MLX5_TXCMP_CODE_SINGLE;
3605 /* Continue with the next TSO packet. */
3611 * Analyze the packet and select the best method to send.
3614 * Pointer to TX queue structure.
3616 * Pointer to burst routine local context.
3618 * Configured Tx offloads mask. It is fully defined at
3619 * compile time and may be used for optimization.
3621 * The predefined flag selecting whether to do the complete
3622 * check for multi-segment packets and TSO.
3625 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
3626 * MLX5_TXCMP_CODE_TSO - TSO required, use TSO/LSO.
3627 * MLX5_TXCMP_CODE_SINGLE - single-segment packet, use SEND.
3628 * MLX5_TXCMP_CODE_EMPW - single-segment packet, use MPW.
3630 static __rte_always_inline enum mlx5_txcmp_code
3631 mlx5_tx_able_to_empw(struct mlx5_txq_data *restrict txq,
3632 struct mlx5_txq_local *restrict loc,
3633 unsigned int olx,
3634 bool newp)
3636 /* Check for multi-segment packet. */
3637 if (newp &&
3638 MLX5_TXOFF_CONFIG(MULTI) &&
3639 unlikely(NB_SEGS(loc->mbuf) > 1))
3640 return MLX5_TXCMP_CODE_MULTI;
3641 /* Check for TSO packet. */
3642 if (newp &&
3643 MLX5_TXOFF_CONFIG(TSO) &&
3644 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
3645 return MLX5_TXCMP_CODE_TSO;
3646 /* Check if eMPW is enabled at all. */
3647 if (!MLX5_TXOFF_CONFIG(EMPW))
3648 return MLX5_TXCMP_CODE_SINGLE;
3649 /* Check if eMPW can be engaged. */
3650 if (MLX5_TXOFF_CONFIG(VLAN) &&
3651 unlikely(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) &&
3652 (!MLX5_TXOFF_CONFIG(INLINE) ||
3653 unlikely((rte_pktmbuf_data_len(loc->mbuf) +
3654 sizeof(struct rte_vlan_hdr)) > txq->inlen_empw))) {
3656 * eMPW does not support VLAN insertion offload, so
3657 * we would have to inline the entire packet, but the
3658 * packet is too long for inlining.
3660 return MLX5_TXCMP_CODE_SINGLE;
3662 return MLX5_TXCMP_CODE_EMPW;
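/*
 * Classification summary (a sketch of the checks above):
 *   multi-segment packet          -> MLX5_TXCMP_CODE_MULTI
 *   TSO requested                 -> MLX5_TXCMP_CODE_TSO
 *   eMPW disabled at compile time -> MLX5_TXCMP_CODE_SINGLE
 *   VLAN that can not be inlined  -> MLX5_TXCMP_CODE_SINGLE
 *   otherwise                     -> MLX5_TXCMP_CODE_EMPW
 */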
3666 * Check the next packet attributes to match with the eMPW batch ones.
3669 * Pointer to TX queue structure.
3671 * Pointer to Ethernet Segment of eMPW batch.
3673 * Pointer to burst routine local context.
3675 * Configured Tx offloads mask. It is fully defined at
3676 * compile time and may be used for optimization.
3679 * true - packet match with eMPW batch attributes.
3680 * false - no match, eMPW should be restarted.
3682 static __rte_always_inline bool
3683 mlx5_tx_match_empw(struct mlx5_txq_data *restrict txq __rte_unused,
3684 struct mlx5_wqe_eseg *restrict es,
3685 struct mlx5_txq_local *restrict loc,
3686 unsigned int olx)
3688 uint8_t swp_flags = 0;
3690 /* Compare the checksum flags, if any. */
3691 if (MLX5_TXOFF_CONFIG(CSUM) &&
3692 txq_ol_cksum_to_cs(loc->mbuf) != es->cs_flags)
3693 return false;
3694 /* Compare the Software Parser offsets and flags. */
3695 if (MLX5_TXOFF_CONFIG(SWP) &&
3696 (es->swp_offs != txq_mbuf_to_swp(loc, &swp_flags, olx) ||
3697 es->swp_flags != swp_flags))
3698 return false;
3699 /* Compare the metadata, if any. */
3700 if (MLX5_TXOFF_CONFIG(METADATA) &&
3701 es->metadata != (loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
3702 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0))
3703 return false;
3704 /* There must be no VLAN packets in eMPW loop. */
3705 if (MLX5_TXOFF_CONFIG(VLAN))
3706 assert(!(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT));
3707 return true;
3711 * Update send loop variables and WQE for eMPW loop
3712 * without data inlining. Number of Data Segments is
3713 * equal to the number of sent packets.
3716 * Pointer to TX queue structure.
3718 * Pointer to burst routine local context.
3720 * Number of packets (equal to the number of built Data Segments).
3722 * Accumulated statistics, number of bytes sent.
3724 * Configured Tx offloads mask. It is fully defined at
3725 * compile time and may be used for optimization.
3731 static __rte_always_inline void
3732 mlx5_tx_sdone_empw(struct mlx5_txq_data *restrict txq,
3733 struct mlx5_txq_local *restrict loc,
3734 unsigned int ds,
3735 unsigned int slen,
3736 unsigned int olx)
3738 assert(!MLX5_TXOFF_CONFIG(INLINE));
3739 #ifdef MLX5_PMD_SOFT_COUNTERS
3740 /* Update sent data bytes counter. */
3741 txq->stats.obytes += slen;
3745 loc->elts_free -= ds;
3746 loc->pkts_sent += ds;
3748 loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3749 txq->wqe_ci += (ds + 3) / 4;
3750 loc->wqe_free -= (ds + 3) / 4;
3751 /* Request CQE generation if limits are reached. */
3752 mlx5_tx_request_completion(txq, loc, false, olx);
3756 * Update send loop variables and WQE for eMPW loop
3757 * with data inlining. Gets the size of pushed descriptors
3758 * and data to the WQE.
3761 * Pointer to TX queue structure.
3763 * Pointer to burst routine local context.
3765 * Total size of descriptor/data in bytes.
3767 * Accumulated statistics, data bytes sent.
3769 * Configured Tx offloads mask. It is fully defined at
3770 * compile time and may be used for optimization.
3776 static __rte_always_inline void
3777 mlx5_tx_idone_empw(struct mlx5_txq_data *restrict txq,
3778 struct mlx5_txq_local *restrict loc,
3779 unsigned int len,
3780 unsigned int slen,
3781 unsigned int olx __rte_unused)
3783 assert(MLX5_TXOFF_CONFIG(INLINE));
3784 assert((len % MLX5_WSEG_SIZE) == 0);
3785 #ifdef MLX5_PMD_SOFT_COUNTERS
3786 /* Update sent data bytes counter. */
3787 txq->stats.obytes += slen;
3791 len = len / MLX5_WSEG_SIZE + 2;
3792 loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | len);
3793 txq->wqe_ci += (len + 3) / 4;
3794 loc->wqe_free -= (len + 3) / 4;
3795 /* Request CQE generation if limits are reached. */
3796 mlx5_tx_request_completion(txq, loc, false, olx);
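/*
 * Worked example (values assumed): 112 bytes of inlined data give
 * len / MLX5_WSEG_SIZE + 2 == 112 / 16 + 2 == 9 sixteen-byte
 * segments including the Control and Ethernet ones, occupying
 * (9 + 3) / 4 == 3 WQEBBs.
 */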
3800 * The set of Tx burst functions for single-segment packets
3801 * without TSO and with Multi-Packet Writing feature support.
3802 * Supports all types of Tx offloads, except multi-packets
3805 * Uses MLX5_OPCODE_EMPW to build WQEs if possible and sends
3806 * as many packets per WQE as it can. If eMPW is not configured
3807 * or the packet can not be sent with eMPW (VLAN insertion),
3808 * the ordinary SEND opcode is used and only one packet is
3809 * placed into the WQE.
3811 * The functions stop sending if they encounter a multi-segment
3812 * packet or a packet with TSO requested.
3814 * The routines are responsible for storing processed mbufs
3815 * into the elts ring buffer and updating elts_head if inlining
3816 * offload is requested. Otherwise the copying of mbufs to elts
3817 * can be postponed and completed at the end of the burst routine.
3820 * Pointer to TX queue structure.
3822 * Packets to transmit.
3824 * Number of packets in array.
3826 * Pointer to burst routine local context.
3828 * Configured Tx offloads mask. It is fully defined at
3829 * compile time and may be used for optimization.
3832 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3833 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3834 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
3835 * MLX5_TXCMP_CODE_TSO - TSO packet encountered.
3836 * MLX5_TXCMP_CODE_SINGLE - used inside functions set.
3837 * MLX5_TXCMP_CODE_EMPW - used inside functions set.
3839 * Local context variables updated.
3842 * The routine sends packets with MLX5_OPCODE_EMPW
3843 * without inlining, this is dedicated optimized branch.
3844 * No VLAN insertion is supported.
3846 static __rte_always_inline enum mlx5_txcmp_code
3847 mlx5_tx_burst_empw_simple(struct mlx5_txq_data *restrict txq,
3848 struct rte_mbuf **restrict pkts,
3849 unsigned int pkts_n,
3850 struct mlx5_txq_local *restrict loc,
3851 unsigned int olx)
3854 * Subroutine is the part of mlx5_tx_burst_single()
3855 * and sends single-segment packet with eMPW opcode
3856 * without data inlining.
3858 assert(!MLX5_TXOFF_CONFIG(INLINE));
3859 assert(MLX5_TXOFF_CONFIG(EMPW));
3860 assert(loc->elts_free && loc->wqe_free);
3861 assert(pkts_n > loc->pkts_sent);
3862 static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
3863 pkts += loc->pkts_sent + 1;
3864 pkts_n -= loc->pkts_sent;
3866 struct mlx5_wqe_dseg *restrict dseg;
3867 struct mlx5_wqe_eseg *restrict eseg;
3868 enum mlx5_txcmp_code ret;
3869 unsigned int part, loop;
3870 unsigned int slen = 0;
3873 part = RTE_MIN(pkts_n, MLX5_EMPW_MAX_PACKETS);
3874 if (unlikely(loc->elts_free < part)) {
3875 /* We do not have enough elts to store all mbufs. */
3876 if (unlikely(loc->elts_free < MLX5_EMPW_MIN_PACKETS))
3877 return MLX5_TXCMP_CODE_EXIT;
3878 /* But we are still able to send at least a minimal eMPW. */
3879 part = loc->elts_free;
3881 /* Check whether we have enough WQEs */
3882 if (unlikely(loc->wqe_free < ((2 + part + 3) / 4))) {
3883 if (unlikely(loc->wqe_free <
3884 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
3885 return MLX5_TXCMP_CODE_EXIT;
3886 part = (loc->wqe_free * 4) - 2;
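/*
 * Worked example (values assumed): with only 3 free WQEBBs there is
 * room for 3 * 4 == 12 sixteen-byte segments; the Control and
 * Ethernet Segments take two of them, so at most part == 10 packets
 * (one pointer Data Segment each) fit into this eMPW session.
 */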
3888 if (likely(part > 1))
3889 rte_prefetch0(*pkts);
3890 loc->wqe_last = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3892 * Build eMPW title WQEBB:
3893 * - Control Segment, eMPW opcode
3894 * - Ethernet Segment, no inline
3896 mlx5_tx_cseg_init(txq, loc, loc->wqe_last, part + 2,
3897 MLX5_OPCODE_ENHANCED_MPSW, olx);
3898 mlx5_tx_eseg_none(txq, loc, loc->wqe_last,
3899 olx & ~MLX5_TXOFF_CONFIG_VLAN);
3900 eseg = &loc->wqe_last->eseg;
3901 dseg = &loc->wqe_last->dseg[0];
3904 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
3905 #ifdef MLX5_PMD_SOFT_COUNTERS
3906 /* Update sent data bytes counter. */
3911 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3913 if (unlikely(--loop == 0))
3915 loc->mbuf = *pkts++;
3916 if (likely(loop > 1))
3917 rte_prefetch0(*pkts);
3918 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
3920 * Unroll the completion code to avoid
3921 * returning a variable value - it results in
3922 * unoptimized sequential checking in the caller.
3924 if (ret == MLX5_TXCMP_CODE_MULTI) {
3926 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3927 if (unlikely(!loc->elts_free ||
3929 return MLX5_TXCMP_CODE_EXIT;
3930 return MLX5_TXCMP_CODE_MULTI;
3932 if (ret == MLX5_TXCMP_CODE_TSO) {
3934 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3935 if (unlikely(!loc->elts_free ||
3937 return MLX5_TXCMP_CODE_EXIT;
3938 return MLX5_TXCMP_CODE_TSO;
3940 if (ret == MLX5_TXCMP_CODE_SINGLE) {
3942 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3943 if (unlikely(!loc->elts_free ||
3945 return MLX5_TXCMP_CODE_EXIT;
3946 return MLX5_TXCMP_CODE_SINGLE;
3948 if (ret != MLX5_TXCMP_CODE_EMPW) {
3951 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3952 return MLX5_TXCMP_CODE_ERROR;
3955 * Check whether packet parameters coincide
3956 * within assumed eMPW batch:
3957 * - check sum settings
3958 * - metadata value
3959 * - software parser settings
3961 if (!mlx5_tx_match_empw(txq, eseg, loc, olx)) {
3964 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3965 if (unlikely(!loc->elts_free ||
3967 return MLX5_TXCMP_CODE_EXIT;
3971 /* Packet attributes match, continue the same eMPW. */
3973 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3974 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3976 /* eMPW is built successfully, update loop parameters. */
3978 assert(pkts_n >= part);
3979 #ifdef MLX5_PMD_SOFT_COUNTERS
3980 /* Update sent data bytes counter. */
3981 txq->stats.obytes += slen;
3983 loc->elts_free -= part;
3984 loc->pkts_sent += part;
3985 txq->wqe_ci += (2 + part + 3) / 4;
3986 loc->wqe_free -= (2 + part + 3) / 4;
3988 /* Request CQE generation if limits are reached. */
3989 mlx5_tx_request_completion(txq, loc, false, olx);
3990 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3991 return MLX5_TXCMP_CODE_EXIT;
3992 loc->mbuf = *pkts++;
3993 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
3994 if (unlikely(ret != MLX5_TXCMP_CODE_EMPW))
3996 /* Continue sending eMPW batches. */
4002 * The routine sends packets with MLX5_OPCODE_EMPW
4003 * with inlining, optionally supports VLAN insertion.
4005 static __rte_always_inline enum mlx5_txcmp_code
4006 mlx5_tx_burst_empw_inline(struct mlx5_txq_data *restrict txq,
4007 struct rte_mbuf **restrict pkts,
4008 unsigned int pkts_n,
4009 struct mlx5_txq_local *restrict loc,
4010 unsigned int olx)
4013 * Subroutine is the part of mlx5_tx_burst_single()
4014 * and sends single-segment packet with eMPW opcode
4015 * with data inlining.
4017 assert(MLX5_TXOFF_CONFIG(INLINE));
4018 assert(MLX5_TXOFF_CONFIG(EMPW));
4019 assert(loc->elts_free && loc->wqe_free);
4020 assert(pkts_n > loc->pkts_sent);
4021 static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
4022 pkts += loc->pkts_sent + 1;
4023 pkts_n -= loc->pkts_sent;
4025 struct mlx5_wqe_dseg *restrict dseg;
4026 struct mlx5_wqe_eseg *restrict eseg;
4027 enum mlx5_txcmp_code ret;
4028 unsigned int room, part, nlim;
4029 unsigned int slen = 0;
4032 * Limit the number of packets in one WQE
4033 * to improve the CQE generation latency.
4035 nlim = RTE_MIN(pkts_n, MLX5_EMPW_MAX_PACKETS);
4036 /* Check whether we have a minimal amount of WQEs. */
4037 if (unlikely(loc->wqe_free <
4038 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
4039 return MLX5_TXCMP_CODE_EXIT;
4040 if (likely(pkts_n > 1))
4041 rte_prefetch0(*pkts);
4042 loc->wqe_last = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4044 * Build eMPW title WQEBB:
4045 * - Control Segment, eMPW opcode, zero DS
4046 * - Ethernet Segment, no inline
4048 mlx5_tx_cseg_init(txq, loc, loc->wqe_last, 0,
4049 MLX5_OPCODE_ENHANCED_MPSW, olx);
4050 mlx5_tx_eseg_none(txq, loc, loc->wqe_last,
4051 olx & ~MLX5_TXOFF_CONFIG_VLAN);
4052 eseg = &loc->wqe_last->eseg;
4053 dseg = &loc->wqe_last->dseg[0];
4054 room = RTE_MIN(MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE,
4055 loc->wqe_free) * MLX5_WQE_SIZE -
4056 MLX5_WQE_CSEG_SIZE -
4057 MLX5_WQE_ESEG_SIZE;
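/*
 * Worked example (a sketch, assuming a 256-byte maximal WQE size):
 * the cap is min(256 / 64, wqe_free) WQEBBs, so with enough free
 * WQEBBs room == 4 * 64 - 16 - 16 == 224 bytes remain for Data
 * Segments after the title Control and Ethernet Segments.
 */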
4058 /* Build WQE till we have space, packets and resources. */
4061 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
4062 uint8_t *dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
4065 assert(room >= MLX5_WQE_DSEG_SIZE);
4066 assert((room % MLX5_WQE_DSEG_SIZE) == 0);
4067 assert((uintptr_t)dseg < (uintptr_t)txq->wqes_end);
4069 * Some Tx offloads may cause an error if the
4070 * packet is not long enough; check against the
4071 * assumed minimal length.
4073 if (unlikely(dlen <= MLX5_ESEG_MIN_INLINE_SIZE)) {
4075 if (unlikely(!part))
4076 return MLX5_TXCMP_CODE_ERROR;
4078 * We have some successfully built
4079 * packet Data Segments to send.
4081 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4082 return MLX5_TXCMP_CODE_ERROR;
4084 /* Inline or not inline - that's the Question. */
4085 if (dlen > txq->inlen_empw)
4087 /* Inline entire packet, optional VLAN insertion. */
4088 tlen = sizeof(dseg->bcount) + dlen;
4089 if (MLX5_TXOFF_CONFIG(VLAN) &&
4090 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
4092 * The packet length has been checked in
4093 * mlx5_tx_able_to_empw() and the packet
4094 * is guaranteed to fit into the inline length.
4096 assert((dlen + sizeof(struct rte_vlan_hdr)) <=
4098 tlen += sizeof(struct rte_vlan_hdr);
4101 dseg = mlx5_tx_dseg_vlan(txq, loc, dseg,
4103 #ifdef MLX5_PMD_SOFT_COUNTERS
4104 /* Update sent data bytes counter. */
4105 slen += sizeof(struct rte_vlan_hdr);
4110 dseg = mlx5_tx_dseg_empw(txq, loc, dseg,
4113 tlen = RTE_ALIGN(tlen, MLX5_WSEG_SIZE);
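/*
 * Worked example (values assumed): dlen == 57 inlined without VLAN
 * gives tlen == 4 + 57 == 61, rounded up to 64 bytes here, i.e.
 * exactly four 16-byte segments of the eMPW WQE are consumed and
 * room is reduced accordingly.
 */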
4114 assert(room >= tlen);
4117 * Packet data are completely inlined,
4118 * free the packet immediately.
4120 rte_pktmbuf_free_seg(loc->mbuf);
4124 * Non-inlinable VLAN packets are
4125 * processed outside of this routine.
4127 assert(room >= MLX5_WQE_DSEG_SIZE);
4128 if (MLX5_TXOFF_CONFIG(VLAN))
4129 assert(!(loc->mbuf->ol_flags &
4131 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
4132 /* We have to store mbuf in elts.*/
4133 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
4134 room -= MLX5_WQE_DSEG_SIZE;
4135 /* Ring buffer wraparound is checked at the loop end.*/
4138 #ifdef MLX5_PMD_SOFT_COUNTERS
4139 /* Update sent data bytes counter. */
4145 if (unlikely(!pkts_n || !loc->elts_free)) {
4147 * We have no resources/packets to
4148 * continue building descriptors.
4151 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4152 return MLX5_TXCMP_CODE_EXIT;
4154 loc->mbuf = *pkts++;
4155 if (likely(pkts_n > 1))
4156 rte_prefetch0(*pkts);
4157 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4159 * Unroll the completion code to avoid
4160 * returning a variable value - it results in
4161 * unoptimized sequential checking in the caller.
4163 if (ret == MLX5_TXCMP_CODE_MULTI) {
4165 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4166 if (unlikely(!loc->elts_free ||
4168 return MLX5_TXCMP_CODE_EXIT;
4169 return MLX5_TXCMP_CODE_MULTI;
4171 if (ret == MLX5_TXCMP_CODE_TSO) {
4173 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4174 if (unlikely(!loc->elts_free ||
4176 return MLX5_TXCMP_CODE_EXIT;
4177 return MLX5_TXCMP_CODE_TSO;
4179 if (ret == MLX5_TXCMP_CODE_SINGLE) {
4181 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4182 if (unlikely(!loc->elts_free ||
4184 return MLX5_TXCMP_CODE_EXIT;
4185 return MLX5_TXCMP_CODE_SINGLE;
4187 if (ret != MLX5_TXCMP_CODE_EMPW) {
4190 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4191 return MLX5_TXCMP_CODE_ERROR;
4193 /* Check if we have minimal room left. */
4195 if (unlikely(!nlim || room < MLX5_WQE_DSEG_SIZE))
4198 * Check whether packet parameters coincide
4199 * within assumed eMPW batch:
4200 * - check sum settings
4201 * - metadata value
4202 * - software parser settings
4204 if (!mlx5_tx_match_empw(txq, eseg, loc, olx))
4206 /* Packet attributes match, continue the same eMPW. */
4207 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
4208 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
4211 * We get here to close an existing eMPW
4212 * session and start a new one.
4216 if (unlikely(!part))
4217 return MLX5_TXCMP_CODE_EXIT;
4218 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4219 if (unlikely(!loc->elts_free ||
4221 return MLX5_TXCMP_CODE_EXIT;
4222 /* Continue the loop with new eMPW session. */
4228 * The routine sends packets with ordinary MLX5_OPCODE_SEND.
4229 * Data inlining and VLAN insertion are supported.
4231 static __rte_always_inline enum mlx5_txcmp_code
4232 mlx5_tx_burst_single_send(struct mlx5_txq_data *restrict txq,
4233 struct rte_mbuf **restrict pkts,
4234 unsigned int pkts_n,
4235 struct mlx5_txq_local *restrict loc,
4236 unsigned int olx)
4239 * Subroutine is the part of mlx5_tx_burst_single()
4240 * and sends single-segment packet with SEND opcode.
4242 assert(loc->elts_free && loc->wqe_free);
4243 assert(pkts_n > loc->pkts_sent);
4244 pkts += loc->pkts_sent + 1;
4245 pkts_n -= loc->pkts_sent;
4247 struct mlx5_wqe *restrict wqe;
4248 enum mlx5_txcmp_code ret;
4250 assert(NB_SEGS(loc->mbuf) == 1);
4251 if (MLX5_TXOFF_CONFIG(INLINE)) {
4252 unsigned int inlen, vlan = 0;
4254 inlen = rte_pktmbuf_data_len(loc->mbuf);
4255 if (MLX5_TXOFF_CONFIG(VLAN) &&
4256 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
4257 vlan = sizeof(struct rte_vlan_hdr);
4259 static_assert((sizeof(struct rte_vlan_hdr) +
4260 sizeof(struct rte_ether_hdr)) ==
4261 MLX5_ESEG_MIN_INLINE_SIZE,
4262 "invalid min inline data size");
4265 * If inlining is enabled at configuration time,
4266 * the limit must be not less than the minimal size.
4267 * Otherwise we would need an extra check for the data
4268 * size to avoid crashes due to length overflow.
4270 assert(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
4271 if (inlen <= txq->inlen_send) {
4272 unsigned int seg_n, wqe_n;
4274 rte_prefetch0(rte_pktmbuf_mtod
4275 (loc->mbuf, uint8_t *));
4276 /* Check against minimal length. */
4277 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
4278 return MLX5_TXCMP_CODE_ERROR;
4280 * Completely inlined packet data WQE:
4281 * - Control Segment, SEND opcode
4282 * - Ethernet Segment, no VLAN insertion
4283 * - Data inlined, VLAN optionally inserted
4284 * - Alignment to MLX5_WSEG_SIZE
4285 * We have to estimate the required amount of WQEBBs.
4287 seg_n = (inlen + 3 * MLX5_WSEG_SIZE -
4288 MLX5_ESEG_MIN_INLINE_SIZE +
4289 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
4290 /* Check if there are enough WQEBBs. */
4291 wqe_n = (seg_n + 3) / 4;
4292 if (wqe_n > loc->wqe_free)
4293 return MLX5_TXCMP_CODE_EXIT;
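/*
 * Worked example (illustrative, assuming MLX5_WSEG_SIZE == 16 and
 * MLX5_ESEG_MIN_INLINE_SIZE == 18): for inlen == 128 bytes
 *   seg_n = (128 + 3 * 16 - 18 + 16 - 1) / 16 = 173 / 16 = 10 WSEGs,
 *   wqe_n = (10 + 3) / 4 = 3 WQEBBs,
 * i.e. the fully inlined 128-byte packet plus the Control/Ethernet
 * segment overhead fits into three 64-byte WQEBBs.
 */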
4294 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4295 loc->wqe_last = wqe;
4296 mlx5_tx_cseg_init(txq, loc, wqe, seg_n,
4297 MLX5_OPCODE_SEND, olx);
4298 mlx5_tx_eseg_data(txq, loc, wqe,
4299 vlan, inlen, 0, olx);
4300 txq->wqe_ci += wqe_n;
4301 loc->wqe_free -= wqe_n;
4303 * Packet data are completely inlined,
4304 * free the packet immediately.
4306 rte_pktmbuf_free_seg(loc->mbuf);
4307 } else if (!MLX5_TXOFF_CONFIG(EMPW) &&
4308 txq->inlen_mode) {
4310 * If minimal inlining is requested, the eMPW
4311 * feature should be disabled because the data
4312 * would be inlined into the Ethernet Segment,
4313 * which cannot hold inlined data for eMPW
4314 * since that segment is shared by all packets.
4316 struct mlx5_wqe_dseg *restrict dseg;
4317 unsigned int ds;
4318 uint8_t *dptr;
4321 * The inline-mode settings require
4322 * inlining the specified amount of
4323 * data bytes into the Ethernet Segment.
4324 * We should check the free space in the
4325 * WQE ring buffer for this partial inline.
4327 assert(txq->inlen_send >= txq->inlen_mode);
4328 assert(inlen > txq->inlen_mode);
4329 assert(txq->inlen_mode >=
4330 MLX5_ESEG_MIN_INLINE_SIZE);
4332 * Check whether there are enough free WQEBBs:
4333 * - Control Segment
4334 * - Ethernet Segment
4335 * - First Segment of inlined Ethernet data
4336 * - ... data continued ...
4337 * - Finishing Data Segment of pointer type
4339 ds = (MLX5_WQE_CSEG_SIZE +
4340 MLX5_WQE_ESEG_SIZE +
4341 MLX5_WQE_DSEG_SIZE +
4342 txq->inlen_mode -
4343 MLX5_ESEG_MIN_INLINE_SIZE +
4344 MLX5_WQE_DSEG_SIZE +
4345 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
4346 if (loc->wqe_free < ((ds + 3) / 4))
4347 return MLX5_TXCMP_CODE_EXIT;
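/*
 * Worked example (illustrative, with the usual 16-byte segment sizes
 * and an 18-byte minimal inline amount): for txq->inlen_mode == 64
 *   ds = (16 + 16 + 16 + 64 - 18 + 16 + 16 - 1) / 16 = 125 / 16 = 7,
 * i.e. seven 16-byte WSEGs, which occupy (7 + 3) / 4 = 2 WQEBBs.
 */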
4349 * Build the ordinary SEND WQE:
4350 * - Control Segment
4351 * - Ethernet Segment, inline inlen_mode bytes
4352 * - Data Segment of pointer type
4354 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4355 loc->wqe_last = wqe;
4356 mlx5_tx_cseg_init(txq, loc, wqe, ds,
4357 MLX5_OPCODE_SEND, olx);
4358 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan,
4359 txq->inlen_mode,
4360 0, olx);
4361 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
4362 txq->inlen_mode - vlan;
4363 inlen -= txq->inlen_mode;
4364 mlx5_tx_dseg_ptr(txq, loc, dseg,
4365 dptr, inlen, olx);
4367 * WQE is built, update the loop parameters
4368 * and go to the next packet.
4370 txq->wqe_ci += (ds + 3) / 4;
4371 loc->wqe_free -= (ds + 3) / 4;
4372 /* We have to store mbuf in elts.*/
4373 assert(MLX5_TXOFF_CONFIG(INLINE));
4374 txq->elts[txq->elts_head++ & txq->elts_m] =
4375 loc->mbuf;
4376 --loc->elts_free;
4377 } else {
4378 uint8_t *dptr;
4379 unsigned int dlen;
4382 * Partially inlined packet data WQE, we have
4383 * some space in the title WQEBB to fill with
4384 * packet data. It takes a single WQEBB, which is
4385 * guaranteed to be available - no extra space check:
4386 * - Control Segment, SEND opcode
4387 * - Ethernet Segment, no VLAN insertion
4388 * - MLX5_ESEG_MIN_INLINE_SIZE bytes of Data
4389 * - Data Segment, pointer type
4391 * We also get here if VLAN insertion is not
4392 * supported by HW but inlining is enabled.
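/*
 * Rough layout sketch of this single 64-byte WQEBB (assuming 16-byte
 * WSEGs and the 18-byte minimal inline amount; offsets are illustrative,
 * the PRM defines the exact Ethernet Segment format):
 *   WSEG 0 - Control Segment (SEND opcode, ds = 4)
 *   WSEG 1 - Ethernet Segment, inline header and first inline bytes
 *   WSEG 2 - remainder of the MLX5_ESEG_MIN_INLINE_SIZE inline data
 *   WSEG 3 - wqe->dseg[1], Data Segment pointing to the rest of the data
 */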
4394 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4395 loc->wqe_last = wqe;
4396 mlx5_tx_cseg_init(txq, loc, wqe, 4,
4397 MLX5_OPCODE_SEND, olx);
4398 mlx5_tx_eseg_dmin(txq, loc, wqe, vlan, olx);
4399 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
4400 MLX5_ESEG_MIN_INLINE_SIZE - vlan;
4402 * The length check is performed above, by
4403 * comparing with txq->inlen_send. We should
4404 * not get overflow here.
4406 assert(inlen > MLX5_ESEG_MIN_INLINE_SIZE);
4407 dlen = inlen - MLX5_ESEG_MIN_INLINE_SIZE;
4408 mlx5_tx_dseg_ptr(txq, loc, &wqe->dseg[1],
4409 dptr, dlen, olx);
4410 ++txq->wqe_ci;
4411 --loc->wqe_free;
4412 /* We have to store mbuf in elts.*/
4413 assert(MLX5_TXOFF_CONFIG(INLINE));
4414 txq->elts[txq->elts_head++ & txq->elts_m] =
4415 loc->mbuf;
4416 --loc->elts_free;
4417 }
4418 #ifdef MLX5_PMD_SOFT_COUNTERS
4419 /* Update sent data bytes counter. */
4420 txq->stats.obytes += vlan +
4421 rte_pktmbuf_data_len(loc->mbuf);
4422 #endif
4423 } else {
4425 * No inlining at all; this means the CPU cycle
4426 * saving is prioritized at configuration time and
4427 * we should not copy any packet data into the WQE.
4429 * SEND WQE, one WQEBB:
4430 * - Control Segment, SEND opcode
4431 * - Ethernet Segment, optional VLAN, no inline
4432 * - Data Segment, pointer type
4434 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4435 loc->wqe_last = wqe;
4436 mlx5_tx_cseg_init(txq, loc, wqe, 3,
4437 MLX5_OPCODE_SEND, olx);
4438 mlx5_tx_eseg_none(txq, loc, wqe, olx);
4439 mlx5_tx_dseg_ptr
4440 (txq, loc, &wqe->dseg[0],
4441 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
4442 rte_pktmbuf_data_len(loc->mbuf), olx);
4443 ++txq->wqe_ci;
4444 --loc->wqe_free;
4446 * We should not store the mbuf pointer in elts
4447 * if no inlining is configured; this is done
4448 * by the calling routine in a batch copy.
4450 assert(!MLX5_TXOFF_CONFIG(INLINE));
4452 #ifdef MLX5_PMD_SOFT_COUNTERS
4453 /* Update sent data bytes counter. */
4454 txq->stats.obytes += rte_pktmbuf_data_len(loc->mbuf);
4455 if (MLX5_TXOFF_CONFIG(VLAN) &&
4456 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
4457 txq->stats.obytes +=
4458 sizeof(struct rte_vlan_hdr);
4459 #endif
4460 }
4461 ++loc->pkts_sent;
4462 --pkts_n;
4463 /* Request CQE generation if limits are reached. */
4464 mlx5_tx_request_completion(txq, loc, false, olx);
4465 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
4466 return MLX5_TXCMP_CODE_EXIT;
4467 loc->mbuf = *pkts++;
4468 if (pkts_n > 1)
4469 rte_prefetch0(*pkts);
4470 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4471 if (unlikely(ret != MLX5_TXCMP_CODE_SINGLE))
4472 return ret;
4473 }
4474 }
4477 static __rte_always_inline enum mlx5_txcmp_code
4478 mlx5_tx_burst_single(struct mlx5_txq_data *restrict txq,
4479 struct rte_mbuf **restrict pkts,
4480 unsigned int pkts_n,
4481 struct mlx5_txq_local *restrict loc,
4482 unsigned int olx)
4484 enum mlx5_txcmp_code ret;
4486 ret = mlx5_tx_able_to_empw(txq, loc, olx, false);
4487 if (ret == MLX5_TXCMP_CODE_SINGLE)
4488 goto ordinary_send;
4489 assert(ret == MLX5_TXCMP_CODE_EMPW);
4490 for (;;) {
4491 /* Optimize for inline/no inline eMPW send. */
4492 ret = (MLX5_TXOFF_CONFIG(INLINE)) ?
4493 mlx5_tx_burst_empw_inline
4494 (txq, pkts, pkts_n, loc, olx) :
4495 mlx5_tx_burst_empw_simple
4496 (txq, pkts, pkts_n, loc, olx);
4497 if (ret != MLX5_TXCMP_CODE_SINGLE)
4498 return ret;
4499 /* The resources to send one packet should remain. */
4500 assert(loc->elts_free && loc->wqe_free);
4501 ordinary_send:
4502 ret = mlx5_tx_burst_single_send(txq, pkts, pkts_n, loc, olx);
4503 assert(ret != MLX5_TXCMP_CODE_SINGLE);
4504 if (ret != MLX5_TXCMP_CODE_EMPW)
4505 return ret;
4506 /* The resources to send one packet should remain. */
4507 assert(loc->elts_free && loc->wqe_free);
4512 * DPDK Tx callback template. This is the configured template
4513 * used to generate routines optimized for the specified offload setup.
4514 * One of these generated functions is chosen at SQ configuration
4515 * time.
4518 * Generic pointer to TX queue structure.
4520 * Packets to transmit.
4522 * Number of packets in array.
4524 * Configured offloads mask, presenting the bits of MLX5_TXOFF_CONFIG_xxx
4525 * values. Should be constant to take the compile-time static configuration
4526 * advantages.
4529 * Number of packets successfully transmitted (<= pkts_n).
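/*
 * Illustrative note: because olx is a compile-time constant in every
 * routine generated from this template, a guard such as
 *
 *   if (MLX5_TXOFF_CONFIG(TSO) && ret == MLX5_TXCMP_CODE_TSO)
 *       goto enter_send_tso;
 *
 * folds to "if (0)" in routines built without MLX5_TXOFF_CONFIG_TSO,
 * so the unsupported offload paths disappear entirely from the
 * generated code.
 */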
4531 static __rte_always_inline uint16_t
4532 mlx5_tx_burst_tmpl(struct mlx5_txq_data *restrict txq,
4533 struct rte_mbuf **restrict pkts,
4534 unsigned int pkts_n,
4535 unsigned int olx)
4537 struct mlx5_txq_local loc;
4538 enum mlx5_txcmp_code ret;
4539 unsigned int part;
4541 assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
4542 assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
4543 if (unlikely(!pkts_n))
4544 return 0;
4545 loc.pkts_sent = 0;
4546 loc.pkts_copy = 0;
4547 loc.wqe_last = NULL;
4549 send_loop:
4550 loc.pkts_loop = loc.pkts_sent;
4552 * Check if there are some CQEs, if any:
4553 * - process any encountered errors
4554 * - process the completed WQEs
4555 * - free related mbufs
4556 * - doorbell the NIC about processed CQEs
4558 rte_prefetch0(*(pkts + loc.pkts_sent));
4559 mlx5_tx_handle_completion(txq, olx);
4561 * Calculate the number of available resources - elts and WQEs.
4562 * There are two possible different scenarios:
4563 * - no data inlining into WQEs, one WQEBB may contain up to
4564 * four packets, in this case elts become the scarce resource
4565 * - data inlining into WQEs, one packet may require multiple
4566 * WQEBBs, the WQEs become the limiting factor.
4568 assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
4569 loc.elts_free = txq->elts_s -
4570 (uint16_t)(txq->elts_head - txq->elts_tail);
4571 assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
4572 loc.wqe_free = txq->wqe_s -
4573 (uint16_t)(txq->wqe_ci - txq->wqe_pi);
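/*
 * Worked example (illustrative values): the indices grow monotonically
 * and are never masked here, so the (uint16_t) cast keeps the difference
 * correct across wraparound. With elts_s == 512, elts_head == 5 (already
 * wrapped past 65535) and elts_tail == 65500:
 *   (uint16_t)(5 - 65500) == 41, hence elts_free == 512 - 41 == 471.
 */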
4574 if (unlikely(!loc.elts_free || !loc.wqe_free))
4575 goto burst_exit;
4576 for (;;) {
4578 * Fetch the packet from array. Usually this is
4579 * the first packet in a series of multi/single-
4580 * segment packets.
4582 loc.mbuf = *(pkts + loc.pkts_sent);
4583 /* Dedicated branch for multi-segment packets. */
4584 if (MLX5_TXOFF_CONFIG(MULTI) &&
4585 unlikely(NB_SEGS(loc.mbuf) > 1)) {
4587 * Multi-segment packet encountered.
4588 * Hardware is able to process it only
4589 * with SEND/TSO opcodes, one packet
4590 * per WQE, do it in dedicated routine.
4592 enter_send_multi:
4593 assert(loc.pkts_sent >= loc.pkts_copy);
4594 part = loc.pkts_sent - loc.pkts_copy;
4595 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
4597 * There are some single-segment mbufs not
4598 * stored in elts. The mbufs must be in the
4599 * same order as WQEs, so we must copy the
4600 * mbufs to elts here, before the mbuf of the
4601 * coming multi-segment packet is appended.
4603 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy,
4604 part, olx);
4605 loc.pkts_copy = loc.pkts_sent;
4607 assert(pkts_n > loc.pkts_sent);
4608 ret = mlx5_tx_burst_mseg(txq, pkts, pkts_n, &loc, olx);
4609 if (!MLX5_TXOFF_CONFIG(INLINE))
4610 loc.pkts_copy = loc.pkts_sent;
4612 * These return code checks are supposed
4613 * to be optimized out due to routine inlining.
4615 if (ret == MLX5_TXCMP_CODE_EXIT) {
4617 * The routine returns this code when
4618 * all packets are sent or there are not
4619 * enough resources to complete the request.
4621 break;
4622 }
4623 if (ret == MLX5_TXCMP_CODE_ERROR) {
4625 * The routine returns this code when
4626 * some error in the incoming packet
4627 * format occurred.
4629 txq->stats.oerrors++;
4630 break;
4631 }
4632 if (ret == MLX5_TXCMP_CODE_SINGLE) {
4634 * The single-segment packet was encountered
4635 * in the array, try to send it with the
4636 * best optimized way, possible engaging eMPW.
4638 goto enter_send_single;
4640 if (MLX5_TXOFF_CONFIG(TSO) &&
4641 ret == MLX5_TXCMP_CODE_TSO) {
4643 * The single-segment TSO packet was
4644 * encountered in the array.
4646 goto enter_send_tso;
4648 /* We must not get here. Something is going wrong. */
4649 assert(false);
4650 txq->stats.oerrors++;
4651 break;
4652 }
4653 /* Dedicated branch for single-segment TSO packets. */
4654 if (MLX5_TXOFF_CONFIG(TSO) &&
4655 unlikely(loc.mbuf->ol_flags & PKT_TX_TCP_SEG)) {
4657 * TSO might require a special way of inlining
4658 * (dedicated parameters) and is sent with the
4659 * MLX5_OPCODE_TSO opcode only, so provide this
4660 * in a dedicated branch.
4662 enter_send_tso:
4663 assert(NB_SEGS(loc.mbuf) == 1);
4664 assert(pkts_n > loc.pkts_sent);
4665 ret = mlx5_tx_burst_tso(txq, pkts, pkts_n, &loc, olx);
4667 * These return code checks are supposed
4668 * to be optimized out due to routine inlining.
4670 if (ret == MLX5_TXCMP_CODE_EXIT)
4671 break;
4672 if (ret == MLX5_TXCMP_CODE_ERROR) {
4673 txq->stats.oerrors++;
4674 break;
4675 }
4676 if (ret == MLX5_TXCMP_CODE_SINGLE)
4677 goto enter_send_single;
4678 if (MLX5_TXOFF_CONFIG(MULTI) &&
4679 ret == MLX5_TXCMP_CODE_MULTI) {
4681 * The multi-segment packet was
4682 * encountered in the array.
4684 goto enter_send_multi;
4686 /* We must not get here. Something is going wrong. */
4687 assert(false);
4688 txq->stats.oerrors++;
4689 break;
4690 }
4692 * The dedicated branch for the single-segment packets
4693 * without TSO. Often these ones can be sent using
4694 * MLX5_OPCODE_EMPW with multiple packets in one WQE.
4695 * The routine builds the WQEs till it encounters
4696 * the TSO or multi-segment packet (if these
4697 * offloads are requested at SQ configuration time).
4699 enter_send_single:
4700 assert(pkts_n > loc.pkts_sent);
4701 ret = mlx5_tx_burst_single(txq, pkts, pkts_n, &loc, olx);
4703 * These return code checks are supposed
4704 * to be optimized out due to routine inlining.
4706 if (ret == MLX5_TXCMP_CODE_EXIT)
4707 break;
4708 if (ret == MLX5_TXCMP_CODE_ERROR) {
4709 txq->stats.oerrors++;
4710 break;
4711 }
4712 if (MLX5_TXOFF_CONFIG(MULTI) &&
4713 ret == MLX5_TXCMP_CODE_MULTI) {
4715 * The multi-segment packet was
4716 * encountered in the array.
4718 goto enter_send_multi;
4720 if (MLX5_TXOFF_CONFIG(TSO) &&
4721 ret == MLX5_TXCMP_CODE_TSO) {
4723 * The single-segment TSO packet was
4724 * encountered in the array.
4726 goto enter_send_tso;
4728 /* We must not get here. Something is going wrong. */
4729 assert(false);
4730 txq->stats.oerrors++;
4731 break;
4732 }
4734 * Main Tx loop is completed, do the rest:
4735 * - set completion request if thresholds are reached
4736 * - doorbell the hardware
4737 * - copy the rest of mbufs to elts (if any)
4739 assert(MLX5_TXOFF_CONFIG(INLINE) || loc.pkts_sent >= loc.pkts_copy);
4740 /* Take a shortcut if nothing is sent. */
4741 if (unlikely(loc.pkts_sent == loc.pkts_loop))
4742 goto burst_exit;
4744 * Ring QP doorbell immediately after WQE building completion
4745 * to improve latencies. The purely software-related data treatment
4746 * can be completed after the doorbell. Tx CQEs for this SQ are
4747 * processed in this thread only by polling.
4749 mlx5_tx_dbrec_cond_wmb(txq, loc.wqe_last, 0);
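/*
 * For orientation, the doorbell sequence behind this call is roughly
 * (a sketch of the usual mlx5 protocol; see mlx5_tx_dbrec_cond_wmb()
 * for the authoritative implementation and its conditional barrier):
 *   1. all WQE stores are completed;
 *   2. a write barrier orders them before the doorbell record update;
 *   3. the doorbell record is updated with the new producer index;
 *   4. the beginning of the last WQE is written to the UAR/BlueFlame
 *      register to notify the hardware.
 */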
4750 /* Not all of the mbufs may be stored into elts yet. */
4751 part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent - loc.pkts_copy;
4752 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
4754 * There are some single-segment mbufs not stored in elts.
4755 * This can happen only if the last packet was single-segment.
4756 * The copying is gathered into one place since it is
4757 * a good opportunity to optimize it with SIMD.
4758 * Unfortunately, if inlining is enabled, gaps in the
4759 * pointer array may happen due to early freeing of the
4760 * single-segment mbufs.
4762 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy, part, olx);
4763 loc.pkts_copy = loc.pkts_sent;
4765 assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
4766 assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
4767 if (pkts_n > loc.pkts_sent) {
4769 * If the burst size is large there might be not enough CQEs
4770 * fetched from the completion queue and not enough resources
4771 * freed to send all the packets.
4773 goto send_loop;
4774 }
4775 burst_exit:
4776 #ifdef MLX5_PMD_SOFT_COUNTERS
4777 /* Increment sent packets counter. */
4778 txq->stats.opackets += loc.pkts_sent;
4779 #endif
4780 return loc.pkts_sent;
4783 /* Generate routines with Enhanced Multi-Packet Write support. */
4784 MLX5_TXOFF_DECL(full_empw,
4785 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_EMPW)
4787 MLX5_TXOFF_DECL(none_empw,
4788 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW)
4790 MLX5_TXOFF_DECL(md_empw,
4791 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4793 MLX5_TXOFF_DECL(mt_empw,
4794 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4795 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4797 MLX5_TXOFF_DECL(mtsc_empw,
4798 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4799 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4800 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4802 MLX5_TXOFF_DECL(mti_empw,
4803 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4804 MLX5_TXOFF_CONFIG_INLINE |
4805 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4807 MLX5_TXOFF_DECL(mtv_empw,
4808 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4809 MLX5_TXOFF_CONFIG_VLAN |
4810 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4812 MLX5_TXOFF_DECL(mtiv_empw,
4813 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4814 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4815 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4817 MLX5_TXOFF_DECL(sc_empw,
4818 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4819 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4821 MLX5_TXOFF_DECL(sci_empw,
4822 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4823 MLX5_TXOFF_CONFIG_INLINE |
4824 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4826 MLX5_TXOFF_DECL(scv_empw,
4827 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4828 MLX5_TXOFF_CONFIG_VLAN |
4829 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4831 MLX5_TXOFF_DECL(sciv_empw,
4832 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4833 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4834 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4836 MLX5_TXOFF_DECL(i_empw,
4837 MLX5_TXOFF_CONFIG_INLINE |
4838 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4840 MLX5_TXOFF_DECL(v_empw,
4841 MLX5_TXOFF_CONFIG_VLAN |
4842 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4844 MLX5_TXOFF_DECL(iv_empw,
4845 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4846 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4848 /* Generate routines without Enhanced Multi-Packet Write support. */
4849 MLX5_TXOFF_DECL(full,
4850 MLX5_TXOFF_CONFIG_FULL)
4852 MLX5_TXOFF_DECL(none,
4853 MLX5_TXOFF_CONFIG_NONE)
4855 MLX5_TXOFF_DECL(md,
4856 MLX5_TXOFF_CONFIG_METADATA)
4858 MLX5_TXOFF_DECL(mt,
4859 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4860 MLX5_TXOFF_CONFIG_METADATA)
4862 MLX5_TXOFF_DECL(mtsc,
4863 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4864 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4865 MLX5_TXOFF_CONFIG_METADATA)
4867 MLX5_TXOFF_DECL(mti,
4868 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4869 MLX5_TXOFF_CONFIG_INLINE |
4870 MLX5_TXOFF_CONFIG_METADATA)
4873 MLX5_TXOFF_DECL(mtv,
4874 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4875 MLX5_TXOFF_CONFIG_VLAN |
4876 MLX5_TXOFF_CONFIG_METADATA)
4879 MLX5_TXOFF_DECL(mtiv,
4880 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4881 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4882 MLX5_TXOFF_CONFIG_METADATA)
4884 MLX5_TXOFF_DECL(sc,
4885 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4886 MLX5_TXOFF_CONFIG_METADATA)
4888 MLX5_TXOFF_DECL(sci,
4889 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4890 MLX5_TXOFF_CONFIG_INLINE |
4891 MLX5_TXOFF_CONFIG_METADATA)
4894 MLX5_TXOFF_DECL(scv,
4895 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4896 MLX5_TXOFF_CONFIG_VLAN |
4897 MLX5_TXOFF_CONFIG_METADATA)
4900 MLX5_TXOFF_DECL(sciv,
4901 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4902 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4903 MLX5_TXOFF_CONFIG_METADATA)
4905 MLX5_TXOFF_DECL(i,
4906 MLX5_TXOFF_CONFIG_INLINE |
4907 MLX5_TXOFF_CONFIG_METADATA)
4909 MLX5_TXOFF_DECL(v,
4910 MLX5_TXOFF_CONFIG_VLAN |
4911 MLX5_TXOFF_CONFIG_METADATA)
4913 MLX5_TXOFF_DECL(iv,
4914 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4915 MLX5_TXOFF_CONFIG_METADATA)
4918 * Array of declared and compiled Tx burst functions and the corresponding
4919 * supported offload sets. The array is used to select the Tx burst
4920 * function for the offload set specified at Tx queue configuration time.
4922 static const struct {
4923 eth_tx_burst_t func;
4924 unsigned int olx;
4925 } txoff_func[] = {
4926 MLX5_TXOFF_INFO(full_empw,
4927 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4928 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4929 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4930 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4932 MLX5_TXOFF_INFO(none_empw,
4933 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW)
4935 MLX5_TXOFF_INFO(md_empw,
4936 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4938 MLX5_TXOFF_INFO(mt_empw,
4939 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4940 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4942 MLX5_TXOFF_INFO(mtsc_empw,
4943 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4944 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4945 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4947 MLX5_TXOFF_INFO(mti_empw,
4948 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4949 MLX5_TXOFF_CONFIG_INLINE |
4950 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4952 MLX5_TXOFF_INFO(mtv_empw,
4953 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4954 MLX5_TXOFF_CONFIG_VLAN |
4955 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4957 MLX5_TXOFF_INFO(mtiv_empw,
4958 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4959 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4960 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4962 MLX5_TXOFF_INFO(sc_empw,
4963 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4964 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4966 MLX5_TXOFF_INFO(sci_empw,
4967 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4968 MLX5_TXOFF_CONFIG_INLINE |
4969 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4971 MLX5_TXOFF_INFO(scv_empw,
4972 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4973 MLX5_TXOFF_CONFIG_VLAN |
4974 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4976 MLX5_TXOFF_INFO(sciv_empw,
4977 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4978 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4979 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4981 MLX5_TXOFF_INFO(i_empw,
4982 MLX5_TXOFF_CONFIG_INLINE |
4983 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4985 MLX5_TXOFF_INFO(v_empw,
4986 MLX5_TXOFF_CONFIG_VLAN |
4987 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4989 MLX5_TXOFF_INFO(iv_empw,
4990 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4991 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4993 MLX5_TXOFF_INFO(full,
4994 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4995 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4996 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4997 MLX5_TXOFF_CONFIG_METADATA)
4999 MLX5_TXOFF_INFO(none,
5000 MLX5_TXOFF_CONFIG_NONE)
5002 MLX5_TXOFF_INFO(md,
5003 MLX5_TXOFF_CONFIG_METADATA)
5005 MLX5_TXOFF_INFO(mt,
5006 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5007 MLX5_TXOFF_CONFIG_METADATA)
5009 MLX5_TXOFF_INFO(mtsc,
5010 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5011 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5012 MLX5_TXOFF_CONFIG_METADATA)
5014 MLX5_TXOFF_INFO(mti,
5015 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5016 MLX5_TXOFF_CONFIG_INLINE |
5017 MLX5_TXOFF_CONFIG_METADATA)
5020 MLX5_TXOFF_INFO(mtv,
5021 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5022 MLX5_TXOFF_CONFIG_VLAN |
5023 MLX5_TXOFF_CONFIG_METADATA)
5025 MLX5_TXOFF_INFO(mtiv,
5026 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5027 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5028 MLX5_TXOFF_CONFIG_METADATA)
5030 MLX5_TXOFF_INFO(sc,
5031 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5032 MLX5_TXOFF_CONFIG_METADATA)
5034 MLX5_TXOFF_INFO(sci,
5035 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5036 MLX5_TXOFF_CONFIG_INLINE |
5037 MLX5_TXOFF_CONFIG_METADATA)
5039 MLX5_TXOFF_INFO(scv,
5040 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5041 MLX5_TXOFF_CONFIG_VLAN |
5042 MLX5_TXOFF_CONFIG_METADATA)
5044 MLX5_TXOFF_INFO(sciv,
5045 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5046 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5047 MLX5_TXOFF_CONFIG_METADATA)
5049 MLX5_TXOFF_INFO(i,
5050 MLX5_TXOFF_CONFIG_INLINE |
5051 MLX5_TXOFF_CONFIG_METADATA)
5053 MLX5_TXOFF_INFO(v,
5054 MLX5_TXOFF_CONFIG_VLAN |
5055 MLX5_TXOFF_CONFIG_METADATA)
5057 MLX5_TXOFF_INFO(iv,
5058 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5059 MLX5_TXOFF_CONFIG_METADATA)
5060 };
5063 * Configure the Tx function to use. The routine checks the configured
5064 * Tx offloads for the device and selects the appropriate Tx burst
5065 * routine. There are multiple Tx burst routines compiled from
5066 * the same template in the most optimal way for the dedicated
5067 * queue configuration.
5070 * Pointer to private data structure.
5073 * Pointer to selected Tx burst function.
5075 eth_tx_burst_t
5076 mlx5_select_tx_function(struct rte_eth_dev *dev)
5078 struct mlx5_priv *priv = dev->data->dev_private;
5079 struct mlx5_dev_config *config = &priv->config;
5080 uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
5081 unsigned int diff = 0, olx = 0, i, m;
5083 static_assert(MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE <=
5084 MLX5_DSEG_MAX, "invalid WQE max size");
5085 static_assert(MLX5_WQE_CSEG_SIZE == MLX5_WSEG_SIZE,
5086 "invalid WQE Control Segment size");
5087 static_assert(MLX5_WQE_ESEG_SIZE == MLX5_WSEG_SIZE,
5088 "invalid WQE Ethernet Segment size");
5089 static_assert(MLX5_WQE_DSEG_SIZE == MLX5_WSEG_SIZE,
5090 "invalid WQE Data Segment size");
5091 static_assert(MLX5_WQE_SIZE == 4 * MLX5_WSEG_SIZE,
5092 "invalid WQE size");
5094 if (tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
5095 /* We should support Multi-Segment Packets. */
5096 olx |= MLX5_TXOFF_CONFIG_MULTI;
5098 if (tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
5099 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
5100 DEV_TX_OFFLOAD_GRE_TNL_TSO |
5101 DEV_TX_OFFLOAD_IP_TNL_TSO |
5102 DEV_TX_OFFLOAD_UDP_TNL_TSO)) {
5103 /* We should support TCP Send Offload. */
5104 olx |= MLX5_TXOFF_CONFIG_TSO;
5106 if (tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO |
5107 DEV_TX_OFFLOAD_UDP_TNL_TSO |
5108 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
5109 /* We should support Software Parser for Tunnels. */
5110 olx |= MLX5_TXOFF_CONFIG_SWP;
5112 if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
5113 DEV_TX_OFFLOAD_UDP_CKSUM |
5114 DEV_TX_OFFLOAD_TCP_CKSUM |
5115 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
5116 /* We should support IP/TCP/UDP Checksums. */
5117 olx |= MLX5_TXOFF_CONFIG_CSUM;
5119 if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT) {
5120 /* We should support VLAN insertion. */
5121 olx |= MLX5_TXOFF_CONFIG_VLAN;
5123 if (priv->txqs_n && (*priv->txqs)[0]) {
5124 struct mlx5_txq_data *txd = (*priv->txqs)[0];
5126 if (txd->inlen_send) {
5128 * Check the data inline requirements. Data inline
5129 * is enabled on a per-device basis, and we can check
5130 * the first Tx queue only.
5132 * If the device does not support VLAN insertion in WQE
5133 * and some queues are requested to perform VLAN
5134 * insertion offload, then inlining must be enabled.
5136 olx |= MLX5_TXOFF_CONFIG_INLINE;
5139 if (config->mps == MLX5_MPW_ENHANCED &&
5140 config->txq_inline_min <= 0) {
5142 * The NIC supports Enhanced Multi-Packet Write.
5143 * We do not support legacy MPW due to its
5144 * hardware related problems, so we just ignore
5145 * legacy MLX5_MPW settings. There should be no
5146 * minimal required inline data.
5148 olx |= MLX5_TXOFF_CONFIG_EMPW;
5150 if (rte_flow_dynf_metadata_avail()) {
5151 /* We should support Flow metadata. */
5152 olx |= MLX5_TXOFF_CONFIG_METADATA;
5155 * Scan the routines table to find the minimal
5156 * satisfying routine for the requested offloads.
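/*
 * Worked example (illustrative): suppose the requested set is
 * olx = CSUM | METADATA | EMPW. Entries whose EMPW or INLINE bits differ
 * from the request are skipped, so among the remaining supersets:
 *   mtsc_empw - three extra offloads (MULTI, TSO, SWP),
 *   scv_empw  - two extras (SWP, VLAN),
 *   sc_empw   - one extra (SWP).
 * The popcount of the not-requested bits is smallest for sc_empw, so
 * mlx5_tx_burst_sc_empw() would be selected.
 */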
5158 m = RTE_DIM(txoff_func);
5159 for (i = 0; i < RTE_DIM(txoff_func); i++) {
5160 unsigned int tmp;
5162 tmp = txoff_func[i].olx;
5163 if (tmp == olx) {
5164 /* Meets requested offloads exactly.*/
5165 m = i;
5166 break;
5167 }
5168 if ((tmp & olx) != olx) {
5169 /* Does not meet requested offloads at all. */
5170 continue;
5171 }
5172 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_EMPW)
5173 /* Do not enable eMPW if not configured. */
5174 continue;
5175 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_INLINE)
5176 /* Do not enable inlining if not configured. */
5177 continue;
5179 * Some routine meets the requirements.
5180 * Check whether it has the minimal amount
5181 * of not requested offloads.
5183 tmp = __builtin_popcountl(tmp & ~olx);
5184 if (m >= RTE_DIM(txoff_func) || tmp < diff) {
5185 /* First or better match, save and continue. */
5186 m = i;
5187 diff = tmp;
5188 continue;
5189 }
5190 if (tmp == diff) {
5191 tmp = txoff_func[i].olx ^ txoff_func[m].olx;
5192 if (__builtin_ffsl(txoff_func[i].olx & ~tmp) <
5193 __builtin_ffsl(txoff_func[m].olx & ~tmp)) {
5194 /* Lighter not requested offload. */
5195 m = i;
5196 }
5197 }
5198 }
5199 if (m >= RTE_DIM(txoff_func)) {
5200 DRV_LOG(DEBUG, "port %u has no selected Tx function"
5201 " for requested offloads %04X",
5202 dev->data->port_id, olx);
5203 return NULL;
5204 }
5205 DRV_LOG(DEBUG, "port %u has selected Tx function"
5206 " supporting offloads %04X/%04X",
5207 dev->data->port_id, olx, txoff_func[m].olx);
5208 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MULTI)
5209 DRV_LOG(DEBUG, "\tMULTI (multi segment)");
5210 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_TSO)
5211 DRV_LOG(DEBUG, "\tTSO (TCP send offload)");
5212 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_SWP)
5213 DRV_LOG(DEBUG, "\tSWP (software parser)");
5214 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_CSUM)
5215 DRV_LOG(DEBUG, "\tCSUM (checksum offload)");
5216 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_INLINE)
5217 DRV_LOG(DEBUG, "\tINLIN (inline data)");
5218 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_VLAN)
5219 DRV_LOG(DEBUG, "\tVLANI (VLAN insertion)");
5220 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_METADATA)
5221 DRV_LOG(DEBUG, "\tMETAD (tx Flow metadata)");
5222 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_EMPW)
5223 DRV_LOG(DEBUG, "\tEMPW (Enhanced MPW)");
5224 return txoff_func[m].func;
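/*
 * Typical usage (illustrative, mirroring the device start path): the
 * selected routine is installed directly as the device Tx callback,
 *
 *   dev->tx_pkt_burst = mlx5_select_tx_function(dev);
 *
 * so the per-packet hot path carries no per-offload branching cost -
 * all of it was resolved here, once, at configuration time.
 */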