1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015-2019 Mellanox Technologies, Ltd
12 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
14 #pragma GCC diagnostic ignored "-Wpedantic"
16 #include <infiniband/verbs.h>
17 #include <infiniband/mlx5dv.h>
19 #pragma GCC diagnostic error "-Wpedantic"
23 #include <rte_mempool.h>
24 #include <rte_prefetch.h>
25 #include <rte_common.h>
26 #include <rte_branch_prediction.h>
27 #include <rte_ether.h>
28 #include <rte_cycles.h>
31 #include <mlx5_devx_cmds.h>
34 #include "mlx5_defs.h"
36 #include "mlx5_utils.h"
37 #include "mlx5_rxtx.h"
38 #include "mlx5_autoconf.h"
40 /* TX burst subroutines return codes. */
41 enum mlx5_txcmp_code {
42 MLX5_TXCMP_CODE_EXIT = 0,
43 MLX5_TXCMP_CODE_ERROR,
44 MLX5_TXCMP_CODE_SINGLE,
45 MLX5_TXCMP_CODE_MULTI,
51 * These defines are used to configure the Tx burst routine option set
52 * supported at compile time. Options that are not specified are optimized
53 * out because the corresponding if conditions can be evaluated at compile time.
54 * Offloads with a bigger runtime check overhead (requiring more CPU cycles
55 * to skip) should have a bigger index - this is needed to select the best
56 * matching routine when there is no exact match and some offloads are not
57 * actually requested.
59 #define MLX5_TXOFF_CONFIG_MULTI (1u << 0) /* Multi-segment packets.*/
60 #define MLX5_TXOFF_CONFIG_TSO (1u << 1) /* TCP send offload supported.*/
61 #define MLX5_TXOFF_CONFIG_SWP (1u << 2) /* Tunnels/SW Parser offloads.*/
62 #define MLX5_TXOFF_CONFIG_CSUM (1u << 3) /* Checksums offloaded. */
63 #define MLX5_TXOFF_CONFIG_INLINE (1u << 4) /* Data inlining supported. */
64 #define MLX5_TXOFF_CONFIG_VLAN (1u << 5) /* VLAN insertion supported.*/
65 #define MLX5_TXOFF_CONFIG_METADATA (1u << 6) /* Flow metadata. */
66 #define MLX5_TXOFF_CONFIG_EMPW (1u << 8) /* Enhanced MPW supported.*/
67 #define MLX5_TXOFF_CONFIG_MPW (1u << 9) /* Legacy MPW supported.*/
69 /* The most common offload groups. */
70 #define MLX5_TXOFF_CONFIG_NONE 0
71 #define MLX5_TXOFF_CONFIG_FULL (MLX5_TXOFF_CONFIG_MULTI | \
72 MLX5_TXOFF_CONFIG_TSO | \
73 MLX5_TXOFF_CONFIG_SWP | \
74 MLX5_TXOFF_CONFIG_CSUM | \
75 MLX5_TXOFF_CONFIG_INLINE | \
76 MLX5_TXOFF_CONFIG_VLAN | \
77 MLX5_TXOFF_CONFIG_METADATA)
79 #define MLX5_TXOFF_CONFIG(mask) (olx & MLX5_TXOFF_CONFIG_##mask)
81 #define MLX5_TXOFF_DECL(func, olx) \
82 static uint16_t mlx5_tx_burst_##func(void *txq, \
83 struct rte_mbuf **pkts, \
86 return mlx5_tx_burst_tmpl((struct mlx5_txq_data *)txq, \
87 pkts, pkts_n, (olx)); \
90 #define MLX5_TXOFF_INFO(func, olx) {mlx5_tx_burst_##func, olx},
92 static __rte_always_inline uint32_t
93 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe);
95 static __rte_always_inline int
96 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
97 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe);
99 static __rte_always_inline uint32_t
100 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe);
102 static __rte_always_inline void
103 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
104 volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res);
106 static __rte_always_inline void
107 mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx,
108 const unsigned int strd_n);
111 mlx5_queue_state_modify(struct rte_eth_dev *dev,
112 struct mlx5_mp_arg_queue_state_modify *sm);
115 mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *restrict tcp,
116 volatile struct mlx5_cqe *restrict cqe,
120 mlx5_lro_update_hdr(uint8_t *restrict padd,
121 volatile struct mlx5_cqe *restrict cqe,
124 uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
125 [0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
128 uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned;
129 uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned;
132 * Build a table to translate Rx completion flags to packet type.
134 * @note: fix mlx5_dev_supported_ptypes_get() if any change is made here.
137 mlx5_set_ptype_table(void)
140 uint32_t (*p)[RTE_DIM(mlx5_ptype_table)] = &mlx5_ptype_table;
142 /* Last entry must not be overwritten, reserved for errored packet. */
143 for (i = 0; i < RTE_DIM(mlx5_ptype_table) - 1; ++i)
144 (*p)[i] = RTE_PTYPE_UNKNOWN;
146 * The index to the array should have:
147 * bit[1:0] = l3_hdr_type
148 * bit[4:2] = l4_hdr_type
151 * bit[7] = outer_l3_type
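 * E.g. index 0x01 (l3_hdr_type = 1) below maps to an IPv6 non-fragmented
 * packet and 0x02 (l3_hdr_type = 2) to an IPv4 one.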
154 (*p)[0x00] = RTE_PTYPE_L2_ETHER;
156 (*p)[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
157 RTE_PTYPE_L4_NONFRAG;
158 (*p)[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
159 RTE_PTYPE_L4_NONFRAG;
161 (*p)[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
163 (*p)[0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
166 (*p)[0x05] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
168 (*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
170 (*p)[0x0d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
172 (*p)[0x0e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
174 (*p)[0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
176 (*p)[0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
179 (*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
181 (*p)[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
183 /* Repeat with outer_l3_type being set. Just in case. */
184 (*p)[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
185 RTE_PTYPE_L4_NONFRAG;
186 (*p)[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
187 RTE_PTYPE_L4_NONFRAG;
188 (*p)[0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
190 (*p)[0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
192 (*p)[0x85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
194 (*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
196 (*p)[0x8d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
198 (*p)[0x8e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
200 (*p)[0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
202 (*p)[0x92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
204 (*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
206 (*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
209 (*p)[0x40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
210 (*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
211 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
212 RTE_PTYPE_INNER_L4_NONFRAG;
213 (*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
214 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
215 RTE_PTYPE_INNER_L4_NONFRAG;
216 (*p)[0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
217 (*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
218 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
219 RTE_PTYPE_INNER_L4_NONFRAG;
220 (*p)[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
221 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
222 RTE_PTYPE_INNER_L4_NONFRAG;
223 /* Tunneled - Fragmented */
224 (*p)[0x61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
225 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
226 RTE_PTYPE_INNER_L4_FRAG;
227 (*p)[0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
228 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
229 RTE_PTYPE_INNER_L4_FRAG;
230 (*p)[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
231 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
232 RTE_PTYPE_INNER_L4_FRAG;
233 (*p)[0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
234 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
235 RTE_PTYPE_INNER_L4_FRAG;
237 (*p)[0x45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
238 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
239 RTE_PTYPE_INNER_L4_TCP;
240 (*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
241 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
242 RTE_PTYPE_INNER_L4_TCP;
243 (*p)[0x4d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
244 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
245 RTE_PTYPE_INNER_L4_TCP;
246 (*p)[0x4e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
247 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
248 RTE_PTYPE_INNER_L4_TCP;
249 (*p)[0x51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
250 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
251 RTE_PTYPE_INNER_L4_TCP;
252 (*p)[0x52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
253 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
254 RTE_PTYPE_INNER_L4_TCP;
255 (*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
256 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
257 RTE_PTYPE_INNER_L4_TCP;
258 (*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
259 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
260 RTE_PTYPE_INNER_L4_TCP;
261 (*p)[0xcd] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
262 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
263 RTE_PTYPE_INNER_L4_TCP;
264 (*p)[0xce] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
265 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
266 RTE_PTYPE_INNER_L4_TCP;
267 (*p)[0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
268 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
269 RTE_PTYPE_INNER_L4_TCP;
270 (*p)[0xd2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
271 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
272 RTE_PTYPE_INNER_L4_TCP;
274 (*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
275 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
276 RTE_PTYPE_INNER_L4_UDP;
277 (*p)[0x4a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
278 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
279 RTE_PTYPE_INNER_L4_UDP;
280 (*p)[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
281 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
282 RTE_PTYPE_INNER_L4_UDP;
283 (*p)[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
284 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
285 RTE_PTYPE_INNER_L4_UDP;
289 * Build a table to translate packet checksum offload flags to Verbs checksum type.
292 mlx5_set_cksum_table(void)
298 * The index should have:
299 * bit[0] = PKT_TX_TCP_SEG
300 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
301 * bit[4] = PKT_TX_IP_CKSUM
302 * bit[8] = PKT_TX_OUTER_IP_CKSUM
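 * E.g. a plain IPv4/TCP packet with PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM
 * gets index 0x14 and is assigned MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM
 * by the loop below.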
305 for (i = 0; i < RTE_DIM(mlx5_cksum_table); ++i) {
308 /* Tunneled packet. */
309 if (i & (1 << 8)) /* Outer IP. */
310 v |= MLX5_ETH_WQE_L3_CSUM;
311 if (i & (1 << 4)) /* Inner IP. */
312 v |= MLX5_ETH_WQE_L3_INNER_CSUM;
313 if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
314 v |= MLX5_ETH_WQE_L4_INNER_CSUM;
317 if (i & (1 << 4)) /* IP. */
318 v |= MLX5_ETH_WQE_L3_CSUM;
319 if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
320 v |= MLX5_ETH_WQE_L4_CSUM;
322 mlx5_cksum_table[i] = v;
327 * Build a table to translate mbuf Tx offload flags to Verbs SWP (Software Parser) type.
330 mlx5_set_swp_types_table(void)
336 * The index should have:
337 * bit[0:1] = PKT_TX_L4_MASK
338 * bit[4] = PKT_TX_IPV6
339 * bit[8] = PKT_TX_OUTER_IPV6
340 * bit[9] = PKT_TX_OUTER_UDP
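 * E.g. index 0x313 (inner UDP checksum, inner IPv6, outer IPv6, outer UDP)
 * should get all four MLX5_ETH_WQE_* SWP flags set by the loop below.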
342 for (i = 0; i < RTE_DIM(mlx5_swp_types_table); ++i) {
345 v |= MLX5_ETH_WQE_L3_OUTER_IPV6;
347 v |= MLX5_ETH_WQE_L4_OUTER_UDP;
349 v |= MLX5_ETH_WQE_L3_INNER_IPV6;
350 if ((i & 3) == (PKT_TX_UDP_CKSUM >> 52))
351 v |= MLX5_ETH_WQE_L4_INNER_UDP;
352 mlx5_swp_types_table[i] = v;
357 * Set Software Parser flags and offsets in Ethernet Segment of WQE.
358 * Flags must be initialized to zero beforehand.
361 * Pointer to burst routine local context.
363 * Pointer to store Software Parser flags
365 * Configured Tx offloads mask. It is fully defined at
366 * compile time and may be used for optimization.
369 * Software Parser offsets packed in dword.
370 * Software Parser flags are set by pointer.
372 static __rte_always_inline uint32_t
373 txq_mbuf_to_swp(struct mlx5_txq_local *restrict loc,
378 unsigned int idx, off;
381 if (!MLX5_TXOFF_CONFIG(SWP))
383 ol = loc->mbuf->ol_flags;
384 tunnel = ol & PKT_TX_TUNNEL_MASK;
386 * Check whether Software Parser is required.
387 * Only customized tunnels may ask for it.
389 if (likely(tunnel != PKT_TX_TUNNEL_UDP && tunnel != PKT_TX_TUNNEL_IP))
392 * The index should have:
393 * bit[0:1] = PKT_TX_L4_MASK
394 * bit[4] = PKT_TX_IPV6
395 * bit[8] = PKT_TX_OUTER_IPV6
396 * bit[9] = PKT_TX_OUTER_UDP
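 * (PKT_TX_L4_MASK, PKT_TX_IPV6 and PKT_TX_OUTER_IPV6 all reside at
 * ol_flags bit 52 and above, hence the shift by 52 below; bit[9] is
 * OR-ed in separately for UDP tunnels.)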
398 idx = (ol & (PKT_TX_L4_MASK | PKT_TX_IPV6 | PKT_TX_OUTER_IPV6)) >> 52;
399 idx |= (tunnel == PKT_TX_TUNNEL_UDP) ? (1 << 9) : 0;
400 *swp_flags = mlx5_swp_types_table[idx];
402 * Set offsets for SW parser. Since ConnectX-5, SW parser just
403 * complements HW parser. SW parser starts to engage only if HW parser
404 * can't reach a header. For older devices, HW parser will not kick
405 * in if any of the SWP offsets is set. Therefore, all of the L3 offsets
406 * should be set regardless of HW offload.
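 * The returned dword packs the offsets in 2-byte units:
 * bits 7:0 - outer L4, bits 15:8 - outer L3,
 * bits 23:16 - inner L4, bits 31:24 - inner L3.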
408 off = loc->mbuf->outer_l2_len;
409 if (MLX5_TXOFF_CONFIG(VLAN) && ol & PKT_TX_VLAN_PKT)
410 off += sizeof(struct rte_vlan_hdr);
411 set = (off >> 1) << 8; /* Outer L3 offset. */
412 off += loc->mbuf->outer_l3_len;
413 if (tunnel == PKT_TX_TUNNEL_UDP)
414 set |= off >> 1; /* Outer L4 offset. */
415 if (ol & (PKT_TX_IPV4 | PKT_TX_IPV6)) { /* Inner IP. */
416 const uint64_t csum = ol & PKT_TX_L4_MASK;
417 off += loc->mbuf->l2_len;
418 set |= (off >> 1) << 24; /* Inner L3 offset. */
419 if (csum == PKT_TX_TCP_CKSUM ||
420 csum == PKT_TX_UDP_CKSUM ||
421 (MLX5_TXOFF_CONFIG(TSO) && ol & PKT_TX_TCP_SEG)) {
422 off += loc->mbuf->l3_len;
423 set |= (off >> 1) << 16; /* Inner L4 offset. */
426 set = rte_cpu_to_le_32(set);
431 * Convert the Checksum offloads to Verbs.
434 * Pointer to the mbuf.
437 * Converted checksum flags.
439 static __rte_always_inline uint8_t
440 txq_ol_cksum_to_cs(struct rte_mbuf *buf)
443 uint8_t is_tunnel = !!(buf->ol_flags & PKT_TX_TUNNEL_MASK);
444 const uint64_t ol_flags_mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK |
445 PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM;
448 * The index should have:
449 * bit[0] = PKT_TX_TCP_SEG
450 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
451 * bit[4] = PKT_TX_IP_CKSUM
452 * bit[8] = PKT_TX_OUTER_IP_CKSUM
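 * (PKT_TX_TCP_SEG is the lowest of these flags in ol_flags, hence the
 * shift by 50 below; bit[9] marks a tunneled packet.)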
455 idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (!!is_tunnel << 9);
456 return mlx5_cksum_table[idx];
460 * Internal function to compute the number of used descriptors in an Rx queue.
466 * The number of used Rx descriptors.
469 rx_queue_count(struct mlx5_rxq_data *rxq)
471 struct rxq_zip *zip = &rxq->zip;
472 volatile struct mlx5_cqe *cqe;
473 const unsigned int cqe_n = (1 << rxq->cqe_n);
474 const unsigned int cqe_cnt = cqe_n - 1;
478 /* If we are processing a compressed CQE. */
480 used = zip->cqe_cnt - zip->ca;
486 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
487 while (check_cqe(cqe, cqe_n, cq_ci) != MLX5_CQE_STATUS_HW_OWN) {
491 op_own = cqe->op_own;
492 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
493 n = rte_be_to_cpu_32(cqe->byte_cnt);
498 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
500 used = RTE_MIN(used, (1U << rxq->elts_n) - 1);
505 * DPDK callback to check the status of an Rx descriptor.
510 * The index of the descriptor in the ring.
513 * The status of the Rx descriptor.
516 mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
518 struct mlx5_rxq_data *rxq = rx_queue;
519 struct mlx5_rxq_ctrl *rxq_ctrl =
520 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
521 struct rte_eth_dev *dev = ETH_DEV(rxq_ctrl->priv);
523 if (dev->rx_pkt_burst != mlx5_rx_burst) {
527 if (offset >= (1 << rxq->elts_n)) {
531 if (offset < rx_queue_count(rxq))
532 return RTE_ETH_RX_DESC_DONE;
533 return RTE_ETH_RX_DESC_AVAIL;
537 * DPDK callback to get the number of used descriptors in an Rx queue.
540 * Pointer to the device structure.
546 * The number of used Rx descriptors.
547 * -EINVAL if the queue is invalid.
550 mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
552 struct mlx5_priv *priv = dev->data->dev_private;
553 struct mlx5_rxq_data *rxq;
555 if (dev->rx_pkt_burst != mlx5_rx_burst) {
559 rxq = (*priv->rxqs)[rx_queue_id];
564 return rx_queue_count(rxq);
567 #define MLX5_SYSTEM_LOG_DIR "/var/log"
569 * Dump debug information to log file.
574 * If not NULL this string is printed as a header to the output
575 * and the output will be in hexadecimal view.
577 * This is the buffer address to print out.
579 * The number of bytes to dump out.
582 mlx5_dump_debug_information(const char *fname, const char *hex_title,
583 const void *buf, unsigned int hex_len)
587 MKSTR(path, "%s/%s", MLX5_SYSTEM_LOG_DIR, fname);
588 fd = fopen(path, "a+");
590 DRV_LOG(WARNING, "cannot open %s for debug dump", path);
591 MKSTR(path2, "./%s", fname);
592 fd = fopen(path2, "a+");
594 DRV_LOG(ERR, "cannot open %s for debug dump", path2);
597 DRV_LOG(INFO, "New debug dump in file %s", path2);
599 DRV_LOG(INFO, "New debug dump in file %s", path);
602 rte_hexdump(fd, hex_title, buf, hex_len);
604 fprintf(fd, "%s", (const char *)buf);
605 fprintf(fd, "\n\n\n");
610 * Move QP from error state to running state and initialize indexes.
613 * Pointer to TX queue control structure.
616 * 0 on success, else -1.
619 tx_recover_qp(struct mlx5_txq_ctrl *txq_ctrl)
621 struct mlx5_mp_arg_queue_state_modify sm = {
623 .queue_id = txq_ctrl->txq.idx,
626 if (mlx5_queue_state_modify(ETH_DEV(txq_ctrl->priv), &sm))
628 txq_ctrl->txq.wqe_ci = 0;
629 txq_ctrl->txq.wqe_pi = 0;
630 txq_ctrl->txq.elts_comp = 0;
634 /* Return 1 if the error CQE is already marked as seen, otherwise mark it and return 0. */
636 check_err_cqe_seen(volatile struct mlx5_err_cqe *err_cqe)
638 static const uint8_t magic[] = "seen";
642 for (i = 0; i < sizeof(magic); ++i)
643 if (!ret || err_cqe->rsvd1[i] != magic[i]) {
645 err_cqe->rsvd1[i] = magic[i];
654 * Pointer to TX queue structure.
656 * Pointer to the error CQE.
659 * A negative value if queue recovery failed; otherwise
660 * the error completion entry was handled successfully.
663 mlx5_tx_error_cqe_handle(struct mlx5_txq_data *restrict txq,
664 volatile struct mlx5_err_cqe *err_cqe)
666 if (err_cqe->syndrome != MLX5_CQE_SYNDROME_WR_FLUSH_ERR) {
667 const uint16_t wqe_m = ((1 << txq->wqe_n) - 1);
668 struct mlx5_txq_ctrl *txq_ctrl =
669 container_of(txq, struct mlx5_txq_ctrl, txq);
670 uint16_t new_wqe_pi = rte_be_to_cpu_16(err_cqe->wqe_counter);
671 int seen = check_err_cqe_seen(err_cqe);
673 if (!seen && txq_ctrl->dump_file_n <
674 txq_ctrl->priv->config.max_dump_files_num) {
675 MKSTR(err_str, "Unexpected CQE error syndrome "
676 "0x%02x CQN = %u SQN = %u wqe_counter = %u "
677 "wq_ci = %u cq_ci = %u", err_cqe->syndrome,
678 txq->cqe_s, txq->qp_num_8s >> 8,
679 rte_be_to_cpu_16(err_cqe->wqe_counter),
680 txq->wqe_ci, txq->cq_ci);
681 MKSTR(name, "dpdk_mlx5_port_%u_txq_%u_index_%u_%u",
682 PORT_ID(txq_ctrl->priv), txq->idx,
683 txq_ctrl->dump_file_n, (uint32_t)rte_rdtsc());
684 mlx5_dump_debug_information(name, NULL, err_str, 0);
685 mlx5_dump_debug_information(name, "MLX5 Error CQ:",
686 (const void *)((uintptr_t)
690 mlx5_dump_debug_information(name, "MLX5 Error SQ:",
691 (const void *)((uintptr_t)
695 txq_ctrl->dump_file_n++;
699 * Count errors in WQE units.
700 * Later this can be improved to count error packets,
701 * for example, by parsing the SQ to find how many packets
702 * should be counted for each WQE.
704 txq->stats.oerrors += ((txq->wqe_ci & wqe_m) -
706 if (tx_recover_qp(txq_ctrl)) {
707 /* Recovering failed - retry later on the same WQE. */
710 /* Release all the remaining buffers. */
711 txq_free_elts(txq_ctrl);
717 * Translate RX completion flags to packet type.
720 * Pointer to RX queue structure.
724 * @note: fix mlx5_dev_supported_ptypes_get() if any change is made here.
727 * Packet type for struct rte_mbuf.
729 static inline uint32_t
730 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
733 uint8_t pinfo = cqe->pkt_info;
734 uint16_t ptype = cqe->hdr_type_etc;
737 * The index to the array should have:
738 * bit[1:0] = l3_hdr_type
739 * bit[4:2] = l4_hdr_type
742 * bit[7] = outer_l3_type
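 * The bits are taken from cqe->pkt_info (tunnel/outer info, index bits 7:6)
 * and cqe->hdr_type_etc (L3/L4 types, index bits 5:0), see below.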
744 idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10);
745 return mlx5_ptype_table[idx] | rxq->tunnel * !!(idx & (1 << 6));
749 * Initialize Rx WQ and indexes.
752 * Pointer to RX queue structure.
755 mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
757 const unsigned int wqe_n = 1 << rxq->elts_n;
760 for (i = 0; (i != wqe_n); ++i) {
761 volatile struct mlx5_wqe_data_seg *scat;
765 if (mlx5_rxq_mprq_enabled(rxq)) {
766 struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[i];
768 scat = &((volatile struct mlx5_wqe_mprq *)
770 addr = (uintptr_t)mlx5_mprq_buf_addr(buf,
771 1 << rxq->strd_num_n);
772 byte_count = (1 << rxq->strd_sz_n) *
773 (1 << rxq->strd_num_n);
775 struct rte_mbuf *buf = (*rxq->elts)[i];
777 scat = &((volatile struct mlx5_wqe_data_seg *)
779 addr = rte_pktmbuf_mtod(buf, uintptr_t);
780 byte_count = DATA_LEN(buf);
782 /* scat->addr must be able to store a pointer. */
783 assert(sizeof(scat->addr) >= sizeof(uintptr_t));
784 *scat = (struct mlx5_wqe_data_seg){
785 .addr = rte_cpu_to_be_64(addr),
786 .byte_count = rte_cpu_to_be_32(byte_count),
787 .lkey = mlx5_rx_addr2mr(rxq, addr),
790 rxq->consumed_strd = 0;
791 rxq->decompressed = 0;
793 rxq->zip = (struct rxq_zip){
796 /* Update doorbell counter. */
797 rxq->rq_ci = wqe_n >> rxq->sges_n;
799 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
803 * Modify a Verbs/DevX queue state.
804 * This must be called from the primary process.
807 * Pointer to Ethernet device.
809 * State modify request parameters.
812 * 0 in case of success, else a non-zero value and rte_errno is set.
815 mlx5_queue_state_modify_primary(struct rte_eth_dev *dev,
816 const struct mlx5_mp_arg_queue_state_modify *sm)
819 struct mlx5_priv *priv = dev->data->dev_private;
822 struct mlx5_rxq_data *rxq = (*priv->rxqs)[sm->queue_id];
823 struct mlx5_rxq_ctrl *rxq_ctrl =
824 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
826 if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV) {
827 struct ibv_wq_attr mod = {
828 .attr_mask = IBV_WQ_ATTR_STATE,
829 .wq_state = sm->state,
832 ret = mlx5_glue->modify_wq(rxq_ctrl->obj->wq, &mod);
833 } else { /* rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ. */
834 struct mlx5_devx_modify_rq_attr rq_attr;
836 memset(&rq_attr, 0, sizeof(rq_attr));
837 if (sm->state == IBV_WQS_RESET) {
838 rq_attr.rq_state = MLX5_RQC_STATE_ERR;
839 rq_attr.state = MLX5_RQC_STATE_RST;
840 } else if (sm->state == IBV_WQS_RDY) {
841 rq_attr.rq_state = MLX5_RQC_STATE_RST;
842 rq_attr.state = MLX5_RQC_STATE_RDY;
843 } else if (sm->state == IBV_WQS_ERR) {
844 rq_attr.rq_state = MLX5_RQC_STATE_RDY;
845 rq_attr.state = MLX5_RQC_STATE_ERR;
847 ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq,
851 DRV_LOG(ERR, "Cannot change Rx WQ state to %u - %s",
852 sm->state, strerror(errno));
857 struct mlx5_txq_data *txq = (*priv->txqs)[sm->queue_id];
858 struct mlx5_txq_ctrl *txq_ctrl =
859 container_of(txq, struct mlx5_txq_ctrl, txq);
860 struct ibv_qp_attr mod = {
861 .qp_state = IBV_QPS_RESET,
862 .port_num = (uint8_t)priv->ibv_port,
864 struct ibv_qp *qp = txq_ctrl->obj->qp;
866 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
868 DRV_LOG(ERR, "Cannot change the Tx QP state to RESET "
869 "%s", strerror(errno));
873 mod.qp_state = IBV_QPS_INIT;
874 ret = mlx5_glue->modify_qp(qp, &mod,
875 (IBV_QP_STATE | IBV_QP_PORT));
877 DRV_LOG(ERR, "Cannot change Tx QP state to INIT %s",
882 mod.qp_state = IBV_QPS_RTR;
883 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
885 DRV_LOG(ERR, "Cannot change Tx QP state to RTR %s",
890 mod.qp_state = IBV_QPS_RTS;
891 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
893 DRV_LOG(ERR, "Cannot change Tx QP state to RTS %s",
903 * Modify a Verbs queue state.
906 * Pointer to Ethernet device.
908 * State modify request parameters.
911 * 0 in case of success, else a non-zero value.
914 mlx5_queue_state_modify(struct rte_eth_dev *dev,
915 struct mlx5_mp_arg_queue_state_modify *sm)
919 switch (rte_eal_process_type()) {
920 case RTE_PROC_PRIMARY:
921 ret = mlx5_queue_state_modify_primary(dev, sm);
923 case RTE_PROC_SECONDARY:
924 ret = mlx5_mp_req_queue_state_modify(dev, sm);
934 * The function moves the RQ state to RESET when the first error CQE is
935 * seen, then the CQ is drained by the caller's loop. When the CQ is empty,
936 * the function moves the RQ state to READY and reinitializes the RQ.
937 * Identifying the next CQE and counting errors remain the caller's responsibility.
940 * Pointer to RX queue structure.
942 * 1 when called from vectorized Rx burst, need to prepare mbufs for the RQ.
943 * 0 when called from non-vectorized Rx burst.
946 * -1 in case of recovery error, otherwise the CQE status.
949 mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
951 const uint16_t cqe_n = 1 << rxq->cqe_n;
952 const uint16_t cqe_mask = cqe_n - 1;
953 const unsigned int wqe_n = 1 << rxq->elts_n;
954 struct mlx5_rxq_ctrl *rxq_ctrl =
955 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
957 volatile struct mlx5_cqe *cqe;
958 volatile struct mlx5_err_cqe *err_cqe;
960 .cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask],
962 struct mlx5_mp_arg_queue_state_modify sm;
965 switch (rxq->err_state) {
966 case MLX5_RXQ_ERR_STATE_NO_ERROR:
967 rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_RESET;
969 case MLX5_RXQ_ERR_STATE_NEED_RESET:
971 sm.queue_id = rxq->idx;
972 sm.state = IBV_WQS_RESET;
973 if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv), &sm))
975 if (rxq_ctrl->dump_file_n <
976 rxq_ctrl->priv->config.max_dump_files_num) {
977 MKSTR(err_str, "Unexpected CQE error syndrome "
978 "0x%02x CQN = %u RQN = %u wqe_counter = %u"
979 " rq_ci = %u cq_ci = %u", u.err_cqe->syndrome,
980 rxq->cqn, rxq_ctrl->wqn,
981 rte_be_to_cpu_16(u.err_cqe->wqe_counter),
982 rxq->rq_ci << rxq->sges_n, rxq->cq_ci);
983 MKSTR(name, "dpdk_mlx5_port_%u_rxq_%u_%u",
984 rxq->port_id, rxq->idx, (uint32_t)rte_rdtsc());
985 mlx5_dump_debug_information(name, NULL, err_str, 0);
986 mlx5_dump_debug_information(name, "MLX5 Error CQ:",
987 (const void *)((uintptr_t)
989 sizeof(*u.cqe) * cqe_n);
990 mlx5_dump_debug_information(name, "MLX5 Error RQ:",
991 (const void *)((uintptr_t)
994 rxq_ctrl->dump_file_n++;
996 rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_READY;
998 case MLX5_RXQ_ERR_STATE_NEED_READY:
999 ret = check_cqe(u.cqe, cqe_n, rxq->cq_ci);
1000 if (ret == MLX5_CQE_STATUS_HW_OWN) {
1002 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1005 * The RQ consumer index must be zeroed while moving
1006 * from RESET state to RDY state.
1008 *rxq->rq_db = rte_cpu_to_be_32(0);
1011 sm.queue_id = rxq->idx;
1012 sm.state = IBV_WQS_RDY;
1013 if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv),
1017 const uint16_t q_mask = wqe_n - 1;
1019 struct rte_mbuf **elt;
1021 unsigned int n = wqe_n - (rxq->rq_ci -
1024 for (i = 0; i < (int)n; ++i) {
1025 elt_idx = (rxq->rq_ci + i) & q_mask;
1026 elt = &(*rxq->elts)[elt_idx];
1027 *elt = rte_mbuf_raw_alloc(rxq->mp);
1029 for (i--; i >= 0; --i) {
1030 elt_idx = (rxq->rq_ci +
1034 rte_pktmbuf_free_seg
1040 for (i = 0; i < (int)wqe_n; ++i) {
1041 elt = &(*rxq->elts)[i];
1043 (uint16_t)((*elt)->buf_len -
1044 rte_pktmbuf_headroom(*elt));
1046 /* Padding with a fake mbuf for vec Rx. */
1047 for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
1048 (*rxq->elts)[wqe_n + i] =
1051 mlx5_rxq_initialize(rxq);
1052 rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
1061 * Get size of the next packet for a given CQE. For compressed CQEs, the
1062 * consumer index is updated only once all packets of the current one have
1066 * Pointer to RX queue.
1070 * Store pointer to mini-CQE if compressed. Otherwise, the pointer is not
1074 * 0 in case of empty CQE, otherwise the packet size in bytes.
1077 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
1078 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe)
1080 struct rxq_zip *zip = &rxq->zip;
1081 uint16_t cqe_n = cqe_cnt + 1;
1087 /* Process compressed data in the CQE and mini arrays. */
1089 volatile struct mlx5_mini_cqe8 (*mc)[8] =
1090 (volatile struct mlx5_mini_cqe8 (*)[8])
1091 (uintptr_t)(&(*rxq->cqes)[zip->ca &
1094 len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt);
1095 *mcqe = &(*mc)[zip->ai & 7];
1096 if ((++zip->ai & 7) == 0) {
1097 /* Invalidate consumed CQEs */
1100 while (idx != end) {
1101 (*rxq->cqes)[idx & cqe_cnt].op_own =
1102 MLX5_CQE_INVALIDATE;
1106 * Increment consumer index to skip the number
1107 * of CQEs consumed. Hardware leaves holes in
1108 * the CQ ring for software use.
1113 if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) {
1114 /* Invalidate the rest */
1118 while (idx != end) {
1119 (*rxq->cqes)[idx & cqe_cnt].op_own =
1120 MLX5_CQE_INVALIDATE;
1123 rxq->cq_ci = zip->cq_ci;
1127 * No compressed data, get next CQE and verify if it is
1134 ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
1135 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
1136 if (unlikely(ret == MLX5_CQE_STATUS_ERR ||
1138 ret = mlx5_rx_err_handle(rxq, 0);
1139 if (ret == MLX5_CQE_STATUS_HW_OWN ||
1147 op_own = cqe->op_own;
1148 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) {
1149 volatile struct mlx5_mini_cqe8 (*mc)[8] =
1150 (volatile struct mlx5_mini_cqe8 (*)[8])
1151 (uintptr_t)(&(*rxq->cqes)
1155 /* Fix endianness. */
1156 zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
1158 * Current mini array position is the one
1159 * returned by check_cqe64().
1161 * If completion comprises several mini arrays,
1162 * as a special case the second one is located
1163 * 7 CQEs after the initial CQE instead of 8
1164 * for subsequent ones.
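 * That is why zip->na (next array) is initialized to zip->ca + 7 below.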
1166 zip->ca = rxq->cq_ci;
1167 zip->na = zip->ca + 7;
1168 /* Compute the next non-compressed CQE. */
1170 zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
1171 /* Get packet size to return. */
1172 len = rte_be_to_cpu_32((*mc)[0].byte_cnt);
1175 /* Prefetch all to be invalidated */
1178 while (idx != end) {
1179 rte_prefetch0(&(*rxq->cqes)[(idx) &
1184 len = rte_be_to_cpu_32(cqe->byte_cnt);
1187 if (unlikely(rxq->err_state)) {
1188 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1189 ++rxq->stats.idropped;
1197 * Translate RX completion flags to offload flags.
1203 * Offload flags (ol_flags) for struct rte_mbuf.
1205 static inline uint32_t
1206 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe)
1208 uint32_t ol_flags = 0;
1209 uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);
1213 MLX5_CQE_RX_L3_HDR_VALID,
1214 PKT_RX_IP_CKSUM_GOOD) |
1216 MLX5_CQE_RX_L4_HDR_VALID,
1217 PKT_RX_L4_CKSUM_GOOD);
1222 * Fill in mbuf fields from RX completion flags.
1223 * Note that pkt->ol_flags should be initialized outside of this function.
1226 * Pointer to RX queue.
1231 * @param rss_hash_res
1232 * Packet RSS Hash result.
1235 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
1236 volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res)
1238 /* Update packet information. */
1239 pkt->packet_type = rxq_cq_to_pkt_type(rxq, cqe);
1240 if (rss_hash_res && rxq->rss_hash) {
1241 pkt->hash.rss = rss_hash_res;
1242 pkt->ol_flags |= PKT_RX_RSS_HASH;
1244 if (rxq->mark && MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) {
1245 pkt->ol_flags |= PKT_RX_FDIR;
1246 if (cqe->sop_drop_qpn !=
1247 rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) {
1248 uint32_t mark = cqe->sop_drop_qpn;
1250 pkt->ol_flags |= PKT_RX_FDIR_ID;
1251 pkt->hash.fdir.hi = mlx5_flow_mark_get(mark);
1254 if (rte_flow_dynf_metadata_avail() && cqe->flow_table_metadata) {
1255 pkt->ol_flags |= PKT_RX_DYNF_METADATA;
1256 *RTE_FLOW_DYNF_METADATA(pkt) = cqe->flow_table_metadata;
1259 pkt->ol_flags |= rxq_cq_to_ol_flags(cqe);
1260 if (rxq->vlan_strip &&
1261 (cqe->hdr_type_etc & rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) {
1262 pkt->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1263 pkt->vlan_tci = rte_be_to_cpu_16(cqe->vlan_info);
1265 if (rxq->hw_timestamp) {
1266 pkt->timestamp = rte_be_to_cpu_64(cqe->timestamp);
1267 pkt->ol_flags |= PKT_RX_TIMESTAMP;
1272 * DPDK callback for RX.
1275 * Generic pointer to RX queue structure.
1277 * Array to store received packets.
1279 * Maximum number of packets in array.
1282 * Number of packets successfully received (<= pkts_n).
1285 mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1287 struct mlx5_rxq_data *rxq = dpdk_rxq;
1288 const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1;
1289 const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1;
1290 const unsigned int sges_n = rxq->sges_n;
1291 struct rte_mbuf *pkt = NULL;
1292 struct rte_mbuf *seg = NULL;
1293 volatile struct mlx5_cqe *cqe =
1294 &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1296 unsigned int rq_ci = rxq->rq_ci << sges_n;
1297 int len = 0; /* keep its value across iterations. */
1300 unsigned int idx = rq_ci & wqe_cnt;
1301 volatile struct mlx5_wqe_data_seg *wqe =
1302 &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx];
1303 struct rte_mbuf *rep = (*rxq->elts)[idx];
1304 volatile struct mlx5_mini_cqe8 *mcqe = NULL;
1305 uint32_t rss_hash_res;
1313 rep = rte_mbuf_raw_alloc(rxq->mp);
1314 if (unlikely(rep == NULL)) {
1315 ++rxq->stats.rx_nombuf;
1318 * no buffers before we even started,
1319 * bail out silently.
1323 while (pkt != seg) {
1324 assert(pkt != (*rxq->elts)[idx]);
1328 rte_mbuf_raw_free(pkt);
1334 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1335 len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe);
1337 rte_mbuf_raw_free(rep);
1341 assert(len >= (rxq->crc_present << 2));
1342 pkt->ol_flags &= EXT_ATTACHED_MBUF;
1343 /* If compressed, take hash result from mini-CQE. */
1344 rss_hash_res = rte_be_to_cpu_32(mcqe == NULL ?
1346 mcqe->rx_hash_result);
1347 rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
1348 if (rxq->crc_present)
1349 len -= RTE_ETHER_CRC_LEN;
1351 if (cqe->lro_num_seg > 1) {
1353 (rte_pktmbuf_mtod(pkt, uint8_t *), cqe,
1355 pkt->ol_flags |= PKT_RX_LRO;
1356 pkt->tso_segsz = len / cqe->lro_num_seg;
1359 DATA_LEN(rep) = DATA_LEN(seg);
1360 PKT_LEN(rep) = PKT_LEN(seg);
1361 SET_DATA_OFF(rep, DATA_OFF(seg));
1362 PORT(rep) = PORT(seg);
1363 (*rxq->elts)[idx] = rep;
1365 * Fill NIC descriptor with the new buffer. The lkey and size
1366 * of the buffers are already known, only the buffer address
1369 wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
1370 /* If there's only one MR, no need to replace LKey in WQE. */
1371 if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
1372 wqe->lkey = mlx5_rx_mb2mr(rxq, rep);
1373 if (len > DATA_LEN(seg)) {
1374 len -= DATA_LEN(seg);
1379 DATA_LEN(seg) = len;
1380 #ifdef MLX5_PMD_SOFT_COUNTERS
1381 /* Increment bytes counter. */
1382 rxq->stats.ibytes += PKT_LEN(pkt);
1384 /* Return packet. */
1389 /* Align consumer index to the next stride. */
1394 if (unlikely((i == 0) && ((rq_ci >> sges_n) == rxq->rq_ci)))
1396 /* Update the consumer index. */
1397 rxq->rq_ci = rq_ci >> sges_n;
1399 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1401 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
1402 #ifdef MLX5_PMD_SOFT_COUNTERS
1403 /* Increment packets counter. */
1404 rxq->stats.ipackets += i;
1410 * Update LRO packet TCP header.
1411 * The HW LRO feature doesn't update the TCP header after coalescing the
1412 * TCP segments but supplies information in the CQE so that SW can fill it in.
1415 * Pointer to the TCP header.
1417 * Pointer to the completion entry.
1419 * The L3 pseudo-header checksum.
1422 mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *restrict tcp,
1423 volatile struct mlx5_cqe *restrict cqe,
1426 uint8_t l4_type = (rte_be_to_cpu_16(cqe->hdr_type_etc) &
1427 MLX5_CQE_L4_TYPE_MASK) >> MLX5_CQE_L4_TYPE_SHIFT;
1429 * The HW calculates only the TCP payload checksum; SW needs to complete
1430 * the TCP header checksum and the L3 pseudo-header checksum.
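 * Below: the pseudo-header checksum, the HW-supplied payload checksum and
 * the raw sum of the TCP header are accumulated, then folded to 16 bits and
 * complemented - the standard one's complement checksum computation.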
1432 uint32_t csum = phcsum + cqe->csum;
1434 if (l4_type == MLX5_L4_HDR_TYPE_TCP_EMPTY_ACK ||
1435 l4_type == MLX5_L4_HDR_TYPE_TCP_WITH_ACL) {
1436 tcp->tcp_flags |= RTE_TCP_ACK_FLAG;
1437 tcp->recv_ack = cqe->lro_ack_seq_num;
1438 tcp->rx_win = cqe->lro_tcp_win;
1440 if (cqe->lro_tcppsh_abort_dupack & MLX5_CQE_LRO_PUSH_MASK)
1441 tcp->tcp_flags |= RTE_TCP_PSH_FLAG;
1443 csum += rte_raw_cksum(tcp, (tcp->data_off & 0xF) * 4);
1444 csum = ((csum & 0xffff0000) >> 16) + (csum & 0xffff);
1445 csum = (~csum) & 0xffff;
1452 * Update LRO packet headers.
1453 * The HW LRO feature doesn't update the L3/TCP headers after coalescing the
1454 * TCP segments but supplies information in the CQE so that SW can fill them in.
1457 * The packet address.
1459 * Pointer to the completion entry.
1461 * The packet length.
1464 mlx5_lro_update_hdr(uint8_t *restrict padd,
1465 volatile struct mlx5_cqe *restrict cqe,
1469 struct rte_ether_hdr *eth;
1470 struct rte_vlan_hdr *vlan;
1471 struct rte_ipv4_hdr *ipv4;
1472 struct rte_ipv6_hdr *ipv6;
1473 struct rte_tcp_hdr *tcp;
1478 uint16_t proto = h.eth->ether_type;
1482 while (proto == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
1483 proto == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
1484 proto = h.vlan->eth_proto;
1487 if (proto == RTE_BE16(RTE_ETHER_TYPE_IPV4)) {
1488 h.ipv4->time_to_live = cqe->lro_min_ttl;
1489 h.ipv4->total_length = rte_cpu_to_be_16(len - (h.hdr - padd));
1490 h.ipv4->hdr_checksum = 0;
1491 h.ipv4->hdr_checksum = rte_ipv4_cksum(h.ipv4);
1492 phcsum = rte_ipv4_phdr_cksum(h.ipv4, 0);
1495 h.ipv6->hop_limits = cqe->lro_min_ttl;
1496 h.ipv6->payload_len = rte_cpu_to_be_16(len - (h.hdr - padd) -
1498 phcsum = rte_ipv6_phdr_cksum(h.ipv6, 0);
1501 mlx5_lro_update_tcp_hdr(h.tcp, cqe, phcsum);
1505 mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
1507 struct mlx5_mprq_buf *buf = opaque;
1509 if (rte_atomic16_read(&buf->refcnt) == 1) {
1510 rte_mempool_put(buf->mp, buf);
1511 } else if (rte_atomic16_add_return(&buf->refcnt, -1) == 0) {
1512 rte_atomic16_set(&buf->refcnt, 1);
1513 rte_mempool_put(buf->mp, buf);
1518 mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf)
1520 mlx5_mprq_buf_free_cb(NULL, buf);
1524 mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx,
1525 const unsigned int strd_n)
1527 struct mlx5_mprq_buf *rep = rxq->mprq_repl;
1528 volatile struct mlx5_wqe_data_seg *wqe =
1529 &((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg;
1532 assert(rep != NULL);
1533 /* Replace MPRQ buf. */
1534 (*rxq->mprq_bufs)[rq_idx] = rep;
1536 addr = mlx5_mprq_buf_addr(rep, strd_n);
1537 wqe->addr = rte_cpu_to_be_64((uintptr_t)addr);
1538 /* If there's only one MR, no need to replace LKey in WQE. */
1539 if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
1540 wqe->lkey = mlx5_rx_addr2mr(rxq, (uintptr_t)addr);
1541 /* Stash a mbuf for next replacement. */
1542 if (likely(!rte_mempool_get(rxq->mprq_mp, (void **)&rep)))
1543 rxq->mprq_repl = rep;
1545 rxq->mprq_repl = NULL;
1549 * DPDK callback for RX with Multi-Packet RQ support.
1552 * Generic pointer to RX queue structure.
1554 * Array to store received packets.
1556 * Maximum number of packets in array.
1559 * Number of packets successfully received (<= pkts_n).
1562 mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1564 struct mlx5_rxq_data *rxq = dpdk_rxq;
1565 const unsigned int strd_n = 1 << rxq->strd_num_n;
1566 const unsigned int strd_sz = 1 << rxq->strd_sz_n;
1567 const unsigned int strd_shift =
1568 MLX5_MPRQ_STRIDE_SHIFT_BYTE * rxq->strd_shift_en;
1569 const unsigned int cq_mask = (1 << rxq->cqe_n) - 1;
1570 const unsigned int wq_mask = (1 << rxq->elts_n) - 1;
1571 volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
1573 uint32_t rq_ci = rxq->rq_ci;
1574 uint16_t consumed_strd = rxq->consumed_strd;
1575 uint16_t headroom_sz = rxq->strd_headroom_en * RTE_PKTMBUF_HEADROOM;
1576 struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
1578 while (i < pkts_n) {
1579 struct rte_mbuf *pkt;
1587 volatile struct mlx5_mini_cqe8 *mcqe = NULL;
1588 uint32_t rss_hash_res = 0;
1589 uint8_t lro_num_seg;
1591 if (consumed_strd == strd_n) {
1592 /* Replace WQE only if the buffer is still in use. */
1593 if (rte_atomic16_read(&buf->refcnt) > 1) {
1594 mprq_buf_replace(rxq, rq_ci & wq_mask, strd_n);
1595 /* Release the old buffer. */
1596 mlx5_mprq_buf_free(buf);
1597 } else if (unlikely(rxq->mprq_repl == NULL)) {
1598 struct mlx5_mprq_buf *rep;
1601 * Currently, the MPRQ mempool is out of buffers
1602 * and memcpy is done regardless of the size of the Rx
1603 * packet. Retry allocation to get back to
1606 if (!rte_mempool_get(rxq->mprq_mp,
1608 rxq->mprq_repl = rep;
1610 /* Advance to the next WQE. */
1613 buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
1615 cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
1616 ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe);
1620 strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
1621 MLX5_MPRQ_STRIDE_NUM_SHIFT;
1623 consumed_strd += strd_cnt;
1624 if (byte_cnt & MLX5_MPRQ_FILLER_MASK)
1627 rss_hash_res = rte_be_to_cpu_32(cqe->rx_hash_res);
1628 strd_idx = rte_be_to_cpu_16(cqe->wqe_counter);
1630 /* mini-CQE for MPRQ doesn't have hash result. */
1631 strd_idx = rte_be_to_cpu_16(mcqe->stride_idx);
1633 assert(strd_idx < strd_n);
1634 assert(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) & wq_mask));
1635 lro_num_seg = cqe->lro_num_seg;
1637 * Currently configured to receive a packet per stride. But if
1638 * MTU is adjusted through the kernel interface, the device could
1639 * consume multiple strides without raising an error. In this
1640 * case, the packet should be dropped because it is bigger than
1641 * the max_rx_pkt_len.
1643 if (unlikely(!lro_num_seg && strd_cnt > 1)) {
1644 ++rxq->stats.idropped;
1647 pkt = rte_pktmbuf_alloc(rxq->mp);
1648 if (unlikely(pkt == NULL)) {
1649 ++rxq->stats.rx_nombuf;
1652 len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
1653 assert((int)len >= (rxq->crc_present << 2));
1654 if (rxq->crc_present)
1655 len -= RTE_ETHER_CRC_LEN;
1656 offset = strd_idx * strd_sz + strd_shift;
1657 addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf, strd_n), offset);
1659 * Memcpy packets to the target mbuf if:
1660 * - The size of the packet is smaller than mprq_max_memcpy_len.
1661 * - The mempool for Multi-Packet RQ is out of buffers.
1663 if (len <= rxq->mprq_max_memcpy_len || rxq->mprq_repl == NULL) {
1665 * When memcpy'ing packet due to out-of-buffer, the
1666 * packet must be smaller than the target mbuf.
1668 if (unlikely(rte_pktmbuf_tailroom(pkt) < len)) {
1669 rte_pktmbuf_free_seg(pkt);
1670 ++rxq->stats.idropped;
1673 rte_memcpy(rte_pktmbuf_mtod(pkt, void *), addr, len);
1674 DATA_LEN(pkt) = len;
1676 rte_iova_t buf_iova;
1677 struct rte_mbuf_ext_shared_info *shinfo;
1678 uint16_t buf_len = strd_cnt * strd_sz;
1681 /* Increment the refcnt of the whole chunk. */
1682 rte_atomic16_add_return(&buf->refcnt, 1);
1683 assert((uint16_t)rte_atomic16_read(&buf->refcnt) <=
1685 buf_addr = RTE_PTR_SUB(addr, headroom_sz);
1687 * MLX5 device doesn't use iova but it is necessary in a
1688 * case where the Rx packet is transmitted via a
1691 buf_iova = rte_mempool_virt2iova(buf) +
1692 RTE_PTR_DIFF(buf_addr, buf);
1693 shinfo = &buf->shinfos[strd_idx];
1694 rte_mbuf_ext_refcnt_set(shinfo, 1);
1696 * EXT_ATTACHED_MBUF will be set in pkt->ol_flags when
1697 * attaching the stride to the mbuf and more offload flags
1698 * will be added below by calling rxq_cq_to_mbuf().
1699 * Other fields will be overwritten.
1701 rte_pktmbuf_attach_extbuf(pkt, buf_addr, buf_iova,
1703 /* Set mbuf head-room. */
1704 pkt->data_off = headroom_sz;
1705 assert(pkt->ol_flags == EXT_ATTACHED_MBUF);
1707 * Prevent potential overflow due to MTU change through
1710 if (unlikely(rte_pktmbuf_tailroom(pkt) < len)) {
1711 rte_pktmbuf_free_seg(pkt);
1712 ++rxq->stats.idropped;
1715 DATA_LEN(pkt) = len;
1717 * An LRO packet may consume all the stride memory; in this
1718 * case the packet head-room space is not guaranteed, so an
1719 * empty mbuf must be added for the head-room.
1721 if (!rxq->strd_headroom_en) {
1722 struct rte_mbuf *headroom_mbuf =
1723 rte_pktmbuf_alloc(rxq->mp);
1725 if (unlikely(headroom_mbuf == NULL)) {
1726 rte_pktmbuf_free_seg(pkt);
1727 ++rxq->stats.rx_nombuf;
1730 PORT(pkt) = rxq->port_id;
1731 NEXT(headroom_mbuf) = pkt;
1732 pkt = headroom_mbuf;
1736 rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
1737 if (lro_num_seg > 1) {
1738 mlx5_lro_update_hdr(addr, cqe, len);
1739 pkt->ol_flags |= PKT_RX_LRO;
1740 pkt->tso_segsz = strd_sz;
1743 PORT(pkt) = rxq->port_id;
1744 #ifdef MLX5_PMD_SOFT_COUNTERS
1745 /* Increment bytes counter. */
1746 rxq->stats.ibytes += PKT_LEN(pkt);
1748 /* Return packet. */
1752 /* Update the consumer indexes. */
1753 rxq->consumed_strd = consumed_strd;
1755 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1756 if (rq_ci != rxq->rq_ci) {
1759 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
1761 #ifdef MLX5_PMD_SOFT_COUNTERS
1762 /* Increment packets counter. */
1763 rxq->stats.ipackets += i;
1769 * Dummy DPDK callback for TX.
1771 * This function is used to temporarily replace the real callback during
1772 * unsafe control operations on the queue, or in case of error.
1775 * Generic pointer to TX queue structure.
1777 * Packets to transmit.
1779 * Number of packets in array.
1782 * Number of packets successfully transmitted (<= pkts_n).
1785 removed_tx_burst(void *dpdk_txq __rte_unused,
1786 struct rte_mbuf **pkts __rte_unused,
1787 uint16_t pkts_n __rte_unused)
1794 * Dummy DPDK callback for RX.
1796 * This function is used to temporarily replace the real callback during
1797 * unsafe control operations on the queue, or in case of error.
1800 * Generic pointer to RX queue structure.
1802 * Array to store received packets.
1804 * Maximum number of packets in array.
1807 * Number of packets successfully received (<= pkts_n).
1810 removed_rx_burst(void *dpdk_txq __rte_unused,
1811 struct rte_mbuf **pkts __rte_unused,
1812 uint16_t pkts_n __rte_unused)
1819 * Vectorized Rx/Tx routines are not compiled in when required vector
1820 * instructions are not supported on a target architecture. The following null
1821 * stubs are needed for linkage when those are not included outside of this file
1822 * (e.g. mlx5_rxtx_vec_sse.c for x86).
1826 mlx5_rx_burst_vec(void *dpdk_txq __rte_unused,
1827 struct rte_mbuf **pkts __rte_unused,
1828 uint16_t pkts_n __rte_unused)
1834 mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)
1840 mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused)
1846 * Free the mbufs from the linear array of pointers.
1849 * Pointer to the array of packets to be freed.
1851 * Number of packets to be freed.
1853 * Configured Tx offloads mask. It is fully defined at
1854 * compile time and may be used for optimization.
1856 static __rte_always_inline void
1857 mlx5_tx_free_mbuf(struct rte_mbuf **restrict pkts,
1858 unsigned int pkts_n,
1859 unsigned int olx __rte_unused)
1861 struct rte_mempool *pool = NULL;
1862 struct rte_mbuf **p_free = NULL;
1863 struct rte_mbuf *mbuf;
1864 unsigned int n_free = 0;
1867 * The implemented algorithm eliminates
1868 * copying pointers to temporary array
1869 * for rte_mempool_put_bulk() calls.
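 * Runs of mbufs belonging to the same mempool are detected in place in
 * the pkts[] array (tracked by p_free/n_free) and returned with a single
 * rte_mempool_put_bulk() call per run.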
1876 * Decrement mbuf reference counter, detach
1877 * indirect and external buffers if needed.
1879 mbuf = rte_pktmbuf_prefree_seg(*pkts);
1880 if (likely(mbuf != NULL)) {
1881 assert(mbuf == *pkts);
1882 if (likely(n_free != 0)) {
1883 if (unlikely(pool != mbuf->pool))
1884 /* From different pool. */
1887 /* Start new scan array. */
1894 if (unlikely(pkts_n == 0)) {
1900 * This happens if mbuf is still referenced.
1901 * We can't put it back to the pool, skip.
1905 if (unlikely(n_free != 0))
1906 /* There is some array to free.*/
1908 if (unlikely(pkts_n == 0))
1909 /* Last mbuf, nothing to free. */
1915 * This loop is implemented to avoid multiple
1916 * inlining of rte_mempool_put_bulk().
1922 * Free the array of pre-freed mbufs
1923 * belonging to the same memory pool.
1925 rte_mempool_put_bulk(pool, (void *)p_free, n_free);
1926 if (unlikely(mbuf != NULL)) {
1927 /* There is a request to start a new scan. */
1932 if (likely(pkts_n != 0))
1935 * This is the last mbuf to be freed.
1936 * Do one more loop iteration to complete.
1937 * This is the rare case of the last unique mbuf.
1942 if (likely(pkts_n == 0))
1951 * Free the mbufs from the elts ring buffer up to the new tail.
1954 * Pointer to Tx queue structure.
1956 * Index in elts to free up to, becomes new elts tail.
1958 * Configured Tx offloads mask. It is fully defined at
1959 * compile time and may be used for optimization.
1961 static __rte_always_inline void
1962 mlx5_tx_free_elts(struct mlx5_txq_data *restrict txq,
1964 unsigned int olx __rte_unused)
1966 uint16_t n_elts = tail - txq->elts_tail;
1969 assert(n_elts <= txq->elts_s);
1971 * Implement a loop to support ring buffer wraparound
1972 * with single inlining of mlx5_tx_free_mbuf().
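 * Each iteration frees the contiguous part up to the end of the ring
 * ('part' entries) and wraps around if more entries remain.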
1977 part = txq->elts_s - (txq->elts_tail & txq->elts_m);
1978 part = RTE_MIN(part, n_elts);
1980 assert(part <= txq->elts_s);
1981 mlx5_tx_free_mbuf(&txq->elts[txq->elts_tail & txq->elts_m],
1983 txq->elts_tail += part;
1989 * Store the mbufs being sent into the elts ring buffer.
1990 * On Tx completion these mbufs will be freed.
1993 * Pointer to Tx queue structure.
1995 * Pointer to array of packets to be stored.
1997 * Number of packets to be stored.
1999 * Configured Tx offloads mask. It is fully defined at
2000 * compile time and may be used for optimization.
2002 static __rte_always_inline void
2003 mlx5_tx_copy_elts(struct mlx5_txq_data *restrict txq,
2004 struct rte_mbuf **restrict pkts,
2005 unsigned int pkts_n,
2006 unsigned int olx __rte_unused)
2009 struct rte_mbuf **elts = (struct rte_mbuf **)txq->elts;
2013 part = txq->elts_s - (txq->elts_head & txq->elts_m);
2015 assert(part <= txq->elts_s);
2016 /* This code is a good candidate for vectorizing with SIMD. */
2017 rte_memcpy((void *)(elts + (txq->elts_head & txq->elts_m)),
2019 RTE_MIN(part, pkts_n) * sizeof(struct rte_mbuf *));
2020 txq->elts_head += pkts_n;
2021 if (unlikely(part < pkts_n))
2022 /* The copy is wrapping around the elts array. */
2023 rte_memcpy((void *)elts, (void *)(pkts + part),
2024 (pkts_n - part) * sizeof(struct rte_mbuf *));
2028 * Update completion queue consuming index via doorbell
2029 * and flush the completed data buffers.
2032 * Pointer to TX queue structure.
2033 * @param last_cqe
2034 * Pointer to the last valid CQE; if not NULL update txq->wqe_pi and flush the buffers.
2036 * Configured Tx offloads mask. It is fully defined at
2037 * compile time and may be used for optimization.
2039 static __rte_always_inline void
2040 mlx5_tx_comp_flush(struct mlx5_txq_data *restrict txq,
2041 volatile struct mlx5_cqe *last_cqe,
2042 unsigned int olx __rte_unused)
2044 if (likely(last_cqe != NULL)) {
2047 txq->wqe_pi = rte_be_to_cpu_16(last_cqe->wqe_counter);
2048 tail = txq->fcqs[(txq->cq_ci - 1) & txq->cqe_m];
2049 if (likely(tail != txq->elts_tail)) {
2050 mlx5_tx_free_elts(txq, tail, olx);
2051 assert(tail == txq->elts_tail);
2057 * Manage TX completions. This routine checks the CQ for
2058 * arrived CQEs, deduces the last accomplished WQE in SQ,
2059 * updates SQ producing index and frees all completed mbufs.
2062 * Pointer to TX queue structure.
2064 * Configured Tx offloads mask. It is fully defined at
2065 * compile time and may be used for optimization.
2067 * NOTE: not inlined intentionally, it makes the tx_burst
2068 * routine smaller, simpler and faster - from experiments.
2071 mlx5_tx_handle_completion(struct mlx5_txq_data *restrict txq,
2072 unsigned int olx __rte_unused)
2074 unsigned int count = MLX5_TX_COMP_MAX_CQE;
2075 volatile struct mlx5_cqe *last_cqe = NULL;
2076 uint16_t ci = txq->cq_ci;
2079 static_assert(MLX5_CQE_STATUS_HW_OWN < 0, "Must be negative value");
2080 static_assert(MLX5_CQE_STATUS_SW_OWN < 0, "Must be negative value");
2082 volatile struct mlx5_cqe *cqe;
2084 cqe = &txq->cqes[ci & txq->cqe_m];
2085 ret = check_cqe(cqe, txq->cqe_s, ci);
2086 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
2087 if (likely(ret != MLX5_CQE_STATUS_ERR)) {
2088 /* No new CQEs in completion queue. */
2089 assert(ret == MLX5_CQE_STATUS_HW_OWN);
2093 * Some error occurred, try to restart.
2094 * There is no barrier after the WQE related doorbell
2095 * write, make sure all writes are completed
2096 * here, before we might perform an SQ reset.
2100 ret = mlx5_tx_error_cqe_handle
2101 (txq, (volatile struct mlx5_err_cqe *)cqe);
2102 if (unlikely(ret < 0)) {
2104 * Some error occurred during queue error
2105 * handling, we do not advance the index
2106 * here, allowing a retry on the next call.
2111 * We are going to fetch all entries with
2112 * MLX5_CQE_SYNDROME_WR_FLUSH_ERR status.
2113 * The send queue is supposed to be empty.
2120 /* Normal transmit completion. */
2121 assert(ci != txq->cq_pi);
2122 assert((txq->fcqs[ci & txq->cqe_m] >> 16) == cqe->wqe_counter);
2126 * We have to restrict the amount of processed CQEs
2127 * in one tx_burst routine call. The CQ may be large
2128 * and many CQEs may be updated by the NIC in one
2129 * transaction. Freeing buffers is time consuming,
2130 * multiple iterations may introduce significant
2133 if (likely(--count == 0))
2136 if (likely(ci != txq->cq_ci)) {
2138 * Update completion queue consuming index
2139 * and ring doorbell to notify hardware.
2141 rte_compiler_barrier();
2143 *txq->cq_db = rte_cpu_to_be_32(ci);
2144 mlx5_tx_comp_flush(txq, last_cqe, olx);
2149 * Check if the completion request flag should be set in the last WQE.
2150 * Both pushed mbufs and WQEs are monitored and the completion request
2151 * flag is set if any of the thresholds is reached.
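 * The thresholds are MLX5_TX_COMP_THRESH pushed mbufs and, when data
 * inlining is enabled, txq->wqe_thres WQEs (see the condition below).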
2154 * Pointer to TX queue structure.
2156 * Pointer to burst routine local context.
2158 * Configured Tx offloads mask. It is fully defined at
2159 * compile time and may be used for optimization.
2161 static __rte_always_inline void
2162 mlx5_tx_request_completion(struct mlx5_txq_data *restrict txq,
2163 struct mlx5_txq_local *restrict loc,
2166 uint16_t head = txq->elts_head;
2169 part = MLX5_TXOFF_CONFIG(INLINE) ?
2170 0 : loc->pkts_sent - loc->pkts_copy;
2172 if ((uint16_t)(head - txq->elts_comp) >= MLX5_TX_COMP_THRESH ||
2173 (MLX5_TXOFF_CONFIG(INLINE) &&
2174 (uint16_t)(txq->wqe_ci - txq->wqe_comp) >= txq->wqe_thres)) {
2175 volatile struct mlx5_wqe *last = loc->wqe_last;
2177 txq->elts_comp = head;
2178 if (MLX5_TXOFF_CONFIG(INLINE))
2179 txq->wqe_comp = txq->wqe_ci;
2180 /* Request unconditional completion on last WQE. */
2181 last->cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
2182 MLX5_COMP_MODE_OFFSET);
2183 /* Save elts_head in the dedicated free-on-completion queue. */
2185 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head;
2187 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head |
2188 (last->cseg.opcode >> 8) << 16;
2190 /* A CQE slot must always be available. */
2191 assert((txq->cq_pi - txq->cq_ci) <= txq->cqe_s);
2196 * DPDK callback to check the status of a tx descriptor.
2201 * The index of the descriptor in the ring.
2204 * The status of the tx descriptor.
2207 mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
2209 struct mlx5_txq_data *restrict txq = tx_queue;
2212 mlx5_tx_handle_completion(txq, 0);
2213 used = txq->elts_head - txq->elts_tail;
2215 return RTE_ETH_TX_DESC_FULL;
2216 return RTE_ETH_TX_DESC_DONE;
2220 * Build the Control Segment with specified opcode:
2221 * - MLX5_OPCODE_SEND
2222 * - MLX5_OPCODE_ENHANCED_MPSW
2226 * Pointer to TX queue structure.
2228 * Pointer to burst routine local context.
2230 * Pointer to WQE to fill with built Control Segment.
2232 * Supposed length of WQE in segments.
2234 * SQ WQE opcode to put into Control Segment.
2236 * Configured Tx offloads mask. It is fully defined at
2237 * compile time and may be used for optimization.
2239 static __rte_always_inline void
2240 mlx5_tx_cseg_init(struct mlx5_txq_data *restrict txq,
2241 struct mlx5_txq_local *restrict loc __rte_unused,
2242 struct mlx5_wqe *restrict wqe,
2244 unsigned int opcode,
2245 unsigned int olx __rte_unused)
2247 struct mlx5_wqe_cseg *restrict cs = &wqe->cseg;
2249 /* For legacy MPW replace the EMPW by TSO with modifier. */
2250 if (MLX5_TXOFF_CONFIG(MPW) && opcode == MLX5_OPCODE_ENHANCED_MPSW)
2251 opcode = MLX5_OPCODE_TSO | MLX5_OPC_MOD_MPW << 24;
2252 cs->opcode = rte_cpu_to_be_32((txq->wqe_ci << 8) | opcode);
2253 cs->sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
2254 cs->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
2255 MLX5_COMP_MODE_OFFSET);
2256 cs->misc = RTE_BE32(0);
2260 * Build the Ethernet Segment without inlined data.
2261 * Supports Software Parser, Checksums and VLAN
2262 * insertion Tx offload features.
2265 * Pointer to TX queue structure.
2267 * Pointer to burst routine local context.
2269 * Pointer to WQE to fill with built Ethernet Segment.
2271 * Configured Tx offloads mask. It is fully defined at
2272 * compile time and may be used for optimization.
2274 static __rte_always_inline void
2275 mlx5_tx_eseg_none(struct mlx5_txq_data *restrict txq __rte_unused,
2276 struct mlx5_txq_local *restrict loc,
2277 struct mlx5_wqe *restrict wqe,
2280 struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
2284 * Calculate and set checksum flags first, the dword field
2285 * in the segment may be shared with Software Parser flags.
2287 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2288 es->flags = rte_cpu_to_le_32(csum);
2290 * Calculate and set Software Parser offsets and flags.
2291 * These flags are set for custom UDP and IP tunnel packets.
2293 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2294 /* Fill metadata field if needed. */
2295 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2296 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2297 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2298 /* Engage VLAN tag insertion feature if requested. */
2299 if (MLX5_TXOFF_CONFIG(VLAN) &&
2300 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
		/*
		 * We should get here only if the device supports
		 * this feature correctly.
		 */
2305 assert(txq->vlan_en);
2306 es->inline_hdr = rte_cpu_to_be_32(MLX5_ETH_WQE_VLAN_INSERT |
2307 loc->mbuf->vlan_tci);
2309 es->inline_hdr = RTE_BE32(0);
2314 * Build the Ethernet Segment with minimal inlined data
2315 * of MLX5_ESEG_MIN_INLINE_SIZE bytes length. This is
2316 * used to fill the gap in single WQEBB WQEs.
2317 * Supports Software Parser, Checksums and VLAN
2318 * insertion Tx offload features.
2321 * Pointer to TX queue structure.
2323 * Pointer to burst routine local context.
2325 * Pointer to WQE to fill with built Ethernet Segment.
2327 * Length of VLAN tag insertion if any.
2329 * Configured Tx offloads mask. It is fully defined at
2330 * compile time and may be used for optimization.
2332 static __rte_always_inline void
2333 mlx5_tx_eseg_dmin(struct mlx5_txq_data *restrict txq __rte_unused,
2334 struct mlx5_txq_local *restrict loc,
2335 struct mlx5_wqe *restrict wqe,
2339 struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
2341 uint8_t *psrc, *pdst;
2344 * Calculate and set check sum flags first, dword field
2345 * in segment may be shared with Software Parser flags.
2347 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2348 es->flags = rte_cpu_to_le_32(csum);
2350 * Calculate and set Software Parser offsets and flags.
	 * These flags are set for custom UDP and IP tunnel packets.
2353 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2354 /* Fill metadata field if needed. */
2355 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2356 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2357 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
	static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
		      (sizeof(uint16_t) +
		       sizeof(rte_v128u32_t)),
		      "invalid Ethernet Segment data size");
	static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
		      (sizeof(uint16_t) +
		       sizeof(struct rte_vlan_hdr) +
		       2 * RTE_ETHER_ADDR_LEN),
		      "invalid Ethernet Segment data size");
2367 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
2368 es->inline_hdr_sz = RTE_BE16(MLX5_ESEG_MIN_INLINE_SIZE);
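	/*
	 * The minimal inline part (MLX5_ESEG_MIN_INLINE_SIZE, i.e. 2 + 16
	 * bytes per the static asserts above) covers the L2 header: two
	 * bytes go into es->inline_data and the remaining sixteen bytes
	 * are placed right after the Ethernet Segment.
	 */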
2369 es->inline_data = *(unaligned_uint16_t *)psrc;
2370 psrc += sizeof(uint16_t);
2371 pdst = (uint8_t *)(es + 1);
2372 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
		/* Implement VLAN tag insertion as part of the inline data. */
2374 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
2375 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2376 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2377 /* Insert VLAN ethertype + VLAN tag. */
2378 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2379 ((RTE_ETHER_TYPE_VLAN << 16) |
2380 loc->mbuf->vlan_tci);
2381 pdst += sizeof(struct rte_vlan_hdr);
		/* Copy the remaining two bytes from the packet data. */
2383 assert(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
2384 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
2386 /* Fill the gap in the title WQEBB with inline data. */
2387 rte_mov16(pdst, psrc);
2392 * Build the Ethernet Segment with entire packet
2393 * data inlining. Checks the boundary of WQEBB and
2394 * ring buffer wrapping, supports Software Parser,
2395 * Checksums and VLAN insertion Tx offload features.
2398 * Pointer to TX queue structure.
2400 * Pointer to burst routine local context.
2402 * Pointer to WQE to fill with built Ethernet Segment.
2404 * Length of VLAN tag insertion if any.
2406 * Length of data to inline (VLAN included, if any).
2408 * TSO flag, set mss field from the packet.
2410 * Configured Tx offloads mask. It is fully defined at
2411 * compile time and may be used for optimization.
2414 * Pointer to the next Data Segment (aligned and wrapped around).
2416 static __rte_always_inline struct mlx5_wqe_dseg *
2417 mlx5_tx_eseg_data(struct mlx5_txq_data *restrict txq,
2418 struct mlx5_txq_local *restrict loc,
2419 struct mlx5_wqe *restrict wqe,
2425 struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
2427 uint8_t *psrc, *pdst;
2431 * Calculate and set check sum flags first, dword field
2432 * in segment may be shared with Software Parser flags.
2434 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
	if (tso) {
		csum <<= 24;
		csum |= loc->mbuf->tso_segsz;
		es->flags = rte_cpu_to_be_32(csum);
	} else {
		es->flags = rte_cpu_to_le_32(csum);
	}
2443 * Calculate and set Software Parser offsets and flags.
	 * These flags are set for custom UDP and IP tunnel packets.
2446 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2447 /* Fill metadata field if needed. */
2448 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2449 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2450 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
	static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
		      (sizeof(uint16_t) +
		       sizeof(rte_v128u32_t)),
		      "invalid Ethernet Segment data size");
	static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
		      (sizeof(uint16_t) +
		       sizeof(struct rte_vlan_hdr) +
		       2 * RTE_ETHER_ADDR_LEN),
		      "invalid Ethernet Segment data size");
2460 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
2461 es->inline_hdr_sz = rte_cpu_to_be_16(inlen);
2462 es->inline_data = *(unaligned_uint16_t *)psrc;
2463 psrc += sizeof(uint16_t);
2464 pdst = (uint8_t *)(es + 1);
2465 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
		/* Implement VLAN tag insertion as part of the inline data. */
2467 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
2468 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2469 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2470 /* Insert VLAN ethertype + VLAN tag. */
2471 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2472 ((RTE_ETHER_TYPE_VLAN << 16) |
2473 loc->mbuf->vlan_tci);
2474 pdst += sizeof(struct rte_vlan_hdr);
		/* Copy the remaining two bytes from the packet data. */
2476 assert(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
2477 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
2478 psrc += sizeof(uint16_t);
2480 /* Fill the gap in the title WQEBB with inline data. */
2481 rte_mov16(pdst, psrc);
2482 psrc += sizeof(rte_v128u32_t);
2484 pdst = (uint8_t *)(es + 2);
2485 assert(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
2486 assert(pdst < (uint8_t *)txq->wqes_end);
2487 inlen -= MLX5_ESEG_MIN_INLINE_SIZE;
	if (!inlen) {
		assert(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
		return (struct mlx5_wqe_dseg *)pdst;
	}
2493 * The WQEBB space availability is checked by caller.
2494 * Here we should be aware of WQE ring buffer wraparound only.
2496 part = (uint8_t *)txq->wqes_end - pdst;
2497 part = RTE_MIN(part, inlen);
2499 rte_memcpy(pdst, psrc, part);
2501 if (likely(!inlen)) {
2503 * If return value is not used by the caller
2504 * the code below will be optimized out.
2507 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2508 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
2509 pdst = (uint8_t *)txq->wqes;
2510 return (struct mlx5_wqe_dseg *)pdst;
2512 pdst = (uint8_t *)txq->wqes;
2519 * Copy data from chain of mbuf to the specified linear buffer.
 * If data from some mbuf is copied completely, that mbuf is freed.
 * The local structure is used to keep the byte stream state.
2525 * Pointer to the destination linear buffer.
2527 * Pointer to burst routine local context.
2529 * Length of data to be copied.
2531 * Configured Tx offloads mask. It is fully defined at
2532 * compile time and may be used for optimization.
2534 static __rte_always_inline void
2535 mlx5_tx_mseg_memcpy(uint8_t *pdst,
2536 struct mlx5_txq_local *restrict loc,
2538 unsigned int olx __rte_unused)
2540 struct rte_mbuf *mbuf;
	unsigned int part, dlen;
	uint8_t *psrc;
2546 /* Allow zero length packets, must check first. */
2547 dlen = rte_pktmbuf_data_len(loc->mbuf);
2548 if (dlen <= loc->mbuf_off) {
2549 /* Exhausted packet, just free. */
			mbuf = loc->mbuf;
			loc->mbuf = mbuf->next;
2552 rte_pktmbuf_free_seg(mbuf);
2554 assert(loc->mbuf_nseg > 1);
2559 dlen -= loc->mbuf_off;
2560 psrc = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
2562 part = RTE_MIN(len, dlen);
2563 rte_memcpy(pdst, psrc, part);
2564 loc->mbuf_off += part;
2567 if (loc->mbuf_off >= rte_pktmbuf_data_len(loc->mbuf)) {
2569 /* Exhausted packet, just free. */
				mbuf = loc->mbuf;
				loc->mbuf = mbuf->next;
2572 rte_pktmbuf_free_seg(mbuf);
2574 assert(loc->mbuf_nseg >= 1);
2584 * Build the Ethernet Segment with inlined data from
2585 * multi-segment packet. Checks the boundary of WQEBB
2586 * and ring buffer wrapping, supports Software Parser,
2587 * Checksums and VLAN insertion Tx offload features.
2590 * Pointer to TX queue structure.
2592 * Pointer to burst routine local context.
2594 * Pointer to WQE to fill with built Ethernet Segment.
2596 * Length of VLAN tag insertion if any.
2598 * Length of data to inline (VLAN included, if any).
2600 * TSO flag, set mss field from the packet.
2602 * Configured Tx offloads mask. It is fully defined at
2603 * compile time and may be used for optimization.
2606 * Pointer to the next Data Segment (aligned and
 *   possibly NOT wrapped around - the caller should do the
2608 * wrapping check on its own).
2610 static __rte_always_inline struct mlx5_wqe_dseg *
2611 mlx5_tx_eseg_mdat(struct mlx5_txq_data *restrict txq,
2612 struct mlx5_txq_local *restrict loc,
2613 struct mlx5_wqe *restrict wqe,
2619 struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
2625 * Calculate and set check sum flags first, uint32_t field
2626 * in segment may be shared with Software Parser flags.
2628 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
	if (tso) {
		csum <<= 24;
		csum |= loc->mbuf->tso_segsz;
		es->flags = rte_cpu_to_be_32(csum);
	} else {
		es->flags = rte_cpu_to_le_32(csum);
	}
2637 * Calculate and set Software Parser offsets and flags.
	 * These flags are set for custom UDP and IP tunnel packets.
2640 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2641 /* Fill metadata field if needed. */
2642 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2643 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2644 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
	static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
		      (sizeof(uint16_t) +
		       sizeof(rte_v128u32_t)),
		      "invalid Ethernet Segment data size");
	static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
		      (sizeof(uint16_t) +
		       sizeof(struct rte_vlan_hdr) +
		       2 * RTE_ETHER_ADDR_LEN),
		      "invalid Ethernet Segment data size");
2654 assert(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
2655 es->inline_hdr_sz = rte_cpu_to_be_16(inlen);
2656 pdst = (uint8_t *)&es->inline_data;
2657 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
		/* Implement VLAN tag insertion as part of the inline data. */
2659 mlx5_tx_mseg_memcpy(pdst, loc, 2 * RTE_ETHER_ADDR_LEN, olx);
2660 pdst += 2 * RTE_ETHER_ADDR_LEN;
2661 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2662 ((RTE_ETHER_TYPE_VLAN << 16) |
2663 loc->mbuf->vlan_tci);
2664 pdst += sizeof(struct rte_vlan_hdr);
2665 inlen -= 2 * RTE_ETHER_ADDR_LEN + sizeof(struct rte_vlan_hdr);
2667 assert(pdst < (uint8_t *)txq->wqes_end);
2669 * The WQEBB space availability is checked by caller.
2670 * Here we should be aware of WQE ring buffer wraparound only.
2672 part = (uint8_t *)txq->wqes_end - pdst;
2673 part = RTE_MIN(part, inlen);
2676 mlx5_tx_mseg_memcpy(pdst, loc, part, olx);
2678 if (likely(!inlen)) {
2680 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2681 return (struct mlx5_wqe_dseg *)pdst;
2683 pdst = (uint8_t *)txq->wqes;
2689 * Build the Data Segment of pointer type.
2692 * Pointer to TX queue structure.
2694 * Pointer to burst routine local context.
2696 * Pointer to WQE to fill with built Data Segment.
2698 * Data buffer to point.
2700 * Data buffer length.
2702 * Configured Tx offloads mask. It is fully defined at
2703 * compile time and may be used for optimization.
2705 static __rte_always_inline void
2706 mlx5_tx_dseg_ptr(struct mlx5_txq_data *restrict txq,
2707 struct mlx5_txq_local *restrict loc,
2708 struct mlx5_wqe_dseg *restrict dseg,
2711 unsigned int olx __rte_unused)
2715 dseg->bcount = rte_cpu_to_be_32(len);
2716 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
2717 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
2721 * Build the Data Segment of pointer type or inline
2722 * if data length is less than buffer in minimal
2723 * Data Segment size.
2726 * Pointer to TX queue structure.
2728 * Pointer to burst routine local context.
2730 * Pointer to WQE to fill with built Data Segment.
2732 * Data buffer to point.
2734 * Data buffer length.
2736 * Configured Tx offloads mask. It is fully defined at
2737 * compile time and may be used for optimization.
2739 static __rte_always_inline void
2740 mlx5_tx_dseg_iptr(struct mlx5_txq_data *restrict txq,
2741 struct mlx5_txq_local *restrict loc,
2742 struct mlx5_wqe_dseg *restrict dseg,
2745 unsigned int olx __rte_unused)
	uintptr_t dst, src;

	if (len > MLX5_DSEG_MIN_INLINE_SIZE) {
		dseg->bcount = rte_cpu_to_be_32(len);
		dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
		dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
		return;
	}
2758 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
2759 /* Unrolled implementation of generic rte_memcpy. */
2760 dst = (uintptr_t)&dseg->inline_data[0];
2761 src = (uintptr_t)buf;
	if (len & 0x08) {
#ifdef RTE_ARCH_STRICT_ALIGN
		assert(dst == RTE_PTR_ALIGN(dst, sizeof(uint32_t)));
		*(uint32_t *)dst = *(unaligned_uint32_t *)src;
		dst += sizeof(uint32_t);
		src += sizeof(uint32_t);
		*(uint32_t *)dst = *(unaligned_uint32_t *)src;
		dst += sizeof(uint32_t);
		src += sizeof(uint32_t);
#else
		*(uint64_t *)dst = *(unaligned_uint64_t *)src;
		dst += sizeof(uint64_t);
		src += sizeof(uint64_t);
#endif
	}
	if (len & 0x04) {
		*(uint32_t *)dst = *(unaligned_uint32_t *)src;
		dst += sizeof(uint32_t);
		src += sizeof(uint32_t);
	}
	if (len & 0x02) {
		*(uint16_t *)dst = *(unaligned_uint16_t *)src;
		dst += sizeof(uint16_t);
		src += sizeof(uint16_t);
	}
	if (len & 0x01)
		*(uint8_t *)dst = *(uint8_t *)src;
2792 * Build the Data Segment of inlined data from single
2793 * segment packet, no VLAN insertion.
2796 * Pointer to TX queue structure.
2798 * Pointer to burst routine local context.
2800 * Pointer to WQE to fill with built Data Segment.
2802 * Data buffer to point.
2804 * Data buffer length.
2806 * Configured Tx offloads mask. It is fully defined at
2807 * compile time and may be used for optimization.
2810 * Pointer to the next Data Segment after inlined data.
2811 * Ring buffer wraparound check is needed. We do not
2812 * do it here because it may not be needed for the
2813 * last packet in the eMPW session.
2815 static __rte_always_inline struct mlx5_wqe_dseg *
2816 mlx5_tx_dseg_empw(struct mlx5_txq_data *restrict txq,
2817 struct mlx5_txq_local *restrict loc __rte_unused,
2818 struct mlx5_wqe_dseg *restrict dseg,
2821 unsigned int olx __rte_unused)
2826 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
2827 pdst = &dseg->inline_data[0];
2829 * The WQEBB space availability is checked by caller.
2830 * Here we should be aware of WQE ring buffer wraparound only.
2832 part = (uint8_t *)txq->wqes_end - pdst;
2833 part = RTE_MIN(part, len);
2835 rte_memcpy(pdst, buf, part);
2839 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2840 /* Note: no final wraparound check here. */
2841 return (struct mlx5_wqe_dseg *)pdst;
2843 pdst = (uint8_t *)txq->wqes;
2850 * Build the Data Segment of inlined data from single
2851 * segment packet with VLAN insertion.
2854 * Pointer to TX queue structure.
2856 * Pointer to burst routine local context.
2858 * Pointer to the dseg fill with built Data Segment.
2860 * Data buffer to point.
2862 * Data buffer length.
2864 * Configured Tx offloads mask. It is fully defined at
2865 * compile time and may be used for optimization.
2868 * Pointer to the next Data Segment after inlined data.
2869 * Ring buffer wraparound check is needed.
2871 static __rte_always_inline struct mlx5_wqe_dseg *
2872 mlx5_tx_dseg_vlan(struct mlx5_txq_data *restrict txq,
2873 struct mlx5_txq_local *restrict loc __rte_unused,
2874 struct mlx5_wqe_dseg *restrict dseg,
2877 unsigned int olx __rte_unused)
2883 assert(len > MLX5_ESEG_MIN_INLINE_SIZE);
2884 static_assert(MLX5_DSEG_MIN_INLINE_SIZE ==
2885 (2 * RTE_ETHER_ADDR_LEN),
2886 "invalid Data Segment data size");
2887 dseg->bcount = rte_cpu_to_be_32((len + sizeof(struct rte_vlan_hdr)) |
2888 MLX5_ETH_WQE_DATA_INLINE);
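	/*
	 * The byte count covers the packet data plus the 4-byte VLAN header
	 * inserted below, so the device sees one contiguous inlined buffer.
	 * The first MLX5_DSEG_MIN_INLINE_SIZE bytes (both MAC addresses) are
	 * copied as-is and the tag is placed right after them.
	 */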
2889 pdst = &dseg->inline_data[0];
2890 memcpy(pdst, buf, MLX5_DSEG_MIN_INLINE_SIZE);
2891 buf += MLX5_DSEG_MIN_INLINE_SIZE;
2892 pdst += MLX5_DSEG_MIN_INLINE_SIZE;
2893 len -= MLX5_DSEG_MIN_INLINE_SIZE;
2894 /* Insert VLAN ethertype + VLAN tag. Pointer is aligned. */
2895 assert(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
2896 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
2897 pdst = (uint8_t *)txq->wqes;
2898 *(uint32_t *)pdst = rte_cpu_to_be_32((RTE_ETHER_TYPE_VLAN << 16) |
2899 loc->mbuf->vlan_tci);
2900 pdst += sizeof(struct rte_vlan_hdr);
2902 * The WQEBB space availability is checked by caller.
2903 * Here we should be aware of WQE ring buffer wraparound only.
2905 part = (uint8_t *)txq->wqes_end - pdst;
2906 part = RTE_MIN(part, len);
2908 rte_memcpy(pdst, buf, part);
2912 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2913 /* Note: no final wraparound check here. */
2914 return (struct mlx5_wqe_dseg *)pdst;
2916 pdst = (uint8_t *)txq->wqes;
2923 * Build the Ethernet Segment with optionally inlined data with
2924 * VLAN insertion and following Data Segments (if any) from
2925 * multi-segment packet. Used by ordinary send and TSO.
2928 * Pointer to TX queue structure.
2930 * Pointer to burst routine local context.
2932 * Pointer to WQE to fill with built Ethernet/Data Segments.
2934 * Length of VLAN header to insert, 0 means no VLAN insertion.
 *   Data length to inline. For TSO this parameter specifies the
 *   exact value, for the ordinary send routine it can be aligned up
 *   by the caller to provide better WQE space saving and data buffer
 *   start address alignment. This length includes the VLAN header
 *   being inserted.
2942 * Zero means ordinary send, inlined data can be extended,
2943 * otherwise this is TSO, inlined data length is fixed.
2945 * Configured Tx offloads mask. It is fully defined at
2946 * compile time and may be used for optimization.
2949 * Actual size of built WQE in segments.
2951 static __rte_always_inline unsigned int
2952 mlx5_tx_mseg_build(struct mlx5_txq_data *restrict txq,
2953 struct mlx5_txq_local *restrict loc,
2954 struct mlx5_wqe *restrict wqe,
2958 unsigned int olx __rte_unused)
2960 struct mlx5_wqe_dseg *restrict dseg;
2963 assert((rte_pktmbuf_pkt_len(loc->mbuf) + vlan) >= inlen);
2964 loc->mbuf_nseg = NB_SEGS(loc->mbuf);
2967 dseg = mlx5_tx_eseg_mdat(txq, loc, wqe, vlan, inlen, tso, olx);
2968 if (!loc->mbuf_nseg)
	 * There are still some mbufs remaining, not inlined.
2972 * The first mbuf may be partially inlined and we
2973 * must process the possible non-zero data offset.
2975 if (loc->mbuf_off) {
2980 * Exhausted packets must be dropped before.
		 * A non-zero offset means there is some data
		 * remaining in the packet.
2984 assert(loc->mbuf_off < rte_pktmbuf_data_len(loc->mbuf));
2985 assert(rte_pktmbuf_data_len(loc->mbuf));
2986 dptr = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
2988 dlen = rte_pktmbuf_data_len(loc->mbuf) - loc->mbuf_off;
2990 * Build the pointer/minimal data Data Segment.
2991 * Do ring buffer wrapping check in advance.
2993 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
2994 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
2995 mlx5_tx_dseg_iptr(txq, loc, dseg, dptr, dlen, olx);
2996 /* Store the mbuf to be freed on completion. */
2997 assert(loc->elts_free);
2998 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3001 if (--loc->mbuf_nseg == 0)
3003 loc->mbuf = loc->mbuf->next;
3007 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
3008 struct rte_mbuf *mbuf;
3010 /* Zero length segment found, just skip. */
			mbuf = loc->mbuf;
			loc->mbuf = loc->mbuf->next;
3013 rte_pktmbuf_free_seg(mbuf);
3014 if (--loc->mbuf_nseg == 0)
3017 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3018 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3021 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3022 rte_pktmbuf_data_len(loc->mbuf), olx);
3023 assert(loc->elts_free);
3024 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3027 if (--loc->mbuf_nseg == 0)
3029 loc->mbuf = loc->mbuf->next;
3034 /* Calculate actual segments used from the dseg pointer. */
3035 if ((uintptr_t)wqe < (uintptr_t)dseg)
3036 ds = ((uintptr_t)dseg - (uintptr_t)wqe) / MLX5_WSEG_SIZE;
3038 ds = (((uintptr_t)dseg - (uintptr_t)wqe) +
3039 txq->wqe_s * MLX5_WQE_SIZE) / MLX5_WSEG_SIZE;
3044 * Tx one packet function for multi-segment TSO. Supports all
3045 * types of Tx offloads, uses MLX5_OPCODE_TSO to build WQEs,
3046 * sends one packet per WQE.
 * This routine is responsible for storing the processed mbuf
 * into the elts ring buffer and updating elts_head.
3052 * Pointer to TX queue structure.
3054 * Pointer to burst routine local context.
3056 * Configured Tx offloads mask. It is fully defined at
3057 * compile time and may be used for optimization.
3060 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3061 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3062 * Local context variables partially updated.
3064 static __rte_always_inline enum mlx5_txcmp_code
3065 mlx5_tx_packet_multi_tso(struct mlx5_txq_data *restrict txq,
3066 struct mlx5_txq_local *restrict loc,
3069 struct mlx5_wqe *restrict wqe;
3070 unsigned int ds, dlen, inlen, ntcp, vlan = 0;
3073 * Calculate data length to be inlined to estimate
3074 * the required space in WQE ring buffer.
3076 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
3077 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3078 vlan = sizeof(struct rte_vlan_hdr);
3079 inlen = loc->mbuf->l2_len + vlan +
3080 loc->mbuf->l3_len + loc->mbuf->l4_len;
3081 if (unlikely((!inlen || !loc->mbuf->tso_segsz)))
3082 return MLX5_TXCMP_CODE_ERROR;
3083 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
3084 inlen += loc->mbuf->outer_l2_len + loc->mbuf->outer_l3_len;
3085 /* Packet must contain all TSO headers. */
3086 if (unlikely(inlen > MLX5_MAX_TSO_HEADER ||
3087 inlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
3088 inlen > (dlen + vlan)))
3089 return MLX5_TXCMP_CODE_ERROR;
3090 assert(inlen >= txq->inlen_mode);
3092 * Check whether there are enough free WQEBBs:
3094 * - Ethernet Segment
3095 * - First Segment of inlined Ethernet data
3096 * - ... data continued ...
3097 * - Data Segments of pointer/min inline type
3099 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
3100 MLX5_ESEG_MIN_INLINE_SIZE +
3102 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
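	/*
	 * Rough budget: the Control Segment and the Ethernet Segment with
	 * its minimal inline part come first, every extra 16-byte chunk of
	 * inlined headers adds one more segment and every mbuf of the chain
	 * conservatively adds one pointer Data Segment; four segments make
	 * up one WQEBB.
	 */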
3103 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3104 return MLX5_TXCMP_CODE_EXIT;
3105 /* Check for maximal WQE size. */
3106 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3107 return MLX5_TXCMP_CODE_ERROR;
3108 #ifdef MLX5_PMD_SOFT_COUNTERS
3109 /* Update sent data bytes/packets counters. */
3110 ntcp = (dlen - (inlen - vlan) + loc->mbuf->tso_segsz - 1) /
3111 loc->mbuf->tso_segsz;
	/*
	 * One will be added for the mbuf itself at the end of
	 * mlx5_tx_burst from the loc->pkts_sent field.
	 */
	--ntcp;
	txq->stats.opackets += ntcp;
	txq->stats.obytes += dlen + vlan + ntcp * inlen;
#endif
3121 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3122 loc->wqe_last = wqe;
3123 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_TSO, olx);
3124 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 1, olx);
3125 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3126 txq->wqe_ci += (ds + 3) / 4;
3127 loc->wqe_free -= (ds + 3) / 4;
3128 return MLX5_TXCMP_CODE_MULTI;
3132 * Tx one packet function for multi-segment SEND. Supports all
3133 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
 * sends one packet per WQE, without any data inlining in
 * the Ethernet Segment.
 *
 * This routine is responsible for storing the processed mbuf
 * into the elts ring buffer and updating elts_head.
3141 * Pointer to TX queue structure.
3143 * Pointer to burst routine local context.
3145 * Configured Tx offloads mask. It is fully defined at
3146 * compile time and may be used for optimization.
3149 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3150 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3151 * Local context variables partially updated.
3153 static __rte_always_inline enum mlx5_txcmp_code
3154 mlx5_tx_packet_multi_send(struct mlx5_txq_data *restrict txq,
3155 struct mlx5_txq_local *restrict loc,
3158 struct mlx5_wqe_dseg *restrict dseg;
3159 struct mlx5_wqe *restrict wqe;
3160 unsigned int ds, nseg;
3162 assert(NB_SEGS(loc->mbuf) > 1);
	/*
	 * No inlining at all: it means saving CPU cycles was
	 * prioritized at configuration time, we should not copy
	 * any packet data into the WQE.
	 */
	nseg = NB_SEGS(loc->mbuf);
	ds = 2 + nseg;
3170 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3171 return MLX5_TXCMP_CODE_EXIT;
3172 /* Check for maximal WQE size. */
3173 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3174 return MLX5_TXCMP_CODE_ERROR;
3176 * Some Tx offloads may cause an error if
3177 * packet is not long enough, check against
3178 * assumed minimal length.
3180 if (rte_pktmbuf_pkt_len(loc->mbuf) <= MLX5_ESEG_MIN_INLINE_SIZE)
3181 return MLX5_TXCMP_CODE_ERROR;
3182 #ifdef MLX5_PMD_SOFT_COUNTERS
3183 /* Update sent data bytes counter. */
3184 txq->stats.obytes += rte_pktmbuf_pkt_len(loc->mbuf);
3185 if (MLX5_TXOFF_CONFIG(VLAN) &&
3186 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
		txq->stats.obytes += sizeof(struct rte_vlan_hdr);
#endif
	/*
3190 * SEND WQE, one WQEBB:
3191 * - Control Segment, SEND opcode
3192 * - Ethernet Segment, optional VLAN, no inline
3193 * - Data Segments, pointer only type
3195 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3196 loc->wqe_last = wqe;
3197 mlx5_tx_cseg_init(txq, loc, wqe, ds, MLX5_OPCODE_SEND, olx);
3198 mlx5_tx_eseg_none(txq, loc, wqe, olx);
3199 dseg = &wqe->dseg[0];
3201 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
3202 struct rte_mbuf *mbuf;
3205 * Zero length segment found, have to
3206 * correct total size of WQE in segments.
3207 * It is supposed to be rare occasion, so
3208 * in normal case (no zero length segments)
			 * we avoid extra writing to the Control
			 * Segment.
			 */
			--ds;
			wqe->cseg.sq_ds -= RTE_BE32(1);
			mbuf = loc->mbuf;
			loc->mbuf = mbuf->next;
3216 rte_pktmbuf_free_seg(mbuf);
3222 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3223 rte_pktmbuf_data_len(loc->mbuf), olx);
3224 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3229 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3230 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3231 loc->mbuf = loc->mbuf->next;
3234 txq->wqe_ci += (ds + 3) / 4;
3235 loc->wqe_free -= (ds + 3) / 4;
3236 return MLX5_TXCMP_CODE_MULTI;
3240 * Tx one packet function for multi-segment SEND. Supports all
3241 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
3242 * sends one packet per WQE, with data inlining in
3243 * Ethernet Segment and minimal Data Segments.
 * This routine is responsible for storing the processed mbuf
 * into the elts ring buffer and updating elts_head.
3249 * Pointer to TX queue structure.
3251 * Pointer to burst routine local context.
3253 * Configured Tx offloads mask. It is fully defined at
3254 * compile time and may be used for optimization.
3257 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3258 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3259 * Local context variables partially updated.
3261 static __rte_always_inline enum mlx5_txcmp_code
3262 mlx5_tx_packet_multi_inline(struct mlx5_txq_data *restrict txq,
3263 struct mlx5_txq_local *restrict loc,
3266 struct mlx5_wqe *restrict wqe;
3267 unsigned int ds, inlen, dlen, vlan = 0;
3269 assert(MLX5_TXOFF_CONFIG(INLINE));
3270 assert(NB_SEGS(loc->mbuf) > 1);
3272 * First calculate data length to be inlined
3273 * to estimate the required space for WQE.
3275 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
3276 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3277 vlan = sizeof(struct rte_vlan_hdr);
3278 inlen = dlen + vlan;
3279 /* Check against minimal length. */
3280 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
3281 return MLX5_TXCMP_CODE_ERROR;
3282 assert(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
3283 if (inlen > txq->inlen_send) {
3284 struct rte_mbuf *mbuf;
3289 * Packet length exceeds the allowed inline
3290 * data length, check whether the minimal
3291 * inlining is required.
3293 if (txq->inlen_mode) {
3294 assert(txq->inlen_mode >= MLX5_ESEG_MIN_INLINE_SIZE);
3295 assert(txq->inlen_mode <= txq->inlen_send);
3296 inlen = txq->inlen_mode;
3298 if (!vlan || txq->vlan_en) {
				/*
				 * VLAN insertion will be done inside by HW.
				 * It is not the most efficient - the VLAN flag
				 * is checked twice, but we must compute the
				 * inlining length correctly and take into
				 * account the VLAN header being inserted.
				 */
				return mlx5_tx_packet_multi_send
							(txq, loc, olx);
			}
3309 inlen = MLX5_ESEG_MIN_INLINE_SIZE;
3312 * Now we know the minimal amount of data is requested
3313 * to inline. Check whether we should inline the buffers
3314 * from the chain beginning to eliminate some mbufs.
3317 nxlen = rte_pktmbuf_data_len(mbuf);
3318 if (unlikely(nxlen <= txq->inlen_send)) {
3319 /* We can inline first mbuf at least. */
3320 if (nxlen < inlen) {
3323 /* Scan mbufs till inlen filled. */
3328 nxlen = rte_pktmbuf_data_len(mbuf);
3330 } while (unlikely(nxlen < inlen));
3331 if (unlikely(nxlen > txq->inlen_send)) {
3332 /* We cannot inline entire mbuf. */
3333 smlen = inlen - smlen;
3334 start = rte_pktmbuf_mtod_offset
3335 (mbuf, uintptr_t, smlen);
			/* There should be no end of the packet here. */
3344 nxlen = inlen + rte_pktmbuf_data_len(mbuf);
3345 } while (unlikely(nxlen < txq->inlen_send));
3347 start = rte_pktmbuf_mtod(mbuf, uintptr_t);
3349 * Check whether we can do inline to align start
3350 * address of data buffer to cacheline.
3353 start = (~start + 1) & (RTE_CACHE_LINE_SIZE - 1);
3354 if (unlikely(start)) {
3356 if (start <= txq->inlen_send)
3361 * Check whether there are enough free WQEBBs:
3363 * - Ethernet Segment
3364 * - First Segment of inlined Ethernet data
3365 * - ... data continued ...
3366 * - Data Segments of pointer/min inline type
3368 * Estimate the number of Data Segments conservatively,
	 * assuming that no mbufs are freed during inlining.
	 */
3371 assert(inlen <= txq->inlen_send);
3372 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
3373 MLX5_ESEG_MIN_INLINE_SIZE +
3375 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3376 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3377 return MLX5_TXCMP_CODE_EXIT;
3378 /* Check for maximal WQE size. */
3379 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3380 return MLX5_TXCMP_CODE_ERROR;
3381 #ifdef MLX5_PMD_SOFT_COUNTERS
3382 /* Update sent data bytes/packets counters. */
	txq->stats.obytes += dlen + vlan;
#endif
3385 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3386 loc->wqe_last = wqe;
3387 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_SEND, olx);
3388 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 0, olx);
3389 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3390 txq->wqe_ci += (ds + 3) / 4;
3391 loc->wqe_free -= (ds + 3) / 4;
3392 return MLX5_TXCMP_CODE_MULTI;
3396 * Tx burst function for multi-segment packets. Supports all
3397 * types of Tx offloads, uses MLX5_OPCODE_SEND/TSO to build WQEs,
 * sends one packet per WQE. The function stops sending if it
 * encounters a single-segment packet.
 *
 * This routine is responsible for storing the processed mbuf
 * into the elts ring buffer and updating elts_head.
3405 * Pointer to TX queue structure.
3407 * Packets to transmit.
3409 * Number of packets in array.
3411 * Pointer to burst routine local context.
3413 * Configured Tx offloads mask. It is fully defined at
3414 * compile time and may be used for optimization.
3417 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3418 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3419 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
3420 * MLX5_TXCMP_CODE_TSO - TSO single-segment packet encountered.
3421 * Local context variables updated.
3423 static __rte_always_inline enum mlx5_txcmp_code
3424 mlx5_tx_burst_mseg(struct mlx5_txq_data *restrict txq,
3425 struct rte_mbuf **restrict pkts,
3426 unsigned int pkts_n,
3427 struct mlx5_txq_local *restrict loc,
3430 assert(loc->elts_free && loc->wqe_free);
3431 assert(pkts_n > loc->pkts_sent);
3432 pkts += loc->pkts_sent + 1;
3433 pkts_n -= loc->pkts_sent;
3435 enum mlx5_txcmp_code ret;
3437 assert(NB_SEGS(loc->mbuf) > 1);
3439 * Estimate the number of free elts quickly but
3440 * conservatively. Some segment may be fully inlined
		 * and freed, ignore this here - precise estimation
		 * is impossible.
		 */
3444 if (loc->elts_free < NB_SEGS(loc->mbuf))
3445 return MLX5_TXCMP_CODE_EXIT;
3446 if (MLX5_TXOFF_CONFIG(TSO) &&
3447 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)) {
3448 /* Proceed with multi-segment TSO. */
3449 ret = mlx5_tx_packet_multi_tso(txq, loc, olx);
3450 } else if (MLX5_TXOFF_CONFIG(INLINE)) {
3451 /* Proceed with multi-segment SEND with inlining. */
3452 ret = mlx5_tx_packet_multi_inline(txq, loc, olx);
3454 /* Proceed with multi-segment SEND w/o inlining. */
3455 ret = mlx5_tx_packet_multi_send(txq, loc, olx);
3457 if (ret == MLX5_TXCMP_CODE_EXIT)
3458 return MLX5_TXCMP_CODE_EXIT;
3459 if (ret == MLX5_TXCMP_CODE_ERROR)
3460 return MLX5_TXCMP_CODE_ERROR;
3461 /* WQE is built, go to the next packet. */
3464 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3465 return MLX5_TXCMP_CODE_EXIT;
3466 loc->mbuf = *pkts++;
3468 rte_prefetch0(*pkts);
3469 if (likely(NB_SEGS(loc->mbuf) > 1))
3471 /* Here ends the series of multi-segment packets. */
3472 if (MLX5_TXOFF_CONFIG(TSO) &&
3473 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
3474 return MLX5_TXCMP_CODE_TSO;
3475 return MLX5_TXCMP_CODE_SINGLE;
3481 * Tx burst function for single-segment packets with TSO.
3482 * Supports all types of Tx offloads, except multi-packets.
3483 * Uses MLX5_OPCODE_TSO to build WQEs, sends one packet per WQE.
3484 * Function stops sending if it encounters the multi-segment
3485 * packet or packet without TSO requested.
 * The routine is responsible for storing the processed mbuf into the
 * elts ring buffer and updating elts_head if the inline offload is
 * requested, due to possible early freeing of the inlined mbufs
 * (the pkts array can not be stored in elts as a batch).
3494 * Pointer to TX queue structure.
3496 * Packets to transmit.
3498 * Number of packets in array.
3500 * Pointer to burst routine local context.
3502 * Configured Tx offloads mask. It is fully defined at
3503 * compile time and may be used for optimization.
3506 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3507 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3508 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
3509 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
3510 * Local context variables updated.
3512 static __rte_always_inline enum mlx5_txcmp_code
3513 mlx5_tx_burst_tso(struct mlx5_txq_data *restrict txq,
3514 struct rte_mbuf **restrict pkts,
3515 unsigned int pkts_n,
3516 struct mlx5_txq_local *restrict loc,
3519 assert(loc->elts_free && loc->wqe_free);
3520 assert(pkts_n > loc->pkts_sent);
3521 pkts += loc->pkts_sent + 1;
3522 pkts_n -= loc->pkts_sent;
3524 struct mlx5_wqe_dseg *restrict dseg;
3525 struct mlx5_wqe *restrict wqe;
3526 unsigned int ds, dlen, hlen, ntcp, vlan = 0;
3529 assert(NB_SEGS(loc->mbuf) == 1);
3530 dlen = rte_pktmbuf_data_len(loc->mbuf);
3531 if (MLX5_TXOFF_CONFIG(VLAN) &&
3532 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
3533 vlan = sizeof(struct rte_vlan_hdr);
3536 * First calculate the WQE size to check
3537 * whether we have enough space in ring buffer.
3539 hlen = loc->mbuf->l2_len + vlan +
3540 loc->mbuf->l3_len + loc->mbuf->l4_len;
3541 if (unlikely((!hlen || !loc->mbuf->tso_segsz)))
3542 return MLX5_TXCMP_CODE_ERROR;
3543 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
3544 hlen += loc->mbuf->outer_l2_len +
3545 loc->mbuf->outer_l3_len;
3546 /* Segment must contain all TSO headers. */
3547 if (unlikely(hlen > MLX5_MAX_TSO_HEADER ||
3548 hlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
3549 hlen > (dlen + vlan)))
3550 return MLX5_TXCMP_CODE_ERROR;
3552 * Check whether there are enough free WQEBBs:
3554 * - Ethernet Segment
3555 * - First Segment of inlined Ethernet data
3556 * - ... data continued ...
3557 * - Finishing Data Segment of pointer type
3559 ds = 4 + (hlen - MLX5_ESEG_MIN_INLINE_SIZE +
3560 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
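		/*
		 * Worked example (illustrative, assuming an 18-byte minimal
		 * inline size and 16-byte WQE segments): hlen = 66 header
		 * bytes gives ds = 4 + (66 - 18 + 15) / 16 = 7 segments,
		 * i.e. (7 + 3) / 4 = 2 WQEBBs.
		 */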
3561 if (loc->wqe_free < ((ds + 3) / 4))
3562 return MLX5_TXCMP_CODE_EXIT;
3563 #ifdef MLX5_PMD_SOFT_COUNTERS
3564 /* Update sent data bytes/packets counters. */
3565 ntcp = (dlen + vlan - hlen +
3566 loc->mbuf->tso_segsz - 1) /
3567 loc->mbuf->tso_segsz;
		/*
		 * One will be added for the mbuf itself at the end
		 * of mlx5_tx_burst from the loc->pkts_sent field.
		 */
		--ntcp;
		txq->stats.opackets += ntcp;
		txq->stats.obytes += dlen + vlan + ntcp * hlen;
#endif
3577 * Build the TSO WQE:
3579 * - Ethernet Segment with hlen bytes inlined
3580 * - Data Segment of pointer type
3582 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3583 loc->wqe_last = wqe;
3584 mlx5_tx_cseg_init(txq, loc, wqe, ds,
3585 MLX5_OPCODE_TSO, olx);
3586 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan, hlen, 1, olx);
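		/*
		 * The headers (hlen bytes, minus the VLAN tag which is built
		 * from vlan_tci rather than copied from the packet) were
		 * inlined above, so the pointer Data Segment below covers
		 * only the remaining payload.
		 */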
3587 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) + hlen - vlan;
3588 dlen -= hlen - vlan;
3589 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
3591 * WQE is built, update the loop parameters
3592 * and go to the next packet.
3594 txq->wqe_ci += (ds + 3) / 4;
3595 loc->wqe_free -= (ds + 3) / 4;
3596 if (MLX5_TXOFF_CONFIG(INLINE))
3597 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3601 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3602 return MLX5_TXCMP_CODE_EXIT;
3603 loc->mbuf = *pkts++;
3605 rte_prefetch0(*pkts);
3606 if (MLX5_TXOFF_CONFIG(MULTI) &&
3607 unlikely(NB_SEGS(loc->mbuf) > 1))
3608 return MLX5_TXCMP_CODE_MULTI;
3609 if (likely(!(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)))
3610 return MLX5_TXCMP_CODE_SINGLE;
3611 /* Continue with the next TSO packet. */
3617 * Analyze the packet and select the best method to send.
3620 * Pointer to TX queue structure.
3622 * Pointer to burst routine local context.
3624 * Configured Tx offloads mask. It is fully defined at
3625 * compile time and may be used for optimization.
 *   The predefined flag whether to do the complete check for
 *   multi-segment packets and TSO.
3631 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
3632 * MLX5_TXCMP_CODE_TSO - TSO required, use TSO/LSO.
3633 * MLX5_TXCMP_CODE_SINGLE - single-segment packet, use SEND.
3634 * MLX5_TXCMP_CODE_EMPW - single-segment packet, use MPW.
3636 static __rte_always_inline enum mlx5_txcmp_code
3637 mlx5_tx_able_to_empw(struct mlx5_txq_data *restrict txq,
3638 struct mlx5_txq_local *restrict loc,
3642 /* Check for multi-segment packet. */
3644 MLX5_TXOFF_CONFIG(MULTI) &&
3645 unlikely(NB_SEGS(loc->mbuf) > 1))
3646 return MLX5_TXCMP_CODE_MULTI;
3647 /* Check for TSO packet. */
3649 MLX5_TXOFF_CONFIG(TSO) &&
3650 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
3651 return MLX5_TXCMP_CODE_TSO;
3652 /* Check if eMPW is enabled at all. */
3653 if (!MLX5_TXOFF_CONFIG(EMPW))
3654 return MLX5_TXCMP_CODE_SINGLE;
3655 /* Check if eMPW can be engaged. */
3656 if (MLX5_TXOFF_CONFIG(VLAN) &&
3657 unlikely(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) &&
3658 (!MLX5_TXOFF_CONFIG(INLINE) ||
3659 unlikely((rte_pktmbuf_data_len(loc->mbuf) +
3660 sizeof(struct rte_vlan_hdr)) > txq->inlen_empw))) {
3662 * eMPW does not support VLAN insertion offload,
3663 * we have to inline the entire packet but
3664 * packet is too long for inlining.
3666 return MLX5_TXCMP_CODE_SINGLE;
3668 return MLX5_TXCMP_CODE_EMPW;
3672 * Check the next packet attributes to match with the eMPW batch ones.
 * In addition, for legacy MPW the packet length is checked as well.
3676 * Pointer to TX queue structure.
3678 * Pointer to Ethernet Segment of eMPW batch.
3680 * Pointer to burst routine local context.
3682 * Length of previous packet in MPW descriptor.
3684 * Configured Tx offloads mask. It is fully defined at
3685 * compile time and may be used for optimization.
3688 * true - packet match with eMPW batch attributes.
3689 * false - no match, eMPW should be restarted.
3691 static __rte_always_inline bool
3692 mlx5_tx_match_empw(struct mlx5_txq_data *restrict txq __rte_unused,
3693 struct mlx5_wqe_eseg *restrict es,
3694 struct mlx5_txq_local *restrict loc,
3698 uint8_t swp_flags = 0;
3700 /* Compare the checksum flags, if any. */
3701 if (MLX5_TXOFF_CONFIG(CSUM) &&
3702 txq_ol_cksum_to_cs(loc->mbuf) != es->cs_flags)
3704 /* Compare the Software Parser offsets and flags. */
3705 if (MLX5_TXOFF_CONFIG(SWP) &&
3706 (es->swp_offs != txq_mbuf_to_swp(loc, &swp_flags, olx) ||
3707 es->swp_flags != swp_flags))
3709 /* Fill metadata field if needed. */
3710 if (MLX5_TXOFF_CONFIG(METADATA) &&
3711 es->metadata != (loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
3712 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0))
	/* Legacy MPW can send packets with the same length only. */
3715 if (MLX5_TXOFF_CONFIG(MPW) &&
3716 dlen != rte_pktmbuf_data_len(loc->mbuf))
3718 /* There must be no VLAN packets in eMPW loop. */
3719 if (MLX5_TXOFF_CONFIG(VLAN))
3720 assert(!(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT));
3725 * Update send loop variables and WQE for eMPW loop
3726 * without data inlining. Number of Data Segments is
3727 * equal to the number of sent packets.
3730 * Pointer to TX queue structure.
3732 * Pointer to burst routine local context.
 *   Number of packets sent (equals the number of built Data Segments).
3736 * Accumulated statistics, bytes sent
3738 * Configured Tx offloads mask. It is fully defined at
3739 * compile time and may be used for optimization.
 * Nothing is returned; the WQE fields and the burst routine local
 * context are updated in place.
 */
3745 static __rte_always_inline void
3746 mlx5_tx_sdone_empw(struct mlx5_txq_data *restrict txq,
3747 struct mlx5_txq_local *restrict loc,
3750 unsigned int olx __rte_unused)
3752 assert(!MLX5_TXOFF_CONFIG(INLINE));
3753 #ifdef MLX5_PMD_SOFT_COUNTERS
3754 /* Update sent data bytes counter. */
	txq->stats.obytes += slen;
#else
	(void)slen;
#endif
	loc->elts_free -= ds;
	loc->pkts_sent += ds;
	ds += 2;
	loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3763 txq->wqe_ci += (ds + 3) / 4;
3764 loc->wqe_free -= (ds + 3) / 4;
3768 * Update send loop variables and WQE for eMPW loop
3769 * with data inlining. Gets the size of pushed descriptors
3770 * and data to the WQE.
3773 * Pointer to TX queue structure.
3775 * Pointer to burst routine local context.
3777 * Total size of descriptor/data in bytes.
3779 * Accumulated statistics, data bytes sent.
3781 * Configured Tx offloads mask. It is fully defined at
3782 * compile time and may be used for optimization.
 * Nothing is returned; the WQE fields and the burst routine local
 * context are updated in place.
 */
3788 static __rte_always_inline void
3789 mlx5_tx_idone_empw(struct mlx5_txq_data *restrict txq,
3790 struct mlx5_txq_local *restrict loc,
3793 unsigned int olx __rte_unused)
3795 assert(MLX5_TXOFF_CONFIG(INLINE));
3796 assert((len % MLX5_WSEG_SIZE) == 0);
3797 #ifdef MLX5_PMD_SOFT_COUNTERS
3798 /* Update sent data bytes counter. */
	txq->stats.obytes += slen;
#else
	(void)slen;
#endif
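	/*
	 * Convert the pushed descriptor/data size into 16-byte WQE segments
	 * and add 2 to account for the Control and Ethernet Segments of the
	 * eMPW title WQEBB.
	 */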
3803 len = len / MLX5_WSEG_SIZE + 2;
3804 loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | len);
3805 txq->wqe_ci += (len + 3) / 4;
3806 loc->wqe_free -= (len + 3) / 4;
3810 * The set of Tx burst functions for single-segment packets
3811 * without TSO and with Multi-Packet Writing feature support.
 * Supports all types of Tx offloads, except multi-segment packets and TSO.
3815 * Uses MLX5_OPCODE_EMPW to build WQEs if possible and sends
 * as many packets per WQE as it can. If eMPW is not configured
 * or the packet can not be sent with eMPW (VLAN insertion) the
 * ordinary SEND opcode is used and only one packet is placed
 * in the WQE.
3821 * Functions stop sending if it encounters the multi-segment
3822 * packet or packet with TSO requested.
 * The routines are responsible for storing processed mbufs
 * into the elts ring buffer and updating elts_head if inlining
 * offload is requested. Otherwise copying mbufs to elts can be
 * postponed and completed at the end of the burst routine.
3830 * Pointer to TX queue structure.
3832 * Packets to transmit.
3834 * Number of packets in array.
3836 * Pointer to burst routine local context.
3838 * Configured Tx offloads mask. It is fully defined at
3839 * compile time and may be used for optimization.
3842 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3843 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3844 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
3845 * MLX5_TXCMP_CODE_TSO - TSO packet encountered.
3846 * MLX5_TXCMP_CODE_SINGLE - used inside functions set.
3847 * MLX5_TXCMP_CODE_EMPW - used inside functions set.
3849 * Local context variables updated.
 *   The routine sends packets with MLX5_OPCODE_EMPW
 *   without inlining, this is a dedicated optimized branch.
 *   No VLAN insertion is supported.
3856 static __rte_always_inline enum mlx5_txcmp_code
3857 mlx5_tx_burst_empw_simple(struct mlx5_txq_data *restrict txq,
3858 struct rte_mbuf **restrict pkts,
3859 unsigned int pkts_n,
3860 struct mlx5_txq_local *restrict loc,
	/*
	 * The subroutine is part of mlx5_tx_burst_single()
	 * and sends single-segment packets with the eMPW opcode
	 * without data inlining.
	 */
3868 assert(!MLX5_TXOFF_CONFIG(INLINE));
3869 assert(MLX5_TXOFF_CONFIG(EMPW));
3870 assert(loc->elts_free && loc->wqe_free);
3871 assert(pkts_n > loc->pkts_sent);
3872 static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
3873 pkts += loc->pkts_sent + 1;
3874 pkts_n -= loc->pkts_sent;
3876 struct mlx5_wqe_dseg *restrict dseg;
3877 struct mlx5_wqe_eseg *restrict eseg;
3878 enum mlx5_txcmp_code ret;
3879 unsigned int part, loop;
3880 unsigned int slen = 0;
3883 assert(NB_SEGS(loc->mbuf) == 1);
3884 part = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
3885 MLX5_MPW_MAX_PACKETS :
3886 MLX5_EMPW_MAX_PACKETS);
3887 if (unlikely(loc->elts_free < part)) {
			/* We do not have enough elts to save all mbufs. */
3889 if (unlikely(loc->elts_free < MLX5_EMPW_MIN_PACKETS))
3890 return MLX5_TXCMP_CODE_EXIT;
			/* But we are still able to send at least a minimal eMPW. */
3892 part = loc->elts_free;
3894 /* Check whether we have enough WQEs */
3895 if (unlikely(loc->wqe_free < ((2 + part + 3) / 4))) {
3896 if (unlikely(loc->wqe_free <
3897 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
3898 return MLX5_TXCMP_CODE_EXIT;
3899 part = (loc->wqe_free * 4) - 2;
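		/*
		 * Each WQEBB holds four 16-byte segments: the title WQEBB
		 * spends two of them on the Control and Ethernet Segments
		 * and every packet adds one pointer Data Segment, hence
		 * the "2 +" in the WQEBB accounting around this loop.
		 */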
3901 if (likely(part > 1))
3902 rte_prefetch0(*pkts);
3903 loc->wqe_last = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3905 * Build eMPW title WQEBB:
3906 * - Control Segment, eMPW opcode
3907 * - Ethernet Segment, no inline
3909 mlx5_tx_cseg_init(txq, loc, loc->wqe_last, part + 2,
3910 MLX5_OPCODE_ENHANCED_MPSW, olx);
3911 mlx5_tx_eseg_none(txq, loc, loc->wqe_last,
3912 olx & ~MLX5_TXOFF_CONFIG_VLAN);
3913 eseg = &loc->wqe_last->eseg;
3914 dseg = &loc->wqe_last->dseg[0];
3916 /* Store the packet length for legacy MPW. */
3917 if (MLX5_TXOFF_CONFIG(MPW))
3918 eseg->mss = rte_cpu_to_be_16
3919 (rte_pktmbuf_data_len(loc->mbuf));
3921 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
3922 #ifdef MLX5_PMD_SOFT_COUNTERS
3923 /* Update sent data bytes counter. */
3928 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3930 if (unlikely(--loop == 0))
3932 loc->mbuf = *pkts++;
3933 if (likely(loop > 1))
3934 rte_prefetch0(*pkts);
3935 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
			/*
			 * Unroll the completion code to avoid
			 * returning a variable value - it results in
			 * unoptimized sequential checking in the caller.
			 */
3941 if (ret == MLX5_TXCMP_CODE_MULTI) {
3943 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3944 if (unlikely(!loc->elts_free ||
3946 return MLX5_TXCMP_CODE_EXIT;
3947 return MLX5_TXCMP_CODE_MULTI;
3949 assert(NB_SEGS(loc->mbuf) == 1);
3950 if (ret == MLX5_TXCMP_CODE_TSO) {
3952 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3953 if (unlikely(!loc->elts_free ||
3955 return MLX5_TXCMP_CODE_EXIT;
3956 return MLX5_TXCMP_CODE_TSO;
3958 if (ret == MLX5_TXCMP_CODE_SINGLE) {
3960 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3961 if (unlikely(!loc->elts_free ||
3963 return MLX5_TXCMP_CODE_EXIT;
3964 return MLX5_TXCMP_CODE_SINGLE;
3966 if (ret != MLX5_TXCMP_CODE_EMPW) {
3969 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3970 return MLX5_TXCMP_CODE_ERROR;
3973 * Check whether packet parameters coincide
3974 * within assumed eMPW batch:
3975 * - check sum settings
3977 * - software parser settings
3978 * - packets length (legacy MPW only)
3980 if (!mlx5_tx_match_empw(txq, eseg, loc, dlen, olx)) {
3983 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3984 if (unlikely(!loc->elts_free ||
3986 return MLX5_TXCMP_CODE_EXIT;
3990 /* Packet attributes match, continue the same eMPW. */
3992 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3993 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3995 /* eMPW is built successfully, update loop parameters. */
3997 assert(pkts_n >= part);
3998 #ifdef MLX5_PMD_SOFT_COUNTERS
3999 /* Update sent data bytes counter. */
		txq->stats.obytes += slen;
#endif
4002 loc->elts_free -= part;
4003 loc->pkts_sent += part;
4004 txq->wqe_ci += (2 + part + 3) / 4;
4005 loc->wqe_free -= (2 + part + 3) / 4;
4007 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
4008 return MLX5_TXCMP_CODE_EXIT;
4009 loc->mbuf = *pkts++;
4010 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4011 if (unlikely(ret != MLX5_TXCMP_CODE_EMPW))
4013 /* Continue sending eMPW batches. */
4019 * The routine sends packets with MLX5_OPCODE_EMPW
4020 * with inlining, optionally supports VLAN insertion.
4022 static __rte_always_inline enum mlx5_txcmp_code
4023 mlx5_tx_burst_empw_inline(struct mlx5_txq_data *restrict txq,
4024 struct rte_mbuf **restrict pkts,
4025 unsigned int pkts_n,
4026 struct mlx5_txq_local *restrict loc,
	/*
	 * The subroutine is part of mlx5_tx_burst_single()
	 * and sends single-segment packets with the eMPW opcode
	 * with data inlining.
	 */
4034 assert(MLX5_TXOFF_CONFIG(INLINE));
4035 assert(MLX5_TXOFF_CONFIG(EMPW));
4036 assert(loc->elts_free && loc->wqe_free);
4037 assert(pkts_n > loc->pkts_sent);
4038 static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
4039 pkts += loc->pkts_sent + 1;
4040 pkts_n -= loc->pkts_sent;
4042 struct mlx5_wqe_dseg *restrict dseg;
4043 struct mlx5_wqe_eseg *restrict eseg;
4044 enum mlx5_txcmp_code ret;
4045 unsigned int room, part, nlim;
4046 unsigned int slen = 0;
4048 assert(NB_SEGS(loc->mbuf) == 1);
		/*
		 * Limit the number of packets in one WQE
		 * to improve CQE generation latency.
		 */
4053 nlim = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
4054 MLX5_MPW_INLINE_MAX_PACKETS :
4055 MLX5_EMPW_MAX_PACKETS);
		/* Check whether we have a minimal amount of WQEs. */
4057 if (unlikely(loc->wqe_free <
4058 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
4059 return MLX5_TXCMP_CODE_EXIT;
4060 if (likely(pkts_n > 1))
4061 rte_prefetch0(*pkts);
4062 loc->wqe_last = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4064 * Build eMPW title WQEBB:
4065 * - Control Segment, eMPW opcode, zero DS
4066 * - Ethernet Segment, no inline
4068 mlx5_tx_cseg_init(txq, loc, loc->wqe_last, 0,
4069 MLX5_OPCODE_ENHANCED_MPSW, olx);
4070 mlx5_tx_eseg_none(txq, loc, loc->wqe_last,
4071 olx & ~MLX5_TXOFF_CONFIG_VLAN);
4072 eseg = &loc->wqe_last->eseg;
4073 dseg = &loc->wqe_last->dseg[0];
4074 /* Store the packet length for legacy MPW. */
4075 if (MLX5_TXOFF_CONFIG(MPW))
4076 eseg->mss = rte_cpu_to_be_16
4077 (rte_pktmbuf_data_len(loc->mbuf));
4078 room = RTE_MIN(MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE,
4079 loc->wqe_free) * MLX5_WQE_SIZE -
			       MLX5_WQE_CSEG_SIZE -
			       MLX5_WQE_ESEG_SIZE;
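		/*
		 * 'room' is the byte budget for Data Segments in this eMPW
		 * session: bounded by the maximal WQE size and by the free
		 * WQEBBs, minus the Control and Ethernet Segments of the
		 * title WQEBB.
		 */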
4082 /* Build WQE till we have space, packets and resources. */
4085 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
4086 uint8_t *dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
4089 assert(room >= MLX5_WQE_DSEG_SIZE);
4090 assert((room % MLX5_WQE_DSEG_SIZE) == 0);
4091 assert((uintptr_t)dseg < (uintptr_t)txq->wqes_end);
4093 * Some Tx offloads may cause an error if
4094 * packet is not long enough, check against
4095 * assumed minimal length.
4097 if (unlikely(dlen <= MLX5_ESEG_MIN_INLINE_SIZE)) {
4099 if (unlikely(!part))
4100 return MLX5_TXCMP_CODE_ERROR;
4102 * We have some successfully built
4103 * packet Data Segments to send.
4105 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4106 return MLX5_TXCMP_CODE_ERROR;
4108 /* Inline or not inline - that's the Question. */
4109 if (dlen > txq->inlen_empw)
4111 /* Inline entire packet, optional VLAN insertion. */
4112 tlen = sizeof(dseg->bcount) + dlen;
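			/*
			 * An inlined Data Segment starts with the 4-byte
			 * bcount word followed by the packet data and is
			 * padded up to MLX5_WSEG_SIZE, so 'tlen' tracks the
			 * WQE space this segment will consume.
			 */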
4113 if (MLX5_TXOFF_CONFIG(VLAN) &&
4114 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
				/*
				 * The packet length must be checked in
				 * mlx5_tx_able_to_empw() and the packet
				 * is guaranteed to fit into the inline length.
				 */
4120 assert((dlen + sizeof(struct rte_vlan_hdr)) <=
4122 tlen += sizeof(struct rte_vlan_hdr);
4125 dseg = mlx5_tx_dseg_vlan(txq, loc, dseg,
4127 #ifdef MLX5_PMD_SOFT_COUNTERS
4128 /* Update sent data bytes counter. */
				slen += sizeof(struct rte_vlan_hdr);
#endif
4134 dseg = mlx5_tx_dseg_empw(txq, loc, dseg,
4137 tlen = RTE_ALIGN(tlen, MLX5_WSEG_SIZE);
4138 assert(room >= tlen);
4141 * Packet data are completely inlined,
4142 * free the packet immediately.
4144 rte_pktmbuf_free_seg(loc->mbuf);
			 * Not inlinable VLAN packets are
			 * processed outside of this routine.
4151 assert(room >= MLX5_WQE_DSEG_SIZE);
4152 if (MLX5_TXOFF_CONFIG(VLAN))
4153 assert(!(loc->mbuf->ol_flags &
4155 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
4156 /* We have to store mbuf in elts.*/
4157 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
4158 room -= MLX5_WQE_DSEG_SIZE;
4159 /* Ring buffer wraparound is checked at the loop end.*/
4162 #ifdef MLX5_PMD_SOFT_COUNTERS
4163 /* Update sent data bytes counter. */
4169 if (unlikely(!pkts_n || !loc->elts_free)) {
				/*
				 * We have no resources/packets to
				 * continue building descriptors.
				 */
4175 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4176 return MLX5_TXCMP_CODE_EXIT;
4178 loc->mbuf = *pkts++;
4179 if (likely(pkts_n > 1))
4180 rte_prefetch0(*pkts);
4181 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
			/*
			 * Unroll the completion code to avoid
			 * returning a variable value - it results in
			 * unoptimized sequential checking in the caller.
			 */
4187 if (ret == MLX5_TXCMP_CODE_MULTI) {
4189 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4190 if (unlikely(!loc->elts_free ||
4192 return MLX5_TXCMP_CODE_EXIT;
4193 return MLX5_TXCMP_CODE_MULTI;
4195 assert(NB_SEGS(loc->mbuf) == 1);
4196 if (ret == MLX5_TXCMP_CODE_TSO) {
4198 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4199 if (unlikely(!loc->elts_free ||
4201 return MLX5_TXCMP_CODE_EXIT;
4202 return MLX5_TXCMP_CODE_TSO;
4204 if (ret == MLX5_TXCMP_CODE_SINGLE) {
4206 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4207 if (unlikely(!loc->elts_free ||
4209 return MLX5_TXCMP_CODE_EXIT;
4210 return MLX5_TXCMP_CODE_SINGLE;
4212 if (ret != MLX5_TXCMP_CODE_EMPW) {
4215 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4216 return MLX5_TXCMP_CODE_ERROR;
4218 /* Check if we have minimal room left. */
4220 if (unlikely(!nlim || room < MLX5_WQE_DSEG_SIZE))
4223 * Check whether packet parameters coincide
4224 * within assumed eMPW batch:
4225 * - check sum settings
4227 * - software parser settings
4228 * - packets length (legacy MPW only)
4230 if (!mlx5_tx_match_empw(txq, eseg, loc, dlen, olx))
4232 /* Packet attributes match, continue the same eMPW. */
4233 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
4234 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
4237 * We get here to close an existing eMPW
4238 * session and start the new one.
4242 if (unlikely(!part))
4243 return MLX5_TXCMP_CODE_EXIT;
4244 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4245 if (unlikely(!loc->elts_free ||
4247 return MLX5_TXCMP_CODE_EXIT;
4248 /* Continue the loop with new eMPW session. */
4254 * The routine sends packets with ordinary MLX5_OPCODE_SEND.
4255 * Data inlining and VLAN insertion are supported.
4257 static __rte_always_inline enum mlx5_txcmp_code
4258 mlx5_tx_burst_single_send(struct mlx5_txq_data *restrict txq,
4259 struct rte_mbuf **restrict pkts,
4260 unsigned int pkts_n,
4261 struct mlx5_txq_local *restrict loc,
	/*
	 * The subroutine is part of mlx5_tx_burst_single()
	 * and sends single-segment packets with the SEND opcode.
	 */
4268 assert(loc->elts_free && loc->wqe_free);
4269 assert(pkts_n > loc->pkts_sent);
4270 pkts += loc->pkts_sent + 1;
4271 pkts_n -= loc->pkts_sent;
4273 struct mlx5_wqe *restrict wqe;
4274 enum mlx5_txcmp_code ret;
4276 assert(NB_SEGS(loc->mbuf) == 1);
4277 if (MLX5_TXOFF_CONFIG(INLINE)) {
4278 unsigned int inlen, vlan = 0;
4280 inlen = rte_pktmbuf_data_len(loc->mbuf);
4281 if (MLX5_TXOFF_CONFIG(VLAN) &&
4282 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
4283 vlan = sizeof(struct rte_vlan_hdr);
4285 static_assert((sizeof(struct rte_vlan_hdr) +
4286 sizeof(struct rte_ether_hdr)) ==
4287 MLX5_ESEG_MIN_INLINE_SIZE,
4288 "invalid min inline data size");
4291 * If inlining is enabled at configuration time
4292 * the limit must be not less than the minimal size.
4293 * Otherwise we would have to do an extra data size
4294 * check to avoid crashes due to length overflow.
4296 assert(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
4297 if (inlen <= txq->inlen_send) {
4298 unsigned int seg_n, wqe_n;
4300 rte_prefetch0(rte_pktmbuf_mtod
4301 (loc->mbuf, uint8_t *));
4302 /* Check against minimal length. */
4303 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
4304 return MLX5_TXCMP_CODE_ERROR;
4306 * Completely inlined packet data WQE:
4307 * - Control Segment, SEND opcode
4308 * - Ethernet Segment, no VLAN insertion
4309 * - Data inlined, VLAN optionally inserted
4310 * - Alignment to MLX5_WSEG_SIZE
4311 * Have to estimate the number of WQEBBs.
4313 seg_n = (inlen + 3 * MLX5_WSEG_SIZE -
4314 MLX5_ESEG_MIN_INLINE_SIZE +
4315 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
4316 /* Check if there are enough WQEBBs. */
4317 wqe_n = (seg_n + 3) / 4;
4318 if (wqe_n > loc->wqe_free)
4319 return MLX5_TXCMP_CODE_EXIT;
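/*
 * Illustrative arithmetic (annotation, not in the original source),
 * assuming MLX5_WSEG_SIZE == 16 and MLX5_ESEG_MIN_INLINE_SIZE == 18
 * (Ethernet plus VLAN header, as asserted above): for a fully
 * inlined packet of inlen = 200 bytes
 *   seg_n = (200 + 3 * 16 - 18 + 16 - 1) / 16 = 245 / 16 = 15 words,
 *   wqe_n = (15 + 3) / 4 = 4,
 * so the SEND WQE occupies four 64-byte WQEBBs of the ring.
 */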
4320 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4321 loc->wqe_last = wqe;
4322 mlx5_tx_cseg_init(txq, loc, wqe, seg_n,
4323 MLX5_OPCODE_SEND, olx);
4324 mlx5_tx_eseg_data(txq, loc, wqe,
4325 vlan, inlen, 0, olx);
4326 txq->wqe_ci += wqe_n;
4327 loc->wqe_free -= wqe_n;
4329 * Packet data are completely inlined,
4330 * free the packet immediately.
4332 rte_pktmbuf_free_seg(loc->mbuf);
4333 } else if ((!MLX5_TXOFF_CONFIG(EMPW) ||
4334 MLX5_TXOFF_CONFIG(MPW)) &&
4335 txq->inlen_mode) {
4337 * If minimal inlining is requested, the eMPW
4338 * feature should be disabled because the data
4339 * are inlined into the Ethernet Segment, which
4340 * cannot hold per-packet inline data for eMPW
4341 * since the segment is shared by all packets.
4343 struct mlx5_wqe_dseg *restrict dseg;
4348 * The inline-mode settings require
4349 * inlining the specified amount of
4350 * data bytes into the Ethernet Segment.
4351 * We should check the free space in the
4352 * WQE ring buffer to inline only partially.
4354 assert(txq->inlen_send >= txq->inlen_mode);
4355 assert(inlen > txq->inlen_mode);
4356 assert(txq->inlen_mode >=
4357 MLX5_ESEG_MIN_INLINE_SIZE);
4359 * Check whether there are enough free WQEBBs:
4360 * - Control Segment
4361 * - Ethernet Segment
4362 * - First Segment of inlined Ethernet data
4363 * - ... data continued ...
4364 * - Finishing Data Segment of pointer type
4366 ds = (MLX5_WQE_CSEG_SIZE +
4367 MLX5_WQE_ESEG_SIZE +
4368 MLX5_WQE_DSEG_SIZE +
4369 txq->inlen_mode -
4370 MLX5_ESEG_MIN_INLINE_SIZE +
4371 MLX5_WQE_DSEG_SIZE +
4372 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
4373 if (loc->wqe_free < ((ds + 3) / 4))
4374 return MLX5_TXCMP_CODE_EXIT;
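/*
 * Illustrative arithmetic (annotation, not in the original source),
 * assuming txq->inlen_mode == 128 and 16-byte segment/word sizes:
 *   ds = (16 + 16 + 16 + 128 - 18 + 16 + 15) / 16 = 189 / 16 = 11
 * 16-byte words, i.e. (11 + 3) / 4 = 3 WQEBBs for the whole WQE.
 */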
4376 * Build the ordinary SEND WQE:
4377 * - Control Segment
4378 * - Ethernet Segment, inline inlen_mode bytes
4379 * - Data Segment of pointer type
4381 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4382 loc->wqe_last = wqe;
4383 mlx5_tx_cseg_init(txq, loc, wqe, ds,
4384 MLX5_OPCODE_SEND, olx);
4385 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan,
4386 txq->inlen_mode,
4387 0, olx);
4388 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
4389 txq->inlen_mode - vlan;
4390 inlen -= txq->inlen_mode;
4391 mlx5_tx_dseg_ptr(txq, loc, dseg,
4392 dptr, inlen, olx);
4394 * The WQE is built, update the loop parameters
4395 * and go to the next packet.
4397 txq->wqe_ci += (ds + 3) / 4;
4398 loc->wqe_free -= (ds + 3) / 4;
4399 /* We have to store mbuf in elts.*/
4400 assert(MLX5_TXOFF_CONFIG(INLINE));
4401 txq->elts[txq->elts_head++ & txq->elts_m] =
4402 loc->mbuf;
4409 * Partially inlined packet data WQE, we have
4410 * some space in the title WQEBB and can fill it
4411 * with part of the packet data. It takes one WQEBB,
4412 * which is available, so no extra space check:
4413 * - Control Segment, SEND opcode
4414 * - Ethernet Segment, no VLAN insertion
4415 * - MLX5_ESEG_MIN_INLINE_SIZE bytes of data
4416 * - Data Segment, pointer type
4418 * We also get here if VLAN insertion is not
4419 * supported by HW but inlining is enabled.
4421 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4422 loc->wqe_last = wqe;
4423 mlx5_tx_cseg_init(txq, loc, wqe, 4,
4424 MLX5_OPCODE_SEND, olx);
4425 mlx5_tx_eseg_dmin(txq, loc, wqe, vlan, olx);
4426 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
4427 MLX5_ESEG_MIN_INLINE_SIZE - vlan;
4429 * The length check is performed above, by
4430 * comparing with txq->inlen_send. We should
4431 * not get an overflow here.
4433 assert(inlen > MLX5_ESEG_MIN_INLINE_SIZE);
4434 dlen = inlen - MLX5_ESEG_MIN_INLINE_SIZE;
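/*
 * Illustrative split (annotation, not in the original source): with
 * inlen = 60 data bytes and no VLAN, the first 18 bytes
 * (MLX5_ESEG_MIN_INLINE_SIZE) were placed into the Ethernet Segment
 * by mlx5_tx_eseg_dmin() above and the remaining dlen = 42 bytes are
 * referenced by the pointer Data Segment built below.
 */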
4435 mlx5_tx_dseg_ptr(txq, loc, &wqe->dseg[1],
4436 dptr, dlen, olx);
4439 /* We have to store mbuf in elts.*/
4440 assert(MLX5_TXOFF_CONFIG(INLINE));
4441 txq->elts[txq->elts_head++ & txq->elts_m] =
4442 loc->mbuf;
4445 #ifdef MLX5_PMD_SOFT_COUNTERS
4446 /* Update sent data bytes counter. */
4447 txq->stats.obytes += vlan +
4448 rte_pktmbuf_data_len(loc->mbuf);
4452 * No inlining at all, it means the CPU cycle saving
4453 * is prioritized at configuration time, we should not
4454 * copy any packet data into the WQE.
4456 * SEND WQE, one WQEBB:
4457 * - Control Segment, SEND opcode
4458 * - Ethernet Segment, optional VLAN, no inline
4459 * - Data Segment, pointer type
4461 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4462 loc->wqe_last = wqe;
4463 mlx5_tx_cseg_init(txq, loc, wqe, 3,
4464 MLX5_OPCODE_SEND, olx);
4465 mlx5_tx_eseg_none(txq, loc, wqe, olx);
4466 mlx5_tx_dseg_ptr
4467 (txq, loc, &wqe->dseg[0],
4468 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
4469 rte_pktmbuf_data_len(loc->mbuf), olx);
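/*
 * Annotation (not in the original source): with ds == 3 this WQE
 * holds only the Control, Ethernet and one pointer Data Segment
 * (three 16-byte words), so it always fits into a single 64-byte
 * WQEBB.
 */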
4473 * We should not store the mbuf pointer in elts
4474 * if no inlining is configured; this is done
4475 * by the calling routine in a batch copy.
4477 assert(!MLX5_TXOFF_CONFIG(INLINE));
4479 #ifdef MLX5_PMD_SOFT_COUNTERS
4480 /* Update sent data bytes counter. */
4481 txq->stats.obytes += rte_pktmbuf_data_len(loc->mbuf);
4482 if (MLX5_TXOFF_CONFIG(VLAN) &&
4483 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
4484 txq->stats.obytes +=
4485 sizeof(struct rte_vlan_hdr);
4490 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
4491 return MLX5_TXCMP_CODE_EXIT;
4492 loc->mbuf = *pkts++;
4493 if (likely(pkts_n > 1))
4494 rte_prefetch0(*pkts);
4495 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4496 if (unlikely(ret != MLX5_TXCMP_CODE_SINGLE))
4502 static __rte_always_inline enum mlx5_txcmp_code
4503 mlx5_tx_burst_single(struct mlx5_txq_data *restrict txq,
4504 struct rte_mbuf **restrict pkts,
4505 unsigned int pkts_n,
4506 struct mlx5_txq_local *restrict loc,
4509 enum mlx5_txcmp_code ret;
4511 ret = mlx5_tx_able_to_empw(txq, loc, olx, false);
4512 if (ret == MLX5_TXCMP_CODE_SINGLE)
4514 assert(ret == MLX5_TXCMP_CODE_EMPW);
4516 /* Optimize for inline/no inline eMPW send. */
4517 ret = (MLX5_TXOFF_CONFIG(INLINE)) ?
4518 mlx5_tx_burst_empw_inline
4519 (txq, pkts, pkts_n, loc, olx) :
4520 mlx5_tx_burst_empw_simple
4521 (txq, pkts, pkts_n, loc, olx);
4522 if (ret != MLX5_TXCMP_CODE_SINGLE)
4524 /* The resources to send one packet should remain. */
4525 assert(loc->elts_free && loc->wqe_free);
4527 ret = mlx5_tx_burst_single_send(txq, pkts, pkts_n, loc, olx);
4528 assert(ret != MLX5_TXCMP_CODE_SINGLE);
4529 if (ret != MLX5_TXCMP_CODE_EMPW)
4531 /* The resources to send one packet should remain. */
4532 assert(loc->elts_free && loc->wqe_free);
4537 * DPDK Tx callback template. This is a configured template
4538 * used to generate routines optimized for the specified offload setup.
4539 * One of these generated functions is chosen at SQ configuration
4540 * time.
4543 * Generic pointer to TX queue structure.
4545 * Packets to transmit.
4547 * Number of packets in array.
4549 * Configured offloads mask, representing the bits of MLX5_TXOFF_CONFIG_xxx
4550 * values. Should be static to take advantage of compile-time configuration.
4554 * Number of packets successfully transmitted (<= pkts_n).
4556 static __rte_always_inline uint16_t
4557 mlx5_tx_burst_tmpl(struct mlx5_txq_data *restrict txq,
4558 struct rte_mbuf **restrict pkts,
4562 struct mlx5_txq_local loc;
4563 enum mlx5_txcmp_code ret;
4566 assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
4567 assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
4568 if (unlikely(!pkts_n))
4572 loc.wqe_last = NULL;
4575 loc.pkts_loop = loc.pkts_sent;
4577 * Check if there are some CQEs, if any:
4578 * - process encountered errors
4579 * - process the completed WQEs
4580 * - free related mbufs
4581 * - doorbell the NIC about processed CQEs
4583 rte_prefetch0(*(pkts + loc.pkts_sent));
4584 mlx5_tx_handle_completion(txq, olx);
4586 * Calculate the number of available resources - elts and WQEs.
4587 * There are two possible different scenarios:
4588 * - no data inlining into WQEs, one WQEBB may contain up to
4589 * four packets, in this case elts become the scarce resource
4590 * - data inlining into WQEs, one packet may require multiple
4591 * WQEBBs, the WQEs become the limiting factor.
4593 assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
4594 loc.elts_free = txq->elts_s -
4595 (uint16_t)(txq->elts_head - txq->elts_tail);
4596 assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
4597 loc.wqe_free = txq->wqe_s -
4598 (uint16_t)(txq->wqe_ci - txq->wqe_pi);
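/*
 * Annotation (not in the original source): the (uint16_t) casts keep
 * the subtractions correct across index wraparound, e.g. with
 * elts_head == 5 and elts_tail == 65534 the expression
 * (uint16_t)(elts_head - elts_tail) yields 7 used entries, so
 * elts_free is computed correctly without masking the indices first.
 */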
4599 if (unlikely(!loc.elts_free || !loc.wqe_free))
4603 * Fetch the packet from the array. Usually this is
4604 * the first packet in a series of multi/single-
4605 * segment packets.
4607 loc.mbuf = *(pkts + loc.pkts_sent);
4608 /* Dedicated branch for multi-segment packets. */
4609 if (MLX5_TXOFF_CONFIG(MULTI) &&
4610 unlikely(NB_SEGS(loc.mbuf) > 1)) {
4612 * Multi-segment packet encountered.
4613 * Hardware is able to process it only
4614 * with SEND/TSO opcodes, one packet
4615 * per WQE, do it in a dedicated routine.
4618 assert(loc.pkts_sent >= loc.pkts_copy);
4619 part = loc.pkts_sent - loc.pkts_copy;
4620 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
4622 * There are some single-segment mbufs not
4623 * stored in elts. The mbufs must be in the
4624 * same order as WQEs, so we must copy the
4625 * mbufs to elts here, before the mbufs of the
4626 * coming multi-segment packet are appended.
4628 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy,
4629 part, olx);
4630 loc.pkts_copy = loc.pkts_sent;
4632 assert(pkts_n > loc.pkts_sent);
4633 ret = mlx5_tx_burst_mseg(txq, pkts, pkts_n, &loc, olx);
4634 if (!MLX5_TXOFF_CONFIG(INLINE))
4635 loc.pkts_copy = loc.pkts_sent;
4637 * These return code checks are supposed
4638 * to be optimized out due to routine inlining.
4640 if (ret == MLX5_TXCMP_CODE_EXIT) {
4642 * The routine returns this code when
4643 * all packets are sent or there are not
4644 * enough resources to complete the request.
4648 if (ret == MLX5_TXCMP_CODE_ERROR) {
4650 * The routine returns this code when
4651 * an error was encountered in the
4652 * incoming packet format.
4654 txq->stats.oerrors++;
4657 if (ret == MLX5_TXCMP_CODE_SINGLE) {
4659 * A single-segment packet was encountered
4660 * in the array, try to send it in the
4661 * most optimized way, possibly engaging eMPW.
4663 goto enter_send_single;
4665 if (MLX5_TXOFF_CONFIG(TSO) &&
4666 ret == MLX5_TXCMP_CODE_TSO) {
4668 * The single-segment TSO packet was
4669 * encountered in the array.
4671 goto enter_send_tso;
4673 /* We must not get here. Something is going wrong. */
4675 txq->stats.oerrors++;
4678 /* Dedicated branch for single-segment TSO packets. */
4679 if (MLX5_TXOFF_CONFIG(TSO) &&
4680 unlikely(loc.mbuf->ol_flags & PKT_TX_TCP_SEG)) {
4682 * TSO may require a special way of inlining
4683 * (dedicated parameters) and is sent with
4684 * the MLX5_OPCODE_TSO opcode only, so handle
4685 * it in a dedicated branch.
4688 assert(NB_SEGS(loc.mbuf) == 1);
4689 assert(pkts_n > loc.pkts_sent);
4690 ret = mlx5_tx_burst_tso(txq, pkts, pkts_n, &loc, olx);
4692 * These return code checks are supposed
4693 * to be optimized out due to routine inlining.
4695 if (ret == MLX5_TXCMP_CODE_EXIT)
4697 if (ret == MLX5_TXCMP_CODE_ERROR) {
4698 txq->stats.oerrors++;
4701 if (ret == MLX5_TXCMP_CODE_SINGLE)
4702 goto enter_send_single;
4703 if (MLX5_TXOFF_CONFIG(MULTI) &&
4704 ret == MLX5_TXCMP_CODE_MULTI) {
4706 * The multi-segment packet was
4707 * encountered in the array.
4709 goto enter_send_multi;
4711 /* We must not get here. Something is going wrong. */
4713 txq->stats.oerrors++;
4717 * The dedicated branch for single-segment packets
4718 * without TSO. Often these can be sent using
4719 * MLX5_OPCODE_EMPW with multiple packets in one WQE.
4720 * The routine builds the WQEs till it encounters
4721 * a TSO or multi-segment packet (in case these
4722 * offloads are requested at SQ configuration time).
4725 assert(pkts_n > loc.pkts_sent);
4726 ret = mlx5_tx_burst_single(txq, pkts, pkts_n, &loc, olx);
4728 * These return code checks are supposed
4729 * to be optimized out due to routine inlining.
4731 if (ret == MLX5_TXCMP_CODE_EXIT)
4733 if (ret == MLX5_TXCMP_CODE_ERROR) {
4734 txq->stats.oerrors++;
4737 if (MLX5_TXOFF_CONFIG(MULTI) &&
4738 ret == MLX5_TXCMP_CODE_MULTI) {
4740 * The multi-segment packet was
4741 * encountered in the array.
4743 goto enter_send_multi;
4745 if (MLX5_TXOFF_CONFIG(TSO) &&
4746 ret == MLX5_TXCMP_CODE_TSO) {
4748 * The single-segment TSO packet was
4749 * encountered in the array.
4751 goto enter_send_tso;
4753 /* We must not get here. Something is going wrong. */
4755 txq->stats.oerrors++;
4759 * The main Tx loop is completed, do the rest:
4760 * - set the completion request if thresholds are reached
4761 * - doorbell the hardware
4762 * - copy the rest of the mbufs to elts (if any)
4764 assert(MLX5_TXOFF_CONFIG(INLINE) || loc.pkts_sent >= loc.pkts_copy);
4765 /* Take a shortcut if nothing is sent. */
4766 if (unlikely(loc.pkts_sent == loc.pkts_loop))
4768 /* Request CQE generation if limits are reached. */
4769 mlx5_tx_request_completion(txq, &loc, olx);
4771 * Ring the QP doorbell immediately after WQE building completion
4772 * to improve latencies. The pure software-related data treatment
4773 * can be completed after the doorbell. Tx CQEs for this SQ are
4774 * processed in this thread only by polling.
4776 * The rdma core library can map the doorbell register in two ways,
4777 * depending on the environment variable "MLX5_SHUT_UP_BF":
4779 * - as regular cached memory, the variable is either missing or
4780 * set to zero. This type of mapping may cause significant
4781 * doorbell register writing latency and requires an explicit
4782 * memory write barrier to mitigate this issue and prevent
4783 * write combining.
4785 * - as non-cached memory, the variable is present and set to
4786 * a non-"0" value. This type of mapping may cause a performance
4787 * impact under heavy load conditions but the explicit write
4788 * memory barrier is not required and it may improve core
4789 * performance.
4791 * - the legacy behaviour (prior to the 19.08 release) was to use
4792 * some heuristics to decide whether a write memory barrier should
4793 * be performed. This behavior is supported by specifying
4794 * tx_db_nc=2; the write barrier is skipped if the application
4795 * provides the full recommended burst of packets, assuming
4796 * the next packets are coming and the write barrier
4797 * will be issued on the next burst (after descriptor writing,
4798 * at least).
4800 mlx5_tx_dbrec_cond_wmb(txq, loc.wqe_last, !txq->db_nc &&
4801 (!txq->db_heu || pkts_n % MLX5_TX_DEFAULT_BURST));
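/*
 * Illustrative usage (annotation, not in the original source; the PCI
 * address and application are placeholders): the legacy heuristic
 * barrier mode mentioned above could be requested via the mlx5 device
 * argument, e.g.
 *   ./testpmd -w 0000:82:00.0,tx_db_nc=2 -- --txq=4 --rxq=4
 * while exporting MLX5_SHUT_UP_BF in the environment switches the
 * rdma-core doorbell mapping between cached and non-cached memory.
 */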
4802 /* Not all of the mbufs may be stored into elts yet. */
4803 part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent - loc.pkts_copy;
4804 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
4806 * There are some single-segment mbufs not stored in elts.
4807 * This can only happen if the last packet was single-segment.
4808 * The copying is gathered into one place because it is
4809 * a good opportunity to optimize it with SIMD.
4810 * Unfortunately, if inlining is enabled, gaps in the
4811 * pointer array may appear due to the early freeing of
4812 * inlined mbufs.
4814 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy, part, olx);
4815 loc.pkts_copy = loc.pkts_sent;
4817 assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
4818 assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
4819 if (pkts_n > loc.pkts_sent) {
4821 * If the burst size is large there might not be enough CQEs
4822 * fetched from the completion queue and not enough resources
4823 * freed to send all the packets.
4828 #ifdef MLX5_PMD_SOFT_COUNTERS
4829 /* Increment sent packets counter. */
4830 txq->stats.opackets += loc.pkts_sent;
4832 return loc.pkts_sent;
4835 /* Generate routines with Enhanced Multi-Packet Write support. */
4836 MLX5_TXOFF_DECL(full_empw,
4837 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_EMPW)
4839 MLX5_TXOFF_DECL(none_empw,
4840 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW)
4842 MLX5_TXOFF_DECL(md_empw,
4843 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4845 MLX5_TXOFF_DECL(mt_empw,
4846 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4847 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4849 MLX5_TXOFF_DECL(mtsc_empw,
4850 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4851 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4852 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4854 MLX5_TXOFF_DECL(mti_empw,
4855 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4856 MLX5_TXOFF_CONFIG_INLINE |
4857 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4859 MLX5_TXOFF_DECL(mtv_empw,
4860 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4861 MLX5_TXOFF_CONFIG_VLAN |
4862 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4864 MLX5_TXOFF_DECL(mtiv_empw,
4865 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4866 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4867 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4869 MLX5_TXOFF_DECL(sc_empw,
4870 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4871 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4873 MLX5_TXOFF_DECL(sci_empw,
4874 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4875 MLX5_TXOFF_CONFIG_INLINE |
4876 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4878 MLX5_TXOFF_DECL(scv_empw,
4879 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4880 MLX5_TXOFF_CONFIG_VLAN |
4881 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4883 MLX5_TXOFF_DECL(sciv_empw,
4884 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4885 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4886 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4888 MLX5_TXOFF_DECL(i_empw,
4889 MLX5_TXOFF_CONFIG_INLINE |
4890 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4892 MLX5_TXOFF_DECL(v_empw,
4893 MLX5_TXOFF_CONFIG_VLAN |
4894 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4896 MLX5_TXOFF_DECL(iv_empw,
4897 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4898 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4900 /* Generate routines without Enhanced Multi-Packet Write support. */
4901 MLX5_TXOFF_DECL(full,
4902 MLX5_TXOFF_CONFIG_FULL)
4904 MLX5_TXOFF_DECL(none,
4905 MLX5_TXOFF_CONFIG_NONE)
4907 MLX5_TXOFF_DECL(md,
4908 MLX5_TXOFF_CONFIG_METADATA)
4910 MLX5_TXOFF_DECL(mt,
4911 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4912 MLX5_TXOFF_CONFIG_METADATA)
4914 MLX5_TXOFF_DECL(mtsc,
4915 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4916 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4917 MLX5_TXOFF_CONFIG_METADATA)
4919 MLX5_TXOFF_DECL(mti,
4920 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4921 MLX5_TXOFF_CONFIG_INLINE |
4922 MLX5_TXOFF_CONFIG_METADATA)
4925 MLX5_TXOFF_DECL(mtv,
4926 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4927 MLX5_TXOFF_CONFIG_VLAN |
4928 MLX5_TXOFF_CONFIG_METADATA)
4931 MLX5_TXOFF_DECL(mtiv,
4932 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4933 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4934 MLX5_TXOFF_CONFIG_METADATA)
4936 MLX5_TXOFF_DECL(sc,
4937 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4938 MLX5_TXOFF_CONFIG_METADATA)
4940 MLX5_TXOFF_DECL(sci,
4941 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4942 MLX5_TXOFF_CONFIG_INLINE |
4943 MLX5_TXOFF_CONFIG_METADATA)
4946 MLX5_TXOFF_DECL(scv,
4947 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4948 MLX5_TXOFF_CONFIG_VLAN |
4949 MLX5_TXOFF_CONFIG_METADATA)
4952 MLX5_TXOFF_DECL(sciv,
4953 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4954 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4955 MLX5_TXOFF_CONFIG_METADATA)
4957 MLX5_TXOFF_DECL(i,
4958 MLX5_TXOFF_CONFIG_INLINE |
4959 MLX5_TXOFF_CONFIG_METADATA)
4961 MLX5_TXOFF_DECL(v,
4962 MLX5_TXOFF_CONFIG_VLAN |
4963 MLX5_TXOFF_CONFIG_METADATA)
4965 MLX5_TXOFF_DECL(iv,
4966 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4967 MLX5_TXOFF_CONFIG_METADATA)
4970 * Generate routines with Legacy Multi-Packet Write support.
4971 * This mode is supported by ConnectX-4 Lx only and imposes
4972 * offload limitations; the following are not supported:
4973 * - ACL/Flows (metadata become meaningless)
4974 * - WQE Inline headers
4975 * - SRIOV (E-Switch offloads)
4977 * - tunnel encapsulation/decapsulation
4980 MLX5_TXOFF_DECL(none_mpw,
4981 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW |
4982 MLX5_TXOFF_CONFIG_MPW)
4984 MLX5_TXOFF_DECL(mci_mpw,
4985 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
4986 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
4987 MLX5_TXOFF_CONFIG_MPW)
4989 MLX5_TXOFF_DECL(mc_mpw,
4990 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
4991 MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW)
4993 MLX5_TXOFF_DECL(i_mpw,
4994 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
4995 MLX5_TXOFF_CONFIG_MPW)
4998 * Array of declared and compiled Tx burst functions and the corresponding
4999 * supported offload sets. The array is used to select the Tx burst
5000 * function for the offload set specified at Tx queue configuration time.
5002 static const struct {
5003 eth_tx_burst_t func;
5004 unsigned int olx;
5005 } txoff_func[] = {
5006 MLX5_TXOFF_INFO(full_empw,
5007 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5008 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5009 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5010 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5012 MLX5_TXOFF_INFO(none_empw,
5013 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW)
5015 MLX5_TXOFF_INFO(md_empw,
5016 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5018 MLX5_TXOFF_INFO(mt_empw,
5019 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5020 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5022 MLX5_TXOFF_INFO(mtsc_empw,
5023 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5024 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5025 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5027 MLX5_TXOFF_INFO(mti_empw,
5028 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5029 MLX5_TXOFF_CONFIG_INLINE |
5030 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5032 MLX5_TXOFF_INFO(mtv_empw,
5033 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5034 MLX5_TXOFF_CONFIG_VLAN |
5035 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5037 MLX5_TXOFF_INFO(mtiv_empw,
5038 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5039 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5040 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5042 MLX5_TXOFF_INFO(sc_empw,
5043 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5044 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5046 MLX5_TXOFF_INFO(sci_empw,
5047 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5048 MLX5_TXOFF_CONFIG_INLINE |
5049 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5051 MLX5_TXOFF_INFO(scv_empw,
5052 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5053 MLX5_TXOFF_CONFIG_VLAN |
5054 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5056 MLX5_TXOFF_INFO(sciv_empw,
5057 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5058 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5059 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5061 MLX5_TXOFF_INFO(i_empw,
5062 MLX5_TXOFF_CONFIG_INLINE |
5063 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5065 MLX5_TXOFF_INFO(v_empw,
5066 MLX5_TXOFF_CONFIG_VLAN |
5067 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5069 MLX5_TXOFF_INFO(iv_empw,
5070 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5071 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5073 MLX5_TXOFF_INFO(full,
5074 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5075 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5076 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5077 MLX5_TXOFF_CONFIG_METADATA)
5079 MLX5_TXOFF_INFO(none,
5080 MLX5_TXOFF_CONFIG_NONE)
5082 MLX5_TXOFF_INFO(md,
5083 MLX5_TXOFF_CONFIG_METADATA)
5085 MLX5_TXOFF_INFO(mt,
5086 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5087 MLX5_TXOFF_CONFIG_METADATA)
5089 MLX5_TXOFF_INFO(mtsc,
5090 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5091 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5092 MLX5_TXOFF_CONFIG_METADATA)
5094 MLX5_TXOFF_INFO(mti,
5095 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5096 MLX5_TXOFF_CONFIG_INLINE |
5097 MLX5_TXOFF_CONFIG_METADATA)
5099 MLX5_TXOFF_INFO(mtv,
5100 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5101 MLX5_TXOFF_CONFIG_VLAN |
5102 MLX5_TXOFF_CONFIG_METADATA)
5104 MLX5_TXOFF_INFO(mtiv,
5105 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5106 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5107 MLX5_TXOFF_CONFIG_METADATA)
5109 MLX5_TXOFF_INFO(sc,
5110 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5111 MLX5_TXOFF_CONFIG_METADATA)
5113 MLX5_TXOFF_INFO(sci,
5114 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5115 MLX5_TXOFF_CONFIG_INLINE |
5116 MLX5_TXOFF_CONFIG_METADATA)
5118 MLX5_TXOFF_INFO(scv,
5119 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5120 MLX5_TXOFF_CONFIG_VLAN |
5121 MLX5_TXOFF_CONFIG_METADATA)
5123 MLX5_TXOFF_INFO(sciv,
5124 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5125 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5126 MLX5_TXOFF_CONFIG_METADATA)
5128 MLX5_TXOFF_INFO(i,
5129 MLX5_TXOFF_CONFIG_INLINE |
5130 MLX5_TXOFF_CONFIG_METADATA)
5132 MLX5_TXOFF_INFO(v,
5133 MLX5_TXOFF_CONFIG_VLAN |
5134 MLX5_TXOFF_CONFIG_METADATA)
5136 MLX5_TXOFF_INFO(iv,
5137 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5138 MLX5_TXOFF_CONFIG_METADATA)
5140 MLX5_TXOFF_INFO(none_mpw,
5141 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW |
5142 MLX5_TXOFF_CONFIG_MPW)
5144 MLX5_TXOFF_INFO(mci_mpw,
5145 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5146 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5147 MLX5_TXOFF_CONFIG_MPW)
5149 MLX5_TXOFF_INFO(mc_mpw,
5150 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5151 MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW)
5153 MLX5_TXOFF_INFO(i_mpw,
5154 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5155 MLX5_TXOFF_CONFIG_MPW)
5159 * Configure the Tx function to use. The routine checks the configured
5160 * Tx offloads for the device and selects the appropriate Tx burst
5161 * routine. There are multiple Tx burst routines compiled from
5162 * the same template, each optimized for a dedicated offload set.
5166 * Pointer to private data structure.
5169 * Pointer to selected Tx burst function.
5171 eth_tx_burst_t
5172 mlx5_select_tx_function(struct rte_eth_dev *dev)
5174 struct mlx5_priv *priv = dev->data->dev_private;
5175 struct mlx5_dev_config *config = &priv->config;
5176 uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
5177 unsigned int diff = 0, olx = 0, i, m;
5179 static_assert(MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE <=
5180 MLX5_DSEG_MAX, "invalid WQE max size");
5181 static_assert(MLX5_WQE_CSEG_SIZE == MLX5_WSEG_SIZE,
5182 "invalid WQE Control Segment size");
5183 static_assert(MLX5_WQE_ESEG_SIZE == MLX5_WSEG_SIZE,
5184 "invalid WQE Ethernet Segment size");
5185 static_assert(MLX5_WQE_DSEG_SIZE == MLX5_WSEG_SIZE,
5186 "invalid WQE Data Segment size");
5187 static_assert(MLX5_WQE_SIZE == 4 * MLX5_WSEG_SIZE,
5188 "invalid WQE size");
5190 if (tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
5191 /* We should support Multi-Segment Packets. */
5192 olx |= MLX5_TXOFF_CONFIG_MULTI;
5194 if (tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
5195 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
5196 DEV_TX_OFFLOAD_GRE_TNL_TSO |
5197 DEV_TX_OFFLOAD_IP_TNL_TSO |
5198 DEV_TX_OFFLOAD_UDP_TNL_TSO)) {
5199 /* We should support TCP Send Offload. */
5200 olx |= MLX5_TXOFF_CONFIG_TSO;
5202 if (tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO |
5203 DEV_TX_OFFLOAD_UDP_TNL_TSO |
5204 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
5205 /* We should support Software Parser for Tunnels. */
5206 olx |= MLX5_TXOFF_CONFIG_SWP;
5208 if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
5209 DEV_TX_OFFLOAD_UDP_CKSUM |
5210 DEV_TX_OFFLOAD_TCP_CKSUM |
5211 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
5212 /* We should support IP/TCP/UDP Checksums. */
5213 olx |= MLX5_TXOFF_CONFIG_CSUM;
5215 if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT) {
5216 /* We should support VLAN insertion. */
5217 olx |= MLX5_TXOFF_CONFIG_VLAN;
5219 if (priv->txqs_n && (*priv->txqs)[0]) {
5220 struct mlx5_txq_data *txd = (*priv->txqs)[0];
5222 if (txd->inlen_send) {
5224 * Check the data inline requirements. Data inlining
5225 * is enabled on a per-device basis, so we can check
5226 * the first Tx queue only.
5228 * If the device does not support VLAN insertion in the WQE
5229 * and some queues are requested to perform VLAN
5230 * insertion offload, then inlining must be enabled.
5232 olx |= MLX5_TXOFF_CONFIG_INLINE;
5235 if (config->mps == MLX5_MPW_ENHANCED &&
5236 config->txq_inline_min <= 0) {
5238 * The NIC supports Enhanced Multi-Packet Write
5239 * and does not require minimal inline data.
5241 olx |= MLX5_TXOFF_CONFIG_EMPW;
5243 if (rte_flow_dynf_metadata_avail()) {
5244 /* We should support Flow metadata. */
5245 olx |= MLX5_TXOFF_CONFIG_METADATA;
5247 if (config->mps == MLX5_MPW) {
5249 * The NIC supports Legacy Multi-Packet Write.
5250 * The MLX5_TXOFF_CONFIG_MPW controls the
5251 * descriptor building method in combination
5252 * with MLX5_TXOFF_CONFIG_EMPW.
5254 if (!(olx & (MLX5_TXOFF_CONFIG_TSO |
5255 MLX5_TXOFF_CONFIG_SWP |
5256 MLX5_TXOFF_CONFIG_VLAN |
5257 MLX5_TXOFF_CONFIG_METADATA)))
5258 olx |= MLX5_TXOFF_CONFIG_EMPW |
5259 MLX5_TXOFF_CONFIG_MPW;
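/*
 * Annotation (not in the original source): the resulting bit
 * combinations are interpreted as follows: EMPW alone selects the
 * Enhanced MPW descriptor format, EMPW together with MPW selects the
 * Legacy MPW format, and neither bit set means multi-packet writes
 * are not used at all.
 */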
5262 * Scan the routine table to find the routine that
5263 * satisfies the requested offloads with the minimal overhead.
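/*
 * Illustrative selection (annotation, not in the original source):
 * if olx requests CSUM | METADATA | EMPW there is no exact match in
 * txoff_func[], so the scan keeps the superset routine with the
 * fewest extra offload bits, e.g. sc_empw (one extra bit, SWP) is
 * preferred over mtsc_empw (three extra bits: MULTI, TSO and SWP).
 */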
5265 m = RTE_DIM(txoff_func);
5266 for (i = 0; i < RTE_DIM(txoff_func); i++) {
5267 unsigned int tmp;
5269 tmp = txoff_func[i].olx;
5270 if (tmp == olx) {
5271 /* Meets requested offloads exactly.*/
5272 m = i;
5273 break;
5274 }
5275 if ((tmp & olx) != olx) {
5276 /* Does not meet requested offloads at all. */
5277 continue;
5278 }
5279 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_EMPW)
5280 /* Do not enable eMPW if not configured. */
5281 continue;
5282 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_INLINE)
5283 /* Do not enable inlining if not configured. */
5284 continue;
5286 * Some routine meets the requirements.
5287 * Check whether it has the minimal amount
5288 * of not-requested offloads.
5290 tmp = __builtin_popcountl(tmp & ~olx);
5291 if (m >= RTE_DIM(txoff_func) || tmp < diff) {
5292 /* First or better match, save and continue. */
5293 m = i;
5294 diff = tmp;
5295 continue;
5296 }
5297 if (tmp == diff) {
5298 tmp = txoff_func[i].olx ^ txoff_func[m].olx;
5299 if (__builtin_ffsl(txoff_func[i].olx & ~tmp) <
5300 __builtin_ffsl(txoff_func[m].olx & ~tmp)) {
5301 /* Lighter not-requested offload. */
5302 m = i;
5303 }
5304 }
5305 }
5306 if (m >= RTE_DIM(txoff_func)) {
5307 DRV_LOG(DEBUG, "port %u has no selected Tx function"
5308 " for requested offloads %04X",
5309 dev->data->port_id, olx);
5310 return NULL;
5311 }
5312 DRV_LOG(DEBUG, "port %u has selected Tx function"
5313 " supporting offloads %04X/%04X",
5314 dev->data->port_id, olx, txoff_func[m].olx);
5315 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MULTI)
5316 DRV_LOG(DEBUG, "\tMULTI (multi segment)");
5317 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_TSO)
5318 DRV_LOG(DEBUG, "\tTSO (TCP send offload)");
5319 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_SWP)
5320 DRV_LOG(DEBUG, "\tSWP (software parser)");
5321 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_CSUM)
5322 DRV_LOG(DEBUG, "\tCSUM (checksum offload)");
5323 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_INLINE)
5324 DRV_LOG(DEBUG, "\tINLIN (inline data)");
5325 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_VLAN)
5326 DRV_LOG(DEBUG, "\tVLANI (VLAN insertion)");
5327 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_METADATA)
5328 DRV_LOG(DEBUG, "\tMETAD (tx Flow metadata)");
5329 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_EMPW) {
5330 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MPW)
5331 DRV_LOG(DEBUG, "\tMPW (Legacy MPW)");
5332 else
5333 DRV_LOG(DEBUG, "\tEMPW (Enhanced MPW)");
5334 }
5335 return txoff_func[m].func;
5336 }