1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015-2019 Mellanox Technologies, Ltd
12 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
14 #pragma GCC diagnostic ignored "-Wpedantic"
16 #include <infiniband/verbs.h>
17 #include <infiniband/mlx5dv.h>
19 #pragma GCC diagnostic error "-Wpedantic"
23 #include <rte_mempool.h>
24 #include <rte_prefetch.h>
25 #include <rte_common.h>
26 #include <rte_branch_prediction.h>
27 #include <rte_ether.h>
28 #include <rte_cycles.h>
32 #include "mlx5_utils.h"
33 #include "mlx5_rxtx.h"
34 #include "mlx5_autoconf.h"
35 #include "mlx5_defs.h"
38 /* TX burst subroutines return codes. */
39 enum mlx5_txcmp_code {
40 MLX5_TXCMP_CODE_EXIT = 0,
41 MLX5_TXCMP_CODE_ERROR,
42 MLX5_TXCMP_CODE_SINGLE,
43 MLX5_TXCMP_CODE_MULTI,
49 * These defines are used to configure the set of Tx burst routine options
50 * supported at compile time. The options that are not specified are
51 * optimized out because the if conditions can be evaluated at compile time.
52 * The offloads with a bigger runtime check overhead (requiring more CPU
53 * cycles to skip) should have a bigger index - this is needed to select
54 * the best matching routine when there is no exact match and some
55 * offloads are not actually requested.
57 #define MLX5_TXOFF_CONFIG_MULTI (1u << 0) /* Multi-segment packets.*/
58 #define MLX5_TXOFF_CONFIG_TSO (1u << 1) /* TCP send offload supported.*/
59 #define MLX5_TXOFF_CONFIG_SWP (1u << 2) /* Tunnels/SW Parser offloads.*/
60 #define MLX5_TXOFF_CONFIG_CSUM (1u << 3) /* Check Sums offloaded. */
61 #define MLX5_TXOFF_CONFIG_INLINE (1u << 4) /* Data inlining supported. */
62 #define MLX5_TXOFF_CONFIG_VLAN (1u << 5) /* VLAN insertion supported.*/
63 #define MLX5_TXOFF_CONFIG_METADATA (1u << 6) /* Flow metadata. */
64 #define MLX5_TXOFF_CONFIG_EMPW (1u << 8) /* Enhanced MPW supported.*/
65 #define MLX5_TXOFF_CONFIG_MPW (1u << 9) /* Legacy MPW supported.*/
67 /* The most common offloads groups. */
68 #define MLX5_TXOFF_CONFIG_NONE 0
69 #define MLX5_TXOFF_CONFIG_FULL (MLX5_TXOFF_CONFIG_MULTI | \
70 MLX5_TXOFF_CONFIG_TSO | \
71 MLX5_TXOFF_CONFIG_SWP | \
72 MLX5_TXOFF_CONFIG_CSUM | \
73 MLX5_TXOFF_CONFIG_INLINE | \
74 MLX5_TXOFF_CONFIG_VLAN | \
75 MLX5_TXOFF_CONFIG_METADATA)
77 #define MLX5_TXOFF_CONFIG(mask) (olx & MLX5_TXOFF_CONFIG_##mask)
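/*
 * Illustrative note (not part of the original sources): because 'olx' is a
 * compile-time constant in every template instantiation, a guard such as
 *
 *	if (MLX5_TXOFF_CONFIG(TSO)) {
 *		... TSO-specific handling ...
 *	}
 *
 * is either kept or removed entirely by the compiler, which is how the
 * unused offload branches are optimized out of each generated routine.
 */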
79 #define MLX5_TXOFF_DECL(func, olx) \
80 static uint16_t mlx5_tx_burst_##func(void *txq, \
81 struct rte_mbuf **pkts, \
84 return mlx5_tx_burst_tmpl((struct mlx5_txq_data *)txq, \
85 pkts, pkts_n, (olx)); \
88 #define MLX5_TXOFF_INFO(func, olx) {mlx5_tx_burst_##func, olx},
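/*
 * Hypothetical usage example (the real instantiations appear further down
 * in this file; shown here only to illustrate the two macros above):
 *
 *	MLX5_TXOFF_DECL(full, MLX5_TXOFF_CONFIG_FULL)
 *	MLX5_TXOFF_INFO(full, MLX5_TXOFF_CONFIG_FULL)
 *
 * The first expands to a mlx5_tx_burst_full() wrapper that calls the
 * template routine with a constant offload mask, the second emits the
 * corresponding table entry.
 */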
90 static __rte_always_inline uint32_t
91 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe);
93 static __rte_always_inline int
94 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
95 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe);
97 static __rte_always_inline uint32_t
98 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe);
100 static __rte_always_inline void
101 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
102 volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res);
104 static __rte_always_inline void
105 mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx,
106 const unsigned int strd_n);
109 mlx5_queue_state_modify(struct rte_eth_dev *dev,
110 struct mlx5_mp_arg_queue_state_modify *sm);
113 mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *restrict tcp,
114 volatile struct mlx5_cqe *restrict cqe,
118 mlx5_lro_update_hdr(uint8_t *restrict padd,
119 volatile struct mlx5_cqe *restrict cqe,
122 uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
123 [0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
126 uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned;
127 uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned;
130 * Build a table to translate Rx completion flags to packet type.
132 * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
135 mlx5_set_ptype_table(void)
138 uint32_t (*p)[RTE_DIM(mlx5_ptype_table)] = &mlx5_ptype_table;
140 /* Last entry must not be overwritten, reserved for errored packet. */
141 for (i = 0; i < RTE_DIM(mlx5_ptype_table) - 1; ++i)
142 (*p)[i] = RTE_PTYPE_UNKNOWN;
144 * The index to the array should have:
145 * bit[1:0] = l3_hdr_type
146 * bit[4:2] = l4_hdr_type
149 * bit[7] = outer_l3_type
152 (*p)[0x00] = RTE_PTYPE_L2_ETHER;
154 (*p)[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
155 RTE_PTYPE_L4_NONFRAG;
156 (*p)[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
157 RTE_PTYPE_L4_NONFRAG;
159 (*p)[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
161 (*p)[0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
164 (*p)[0x05] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
166 (*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
168 (*p)[0x0d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
170 (*p)[0x0e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
172 (*p)[0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
174 (*p)[0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
177 (*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
179 (*p)[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
181 /* Repeat with outer_l3_type being set. Just in case. */
182 (*p)[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
183 RTE_PTYPE_L4_NONFRAG;
184 (*p)[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
185 RTE_PTYPE_L4_NONFRAG;
186 (*p)[0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
188 (*p)[0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
190 (*p)[0x85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
192 (*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
194 (*p)[0x8d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
196 (*p)[0x8e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
198 (*p)[0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
200 (*p)[0x92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
202 (*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
204 (*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
207 (*p)[0x40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
208 (*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
209 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
210 RTE_PTYPE_INNER_L4_NONFRAG;
211 (*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
212 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
213 RTE_PTYPE_INNER_L4_NONFRAG;
214 (*p)[0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
215 (*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
216 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
217 RTE_PTYPE_INNER_L4_NONFRAG;
218 (*p)[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
219 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
220 RTE_PTYPE_INNER_L4_NONFRAG;
221 /* Tunneled - Fragmented */
222 (*p)[0x61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
223 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
224 RTE_PTYPE_INNER_L4_FRAG;
225 (*p)[0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
226 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
227 RTE_PTYPE_INNER_L4_FRAG;
228 (*p)[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
229 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
230 RTE_PTYPE_INNER_L4_FRAG;
231 (*p)[0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
232 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
233 RTE_PTYPE_INNER_L4_FRAG;
235 (*p)[0x45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
236 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
237 RTE_PTYPE_INNER_L4_TCP;
238 (*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
239 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
240 RTE_PTYPE_INNER_L4_TCP;
241 (*p)[0x4d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
242 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
243 RTE_PTYPE_INNER_L4_TCP;
244 (*p)[0x4e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
245 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
246 RTE_PTYPE_INNER_L4_TCP;
247 (*p)[0x51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
248 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
249 RTE_PTYPE_INNER_L4_TCP;
250 (*p)[0x52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
251 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
252 RTE_PTYPE_INNER_L4_TCP;
253 (*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
254 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
255 RTE_PTYPE_INNER_L4_TCP;
256 (*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
257 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
258 RTE_PTYPE_INNER_L4_TCP;
259 (*p)[0xcd] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
260 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
261 RTE_PTYPE_INNER_L4_TCP;
262 (*p)[0xce] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
263 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
264 RTE_PTYPE_INNER_L4_TCP;
265 (*p)[0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
266 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
267 RTE_PTYPE_INNER_L4_TCP;
268 (*p)[0xd2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
269 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
270 RTE_PTYPE_INNER_L4_TCP;
272 (*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
273 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
274 RTE_PTYPE_INNER_L4_UDP;
275 (*p)[0x4a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
276 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
277 RTE_PTYPE_INNER_L4_UDP;
278 (*p)[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
279 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
280 RTE_PTYPE_INNER_L4_UDP;
281 (*p)[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
282 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
283 RTE_PTYPE_INNER_L4_UDP;
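	/*
	 * Worked example (follows from the index layout documented above):
	 * a non-tunneled completion with l3_hdr_type = 2 in bit[1:0] and
	 * l4_hdr_type = 0 in bit[4:2] yields index 0x02, which resolves to
	 * RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
	 * RTE_PTYPE_L4_NONFRAG as set above.
	 */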
287 * Build a table to translate packet checksum requests to the Verbs checksum type.
290 mlx5_set_cksum_table(void)
296 * The index should have:
297 * bit[0] = PKT_TX_TCP_SEG
298 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
299 * bit[4] = PKT_TX_IP_CKSUM
300 * bit[8] = PKT_TX_OUTER_IP_CKSUM
303 for (i = 0; i < RTE_DIM(mlx5_cksum_table); ++i) {
306 /* Tunneled packet. */
307 if (i & (1 << 8)) /* Outer IP. */
308 v |= MLX5_ETH_WQE_L3_CSUM;
309 if (i & (1 << 4)) /* Inner IP. */
310 v |= MLX5_ETH_WQE_L3_INNER_CSUM;
311 if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
312 v |= MLX5_ETH_WQE_L4_INNER_CSUM;
315 if (i & (1 << 4)) /* IP. */
316 v |= MLX5_ETH_WQE_L3_CSUM;
317 if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
318 v |= MLX5_ETH_WQE_L4_CSUM;
320 mlx5_cksum_table[i] = v;
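		/*
		 * Worked example (derived from the loop above): a
		 * non-tunneled index with only bit[4] (PKT_TX_IP_CKSUM) set
		 * gets MLX5_ETH_WQE_L3_CSUM; setting a bit in [3:2] or
		 * bit[0] (L4 checksum or TSO) additionally ORs in
		 * MLX5_ETH_WQE_L4_CSUM.
		 */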
325 * Build a table to translate the mbuf packet type to the Verbs SWP type.
328 mlx5_set_swp_types_table(void)
334 * The index should have:
335 * bit[0:1] = PKT_TX_L4_MASK
336 * bit[4] = PKT_TX_IPV6
337 * bit[8] = PKT_TX_OUTER_IPV6
338 * bit[9] = PKT_TX_OUTER_UDP
340 for (i = 0; i < RTE_DIM(mlx5_swp_types_table); ++i) {
343 v |= MLX5_ETH_WQE_L3_OUTER_IPV6;
345 v |= MLX5_ETH_WQE_L4_OUTER_UDP;
347 v |= MLX5_ETH_WQE_L3_INNER_IPV6;
348 if ((i & 3) == (PKT_TX_UDP_CKSUM >> 52))
349 v |= MLX5_ETH_WQE_L4_INNER_UDP;
350 mlx5_swp_types_table[i] = v;
355 * Set Software Parser flags and offsets in Ethernet Segment of WQE.
356 * Flags must be initialized to zero beforehand.
359 * Pointer to burst routine local context.
361 * Pointer to store Software Parser flags
363 * Configured Tx offloads mask. It is fully defined at
364 * compile time and may be used for optimization.
367 * Software Parser offsets packed in dword.
368 * Software Parser flags are set by pointer.
370 static __rte_always_inline uint32_t
371 txq_mbuf_to_swp(struct mlx5_txq_local *restrict loc,
376 unsigned int idx, off;
379 if (!MLX5_TXOFF_CONFIG(SWP))
381 ol = loc->mbuf->ol_flags;
382 tunnel = ol & PKT_TX_TUNNEL_MASK;
384 * Check whether Software Parser is required.
385 * Only customized tunnels may ask for it.
387 if (likely(tunnel != PKT_TX_TUNNEL_UDP && tunnel != PKT_TX_TUNNEL_IP))
390 * The index should have:
391 * bit[0:1] = PKT_TX_L4_MASK
392 * bit[4] = PKT_TX_IPV6
393 * bit[8] = PKT_TX_OUTER_IPV6
394 * bit[9] = PKT_TX_OUTER_UDP
396 idx = (ol & (PKT_TX_L4_MASK | PKT_TX_IPV6 | PKT_TX_OUTER_IPV6)) >> 52;
397 idx |= (tunnel == PKT_TX_TUNNEL_UDP) ? (1 << 9) : 0;
398 *swp_flags = mlx5_swp_types_table[idx];
400 * Set offsets for SW parser. Since ConnectX-5, SW parser just
401 * complements HW parser. SW parser starts to engage only if HW parser
402 * can't reach a header. For the older devices, HW parser will not kick
403 * in if any of SWP offsets is set. Therefore, all of the L3 offsets
404 * should be set regardless of HW offload.
406 off = loc->mbuf->outer_l2_len;
407 if (MLX5_TXOFF_CONFIG(VLAN) && ol & PKT_TX_VLAN_PKT)
408 off += sizeof(struct rte_vlan_hdr);
409 set = (off >> 1) << 8; /* Outer L3 offset. */
410 off += loc->mbuf->outer_l3_len;
411 if (tunnel == PKT_TX_TUNNEL_UDP)
412 set |= off >> 1; /* Outer L4 offset. */
413 if (ol & (PKT_TX_IPV4 | PKT_TX_IPV6)) { /* Inner IP. */
414 const uint64_t csum = ol & PKT_TX_L4_MASK;
415 off += loc->mbuf->l2_len;
416 set |= (off >> 1) << 24; /* Inner L3 offset. */
417 if (csum == PKT_TX_TCP_CKSUM ||
418 csum == PKT_TX_UDP_CKSUM ||
419 (MLX5_TXOFF_CONFIG(TSO) && ol & PKT_TX_TCP_SEG)) {
420 off += loc->mbuf->l3_len;
421 set |= (off >> 1) << 16; /* Inner L4 offset. */
424 set = rte_cpu_to_le_32(set);
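	/*
	 * Layout of the dword assembled above, before the little-endian
	 * conversion (each offset is stored divided by two, i.e. in 2-byte
	 * units):
	 *	bits [7:0]   - outer L4 offset (UDP tunnel only)
	 *	bits [15:8]  - outer L3 offset
	 *	bits [23:16] - inner L4 offset
	 *	bits [31:24] - inner L3 offset
	 */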
429 * Convert the Checksum offloads to Verbs.
432 * Pointer to the mbuf.
435 * Converted checksum flags.
437 static __rte_always_inline uint8_t
438 txq_ol_cksum_to_cs(struct rte_mbuf *buf)
441 uint8_t is_tunnel = !!(buf->ol_flags & PKT_TX_TUNNEL_MASK);
442 const uint64_t ol_flags_mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK |
443 PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM;
446 * The index should have:
447 * bit[0] = PKT_TX_TCP_SEG
448 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
449 * bit[4] = PKT_TX_IP_CKSUM
450 * bit[8] = PKT_TX_OUTER_IP_CKSUM
453 idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (!!is_tunnel << 9);
454 return mlx5_cksum_table[idx];
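	/*
	 * Example (per the index layout above): an mbuf requesting
	 * PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM with no tunnel flag produces
	 * an index with bit[4] and one of bits [3:2] set and bit[9] clear,
	 * selecting the MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM entry
	 * built by mlx5_set_cksum_table().
	 */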
458 * Internal function to compute the number of used descriptors in an RX queue
464 * The number of used Rx descriptors.
467 rx_queue_count(struct mlx5_rxq_data *rxq)
469 struct rxq_zip *zip = &rxq->zip;
470 volatile struct mlx5_cqe *cqe;
471 const unsigned int cqe_n = (1 << rxq->cqe_n);
472 const unsigned int cqe_cnt = cqe_n - 1;
476 /* if we are processing a compressed cqe */
478 used = zip->cqe_cnt - zip->ca;
484 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
485 while (check_cqe(cqe, cqe_n, cq_ci) != MLX5_CQE_STATUS_HW_OWN) {
489 op_own = cqe->op_own;
490 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
491 n = rte_be_to_cpu_32(cqe->byte_cnt);
496 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
498 used = RTE_MIN(used, (1U << rxq->elts_n) - 1);
503 * DPDK callback to check the status of an Rx descriptor.
508 * The index of the descriptor in the ring.
511 * The status of the Rx descriptor.
514 mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
516 struct mlx5_rxq_data *rxq = rx_queue;
517 struct mlx5_rxq_ctrl *rxq_ctrl =
518 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
519 struct rte_eth_dev *dev = ETH_DEV(rxq_ctrl->priv);
521 if (dev->rx_pkt_burst != mlx5_rx_burst) {
525 if (offset >= (1 << rxq->elts_n)) {
529 if (offset < rx_queue_count(rxq))
530 return RTE_ETH_RX_DESC_DONE;
531 return RTE_ETH_RX_DESC_AVAIL;
535 * DPDK callback to get the number of used descriptors in an Rx queue
538 * Pointer to the device structure.
544 * The number of used Rx descriptors.
545 * -EINVAL if the queue is invalid
548 mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
550 struct mlx5_priv *priv = dev->data->dev_private;
551 struct mlx5_rxq_data *rxq;
553 if (dev->rx_pkt_burst != mlx5_rx_burst) {
557 rxq = (*priv->rxqs)[rx_queue_id];
562 return rx_queue_count(rxq);
565 #define MLX5_SYSTEM_LOG_DIR "/var/log"
567 * Dump debug information to log file.
572 * If not NULL this string is printed as a header to the output
573 * and the output will be in hexadecimal view.
575 * This is the buffer address to print out.
577 * The number of bytes to dump out.
580 mlx5_dump_debug_information(const char *fname, const char *hex_title,
581 const void *buf, unsigned int hex_len)
585 MKSTR(path, "%s/%s", MLX5_SYSTEM_LOG_DIR, fname);
586 fd = fopen(path, "a+");
588 DRV_LOG(WARNING, "cannot open %s for debug dump", path);
589 MKSTR(path2, "./%s", fname);
590 fd = fopen(path2, "a+");
592 DRV_LOG(ERR, "cannot open %s for debug dump", path2);
595 DRV_LOG(INFO, "New debug dump in file %s", path2);
597 DRV_LOG(INFO, "New debug dump in file %s", path);
600 rte_hexdump(fd, hex_title, buf, hex_len);
602 fprintf(fd, "%s", (const char *)buf);
603 fprintf(fd, "\n\n\n");
608 * Move QP from error state to running state and initialize indexes.
611 * Pointer to TX queue control structure.
614 * 0 on success, else -1.
617 tx_recover_qp(struct mlx5_txq_ctrl *txq_ctrl)
619 struct mlx5_mp_arg_queue_state_modify sm = {
621 .queue_id = txq_ctrl->txq.idx,
624 if (mlx5_queue_state_modify(ETH_DEV(txq_ctrl->priv), &sm))
626 txq_ctrl->txq.wqe_ci = 0;
627 txq_ctrl->txq.wqe_pi = 0;
628 txq_ctrl->txq.elts_comp = 0;
632 /* Return 1 if the error CQE is already signed; otherwise, sign it and return 0. */
634 check_err_cqe_seen(volatile struct mlx5_err_cqe *err_cqe)
636 static const uint8_t magic[] = "seen";
640 for (i = 0; i < sizeof(magic); ++i)
641 if (!ret || err_cqe->rsvd1[i] != magic[i]) {
643 err_cqe->rsvd1[i] = magic[i];
652 * Pointer to TX queue structure.
654 * Pointer to the error CQE.
657 * Negative value if queue recovery failed, otherwise
658 * the error completion entry is handled successfully.
661 mlx5_tx_error_cqe_handle(struct mlx5_txq_data *restrict txq,
662 volatile struct mlx5_err_cqe *err_cqe)
664 if (err_cqe->syndrome != MLX5_CQE_SYNDROME_WR_FLUSH_ERR) {
665 const uint16_t wqe_m = ((1 << txq->wqe_n) - 1);
666 struct mlx5_txq_ctrl *txq_ctrl =
667 container_of(txq, struct mlx5_txq_ctrl, txq);
668 uint16_t new_wqe_pi = rte_be_to_cpu_16(err_cqe->wqe_counter);
669 int seen = check_err_cqe_seen(err_cqe);
671 if (!seen && txq_ctrl->dump_file_n <
672 txq_ctrl->priv->config.max_dump_files_num) {
673 MKSTR(err_str, "Unexpected CQE error syndrome "
674 "0x%02x CQN = %u SQN = %u wqe_counter = %u "
675 "wq_ci = %u cq_ci = %u", err_cqe->syndrome,
676 txq->cqe_s, txq->qp_num_8s >> 8,
677 rte_be_to_cpu_16(err_cqe->wqe_counter),
678 txq->wqe_ci, txq->cq_ci);
679 MKSTR(name, "dpdk_mlx5_port_%u_txq_%u_index_%u_%u",
680 PORT_ID(txq_ctrl->priv), txq->idx,
681 txq_ctrl->dump_file_n, (uint32_t)rte_rdtsc());
682 mlx5_dump_debug_information(name, NULL, err_str, 0);
683 mlx5_dump_debug_information(name, "MLX5 Error CQ:",
684 (const void *)((uintptr_t)
688 mlx5_dump_debug_information(name, "MLX5 Error SQ:",
689 (const void *)((uintptr_t)
693 txq_ctrl->dump_file_n++;
697 * Count errors in WQE units.
698 * Later it can be improved to count error packets,
699 * for example, by parsing the SQ to find how many packets
700 * should be counted for each WQE.
702 txq->stats.oerrors += ((txq->wqe_ci & wqe_m) -
704 if (tx_recover_qp(txq_ctrl)) {
705 /* Recovering failed - retry later on the same WQE. */
708 /* Release all the remaining buffers. */
709 txq_free_elts(txq_ctrl);
715 * Translate RX completion flags to packet type.
718 * Pointer to RX queue structure.
722 * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
725 * Packet type for struct rte_mbuf.
727 static inline uint32_t
728 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
731 uint8_t pinfo = cqe->pkt_info;
732 uint16_t ptype = cqe->hdr_type_etc;
735 * The index to the array should have:
736 * bit[1:0] = l3_hdr_type
737 * bit[4:2] = l4_hdr_type
740 * bit[7] = outer_l3_type
742 idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10);
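	/*
	 * The expression below adds the per-queue tunnel packet type only
	 * when the tunneled indication of the index (bit 6, taken from
	 * pkt_info) is set; multiplying by 0 or 1 avoids a branch.
	 */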
743 return mlx5_ptype_table[idx] | rxq->tunnel * !!(idx & (1 << 6));
747 * Initialize Rx WQ and indexes.
750 * Pointer to RX queue structure.
753 mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
755 const unsigned int wqe_n = 1 << rxq->elts_n;
758 for (i = 0; (i != wqe_n); ++i) {
759 volatile struct mlx5_wqe_data_seg *scat;
763 if (mlx5_rxq_mprq_enabled(rxq)) {
764 struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[i];
766 scat = &((volatile struct mlx5_wqe_mprq *)
768 addr = (uintptr_t)mlx5_mprq_buf_addr(buf,
769 1 << rxq->strd_num_n);
770 byte_count = (1 << rxq->strd_sz_n) *
771 (1 << rxq->strd_num_n);
773 struct rte_mbuf *buf = (*rxq->elts)[i];
775 scat = &((volatile struct mlx5_wqe_data_seg *)
777 addr = rte_pktmbuf_mtod(buf, uintptr_t);
778 byte_count = DATA_LEN(buf);
780 /* scat->addr must be able to store a pointer. */
781 assert(sizeof(scat->addr) >= sizeof(uintptr_t));
782 *scat = (struct mlx5_wqe_data_seg){
783 .addr = rte_cpu_to_be_64(addr),
784 .byte_count = rte_cpu_to_be_32(byte_count),
785 .lkey = mlx5_rx_addr2mr(rxq, addr),
788 rxq->consumed_strd = 0;
789 rxq->decompressed = 0;
791 rxq->zip = (struct rxq_zip){
794 /* Update doorbell counter. */
795 rxq->rq_ci = wqe_n >> rxq->sges_n;
797 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
801 * Modify a Verbs/DevX queue state.
802 * This must be called from the primary process.
805 * Pointer to Ethernet device.
807 * State modify request parameters.
810 * 0 in case of success else non-zero value and rte_errno is set.
813 mlx5_queue_state_modify_primary(struct rte_eth_dev *dev,
814 const struct mlx5_mp_arg_queue_state_modify *sm)
817 struct mlx5_priv *priv = dev->data->dev_private;
820 struct mlx5_rxq_data *rxq = (*priv->rxqs)[sm->queue_id];
821 struct mlx5_rxq_ctrl *rxq_ctrl =
822 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
824 if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV) {
825 struct ibv_wq_attr mod = {
826 .attr_mask = IBV_WQ_ATTR_STATE,
827 .wq_state = sm->state,
830 ret = mlx5_glue->modify_wq(rxq_ctrl->obj->wq, &mod);
831 } else { /* rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ. */
832 struct mlx5_devx_modify_rq_attr rq_attr;
834 memset(&rq_attr, 0, sizeof(rq_attr));
835 if (sm->state == IBV_WQS_RESET) {
836 rq_attr.rq_state = MLX5_RQC_STATE_ERR;
837 rq_attr.state = MLX5_RQC_STATE_RST;
838 } else if (sm->state == IBV_WQS_RDY) {
839 rq_attr.rq_state = MLX5_RQC_STATE_RST;
840 rq_attr.state = MLX5_RQC_STATE_RDY;
841 } else if (sm->state == IBV_WQS_ERR) {
842 rq_attr.rq_state = MLX5_RQC_STATE_RDY;
843 rq_attr.state = MLX5_RQC_STATE_ERR;
845 ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq,
849 DRV_LOG(ERR, "Cannot change Rx WQ state to %u - %s",
850 sm->state, strerror(errno));
855 struct mlx5_txq_data *txq = (*priv->txqs)[sm->queue_id];
856 struct mlx5_txq_ctrl *txq_ctrl =
857 container_of(txq, struct mlx5_txq_ctrl, txq);
858 struct ibv_qp_attr mod = {
859 .qp_state = IBV_QPS_RESET,
860 .port_num = (uint8_t)priv->ibv_port,
862 struct ibv_qp *qp = txq_ctrl->obj->qp;
864 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
866 DRV_LOG(ERR, "Cannot change the Tx QP state to RESET "
867 "%s", strerror(errno));
871 mod.qp_state = IBV_QPS_INIT;
872 ret = mlx5_glue->modify_qp(qp, &mod,
873 (IBV_QP_STATE | IBV_QP_PORT));
875 DRV_LOG(ERR, "Cannot change Tx QP state to INIT %s",
880 mod.qp_state = IBV_QPS_RTR;
881 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
883 DRV_LOG(ERR, "Cannot change Tx QP state to RTR %s",
888 mod.qp_state = IBV_QPS_RTS;
889 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
891 DRV_LOG(ERR, "Cannot change Tx QP state to RTS %s",
901 * Modify a Verbs queue state.
904 * Pointer to Ethernet device.
906 * State modify request parameters.
909 * 0 in case of success else non-zero value.
912 mlx5_queue_state_modify(struct rte_eth_dev *dev,
913 struct mlx5_mp_arg_queue_state_modify *sm)
917 switch (rte_eal_process_type()) {
918 case RTE_PROC_PRIMARY:
919 ret = mlx5_queue_state_modify_primary(dev, sm);
921 case RTE_PROC_SECONDARY:
922 ret = mlx5_mp_req_queue_state_modify(dev, sm);
932 * The function moves the RQ state to reset when the first error CQE is
933 * seen, then the CQ is drained by the caller's loop. When the CQ is empty,
934 * it moves the RQ state to ready and initializes the RQ.
935 * Identifying the next CQE and counting errors remain the caller's responsibility.
938 * Pointer to RX queue structure.
940 * 1 when called from vectorized Rx burst, need to prepare mbufs for the RQ.
941 * 0 when called from non-vectorized Rx burst.
944 * -1 in case of recovery error, otherwise the CQE status.
947 mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
949 const uint16_t cqe_n = 1 << rxq->cqe_n;
950 const uint16_t cqe_mask = cqe_n - 1;
951 const unsigned int wqe_n = 1 << rxq->elts_n;
952 struct mlx5_rxq_ctrl *rxq_ctrl =
953 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
955 volatile struct mlx5_cqe *cqe;
956 volatile struct mlx5_err_cqe *err_cqe;
958 .cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask],
960 struct mlx5_mp_arg_queue_state_modify sm;
963 switch (rxq->err_state) {
964 case MLX5_RXQ_ERR_STATE_NO_ERROR:
965 rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_RESET;
967 case MLX5_RXQ_ERR_STATE_NEED_RESET:
969 sm.queue_id = rxq->idx;
970 sm.state = IBV_WQS_RESET;
971 if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv), &sm))
973 if (rxq_ctrl->dump_file_n <
974 rxq_ctrl->priv->config.max_dump_files_num) {
975 MKSTR(err_str, "Unexpected CQE error syndrome "
976 "0x%02x CQN = %u RQN = %u wqe_counter = %u"
977 " rq_ci = %u cq_ci = %u", u.err_cqe->syndrome,
978 rxq->cqn, rxq_ctrl->wqn,
979 rte_be_to_cpu_16(u.err_cqe->wqe_counter),
980 rxq->rq_ci << rxq->sges_n, rxq->cq_ci);
981 MKSTR(name, "dpdk_mlx5_port_%u_rxq_%u_%u",
982 rxq->port_id, rxq->idx, (uint32_t)rte_rdtsc());
983 mlx5_dump_debug_information(name, NULL, err_str, 0);
984 mlx5_dump_debug_information(name, "MLX5 Error CQ:",
985 (const void *)((uintptr_t)
987 sizeof(*u.cqe) * cqe_n);
988 mlx5_dump_debug_information(name, "MLX5 Error RQ:",
989 (const void *)((uintptr_t)
992 rxq_ctrl->dump_file_n++;
994 rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_READY;
996 case MLX5_RXQ_ERR_STATE_NEED_READY:
997 ret = check_cqe(u.cqe, cqe_n, rxq->cq_ci);
998 if (ret == MLX5_CQE_STATUS_HW_OWN) {
1000 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1003 * The RQ consumer index must be zeroed while moving
1004 * from RESET state to RDY state.
1006 *rxq->rq_db = rte_cpu_to_be_32(0);
1009 sm.queue_id = rxq->idx;
1010 sm.state = IBV_WQS_RDY;
1011 if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv),
1015 const uint16_t q_mask = wqe_n - 1;
1017 struct rte_mbuf **elt;
1019 unsigned int n = wqe_n - (rxq->rq_ci -
1022 for (i = 0; i < (int)n; ++i) {
1023 elt_idx = (rxq->rq_ci + i) & q_mask;
1024 elt = &(*rxq->elts)[elt_idx];
1025 *elt = rte_mbuf_raw_alloc(rxq->mp);
1027 for (i--; i >= 0; --i) {
1028 elt_idx = (rxq->rq_ci +
1032 rte_pktmbuf_free_seg
1038 for (i = 0; i < (int)wqe_n; ++i) {
1039 elt = &(*rxq->elts)[i];
1041 (uint16_t)((*elt)->buf_len -
1042 rte_pktmbuf_headroom(*elt));
1044 /* Padding with a fake mbuf for vec Rx. */
1045 for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
1046 (*rxq->elts)[wqe_n + i] =
1049 mlx5_rxq_initialize(rxq);
1050 rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
1059 * Get size of the next packet for a given CQE. For compressed CQEs, the
1060 * consumer index is updated only once all packets of the current one have
1064 * Pointer to RX queue.
1068 * Store pointer to mini-CQE if compressed. Otherwise, the pointer is not
1072 * 0 in case of empty CQE, otherwise the packet size in bytes.
1075 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
1076 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe)
1078 struct rxq_zip *zip = &rxq->zip;
1079 uint16_t cqe_n = cqe_cnt + 1;
1085 /* Process compressed data in the CQE and mini arrays. */
1087 volatile struct mlx5_mini_cqe8 (*mc)[8] =
1088 (volatile struct mlx5_mini_cqe8 (*)[8])
1089 (uintptr_t)(&(*rxq->cqes)[zip->ca &
1092 len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt);
1093 *mcqe = &(*mc)[zip->ai & 7];
1094 if ((++zip->ai & 7) == 0) {
1095 /* Invalidate consumed CQEs */
1098 while (idx != end) {
1099 (*rxq->cqes)[idx & cqe_cnt].op_own =
1100 MLX5_CQE_INVALIDATE;
1104 * Increment consumer index to skip the number
1105 * of CQEs consumed. Hardware leaves holes in
1106 * the CQ ring for software use.
1111 if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) {
1112 /* Invalidate the rest */
1116 while (idx != end) {
1117 (*rxq->cqes)[idx & cqe_cnt].op_own =
1118 MLX5_CQE_INVALIDATE;
1121 rxq->cq_ci = zip->cq_ci;
1125 * No compressed data, get next CQE and verify if it is
1132 ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
1133 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
1134 if (unlikely(ret == MLX5_CQE_STATUS_ERR ||
1136 ret = mlx5_rx_err_handle(rxq, 0);
1137 if (ret == MLX5_CQE_STATUS_HW_OWN ||
1145 op_own = cqe->op_own;
1146 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) {
1147 volatile struct mlx5_mini_cqe8 (*mc)[8] =
1148 (volatile struct mlx5_mini_cqe8 (*)[8])
1149 (uintptr_t)(&(*rxq->cqes)
1153 /* Fix endianness. */
1154 zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
1156 * Current mini array position is the one
1157 * returned by check_cqe64().
1159 * If completion comprises several mini arrays,
1160 * as a special case the second one is located
1161 * 7 CQEs after the initial CQE instead of 8
1162 * for subsequent ones.
1164 zip->ca = rxq->cq_ci;
1165 zip->na = zip->ca + 7;
1166 /* Compute the next non-compressed CQE. */
1168 zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
1169 /* Get packet size to return. */
1170 len = rte_be_to_cpu_32((*mc)[0].byte_cnt);
1173 /* Prefetch all to be invalidated */
1176 while (idx != end) {
1177 rte_prefetch0(&(*rxq->cqes)[(idx) &
1182 len = rte_be_to_cpu_32(cqe->byte_cnt);
1185 if (unlikely(rxq->err_state)) {
1186 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1187 ++rxq->stats.idropped;
1195 * Translate RX completion flags to offload flags.
1201 * Offload flags (ol_flags) for struct rte_mbuf.
1203 static inline uint32_t
1204 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe)
1206 uint32_t ol_flags = 0;
1207 uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);
1211 MLX5_CQE_RX_L3_HDR_VALID,
1212 PKT_RX_IP_CKSUM_GOOD) |
1214 MLX5_CQE_RX_L4_HDR_VALID,
1215 PKT_RX_L4_CKSUM_GOOD);
1220 * Fill in mbuf fields from RX completion flags.
1221 * Note that pkt->ol_flags should be initialized outside of this function.
1224 * Pointer to RX queue.
1229 * @param rss_hash_res
1230 * Packet RSS Hash result.
1233 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
1234 volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res)
1236 /* Update packet information. */
1237 pkt->packet_type = rxq_cq_to_pkt_type(rxq, cqe);
1238 if (rss_hash_res && rxq->rss_hash) {
1239 pkt->hash.rss = rss_hash_res;
1240 pkt->ol_flags |= PKT_RX_RSS_HASH;
1242 if (rxq->mark && MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) {
1243 pkt->ol_flags |= PKT_RX_FDIR;
1244 if (cqe->sop_drop_qpn !=
1245 rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) {
1246 uint32_t mark = cqe->sop_drop_qpn;
1248 pkt->ol_flags |= PKT_RX_FDIR_ID;
1249 pkt->hash.fdir.hi = mlx5_flow_mark_get(mark);
1252 if (rte_flow_dynf_metadata_avail() && cqe->flow_table_metadata) {
1253 pkt->ol_flags |= PKT_RX_DYNF_METADATA;
1254 *RTE_FLOW_DYNF_METADATA(pkt) = cqe->flow_table_metadata;
1257 pkt->ol_flags |= rxq_cq_to_ol_flags(cqe);
1258 if (rxq->vlan_strip &&
1259 (cqe->hdr_type_etc & rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) {
1260 pkt->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1261 pkt->vlan_tci = rte_be_to_cpu_16(cqe->vlan_info);
1263 if (rxq->hw_timestamp) {
1264 pkt->timestamp = rte_be_to_cpu_64(cqe->timestamp);
1265 pkt->ol_flags |= PKT_RX_TIMESTAMP;
1270 * DPDK callback for RX.
1273 * Generic pointer to RX queue structure.
1275 * Array to store received packets.
1277 * Maximum number of packets in array.
1280 * Number of packets successfully received (<= pkts_n).
1283 mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1285 struct mlx5_rxq_data *rxq = dpdk_rxq;
1286 const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1;
1287 const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1;
1288 const unsigned int sges_n = rxq->sges_n;
1289 struct rte_mbuf *pkt = NULL;
1290 struct rte_mbuf *seg = NULL;
1291 volatile struct mlx5_cqe *cqe =
1292 &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1294 unsigned int rq_ci = rxq->rq_ci << sges_n;
1295 int len = 0; /* keep its value across iterations. */
1298 unsigned int idx = rq_ci & wqe_cnt;
1299 volatile struct mlx5_wqe_data_seg *wqe =
1300 &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx];
1301 struct rte_mbuf *rep = (*rxq->elts)[idx];
1302 volatile struct mlx5_mini_cqe8 *mcqe = NULL;
1303 uint32_t rss_hash_res;
1311 rep = rte_mbuf_raw_alloc(rxq->mp);
1312 if (unlikely(rep == NULL)) {
1313 ++rxq->stats.rx_nombuf;
1316 * no buffers before we even started,
1317 * bail out silently.
1321 while (pkt != seg) {
1322 assert(pkt != (*rxq->elts)[idx]);
1326 rte_mbuf_raw_free(pkt);
1332 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1333 len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe);
1335 rte_mbuf_raw_free(rep);
1339 assert(len >= (rxq->crc_present << 2));
1340 pkt->ol_flags &= EXT_ATTACHED_MBUF;
1341 /* If compressed, take hash result from mini-CQE. */
1342 rss_hash_res = rte_be_to_cpu_32(mcqe == NULL ?
1344 mcqe->rx_hash_result);
1345 rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
1346 if (rxq->crc_present)
1347 len -= RTE_ETHER_CRC_LEN;
1349 if (cqe->lro_num_seg > 1) {
1351 (rte_pktmbuf_mtod(pkt, uint8_t *), cqe,
1353 pkt->ol_flags |= PKT_RX_LRO;
1354 pkt->tso_segsz = len / cqe->lro_num_seg;
1357 DATA_LEN(rep) = DATA_LEN(seg);
1358 PKT_LEN(rep) = PKT_LEN(seg);
1359 SET_DATA_OFF(rep, DATA_OFF(seg));
1360 PORT(rep) = PORT(seg);
1361 (*rxq->elts)[idx] = rep;
1363 * Fill NIC descriptor with the new buffer. The lkey and size
1364 * of the buffers are already known, only the buffer address
1367 wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
1368 /* If there's only one MR, no need to replace LKey in WQE. */
1369 if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
1370 wqe->lkey = mlx5_rx_mb2mr(rxq, rep);
1371 if (len > DATA_LEN(seg)) {
1372 len -= DATA_LEN(seg);
1377 DATA_LEN(seg) = len;
1378 #ifdef MLX5_PMD_SOFT_COUNTERS
1379 /* Increment bytes counter. */
1380 rxq->stats.ibytes += PKT_LEN(pkt);
1382 /* Return packet. */
1387 /* Align consumer index to the next stride. */
1392 if (unlikely((i == 0) && ((rq_ci >> sges_n) == rxq->rq_ci)))
1394 /* Update the consumer index. */
1395 rxq->rq_ci = rq_ci >> sges_n;
1397 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1399 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
1400 #ifdef MLX5_PMD_SOFT_COUNTERS
1401 /* Increment packets counter. */
1402 rxq->stats.ipackets += i;
1408 * Update LRO packet TCP header.
1409 * The HW LRO feature doesn't update the TCP header after coalescing the
1410 * TCP segments but supplies information in the CQE for SW to fill it in.
1413 * Pointer to the TCP header.
1415 * Pointer to the completion entry.
1417 * The L3 pseudo-header checksum.
1420 mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *restrict tcp,
1421 volatile struct mlx5_cqe *restrict cqe,
1424 uint8_t l4_type = (rte_be_to_cpu_16(cqe->hdr_type_etc) &
1425 MLX5_CQE_L4_TYPE_MASK) >> MLX5_CQE_L4_TYPE_SHIFT;
1427 * The HW calculates only the TCP payload checksum, so the TCP header
1428 * checksum and the L3 pseudo-header checksum need to be completed in SW.
1430 uint32_t csum = phcsum + cqe->csum;
1432 if (l4_type == MLX5_L4_HDR_TYPE_TCP_EMPTY_ACK ||
1433 l4_type == MLX5_L4_HDR_TYPE_TCP_WITH_ACL) {
1434 tcp->tcp_flags |= RTE_TCP_ACK_FLAG;
1435 tcp->recv_ack = cqe->lro_ack_seq_num;
1436 tcp->rx_win = cqe->lro_tcp_win;
1438 if (cqe->lro_tcppsh_abort_dupack & MLX5_CQE_LRO_PUSH_MASK)
1439 tcp->tcp_flags |= RTE_TCP_PSH_FLAG;
1441 csum += rte_raw_cksum(tcp, (tcp->data_off & 0xF) * 4);
1442 csum = ((csum & 0xffff0000) >> 16) + (csum & 0xffff);
1443 csum = (~csum) & 0xffff;
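	/*
	 * The two lines above fold the 32-bit accumulator into a 16-bit
	 * ones' complement sum and invert it. For example, csum == 0x1FFFE
	 * folds to 0x0001 + 0xFFFE = 0xFFFF and inverts to 0x0000.
	 */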
1450 * Update LRO packet headers.
1451 * The HW LRO feature doesn't update the L3/TCP headers after coalescing the
1452 * TCP segments but supplies information in the CQE for SW to fill them in.
1455 * The packet address.
1457 * Pointer to the completion entry.
1459 * The packet length.
1462 mlx5_lro_update_hdr(uint8_t *restrict padd,
1463 volatile struct mlx5_cqe *restrict cqe,
1467 struct rte_ether_hdr *eth;
1468 struct rte_vlan_hdr *vlan;
1469 struct rte_ipv4_hdr *ipv4;
1470 struct rte_ipv6_hdr *ipv6;
1471 struct rte_tcp_hdr *tcp;
1476 uint16_t proto = h.eth->ether_type;
1480 while (proto == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
1481 proto == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
1482 proto = h.vlan->eth_proto;
1485 if (proto == RTE_BE16(RTE_ETHER_TYPE_IPV4)) {
1486 h.ipv4->time_to_live = cqe->lro_min_ttl;
1487 h.ipv4->total_length = rte_cpu_to_be_16(len - (h.hdr - padd));
1488 h.ipv4->hdr_checksum = 0;
1489 h.ipv4->hdr_checksum = rte_ipv4_cksum(h.ipv4);
1490 phcsum = rte_ipv4_phdr_cksum(h.ipv4, 0);
1493 h.ipv6->hop_limits = cqe->lro_min_ttl;
1494 h.ipv6->payload_len = rte_cpu_to_be_16(len - (h.hdr - padd) -
1496 phcsum = rte_ipv6_phdr_cksum(h.ipv6, 0);
1499 mlx5_lro_update_tcp_hdr(h.tcp, cqe, phcsum);
1503 mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
1505 struct mlx5_mprq_buf *buf = opaque;
1507 if (rte_atomic16_read(&buf->refcnt) == 1) {
1508 rte_mempool_put(buf->mp, buf);
1509 } else if (rte_atomic16_add_return(&buf->refcnt, -1) == 0) {
1510 rte_atomic16_set(&buf->refcnt, 1);
1511 rte_mempool_put(buf->mp, buf);
1516 mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf)
1518 mlx5_mprq_buf_free_cb(NULL, buf);
1522 mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx,
1523 const unsigned int strd_n)
1525 struct mlx5_mprq_buf *rep = rxq->mprq_repl;
1526 volatile struct mlx5_wqe_data_seg *wqe =
1527 &((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg;
1530 assert(rep != NULL);
1531 /* Replace MPRQ buf. */
1532 (*rxq->mprq_bufs)[rq_idx] = rep;
1534 addr = mlx5_mprq_buf_addr(rep, strd_n);
1535 wqe->addr = rte_cpu_to_be_64((uintptr_t)addr);
1536 /* If there's only one MR, no need to replace LKey in WQE. */
1537 if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
1538 wqe->lkey = mlx5_rx_addr2mr(rxq, (uintptr_t)addr);
1539 /* Stash a mbuf for next replacement. */
1540 if (likely(!rte_mempool_get(rxq->mprq_mp, (void **)&rep)))
1541 rxq->mprq_repl = rep;
1543 rxq->mprq_repl = NULL;
1547 * DPDK callback for RX with Multi-Packet RQ support.
1550 * Generic pointer to RX queue structure.
1552 * Array to store received packets.
1554 * Maximum number of packets in array.
1557 * Number of packets successfully received (<= pkts_n).
1560 mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1562 struct mlx5_rxq_data *rxq = dpdk_rxq;
1563 const unsigned int strd_n = 1 << rxq->strd_num_n;
1564 const unsigned int strd_sz = 1 << rxq->strd_sz_n;
1565 const unsigned int strd_shift =
1566 MLX5_MPRQ_STRIDE_SHIFT_BYTE * rxq->strd_shift_en;
1567 const unsigned int cq_mask = (1 << rxq->cqe_n) - 1;
1568 const unsigned int wq_mask = (1 << rxq->elts_n) - 1;
1569 volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
1571 uint32_t rq_ci = rxq->rq_ci;
1572 uint16_t consumed_strd = rxq->consumed_strd;
1573 uint16_t headroom_sz = rxq->strd_headroom_en * RTE_PKTMBUF_HEADROOM;
1574 struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
1576 while (i < pkts_n) {
1577 struct rte_mbuf *pkt;
1585 volatile struct mlx5_mini_cqe8 *mcqe = NULL;
1586 uint32_t rss_hash_res = 0;
1587 uint8_t lro_num_seg;
1589 if (consumed_strd == strd_n) {
1590 /* Replace WQE only if the buffer is still in use. */
1591 if (rte_atomic16_read(&buf->refcnt) > 1) {
1592 mprq_buf_replace(rxq, rq_ci & wq_mask, strd_n);
1593 /* Release the old buffer. */
1594 mlx5_mprq_buf_free(buf);
1595 } else if (unlikely(rxq->mprq_repl == NULL)) {
1596 struct mlx5_mprq_buf *rep;
1599 * Currently, the MPRQ mempool is out of buffers
1600 * and memcpy is done regardless of the size of the Rx
1601 * packet. Retry allocation to get back to
1604 if (!rte_mempool_get(rxq->mprq_mp,
1606 rxq->mprq_repl = rep;
1608 /* Advance to the next WQE. */
1611 buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
1613 cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
1614 ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe);
1618 strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
1619 MLX5_MPRQ_STRIDE_NUM_SHIFT;
1621 consumed_strd += strd_cnt;
1622 if (byte_cnt & MLX5_MPRQ_FILLER_MASK)
1625 rss_hash_res = rte_be_to_cpu_32(cqe->rx_hash_res);
1626 strd_idx = rte_be_to_cpu_16(cqe->wqe_counter);
1628 /* mini-CQE for MPRQ doesn't have hash result. */
1629 strd_idx = rte_be_to_cpu_16(mcqe->stride_idx);
1631 assert(strd_idx < strd_n);
1632 assert(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) & wq_mask));
1633 lro_num_seg = cqe->lro_num_seg;
1635 * Currently configured to receive a packet per stride. But if
1636 * the MTU is adjusted through the kernel interface, the device could
1637 * consume multiple strides without raising an error. In this
1638 * case, the packet should be dropped because it is bigger than
1639 * the max_rx_pkt_len.
1641 if (unlikely(!lro_num_seg && strd_cnt > 1)) {
1642 ++rxq->stats.idropped;
1645 pkt = rte_pktmbuf_alloc(rxq->mp);
1646 if (unlikely(pkt == NULL)) {
1647 ++rxq->stats.rx_nombuf;
1650 len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
1651 assert((int)len >= (rxq->crc_present << 2));
1652 if (rxq->crc_present)
1653 len -= RTE_ETHER_CRC_LEN;
1654 offset = strd_idx * strd_sz + strd_shift;
1655 addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf, strd_n), offset);
1657 * Memcpy the packet to the target mbuf if:
1658 * - the size of the packet is smaller than mprq_max_memcpy_len, or
1659 * - the Mempool for Multi-Packet RQ is out of buffers.
1661 if (len <= rxq->mprq_max_memcpy_len || rxq->mprq_repl == NULL) {
1663 * When memcpy'ing the packet due to lack of buffers, the
1664 * packet must be smaller than the target mbuf.
1666 if (unlikely(rte_pktmbuf_tailroom(pkt) < len)) {
1667 rte_pktmbuf_free_seg(pkt);
1668 ++rxq->stats.idropped;
1671 rte_memcpy(rte_pktmbuf_mtod(pkt, void *), addr, len);
1672 DATA_LEN(pkt) = len;
1674 rte_iova_t buf_iova;
1675 struct rte_mbuf_ext_shared_info *shinfo;
1676 uint16_t buf_len = strd_cnt * strd_sz;
1679 /* Increment the refcnt of the whole chunk. */
1680 rte_atomic16_add_return(&buf->refcnt, 1);
1681 assert((uint16_t)rte_atomic16_read(&buf->refcnt) <=
1683 buf_addr = RTE_PTR_SUB(addr, headroom_sz);
1685 * The MLX5 device doesn't use the iova, but it is necessary in the
1686 * case where the Rx packet is transmitted via a
1689 buf_iova = rte_mempool_virt2iova(buf) +
1690 RTE_PTR_DIFF(buf_addr, buf);
1691 shinfo = &buf->shinfos[strd_idx];
1692 rte_mbuf_ext_refcnt_set(shinfo, 1);
1694 * EXT_ATTACHED_MBUF will be set in pkt->ol_flags when
1695 * attaching the stride to the mbuf, and more offload flags
1696 * will be added below by calling rxq_cq_to_mbuf().
1697 * Other fields will be overwritten.
1699 rte_pktmbuf_attach_extbuf(pkt, buf_addr, buf_iova,
1701 /* Set mbuf head-room. */
1702 pkt->data_off = headroom_sz;
1703 assert(pkt->ol_flags == EXT_ATTACHED_MBUF);
1705 * Prevent potential overflow due to MTU change through
1708 if (unlikely(rte_pktmbuf_tailroom(pkt) < len)) {
1709 rte_pktmbuf_free_seg(pkt);
1710 ++rxq->stats.idropped;
1713 DATA_LEN(pkt) = len;
1715 * An LRO packet may consume all the stride memory; in this
1716 * case packet head-room space is not guaranteed, so an
1717 * empty mbuf must be added for the head-room.
1719 if (!rxq->strd_headroom_en) {
1720 struct rte_mbuf *headroom_mbuf =
1721 rte_pktmbuf_alloc(rxq->mp);
1723 if (unlikely(headroom_mbuf == NULL)) {
1724 rte_pktmbuf_free_seg(pkt);
1725 ++rxq->stats.rx_nombuf;
1728 PORT(pkt) = rxq->port_id;
1729 NEXT(headroom_mbuf) = pkt;
1730 pkt = headroom_mbuf;
1734 rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
1735 if (lro_num_seg > 1) {
1736 mlx5_lro_update_hdr(addr, cqe, len);
1737 pkt->ol_flags |= PKT_RX_LRO;
1738 pkt->tso_segsz = strd_sz;
1741 PORT(pkt) = rxq->port_id;
1742 #ifdef MLX5_PMD_SOFT_COUNTERS
1743 /* Increment bytes counter. */
1744 rxq->stats.ibytes += PKT_LEN(pkt);
1746 /* Return packet. */
1750 /* Update the consumer indexes. */
1751 rxq->consumed_strd = consumed_strd;
1753 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1754 if (rq_ci != rxq->rq_ci) {
1757 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
1759 #ifdef MLX5_PMD_SOFT_COUNTERS
1760 /* Increment packets counter. */
1761 rxq->stats.ipackets += i;
1767 * Dummy DPDK callback for TX.
1769 * This function is used to temporarily replace the real callback during
1770 * unsafe control operations on the queue, or in case of error.
1773 * Generic pointer to TX queue structure.
1775 * Packets to transmit.
1777 * Number of packets in array.
1780 * Number of packets successfully transmitted (<= pkts_n).
1783 removed_tx_burst(void *dpdk_txq __rte_unused,
1784 struct rte_mbuf **pkts __rte_unused,
1785 uint16_t pkts_n __rte_unused)
1792 * Dummy DPDK callback for RX.
1794 * This function is used to temporarily replace the real callback during
1795 * unsafe control operations on the queue, or in case of error.
1798 * Generic pointer to RX queue structure.
1800 * Array to store received packets.
1802 * Maximum number of packets in array.
1805 * Number of packets successfully received (<= pkts_n).
1808 removed_rx_burst(void *dpdk_txq __rte_unused,
1809 struct rte_mbuf **pkts __rte_unused,
1810 uint16_t pkts_n __rte_unused)
1817 * Vectorized Rx/Tx routines are not compiled in when required vector
1818 * instructions are not supported on a target architecture. The following null
1819 * stubs are needed for linkage when those are not included outside of this file
1820 * (e.g. mlx5_rxtx_vec_sse.c for x86).
1824 mlx5_rx_burst_vec(void *dpdk_txq __rte_unused,
1825 struct rte_mbuf **pkts __rte_unused,
1826 uint16_t pkts_n __rte_unused)
1832 mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)
1838 mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused)
1844 * Free the mbufs from the linear array of pointers.
1847 * Pointer to the array of packets to be freed.
1849 * Number of packets to be freed.
1851 * Configured Tx offloads mask. It is fully defined at
1852 * compile time and may be used for optimization.
1854 static __rte_always_inline void
1855 mlx5_tx_free_mbuf(struct rte_mbuf **restrict pkts,
1856 unsigned int pkts_n,
1857 unsigned int olx __rte_unused)
1859 struct rte_mempool *pool = NULL;
1860 struct rte_mbuf **p_free = NULL;
1861 struct rte_mbuf *mbuf;
1862 unsigned int n_free = 0;
1865 * The implemented algorithm eliminates
1866 * copying pointers to a temporary array
1867 * for rte_mempool_put_bulk() calls.
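 * For example, if the mbufs in a burst come from pools A, A, A, B, B the
 * routine issues one rte_mempool_put_bulk() of three pointers to pool A
 * followed by one of two pointers to pool B, operating directly on the
 * pkts[] array.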
1874 * Decrement mbuf reference counter, detach
1875 * indirect and external buffers if needed.
1877 mbuf = rte_pktmbuf_prefree_seg(*pkts);
1878 if (likely(mbuf != NULL)) {
1879 assert(mbuf == *pkts);
1880 if (likely(n_free != 0)) {
1881 if (unlikely(pool != mbuf->pool))
1882 /* From different pool. */
1885 /* Start new scan array. */
1892 if (unlikely(pkts_n == 0)) {
1898 * This happens if mbuf is still referenced.
1899 * We can't put it back to the pool, skip.
1903 if (unlikely(n_free != 0))
1904 /* There is some array to free. */
1906 if (unlikely(pkts_n == 0))
1907 /* Last mbuf, nothing to free. */
1913 * This loop is implemented to avoid multiple
1914 * inlining of rte_mempool_put_bulk().
1920 * Free the array of pre-freed mbufs
1921 * belonging to the same memory pool.
1923 rte_mempool_put_bulk(pool, (void *)p_free, n_free);
1924 if (unlikely(mbuf != NULL)) {
1925 /* There is a request to start a new scan. */
1930 if (likely(pkts_n != 0))
1933 * This is the last mbuf to be freed.
1934 * Do one more loop iteration to complete.
1935 * This is a rare case of the last unique mbuf.
1940 if (likely(pkts_n == 0))
1949 * Free the mbufs from the elts ring buffer up to the new tail.
1952 * Pointer to Tx queue structure.
1954 * Index in elts to free up to, becomes new elts tail.
1956 * Configured Tx offloads mask. It is fully defined at
1957 * compile time and may be used for optimization.
1959 static __rte_always_inline void
1960 mlx5_tx_free_elts(struct mlx5_txq_data *restrict txq,
1962 unsigned int olx __rte_unused)
1964 uint16_t n_elts = tail - txq->elts_tail;
1967 assert(n_elts <= txq->elts_s);
1969 * Implement a loop to support ring buffer wraparound
1970 * with single inlining of mlx5_tx_free_mbuf().
1975 part = txq->elts_s - (txq->elts_tail & txq->elts_m);
1976 part = RTE_MIN(part, n_elts);
1978 assert(part <= txq->elts_s);
1979 mlx5_tx_free_mbuf(&txq->elts[txq->elts_tail & txq->elts_m],
1981 txq->elts_tail += part;
1987 * Store the mbufs being sent into the elts ring buffer.
1988 * On Tx completion these mbufs will be freed.
1991 * Pointer to Tx queue structure.
1993 * Pointer to array of packets to be stored.
1995 * Number of packets to be stored.
1997 * Configured Tx offloads mask. It is fully defined at
1998 * compile time and may be used for optimization.
2000 static __rte_always_inline void
2001 mlx5_tx_copy_elts(struct mlx5_txq_data *restrict txq,
2002 struct rte_mbuf **restrict pkts,
2003 unsigned int pkts_n,
2004 unsigned int olx __rte_unused)
2007 struct rte_mbuf **elts = (struct rte_mbuf **)txq->elts;
2011 part = txq->elts_s - (txq->elts_head & txq->elts_m);
2013 assert(part <= txq->elts_s);
2014 /* This code is a good candidate for vectorizing with SIMD. */
2015 rte_memcpy((void *)(elts + (txq->elts_head & txq->elts_m)),
2017 RTE_MIN(part, pkts_n) * sizeof(struct rte_mbuf *));
2018 txq->elts_head += pkts_n;
2019 if (unlikely(part < pkts_n))
2020 /* The copy is wrapping around the elts array. */
2021 rte_memcpy((void *)elts, (void *)(pkts + part),
2022 (pkts_n - part) * sizeof(struct rte_mbuf *));
2026 * Update completion queue consuming index via doorbell
2027 * and flush the completed data buffers.
2030 * Pointer to TX queue structure.
2031 * @param last_cqe
2032 * Pointer to the last valid CQE; if not NULL, update txq->wqe_pi and flush the buffers.
2034 * Configured Tx offloads mask. It is fully defined at
2035 * compile time and may be used for optimization.
2037 static __rte_always_inline void
2038 mlx5_tx_comp_flush(struct mlx5_txq_data *restrict txq,
2039 volatile struct mlx5_cqe *last_cqe,
2040 unsigned int olx __rte_unused)
2042 if (likely(last_cqe != NULL)) {
2045 txq->wqe_pi = rte_be_to_cpu_16(last_cqe->wqe_counter);
2046 tail = txq->fcqs[(txq->cq_ci - 1) & txq->cqe_m];
2047 if (likely(tail != txq->elts_tail)) {
2048 mlx5_tx_free_elts(txq, tail, olx);
2049 assert(tail == txq->elts_tail);
2055 * Manage TX completions. This routine checks the CQ for
2056 * arrived CQEs, deduces the last completed WQE in the SQ,
2057 * updates the SQ producer index, and frees all completed mbufs.
2060 * Pointer to TX queue structure.
2062 * Configured Tx offloads mask. It is fully defined at
2063 * compile time and may be used for optimization.
2065 * NOTE: not inlined intentionally, it makes the tx_burst
2066 * routine smaller, simpler and faster - from experiments.
2069 mlx5_tx_handle_completion(struct mlx5_txq_data *restrict txq,
2070 unsigned int olx __rte_unused)
2072 unsigned int count = MLX5_TX_COMP_MAX_CQE;
2073 volatile struct mlx5_cqe *last_cqe = NULL;
2074 uint16_t ci = txq->cq_ci;
2077 static_assert(MLX5_CQE_STATUS_HW_OWN < 0, "Must be negative value");
2078 static_assert(MLX5_CQE_STATUS_SW_OWN < 0, "Must be negative value");
2080 volatile struct mlx5_cqe *cqe;
2082 cqe = &txq->cqes[ci & txq->cqe_m];
2083 ret = check_cqe(cqe, txq->cqe_s, ci);
2084 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
2085 if (likely(ret != MLX5_CQE_STATUS_ERR)) {
2086 /* No new CQEs in completion queue. */
2087 assert(ret == MLX5_CQE_STATUS_HW_OWN);
2091 * Some error occurred, try to restart.
2092 * We have no barrier after the WQE-related Doorbell
2093 * is written, make sure all writes are completed
2094 * here, before we might perform an SQ reset.
2098 ret = mlx5_tx_error_cqe_handle
2099 (txq, (volatile struct mlx5_err_cqe *)cqe);
2100 if (unlikely(ret < 0)) {
2102 * Some error occurred on queue error
2103 * handling, we do not advance the index
2104 * here, allowing a retry on the next call.
2109 * We are going to fetch all entries with
2110 * MLX5_CQE_SYNDROME_WR_FLUSH_ERR status.
2111 * The send queue is supposed to be empty.
2118 /* Normal transmit completion. */
2119 assert(ci != txq->cq_pi);
2120 assert((txq->fcqs[ci & txq->cqe_m] >> 16) == cqe->wqe_counter);
2124 * We have to restrict the number of processed CQEs
2125 * in one tx_burst routine call. The CQ may be large
2126 * and many CQEs may be updated by the NIC in one
2127 * transaction. Buffers freeing is time consuming,
2128 * multiple iterations may introduce significant
2131 if (likely(--count == 0))
2134 if (likely(ci != txq->cq_ci)) {
2136 * Update completion queue consuming index
2137 * and ring doorbell to notify hardware.
2139 rte_compiler_barrier();
2141 *txq->cq_db = rte_cpu_to_be_32(ci);
2142 mlx5_tx_comp_flush(txq, last_cqe, olx);
2147 * Check if the completion request flag should be set in the last WQE.
2148 * Both pushed mbufs and WQEs are monitored and the completion request
2149 * flag is set if any of the thresholds is reached.
2152 * Pointer to TX queue structure.
2154 * Pointer to burst routine local context.
2156 * Configured Tx offloads mask. It is fully defined at
2157 * compile time and may be used for optimization.
2159 static __rte_always_inline void
2160 mlx5_tx_request_completion(struct mlx5_txq_data *restrict txq,
2161 struct mlx5_txq_local *restrict loc,
2164 uint16_t head = txq->elts_head;
2167 part = MLX5_TXOFF_CONFIG(INLINE) ?
2168 0 : loc->pkts_sent - loc->pkts_copy;
2170 if ((uint16_t)(head - txq->elts_comp) >= MLX5_TX_COMP_THRESH ||
2171 (MLX5_TXOFF_CONFIG(INLINE) &&
2172 (uint16_t)(txq->wqe_ci - txq->wqe_comp) >= txq->wqe_thres)) {
2173 volatile struct mlx5_wqe *last = loc->wqe_last;
2175 txq->elts_comp = head;
2176 if (MLX5_TXOFF_CONFIG(INLINE))
2177 txq->wqe_comp = txq->wqe_ci;
2178 /* Request unconditional completion on last WQE. */
2179 last->cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
2180 MLX5_COMP_MODE_OFFSET);
2181 /* Save elts_head in dedicated free on completion queue. */
2183 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head;
2185 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head |
2186 (last->cseg.opcode >> 8) << 16;
2188 /* A CQE slot must always be available. */
2189 assert((txq->cq_pi - txq->cq_ci) <= txq->cqe_s);
2194 * DPDK callback to check the status of a tx descriptor.
2199 * The index of the descriptor in the ring.
2202 * The status of the tx descriptor.
2205 mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
2207 struct mlx5_txq_data *restrict txq = tx_queue;
2210 mlx5_tx_handle_completion(txq, 0);
2211 used = txq->elts_head - txq->elts_tail;
2213 return RTE_ETH_TX_DESC_FULL;
2214 return RTE_ETH_TX_DESC_DONE;
2218 * Build the Control Segment with specified opcode:
2219 * - MLX5_OPCODE_SEND
2220 * - MLX5_OPCODE_ENHANCED_MPSW
2224 * Pointer to TX queue structure.
2226 * Pointer to burst routine local context.
2228 * Pointer to WQE to fill with built Control Segment.
2230 * Supposed length of WQE in segments.
2232 * SQ WQE opcode to put into Control Segment.
2234 * Configured Tx offloads mask. It is fully defined at
2235 * compile time and may be used for optimization.
2237 static __rte_always_inline void
2238 mlx5_tx_cseg_init(struct mlx5_txq_data *restrict txq,
2239 struct mlx5_txq_local *restrict loc __rte_unused,
2240 struct mlx5_wqe *restrict wqe,
2242 unsigned int opcode,
2243 unsigned int olx __rte_unused)
2245 struct mlx5_wqe_cseg *restrict cs = &wqe->cseg;
2247 /* For legacy MPW replace the EMPW by TSO with modifier. */
2248 if (MLX5_TXOFF_CONFIG(MPW) && opcode == MLX5_OPCODE_ENHANCED_MPSW)
2249 opcode = MLX5_OPCODE_TSO | MLX5_OPC_MOD_MPW << 24;
2250 cs->opcode = rte_cpu_to_be_32((txq->wqe_ci << 8) | opcode);
2251 cs->sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
2252 cs->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
2253 MLX5_COMP_MODE_OFFSET);
2254 cs->misc = RTE_BE32(0);
2258 * Build the Ethernet Segment without inlined data.
2259 * Supports Software Parser, Checksums and VLAN
2260 * insertion Tx offload features.
2263 * Pointer to TX queue structure.
2265 * Pointer to burst routine local context.
2267 * Pointer to WQE to fill with built Ethernet Segment.
2269 * Configured Tx offloads mask. It is fully defined at
2270 * compile time and may be used for optimization.
2272 static __rte_always_inline void
2273 mlx5_tx_eseg_none(struct mlx5_txq_data *restrict txq __rte_unused,
2274 struct mlx5_txq_local *restrict loc,
2275 struct mlx5_wqe *restrict wqe,
2278 struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
2282 * Calculate and set checksum flags first, the dword field
2283 * in the segment may be shared with Software Parser flags.
2285 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2286 es->flags = rte_cpu_to_le_32(csum);
2288 * Calculate and set Software Parser offsets and flags.
2289 * These flags are set for custom UDP and IP tunnel packets.
2291 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2292 /* Fill metadata field if needed. */
2293 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2294 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2295 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2296 /* Engage VLAN tag insertion feature if requested. */
2297 if (MLX5_TXOFF_CONFIG(VLAN) &&
2298 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
2300 * We should get here only if the device supports
2301 * this feature correctly.
2303 assert(txq->vlan_en);
2304 es->inline_hdr = rte_cpu_to_be_32(MLX5_ETH_WQE_VLAN_INSERT |
2305 loc->mbuf->vlan_tci);
2307 es->inline_hdr = RTE_BE32(0);
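/*
 * Editor's illustrative sketch (hypothetical helper): the metadata value
 * written into the Ethernet Segment above is taken from the mbuf dynamic
 * field only when the PKT_TX_DYNF_METADATA flag is set, otherwise zero
 * is used.
 */
static __rte_unused uint32_t
mlx5_tx_eseg_metadata_sketch(struct rte_mbuf *mbuf)
{
	return (mbuf->ol_flags & PKT_TX_DYNF_METADATA) ?
	       *RTE_FLOW_DYNF_METADATA(mbuf) : 0;
}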
2312 * Build the Ethernet Segment with minimal inlined data
2313 * of MLX5_ESEG_MIN_INLINE_SIZE bytes length. This is
2314 * used to fill the gap in single WQEBB WQEs.
2315 * Supports Software Parser, Checksums and VLAN
2316 * insertion Tx offload features.
2319 * Pointer to TX queue structure.
2321 * Pointer to burst routine local context.
2323 * Pointer to WQE to fill with built Ethernet Segment.
2325 * Length of VLAN tag insertion if any.
2327 * Configured Tx offloads mask. It is fully defined at
2328 * compile time and may be used for optimization.
2330 static __rte_always_inline void
2331 mlx5_tx_eseg_dmin(struct mlx5_txq_data *restrict txq __rte_unused,
2332 struct mlx5_txq_local *restrict loc,
2333 struct mlx5_wqe *restrict wqe,
2337 struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
2339 uint8_t *psrc, *pdst;
2342 * Calculate and set check sum flags first, dword field
2343 * in segment may be shared with Software Parser flags.
2345 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2346 es->flags = rte_cpu_to_le_32(csum);
2348 * Calculate and set Software Parser offsets and flags.
2349 * These flags are set for custom UDP and IP tunnel packets.
2351 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2352 /* Fill metadata field if needed. */
2353 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2354 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2355 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2356 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2358 sizeof(rte_v128u32_t)),
2359 "invalid Ethernet Segment data size");
2360 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2362 sizeof(struct rte_vlan_hdr) +
2363 2 * RTE_ETHER_ADDR_LEN),
2364 "invalid Ethernet Segment data size");
2365 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
2366 es->inline_hdr_sz = RTE_BE16(MLX5_ESEG_MIN_INLINE_SIZE);
2367 es->inline_data = *(unaligned_uint16_t *)psrc;
2368 psrc += sizeof(uint16_t);
2369 pdst = (uint8_t *)(es + 1);
2370 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2371 /* Implement VLAN tag insertion as part of the inline data. */
2372 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
2373 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2374 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2375 /* Insert VLAN ethertype + VLAN tag. */
2376 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2377 ((RTE_ETHER_TYPE_VLAN << 16) |
2378 loc->mbuf->vlan_tci);
2379 pdst += sizeof(struct rte_vlan_hdr);
2380 /* Copy the remaining two bytes from the packet data. */
2381 assert(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
2382 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
2384 /* Fill the gap in the title WQEBB with inline data. */
2385 rte_mov16(pdst, psrc);
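/*
 * Editor's note with a sketch (assumption: the byte layout follows the
 * copy sequence in mlx5_tx_eseg_dmin() above): with VLAN insertion the
 * minimal inline area holds both MAC addresses, the inserted VLAN
 * ethertype/tag and the two original ethertype bytes, which together
 * amount to MLX5_ESEG_MIN_INLINE_SIZE.
 */
static __rte_unused unsigned int
mlx5_tx_eseg_dmin_vlan_layout_sketch(void)
{
	return 2 * RTE_ETHER_ADDR_LEN +		/* DST + SRC MAC. */
	       sizeof(struct rte_vlan_hdr) +	/* Inserted TPID + TCI. */
	       sizeof(uint16_t);		/* Original ethertype. */
}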
2390 * Build the Ethernet Segment with entire packet
2391 * data inlining. Checks the boundary of WQEBB and
2392 * ring buffer wrapping, supports Software Parser,
2393 * Checksums and VLAN insertion Tx offload features.
2396 * Pointer to TX queue structure.
2398 * Pointer to burst routine local context.
2400 * Pointer to WQE to fill with built Ethernet Segment.
2402 * Length of VLAN tag insertion if any.
2404 * Length of data to inline (VLAN included, if any).
2406 * TSO flag, set mss field from the packet.
2408 * Configured Tx offloads mask. It is fully defined at
2409 * compile time and may be used for optimization.
2412 * Pointer to the next Data Segment (aligned and wrapped around).
2414 static __rte_always_inline struct mlx5_wqe_dseg *
2415 mlx5_tx_eseg_data(struct mlx5_txq_data *restrict txq,
2416 struct mlx5_txq_local *restrict loc,
2417 struct mlx5_wqe *restrict wqe,
2423 struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
2425 uint8_t *psrc, *pdst;
2429 * Calculate and set check sum flags first, dword field
2430 * in segment may be shared with Software Parser flags.
2432 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2435 csum |= loc->mbuf->tso_segsz;
2436 es->flags = rte_cpu_to_be_32(csum);
2438 es->flags = rte_cpu_to_le_32(csum);
2441 * Calculate and set Software Parser offsets and flags.
2442 * These flags are set for custom UDP and IP tunnel packets.
2444 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2445 /* Fill metadata field if needed. */
2446 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2447 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2448 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2449 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2451 sizeof(rte_v128u32_t)),
2452 "invalid Ethernet Segment data size");
2453 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2455 sizeof(struct rte_vlan_hdr) +
2456 2 * RTE_ETHER_ADDR_LEN),
2457 "invalid Ethernet Segment data size");
2458 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
2459 es->inline_hdr_sz = rte_cpu_to_be_16(inlen);
2460 es->inline_data = *(unaligned_uint16_t *)psrc;
2461 psrc += sizeof(uint16_t);
2462 pdst = (uint8_t *)(es + 1);
2463 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2464 /* Implement VLAN tag insertion as part of the inline data. */
2465 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
2466 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2467 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2468 /* Insert VLAN ethertype + VLAN tag. */
2469 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2470 ((RTE_ETHER_TYPE_VLAN << 16) |
2471 loc->mbuf->vlan_tci);
2472 pdst += sizeof(struct rte_vlan_hdr);
2473 /* Copy the remaining two bytes from the packet data. */
2474 assert(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
2475 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
2476 psrc += sizeof(uint16_t);
2478 /* Fill the gap in the title WQEBB with inline data. */
2479 rte_mov16(pdst, psrc);
2480 psrc += sizeof(rte_v128u32_t);
2482 pdst = (uint8_t *)(es + 2);
2483 assert(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
2484 assert(pdst < (uint8_t *)txq->wqes_end);
2485 inlen -= MLX5_ESEG_MIN_INLINE_SIZE;
2487 assert(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
2488 return (struct mlx5_wqe_dseg *)pdst;
2491 * The WQEBB space availability is checked by caller.
2492 * Here we should be aware of WQE ring buffer wraparound only.
2494 part = (uint8_t *)txq->wqes_end - pdst;
2495 part = RTE_MIN(part, inlen);
2497 rte_memcpy(pdst, psrc, part);
2499 if (likely(!inlen)) {
2501 * If return value is not used by the caller
2502 * the code below will be optimized out.
2505 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2506 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
2507 pdst = (uint8_t *)txq->wqes;
2508 return (struct mlx5_wqe_dseg *)pdst;
2510 pdst = (uint8_t *)txq->wqes;
2517 * Copy data from a chain of mbufs to the specified linear buffer.
2518 * If the data from some mbuf is copied completely, this mbuf is freed.
2519 * The local structure is used to keep the byte stream state.
2523 * Pointer to the destination linear buffer.
2525 * Pointer to burst routine local context.
2527 * Length of data to be copied.
2529 * Configured Tx offloads mask. It is fully defined at
2530 * compile time and may be used for optimization.
2532 static __rte_always_inline void
2533 mlx5_tx_mseg_memcpy(uint8_t *pdst,
2534 struct mlx5_txq_local *restrict loc,
2536 unsigned int olx __rte_unused)
2538 struct rte_mbuf *mbuf;
2539 unsigned int part, dlen;
2544 /* Allow zero length packets, must check first. */
2545 dlen = rte_pktmbuf_data_len(loc->mbuf);
2546 if (dlen <= loc->mbuf_off) {
2547 /* Exhausted packet, just free. */
2549 loc->mbuf = mbuf->next;
2550 rte_pktmbuf_free_seg(mbuf);
2552 assert(loc->mbuf_nseg > 1);
2557 dlen -= loc->mbuf_off;
2558 psrc = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
2560 part = RTE_MIN(len, dlen);
2561 rte_memcpy(pdst, psrc, part);
2562 loc->mbuf_off += part;
2565 if (loc->mbuf_off >= rte_pktmbuf_data_len(loc->mbuf)) {
2567 /* Exhausted packet, just free. */
2569 loc->mbuf = mbuf->next;
2570 rte_pktmbuf_free_seg(mbuf);
2572 assert(loc->mbuf_nseg >= 1);
2582 * Build the Ethernet Segment with inlined data from
2583 * multi-segment packet. Checks the boundary of WQEBB
2584 * and ring buffer wrapping, supports Software Parser,
2585 * Checksums and VLAN insertion Tx offload features.
2588 * Pointer to TX queue structure.
2590 * Pointer to burst routine local context.
2592 * Pointer to WQE to fill with built Ethernet Segment.
2594 * Length of VLAN tag insertion if any.
2596 * Length of data to inline (VLAN included, if any).
2598 * TSO flag, set mss field from the packet.
2600 * Configured Tx offloads mask. It is fully defined at
2601 * compile time and may be used for optimization.
2604 * Pointer to the next Data Segment (aligned and
2605 * possibly NOT wrapped around - the caller should do the
2606 * wrapping check on its own).
2608 static __rte_always_inline struct mlx5_wqe_dseg *
2609 mlx5_tx_eseg_mdat(struct mlx5_txq_data *restrict txq,
2610 struct mlx5_txq_local *restrict loc,
2611 struct mlx5_wqe *restrict wqe,
2617 struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
2623 * Calculate and set check sum flags first, uint32_t field
2624 * in segment may be shared with Software Parser flags.
2626 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2629 csum |= loc->mbuf->tso_segsz;
2630 es->flags = rte_cpu_to_be_32(csum);
2632 es->flags = rte_cpu_to_le_32(csum);
2635 * Calculate and set Software Parser offsets and flags.
2636 * These flags are set for custom UDP and IP tunnel packets.
2638 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2639 /* Fill metadata field if needed. */
2640 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2641 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2642 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2643 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2645 sizeof(rte_v128u32_t)),
2646 "invalid Ethernet Segment data size");
2647 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2649 sizeof(struct rte_vlan_hdr) +
2650 2 * RTE_ETHER_ADDR_LEN),
2651 "invalid Ethernet Segment data size");
2652 assert(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
2653 es->inline_hdr_sz = rte_cpu_to_be_16(inlen);
2654 pdst = (uint8_t *)&es->inline_data;
2655 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2656 /* Implement VLAN tag insertion as part of the inline data. */
2657 mlx5_tx_mseg_memcpy(pdst, loc, 2 * RTE_ETHER_ADDR_LEN, olx);
2658 pdst += 2 * RTE_ETHER_ADDR_LEN;
2659 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2660 ((RTE_ETHER_TYPE_VLAN << 16) |
2661 loc->mbuf->vlan_tci);
2662 pdst += sizeof(struct rte_vlan_hdr);
2663 inlen -= 2 * RTE_ETHER_ADDR_LEN + sizeof(struct rte_vlan_hdr);
2665 assert(pdst < (uint8_t *)txq->wqes_end);
2667 * The WQEBB space availability is checked by caller.
2668 * Here we should be aware of WQE ring buffer wraparound only.
2670 part = (uint8_t *)txq->wqes_end - pdst;
2671 part = RTE_MIN(part, inlen);
2674 mlx5_tx_mseg_memcpy(pdst, loc, part, olx);
2676 if (likely(!inlen)) {
2678 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2679 return (struct mlx5_wqe_dseg *)pdst;
2681 pdst = (uint8_t *)txq->wqes;
2687 * Build the Data Segment of pointer type.
2690 * Pointer to TX queue structure.
2692 * Pointer to burst routine local context.
2694 * Pointer to WQE to fill with built Data Segment.
2696 * Data buffer to point.
2698 * Data buffer length.
2700 * Configured Tx offloads mask. It is fully defined at
2701 * compile time and may be used for optimization.
2703 static __rte_always_inline void
2704 mlx5_tx_dseg_ptr(struct mlx5_txq_data *restrict txq,
2705 struct mlx5_txq_local *restrict loc,
2706 struct mlx5_wqe_dseg *restrict dseg,
2709 unsigned int olx __rte_unused)
2713 dseg->bcount = rte_cpu_to_be_32(len);
2714 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
2715 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
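/*
 * Editor's usage sketch (hypothetical, not called by the driver): a
 * pointer type Data Segment only references the mbuf payload, it does
 * not copy it, so the mbuf must stay allocated until completion.
 */
static __rte_unused void
mlx5_tx_dseg_ptr_usage_sketch(struct mlx5_txq_data *restrict txq,
			      struct mlx5_txq_local *restrict loc,
			      struct mlx5_wqe_dseg *restrict dseg,
			      unsigned int olx)
{
	/* Point the Data Segment at the whole first mbuf data buffer. */
	mlx5_tx_dseg_ptr(txq, loc, dseg,
			 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
			 rte_pktmbuf_data_len(loc->mbuf), olx);
}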
2719 * Build the Data Segment of pointer type, or of inline
2720 * type if the data length does not exceed the minimal
2721 * Data Segment inline buffer size.
2724 * Pointer to TX queue structure.
2726 * Pointer to burst routine local context.
2728 * Pointer to WQE to fill with built Data Segment.
2730 * Data buffer to point.
2732 * Data buffer length.
2734 * Configured Tx offloads mask. It is fully defined at
2735 * compile time and may be used for optimization.
2737 static __rte_always_inline void
2738 mlx5_tx_dseg_iptr(struct mlx5_txq_data *restrict txq,
2739 struct mlx5_txq_local *restrict loc,
2740 struct mlx5_wqe_dseg *restrict dseg,
2743 unsigned int olx __rte_unused)
2749 if (len > MLX5_DSEG_MIN_INLINE_SIZE) {
2750 dseg->bcount = rte_cpu_to_be_32(len);
2751 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
2752 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
2756 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
2757 /* Unrolled implementation of generic rte_memcpy. */
2758 dst = (uintptr_t)&dseg->inline_data[0];
2759 src = (uintptr_t)buf;
2761 #ifdef RTE_ARCH_STRICT_ALIGN
2762 assert(dst == RTE_PTR_ALIGN(dst, sizeof(uint32_t)));
2763 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
2764 dst += sizeof(uint32_t);
2765 src += sizeof(uint32_t);
2766 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
2767 dst += sizeof(uint32_t);
2768 src += sizeof(uint32_t);
2770 *(uint64_t *)dst = *(unaligned_uint64_t *)src;
2771 dst += sizeof(uint64_t);
2772 src += sizeof(uint64_t);
2776 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
2777 dst += sizeof(uint32_t);
2778 src += sizeof(uint32_t);
2781 *(uint16_t *)dst = *(unaligned_uint16_t *)src;
2782 dst += sizeof(uint16_t);
2783 src += sizeof(uint16_t);
2786 *(uint8_t *)dst = *(uint8_t *)src;
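/*
 * Editor's illustrative sketch (hypothetical helper): the threshold used
 * by mlx5_tx_dseg_iptr() above - data is inlined only when it fits into
 * the minimal Data Segment inline buffer, otherwise the pointer type is
 * used.
 */
static __rte_unused bool
mlx5_tx_dseg_would_inline_sketch(unsigned int len)
{
	return len <= MLX5_DSEG_MIN_INLINE_SIZE;
}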
2790 * Build the Data Segment of inlined data from single
2791 * segment packet, no VLAN insertion.
2794 * Pointer to TX queue structure.
2796 * Pointer to burst routine local context.
2798 * Pointer to WQE to fill with built Data Segment.
2800 * Data buffer to point.
2802 * Data buffer length.
2804 * Configured Tx offloads mask. It is fully defined at
2805 * compile time and may be used for optimization.
2808 * Pointer to the next Data Segment after inlined data.
2809 * Ring buffer wraparound check is needed. We do not
2810 * do it here because it may not be needed for the
2811 * last packet in the eMPW session.
2813 static __rte_always_inline struct mlx5_wqe_dseg *
2814 mlx5_tx_dseg_empw(struct mlx5_txq_data *restrict txq,
2815 struct mlx5_txq_local *restrict loc __rte_unused,
2816 struct mlx5_wqe_dseg *restrict dseg,
2819 unsigned int olx __rte_unused)
2824 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
2825 pdst = &dseg->inline_data[0];
2827 * The WQEBB space availability is checked by caller.
2828 * Here we should be aware of WQE ring buffer wraparound only.
2830 part = (uint8_t *)txq->wqes_end - pdst;
2831 part = RTE_MIN(part, len);
2833 rte_memcpy(pdst, buf, part);
2837 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2838 /* Note: no final wraparound check here. */
2839 return (struct mlx5_wqe_dseg *)pdst;
2841 pdst = (uint8_t *)txq->wqes;
2848 * Build the Data Segment of inlined data from single
2849 * segment packet with VLAN insertion.
2852 * Pointer to TX queue structure.
2854 * Pointer to burst routine local context.
2856 * Pointer to the dseg to fill with the built Data Segment.
2858 * Data buffer to point.
2860 * Data buffer length.
2862 * Configured Tx offloads mask. It is fully defined at
2863 * compile time and may be used for optimization.
2866 * Pointer to the next Data Segment after inlined data.
2867 * Ring buffer wraparound check is needed.
2869 static __rte_always_inline struct mlx5_wqe_dseg *
2870 mlx5_tx_dseg_vlan(struct mlx5_txq_data *restrict txq,
2871 struct mlx5_txq_local *restrict loc __rte_unused,
2872 struct mlx5_wqe_dseg *restrict dseg,
2875 unsigned int olx __rte_unused)
2881 assert(len > MLX5_ESEG_MIN_INLINE_SIZE);
2882 static_assert(MLX5_DSEG_MIN_INLINE_SIZE ==
2883 (2 * RTE_ETHER_ADDR_LEN),
2884 "invalid Data Segment data size");
2885 dseg->bcount = rte_cpu_to_be_32((len + sizeof(struct rte_vlan_hdr)) |
2886 MLX5_ETH_WQE_DATA_INLINE);
2887 pdst = &dseg->inline_data[0];
2888 memcpy(pdst, buf, MLX5_DSEG_MIN_INLINE_SIZE);
2889 buf += MLX5_DSEG_MIN_INLINE_SIZE;
2890 pdst += MLX5_DSEG_MIN_INLINE_SIZE;
2891 len -= MLX5_DSEG_MIN_INLINE_SIZE;
2892 /* Insert VLAN ethertype + VLAN tag. Pointer is aligned. */
2893 assert(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
2894 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
2895 pdst = (uint8_t *)txq->wqes;
2896 *(uint32_t *)pdst = rte_cpu_to_be_32((RTE_ETHER_TYPE_VLAN << 16) |
2897 loc->mbuf->vlan_tci);
2898 pdst += sizeof(struct rte_vlan_hdr);
2900 * The WQEBB space availability is checked by caller.
2901 * Here we should be aware of WQE ring buffer wraparound only.
2903 part = (uint8_t *)txq->wqes_end - pdst;
2904 part = RTE_MIN(part, len);
2906 rte_memcpy(pdst, buf, part);
2910 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2911 /* Note: no final wraparound check here. */
2912 return (struct mlx5_wqe_dseg *)pdst;
2914 pdst = (uint8_t *)txq->wqes;
2921 * Build the Ethernet Segment with optionally inlined data with
2922 * VLAN insertion and following Data Segments (if any) from
2923 * multi-segment packet. Used by ordinary send and TSO.
2926 * Pointer to TX queue structure.
2928 * Pointer to burst routine local context.
2930 * Pointer to WQE to fill with built Ethernet/Data Segments.
2932 * Length of VLAN header to insert, 0 means no VLAN insertion.
2934 * Data length to inline. For TSO this parameter specifies
2935 * the exact value, for the ordinary send routine it can be aligned
2936 * up by the caller to provide better WQE space saving and data buffer
2937 * start address alignment. This length includes the VLAN header
2940 * Zero means ordinary send, inlined data can be extended,
2941 * otherwise this is TSO, inlined data length is fixed.
2943 * Configured Tx offloads mask. It is fully defined at
2944 * compile time and may be used for optimization.
2947 * Actual size of built WQE in segments.
2949 static __rte_always_inline unsigned int
2950 mlx5_tx_mseg_build(struct mlx5_txq_data *restrict txq,
2951 struct mlx5_txq_local *restrict loc,
2952 struct mlx5_wqe *restrict wqe,
2956 unsigned int olx __rte_unused)
2958 struct mlx5_wqe_dseg *restrict dseg;
2961 assert((rte_pktmbuf_pkt_len(loc->mbuf) + vlan) >= inlen);
2962 loc->mbuf_nseg = NB_SEGS(loc->mbuf);
2965 dseg = mlx5_tx_eseg_mdat(txq, loc, wqe, vlan, inlen, tso, olx);
2966 if (!loc->mbuf_nseg)
2969 * There are still some mbufs remaining, not inlined.
2970 * The first mbuf may be partially inlined and we
2971 * must process the possible non-zero data offset.
2973 if (loc->mbuf_off) {
2978 * Exhausted packets must have been dropped earlier.
2979 * A non-zero offset means there is some data
2980 * remaining in the packet.
2982 assert(loc->mbuf_off < rte_pktmbuf_data_len(loc->mbuf));
2983 assert(rte_pktmbuf_data_len(loc->mbuf));
2984 dptr = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
2986 dlen = rte_pktmbuf_data_len(loc->mbuf) - loc->mbuf_off;
2988 * Build the pointer/minimal data Data Segment.
2989 * Do ring buffer wrapping check in advance.
2991 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
2992 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
2993 mlx5_tx_dseg_iptr(txq, loc, dseg, dptr, dlen, olx);
2994 /* Store the mbuf to be freed on completion. */
2995 assert(loc->elts_free);
2996 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
2999 if (--loc->mbuf_nseg == 0)
3001 loc->mbuf = loc->mbuf->next;
3005 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
3006 struct rte_mbuf *mbuf;
3008 /* Zero length segment found, just skip. */
3010 loc->mbuf = loc->mbuf->next;
3011 rte_pktmbuf_free_seg(mbuf);
3012 if (--loc->mbuf_nseg == 0)
3015 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3016 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3019 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3020 rte_pktmbuf_data_len(loc->mbuf), olx);
3021 assert(loc->elts_free);
3022 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3025 if (--loc->mbuf_nseg == 0)
3027 loc->mbuf = loc->mbuf->next;
3032 /* Calculate actual segments used from the dseg pointer. */
3033 if ((uintptr_t)wqe < (uintptr_t)dseg)
3034 ds = ((uintptr_t)dseg - (uintptr_t)wqe) / MLX5_WSEG_SIZE;
3036 ds = (((uintptr_t)dseg - (uintptr_t)wqe) +
3037 txq->wqe_s * MLX5_WQE_SIZE) / MLX5_WSEG_SIZE;
3042 * Tx one packet function for multi-segment TSO. Supports all
3043 * types of Tx offloads, uses MLX5_OPCODE_TSO to build WQEs,
3044 * sends one packet per WQE.
3046 * This routine is responsible for storing processed mbuf
3047 * into the elts ring buffer and updating elts_head.
3050 * Pointer to TX queue structure.
3052 * Pointer to burst routine local context.
3054 * Configured Tx offloads mask. It is fully defined at
3055 * compile time and may be used for optimization.
3058 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3059 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3060 * Local context variables partially updated.
3062 static __rte_always_inline enum mlx5_txcmp_code
3063 mlx5_tx_packet_multi_tso(struct mlx5_txq_data *restrict txq,
3064 struct mlx5_txq_local *restrict loc,
3067 struct mlx5_wqe *restrict wqe;
3068 unsigned int ds, dlen, inlen, ntcp, vlan = 0;
3071 * Calculate data length to be inlined to estimate
3072 * the required space in WQE ring buffer.
3074 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
3075 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3076 vlan = sizeof(struct rte_vlan_hdr);
3077 inlen = loc->mbuf->l2_len + vlan +
3078 loc->mbuf->l3_len + loc->mbuf->l4_len;
3079 if (unlikely((!inlen || !loc->mbuf->tso_segsz)))
3080 return MLX5_TXCMP_CODE_ERROR;
3081 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
3082 inlen += loc->mbuf->outer_l2_len + loc->mbuf->outer_l3_len;
3083 /* Packet must contain all TSO headers. */
3084 if (unlikely(inlen > MLX5_MAX_TSO_HEADER ||
3085 inlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
3086 inlen > (dlen + vlan)))
3087 return MLX5_TXCMP_CODE_ERROR;
3088 assert(inlen >= txq->inlen_mode);
3090 * Check whether there are enough free WQEBBs:
3092 * - Ethernet Segment
3093 * - First Segment of inlined Ethernet data
3094 * - ... data continued ...
3095 * - Data Segments of pointer/min inline type
3097 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
3098 MLX5_ESEG_MIN_INLINE_SIZE +
3100 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3101 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3102 return MLX5_TXCMP_CODE_EXIT;
3103 /* Check for maximal WQE size. */
3104 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3105 return MLX5_TXCMP_CODE_ERROR;
3106 #ifdef MLX5_PMD_SOFT_COUNTERS
3107 /* Update sent data bytes/packets counters. */
3108 ntcp = (dlen - (inlen - vlan) + loc->mbuf->tso_segsz - 1) /
3109 loc->mbuf->tso_segsz;
3111 * One packet will be added for the mbuf itself
3112 * at the end of mlx5_tx_burst from the
3113 * loc->pkts_sent field.
3116 txq->stats.opackets += ntcp;
3117 txq->stats.obytes += dlen + vlan + ntcp * inlen;
3119 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3120 loc->wqe_last = wqe;
3121 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_TSO, olx);
3122 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 1, olx);
3123 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3124 txq->wqe_ci += (ds + 3) / 4;
3125 loc->wqe_free -= (ds + 3) / 4;
3126 return MLX5_TXCMP_CODE_MULTI;
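/*
 * Editor's illustrative sketch (hypothetical helper, soft counters only):
 * the TCP segment count used for the statistics above - the TSO payload
 * (packet length minus the inlined headers) divided by the MSS, rounded
 * up.
 */
static __rte_unused unsigned int
mlx5_tx_tso_segment_count_sketch(unsigned int payload_len, unsigned int mss)
{
	return (payload_len + mss - 1) / mss;
}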
3130 * Tx one packet function for multi-segment SEND. Supports all
3131 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
3132 * sends one packet per WQE, without any data inlining in
3135 * This routine is responsible for storing processed mbuf
3136 * into the elts ring buffer and updating elts_head.
3139 * Pointer to TX queue structure.
3141 * Pointer to burst routine local context.
3143 * Configured Tx offloads mask. It is fully defined at
3144 * compile time and may be used for optimization.
3147 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3148 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3149 * Local context variables partially updated.
3151 static __rte_always_inline enum mlx5_txcmp_code
3152 mlx5_tx_packet_multi_send(struct mlx5_txq_data *restrict txq,
3153 struct mlx5_txq_local *restrict loc,
3156 struct mlx5_wqe_dseg *restrict dseg;
3157 struct mlx5_wqe *restrict wqe;
3158 unsigned int ds, nseg;
3160 assert(NB_SEGS(loc->mbuf) > 1);
3162 * No inlining at all - this means that saving CPU cycles
3163 * was prioritized at configuration time, so we should not
3164 * copy any packet data into the WQE.
3166 nseg = NB_SEGS(loc->mbuf);
3168 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3169 return MLX5_TXCMP_CODE_EXIT;
3170 /* Check for maximal WQE size. */
3171 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3172 return MLX5_TXCMP_CODE_ERROR;
3174 * Some Tx offloads may cause an error if
3175 * the packet is not long enough; check against
3176 * the assumed minimal length.
3178 if (rte_pktmbuf_pkt_len(loc->mbuf) <= MLX5_ESEG_MIN_INLINE_SIZE)
3179 return MLX5_TXCMP_CODE_ERROR;
3180 #ifdef MLX5_PMD_SOFT_COUNTERS
3181 /* Update sent data bytes counter. */
3182 txq->stats.obytes += rte_pktmbuf_pkt_len(loc->mbuf);
3183 if (MLX5_TXOFF_CONFIG(VLAN) &&
3184 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3185 txq->stats.obytes += sizeof(struct rte_vlan_hdr);
3188 * SEND WQE, one WQEBB:
3189 * - Control Segment, SEND opcode
3190 * - Ethernet Segment, optional VLAN, no inline
3191 * - Data Segments, pointer only type
3193 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3194 loc->wqe_last = wqe;
3195 mlx5_tx_cseg_init(txq, loc, wqe, ds, MLX5_OPCODE_SEND, olx);
3196 mlx5_tx_eseg_none(txq, loc, wqe, olx);
3197 dseg = &wqe->dseg[0];
3199 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
3200 struct rte_mbuf *mbuf;
3203 * Zero length segment found, we have to
3204 * correct the total WQE size in segments.
3205 * This is supposed to be a rare occasion, so
3206 * in the normal case (no zero length segments)
3207 * we avoid extra writes to the Control Segment.
3211 wqe->cseg.sq_ds -= RTE_BE32(1);
3213 loc->mbuf = mbuf->next;
3214 rte_pktmbuf_free_seg(mbuf);
3220 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3221 rte_pktmbuf_data_len(loc->mbuf), olx);
3222 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3227 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3228 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3229 loc->mbuf = loc->mbuf->next;
3232 txq->wqe_ci += (ds + 3) / 4;
3233 loc->wqe_free -= (ds + 3) / 4;
3234 return MLX5_TXCMP_CODE_MULTI;
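/*
 * Editor's illustrative sketch (hypothetical helper): WQE size estimation
 * for the non-inlined multi-segment SEND above - one Control Segment, one
 * Ethernet Segment and one pointer Data Segment per mbuf segment, rounded
 * up to whole WQEBBs.
 */
static __rte_unused unsigned int
mlx5_tx_mseg_send_wqebbs_sketch(unsigned int nseg)
{
	unsigned int ds = nseg + 2;

	return (ds + 3) / 4;
}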
3238 * Tx one packet function for multi-segment SEND. Supports all
3239 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
3240 * sends one packet per WQE, with data inlining in
3241 * Ethernet Segment and minimal Data Segments.
3243 * This routine is responsible for storing processed mbuf
3244 * into the elts ring buffer and updating elts_head.
3247 * Pointer to TX queue structure.
3249 * Pointer to burst routine local context.
3251 * Configured Tx offloads mask. It is fully defined at
3252 * compile time and may be used for optimization.
3255 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3256 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3257 * Local context variables partially updated.
3259 static __rte_always_inline enum mlx5_txcmp_code
3260 mlx5_tx_packet_multi_inline(struct mlx5_txq_data *restrict txq,
3261 struct mlx5_txq_local *restrict loc,
3264 struct mlx5_wqe *restrict wqe;
3265 unsigned int ds, inlen, dlen, vlan = 0;
3267 assert(MLX5_TXOFF_CONFIG(INLINE));
3268 assert(NB_SEGS(loc->mbuf) > 1);
3270 * First calculate data length to be inlined
3271 * to estimate the required space for WQE.
3273 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
3274 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3275 vlan = sizeof(struct rte_vlan_hdr);
3276 inlen = dlen + vlan;
3277 /* Check against minimal length. */
3278 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
3279 return MLX5_TXCMP_CODE_ERROR;
3280 assert(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
3281 if (inlen > txq->inlen_send) {
3282 struct rte_mbuf *mbuf;
3287 * Packet length exceeds the allowed inline
3288 * data length, check whether the minimal
3289 * inlining is required.
3291 if (txq->inlen_mode) {
3292 assert(txq->inlen_mode >= MLX5_ESEG_MIN_INLINE_SIZE);
3293 assert(txq->inlen_mode <= txq->inlen_send);
3294 inlen = txq->inlen_mode;
3296 if (!vlan || txq->vlan_en) {
3298 * VLAN insertion will be done by the HW.
3299 * It is not the most efficient way - the VLAN
3300 * flag is checked twice, but we must compute the
3301 * inlining length correctly and take into
3302 * account the VLAN header being inserted.
3304 return mlx5_tx_packet_multi_send
3307 inlen = MLX5_ESEG_MIN_INLINE_SIZE;
3310 * Now we know the minimal amount of data requested
3311 * to be inlined. Check whether we should inline the buffers
3312 * from the beginning of the chain to eliminate some mbufs.
3315 nxlen = rte_pktmbuf_data_len(mbuf);
3316 if (unlikely(nxlen <= txq->inlen_send)) {
3317 /* We can inline first mbuf at least. */
3318 if (nxlen < inlen) {
3321 /* Scan mbufs till inlen filled. */
3326 nxlen = rte_pktmbuf_data_len(mbuf);
3328 } while (unlikely(nxlen < inlen));
3329 if (unlikely(nxlen > txq->inlen_send)) {
3330 /* We cannot inline entire mbuf. */
3331 smlen = inlen - smlen;
3332 start = rte_pktmbuf_mtod_offset
3333 (mbuf, uintptr_t, smlen);
3340 /* This must not be the end of the packet. */
3342 nxlen = inlen + rte_pktmbuf_data_len(mbuf);
3343 } while (unlikely(nxlen < txq->inlen_send));
3345 start = rte_pktmbuf_mtod(mbuf, uintptr_t);
3347 * Check whether we can extend the inlined data to align
3348 * the start address of the data buffer to a cacheline.
3351 start = (~start + 1) & (RTE_CACHE_LINE_SIZE - 1);
3352 if (unlikely(start)) {
3354 if (start <= txq->inlen_send)
3359 * Check whether there are enough free WQEBBs:
3361 * - Ethernet Segment
3362 * - First Segment of inlined Ethernet data
3363 * - ... data continued ...
3364 * - Data Segments of pointer/min inline type
3366 * Estimate the number of Data Segments conservatively,
3367 * supposing that no mbufs are freed during inlining.
3369 assert(inlen <= txq->inlen_send);
3370 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
3371 MLX5_ESEG_MIN_INLINE_SIZE +
3373 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3374 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3375 return MLX5_TXCMP_CODE_EXIT;
3376 /* Check for maximal WQE size. */
3377 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3378 return MLX5_TXCMP_CODE_ERROR;
3379 #ifdef MLX5_PMD_SOFT_COUNTERS
3380 /* Update sent data bytes/packets counters. */
3381 txq->stats.obytes += dlen + vlan;
3383 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3384 loc->wqe_last = wqe;
3385 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_SEND, olx);
3386 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 0, olx);
3387 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3388 txq->wqe_ci += (ds + 3) / 4;
3389 loc->wqe_free -= (ds + 3) / 4;
3390 return MLX5_TXCMP_CODE_MULTI;
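/*
 * Editor's illustrative sketch (hypothetical helper): the "(ds + 3) / 4"
 * rounding used throughout the routines above - a WQE built from 'ds'
 * 16-byte segments occupies this many 64-byte WQEBBs in the ring.
 */
static __rte_unused unsigned int
mlx5_tx_ds_to_wqebb_sketch(unsigned int ds)
{
	return (ds + 3) / 4;
}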
3394 * Tx burst function for multi-segment packets. Supports all
3395 * types of Tx offloads, uses MLX5_OPCODE_SEND/TSO to build WQEs,
3396 * sends one packet per WQE. Function stops sending if it
3397 * encounters the single-segment packet.
3399 * This routine is responsible for storing processed mbuf
3400 * into the elts ring buffer and updating elts_head.
3403 * Pointer to TX queue structure.
3405 * Packets to transmit.
3407 * Number of packets in array.
3409 * Pointer to burst routine local context.
3411 * Configured Tx offloads mask. It is fully defined at
3412 * compile time and may be used for optimization.
3415 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3416 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3417 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
3418 * MLX5_TXCMP_CODE_TSO - TSO single-segment packet encountered.
3419 * Local context variables updated.
3421 static __rte_always_inline enum mlx5_txcmp_code
3422 mlx5_tx_burst_mseg(struct mlx5_txq_data *restrict txq,
3423 struct rte_mbuf **restrict pkts,
3424 unsigned int pkts_n,
3425 struct mlx5_txq_local *restrict loc,
3428 assert(loc->elts_free && loc->wqe_free);
3429 assert(pkts_n > loc->pkts_sent);
3430 pkts += loc->pkts_sent + 1;
3431 pkts_n -= loc->pkts_sent;
3433 enum mlx5_txcmp_code ret;
3435 assert(NB_SEGS(loc->mbuf) > 1);
3437 * Estimate the number of free elts quickly but
3438 * conservatively. Some segment may be fully inlined
3439 * and freed; ignore this here - a precise estimation is costly.
3442 if (loc->elts_free < NB_SEGS(loc->mbuf))
3443 return MLX5_TXCMP_CODE_EXIT;
3444 if (MLX5_TXOFF_CONFIG(TSO) &&
3445 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)) {
3446 /* Proceed with multi-segment TSO. */
3447 ret = mlx5_tx_packet_multi_tso(txq, loc, olx);
3448 } else if (MLX5_TXOFF_CONFIG(INLINE)) {
3449 /* Proceed with multi-segment SEND with inlining. */
3450 ret = mlx5_tx_packet_multi_inline(txq, loc, olx);
3452 /* Proceed with multi-segment SEND w/o inlining. */
3453 ret = mlx5_tx_packet_multi_send(txq, loc, olx);
3455 if (ret == MLX5_TXCMP_CODE_EXIT)
3456 return MLX5_TXCMP_CODE_EXIT;
3457 if (ret == MLX5_TXCMP_CODE_ERROR)
3458 return MLX5_TXCMP_CODE_ERROR;
3459 /* WQE is built, go to the next packet. */
3462 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3463 return MLX5_TXCMP_CODE_EXIT;
3464 loc->mbuf = *pkts++;
3466 rte_prefetch0(*pkts);
3467 if (likely(NB_SEGS(loc->mbuf) > 1))
3469 /* Here ends the series of multi-segment packets. */
3470 if (MLX5_TXOFF_CONFIG(TSO) &&
3471 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
3472 return MLX5_TXCMP_CODE_TSO;
3473 return MLX5_TXCMP_CODE_SINGLE;
3479 * Tx burst function for single-segment packets with TSO.
3480 * Supports all types of Tx offloads, except multi-packets.
3481 * Uses MLX5_OPCODE_TSO to build WQEs, sends one packet per WQE.
3482 * Function stops sending if it encounters the multi-segment
3483 * packet or packet without TSO requested.
3485 * The routine is responsible for storing processed mbuf
3486 * into the elts ring buffer and updating elts_head if inline
3487 * offload is requested, due to possible early freeing
3488 * of the inlined mbufs (the pkts array can not be stored in elts
3492 * Pointer to TX queue structure.
3494 * Packets to transmit.
3496 * Number of packets in array.
3498 * Pointer to burst routine local context.
3500 * Configured Tx offloads mask. It is fully defined at
3501 * compile time and may be used for optimization.
3504 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3505 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3506 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
3507 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
3508 * Local context variables updated.
3510 static __rte_always_inline enum mlx5_txcmp_code
3511 mlx5_tx_burst_tso(struct mlx5_txq_data *restrict txq,
3512 struct rte_mbuf **restrict pkts,
3513 unsigned int pkts_n,
3514 struct mlx5_txq_local *restrict loc,
3517 assert(loc->elts_free && loc->wqe_free);
3518 assert(pkts_n > loc->pkts_sent);
3519 pkts += loc->pkts_sent + 1;
3520 pkts_n -= loc->pkts_sent;
3522 struct mlx5_wqe_dseg *restrict dseg;
3523 struct mlx5_wqe *restrict wqe;
3524 unsigned int ds, dlen, hlen, ntcp, vlan = 0;
3527 assert(NB_SEGS(loc->mbuf) == 1);
3528 dlen = rte_pktmbuf_data_len(loc->mbuf);
3529 if (MLX5_TXOFF_CONFIG(VLAN) &&
3530 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
3531 vlan = sizeof(struct rte_vlan_hdr);
3534 * First calculate the WQE size to check
3535 * whether we have enough space in ring buffer.
3537 hlen = loc->mbuf->l2_len + vlan +
3538 loc->mbuf->l3_len + loc->mbuf->l4_len;
3539 if (unlikely((!hlen || !loc->mbuf->tso_segsz)))
3540 return MLX5_TXCMP_CODE_ERROR;
3541 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
3542 hlen += loc->mbuf->outer_l2_len +
3543 loc->mbuf->outer_l3_len;
3544 /* Segment must contain all TSO headers. */
3545 if (unlikely(hlen > MLX5_MAX_TSO_HEADER ||
3546 hlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
3547 hlen > (dlen + vlan)))
3548 return MLX5_TXCMP_CODE_ERROR;
3550 * Check whether there are enough free WQEBBs:
3552 * - Ethernet Segment
3553 * - First Segment of inlined Ethernet data
3554 * - ... data continued ...
3555 * - Finishing Data Segment of pointer type
3557 ds = 4 + (hlen - MLX5_ESEG_MIN_INLINE_SIZE +
3558 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3559 if (loc->wqe_free < ((ds + 3) / 4))
3560 return MLX5_TXCMP_CODE_EXIT;
3561 #ifdef MLX5_PMD_SOFT_COUNTERS
3562 /* Update sent data bytes/packets counters. */
3563 ntcp = (dlen + vlan - hlen +
3564 loc->mbuf->tso_segsz - 1) /
3565 loc->mbuf->tso_segsz;
3567 * One packet will be added for the mbuf itself at the end
3568 * of mlx5_tx_burst from the loc->pkts_sent field.
3571 txq->stats.opackets += ntcp;
3572 txq->stats.obytes += dlen + vlan + ntcp * hlen;
3575 * Build the TSO WQE:
3577 * - Ethernet Segment with hlen bytes inlined
3578 * - Data Segment of pointer type
3580 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3581 loc->wqe_last = wqe;
3582 mlx5_tx_cseg_init(txq, loc, wqe, ds,
3583 MLX5_OPCODE_TSO, olx);
3584 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan, hlen, 1, olx);
3585 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) + hlen - vlan;
3586 dlen -= hlen - vlan;
3587 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
3589 * WQE is built, update the loop parameters
3590 * and go to the next packet.
3592 txq->wqe_ci += (ds + 3) / 4;
3593 loc->wqe_free -= (ds + 3) / 4;
3594 if (MLX5_TXOFF_CONFIG(INLINE))
3595 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3599 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3600 return MLX5_TXCMP_CODE_EXIT;
3601 loc->mbuf = *pkts++;
3603 rte_prefetch0(*pkts);
3604 if (MLX5_TXOFF_CONFIG(MULTI) &&
3605 unlikely(NB_SEGS(loc->mbuf) > 1))
3606 return MLX5_TXCMP_CODE_MULTI;
3607 if (likely(!(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)))
3608 return MLX5_TXCMP_CODE_SINGLE;
3609 /* Continue with the next TSO packet. */
3615 * Analyze the packet and select the best method to send.
3618 * Pointer to TX queue structure.
3620 * Pointer to burst routine local context.
3622 * Configured Tx offloads mask. It is fully defined at
3623 * compile time and may be used for optimization.
3625 * The predefined flag whether to do the complete check for
3626 * multi-segment packets and TSO.
3629 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
3630 * MLX5_TXCMP_CODE_TSO - TSO required, use TSO/LSO.
3631 * MLX5_TXCMP_CODE_SINGLE - single-segment packet, use SEND.
3632 * MLX5_TXCMP_CODE_EMPW - single-segment packet, use MPW.
3634 static __rte_always_inline enum mlx5_txcmp_code
3635 mlx5_tx_able_to_empw(struct mlx5_txq_data *restrict txq,
3636 struct mlx5_txq_local *restrict loc,
3640 /* Check for multi-segment packet. */
3642 MLX5_TXOFF_CONFIG(MULTI) &&
3643 unlikely(NB_SEGS(loc->mbuf) > 1))
3644 return MLX5_TXCMP_CODE_MULTI;
3645 /* Check for TSO packet. */
3647 MLX5_TXOFF_CONFIG(TSO) &&
3648 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
3649 return MLX5_TXCMP_CODE_TSO;
3650 /* Check if eMPW is enabled at all. */
3651 if (!MLX5_TXOFF_CONFIG(EMPW))
3652 return MLX5_TXCMP_CODE_SINGLE;
3653 /* Check if eMPW can be engaged. */
3654 if (MLX5_TXOFF_CONFIG(VLAN) &&
3655 unlikely(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) &&
3656 (!MLX5_TXOFF_CONFIG(INLINE) ||
3657 unlikely((rte_pktmbuf_data_len(loc->mbuf) +
3658 sizeof(struct rte_vlan_hdr)) > txq->inlen_empw))) {
3660 * eMPW does not support VLAN insertion offload,
3661 * we would have to inline the entire packet, but
3662 * the packet is too long for inlining.
3664 return MLX5_TXCMP_CODE_SINGLE;
3666 return MLX5_TXCMP_CODE_EMPW;
3670 * Check the next packet attributes to match with the eMPW batch ones.
3671 * In addition, for legacy MPW the packet length is checked as well.
3674 * Pointer to TX queue structure.
3676 * Pointer to Ethernet Segment of eMPW batch.
3678 * Pointer to burst routine local context.
3680 * Length of previous packet in MPW descriptor.
3682 * Configured Tx offloads mask. It is fully defined at
3683 * compile time and may be used for optimization.
3686 * true - packet match with eMPW batch attributes.
3687 * false - no match, eMPW should be restarted.
3689 static __rte_always_inline bool
3690 mlx5_tx_match_empw(struct mlx5_txq_data *restrict txq __rte_unused,
3691 struct mlx5_wqe_eseg *restrict es,
3692 struct mlx5_txq_local *restrict loc,
3696 uint8_t swp_flags = 0;
3698 /* Compare the checksum flags, if any. */
3699 if (MLX5_TXOFF_CONFIG(CSUM) &&
3700 txq_ol_cksum_to_cs(loc->mbuf) != es->cs_flags)
3702 /* Compare the Software Parser offsets and flags. */
3703 if (MLX5_TXOFF_CONFIG(SWP) &&
3704 (es->swp_offs != txq_mbuf_to_swp(loc, &swp_flags, olx) ||
3705 es->swp_flags != swp_flags))
3707 /* Fill metadata field if needed. */
3708 if (MLX5_TXOFF_CONFIG(METADATA) &&
3709 es->metadata != (loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
3710 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0))
3712 /* Legacy MPW can only send packets with the same length. */
3713 if (MLX5_TXOFF_CONFIG(MPW) &&
3714 dlen != rte_pktmbuf_data_len(loc->mbuf))
3716 /* There must be no VLAN packets in eMPW loop. */
3717 if (MLX5_TXOFF_CONFIG(VLAN))
3718 assert(!(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT));
3723 * Update send loop variables and WQE for eMPW loop
3724 * without data inlining. Number of Data Segments is
3725 * equal to the number of sent packets.
3728 * Pointer to TX queue structure.
3730 * Pointer to burst routine local context.
3732 * Number of packets (equals the number of Data Segments).
3734 * Accumulated statistics, bytes sent
3736 * Configured Tx offloads mask. It is fully defined at
3737 * compile time and may be used for optimization.
3743 static __rte_always_inline void
3744 mlx5_tx_sdone_empw(struct mlx5_txq_data *restrict txq,
3745 struct mlx5_txq_local *restrict loc,
3748 unsigned int olx __rte_unused)
3750 assert(!MLX5_TXOFF_CONFIG(INLINE));
3751 #ifdef MLX5_PMD_SOFT_COUNTERS
3752 /* Update sent data bytes counter. */
3753 txq->stats.obytes += slen;
3757 loc->elts_free -= ds;
3758 loc->pkts_sent += ds;
3760 loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3761 txq->wqe_ci += (ds + 3) / 4;
3762 loc->wqe_free -= (ds + 3) / 4;
3766 * Update send loop variables and WQE for eMPW loop
3767 * with data inlining. Gets the size of pushed descriptors
3768 * and data to the WQE.
3771 * Pointer to TX queue structure.
3773 * Pointer to burst routine local context.
3775 * Total size of descriptor/data in bytes.
3777 * Accumulated statistics, data bytes sent.
3779 * Configured Tx offloads mask. It is fully defined at
3780 * compile time and may be used for optimization.
3786 static __rte_always_inline void
3787 mlx5_tx_idone_empw(struct mlx5_txq_data *restrict txq,
3788 struct mlx5_txq_local *restrict loc,
3791 unsigned int olx __rte_unused)
3793 assert(MLX5_TXOFF_CONFIG(INLINE));
3794 assert((len % MLX5_WSEG_SIZE) == 0);
3795 #ifdef MLX5_PMD_SOFT_COUNTERS
3796 /* Update sent data bytes counter. */
3797 txq->stats.obytes += slen;
3801 len = len / MLX5_WSEG_SIZE + 2;
3802 loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | len);
3803 txq->wqe_ci += (len + 3) / 4;
3804 loc->wqe_free -= (len + 3) / 4;
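/*
 * Editor's illustrative sketch (hypothetical helper): the Data Segment
 * count stored by mlx5_tx_idone_empw() above - the inlined bytes are
 * already padded to whole WSEGs, plus two segments for the Control and
 * Ethernet Segments of the eMPW title WQEBB.
 */
static __rte_unused unsigned int
mlx5_tx_idone_ds_sketch(unsigned int inlined_bytes)
{
	return inlined_bytes / MLX5_WSEG_SIZE + 2;
}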
3808 * The set of Tx burst functions for single-segment packets
3809 * without TSO and with Multi-Packet Writing feature support.
3810 * Supports all types of Tx offloads, except multi-packets
3813 * Uses MLX5_OPCODE_EMPW to build WQEs if possible and sends
3814 * as many packets per WQE as it can. If eMPW is not configured
3815 * or the packet can not be sent with eMPW (VLAN insertion), the
3816 * ordinary SEND opcode is used and only one packet is placed in the WQE.
3819 * Functions stop sending if it encounters the multi-segment
3820 * packet or packet with TSO requested.
3822 * The routines are responsible for storing processed mbuf
3823 * into the elts ring buffer and updating elts_head if inlining
3824 * offload is requested. Otherwise the copying of mbufs to elts
3825 * can be postponed and completed at the end of the burst routine.
3828 * Pointer to TX queue structure.
3830 * Packets to transmit.
3832 * Number of packets in array.
3834 * Pointer to burst routine local context.
3836 * Configured Tx offloads mask. It is fully defined at
3837 * compile time and may be used for optimization.
3840 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3841 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3842 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
3843 * MLX5_TXCMP_CODE_TSO - TSO packet encountered.
3844 * MLX5_TXCMP_CODE_SINGLE - used inside functions set.
3845 * MLX5_TXCMP_CODE_EMPW - used inside functions set.
3847 * Local context variables updated.
3850 * The routine sends packets with MLX5_OPCODE_EMPW
3851 * without inlining, this is dedicated optimized branch.
3852 * No VLAN insertion is supported.
3854 static __rte_always_inline enum mlx5_txcmp_code
3855 mlx5_tx_burst_empw_simple(struct mlx5_txq_data *restrict txq,
3856 struct rte_mbuf **restrict pkts,
3857 unsigned int pkts_n,
3858 struct mlx5_txq_local *restrict loc,
3862 * This subroutine is part of mlx5_tx_burst_single()
3863 * and sends single-segment packet with eMPW opcode
3864 * without data inlining.
3866 assert(!MLX5_TXOFF_CONFIG(INLINE));
3867 assert(MLX5_TXOFF_CONFIG(EMPW));
3868 assert(loc->elts_free && loc->wqe_free);
3869 assert(pkts_n > loc->pkts_sent);
3870 static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
3871 pkts += loc->pkts_sent + 1;
3872 pkts_n -= loc->pkts_sent;
3874 struct mlx5_wqe_dseg *restrict dseg;
3875 struct mlx5_wqe_eseg *restrict eseg;
3876 enum mlx5_txcmp_code ret;
3877 unsigned int part, loop;
3878 unsigned int slen = 0;
3881 assert(NB_SEGS(loc->mbuf) == 1);
3882 part = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
3883 MLX5_MPW_MAX_PACKETS :
3884 MLX5_EMPW_MAX_PACKETS);
3885 if (unlikely(loc->elts_free < part)) {
3886 /* We do not have enough elts to save all mbufs. */
3887 if (unlikely(loc->elts_free < MLX5_EMPW_MIN_PACKETS))
3888 return MLX5_TXCMP_CODE_EXIT;
3889 /* But we are still able to send at least a minimal eMPW. */
3890 part = loc->elts_free;
3892 /* Check whether we have enough WQEs */
3893 if (unlikely(loc->wqe_free < ((2 + part + 3) / 4))) {
3894 if (unlikely(loc->wqe_free <
3895 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
3896 return MLX5_TXCMP_CODE_EXIT;
3897 part = (loc->wqe_free * 4) - 2;
3899 if (likely(part > 1))
3900 rte_prefetch0(*pkts);
3901 loc->wqe_last = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3903 * Build eMPW title WQEBB:
3904 * - Control Segment, eMPW opcode
3905 * - Ethernet Segment, no inline
3907 mlx5_tx_cseg_init(txq, loc, loc->wqe_last, part + 2,
3908 MLX5_OPCODE_ENHANCED_MPSW, olx);
3909 mlx5_tx_eseg_none(txq, loc, loc->wqe_last,
3910 olx & ~MLX5_TXOFF_CONFIG_VLAN);
3911 eseg = &loc->wqe_last->eseg;
3912 dseg = &loc->wqe_last->dseg[0];
3914 /* Store the packet length for legacy MPW. */
3915 if (MLX5_TXOFF_CONFIG(MPW))
3916 eseg->mss = rte_cpu_to_be_16
3917 (rte_pktmbuf_data_len(loc->mbuf));
3919 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
3920 #ifdef MLX5_PMD_SOFT_COUNTERS
3921 /* Update sent data bytes counter. */
3926 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3928 if (unlikely(--loop == 0))
3930 loc->mbuf = *pkts++;
3931 if (likely(loop > 1))
3932 rte_prefetch0(*pkts);
3933 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
3935 * Unroll the completion code to avoid
3936 * returning a variable value - it results in
3937 * unoptimized sequential checks in the caller.
3939 if (ret == MLX5_TXCMP_CODE_MULTI) {
3941 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3942 if (unlikely(!loc->elts_free ||
3944 return MLX5_TXCMP_CODE_EXIT;
3945 return MLX5_TXCMP_CODE_MULTI;
3947 assert(NB_SEGS(loc->mbuf) == 1);
3948 if (ret == MLX5_TXCMP_CODE_TSO) {
3950 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3951 if (unlikely(!loc->elts_free ||
3953 return MLX5_TXCMP_CODE_EXIT;
3954 return MLX5_TXCMP_CODE_TSO;
3956 if (ret == MLX5_TXCMP_CODE_SINGLE) {
3958 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3959 if (unlikely(!loc->elts_free ||
3961 return MLX5_TXCMP_CODE_EXIT;
3962 return MLX5_TXCMP_CODE_SINGLE;
3964 if (ret != MLX5_TXCMP_CODE_EMPW) {
3967 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3968 return MLX5_TXCMP_CODE_ERROR;
3971 * Check whether packet parameters coincide
3972 * within assumed eMPW batch:
3973 * - checksum settings
3974 * - metadata value
3975 * - software parser settings
3976 * - packet lengths (legacy MPW only)
3978 if (!mlx5_tx_match_empw(txq, eseg, loc, dlen, olx)) {
3981 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3982 if (unlikely(!loc->elts_free ||
3984 return MLX5_TXCMP_CODE_EXIT;
3988 /* Packet attributes match, continue the same eMPW. */
3990 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3991 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3993 /* eMPW is built successfully, update loop parameters. */
3995 assert(pkts_n >= part);
3996 #ifdef MLX5_PMD_SOFT_COUNTERS
3997 /* Update sent data bytes counter. */
3998 txq->stats.obytes += slen;
4000 loc->elts_free -= part;
4001 loc->pkts_sent += part;
4002 txq->wqe_ci += (2 + part + 3) / 4;
4003 loc->wqe_free -= (2 + part + 3) / 4;
4005 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
4006 return MLX5_TXCMP_CODE_EXIT;
4007 loc->mbuf = *pkts++;
4008 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4009 if (unlikely(ret != MLX5_TXCMP_CODE_EMPW))
4011 /* Continue sending eMPW batches. */
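/*
 * Editor's illustrative sketch (hypothetical helper): WQEBB consumption
 * of one non-inlined eMPW batch built above - two title segments
 * (Control + Ethernet) plus one pointer Data Segment per packet, rounded
 * up to whole WQEBBs.
 */
static __rte_unused unsigned int
mlx5_tx_empw_wqebbs_sketch(unsigned int npackets)
{
	return (2 + npackets + 3) / 4;
}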
4017 * The routine sends packets with MLX5_OPCODE_EMPW
4018 * with inlining, optionally supports VLAN insertion.
4020 static __rte_always_inline enum mlx5_txcmp_code
4021 mlx5_tx_burst_empw_inline(struct mlx5_txq_data *restrict txq,
4022 struct rte_mbuf **restrict pkts,
4023 unsigned int pkts_n,
4024 struct mlx5_txq_local *restrict loc,
4028 * This subroutine is part of mlx5_tx_burst_single()
4029 * and sends single-segment packet with eMPW opcode
4030 * with data inlining.
4032 assert(MLX5_TXOFF_CONFIG(INLINE));
4033 assert(MLX5_TXOFF_CONFIG(EMPW));
4034 assert(loc->elts_free && loc->wqe_free);
4035 assert(pkts_n > loc->pkts_sent);
4036 static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
4037 pkts += loc->pkts_sent + 1;
4038 pkts_n -= loc->pkts_sent;
4040 struct mlx5_wqe_dseg *restrict dseg;
4041 struct mlx5_wqe_eseg *restrict eseg;
4042 enum mlx5_txcmp_code ret;
4043 unsigned int room, part, nlim;
4044 unsigned int slen = 0;
4046 assert(NB_SEGS(loc->mbuf) == 1);
4048 * Limit the number of packets in one WQE
4049 * to improve the CQE generation latency.
4051 nlim = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
4052 MLX5_MPW_INLINE_MAX_PACKETS :
4053 MLX5_EMPW_MAX_PACKETS);
4054 /* Check whether we have the minimal amount of WQEs. */
4055 if (unlikely(loc->wqe_free <
4056 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
4057 return MLX5_TXCMP_CODE_EXIT;
4058 if (likely(pkts_n > 1))
4059 rte_prefetch0(*pkts);
4060 loc->wqe_last = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4062 * Build eMPW title WQEBB:
4063 * - Control Segment, eMPW opcode, zero DS
4064 * - Ethernet Segment, no inline
4066 mlx5_tx_cseg_init(txq, loc, loc->wqe_last, 0,
4067 MLX5_OPCODE_ENHANCED_MPSW, olx);
4068 mlx5_tx_eseg_none(txq, loc, loc->wqe_last,
4069 olx & ~MLX5_TXOFF_CONFIG_VLAN);
4070 eseg = &loc->wqe_last->eseg;
4071 dseg = &loc->wqe_last->dseg[0];
4072 /* Store the packet length for legacy MPW. */
4073 if (MLX5_TXOFF_CONFIG(MPW))
4074 eseg->mss = rte_cpu_to_be_16
4075 (rte_pktmbuf_data_len(loc->mbuf));
4076 room = RTE_MIN(MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE,
4077 loc->wqe_free) * MLX5_WQE_SIZE -
4078 MLX5_WQE_CSEG_SIZE -
4080 /* Build WQE till we have space, packets and resources. */
4083 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
4084 uint8_t *dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
4087 assert(room >= MLX5_WQE_DSEG_SIZE);
4088 assert((room % MLX5_WQE_DSEG_SIZE) == 0);
4089 assert((uintptr_t)dseg < (uintptr_t)txq->wqes_end);
4091 * Some Tx offloads may cause an error if
4092 * the packet is not long enough; check against
4093 * the assumed minimal length.
4095 if (unlikely(dlen <= MLX5_ESEG_MIN_INLINE_SIZE)) {
4097 if (unlikely(!part))
4098 return MLX5_TXCMP_CODE_ERROR;
4100 * We have some successfully built
4101 * packet Data Segments to send.
4103 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4104 return MLX5_TXCMP_CODE_ERROR;
4106 /* Inline or not inline - that's the Question. */
4107 if (dlen > txq->inlen_empw)
4109 /* Inline entire packet, optional VLAN insertion. */
4110 tlen = sizeof(dseg->bcount) + dlen;
4111 if (MLX5_TXOFF_CONFIG(VLAN) &&
4112 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
4114 * The packet length has been checked in
4115 * mlx5_tx_able_to_empw() and the packet is
4116 * guaranteed to fit into the inline length.
4118 assert((dlen + sizeof(struct rte_vlan_hdr)) <=
4120 tlen += sizeof(struct rte_vlan_hdr);
4123 dseg = mlx5_tx_dseg_vlan(txq, loc, dseg,
4125 #ifdef MLX5_PMD_SOFT_COUNTERS
4126 /* Update sent data bytes counter. */
4127 slen += sizeof(struct rte_vlan_hdr);
4132 dseg = mlx5_tx_dseg_empw(txq, loc, dseg,
4135 tlen = RTE_ALIGN(tlen, MLX5_WSEG_SIZE);
4136 assert(room >= tlen);
4139 * Packet data are completely inlined,
4140 * free the packet immediately.
4142 rte_pktmbuf_free_seg(loc->mbuf);
4146 * Non-inlinable VLAN packets are
4147 * processed outside of this routine.
4149 assert(room >= MLX5_WQE_DSEG_SIZE);
4150 if (MLX5_TXOFF_CONFIG(VLAN))
4151 assert(!(loc->mbuf->ol_flags &
4153 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
4154 /* We have to store mbuf in elts.*/
4155 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
4156 room -= MLX5_WQE_DSEG_SIZE;
4157 /* Ring buffer wraparound is checked at the loop end.*/
4160 #ifdef MLX5_PMD_SOFT_COUNTERS
4161 /* Update sent data bytes counter. */
4167 if (unlikely(!pkts_n || !loc->elts_free)) {
4169 * We have no resources/packets to
4170 * continue building descriptors.
4173 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4174 return MLX5_TXCMP_CODE_EXIT;
4176 loc->mbuf = *pkts++;
4177 if (likely(pkts_n > 1))
4178 rte_prefetch0(*pkts);
4179 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4181 * Unroll the completion code to avoid
4182 * returning a variable value - it results in
4183 * unoptimized sequential checks in the caller.
4185 if (ret == MLX5_TXCMP_CODE_MULTI) {
4187 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4188 if (unlikely(!loc->elts_free ||
4190 return MLX5_TXCMP_CODE_EXIT;
4191 return MLX5_TXCMP_CODE_MULTI;
4193 assert(NB_SEGS(loc->mbuf) == 1);
4194 if (ret == MLX5_TXCMP_CODE_TSO) {
4196 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4197 if (unlikely(!loc->elts_free ||
4199 return MLX5_TXCMP_CODE_EXIT;
4200 return MLX5_TXCMP_CODE_TSO;
4202 if (ret == MLX5_TXCMP_CODE_SINGLE) {
4204 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4205 if (unlikely(!loc->elts_free ||
4207 return MLX5_TXCMP_CODE_EXIT;
4208 return MLX5_TXCMP_CODE_SINGLE;
4210 if (ret != MLX5_TXCMP_CODE_EMPW) {
4213 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4214 return MLX5_TXCMP_CODE_ERROR;
4216 /* Check if we have minimal room left. */
4218 if (unlikely(!nlim || room < MLX5_WQE_DSEG_SIZE))
4221 * Check whether packet parameters coincide
4222 * within assumed eMPW batch:
4223 * - checksum settings
4224 * - metadata value
4225 * - software parser settings
4226 * - packet lengths (legacy MPW only)
4228 if (!mlx5_tx_match_empw(txq, eseg, loc, dlen, olx))
4230 /* Packet attributes match, continue the same eMPW. */
4231 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
4232 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
4235 * We get here to close an existing eMPW
4236 * session and start a new one.
4240 if (unlikely(!part))
4241 return MLX5_TXCMP_CODE_EXIT;
4242 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4243 if (unlikely(!loc->elts_free ||
4245 return MLX5_TXCMP_CODE_EXIT;
4246 /* Continue the loop with new eMPW session. */
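/*
 * Editor's illustrative sketch (hypothetical helper, in the spirit of the
 * "room" computation above; the final subtracted term is assumed to be
 * the Ethernet Segment size): the inline data room of one eMPW session -
 * free WQEBBs capped by the maximal WQE size, minus the title Control and
 * Ethernet Segments.
 */
static __rte_unused unsigned int
mlx5_tx_empw_inline_room_sketch(uint16_t wqe_free)
{
	return RTE_MIN(MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE, wqe_free) *
	       MLX5_WQE_SIZE - MLX5_WQE_CSEG_SIZE - MLX5_WQE_ESEG_SIZE;
}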
4252 * The routine sends packets with ordinary MLX5_OPCODE_SEND.
4253 * Data inlining and VLAN insertion are supported.
4255 static __rte_always_inline enum mlx5_txcmp_code
4256 mlx5_tx_burst_single_send(struct mlx5_txq_data *restrict txq,
4257 struct rte_mbuf **restrict pkts,
4258 unsigned int pkts_n,
4259 struct mlx5_txq_local *restrict loc,
4263 * This subroutine is part of mlx5_tx_burst_single()
4264 * and sends single-segment packet with SEND opcode.
4266 assert(loc->elts_free && loc->wqe_free);
4267 assert(pkts_n > loc->pkts_sent);
4268 pkts += loc->pkts_sent + 1;
4269 pkts_n -= loc->pkts_sent;
4271 struct mlx5_wqe *restrict wqe;
4272 enum mlx5_txcmp_code ret;
4274 assert(NB_SEGS(loc->mbuf) == 1);
4275 if (MLX5_TXOFF_CONFIG(INLINE)) {
4276 unsigned int inlen, vlan = 0;
4278 inlen = rte_pktmbuf_data_len(loc->mbuf);
4279 if (MLX5_TXOFF_CONFIG(VLAN) &&
4280 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
4281 vlan = sizeof(struct rte_vlan_hdr);
4283 static_assert((sizeof(struct rte_vlan_hdr) +
4284 sizeof(struct rte_ether_hdr)) ==
4285 MLX5_ESEG_MIN_INLINE_SIZE,
4286 "invalid min inline data size");
4289 * If inlining is enabled at configuration time,
4290 * the limit must be not less than the minimal size.
4291 * Otherwise we would need an extra check on the data
4292 * size to avoid crashes due to length overflow.
4294 assert(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
4295 if (inlen <= txq->inlen_send) {
4296 unsigned int seg_n, wqe_n;
4298 rte_prefetch0(rte_pktmbuf_mtod
4299 (loc->mbuf, uint8_t *));
4300 /* Check against minimal length. */
4301 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
4302 return MLX5_TXCMP_CODE_ERROR;
4304 * Completely inlined packet data WQE:
4305 * - Control Segment, SEND opcode
4306 * - Ethernet Segment, no VLAN insertion
4307 * - Data inlined, VLAN optionally inserted
4308 * - Alignment to MLX5_WSEG_SIZE
4309 * We have to estimate the number of WQEBBs
4311 seg_n = (inlen + 3 * MLX5_WSEG_SIZE -
4312 MLX5_ESEG_MIN_INLINE_SIZE +
4313 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
4314 /* Check if there are enough WQEBBs. */
4315 wqe_n = (seg_n + 3) / 4;
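/*
 * Worked example (illustrative only, assuming MLX5_WSEG_SIZE == 16 and
 * MLX5_ESEG_MIN_INLINE_SIZE == 18): for inlen == 64 bytes
 * seg_n = (64 + 48 - 18 + 15) / 16 = 6 WSEGs and
 * wqe_n = (6 + 3) / 4 = 2 WQEBBs, so two free WQEBBs are required by
 * the check right below to inline the whole packet.
 */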
4316 if (wqe_n > loc->wqe_free)
4317 return MLX5_TXCMP_CODE_EXIT;
4318 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4319 loc->wqe_last = wqe;
4320 mlx5_tx_cseg_init(txq, loc, wqe, seg_n,
4321 MLX5_OPCODE_SEND, olx);
4322 mlx5_tx_eseg_data(txq, loc, wqe,
4323 vlan, inlen, 0, olx);
4324 txq->wqe_ci += wqe_n;
4325 loc->wqe_free -= wqe_n;
4327 * Packet data are completely inlined,
4328 * free the packet immediately.
4330 rte_pktmbuf_free_seg(loc->mbuf);
4331 } else if ((!MLX5_TXOFF_CONFIG(EMPW) ||
4332 MLX5_TXOFF_CONFIG(MPW)) &&
4335 * If minimal inlining is requested, the eMPW
4336 * feature should be disabled because the data is
4337 * inlined into the Ethernet Segment, which cannot
4338 * contain inlined data for eMPW since this
4339 * segment is shared by all packets.
4341 struct mlx5_wqe_dseg *restrict dseg;
4346 * The inline-mode settings require
4347 * inlining the specified amount of
4348 * data bytes into the Ethernet Segment.
4349 * We should check the free space in the
4350 * WQE ring buffer to inline only partially.
4352 assert(txq->inlen_send >= txq->inlen_mode);
4353 assert(inlen > txq->inlen_mode);
4354 assert(txq->inlen_mode >=
4355 MLX5_ESEG_MIN_INLINE_SIZE);
4357 * Check whether there are enough free WQEBBs:
4358 * - Control Segment
4359 * - Ethernet Segment
4360 * - First Segment of inlined Ethernet data
4361 * - ... data continued ...
4362 * - Finishing Data Segment of pointer type
4364 ds = (MLX5_WQE_CSEG_SIZE +
4365 MLX5_WQE_ESEG_SIZE +
4366 MLX5_WQE_DSEG_SIZE +
4368 txq->inlen_mode - MLX5_ESEG_MIN_INLINE_SIZE +
4369 MLX5_WQE_DSEG_SIZE +
4370 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
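/*
 * Worked example (illustrative only, assuming all segment sizes equal
 * MLX5_WSEG_SIZE == 16 and MLX5_ESEG_MIN_INLINE_SIZE == 18): with
 * txq->inlen_mode == 18 the estimation gives
 * ds = (16 + 16 + 16 + 18 - 18 + 16 + 15) / 16 = 4 WSEGs,
 * i.e. (4 + 3) / 4 = 1 free WQEBB is required by the check below.
 */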
4371 if (loc->wqe_free < ((ds + 3) / 4))
4372 return MLX5_TXCMP_CODE_EXIT;
4374 * Build the ordinary SEND WQE:
4375 * - Control Segment, SEND opcode
4376 * - Ethernet Segment, inline inlen_mode bytes
4377 * - Data Segment of pointer type
4379 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4380 loc->wqe_last = wqe;
4381 mlx5_tx_cseg_init(txq, loc, wqe, ds,
4382 MLX5_OPCODE_SEND, olx);
4383 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan,
4386 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
4387 txq->inlen_mode - vlan;
4388 inlen -= txq->inlen_mode;
4389 mlx5_tx_dseg_ptr(txq, loc, dseg,
4392 * The WQE is built, update the loop parameters
4393 * and go to the next packet.
4395 txq->wqe_ci += (ds + 3) / 4;
4396 loc->wqe_free -= (ds + 3) / 4;
4397 /* We have to store mbuf in elts.*/
4398 assert(MLX5_TXOFF_CONFIG(INLINE));
4399 txq->elts[txq->elts_head++ & txq->elts_m] =
4407 * Partially inlined packet data WQE: we have
4408 * some spare space in the title WQEBB and can fill it
4409 * with some packet data. It takes one WQEBB,
4410 * which is available, so no extra space check is needed:
4411 * - Control Segment, SEND opcode
4412 * - Ethernet Segment, no VLAN insertion
4413 * - MLX5_ESEG_MIN_INLINE_SIZE bytes of data
4414 * - Data Segment, pointer type
4416 * We also get here if VLAN insertion is not
4417 * supported by HW and inlining is enabled.
4419 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4420 loc->wqe_last = wqe;
4421 mlx5_tx_cseg_init(txq, loc, wqe, 4,
4422 MLX5_OPCODE_SEND, olx);
4423 mlx5_tx_eseg_dmin(txq, loc, wqe, vlan, olx);
4424 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
4425 MLX5_ESEG_MIN_INLINE_SIZE - vlan;
4427 * The length check is performed above, by
4428 * comparing with txq->inlen_send. We should
4429 * not get overflow here.
4431 assert(inlen > MLX5_ESEG_MIN_INLINE_SIZE);
4432 dlen = inlen - MLX5_ESEG_MIN_INLINE_SIZE;
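/*
 * Illustrative example (not from the original source), assuming
 * MLX5_ESEG_MIN_INLINE_SIZE == 18: for a 64-byte frame without VLAN
 * insertion the first 18 bytes are copied into the Ethernet Segment by
 * mlx5_tx_eseg_dmin() above and the remaining dlen = 64 - 18 = 46 bytes
 * are referenced by the pointer Data Segment built below.
 */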
4433 mlx5_tx_dseg_ptr(txq, loc, &wqe->dseg[1],
4437 /* We have to store mbuf in elts.*/
4438 assert(MLX5_TXOFF_CONFIG(INLINE));
4439 txq->elts[txq->elts_head++ & txq->elts_m] =
4443 #ifdef MLX5_PMD_SOFT_COUNTERS
4444 /* Update sent data bytes counter. */
4445 txq->stats.obytes += vlan +
4446 rte_pktmbuf_data_len(loc->mbuf);
4450 * No inlining at all: it means that saving CPU cycles
4451 * was prioritized at configuration time, so we should not
4452 * copy any packet data into the WQE.
4454 * SEND WQE, one WQEBB:
4455 * - Control Segment, SEND opcode
4456 * - Ethernet Segment, optional VLAN, no inline
4457 * - Data Segment, pointer type
4459 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4460 loc->wqe_last = wqe;
4461 mlx5_tx_cseg_init(txq, loc, wqe, 3,
4462 MLX5_OPCODE_SEND, olx);
4463 mlx5_tx_eseg_none(txq, loc, wqe, olx);
4465 mlx5_tx_dseg_ptr(txq, loc, &wqe->dseg[0],
4466 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
4467 rte_pktmbuf_data_len(loc->mbuf), olx);
4471 * We should not store the mbuf pointer in elts
4472 * if no inlining is configured; this is done
4473 * by the calling routine as a batch copy.
4475 assert(!MLX5_TXOFF_CONFIG(INLINE));
4477 #ifdef MLX5_PMD_SOFT_COUNTERS
4478 /* Update sent data bytes counter. */
4479 txq->stats.obytes += rte_pktmbuf_data_len(loc->mbuf);
4480 if (MLX5_TXOFF_CONFIG(VLAN) &&
4481 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
4482 txq->stats.obytes +=
4483 sizeof(struct rte_vlan_hdr);
4488 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
4489 return MLX5_TXCMP_CODE_EXIT;
4490 loc->mbuf = *pkts++;
4491 if (pkts_n > 1)
4492 rte_prefetch0(*pkts);
4493 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4494 if (unlikely(ret != MLX5_TXCMP_CODE_SINGLE))
4500 static __rte_always_inline enum mlx5_txcmp_code
4501 mlx5_tx_burst_single(struct mlx5_txq_data *restrict txq,
4502 struct rte_mbuf **restrict pkts,
4503 unsigned int pkts_n,
4504 struct mlx5_txq_local *restrict loc,
4507 enum mlx5_txcmp_code ret;
4509 ret = mlx5_tx_able_to_empw(txq, loc, olx, false);
4510 if (ret == MLX5_TXCMP_CODE_SINGLE)
4512 assert(ret == MLX5_TXCMP_CODE_EMPW);
4514 /* Optimize for inline/no inline eMPW send. */
4515 ret = (MLX5_TXOFF_CONFIG(INLINE)) ?
4516 mlx5_tx_burst_empw_inline
4517 (txq, pkts, pkts_n, loc, olx) :
4518 mlx5_tx_burst_empw_simple
4519 (txq, pkts, pkts_n, loc, olx);
4520 if (ret != MLX5_TXCMP_CODE_SINGLE)
4522 /* The resources to send one packet should remain. */
4523 assert(loc->elts_free && loc->wqe_free);
4525 ret = mlx5_tx_burst_single_send(txq, pkts, pkts_n, loc, olx);
4526 assert(ret != MLX5_TXCMP_CODE_SINGLE);
4527 if (ret != MLX5_TXCMP_CODE_EMPW)
4529 /* The resources to send one packet should remain. */
4530 assert(loc->elts_free && loc->wqe_free);
4535 * DPDK Tx callback template. This configured template is
4536 * used to generate routines optimized for the specified offload setup.
4537 * One of these generated functions is chosen at SQ configuration time.
4541 * Generic pointer to TX queue structure.
4543 * Packets to transmit.
4545 * Number of packets in array.
4547 * Configured offloads mask, presenting the bits of MLX5_TXOFF_CONFIG_xxx
4548 * values. Should be static to take advantage of compile-time static configuration.
4552 * Number of packets successfully transmitted (<= pkts_n).
4554 static __rte_always_inline uint16_t
4555 mlx5_tx_burst_tmpl(struct mlx5_txq_data *restrict txq,
4556 struct rte_mbuf **restrict pkts,
4560 struct mlx5_txq_local loc;
4561 enum mlx5_txcmp_code ret;
4564 assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
4565 assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
4566 if (unlikely(!pkts_n))
4570 loc.wqe_last = NULL;
4573 loc.pkts_loop = loc.pkts_sent;
4575 * Check if there are some CQEs and, if any:
4576 * - process encountered errors
4577 * - process the completed WQEs
4578 * - free related mbufs
4579 * - doorbell the NIC about processed CQEs
4581 rte_prefetch0(*(pkts + loc.pkts_sent));
4582 mlx5_tx_handle_completion(txq, olx);
4584 * Calculate the number of available resources - elts and WQEs.
4585 * There are two different scenarios:
4586 * - no data inlining into WQEs, one WQEBB may contain up to
4587 * four packets, in this case elts become the scarce resource
4588 * - data inlining into WQEs, one packet may require multiple
4589 * WQEBBs, the WQEs become the limiting factor.
4591 assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
4592 loc.elts_free = txq->elts_s -
4593 (uint16_t)(txq->elts_head - txq->elts_tail);
4594 assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
4595 loc.wqe_free = txq->wqe_s -
4596 (uint16_t)(txq->wqe_ci - txq->wqe_pi);
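/*
 * Illustrative example with arbitrary numbers: if txq->elts_s == 256,
 * txq->elts_head == 65530 and txq->elts_tail == 65450, the uint16_t
 * subtraction gives 80 descriptors still in flight, so
 * loc.elts_free == 176; the same wraparound-safe arithmetic is applied
 * to the WQE ring above.
 */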
4597 if (unlikely(!loc.elts_free || !loc.wqe_free))
4601 * Fetch the packet from the array. Usually this is
4602 * the first packet in a series of multi/single segment packets.
4605 loc.mbuf = *(pkts + loc.pkts_sent);
4606 /* Dedicated branch for multi-segment packets. */
4607 if (MLX5_TXOFF_CONFIG(MULTI) &&
4608 unlikely(NB_SEGS(loc.mbuf) > 1)) {
4610 * A multi-segment packet was encountered.
4611 * Hardware is able to process it only
4612 * with SEND/TSO opcodes, one packet
4613 * per WQE; handle it in a dedicated routine.
4616 assert(loc.pkts_sent >= loc.pkts_copy);
4617 part = loc.pkts_sent - loc.pkts_copy;
4618 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
4620 * There are some single-segment mbufs not
4621 * stored in elts. The mbufs must be in the
4622 * same order as WQEs, so we must copy the
4623 * mbufs to elts here, before the mbufs of the
4624 * coming multi-segment packet are appended.
4626 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy,
4628 loc.pkts_copy = loc.pkts_sent;
4630 assert(pkts_n > loc.pkts_sent);
4631 ret = mlx5_tx_burst_mseg(txq, pkts, pkts_n, &loc, olx);
4632 if (!MLX5_TXOFF_CONFIG(INLINE))
4633 loc.pkts_copy = loc.pkts_sent;
4635 * These return code checks are supposed
4636 * to be optimized out due to routine inlining.
4638 if (ret == MLX5_TXCMP_CODE_EXIT) {
4640 * The routine returns this code when
4641 * all packets are sent or there are not
4642 * enough resources to complete the request.
4646 if (ret == MLX5_TXCMP_CODE_ERROR) {
4648 * The routine returns this code when
4649 * an error is detected in the incoming packets.
4652 txq->stats.oerrors++;
4655 if (ret == MLX5_TXCMP_CODE_SINGLE) {
4657 * The single-segment packet was encountered
4658 * in the array; try to send it in the most
4659 * optimized way, possibly engaging eMPW.
4661 goto enter_send_single;
4663 if (MLX5_TXOFF_CONFIG(TSO) &&
4664 ret == MLX5_TXCMP_CODE_TSO) {
4666 * The single-segment TSO packet was
4667 * encountered in the array.
4669 goto enter_send_tso;
4671 /* We must not get here. Something is going wrong. */
4673 txq->stats.oerrors++;
4676 /* Dedicated branch for single-segment TSO packets. */
4677 if (MLX5_TXOFF_CONFIG(TSO) &&
4678 unlikely(loc.mbuf->ol_flags & PKT_TX_TCP_SEG)) {
4680 * TSO might require a special way of inlining
4681 * (dedicated parameters) and is sent with the
4682 * MLX5_OPCODE_TSO opcode only; handle this
4683 * in a dedicated branch.
4686 assert(NB_SEGS(loc.mbuf) == 1);
4687 assert(pkts_n > loc.pkts_sent);
4688 ret = mlx5_tx_burst_tso(txq, pkts, pkts_n, &loc, olx);
4690 * These return code checks are supposed
4691 * to be optimized out due to routine inlining.
4693 if (ret == MLX5_TXCMP_CODE_EXIT)
4695 if (ret == MLX5_TXCMP_CODE_ERROR) {
4696 txq->stats.oerrors++;
4699 if (ret == MLX5_TXCMP_CODE_SINGLE)
4700 goto enter_send_single;
4701 if (MLX5_TXOFF_CONFIG(MULTI) &&
4702 ret == MLX5_TXCMP_CODE_MULTI) {
4704 * The multi-segment packet was
4705 * encountered in the array.
4707 goto enter_send_multi;
4709 /* We must not get here. Something is going wrong. */
4711 txq->stats.oerrors++;
4715 * The dedicated branch for single-segment packets
4716 * without TSO. Often these can be sent using
4717 * MLX5_OPCODE_EMPW with multiple packets in one WQE.
4718 * The routine builds the WQEs until it encounters
4719 * a TSO or multi-segment packet (if these
4720 * offloads are requested at SQ configuration time).
4723 assert(pkts_n > loc.pkts_sent);
4724 ret = mlx5_tx_burst_single(txq, pkts, pkts_n, &loc, olx);
4726 * These return code checks are supposed
4727 * to be optimized out due to routine inlining.
4729 if (ret == MLX5_TXCMP_CODE_EXIT)
4731 if (ret == MLX5_TXCMP_CODE_ERROR) {
4732 txq->stats.oerrors++;
4735 if (MLX5_TXOFF_CONFIG(MULTI) &&
4736 ret == MLX5_TXCMP_CODE_MULTI) {
4738 * The multi-segment packet was
4739 * encountered in the array.
4741 goto enter_send_multi;
4743 if (MLX5_TXOFF_CONFIG(TSO) &&
4744 ret == MLX5_TXCMP_CODE_TSO) {
4746 * The single-segment TSO packet was
4747 * encountered in the array.
4749 goto enter_send_tso;
4751 /* We must not get here. Something is going wrong. */
4753 txq->stats.oerrors++;
4757 * The main Tx loop is completed, do the rest:
4758 * - set the completion request if thresholds are reached
4759 * - doorbell the hardware
4760 * - copy the remaining mbufs to elts (if any)
4762 assert(MLX5_TXOFF_CONFIG(INLINE) || loc.pkts_sent >= loc.pkts_copy);
4763 /* Take a shortcut if nothing is sent. */
4764 if (unlikely(loc.pkts_sent == loc.pkts_loop))
4766 /* Request CQE generation if limits are reached. */
4767 mlx5_tx_request_completion(txq, &loc, olx);
4769 * Ring the QP doorbell immediately after WQE building completion
4770 * to improve latencies. The purely software-related data treatment
4771 * can be completed after the doorbell. Tx CQEs for this SQ are
4772 * processed in this thread only by polling.
4774 * The rdma core library can map the doorbell register in two ways,
4775 * depending on the environment variable "MLX5_SHUT_UP_BF":
4777 * - as regular cached memory, if the variable is either missing or
4778 * set to zero. This type of mapping may cause significant
4779 * doorbell register write latency and requires an explicit memory
4780 * write barrier to mitigate this issue and prevent write combining.
4783 * - as non-cached memory, if the variable is present and set to a
4784 * non-zero value. This type of mapping may cause a performance
4785 * impact under heavy loading conditions but the explicit write
4786 * memory barrier is not required and it may improve core performance.
4789 * - the legacy behaviour (prior to the 19.08 release) was to use some
4790 * heuristics to decide whether the write memory barrier should
4791 * be performed. This behaviour is supported by specifying
4792 * tx_db_nc=2; the write barrier is skipped if the application
4793 * provides the full recommended burst of packets, supposing the
4794 * next packets are coming and the write barrier
4795 * will be issued on the next burst (after descriptor writing, at least).
4798 mlx5_tx_dbrec_cond_wmb(txq, loc.wqe_last, !txq->db_nc &&
4799 (!txq->db_heu || pkts_n % MLX5_TX_DEFAULT_BURST));
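/*
 * Illustration of the condition above (assuming
 * MLX5_TX_DEFAULT_BURST == 64): with a non-cached doorbell mapping
 * (txq->db_nc != 0) the barrier is never requested here; with a cached
 * mapping and heuristics enabled (txq->db_heu != 0) a full 64-packet
 * burst gives pkts_n % 64 == 0 and the barrier is skipped, while a
 * partial burst of, say, 40 packets issues it.
 */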
4800 /* Not all of the mbufs may be stored into elts yet. */
4801 part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent - loc.pkts_copy;
4802 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
4804 * There are some single-segment mbufs not stored in elts.
4805 * It can happen only if the last packets were single-segment.
4806 * The copying is gathered into one place because it is
4807 * a good opportunity to optimize it with SIMD.
4808 * Unfortunately, if inlining is enabled, gaps in the
4809 * pointer array may happen due to early freeing of the inlined mbufs.
4812 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy, part, olx);
4813 loc.pkts_copy = loc.pkts_sent;
4815 assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
4816 assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
4817 if (pkts_n > loc.pkts_sent) {
4819 * If the burst size is large there might be not enough CQEs
4820 * fetched from the completion queue and not enough resources
4821 * freed to send all the packets.
4826 #ifdef MLX5_PMD_SOFT_COUNTERS
4827 /* Increment sent packets counter. */
4828 txq->stats.opackets += loc.pkts_sent;
4830 return loc.pkts_sent;
4833 /* Generate routines with Enhanced Multi-Packet Write support. */
4834 MLX5_TXOFF_DECL(full_empw,
4835 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_EMPW)
4837 MLX5_TXOFF_DECL(none_empw,
4838 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW)
4840 MLX5_TXOFF_DECL(md_empw,
4841 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4843 MLX5_TXOFF_DECL(mt_empw,
4844 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4845 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4847 MLX5_TXOFF_DECL(mtsc_empw,
4848 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4849 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4850 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4852 MLX5_TXOFF_DECL(mti_empw,
4853 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4854 MLX5_TXOFF_CONFIG_INLINE |
4855 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4857 MLX5_TXOFF_DECL(mtv_empw,
4858 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4859 MLX5_TXOFF_CONFIG_VLAN |
4860 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4862 MLX5_TXOFF_DECL(mtiv_empw,
4863 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4864 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4865 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4867 MLX5_TXOFF_DECL(sc_empw,
4868 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4869 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4871 MLX5_TXOFF_DECL(sci_empw,
4872 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4873 MLX5_TXOFF_CONFIG_INLINE |
4874 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4876 MLX5_TXOFF_DECL(scv_empw,
4877 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4878 MLX5_TXOFF_CONFIG_VLAN |
4879 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4881 MLX5_TXOFF_DECL(sciv_empw,
4882 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4883 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4884 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4886 MLX5_TXOFF_DECL(i_empw,
4887 MLX5_TXOFF_CONFIG_INLINE |
4888 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4890 MLX5_TXOFF_DECL(v_empw,
4891 MLX5_TXOFF_CONFIG_VLAN |
4892 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4894 MLX5_TXOFF_DECL(iv_empw,
4895 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4896 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4898 /* Generate routines without Enhanced Multi-Packet Write support. */
4899 MLX5_TXOFF_DECL(full,
4900 MLX5_TXOFF_CONFIG_FULL)
4902 MLX5_TXOFF_DECL(none,
4903 MLX5_TXOFF_CONFIG_NONE)
4906 MLX5_TXOFF_CONFIG_METADATA)
4909 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4910 MLX5_TXOFF_CONFIG_METADATA)
4912 MLX5_TXOFF_DECL(mtsc,
4913 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4914 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4915 MLX5_TXOFF_CONFIG_METADATA)
4917 MLX5_TXOFF_DECL(mti,
4918 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4919 MLX5_TXOFF_CONFIG_INLINE |
4920 MLX5_TXOFF_CONFIG_METADATA)
4923 MLX5_TXOFF_DECL(mtv,
4924 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4925 MLX5_TXOFF_CONFIG_VLAN |
4926 MLX5_TXOFF_CONFIG_METADATA)
4929 MLX5_TXOFF_DECL(mtiv,
4930 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4931 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4932 MLX5_TXOFF_CONFIG_METADATA)
4935 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4936 MLX5_TXOFF_CONFIG_METADATA)
4938 MLX5_TXOFF_DECL(sci,
4939 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4940 MLX5_TXOFF_CONFIG_INLINE |
4941 MLX5_TXOFF_CONFIG_METADATA)
4944 MLX5_TXOFF_DECL(scv,
4945 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4946 MLX5_TXOFF_CONFIG_VLAN |
4947 MLX5_TXOFF_CONFIG_METADATA)
4950 MLX5_TXOFF_DECL(sciv,
4951 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4952 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4953 MLX5_TXOFF_CONFIG_METADATA)
4956 MLX5_TXOFF_CONFIG_INLINE |
4957 MLX5_TXOFF_CONFIG_METADATA)
4960 MLX5_TXOFF_CONFIG_VLAN |
4961 MLX5_TXOFF_CONFIG_METADATA)
4964 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4965 MLX5_TXOFF_CONFIG_METADATA)
4968 * Generate routines with Legacy Multi-Packet Write support.
4969 * This mode is supported by ConnectX-4 Lx only and imposes
4970 * offload limitations; not supported:
4971 * - ACL/Flows (metadata become meaningless)
4972 * - WQE Inline headers
4973 * - SRIOV (E-Switch offloads)
4975 * - tunnel encapsulation/decapsulation
4978 MLX5_TXOFF_DECL(none_mpw,
4979 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW |
4980 MLX5_TXOFF_CONFIG_MPW)
4982 MLX5_TXOFF_DECL(mci_mpw,
4983 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
4984 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
4985 MLX5_TXOFF_CONFIG_MPW)
4987 MLX5_TXOFF_DECL(mc_mpw,
4988 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
4989 MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW)
4991 MLX5_TXOFF_DECL(i_mpw,
4992 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
4993 MLX5_TXOFF_CONFIG_MPW)
4996 * Array of declared and compiled Tx burst functions and the corresponding
4997 * supported offload sets. The array is used to select the Tx burst
4998 * function for the offload set specified at Tx queue configuration time.
5001 eth_tx_burst_t func;
5004 MLX5_TXOFF_INFO(full_empw,
5005 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5006 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5007 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5008 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5010 MLX5_TXOFF_INFO(none_empw,
5011 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW)
5013 MLX5_TXOFF_INFO(md_empw,
5014 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5016 MLX5_TXOFF_INFO(mt_empw,
5017 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5018 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5020 MLX5_TXOFF_INFO(mtsc_empw,
5021 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5022 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5023 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5025 MLX5_TXOFF_INFO(mti_empw,
5026 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5027 MLX5_TXOFF_CONFIG_INLINE |
5028 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5030 MLX5_TXOFF_INFO(mtv_empw,
5031 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5032 MLX5_TXOFF_CONFIG_VLAN |
5033 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5035 MLX5_TXOFF_INFO(mtiv_empw,
5036 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5037 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5038 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5040 MLX5_TXOFF_INFO(sc_empw,
5041 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5042 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5044 MLX5_TXOFF_INFO(sci_empw,
5045 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5046 MLX5_TXOFF_CONFIG_INLINE |
5047 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5049 MLX5_TXOFF_INFO(scv_empw,
5050 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5051 MLX5_TXOFF_CONFIG_VLAN |
5052 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5054 MLX5_TXOFF_INFO(sciv_empw,
5055 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5056 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5057 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5059 MLX5_TXOFF_INFO(i_empw,
5060 MLX5_TXOFF_CONFIG_INLINE |
5061 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5063 MLX5_TXOFF_INFO(v_empw,
5064 MLX5_TXOFF_CONFIG_VLAN |
5065 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5067 MLX5_TXOFF_INFO(iv_empw,
5068 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5069 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5071 MLX5_TXOFF_INFO(full,
5072 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5073 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5074 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5075 MLX5_TXOFF_CONFIG_METADATA)
5077 MLX5_TXOFF_INFO(none,
5078 MLX5_TXOFF_CONFIG_NONE)
5081 MLX5_TXOFF_CONFIG_METADATA)
5084 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5085 MLX5_TXOFF_CONFIG_METADATA)
5087 MLX5_TXOFF_INFO(mtsc,
5088 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5089 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5090 MLX5_TXOFF_CONFIG_METADATA)
5092 MLX5_TXOFF_INFO(mti,
5093 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5094 MLX5_TXOFF_CONFIG_INLINE |
5095 MLX5_TXOFF_CONFIG_METADATA)
5097 MLX5_TXOFF_INFO(mtv,
5098 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5099 MLX5_TXOFF_CONFIG_VLAN |
5100 MLX5_TXOFF_CONFIG_METADATA)
5102 MLX5_TXOFF_INFO(mtiv,
5103 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5104 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5105 MLX5_TXOFF_CONFIG_METADATA)
5108 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5109 MLX5_TXOFF_CONFIG_METADATA)
5111 MLX5_TXOFF_INFO(sci,
5112 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5113 MLX5_TXOFF_CONFIG_INLINE |
5114 MLX5_TXOFF_CONFIG_METADATA)
5116 MLX5_TXOFF_INFO(scv,
5117 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5118 MLX5_TXOFF_CONFIG_VLAN |
5119 MLX5_TXOFF_CONFIG_METADATA)
5121 MLX5_TXOFF_INFO(sciv,
5122 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5123 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5124 MLX5_TXOFF_CONFIG_METADATA)
5127 MLX5_TXOFF_CONFIG_INLINE |
5128 MLX5_TXOFF_CONFIG_METADATA)
5131 MLX5_TXOFF_CONFIG_VLAN |
5132 MLX5_TXOFF_CONFIG_METADATA)
5135 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5136 MLX5_TXOFF_CONFIG_METADATA)
5138 MLX5_TXOFF_INFO(none_mpw,
5139 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW |
5140 MLX5_TXOFF_CONFIG_MPW)
5142 MLX5_TXOFF_INFO(mci_mpw,
5143 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5144 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5145 MLX5_TXOFF_CONFIG_MPW)
5147 MLX5_TXOFF_INFO(mc_mpw,
5148 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5149 MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW)
5151 MLX5_TXOFF_INFO(i_mpw,
5152 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5153 MLX5_TXOFF_CONFIG_MPW)
5157 * Configure the Tx function to use. The routine checks the configured
5158 * Tx offloads for the device and selects the appropriate Tx burst
5159 * routine. There are multiple Tx burst routines compiled from
5160 * the same template, each optimized for a dedicated offload set.
5164 * Pointer to private data structure.
5167 * Pointer to selected Tx burst function.
5170 mlx5_select_tx_function(struct rte_eth_dev *dev)
5172 struct mlx5_priv *priv = dev->data->dev_private;
5173 struct mlx5_dev_config *config = &priv->config;
5174 uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
5175 unsigned int diff = 0, olx = 0, i, m;
5177 static_assert(MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE <=
5178 MLX5_DSEG_MAX, "invalid WQE max size");
5179 static_assert(MLX5_WQE_CSEG_SIZE == MLX5_WSEG_SIZE,
5180 "invalid WQE Control Segment size");
5181 static_assert(MLX5_WQE_ESEG_SIZE == MLX5_WSEG_SIZE,
5182 "invalid WQE Ethernet Segment size");
5183 static_assert(MLX5_WQE_DSEG_SIZE == MLX5_WSEG_SIZE,
5184 "invalid WQE Data Segment size");
5185 static_assert(MLX5_WQE_SIZE == 4 * MLX5_WSEG_SIZE,
5186 "invalid WQE size");
5188 if (tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
5189 /* We should support Multi-Segment Packets. */
5190 olx |= MLX5_TXOFF_CONFIG_MULTI;
5192 if (tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
5193 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
5194 DEV_TX_OFFLOAD_GRE_TNL_TSO |
5195 DEV_TX_OFFLOAD_IP_TNL_TSO |
5196 DEV_TX_OFFLOAD_UDP_TNL_TSO)) {
5197 /* We should support TCP Send Offload. */
5198 olx |= MLX5_TXOFF_CONFIG_TSO;
5200 if (tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO |
5201 DEV_TX_OFFLOAD_UDP_TNL_TSO |
5202 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
5203 /* We should support Software Parser for Tunnels. */
5204 olx |= MLX5_TXOFF_CONFIG_SWP;
5206 if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
5207 DEV_TX_OFFLOAD_UDP_CKSUM |
5208 DEV_TX_OFFLOAD_TCP_CKSUM |
5209 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
5210 /* We should support IP/TCP/UDP Checksums. */
5211 olx |= MLX5_TXOFF_CONFIG_CSUM;
5213 if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT) {
5214 /* We should support VLAN insertion. */
5215 olx |= MLX5_TXOFF_CONFIG_VLAN;
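/*
 * Illustrative example (not exhaustive): a device configured with
 * txmode.offloads == DEV_TX_OFFLOAD_MULTI_SEGS | DEV_TX_OFFLOAD_TCP_TSO |
 * DEV_TX_OFFLOAD_TCP_CKSUM ends up here with
 * olx == MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
 * MLX5_TXOFF_CONFIG_CSUM; the INLINE, EMPW/MPW and METADATA bits may
 * still be added by the checks below.
 */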
5217 if (priv->txqs_n && (*priv->txqs)[0]) {
5218 struct mlx5_txq_data *txd = (*priv->txqs)[0];
5220 if (txd->inlen_send) {
5222 * Check the data inline requirements. Data inlining
5223 * is enabled on a per-device basis, so we can check
5224 * the first Tx queue only.
5226 * If the device does not support VLAN insertion in the WQE
5227 * and some queues are requested to perform VLAN
5228 * insertion offload, then inlining must be enabled.
5230 olx |= MLX5_TXOFF_CONFIG_INLINE;
5233 if (config->mps == MLX5_MPW_ENHANCED &&
5234 config->txq_inline_min <= 0) {
5236 * The NIC supports Enhanced Multi-Packet Write
5237 * and does not require minimal inline data.
5239 olx |= MLX5_TXOFF_CONFIG_EMPW;
5241 if (rte_flow_dynf_metadata_avail()) {
5242 /* We should support Flow metadata. */
5243 olx |= MLX5_TXOFF_CONFIG_METADATA;
5245 if (config->mps == MLX5_MPW) {
5247 * The NIC supports Legacy Multi-Packet Write.
5248 * The MLX5_TXOFF_CONFIG_MPW controls the
5249 * descriptor building method in combination
5250 * with MLX5_TXOFF_CONFIG_EMPW.
5252 if (!(olx & (MLX5_TXOFF_CONFIG_TSO |
5253 MLX5_TXOFF_CONFIG_SWP |
5254 MLX5_TXOFF_CONFIG_VLAN |
5255 MLX5_TXOFF_CONFIG_METADATA)))
5256 olx |= MLX5_TXOFF_CONFIG_EMPW |
5257 MLX5_TXOFF_CONFIG_MPW;
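/*
 * Illustrative example of the scan below: if olx ends up as
 * MLX5_TXOFF_CONFIG_CSUM | MLX5_TXOFF_CONFIG_METADATA (neither eMPW nor
 * inlining enabled), table entries differing in the EMPW or INLINE bits
 * are skipped; among the remaining supersets the entry providing
 * SWP | CSUM | METADATA carries the fewest non-requested bits (only SWP)
 * and would be selected.
 */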
5260 * Scan the routines table to find the minimal
5261 * satisfying routine for the requested offloads.
5263 m = RTE_DIM(txoff_func);
5264 for (i = 0; i < RTE_DIM(txoff_func); i++) {
5267 tmp = txoff_func[i].olx;
5269 /* Meets requested offloads exactly.*/
5273 if ((tmp & olx) != olx) {
5274 /* Does not meet requested offloads at all. */
5277 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_EMPW)
5278 /* Do not enable eMPW if not configured. */
5280 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_INLINE)
5281 /* Do not enable inlining if not configured. */
5284 * Some routine meets the requirements.
5285 * Check whether it has the minimal amount
5286 * of non-requested offloads.
5288 tmp = __builtin_popcountl(tmp & ~olx);
5289 if (m >= RTE_DIM(txoff_func) || tmp < diff) {
5290 /* First or better match, save and continue. */
5296 tmp = txoff_func[i].olx ^ txoff_func[m].olx;
5297 if (__builtin_ffsl(txoff_func[i].olx & ~tmp) <
5298 __builtin_ffsl(txoff_func[m].olx & ~tmp)) {
5299 /* Lighter non-requested offload. */
5304 if (m >= RTE_DIM(txoff_func)) {
5305 DRV_LOG(DEBUG, "port %u has no selected Tx function"
5306 " for requested offloads %04X",
5307 dev->data->port_id, olx);
5310 DRV_LOG(DEBUG, "port %u has selected Tx function"
5311 " supporting offloads %04X/%04X",
5312 dev->data->port_id, olx, txoff_func[m].olx);
5313 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MULTI)
5314 DRV_LOG(DEBUG, "\tMULTI (multi segment)");
5315 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_TSO)
5316 DRV_LOG(DEBUG, "\tTSO (TCP send offload)");
5317 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_SWP)
5318 DRV_LOG(DEBUG, "\tSWP (software parser)");
5319 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_CSUM)
5320 DRV_LOG(DEBUG, "\tCSUM (checksum offload)");
5321 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_INLINE)
5322 DRV_LOG(DEBUG, "\tINLIN (inline data)");
5323 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_VLAN)
5324 DRV_LOG(DEBUG, "\tVLANI (VLAN insertion)");
5325 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_METADATA)
5326 DRV_LOG(DEBUG, "\tMETAD (tx Flow metadata)");
5327 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_EMPW) {
5328 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MPW)
5329 DRV_LOG(DEBUG, "\tMPW (Legacy MPW)");
5331 DRV_LOG(DEBUG, "\tEMPW (Enhanced MPW)");
5333 return txoff_func[m].func;