1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015-2019 Mellanox Technologies, Ltd
11 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
13 #pragma GCC diagnostic ignored "-Wpedantic"
15 #include <infiniband/verbs.h>
16 #include <infiniband/mlx5dv.h>
18 #pragma GCC diagnostic error "-Wpedantic"
22 #include <rte_mempool.h>
23 #include <rte_prefetch.h>
24 #include <rte_common.h>
25 #include <rte_branch_prediction.h>
26 #include <rte_ether.h>
27 #include <rte_cycles.h>
30 #include <mlx5_devx_cmds.h>
32 #include <mlx5_common.h>
34 #include "mlx5_defs.h"
36 #include "mlx5_utils.h"
37 #include "mlx5_rxtx.h"
38 #include "mlx5_autoconf.h"
40 /* TX burst subroutines return codes. */
41 enum mlx5_txcmp_code {
42 MLX5_TXCMP_CODE_EXIT = 0,
43 MLX5_TXCMP_CODE_ERROR,
44 MLX5_TXCMP_CODE_SINGLE,
45 MLX5_TXCMP_CODE_MULTI,
51 * These defines are used to configure the Tx burst routine option set
52 * supported at compile time. Options that are not specified are optimized
53 * out because the related if conditions can be evaluated at compile time.
54 * Offloads with a bigger runtime check overhead (requiring more CPU
55 * cycles to skip) should have a bigger index - this is needed to select
56 * the better matching routine when there is no exact match and some
57 * offloads are not actually requested.
59 #define MLX5_TXOFF_CONFIG_MULTI (1u << 0) /* Multi-segment packets.*/
60 #define MLX5_TXOFF_CONFIG_TSO (1u << 1) /* TCP send offload supported.*/
61 #define MLX5_TXOFF_CONFIG_SWP (1u << 2) /* Tunnels/SW Parser offloads.*/
62 #define MLX5_TXOFF_CONFIG_CSUM (1u << 3) /* Check Sums offloaded. */
63 #define MLX5_TXOFF_CONFIG_INLINE (1u << 4) /* Data inlining supported. */
64 #define MLX5_TXOFF_CONFIG_VLAN (1u << 5) /* VLAN insertion supported.*/
65 #define MLX5_TXOFF_CONFIG_METADATA (1u << 6) /* Flow metadata. */
66 #define MLX5_TXOFF_CONFIG_EMPW (1u << 8) /* Enhanced MPW supported.*/
67 #define MLX5_TXOFF_CONFIG_MPW (1u << 9) /* Legacy MPW supported.*/
69 /* The most common offloads groups. */
70 #define MLX5_TXOFF_CONFIG_NONE 0
71 #define MLX5_TXOFF_CONFIG_FULL (MLX5_TXOFF_CONFIG_MULTI | \
72 MLX5_TXOFF_CONFIG_TSO | \
73 MLX5_TXOFF_CONFIG_SWP | \
74 MLX5_TXOFF_CONFIG_CSUM | \
75 MLX5_TXOFF_CONFIG_INLINE | \
76 MLX5_TXOFF_CONFIG_VLAN | \
77 MLX5_TXOFF_CONFIG_METADATA)
79 #define MLX5_TXOFF_CONFIG(mask) (olx & MLX5_TXOFF_CONFIG_##mask)
81 #define MLX5_TXOFF_DECL(func, olx) \
82 static uint16_t mlx5_tx_burst_##func(void *txq, \
83 struct rte_mbuf **pkts, \
86 return mlx5_tx_burst_tmpl((struct mlx5_txq_data *)txq, \
87 pkts, pkts_n, (olx)); \
90 #define MLX5_TXOFF_INFO(func, olx) {mlx5_tx_burst_##func, olx},
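/*
 * Illustrative sketch (hypothetical helper, not part of the driver): when a
 * burst routine is instantiated with a compile-time constant olx, every
 * MLX5_TXOFF_CONFIG() check folds to a constant, so the compiler removes the
 * code for options that are not in the configured set.
 */
static __rte_always_inline unsigned int
mlx5_txoff_config_example(unsigned int olx)
{
	unsigned int n = 0;

	if (MLX5_TXOFF_CONFIG(TSO))
		n++; /* Kept only when MLX5_TXOFF_CONFIG_TSO is set in olx. */
	if (MLX5_TXOFF_CONFIG(VLAN))
		n++; /* Kept only when MLX5_TXOFF_CONFIG_VLAN is set in olx. */
	return n;
}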
92 static __rte_always_inline uint32_t
93 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe);
95 static __rte_always_inline int
96 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
97 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe);
99 static __rte_always_inline uint32_t
100 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe);
102 static __rte_always_inline void
103 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
104 volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res);
106 static __rte_always_inline void
107 mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx,
108 const unsigned int strd_n);
111 mlx5_queue_state_modify(struct rte_eth_dev *dev,
112 struct mlx5_mp_arg_queue_state_modify *sm);
115 mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *restrict tcp,
116 volatile struct mlx5_cqe *restrict cqe,
120 mlx5_lro_update_hdr(uint8_t *restrict padd,
121 volatile struct mlx5_cqe *restrict cqe,
124 uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
125 [0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
128 uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned;
129 uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned;
131 uint64_t rte_net_mlx5_dynf_inline_mask;
132 #define PKT_TX_DYNF_NOINLINE rte_net_mlx5_dynf_inline_mask
135 * Build a table to translate Rx completion flags to packet type.
137 * @note: fix mlx5_dev_supported_ptypes_get() if any change is made here.
140 mlx5_set_ptype_table(void)
143 uint32_t (*p)[RTE_DIM(mlx5_ptype_table)] = &mlx5_ptype_table;
145 /* Last entry must not be overwritten, reserved for errored packet. */
146 for (i = 0; i < RTE_DIM(mlx5_ptype_table) - 1; ++i)
147 (*p)[i] = RTE_PTYPE_UNKNOWN;
149 * The index to the array should have:
150 * bit[1:0] = l3_hdr_type
151 * bit[4:2] = l4_hdr_type
154 * bit[7] = outer_l3_type
157 (*p)[0x00] = RTE_PTYPE_L2_ETHER;
159 (*p)[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
160 RTE_PTYPE_L4_NONFRAG;
161 (*p)[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
162 RTE_PTYPE_L4_NONFRAG;
164 (*p)[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
166 (*p)[0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
169 (*p)[0x05] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
171 (*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
173 (*p)[0x0d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
175 (*p)[0x0e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
177 (*p)[0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
179 (*p)[0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
182 (*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
184 (*p)[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
186 /* Repeat with outer_l3_type being set. Just in case. */
187 (*p)[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
188 RTE_PTYPE_L4_NONFRAG;
189 (*p)[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
190 RTE_PTYPE_L4_NONFRAG;
191 (*p)[0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
193 (*p)[0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
195 (*p)[0x85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
197 (*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
199 (*p)[0x8d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
201 (*p)[0x8e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
203 (*p)[0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
205 (*p)[0x92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
207 (*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
209 (*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
212 (*p)[0x40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
213 (*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
214 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
215 RTE_PTYPE_INNER_L4_NONFRAG;
216 (*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
217 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
218 RTE_PTYPE_INNER_L4_NONFRAG;
219 (*p)[0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
220 (*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
221 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
222 RTE_PTYPE_INNER_L4_NONFRAG;
223 (*p)[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
224 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
225 RTE_PTYPE_INNER_L4_NONFRAG;
226 /* Tunneled - Fragmented */
227 (*p)[0x61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
228 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
229 RTE_PTYPE_INNER_L4_FRAG;
230 (*p)[0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
231 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
232 RTE_PTYPE_INNER_L4_FRAG;
233 (*p)[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
234 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
235 RTE_PTYPE_INNER_L4_FRAG;
236 (*p)[0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
237 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
238 RTE_PTYPE_INNER_L4_FRAG;
240 (*p)[0x45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
241 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
242 RTE_PTYPE_INNER_L4_TCP;
243 (*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
244 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
245 RTE_PTYPE_INNER_L4_TCP;
246 (*p)[0x4d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
247 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
248 RTE_PTYPE_INNER_L4_TCP;
249 (*p)[0x4e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
250 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
251 RTE_PTYPE_INNER_L4_TCP;
252 (*p)[0x51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
253 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
254 RTE_PTYPE_INNER_L4_TCP;
255 (*p)[0x52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
256 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
257 RTE_PTYPE_INNER_L4_TCP;
258 (*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
259 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
260 RTE_PTYPE_INNER_L4_TCP;
261 (*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
262 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
263 RTE_PTYPE_INNER_L4_TCP;
264 (*p)[0xcd] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
265 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
266 RTE_PTYPE_INNER_L4_TCP;
267 (*p)[0xce] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
268 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
269 RTE_PTYPE_INNER_L4_TCP;
270 (*p)[0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
271 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
272 RTE_PTYPE_INNER_L4_TCP;
273 (*p)[0xd2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
274 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
275 RTE_PTYPE_INNER_L4_TCP;
277 (*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
278 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
279 RTE_PTYPE_INNER_L4_UDP;
280 (*p)[0x4a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
281 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
282 RTE_PTYPE_INNER_L4_UDP;
283 (*p)[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
284 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
285 RTE_PTYPE_INNER_L4_UDP;
286 (*p)[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
287 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
288 RTE_PTYPE_INNER_L4_UDP;
292 * Build a table to translate mbuf checksum offload flags to the Verbs checksum type.
295 mlx5_set_cksum_table(void)
301 * The index should have:
302 * bit[0] = PKT_TX_TCP_SEG
303 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
304 * bit[4] = PKT_TX_IP_CKSUM
305 * bit[8] = PKT_TX_OUTER_IP_CKSUM
308 for (i = 0; i < RTE_DIM(mlx5_cksum_table); ++i) {
311 /* Tunneled packet. */
312 if (i & (1 << 8)) /* Outer IP. */
313 v |= MLX5_ETH_WQE_L3_CSUM;
314 if (i & (1 << 4)) /* Inner IP. */
315 v |= MLX5_ETH_WQE_L3_INNER_CSUM;
316 if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
317 v |= MLX5_ETH_WQE_L4_INNER_CSUM;
320 if (i & (1 << 4)) /* IP. */
321 v |= MLX5_ETH_WQE_L3_CSUM;
322 if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
323 v |= MLX5_ETH_WQE_L4_CSUM;
325 mlx5_cksum_table[i] = v;
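/*
 * Worked example (illustrative): a plain TCP packet requesting
 * PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM maps, with the shift used by
 * txq_ol_cksum_to_cs() below, to index
 * (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM) >> 50 = (1 << 4) | (1 << 2) = 0x14,
 * so mlx5_cksum_table[0x14] should hold
 * MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM.
 */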
330 * Build a table to translate mbuf Tx offload flags to the Verbs SWP type.
333 mlx5_set_swp_types_table(void)
339 * The index should have:
340 * bit[0:1] = PKT_TX_L4_MASK
341 * bit[4] = PKT_TX_IPV6
342 * bit[8] = PKT_TX_OUTER_IPV6
343 * bit[9] = PKT_TX_OUTER_UDP
345 for (i = 0; i < RTE_DIM(mlx5_swp_types_table); ++i) {
348 v |= MLX5_ETH_WQE_L3_OUTER_IPV6;
350 v |= MLX5_ETH_WQE_L4_OUTER_UDP;
352 v |= MLX5_ETH_WQE_L3_INNER_IPV6;
353 if ((i & 3) == (PKT_TX_UDP_CKSUM >> 52))
354 v |= MLX5_ETH_WQE_L4_INNER_UDP;
355 mlx5_swp_types_table[i] = v;
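/*
 * Worked example (illustrative, following the bit layout above): a UDP
 * tunnel packet with PKT_TX_OUTER_IPV6, inner PKT_TX_IPV6 and
 * PKT_TX_UDP_CKSUM requested maps to index
 * 0x3 | (1 << 4) | (1 << 8) | (1 << 9) = 0x313, for which the loop above
 * should set MLX5_ETH_WQE_L3_OUTER_IPV6 | MLX5_ETH_WQE_L4_OUTER_UDP |
 * MLX5_ETH_WQE_L3_INNER_IPV6 | MLX5_ETH_WQE_L4_INNER_UDP.
 */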
360 * Set Software Parser flags and offsets in Ethernet Segment of WQE.
361 * Flags must be initialized to zero beforehand.
364 * Pointer to burst routine local context.
366 * Pointer to store Software Parser flags
368 * Configured Tx offloads mask. It is fully defined at
369 * compile time and may be used for optimization.
372 * Software Parser offsets packed in dword.
373 * Software Parser flags are set by pointer.
375 static __rte_always_inline uint32_t
376 txq_mbuf_to_swp(struct mlx5_txq_local *restrict loc,
381 unsigned int idx, off;
384 if (!MLX5_TXOFF_CONFIG(SWP))
386 ol = loc->mbuf->ol_flags;
387 tunnel = ol & PKT_TX_TUNNEL_MASK;
389 * Check whether Software Parser is required.
390 * Only customized tunnels may ask for it.
392 if (likely(tunnel != PKT_TX_TUNNEL_UDP && tunnel != PKT_TX_TUNNEL_IP))
395 * The index should have:
396 * bit[0:1] = PKT_TX_L4_MASK
397 * bit[4] = PKT_TX_IPV6
398 * bit[8] = PKT_TX_OUTER_IPV6
399 * bit[9] = PKT_TX_OUTER_UDP
401 idx = (ol & (PKT_TX_L4_MASK | PKT_TX_IPV6 | PKT_TX_OUTER_IPV6)) >> 52;
402 idx |= (tunnel == PKT_TX_TUNNEL_UDP) ? (1 << 9) : 0;
403 *swp_flags = mlx5_swp_types_table[idx];
405 * Set offsets for SW parser. Since ConnectX-5, SW parser just
406 * complements HW parser. SW parser starts to engage only if HW parser
407 * can't reach a header. For older devices, HW parser will not kick
408 * in if any of the SWP offsets is set. Therefore, all of the L3 offsets
409 * should be set regardless of HW offload.
411 off = loc->mbuf->outer_l2_len;
412 if (MLX5_TXOFF_CONFIG(VLAN) && ol & PKT_TX_VLAN_PKT)
413 off += sizeof(struct rte_vlan_hdr);
414 set = (off >> 1) << 8; /* Outer L3 offset. */
415 off += loc->mbuf->outer_l3_len;
416 if (tunnel == PKT_TX_TUNNEL_UDP)
417 set |= off >> 1; /* Outer L4 offset. */
418 if (ol & (PKT_TX_IPV4 | PKT_TX_IPV6)) { /* Inner IP. */
419 const uint64_t csum = ol & PKT_TX_L4_MASK;
420 off += loc->mbuf->l2_len;
421 set |= (off >> 1) << 24; /* Inner L3 offset. */
422 if (csum == PKT_TX_TCP_CKSUM ||
423 csum == PKT_TX_UDP_CKSUM ||
424 (MLX5_TXOFF_CONFIG(TSO) && ol & PKT_TX_TCP_SEG)) {
425 off += loc->mbuf->l3_len;
426 set |= (off >> 1) << 16; /* Inner L4 offset. */
429 set = rte_cpu_to_le_32(set);
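/*
 * Worked example (illustrative): the offsets above are packed in 2-byte
 * units - byte 0 holds the outer L4 offset, byte 1 the outer L3 offset,
 * byte 2 the inner L4 offset and byte 3 the inner L3 offset. For a UDP
 * tunnel with a 14-byte outer L2 header and a 20-byte outer IPv4 header,
 * byte 1 gets 14 / 2 = 7 and byte 0 gets (14 + 20) / 2 = 17.
 */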
434 * Convert the Checksum offloads to Verbs.
437 * Pointer to the mbuf.
440 * Converted checksum flags.
442 static __rte_always_inline uint8_t
443 txq_ol_cksum_to_cs(struct rte_mbuf *buf)
446 uint8_t is_tunnel = !!(buf->ol_flags & PKT_TX_TUNNEL_MASK);
447 const uint64_t ol_flags_mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK |
448 PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM;
451 * The index should have:
452 * bit[0] = PKT_TX_TCP_SEG
453 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
454 * bit[4] = PKT_TX_IP_CKSUM
455 * bit[8] = PKT_TX_OUTER_IP_CKSUM
458 idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (!!is_tunnel << 9);
459 return mlx5_cksum_table[idx];
463 * Internal function to compute the number of used descriptors in an RX queue
469 * The number of used Rx descriptors.
472 rx_queue_count(struct mlx5_rxq_data *rxq)
474 struct rxq_zip *zip = &rxq->zip;
475 volatile struct mlx5_cqe *cqe;
476 const unsigned int cqe_n = (1 << rxq->cqe_n);
477 const unsigned int cqe_cnt = cqe_n - 1;
481 /* if we are processing a compressed cqe */
483 used = zip->cqe_cnt - zip->ca;
489 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
490 while (check_cqe(cqe, cqe_n, cq_ci) != MLX5_CQE_STATUS_HW_OWN) {
494 op_own = cqe->op_own;
495 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
496 n = rte_be_to_cpu_32(cqe->byte_cnt);
501 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
503 used = RTE_MIN(used, (1U << rxq->elts_n) - 1);
508 * DPDK callback to check the status of a rx descriptor.
513 * The index of the descriptor in the ring.
516 * The status of the Rx descriptor.
519 mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
521 struct mlx5_rxq_data *rxq = rx_queue;
522 struct mlx5_rxq_ctrl *rxq_ctrl =
523 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
524 struct rte_eth_dev *dev = ETH_DEV(rxq_ctrl->priv);
526 if (dev->rx_pkt_burst != mlx5_rx_burst) {
530 if (offset >= (1 << rxq->elts_n)) {
534 if (offset < rx_queue_count(rxq))
535 return RTE_ETH_RX_DESC_DONE;
536 return RTE_ETH_RX_DESC_AVAIL;
540 * DPDK callback to get the RX queue information
543 * Pointer to the device structure.
546 * Rx queue identifier.
549 * Pointer to the RX queue information structure.
556 mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
557 struct rte_eth_rxq_info *qinfo)
559 struct mlx5_priv *priv = dev->data->dev_private;
560 struct mlx5_rxq_data *rxq = (*priv->rxqs)[rx_queue_id];
561 struct mlx5_rxq_ctrl *rxq_ctrl =
562 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
566 qinfo->mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
567 rxq->mprq_mp : rxq->mp;
568 qinfo->conf.rx_thresh.pthresh = 0;
569 qinfo->conf.rx_thresh.hthresh = 0;
570 qinfo->conf.rx_thresh.wthresh = 0;
571 qinfo->conf.rx_free_thresh = rxq->rq_repl_thresh;
572 qinfo->conf.rx_drop_en = 1;
573 qinfo->conf.rx_deferred_start = rxq_ctrl ? 0 : 1;
574 qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
575 qinfo->scattered_rx = dev->data->scattered_rx;
576 qinfo->nb_desc = 1 << rxq->elts_n;
580 * DPDK callback to get the RX packet burst mode information
583 * Pointer to the device structure.
586 * Rx queue identifier.
589 * Pointer to the burst mode information.
592 * 0 on success, -EINVAL on failure.
596 mlx5_rx_burst_mode_get(struct rte_eth_dev *dev,
597 uint16_t rx_queue_id __rte_unused,
598 struct rte_eth_burst_mode *mode)
600 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
602 if (pkt_burst == mlx5_rx_burst) {
603 snprintf(mode->info, sizeof(mode->info), "%s", "Scalar");
604 } else if (pkt_burst == mlx5_rx_burst_mprq) {
605 snprintf(mode->info, sizeof(mode->info), "%s", "Multi-Packet RQ");
606 } else if (pkt_burst == mlx5_rx_burst_vec) {
607 #if defined RTE_ARCH_X86_64
608 snprintf(mode->info, sizeof(mode->info), "%s", "Vector SSE");
609 #elif defined RTE_ARCH_ARM64
610 snprintf(mode->info, sizeof(mode->info), "%s", "Vector Neon");
611 #elif defined RTE_ARCH_PPC_64
612 snprintf(mode->info, sizeof(mode->info), "%s", "Vector AltiVec");
623 * DPDK callback to get the number of used descriptors in an RX queue
626 * Pointer to the device structure.
632 * The number of used Rx descriptors.
633 * -EINVAL if the queue is invalid.
636 mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
638 struct mlx5_priv *priv = dev->data->dev_private;
639 struct mlx5_rxq_data *rxq;
641 if (dev->rx_pkt_burst != mlx5_rx_burst) {
645 rxq = (*priv->rxqs)[rx_queue_id];
650 return rx_queue_count(rxq);
653 #define MLX5_SYSTEM_LOG_DIR "/var/log"
655 * Dump debug information to log file.
660 * If not NULL, this string is printed as a header to the output
661 * and the output is dumped in hexadecimal view.
663 * This is the buffer address to print out.
665 * The number of bytes to dump out.
668 mlx5_dump_debug_information(const char *fname, const char *hex_title,
669 const void *buf, unsigned int hex_len)
673 MKSTR(path, "%s/%s", MLX5_SYSTEM_LOG_DIR, fname);
674 fd = fopen(path, "a+");
676 DRV_LOG(WARNING, "cannot open %s for debug dump", path);
677 MKSTR(path2, "./%s", fname);
678 fd = fopen(path2, "a+");
680 DRV_LOG(ERR, "cannot open %s for debug dump", path2);
683 DRV_LOG(INFO, "New debug dump in file %s", path2);
685 DRV_LOG(INFO, "New debug dump in file %s", path);
688 rte_hexdump(fd, hex_title, buf, hex_len);
690 fprintf(fd, "%s", (const char *)buf);
691 fprintf(fd, "\n\n\n");
696 * Move QP from error state to running state and initialize indexes.
699 * Pointer to TX queue control structure.
702 * 0 on success, else -1.
705 tx_recover_qp(struct mlx5_txq_ctrl *txq_ctrl)
707 struct mlx5_mp_arg_queue_state_modify sm = {
709 .queue_id = txq_ctrl->txq.idx,
712 if (mlx5_queue_state_modify(ETH_DEV(txq_ctrl->priv), &sm))
714 txq_ctrl->txq.wqe_ci = 0;
715 txq_ctrl->txq.wqe_pi = 0;
716 txq_ctrl->txq.elts_comp = 0;
720 /* Return 1 if the error CQE is already signed, otherwise sign it and return 0. */
722 check_err_cqe_seen(volatile struct mlx5_err_cqe *err_cqe)
724 static const uint8_t magic[] = "seen";
728 for (i = 0; i < sizeof(magic); ++i)
729 if (!ret || err_cqe->rsvd1[i] != magic[i]) {
731 err_cqe->rsvd1[i] = magic[i];
740 * Pointer to TX queue structure.
742 * Pointer to the error CQE.
745 * Negative value if queue recovery failed, otherwise
746 * the error completion entry is handled successfully.
749 mlx5_tx_error_cqe_handle(struct mlx5_txq_data *restrict txq,
750 volatile struct mlx5_err_cqe *err_cqe)
752 if (err_cqe->syndrome != MLX5_CQE_SYNDROME_WR_FLUSH_ERR) {
753 const uint16_t wqe_m = ((1 << txq->wqe_n) - 1);
754 struct mlx5_txq_ctrl *txq_ctrl =
755 container_of(txq, struct mlx5_txq_ctrl, txq);
756 uint16_t new_wqe_pi = rte_be_to_cpu_16(err_cqe->wqe_counter);
757 int seen = check_err_cqe_seen(err_cqe);
759 if (!seen && txq_ctrl->dump_file_n <
760 txq_ctrl->priv->config.max_dump_files_num) {
761 MKSTR(err_str, "Unexpected CQE error syndrome "
762 "0x%02x CQN = %u SQN = %u wqe_counter = %u "
763 "wq_ci = %u cq_ci = %u", err_cqe->syndrome,
764 txq->cqe_s, txq->qp_num_8s >> 8,
765 rte_be_to_cpu_16(err_cqe->wqe_counter),
766 txq->wqe_ci, txq->cq_ci);
767 MKSTR(name, "dpdk_mlx5_port_%u_txq_%u_index_%u_%u",
768 PORT_ID(txq_ctrl->priv), txq->idx,
769 txq_ctrl->dump_file_n, (uint32_t)rte_rdtsc());
770 mlx5_dump_debug_information(name, NULL, err_str, 0);
771 mlx5_dump_debug_information(name, "MLX5 Error CQ:",
772 (const void *)((uintptr_t)
776 mlx5_dump_debug_information(name, "MLX5 Error SQ:",
777 (const void *)((uintptr_t)
781 txq_ctrl->dump_file_n++;
785 * Count errors in units of WQEs.
786 * Later this can be improved to count error packets,
787 * for example, by parsing the SQ to find how many packets
788 * should be counted for each WQE.
790 txq->stats.oerrors += ((txq->wqe_ci & wqe_m) -
792 if (tx_recover_qp(txq_ctrl)) {
793 /* Recovering failed - retry later on the same WQE. */
796 /* Release all the remaining buffers. */
797 txq_free_elts(txq_ctrl);
803 * Translate RX completion flags to packet type.
806 * Pointer to RX queue structure.
810 * @note: fix mlx5_dev_supported_ptypes_get() if any change is made here.
813 * Packet type for struct rte_mbuf.
815 static inline uint32_t
816 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
819 uint8_t pinfo = cqe->pkt_info;
820 uint16_t ptype = cqe->hdr_type_etc;
823 * The index to the array should have:
824 * bit[1:0] = l3_hdr_type
825 * bit[4:2] = l4_hdr_type
828 * bit[7] = outer_l3_type
830 idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10);
831 return mlx5_ptype_table[idx] | rxq->tunnel * !!(idx & (1 << 6));
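/*
 * Note (illustrative): bit 6 of the index is the tunneled indication taken
 * from pkt_info, so the per-queue tunnel ptype in rxq->tunnel is OR-ed into
 * the result only for tunneled completions; for plain packets the
 * multiplication by !!(idx & (1 << 6)) yields zero.
 */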
835 * Initialize Rx WQ and indexes.
838 * Pointer to RX queue structure.
841 mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
843 const unsigned int wqe_n = 1 << rxq->elts_n;
846 for (i = 0; (i != wqe_n); ++i) {
847 volatile struct mlx5_wqe_data_seg *scat;
851 if (mlx5_rxq_mprq_enabled(rxq)) {
852 struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[i];
854 scat = &((volatile struct mlx5_wqe_mprq *)
856 addr = (uintptr_t)mlx5_mprq_buf_addr(buf,
857 1 << rxq->strd_num_n);
858 byte_count = (1 << rxq->strd_sz_n) *
859 (1 << rxq->strd_num_n);
861 struct rte_mbuf *buf = (*rxq->elts)[i];
863 scat = &((volatile struct mlx5_wqe_data_seg *)
865 addr = rte_pktmbuf_mtod(buf, uintptr_t);
866 byte_count = DATA_LEN(buf);
868 /* scat->addr must be able to store a pointer. */
869 MLX5_ASSERT(sizeof(scat->addr) >= sizeof(uintptr_t));
870 *scat = (struct mlx5_wqe_data_seg){
871 .addr = rte_cpu_to_be_64(addr),
872 .byte_count = rte_cpu_to_be_32(byte_count),
873 .lkey = mlx5_rx_addr2mr(rxq, addr),
876 rxq->consumed_strd = 0;
877 rxq->decompressed = 0;
879 rxq->zip = (struct rxq_zip){
882 /* Update doorbell counter. */
883 rxq->rq_ci = wqe_n >> rxq->sges_n;
885 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
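/*
 * Note (illustrative): rq_ci is maintained in units of 2^sges_n WQEs - one
 * scatter chain per packet - which is why mlx5_rx_burst() below re-expands
 * it with rq_ci << sges_n when walking the individual data segments.
 * E.g. with sges_n = 2 each packet consumes 4 WQEs while the doorbell
 * counter advances by 1 per packet.
 */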
889 * Modify a Verbs/DevX queue state.
890 * This must be called from the primary process.
893 * Pointer to Ethernet device.
895 * State modify request parameters.
898 * 0 in case of success else non-zero value and rte_errno is set.
901 mlx5_queue_state_modify_primary(struct rte_eth_dev *dev,
902 const struct mlx5_mp_arg_queue_state_modify *sm)
905 struct mlx5_priv *priv = dev->data->dev_private;
908 struct mlx5_rxq_data *rxq = (*priv->rxqs)[sm->queue_id];
909 struct mlx5_rxq_ctrl *rxq_ctrl =
910 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
912 if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV) {
913 struct ibv_wq_attr mod = {
914 .attr_mask = IBV_WQ_ATTR_STATE,
915 .wq_state = sm->state,
918 ret = mlx5_glue->modify_wq(rxq_ctrl->obj->wq, &mod);
919 } else { /* rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ. */
920 struct mlx5_devx_modify_rq_attr rq_attr;
922 memset(&rq_attr, 0, sizeof(rq_attr));
923 if (sm->state == IBV_WQS_RESET) {
924 rq_attr.rq_state = MLX5_RQC_STATE_ERR;
925 rq_attr.state = MLX5_RQC_STATE_RST;
926 } else if (sm->state == IBV_WQS_RDY) {
927 rq_attr.rq_state = MLX5_RQC_STATE_RST;
928 rq_attr.state = MLX5_RQC_STATE_RDY;
929 } else if (sm->state == IBV_WQS_ERR) {
930 rq_attr.rq_state = MLX5_RQC_STATE_RDY;
931 rq_attr.state = MLX5_RQC_STATE_ERR;
933 ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq,
937 DRV_LOG(ERR, "Cannot change Rx WQ state to %u - %s",
938 sm->state, strerror(errno));
943 struct mlx5_txq_data *txq = (*priv->txqs)[sm->queue_id];
944 struct mlx5_txq_ctrl *txq_ctrl =
945 container_of(txq, struct mlx5_txq_ctrl, txq);
946 struct ibv_qp_attr mod = {
947 .qp_state = IBV_QPS_RESET,
948 .port_num = (uint8_t)priv->ibv_port,
950 struct ibv_qp *qp = txq_ctrl->obj->qp;
952 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
954 DRV_LOG(ERR, "Cannot change the Tx QP state to RESET "
955 "%s", strerror(errno));
959 mod.qp_state = IBV_QPS_INIT;
960 ret = mlx5_glue->modify_qp(qp, &mod,
961 (IBV_QP_STATE | IBV_QP_PORT));
963 DRV_LOG(ERR, "Cannot change Tx QP state to INIT %s",
968 mod.qp_state = IBV_QPS_RTR;
969 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
971 DRV_LOG(ERR, "Cannot change Tx QP state to RTR %s",
976 mod.qp_state = IBV_QPS_RTS;
977 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
979 DRV_LOG(ERR, "Cannot change Tx QP state to RTS %s",
989 * Modify a Verbs queue state.
992 * Pointer to Ethernet device.
994 * State modify request parameters.
997 * 0 in case of success else non-zero value.
1000 mlx5_queue_state_modify(struct rte_eth_dev *dev,
1001 struct mlx5_mp_arg_queue_state_modify *sm)
1003 struct mlx5_priv *priv = dev->data->dev_private;
1006 switch (rte_eal_process_type()) {
1007 case RTE_PROC_PRIMARY:
1008 ret = mlx5_queue_state_modify_primary(dev, sm);
1010 case RTE_PROC_SECONDARY:
1011 ret = mlx5_mp_req_queue_state_modify(&priv->mp_id, sm);
1020 * Handle a Rx error.
1021 * The function moves the RQ state to reset when the first error CQE is
1022 * seen, then lets the caller's loop drain the CQ. When the CQ is empty,
1023 * it moves the RQ state to ready and initializes the RQ.
1024 * Identifying the next CQE and counting errors remain the caller's responsibility.
1027 * Pointer to RX queue structure.
1029 * 1 when called from vectorized Rx burst, need to prepare mbufs for the RQ.
1030 * 0 when called from non-vectorized Rx burst.
1033 * -1 in case of recovery error, otherwise the CQE status.
1036 mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
1038 const uint16_t cqe_n = 1 << rxq->cqe_n;
1039 const uint16_t cqe_mask = cqe_n - 1;
1040 const unsigned int wqe_n = 1 << rxq->elts_n;
1041 struct mlx5_rxq_ctrl *rxq_ctrl =
1042 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
1044 volatile struct mlx5_cqe *cqe;
1045 volatile struct mlx5_err_cqe *err_cqe;
1047 .cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask],
1049 struct mlx5_mp_arg_queue_state_modify sm;
1052 switch (rxq->err_state) {
1053 case MLX5_RXQ_ERR_STATE_NO_ERROR:
1054 rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_RESET;
1056 case MLX5_RXQ_ERR_STATE_NEED_RESET:
1058 sm.queue_id = rxq->idx;
1059 sm.state = IBV_WQS_RESET;
1060 if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv), &sm))
1062 if (rxq_ctrl->dump_file_n <
1063 rxq_ctrl->priv->config.max_dump_files_num) {
1064 MKSTR(err_str, "Unexpected CQE error syndrome "
1065 "0x%02x CQN = %u RQN = %u wqe_counter = %u"
1066 " rq_ci = %u cq_ci = %u", u.err_cqe->syndrome,
1067 rxq->cqn, rxq_ctrl->wqn,
1068 rte_be_to_cpu_16(u.err_cqe->wqe_counter),
1069 rxq->rq_ci << rxq->sges_n, rxq->cq_ci);
1070 MKSTR(name, "dpdk_mlx5_port_%u_rxq_%u_%u",
1071 rxq->port_id, rxq->idx, (uint32_t)rte_rdtsc());
1072 mlx5_dump_debug_information(name, NULL, err_str, 0);
1073 mlx5_dump_debug_information(name, "MLX5 Error CQ:",
1074 (const void *)((uintptr_t)
1076 sizeof(*u.cqe) * cqe_n);
1077 mlx5_dump_debug_information(name, "MLX5 Error RQ:",
1078 (const void *)((uintptr_t)
1081 rxq_ctrl->dump_file_n++;
1083 rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_READY;
1085 case MLX5_RXQ_ERR_STATE_NEED_READY:
1086 ret = check_cqe(u.cqe, cqe_n, rxq->cq_ci);
1087 if (ret == MLX5_CQE_STATUS_HW_OWN) {
1089 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1092 * The RQ consumer index must be zeroed while moving
1093 * from RESET state to RDY state.
1095 *rxq->rq_db = rte_cpu_to_be_32(0);
1098 sm.queue_id = rxq->idx;
1099 sm.state = IBV_WQS_RDY;
1100 if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv),
1104 const uint16_t q_mask = wqe_n - 1;
1106 struct rte_mbuf **elt;
1108 unsigned int n = wqe_n - (rxq->rq_ci -
1111 for (i = 0; i < (int)n; ++i) {
1112 elt_idx = (rxq->rq_ci + i) & q_mask;
1113 elt = &(*rxq->elts)[elt_idx];
1114 *elt = rte_mbuf_raw_alloc(rxq->mp);
1116 for (i--; i >= 0; --i) {
1117 elt_idx = (rxq->rq_ci +
1121 rte_pktmbuf_free_seg
1127 for (i = 0; i < (int)wqe_n; ++i) {
1128 elt = &(*rxq->elts)[i];
1130 (uint16_t)((*elt)->buf_len -
1131 rte_pktmbuf_headroom(*elt));
1133 /* Padding with a fake mbuf for vec Rx. */
1134 for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
1135 (*rxq->elts)[wqe_n + i] =
1138 mlx5_rxq_initialize(rxq);
1139 rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
1148 * Get size of the next packet for a given CQE. For compressed CQEs, the
1149 * consumer index is updated only once all packets of the current one have been processed.
1153 * Pointer to RX queue.
1157 * Store pointer to mini-CQE if compressed. Otherwise, the pointer is not
1161 * 0 in case of empty CQE, otherwise the packet size in bytes.
1164 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
1165 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe)
1167 struct rxq_zip *zip = &rxq->zip;
1168 uint16_t cqe_n = cqe_cnt + 1;
1174 /* Process compressed data in the CQE and mini arrays. */
1176 volatile struct mlx5_mini_cqe8 (*mc)[8] =
1177 (volatile struct mlx5_mini_cqe8 (*)[8])
1178 (uintptr_t)(&(*rxq->cqes)[zip->ca &
1181 len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt);
1182 *mcqe = &(*mc)[zip->ai & 7];
1183 if ((++zip->ai & 7) == 0) {
1184 /* Invalidate consumed CQEs */
1187 while (idx != end) {
1188 (*rxq->cqes)[idx & cqe_cnt].op_own =
1189 MLX5_CQE_INVALIDATE;
1193 * Increment consumer index to skip the number
1194 * of CQEs consumed. Hardware leaves holes in
1195 * the CQ ring for software use.
1200 if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) {
1201 /* Invalidate the rest */
1205 while (idx != end) {
1206 (*rxq->cqes)[idx & cqe_cnt].op_own =
1207 MLX5_CQE_INVALIDATE;
1210 rxq->cq_ci = zip->cq_ci;
1214 * No compressed data, get next CQE and verify if it is compressed.
1221 ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
1222 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
1223 if (unlikely(ret == MLX5_CQE_STATUS_ERR ||
1225 ret = mlx5_rx_err_handle(rxq, 0);
1226 if (ret == MLX5_CQE_STATUS_HW_OWN ||
1234 op_own = cqe->op_own;
1235 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) {
1236 volatile struct mlx5_mini_cqe8 (*mc)[8] =
1237 (volatile struct mlx5_mini_cqe8 (*)[8])
1238 (uintptr_t)(&(*rxq->cqes)
1242 /* Fix endianness. */
1243 zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
1245 * Current mini array position is the one
1246 * returned by check_cqe().
1248 * If completion comprises several mini arrays,
1249 * as a special case the second one is located
1250 * 7 CQEs after the initial CQE instead of 8
1251 * for subsequent ones.
1253 zip->ca = rxq->cq_ci;
1254 zip->na = zip->ca + 7;
1255 /* Compute the next non compressed CQE. */
1257 zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
1258 /* Get packet size to return. */
1259 len = rte_be_to_cpu_32((*mc)[0].byte_cnt);
1262 /* Prefetch all to be invalidated */
1265 while (idx != end) {
1266 rte_prefetch0(&(*rxq->cqes)[(idx) &
1271 len = rte_be_to_cpu_32(cqe->byte_cnt);
1274 if (unlikely(rxq->err_state)) {
1275 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1276 ++rxq->stats.idropped;
1284 * Translate RX completion flags to offload flags.
1290 * Offload flags (ol_flags) for struct rte_mbuf.
1292 static inline uint32_t
1293 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe)
1295 uint32_t ol_flags = 0;
1296 uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);
1300 MLX5_CQE_RX_L3_HDR_VALID,
1301 PKT_RX_IP_CKSUM_GOOD) |
1303 MLX5_CQE_RX_L4_HDR_VALID,
1304 PKT_RX_L4_CKSUM_GOOD);
1309 * Fill in mbuf fields from RX completion flags.
1310 * Note that pkt->ol_flags should be initialized outside of this function.
1313 * Pointer to RX queue.
1318 * @param rss_hash_res
1319 * Packet RSS Hash result.
1322 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
1323 volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res)
1325 /* Update packet information. */
1326 pkt->packet_type = rxq_cq_to_pkt_type(rxq, cqe);
1327 if (rss_hash_res && rxq->rss_hash) {
1328 pkt->hash.rss = rss_hash_res;
1329 pkt->ol_flags |= PKT_RX_RSS_HASH;
1331 if (rxq->mark && MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) {
1332 pkt->ol_flags |= PKT_RX_FDIR;
1333 if (cqe->sop_drop_qpn !=
1334 rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) {
1335 uint32_t mark = cqe->sop_drop_qpn;
1337 pkt->ol_flags |= PKT_RX_FDIR_ID;
1338 pkt->hash.fdir.hi = mlx5_flow_mark_get(mark);
1341 if (rte_flow_dynf_metadata_avail() && cqe->flow_table_metadata) {
1342 pkt->ol_flags |= PKT_RX_DYNF_METADATA;
1343 *RTE_FLOW_DYNF_METADATA(pkt) = cqe->flow_table_metadata;
1346 pkt->ol_flags |= rxq_cq_to_ol_flags(cqe);
1347 if (rxq->vlan_strip &&
1348 (cqe->hdr_type_etc & rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) {
1349 pkt->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1350 pkt->vlan_tci = rte_be_to_cpu_16(cqe->vlan_info);
1352 if (rxq->hw_timestamp) {
1353 pkt->timestamp = rte_be_to_cpu_64(cqe->timestamp);
1354 pkt->ol_flags |= PKT_RX_TIMESTAMP;
1359 * DPDK callback for RX.
1362 * Generic pointer to RX queue structure.
1364 * Array to store received packets.
1366 * Maximum number of packets in array.
1369 * Number of packets successfully received (<= pkts_n).
1372 mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1374 struct mlx5_rxq_data *rxq = dpdk_rxq;
1375 const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1;
1376 const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1;
1377 const unsigned int sges_n = rxq->sges_n;
1378 struct rte_mbuf *pkt = NULL;
1379 struct rte_mbuf *seg = NULL;
1380 volatile struct mlx5_cqe *cqe =
1381 &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1383 unsigned int rq_ci = rxq->rq_ci << sges_n;
1384 int len = 0; /* keep its value across iterations. */
1387 unsigned int idx = rq_ci & wqe_cnt;
1388 volatile struct mlx5_wqe_data_seg *wqe =
1389 &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx];
1390 struct rte_mbuf *rep = (*rxq->elts)[idx];
1391 volatile struct mlx5_mini_cqe8 *mcqe = NULL;
1392 uint32_t rss_hash_res;
1400 rep = rte_mbuf_raw_alloc(rxq->mp);
1401 if (unlikely(rep == NULL)) {
1402 ++rxq->stats.rx_nombuf;
1405 * no buffers before we even started,
1406 * bail out silently.
1410 while (pkt != seg) {
1411 MLX5_ASSERT(pkt != (*rxq->elts)[idx]);
1415 rte_mbuf_raw_free(pkt);
1421 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1422 len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe);
1424 rte_mbuf_raw_free(rep);
1428 MLX5_ASSERT(len >= (rxq->crc_present << 2));
1429 pkt->ol_flags &= EXT_ATTACHED_MBUF;
1430 /* If compressed, take hash result from mini-CQE. */
1431 rss_hash_res = rte_be_to_cpu_32(mcqe == NULL ?
1433 mcqe->rx_hash_result);
1434 rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
1435 if (rxq->crc_present)
1436 len -= RTE_ETHER_CRC_LEN;
1438 if (cqe->lro_num_seg > 1) {
1440 (rte_pktmbuf_mtod(pkt, uint8_t *), cqe,
1442 pkt->ol_flags |= PKT_RX_LRO;
1443 pkt->tso_segsz = len / cqe->lro_num_seg;
1446 DATA_LEN(rep) = DATA_LEN(seg);
1447 PKT_LEN(rep) = PKT_LEN(seg);
1448 SET_DATA_OFF(rep, DATA_OFF(seg));
1449 PORT(rep) = PORT(seg);
1450 (*rxq->elts)[idx] = rep;
1452 * Fill NIC descriptor with the new buffer. The lkey and size
1453 * of the buffers are already known, only the buffer address
1456 wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
1457 /* If there's only one MR, no need to replace LKey in WQE. */
1458 if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
1459 wqe->lkey = mlx5_rx_mb2mr(rxq, rep);
1460 if (len > DATA_LEN(seg)) {
1461 len -= DATA_LEN(seg);
1466 DATA_LEN(seg) = len;
1467 #ifdef MLX5_PMD_SOFT_COUNTERS
1468 /* Increment bytes counter. */
1469 rxq->stats.ibytes += PKT_LEN(pkt);
1471 /* Return packet. */
1476 /* Align consumer index to the next stride. */
1481 if (unlikely((i == 0) && ((rq_ci >> sges_n) == rxq->rq_ci)))
1483 /* Update the consumer index. */
1484 rxq->rq_ci = rq_ci >> sges_n;
1486 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1488 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
1489 #ifdef MLX5_PMD_SOFT_COUNTERS
1490 /* Increment packets counter. */
1491 rxq->stats.ipackets += i;
1497 * Update LRO packet TCP header.
1498 * The HW LRO feature doesn't update the TCP header after coalescing the
1499 * TCP segments but supplies information in the CQE for SW to fill it in.
1502 * Pointer to the TCP header.
1504 * Pointer to the completion entry.
1506 * The L3 pseudo-header checksum.
1509 mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *restrict tcp,
1510 volatile struct mlx5_cqe *restrict cqe,
1513 uint8_t l4_type = (rte_be_to_cpu_16(cqe->hdr_type_etc) &
1514 MLX5_CQE_L4_TYPE_MASK) >> MLX5_CQE_L4_TYPE_SHIFT;
1516 * The HW calculates only the TCP payload checksum, need to complete
1517 * the TCP header checksum and the L3 pseudo-header checksum.
1519 uint32_t csum = phcsum + cqe->csum;
1521 if (l4_type == MLX5_L4_HDR_TYPE_TCP_EMPTY_ACK ||
1522 l4_type == MLX5_L4_HDR_TYPE_TCP_WITH_ACL) {
1523 tcp->tcp_flags |= RTE_TCP_ACK_FLAG;
1524 tcp->recv_ack = cqe->lro_ack_seq_num;
1525 tcp->rx_win = cqe->lro_tcp_win;
1527 if (cqe->lro_tcppsh_abort_dupack & MLX5_CQE_LRO_PUSH_MASK)
1528 tcp->tcp_flags |= RTE_TCP_PSH_FLAG;
1530 csum += rte_raw_cksum(tcp, (tcp->data_off & 0xF) * 4);
1531 csum = ((csum & 0xffff0000) >> 16) + (csum & 0xffff);
1532 csum = (~csum) & 0xffff;
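	/*
	 * Worked example (illustrative): if the accumulated sum is 0x1abcd,
	 * the fold above gives 0x0001 + 0xabcd = 0xabce and the final one's
	 * complement result is (~0xabce) & 0xffff = 0x5431.
	 */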
1539 * Update LRO packet headers.
1540 * The HW LRO feature doesn't update the L3/TCP headers after coalescing the
1541 * TCP segments but supplies information in the CQE for SW to fill them in.
1544 * The packet address.
1546 * Pointer to the completion entry.
1548 * The packet length.
1551 mlx5_lro_update_hdr(uint8_t *restrict padd,
1552 volatile struct mlx5_cqe *restrict cqe,
1556 struct rte_ether_hdr *eth;
1557 struct rte_vlan_hdr *vlan;
1558 struct rte_ipv4_hdr *ipv4;
1559 struct rte_ipv6_hdr *ipv6;
1560 struct rte_tcp_hdr *tcp;
1565 uint16_t proto = h.eth->ether_type;
1569 while (proto == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
1570 proto == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
1571 proto = h.vlan->eth_proto;
1574 if (proto == RTE_BE16(RTE_ETHER_TYPE_IPV4)) {
1575 h.ipv4->time_to_live = cqe->lro_min_ttl;
1576 h.ipv4->total_length = rte_cpu_to_be_16(len - (h.hdr - padd));
1577 h.ipv4->hdr_checksum = 0;
1578 h.ipv4->hdr_checksum = rte_ipv4_cksum(h.ipv4);
1579 phcsum = rte_ipv4_phdr_cksum(h.ipv4, 0);
1582 h.ipv6->hop_limits = cqe->lro_min_ttl;
1583 h.ipv6->payload_len = rte_cpu_to_be_16(len - (h.hdr - padd) -
1585 phcsum = rte_ipv6_phdr_cksum(h.ipv6, 0);
1588 mlx5_lro_update_tcp_hdr(h.tcp, cqe, phcsum);
1592 mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
1594 struct mlx5_mprq_buf *buf = opaque;
1596 if (rte_atomic16_read(&buf->refcnt) == 1) {
1597 rte_mempool_put(buf->mp, buf);
1598 } else if (rte_atomic16_add_return(&buf->refcnt, -1) == 0) {
1599 rte_atomic16_set(&buf->refcnt, 1);
1600 rte_mempool_put(buf->mp, buf);
1605 mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf)
1607 mlx5_mprq_buf_free_cb(NULL, buf);
1611 mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx,
1612 const unsigned int strd_n)
1614 struct mlx5_mprq_buf *rep = rxq->mprq_repl;
1615 volatile struct mlx5_wqe_data_seg *wqe =
1616 &((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg;
1619 MLX5_ASSERT(rep != NULL);
1620 /* Replace MPRQ buf. */
1621 (*rxq->mprq_bufs)[rq_idx] = rep;
1623 addr = mlx5_mprq_buf_addr(rep, strd_n);
1624 wqe->addr = rte_cpu_to_be_64((uintptr_t)addr);
1625 /* If there's only one MR, no need to replace LKey in WQE. */
1626 if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
1627 wqe->lkey = mlx5_rx_addr2mr(rxq, (uintptr_t)addr);
1628 /* Stash a mbuf for next replacement. */
1629 if (likely(!rte_mempool_get(rxq->mprq_mp, (void **)&rep)))
1630 rxq->mprq_repl = rep;
1632 rxq->mprq_repl = NULL;
1636 * DPDK callback for RX with Multi-Packet RQ support.
1639 * Generic pointer to RX queue structure.
1641 * Array to store received packets.
1643 * Maximum number of packets in array.
1646 * Number of packets successfully received (<= pkts_n).
1649 mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1651 struct mlx5_rxq_data *rxq = dpdk_rxq;
1652 const unsigned int strd_n = 1 << rxq->strd_num_n;
1653 const unsigned int strd_sz = 1 << rxq->strd_sz_n;
1654 const unsigned int strd_shift =
1655 MLX5_MPRQ_STRIDE_SHIFT_BYTE * rxq->strd_shift_en;
1656 const unsigned int cq_mask = (1 << rxq->cqe_n) - 1;
1657 const unsigned int wq_mask = (1 << rxq->elts_n) - 1;
1658 volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
1660 uint32_t rq_ci = rxq->rq_ci;
1661 uint16_t consumed_strd = rxq->consumed_strd;
1662 struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
1664 while (i < pkts_n) {
1665 struct rte_mbuf *pkt;
1673 int32_t hdrm_overlap;
1674 volatile struct mlx5_mini_cqe8 *mcqe = NULL;
1675 uint32_t rss_hash_res = 0;
1677 if (consumed_strd == strd_n) {
1678 /* Replace WQE only if the buffer is still in use. */
1679 if (rte_atomic16_read(&buf->refcnt) > 1) {
1680 mprq_buf_replace(rxq, rq_ci & wq_mask, strd_n);
1681 /* Release the old buffer. */
1682 mlx5_mprq_buf_free(buf);
1683 } else if (unlikely(rxq->mprq_repl == NULL)) {
1684 struct mlx5_mprq_buf *rep;
1687 * Currently, the MPRQ mempool is out of buffers
1688 * and doing memcpy regardless of the size of Rx
1689 * packet. Retry allocation to get back to
1692 if (!rte_mempool_get(rxq->mprq_mp,
1694 rxq->mprq_repl = rep;
1696 /* Advance to the next WQE. */
1699 buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
1701 cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
1702 ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe);
1706 strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
1707 MLX5_MPRQ_STRIDE_NUM_SHIFT;
1708 MLX5_ASSERT(strd_cnt);
1709 consumed_strd += strd_cnt;
1710 if (byte_cnt & MLX5_MPRQ_FILLER_MASK)
1713 rss_hash_res = rte_be_to_cpu_32(cqe->rx_hash_res);
1714 strd_idx = rte_be_to_cpu_16(cqe->wqe_counter);
1716 /* mini-CQE for MPRQ doesn't have hash result. */
1717 strd_idx = rte_be_to_cpu_16(mcqe->stride_idx);
1719 MLX5_ASSERT(strd_idx < strd_n);
1720 MLX5_ASSERT(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) &
1722 pkt = rte_pktmbuf_alloc(rxq->mp);
1723 if (unlikely(pkt == NULL)) {
1724 ++rxq->stats.rx_nombuf;
1727 len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
1728 MLX5_ASSERT((int)len >= (rxq->crc_present << 2));
1729 if (rxq->crc_present)
1730 len -= RTE_ETHER_CRC_LEN;
1731 offset = strd_idx * strd_sz + strd_shift;
1732 addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf, strd_n), offset);
1733 hdrm_overlap = len + RTE_PKTMBUF_HEADROOM - strd_cnt * strd_sz;
1735 * Memcpy packets to the target mbuf if:
1736 * - The size of the packet is smaller than mprq_max_memcpy_len.
1737 * - The mempool for Multi-Packet RQ is out of buffers.
1738 * - The packet's stride overlaps a headroom and scatter is off.
1740 if (len <= rxq->mprq_max_memcpy_len ||
1741 rxq->mprq_repl == NULL ||
1742 (hdrm_overlap > 0 && !rxq->strd_scatter_en)) {
1743 if (likely(rte_pktmbuf_tailroom(pkt) >= len)) {
1744 rte_memcpy(rte_pktmbuf_mtod(pkt, void *),
1746 DATA_LEN(pkt) = len;
1747 } else if (rxq->strd_scatter_en) {
1748 struct rte_mbuf *prev = pkt;
1750 RTE_MIN(rte_pktmbuf_tailroom(pkt), len);
1751 uint32_t rem_len = len - seg_len;
1753 rte_memcpy(rte_pktmbuf_mtod(pkt, void *),
1755 DATA_LEN(pkt) = seg_len;
1757 struct rte_mbuf *next =
1758 rte_pktmbuf_alloc(rxq->mp);
1760 if (unlikely(next == NULL)) {
1761 rte_pktmbuf_free(pkt);
1762 ++rxq->stats.rx_nombuf;
1766 SET_DATA_OFF(next, 0);
1767 addr = RTE_PTR_ADD(addr, seg_len);
1769 (rte_pktmbuf_tailroom(next),
1772 (rte_pktmbuf_mtod(next, void *),
1774 DATA_LEN(next) = seg_len;
1780 rte_pktmbuf_free_seg(pkt);
1781 ++rxq->stats.idropped;
1785 rte_iova_t buf_iova;
1786 struct rte_mbuf_ext_shared_info *shinfo;
1787 uint16_t buf_len = strd_cnt * strd_sz;
1790 /* Increment the refcnt of the whole chunk. */
1791 rte_atomic16_add_return(&buf->refcnt, 1);
1792 MLX5_ASSERT((uint16_t)rte_atomic16_read(&buf->refcnt) <=
1794 buf_addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM);
1796 * MLX5 device doesn't use iova but it is necessary in a
1797 * case where the Rx packet is transmitted via a different PMD.
1800 buf_iova = rte_mempool_virt2iova(buf) +
1801 RTE_PTR_DIFF(buf_addr, buf);
1802 shinfo = &buf->shinfos[strd_idx];
1803 rte_mbuf_ext_refcnt_set(shinfo, 1);
1805 * EXT_ATTACHED_MBUF will be set to pkt->ol_flags when
1806 * attaching the stride to mbuf and more offload flags
1807 * will be added below by calling rxq_cq_to_mbuf().
1808 * Other fields will be overwritten.
1810 rte_pktmbuf_attach_extbuf(pkt, buf_addr, buf_iova,
1812 /* Set mbuf head-room. */
1813 SET_DATA_OFF(pkt, RTE_PKTMBUF_HEADROOM);
1814 MLX5_ASSERT(pkt->ol_flags == EXT_ATTACHED_MBUF);
1815 MLX5_ASSERT(rte_pktmbuf_tailroom(pkt) <
1816 len - (hdrm_overlap > 0 ? hdrm_overlap : 0));
1817 DATA_LEN(pkt) = len;
1819 * Copy the last fragment of a packet (up to headroom
1820 * size bytes) in case there is a stride overlap with
1821 * a next packet's headroom. Allocate a separate mbuf
1822 * to store this fragment and link it. Scatter is on.
1824 if (hdrm_overlap > 0) {
1825 MLX5_ASSERT(rxq->strd_scatter_en);
1826 struct rte_mbuf *seg =
1827 rte_pktmbuf_alloc(rxq->mp);
1829 if (unlikely(seg == NULL)) {
1830 rte_pktmbuf_free_seg(pkt);
1831 ++rxq->stats.rx_nombuf;
1834 SET_DATA_OFF(seg, 0);
1835 rte_memcpy(rte_pktmbuf_mtod(seg, void *),
1836 RTE_PTR_ADD(addr, len - hdrm_overlap),
1838 DATA_LEN(seg) = hdrm_overlap;
1839 DATA_LEN(pkt) = len - hdrm_overlap;
1844 rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
1845 if (cqe->lro_num_seg > 1) {
1846 mlx5_lro_update_hdr(addr, cqe, len);
1847 pkt->ol_flags |= PKT_RX_LRO;
1848 pkt->tso_segsz = len / cqe->lro_num_seg;
1851 PORT(pkt) = rxq->port_id;
1852 #ifdef MLX5_PMD_SOFT_COUNTERS
1853 /* Increment bytes counter. */
1854 rxq->stats.ibytes += PKT_LEN(pkt);
1856 /* Return packet. */
1861 /* Update the consumer indexes. */
1862 rxq->consumed_strd = consumed_strd;
1864 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1865 if (rq_ci != rxq->rq_ci) {
1868 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
1870 #ifdef MLX5_PMD_SOFT_COUNTERS
1871 /* Increment packets counter. */
1872 rxq->stats.ipackets += i;
1878 * Dummy DPDK callback for TX.
1880 * This function is used to temporarily replace the real callback during
1881 * unsafe control operations on the queue, or in case of error.
1884 * Generic pointer to TX queue structure.
1886 * Packets to transmit.
1888 * Number of packets in array.
1891 * Number of packets successfully transmitted (<= pkts_n).
1894 removed_tx_burst(void *dpdk_txq __rte_unused,
1895 struct rte_mbuf **pkts __rte_unused,
1896 uint16_t pkts_n __rte_unused)
1903 * Dummy DPDK callback for RX.
1905 * This function is used to temporarily replace the real callback during
1906 * unsafe control operations on the queue, or in case of error.
1909 * Generic pointer to RX queue structure.
1911 * Array to store received packets.
1913 * Maximum number of packets in array.
1916 * Number of packets successfully received (<= pkts_n).
1919 removed_rx_burst(void *dpdk_txq __rte_unused,
1920 struct rte_mbuf **pkts __rte_unused,
1921 uint16_t pkts_n __rte_unused)
1928 * Vectorized Rx/Tx routines are not compiled in when the required vector
1929 * instructions are not supported on a target architecture. The following null
1930 * stubs are needed for linkage when the vectorized implementations are not
1931 * built from other files (e.g. mlx5_rxtx_vec_sse.c for x86).
1935 mlx5_rx_burst_vec(void *dpdk_txq __rte_unused,
1936 struct rte_mbuf **pkts __rte_unused,
1937 uint16_t pkts_n __rte_unused)
1943 mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)
1949 mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused)
1955 * Free the mbufs from the linear array of pointers.
1958 * Pointer to array of packets to be freed.
1960 * Number of packets to be freed.
1962 * Configured Tx offloads mask. It is fully defined at
1963 * compile time and may be used for optimization.
1965 static __rte_always_inline void
1966 mlx5_tx_free_mbuf(struct rte_mbuf **restrict pkts,
1967 unsigned int pkts_n,
1968 unsigned int olx __rte_unused)
1970 struct rte_mempool *pool = NULL;
1971 struct rte_mbuf **p_free = NULL;
1972 struct rte_mbuf *mbuf;
1973 unsigned int n_free = 0;
1976 * The implemented algorithm eliminates
1977 * copying pointers to a temporary array
1978 * for rte_mempool_put_bulk() calls.
1981 MLX5_ASSERT(pkts_n);
1985 * Decrement mbuf reference counter, detach
1986 * indirect and external buffers if needed.
1988 mbuf = rte_pktmbuf_prefree_seg(*pkts);
1989 if (likely(mbuf != NULL)) {
1990 MLX5_ASSERT(mbuf == *pkts);
1991 if (likely(n_free != 0)) {
1992 if (unlikely(pool != mbuf->pool))
1993 /* From different pool. */
1996 /* Start new scan array. */
2003 if (unlikely(pkts_n == 0)) {
2009 * This happens if mbuf is still referenced.
2010 * We can't put it back to the pool, skip.
2014 if (unlikely(n_free != 0))
2015 /* There is some array to free. */
2017 if (unlikely(pkts_n == 0))
2018 /* Last mbuf, nothing to free. */
2024 * This loop is implemented to avoid multiple
2025 * inlining of rte_mempool_put_bulk().
2028 MLX5_ASSERT(p_free);
2029 MLX5_ASSERT(n_free);
2031 * Free the array of pre-freed mbufs
2032 * belonging to the same memory pool.
2034 rte_mempool_put_bulk(pool, (void *)p_free, n_free);
2035 if (unlikely(mbuf != NULL)) {
2036 /* There is the request to start new scan. */
2041 if (likely(pkts_n != 0))
2044 * This is the last mbuf to be freed.
2045 * Do one more loop iteration to complete.
2046 * This is rare case of the last unique mbuf.
2051 if (likely(pkts_n == 0))
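/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * same batching idea expressed in a simpler, two-pass form - first run
 * rte_pktmbuf_prefree_seg() on every mbuf, then return contiguous runs
 * coming from the same mempool with a single rte_mempool_put_bulk() call.
 * Unlike the routine above, this sketch overwrites the caller's array with
 * the prefree results.
 */
static __rte_unused void
mlx5_tx_free_mbuf_sketch(struct rte_mbuf **pkts, unsigned int pkts_n)
{
	unsigned int i, n;

	/* Decrement reference counters, detach external/indirect buffers. */
	for (i = 0; i < pkts_n; ++i)
		pkts[i] = rte_pktmbuf_prefree_seg(pkts[i]);
	for (i = 0; i < pkts_n; i = n) {
		if (pkts[i] == NULL) {
			/* Still referenced elsewhere, cannot be recycled. */
			n = i + 1;
			continue;
		}
		/* Extend the run while mbufs come from the same mempool. */
		for (n = i + 1; n < pkts_n; ++n)
			if (pkts[n] == NULL || pkts[n]->pool != pkts[i]->pool)
				break;
		rte_mempool_put_bulk(pkts[i]->pool, (void **)&pkts[i], n - i);
	}
}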
2060 * Free the mbufs from the elts ring buffer up to the new tail.
2063 * Pointer to Tx queue structure.
2065 * Index in elts to free up to, becomes new elts tail.
2067 * Configured Tx offloads mask. It is fully defined at
2068 * compile time and may be used for optimization.
2070 static __rte_always_inline void
2071 mlx5_tx_free_elts(struct mlx5_txq_data *restrict txq,
2073 unsigned int olx __rte_unused)
2075 uint16_t n_elts = tail - txq->elts_tail;
2077 MLX5_ASSERT(n_elts);
2078 MLX5_ASSERT(n_elts <= txq->elts_s);
2080 * Implement a loop to support ring buffer wraparound
2081 * with single inlining of mlx5_tx_free_mbuf().
2086 part = txq->elts_s - (txq->elts_tail & txq->elts_m);
2087 part = RTE_MIN(part, n_elts);
2089 MLX5_ASSERT(part <= txq->elts_s);
2090 mlx5_tx_free_mbuf(&txq->elts[txq->elts_tail & txq->elts_m],
2092 txq->elts_tail += part;
2098 * Store the mbuf being sent into elts ring buffer.
2099 * On Tx completion these mbufs will be freed.
2102 * Pointer to Tx queue structure.
2104 * Pointer to array of packets to be stored.
2106 * Number of packets to be stored.
2108 * Configured Tx offloads mask. It is fully defined at
2109 * compile time and may be used for optimization.
2111 static __rte_always_inline void
2112 mlx5_tx_copy_elts(struct mlx5_txq_data *restrict txq,
2113 struct rte_mbuf **restrict pkts,
2114 unsigned int pkts_n,
2115 unsigned int olx __rte_unused)
2118 struct rte_mbuf **elts = (struct rte_mbuf **)txq->elts;
2121 MLX5_ASSERT(pkts_n);
2122 part = txq->elts_s - (txq->elts_head & txq->elts_m);
2124 MLX5_ASSERT(part <= txq->elts_s);
2125 /* This code is a good candidate for vectorizing with SIMD. */
2126 rte_memcpy((void *)(elts + (txq->elts_head & txq->elts_m)),
2128 RTE_MIN(part, pkts_n) * sizeof(struct rte_mbuf *));
2129 txq->elts_head += pkts_n;
2130 if (unlikely(part < pkts_n))
2131 /* The copy is wrapping around the elts array. */
2132 rte_memcpy((void *)elts, (void *)(pkts + part),
2133 (pkts_n - part) * sizeof(struct rte_mbuf *));
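/*
 * Worked example (illustrative): with elts_s = 256 (elts_m = 255) and
 * elts_head = 1018, part = 256 - (1018 & 255) = 6. Storing 10 mbufs copies
 * six pointers into slots 250..255, wraps around, copies the remaining four
 * into slots 0..3 and advances elts_head to 1028.
 */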
2137 * Update completion queue consuming index via doorbell
2138 * and flush the completed data buffers.
2141 * Pointer to TX queue structure.
2142 * @param last_cqe
2143 * Valid CQE pointer; if not NULL, update txq->wqe_pi and flush the buffers.
2145 * Configured Tx offloads mask. It is fully defined at
2146 * compile time and may be used for optimization.
2148 static __rte_always_inline void
2149 mlx5_tx_comp_flush(struct mlx5_txq_data *restrict txq,
2150 volatile struct mlx5_cqe *last_cqe,
2151 unsigned int olx __rte_unused)
2153 if (likely(last_cqe != NULL)) {
2156 txq->wqe_pi = rte_be_to_cpu_16(last_cqe->wqe_counter);
2157 tail = txq->fcqs[(txq->cq_ci - 1) & txq->cqe_m];
2158 if (likely(tail != txq->elts_tail)) {
2159 mlx5_tx_free_elts(txq, tail, olx);
2160 MLX5_ASSERT(tail == txq->elts_tail);
2166 * Manage TX completions. This routine checks the CQ for
2167 * arrived CQEs, deduces the last completed WQE in the SQ,
2168 * updates the SQ producer index and frees all completed mbufs.
2171 * Pointer to TX queue structure.
2173 * Configured Tx offloads mask. It is fully defined at
2174 * compile time and may be used for optimization.
2176 * NOTE: not inlined intentionally, it makes tx_burst
2177 * routine smaller, simpler and faster - from experiments.
2180 mlx5_tx_handle_completion(struct mlx5_txq_data *restrict txq,
2181 unsigned int olx __rte_unused)
2183 unsigned int count = MLX5_TX_COMP_MAX_CQE;
2184 volatile struct mlx5_cqe *last_cqe = NULL;
2185 bool ring_doorbell = false;
2188 static_assert(MLX5_CQE_STATUS_HW_OWN < 0, "Must be negative value");
2189 static_assert(MLX5_CQE_STATUS_SW_OWN < 0, "Must be negative value");
2191 volatile struct mlx5_cqe *cqe;
2193 cqe = &txq->cqes[txq->cq_ci & txq->cqe_m];
2194 ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci);
2195 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
2196 if (likely(ret != MLX5_CQE_STATUS_ERR)) {
2197 /* No new CQEs in completion queue. */
2198 MLX5_ASSERT(ret == MLX5_CQE_STATUS_HW_OWN);
2202 * Some error occurred, try to restart.
2203 * There is no barrier after the WQE related Doorbell
2204 * is written, so make sure all writes are completed
2205 * here before we might perform an SQ reset.
2208 ret = mlx5_tx_error_cqe_handle
2209 (txq, (volatile struct mlx5_err_cqe *)cqe);
2210 if (unlikely(ret < 0)) {
2212 * Some error occurred on queue error
2213 * handling, we do not advance the index
2214 * here, allowing to retry on next call.
2219 * We are going to fetch all entries with
2220 * MLX5_CQE_SYNDROME_WR_FLUSH_ERR status.
2221 * The send queue is supposed to be empty.
2223 ring_doorbell = true;
2225 txq->cq_pi = txq->cq_ci;
2229 /* Normal transmit completion. */
2230 MLX5_ASSERT(txq->cq_ci != txq->cq_pi);
2231 MLX5_ASSERT((txq->fcqs[txq->cq_ci & txq->cqe_m] >> 16) ==
2233 ring_doorbell = true;
2237 * We have to restrict the amount of processed CQEs
2238 * in one tx_burst routine call. The CQ may be large
2239 * and many CQEs may be updated by the NIC in one
2240 * transaction. Freeing buffers is time consuming, and
2241 * multiple iterations may introduce significant latency.
2244 if (likely(--count == 0))
2247 if (likely(ring_doorbell)) {
2248 /* Ring doorbell to notify hardware. */
2249 rte_compiler_barrier();
2250 *txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
2251 mlx5_tx_comp_flush(txq, last_cqe, olx);
2256 * Check if the completion request flag should be set in the last WQE.
2257 * Both pushed mbufs and WQEs are monitored and the completion request
2258 * flag is set if either threshold is reached.
2261 * Pointer to TX queue structure.
2263 * Pointer to burst routine local context.
2265 * Configured Tx offloads mask. It is fully defined at
2266 * compile time and may be used for optimization.
2268 static __rte_always_inline void
2269 mlx5_tx_request_completion(struct mlx5_txq_data *restrict txq,
2270 struct mlx5_txq_local *restrict loc,
2273 uint16_t head = txq->elts_head;
2276 part = MLX5_TXOFF_CONFIG(INLINE) ?
2277 0 : loc->pkts_sent - loc->pkts_copy;
2279 if ((uint16_t)(head - txq->elts_comp) >= MLX5_TX_COMP_THRESH ||
2280 (MLX5_TXOFF_CONFIG(INLINE) &&
2281 (uint16_t)(txq->wqe_ci - txq->wqe_comp) >= txq->wqe_thres)) {
2282 volatile struct mlx5_wqe *last = loc->wqe_last;
2285 txq->elts_comp = head;
2286 if (MLX5_TXOFF_CONFIG(INLINE))
2287 txq->wqe_comp = txq->wqe_ci;
2288 /* Request unconditional completion on last WQE. */
2289 last->cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
2290 MLX5_COMP_MODE_OFFSET);
2291 /* Save elts_head in the dedicated free-on-completion queue. */
2292 #ifdef RTE_LIBRTE_MLX5_DEBUG
2293 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head |
2294 (last->cseg.opcode >> 8) << 16;
2296 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head;
2298 /* A CQE slot must always be available. */
2299 MLX5_ASSERT((txq->cq_pi - txq->cq_ci) <= txq->cqe_s);
2304 * DPDK callback to check the status of a tx descriptor.
2309 * The index of the descriptor in the ring.
2312 * The status of the tx descriptor.
2315 mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
2317 struct mlx5_txq_data *restrict txq = tx_queue;
2320 mlx5_tx_handle_completion(txq, 0);
2321 used = txq->elts_head - txq->elts_tail;
2323 return RTE_ETH_TX_DESC_FULL;
2324 return RTE_ETH_TX_DESC_DONE;
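/*
 * Editor's usage sketch (not part of the driver): applications reach
 * this callback through the generic ethdev API; port_id, queue_id and
 * offset below are assumed application-side values.
 *
 *	int st = rte_eth_tx_descriptor_status(port_id, queue_id, offset);
 *	if (st == RTE_ETH_TX_DESC_DONE)
 *		; // descriptor was processed, the slot can be reused
 *	else if (st == RTE_ETH_TX_DESC_FULL)
 *		; // still owned by the ring, completion not seen yet
 */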
2328 * Build the Control Segment with specified opcode:
2329 * - MLX5_OPCODE_SEND
2330 * - MLX5_OPCODE_ENHANCED_MPSW
2334 * Pointer to TX queue structure.
2336 * Pointer to burst routine local context.
2338 * Pointer to WQE to fill with built Control Segment.
2340 * Supposed length of WQE in segments.
2342 * SQ WQE opcode to put into Control Segment.
2344 * Configured Tx offloads mask. It is fully defined at
2345 * compile time and may be used for optimization.
2347 static __rte_always_inline void
2348 mlx5_tx_cseg_init(struct mlx5_txq_data *restrict txq,
2349 struct mlx5_txq_local *restrict loc __rte_unused,
2350 struct mlx5_wqe *restrict wqe,
2352 unsigned int opcode,
2353 unsigned int olx __rte_unused)
2355 struct mlx5_wqe_cseg *restrict cs = &wqe->cseg;
2357 /* For legacy MPW replace the EMPW by TSO with modifier. */
2358 if (MLX5_TXOFF_CONFIG(MPW) && opcode == MLX5_OPCODE_ENHANCED_MPSW)
2359 opcode = MLX5_OPCODE_TSO | MLX5_OPC_MOD_MPW << 24;
2360 cs->opcode = rte_cpu_to_be_32((txq->wqe_ci << 8) | opcode);
2361 cs->sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
2362 cs->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
2363 MLX5_COMP_MODE_OFFSET);
2364 cs->misc = RTE_BE32(0);
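/*
 * Editor's illustration of the opcode dword built above (a sketch
 * following the shifts in this function, not a HW spec excerpt): the
 * low byte carries the opcode, bits 8..23 the WQE index, and for the
 * legacy MPW replacement the opcode modifier occupies the top byte:
 *
 *	uint32_t op  = (txq->wqe_ci << 8) | MLX5_OPCODE_SEND;
 *	uint32_t mpw = (txq->wqe_ci << 8) |
 *		       (MLX5_OPCODE_TSO | MLX5_OPC_MOD_MPW << 24);
 */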
2368 * Build the Ethernet Segment without inlined data.
2369 * Supports Software Parser, Checksums and VLAN
2370 * insertion Tx offload features.
2373 * Pointer to TX queue structure.
2375 * Pointer to burst routine local context.
2377 * Pointer to WQE to fill with built Ethernet Segment.
2379 * Configured Tx offloads mask. It is fully defined at
2380 * compile time and may be used for optimization.
2382 static __rte_always_inline void
2383 mlx5_tx_eseg_none(struct mlx5_txq_data *restrict txq __rte_unused,
2384 struct mlx5_txq_local *restrict loc,
2385 struct mlx5_wqe *restrict wqe,
2388 struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
2392 * Calculate and set check sum flags first, dword field
2393 * in segment may be shared with Software Parser flags.
2395 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2396 es->flags = rte_cpu_to_le_32(csum);
2398 * Calculate and set Software Parser offsets and flags.
2399 * These flags are set for custom UDP and IP tunnel packets.
2401 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2402 /* Fill metadata field if needed. */
2403 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2404 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2405 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2406 /* Engage VLAN tag insertion feature if requested. */
2407 if (MLX5_TXOFF_CONFIG(VLAN) &&
2408 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
2410 * We should get here only if the device supports
2411 * this feature correctly.
2413 MLX5_ASSERT(txq->vlan_en);
2414 es->inline_hdr = rte_cpu_to_be_32(MLX5_ETH_WQE_VLAN_INSERT |
2415 loc->mbuf->vlan_tci);
2417 es->inline_hdr = RTE_BE32(0);
2422 * Build the Ethernet Segment with minimal inlined data
2423 * of MLX5_ESEG_MIN_INLINE_SIZE bytes length. This is
2424 * used to fill the gap in single WQEBB WQEs.
2425 * Supports Software Parser, Checksums and VLAN
2426 * insertion Tx offload features.
2429 * Pointer to TX queue structure.
2431 * Pointer to burst routine local context.
2433 * Pointer to WQE to fill with built Ethernet Segment.
2435 * Length of VLAN tag insertion if any.
2437 * Configured Tx offloads mask. It is fully defined at
2438 * compile time and may be used for optimization.
2440 static __rte_always_inline void
2441 mlx5_tx_eseg_dmin(struct mlx5_txq_data *restrict txq __rte_unused,
2442 struct mlx5_txq_local *restrict loc,
2443 struct mlx5_wqe *restrict wqe,
2447 struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
2449 uint8_t *psrc, *pdst;
2452 * Calculate and set check sum flags first, dword field
2453 * in segment may be shared with Software Parser flags.
2455 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2456 es->flags = rte_cpu_to_le_32(csum);
2458 * Calculate and set Software Parser offsets and flags.
2459 * These flags are set for custom UDP and IP tunnel packets.
2461 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2462 /* Fill metadata field if needed. */
2463 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2464 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2465 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2466 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2468 sizeof(rte_v128u32_t)),
2469 "invalid Ethernet Segment data size");
2470 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2472 sizeof(struct rte_vlan_hdr) +
2473 2 * RTE_ETHER_ADDR_LEN),
2474 "invalid Ethernet Segment data size");
2475 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
2476 es->inline_hdr_sz = RTE_BE16(MLX5_ESEG_MIN_INLINE_SIZE);
2477 es->inline_data = *(unaligned_uint16_t *)psrc;
2478 psrc += sizeof(uint16_t);
2479 pdst = (uint8_t *)(es + 1);
2480 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2481 /* Implement VLAN tag insertion as part of the inline data. */
2482 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
2483 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2484 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2485 /* Insert VLAN ethertype + VLAN tag. */
2486 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2487 ((RTE_ETHER_TYPE_VLAN << 16) |
2488 loc->mbuf->vlan_tci);
2489 pdst += sizeof(struct rte_vlan_hdr);
2490 /* Copy the remaining two bytes from the packet data. */
2491 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
2492 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
2494 /* Fill the gap in the title WQEBB with inline data. */
2495 rte_mov16(pdst, psrc);
2500 * Build the Ethernet Segment with entire packet
2501 * data inlining. Checks the boundary of WQEBB and
2502 * ring buffer wrapping, supports Software Parser,
2503 * Checksums and VLAN insertion Tx offload features.
2506 * Pointer to TX queue structure.
2508 * Pointer to burst routine local context.
2510 * Pointer to WQE to fill with built Ethernet Segment.
2512 * Length of VLAN tag insertion if any.
2514 * Length of data to inline (VLAN included, if any).
2516 * TSO flag, set mss field from the packet.
2518 * Configured Tx offloads mask. It is fully defined at
2519 * compile time and may be used for optimization.
2522 * Pointer to the next Data Segment (aligned and wrapped around).
2524 static __rte_always_inline struct mlx5_wqe_dseg *
2525 mlx5_tx_eseg_data(struct mlx5_txq_data *restrict txq,
2526 struct mlx5_txq_local *restrict loc,
2527 struct mlx5_wqe *restrict wqe,
2533 struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
2535 uint8_t *psrc, *pdst;
2539 * Calculate and set check sum flags first, dword field
2540 * in segment may be shared with Software Parser flags.
2542 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2545 csum |= loc->mbuf->tso_segsz;
2546 es->flags = rte_cpu_to_be_32(csum);
2548 es->flags = rte_cpu_to_le_32(csum);
2551 * Calculate and set Software Parser offsets and flags.
2552 * These flags are set for custom UDP and IP tunnel packets.
2554 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2555 /* Fill metadata field if needed. */
2556 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2557 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2558 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2559 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2561 sizeof(rte_v128u32_t)),
2562 "invalid Ethernet Segment data size");
2563 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2565 sizeof(struct rte_vlan_hdr) +
2566 2 * RTE_ETHER_ADDR_LEN),
2567 "invalid Ethernet Segment data size");
2568 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
2569 es->inline_hdr_sz = rte_cpu_to_be_16(inlen);
2570 es->inline_data = *(unaligned_uint16_t *)psrc;
2571 psrc += sizeof(uint16_t);
2572 pdst = (uint8_t *)(es + 1);
2573 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2574 /* Implement VLAN tag insertion as part of the inline data. */
2575 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
2576 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2577 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2578 /* Insert VLAN ethertype + VLAN tag. */
2579 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2580 ((RTE_ETHER_TYPE_VLAN << 16) |
2581 loc->mbuf->vlan_tci);
2582 pdst += sizeof(struct rte_vlan_hdr);
2584 /* Copy the remaining two bytes from the packet data. */
2584 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
2585 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
2586 psrc += sizeof(uint16_t);
2588 /* Fill the gap in the title WQEBB with inline data. */
2589 rte_mov16(pdst, psrc);
2590 psrc += sizeof(rte_v128u32_t);
2592 pdst = (uint8_t *)(es + 2);
2593 MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
2594 MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
2595 inlen -= MLX5_ESEG_MIN_INLINE_SIZE;
2597 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
2598 return (struct mlx5_wqe_dseg *)pdst;
2601 * The WQEBB space availability is checked by caller.
2602 * Here we should be aware of WQE ring buffer wraparound only.
2604 part = (uint8_t *)txq->wqes_end - pdst;
2605 part = RTE_MIN(part, inlen);
2607 rte_memcpy(pdst, psrc, part);
2609 if (likely(!inlen)) {
2611 * If return value is not used by the caller
2612 * the code below will be optimized out.
2615 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2616 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
2617 pdst = (uint8_t *)txq->wqes;
2618 return (struct mlx5_wqe_dseg *)pdst;
2620 pdst = (uint8_t *)txq->wqes;
2627 * Copy data from a chain of mbufs to the specified linear buffer.
2628 * If the data from an mbuf is copied completely, that mbuf is freed.
2629 * The local structure is used to keep the byte stream state
2630 * (the current mbuf and the offset within it).
2633 * Pointer to the destination linear buffer.
2635 * Pointer to burst routine local context.
2637 * Length of data to be copied.
2639 * Length of data to be copied ignoring no inline hint.
2641 * Configured Tx offloads mask. It is fully defined at
2642 * compile time and may be used for optimization.
2645 * Number of actual copied data bytes. This is always greater than or
2646 * equal to the "must" parameter and might be less than "len" if the
2647 * no-inline hint flag is encountered.
2649 static __rte_always_inline unsigned int
2650 mlx5_tx_mseg_memcpy(uint8_t *pdst,
2651 struct mlx5_txq_local *restrict loc,
2654 unsigned int olx __rte_unused)
2656 struct rte_mbuf *mbuf;
2657 unsigned int part, dlen, copy = 0;
2661 MLX5_ASSERT(must <= len);
2663 /* Allow zero length packets, must check first. */
2664 dlen = rte_pktmbuf_data_len(loc->mbuf);
2665 if (dlen <= loc->mbuf_off) {
2666 /* Exhausted packet, just free. */
2668 loc->mbuf = mbuf->next;
2669 rte_pktmbuf_free_seg(mbuf);
2671 MLX5_ASSERT(loc->mbuf_nseg > 1);
2672 MLX5_ASSERT(loc->mbuf);
2674 if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
2679 * We already copied the minimal
2680 * requested amount of data.
2685 if (diff <= rte_pktmbuf_data_len(loc->mbuf)) {
2687 * Copy only the minimal required
2688 * part of the data buffer.
2695 dlen -= loc->mbuf_off;
2696 psrc = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
2698 part = RTE_MIN(len, dlen);
2699 rte_memcpy(pdst, psrc, part);
2701 loc->mbuf_off += part;
2704 if (loc->mbuf_off >= rte_pktmbuf_data_len(loc->mbuf)) {
2706 /* Exhausted packet, just free. */
2708 loc->mbuf = mbuf->next;
2709 rte_pktmbuf_free_seg(mbuf);
2711 MLX5_ASSERT(loc->mbuf_nseg >= 1);
2721 * Build the Ethernet Segment with inlined data from
2722 * multi-segment packet. Checks the boundary of WQEBB
2723 * and ring buffer wrapping, supports Software Parser,
2724 * Checksums and VLAN insertion Tx offload features.
2727 * Pointer to TX queue structure.
2729 * Pointer to burst routine local context.
2731 * Pointer to WQE to fill with built Ethernet Segment.
2733 * Length of VLAN tag insertion if any.
2735 * Length of data to inline (VLAN included, if any).
2737 * TSO flag, set mss field from the packet.
2739 * Configured Tx offloads mask. It is fully defined at
2740 * compile time and may be used for optimization.
2743 * Pointer to the next Data Segment (aligned and
2744 * possibly NOT wrapped around - caller should do
2745 * wrapping check on its own).
2747 static __rte_always_inline struct mlx5_wqe_dseg *
2748 mlx5_tx_eseg_mdat(struct mlx5_txq_data *restrict txq,
2749 struct mlx5_txq_local *restrict loc,
2750 struct mlx5_wqe *restrict wqe,
2756 struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
2759 unsigned int part, tlen = 0;
2762 * Calculate and set check sum flags first, uint32_t field
2763 * in segment may be shared with Software Parser flags.
2765 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2768 csum |= loc->mbuf->tso_segsz;
2769 es->flags = rte_cpu_to_be_32(csum);
2771 es->flags = rte_cpu_to_le_32(csum);
2774 * Calculate and set Software Parser offsets and flags.
2775 * These flags are set for custom UDP and IP tunnel packets.
2777 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2778 /* Fill metadata field if needed. */
2779 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2780 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2781 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2782 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2784 sizeof(rte_v128u32_t)),
2785 "invalid Ethernet Segment data size");
2786 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2788 sizeof(struct rte_vlan_hdr) +
2789 2 * RTE_ETHER_ADDR_LEN),
2790 "invalid Ethernet Segment data size");
2791 MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
2792 pdst = (uint8_t *)&es->inline_data;
2793 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2794 /* Implement VLAN tag insertion as part of the inline data. */
2795 mlx5_tx_mseg_memcpy(pdst, loc,
2796 2 * RTE_ETHER_ADDR_LEN,
2797 2 * RTE_ETHER_ADDR_LEN, olx);
2798 pdst += 2 * RTE_ETHER_ADDR_LEN;
2799 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2800 ((RTE_ETHER_TYPE_VLAN << 16) |
2801 loc->mbuf->vlan_tci);
2802 pdst += sizeof(struct rte_vlan_hdr);
2803 tlen += 2 * RTE_ETHER_ADDR_LEN + sizeof(struct rte_vlan_hdr);
2805 MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
2807 * The WQEBB space availability is checked by caller.
2808 * Here we should be aware of WQE ring buffer wraparound only.
2810 part = (uint8_t *)txq->wqes_end - pdst;
2811 part = RTE_MIN(part, inlen - tlen);
2817 * Copying may be interrupted inside the routine
2818 * if it runs into the no-inline hint flag.
2820 copy = tlen >= txq->inlen_mode ? 0 : (txq->inlen_mode - tlen);
2821 copy = mlx5_tx_mseg_memcpy(pdst, loc, part, copy, olx);
2823 if (likely(inlen <= tlen) || copy < part) {
2824 es->inline_hdr_sz = rte_cpu_to_be_16(tlen);
2826 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2827 return (struct mlx5_wqe_dseg *)pdst;
2829 pdst = (uint8_t *)txq->wqes;
2830 part = inlen - tlen;
2835 * Build the Data Segment of pointer type.
2838 * Pointer to TX queue structure.
2840 * Pointer to burst routine local context.
2842 * Pointer to WQE to fill with built Data Segment.
2844 * Data buffer to point.
2846 * Data buffer length.
2848 * Configured Tx offloads mask. It is fully defined at
2849 * compile time and may be used for optimization.
2851 static __rte_always_inline void
2852 mlx5_tx_dseg_ptr(struct mlx5_txq_data *restrict txq,
2853 struct mlx5_txq_local *restrict loc,
2854 struct mlx5_wqe_dseg *restrict dseg,
2857 unsigned int olx __rte_unused)
2861 dseg->bcount = rte_cpu_to_be_32(len);
2862 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
2863 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
2867 * Build the Data Segment of pointer type or inline
2868 * if data length is less than buffer in minimal
2869 * Data Segment size.
2872 * Pointer to TX queue structure.
2874 * Pointer to burst routine local context.
2876 * Pointer to WQE to fill with built Data Segment.
2878 * Data buffer to point.
2880 * Data buffer length.
2882 * Configured Tx offloads mask. It is fully defined at
2883 * compile time and may be used for optimization.
2885 static __rte_always_inline void
2886 mlx5_tx_dseg_iptr(struct mlx5_txq_data *restrict txq,
2887 struct mlx5_txq_local *restrict loc,
2888 struct mlx5_wqe_dseg *restrict dseg,
2891 unsigned int olx __rte_unused)
2897 if (len > MLX5_DSEG_MIN_INLINE_SIZE) {
2898 dseg->bcount = rte_cpu_to_be_32(len);
2899 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
2900 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
2904 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
2905 /* Unrolled implementation of generic rte_memcpy. */
2906 dst = (uintptr_t)&dseg->inline_data[0];
2907 src = (uintptr_t)buf;
2909 #ifdef RTE_ARCH_STRICT_ALIGN
2910 MLX5_ASSERT(dst == RTE_PTR_ALIGN(dst, sizeof(uint32_t)));
2911 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
2912 dst += sizeof(uint32_t);
2913 src += sizeof(uint32_t);
2914 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
2915 dst += sizeof(uint32_t);
2916 src += sizeof(uint32_t);
2918 *(uint64_t *)dst = *(unaligned_uint64_t *)src;
2919 dst += sizeof(uint64_t);
2920 src += sizeof(uint64_t);
2924 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
2925 dst += sizeof(uint32_t);
2926 src += sizeof(uint32_t);
2929 *(uint16_t *)dst = *(unaligned_uint16_t *)src;
2930 dst += sizeof(uint16_t);
2931 src += sizeof(uint16_t);
2934 *(uint8_t *)dst = *(uint8_t *)src;
2938 * Build the Data Segment of inlined data from single
2939 * segment packet, no VLAN insertion.
2942 * Pointer to TX queue structure.
2944 * Pointer to burst routine local context.
2946 * Pointer to WQE to fill with built Data Segment.
2948 * Data buffer to point.
2950 * Data buffer length.
2952 * Configured Tx offloads mask. It is fully defined at
2953 * compile time and may be used for optimization.
2956 * Pointer to the next Data Segment after inlined data.
2957 * Ring buffer wraparound check is needed. We do not
2958 * do it here because it may not be needed for the
2959 * last packet in the eMPW session.
2961 static __rte_always_inline struct mlx5_wqe_dseg *
2962 mlx5_tx_dseg_empw(struct mlx5_txq_data *restrict txq,
2963 struct mlx5_txq_local *restrict loc __rte_unused,
2964 struct mlx5_wqe_dseg *restrict dseg,
2967 unsigned int olx __rte_unused)
2972 if (!MLX5_TXOFF_CONFIG(MPW)) {
2973 /* Store the descriptor byte counter for eMPW sessions. */
2974 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
2975 pdst = &dseg->inline_data[0];
2977 /* The entire legacy MPW session counter is stored on close. */
2978 pdst = (uint8_t *)dseg;
2981 * The WQEBB space availability is checked by caller.
2982 * Here we should be aware of WQE ring buffer wraparound only.
2984 part = (uint8_t *)txq->wqes_end - pdst;
2985 part = RTE_MIN(part, len);
2987 rte_memcpy(pdst, buf, part);
2991 if (!MLX5_TXOFF_CONFIG(MPW))
2992 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2993 /* Note: no final wraparound check here. */
2994 return (struct mlx5_wqe_dseg *)pdst;
2996 pdst = (uint8_t *)txq->wqes;
3003 * Build the Data Segment of inlined data from single
3004 * segment packet with VLAN insertion.
3007 * Pointer to TX queue structure.
3009 * Pointer to burst routine local context.
3011 * Pointer to the dseg to fill with the built Data Segment.
3013 * Data buffer to point.
3015 * Data buffer length.
3017 * Configured Tx offloads mask. It is fully defined at
3018 * compile time and may be used for optimization.
3021 * Pointer to the next Data Segment after inlined data.
3022 * Ring buffer wraparound check is needed.
3024 static __rte_always_inline struct mlx5_wqe_dseg *
3025 mlx5_tx_dseg_vlan(struct mlx5_txq_data *restrict txq,
3026 struct mlx5_txq_local *restrict loc __rte_unused,
3027 struct mlx5_wqe_dseg *restrict dseg,
3030 unsigned int olx __rte_unused)
3036 MLX5_ASSERT(len > MLX5_ESEG_MIN_INLINE_SIZE);
3037 static_assert(MLX5_DSEG_MIN_INLINE_SIZE ==
3038 (2 * RTE_ETHER_ADDR_LEN),
3039 "invalid Data Segment data size");
3040 if (!MLX5_TXOFF_CONFIG(MPW)) {
3041 /* Store the descriptor byte counter for eMPW sessions. */
3042 dseg->bcount = rte_cpu_to_be_32
3043 ((len + sizeof(struct rte_vlan_hdr)) |
3044 MLX5_ETH_WQE_DATA_INLINE);
3045 pdst = &dseg->inline_data[0];
3047 /* The entire legacy MPW session counter is stored on close. */
3048 pdst = (uint8_t *)dseg;
3050 memcpy(pdst, buf, MLX5_DSEG_MIN_INLINE_SIZE);
3051 buf += MLX5_DSEG_MIN_INLINE_SIZE;
3052 pdst += MLX5_DSEG_MIN_INLINE_SIZE;
3053 len -= MLX5_DSEG_MIN_INLINE_SIZE;
3054 /* Insert VLAN ethertype + VLAN tag. Pointer is aligned. */
3055 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
3056 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
3057 pdst = (uint8_t *)txq->wqes;
3058 *(uint32_t *)pdst = rte_cpu_to_be_32((RTE_ETHER_TYPE_VLAN << 16) |
3059 loc->mbuf->vlan_tci);
3060 pdst += sizeof(struct rte_vlan_hdr);
3062 * The WQEBB space availability is checked by caller.
3063 * Here we should be aware of WQE ring buffer wraparound only.
3065 part = (uint8_t *)txq->wqes_end - pdst;
3066 part = RTE_MIN(part, len);
3068 rte_memcpy(pdst, buf, part);
3072 if (!MLX5_TXOFF_CONFIG(MPW))
3073 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
3074 /* Note: no final wraparound check here. */
3075 return (struct mlx5_wqe_dseg *)pdst;
3077 pdst = (uint8_t *)txq->wqes;
3084 * Build the Ethernet Segment with optionally inlined data with
3085 * VLAN insertion and following Data Segments (if any) from
3086 * multi-segment packet. Used by ordinary send and TSO.
3089 * Pointer to TX queue structure.
3091 * Pointer to burst routine local context.
3093 * Pointer to WQE to fill with built Ethernet/Data Segments.
3095 * Length of VLAN header to insert, 0 means no VLAN insertion.
3097 * Data length to inline. For TSO this parameter specifies
3098 * exact value, for ordinary send routine can be aligned by
3099 * caller to provide better WQE space saving and data buffer
3100 * start address alignment. This length includes the VLAN header being inserted.
3103 * Zero means ordinary send, inlined data can be extended,
3104 * otherwise this is TSO, inlined data length is fixed.
3106 * Configured Tx offloads mask. It is fully defined at
3107 * compile time and may be used for optimization.
3110 * Actual size of built WQE in segments.
3112 static __rte_always_inline unsigned int
3113 mlx5_tx_mseg_build(struct mlx5_txq_data *restrict txq,
3114 struct mlx5_txq_local *restrict loc,
3115 struct mlx5_wqe *restrict wqe,
3119 unsigned int olx __rte_unused)
3121 struct mlx5_wqe_dseg *restrict dseg;
3124 MLX5_ASSERT((rte_pktmbuf_pkt_len(loc->mbuf) + vlan) >= inlen);
3125 loc->mbuf_nseg = NB_SEGS(loc->mbuf);
3128 dseg = mlx5_tx_eseg_mdat(txq, loc, wqe, vlan, inlen, tso, olx);
3129 if (!loc->mbuf_nseg)
3132 * There are still some mbuf remaining, not inlined.
3133 * The first mbuf may be partially inlined and we
3134 * must process the possible non-zero data offset.
3136 if (loc->mbuf_off) {
3141 * Exhausted packets must have been dropped before.
3142 * A non-zero offset means some data
3143 * remains in the packet.
3145 MLX5_ASSERT(loc->mbuf_off < rte_pktmbuf_data_len(loc->mbuf));
3146 MLX5_ASSERT(rte_pktmbuf_data_len(loc->mbuf));
3147 dptr = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
3149 dlen = rte_pktmbuf_data_len(loc->mbuf) - loc->mbuf_off;
3151 * Build the pointer/minimal data Data Segment.
3152 * Do ring buffer wrapping check in advance.
3154 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3155 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3156 mlx5_tx_dseg_iptr(txq, loc, dseg, dptr, dlen, olx);
3157 /* Store the mbuf to be freed on completion. */
3158 MLX5_ASSERT(loc->elts_free);
3159 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3162 if (--loc->mbuf_nseg == 0)
3164 loc->mbuf = loc->mbuf->next;
3168 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
3169 struct rte_mbuf *mbuf;
3171 /* Zero length segment found, just skip. */
3173 loc->mbuf = loc->mbuf->next;
3174 rte_pktmbuf_free_seg(mbuf);
3175 if (--loc->mbuf_nseg == 0)
3178 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3179 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3182 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3183 rte_pktmbuf_data_len(loc->mbuf), olx);
3184 MLX5_ASSERT(loc->elts_free);
3185 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3188 if (--loc->mbuf_nseg == 0)
3190 loc->mbuf = loc->mbuf->next;
3195 /* Calculate actual segments used from the dseg pointer. */
3196 if ((uintptr_t)wqe < (uintptr_t)dseg)
3197 ds = ((uintptr_t)dseg - (uintptr_t)wqe) / MLX5_WSEG_SIZE;
3199 ds = (((uintptr_t)dseg - (uintptr_t)wqe) +
3200 txq->wqe_s * MLX5_WQE_SIZE) / MLX5_WSEG_SIZE;
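/*
 * Editor's worked example for the wrapped case above (assuming the
 * usual 64-byte WQE and 16-byte WSEG sizes): with a 64-entry SQ the
 * ring holds 64 * 64 = 4096 bytes; if the WQE starts at byte offset
 * 4032 and dseg has wrapped to offset 64, then
 *
 *	ds = ((64 - 4032) + 4096) / 16 = 8 segments,
 *
 * i.e. the negative pointer difference is biased by the ring size in
 * bytes to yield the real number of consumed 16-byte segments.
 */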
3205 * Tx one packet function for multi-segment TSO. Supports all
3206 * types of Tx offloads, uses MLX5_OPCODE_TSO to build WQEs,
3207 * sends one packet per WQE.
3209 * This routine is responsible for storing the processed mbuf
3210 * into the elts ring buffer and updating elts_head.
3213 * Pointer to TX queue structure.
3215 * Pointer to burst routine local context.
3217 * Configured Tx offloads mask. It is fully defined at
3218 * compile time and may be used for optimization.
3221 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3222 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3223 * Local context variables partially updated.
3225 static __rte_always_inline enum mlx5_txcmp_code
3226 mlx5_tx_packet_multi_tso(struct mlx5_txq_data *restrict txq,
3227 struct mlx5_txq_local *restrict loc,
3230 struct mlx5_wqe *restrict wqe;
3231 unsigned int ds, dlen, inlen, ntcp, vlan = 0;
3234 * Calculate data length to be inlined to estimate
3235 * the required space in WQE ring buffer.
3237 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
3238 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3239 vlan = sizeof(struct rte_vlan_hdr);
3240 inlen = loc->mbuf->l2_len + vlan +
3241 loc->mbuf->l3_len + loc->mbuf->l4_len;
3242 if (unlikely((!inlen || !loc->mbuf->tso_segsz)))
3243 return MLX5_TXCMP_CODE_ERROR;
3244 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
3245 inlen += loc->mbuf->outer_l2_len + loc->mbuf->outer_l3_len;
3246 /* Packet must contain all TSO headers. */
3247 if (unlikely(inlen > MLX5_MAX_TSO_HEADER ||
3248 inlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
3249 inlen > (dlen + vlan)))
3250 return MLX5_TXCMP_CODE_ERROR;
3251 MLX5_ASSERT(inlen >= txq->inlen_mode);
3253 * Check whether there are enough free WQEBBs:
3255 * - Ethernet Segment
3256 * - First Segment of inlined Ethernet data
3257 * - ... data continued ...
3258 * - Data Segments of pointer/min inline type
3260 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
3261 MLX5_ESEG_MIN_INLINE_SIZE +
3263 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3264 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3265 return MLX5_TXCMP_CODE_EXIT;
3266 /* Check for maximal WQE size. */
3267 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3268 return MLX5_TXCMP_CODE_ERROR;
3269 #ifdef MLX5_PMD_SOFT_COUNTERS
3270 /* Update sent data bytes/packets counters. */
3271 ntcp = (dlen - (inlen - vlan) + loc->mbuf->tso_segsz - 1) /
3272 loc->mbuf->tso_segsz;
3274 * One will be added for mbuf itself
3275 * at the end of the mlx5_tx_burst from
3276 * loc->pkts_sent field.
3279 txq->stats.opackets += ntcp;
3280 txq->stats.obytes += dlen + vlan + ntcp * inlen;
3282 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3283 loc->wqe_last = wqe;
3284 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_TSO, olx);
3285 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 1, olx);
3286 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3287 txq->wqe_ci += (ds + 3) / 4;
3288 loc->wqe_free -= (ds + 3) / 4;
3289 return MLX5_TXCMP_CODE_MULTI;
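/*
 * Editor's worked example for the WQE size estimate above (assuming
 * MLX5_ESEG_MIN_INLINE_SIZE of 18 bytes, as implied by the
 * static_asserts earlier, and 16-byte WSEGs): a 3-segment TSO packet
 * with 58 bytes of headers to inline gives
 *
 *	ds = 3 + 2 + (58 - 18 + 16 - 1) / 16 = 8 segments,
 *	WQEBBs = (8 + 3) / 4 = 2,
 *
 * so at least two free WQEBBs are required to send this packet.
 */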
3293 * Tx one packet function for multi-segment SEND. Supports all
3294 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
3295 * sends one packet per WQE, without any data inlining in the Ethernet Segment.
3298 * This routine is responsible for storing the processed mbuf
3299 * into the elts ring buffer and updating elts_head.
3302 * Pointer to TX queue structure.
3304 * Pointer to burst routine local context.
3306 * Configured Tx offloads mask. It is fully defined at
3307 * compile time and may be used for optimization.
3310 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3311 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3312 * Local context variables partially updated.
3314 static __rte_always_inline enum mlx5_txcmp_code
3315 mlx5_tx_packet_multi_send(struct mlx5_txq_data *restrict txq,
3316 struct mlx5_txq_local *restrict loc,
3319 struct mlx5_wqe_dseg *restrict dseg;
3320 struct mlx5_wqe *restrict wqe;
3321 unsigned int ds, nseg;
3323 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
3325 * No inlining at all - it means that saving CPU cycles
3326 * was prioritized at configuration time, so we should not
3327 * copy any packet data into the WQE.
3329 nseg = NB_SEGS(loc->mbuf);
3331 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3332 return MLX5_TXCMP_CODE_EXIT;
3333 /* Check for maximal WQE size. */
3334 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3335 return MLX5_TXCMP_CODE_ERROR;
3337 * Some Tx offloads may cause an error if
3338 * packet is not long enough, check against
3339 * assumed minimal length.
3341 if (rte_pktmbuf_pkt_len(loc->mbuf) <= MLX5_ESEG_MIN_INLINE_SIZE)
3342 return MLX5_TXCMP_CODE_ERROR;
3343 #ifdef MLX5_PMD_SOFT_COUNTERS
3344 /* Update sent data bytes counter. */
3345 txq->stats.obytes += rte_pktmbuf_pkt_len(loc->mbuf);
3346 if (MLX5_TXOFF_CONFIG(VLAN) &&
3347 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3348 txq->stats.obytes += sizeof(struct rte_vlan_hdr);
3351 * SEND WQE, one WQEBB:
3352 * - Control Segment, SEND opcode
3353 * - Ethernet Segment, optional VLAN, no inline
3354 * - Data Segments, pointer only type
3356 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3357 loc->wqe_last = wqe;
3358 mlx5_tx_cseg_init(txq, loc, wqe, ds, MLX5_OPCODE_SEND, olx);
3359 mlx5_tx_eseg_none(txq, loc, wqe, olx);
3360 dseg = &wqe->dseg[0];
3362 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
3363 struct rte_mbuf *mbuf;
3366 * Zero length segment found, we have to
3367 * correct the total size of the WQE in segments.
3368 * It is supposed to be a rare occasion, so
3369 * in the normal case (no zero length segments)
3370 * we avoid extra writing to the Control Segment.
3374 wqe->cseg.sq_ds -= RTE_BE32(1);
3376 loc->mbuf = mbuf->next;
3377 rte_pktmbuf_free_seg(mbuf);
3383 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3384 rte_pktmbuf_data_len(loc->mbuf), olx);
3385 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3390 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3391 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3392 loc->mbuf = loc->mbuf->next;
3395 txq->wqe_ci += (ds + 3) / 4;
3396 loc->wqe_free -= (ds + 3) / 4;
3397 return MLX5_TXCMP_CODE_MULTI;
3401 * Tx one packet function for multi-segment SEND. Supports all
3402 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
3403 * sends one packet per WQE, with data inlining in
3404 * Ethernet Segment and minimal Data Segments.
3406 * This routine is responsible for storing the processed mbuf
3407 * into the elts ring buffer and updating elts_head.
3410 * Pointer to TX queue structure.
3412 * Pointer to burst routine local context.
3414 * Configured Tx offloads mask. It is fully defined at
3415 * compile time and may be used for optimization.
3418 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3419 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3420 * Local context variables partially updated.
3422 static __rte_always_inline enum mlx5_txcmp_code
3423 mlx5_tx_packet_multi_inline(struct mlx5_txq_data *restrict txq,
3424 struct mlx5_txq_local *restrict loc,
3427 struct mlx5_wqe *restrict wqe;
3428 unsigned int ds, inlen, dlen, vlan = 0;
3430 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
3431 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
3433 * First calculate data length to be inlined
3434 * to estimate the required space for WQE.
3436 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
3437 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3438 vlan = sizeof(struct rte_vlan_hdr);
3439 inlen = dlen + vlan;
3440 /* Check against minimal length. */
3441 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
3442 return MLX5_TXCMP_CODE_ERROR;
3443 MLX5_ASSERT(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
3444 if (inlen > txq->inlen_send ||
3445 loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
3446 struct rte_mbuf *mbuf;
3451 * Packet length exceeds the allowed inline
3452 * data length, check whether the minimal
3453 * inlining is required.
3455 if (txq->inlen_mode) {
3456 MLX5_ASSERT(txq->inlen_mode >=
3457 MLX5_ESEG_MIN_INLINE_SIZE);
3458 MLX5_ASSERT(txq->inlen_mode <= txq->inlen_send);
3459 inlen = txq->inlen_mode;
3461 if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE ||
3462 !vlan || txq->vlan_en) {
3464 * VLAN insertion will be done inside by HW.
3465 * It is not the most efficient - the VLAN flag is
3466 * checked twice, but we must handle the
3467 * inlining length correctly and take into
3468 * account the VLAN header being inserted.
3470 return mlx5_tx_packet_multi_send
3473 inlen = MLX5_ESEG_MIN_INLINE_SIZE;
3476 * Now we know the minimal amount of data requested
3477 * to be inlined. Check whether we should inline the buffers
3478 * from the beginning of the chain to eliminate some mbufs.
3481 nxlen = rte_pktmbuf_data_len(mbuf);
3482 if (unlikely(nxlen <= txq->inlen_send)) {
3483 /* We can inline first mbuf at least. */
3484 if (nxlen < inlen) {
3487 /* Scan mbufs till inlen filled. */
3492 nxlen = rte_pktmbuf_data_len(mbuf);
3494 } while (unlikely(nxlen < inlen));
3495 if (unlikely(nxlen > txq->inlen_send)) {
3496 /* We cannot inline entire mbuf. */
3497 smlen = inlen - smlen;
3498 start = rte_pktmbuf_mtod_offset
3499 (mbuf, uintptr_t, smlen);
3506 /* There should be no end of packet here. */
3508 nxlen = inlen + rte_pktmbuf_data_len(mbuf);
3509 } while (unlikely(nxlen < txq->inlen_send));
3511 start = rte_pktmbuf_mtod(mbuf, uintptr_t);
3513 * Check whether we can do inline to align start
3514 * address of data buffer to cacheline.
3517 start = (~start + 1) & (RTE_CACHE_LINE_SIZE - 1);
3518 if (unlikely(start)) {
3520 if (start <= txq->inlen_send)
3525 * Check whether there are enough free WQEBBs:
3527 * - Ethernet Segment
3528 * - First Segment of inlined Ethernet data
3529 * - ... data continued ...
3530 * - Data Segments of pointer/min inline type
3532 * Estimate the number of Data Segments conservatively,
3533 * supposing that no mbufs are freed during inlining.
3535 MLX5_ASSERT(inlen <= txq->inlen_send);
3536 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
3537 MLX5_ESEG_MIN_INLINE_SIZE +
3539 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3540 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3541 return MLX5_TXCMP_CODE_EXIT;
3542 /* Check for maximal WQE size. */
3543 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3544 return MLX5_TXCMP_CODE_ERROR;
3545 #ifdef MLX5_PMD_SOFT_COUNTERS
3546 /* Update sent data bytes/packets counters. */
3547 txq->stats.obytes += dlen + vlan;
3549 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3550 loc->wqe_last = wqe;
3551 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_SEND, olx);
3552 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 0, olx);
3553 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3554 txq->wqe_ci += (ds + 3) / 4;
3555 loc->wqe_free -= (ds + 3) / 4;
3556 return MLX5_TXCMP_CODE_MULTI;
3560 * Tx burst function for multi-segment packets. Supports all
3561 * types of Tx offloads, uses MLX5_OPCODE_SEND/TSO to build WQEs,
3562 * sends one packet per WQE. Function stops sending if it
3563 * encounters a single-segment packet.
3565 * This routine is responsible for storing the processed mbuf
3566 * into the elts ring buffer and updating elts_head.
3569 * Pointer to TX queue structure.
3571 * Packets to transmit.
3573 * Number of packets in array.
3575 * Pointer to burst routine local context.
3577 * Configured Tx offloads mask. It is fully defined at
3578 * compile time and may be used for optimization.
3581 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3582 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3583 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
3584 * MLX5_TXCMP_CODE_TSO - TSO single-segment packet encountered.
3585 * Local context variables updated.
3587 static __rte_always_inline enum mlx5_txcmp_code
3588 mlx5_tx_burst_mseg(struct mlx5_txq_data *restrict txq,
3589 struct rte_mbuf **restrict pkts,
3590 unsigned int pkts_n,
3591 struct mlx5_txq_local *restrict loc,
3594 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3595 MLX5_ASSERT(pkts_n > loc->pkts_sent);
3596 pkts += loc->pkts_sent + 1;
3597 pkts_n -= loc->pkts_sent;
3599 enum mlx5_txcmp_code ret;
3601 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
3603 * Estimate the number of free elts quickly but
3604 * conservatively. Some segments may be fully inlined
3605 * and freed; ignore this here - a precise estimation is costly.
3608 if (loc->elts_free < NB_SEGS(loc->mbuf))
3609 return MLX5_TXCMP_CODE_EXIT;
3610 if (MLX5_TXOFF_CONFIG(TSO) &&
3611 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)) {
3612 /* Proceed with multi-segment TSO. */
3613 ret = mlx5_tx_packet_multi_tso(txq, loc, olx);
3614 } else if (MLX5_TXOFF_CONFIG(INLINE)) {
3615 /* Proceed with multi-segment SEND with inlining. */
3616 ret = mlx5_tx_packet_multi_inline(txq, loc, olx);
3618 /* Proceed with multi-segment SEND w/o inlining. */
3619 ret = mlx5_tx_packet_multi_send(txq, loc, olx);
3621 if (ret == MLX5_TXCMP_CODE_EXIT)
3622 return MLX5_TXCMP_CODE_EXIT;
3623 if (ret == MLX5_TXCMP_CODE_ERROR)
3624 return MLX5_TXCMP_CODE_ERROR;
3625 /* WQE is built, go to the next packet. */
3628 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3629 return MLX5_TXCMP_CODE_EXIT;
3630 loc->mbuf = *pkts++;
3632 rte_prefetch0(*pkts);
3633 if (likely(NB_SEGS(loc->mbuf) > 1))
3635 /* Here ends the series of multi-segment packets. */
3636 if (MLX5_TXOFF_CONFIG(TSO) &&
3637 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
3638 return MLX5_TXCMP_CODE_TSO;
3639 return MLX5_TXCMP_CODE_SINGLE;
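/*
 * Editor's sketch of how the return codes above are consumed by the
 * top-level burst template (names follow this file, the control flow
 * is an illustration rather than the exact implementation):
 *
 *	ret = mlx5_tx_burst_mseg(txq, pkts, pkts_n, loc, olx);
 *	if (ret == MLX5_TXCMP_CODE_TSO)
 *		ret = mlx5_tx_burst_tso(txq, pkts, pkts_n, loc, olx);
 *	else if (ret == MLX5_TXCMP_CODE_SINGLE)
 *		; // ...single-segment branch...
 *	else if (ret == MLX5_TXCMP_CODE_EXIT || ret == MLX5_TXCMP_CODE_ERROR)
 *		; // ...terminate the burst loop...
 */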
3645 * Tx burst function for single-segment packets with TSO.
3646 * Supports all types of Tx offloads, except multi-packets.
3647 * Uses MLX5_OPCODE_TSO to build WQEs, sends one packet per WQE.
3648 * The function stops sending if it encounters a multi-segment
3649 * packet or a packet without TSO requested.
3651 * The routine is responsible for storing the processed mbuf
3652 * into the elts ring buffer and updating elts_head if the inline
3653 * offload is requested, due to possible early freeing
3654 * of the inlined mbufs (the pkts array cannot be stored in elts as a batch).
3658 * Pointer to TX queue structure.
3660 * Packets to transmit.
3662 * Number of packets in array.
3664 * Pointer to burst routine local context.
3666 * Configured Tx offloads mask. It is fully defined at
3667 * compile time and may be used for optimization.
3670 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3671 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3672 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
3673 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
3674 * Local context variables updated.
3676 static __rte_always_inline enum mlx5_txcmp_code
3677 mlx5_tx_burst_tso(struct mlx5_txq_data *restrict txq,
3678 struct rte_mbuf **restrict pkts,
3679 unsigned int pkts_n,
3680 struct mlx5_txq_local *restrict loc,
3683 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3684 MLX5_ASSERT(pkts_n > loc->pkts_sent);
3685 pkts += loc->pkts_sent + 1;
3686 pkts_n -= loc->pkts_sent;
3688 struct mlx5_wqe_dseg *restrict dseg;
3689 struct mlx5_wqe *restrict wqe;
3690 unsigned int ds, dlen, hlen, ntcp, vlan = 0;
3693 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
3694 dlen = rte_pktmbuf_data_len(loc->mbuf);
3695 if (MLX5_TXOFF_CONFIG(VLAN) &&
3696 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
3697 vlan = sizeof(struct rte_vlan_hdr);
3700 * First calculate the WQE size to check
3701 * whether we have enough space in ring buffer.
3703 hlen = loc->mbuf->l2_len + vlan +
3704 loc->mbuf->l3_len + loc->mbuf->l4_len;
3705 if (unlikely((!hlen || !loc->mbuf->tso_segsz)))
3706 return MLX5_TXCMP_CODE_ERROR;
3707 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
3708 hlen += loc->mbuf->outer_l2_len +
3709 loc->mbuf->outer_l3_len;
3710 /* Segment must contain all TSO headers. */
3711 if (unlikely(hlen > MLX5_MAX_TSO_HEADER ||
3712 hlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
3713 hlen > (dlen + vlan)))
3714 return MLX5_TXCMP_CODE_ERROR;
3716 * Check whether there are enough free WQEBBs:
3718 * - Ethernet Segment
3719 * - First Segment of inlined Ethernet data
3720 * - ... data continued ...
3721 * - Finishing Data Segment of pointer type
3723 ds = 4 + (hlen - MLX5_ESEG_MIN_INLINE_SIZE +
3724 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3725 if (loc->wqe_free < ((ds + 3) / 4))
3726 return MLX5_TXCMP_CODE_EXIT;
3727 #ifdef MLX5_PMD_SOFT_COUNTERS
3728 /* Update sent data bytes/packets counters. */
3729 ntcp = (dlen + vlan - hlen +
3730 loc->mbuf->tso_segsz - 1) /
3731 loc->mbuf->tso_segsz;
3733 * One will be added for mbuf itself at the end
3734 * of the mlx5_tx_burst from loc->pkts_sent field.
3737 txq->stats.opackets += ntcp;
3738 txq->stats.obytes += dlen + vlan + ntcp * hlen;
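/*
 * Editor's worked example for the counter above (illustrative values):
 * a single-segment TSO packet with dlen = 9014 data bytes, no VLAN,
 * hlen = 54 header bytes and tso_segsz = 1460 gives
 *
 *	ntcp = (9014 + 0 - 54 + 1460 - 1) / 1460 = 7 TCP segments,
 *
 * and obytes grows by dlen + vlan + ntcp * hlen, so the headers the
 * NIC replicates for every produced segment are counted as well.
 */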
3741 * Build the TSO WQE:
3743 * - Ethernet Segment with hlen bytes inlined
3744 * - Data Segment of pointer type
3746 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3747 loc->wqe_last = wqe;
3748 mlx5_tx_cseg_init(txq, loc, wqe, ds,
3749 MLX5_OPCODE_TSO, olx);
3750 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan, hlen, 1, olx);
3751 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) + hlen - vlan;
3752 dlen -= hlen - vlan;
3753 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
3755 * WQE is built, update the loop parameters
3756 * and go to the next packet.
3758 txq->wqe_ci += (ds + 3) / 4;
3759 loc->wqe_free -= (ds + 3) / 4;
3760 if (MLX5_TXOFF_CONFIG(INLINE))
3761 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3765 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3766 return MLX5_TXCMP_CODE_EXIT;
3767 loc->mbuf = *pkts++;
3769 rte_prefetch0(*pkts);
3770 if (MLX5_TXOFF_CONFIG(MULTI) &&
3771 unlikely(NB_SEGS(loc->mbuf) > 1))
3772 return MLX5_TXCMP_CODE_MULTI;
3773 if (likely(!(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)))
3774 return MLX5_TXCMP_CODE_SINGLE;
3775 /* Continue with the next TSO packet. */
3781 * Analyze the packet and select the best method to send.
3784 * Pointer to TX queue structure.
3786 * Pointer to burst routine local context.
3788 * Configured Tx offloads mask. It is fully defined at
3789 * compile time and may be used for optimization.
3791 * The predefined flag whether do complete check for
3792 * multi-segment packets and TSO.
3795 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
3796 * MLX5_TXCMP_CODE_TSO - TSO required, use TSO/LSO.
3797 * MLX5_TXCMP_CODE_SINGLE - single-segment packet, use SEND.
3798 * MLX5_TXCMP_CODE_EMPW - single-segment packet, use MPW.
3800 static __rte_always_inline enum mlx5_txcmp_code
3801 mlx5_tx_able_to_empw(struct mlx5_txq_data *restrict txq,
3802 struct mlx5_txq_local *restrict loc,
3806 /* Check for multi-segment packet. */
3808 MLX5_TXOFF_CONFIG(MULTI) &&
3809 unlikely(NB_SEGS(loc->mbuf) > 1))
3810 return MLX5_TXCMP_CODE_MULTI;
3811 /* Check for TSO packet. */
3813 MLX5_TXOFF_CONFIG(TSO) &&
3814 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
3815 return MLX5_TXCMP_CODE_TSO;
3816 /* Check if eMPW is enabled at all. */
3817 if (!MLX5_TXOFF_CONFIG(EMPW))
3818 return MLX5_TXCMP_CODE_SINGLE;
3819 /* Check if eMPW can be engaged. */
3820 if (MLX5_TXOFF_CONFIG(VLAN) &&
3821 unlikely(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) &&
3822 (!MLX5_TXOFF_CONFIG(INLINE) ||
3823 unlikely((rte_pktmbuf_data_len(loc->mbuf) +
3824 sizeof(struct rte_vlan_hdr)) > txq->inlen_empw))) {
3826 * eMPW does not support VLAN insertion offload,
3827 * we would have to inline the entire packet, but
3828 * the packet is too long for inlining.
3830 return MLX5_TXCMP_CODE_SINGLE;
3832 return MLX5_TXCMP_CODE_EMPW;
3836 * Check the next packet attributes to match with the eMPW batch ones.
3837 * In addition, for legacy MPW the packet length is checked as well.
3840 * Pointer to TX queue structure.
3842 * Pointer to Ethernet Segment of eMPW batch.
3844 * Pointer to burst routine local context.
3846 * Length of previous packet in MPW descriptor.
3848 * Configured Tx offloads mask. It is fully defined at
3849 * compile time and may be used for optimization.
3852 * true - packet match with eMPW batch attributes.
3853 * false - no match, eMPW should be restarted.
3855 static __rte_always_inline bool
3856 mlx5_tx_match_empw(struct mlx5_txq_data *restrict txq __rte_unused,
3857 struct mlx5_wqe_eseg *restrict es,
3858 struct mlx5_txq_local *restrict loc,
3862 uint8_t swp_flags = 0;
3864 /* Compare the checksum flags, if any. */
3865 if (MLX5_TXOFF_CONFIG(CSUM) &&
3866 txq_ol_cksum_to_cs(loc->mbuf) != es->cs_flags)
3868 /* Compare the Software Parser offsets and flags. */
3869 if (MLX5_TXOFF_CONFIG(SWP) &&
3870 (es->swp_offs != txq_mbuf_to_swp(loc, &swp_flags, olx) ||
3871 es->swp_flags != swp_flags))
3873 /* Fill metadata field if needed. */
3874 if (MLX5_TXOFF_CONFIG(METADATA) &&
3875 es->metadata != (loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
3876 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0))
3878 /* Legacy MPW can send packets with the same length only. */
3879 if (MLX5_TXOFF_CONFIG(MPW) &&
3880 dlen != rte_pktmbuf_data_len(loc->mbuf))
3882 /* There must be no VLAN packets in eMPW loop. */
3883 if (MLX5_TXOFF_CONFIG(VLAN))
3884 MLX5_ASSERT(!(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT));
3889 * Update send loop variables and WQE for eMPW loop
3890 * without data inlining. Number of Data Segments is
3891 * equal to the number of sent packets.
3894 * Pointer to TX queue structure.
3896 * Pointer to burst routine local context.
3898 * Number of packets sent (equal to the number of Data Segments).
3900 * Accumulated statistics, data bytes sent.
3902 * Configured Tx offloads mask. It is fully defined at
3903 * compile time and may be used for optimization.
3906 * No return value; the WQE and loop state are updated in place.
3909 static __rte_always_inline void
3910 mlx5_tx_sdone_empw(struct mlx5_txq_data *restrict txq,
3911 struct mlx5_txq_local *restrict loc,
3914 unsigned int olx __rte_unused)
3916 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
3917 #ifdef MLX5_PMD_SOFT_COUNTERS
3918 /* Update sent data bytes counter. */
3919 txq->stats.obytes += slen;
3923 loc->elts_free -= ds;
3924 loc->pkts_sent += ds;
3926 loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3927 txq->wqe_ci += (ds + 3) / 4;
3928 loc->wqe_free -= (ds + 3) / 4;
3932 * Update send loop variables and WQE for eMPW loop
3933 * with data inlining. Gets the size of pushed descriptors
3934 * and data to the WQE.
3937 * Pointer to TX queue structure.
3939 * Pointer to burst routine local context.
3941 * Total size of descriptor/data in bytes.
3943 * Accumulated statistics, data bytes sent.
3945 * The base WQE for the eMPW/MPW descriptor.
3947 * Configured Tx offloads mask. It is fully defined at
3948 * compile time and may be used for optimization.
3951 * No return value; the WQE and loop state are updated in place.
3954 static __rte_always_inline void
3955 mlx5_tx_idone_empw(struct mlx5_txq_data *restrict txq,
3956 struct mlx5_txq_local *restrict loc,
3959 struct mlx5_wqe *restrict wqem,
3960 unsigned int olx __rte_unused)
3962 struct mlx5_wqe_dseg *dseg = &wqem->dseg[0];
3964 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
3965 #ifdef MLX5_PMD_SOFT_COUNTERS
3966 /* Update sent data bytes counter. */
3967 txq->stats.obytes += slen;
3971 if (MLX5_TXOFF_CONFIG(MPW) && dseg->bcount == RTE_BE32(0)) {
3973 * If the legacy MPW session contains the inline packets
3974 * we should set the only inline data segment length
3975 * and align the total length to the segment size.
3977 MLX5_ASSERT(len > sizeof(dseg->bcount));
3978 dseg->bcount = rte_cpu_to_be_32((len - sizeof(dseg->bcount)) |
3979 MLX5_ETH_WQE_DATA_INLINE);
3980 len = (len + MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE + 2;
3983 * The session is not legacy MPW or contains the
3984 * data buffer pointer segments.
3986 MLX5_ASSERT((len % MLX5_WSEG_SIZE) == 0);
3987 len = len / MLX5_WSEG_SIZE + 2;
3989 wqem->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | len);
3990 txq->wqe_ci += (len + 3) / 4;
3991 loc->wqe_free -= (len + 3) / 4;
3992 loc->wqe_last = wqem;
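/*
 * Editor's worked example for the length conversion above (16-byte
 * WSEGs assumed): an inline legacy MPW session holding len = 100 bytes
 * of descriptor plus data is rounded up to whole segments and the two
 * title segments (Control + Ethernet) are added:
 *
 *	len = (100 + 16 - 1) / 16 + 2 = 9 segments,
 *	WQEBBs consumed = (9 + 3) / 4 = 3.
 */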
3996 * The set of Tx burst functions for single-segment packets
3997 * without TSO and with Multi-Packet Writing feature support.
3998 * Supports all types of Tx offloads, except multi-packets
4001 * Uses MLX5_OPCODE_EMPW to build WQEs if possible and sends
4002 * as many packets per WQE as it can. If eMPW is not configured
4003 * or the packet cannot be sent with eMPW (VLAN insertion), the
4004 * ordinary SEND opcode is used and only one packet is placed per WQE.
4007 * The functions stop sending if they encounter a multi-segment
4008 * packet or a packet with TSO requested.
4010 * The routines are responsible for storing the processed mbufs
4011 * into the elts ring buffer and updating elts_head if the inlining
4012 * offload is requested. Otherwise copying the mbufs to elts
4013 * can be postponed and completed at the end of the burst routine.
4016 * Pointer to TX queue structure.
4018 * Packets to transmit.
4020 * Number of packets in array.
4022 * Pointer to burst routine local context.
4024 * Configured Tx offloads mask. It is fully defined at
4025 * compile time and may be used for optimization.
4028 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
4029 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
4030 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
4031 * MLX5_TXCMP_CODE_TSO - TSO packet encountered.
4032 * MLX5_TXCMP_CODE_SINGLE - used inside functions set.
4033 * MLX5_TXCMP_CODE_EMPW - used inside functions set.
4035 * Local context variables updated.
4038 * The routine sends packets with MLX5_OPCODE_EMPW
4039 * without inlining; this is a dedicated optimized branch.
4040 * No VLAN insertion is supported.
4042 static __rte_always_inline enum mlx5_txcmp_code
4043 mlx5_tx_burst_empw_simple(struct mlx5_txq_data *restrict txq,
4044 struct rte_mbuf **restrict pkts,
4045 unsigned int pkts_n,
4046 struct mlx5_txq_local *restrict loc,
4050 * This subroutine is a part of mlx5_tx_burst_single()
4051 * and sends single-segment packet with eMPW opcode
4052 * without data inlining.
4054 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
4055 MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
4056 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
4057 MLX5_ASSERT(pkts_n > loc->pkts_sent);
4058 static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
4059 pkts += loc->pkts_sent + 1;
4060 pkts_n -= loc->pkts_sent;
4062 struct mlx5_wqe_dseg *restrict dseg;
4063 struct mlx5_wqe_eseg *restrict eseg;
4064 enum mlx5_txcmp_code ret;
4065 unsigned int part, loop;
4066 unsigned int slen = 0;
4069 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
4070 part = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
4071 MLX5_MPW_MAX_PACKETS :
4072 MLX5_EMPW_MAX_PACKETS);
4073 if (unlikely(loc->elts_free < part)) {
4074 /* We do not have enough elts to store all mbufs. */
4075 if (unlikely(loc->elts_free < MLX5_EMPW_MIN_PACKETS))
4076 return MLX5_TXCMP_CODE_EXIT;
4077 /* But we are still able to send at least a minimal eMPW. */
4078 part = loc->elts_free;
4080 /* Check whether we have enough WQEs */
4081 if (unlikely(loc->wqe_free < ((2 + part + 3) / 4))) {
4082 if (unlikely(loc->wqe_free <
4083 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
4084 return MLX5_TXCMP_CODE_EXIT;
4085 part = (loc->wqe_free * 4) - 2;
4087 if (likely(part > 1))
4088 rte_prefetch0(*pkts);
4089 loc->wqe_last = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4091 * Build eMPW title WQEBB:
4092 * - Control Segment, eMPW opcode
4093 * - Ethernet Segment, no inline
4095 mlx5_tx_cseg_init(txq, loc, loc->wqe_last, part + 2,
4096 MLX5_OPCODE_ENHANCED_MPSW, olx);
4097 mlx5_tx_eseg_none(txq, loc, loc->wqe_last,
4098 olx & ~MLX5_TXOFF_CONFIG_VLAN);
4099 eseg = &loc->wqe_last->eseg;
4100 dseg = &loc->wqe_last->dseg[0];
4102 /* Store the packet length for legacy MPW. */
4103 if (MLX5_TXOFF_CONFIG(MPW))
4104 eseg->mss = rte_cpu_to_be_16
4105 (rte_pktmbuf_data_len(loc->mbuf));
4107 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
4108 #ifdef MLX5_PMD_SOFT_COUNTERS
4109 /* Update sent data bytes counter. */
4114 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
4116 if (unlikely(--loop == 0))
4118 loc->mbuf = *pkts++;
4119 if (likely(loop > 1))
4120 rte_prefetch0(*pkts);
4121 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4123 * Unroll the completion code to avoid
4124 * returning a variable value - it results in
4125 * unoptimized subsequent checking in the caller.
4127 if (ret == MLX5_TXCMP_CODE_MULTI) {
4129 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4130 if (unlikely(!loc->elts_free ||
4132 return MLX5_TXCMP_CODE_EXIT;
4133 return MLX5_TXCMP_CODE_MULTI;
4135 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
4136 if (ret == MLX5_TXCMP_CODE_TSO) {
4138 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4139 if (unlikely(!loc->elts_free ||
4141 return MLX5_TXCMP_CODE_EXIT;
4142 return MLX5_TXCMP_CODE_TSO;
4144 if (ret == MLX5_TXCMP_CODE_SINGLE) {
4146 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4147 if (unlikely(!loc->elts_free ||
4149 return MLX5_TXCMP_CODE_EXIT;
4150 return MLX5_TXCMP_CODE_SINGLE;
4152 if (ret != MLX5_TXCMP_CODE_EMPW) {
4155 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4156 return MLX5_TXCMP_CODE_ERROR;
4159 * Check whether packet parameters coincide
4160 * within assumed eMPW batch:
4161 * - check sum settings
4163 * - software parser settings
4164 * - packets length (legacy MPW only)
4166 if (!mlx5_tx_match_empw(txq, eseg, loc, dlen, olx)) {
4169 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4170 if (unlikely(!loc->elts_free ||
4172 return MLX5_TXCMP_CODE_EXIT;
4176 /* Packet attributes match, continue the same eMPW. */
4178 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
4179 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
4181 /* eMPW is built successfully, update loop parameters. */
4183 MLX5_ASSERT(pkts_n >= part);
4184 #ifdef MLX5_PMD_SOFT_COUNTERS
4185 /* Update sent data bytes counter. */
4186 txq->stats.obytes += slen;
4188 loc->elts_free -= part;
4189 loc->pkts_sent += part;
4190 txq->wqe_ci += (2 + part + 3) / 4;
4191 loc->wqe_free -= (2 + part + 3) / 4;
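/*
 * Editor's worked example for the batch accounting above: a no-inline
 * eMPW session carries one Data Segment per packet plus the two title
 * segments (Control + Ethernet), so for part = 14 packets
 *
 *	WQEBBs consumed = (2 + 14 + 3) / 4 = 4,
 *
 * which is exactly what wqe_ci is advanced by and wqe_free reduced by.
 */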
4193 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
4194 return MLX5_TXCMP_CODE_EXIT;
4195 loc->mbuf = *pkts++;
4196 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4197 if (unlikely(ret != MLX5_TXCMP_CODE_EMPW))
4199 /* Continue sending eMPW batches. */
4205 * The routine sends packets with MLX5_OPCODE_EMPW
4206 * with inlining, optionally supports VLAN insertion.
4208 static __rte_always_inline enum mlx5_txcmp_code
4209 mlx5_tx_burst_empw_inline(struct mlx5_txq_data *restrict txq,
4210 struct rte_mbuf **restrict pkts,
4211 unsigned int pkts_n,
4212 struct mlx5_txq_local *restrict loc,
4216 * This subroutine is a part of mlx5_tx_burst_single()
4217 * and sends single-segment packet with eMPW opcode
4218 * with data inlining.
4220 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
4221 MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
4222 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
4223 MLX5_ASSERT(pkts_n > loc->pkts_sent);
4224 static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
4225 pkts += loc->pkts_sent + 1;
4226 pkts_n -= loc->pkts_sent;
4228 struct mlx5_wqe_dseg *restrict dseg;
4229 struct mlx5_wqe *restrict wqem;
4230 enum mlx5_txcmp_code ret;
4231 unsigned int room, part, nlim;
4232 unsigned int slen = 0;
4234 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
4236 * Limit the number of packets in one WQE
4237 * to improve the CQE generation latency.
4239 nlim = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
4240 MLX5_MPW_INLINE_MAX_PACKETS :
4241 MLX5_EMPW_MAX_PACKETS);
4242 /* Check whether we have the minimal amount of WQEs. */
4243 if (unlikely(loc->wqe_free <
4244 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
4245 return MLX5_TXCMP_CODE_EXIT;
4246 if (likely(pkts_n > 1))
4247 rte_prefetch0(*pkts);
4248 wqem = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4250 * Build eMPW title WQEBB:
4251 * - Control Segment, eMPW opcode, zero DS
4252 * - Ethernet Segment, no inline
4254 mlx5_tx_cseg_init(txq, loc, wqem, 0,
4255 MLX5_OPCODE_ENHANCED_MPSW, olx);
4256 mlx5_tx_eseg_none(txq, loc, wqem,
4257 olx & ~MLX5_TXOFF_CONFIG_VLAN);
4258 dseg = &wqem->dseg[0];
4259 /* Store the packet length for legacy MPW. */
4260 if (MLX5_TXOFF_CONFIG(MPW))
4261 wqem->eseg.mss = rte_cpu_to_be_16
4262 (rte_pktmbuf_data_len(loc->mbuf));
4263 room = RTE_MIN(MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE,
4264 loc->wqe_free) * MLX5_WQE_SIZE -
4265 MLX5_WQE_CSEG_SIZE -
4267 /* Limit the room for legacy MPW sessions for performance. */
4268 if (MLX5_TXOFF_CONFIG(MPW))
4269 room = RTE_MIN(room,
4270 RTE_MAX(txq->inlen_empw +
4271 sizeof(dseg->bcount) +
4272 (MLX5_TXOFF_CONFIG(VLAN) ?
4273 sizeof(struct rte_vlan_hdr) : 0),
4274 MLX5_MPW_INLINE_MAX_PACKETS *
4275 MLX5_WQE_DSEG_SIZE));
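/*
 * Reading of the cap above: a legacy MPW session is limited to the
 * larger of the configured inline limit (inlen_empw plus the bcount
 * field and a possible VLAN header) and the space needed for
 * MLX5_MPW_INLINE_MAX_PACKETS pointer descriptors.
 */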
4276 /* Build WQE till we have space, packets and resources. */
4279 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
4280 uint8_t *dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
4283 MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
4284 MLX5_ASSERT((room % MLX5_WQE_DSEG_SIZE) == 0);
4285 MLX5_ASSERT((uintptr_t)dseg < (uintptr_t)txq->wqes_end);
4287 * Some Tx offloads may cause an error if
4288 * the packet is not long enough; check against
4289 * the assumed minimal length.
4291 if (unlikely(dlen <= MLX5_ESEG_MIN_INLINE_SIZE)) {
4293 if (unlikely(!part))
4294 return MLX5_TXCMP_CODE_ERROR;
4296 * We have some successfully built
4297 * packet Data Segments to send.
4299 mlx5_tx_idone_empw(txq, loc, part,
4301 return MLX5_TXCMP_CODE_ERROR;
4303 /* Inline or not inline - that's the Question. */
4304 if (dlen > txq->inlen_empw ||
4305 loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE)
4307 if (MLX5_TXOFF_CONFIG(MPW)) {
4308 if (dlen > txq->inlen_send)
4312 /* Open new inline MPW session. */
4313 tlen += sizeof(dseg->bcount);
4314 dseg->bcount = RTE_BE32(0);
4316 (dseg, sizeof(dseg->bcount));
4319 * No pointer and inline descriptor
4320 * intermix for legacy MPW sessions.
4322 if (wqem->dseg[0].bcount)
4326 tlen = sizeof(dseg->bcount) + dlen;
4328 /* Inline entire packet, optional VLAN insertion. */
4329 if (MLX5_TXOFF_CONFIG(VLAN) &&
4330 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
4332 * The packet length was already checked in
4333 * mlx5_tx_able_to_empw(), so the packet is
4334 * guaranteed to fit into the inline length.
4337 sizeof(struct rte_vlan_hdr)) <=
4339 tlen += sizeof(struct rte_vlan_hdr);
4342 dseg = mlx5_tx_dseg_vlan(txq, loc, dseg,
4344 #ifdef MLX5_PMD_SOFT_COUNTERS
4345 /* Update sent data bytes counter. */
4346 slen += sizeof(struct rte_vlan_hdr);
4351 dseg = mlx5_tx_dseg_empw(txq, loc, dseg,
4354 if (!MLX5_TXOFF_CONFIG(MPW))
4355 tlen = RTE_ALIGN(tlen, MLX5_WSEG_SIZE);
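/*
 * Note that only pure eMPW aligns the inlined part to the WSEG
 * granularity here; legacy MPW sessions pack the inlined data
 * back to back without padding, as the check above shows.
 */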
4356 MLX5_ASSERT(room >= tlen);
4359 * Packet data are completely inlined,
4360 * free the packet immediately.
4362 rte_pktmbuf_free_seg(loc->mbuf);
4366 * No pointer and inline descriptor
4367 * intermix for legacy MPW sessions.
4369 if (MLX5_TXOFF_CONFIG(MPW) &&
4371 wqem->dseg[0].bcount == RTE_BE32(0))
4374 * Non-inlinable VLAN packets are
4375 * processed outside of this routine.
4377 MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
4378 if (MLX5_TXOFF_CONFIG(VLAN))
4379 MLX5_ASSERT(!(loc->mbuf->ol_flags &
4381 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
4382 /* We have to store mbuf in elts.*/
4383 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
4384 room -= MLX5_WQE_DSEG_SIZE;
4385 /* Ring buffer wraparound is checked at the loop end.*/
4388 #ifdef MLX5_PMD_SOFT_COUNTERS
4389 /* Update sent data bytes counter. */
4395 if (unlikely(!pkts_n || !loc->elts_free)) {
4397 * We have no resources/packets to
4398 * continue building descriptors.
4401 mlx5_tx_idone_empw(txq, loc, part,
4403 return MLX5_TXCMP_CODE_EXIT;
4405 loc->mbuf = *pkts++;
4406 if (likely(pkts_n > 1))
4407 rte_prefetch0(*pkts);
4408 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4410 * Unroll the completion code to avoid
4411 * returning a variable value - it results in
4412 * unoptimized subsequent checking in the caller.
4414 if (ret == MLX5_TXCMP_CODE_MULTI) {
4416 mlx5_tx_idone_empw(txq, loc, part,
4418 if (unlikely(!loc->elts_free ||
4420 return MLX5_TXCMP_CODE_EXIT;
4421 return MLX5_TXCMP_CODE_MULTI;
4423 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
4424 if (ret == MLX5_TXCMP_CODE_TSO) {
4426 mlx5_tx_idone_empw(txq, loc, part,
4428 if (unlikely(!loc->elts_free ||
4430 return MLX5_TXCMP_CODE_EXIT;
4431 return MLX5_TXCMP_CODE_TSO;
4433 if (ret == MLX5_TXCMP_CODE_SINGLE) {
4435 mlx5_tx_idone_empw(txq, loc, part,
4437 if (unlikely(!loc->elts_free ||
4439 return MLX5_TXCMP_CODE_EXIT;
4440 return MLX5_TXCMP_CODE_SINGLE;
4442 if (ret != MLX5_TXCMP_CODE_EMPW) {
4445 mlx5_tx_idone_empw(txq, loc, part,
4447 return MLX5_TXCMP_CODE_ERROR;
4449 /* Check if we have minimal room left. */
4451 if (unlikely(!nlim || room < MLX5_WQE_DSEG_SIZE))
4454 * Check whether packet parameters coincide
4455 * within the assumed eMPW batch:
4456 * - checksum settings
4458 * - software parser settings
4459 * - packet length (legacy MPW only)
4461 if (!mlx5_tx_match_empw(txq, &wqem->eseg,
4464 /* Packet attributes match, continue the same eMPW. */
4465 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
4466 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
4469 * We get here to close an existing eMPW
4470 * session and start a new one.
4472 MLX5_ASSERT(pkts_n);
4474 if (unlikely(!part))
4475 return MLX5_TXCMP_CODE_EXIT;
4476 mlx5_tx_idone_empw(txq, loc, part, slen, wqem, olx);
4477 if (unlikely(!loc->elts_free ||
4479 return MLX5_TXCMP_CODE_EXIT;
4480 /* Continue the loop with new eMPW session. */
4486 * The routine sends packets with ordinary MLX5_OPCODE_SEND.
4487 * Data inlining and VLAN insertion are supported.
4489 static __rte_always_inline enum mlx5_txcmp_code
4490 mlx5_tx_burst_single_send(struct mlx5_txq_data *restrict txq,
4491 struct rte_mbuf **restrict pkts,
4492 unsigned int pkts_n,
4493 struct mlx5_txq_local *restrict loc,
4497 * This subroutine is part of mlx5_tx_burst_single()
4498 * and sends single-segment packets with the SEND opcode.
4500 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
4501 MLX5_ASSERT(pkts_n > loc->pkts_sent);
4502 pkts += loc->pkts_sent + 1;
4503 pkts_n -= loc->pkts_sent;
4505 struct mlx5_wqe *restrict wqe;
4506 enum mlx5_txcmp_code ret;
4508 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
4509 if (MLX5_TXOFF_CONFIG(INLINE)) {
4510 unsigned int inlen, vlan = 0;
4512 inlen = rte_pktmbuf_data_len(loc->mbuf);
4513 if (MLX5_TXOFF_CONFIG(VLAN) &&
4514 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
4515 vlan = sizeof(struct rte_vlan_hdr);
4517 static_assert((sizeof(struct rte_vlan_hdr) +
4518 sizeof(struct rte_ether_hdr)) ==
4519 MLX5_ESEG_MIN_INLINE_SIZE,
4520 "invalid min inline data size");
4523 * If inlining is enabled at configuration time,
4524 * the limit must not be less than the minimal size.
4525 * Otherwise we would need an extra data size check
4526 * to avoid crashes due to length overflow.
4528 MLX5_ASSERT(txq->inlen_send >=
4529 MLX5_ESEG_MIN_INLINE_SIZE);
4530 if (inlen <= txq->inlen_send) {
4531 unsigned int seg_n, wqe_n;
4533 rte_prefetch0(rte_pktmbuf_mtod
4534 (loc->mbuf, uint8_t *));
4535 /* Check against minimal length. */
4536 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
4537 return MLX5_TXCMP_CODE_ERROR;
4538 if (loc->mbuf->ol_flags &
4539 PKT_TX_DYNF_NOINLINE) {
4541 * The hint flag not to inline packet
4542 * data is set. Check whether we can follow the hint.
4545 if ((!MLX5_TXOFF_CONFIG(EMPW) &&
4547 (MLX5_TXOFF_CONFIG(MPW) &&
4550 * The hardware requires the
4551 * minimal inline data header.
4553 goto single_min_inline;
4555 if (MLX5_TXOFF_CONFIG(VLAN) &&
4556 vlan && !txq->vlan_en) {
4558 * We must insert VLAN tag
4559 * by software means.
4561 goto single_part_inline;
4563 goto single_no_inline;
4566 * Completely inlined packet data WQE:
4567 * - Control Segment, SEND opcode
4568 * - Ethernet Segment, no VLAN insertion
4569 * - Data inlined, VLAN optionally inserted
4570 * - Alignment to MLX5_WSEG_SIZE
4571 * Have to estimate the number of WQEBBs.
4573 seg_n = (inlen + 3 * MLX5_WSEG_SIZE -
4574 MLX5_ESEG_MIN_INLINE_SIZE +
4575 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
4576 /* Check if there are enough WQEBBs. */
4577 wqe_n = (seg_n + 3) / 4;
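/*
 * A worked example of the estimation above (constant values
 * assumed: MLX5_WSEG_SIZE is 16 bytes, MLX5_ESEG_MIN_INLINE_SIZE
 * is 18 bytes): for inlen = 128 bytes this gives
 * seg_n = (128 + 48 - 18 + 15) / 16 = 10 WSEGs and
 * wqe_n = (10 + 3) / 4 = 3 WQEBBs.
 */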
4578 if (wqe_n > loc->wqe_free)
4579 return MLX5_TXCMP_CODE_EXIT;
4580 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4581 loc->wqe_last = wqe;
4582 mlx5_tx_cseg_init(txq, loc, wqe, seg_n,
4583 MLX5_OPCODE_SEND, olx);
4584 mlx5_tx_eseg_data(txq, loc, wqe,
4585 vlan, inlen, 0, olx);
4586 txq->wqe_ci += wqe_n;
4587 loc->wqe_free -= wqe_n;
4589 * Packet data are completely inlined,
4590 * free the packet immediately.
4592 rte_pktmbuf_free_seg(loc->mbuf);
4593 } else if ((!MLX5_TXOFF_CONFIG(EMPW) ||
4594 MLX5_TXOFF_CONFIG(MPW)) &&
4597 * If minimal inlining is requested, the eMPW
4598 * feature should be disabled because data is
4599 * inlined into the Ethernet Segment, which cannot
4600 * hold inlined data for eMPW since the
4601 * segment is shared by all packets.
4603 struct mlx5_wqe_dseg *restrict dseg;
4608 * The inline-mode settings require inlining
4609 * the specified amount of data bytes into
4610 * the Ethernet Segment. We should check the
4611 * free space in the WQE ring buffer before
4612 * inlining partially.
4615 MLX5_ASSERT(txq->inlen_send >= txq->inlen_mode);
4616 MLX5_ASSERT(inlen > txq->inlen_mode);
4617 MLX5_ASSERT(txq->inlen_mode >=
4618 MLX5_ESEG_MIN_INLINE_SIZE);
4620 * Check whether there are enough free WQEBBs:
4622 * - Ethernet Segment
4623 * - First Segment of inlined Ethernet data
4624 * - ... data continued ...
4625 * - Finishing Data Segment of pointer type
4627 ds = (MLX5_WQE_CSEG_SIZE +
4628 MLX5_WQE_ESEG_SIZE +
4629 MLX5_WQE_DSEG_SIZE +
4631 MLX5_ESEG_MIN_INLINE_SIZE +
4632 MLX5_WQE_DSEG_SIZE +
4633 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
4634 if (loc->wqe_free < ((ds + 3) / 4))
4635 return MLX5_TXCMP_CODE_EXIT;
4637 * Build the ordinary SEND WQE:
4639 * - Ethernet Segment, inline inlen_mode bytes
4640 * - Data Segment of pointer type
4642 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4643 loc->wqe_last = wqe;
4644 mlx5_tx_cseg_init(txq, loc, wqe, ds,
4645 MLX5_OPCODE_SEND, olx);
4646 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan,
4649 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
4650 txq->inlen_mode - vlan;
4651 inlen -= txq->inlen_mode;
4652 mlx5_tx_dseg_ptr(txq, loc, dseg,
4655 * WQE is built, update the loop parameters
4656 * and go to the next packet.
4658 txq->wqe_ci += (ds + 3) / 4;
4659 loc->wqe_free -= (ds + 3) / 4;
4660 /* We have to store mbuf in elts.*/
4661 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
4662 txq->elts[txq->elts_head++ & txq->elts_m] =
4670 * Partially inlined packet data WQE: we have
4671 * some space in the title WQEBB, so we can fill
4672 * it with some packet data. It takes one WQEBB,
4673 * which is available, so no extra space check is needed:
4674 * - Control Segment, SEND opcode
4675 * - Ethernet Segment, no VLAN insertion
4676 * - MLX5_ESEG_MIN_INLINE_SIZE bytes of Data
4677 * - Data Segment, pointer type
4679 * We also get here if VLAN insertion is not
4680 * supported by HW and inlining is enabled.
4683 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4684 loc->wqe_last = wqe;
4685 mlx5_tx_cseg_init(txq, loc, wqe, 4,
4686 MLX5_OPCODE_SEND, olx);
4687 mlx5_tx_eseg_dmin(txq, loc, wqe, vlan, olx);
4688 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
4689 MLX5_ESEG_MIN_INLINE_SIZE - vlan;
4691 * The length check is performed above, by
4692 * comparing with txq->inlen_send. We should
4693 * not get overflow here.
4695 MLX5_ASSERT(inlen > MLX5_ESEG_MIN_INLINE_SIZE);
4696 dlen = inlen - MLX5_ESEG_MIN_INLINE_SIZE;
4697 mlx5_tx_dseg_ptr(txq, loc, &wqe->dseg[1],
4701 /* We have to store mbuf in elts.*/
4702 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
4703 txq->elts[txq->elts_head++ & txq->elts_m] =
4707 #ifdef MLX5_PMD_SOFT_COUNTERS
4708 /* Update sent data bytes counter. */
4709 txq->stats.obytes += vlan +
4710 rte_pktmbuf_data_len(loc->mbuf);
4714 * No inlining at all; it means saving CPU cycles
4715 * was prioritized at configuration time, so we
4716 * should not copy any packet data into the WQE.
4718 * SEND WQE, one WQEBB:
4719 * - Control Segment, SEND opcode
4720 * - Ethernet Segment, optional VLAN, no inline
4721 * - Data Segment, pointer type
4724 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4725 loc->wqe_last = wqe;
4726 mlx5_tx_cseg_init(txq, loc, wqe, 3,
4727 MLX5_OPCODE_SEND, olx);
4728 mlx5_tx_eseg_none(txq, loc, wqe, olx);
4730 (txq, loc, &wqe->dseg[0],
4731 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
4732 rte_pktmbuf_data_len(loc->mbuf), olx);
4736 * We should not store the mbuf pointer in elts
4737 * if no inlining is configured; this is done
4738 * by the calling routine in a batch copy.
4740 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
4742 #ifdef MLX5_PMD_SOFT_COUNTERS
4743 /* Update sent data bytes counter. */
4744 txq->stats.obytes += rte_pktmbuf_data_len(loc->mbuf);
4745 if (MLX5_TXOFF_CONFIG(VLAN) &&
4746 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
4747 txq->stats.obytes +=
4748 sizeof(struct rte_vlan_hdr);
4753 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
4754 return MLX5_TXCMP_CODE_EXIT;
4755 loc->mbuf = *pkts++;
4757 rte_prefetch0(*pkts);
4758 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4759 if (unlikely(ret != MLX5_TXCMP_CODE_SINGLE))
4765 static __rte_always_inline enum mlx5_txcmp_code
4766 mlx5_tx_burst_single(struct mlx5_txq_data *restrict txq,
4767 struct rte_mbuf **restrict pkts,
4768 unsigned int pkts_n,
4769 struct mlx5_txq_local *restrict loc,
4772 enum mlx5_txcmp_code ret;
4774 ret = mlx5_tx_able_to_empw(txq, loc, olx, false);
4775 if (ret == MLX5_TXCMP_CODE_SINGLE)
4777 MLX5_ASSERT(ret == MLX5_TXCMP_CODE_EMPW);
4779 /* Optimize for inline/no inline eMPW send. */
4780 ret = (MLX5_TXOFF_CONFIG(INLINE)) ?
4781 mlx5_tx_burst_empw_inline
4782 (txq, pkts, pkts_n, loc, olx) :
4783 mlx5_tx_burst_empw_simple
4784 (txq, pkts, pkts_n, loc, olx);
4785 if (ret != MLX5_TXCMP_CODE_SINGLE)
4787 /* The resources to send one packet should remain. */
4788 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
4790 ret = mlx5_tx_burst_single_send(txq, pkts, pkts_n, loc, olx);
4791 MLX5_ASSERT(ret != MLX5_TXCMP_CODE_SINGLE);
4792 if (ret != MLX5_TXCMP_CODE_EMPW)
4794 /* The resources to send one packet should remain. */
4795 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
4800 * DPDK Tx callback template. This is the configured template
4801 * used to generate routines optimized for the specified offload setup.
4802 * One of these generated functions is chosen at SQ configuration time.
4806 * Generic pointer to TX queue structure.
4808 * Packets to transmit.
4810 * Number of packets in array.
4812 * Configured offloads mask, representing the bits of MLX5_TXOFF_CONFIG_xxx
4813 * values. Should be static to take advantage of compile-time static configuration.
4817 * Number of packets successfully transmitted (<= pkts_n).
4819 static __rte_always_inline uint16_t
4820 mlx5_tx_burst_tmpl(struct mlx5_txq_data *restrict txq,
4821 struct rte_mbuf **restrict pkts,
4825 struct mlx5_txq_local loc;
4826 enum mlx5_txcmp_code ret;
4829 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
4830 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
4831 if (unlikely(!pkts_n))
4835 loc.wqe_last = NULL;
4838 loc.pkts_loop = loc.pkts_sent;
4840 * Check if there are some CQEs, if any:
4841 * - process encountered errors
4842 * - process the completed WQEs
4843 * - free related mbufs
4844 * - doorbell the NIC about processed CQEs
4846 rte_prefetch0(*(pkts + loc.pkts_sent));
4847 mlx5_tx_handle_completion(txq, olx);
4849 * Calculate the number of available resources - elts and WQEs.
4850 * There are two possible different scenarios:
4851 * - no data inlining into WQEs, one WQEBB may contain up to
4852 * four packets; in this case elts become the scarce resource
4853 * - data inlining into WQEs, one packet may require multiple
4854 * WQEBBs, the WQEs become the limiting factor.
4856 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
4857 loc.elts_free = txq->elts_s -
4858 (uint16_t)(txq->elts_head - txq->elts_tail);
4859 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
4860 loc.wqe_free = txq->wqe_s -
4861 (uint16_t)(txq->wqe_ci - txq->wqe_pi);
4862 if (unlikely(!loc.elts_free || !loc.wqe_free))
4866 * Fetch the packet from the array. Usually this is
4867 * the first packet in a series of multi/single-segment packets.
4870 loc.mbuf = *(pkts + loc.pkts_sent);
4871 /* Dedicated branch for multi-segment packets. */
4872 if (MLX5_TXOFF_CONFIG(MULTI) &&
4873 unlikely(NB_SEGS(loc.mbuf) > 1)) {
4875 * Multi-segment packet encountered.
4876 * Hardware is able to process it only
4877 * with SEND/TSO opcodes, one packet
4878 * per WQE, do it in dedicated routine.
4881 MLX5_ASSERT(loc.pkts_sent >= loc.pkts_copy);
4882 part = loc.pkts_sent - loc.pkts_copy;
4883 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
4885 * There are some single-segment mbufs not
4886 * stored in elts. The mbufs must be in the
4887 * same order as WQEs, so we must copy the
4888 * mbufs to elts here, before the mbuf of the
4889 * coming multi-segment packet is appended.
4891 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy,
4893 loc.pkts_copy = loc.pkts_sent;
4895 MLX5_ASSERT(pkts_n > loc.pkts_sent);
4896 ret = mlx5_tx_burst_mseg(txq, pkts, pkts_n, &loc, olx);
4897 if (!MLX5_TXOFF_CONFIG(INLINE))
4898 loc.pkts_copy = loc.pkts_sent;
4900 * These return code checks are supposed
4901 * to be optimized out due to routine inlining.
4903 if (ret == MLX5_TXCMP_CODE_EXIT) {
4905 * The routine returns this code when
4906 * all packets are sent or there are not
4907 * enough resources to complete the request.
4911 if (ret == MLX5_TXCMP_CODE_ERROR) {
4913 * The routine returns this code when
4914 * some error in the incoming packet format occurred.
4917 txq->stats.oerrors++;
4920 if (ret == MLX5_TXCMP_CODE_SINGLE) {
4922 * The single-segment packet was encountered
4923 * in the array; try to send it in the most
4924 * optimized way, possibly engaging eMPW.
4926 goto enter_send_single;
4928 if (MLX5_TXOFF_CONFIG(TSO) &&
4929 ret == MLX5_TXCMP_CODE_TSO) {
4931 * The single-segment TSO packet was
4932 * encountered in the array.
4934 goto enter_send_tso;
4936 /* We must not get here. Something is going wrong. */
4938 txq->stats.oerrors++;
4941 /* Dedicated branch for single-segment TSO packets. */
4942 if (MLX5_TXOFF_CONFIG(TSO) &&
4943 unlikely(loc.mbuf->ol_flags & PKT_TX_TCP_SEG)) {
4945 * TSO might require a special way of inlining
4946 * (dedicated parameters) and is sent with the
4947 * MLX5_OPCODE_TSO opcode only; provide this
4948 * in a dedicated branch.
4951 MLX5_ASSERT(NB_SEGS(loc.mbuf) == 1);
4952 MLX5_ASSERT(pkts_n > loc.pkts_sent);
4953 ret = mlx5_tx_burst_tso(txq, pkts, pkts_n, &loc, olx);
4955 * These return code checks are supposed
4956 * to be optimized out due to routine inlining.
4958 if (ret == MLX5_TXCMP_CODE_EXIT)
4960 if (ret == MLX5_TXCMP_CODE_ERROR) {
4961 txq->stats.oerrors++;
4964 if (ret == MLX5_TXCMP_CODE_SINGLE)
4965 goto enter_send_single;
4966 if (MLX5_TXOFF_CONFIG(MULTI) &&
4967 ret == MLX5_TXCMP_CODE_MULTI) {
4969 * The multi-segment packet was
4970 * encountered in the array.
4972 goto enter_send_multi;
4974 /* We must not get here. Something is going wrong. */
4976 txq->stats.oerrors++;
4980 * The dedicated branch for the single-segment packets
4981 * without TSO. Often these ones can be sent using
4982 * MLX5_OPCODE_EMPW with multiple packets in one WQE.
4983 * The routine builds the WQEs till it encounters
4984 * a TSO or multi-segment packet (if these
4985 * offloads are requested at SQ configuration time).
4988 MLX5_ASSERT(pkts_n > loc.pkts_sent);
4989 ret = mlx5_tx_burst_single(txq, pkts, pkts_n, &loc, olx);
4991 * These return code checks are supposed
4992 * to be optimized out due to routine inlining.
4994 if (ret == MLX5_TXCMP_CODE_EXIT)
4996 if (ret == MLX5_TXCMP_CODE_ERROR) {
4997 txq->stats.oerrors++;
5000 if (MLX5_TXOFF_CONFIG(MULTI) &&
5001 ret == MLX5_TXCMP_CODE_MULTI) {
5003 * The multi-segment packet was
5004 * encountered in the array.
5006 goto enter_send_multi;
5008 if (MLX5_TXOFF_CONFIG(TSO) &&
5009 ret == MLX5_TXCMP_CODE_TSO) {
5011 * The single-segment TSO packet was
5012 * encountered in the array.
5014 goto enter_send_tso;
5016 /* We must not get here. Something is going wrong. */
5018 txq->stats.oerrors++;
5022 * Main Tx loop is completed, do the rest:
5023 * - set completion request if thresholds are reached
5024 * - doorbell the hardware
5025 * - copy the rest of mbufs to elts (if any)
5027 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE) ||
5028 loc.pkts_sent >= loc.pkts_copy);
5029 /* Take a shortcut if nothing is sent. */
5030 if (unlikely(loc.pkts_sent == loc.pkts_loop))
5032 /* Request CQE generation if limits are reached. */
5033 mlx5_tx_request_completion(txq, &loc, olx);
5035 * Ring the QP doorbell immediately after WQE building completion
5036 * to improve latency. The purely software-related data treatment
5037 * can be completed after the doorbell. Tx CQEs for this SQ are
5038 * processed in this thread only by polling.
5040 * The rdma core library can map doorbell register in two ways,
5041 * depending on the environment variable "MLX5_SHUT_UP_BF":
5043 * - as regular cached memory, the variable is either missing or
5044 * set to zero. This type of mapping may cause significant
5045 * doorbell register write latency and requires an explicit
5046 * memory write barrier to mitigate this issue and prevent write combining.
5049 * - as non-cached memory, the variable is present and set to a
5050 * non-zero value. This type of mapping may cause a performance
5051 * impact under heavy load conditions but the explicit write
5052 * memory barrier is not required and it may improve core performance.
5055 * - the legacy behaviour (prior to the 19.08 release) was to use
5056 * some heuristics to decide whether the write memory barrier
5057 * should be performed. This behavior is supported by specifying
5058 * tx_db_nc=2; the write barrier is skipped if the application
5059 * provides the full recommended burst of packets, assuming
5060 * the next packets are coming and the write barrier
5061 * will be issued on the next burst (after descriptor writing, at least).
5064 mlx5_tx_dbrec_cond_wmb(txq, loc.wqe_last, !txq->db_nc &&
5065 (!txq->db_heu || pkts_n % MLX5_TX_DEFAULT_BURST));
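/*
 * In other words, the conditional write barrier above is issued
 * only when the doorbell is mapped as cached memory (db_nc == 0)
 * and either the heuristics are disabled or the burst is not an
 * exact multiple of MLX5_TX_DEFAULT_BURST.
 */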
5066 /* Not all of the mbufs may be stored into elts yet. */
5067 part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent - loc.pkts_copy;
5068 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
5070 * There are some single-segment mbufs not stored in elts.
5071 * This can only happen if the last packet was single-segment.
5072 * The copying is gathered into one place because it is
5073 * a good opportunity to optimize it with SIMD.
5074 * Unfortunately, if inlining is enabled, gaps in the
5075 * pointer array may happen due to early freeing of the inlined mbufs.
5078 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy, part, olx);
5079 loc.pkts_copy = loc.pkts_sent;
5081 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
5082 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
5083 if (pkts_n > loc.pkts_sent) {
5085 * If the burst size is large there might not be enough CQEs
5086 * fetched from the completion queue and not enough resources
5087 * freed to send all the packets.
5092 #ifdef MLX5_PMD_SOFT_COUNTERS
5093 /* Increment sent packets counter. */
5094 txq->stats.opackets += loc.pkts_sent;
5096 return loc.pkts_sent;
5099 /* Generate routines with Enhanced Multi-Packet Write support. */
5100 MLX5_TXOFF_DECL(full_empw,
5101 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_EMPW)
5103 MLX5_TXOFF_DECL(none_empw,
5104 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW)
5106 MLX5_TXOFF_DECL(md_empw,
5107 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5109 MLX5_TXOFF_DECL(mt_empw,
5110 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5111 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5113 MLX5_TXOFF_DECL(mtsc_empw,
5114 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5115 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5116 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5118 MLX5_TXOFF_DECL(mti_empw,
5119 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5120 MLX5_TXOFF_CONFIG_INLINE |
5121 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5123 MLX5_TXOFF_DECL(mtv_empw,
5124 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5125 MLX5_TXOFF_CONFIG_VLAN |
5126 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5128 MLX5_TXOFF_DECL(mtiv_empw,
5129 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5130 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5131 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5133 MLX5_TXOFF_DECL(sc_empw,
5134 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5135 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5137 MLX5_TXOFF_DECL(sci_empw,
5138 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5139 MLX5_TXOFF_CONFIG_INLINE |
5140 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5142 MLX5_TXOFF_DECL(scv_empw,
5143 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5144 MLX5_TXOFF_CONFIG_VLAN |
5145 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5147 MLX5_TXOFF_DECL(sciv_empw,
5148 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5149 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5150 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5152 MLX5_TXOFF_DECL(i_empw,
5153 MLX5_TXOFF_CONFIG_INLINE |
5154 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5156 MLX5_TXOFF_DECL(v_empw,
5157 MLX5_TXOFF_CONFIG_VLAN |
5158 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5160 MLX5_TXOFF_DECL(iv_empw,
5161 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5162 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5164 /* Generate routines without Enhanced Multi-Packet Write support. */
5165 MLX5_TXOFF_DECL(full,
5166 MLX5_TXOFF_CONFIG_FULL)
5168 MLX5_TXOFF_DECL(none,
5169 MLX5_TXOFF_CONFIG_NONE)
5172 MLX5_TXOFF_CONFIG_METADATA)
5175 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5176 MLX5_TXOFF_CONFIG_METADATA)
5178 MLX5_TXOFF_DECL(mtsc,
5179 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5180 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5181 MLX5_TXOFF_CONFIG_METADATA)
5183 MLX5_TXOFF_DECL(mti,
5184 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5185 MLX5_TXOFF_CONFIG_INLINE |
5186 MLX5_TXOFF_CONFIG_METADATA)
5189 MLX5_TXOFF_DECL(mtv,
5190 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5191 MLX5_TXOFF_CONFIG_VLAN |
5192 MLX5_TXOFF_CONFIG_METADATA)
5195 MLX5_TXOFF_DECL(mtiv,
5196 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5197 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5198 MLX5_TXOFF_CONFIG_METADATA)
5201 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5202 MLX5_TXOFF_CONFIG_METADATA)
5204 MLX5_TXOFF_DECL(sci,
5205 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5206 MLX5_TXOFF_CONFIG_INLINE |
5207 MLX5_TXOFF_CONFIG_METADATA)
5210 MLX5_TXOFF_DECL(scv,
5211 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5212 MLX5_TXOFF_CONFIG_VLAN |
5213 MLX5_TXOFF_CONFIG_METADATA)
5216 MLX5_TXOFF_DECL(sciv,
5217 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5218 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5219 MLX5_TXOFF_CONFIG_METADATA)
5222 MLX5_TXOFF_CONFIG_INLINE |
5223 MLX5_TXOFF_CONFIG_METADATA)
5226 MLX5_TXOFF_CONFIG_VLAN |
5227 MLX5_TXOFF_CONFIG_METADATA)
5230 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5231 MLX5_TXOFF_CONFIG_METADATA)
5234 * Generate routines with Legacy Multi-Packet Write support.
5235 * This mode is supported by ConnectX-4 Lx only and imposes
5236 * offload limitations; the following are not supported:
5237 * - ACL/Flows (metadata becomes meaningless)
5238 * - WQE Inline headers
5239 * - SRIOV (E-Switch offloads)
5241 * - tunnel encapsulation/decapsulation
5244 MLX5_TXOFF_DECL(none_mpw,
5245 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW |
5246 MLX5_TXOFF_CONFIG_MPW)
5248 MLX5_TXOFF_DECL(mci_mpw,
5249 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5250 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5251 MLX5_TXOFF_CONFIG_MPW)
5253 MLX5_TXOFF_DECL(mc_mpw,
5254 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5255 MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW)
5257 MLX5_TXOFF_DECL(i_mpw,
5258 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5259 MLX5_TXOFF_CONFIG_MPW)
5262 * Array of declared and compiled Tx burst functions and corresponding
5263 * supported offload sets. The array is used to select the Tx burst
5264 * function for the offload set specified at Tx queue configuration time.
5267 eth_tx_burst_t func;
5270 MLX5_TXOFF_INFO(full_empw,
5271 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5272 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5273 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5274 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5276 MLX5_TXOFF_INFO(none_empw,
5277 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW)
5279 MLX5_TXOFF_INFO(md_empw,
5280 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5282 MLX5_TXOFF_INFO(mt_empw,
5283 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5284 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5286 MLX5_TXOFF_INFO(mtsc_empw,
5287 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5288 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5289 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5291 MLX5_TXOFF_INFO(mti_empw,
5292 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5293 MLX5_TXOFF_CONFIG_INLINE |
5294 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5296 MLX5_TXOFF_INFO(mtv_empw,
5297 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5298 MLX5_TXOFF_CONFIG_VLAN |
5299 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5301 MLX5_TXOFF_INFO(mtiv_empw,
5302 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5303 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5304 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5306 MLX5_TXOFF_INFO(sc_empw,
5307 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5308 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5310 MLX5_TXOFF_INFO(sci_empw,
5311 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5312 MLX5_TXOFF_CONFIG_INLINE |
5313 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5315 MLX5_TXOFF_INFO(scv_empw,
5316 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5317 MLX5_TXOFF_CONFIG_VLAN |
5318 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5320 MLX5_TXOFF_INFO(sciv_empw,
5321 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5322 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5323 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5325 MLX5_TXOFF_INFO(i_empw,
5326 MLX5_TXOFF_CONFIG_INLINE |
5327 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5329 MLX5_TXOFF_INFO(v_empw,
5330 MLX5_TXOFF_CONFIG_VLAN |
5331 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5333 MLX5_TXOFF_INFO(iv_empw,
5334 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5335 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5337 MLX5_TXOFF_INFO(full,
5338 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5339 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5340 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5341 MLX5_TXOFF_CONFIG_METADATA)
5343 MLX5_TXOFF_INFO(none,
5344 MLX5_TXOFF_CONFIG_NONE)
5347 MLX5_TXOFF_CONFIG_METADATA)
5350 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5351 MLX5_TXOFF_CONFIG_METADATA)
5353 MLX5_TXOFF_INFO(mtsc,
5354 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5355 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5356 MLX5_TXOFF_CONFIG_METADATA)
5358 MLX5_TXOFF_INFO(mti,
5359 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5360 MLX5_TXOFF_CONFIG_INLINE |
5361 MLX5_TXOFF_CONFIG_METADATA)
5363 MLX5_TXOFF_INFO(mtv,
5364 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5365 MLX5_TXOFF_CONFIG_VLAN |
5366 MLX5_TXOFF_CONFIG_METADATA)
5368 MLX5_TXOFF_INFO(mtiv,
5369 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5370 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5371 MLX5_TXOFF_CONFIG_METADATA)
5374 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5375 MLX5_TXOFF_CONFIG_METADATA)
5377 MLX5_TXOFF_INFO(sci,
5378 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5379 MLX5_TXOFF_CONFIG_INLINE |
5380 MLX5_TXOFF_CONFIG_METADATA)
5382 MLX5_TXOFF_INFO(scv,
5383 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5384 MLX5_TXOFF_CONFIG_VLAN |
5385 MLX5_TXOFF_CONFIG_METADATA)
5387 MLX5_TXOFF_INFO(sciv,
5388 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5389 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5390 MLX5_TXOFF_CONFIG_METADATA)
5393 MLX5_TXOFF_CONFIG_INLINE |
5394 MLX5_TXOFF_CONFIG_METADATA)
5397 MLX5_TXOFF_CONFIG_VLAN |
5398 MLX5_TXOFF_CONFIG_METADATA)
5401 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5402 MLX5_TXOFF_CONFIG_METADATA)
5404 MLX5_TXOFF_INFO(none_mpw,
5405 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW |
5406 MLX5_TXOFF_CONFIG_MPW)
5408 MLX5_TXOFF_INFO(mci_mpw,
5409 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5410 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5411 MLX5_TXOFF_CONFIG_MPW)
5413 MLX5_TXOFF_INFO(mc_mpw,
5414 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5415 MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW)
5417 MLX5_TXOFF_INFO(i_mpw,
5418 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5419 MLX5_TXOFF_CONFIG_MPW)
5423 * Configure the Tx function to use. The routine checks configured
5424 * Tx offloads for the device and selects the appropriate Tx burst
5425 * routine. There are multiple Tx burst routines compiled from
5426 * the same template in the most optimal way for the dedicated offload set.
5430 * Pointer to private data structure.
5433 * Pointer to selected Tx burst function.
5436 mlx5_select_tx_function(struct rte_eth_dev *dev)
5438 struct mlx5_priv *priv = dev->data->dev_private;
5439 struct mlx5_dev_config *config = &priv->config;
5440 uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
5441 unsigned int diff = 0, olx = 0, i, m;
5443 static_assert(MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE <=
5444 MLX5_DSEG_MAX, "invalid WQE max size");
5445 static_assert(MLX5_WQE_CSEG_SIZE == MLX5_WSEG_SIZE,
5446 "invalid WQE Control Segment size");
5447 static_assert(MLX5_WQE_ESEG_SIZE == MLX5_WSEG_SIZE,
5448 "invalid WQE Ethernet Segment size");
5449 static_assert(MLX5_WQE_DSEG_SIZE == MLX5_WSEG_SIZE,
5450 "invalid WQE Data Segment size");
5451 static_assert(MLX5_WQE_SIZE == 4 * MLX5_WSEG_SIZE,
5452 "invalid WQE size");
5454 if (tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
5455 /* We should support Multi-Segment Packets. */
5456 olx |= MLX5_TXOFF_CONFIG_MULTI;
5458 if (tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
5459 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
5460 DEV_TX_OFFLOAD_GRE_TNL_TSO |
5461 DEV_TX_OFFLOAD_IP_TNL_TSO |
5462 DEV_TX_OFFLOAD_UDP_TNL_TSO)) {
5463 /* We should support TCP Send Offload. */
5464 olx |= MLX5_TXOFF_CONFIG_TSO;
5466 if (tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO |
5467 DEV_TX_OFFLOAD_UDP_TNL_TSO |
5468 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
5469 /* We should support Software Parser for Tunnels. */
5470 olx |= MLX5_TXOFF_CONFIG_SWP;
5472 if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
5473 DEV_TX_OFFLOAD_UDP_CKSUM |
5474 DEV_TX_OFFLOAD_TCP_CKSUM |
5475 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
5476 /* We should support IP/TCP/UDP Checksums. */
5477 olx |= MLX5_TXOFF_CONFIG_CSUM;
5479 if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT) {
5480 /* We should support VLAN insertion. */
5481 olx |= MLX5_TXOFF_CONFIG_VLAN;
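/*
 * To illustrate the mapping so far with a hypothetical setup: a
 * device configured with only DEV_TX_OFFLOAD_TCP_CKSUM and
 * DEV_TX_OFFLOAD_VLAN_INSERT ends up with olx = CSUM | VLAN at
 * this point; the INLINE, EMPW/MPW and METADATA bits may still be
 * added by the checks below.
 */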
5483 if (priv->txqs_n && (*priv->txqs)[0]) {
5484 struct mlx5_txq_data *txd = (*priv->txqs)[0];
5486 if (txd->inlen_send) {
5488 * Check the data inline requirements. Data inlining
5489 * is enabled on a per-device basis, so we can check
5490 * the first Tx queue only.
5492 * If the device does not support VLAN insertion in the WQE
5493 * and some queues are requested to perform VLAN
5494 * insertion offload, then inlining must be enabled.
5496 olx |= MLX5_TXOFF_CONFIG_INLINE;
5499 if (config->mps == MLX5_MPW_ENHANCED &&
5500 config->txq_inline_min <= 0) {
5502 * The NIC supports Enhanced Multi-Packet Write
5503 * and does not require minimal inline data.
5505 olx |= MLX5_TXOFF_CONFIG_EMPW;
5507 if (rte_flow_dynf_metadata_avail()) {
5508 /* We should support Flow metadata. */
5509 olx |= MLX5_TXOFF_CONFIG_METADATA;
5511 if (config->mps == MLX5_MPW) {
5513 * The NIC supports Legacy Multi-Packet Write.
5514 * The MLX5_TXOFF_CONFIG_MPW controls the
5515 * descriptor building method in combination
5516 * with MLX5_TXOFF_CONFIG_EMPW.
5518 if (!(olx & (MLX5_TXOFF_CONFIG_TSO |
5519 MLX5_TXOFF_CONFIG_SWP |
5520 MLX5_TXOFF_CONFIG_VLAN |
5521 MLX5_TXOFF_CONFIG_METADATA)))
5522 olx |= MLX5_TXOFF_CONFIG_EMPW |
5523 MLX5_TXOFF_CONFIG_MPW;
5526 * Scan the routines table to find the minimal
5527 * routine satisfying the requested offloads.
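/*
 * A hypothetical selection example: if olx requests only
 * CSUM | METADATA | EMPW there is no exact match in txoff_func;
 * entries such as mtsc_empw and sc_empw both cover the request,
 * and sc_empw wins because it carries the fewest not-requested
 * offload bits (only SWP).
 */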
5529 m = RTE_DIM(txoff_func);
5530 for (i = 0; i < RTE_DIM(txoff_func); i++) {
5533 tmp = txoff_func[i].olx;
5535 /* Meets requested offloads exactly.*/
5539 if ((tmp & olx) != olx) {
5540 /* Does not meet requested offloads at all. */
5543 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_EMPW)
5544 /* Do not enable eMPW if not configured. */
5546 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_INLINE)
5547 /* Do not enable inlining if not configured. */
5550 * Some routine meets the requirements.
5551 * Check whether it has the minimal amount
5552 * of not-requested offloads.
5554 tmp = __builtin_popcountl(tmp & ~olx);
5555 if (m >= RTE_DIM(txoff_func) || tmp < diff) {
5556 /* First or better match, save and continue. */
5562 tmp = txoff_func[i].olx ^ txoff_func[m].olx;
5563 if (__builtin_ffsl(txoff_func[i].olx & ~tmp) <
5564 __builtin_ffsl(txoff_func[m].olx & ~tmp)) {
5565 /* Lighter not requested offload. */
5570 if (m >= RTE_DIM(txoff_func)) {
5571 DRV_LOG(DEBUG, "port %u has no selected Tx function"
5572 " for requested offloads %04X",
5573 dev->data->port_id, olx);
5576 DRV_LOG(DEBUG, "port %u has selected Tx function"
5577 " supporting offloads %04X/%04X",
5578 dev->data->port_id, olx, txoff_func[m].olx);
5579 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MULTI)
5580 DRV_LOG(DEBUG, "\tMULTI (multi segment)");
5581 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_TSO)
5582 DRV_LOG(DEBUG, "\tTSO (TCP send offload)");
5583 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_SWP)
5584 DRV_LOG(DEBUG, "\tSWP (software parser)");
5585 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_CSUM)
5586 DRV_LOG(DEBUG, "\tCSUM (checksum offload)");
5587 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_INLINE)
5588 DRV_LOG(DEBUG, "\tINLIN (inline data)");
5589 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_VLAN)
5590 DRV_LOG(DEBUG, "\tVLANI (VLAN insertion)");
5591 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_METADATA)
5592 DRV_LOG(DEBUG, "\tMETAD (tx Flow metadata)");
5593 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_EMPW) {
5594 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MPW)
5595 DRV_LOG(DEBUG, "\tMPW (Legacy MPW)");
5597 DRV_LOG(DEBUG, "\tEMPW (Enhanced MPW)");
5599 return txoff_func[m].func;
5603 * DPDK callback to get the TX queue information
5606 * Pointer to the device structure.
5608 * @param tx_queue_id
5609 * Tx queue identifier.
5612 * Pointer to the TX queue information structure.
5619 mlx5_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
5620 struct rte_eth_txq_info *qinfo)
5622 struct mlx5_priv *priv = dev->data->dev_private;
5623 struct mlx5_txq_data *txq = (*priv->txqs)[tx_queue_id];
5624 struct mlx5_txq_ctrl *txq_ctrl =
5625 container_of(txq, struct mlx5_txq_ctrl, txq);
5629 qinfo->nb_desc = txq->elts_s;
5630 qinfo->conf.tx_thresh.pthresh = 0;
5631 qinfo->conf.tx_thresh.hthresh = 0;
5632 qinfo->conf.tx_thresh.wthresh = 0;
5633 qinfo->conf.tx_rs_thresh = 0;
5634 qinfo->conf.tx_free_thresh = 0;
5635 qinfo->conf.tx_deferred_start = txq_ctrl ? 0 : 1;
5636 qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
5640 * DPDK callback to get the TX packet burst mode information
5643 * Pointer to the device structure.
5645 * @param tx_queue_id
5646 * Tx queue identifier.
5649 * Pointer to the burst mode information.
5652 * 0 on success, -EINVAL on failure.
5656 mlx5_tx_burst_mode_get(struct rte_eth_dev *dev,
5657 uint16_t tx_queue_id __rte_unused,
5658 struct rte_eth_burst_mode *mode)
5660 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
5661 unsigned int i, olx;
5663 for (i = 0; i < RTE_DIM(txoff_func); i++) {
5664 if (pkt_burst == txoff_func[i].func) {
5665 olx = txoff_func[i].olx;
5666 snprintf(mode->info, sizeof(mode->info),
5668 (olx & MLX5_TXOFF_CONFIG_EMPW) ?
5669 ((olx & MLX5_TXOFF_CONFIG_MPW) ?
5670 "Legacy MPW" : "Enhanced MPW") : "No MPW",
5671 (olx & MLX5_TXOFF_CONFIG_MULTI) ?
5673 (olx & MLX5_TXOFF_CONFIG_TSO) ?
5675 (olx & MLX5_TXOFF_CONFIG_SWP) ?
5677 (olx & MLX5_TXOFF_CONFIG_CSUM) ?
5679 (olx & MLX5_TXOFF_CONFIG_INLINE) ?
5681 (olx & MLX5_TXOFF_CONFIG_VLAN) ?
5683 (olx & MLX5_TXOFF_CONFIG_METADATA) ?
5684 " + METADATA" : "");