1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015-2019 Mellanox Technologies, Ltd
12 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
14 #pragma GCC diagnostic ignored "-Wpedantic"
16 #include <infiniband/verbs.h>
17 #include <infiniband/mlx5dv.h>
19 #pragma GCC diagnostic error "-Wpedantic"
23 #include <rte_mempool.h>
24 #include <rte_prefetch.h>
25 #include <rte_common.h>
26 #include <rte_branch_prediction.h>
27 #include <rte_ether.h>
28 #include <rte_cycles.h>
31 #include <mlx5_devx_cmds.h>
33 #include <mlx5_common.h>
35 #include "mlx5_defs.h"
37 #include "mlx5_utils.h"
38 #include "mlx5_rxtx.h"
39 #include "mlx5_autoconf.h"
41 /* TX burst subroutines return codes. */
42 enum mlx5_txcmp_code {
43 MLX5_TXCMP_CODE_EXIT = 0,
44 MLX5_TXCMP_CODE_ERROR,
45 MLX5_TXCMP_CODE_SINGLE,
46 MLX5_TXCMP_CODE_MULTI,
52 * These defines are used to configure the Tx burst routine option set
53 * supported at compile time. Options that are not specified are optimized
54 * out, because the corresponding 'if' conditions can be evaluated at
55 * compile time. Offloads with a bigger runtime check overhead (requiring
56 * more CPU cycles to skip) should have a bigger index - this is needed to
57 * select the best matching routine when there is no exact match and some
58 * offloads are not actually requested.
60 #define MLX5_TXOFF_CONFIG_MULTI (1u << 0) /* Multi-segment packets.*/
61 #define MLX5_TXOFF_CONFIG_TSO (1u << 1) /* TCP send offload supported.*/
62 #define MLX5_TXOFF_CONFIG_SWP (1u << 2) /* Tunnels/SW Parser offloads.*/
63 #define MLX5_TXOFF_CONFIG_CSUM (1u << 3) /* Check Sums offloaded. */
64 #define MLX5_TXOFF_CONFIG_INLINE (1u << 4) /* Data inlining supported. */
65 #define MLX5_TXOFF_CONFIG_VLAN (1u << 5) /* VLAN insertion supported.*/
66 #define MLX5_TXOFF_CONFIG_METADATA (1u << 6) /* Flow metadata. */
67 #define MLX5_TXOFF_CONFIG_EMPW (1u << 8) /* Enhanced MPW supported.*/
68 #define MLX5_TXOFF_CONFIG_MPW (1u << 9) /* Legacy MPW supported.*/
70 /* The most common offload groups. */
71 #define MLX5_TXOFF_CONFIG_NONE 0
72 #define MLX5_TXOFF_CONFIG_FULL (MLX5_TXOFF_CONFIG_MULTI | \
73 MLX5_TXOFF_CONFIG_TSO | \
74 MLX5_TXOFF_CONFIG_SWP | \
75 MLX5_TXOFF_CONFIG_CSUM | \
76 MLX5_TXOFF_CONFIG_INLINE | \
77 MLX5_TXOFF_CONFIG_VLAN | \
78 MLX5_TXOFF_CONFIG_METADATA)
80 #define MLX5_TXOFF_CONFIG(mask) (olx & MLX5_TXOFF_CONFIG_##mask)
82 #define MLX5_TXOFF_DECL(func, olx) \
83 static uint16_t mlx5_tx_burst_##func(void *txq, \
84 struct rte_mbuf **pkts, \
87 return mlx5_tx_burst_tmpl((struct mlx5_txq_data *)txq, \
88 pkts, pkts_n, (olx)); \
91 #define MLX5_TXOFF_INFO(func, olx) {mlx5_tx_burst_##func, olx},
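/*
 * Illustrative sketch (not part of the original flow, the "full" variant
 * name is only an example): a specialized burst routine is generated with
 * MLX5_TXOFF_DECL() and registered for runtime selection with
 * MLX5_TXOFF_INFO(), e.g.
 *
 *   MLX5_TXOFF_DECL(full, MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_EMPW)
 *   MLX5_TXOFF_INFO(full, MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_EMPW)
 *
 * Inside mlx5_tx_burst_tmpl() the olx argument is a compile-time constant,
 * so every MLX5_TXOFF_CONFIG(...) check folds to a constant and the code
 * for unused offloads is optimized out.
 */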
93 static __rte_always_inline uint32_t
94 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe);
96 static __rte_always_inline int
97 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
98 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe);
100 static __rte_always_inline uint32_t
101 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe);
103 static __rte_always_inline void
104 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
105 volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res);
107 static __rte_always_inline void
108 mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx,
109 const unsigned int strd_n);
112 mlx5_queue_state_modify(struct rte_eth_dev *dev,
113 struct mlx5_mp_arg_queue_state_modify *sm);
116 mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *restrict tcp,
117 volatile struct mlx5_cqe *restrict cqe,
121 mlx5_lro_update_hdr(uint8_t *restrict padd,
122 volatile struct mlx5_cqe *restrict cqe,
125 uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
126 [0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
129 uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned;
130 uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned;
132 uint64_t rte_net_mlx5_dynf_inline_mask;
135 * Build a table to translate Rx completion flags to packet type.
137 * @note: update mlx5_dev_supported_ptypes_get() if any change is made here.
140 mlx5_set_ptype_table(void)
143 uint32_t (*p)[RTE_DIM(mlx5_ptype_table)] = &mlx5_ptype_table;
145 /* Last entry must not be overwritten, reserved for errored packet. */
146 for (i = 0; i < RTE_DIM(mlx5_ptype_table) - 1; ++i)
147 (*p)[i] = RTE_PTYPE_UNKNOWN;
149 * The index to the array should have:
150 * bit[1:0] = l3_hdr_type
151 * bit[4:2] = l4_hdr_type
154 * bit[7] = outer_l3_type
157 (*p)[0x00] = RTE_PTYPE_L2_ETHER;
159 (*p)[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
160 RTE_PTYPE_L4_NONFRAG;
161 (*p)[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
162 RTE_PTYPE_L4_NONFRAG;
164 (*p)[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
166 (*p)[0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
169 (*p)[0x05] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
171 (*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
173 (*p)[0x0d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
175 (*p)[0x0e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
177 (*p)[0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
179 (*p)[0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
182 (*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
184 (*p)[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
186 /* Repeat with outer_l3_type being set. Just in case. */
187 (*p)[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
188 RTE_PTYPE_L4_NONFRAG;
189 (*p)[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
190 RTE_PTYPE_L4_NONFRAG;
191 (*p)[0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
193 (*p)[0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
195 (*p)[0x85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
197 (*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
199 (*p)[0x8d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
201 (*p)[0x8e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
203 (*p)[0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
205 (*p)[0x92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
207 (*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
209 (*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
212 (*p)[0x40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
213 (*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
214 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
215 RTE_PTYPE_INNER_L4_NONFRAG;
216 (*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
217 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
218 RTE_PTYPE_INNER_L4_NONFRAG;
219 (*p)[0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
220 (*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
221 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
222 RTE_PTYPE_INNER_L4_NONFRAG;
223 (*p)[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
224 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
225 RTE_PTYPE_INNER_L4_NONFRAG;
226 /* Tunneled - Fragmented */
227 (*p)[0x61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
228 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
229 RTE_PTYPE_INNER_L4_FRAG;
230 (*p)[0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
231 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
232 RTE_PTYPE_INNER_L4_FRAG;
233 (*p)[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
234 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
235 RTE_PTYPE_INNER_L4_FRAG;
236 (*p)[0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
237 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
238 RTE_PTYPE_INNER_L4_FRAG;
240 (*p)[0x45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
241 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
242 RTE_PTYPE_INNER_L4_TCP;
243 (*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
244 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
245 RTE_PTYPE_INNER_L4_TCP;
246 (*p)[0x4d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
247 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
248 RTE_PTYPE_INNER_L4_TCP;
249 (*p)[0x4e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
250 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
251 RTE_PTYPE_INNER_L4_TCP;
252 (*p)[0x51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
253 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
254 RTE_PTYPE_INNER_L4_TCP;
255 (*p)[0x52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
256 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
257 RTE_PTYPE_INNER_L4_TCP;
258 (*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
259 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
260 RTE_PTYPE_INNER_L4_TCP;
261 (*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
262 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
263 RTE_PTYPE_INNER_L4_TCP;
264 (*p)[0xcd] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
265 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
266 RTE_PTYPE_INNER_L4_TCP;
267 (*p)[0xce] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
268 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
269 RTE_PTYPE_INNER_L4_TCP;
270 (*p)[0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
271 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
272 RTE_PTYPE_INNER_L4_TCP;
273 (*p)[0xd2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
274 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
275 RTE_PTYPE_INNER_L4_TCP;
277 (*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
278 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
279 RTE_PTYPE_INNER_L4_UDP;
280 (*p)[0x4a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
281 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
282 RTE_PTYPE_INNER_L4_UDP;
283 (*p)[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
284 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
285 RTE_PTYPE_INNER_L4_UDP;
286 (*p)[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
287 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
288 RTE_PTYPE_INNER_L4_UDP;
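	/*
	 * Worked example (illustrative): a tunneled completion with outer
	 * IPv4, inner IPv4 and inner TCP produces index 0x46 - the tunneled
	 * bit (6) is set, bit 7 is clear (outer IPv4), and the l3/l4 header
	 * type bits select inner IPv4 and TCP - so the lookup resolves to
	 * RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
	 * RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_TCP,
	 * matching the 0x46 entry above.
	 */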
292 * Build a table to translate the packet checksum request to the Verbs checksum type.
295 mlx5_set_cksum_table(void)
301 * The index should have:
302 * bit[0] = PKT_TX_TCP_SEG
303 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
304 * bit[4] = PKT_TX_IP_CKSUM
305 * bit[8] = PKT_TX_OUTER_IP_CKSUM
308 for (i = 0; i < RTE_DIM(mlx5_cksum_table); ++i) {
311 /* Tunneled packet. */
312 if (i & (1 << 8)) /* Outer IP. */
313 v |= MLX5_ETH_WQE_L3_CSUM;
314 if (i & (1 << 4)) /* Inner IP. */
315 v |= MLX5_ETH_WQE_L3_INNER_CSUM;
316 if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
317 v |= MLX5_ETH_WQE_L4_INNER_CSUM;
320 if (i & (1 << 4)) /* IP. */
321 v |= MLX5_ETH_WQE_L3_CSUM;
322 if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
323 v |= MLX5_ETH_WQE_L4_CSUM;
325 mlx5_cksum_table[i] = v;
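	/*
	 * Worked example (illustrative): a plain (non-tunneled) packet
	 * requesting PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM sets bit[4] and one
	 * of bits[3:2] in the index, so the loop above stores
	 * MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM for that entry; the
	 * same request on a tunneled packet takes the first branch and
	 * yields the inner checksum flags instead.
	 */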
330 * Build a table to translate the packet type of an mbuf to the Verbs SWP type.
333 mlx5_set_swp_types_table(void)
339 * The index should have:
340 * bit[0:1] = PKT_TX_L4_MASK
341 * bit[4] = PKT_TX_IPV6
342 * bit[8] = PKT_TX_OUTER_IPV6
343 * bit[9] = PKT_TX_OUTER_UDP
345 for (i = 0; i < RTE_DIM(mlx5_swp_types_table); ++i) {
348 v |= MLX5_ETH_WQE_L3_OUTER_IPV6;
350 v |= MLX5_ETH_WQE_L4_OUTER_UDP;
352 v |= MLX5_ETH_WQE_L3_INNER_IPV6;
353 if ((i & 3) == (PKT_TX_UDP_CKSUM >> 52))
354 v |= MLX5_ETH_WQE_L4_INNER_UDP;
355 mlx5_swp_types_table[i] = v;
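	/*
	 * Worked example (illustrative): a UDP tunnel over outer IPv6
	 * carrying an inner IPv4/UDP packet sets bits 8 and 9 plus the
	 * PKT_TX_UDP_CKSUM value in bits[0:1], so the resulting entry is
	 * MLX5_ETH_WQE_L3_OUTER_IPV6 | MLX5_ETH_WQE_L4_OUTER_UDP |
	 * MLX5_ETH_WQE_L4_INNER_UDP (no inner IPv6 bit, since bit 4 is
	 * clear).
	 */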
360 * Set Software Parser flags and offsets in Ethernet Segment of WQE.
361 * Flags must be initialized to zero beforehand.
364 * Pointer to burst routine local context.
366 * Pointer to store Software Parser flags
368 * Configured Tx offloads mask. It is fully defined at
369 * compile time and may be used for optimization.
372 * Software Parser offsets packed in dword.
373 * Software Parser flags are set by pointer.
375 static __rte_always_inline uint32_t
376 txq_mbuf_to_swp(struct mlx5_txq_local *restrict loc,
381 unsigned int idx, off;
384 if (!MLX5_TXOFF_CONFIG(SWP))
386 ol = loc->mbuf->ol_flags;
387 tunnel = ol & PKT_TX_TUNNEL_MASK;
389 * Check whether the Software Parser is required.
390 * Only custom tunnels may request it.
392 if (likely(tunnel != PKT_TX_TUNNEL_UDP && tunnel != PKT_TX_TUNNEL_IP))
395 * The index should have:
396 * bit[0:1] = PKT_TX_L4_MASK
397 * bit[4] = PKT_TX_IPV6
398 * bit[8] = PKT_TX_OUTER_IPV6
399 * bit[9] = PKT_TX_OUTER_UDP
401 idx = (ol & (PKT_TX_L4_MASK | PKT_TX_IPV6 | PKT_TX_OUTER_IPV6)) >> 52;
402 idx |= (tunnel == PKT_TX_TUNNEL_UDP) ? (1 << 9) : 0;
403 *swp_flags = mlx5_swp_types_table[idx];
405 * Set offsets for SW parser. Since ConnectX-5, SW parser just
406 * complements HW parser. SW parser starts to engage only if HW parser
407 * can't reach a header. For the older devices, HW parser will not kick
408 * in if any of SWP offsets is set. Therefore, all of the L3 offsets
409 * should be set regardless of HW offload.
411 off = loc->mbuf->outer_l2_len;
412 if (MLX5_TXOFF_CONFIG(VLAN) && ol & PKT_TX_VLAN_PKT)
413 off += sizeof(struct rte_vlan_hdr);
414 set = (off >> 1) << 8; /* Outer L3 offset. */
415 off += loc->mbuf->outer_l3_len;
416 if (tunnel == PKT_TX_TUNNEL_UDP)
417 set |= off >> 1; /* Outer L4 offset. */
418 if (ol & (PKT_TX_IPV4 | PKT_TX_IPV6)) { /* Inner IP. */
419 const uint64_t csum = ol & PKT_TX_L4_MASK;
420 off += loc->mbuf->l2_len;
421 set |= (off >> 1) << 24; /* Inner L3 offset. */
422 if (csum == PKT_TX_TCP_CKSUM ||
423 csum == PKT_TX_UDP_CKSUM ||
424 (MLX5_TXOFF_CONFIG(TSO) && ol & PKT_TX_TCP_SEG)) {
425 off += loc->mbuf->l3_len;
426 set |= (off >> 1) << 16; /* Inner L4 offset. */
429 set = rte_cpu_to_le_32(set);
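	/*
	 * Worked example (illustrative, offsets are in 16-bit words): with
	 * outer_l2_len = 14 and outer_l3_len = 20 for a UDP tunnel packet,
	 * the outer L3 offset is 14 / 2 = 7 (stored in byte 1 of the dword)
	 * and the outer L4 offset is (14 + 20) / 2 = 17 (byte 0); the inner
	 * L3 and L4 offsets are accumulated the same way into bytes 3 and 2
	 * respectively.
	 */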
434 * Convert the Checksum offloads to Verbs.
437 * Pointer to the mbuf.
440 * Converted checksum flags.
442 static __rte_always_inline uint8_t
443 txq_ol_cksum_to_cs(struct rte_mbuf *buf)
446 uint8_t is_tunnel = !!(buf->ol_flags & PKT_TX_TUNNEL_MASK);
447 const uint64_t ol_flags_mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK |
448 PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM;
451 * The index should have:
452 * bit[0] = PKT_TX_TCP_SEG
453 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
454 * bit[4] = PKT_TX_IP_CKSUM
455 * bit[8] = PKT_TX_OUTER_IP_CKSUM
458 idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (!!is_tunnel << 9);
459 return mlx5_cksum_table[idx];
463 * Internal function to compute the number of used descriptors in an RX queue
469 * The number of used Rx descriptors.
472 rx_queue_count(struct mlx5_rxq_data *rxq)
474 struct rxq_zip *zip = &rxq->zip;
475 volatile struct mlx5_cqe *cqe;
476 const unsigned int cqe_n = (1 << rxq->cqe_n);
477 const unsigned int cqe_cnt = cqe_n - 1;
481 /* if we are processing a compressed cqe */
483 used = zip->cqe_cnt - zip->ca;
489 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
490 while (check_cqe(cqe, cqe_n, cq_ci) != MLX5_CQE_STATUS_HW_OWN) {
494 op_own = cqe->op_own;
495 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
496 n = rte_be_to_cpu_32(cqe->byte_cnt);
501 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
503 used = RTE_MIN(used, (1U << rxq->elts_n) - 1);
508 * DPDK callback to check the status of an Rx descriptor.
513 * The index of the descriptor in the ring.
516 * The status of the Rx descriptor.
519 mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
521 struct mlx5_rxq_data *rxq = rx_queue;
522 struct mlx5_rxq_ctrl *rxq_ctrl =
523 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
524 struct rte_eth_dev *dev = ETH_DEV(rxq_ctrl->priv);
526 if (dev->rx_pkt_burst != mlx5_rx_burst) {
530 if (offset >= (1 << rxq->elts_n)) {
534 if (offset < rx_queue_count(rxq))
535 return RTE_ETH_RX_DESC_DONE;
536 return RTE_ETH_RX_DESC_AVAIL;
540 * DPDK callback to get the number of used descriptors in an Rx queue.
543 * Pointer to the device structure.
549 * The number of used Rx descriptors.
550 * -EINVAL if the queue is invalid
553 mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
555 struct mlx5_priv *priv = dev->data->dev_private;
556 struct mlx5_rxq_data *rxq;
558 if (dev->rx_pkt_burst != mlx5_rx_burst) {
562 rxq = (*priv->rxqs)[rx_queue_id];
567 return rx_queue_count(rxq);
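/*
 * Usage note (illustrative, assuming the usual mlx5 dev_ops wiring): this
 * routine is installed as the rx_queue_count device operation, so
 * applications normally reach it through
 * rte_eth_rx_queue_count(port_id, rx_queue_id).
 */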
570 #define MLX5_SYSTEM_LOG_DIR "/var/log"
572 * Dump debug information to log file.
577 * If not NULL this string is printed as a header to the output
578 * and the output will be in hexadecimal view.
580 * This is the buffer address to print out.
582 * The number of bytes to dump out.
585 mlx5_dump_debug_information(const char *fname, const char *hex_title,
586 const void *buf, unsigned int hex_len)
590 MKSTR(path, "%s/%s", MLX5_SYSTEM_LOG_DIR, fname);
591 fd = fopen(path, "a+");
593 DRV_LOG(WARNING, "cannot open %s for debug dump", path);
594 MKSTR(path2, "./%s", fname);
595 fd = fopen(path2, "a+");
597 DRV_LOG(ERR, "cannot open %s for debug dump", path2);
600 DRV_LOG(INFO, "New debug dump in file %s", path2);
602 DRV_LOG(INFO, "New debug dump in file %s", path);
605 rte_hexdump(fd, hex_title, buf, hex_len);
607 fprintf(fd, "%s", (const char *)buf);
608 fprintf(fd, "\n\n\n");
613 * Move QP from error state to running state and initialize indexes.
616 * Pointer to TX queue control structure.
619 * 0 on success, else -1.
622 tx_recover_qp(struct mlx5_txq_ctrl *txq_ctrl)
624 struct mlx5_mp_arg_queue_state_modify sm = {
626 .queue_id = txq_ctrl->txq.idx,
629 if (mlx5_queue_state_modify(ETH_DEV(txq_ctrl->priv), &sm))
631 txq_ctrl->txq.wqe_ci = 0;
632 txq_ctrl->txq.wqe_pi = 0;
633 txq_ctrl->txq.elts_comp = 0;
637 /* Return 1 if the error CQE is already signed; otherwise, sign it and return 0. */
639 check_err_cqe_seen(volatile struct mlx5_err_cqe *err_cqe)
641 static const uint8_t magic[] = "seen";
645 for (i = 0; i < sizeof(magic); ++i)
646 if (!ret || err_cqe->rsvd1[i] != magic[i]) {
648 err_cqe->rsvd1[i] = magic[i];
657 * Pointer to TX queue structure.
659 * Pointer to the error CQE.
662 * A negative value if queue recovery failed, otherwise
663 * the error completion entry was handled successfully.
666 mlx5_tx_error_cqe_handle(struct mlx5_txq_data *restrict txq,
667 volatile struct mlx5_err_cqe *err_cqe)
669 if (err_cqe->syndrome != MLX5_CQE_SYNDROME_WR_FLUSH_ERR) {
670 const uint16_t wqe_m = ((1 << txq->wqe_n) - 1);
671 struct mlx5_txq_ctrl *txq_ctrl =
672 container_of(txq, struct mlx5_txq_ctrl, txq);
673 uint16_t new_wqe_pi = rte_be_to_cpu_16(err_cqe->wqe_counter);
674 int seen = check_err_cqe_seen(err_cqe);
676 if (!seen && txq_ctrl->dump_file_n <
677 txq_ctrl->priv->config.max_dump_files_num) {
678 MKSTR(err_str, "Unexpected CQE error syndrome "
679 "0x%02x CQN = %u SQN = %u wqe_counter = %u "
680 "wq_ci = %u cq_ci = %u", err_cqe->syndrome,
681 txq->cqe_s, txq->qp_num_8s >> 8,
682 rte_be_to_cpu_16(err_cqe->wqe_counter),
683 txq->wqe_ci, txq->cq_ci);
684 MKSTR(name, "dpdk_mlx5_port_%u_txq_%u_index_%u_%u",
685 PORT_ID(txq_ctrl->priv), txq->idx,
686 txq_ctrl->dump_file_n, (uint32_t)rte_rdtsc());
687 mlx5_dump_debug_information(name, NULL, err_str, 0);
688 mlx5_dump_debug_information(name, "MLX5 Error CQ:",
689 (const void *)((uintptr_t)
693 mlx5_dump_debug_information(name, "MLX5 Error SQ:",
694 (const void *)((uintptr_t)
698 txq_ctrl->dump_file_n++;
702 * Count errors in units of WQEs.
703 * Later this can be improved to count errored packets,
704 * for example, by parsing the SQ to find how many packets
705 * should be counted for each WQE.
707 txq->stats.oerrors += ((txq->wqe_ci & wqe_m) -
709 if (tx_recover_qp(txq_ctrl)) {
710 /* Recovering failed - retry later on the same WQE. */
713 /* Release all the remaining buffers. */
714 txq_free_elts(txq_ctrl);
720 * Translate RX completion flags to packet type.
723 * Pointer to RX queue structure.
727 * @note: update mlx5_dev_supported_ptypes_get() if any change is made here.
730 * Packet type for struct rte_mbuf.
732 static inline uint32_t
733 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
736 uint8_t pinfo = cqe->pkt_info;
737 uint16_t ptype = cqe->hdr_type_etc;
740 * The index to the array should have:
741 * bit[1:0] = l3_hdr_type
742 * bit[4:2] = l4_hdr_type
745 * bit[7] = outer_l3_type
747 idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10);
748 return mlx5_ptype_table[idx] | rxq->tunnel * !!(idx & (1 << 6));
752 * Initialize Rx WQ and indexes.
755 * Pointer to RX queue structure.
758 mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
760 const unsigned int wqe_n = 1 << rxq->elts_n;
763 for (i = 0; (i != wqe_n); ++i) {
764 volatile struct mlx5_wqe_data_seg *scat;
768 if (mlx5_rxq_mprq_enabled(rxq)) {
769 struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[i];
771 scat = &((volatile struct mlx5_wqe_mprq *)
773 addr = (uintptr_t)mlx5_mprq_buf_addr(buf,
774 1 << rxq->strd_num_n);
775 byte_count = (1 << rxq->strd_sz_n) *
776 (1 << rxq->strd_num_n);
778 struct rte_mbuf *buf = (*rxq->elts)[i];
780 scat = &((volatile struct mlx5_wqe_data_seg *)
782 addr = rte_pktmbuf_mtod(buf, uintptr_t);
783 byte_count = DATA_LEN(buf);
785 /* scat->addr must be able to store a pointer. */
786 assert(sizeof(scat->addr) >= sizeof(uintptr_t));
787 *scat = (struct mlx5_wqe_data_seg){
788 .addr = rte_cpu_to_be_64(addr),
789 .byte_count = rte_cpu_to_be_32(byte_count),
790 .lkey = mlx5_rx_addr2mr(rxq, addr),
793 rxq->consumed_strd = 0;
794 rxq->decompressed = 0;
796 rxq->zip = (struct rxq_zip){
799 /* Update doorbell counter. */
800 rxq->rq_ci = wqe_n >> rxq->sges_n;
802 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
806 * Modify a Verbs/DevX queue state.
807 * This must be called from the primary process.
810 * Pointer to Ethernet device.
812 * State modify request parameters.
815 * 0 in case of success else non-zero value and rte_errno is set.
818 mlx5_queue_state_modify_primary(struct rte_eth_dev *dev,
819 const struct mlx5_mp_arg_queue_state_modify *sm)
822 struct mlx5_priv *priv = dev->data->dev_private;
825 struct mlx5_rxq_data *rxq = (*priv->rxqs)[sm->queue_id];
826 struct mlx5_rxq_ctrl *rxq_ctrl =
827 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
829 if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV) {
830 struct ibv_wq_attr mod = {
831 .attr_mask = IBV_WQ_ATTR_STATE,
832 .wq_state = sm->state,
835 ret = mlx5_glue->modify_wq(rxq_ctrl->obj->wq, &mod);
836 } else { /* rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ. */
837 struct mlx5_devx_modify_rq_attr rq_attr;
839 memset(&rq_attr, 0, sizeof(rq_attr));
840 if (sm->state == IBV_WQS_RESET) {
841 rq_attr.rq_state = MLX5_RQC_STATE_ERR;
842 rq_attr.state = MLX5_RQC_STATE_RST;
843 } else if (sm->state == IBV_WQS_RDY) {
844 rq_attr.rq_state = MLX5_RQC_STATE_RST;
845 rq_attr.state = MLX5_RQC_STATE_RDY;
846 } else if (sm->state == IBV_WQS_ERR) {
847 rq_attr.rq_state = MLX5_RQC_STATE_RDY;
848 rq_attr.state = MLX5_RQC_STATE_ERR;
850 ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq,
854 DRV_LOG(ERR, "Cannot change Rx WQ state to %u - %s",
855 sm->state, strerror(errno));
860 struct mlx5_txq_data *txq = (*priv->txqs)[sm->queue_id];
861 struct mlx5_txq_ctrl *txq_ctrl =
862 container_of(txq, struct mlx5_txq_ctrl, txq);
863 struct ibv_qp_attr mod = {
864 .qp_state = IBV_QPS_RESET,
865 .port_num = (uint8_t)priv->ibv_port,
867 struct ibv_qp *qp = txq_ctrl->obj->qp;
869 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
871 DRV_LOG(ERR, "Cannot change the Tx QP state to RESET "
872 "%s", strerror(errno));
876 mod.qp_state = IBV_QPS_INIT;
877 ret = mlx5_glue->modify_qp(qp, &mod,
878 (IBV_QP_STATE | IBV_QP_PORT));
880 DRV_LOG(ERR, "Cannot change Tx QP state to INIT %s",
885 mod.qp_state = IBV_QPS_RTR;
886 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
888 DRV_LOG(ERR, "Cannot change Tx QP state to RTR %s",
893 mod.qp_state = IBV_QPS_RTS;
894 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
896 DRV_LOG(ERR, "Cannot change Tx QP state to RTS %s",
906 * Modify a Verbs queue state.
909 * Pointer to Ethernet device.
911 * State modify request parameters.
914 * 0 in case of success else non-zero value.
917 mlx5_queue_state_modify(struct rte_eth_dev *dev,
918 struct mlx5_mp_arg_queue_state_modify *sm)
922 switch (rte_eal_process_type()) {
923 case RTE_PROC_PRIMARY:
924 ret = mlx5_queue_state_modify_primary(dev, sm);
926 case RTE_PROC_SECONDARY:
927 ret = mlx5_mp_req_queue_state_modify(dev, sm);
937 * The function moves the RQ state to reset when the first error CQE is
938 * seen, then the CQ is drained by the caller's loop. When the CQ is empty,
939 * the RQ state is moved to ready and the RQ is reinitialized.
940 * Identifying the next CQEs and counting errors remain the caller's responsibility.
943 * Pointer to RX queue structure.
945 * 1 when called from a vectorized Rx burst - the mbufs for the RQ need to be prepared.
946 * 0 when called from non-vectorized Rx burst.
949 * -1 in case of recovery error, otherwise the CQE status.
952 mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
954 const uint16_t cqe_n = 1 << rxq->cqe_n;
955 const uint16_t cqe_mask = cqe_n - 1;
956 const unsigned int wqe_n = 1 << rxq->elts_n;
957 struct mlx5_rxq_ctrl *rxq_ctrl =
958 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
960 volatile struct mlx5_cqe *cqe;
961 volatile struct mlx5_err_cqe *err_cqe;
963 .cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask],
965 struct mlx5_mp_arg_queue_state_modify sm;
968 switch (rxq->err_state) {
969 case MLX5_RXQ_ERR_STATE_NO_ERROR:
970 rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_RESET;
972 case MLX5_RXQ_ERR_STATE_NEED_RESET:
974 sm.queue_id = rxq->idx;
975 sm.state = IBV_WQS_RESET;
976 if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv), &sm))
978 if (rxq_ctrl->dump_file_n <
979 rxq_ctrl->priv->config.max_dump_files_num) {
980 MKSTR(err_str, "Unexpected CQE error syndrome "
981 "0x%02x CQN = %u RQN = %u wqe_counter = %u"
982 " rq_ci = %u cq_ci = %u", u.err_cqe->syndrome,
983 rxq->cqn, rxq_ctrl->wqn,
984 rte_be_to_cpu_16(u.err_cqe->wqe_counter),
985 rxq->rq_ci << rxq->sges_n, rxq->cq_ci);
986 MKSTR(name, "dpdk_mlx5_port_%u_rxq_%u_%u",
987 rxq->port_id, rxq->idx, (uint32_t)rte_rdtsc());
988 mlx5_dump_debug_information(name, NULL, err_str, 0);
989 mlx5_dump_debug_information(name, "MLX5 Error CQ:",
990 (const void *)((uintptr_t)
992 sizeof(*u.cqe) * cqe_n);
993 mlx5_dump_debug_information(name, "MLX5 Error RQ:",
994 (const void *)((uintptr_t)
997 rxq_ctrl->dump_file_n++;
999 rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_READY;
1001 case MLX5_RXQ_ERR_STATE_NEED_READY:
1002 ret = check_cqe(u.cqe, cqe_n, rxq->cq_ci);
1003 if (ret == MLX5_CQE_STATUS_HW_OWN) {
1005 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1008 * The RQ consumer index must be zeroed while moving
1009 * from RESET state to RDY state.
1011 *rxq->rq_db = rte_cpu_to_be_32(0);
1014 sm.queue_id = rxq->idx;
1015 sm.state = IBV_WQS_RDY;
1016 if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv),
1020 const uint16_t q_mask = wqe_n - 1;
1022 struct rte_mbuf **elt;
1024 unsigned int n = wqe_n - (rxq->rq_ci -
1027 for (i = 0; i < (int)n; ++i) {
1028 elt_idx = (rxq->rq_ci + i) & q_mask;
1029 elt = &(*rxq->elts)[elt_idx];
1030 *elt = rte_mbuf_raw_alloc(rxq->mp);
1032 for (i--; i >= 0; --i) {
1033 elt_idx = (rxq->rq_ci +
1037 rte_pktmbuf_free_seg
1043 for (i = 0; i < (int)wqe_n; ++i) {
1044 elt = &(*rxq->elts)[i];
1046 (uint16_t)((*elt)->buf_len -
1047 rte_pktmbuf_headroom(*elt));
1049 /* Padding with a fake mbuf for vec Rx. */
1050 for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
1051 (*rxq->elts)[wqe_n + i] =
1054 mlx5_rxq_initialize(rxq);
1055 rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
1064 * Get the size of the next packet for a given CQE. For compressed CQEs, the
1065 * consumer index is updated only once all packets of the current one have been processed.
1069 * Pointer to RX queue.
1073 * Store a pointer to the mini-CQE if compressed. Otherwise, the pointer is not written.
1077 * 0 in case of empty CQE, otherwise the packet size in bytes.
1080 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
1081 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe)
1083 struct rxq_zip *zip = &rxq->zip;
1084 uint16_t cqe_n = cqe_cnt + 1;
1090 /* Process compressed data in the CQE and mini arrays. */
1092 volatile struct mlx5_mini_cqe8 (*mc)[8] =
1093 (volatile struct mlx5_mini_cqe8 (*)[8])
1094 (uintptr_t)(&(*rxq->cqes)[zip->ca &
1097 len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt);
1098 *mcqe = &(*mc)[zip->ai & 7];
1099 if ((++zip->ai & 7) == 0) {
1100 /* Invalidate consumed CQEs */
1103 while (idx != end) {
1104 (*rxq->cqes)[idx & cqe_cnt].op_own =
1105 MLX5_CQE_INVALIDATE;
1109 * Increment consumer index to skip the number
1110 * of CQEs consumed. Hardware leaves holes in
1111 * the CQ ring for software use.
1116 if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) {
1117 /* Invalidate the rest */
1121 while (idx != end) {
1122 (*rxq->cqes)[idx & cqe_cnt].op_own =
1123 MLX5_CQE_INVALIDATE;
1126 rxq->cq_ci = zip->cq_ci;
1130 * No compressed data, get next CQE and verify if it is
1137 ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
1138 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
1139 if (unlikely(ret == MLX5_CQE_STATUS_ERR ||
1141 ret = mlx5_rx_err_handle(rxq, 0);
1142 if (ret == MLX5_CQE_STATUS_HW_OWN ||
1150 op_own = cqe->op_own;
1151 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) {
1152 volatile struct mlx5_mini_cqe8 (*mc)[8] =
1153 (volatile struct mlx5_mini_cqe8 (*)[8])
1154 (uintptr_t)(&(*rxq->cqes)
1158 /* Fix endianness. */
1159 zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
1161 * Current mini array position is the one
1162 * returned by check_cqe64().
1164 * If completion comprises several mini arrays,
1165 * as a special case the second one is located
1166 * 7 CQEs after the initial CQE instead of 8
1167 * for subsequent ones.
1169 zip->ca = rxq->cq_ci;
1170 zip->na = zip->ca + 7;
1171 /* Compute the next non compressed CQE. */
1173 zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
1174 /* Get packet size to return. */
1175 len = rte_be_to_cpu_32((*mc)[0].byte_cnt);
1178 /* Prefetch all to be invalidated */
1181 while (idx != end) {
1182 rte_prefetch0(&(*rxq->cqes)[(idx) &
1187 len = rte_be_to_cpu_32(cqe->byte_cnt);
1190 if (unlikely(rxq->err_state)) {
1191 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1192 ++rxq->stats.idropped;
1200 * Translate RX completion flags to offload flags.
1206 * Offload flags (ol_flags) for struct rte_mbuf.
1208 static inline uint32_t
1209 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe)
1211 uint32_t ol_flags = 0;
1212 uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);
1216 MLX5_CQE_RX_L3_HDR_VALID,
1217 PKT_RX_IP_CKSUM_GOOD) |
1219 MLX5_CQE_RX_L4_HDR_VALID,
1220 PKT_RX_L4_CKSUM_GOOD);
1225 * Fill in mbuf fields from RX completion flags.
1226 * Note that pkt->ol_flags should be initialized outside of this function.
1229 * Pointer to RX queue.
1234 * @param rss_hash_res
1235 * Packet RSS Hash result.
1238 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
1239 volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res)
1241 /* Update packet information. */
1242 pkt->packet_type = rxq_cq_to_pkt_type(rxq, cqe);
1243 if (rss_hash_res && rxq->rss_hash) {
1244 pkt->hash.rss = rss_hash_res;
1245 pkt->ol_flags |= PKT_RX_RSS_HASH;
1247 if (rxq->mark && MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) {
1248 pkt->ol_flags |= PKT_RX_FDIR;
1249 if (cqe->sop_drop_qpn !=
1250 rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) {
1251 uint32_t mark = cqe->sop_drop_qpn;
1253 pkt->ol_flags |= PKT_RX_FDIR_ID;
1254 pkt->hash.fdir.hi = mlx5_flow_mark_get(mark);
1257 if (rte_flow_dynf_metadata_avail() && cqe->flow_table_metadata) {
1258 pkt->ol_flags |= PKT_RX_DYNF_METADATA;
1259 *RTE_FLOW_DYNF_METADATA(pkt) = cqe->flow_table_metadata;
1262 pkt->ol_flags |= rxq_cq_to_ol_flags(cqe);
1263 if (rxq->vlan_strip &&
1264 (cqe->hdr_type_etc & rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) {
1265 pkt->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1266 pkt->vlan_tci = rte_be_to_cpu_16(cqe->vlan_info);
1268 if (rxq->hw_timestamp) {
1269 pkt->timestamp = rte_be_to_cpu_64(cqe->timestamp);
1270 pkt->ol_flags |= PKT_RX_TIMESTAMP;
1275 * DPDK callback for RX.
1278 * Generic pointer to RX queue structure.
1280 * Array to store received packets.
1282 * Maximum number of packets in array.
1285 * Number of packets successfully received (<= pkts_n).
1288 mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1290 struct mlx5_rxq_data *rxq = dpdk_rxq;
1291 const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1;
1292 const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1;
1293 const unsigned int sges_n = rxq->sges_n;
1294 struct rte_mbuf *pkt = NULL;
1295 struct rte_mbuf *seg = NULL;
1296 volatile struct mlx5_cqe *cqe =
1297 &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1299 unsigned int rq_ci = rxq->rq_ci << sges_n;
1300 int len = 0; /* keep its value across iterations. */
1303 unsigned int idx = rq_ci & wqe_cnt;
1304 volatile struct mlx5_wqe_data_seg *wqe =
1305 &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx];
1306 struct rte_mbuf *rep = (*rxq->elts)[idx];
1307 volatile struct mlx5_mini_cqe8 *mcqe = NULL;
1308 uint32_t rss_hash_res;
1316 rep = rte_mbuf_raw_alloc(rxq->mp);
1317 if (unlikely(rep == NULL)) {
1318 ++rxq->stats.rx_nombuf;
1321 * no buffers before we even started,
1322 * bail out silently.
1326 while (pkt != seg) {
1327 assert(pkt != (*rxq->elts)[idx]);
1331 rte_mbuf_raw_free(pkt);
1337 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1338 len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe);
1340 rte_mbuf_raw_free(rep);
1344 assert(len >= (rxq->crc_present << 2));
1345 pkt->ol_flags &= EXT_ATTACHED_MBUF;
1346 /* If compressed, take hash result from mini-CQE. */
1347 rss_hash_res = rte_be_to_cpu_32(mcqe == NULL ?
1349 mcqe->rx_hash_result);
1350 rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
1351 if (rxq->crc_present)
1352 len -= RTE_ETHER_CRC_LEN;
1354 if (cqe->lro_num_seg > 1) {
1356 (rte_pktmbuf_mtod(pkt, uint8_t *), cqe,
1358 pkt->ol_flags |= PKT_RX_LRO;
1359 pkt->tso_segsz = len / cqe->lro_num_seg;
1362 DATA_LEN(rep) = DATA_LEN(seg);
1363 PKT_LEN(rep) = PKT_LEN(seg);
1364 SET_DATA_OFF(rep, DATA_OFF(seg));
1365 PORT(rep) = PORT(seg);
1366 (*rxq->elts)[idx] = rep;
1368 * Fill NIC descriptor with the new buffer. The lkey and size
1369 * of the buffers are already known, only the buffer address changes.
1372 wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
1373 /* If there's only one MR, no need to replace LKey in WQE. */
1374 if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
1375 wqe->lkey = mlx5_rx_mb2mr(rxq, rep);
1376 if (len > DATA_LEN(seg)) {
1377 len -= DATA_LEN(seg);
1382 DATA_LEN(seg) = len;
1383 #ifdef MLX5_PMD_SOFT_COUNTERS
1384 /* Increment bytes counter. */
1385 rxq->stats.ibytes += PKT_LEN(pkt);
1387 /* Return packet. */
1392 /* Align consumer index to the next stride. */
1397 if (unlikely((i == 0) && ((rq_ci >> sges_n) == rxq->rq_ci)))
1399 /* Update the consumer index. */
1400 rxq->rq_ci = rq_ci >> sges_n;
1402 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1404 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
1405 #ifdef MLX5_PMD_SOFT_COUNTERS
1406 /* Increment packets counter. */
1407 rxq->stats.ipackets += i;
1413 * Update LRO packet TCP header.
1414 * The HW LRO feature doesn't update the TCP header after coalescing the
1415 * TCP segments but supplies information in the CQE for SW to fill it in.
1418 * Pointer to the TCP header.
1420 * Pointer to the completion entry.
1422 * The L3 pseudo-header checksum.
1425 mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *restrict tcp,
1426 volatile struct mlx5_cqe *restrict cqe,
1429 uint8_t l4_type = (rte_be_to_cpu_16(cqe->hdr_type_etc) &
1430 MLX5_CQE_L4_TYPE_MASK) >> MLX5_CQE_L4_TYPE_SHIFT;
1432 * The HW calculates only the TCP payload checksum; the TCP header
1433 * checksum and the L3 pseudo-header checksum need to be completed by SW.
1435 uint32_t csum = phcsum + cqe->csum;
1437 if (l4_type == MLX5_L4_HDR_TYPE_TCP_EMPTY_ACK ||
1438 l4_type == MLX5_L4_HDR_TYPE_TCP_WITH_ACL) {
1439 tcp->tcp_flags |= RTE_TCP_ACK_FLAG;
1440 tcp->recv_ack = cqe->lro_ack_seq_num;
1441 tcp->rx_win = cqe->lro_tcp_win;
1443 if (cqe->lro_tcppsh_abort_dupack & MLX5_CQE_LRO_PUSH_MASK)
1444 tcp->tcp_flags |= RTE_TCP_PSH_FLAG;
1446 csum += rte_raw_cksum(tcp, (tcp->data_off & 0xF) * 4);
1447 csum = ((csum & 0xffff0000) >> 16) + (csum & 0xffff);
1448 csum = (~csum) & 0xffff;
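	/*
	 * Worked example (illustrative): if the accumulated 32-bit sum is
	 * 0x0002fffd, folding gives 0x0002 + 0xfffd = 0xffff and the final
	 * one's complement is 0x0000, which would then be written back as
	 * the TCP header checksum.
	 */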
1455 * Update LRO packet headers.
1456 * The HW LRO feature doesn't update the L3/TCP headers after coalescing the
1457 * TCP segments but supplies information in the CQE for SW to fill them in.
1460 * The packet address.
1462 * Pointer to the completion entry.
1464 * The packet length.
1467 mlx5_lro_update_hdr(uint8_t *restrict padd,
1468 volatile struct mlx5_cqe *restrict cqe,
1472 struct rte_ether_hdr *eth;
1473 struct rte_vlan_hdr *vlan;
1474 struct rte_ipv4_hdr *ipv4;
1475 struct rte_ipv6_hdr *ipv6;
1476 struct rte_tcp_hdr *tcp;
1481 uint16_t proto = h.eth->ether_type;
1485 while (proto == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
1486 proto == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
1487 proto = h.vlan->eth_proto;
1490 if (proto == RTE_BE16(RTE_ETHER_TYPE_IPV4)) {
1491 h.ipv4->time_to_live = cqe->lro_min_ttl;
1492 h.ipv4->total_length = rte_cpu_to_be_16(len - (h.hdr - padd));
1493 h.ipv4->hdr_checksum = 0;
1494 h.ipv4->hdr_checksum = rte_ipv4_cksum(h.ipv4);
1495 phcsum = rte_ipv4_phdr_cksum(h.ipv4, 0);
1498 h.ipv6->hop_limits = cqe->lro_min_ttl;
1499 h.ipv6->payload_len = rte_cpu_to_be_16(len - (h.hdr - padd) -
1501 phcsum = rte_ipv6_phdr_cksum(h.ipv6, 0);
1504 mlx5_lro_update_tcp_hdr(h.tcp, cqe, phcsum);
1508 mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
1510 struct mlx5_mprq_buf *buf = opaque;
1512 if (rte_atomic16_read(&buf->refcnt) == 1) {
1513 rte_mempool_put(buf->mp, buf);
1514 } else if (rte_atomic16_add_return(&buf->refcnt, -1) == 0) {
1515 rte_atomic16_set(&buf->refcnt, 1);
1516 rte_mempool_put(buf->mp, buf);
1521 mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf)
1523 mlx5_mprq_buf_free_cb(NULL, buf);
1527 mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx,
1528 const unsigned int strd_n)
1530 struct mlx5_mprq_buf *rep = rxq->mprq_repl;
1531 volatile struct mlx5_wqe_data_seg *wqe =
1532 &((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg;
1535 assert(rep != NULL);
1536 /* Replace MPRQ buf. */
1537 (*rxq->mprq_bufs)[rq_idx] = rep;
1539 addr = mlx5_mprq_buf_addr(rep, strd_n);
1540 wqe->addr = rte_cpu_to_be_64((uintptr_t)addr);
1541 /* If there's only one MR, no need to replace LKey in WQE. */
1542 if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
1543 wqe->lkey = mlx5_rx_addr2mr(rxq, (uintptr_t)addr);
1544 /* Stash a mbuf for next replacement. */
1545 if (likely(!rte_mempool_get(rxq->mprq_mp, (void **)&rep)))
1546 rxq->mprq_repl = rep;
1548 rxq->mprq_repl = NULL;
1552 * DPDK callback for RX with Multi-Packet RQ support.
1555 * Generic pointer to RX queue structure.
1557 * Array to store received packets.
1559 * Maximum number of packets in array.
1562 * Number of packets successfully received (<= pkts_n).
1565 mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1567 struct mlx5_rxq_data *rxq = dpdk_rxq;
1568 const unsigned int strd_n = 1 << rxq->strd_num_n;
1569 const unsigned int strd_sz = 1 << rxq->strd_sz_n;
1570 const unsigned int strd_shift =
1571 MLX5_MPRQ_STRIDE_SHIFT_BYTE * rxq->strd_shift_en;
1572 const unsigned int cq_mask = (1 << rxq->cqe_n) - 1;
1573 const unsigned int wq_mask = (1 << rxq->elts_n) - 1;
1574 volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
1576 uint32_t rq_ci = rxq->rq_ci;
1577 uint16_t consumed_strd = rxq->consumed_strd;
1578 uint16_t headroom_sz = rxq->strd_headroom_en * RTE_PKTMBUF_HEADROOM;
1579 struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
1581 while (i < pkts_n) {
1582 struct rte_mbuf *pkt;
1590 volatile struct mlx5_mini_cqe8 *mcqe = NULL;
1591 uint32_t rss_hash_res = 0;
1592 uint8_t lro_num_seg;
1594 if (consumed_strd == strd_n) {
1595 /* Replace WQE only if the buffer is still in use. */
1596 if (rte_atomic16_read(&buf->refcnt) > 1) {
1597 mprq_buf_replace(rxq, rq_ci & wq_mask, strd_n);
1598 /* Release the old buffer. */
1599 mlx5_mprq_buf_free(buf);
1600 } else if (unlikely(rxq->mprq_repl == NULL)) {
1601 struct mlx5_mprq_buf *rep;
1604 * Currently, the MPRQ mempool is out of buffers
1605 * and memcpy is done regardless of the size of the Rx
1606 * packet. Retry allocation to get back to normal operation.
1609 if (!rte_mempool_get(rxq->mprq_mp,
1611 rxq->mprq_repl = rep;
1613 /* Advance to the next WQE. */
1616 buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
1618 cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
1619 ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe);
1623 strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
1624 MLX5_MPRQ_STRIDE_NUM_SHIFT;
1626 consumed_strd += strd_cnt;
1627 if (byte_cnt & MLX5_MPRQ_FILLER_MASK)
1630 rss_hash_res = rte_be_to_cpu_32(cqe->rx_hash_res);
1631 strd_idx = rte_be_to_cpu_16(cqe->wqe_counter);
1633 /* mini-CQE for MPRQ doesn't have hash result. */
1634 strd_idx = rte_be_to_cpu_16(mcqe->stride_idx);
1636 assert(strd_idx < strd_n);
1637 assert(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) & wq_mask));
1638 lro_num_seg = cqe->lro_num_seg;
1640 * Currently configured to receive one packet per stride. But if
1641 * the MTU is adjusted through the kernel interface, the device could
1642 * consume multiple strides without raising an error. In this
1643 * case, the packet should be dropped because it is bigger than
1644 * max_rx_pkt_len.
1646 if (unlikely(!lro_num_seg && strd_cnt > 1)) {
1647 ++rxq->stats.idropped;
1650 pkt = rte_pktmbuf_alloc(rxq->mp);
1651 if (unlikely(pkt == NULL)) {
1652 ++rxq->stats.rx_nombuf;
1655 len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
1656 assert((int)len >= (rxq->crc_present << 2));
1657 if (rxq->crc_present)
1658 len -= RTE_ETHER_CRC_LEN;
1659 offset = strd_idx * strd_sz + strd_shift;
1660 addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf, strd_n), offset);
1662 * Memcpy packets to the target mbuf if:
1663 * - The packet size is smaller than mprq_max_memcpy_len.
1664 * - The mempool for Multi-Packet RQ is out of buffers.
1666 if (len <= rxq->mprq_max_memcpy_len || rxq->mprq_repl == NULL) {
1668 * When memcpy'ing the packet due to an out-of-buffer condition, the
1669 * packet must be smaller than the target mbuf.
1671 if (unlikely(rte_pktmbuf_tailroom(pkt) < len)) {
1672 rte_pktmbuf_free_seg(pkt);
1673 ++rxq->stats.idropped;
1676 rte_memcpy(rte_pktmbuf_mtod(pkt, void *), addr, len);
1677 DATA_LEN(pkt) = len;
1679 rte_iova_t buf_iova;
1680 struct rte_mbuf_ext_shared_info *shinfo;
1681 uint16_t buf_len = strd_cnt * strd_sz;
1684 /* Increment the refcnt of the whole chunk. */
1685 rte_atomic16_add_return(&buf->refcnt, 1);
1686 assert((uint16_t)rte_atomic16_read(&buf->refcnt) <=
1688 buf_addr = RTE_PTR_SUB(addr, headroom_sz);
1690 * The MLX5 device doesn't use the iova itself, but it is necessary in
1691 * case the Rx packet is transmitted via a different PMD.
1694 buf_iova = rte_mempool_virt2iova(buf) +
1695 RTE_PTR_DIFF(buf_addr, buf);
1696 shinfo = &buf->shinfos[strd_idx];
1697 rte_mbuf_ext_refcnt_set(shinfo, 1);
1699 * EXT_ATTACHED_MBUF will be set to pkt->ol_flags when
1700 * attaching the stride to mbuf and more offload flags
1701 * will be added below by calling rxq_cq_to_mbuf().
1702 * Other fields will be overwritten.
1704 rte_pktmbuf_attach_extbuf(pkt, buf_addr, buf_iova,
1706 /* Set mbuf head-room. */
1707 pkt->data_off = headroom_sz;
1708 assert(pkt->ol_flags == EXT_ATTACHED_MBUF);
1710 * Prevent potential overflow due to MTU change through the kernel interface.
1713 if (unlikely(rte_pktmbuf_tailroom(pkt) < len)) {
1714 rte_pktmbuf_free_seg(pkt);
1715 ++rxq->stats.idropped;
1718 DATA_LEN(pkt) = len;
1720 * An LRO packet may consume all the stride memory; in this
1721 * case the packet head-room space is not guaranteed, so an
1722 * empty mbuf must be added for the head-room.
1724 if (!rxq->strd_headroom_en) {
1725 struct rte_mbuf *headroom_mbuf =
1726 rte_pktmbuf_alloc(rxq->mp);
1728 if (unlikely(headroom_mbuf == NULL)) {
1729 rte_pktmbuf_free_seg(pkt);
1730 ++rxq->stats.rx_nombuf;
1733 PORT(pkt) = rxq->port_id;
1734 NEXT(headroom_mbuf) = pkt;
1735 pkt = headroom_mbuf;
1739 rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
1740 if (lro_num_seg > 1) {
1741 mlx5_lro_update_hdr(addr, cqe, len);
1742 pkt->ol_flags |= PKT_RX_LRO;
1743 pkt->tso_segsz = strd_sz;
1746 PORT(pkt) = rxq->port_id;
1747 #ifdef MLX5_PMD_SOFT_COUNTERS
1748 /* Increment bytes counter. */
1749 rxq->stats.ibytes += PKT_LEN(pkt);
1751 /* Return packet. */
1755 /* Update the consumer indexes. */
1756 rxq->consumed_strd = consumed_strd;
1758 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1759 if (rq_ci != rxq->rq_ci) {
1762 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
1764 #ifdef MLX5_PMD_SOFT_COUNTERS
1765 /* Increment packets counter. */
1766 rxq->stats.ipackets += i;
1772 * Dummy DPDK callback for TX.
1774 * This function is used to temporarily replace the real callback during
1775 * unsafe control operations on the queue, or in case of error.
1778 * Generic pointer to TX queue structure.
1780 * Packets to transmit.
1782 * Number of packets in array.
1785 * Number of packets successfully transmitted (<= pkts_n).
1788 removed_tx_burst(void *dpdk_txq __rte_unused,
1789 struct rte_mbuf **pkts __rte_unused,
1790 uint16_t pkts_n __rte_unused)
1797 * Dummy DPDK callback for RX.
1799 * This function is used to temporarily replace the real callback during
1800 * unsafe control operations on the queue, or in case of error.
1803 * Generic pointer to RX queue structure.
1805 * Array to store received packets.
1807 * Maximum number of packets in array.
1810 * Number of packets successfully received (<= pkts_n).
1813 removed_rx_burst(void *dpdk_rxq __rte_unused,
1814 struct rte_mbuf **pkts __rte_unused,
1815 uint16_t pkts_n __rte_unused)
1822 * Vectorized Rx/Tx routines are not compiled in when required vector
1823 * instructions are not supported on a target architecture. The following null
1824 * stubs are needed for linkage when those are not included outside of this file
1825 * (e.g. mlx5_rxtx_vec_sse.c for x86).
1829 mlx5_rx_burst_vec(void *dpdk_rxq __rte_unused,
1830 struct rte_mbuf **pkts __rte_unused,
1831 uint16_t pkts_n __rte_unused)
1837 mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)
1843 mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused)
1849 * Free the mbufs from the linear array of pointers.
1852 * Pointer to the array of packets to be freed.
1854 * Number of packets to be freed.
1856 * Configured Tx offloads mask. It is fully defined at
1857 * compile time and may be used for optimization.
1859 static __rte_always_inline void
1860 mlx5_tx_free_mbuf(struct rte_mbuf **restrict pkts,
1861 unsigned int pkts_n,
1862 unsigned int olx __rte_unused)
1864 struct rte_mempool *pool = NULL;
1865 struct rte_mbuf **p_free = NULL;
1866 struct rte_mbuf *mbuf;
1867 unsigned int n_free = 0;
1870 * The implemented algorithm eliminates
1871 * copying pointers to a temporary array
1872 * for rte_mempool_put_bulk() calls.
1879 * Decrement mbuf reference counter, detach
1880 * indirect and external buffers if needed.
1882 mbuf = rte_pktmbuf_prefree_seg(*pkts);
1883 if (likely(mbuf != NULL)) {
1884 assert(mbuf == *pkts);
1885 if (likely(n_free != 0)) {
1886 if (unlikely(pool != mbuf->pool))
1887 /* From different pool. */
1890 /* Start new scan array. */
1897 if (unlikely(pkts_n == 0)) {
1903 * This happens if the mbuf is still referenced.
1904 * We can't put it back to the pool, so skip it.
1908 if (unlikely(n_free != 0))
1909 /* There is some array to free.*/
1911 if (unlikely(pkts_n == 0))
1912 /* Last mbuf, nothing to free. */
1918 * This loop is implemented to avoid multiple
1919 * inlining of rte_mempool_put_bulk().
1925 * Free the array of pre-freed mbufs
1926 * belonging to the same memory pool.
1928 rte_mempool_put_bulk(pool, (void *)p_free, n_free);
1929 if (unlikely(mbuf != NULL)) {
1930 /* There is the request to start new scan. */
1935 if (likely(pkts_n != 0))
1938 * This is the last mbuf to be freed.
1939 * Do one more loop iteration to complete.
1940 * This is a rare case of the last unique mbuf.
1945 if (likely(pkts_n == 0))
1954 * Free the mbufs from the elts ring buffer up to the new tail.
1957 * Pointer to Tx queue structure.
1959 * Index in elts to free up to, becomes new elts tail.
1961 * Configured Tx offloads mask. It is fully defined at
1962 * compile time and may be used for optimization.
1964 static __rte_always_inline void
1965 mlx5_tx_free_elts(struct mlx5_txq_data *restrict txq,
1967 unsigned int olx __rte_unused)
1969 uint16_t n_elts = tail - txq->elts_tail;
1972 assert(n_elts <= txq->elts_s);
1974 * Implement a loop to support ring buffer wraparound
1975 * with single inlining of mlx5_tx_free_mbuf().
1980 part = txq->elts_s - (txq->elts_tail & txq->elts_m);
1981 part = RTE_MIN(part, n_elts);
1983 assert(part <= txq->elts_s);
1984 mlx5_tx_free_mbuf(&txq->elts[txq->elts_tail & txq->elts_m],
1986 txq->elts_tail += part;
1992 * Store the mbuf being sent into elts ring buffer.
1993 * On Tx completion these mbufs will be freed.
1996 * Pointer to Tx queue structure.
1998 * Pointer to array of packets to be stored.
2000 * Number of packets to be stored.
2002 * Configured Tx offloads mask. It is fully defined at
2003 * compile time and may be used for optimization.
2005 static __rte_always_inline void
2006 mlx5_tx_copy_elts(struct mlx5_txq_data *restrict txq,
2007 struct rte_mbuf **restrict pkts,
2008 unsigned int pkts_n,
2009 unsigned int olx __rte_unused)
2012 struct rte_mbuf **elts = (struct rte_mbuf **)txq->elts;
2016 part = txq->elts_s - (txq->elts_head & txq->elts_m);
2018 assert(part <= txq->elts_s);
2019 /* This code is a good candidate for vectorizing with SIMD. */
2020 rte_memcpy((void *)(elts + (txq->elts_head & txq->elts_m)),
2022 RTE_MIN(part, pkts_n) * sizeof(struct rte_mbuf *));
2023 txq->elts_head += pkts_n;
2024 if (unlikely(part < pkts_n))
2025 /* The copy is wrapping around the elts array. */
2026 rte_memcpy((void *)elts, (void *)(pkts + part),
2027 (pkts_n - part) * sizeof(struct rte_mbuf *));
2031 * Update completion queue consuming index via doorbell
2032 * and flush the completed data buffers.
2035 * Pointer to TX queue structure.
2036 * @param last_cqe
2037 * Valid CQE pointer - if not NULL, update txq->wqe_pi and flush the buffers.
2039 * Configured Tx offloads mask. It is fully defined at
2040 * compile time and may be used for optimization.
2042 static __rte_always_inline void
2043 mlx5_tx_comp_flush(struct mlx5_txq_data *restrict txq,
2044 volatile struct mlx5_cqe *last_cqe,
2045 unsigned int olx __rte_unused)
2047 if (likely(last_cqe != NULL)) {
2050 txq->wqe_pi = rte_be_to_cpu_16(last_cqe->wqe_counter);
2051 tail = txq->fcqs[(txq->cq_ci - 1) & txq->cqe_m];
2052 if (likely(tail != txq->elts_tail)) {
2053 mlx5_tx_free_elts(txq, tail, olx);
2054 assert(tail == txq->elts_tail);
2060 * Manage Tx completions. This routine checks the CQ for
2061 * arrived CQEs, deduces the last completed WQE in the SQ,
2062 * updates the SQ producer index and frees all completed mbufs.
2065 * Pointer to TX queue structure.
2067 * Configured Tx offloads mask. It is fully defined at
2068 * compile time and may be used for optimization.
2070 * NOTE: not inlined intentionally, it makes the tx_burst
2071 * routine smaller, simpler and faster - from experiments.
2074 mlx5_tx_handle_completion(struct mlx5_txq_data *restrict txq,
2075 unsigned int olx __rte_unused)
2077 unsigned int count = MLX5_TX_COMP_MAX_CQE;
2078 volatile struct mlx5_cqe *last_cqe = NULL;
2079 uint16_t ci = txq->cq_ci;
2082 static_assert(MLX5_CQE_STATUS_HW_OWN < 0, "Must be negative value");
2083 static_assert(MLX5_CQE_STATUS_SW_OWN < 0, "Must be negative value");
2085 volatile struct mlx5_cqe *cqe;
2087 cqe = &txq->cqes[ci & txq->cqe_m];
2088 ret = check_cqe(cqe, txq->cqe_s, ci);
2089 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
2090 if (likely(ret != MLX5_CQE_STATUS_ERR)) {
2091 /* No new CQEs in completion queue. */
2092 assert(ret == MLX5_CQE_STATUS_HW_OWN);
2096 * Some error occurred, try to restart.
2097 * We have no barrier after the WQE-related doorbell
2098 * is written, so make sure all writes are completed
2099 * here before we might perform the SQ reset.
2103 ret = mlx5_tx_error_cqe_handle
2104 (txq, (volatile struct mlx5_err_cqe *)cqe);
2105 if (unlikely(ret < 0)) {
2107 * Some error occurred during queue error
2108 * handling - we do not advance the index
2109 * here, allowing a retry on the next call.
2114 * We are going to fetch all entries with
2115 * MLX5_CQE_SYNDROME_WR_FLUSH_ERR status.
2116 * The send queue is supposed to be empty.
2123 /* Normal transmit completion. */
2124 assert(ci != txq->cq_pi);
2125 assert((txq->fcqs[ci & txq->cqe_m] >> 16) == cqe->wqe_counter);
2129 * We have to restrict the amount of processed CQEs
2130 * in one tx_burst routine call. The CQ may be large
2131 * and many CQEs may be updated by the NIC in one
2132 * transaction. Freeing buffers is time consuming -
2133 * multiple iterations may introduce significant latency.
2136 if (likely(--count == 0))
2139 if (likely(ci != txq->cq_ci)) {
2141 * Update completion queue consuming index
2142 * and ring doorbell to notify hardware.
2144 rte_compiler_barrier();
2146 *txq->cq_db = rte_cpu_to_be_32(ci);
2147 mlx5_tx_comp_flush(txq, last_cqe, olx);
2152 * Check if the completion request flag should be set in the last WQE.
2153 * Both pushed mbufs and WQEs are monitored and the completion request
2154 * flag is set if any of the thresholds is reached.
2157 * Pointer to TX queue structure.
2159 * Pointer to burst routine local context.
2161 * Configured Tx offloads mask. It is fully defined at
2162 * compile time and may be used for optimization.
2164 static __rte_always_inline void
2165 mlx5_tx_request_completion(struct mlx5_txq_data *restrict txq,
2166 struct mlx5_txq_local *restrict loc,
2169 uint16_t head = txq->elts_head;
2172 part = MLX5_TXOFF_CONFIG(INLINE) ?
2173 0 : loc->pkts_sent - loc->pkts_copy;
2175 if ((uint16_t)(head - txq->elts_comp) >= MLX5_TX_COMP_THRESH ||
2176 (MLX5_TXOFF_CONFIG(INLINE) &&
2177 (uint16_t)(txq->wqe_ci - txq->wqe_comp) >= txq->wqe_thres)) {
2178 volatile struct mlx5_wqe *last = loc->wqe_last;
2180 txq->elts_comp = head;
2181 if (MLX5_TXOFF_CONFIG(INLINE))
2182 txq->wqe_comp = txq->wqe_ci;
2183 /* Request unconditional completion on last WQE. */
2184 last->cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
2185 MLX5_COMP_MODE_OFFSET);
2186 /* Save elts_head in the dedicated free-on-completion queue. */
2188 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head;
2190 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head |
2191 (last->cseg.opcode >> 8) << 16;
2193 /* A CQE slot must always be available. */
2194 assert((txq->cq_pi - txq->cq_ci) <= txq->cqe_s);
2199 * DPDK callback to check the status of a tx descriptor.
2204 * The index of the descriptor in the ring.
2207 * The status of the tx descriptor.
2210 mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
2212 struct mlx5_txq_data *restrict txq = tx_queue;
2215 mlx5_tx_handle_completion(txq, 0);
2216 used = txq->elts_head - txq->elts_tail;
2218 return RTE_ETH_TX_DESC_FULL;
2219 return RTE_ETH_TX_DESC_DONE;
2223 * Build the Control Segment with specified opcode:
2224 * - MLX5_OPCODE_SEND
2225 * - MLX5_OPCODE_ENHANCED_MPSW
2229 * Pointer to TX queue structure.
2231 * Pointer to burst routine local context.
2233 * Pointer to WQE to fill with built Control Segment.
2235 * Supposed length of WQE in segments.
2237 * SQ WQE opcode to put into Control Segment.
2239 * Configured Tx offloads mask. It is fully defined at
2240 * compile time and may be used for optimization.
2242 static __rte_always_inline void
2243 mlx5_tx_cseg_init(struct mlx5_txq_data *restrict txq,
2244 struct mlx5_txq_local *restrict loc __rte_unused,
2245 struct mlx5_wqe *restrict wqe,
2247 unsigned int opcode,
2248 unsigned int olx __rte_unused)
2250 struct mlx5_wqe_cseg *restrict cs = &wqe->cseg;
2252 /* For legacy MPW replace the EMPW opcode with TSO plus the MPW modifier. */
2253 if (MLX5_TXOFF_CONFIG(MPW) && opcode == MLX5_OPCODE_ENHANCED_MPSW)
2254 opcode = MLX5_OPCODE_TSO | MLX5_OPC_MOD_MPW << 24;
2255 cs->opcode = rte_cpu_to_be_32((txq->wqe_ci << 8) | opcode);
2256 cs->sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
2257 cs->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
2258 MLX5_COMP_MODE_OFFSET);
2259 cs->misc = RTE_BE32(0);
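	/*
	 * Illustrative note: the opcode dword built above packs the WQE
	 * index into bits 8-23 and the opcode into bits 0-7, i.e.
	 * (txq->wqe_ci << 8) | MLX5_OPCODE_SEND for a regular send; for
	 * legacy MPW the opcode modifier occupies bits 24-31
	 * (MLX5_OPC_MOD_MPW << 24), as set above.
	 */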
2263 * Build the Ethernet Segment without inlined data.
2264 * Supports Software Parser, Checksums and VLAN
2265 * insertion Tx offload features.
2268 * Pointer to TX queue structure.
2270 * Pointer to burst routine local context.
2272 * Pointer to WQE to fill with built Ethernet Segment.
2274 * Configured Tx offloads mask. It is fully defined at
2275 * compile time and may be used for optimization.
2277 static __rte_always_inline void
2278 mlx5_tx_eseg_none(struct mlx5_txq_data *restrict txq __rte_unused,
2279 struct mlx5_txq_local *restrict loc,
2280 struct mlx5_wqe *restrict wqe,
2283 struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
2287 * Calculate and set the checksum flags first - the dword field
2288 * in the segment may be shared with the Software Parser flags.
2290 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2291 es->flags = rte_cpu_to_le_32(csum);
2293 * Calculate and set the Software Parser offsets and flags.
2294 * These flags are set for custom UDP and IP tunnel packets.
2296 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2297 /* Fill metadata field if needed. */
2298 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2299 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2300 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2301 /* Engage VLAN tag insertion feature if requested. */
2302 if (MLX5_TXOFF_CONFIG(VLAN) &&
2303 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
2305 * We should get here only if the device supports
2306 * this feature correctly.
2308 assert(txq->vlan_en);
2309 es->inline_hdr = rte_cpu_to_be_32(MLX5_ETH_WQE_VLAN_INSERT |
2310 loc->mbuf->vlan_tci);
2312 es->inline_hdr = RTE_BE32(0);
2317 * Build the Ethernet Segment with minimal inlined data
2318 * of MLX5_ESEG_MIN_INLINE_SIZE bytes in length. This is
2319 * used to fill the gap in single WQEBB WQEs.
2320 * Supports Software Parser, Checksums and VLAN
2321 * insertion Tx offload features.
2324 * Pointer to TX queue structure.
2326 * Pointer to burst routine local context.
2328 * Pointer to WQE to fill with built Ethernet Segment.
2330 * Length of VLAN tag insertion if any.
2332 * Configured Tx offloads mask. It is fully defined at
2333 * compile time and may be used for optimization.
2335 static __rte_always_inline void
2336 mlx5_tx_eseg_dmin(struct mlx5_txq_data *restrict txq __rte_unused,
2337 struct mlx5_txq_local *restrict loc,
2338 struct mlx5_wqe *restrict wqe,
2342 struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
2344 uint8_t *psrc, *pdst;
2347 * Calculate and set checksum flags first, the dword field
2348 * in the segment may be shared with Software Parser flags.
2350 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2351 es->flags = rte_cpu_to_le_32(csum);
2353 * Calculate and set Software Parser offsets and flags.
2354 * These flags are set for custom UDP and IP tunnel packets.
2356 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2357 /* Fill metadata field if needed. */
2358 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2359 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2360 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2361 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2363 sizeof(rte_v128u32_t)),
2364 "invalid Ethernet Segment data size");
2365 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2367 sizeof(struct rte_vlan_hdr) +
2368 2 * RTE_ETHER_ADDR_LEN),
2369 "invalid Ethernet Segment data size");
2370 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
2371 es->inline_hdr_sz = RTE_BE16(MLX5_ESEG_MIN_INLINE_SIZE);
2372 es->inline_data = *(unaligned_uint16_t *)psrc;
2373 psrc += sizeof(uint16_t);
2374 pdst = (uint8_t *)(es + 1);
2375 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2376 /* Implement VLAN tag insertion as part of the inline data. */
2377 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
2378 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2379 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2380 /* Insert VLAN ethertype + VLAN tag. */
2381 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2382 ((RTE_ETHER_TYPE_VLAN << 16) |
2383 loc->mbuf->vlan_tci);
2384 pdst += sizeof(struct rte_vlan_hdr);
2385 /* Copy the remaining two bytes from the packet data. */
2386 assert(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
2387 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
2389 /* Fill the gap in the title WQEBB with inline data. */
2390 rte_mov16(pdst, psrc);
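/*
 * A byte-layout sketch of the minimal inline part built above when VLAN
 * insertion is requested, assuming the usual 18-byte
 * MLX5_ESEG_MIN_INLINE_SIZE:
 *
 *   bytes  0..1  : first 2 bytes of the destination MAC (es->inline_data)
 *   bytes  2..11 : remaining 10 bytes of the MAC addresses (memcpy above)
 *   bytes 12..15 : inserted 0x8100 ethertype + VLAN TCI (be32 store)
 *   bytes 16..17 : original ethertype copied from the packet
 *
 * Without VLAN the same 18 bytes are simply the 2 bytes in
 * es->inline_data plus one 16-byte rte_mov16() copy.
 */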
2395 * Build the Ethernet Segment with entire packet
2396 * data inlining. Checks the boundary of WQEBB and
2397 * ring buffer wrapping, supports Software Parser,
2398 * Checksums and VLAN insertion Tx offload features.
2401 * Pointer to TX queue structure.
2403 * Pointer to burst routine local context.
2405 * Pointer to WQE to fill with built Ethernet Segment.
2407 * Length of VLAN tag insertion if any.
2409 * Length of data to inline (VLAN included, if any).
2411 * TSO flag, set mss field from the packet.
2413 * Configured Tx offloads mask. It is fully defined at
2414 * compile time and may be used for optimization.
2417 * Pointer to the next Data Segment (aligned and wrapped around).
2419 static __rte_always_inline struct mlx5_wqe_dseg *
2420 mlx5_tx_eseg_data(struct mlx5_txq_data *restrict txq,
2421 struct mlx5_txq_local *restrict loc,
2422 struct mlx5_wqe *restrict wqe,
2428 struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
2430 uint8_t *psrc, *pdst;
2434 * Calculate and set checksum flags first, the dword field
2435 * in the segment may be shared with Software Parser flags.
2437 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2440 csum |= loc->mbuf->tso_segsz;
2441 es->flags = rte_cpu_to_be_32(csum);
2443 es->flags = rte_cpu_to_le_32(csum);
2446 * Calculate and set Software Parser offsets and flags.
2447 * These flags are set for custom UDP and IP tunnel packets.
2449 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2450 /* Fill metadata field if needed. */
2451 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2452 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2453 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2454 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2456 sizeof(rte_v128u32_t)),
2457 "invalid Ethernet Segment data size");
2458 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2460 sizeof(struct rte_vlan_hdr) +
2461 2 * RTE_ETHER_ADDR_LEN),
2462 "invalid Ethernet Segment data size");
2463 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
2464 es->inline_hdr_sz = rte_cpu_to_be_16(inlen);
2465 es->inline_data = *(unaligned_uint16_t *)psrc;
2466 psrc += sizeof(uint16_t);
2467 pdst = (uint8_t *)(es + 1);
2468 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2469 /* Implement VLAN tag insertion as part of the inline data. */
2470 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
2471 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2472 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2473 /* Insert VLAN ethertype + VLAN tag. */
2474 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2475 ((RTE_ETHER_TYPE_VLAN << 16) |
2476 loc->mbuf->vlan_tci);
2477 pdst += sizeof(struct rte_vlan_hdr);
2478 /* Copy the remaining two bytes from the packet data. */
2479 assert(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
2480 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
2481 psrc += sizeof(uint16_t);
2483 /* Fill the gap in the title WQEBB with inline data. */
2484 rte_mov16(pdst, psrc);
2485 psrc += sizeof(rte_v128u32_t);
2487 pdst = (uint8_t *)(es + 2);
2488 assert(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
2489 assert(pdst < (uint8_t *)txq->wqes_end);
2490 inlen -= MLX5_ESEG_MIN_INLINE_SIZE;
2492 assert(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
2493 return (struct mlx5_wqe_dseg *)pdst;
2496 * The WQEBB space availability is checked by caller.
2497 * Here we should be aware of WQE ring buffer wraparound only.
2499 part = (uint8_t *)txq->wqes_end - pdst;
2500 part = RTE_MIN(part, inlen);
2502 rte_memcpy(pdst, psrc, part);
2504 if (likely(!inlen)) {
2506 * If return value is not used by the caller
2507 * the code below will be optimized out.
2510 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2511 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
2512 pdst = (uint8_t *)txq->wqes;
2513 return (struct mlx5_wqe_dseg *)pdst;
2515 pdst = (uint8_t *)txq->wqes;
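/*
 * A stand-alone sketch of the wrap-aware copy pattern used above: copy up
 * to the end of the WQE ring buffer, then continue from its beginning.
 * All names are local to the sketch and MLX5_EDITOR_EXAMPLES is a
 * hypothetical guard keeping it out of the real build.
 */
#ifdef MLX5_EDITOR_EXAMPLES
static void
ring_copy_sketch(uint8_t *ring_begin, uint8_t *ring_end,
		 uint8_t *dst, const uint8_t *src, unsigned int len)
{
	for (;;) {
		unsigned int part = (unsigned int)(ring_end - dst);

		part = RTE_MIN(part, len);
		rte_memcpy(dst, src, part);
		len -= part;
		if (!len)
			break;
		src += part;
		/* Continue from the ring buffer beginning. */
		dst = ring_begin;
	}
}
#endif /* MLX5_EDITOR_EXAMPLES */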
2522 * Copy data from a chain of mbufs to the specified linear buffer.
2523 * If the data from some mbuf is copied completely, this mbuf is
2524 * freed. A local structure is used to keep the byte stream state
2525 * (current mbuf and offset within it).
2528 * Pointer to the destination linear buffer.
2530 * Pointer to burst routine local context.
2532 * Length of data to be copied.
2534 * Configured Tx offloads mask. It is fully defined at
2535 * compile time and may be used for optimization.
2537 static __rte_always_inline void
2538 mlx5_tx_mseg_memcpy(uint8_t *pdst,
2539 struct mlx5_txq_local *restrict loc,
2541 unsigned int olx __rte_unused)
2543 struct rte_mbuf *mbuf;
2544 unsigned int part, dlen;
2549 /* Allow zero length packets, must check first. */
2550 dlen = rte_pktmbuf_data_len(loc->mbuf);
2551 if (dlen <= loc->mbuf_off) {
2552 /* Exhausted packet, just free. */
2554 loc->mbuf = mbuf->next;
2555 rte_pktmbuf_free_seg(mbuf);
2557 assert(loc->mbuf_nseg > 1);
2562 dlen -= loc->mbuf_off;
2563 psrc = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
2565 part = RTE_MIN(len, dlen);
2566 rte_memcpy(pdst, psrc, part);
2567 loc->mbuf_off += part;
2570 if (loc->mbuf_off >= rte_pktmbuf_data_len(loc->mbuf)) {
2572 /* Exhausted packet, just free. */
2574 loc->mbuf = mbuf->next;
2575 rte_pktmbuf_free_seg(mbuf);
2577 assert(loc->mbuf_nseg >= 1);
2587 * Build the Ethernet Segment with inlined data from
2588 * multi-segment packet. Checks the boundary of WQEBB
2589 * and ring buffer wrapping, supports Software Parser,
2590 * Checksums and VLAN insertion Tx offload features.
2593 * Pointer to TX queue structure.
2595 * Pointer to burst routine local context.
2597 * Pointer to WQE to fill with built Ethernet Segment.
2599 * Length of VLAN tag insertion if any.
2601 * Length of data to inline (VLAN included, if any).
2603 * TSO flag, set mss field from the packet.
2605 * Configured Tx offloads mask. It is fully defined at
2606 * compile time and may be used for optimization.
2609 * Pointer to the next Data Segment (aligned and
2610 * possibly NOT wrapped around - the caller should do
2611 * the wrapping check on its own).
2613 static __rte_always_inline struct mlx5_wqe_dseg *
2614 mlx5_tx_eseg_mdat(struct mlx5_txq_data *restrict txq,
2615 struct mlx5_txq_local *restrict loc,
2616 struct mlx5_wqe *restrict wqe,
2622 struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
2628 * Calculate and set checksum flags first, the uint32_t field
2629 * in the segment may be shared with Software Parser flags.
2631 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2634 csum |= loc->mbuf->tso_segsz;
2635 es->flags = rte_cpu_to_be_32(csum);
2637 es->flags = rte_cpu_to_le_32(csum);
2640 * Calculate and set Software Parser offsets and flags.
2641 * These flags are set for custom UDP and IP tunnel packets.
2643 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2644 /* Fill metadata field if needed. */
2645 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2646 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2647 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2648 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2650 sizeof(rte_v128u32_t)),
2651 "invalid Ethernet Segment data size");
2652 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2654 sizeof(struct rte_vlan_hdr) +
2655 2 * RTE_ETHER_ADDR_LEN),
2656 "invalid Ethernet Segment data size");
2657 assert(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
2658 es->inline_hdr_sz = rte_cpu_to_be_16(inlen);
2659 pdst = (uint8_t *)&es->inline_data;
2660 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2661 /* Implement VLAN tag insertion as part of the inline data. */
2662 mlx5_tx_mseg_memcpy(pdst, loc, 2 * RTE_ETHER_ADDR_LEN, olx);
2663 pdst += 2 * RTE_ETHER_ADDR_LEN;
2664 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2665 ((RTE_ETHER_TYPE_VLAN << 16) |
2666 loc->mbuf->vlan_tci);
2667 pdst += sizeof(struct rte_vlan_hdr);
2668 inlen -= 2 * RTE_ETHER_ADDR_LEN + sizeof(struct rte_vlan_hdr);
2670 assert(pdst < (uint8_t *)txq->wqes_end);
2672 * The WQEBB space availability is checked by caller.
2673 * Here we should be aware of WQE ring buffer wraparound only.
2675 part = (uint8_t *)txq->wqes_end - pdst;
2676 part = RTE_MIN(part, inlen);
2679 mlx5_tx_mseg_memcpy(pdst, loc, part, olx);
2681 if (likely(!inlen)) {
2683 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2684 return (struct mlx5_wqe_dseg *)pdst;
2686 pdst = (uint8_t *)txq->wqes;
2692 * Build the Data Segment of pointer type.
2695 * Pointer to TX queue structure.
2697 * Pointer to burst routine local context.
2699 * Pointer to WQE to fill with built Data Segment.
2701 * Data buffer to point.
2703 * Data buffer length.
2705 * Configured Tx offloads mask. It is fully defined at
2706 * compile time and may be used for optimization.
2708 static __rte_always_inline void
2709 mlx5_tx_dseg_ptr(struct mlx5_txq_data *restrict txq,
2710 struct mlx5_txq_local *restrict loc,
2711 struct mlx5_wqe_dseg *restrict dseg,
2714 unsigned int olx __rte_unused)
2718 dseg->bcount = rte_cpu_to_be_32(len);
2719 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
2720 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
2724 * Build the Data Segment of pointer type, or of inline
2725 * type if the data length does not exceed the inline
2726 * buffer of a minimal Data Segment.
2729 * Pointer to TX queue structure.
2731 * Pointer to burst routine local context.
2733 * Pointer to WQE to fill with built Data Segment.
2735 * Data buffer to point.
2737 * Data buffer length.
2739 * Configured Tx offloads mask. It is fully defined at
2740 * compile time and may be used for optimization.
2742 static __rte_always_inline void
2743 mlx5_tx_dseg_iptr(struct mlx5_txq_data *restrict txq,
2744 struct mlx5_txq_local *restrict loc,
2745 struct mlx5_wqe_dseg *restrict dseg,
2748 unsigned int olx __rte_unused)
2754 if (len > MLX5_DSEG_MIN_INLINE_SIZE) {
2755 dseg->bcount = rte_cpu_to_be_32(len);
2756 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
2757 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
2761 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
2762 /* Unrolled implementation of generic rte_memcpy. */
2763 dst = (uintptr_t)&dseg->inline_data[0];
2764 src = (uintptr_t)buf;
2766 #ifdef RTE_ARCH_STRICT_ALIGN
2767 assert(dst == RTE_PTR_ALIGN(dst, sizeof(uint32_t)));
2768 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
2769 dst += sizeof(uint32_t);
2770 src += sizeof(uint32_t);
2771 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
2772 dst += sizeof(uint32_t);
2773 src += sizeof(uint32_t);
2775 *(uint64_t *)dst = *(unaligned_uint64_t *)src;
2776 dst += sizeof(uint64_t);
2777 src += sizeof(uint64_t);
2781 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
2782 dst += sizeof(uint32_t);
2783 src += sizeof(uint32_t);
2786 *(uint16_t *)dst = *(unaligned_uint16_t *)src;
2787 dst += sizeof(uint16_t);
2788 src += sizeof(uint16_t);
2791 *(uint8_t *)dst = *(uint8_t *)src;
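/*
 * A worked example of the threshold above, assuming the usual 12-byte
 * MLX5_DSEG_MIN_INLINE_SIZE: a 9-byte trailer is copied right into the
 * Data Segment with the inline flag OR-ed into bcount and still fits in
 * a single 16-byte segment, while a 60-byte buffer keeps the regular
 * bcount/lkey/pointer layout, because inlining it would take more WQE
 * space than the pointer form.
 */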
2795 * Build the Data Segment of inlined data from single
2796 * segment packet, no VLAN insertion.
2799 * Pointer to TX queue structure.
2801 * Pointer to burst routine local context.
2803 * Pointer to WQE to fill with built Data Segment.
2805 * Data buffer to point.
2807 * Data buffer length.
2809 * Configured Tx offloads mask. It is fully defined at
2810 * compile time and may be used for optimization.
2813 * Pointer to the next Data Segment after inlined data.
2814 * Ring buffer wraparound check is needed. We do not
2815 * do it here because it may not be needed for the
2816 * last packet in the eMPW session.
2818 static __rte_always_inline struct mlx5_wqe_dseg *
2819 mlx5_tx_dseg_empw(struct mlx5_txq_data *restrict txq,
2820 struct mlx5_txq_local *restrict loc __rte_unused,
2821 struct mlx5_wqe_dseg *restrict dseg,
2824 unsigned int olx __rte_unused)
2829 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
2830 pdst = &dseg->inline_data[0];
2832 * The WQEBB space availability is checked by caller.
2833 * Here we should be aware of WQE ring buffer wraparound only.
2835 part = (uint8_t *)txq->wqes_end - pdst;
2836 part = RTE_MIN(part, len);
2838 rte_memcpy(pdst, buf, part);
2842 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2843 /* Note: no final wraparound check here. */
2844 return (struct mlx5_wqe_dseg *)pdst;
2846 pdst = (uint8_t *)txq->wqes;
2853 * Build the Data Segment of inlined data from single
2854 * segment packet with VLAN insertion.
2857 * Pointer to TX queue structure.
2859 * Pointer to burst routine local context.
2861 * Pointer to the dseg to fill with the built Data Segment.
2863 * Data buffer to point.
2865 * Data buffer length.
2867 * Configured Tx offloads mask. It is fully defined at
2868 * compile time and may be used for optimization.
2871 * Pointer to the next Data Segment after inlined data.
2872 * Ring buffer wraparound check is needed.
2874 static __rte_always_inline struct mlx5_wqe_dseg *
2875 mlx5_tx_dseg_vlan(struct mlx5_txq_data *restrict txq,
2876 struct mlx5_txq_local *restrict loc __rte_unused,
2877 struct mlx5_wqe_dseg *restrict dseg,
2880 unsigned int olx __rte_unused)
2886 assert(len > MLX5_ESEG_MIN_INLINE_SIZE);
2887 static_assert(MLX5_DSEG_MIN_INLINE_SIZE ==
2888 (2 * RTE_ETHER_ADDR_LEN),
2889 "invalid Data Segment data size");
2890 dseg->bcount = rte_cpu_to_be_32((len + sizeof(struct rte_vlan_hdr)) |
2891 MLX5_ETH_WQE_DATA_INLINE);
2892 pdst = &dseg->inline_data[0];
2893 memcpy(pdst, buf, MLX5_DSEG_MIN_INLINE_SIZE);
2894 buf += MLX5_DSEG_MIN_INLINE_SIZE;
2895 pdst += MLX5_DSEG_MIN_INLINE_SIZE;
2896 len -= MLX5_DSEG_MIN_INLINE_SIZE;
2897 /* Insert VLAN ethertype + VLAN tag. Pointer is aligned. */
2898 assert(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
2899 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
2900 pdst = (uint8_t *)txq->wqes;
2901 *(uint32_t *)pdst = rte_cpu_to_be_32((RTE_ETHER_TYPE_VLAN << 16) |
2902 loc->mbuf->vlan_tci);
2903 pdst += sizeof(struct rte_vlan_hdr);
2905 * The WQEBB space availability is checked by caller.
2906 * Here we should be aware of WQE ring buffer wraparound only.
2908 part = (uint8_t *)txq->wqes_end - pdst;
2909 part = RTE_MIN(part, len);
2911 rte_memcpy(pdst, buf, part);
2915 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2916 /* Note: no final wraparound check here. */
2917 return (struct mlx5_wqe_dseg *)pdst;
2919 pdst = (uint8_t *)txq->wqes;
2926 * Build the Ethernet Segment with optionally inlined data with
2927 * VLAN insertion and following Data Segments (if any) from
2928 * multi-segment packet. Used by ordinary send and TSO.
2931 * Pointer to TX queue structure.
2933 * Pointer to burst routine local context.
2935 * Pointer to WQE to fill with built Ethernet/Data Segments.
2937 * Length of VLAN header to insert, 0 means no VLAN insertion.
2939 * Data length to inline. For TSO this parameter specifies
2940 * the exact value; for the ordinary send routine it can be
2941 * aligned by the caller to provide better WQE space saving and data buffer
2942 * start address alignment. This length includes VLAN header
2945 * Zero means ordinary send, inlined data can be extended,
2946 * otherwise this is TSO, inlined data length is fixed.
2948 * Configured Tx offloads mask. It is fully defined at
2949 * compile time and may be used for optimization.
2952 * Actual size of built WQE in segments.
2954 static __rte_always_inline unsigned int
2955 mlx5_tx_mseg_build(struct mlx5_txq_data *restrict txq,
2956 struct mlx5_txq_local *restrict loc,
2957 struct mlx5_wqe *restrict wqe,
2961 unsigned int olx __rte_unused)
2963 struct mlx5_wqe_dseg *restrict dseg;
2966 assert((rte_pktmbuf_pkt_len(loc->mbuf) + vlan) >= inlen);
2967 loc->mbuf_nseg = NB_SEGS(loc->mbuf);
2970 dseg = mlx5_tx_eseg_mdat(txq, loc, wqe, vlan, inlen, tso, olx);
2971 if (!loc->mbuf_nseg)
2974 * There are still some mbufs remaining, not inlined.
2975 * The first mbuf may be partially inlined and we
2976 * must process the possible non-zero data offset.
2978 if (loc->mbuf_off) {
2983 * Exhausted packets must be dropped before.
2984 * A non-zero offset means there is some data
2985 * remaining in the packet.
2987 assert(loc->mbuf_off < rte_pktmbuf_data_len(loc->mbuf));
2988 assert(rte_pktmbuf_data_len(loc->mbuf));
2989 dptr = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
2991 dlen = rte_pktmbuf_data_len(loc->mbuf) - loc->mbuf_off;
2993 * Build the pointer/minimal data Data Segment.
2994 * Do ring buffer wrapping check in advance.
2996 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
2997 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
2998 mlx5_tx_dseg_iptr(txq, loc, dseg, dptr, dlen, olx);
2999 /* Store the mbuf to be freed on completion. */
3000 assert(loc->elts_free);
3001 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3004 if (--loc->mbuf_nseg == 0)
3006 loc->mbuf = loc->mbuf->next;
3010 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
3011 struct rte_mbuf *mbuf;
3013 /* Zero length segment found, just skip. */
3015 loc->mbuf = loc->mbuf->next;
3016 rte_pktmbuf_free_seg(mbuf);
3017 if (--loc->mbuf_nseg == 0)
3020 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3021 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3024 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3025 rte_pktmbuf_data_len(loc->mbuf), olx);
3026 assert(loc->elts_free);
3027 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3030 if (--loc->mbuf_nseg == 0)
3032 loc->mbuf = loc->mbuf->next;
3037 /* Calculate actual segments used from the dseg pointer. */
3038 if ((uintptr_t)wqe < (uintptr_t)dseg)
3039 ds = ((uintptr_t)dseg - (uintptr_t)wqe) / MLX5_WSEG_SIZE;
3041 ds = (((uintptr_t)dseg - (uintptr_t)wqe) +
3042 txq->wqe_s * MLX5_WQE_SIZE) / MLX5_WSEG_SIZE;
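/*
 * A worked example of the segment accounting above, assuming the usual
 * 16-byte MLX5_WSEG_SIZE: if the WQE starts at ring offset 0x1000 and the
 * returned dseg pointer is 0x10A0, the WQE spans (0x10A0 - 0x1000) / 16
 * == 10 segments. If the dseg pointer has wrapped below the WQE start,
 * one full ring size (txq->wqe_s * MLX5_WQE_SIZE bytes) is added before
 * the division, yielding the same count.
 */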
3047 * Tx one packet function for multi-segment TSO. Supports all
3048 * types of Tx offloads, uses MLX5_OPCODE_TSO to build WQEs,
3049 * sends one packet per WQE.
3051 * This routine is responsible for storing processed mbuf
3052 * into the elts ring buffer and updating elts_head.
3055 * Pointer to TX queue structure.
3057 * Pointer to burst routine local context.
3059 * Configured Tx offloads mask. It is fully defined at
3060 * compile time and may be used for optimization.
3063 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3064 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3065 * Local context variables partially updated.
3067 static __rte_always_inline enum mlx5_txcmp_code
3068 mlx5_tx_packet_multi_tso(struct mlx5_txq_data *restrict txq,
3069 struct mlx5_txq_local *restrict loc,
3072 struct mlx5_wqe *restrict wqe;
3073 unsigned int ds, dlen, inlen, ntcp, vlan = 0;
3076 * Calculate data length to be inlined to estimate
3077 * the required space in WQE ring buffer.
3079 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
3080 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3081 vlan = sizeof(struct rte_vlan_hdr);
3082 inlen = loc->mbuf->l2_len + vlan +
3083 loc->mbuf->l3_len + loc->mbuf->l4_len;
3084 if (unlikely((!inlen || !loc->mbuf->tso_segsz)))
3085 return MLX5_TXCMP_CODE_ERROR;
3086 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
3087 inlen += loc->mbuf->outer_l2_len + loc->mbuf->outer_l3_len;
3088 /* Packet must contain all TSO headers. */
3089 if (unlikely(inlen > MLX5_MAX_TSO_HEADER ||
3090 inlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
3091 inlen > (dlen + vlan)))
3092 return MLX5_TXCMP_CODE_ERROR;
3093 assert(inlen >= txq->inlen_mode);
3095 * Check whether there are enough free WQEBBs:
3097 * - Ethernet Segment
3098 * - First Segment of inlined Ethernet data
3099 * - ... data continued ...
3100 * - Data Segments of pointer/min inline type
3102 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
3103 MLX5_ESEG_MIN_INLINE_SIZE +
3105 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3106 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3107 return MLX5_TXCMP_CODE_EXIT;
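/*
 * A worked example of the estimate above, assuming the usual 16-byte
 * MLX5_WSEG_SIZE and 18-byte MLX5_ESEG_MIN_INLINE_SIZE: a 3-segment TSO
 * packet with 54 bytes of headers to inline (14 L2 + 20 L3 + 20 L4,
 * no VLAN) gives
 *
 *   ds     = 3 + 2 + (54 - 18 + 15) / 16 = 5 + 3 = 8 segments
 *   WQEBBs = (8 + 3) / 4 = 2
 *
 * so at least two free WQEBBs are required to proceed.
 */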
3108 /* Check for maximal WQE size. */
3109 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3110 return MLX5_TXCMP_CODE_ERROR;
3111 #ifdef MLX5_PMD_SOFT_COUNTERS
3112 /* Update sent data bytes/packets counters. */
3113 ntcp = (dlen - (inlen - vlan) + loc->mbuf->tso_segsz - 1) /
3114 loc->mbuf->tso_segsz;
3116 * One packet will be counted for the mbuf itself
3117 * at the end of mlx5_tx_burst, from the
3118 * loc->pkts_sent field.
3121 txq->stats.opackets += ntcp;
3122 txq->stats.obytes += dlen + vlan + ntcp * inlen;
3124 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3125 loc->wqe_last = wqe;
3126 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_TSO, olx);
3127 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 1, olx);
3128 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3129 txq->wqe_ci += (ds + 3) / 4;
3130 loc->wqe_free -= (ds + 3) / 4;
3131 return MLX5_TXCMP_CODE_MULTI;
3135 * Tx one packet function for multi-segment SEND. Supports all
3136 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
3137 * sends one packet per WQE, without any data inlining in
3140 * This routine is responsible for storing processed mbuf
3141 * into the elts ring buffer and updating elts_head.
3144 * Pointer to TX queue structure.
3146 * Pointer to burst routine local context.
3148 * Configured Tx offloads mask. It is fully defined at
3149 * compile time and may be used for optimization.
3152 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3153 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3154 * Local context variables partially updated.
3156 static __rte_always_inline enum mlx5_txcmp_code
3157 mlx5_tx_packet_multi_send(struct mlx5_txq_data *restrict txq,
3158 struct mlx5_txq_local *restrict loc,
3161 struct mlx5_wqe_dseg *restrict dseg;
3162 struct mlx5_wqe *restrict wqe;
3163 unsigned int ds, nseg;
3165 assert(NB_SEGS(loc->mbuf) > 1);
3167 * No inlining at all; this means that saving CPU cycles
3168 * was prioritized at configuration time, so we should not
3169 * copy any packet data to the WQE.
3171 nseg = NB_SEGS(loc->mbuf);
3173 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3174 return MLX5_TXCMP_CODE_EXIT;
3175 /* Check for maximal WQE size. */
3176 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3177 return MLX5_TXCMP_CODE_ERROR;
3179 * Some Tx offloads may cause an error if
3180 * packet is not long enough, check against
3181 * assumed minimal length.
3183 if (rte_pktmbuf_pkt_len(loc->mbuf) <= MLX5_ESEG_MIN_INLINE_SIZE)
3184 return MLX5_TXCMP_CODE_ERROR;
3185 #ifdef MLX5_PMD_SOFT_COUNTERS
3186 /* Update sent data bytes counter. */
3187 txq->stats.obytes += rte_pktmbuf_pkt_len(loc->mbuf);
3188 if (MLX5_TXOFF_CONFIG(VLAN) &&
3189 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3190 txq->stats.obytes += sizeof(struct rte_vlan_hdr);
3193 * SEND WQE, one WQEBB:
3194 * - Control Segment, SEND opcode
3195 * - Ethernet Segment, optional VLAN, no inline
3196 * - Data Segments, pointer only type
3198 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3199 loc->wqe_last = wqe;
3200 mlx5_tx_cseg_init(txq, loc, wqe, ds, MLX5_OPCODE_SEND, olx);
3201 mlx5_tx_eseg_none(txq, loc, wqe, olx);
3202 dseg = &wqe->dseg[0];
3204 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
3205 struct rte_mbuf *mbuf;
3208 * Zero length segment found, we have to
3209 * correct the total size of the WQE in segments.
3210 * It is supposed to be a rare occasion, so
3211 * in the normal case (no zero length segments)
3212 * we avoid extra writing to the Control
3216 wqe->cseg.sq_ds -= RTE_BE32(1);
3218 loc->mbuf = mbuf->next;
3219 rte_pktmbuf_free_seg(mbuf);
3225 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3226 rte_pktmbuf_data_len(loc->mbuf), olx);
3227 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3232 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3233 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3234 loc->mbuf = loc->mbuf->next;
3237 txq->wqe_ci += (ds + 3) / 4;
3238 loc->wqe_free -= (ds + 3) / 4;
3239 return MLX5_TXCMP_CODE_MULTI;
3243 * Tx one packet function for multi-segment SEND. Supports all
3244 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
3245 * sends one packet per WQE, with data inlining in
3246 * Ethernet Segment and minimal Data Segments.
3248 * This routine is responsible for storing processed mbuf
3249 * into the elts ring buffer and updating elts_head.
3252 * Pointer to TX queue structure.
3254 * Pointer to burst routine local context.
3256 * Configured Tx offloads mask. It is fully defined at
3257 * compile time and may be used for optimization.
3260 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3261 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3262 * Local context variables partially updated.
3264 static __rte_always_inline enum mlx5_txcmp_code
3265 mlx5_tx_packet_multi_inline(struct mlx5_txq_data *restrict txq,
3266 struct mlx5_txq_local *restrict loc,
3269 struct mlx5_wqe *restrict wqe;
3270 unsigned int ds, inlen, dlen, vlan = 0;
3272 assert(MLX5_TXOFF_CONFIG(INLINE));
3273 assert(NB_SEGS(loc->mbuf) > 1);
3275 * First calculate data length to be inlined
3276 * to estimate the required space for WQE.
3278 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
3279 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3280 vlan = sizeof(struct rte_vlan_hdr);
3281 inlen = dlen + vlan;
3282 /* Check against minimal length. */
3283 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
3284 return MLX5_TXCMP_CODE_ERROR;
3285 assert(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
3286 if (inlen > txq->inlen_send) {
3287 struct rte_mbuf *mbuf;
3292 * Packet length exceeds the allowed inline
3293 * data length, check whether the minimal
3294 * inlining is required.
3296 if (txq->inlen_mode) {
3297 assert(txq->inlen_mode >= MLX5_ESEG_MIN_INLINE_SIZE);
3298 assert(txq->inlen_mode <= txq->inlen_send);
3299 inlen = txq->inlen_mode;
3301 if (!vlan || txq->vlan_en) {
3303 * VLAN insertion will be done inside by HW.
3304 * It is not the most efficient way (the VLAN flag is
3305 * checked twice), but we should handle the
3306 * inlining length correctly and take into
3307 * account the VLAN header being inserted.
3309 return mlx5_tx_packet_multi_send
3312 inlen = MLX5_ESEG_MIN_INLINE_SIZE;
3315 * Now we know the minimal amount of data to be
3316 * inlined. Check whether we should inline the buffers
3317 * from the beginning of the chain to eliminate some mbufs.
3320 nxlen = rte_pktmbuf_data_len(mbuf);
3321 if (unlikely(nxlen <= txq->inlen_send)) {
3322 /* We can inline first mbuf at least. */
3323 if (nxlen < inlen) {
3326 /* Scan mbufs till inlen filled. */
3331 nxlen = rte_pktmbuf_data_len(mbuf);
3333 } while (unlikely(nxlen < inlen));
3334 if (unlikely(nxlen > txq->inlen_send)) {
3335 /* We cannot inline entire mbuf. */
3336 smlen = inlen - smlen;
3337 start = rte_pktmbuf_mtod_offset
3338 (mbuf, uintptr_t, smlen);
3345 /* The packet chain must not end here. */
3347 nxlen = inlen + rte_pktmbuf_data_len(mbuf);
3348 } while (unlikely(nxlen < txq->inlen_send));
3350 start = rte_pktmbuf_mtod(mbuf, uintptr_t);
3352 * Check whether we can do inlining to align the start
3353 * address of the data buffer to a cacheline.
3356 start = (~start + 1) & (RTE_CACHE_LINE_SIZE - 1);
3357 if (unlikely(start)) {
3359 if (start <= txq->inlen_send)
3364 * Check whether there are enough free WQEBBs:
3366 * - Ethernet Segment
3367 * - First Segment of inlined Ethernet data
3368 * - ... data continued ...
3369 * - Data Segments of pointer/min inline type
3371 * Estimate the number of Data Segments conservatively,
3372 * supposing that no mbufs are freed during inlining.
3374 assert(inlen <= txq->inlen_send);
3375 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
3376 MLX5_ESEG_MIN_INLINE_SIZE +
3378 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3379 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3380 return MLX5_TXCMP_CODE_EXIT;
3381 /* Check for maximal WQE size. */
3382 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3383 return MLX5_TXCMP_CODE_ERROR;
3384 #ifdef MLX5_PMD_SOFT_COUNTERS
3385 /* Update sent data bytes/packets counters. */
3386 txq->stats.obytes += dlen + vlan;
3388 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3389 loc->wqe_last = wqe;
3390 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_SEND, olx);
3391 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 0, olx);
3392 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3393 txq->wqe_ci += (ds + 3) / 4;
3394 loc->wqe_free -= (ds + 3) / 4;
3395 return MLX5_TXCMP_CODE_MULTI;
3399 * Tx burst function for multi-segment packets. Supports all
3400 * types of Tx offloads, uses MLX5_OPCODE_SEND/TSO to build WQEs,
3401 * sends one packet per WQE. Function stops sending if it
3402 * encounters a single-segment packet.
3404 * This routine is responsible for storing processed mbuf
3405 * into the elts ring buffer and updating elts_head.
3408 * Pointer to TX queue structure.
3410 * Packets to transmit.
3412 * Number of packets in array.
3414 * Pointer to burst routine local context.
3416 * Configured Tx offloads mask. It is fully defined at
3417 * compile time and may be used for optimization.
3420 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3421 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3422 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
3423 * MLX5_TXCMP_CODE_TSO - TSO single-segment packet encountered.
3424 * Local context variables updated.
3426 static __rte_always_inline enum mlx5_txcmp_code
3427 mlx5_tx_burst_mseg(struct mlx5_txq_data *restrict txq,
3428 struct rte_mbuf **restrict pkts,
3429 unsigned int pkts_n,
3430 struct mlx5_txq_local *restrict loc,
3433 assert(loc->elts_free && loc->wqe_free);
3434 assert(pkts_n > loc->pkts_sent);
3435 pkts += loc->pkts_sent + 1;
3436 pkts_n -= loc->pkts_sent;
3438 enum mlx5_txcmp_code ret;
3440 assert(NB_SEGS(loc->mbuf) > 1);
3442 * Estimate the number of free elts quickly but
3443 * conservatively. Some segment may be fully inlined
3444 * and freed, ignore this here - precise estimation
3447 if (loc->elts_free < NB_SEGS(loc->mbuf))
3448 return MLX5_TXCMP_CODE_EXIT;
3449 if (MLX5_TXOFF_CONFIG(TSO) &&
3450 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)) {
3451 /* Proceed with multi-segment TSO. */
3452 ret = mlx5_tx_packet_multi_tso(txq, loc, olx);
3453 } else if (MLX5_TXOFF_CONFIG(INLINE)) {
3454 /* Proceed with multi-segment SEND with inlining. */
3455 ret = mlx5_tx_packet_multi_inline(txq, loc, olx);
3457 /* Proceed with multi-segment SEND w/o inlining. */
3458 ret = mlx5_tx_packet_multi_send(txq, loc, olx);
3460 if (ret == MLX5_TXCMP_CODE_EXIT)
3461 return MLX5_TXCMP_CODE_EXIT;
3462 if (ret == MLX5_TXCMP_CODE_ERROR)
3463 return MLX5_TXCMP_CODE_ERROR;
3464 /* WQE is built, go to the next packet. */
3467 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3468 return MLX5_TXCMP_CODE_EXIT;
3469 loc->mbuf = *pkts++;
3471 rte_prefetch0(*pkts);
3472 if (likely(NB_SEGS(loc->mbuf) > 1))
3474 /* Here ends the series of multi-segment packets. */
3475 if (MLX5_TXOFF_CONFIG(TSO) &&
3476 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
3477 return MLX5_TXCMP_CODE_TSO;
3478 return MLX5_TXCMP_CODE_SINGLE;
3484 * Tx burst function for single-segment packets with TSO.
3485 * Supports all types of Tx offloads, except multi-packets.
3486 * Uses MLX5_OPCODE_TSO to build WQEs, sends one packet per WQE.
3487 * Function stops sending if it encounters a multi-segment
3488 * packet or a packet without TSO requested.
3490 * The routine is responsible for storing the processed mbuf
3491 * into the elts ring buffer and updating elts_head if the inline
3492 * offload is requested, due to possible early freeing
3493 * of the inlined mbufs (we cannot store the pkts array in elts
3497 * Pointer to TX queue structure.
3499 * Packets to transmit.
3501 * Number of packets in array.
3503 * Pointer to burst routine local context.
3505 * Configured Tx offloads mask. It is fully defined at
3506 * compile time and may be used for optimization.
3509 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3510 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3511 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
3512 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
3513 * Local context variables updated.
3515 static __rte_always_inline enum mlx5_txcmp_code
3516 mlx5_tx_burst_tso(struct mlx5_txq_data *restrict txq,
3517 struct rte_mbuf **restrict pkts,
3518 unsigned int pkts_n,
3519 struct mlx5_txq_local *restrict loc,
3522 assert(loc->elts_free && loc->wqe_free);
3523 assert(pkts_n > loc->pkts_sent);
3524 pkts += loc->pkts_sent + 1;
3525 pkts_n -= loc->pkts_sent;
3527 struct mlx5_wqe_dseg *restrict dseg;
3528 struct mlx5_wqe *restrict wqe;
3529 unsigned int ds, dlen, hlen, ntcp, vlan = 0;
3532 assert(NB_SEGS(loc->mbuf) == 1);
3533 dlen = rte_pktmbuf_data_len(loc->mbuf);
3534 if (MLX5_TXOFF_CONFIG(VLAN) &&
3535 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
3536 vlan = sizeof(struct rte_vlan_hdr);
3539 * First calculate the WQE size to check
3540 * whether we have enough space in ring buffer.
3542 hlen = loc->mbuf->l2_len + vlan +
3543 loc->mbuf->l3_len + loc->mbuf->l4_len;
3544 if (unlikely((!hlen || !loc->mbuf->tso_segsz)))
3545 return MLX5_TXCMP_CODE_ERROR;
3546 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
3547 hlen += loc->mbuf->outer_l2_len +
3548 loc->mbuf->outer_l3_len;
3549 /* Segment must contain all TSO headers. */
3550 if (unlikely(hlen > MLX5_MAX_TSO_HEADER ||
3551 hlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
3552 hlen > (dlen + vlan)))
3553 return MLX5_TXCMP_CODE_ERROR;
3555 * Check whether there are enough free WQEBBs:
3557 * - Ethernet Segment
3558 * - First Segment of inlined Ethernet data
3559 * - ... data continued ...
3560 * - Finishing Data Segment of pointer type
3562 ds = 4 + (hlen - MLX5_ESEG_MIN_INLINE_SIZE +
3563 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3564 if (loc->wqe_free < ((ds + 3) / 4))
3565 return MLX5_TXCMP_CODE_EXIT;
3566 #ifdef MLX5_PMD_SOFT_COUNTERS
3567 /* Update sent data bytes/packets counters. */
3568 ntcp = (dlen + vlan - hlen +
3569 loc->mbuf->tso_segsz - 1) /
3570 loc->mbuf->tso_segsz;
3572 * One packet will be counted for the mbuf itself at the end
3573 * of mlx5_tx_burst, from the loc->pkts_sent field.
3576 txq->stats.opackets += ntcp;
3577 txq->stats.obytes += dlen + vlan + ntcp * hlen;
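/*
 * A worked example of the counter update above: a single-segment
 * 9014-byte packet with hlen == 54 bytes of TSO headers, no VLAN and
 * tso_segsz == 1460 gives
 *
 *   ntcp    = (9014 - 54 + 1459) / 1460 = 7 TCP segments on the wire
 *   obytes += 9014 + 7 * 54 = 9392
 *
 * i.e. the headers replicated into every produced TCP segment are
 * counted as sent bytes as well.
 */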
3580 * Build the TSO WQE:
3582 * - Ethernet Segment with hlen bytes inlined
3583 * - Data Segment of pointer type
3585 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3586 loc->wqe_last = wqe;
3587 mlx5_tx_cseg_init(txq, loc, wqe, ds,
3588 MLX5_OPCODE_TSO, olx);
3589 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan, hlen, 1, olx);
3590 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) + hlen - vlan;
3591 dlen -= hlen - vlan;
3592 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
3594 * WQE is built, update the loop parameters
3595 * and go to the next packet.
3597 txq->wqe_ci += (ds + 3) / 4;
3598 loc->wqe_free -= (ds + 3) / 4;
3599 if (MLX5_TXOFF_CONFIG(INLINE))
3600 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3604 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3605 return MLX5_TXCMP_CODE_EXIT;
3606 loc->mbuf = *pkts++;
3608 rte_prefetch0(*pkts);
3609 if (MLX5_TXOFF_CONFIG(MULTI) &&
3610 unlikely(NB_SEGS(loc->mbuf) > 1))
3611 return MLX5_TXCMP_CODE_MULTI;
3612 if (likely(!(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)))
3613 return MLX5_TXCMP_CODE_SINGLE;
3614 /* Continue with the next TSO packet. */
3620 * Analyze the packet and select the best method to send.
3623 * Pointer to TX queue structure.
3625 * Pointer to burst routine local context.
3627 * Configured Tx offloads mask. It is fully defined at
3628 * compile time and may be used for optimization.
3630 * The predefined flag whether to do the complete check for
3631 * multi-segment packets and TSO.
3634 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
3635 * MLX5_TXCMP_CODE_TSO - TSO required, use TSO/LSO.
3636 * MLX5_TXCMP_CODE_SINGLE - single-segment packet, use SEND.
3637 * MLX5_TXCMP_CODE_EMPW - single-segment packet, use MPW.
3639 static __rte_always_inline enum mlx5_txcmp_code
3640 mlx5_tx_able_to_empw(struct mlx5_txq_data *restrict txq,
3641 struct mlx5_txq_local *restrict loc,
3645 /* Check for multi-segment packet. */
3647 MLX5_TXOFF_CONFIG(MULTI) &&
3648 unlikely(NB_SEGS(loc->mbuf) > 1))
3649 return MLX5_TXCMP_CODE_MULTI;
3650 /* Check for TSO packet. */
3652 MLX5_TXOFF_CONFIG(TSO) &&
3653 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
3654 return MLX5_TXCMP_CODE_TSO;
3655 /* Check if eMPW is enabled at all. */
3656 if (!MLX5_TXOFF_CONFIG(EMPW))
3657 return MLX5_TXCMP_CODE_SINGLE;
3658 /* Check if eMPW can be engaged. */
3659 if (MLX5_TXOFF_CONFIG(VLAN) &&
3660 unlikely(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) &&
3661 (!MLX5_TXOFF_CONFIG(INLINE) ||
3662 unlikely((rte_pktmbuf_data_len(loc->mbuf) +
3663 sizeof(struct rte_vlan_hdr)) > txq->inlen_empw))) {
3665 * eMPW does not support VLAN insertion offload,
3666 * so we would have to inline the entire packet,
3667 * but the packet is too long for inlining.
3669 return MLX5_TXCMP_CODE_SINGLE;
3671 return MLX5_TXCMP_CODE_EMPW;
3675 * Check the next packet attributes to match with the eMPW batch ones.
3676 * In addition, for legacy MPW the packet length is checked as well.
3679 * Pointer to TX queue structure.
3681 * Pointer to Ethernet Segment of eMPW batch.
3683 * Pointer to burst routine local context.
3685 * Length of previous packet in MPW descriptor.
3687 * Configured Tx offloads mask. It is fully defined at
3688 * compile time and may be used for optimization.
3691 * true - the packet matches the eMPW batch attributes.
3692 * false - no match, eMPW should be restarted.
3694 static __rte_always_inline bool
3695 mlx5_tx_match_empw(struct mlx5_txq_data *restrict txq __rte_unused,
3696 struct mlx5_wqe_eseg *restrict es,
3697 struct mlx5_txq_local *restrict loc,
3701 uint8_t swp_flags = 0;
3703 /* Compare the checksum flags, if any. */
3704 if (MLX5_TXOFF_CONFIG(CSUM) &&
3705 txq_ol_cksum_to_cs(loc->mbuf) != es->cs_flags)
3707 /* Compare the Software Parser offsets and flags. */
3708 if (MLX5_TXOFF_CONFIG(SWP) &&
3709 (es->swp_offs != txq_mbuf_to_swp(loc, &swp_flags, olx) ||
3710 es->swp_flags != swp_flags))
3712 /* Fill metadata field if needed. */
3713 if (MLX5_TXOFF_CONFIG(METADATA) &&
3714 es->metadata != (loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
3715 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0))
3717 /* Legacy MPW can send packets with the same length only. */
3718 if (MLX5_TXOFF_CONFIG(MPW) &&
3719 dlen != rte_pktmbuf_data_len(loc->mbuf))
3721 /* There must be no VLAN packets in eMPW loop. */
3722 if (MLX5_TXOFF_CONFIG(VLAN))
3723 assert(!(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT));
3728 * Update send loop variables and WQE for eMPW loop
3729 * without data inlining. Number of Data Segments is
3730 * equal to the number of sent packets.
3733 * Pointer to TX queue structure.
3735 * Pointer to burst routine local context.
3737 * Number of packets/Data Segments sent in the eMPW session.
3739 * Accumulated statistics, bytes sent.
3741 * Configured Tx offloads mask. It is fully defined at
3742 * compile time and may be used for optimization.
3745 * Nothing is returned; the Tx queue and local context
3746 * counters are updated for the closed eMPW session.
3748 static __rte_always_inline void
3749 mlx5_tx_sdone_empw(struct mlx5_txq_data *restrict txq,
3750 struct mlx5_txq_local *restrict loc,
3753 unsigned int olx __rte_unused)
3755 assert(!MLX5_TXOFF_CONFIG(INLINE));
3756 #ifdef MLX5_PMD_SOFT_COUNTERS
3757 /* Update sent data bytes counter. */
3758 txq->stats.obytes += slen;
3762 loc->elts_free -= ds;
3763 loc->pkts_sent += ds;
3765 loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3766 txq->wqe_ci += (ds + 3) / 4;
3767 loc->wqe_free -= (ds + 3) / 4;
3771 * Update send loop variables and WQE for eMPW loop
3772 * with data inlining. Gets the size of pushed descriptors
3773 * and data to the WQE.
3776 * Pointer to TX queue structure.
3778 * Pointer to burst routine local context.
3780 * Total size of descriptor/data in bytes.
3782 * Accumulated statistics, data bytes sent.
3784 * Configured Tx offloads mask. It is fully defined at
3785 * compile time and may be used for optimization.
3788 * Nothing is returned; the Tx queue and local context
3789 * counters are updated for the closed eMPW session.
3791 static __rte_always_inline void
3792 mlx5_tx_idone_empw(struct mlx5_txq_data *restrict txq,
3793 struct mlx5_txq_local *restrict loc,
3796 unsigned int olx __rte_unused)
3798 assert(MLX5_TXOFF_CONFIG(INLINE));
3799 assert((len % MLX5_WSEG_SIZE) == 0);
3800 #ifdef MLX5_PMD_SOFT_COUNTERS
3801 /* Update sent data bytes counter. */
3802 txq->stats.obytes += slen;
3806 len = len / MLX5_WSEG_SIZE + 2;
3807 loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | len);
3808 txq->wqe_ci += (len + 3) / 4;
3809 loc->wqe_free -= (len + 3) / 4;
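/*
 * A worked example of the conversion above, assuming the usual 16-byte
 * MLX5_WSEG_SIZE: if the eMPW session pushed 96 bytes of Data Segments,
 *
 *   len    = 96 / 16 + 2 = 8 segments (Control + Ethernet + 6 data)
 *   WQEBBs = (8 + 3) / 4 = 2
 *
 * which is the amount the WQE index and free-space counters are moved by.
 */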
3813 * The set of Tx burst functions for single-segment packets
3814 * without TSO and with Multi-Packet Writing feature support.
3815 * Supports all types of Tx offloads, except multi-packets
3818 * Uses MLX5_OPCODE_EMPW to build WQEs if possible and sends
3819 * as many packets per WQE as it can. If eMPW is not configured
3820 * or the packet can not be sent with eMPW (VLAN insertion) the
3821 * ordinary SEND opcode is used and only one packet placed
3824 * Functions stop sending if it encounters the multi-segment
3825 * packet or packet with TSO requested.
3827 * The routines are responsible for storing processed mbufs
3828 * into the elts ring buffer and updating elts_head if the inlining
3829 * offload is requested. Otherwise copying mbufs to elts
3830 * can be postponed and completed at the end of the burst routine.
3833 * Pointer to TX queue structure.
3835 * Packets to transmit.
3837 * Number of packets in array.
3839 * Pointer to burst routine local context.
3841 * Configured Tx offloads mask. It is fully defined at
3842 * compile time and may be used for optimization.
3845 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3846 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3847 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
3848 * MLX5_TXCMP_CODE_TSO - TSO packet encountered.
3849 * MLX5_TXCMP_CODE_SINGLE - used inside functions set.
3850 * MLX5_TXCMP_CODE_EMPW - used inside functions set.
3852 * Local context variables updated.
3855 * The routine sends packets with MLX5_OPCODE_EMPW
3856 * without inlining; this is a dedicated optimized branch.
3857 * No VLAN insertion is supported.
3859 static __rte_always_inline enum mlx5_txcmp_code
3860 mlx5_tx_burst_empw_simple(struct mlx5_txq_data *restrict txq,
3861 struct rte_mbuf **restrict pkts,
3862 unsigned int pkts_n,
3863 struct mlx5_txq_local *restrict loc,
3867 * This subroutine is part of mlx5_tx_burst_single()
3868 * and sends single-segment packets with the eMPW opcode
3869 * without data inlining.
3871 assert(!MLX5_TXOFF_CONFIG(INLINE));
3872 assert(MLX5_TXOFF_CONFIG(EMPW));
3873 assert(loc->elts_free && loc->wqe_free);
3874 assert(pkts_n > loc->pkts_sent);
3875 static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
3876 pkts += loc->pkts_sent + 1;
3877 pkts_n -= loc->pkts_sent;
3879 struct mlx5_wqe_dseg *restrict dseg;
3880 struct mlx5_wqe_eseg *restrict eseg;
3881 enum mlx5_txcmp_code ret;
3882 unsigned int part, loop;
3883 unsigned int slen = 0;
3886 assert(NB_SEGS(loc->mbuf) == 1);
3887 part = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
3888 MLX5_MPW_MAX_PACKETS :
3889 MLX5_EMPW_MAX_PACKETS);
3890 if (unlikely(loc->elts_free < part)) {
3891 /* We do not have enough elts to save all mbufs. */
3892 if (unlikely(loc->elts_free < MLX5_EMPW_MIN_PACKETS))
3893 return MLX5_TXCMP_CODE_EXIT;
3894 /* But we are still able to send at least a minimal eMPW. */
3895 part = loc->elts_free;
3897 /* Check whether we have enough WQEs */
3898 if (unlikely(loc->wqe_free < ((2 + part + 3) / 4))) {
3899 if (unlikely(loc->wqe_free <
3900 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
3901 return MLX5_TXCMP_CODE_EXIT;
3902 part = (loc->wqe_free * 4) - 2;
3904 if (likely(part > 1))
3905 rte_prefetch0(*pkts);
3906 loc->wqe_last = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3908 * Build eMPW title WQEBB:
3909 * - Control Segment, eMPW opcode
3910 * - Ethernet Segment, no inline
3912 mlx5_tx_cseg_init(txq, loc, loc->wqe_last, part + 2,
3913 MLX5_OPCODE_ENHANCED_MPSW, olx);
3914 mlx5_tx_eseg_none(txq, loc, loc->wqe_last,
3915 olx & ~MLX5_TXOFF_CONFIG_VLAN);
3916 eseg = &loc->wqe_last->eseg;
3917 dseg = &loc->wqe_last->dseg[0];
3919 /* Store the packet length for legacy MPW. */
3920 if (MLX5_TXOFF_CONFIG(MPW))
3921 eseg->mss = rte_cpu_to_be_16
3922 (rte_pktmbuf_data_len(loc->mbuf));
3924 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
3925 #ifdef MLX5_PMD_SOFT_COUNTERS
3926 /* Update sent data bytes counter. */
3931 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3933 if (unlikely(--loop == 0))
3935 loc->mbuf = *pkts++;
3936 if (likely(loop > 1))
3937 rte_prefetch0(*pkts);
3938 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
3940 * Unroll the completion code to avoid
3941 * returning a variable value - it results in
3942 * unoptimized sequential checking in the caller.
3944 if (ret == MLX5_TXCMP_CODE_MULTI) {
3946 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3947 if (unlikely(!loc->elts_free ||
3949 return MLX5_TXCMP_CODE_EXIT;
3950 return MLX5_TXCMP_CODE_MULTI;
3952 assert(NB_SEGS(loc->mbuf) == 1);
3953 if (ret == MLX5_TXCMP_CODE_TSO) {
3955 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3956 if (unlikely(!loc->elts_free ||
3958 return MLX5_TXCMP_CODE_EXIT;
3959 return MLX5_TXCMP_CODE_TSO;
3961 if (ret == MLX5_TXCMP_CODE_SINGLE) {
3963 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3964 if (unlikely(!loc->elts_free ||
3966 return MLX5_TXCMP_CODE_EXIT;
3967 return MLX5_TXCMP_CODE_SINGLE;
3969 if (ret != MLX5_TXCMP_CODE_EMPW) {
3972 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3973 return MLX5_TXCMP_CODE_ERROR;
3976 * Check whether packet parameters coincide
3977 * within assumed eMPW batch:
3978 * - check sum settings
3980 * - software parser settings
3981 * - packets length (legacy MPW only)
3983 if (!mlx5_tx_match_empw(txq, eseg, loc, dlen, olx)) {
3986 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3987 if (unlikely(!loc->elts_free ||
3989 return MLX5_TXCMP_CODE_EXIT;
3993 /* Packet attributes match, continue the same eMPW. */
3995 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3996 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3998 /* eMPW is built successfully, update loop parameters. */
4000 assert(pkts_n >= part);
4001 #ifdef MLX5_PMD_SOFT_COUNTERS
4002 /* Update sent data bytes counter. */
4003 txq->stats.obytes += slen;
4005 loc->elts_free -= part;
4006 loc->pkts_sent += part;
4007 txq->wqe_ci += (2 + part + 3) / 4;
4008 loc->wqe_free -= (2 + part + 3) / 4;
4010 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
4011 return MLX5_TXCMP_CODE_EXIT;
4012 loc->mbuf = *pkts++;
4013 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4014 if (unlikely(ret != MLX5_TXCMP_CODE_EMPW))
4016 /* Continue sending eMPW batches. */
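/*
 * A worked example of the per-session WQE accounting in the loop above:
 * an eMPW session of part == 5 packets uses one pointer Data Segment per
 * packet plus the shared Control and Ethernet Segments, i.e.
 *
 *   segments = 2 + 5 = 7
 *   WQEBBs   = (2 + 5 + 3) / 4 = 2
 *
 * hence wqe_ci and wqe_free are advanced by two WQEBBs for the session.
 */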
4022 * The routine sends packets with MLX5_OPCODE_EMPW
4023 * with inlining, optionally supports VLAN insertion.
4025 static __rte_always_inline enum mlx5_txcmp_code
4026 mlx5_tx_burst_empw_inline(struct mlx5_txq_data *restrict txq,
4027 struct rte_mbuf **restrict pkts,
4028 unsigned int pkts_n,
4029 struct mlx5_txq_local *restrict loc,
4033 * This subroutine is part of mlx5_tx_burst_single()
4034 * and sends single-segment packets with the eMPW opcode
4035 * with data inlining.
4037 assert(MLX5_TXOFF_CONFIG(INLINE));
4038 assert(MLX5_TXOFF_CONFIG(EMPW));
4039 assert(loc->elts_free && loc->wqe_free);
4040 assert(pkts_n > loc->pkts_sent);
4041 static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
4042 pkts += loc->pkts_sent + 1;
4043 pkts_n -= loc->pkts_sent;
4045 struct mlx5_wqe_dseg *restrict dseg;
4046 struct mlx5_wqe_eseg *restrict eseg;
4047 enum mlx5_txcmp_code ret;
4048 unsigned int room, part, nlim;
4049 unsigned int slen = 0;
4051 assert(NB_SEGS(loc->mbuf) == 1);
4053 * Limits the number of packets in one WQE
4054 * to improve CQE generation latency.
4056 nlim = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
4057 MLX5_MPW_INLINE_MAX_PACKETS :
4058 MLX5_EMPW_MAX_PACKETS);
4059 /* Check whether we have the minimal amount of WQEs. */
4060 if (unlikely(loc->wqe_free <
4061 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
4062 return MLX5_TXCMP_CODE_EXIT;
4063 if (likely(pkts_n > 1))
4064 rte_prefetch0(*pkts);
4065 loc->wqe_last = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4067 * Build eMPW title WQEBB:
4068 * - Control Segment, eMPW opcode, zero DS
4069 * - Ethernet Segment, no inline
4071 mlx5_tx_cseg_init(txq, loc, loc->wqe_last, 0,
4072 MLX5_OPCODE_ENHANCED_MPSW, olx);
4073 mlx5_tx_eseg_none(txq, loc, loc->wqe_last,
4074 olx & ~MLX5_TXOFF_CONFIG_VLAN);
4075 eseg = &loc->wqe_last->eseg;
4076 dseg = &loc->wqe_last->dseg[0];
4077 /* Store the packet length for legacy MPW. */
4078 if (MLX5_TXOFF_CONFIG(MPW))
4079 eseg->mss = rte_cpu_to_be_16
4080 (rte_pktmbuf_data_len(loc->mbuf));
4081 room = RTE_MIN(MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE,
4082 loc->wqe_free) * MLX5_WQE_SIZE -
4083 MLX5_WQE_CSEG_SIZE -
4085 /* Build WQE till we have space, packets and resources. */
4088 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
4089 uint8_t *dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
4092 assert(room >= MLX5_WQE_DSEG_SIZE);
4093 assert((room % MLX5_WQE_DSEG_SIZE) == 0);
4094 assert((uintptr_t)dseg < (uintptr_t)txq->wqes_end);
4096 * Some Tx offloads may cause an error if
4097 * packet is not long enough, check against
4098 * assumed minimal length.
4100 if (unlikely(dlen <= MLX5_ESEG_MIN_INLINE_SIZE)) {
4102 if (unlikely(!part))
4103 return MLX5_TXCMP_CODE_ERROR;
4105 * We have some successfully built
4106 * packet Data Segments to send.
4108 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4109 return MLX5_TXCMP_CODE_ERROR;
4111 /* Inline or not inline - that's the Question. */
4112 if (dlen > txq->inlen_empw)
4114 /* Inline entire packet, optional VLAN insertion. */
4115 tlen = sizeof(dseg->bcount) + dlen;
4116 if (MLX5_TXOFF_CONFIG(VLAN) &&
4117 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
4119 * The packet length must be checked in
4120 * mlx5_tx_able_to_empw() and the packet is
4121 * guaranteed to fit into the inline length.
4123 assert((dlen + sizeof(struct rte_vlan_hdr)) <=
4125 tlen += sizeof(struct rte_vlan_hdr);
4128 dseg = mlx5_tx_dseg_vlan(txq, loc, dseg,
4130 #ifdef MLX5_PMD_SOFT_COUNTERS
4131 /* Update sent data bytes counter. */
4132 slen += sizeof(struct rte_vlan_hdr);
4137 dseg = mlx5_tx_dseg_empw(txq, loc, dseg,
4140 tlen = RTE_ALIGN(tlen, MLX5_WSEG_SIZE);
4141 assert(room >= tlen);
4144 * Packet data are completely inlined,
4145 * free the packet immediately.
4147 rte_pktmbuf_free_seg(loc->mbuf);
4151 * Not inlinable VLAN packets are
4152 * processed outside of this routine.
4154 assert(room >= MLX5_WQE_DSEG_SIZE);
4155 if (MLX5_TXOFF_CONFIG(VLAN))
4156 assert(!(loc->mbuf->ol_flags &
4158 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
4159 /* We have to store mbuf in elts. */
4160 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
4161 room -= MLX5_WQE_DSEG_SIZE;
4162 /* Ring buffer wraparound is checked at the loop end. */
4165 #ifdef MLX5_PMD_SOFT_COUNTERS
4166 /* Update sent data bytes counter. */
4172 if (unlikely(!pkts_n || !loc->elts_free)) {
4174 * We have no resources/packets to
4175 * continue building descriptors.
4178 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4179 return MLX5_TXCMP_CODE_EXIT;
4181 loc->mbuf = *pkts++;
4182 if (likely(pkts_n > 1))
4183 rte_prefetch0(*pkts);
4184 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4186 * Unroll the completion code to avoid
4187 * returning a variable value - it results in
4188 * unoptimized sequential checking in the caller.
4190 if (ret == MLX5_TXCMP_CODE_MULTI) {
4192 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4193 if (unlikely(!loc->elts_free ||
4195 return MLX5_TXCMP_CODE_EXIT;
4196 return MLX5_TXCMP_CODE_MULTI;
4198 assert(NB_SEGS(loc->mbuf) == 1);
4199 if (ret == MLX5_TXCMP_CODE_TSO) {
4201 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4202 if (unlikely(!loc->elts_free ||
4204 return MLX5_TXCMP_CODE_EXIT;
4205 return MLX5_TXCMP_CODE_TSO;
4207 if (ret == MLX5_TXCMP_CODE_SINGLE) {
4209 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4210 if (unlikely(!loc->elts_free ||
4212 return MLX5_TXCMP_CODE_EXIT;
4213 return MLX5_TXCMP_CODE_SINGLE;
4215 if (ret != MLX5_TXCMP_CODE_EMPW) {
4218 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4219 return MLX5_TXCMP_CODE_ERROR;
4221 /* Check if we have minimal room left. */
4223 if (unlikely(!nlim || room < MLX5_WQE_DSEG_SIZE))
4226 * Check whether packet parameters coincide
4227 * within assumed eMPW batch:
4228 * - check sum settings
4230 * - software parser settings
4231 * - packets length (legacy MPW only)
4233 if (!mlx5_tx_match_empw(txq, eseg, loc, dlen, olx))
4235 /* Packet attributes match, continue the same eMPW. */
4236 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
4237 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
4240 * We get here to close an existing eMPW
4241 * session and start a new one.
4245 if (unlikely(!part))
4246 return MLX5_TXCMP_CODE_EXIT;
4247 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4248 if (unlikely(!loc->elts_free ||
4250 return MLX5_TXCMP_CODE_EXIT;
4251 /* Continue the loop with new eMPW session. */
4257 * The routine sends packets with ordinary MLX5_OPCODE_SEND.
4258 * Data inlining and VLAN insertion are supported.
4260 static __rte_always_inline enum mlx5_txcmp_code
4261 mlx5_tx_burst_single_send(struct mlx5_txq_data *restrict txq,
4262 struct rte_mbuf **restrict pkts,
4263 unsigned int pkts_n,
4264 struct mlx5_txq_local *restrict loc,
4268 * This subroutine is part of mlx5_tx_burst_single()
4269 * and sends single-segment packets with the SEND opcode.
4271 assert(loc->elts_free && loc->wqe_free);
4272 assert(pkts_n > loc->pkts_sent);
4273 pkts += loc->pkts_sent + 1;
4274 pkts_n -= loc->pkts_sent;
4276 struct mlx5_wqe *restrict wqe;
4277 enum mlx5_txcmp_code ret;
4279 assert(NB_SEGS(loc->mbuf) == 1);
4280 if (MLX5_TXOFF_CONFIG(INLINE)) {
4281 unsigned int inlen, vlan = 0;
4283 inlen = rte_pktmbuf_data_len(loc->mbuf);
4284 if (MLX5_TXOFF_CONFIG(VLAN) &&
4285 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
4286 vlan = sizeof(struct rte_vlan_hdr);
4288 static_assert((sizeof(struct rte_vlan_hdr) +
4289 sizeof(struct rte_ether_hdr)) ==
4290 MLX5_ESEG_MIN_INLINE_SIZE,
4291 "invalid min inline data size");
4293 /*
4294 * If inlining is enabled at configuration time
4295 * the inline limit must be not less than the minimal
4296 * inline size. Otherwise we would need an extra check
4297 * on the data size to avoid crashes due to length overflow.
4298 */
4299 assert(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
4300 if (inlen <= txq->inlen_send) {
4301 unsigned int seg_n, wqe_n;
4303 rte_prefetch0(rte_pktmbuf_mtod
4304 (loc->mbuf, uint8_t *));
4305 /* Check against minimal length. */
4306 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
4307 return MLX5_TXCMP_CODE_ERROR;
4309 * Completely inlined packet data WQE:
4310 * - Control Segment, SEND opcode
4311 * - Ethernet Segment, no VLAN insertion
4312 * - Data inlined, VLAN optionally inserted
4313 * - Alignment to MLX5_WSEG_SIZE
4314 * Have to estimate amount of WQEBBs
4316 seg_n = (inlen + 3 * MLX5_WSEG_SIZE -
4317 MLX5_ESEG_MIN_INLINE_SIZE +
4318 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
4319 /* Check if there are enough WQEBBs. */
4320 wqe_n = (seg_n + 3) / 4;
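/*
 * Worked example (illustrative, assuming 16-byte WQE segments and an
 * 18-byte minimal Ethernet Segment inline): for inlen == 60 the
 * estimation gives seg_n = (60 + 48 - 18 + 15) / 16 = 6 segments and
 * wqe_n = (6 + 3) / 4 = 2 WQEBBs that must be free in the ring.
 */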
4321 if (wqe_n > loc->wqe_free)
4322 return MLX5_TXCMP_CODE_EXIT;
4323 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4324 loc->wqe_last = wqe;
4325 mlx5_tx_cseg_init(txq, loc, wqe, seg_n,
4326 MLX5_OPCODE_SEND, olx);
4327 mlx5_tx_eseg_data(txq, loc, wqe,
4328 vlan, inlen, 0, olx);
4329 txq->wqe_ci += wqe_n;
4330 loc->wqe_free -= wqe_n;
4332 * Packet data are completely inlined,
4333 * free the packet immediately.
4335 rte_pktmbuf_free_seg(loc->mbuf);
4336 } else if ((!MLX5_TXOFF_CONFIG(EMPW) ||
4337 MLX5_TXOFF_CONFIG(MPW)) &&
4338 txq->inlen_mode) {
4339 /*
4340 * If minimal inlining is requested the eMPW
4341 * feature should be disabled because the data are
4342 * inlined into the Ethernet Segment, which cannot
4343 * hold inlined data for eMPW since that segment
4344 * is shared by all packets.
4345 */
4346 struct mlx5_wqe_dseg *restrict dseg;
4351 * The inline-mode settings require
4352 * to inline the specified amount of
4353 * data bytes to the Ethernet Segment.
4354 * We should check the free space in
4355 * WQE ring buffer to inline partially.
4357 assert(txq->inlen_send >= txq->inlen_mode);
4358 assert(inlen > txq->inlen_mode);
4359 assert(txq->inlen_mode >=
4360 MLX5_ESEG_MIN_INLINE_SIZE);
4362 * Check whether there are enough free WQEBBs:
4364 * - Ethernet Segment
4365 * - First Segment of inlined Ethernet data
4366 * - ... data continued ...
4367 * - Finishing Data Segment of pointer type
4369 ds = (MLX5_WQE_CSEG_SIZE +
4370 MLX5_WQE_ESEG_SIZE +
4371 MLX5_WQE_DSEG_SIZE +
4372 txq->inlen_mode -
4373 MLX5_ESEG_MIN_INLINE_SIZE +
4374 MLX5_WQE_DSEG_SIZE +
4375 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
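/*
 * Illustrative sizing (assuming 16-byte segments, an 18-byte minimal
 * Ethernet Segment inline and, e.g., txq->inlen_mode == 128):
 * ds = (16 + 16 + 16 + 128 - 18 + 16 + 15) / 16 = 11 segments,
 * i.e. (11 + 3) / 4 = 3 WQEBBs are checked against loc->wqe_free.
 */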
4376 if (loc->wqe_free < ((ds + 3) / 4))
4377 return MLX5_TXCMP_CODE_EXIT;
4379 * Build the ordinary SEND WQE:
4381 * - Ethernet Segment, inline inlen_mode bytes
4382 * - Data Segment of pointer type
4384 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4385 loc->wqe_last = wqe;
4386 mlx5_tx_cseg_init(txq, loc, wqe, ds,
4387 MLX5_OPCODE_SEND, olx);
4388 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan,
4389 txq->inlen_mode,
4390 0, olx);
4391 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
4392 txq->inlen_mode - vlan;
4393 inlen -= txq->inlen_mode;
4394 mlx5_tx_dseg_ptr(txq, loc, dseg,
4395 dptr, inlen, olx);
4396 /*
4397 * WQE is built, update the loop parameters
4398 * and go to the next packet.
4399 */
4400 txq->wqe_ci += (ds + 3) / 4;
4401 loc->wqe_free -= (ds + 3) / 4;
4402 /* We have to store mbuf in elts.*/
4403 assert(MLX5_TXOFF_CONFIG(INLINE));
4404 txq->elts[txq->elts_head++ & txq->elts_m] =
4405 loc->mbuf;
4411 /*
4412 * Partially inlined packet data WQE, we have
4413 * some space in title WQEBB, we can fill it
4414 * with some packet data. It takes one WQEBB,
4415 * it is available, no extra space check:
4416 * - Control Segment, SEND opcode
4417 * - Ethernet Segment, no VLAN insertion
4418 * - MLX5_ESEG_MIN_INLINE_SIZE bytes of Data
4419 * - Data Segment, pointer type
4420 *
4421 * We also get here if VLAN insertion is not
4422 * supported by HW and inlining is enabled.
4423 */
4424 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4425 loc->wqe_last = wqe;
4426 mlx5_tx_cseg_init(txq, loc, wqe, 4,
4427 MLX5_OPCODE_SEND, olx);
4428 mlx5_tx_eseg_dmin(txq, loc, wqe, vlan, olx);
4429 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
4430 MLX5_ESEG_MIN_INLINE_SIZE - vlan;
4432 * The length check is performed above, by
4433 * comparing with txq->inlen_send. We should
4434 * not get overflow here.
4436 assert(inlen > MLX5_ESEG_MIN_INLINE_SIZE);
4437 dlen = inlen - MLX5_ESEG_MIN_INLINE_SIZE;
4438 mlx5_tx_dseg_ptr(txq, loc, &wqe->dseg[1],
4439 dptr, dlen, olx);
4442 /* We have to store mbuf in elts.*/
4443 assert(MLX5_TXOFF_CONFIG(INLINE));
4444 txq->elts[txq->elts_head++ & txq->elts_m] =
4445 loc->mbuf;
4448 #ifdef MLX5_PMD_SOFT_COUNTERS
4449 /* Update sent data bytes counter. */
4450 txq->stats.obytes += vlan +
4451 rte_pktmbuf_data_len(loc->mbuf);
4452 #endif
4453 } else {
4454 /*
4455 * No inlining at all: saving CPU cycles was
4456 * prioritized at configuration time, so we should
4457 * not copy any packet data into the WQE.
4458 *
4459 * SEND WQE, one WQEBB:
4460 * - Control Segment, SEND opcode
4461 * - Ethernet Segment, optional VLAN, no inline
4462 * - Data Segment, pointer type
4463 */
4464 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4465 loc->wqe_last = wqe;
4466 mlx5_tx_cseg_init(txq, loc, wqe, 3,
4467 MLX5_OPCODE_SEND, olx);
4468 mlx5_tx_eseg_none(txq, loc, wqe, olx);
4469 mlx5_tx_dseg_ptr
4470 (txq, loc, &wqe->dseg[0],
4471 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
4472 rte_pktmbuf_data_len(loc->mbuf), olx);
4475 /*
4476 * We should not store the mbuf pointer in elts
4477 * if no inlining is configured - this is done
4478 * by the calling routine as a batch copy.
4479 */
4480 assert(!MLX5_TXOFF_CONFIG(INLINE));
4482 #ifdef MLX5_PMD_SOFT_COUNTERS
4483 /* Update sent data bytes counter. */
4484 txq->stats.obytes += rte_pktmbuf_data_len(loc->mbuf);
4485 if (MLX5_TXOFF_CONFIG(VLAN) &&
4486 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
4487 txq->stats.obytes +=
4488 sizeof(struct rte_vlan_hdr);
4493 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
4494 return MLX5_TXCMP_CODE_EXIT;
4495 loc->mbuf = *pkts++;
4497 rte_prefetch0(*pkts);
4498 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4499 if (unlikely(ret != MLX5_TXCMP_CODE_SINGLE))
4500 return ret;
4501 }
4502 }
4505 static __rte_always_inline enum mlx5_txcmp_code
4506 mlx5_tx_burst_single(struct mlx5_txq_data *restrict txq,
4507 struct rte_mbuf **restrict pkts,
4508 unsigned int pkts_n,
4509 struct mlx5_txq_local *restrict loc,
4512 enum mlx5_txcmp_code ret;
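/*
 * The send loop below alternates between the two modes: consecutive
 * packets matching the eMPW criteria are coalesced by the eMPW
 * subroutines, while packets that do not fit are emitted one by one
 * with the ordinary SEND opcode until an eMPW-capable packet is seen
 * again.
 */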
4514 ret = mlx5_tx_able_to_empw(txq, loc, olx, false);
4515 if (ret == MLX5_TXCMP_CODE_SINGLE)
4516 goto ordinary_send;
4517 assert(ret == MLX5_TXCMP_CODE_EMPW);
4518 for (;;) {
4519 /* Optimize for inline/no inline eMPW send. */
4520 ret = (MLX5_TXOFF_CONFIG(INLINE)) ?
4521 mlx5_tx_burst_empw_inline
4522 (txq, pkts, pkts_n, loc, olx) :
4523 mlx5_tx_burst_empw_simple
4524 (txq, pkts, pkts_n, loc, olx);
4525 if (ret != MLX5_TXCMP_CODE_SINGLE)
4526 return ret;
4527 /* The resources to send one packet should remain. */
4528 assert(loc->elts_free && loc->wqe_free);
4529 ordinary_send:
4530 ret = mlx5_tx_burst_single_send(txq, pkts, pkts_n, loc, olx);
4531 assert(ret != MLX5_TXCMP_CODE_SINGLE);
4532 if (ret != MLX5_TXCMP_CODE_EMPW)
4533 return ret;
4534 /* The resources to send one packet should remain. */
4535 assert(loc->elts_free && loc->wqe_free);
4536 }
4537 }
4539 /**
4540 * DPDK Tx callback template. This is a configured template
4541 * used to generate routines optimized for the specified offload setup.
4542 * One of these generated functions is chosen at SQ configuration
4543 * time.
4544 *
4545 * @param txq
4546 * Generic pointer to TX queue structure.
4547 * @param[in] pkts
4548 * Packets to transmit.
4549 * @param pkts_n
4550 * Number of packets in array.
4551 * @param olx
4552 * Configured offloads mask, presents the bits of MLX5_TXOFF_CONFIG_xxx
4553 * values. Should be static to take compile time static configuration
4554 * advantages.
4555 *
4556 * @return
4557 * Number of packets successfully transmitted (<= pkts_n).
4558 */
4559 static __rte_always_inline uint16_t
4560 mlx5_tx_burst_tmpl(struct mlx5_txq_data *restrict txq,
4561 struct rte_mbuf **restrict pkts,
4562 unsigned int pkts_n,
4563 unsigned int olx)
4564 {
4565 struct mlx5_txq_local loc;
4566 enum mlx5_txcmp_code ret;
4567 unsigned int part;
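/*
 * Overall flow of the template below: process pending completions and
 * free the acknowledged mbufs, compute the free elts/WQE resources,
 * then dispatch every packet to the multi-segment, TSO or single/eMPW
 * branch. Once the loop stops, the doorbell is rung and the
 * single-segment mbufs not yet stored in elts are copied there in one
 * batch.
 */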
4569 assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
4570 assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
4571 if (unlikely(!pkts_n))
4572 return 0;
4573 loc.pkts_sent = 0;
4574 loc.pkts_copy = 0;
4575 loc.wqe_last = NULL;
4577 send_loop:
4578 loc.pkts_loop = loc.pkts_sent;
4579 /*
4580 * Check if there are some CQEs, if any:
4581 * - process an encountered errors
4582 * - process the completed WQEs
4583 * - free related mbufs
4584 * - doorbell the NIC about processed CQEs
4585 */
4586 rte_prefetch0(*(pkts + loc.pkts_sent));
4587 mlx5_tx_handle_completion(txq, olx);
4589 * Calculate the number of available resources - elts and WQEs.
4590 * There are two possible different scenarios:
4591 * - no data inlining into WQEs, one WQEBB may contain up to
4592 * four packets, in this case elts become the scarce resource
4593 * - data inlining into WQEs, one packet may require multiple
4594 * WQEBBs, the WQEs become the limiting factor.
4596 assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
4597 loc.elts_free = txq->elts_s -
4598 (uint16_t)(txq->elts_head - txq->elts_tail);
4599 assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
4600 loc.wqe_free = txq->wqe_s -
4601 (uint16_t)(txq->wqe_ci - txq->wqe_pi);
4602 if (unlikely(!loc.elts_free || !loc.wqe_free))
4603 goto burst_exit;
4604 for (;;) {
4605 /*
4606 * Fetch the packet from array. Usually this is
4607 * the first packet in a series of multi/single
4608 * segment packets.
4609 */
4610 loc.mbuf = *(pkts + loc.pkts_sent);
4611 /* Dedicated branch for multi-segment packets. */
4612 if (MLX5_TXOFF_CONFIG(MULTI) &&
4613 unlikely(NB_SEGS(loc.mbuf) > 1)) {
4614 /*
4615 * Multi-segment packet encountered.
4616 * Hardware is able to process it only
4617 * with SEND/TSO opcodes, one packet
4618 * per WQE, do it in a dedicated routine.
4619 */
4620 enter_send_multi:
4621 assert(loc.pkts_sent >= loc.pkts_copy);
4622 part = loc.pkts_sent - loc.pkts_copy;
4623 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
4624 /*
4625 * There are some single-segment mbufs not
4626 * stored in elts. The mbufs must be in the
4627 * same order as WQEs, so we must copy the
4628 * mbufs to elts here, before the mbufs of the
4629 * coming multi-segment packet are appended.
4630 */
4631 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy,
4632 part, olx);
4633 loc.pkts_copy = loc.pkts_sent;
4635 assert(pkts_n > loc.pkts_sent);
4636 ret = mlx5_tx_burst_mseg(txq, pkts, pkts_n, &loc, olx);
4637 if (!MLX5_TXOFF_CONFIG(INLINE))
4638 loc.pkts_copy = loc.pkts_sent;
4640 * These returned code checks are supposed
4641 * to be optimized out due to routine inlining.
4643 if (ret == MLX5_TXCMP_CODE_EXIT) {
4644 /*
4645 * The routine returns this code when
4646 * all packets are sent or there are not
4647 * enough resources to complete the request.
4648 */
4649 break;
4650 }
4651 if (ret == MLX5_TXCMP_CODE_ERROR) {
4652 /*
4653 * The routine returns this code when
4654 * some error in the incoming packets
4655 * format occurred.
4656 */
4657 txq->stats.oerrors++;
4658 break;
4659 }
4660 if (ret == MLX5_TXCMP_CODE_SINGLE) {
4662 * The single-segment packet was encountered
4663 * in the array, try to send it with the
4664 * best optimized way, possible engaging eMPW.
4666 goto enter_send_single;
4668 if (MLX5_TXOFF_CONFIG(TSO) &&
4669 ret == MLX5_TXCMP_CODE_TSO) {
4671 * The single-segment TSO packet was
4672 * encountered in the array.
4674 goto enter_send_tso;
4676 /* We must not get here. Something is going wrong. */
4677 assert(false);
4678 txq->stats.oerrors++;
4679 break;
4680 }
4681 /* Dedicated branch for single-segment TSO packets. */
4682 if (MLX5_TXOFF_CONFIG(TSO) &&
4683 unlikely(loc.mbuf->ol_flags & PKT_TX_TCP_SEG)) {
4684 /*
4685 * TSO might require special way for inlining
4686 * (dedicated parameters) and is sent with
4687 * MLX5_OPCODE_TSO opcode only, provide this
4688 * in a dedicated branch.
4689 */
4690 enter_send_tso:
4691 assert(NB_SEGS(loc.mbuf) == 1);
4692 assert(pkts_n > loc.pkts_sent);
4693 ret = mlx5_tx_burst_tso(txq, pkts, pkts_n, &loc, olx);
4695 * These returned code checks are supposed
4696 * to be optimized out due to routine inlining.
4698 if (ret == MLX5_TXCMP_CODE_EXIT)
4699 break;
4700 if (ret == MLX5_TXCMP_CODE_ERROR) {
4701 txq->stats.oerrors++;
4702 break;
4703 }
4704 if (ret == MLX5_TXCMP_CODE_SINGLE)
4705 goto enter_send_single;
4706 if (MLX5_TXOFF_CONFIG(MULTI) &&
4707 ret == MLX5_TXCMP_CODE_MULTI) {
4709 * The multi-segment packet was
4710 * encountered in the array.
4712 goto enter_send_multi;
4714 /* We must not get here. Something is going wrong. */
4715 assert(false);
4716 txq->stats.oerrors++;
4717 break;
4718 }
4719 /*
4720 * The dedicated branch for the single-segment packets
4721 * without TSO. Often these ones can be sent using
4722 * MLX5_OPCODE_EMPW with multiple packets in one WQE.
4723 * The routine builds the WQEs till it encounters
4724 * the TSO or multi-segment packet (in case if these
4725 * offloads are requested at SQ configuration time).
4726 */
4727 enter_send_single:
4728 assert(pkts_n > loc.pkts_sent);
4729 ret = mlx5_tx_burst_single(txq, pkts, pkts_n, &loc, olx);
4731 * These returned code checks are supposed
4732 * to be optimized out due to routine inlining.
4734 if (ret == MLX5_TXCMP_CODE_EXIT)
4735 break;
4736 if (ret == MLX5_TXCMP_CODE_ERROR) {
4737 txq->stats.oerrors++;
4738 break;
4739 }
4740 if (MLX5_TXOFF_CONFIG(MULTI) &&
4741 ret == MLX5_TXCMP_CODE_MULTI) {
4743 * The multi-segment packet was
4744 * encountered in the array.
4746 goto enter_send_multi;
4748 if (MLX5_TXOFF_CONFIG(TSO) &&
4749 ret == MLX5_TXCMP_CODE_TSO) {
4751 * The single-segment TSO packet was
4752 * encountered in the array.
4754 goto enter_send_tso;
4756 /* We must not get here. Something is going wrong. */
4757 assert(false);
4758 txq->stats.oerrors++;
4759 break;
4760 }
4761 /*
4762 * Main Tx loop is completed, do the rest:
4763 * - set completion request if thresholds are reached
4764 * - doorbell the hardware
4765 * - copy the rest of mbufs to elts (if any)
4766 */
4767 assert(MLX5_TXOFF_CONFIG(INLINE) || loc.pkts_sent >= loc.pkts_copy);
4768 /* Take a shortcut if nothing is sent. */
4769 if (unlikely(loc.pkts_sent == loc.pkts_loop))
4770 goto burst_exit;
4771 /* Request CQE generation if limits are reached. */
4772 mlx5_tx_request_completion(txq, &loc, olx);
4773 /*
4774 * Ring QP doorbell immediately after WQE building completion
4775 * to improve latencies. The pure software related data treatment
4776 * can be completed after doorbell. Tx CQEs for this SQ are
4777 * processed in this thread only by the polling.
4779 * The rdma core library can map doorbell register in two ways,
4780 * depending on the environment variable "MLX5_SHUT_UP_BF":
4782 * - as regular cached memory, the variable is either missing or
4783 * set to zero. This type of mapping may cause the significant
4784 * doorbell register writing latency and requires explicit
4785 * memory write barrier to mitigate this issue and prevent
4786 * write combining.
4787 *
4788 * - as non-cached memory, the variable is present and set to
4789 * not "0" value. This type of mapping may cause performance
4790 * impact under heavy loading conditions but the explicit write
4791 * memory barrier is not required and it may improve core
4792 * performance.
4793 *
4794 * - the legacy behaviour (prior 19.08 release) was to use some
4795 * heuristics to decide whether write memory barrier should
4796 * be performed. This behavior is supported with specifying
4797 * tx_db_nc=2, write barrier is skipped if application
4798 * provides the full recommended burst of packets, it
4799 * supposes the next packets are coming and the write barrier
4800 * will be issued on the next burst (after descriptor writing,
4801 * at least).
4802 */
4803 mlx5_tx_dbrec_cond_wmb(txq, loc.wqe_last, !txq->db_nc &&
4804 (!txq->db_heu || pkts_n % MLX5_TX_DEFAULT_BURST));
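/*
 * The mapping mode is chosen at startup - via the rdma-core
 * "MLX5_SHUT_UP_BF" environment variable or the PMD "tx_db_nc"
 * devarg mentioned above (tx_db_nc=2 selects the legacy heuristics).
 * An illustrative devargs string, with an example PCI address, would
 * be "0000:82:00.0,tx_db_nc=2".
 */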
4805 /* Not all of the mbufs may be stored into elts yet. */
4806 part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent - loc.pkts_copy;
4807 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
4808 /*
4809 * There are some single-segment mbufs not stored in elts.
4810 * It can be only if the last packet was single-segment.
4811 * The copying is gathered into one place because it is
4812 * a good opportunity to optimize that with SIMD.
4813 * Unfortunately if inlining is enabled the gaps in the
4814 * pointer array may happen due to early freeing of the
4815 * inlined mbufs.
4816 */
4817 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy, part, olx);
4818 loc.pkts_copy = loc.pkts_sent;
4820 assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
4821 assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
4822 if (pkts_n > loc.pkts_sent) {
4823 /*
4824 * If the burst size is large there might be not enough CQEs
4825 * fetched from the completion queue and not enough resources
4826 * freed to send all the packets.
4827 */
4828 goto send_loop;
4829 }
4830 burst_exit:
4831 #ifdef MLX5_PMD_SOFT_COUNTERS
4832 /* Increment sent packets counter. */
4833 txq->stats.opackets += loc.pkts_sent;
4834 #endif
4835 return loc.pkts_sent;
4836 }
4838 /* Generate routines with Enhanced Multi-Packet Write support. */
4839 MLX5_TXOFF_DECL(full_empw,
4840 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_EMPW)
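/*
 * Each declaration in this list instantiates the mlx5_tx_burst_tmpl()
 * template with a fixed, compile-time constant offload mask, so the
 * compiler folds the MLX5_TXOFF_CONFIG(...) tests and removes the code
 * paths for offloads a given routine does not support. The same names
 * appear again in the txoff_func[] table below, which is what
 * mlx5_select_tx_function() scans at queue setup time.
 */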
4842 MLX5_TXOFF_DECL(none_empw,
4843 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW)
4845 MLX5_TXOFF_DECL(md_empw,
4846 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4848 MLX5_TXOFF_DECL(mt_empw,
4849 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4850 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4852 MLX5_TXOFF_DECL(mtsc_empw,
4853 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4854 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4855 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4857 MLX5_TXOFF_DECL(mti_empw,
4858 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4859 MLX5_TXOFF_CONFIG_INLINE |
4860 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4862 MLX5_TXOFF_DECL(mtv_empw,
4863 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4864 MLX5_TXOFF_CONFIG_VLAN |
4865 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4867 MLX5_TXOFF_DECL(mtiv_empw,
4868 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4869 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4870 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4872 MLX5_TXOFF_DECL(sc_empw,
4873 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4874 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4876 MLX5_TXOFF_DECL(sci_empw,
4877 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4878 MLX5_TXOFF_CONFIG_INLINE |
4879 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4881 MLX5_TXOFF_DECL(scv_empw,
4882 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4883 MLX5_TXOFF_CONFIG_VLAN |
4884 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4886 MLX5_TXOFF_DECL(sciv_empw,
4887 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4888 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4889 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4891 MLX5_TXOFF_DECL(i_empw,
4892 MLX5_TXOFF_CONFIG_INLINE |
4893 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4895 MLX5_TXOFF_DECL(v_empw,
4896 MLX5_TXOFF_CONFIG_VLAN |
4897 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4899 MLX5_TXOFF_DECL(iv_empw,
4900 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4901 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4903 /* Generate routines without Enhanced Multi-Packet Write support. */
4904 MLX5_TXOFF_DECL(full,
4905 MLX5_TXOFF_CONFIG_FULL)
4907 MLX5_TXOFF_DECL(none,
4908 MLX5_TXOFF_CONFIG_NONE)
4910 MLX5_TXOFF_DECL(md,
4911 MLX5_TXOFF_CONFIG_METADATA)
4913 MLX5_TXOFF_DECL(mt,
4914 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4915 MLX5_TXOFF_CONFIG_METADATA)
4917 MLX5_TXOFF_DECL(mtsc,
4918 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4919 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4920 MLX5_TXOFF_CONFIG_METADATA)
4922 MLX5_TXOFF_DECL(mti,
4923 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4924 MLX5_TXOFF_CONFIG_INLINE |
4925 MLX5_TXOFF_CONFIG_METADATA)
4928 MLX5_TXOFF_DECL(mtv,
4929 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4930 MLX5_TXOFF_CONFIG_VLAN |
4931 MLX5_TXOFF_CONFIG_METADATA)
4934 MLX5_TXOFF_DECL(mtiv,
4935 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4936 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4937 MLX5_TXOFF_CONFIG_METADATA)
4939 MLX5_TXOFF_DECL(sc,
4940 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4941 MLX5_TXOFF_CONFIG_METADATA)
4943 MLX5_TXOFF_DECL(sci,
4944 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4945 MLX5_TXOFF_CONFIG_INLINE |
4946 MLX5_TXOFF_CONFIG_METADATA)
4949 MLX5_TXOFF_DECL(scv,
4950 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4951 MLX5_TXOFF_CONFIG_VLAN |
4952 MLX5_TXOFF_CONFIG_METADATA)
4955 MLX5_TXOFF_DECL(sciv,
4956 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4957 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4958 MLX5_TXOFF_CONFIG_METADATA)
4960 MLX5_TXOFF_DECL(i,
4961 MLX5_TXOFF_CONFIG_INLINE |
4962 MLX5_TXOFF_CONFIG_METADATA)
4964 MLX5_TXOFF_DECL(v,
4965 MLX5_TXOFF_CONFIG_VLAN |
4966 MLX5_TXOFF_CONFIG_METADATA)
4968 MLX5_TXOFF_DECL(iv,
4969 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4970 MLX5_TXOFF_CONFIG_METADATA)
4973 * Generate routines with Legacy Multi-Packet Write support.
4974 * This mode is supported by ConnectX-4 Lx only and imposes
4975 * offload limitations; the following are not supported:
4976 * - ACL/Flows (metadata are becoming meaningless)
4977 * - WQE Inline headers
4978 * - SRIOV (E-Switch offloads)
4980 * - tunnel encapsulation/decapsulation
4983 MLX5_TXOFF_DECL(none_mpw,
4984 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW |
4985 MLX5_TXOFF_CONFIG_MPW)
4987 MLX5_TXOFF_DECL(mci_mpw,
4988 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
4989 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
4990 MLX5_TXOFF_CONFIG_MPW)
4992 MLX5_TXOFF_DECL(mc_mpw,
4993 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
4994 MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW)
4996 MLX5_TXOFF_DECL(i_mpw,
4997 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
4998 MLX5_TXOFF_CONFIG_MPW)
5000 /*
5001 * Array of declared and compiled Tx burst function and corresponding
5002 * supported offloads set. The array is used to select the Tx burst
5003 * function for specified offloads set at Tx queue configuration time.
5004 */
5005 static struct {
5006 eth_tx_burst_t func;
5007 unsigned int olx;
5008 } txoff_func[] = {
5009 MLX5_TXOFF_INFO(full_empw,
5010 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5011 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5012 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5013 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5015 MLX5_TXOFF_INFO(none_empw,
5016 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW)
5018 MLX5_TXOFF_INFO(md_empw,
5019 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5021 MLX5_TXOFF_INFO(mt_empw,
5022 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5023 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5025 MLX5_TXOFF_INFO(mtsc_empw,
5026 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5027 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5028 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5030 MLX5_TXOFF_INFO(mti_empw,
5031 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5032 MLX5_TXOFF_CONFIG_INLINE |
5033 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5035 MLX5_TXOFF_INFO(mtv_empw,
5036 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5037 MLX5_TXOFF_CONFIG_VLAN |
5038 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5040 MLX5_TXOFF_INFO(mtiv_empw,
5041 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5042 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5043 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5045 MLX5_TXOFF_INFO(sc_empw,
5046 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5047 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5049 MLX5_TXOFF_INFO(sci_empw,
5050 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5051 MLX5_TXOFF_CONFIG_INLINE |
5052 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5054 MLX5_TXOFF_INFO(scv_empw,
5055 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5056 MLX5_TXOFF_CONFIG_VLAN |
5057 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5059 MLX5_TXOFF_INFO(sciv_empw,
5060 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5061 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5062 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5064 MLX5_TXOFF_INFO(i_empw,
5065 MLX5_TXOFF_CONFIG_INLINE |
5066 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5068 MLX5_TXOFF_INFO(v_empw,
5069 MLX5_TXOFF_CONFIG_VLAN |
5070 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5072 MLX5_TXOFF_INFO(iv_empw,
5073 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5074 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5076 MLX5_TXOFF_INFO(full,
5077 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5078 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5079 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5080 MLX5_TXOFF_CONFIG_METADATA)
5082 MLX5_TXOFF_INFO(none,
5083 MLX5_TXOFF_CONFIG_NONE)
5085 MLX5_TXOFF_INFO(md,
5086 MLX5_TXOFF_CONFIG_METADATA)
5088 MLX5_TXOFF_INFO(mt,
5089 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5090 MLX5_TXOFF_CONFIG_METADATA)
5092 MLX5_TXOFF_INFO(mtsc,
5093 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5094 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5095 MLX5_TXOFF_CONFIG_METADATA)
5097 MLX5_TXOFF_INFO(mti,
5098 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5099 MLX5_TXOFF_CONFIG_INLINE |
5100 MLX5_TXOFF_CONFIG_METADATA)
5102 MLX5_TXOFF_INFO(mtv,
5103 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5104 MLX5_TXOFF_CONFIG_VLAN |
5105 MLX5_TXOFF_CONFIG_METADATA)
5107 MLX5_TXOFF_INFO(mtiv,
5108 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5109 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5110 MLX5_TXOFF_CONFIG_METADATA)
5112 MLX5_TXOFF_INFO(sc,
5113 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5114 MLX5_TXOFF_CONFIG_METADATA)
5116 MLX5_TXOFF_INFO(sci,
5117 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5118 MLX5_TXOFF_CONFIG_INLINE |
5119 MLX5_TXOFF_CONFIG_METADATA)
5121 MLX5_TXOFF_INFO(scv,
5122 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5123 MLX5_TXOFF_CONFIG_VLAN |
5124 MLX5_TXOFF_CONFIG_METADATA)
5126 MLX5_TXOFF_INFO(sciv,
5127 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5128 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5129 MLX5_TXOFF_CONFIG_METADATA)
5131 MLX5_TXOFF_INFO(i,
5132 MLX5_TXOFF_CONFIG_INLINE |
5133 MLX5_TXOFF_CONFIG_METADATA)
5135 MLX5_TXOFF_INFO(v,
5136 MLX5_TXOFF_CONFIG_VLAN |
5137 MLX5_TXOFF_CONFIG_METADATA)
5139 MLX5_TXOFF_INFO(iv,
5140 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5141 MLX5_TXOFF_CONFIG_METADATA)
5143 MLX5_TXOFF_INFO(none_mpw,
5144 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW |
5145 MLX5_TXOFF_CONFIG_MPW)
5147 MLX5_TXOFF_INFO(mci_mpw,
5148 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5149 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5150 MLX5_TXOFF_CONFIG_MPW)
5152 MLX5_TXOFF_INFO(mc_mpw,
5153 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5154 MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW)
5156 MLX5_TXOFF_INFO(i_mpw,
5157 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5158 MLX5_TXOFF_CONFIG_MPW)
5159 };
5161 /**
5162 * Configure the Tx function to use. The routine checks configured
5163 * Tx offloads for the device and selects appropriate Tx burst
5164 * routine. There are multiple Tx burst routines compiled from
5165 * the same template in the most optimal way for the dedicated
5166 * offload setup.
5167 *
5168 * @param dev
5169 * Pointer to private data structure.
5170 *
5171 * @return
5172 * Pointer to selected Tx burst function.
5173 */
5174 eth_tx_burst_t
5175 mlx5_select_tx_function(struct rte_eth_dev *dev)
5176 {
5177 struct mlx5_priv *priv = dev->data->dev_private;
5178 struct mlx5_dev_config *config = &priv->config;
5179 uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
5180 unsigned int diff = 0, olx = 0, i, m;
5182 static_assert(MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE <=
5183 MLX5_DSEG_MAX, "invalid WQE max size");
5184 static_assert(MLX5_WQE_CSEG_SIZE == MLX5_WSEG_SIZE,
5185 "invalid WQE Control Segment size");
5186 static_assert(MLX5_WQE_ESEG_SIZE == MLX5_WSEG_SIZE,
5187 "invalid WQE Ethernet Segment size");
5188 static_assert(MLX5_WQE_DSEG_SIZE == MLX5_WSEG_SIZE,
5189 "invalid WQE Data Segment size");
5190 static_assert(MLX5_WQE_SIZE == 4 * MLX5_WSEG_SIZE,
5191 "invalid WQE size");
5193 if (tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
5194 /* We should support Multi-Segment Packets. */
5195 olx |= MLX5_TXOFF_CONFIG_MULTI;
5197 if (tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
5198 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
5199 DEV_TX_OFFLOAD_GRE_TNL_TSO |
5200 DEV_TX_OFFLOAD_IP_TNL_TSO |
5201 DEV_TX_OFFLOAD_UDP_TNL_TSO)) {
5202 /* We should support TCP Send Offload. */
5203 olx |= MLX5_TXOFF_CONFIG_TSO;
5205 if (tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO |
5206 DEV_TX_OFFLOAD_UDP_TNL_TSO |
5207 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
5208 /* We should support Software Parser for Tunnels. */
5209 olx |= MLX5_TXOFF_CONFIG_SWP;
5211 if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
5212 DEV_TX_OFFLOAD_UDP_CKSUM |
5213 DEV_TX_OFFLOAD_TCP_CKSUM |
5214 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
5215 /* We should support IP/TCP/UDP Checksums. */
5216 olx |= MLX5_TXOFF_CONFIG_CSUM;
5218 if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT) {
5219 /* We should support VLAN insertion. */
5220 olx |= MLX5_TXOFF_CONFIG_VLAN;
5222 if (priv->txqs_n && (*priv->txqs)[0]) {
5223 struct mlx5_txq_data *txd = (*priv->txqs)[0];
5225 if (txd->inlen_send) {
5226 /*
5227 * Check the data inline requirements. Data inline
5228 * is enabled on per device basis, we can check
5229 * the first Tx queue only.
5231 * If device does not support VLAN insertion in WQE
5232 * and some queues are requested to perform VLAN
5233 * insertion offload then inlining must be enabled.
5234 */
5235 olx |= MLX5_TXOFF_CONFIG_INLINE;
5238 if (config->mps == MLX5_MPW_ENHANCED &&
5239 config->txq_inline_min <= 0) {
5241 * The NIC supports Enhanced Multi-Packet Write
5242 * and does not require minimal inline data.
5244 olx |= MLX5_TXOFF_CONFIG_EMPW;
5246 if (rte_flow_dynf_metadata_avail()) {
5247 /* We should support Flow metadata. */
5248 olx |= MLX5_TXOFF_CONFIG_METADATA;
5250 if (config->mps == MLX5_MPW) {
5252 * The NIC supports Legacy Multi-Packet Write.
5253 * The MLX5_TXOFF_CONFIG_MPW controls the
5254 * descriptor building method in combination
5255 * with MLX5_TXOFF_CONFIG_EMPW.
5257 if (!(olx & (MLX5_TXOFF_CONFIG_TSO |
5258 MLX5_TXOFF_CONFIG_SWP |
5259 MLX5_TXOFF_CONFIG_VLAN |
5260 MLX5_TXOFF_CONFIG_METADATA)))
5261 olx |= MLX5_TXOFF_CONFIG_EMPW |
5262 MLX5_TXOFF_CONFIG_MPW;
5263 }
5264 /*
5265 * Scan the routines table to find the minimal
5266 * satisfying routine with requested offloads.
5267 */
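/*
 * Selection example (illustrative): with olx == CSUM | METADATA and
 * neither eMPW nor inlining enabled there is no exact table entry;
 * entries carrying the EMPW or INLINE bits are skipped by the rules
 * below, and among the remaining supersets the "sc" routine
 * (SWP | CSUM | METADATA) wins with a single extra offload bit.
 */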
5268 m = RTE_DIM(txoff_func);
5269 for (i = 0; i < RTE_DIM(txoff_func); i++) {
5272 tmp = txoff_func[i].olx;
5273 if (tmp == olx) {
5274 /* Meets requested offloads exactly.*/
5275 m = i;
5276 break;
5277 }
5278 if ((tmp & olx) != olx) {
5279 /* Does not meet requested offloads at all. */
5280 continue;
5281 }
5282 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_EMPW)
5283 /* Do not enable eMPW if not configured. */
5284 continue;
5285 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_INLINE)
5286 /* Do not enable inlining if not configured. */
5287 continue;
5288 /*
5289 * Some routine meets the requirements.
5290 * Check whether it has minimal amount
5291 * of not requested offloads.
5292 */
5293 tmp = __builtin_popcountl(tmp & ~olx);
5294 if (m >= RTE_DIM(txoff_func) || tmp < diff) {
5295 /* First or better match, save and continue. */
5296 m = i;
5297 diff = tmp;
5298 continue;
5299 }
5300 if (tmp == diff) {
5301 tmp = txoff_func[i].olx ^ txoff_func[m].olx;
5302 if (__builtin_ffsl(txoff_func[i].olx & ~tmp) <
5303 __builtin_ffsl(txoff_func[m].olx & ~tmp)) {
5304 /* Lighter not requested offload. */
5305 m = i;
5306 }
5307 }
5308 }
5309 if (m >= RTE_DIM(txoff_func)) {
5310 DRV_LOG(DEBUG, "port %u has no selected Tx function"
5311 " for requested offloads %04X",
5312 dev->data->port_id, olx);
5313 return NULL;
5314 }
5315 DRV_LOG(DEBUG, "port %u has selected Tx function"
5316 " supporting offloads %04X/%04X",
5317 dev->data->port_id, olx, txoff_func[m].olx);
5318 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MULTI)
5319 DRV_LOG(DEBUG, "\tMULTI (multi segment)");
5320 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_TSO)
5321 DRV_LOG(DEBUG, "\tTSO (TCP send offload)");
5322 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_SWP)
5323 DRV_LOG(DEBUG, "\tSWP (software parser)");
5324 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_CSUM)
5325 DRV_LOG(DEBUG, "\tCSUM (checksum offload)");
5326 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_INLINE)
5327 DRV_LOG(DEBUG, "\tINLIN (inline data)");
5328 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_VLAN)
5329 DRV_LOG(DEBUG, "\tVLANI (VLAN insertion)");
5330 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_METADATA)
5331 DRV_LOG(DEBUG, "\tMETAD (tx Flow metadata)");
5332 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_EMPW) {
5333 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MPW)
5334 DRV_LOG(DEBUG, "\tMPW (Legacy MPW)");
5335 else
5336 DRV_LOG(DEBUG, "\tEMPW (Enhanced MPW)");
5337 }
5338 return txoff_func[m].func;
5339 }