1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015-2019 Mellanox Technologies, Ltd
11 #include <rte_mempool.h>
12 #include <rte_prefetch.h>
13 #include <rte_common.h>
14 #include <rte_branch_prediction.h>
15 #include <rte_ether.h>
16 #include <rte_cycles.h>
20 #include <mlx5_common.h>
22 #include "mlx5_autoconf.h"
23 #include "mlx5_defs.h"
26 #include "mlx5_utils.h"
27 #include "mlx5_rxtx.h"
29 /* TX burst subroutines return codes. */
30 enum mlx5_txcmp_code {
31 MLX5_TXCMP_CODE_EXIT = 0,
32 MLX5_TXCMP_CODE_ERROR,
33 MLX5_TXCMP_CODE_SINGLE,
34 MLX5_TXCMP_CODE_MULTI,
40 * These defines are used to configure Tx burst routine option set
41 * supported at compile time. The not specified options are optimized out
42 * out due to if conditions can be explicitly calculated at compile time.
43 * The offloads with bigger runtime check (require more CPU cycles to
44 * skip) overhead should have the bigger index - this is needed to
45 * select the better matching routine function if no exact match and
46 * some offloads are not actually requested.
48 #define MLX5_TXOFF_CONFIG_MULTI (1u << 0) /* Multi-segment packets.*/
49 #define MLX5_TXOFF_CONFIG_TSO (1u << 1) /* TCP send offload supported.*/
50 #define MLX5_TXOFF_CONFIG_SWP (1u << 2) /* Tunnels/SW Parser offloads.*/
51 #define MLX5_TXOFF_CONFIG_CSUM (1u << 3) /* Check Sums offloaded. */
52 #define MLX5_TXOFF_CONFIG_INLINE (1u << 4) /* Data inlining supported. */
53 #define MLX5_TXOFF_CONFIG_VLAN (1u << 5) /* VLAN insertion supported.*/
54 #define MLX5_TXOFF_CONFIG_METADATA (1u << 6) /* Flow metadata. */
55 #define MLX5_TXOFF_CONFIG_EMPW (1u << 8) /* Enhanced MPW supported.*/
56 #define MLX5_TXOFF_CONFIG_MPW (1u << 9) /* Legacy MPW supported.*/
57 #define MLX5_TXOFF_CONFIG_TXPP (1u << 10) /* Scheduling on timestamp.*/
59 /* The most common offloads groups. */
60 #define MLX5_TXOFF_CONFIG_NONE 0
61 #define MLX5_TXOFF_CONFIG_FULL (MLX5_TXOFF_CONFIG_MULTI | \
62 MLX5_TXOFF_CONFIG_TSO | \
63 MLX5_TXOFF_CONFIG_SWP | \
64 MLX5_TXOFF_CONFIG_CSUM | \
65 MLX5_TXOFF_CONFIG_INLINE | \
66 MLX5_TXOFF_CONFIG_VLAN | \
67 MLX5_TXOFF_CONFIG_METADATA)
69 #define MLX5_TXOFF_CONFIG(mask) (olx & MLX5_TXOFF_CONFIG_##mask)
71 #define MLX5_TXOFF_DECL(func, olx) \
72 static uint16_t mlx5_tx_burst_##func(void *txq, \
73 struct rte_mbuf **pkts, \
76 return mlx5_tx_burst_tmpl((struct mlx5_txq_data *)txq, \
77 pkts, pkts_n, (olx)); \
80 #define MLX5_TXOFF_INFO(func, olx) {mlx5_tx_burst_##func, olx},
82 static __rte_always_inline uint32_t
83 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
84 volatile struct mlx5_mini_cqe8 *mcqe);
86 static __rte_always_inline int
87 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
88 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe);
90 static __rte_always_inline uint32_t
91 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe);
93 static __rte_always_inline void
94 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
95 volatile struct mlx5_cqe *cqe,
96 volatile struct mlx5_mini_cqe8 *mcqe);
99 mlx5_queue_state_modify(struct rte_eth_dev *dev,
100 struct mlx5_mp_arg_queue_state_modify *sm);
103 mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *__rte_restrict tcp,
104 volatile struct mlx5_cqe *__rte_restrict cqe,
105 uint32_t phcsum, uint8_t l4_type);
108 mlx5_lro_update_hdr(uint8_t *__rte_restrict padd,
109 volatile struct mlx5_cqe *__rte_restrict cqe,
110 volatile struct mlx5_mini_cqe8 *mcqe,
111 struct mlx5_rxq_data *rxq, uint32_t len);
113 uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
114 [0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
117 uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned;
118 uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned;
120 uint64_t rte_net_mlx5_dynf_inline_mask;
121 #define PKT_TX_DYNF_NOINLINE rte_net_mlx5_dynf_inline_mask
124 * Build a table to translate Rx completion flags to packet type.
126 * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
129 mlx5_set_ptype_table(void)
132 uint32_t (*p)[RTE_DIM(mlx5_ptype_table)] = &mlx5_ptype_table;
134 /* Last entry must not be overwritten, reserved for errored packet. */
135 for (i = 0; i < RTE_DIM(mlx5_ptype_table) - 1; ++i)
136 (*p)[i] = RTE_PTYPE_UNKNOWN;
138 * The index to the array should have:
139 * bit[1:0] = l3_hdr_type
140 * bit[4:2] = l4_hdr_type
143 * bit[7] = outer_l3_type
146 (*p)[0x00] = RTE_PTYPE_L2_ETHER;
148 (*p)[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
149 RTE_PTYPE_L4_NONFRAG;
150 (*p)[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
151 RTE_PTYPE_L4_NONFRAG;
153 (*p)[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
155 (*p)[0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
158 (*p)[0x05] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
160 (*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
162 (*p)[0x0d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
164 (*p)[0x0e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
166 (*p)[0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
168 (*p)[0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
171 (*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
173 (*p)[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
175 /* Repeat with outer_l3_type being set. Just in case. */
176 (*p)[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
177 RTE_PTYPE_L4_NONFRAG;
178 (*p)[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
179 RTE_PTYPE_L4_NONFRAG;
180 (*p)[0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
182 (*p)[0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
184 (*p)[0x85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
186 (*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
188 (*p)[0x8d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
190 (*p)[0x8e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
192 (*p)[0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
194 (*p)[0x92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
196 (*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
198 (*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
201 (*p)[0x40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
202 (*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
203 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
204 RTE_PTYPE_INNER_L4_NONFRAG;
205 (*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
206 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
207 RTE_PTYPE_INNER_L4_NONFRAG;
208 (*p)[0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
209 (*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
210 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
211 RTE_PTYPE_INNER_L4_NONFRAG;
212 (*p)[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
213 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
214 RTE_PTYPE_INNER_L4_NONFRAG;
215 /* Tunneled - Fragmented */
216 (*p)[0x61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
217 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
218 RTE_PTYPE_INNER_L4_FRAG;
219 (*p)[0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
220 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
221 RTE_PTYPE_INNER_L4_FRAG;
222 (*p)[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
223 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
224 RTE_PTYPE_INNER_L4_FRAG;
225 (*p)[0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
226 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
227 RTE_PTYPE_INNER_L4_FRAG;
229 (*p)[0x45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
230 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
231 RTE_PTYPE_INNER_L4_TCP;
232 (*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
233 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
234 RTE_PTYPE_INNER_L4_TCP;
235 (*p)[0x4d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
236 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
237 RTE_PTYPE_INNER_L4_TCP;
238 (*p)[0x4e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
239 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
240 RTE_PTYPE_INNER_L4_TCP;
241 (*p)[0x51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
242 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
243 RTE_PTYPE_INNER_L4_TCP;
244 (*p)[0x52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
245 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
246 RTE_PTYPE_INNER_L4_TCP;
247 (*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
248 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
249 RTE_PTYPE_INNER_L4_TCP;
250 (*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
251 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
252 RTE_PTYPE_INNER_L4_TCP;
253 (*p)[0xcd] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
254 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
255 RTE_PTYPE_INNER_L4_TCP;
256 (*p)[0xce] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
257 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
258 RTE_PTYPE_INNER_L4_TCP;
259 (*p)[0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
260 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
261 RTE_PTYPE_INNER_L4_TCP;
262 (*p)[0xd2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
263 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
264 RTE_PTYPE_INNER_L4_TCP;
266 (*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
267 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
268 RTE_PTYPE_INNER_L4_UDP;
269 (*p)[0x4a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
270 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
271 RTE_PTYPE_INNER_L4_UDP;
272 (*p)[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
273 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
274 RTE_PTYPE_INNER_L4_UDP;
275 (*p)[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
276 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
277 RTE_PTYPE_INNER_L4_UDP;
281 * Build a table to translate packet to checksum type of Verbs.
284 mlx5_set_cksum_table(void)
290 * The index should have:
291 * bit[0] = PKT_TX_TCP_SEG
292 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
293 * bit[4] = PKT_TX_IP_CKSUM
294 * bit[8] = PKT_TX_OUTER_IP_CKSUM
297 for (i = 0; i < RTE_DIM(mlx5_cksum_table); ++i) {
300 /* Tunneled packet. */
301 if (i & (1 << 8)) /* Outer IP. */
302 v |= MLX5_ETH_WQE_L3_CSUM;
303 if (i & (1 << 4)) /* Inner IP. */
304 v |= MLX5_ETH_WQE_L3_INNER_CSUM;
305 if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
306 v |= MLX5_ETH_WQE_L4_INNER_CSUM;
309 if (i & (1 << 4)) /* IP. */
310 v |= MLX5_ETH_WQE_L3_CSUM;
311 if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
312 v |= MLX5_ETH_WQE_L4_CSUM;
314 mlx5_cksum_table[i] = v;
319 * Build a table to translate packet type of mbuf to SWP type of Verbs.
322 mlx5_set_swp_types_table(void)
328 * The index should have:
329 * bit[0:1] = PKT_TX_L4_MASK
330 * bit[4] = PKT_TX_IPV6
331 * bit[8] = PKT_TX_OUTER_IPV6
332 * bit[9] = PKT_TX_OUTER_UDP
334 for (i = 0; i < RTE_DIM(mlx5_swp_types_table); ++i) {
337 v |= MLX5_ETH_WQE_L3_OUTER_IPV6;
339 v |= MLX5_ETH_WQE_L4_OUTER_UDP;
341 v |= MLX5_ETH_WQE_L3_INNER_IPV6;
342 if ((i & 3) == (PKT_TX_UDP_CKSUM >> 52))
343 v |= MLX5_ETH_WQE_L4_INNER_UDP;
344 mlx5_swp_types_table[i] = v;
349 * Set Software Parser flags and offsets in Ethernet Segment of WQE.
350 * Flags must be preliminary initialized to zero.
353 * Pointer to burst routine local context.
355 * Pointer to store Software Parser flags
357 * Configured Tx offloads mask. It is fully defined at
358 * compile time and may be used for optimization.
361 * Software Parser offsets packed in dword.
362 * Software Parser flags are set by pointer.
364 static __rte_always_inline uint32_t
365 txq_mbuf_to_swp(struct mlx5_txq_local *__rte_restrict loc,
370 unsigned int idx, off;
373 if (!MLX5_TXOFF_CONFIG(SWP))
375 ol = loc->mbuf->ol_flags;
376 tunnel = ol & PKT_TX_TUNNEL_MASK;
378 * Check whether Software Parser is required.
379 * Only customized tunnels may ask for.
381 if (likely(tunnel != PKT_TX_TUNNEL_UDP && tunnel != PKT_TX_TUNNEL_IP))
384 * The index should have:
385 * bit[0:1] = PKT_TX_L4_MASK
386 * bit[4] = PKT_TX_IPV6
387 * bit[8] = PKT_TX_OUTER_IPV6
388 * bit[9] = PKT_TX_OUTER_UDP
390 idx = (ol & (PKT_TX_L4_MASK | PKT_TX_IPV6 | PKT_TX_OUTER_IPV6)) >> 52;
391 idx |= (tunnel == PKT_TX_TUNNEL_UDP) ? (1 << 9) : 0;
392 *swp_flags = mlx5_swp_types_table[idx];
394 * Set offsets for SW parser. Since ConnectX-5, SW parser just
395 * complements HW parser. SW parser starts to engage only if HW parser
396 * can't reach a header. For the older devices, HW parser will not kick
397 * in if any of SWP offsets is set. Therefore, all of the L3 offsets
398 * should be set regardless of HW offload.
400 off = loc->mbuf->outer_l2_len;
401 if (MLX5_TXOFF_CONFIG(VLAN) && ol & PKT_TX_VLAN_PKT)
402 off += sizeof(struct rte_vlan_hdr);
403 set = (off >> 1) << 8; /* Outer L3 offset. */
404 off += loc->mbuf->outer_l3_len;
405 if (tunnel == PKT_TX_TUNNEL_UDP)
406 set |= off >> 1; /* Outer L4 offset. */
407 if (ol & (PKT_TX_IPV4 | PKT_TX_IPV6)) { /* Inner IP. */
408 const uint64_t csum = ol & PKT_TX_L4_MASK;
409 off += loc->mbuf->l2_len;
410 set |= (off >> 1) << 24; /* Inner L3 offset. */
411 if (csum == PKT_TX_TCP_CKSUM ||
412 csum == PKT_TX_UDP_CKSUM ||
413 (MLX5_TXOFF_CONFIG(TSO) && ol & PKT_TX_TCP_SEG)) {
414 off += loc->mbuf->l3_len;
415 set |= (off >> 1) << 16; /* Inner L4 offset. */
418 set = rte_cpu_to_le_32(set);
423 * Convert the Checksum offloads to Verbs.
426 * Pointer to the mbuf.
429 * Converted checksum flags.
431 static __rte_always_inline uint8_t
432 txq_ol_cksum_to_cs(struct rte_mbuf *buf)
435 uint8_t is_tunnel = !!(buf->ol_flags & PKT_TX_TUNNEL_MASK);
436 const uint64_t ol_flags_mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK |
437 PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM;
440 * The index should have:
441 * bit[0] = PKT_TX_TCP_SEG
442 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
443 * bit[4] = PKT_TX_IP_CKSUM
444 * bit[8] = PKT_TX_OUTER_IP_CKSUM
447 idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (!!is_tunnel << 9);
448 return mlx5_cksum_table[idx];
452 * Internal function to compute the number of used descriptors in an RX queue
458 * The number of used rx descriptor.
461 rx_queue_count(struct mlx5_rxq_data *rxq)
463 struct rxq_zip *zip = &rxq->zip;
464 volatile struct mlx5_cqe *cqe;
465 const unsigned int cqe_n = (1 << rxq->cqe_n);
466 const unsigned int sges_n = (1 << rxq->sges_n);
467 const unsigned int elts_n = (1 << rxq->elts_n);
468 const unsigned int strd_n = (1 << rxq->strd_num_n);
469 const unsigned int cqe_cnt = cqe_n - 1;
470 unsigned int cq_ci, used;
472 /* if we are processing a compressed cqe */
474 used = zip->cqe_cnt - zip->ai;
480 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
481 while (check_cqe(cqe, cqe_n, cq_ci) != MLX5_CQE_STATUS_HW_OWN) {
485 op_own = cqe->op_own;
486 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
487 n = rte_be_to_cpu_32(cqe->byte_cnt);
492 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
494 used = RTE_MIN(used * sges_n, elts_n * strd_n);
499 * DPDK callback to check the status of a rx descriptor.
504 * The index of the descriptor in the ring.
507 * The status of the tx descriptor.
510 mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
512 struct mlx5_rxq_data *rxq = rx_queue;
513 struct mlx5_rxq_ctrl *rxq_ctrl =
514 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
515 struct rte_eth_dev *dev = ETH_DEV(rxq_ctrl->priv);
517 if (dev->rx_pkt_burst == NULL ||
518 dev->rx_pkt_burst == removed_rx_burst) {
522 if (offset >= (1 << rxq->cqe_n)) {
526 if (offset < rx_queue_count(rxq))
527 return RTE_ETH_RX_DESC_DONE;
528 return RTE_ETH_RX_DESC_AVAIL;
532 * DPDK callback to get the RX queue information
535 * Pointer to the device structure.
538 * Rx queue identificator.
541 * Pointer to the RX queue information structure.
548 mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
549 struct rte_eth_rxq_info *qinfo)
551 struct mlx5_priv *priv = dev->data->dev_private;
552 struct mlx5_rxq_data *rxq = (*priv->rxqs)[rx_queue_id];
553 struct mlx5_rxq_ctrl *rxq_ctrl =
554 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
558 qinfo->mp = mlx5_rxq_mprq_enabled(rxq) ?
559 rxq->mprq_mp : rxq->mp;
560 qinfo->conf.rx_thresh.pthresh = 0;
561 qinfo->conf.rx_thresh.hthresh = 0;
562 qinfo->conf.rx_thresh.wthresh = 0;
563 qinfo->conf.rx_free_thresh = rxq->rq_repl_thresh;
564 qinfo->conf.rx_drop_en = 1;
565 qinfo->conf.rx_deferred_start = rxq_ctrl ? 0 : 1;
566 qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
567 qinfo->scattered_rx = dev->data->scattered_rx;
568 qinfo->nb_desc = mlx5_rxq_mprq_enabled(rxq) ?
569 (1 << rxq->elts_n) * (1 << rxq->strd_num_n) :
574 * DPDK callback to get the RX packet burst mode information
577 * Pointer to the device structure.
580 * Rx queue identificatior.
583 * Pointer to the burts mode information.
586 * 0 as success, -EINVAL as failure.
590 mlx5_rx_burst_mode_get(struct rte_eth_dev *dev,
591 uint16_t rx_queue_id __rte_unused,
592 struct rte_eth_burst_mode *mode)
594 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
595 struct mlx5_priv *priv = dev->data->dev_private;
596 struct mlx5_rxq_data *rxq;
598 rxq = (*priv->rxqs)[rx_queue_id];
603 if (pkt_burst == mlx5_rx_burst) {
604 snprintf(mode->info, sizeof(mode->info), "%s", "Scalar");
605 } else if (pkt_burst == mlx5_rx_burst_mprq) {
606 snprintf(mode->info, sizeof(mode->info), "%s", "Multi-Packet RQ");
607 } else if (pkt_burst == mlx5_rx_burst_vec) {
608 #if defined RTE_ARCH_X86_64
609 snprintf(mode->info, sizeof(mode->info), "%s", "Vector SSE");
610 #elif defined RTE_ARCH_ARM64
611 snprintf(mode->info, sizeof(mode->info), "%s", "Vector Neon");
612 #elif defined RTE_ARCH_PPC_64
613 snprintf(mode->info, sizeof(mode->info), "%s", "Vector AltiVec");
617 } else if (pkt_burst == mlx5_rx_burst_mprq_vec) {
618 #if defined RTE_ARCH_X86_64
619 snprintf(mode->info, sizeof(mode->info), "%s", "MPRQ Vector SSE");
620 #elif defined RTE_ARCH_ARM64
621 snprintf(mode->info, sizeof(mode->info), "%s", "MPRQ Vector Neon");
622 #elif defined RTE_ARCH_PPC_64
623 snprintf(mode->info, sizeof(mode->info), "%s", "MPRQ Vector AltiVec");
634 * DPDK callback to get the number of used descriptors in a RX queue
637 * Pointer to the device structure.
643 * The number of used rx descriptor.
644 * -EINVAL if the queue is invalid
647 mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
649 struct mlx5_priv *priv = dev->data->dev_private;
650 struct mlx5_rxq_data *rxq;
652 if (dev->rx_pkt_burst == NULL ||
653 dev->rx_pkt_burst == removed_rx_burst) {
657 rxq = (*priv->rxqs)[rx_queue_id];
662 return rx_queue_count(rxq);
665 #define MLX5_SYSTEM_LOG_DIR "/var/log"
667 * Dump debug information to log file.
672 * If not NULL this string is printed as a header to the output
673 * and the output will be in hexadecimal view.
675 * This is the buffer address to print out.
677 * The number of bytes to dump out.
680 mlx5_dump_debug_information(const char *fname, const char *hex_title,
681 const void *buf, unsigned int hex_len)
685 MKSTR(path, "%s/%s", MLX5_SYSTEM_LOG_DIR, fname);
686 fd = fopen(path, "a+");
688 DRV_LOG(WARNING, "cannot open %s for debug dump", path);
689 MKSTR(path2, "./%s", fname);
690 fd = fopen(path2, "a+");
692 DRV_LOG(ERR, "cannot open %s for debug dump", path2);
695 DRV_LOG(INFO, "New debug dump in file %s", path2);
697 DRV_LOG(INFO, "New debug dump in file %s", path);
700 rte_hexdump(fd, hex_title, buf, hex_len);
702 fprintf(fd, "%s", (const char *)buf);
703 fprintf(fd, "\n\n\n");
708 * Move QP from error state to running state and initialize indexes.
711 * Pointer to TX queue control structure.
714 * 0 on success, else -1.
717 tx_recover_qp(struct mlx5_txq_ctrl *txq_ctrl)
719 struct mlx5_mp_arg_queue_state_modify sm = {
721 .queue_id = txq_ctrl->txq.idx,
724 if (mlx5_queue_state_modify(ETH_DEV(txq_ctrl->priv), &sm))
726 txq_ctrl->txq.wqe_ci = 0;
727 txq_ctrl->txq.wqe_pi = 0;
728 txq_ctrl->txq.elts_comp = 0;
732 /* Return 1 if the error CQE is signed otherwise, sign it and return 0. */
734 check_err_cqe_seen(volatile struct mlx5_err_cqe *err_cqe)
736 static const uint8_t magic[] = "seen";
740 for (i = 0; i < sizeof(magic); ++i)
741 if (!ret || err_cqe->rsvd1[i] != magic[i]) {
743 err_cqe->rsvd1[i] = magic[i];
752 * Pointer to TX queue structure.
754 * Pointer to the error CQE.
757 * Negative value if queue recovery failed, otherwise
758 * the error completion entry is handled successfully.
761 mlx5_tx_error_cqe_handle(struct mlx5_txq_data *__rte_restrict txq,
762 volatile struct mlx5_err_cqe *err_cqe)
764 if (err_cqe->syndrome != MLX5_CQE_SYNDROME_WR_FLUSH_ERR) {
765 const uint16_t wqe_m = ((1 << txq->wqe_n) - 1);
766 struct mlx5_txq_ctrl *txq_ctrl =
767 container_of(txq, struct mlx5_txq_ctrl, txq);
768 uint16_t new_wqe_pi = rte_be_to_cpu_16(err_cqe->wqe_counter);
769 int seen = check_err_cqe_seen(err_cqe);
771 if (!seen && txq_ctrl->dump_file_n <
772 txq_ctrl->priv->config.max_dump_files_num) {
773 MKSTR(err_str, "Unexpected CQE error syndrome "
774 "0x%02x CQN = %u SQN = %u wqe_counter = %u "
775 "wq_ci = %u cq_ci = %u", err_cqe->syndrome,
776 txq->cqe_s, txq->qp_num_8s >> 8,
777 rte_be_to_cpu_16(err_cqe->wqe_counter),
778 txq->wqe_ci, txq->cq_ci);
779 MKSTR(name, "dpdk_mlx5_port_%u_txq_%u_index_%u_%u",
780 PORT_ID(txq_ctrl->priv), txq->idx,
781 txq_ctrl->dump_file_n, (uint32_t)rte_rdtsc());
782 mlx5_dump_debug_information(name, NULL, err_str, 0);
783 mlx5_dump_debug_information(name, "MLX5 Error CQ:",
784 (const void *)((uintptr_t)
788 mlx5_dump_debug_information(name, "MLX5 Error SQ:",
789 (const void *)((uintptr_t)
793 txq_ctrl->dump_file_n++;
797 * Count errors in WQEs units.
798 * Later it can be improved to count error packets,
799 * for example, by SQ parsing to find how much packets
800 * should be counted for each WQE.
802 txq->stats.oerrors += ((txq->wqe_ci & wqe_m) -
804 if (tx_recover_qp(txq_ctrl)) {
805 /* Recovering failed - retry later on the same WQE. */
808 /* Release all the remaining buffers. */
809 txq_free_elts(txq_ctrl);
815 * Translate RX completion flags to packet type.
818 * Pointer to RX queue structure.
822 * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
825 * Packet type for struct rte_mbuf.
827 static inline uint32_t
828 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
829 volatile struct mlx5_mini_cqe8 *mcqe)
833 uint8_t pinfo = (cqe->pkt_info & 0x3) << 6;
835 /* Get l3/l4 header from mini-CQE in case L3/L4 format*/
837 rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_L34H_STRIDX)
838 ptype = (cqe->hdr_type_etc & 0xfc00) >> 10;
840 ptype = mcqe->hdr_type >> 2;
842 * The index to the array should have:
843 * bit[1:0] = l3_hdr_type
844 * bit[4:2] = l4_hdr_type
847 * bit[7] = outer_l3_type
850 return mlx5_ptype_table[idx] | rxq->tunnel * !!(idx & (1 << 6));
854 * Initialize Rx WQ and indexes.
857 * Pointer to RX queue structure.
860 mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
862 const unsigned int wqe_n = 1 << rxq->elts_n;
865 for (i = 0; (i != wqe_n); ++i) {
866 volatile struct mlx5_wqe_data_seg *scat;
870 if (mlx5_rxq_mprq_enabled(rxq)) {
871 struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[i];
873 scat = &((volatile struct mlx5_wqe_mprq *)
875 addr = (uintptr_t)mlx5_mprq_buf_addr(buf,
876 1 << rxq->strd_num_n);
877 byte_count = (1 << rxq->strd_sz_n) *
878 (1 << rxq->strd_num_n);
880 struct rte_mbuf *buf = (*rxq->elts)[i];
882 scat = &((volatile struct mlx5_wqe_data_seg *)
884 addr = rte_pktmbuf_mtod(buf, uintptr_t);
885 byte_count = DATA_LEN(buf);
887 /* scat->addr must be able to store a pointer. */
888 MLX5_ASSERT(sizeof(scat->addr) >= sizeof(uintptr_t));
889 *scat = (struct mlx5_wqe_data_seg){
890 .addr = rte_cpu_to_be_64(addr),
891 .byte_count = rte_cpu_to_be_32(byte_count),
892 .lkey = mlx5_rx_addr2mr(rxq, addr),
895 rxq->consumed_strd = 0;
896 rxq->decompressed = 0;
898 rxq->zip = (struct rxq_zip){
901 rxq->elts_ci = mlx5_rxq_mprq_enabled(rxq) ?
902 (wqe_n >> rxq->sges_n) * (1 << rxq->strd_num_n) : 0;
903 /* Update doorbell counter. */
904 rxq->rq_ci = wqe_n >> rxq->sges_n;
906 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
910 * Modify a Verbs/DevX queue state.
911 * This must be called from the primary process.
914 * Pointer to Ethernet device.
916 * State modify request parameters.
919 * 0 in case of success else non-zero value and rte_errno is set.
922 mlx5_queue_state_modify_primary(struct rte_eth_dev *dev,
923 const struct mlx5_mp_arg_queue_state_modify *sm)
926 struct mlx5_priv *priv = dev->data->dev_private;
929 struct mlx5_rxq_data *rxq = (*priv->rxqs)[sm->queue_id];
930 struct mlx5_rxq_ctrl *rxq_ctrl =
931 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
933 ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, sm->state);
935 DRV_LOG(ERR, "Cannot change Rx WQ state to %u - %s",
936 sm->state, strerror(errno));
941 struct mlx5_txq_data *txq = (*priv->txqs)[sm->queue_id];
942 struct mlx5_txq_ctrl *txq_ctrl =
943 container_of(txq, struct mlx5_txq_ctrl, txq);
945 ret = priv->obj_ops.txq_obj_modify(txq_ctrl->obj,
946 MLX5_TXQ_MOD_ERR2RDY,
947 (uint8_t)priv->dev_port);
955 * Modify a Verbs queue state.
958 * Pointer to Ethernet device.
960 * State modify request parameters.
963 * 0 in case of success else non-zero value.
966 mlx5_queue_state_modify(struct rte_eth_dev *dev,
967 struct mlx5_mp_arg_queue_state_modify *sm)
969 struct mlx5_priv *priv = dev->data->dev_private;
972 switch (rte_eal_process_type()) {
973 case RTE_PROC_PRIMARY:
974 ret = mlx5_queue_state_modify_primary(dev, sm);
976 case RTE_PROC_SECONDARY:
977 ret = mlx5_mp_req_queue_state_modify(&priv->mp_id, sm);
987 * The function inserts the RQ state to reset when the first error CQE is
988 * shown, then drains the CQ by the caller function loop. When the CQ is empty,
989 * it moves the RQ state to ready and initializes the RQ.
990 * Next CQE identification and error counting are in the caller responsibility.
993 * Pointer to RX queue structure.
995 * 1 when called from vectorized Rx burst, need to prepare mbufs for the RQ.
996 * 0 when called from non-vectorized Rx burst.
999 * -1 in case of recovery error, otherwise the CQE status.
1002 mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
1004 const uint16_t cqe_n = 1 << rxq->cqe_n;
1005 const uint16_t cqe_mask = cqe_n - 1;
1006 const uint16_t wqe_n = 1 << rxq->elts_n;
1007 const uint16_t strd_n = 1 << rxq->strd_num_n;
1008 struct mlx5_rxq_ctrl *rxq_ctrl =
1009 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
1011 volatile struct mlx5_cqe *cqe;
1012 volatile struct mlx5_err_cqe *err_cqe;
1014 .cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask],
1016 struct mlx5_mp_arg_queue_state_modify sm;
1019 switch (rxq->err_state) {
1020 case MLX5_RXQ_ERR_STATE_NO_ERROR:
1021 rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_RESET;
1023 case MLX5_RXQ_ERR_STATE_NEED_RESET:
1025 sm.queue_id = rxq->idx;
1026 sm.state = IBV_WQS_RESET;
1027 if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv), &sm))
1029 if (rxq_ctrl->dump_file_n <
1030 rxq_ctrl->priv->config.max_dump_files_num) {
1031 MKSTR(err_str, "Unexpected CQE error syndrome "
1032 "0x%02x CQN = %u RQN = %u wqe_counter = %u"
1033 " rq_ci = %u cq_ci = %u", u.err_cqe->syndrome,
1034 rxq->cqn, rxq_ctrl->wqn,
1035 rte_be_to_cpu_16(u.err_cqe->wqe_counter),
1036 rxq->rq_ci << rxq->sges_n, rxq->cq_ci);
1037 MKSTR(name, "dpdk_mlx5_port_%u_rxq_%u_%u",
1038 rxq->port_id, rxq->idx, (uint32_t)rte_rdtsc());
1039 mlx5_dump_debug_information(name, NULL, err_str, 0);
1040 mlx5_dump_debug_information(name, "MLX5 Error CQ:",
1041 (const void *)((uintptr_t)
1043 sizeof(*u.cqe) * cqe_n);
1044 mlx5_dump_debug_information(name, "MLX5 Error RQ:",
1045 (const void *)((uintptr_t)
1048 rxq_ctrl->dump_file_n++;
1050 rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_READY;
1052 case MLX5_RXQ_ERR_STATE_NEED_READY:
1053 ret = check_cqe(u.cqe, cqe_n, rxq->cq_ci);
1054 if (ret == MLX5_CQE_STATUS_HW_OWN) {
1056 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1059 * The RQ consumer index must be zeroed while moving
1060 * from RESET state to RDY state.
1062 *rxq->rq_db = rte_cpu_to_be_32(0);
1065 sm.queue_id = rxq->idx;
1066 sm.state = IBV_WQS_RDY;
1067 if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv),
1071 const uint32_t elts_n =
1072 mlx5_rxq_mprq_enabled(rxq) ?
1073 wqe_n * strd_n : wqe_n;
1074 const uint32_t e_mask = elts_n - 1;
1076 mlx5_rxq_mprq_enabled(rxq) ?
1077 rxq->elts_ci : rxq->rq_ci;
1079 struct rte_mbuf **elt;
1081 unsigned int n = elts_n - (elts_ci -
1084 for (i = 0; i < (int)n; ++i) {
1085 elt_idx = (elts_ci + i) & e_mask;
1086 elt = &(*rxq->elts)[elt_idx];
1087 *elt = rte_mbuf_raw_alloc(rxq->mp);
1089 for (i--; i >= 0; --i) {
1090 elt_idx = (elts_ci +
1094 rte_pktmbuf_free_seg
1100 for (i = 0; i < (int)elts_n; ++i) {
1101 elt = &(*rxq->elts)[i];
1103 (uint16_t)((*elt)->buf_len -
1104 rte_pktmbuf_headroom(*elt));
1106 /* Padding with a fake mbuf for vec Rx. */
1107 for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
1108 (*rxq->elts)[elts_n + i] =
1111 mlx5_rxq_initialize(rxq);
1112 rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
1121 * Get size of the next packet for a given CQE. For compressed CQEs, the
1122 * consumer index is updated only once all packets of the current one have
1126 * Pointer to RX queue.
1130 * Store pointer to mini-CQE if compressed. Otherwise, the pointer is not
1134 * 0 in case of empty CQE, otherwise the packet size in bytes.
1137 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
1138 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe)
1140 struct rxq_zip *zip = &rxq->zip;
1141 uint16_t cqe_n = cqe_cnt + 1;
1147 /* Process compressed data in the CQE and mini arrays. */
1149 volatile struct mlx5_mini_cqe8 (*mc)[8] =
1150 (volatile struct mlx5_mini_cqe8 (*)[8])
1151 (uintptr_t)(&(*rxq->cqes)[zip->ca &
1153 len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt &
1155 *mcqe = &(*mc)[zip->ai & 7];
1156 if ((++zip->ai & 7) == 0) {
1157 /* Invalidate consumed CQEs */
1160 while (idx != end) {
1161 (*rxq->cqes)[idx & cqe_cnt].op_own =
1162 MLX5_CQE_INVALIDATE;
1166 * Increment consumer index to skip the number
1167 * of CQEs consumed. Hardware leaves holes in
1168 * the CQ ring for software use.
1173 if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) {
1174 /* Invalidate the rest */
1178 while (idx != end) {
1179 (*rxq->cqes)[idx & cqe_cnt].op_own =
1180 MLX5_CQE_INVALIDATE;
1183 rxq->cq_ci = zip->cq_ci;
1187 * No compressed data, get next CQE and verify if it is
1195 ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
1196 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
1197 if (unlikely(ret == MLX5_CQE_STATUS_ERR ||
1199 ret = mlx5_rx_err_handle(rxq, 0);
1200 if (ret == MLX5_CQE_STATUS_HW_OWN ||
1208 * Introduce the local variable to have queue cq_ci
1209 * index in queue structure always consistent with
1210 * actual CQE boundary (not pointing to the middle
1211 * of compressed CQE session).
1213 cq_ci = rxq->cq_ci + 1;
1214 op_own = cqe->op_own;
1215 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) {
1216 volatile struct mlx5_mini_cqe8 (*mc)[8] =
1217 (volatile struct mlx5_mini_cqe8 (*)[8])
1218 (uintptr_t)(&(*rxq->cqes)
1219 [cq_ci & cqe_cnt].pkt_info);
1221 /* Fix endianness. */
1222 zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
1224 * Current mini array position is the one
1225 * returned by check_cqe64().
1227 * If completion comprises several mini arrays,
1228 * as a special case the second one is located
1229 * 7 CQEs after the initial CQE instead of 8
1230 * for subsequent ones.
1233 zip->na = zip->ca + 7;
1234 /* Compute the next non compressed CQE. */
1235 zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
1236 /* Get packet size to return. */
1237 len = rte_be_to_cpu_32((*mc)[0].byte_cnt &
1241 /* Prefetch all to be invalidated */
1244 while (idx != end) {
1245 rte_prefetch0(&(*rxq->cqes)[(idx) &
1251 len = rte_be_to_cpu_32(cqe->byte_cnt);
1254 if (unlikely(rxq->err_state)) {
1255 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1256 ++rxq->stats.idropped;
1264 * Translate RX completion flags to offload flags.
1270 * Offload flags (ol_flags) for struct rte_mbuf.
1272 static inline uint32_t
1273 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe)
1275 uint32_t ol_flags = 0;
1276 uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);
1280 MLX5_CQE_RX_L3_HDR_VALID,
1281 PKT_RX_IP_CKSUM_GOOD) |
1283 MLX5_CQE_RX_L4_HDR_VALID,
1284 PKT_RX_L4_CKSUM_GOOD);
1289 * Fill in mbuf fields from RX completion flags.
1290 * Note that pkt->ol_flags should be initialized outside of this function.
1293 * Pointer to RX queue.
1298 * @param rss_hash_res
1299 * Packet RSS Hash result.
1302 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
1303 volatile struct mlx5_cqe *cqe,
1304 volatile struct mlx5_mini_cqe8 *mcqe)
1306 /* Update packet information. */
1307 pkt->packet_type = rxq_cq_to_pkt_type(rxq, cqe, mcqe);
1309 if (rxq->rss_hash) {
1310 uint32_t rss_hash_res = 0;
1312 /* If compressed, take hash result from mini-CQE. */
1314 rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_HASH)
1315 rss_hash_res = rte_be_to_cpu_32(cqe->rx_hash_res);
1317 rss_hash_res = rte_be_to_cpu_32(mcqe->rx_hash_result);
1319 pkt->hash.rss = rss_hash_res;
1320 pkt->ol_flags |= PKT_RX_RSS_HASH;
1326 /* If compressed, take flow tag from mini-CQE. */
1328 rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_FTAG_STRIDX)
1329 mark = cqe->sop_drop_qpn;
1331 mark = ((mcqe->byte_cnt_flow & 0xff) << 8) |
1332 (mcqe->flow_tag_high << 16);
1333 if (MLX5_FLOW_MARK_IS_VALID(mark)) {
1334 pkt->ol_flags |= PKT_RX_FDIR;
1335 if (mark != RTE_BE32(MLX5_FLOW_MARK_DEFAULT)) {
1336 pkt->ol_flags |= PKT_RX_FDIR_ID;
1337 pkt->hash.fdir.hi = mlx5_flow_mark_get(mark);
1341 if (rxq->dynf_meta && cqe->flow_table_metadata) {
1342 pkt->ol_flags |= rxq->flow_meta_mask;
1343 *RTE_MBUF_DYNFIELD(pkt, rxq->flow_meta_offset, uint32_t *) =
1344 cqe->flow_table_metadata;
1347 pkt->ol_flags |= rxq_cq_to_ol_flags(cqe);
1348 if (rxq->vlan_strip) {
1352 rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_L34H_STRIDX)
1353 vlan_strip = cqe->hdr_type_etc &
1354 RTE_BE16(MLX5_CQE_VLAN_STRIPPED);
1356 vlan_strip = mcqe->hdr_type &
1357 RTE_BE16(MLX5_CQE_VLAN_STRIPPED);
1359 pkt->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1360 pkt->vlan_tci = rte_be_to_cpu_16(cqe->vlan_info);
1363 if (rxq->hw_timestamp) {
1364 uint64_t ts = rte_be_to_cpu_64(cqe->timestamp);
1366 if (rxq->rt_timestamp)
1367 ts = mlx5_txpp_convert_rx_ts(rxq->sh, ts);
1368 mlx5_timestamp_set(pkt, rxq->timestamp_offset, ts);
1369 pkt->ol_flags |= rxq->timestamp_rx_flag;
1374 * DPDK callback for RX.
1377 * Generic pointer to RX queue structure.
1379 * Array to store received packets.
1381 * Maximum number of packets in array.
1384 * Number of packets successfully received (<= pkts_n).
1387 mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1389 struct mlx5_rxq_data *rxq = dpdk_rxq;
1390 const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1;
1391 const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1;
1392 const unsigned int sges_n = rxq->sges_n;
1393 struct rte_mbuf *pkt = NULL;
1394 struct rte_mbuf *seg = NULL;
1395 volatile struct mlx5_cqe *cqe =
1396 &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1398 unsigned int rq_ci = rxq->rq_ci << sges_n;
1399 int len = 0; /* keep its value across iterations. */
1402 unsigned int idx = rq_ci & wqe_cnt;
1403 volatile struct mlx5_wqe_data_seg *wqe =
1404 &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx];
1405 struct rte_mbuf *rep = (*rxq->elts)[idx];
1406 volatile struct mlx5_mini_cqe8 *mcqe = NULL;
1414 /* Allocate the buf from the same pool. */
1415 rep = rte_mbuf_raw_alloc(seg->pool);
1416 if (unlikely(rep == NULL)) {
1417 ++rxq->stats.rx_nombuf;
1420 * no buffers before we even started,
1421 * bail out silently.
1425 while (pkt != seg) {
1426 MLX5_ASSERT(pkt != (*rxq->elts)[idx]);
1430 rte_mbuf_raw_free(pkt);
1436 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1437 len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe);
1439 rte_mbuf_raw_free(rep);
1443 MLX5_ASSERT(len >= (rxq->crc_present << 2));
1444 pkt->ol_flags &= EXT_ATTACHED_MBUF;
1445 rxq_cq_to_mbuf(rxq, pkt, cqe, mcqe);
1446 if (rxq->crc_present)
1447 len -= RTE_ETHER_CRC_LEN;
1449 if (cqe->lro_num_seg > 1) {
1451 (rte_pktmbuf_mtod(pkt, uint8_t *), cqe,
1453 pkt->ol_flags |= PKT_RX_LRO;
1454 pkt->tso_segsz = len / cqe->lro_num_seg;
1457 DATA_LEN(rep) = DATA_LEN(seg);
1458 PKT_LEN(rep) = PKT_LEN(seg);
1459 SET_DATA_OFF(rep, DATA_OFF(seg));
1460 PORT(rep) = PORT(seg);
1461 (*rxq->elts)[idx] = rep;
1463 * Fill NIC descriptor with the new buffer. The lkey and size
1464 * of the buffers are already known, only the buffer address
1467 wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
1468 /* If there's only one MR, no need to replace LKey in WQE. */
1469 if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
1470 wqe->lkey = mlx5_rx_mb2mr(rxq, rep);
1471 if (len > DATA_LEN(seg)) {
1472 len -= DATA_LEN(seg);
1477 DATA_LEN(seg) = len;
1478 #ifdef MLX5_PMD_SOFT_COUNTERS
1479 /* Increment bytes counter. */
1480 rxq->stats.ibytes += PKT_LEN(pkt);
1482 /* Return packet. */
1487 /* Align consumer index to the next stride. */
1492 if (unlikely((i == 0) && ((rq_ci >> sges_n) == rxq->rq_ci)))
1494 /* Update the consumer index. */
1495 rxq->rq_ci = rq_ci >> sges_n;
1497 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1499 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
1500 #ifdef MLX5_PMD_SOFT_COUNTERS
1501 /* Increment packets counter. */
1502 rxq->stats.ipackets += i;
1508 * Update LRO packet TCP header.
1509 * The HW LRO feature doesn't update the TCP header after coalescing the
1510 * TCP segments but supplies information in CQE to fill it by SW.
1513 * Pointer to the TCP header.
1515 * Pointer to the completion entry..
1517 * The L3 pseudo-header checksum.
1520 mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *__rte_restrict tcp,
1521 volatile struct mlx5_cqe *__rte_restrict cqe,
1522 uint32_t phcsum, uint8_t l4_type)
1525 * The HW calculates only the TCP payload checksum, need to complete
1526 * the TCP header checksum and the L3 pseudo-header checksum.
1528 uint32_t csum = phcsum + cqe->csum;
1530 if (l4_type == MLX5_L4_HDR_TYPE_TCP_EMPTY_ACK ||
1531 l4_type == MLX5_L4_HDR_TYPE_TCP_WITH_ACL) {
1532 tcp->tcp_flags |= RTE_TCP_ACK_FLAG;
1533 tcp->recv_ack = cqe->lro_ack_seq_num;
1534 tcp->rx_win = cqe->lro_tcp_win;
1536 if (cqe->lro_tcppsh_abort_dupack & MLX5_CQE_LRO_PUSH_MASK)
1537 tcp->tcp_flags |= RTE_TCP_PSH_FLAG;
1539 csum += rte_raw_cksum(tcp, (tcp->data_off >> 4) * 4);
1540 csum = ((csum & 0xffff0000) >> 16) + (csum & 0xffff);
1541 csum = (~csum) & 0xffff;
1548 * Update LRO packet headers.
1549 * The HW LRO feature doesn't update the L3/TCP headers after coalescing the
1550 * TCP segments but supply information in CQE to fill it by SW.
1553 * The packet address.
1555 * Pointer to the completion entry..
1557 * The packet length.
1560 mlx5_lro_update_hdr(uint8_t *__rte_restrict padd,
1561 volatile struct mlx5_cqe *__rte_restrict cqe,
1562 volatile struct mlx5_mini_cqe8 *mcqe,
1563 struct mlx5_rxq_data *rxq, uint32_t len)
1566 struct rte_ether_hdr *eth;
1567 struct rte_vlan_hdr *vlan;
1568 struct rte_ipv4_hdr *ipv4;
1569 struct rte_ipv6_hdr *ipv6;
1570 struct rte_tcp_hdr *tcp;
1575 uint16_t proto = h.eth->ether_type;
1580 while (proto == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
1581 proto == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
1582 proto = h.vlan->eth_proto;
1585 if (proto == RTE_BE16(RTE_ETHER_TYPE_IPV4)) {
1586 h.ipv4->time_to_live = cqe->lro_min_ttl;
1587 h.ipv4->total_length = rte_cpu_to_be_16(len - (h.hdr - padd));
1588 h.ipv4->hdr_checksum = 0;
1589 h.ipv4->hdr_checksum = rte_ipv4_cksum(h.ipv4);
1590 phcsum = rte_ipv4_phdr_cksum(h.ipv4, 0);
1593 h.ipv6->hop_limits = cqe->lro_min_ttl;
1594 h.ipv6->payload_len = rte_cpu_to_be_16(len - (h.hdr - padd) -
1596 phcsum = rte_ipv6_phdr_cksum(h.ipv6, 0);
1600 rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_L34H_STRIDX)
1601 l4_type = (rte_be_to_cpu_16(cqe->hdr_type_etc) &
1602 MLX5_CQE_L4_TYPE_MASK) >> MLX5_CQE_L4_TYPE_SHIFT;
1604 l4_type = (rte_be_to_cpu_16(mcqe->hdr_type) &
1605 MLX5_CQE_L4_TYPE_MASK) >> MLX5_CQE_L4_TYPE_SHIFT;
1606 mlx5_lro_update_tcp_hdr(h.tcp, cqe, phcsum, l4_type);
1610 mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
1612 struct mlx5_mprq_buf *buf = opaque;
1614 if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) == 1) {
1615 rte_mempool_put(buf->mp, buf);
1616 } else if (unlikely(__atomic_sub_fetch(&buf->refcnt, 1,
1617 __ATOMIC_RELAXED) == 0)) {
1618 __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
1619 rte_mempool_put(buf->mp, buf);
1624 mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf)
1626 mlx5_mprq_buf_free_cb(NULL, buf);
1630 * DPDK callback for RX with Multi-Packet RQ support.
1633 * Generic pointer to RX queue structure.
1635 * Array to store received packets.
1637 * Maximum number of packets in array.
1640 * Number of packets successfully received (<= pkts_n).
1643 mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1645 struct mlx5_rxq_data *rxq = dpdk_rxq;
1646 const uint32_t strd_n = 1 << rxq->strd_num_n;
1647 const uint32_t strd_sz = 1 << rxq->strd_sz_n;
1648 const uint32_t cq_mask = (1 << rxq->cqe_n) - 1;
1649 const uint32_t wq_mask = (1 << rxq->elts_n) - 1;
1650 volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
1652 uint32_t rq_ci = rxq->rq_ci;
1653 uint16_t consumed_strd = rxq->consumed_strd;
1654 struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
1656 while (i < pkts_n) {
1657 struct rte_mbuf *pkt;
1663 volatile struct mlx5_mini_cqe8 *mcqe = NULL;
1664 enum mlx5_rqx_code rxq_code;
1666 if (consumed_strd == strd_n) {
1667 /* Replace WQE if the buffer is still in use. */
1668 mprq_buf_replace(rxq, rq_ci & wq_mask);
1669 /* Advance to the next WQE. */
1672 buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
1674 cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
1675 ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe);
1679 len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
1680 MLX5_ASSERT((int)len >= (rxq->crc_present << 2));
1681 if (rxq->crc_present)
1682 len -= RTE_ETHER_CRC_LEN;
1684 rxq->mcqe_format == MLX5_CQE_RESP_FORMAT_FTAG_STRIDX)
1685 strd_cnt = (len / strd_sz) + !!(len % strd_sz);
1687 strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
1688 MLX5_MPRQ_STRIDE_NUM_SHIFT;
1689 MLX5_ASSERT(strd_cnt);
1690 consumed_strd += strd_cnt;
1691 if (byte_cnt & MLX5_MPRQ_FILLER_MASK)
1693 strd_idx = rte_be_to_cpu_16(mcqe == NULL ?
1696 MLX5_ASSERT(strd_idx < strd_n);
1697 MLX5_ASSERT(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) &
1699 pkt = rte_pktmbuf_alloc(rxq->mp);
1700 if (unlikely(pkt == NULL)) {
1701 ++rxq->stats.rx_nombuf;
1704 len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
1705 MLX5_ASSERT((int)len >= (rxq->crc_present << 2));
1706 if (rxq->crc_present)
1707 len -= RTE_ETHER_CRC_LEN;
1708 rxq_code = mprq_buf_to_pkt(rxq, pkt, len, buf,
1709 strd_idx, strd_cnt);
1710 if (unlikely(rxq_code != MLX5_RXQ_CODE_EXIT)) {
1711 rte_pktmbuf_free_seg(pkt);
1712 if (rxq_code == MLX5_RXQ_CODE_DROPPED) {
1713 ++rxq->stats.idropped;
1716 if (rxq_code == MLX5_RXQ_CODE_NOMBUF) {
1717 ++rxq->stats.rx_nombuf;
1721 rxq_cq_to_mbuf(rxq, pkt, cqe, mcqe);
1722 if (cqe->lro_num_seg > 1) {
1723 mlx5_lro_update_hdr(rte_pktmbuf_mtod(pkt, uint8_t *),
1724 cqe, mcqe, rxq, len);
1725 pkt->ol_flags |= PKT_RX_LRO;
1726 pkt->tso_segsz = len / cqe->lro_num_seg;
1729 PORT(pkt) = rxq->port_id;
1730 #ifdef MLX5_PMD_SOFT_COUNTERS
1731 /* Increment bytes counter. */
1732 rxq->stats.ibytes += PKT_LEN(pkt);
1734 /* Return packet. */
1738 /* Update the consumer indexes. */
1739 rxq->consumed_strd = consumed_strd;
1741 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1742 if (rq_ci != rxq->rq_ci) {
1745 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
1747 #ifdef MLX5_PMD_SOFT_COUNTERS
1748 /* Increment packets counter. */
1749 rxq->stats.ipackets += i;
1755 * Dummy DPDK callback for TX.
1757 * This function is used to temporarily replace the real callback during
1758 * unsafe control operations on the queue, or in case of error.
1761 * Generic pointer to TX queue structure.
1763 * Packets to transmit.
1765 * Number of packets in array.
1768 * Number of packets successfully transmitted (<= pkts_n).
1771 removed_tx_burst(void *dpdk_txq __rte_unused,
1772 struct rte_mbuf **pkts __rte_unused,
1773 uint16_t pkts_n __rte_unused)
1780 * Dummy DPDK callback for RX.
1782 * This function is used to temporarily replace the real callback during
1783 * unsafe control operations on the queue, or in case of error.
1786 * Generic pointer to RX queue structure.
1788 * Array to store received packets.
1790 * Maximum number of packets in array.
1793 * Number of packets successfully received (<= pkts_n).
1796 removed_rx_burst(void *dpdk_txq __rte_unused,
1797 struct rte_mbuf **pkts __rte_unused,
1798 uint16_t pkts_n __rte_unused)
1805 * Vectorized Rx/Tx routines are not compiled in when required vector
1806 * instructions are not supported on a target architecture. The following null
1807 * stubs are needed for linkage when those are not included outside of this file
1808 * (e.g. mlx5_rxtx_vec_sse.c for x86).
1812 mlx5_rx_burst_vec(void *dpdk_txq __rte_unused,
1813 struct rte_mbuf **pkts __rte_unused,
1814 uint16_t pkts_n __rte_unused)
1820 mlx5_rx_burst_mprq_vec(void *dpdk_txq __rte_unused,
1821 struct rte_mbuf **pkts __rte_unused,
1822 uint16_t pkts_n __rte_unused)
1828 mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)
1834 mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused)
1840 * Free the mbufs from the linear array of pointers.
1843 * Pointer to array of packets to be free.
1845 * Number of packets to be freed.
1847 * Configured Tx offloads mask. It is fully defined at
1848 * compile time and may be used for optimization.
1850 static __rte_always_inline void
1851 mlx5_tx_free_mbuf(struct rte_mbuf **__rte_restrict pkts,
1852 unsigned int pkts_n,
1853 unsigned int olx __rte_unused)
1855 struct rte_mempool *pool = NULL;
1856 struct rte_mbuf **p_free = NULL;
1857 struct rte_mbuf *mbuf;
1858 unsigned int n_free = 0;
1861 * The implemented algorithm eliminates
1862 * copying pointers to temporary array
1863 * for rte_mempool_put_bulk() calls.
1866 MLX5_ASSERT(pkts_n);
1870 * Decrement mbuf reference counter, detach
1871 * indirect and external buffers if needed.
1873 mbuf = rte_pktmbuf_prefree_seg(*pkts);
1874 if (likely(mbuf != NULL)) {
1875 MLX5_ASSERT(mbuf == *pkts);
1876 if (likely(n_free != 0)) {
1877 if (unlikely(pool != mbuf->pool))
1878 /* From different pool. */
1881 /* Start new scan array. */
1888 if (unlikely(pkts_n == 0)) {
1894 * This happens if mbuf is still referenced.
1895 * We can't put it back to the pool, skip.
1899 if (unlikely(n_free != 0))
1900 /* There is some array to free.*/
1902 if (unlikely(pkts_n == 0))
1903 /* Last mbuf, nothing to free. */
1909 * This loop is implemented to avoid multiple
1910 * inlining of rte_mempool_put_bulk().
1913 MLX5_ASSERT(p_free);
1914 MLX5_ASSERT(n_free);
1916 * Free the array of pre-freed mbufs
1917 * belonging to the same memory pool.
1919 rte_mempool_put_bulk(pool, (void *)p_free, n_free);
1920 if (unlikely(mbuf != NULL)) {
1921 /* There is the request to start new scan. */
1926 if (likely(pkts_n != 0))
1929 * This is the last mbuf to be freed.
1930 * Do one more loop iteration to complete.
1931 * This is rare case of the last unique mbuf.
1936 if (likely(pkts_n == 0))
1945 * Free the mbuf from the elts ring buffer till new tail.
1948 * Pointer to Tx queue structure.
1950 * Index in elts to free up to, becomes new elts tail.
1952 * Configured Tx offloads mask. It is fully defined at
1953 * compile time and may be used for optimization.
1955 static __rte_always_inline void
1956 mlx5_tx_free_elts(struct mlx5_txq_data *__rte_restrict txq,
1958 unsigned int olx __rte_unused)
1960 uint16_t n_elts = tail - txq->elts_tail;
1962 MLX5_ASSERT(n_elts);
1963 MLX5_ASSERT(n_elts <= txq->elts_s);
1965 * Implement a loop to support ring buffer wraparound
1966 * with single inlining of mlx5_tx_free_mbuf().
1971 part = txq->elts_s - (txq->elts_tail & txq->elts_m);
1972 part = RTE_MIN(part, n_elts);
1974 MLX5_ASSERT(part <= txq->elts_s);
1975 mlx5_tx_free_mbuf(&txq->elts[txq->elts_tail & txq->elts_m],
1977 txq->elts_tail += part;
1983 * Store the mbuf being sent into elts ring buffer.
1984 * On Tx completion these mbufs will be freed.
1987 * Pointer to Tx queue structure.
1989 * Pointer to array of packets to be stored.
1991 * Number of packets to be stored.
1993 * Configured Tx offloads mask. It is fully defined at
1994 * compile time and may be used for optimization.
1996 static __rte_always_inline void
1997 mlx5_tx_copy_elts(struct mlx5_txq_data *__rte_restrict txq,
1998 struct rte_mbuf **__rte_restrict pkts,
1999 unsigned int pkts_n,
2000 unsigned int olx __rte_unused)
2003 struct rte_mbuf **elts = (struct rte_mbuf **)txq->elts;
2006 MLX5_ASSERT(pkts_n);
2007 part = txq->elts_s - (txq->elts_head & txq->elts_m);
2009 MLX5_ASSERT(part <= txq->elts_s);
2010 /* This code is a good candidate for vectorizing with SIMD. */
2011 rte_memcpy((void *)(elts + (txq->elts_head & txq->elts_m)),
2013 RTE_MIN(part, pkts_n) * sizeof(struct rte_mbuf *));
2014 txq->elts_head += pkts_n;
2015 if (unlikely(part < pkts_n))
2016 /* The copy is wrapping around the elts array. */
2017 rte_memcpy((void *)elts, (void *)(pkts + part),
2018 (pkts_n - part) * sizeof(struct rte_mbuf *));
2022 * Update completion queue consuming index via doorbell
2023 * and flush the completed data buffers.
2026 * Pointer to TX queue structure.
2027 * @param valid CQE pointer
2028 * if not NULL update txq->wqe_pi and flush the buffers
2030 * Configured Tx offloads mask. It is fully defined at
2031 * compile time and may be used for optimization.
2033 static __rte_always_inline void
2034 mlx5_tx_comp_flush(struct mlx5_txq_data *__rte_restrict txq,
2035 volatile struct mlx5_cqe *last_cqe,
2036 unsigned int olx __rte_unused)
2038 if (likely(last_cqe != NULL)) {
2041 txq->wqe_pi = rte_be_to_cpu_16(last_cqe->wqe_counter);
2042 tail = txq->fcqs[(txq->cq_ci - 1) & txq->cqe_m];
2043 if (likely(tail != txq->elts_tail)) {
2044 mlx5_tx_free_elts(txq, tail, olx);
2045 MLX5_ASSERT(tail == txq->elts_tail);
2051 * Manage TX completions. This routine checks the CQ for
2052 * arrived CQEs, deduces the last accomplished WQE in SQ,
2053 * updates SQ producing index and frees all completed mbufs.
2056 * Pointer to TX queue structure.
2058 * Configured Tx offloads mask. It is fully defined at
2059 * compile time and may be used for optimization.
2061 * NOTE: not inlined intentionally, it makes tx_burst
2062 * routine smaller, simple and faster - from experiments.
2065 mlx5_tx_handle_completion(struct mlx5_txq_data *__rte_restrict txq,
2066 unsigned int olx __rte_unused)
2068 unsigned int count = MLX5_TX_COMP_MAX_CQE;
2069 volatile struct mlx5_cqe *last_cqe = NULL;
2070 bool ring_doorbell = false;
2073 static_assert(MLX5_CQE_STATUS_HW_OWN < 0, "Must be negative value");
2074 static_assert(MLX5_CQE_STATUS_SW_OWN < 0, "Must be negative value");
2076 volatile struct mlx5_cqe *cqe;
2078 cqe = &txq->cqes[txq->cq_ci & txq->cqe_m];
2079 ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci);
2080 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
2081 if (likely(ret != MLX5_CQE_STATUS_ERR)) {
2082 /* No new CQEs in completion queue. */
2083 MLX5_ASSERT(ret == MLX5_CQE_STATUS_HW_OWN);
2087 * Some error occurred, try to restart.
2088 * We have no barrier after WQE related Doorbell
2089 * written, make sure all writes are completed
2090 * here, before we might perform SQ reset.
2093 ret = mlx5_tx_error_cqe_handle
2094 (txq, (volatile struct mlx5_err_cqe *)cqe);
2095 if (unlikely(ret < 0)) {
2097 * Some error occurred on queue error
2098 * handling, we do not advance the index
2099 * here, allowing to retry on next call.
2104 * We are going to fetch all entries with
2105 * MLX5_CQE_SYNDROME_WR_FLUSH_ERR status.
2106 * The send queue is supposed to be empty.
2108 ring_doorbell = true;
2110 txq->cq_pi = txq->cq_ci;
2114 /* Normal transmit completion. */
2115 MLX5_ASSERT(txq->cq_ci != txq->cq_pi);
2116 #ifdef RTE_LIBRTE_MLX5_DEBUG
2117 MLX5_ASSERT((txq->fcqs[txq->cq_ci & txq->cqe_m] >> 16) ==
2120 ring_doorbell = true;
2124 * We have to restrict the amount of processed CQEs
2125 * in one tx_burst routine call. The CQ may be large
2126 * and many CQEs may be updated by the NIC in one
2127 * transaction. Buffers freeing is time consuming,
2128 * multiple iterations may introduce significant
2131 if (likely(--count == 0))
2134 if (likely(ring_doorbell)) {
2135 /* Ring doorbell to notify hardware. */
2136 rte_compiler_barrier();
2137 *txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
2138 mlx5_tx_comp_flush(txq, last_cqe, olx);
2143 * Check if the completion request flag should be set in the last WQE.
2144 * Both pushed mbufs and WQEs are monitored and the completion request
2145 * flag is set if any of thresholds is reached.
2148 * Pointer to TX queue structure.
2150 * Pointer to burst routine local context.
2152 * Configured Tx offloads mask. It is fully defined at
2153 * compile time and may be used for optimization.
2155 static __rte_always_inline void
2156 mlx5_tx_request_completion(struct mlx5_txq_data *__rte_restrict txq,
2157 struct mlx5_txq_local *__rte_restrict loc,
2160 uint16_t head = txq->elts_head;
2163 part = MLX5_TXOFF_CONFIG(INLINE) ?
2164 0 : loc->pkts_sent - loc->pkts_copy;
2166 if ((uint16_t)(head - txq->elts_comp) >= MLX5_TX_COMP_THRESH ||
2167 (MLX5_TXOFF_CONFIG(INLINE) &&
2168 (uint16_t)(txq->wqe_ci - txq->wqe_comp) >= txq->wqe_thres)) {
2169 volatile struct mlx5_wqe *last = loc->wqe_last;
2172 txq->elts_comp = head;
2173 if (MLX5_TXOFF_CONFIG(INLINE))
2174 txq->wqe_comp = txq->wqe_ci;
2175 /* Request unconditional completion on last WQE. */
2176 last->cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
2177 MLX5_COMP_MODE_OFFSET);
2178 /* Save elts_head in dedicated free on completion queue. */
2179 #ifdef RTE_LIBRTE_MLX5_DEBUG
2180 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head |
2181 (last->cseg.opcode >> 8) << 16;
2183 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head;
2185 /* A CQE slot must always be available. */
2186 MLX5_ASSERT((txq->cq_pi - txq->cq_ci) <= txq->cqe_s);
2191 * DPDK callback to check the status of a tx descriptor.
2196 * The index of the descriptor in the ring.
2199 * The status of the tx descriptor.
2202 mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
2204 struct mlx5_txq_data *__rte_restrict txq = tx_queue;
2207 mlx5_tx_handle_completion(txq, 0);
2208 used = txq->elts_head - txq->elts_tail;
2210 return RTE_ETH_TX_DESC_FULL;
2211 return RTE_ETH_TX_DESC_DONE;
2215 * Build the Control Segment with specified opcode:
2216 * - MLX5_OPCODE_SEND
2217 * - MLX5_OPCODE_ENHANCED_MPSW
2221 * Pointer to TX queue structure.
2223 * Pointer to burst routine local context.
2225 * Pointer to WQE to fill with built Control Segment.
2227 * Supposed length of WQE in segments.
2229 * SQ WQE opcode to put into Control Segment.
2231 * Configured Tx offloads mask. It is fully defined at
2232 * compile time and may be used for optimization.
2234 static __rte_always_inline void
2235 mlx5_tx_cseg_init(struct mlx5_txq_data *__rte_restrict txq,
2236 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
2237 struct mlx5_wqe *__rte_restrict wqe,
2239 unsigned int opcode,
2240 unsigned int olx __rte_unused)
2242 struct mlx5_wqe_cseg *__rte_restrict cs = &wqe->cseg;
2244 /* For legacy MPW replace the EMPW by TSO with modifier. */
2245 if (MLX5_TXOFF_CONFIG(MPW) && opcode == MLX5_OPCODE_ENHANCED_MPSW)
2246 opcode = MLX5_OPCODE_TSO | MLX5_OPC_MOD_MPW << 24;
2247 cs->opcode = rte_cpu_to_be_32((txq->wqe_ci << 8) | opcode);
2248 cs->sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
2249 cs->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
2250 MLX5_COMP_MODE_OFFSET);
2251 cs->misc = RTE_BE32(0);
2255 * Build the Synchronize Queue Segment with specified completion index.
2258 * Pointer to TX queue structure.
2260 * Pointer to burst routine local context.
2262 * Pointer to WQE to fill with built Control Segment.
2264 * Completion index in Clock Queue to wait.
2266 * Configured Tx offloads mask. It is fully defined at
2267 * compile time and may be used for optimization.
2269 static __rte_always_inline void
2270 mlx5_tx_wseg_init(struct mlx5_txq_data *restrict txq,
2271 struct mlx5_txq_local *restrict loc __rte_unused,
2272 struct mlx5_wqe *restrict wqe,
2274 unsigned int olx __rte_unused)
2276 struct mlx5_wqe_qseg *qs;
2278 qs = RTE_PTR_ADD(wqe, MLX5_WSEG_SIZE);
2279 qs->max_index = rte_cpu_to_be_32(wci);
2280 qs->qpn_cqn = rte_cpu_to_be_32(txq->sh->txpp.clock_queue.cq->id);
2281 qs->reserved0 = RTE_BE32(0);
2282 qs->reserved1 = RTE_BE32(0);
2286 * Build the Ethernet Segment without inlined data.
2287 * Supports Software Parser, Checksums and VLAN
2288 * insertion Tx offload features.
2291 * Pointer to TX queue structure.
2293 * Pointer to burst routine local context.
2295 * Pointer to WQE to fill with built Ethernet Segment.
2297 * Configured Tx offloads mask. It is fully defined at
2298 * compile time and may be used for optimization.
2300 static __rte_always_inline void
2301 mlx5_tx_eseg_none(struct mlx5_txq_data *__rte_restrict txq __rte_unused,
2302 struct mlx5_txq_local *__rte_restrict loc,
2303 struct mlx5_wqe *__rte_restrict wqe,
2306 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
2310 * Calculate and set checksum flags first, the dword field
2311 * in the segment may be shared with Software Parser flags.
2313 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2314 es->flags = rte_cpu_to_le_32(csum);
2316 * Calculate and set Software Parser offsets and flags.
2317 * These flags are set for custom UDP and IP tunnel packets.
2319 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2320 /* Fill metadata field if needed. */
2321 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2322 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2323 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2324 /* Engage VLAN tag insertion feature if requested. */
2325 if (MLX5_TXOFF_CONFIG(VLAN) &&
2326 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
2328 * We should get here only if the device supports
2329 * this feature correctly.
2331 MLX5_ASSERT(txq->vlan_en);
2332 es->inline_hdr = rte_cpu_to_be_32(MLX5_ETH_WQE_VLAN_INSERT |
2333 loc->mbuf->vlan_tci);
2334 } else {
2335 es->inline_hdr = RTE_BE32(0);
2336 }
2340 * Build the Ethernet Segment with minimal inlined data
2341 * of MLX5_ESEG_MIN_INLINE_SIZE bytes length. This is
2342 * used to fill the gap in single WQEBB WQEs.
2343 * Supports Software Parser, Checksums and VLAN
2344 * insertion Tx offload features.
2347 * Pointer to TX queue structure.
2349 * Pointer to burst routine local context.
2351 * Pointer to WQE to fill with built Ethernet Segment.
2353 * Length of VLAN tag insertion if any.
2355 * Configured Tx offloads mask. It is fully defined at
2356 * compile time and may be used for optimization.
2358 static __rte_always_inline void
2359 mlx5_tx_eseg_dmin(struct mlx5_txq_data *__rte_restrict txq __rte_unused,
2360 struct mlx5_txq_local *__rte_restrict loc,
2361 struct mlx5_wqe *__rte_restrict wqe,
2365 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
2367 uint8_t *psrc, *pdst;
2370 * Calculate and set checksum flags first, the dword field
2371 * in the segment may be shared with Software Parser flags.
2373 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2374 es->flags = rte_cpu_to_le_32(csum);
2376 * Calculate and set Software Parser offsets and flags.
2377 * These flags are set for custom UDP and IP tunnel packets.
2379 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2380 /* Fill metadata field if needed. */
2381 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2382 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2383 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2384 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2386 sizeof(rte_v128u32_t)),
2387 "invalid Ethernet Segment data size");
2388 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2390 sizeof(struct rte_vlan_hdr) +
2391 2 * RTE_ETHER_ADDR_LEN),
2392 "invalid Ethernet Segment data size");
2393 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
2394 es->inline_hdr_sz = RTE_BE16(MLX5_ESEG_MIN_INLINE_SIZE);
2395 es->inline_data = *(unaligned_uint16_t *)psrc;
2396 psrc += sizeof(uint16_t);
2397 pdst = (uint8_t *)(es + 1);
2398 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2399 /* Implement VLAN tag insertion as part of the inline data. */
2400 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
2401 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2402 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2403 /* Insert VLAN ethertype + VLAN tag. */
2404 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2405 ((RTE_ETHER_TYPE_VLAN << 16) |
2406 loc->mbuf->vlan_tci);
2407 pdst += sizeof(struct rte_vlan_hdr);
2408 /* Copy the remaining two bytes from the packet data. */
2409 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
2410 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
2412 /* Fill the gap in the title WQEBB with inline data. */
2413 rte_mov16(pdst, psrc);
2418 * Build the Ethernet Segment with entire packet
2419 * data inlining. Checks the boundary of WQEBB and
2420 * ring buffer wrapping, supports Software Parser,
2421 * Checksums and VLAN insertion Tx offload features.
2424 * Pointer to TX queue structure.
2426 * Pointer to burst routine local context.
2428 * Pointer to WQE to fill with built Ethernet Segment.
2430 * Length of VLAN tag insertion if any.
2432 * Length of data to inline (VLAN included, if any).
2434 * TSO flag, set mss field from the packet.
2436 * Configured Tx offloads mask. It is fully defined at
2437 * compile time and may be used for optimization.
2440 * Pointer to the next Data Segment (aligned and wrapped around).
2442 static __rte_always_inline struct mlx5_wqe_dseg *
2443 mlx5_tx_eseg_data(struct mlx5_txq_data *__rte_restrict txq,
2444 struct mlx5_txq_local *__rte_restrict loc,
2445 struct mlx5_wqe *__rte_restrict wqe,
2451 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
2453 uint8_t *psrc, *pdst;
2457 * Calculate and set checksum flags first, the dword field
2458 * in the segment may be shared with Software Parser flags.
2460 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2461 if (MLX5_TXOFF_CONFIG(TSO) && tso) {
2462 csum <<= 24;
2463 csum |= loc->mbuf->tso_segsz;
2464 es->flags = rte_cpu_to_be_32(csum);
2465 } else {
2466 es->flags = rte_cpu_to_le_32(csum);
2467 }
2469 * Calculate and set Software Parser offsets and flags.
2470 * These flags are set for custom UDP and IP tunnel packets.
2472 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2473 /* Fill metadata field if needed. */
2474 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2475 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2476 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2477 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2479 sizeof(rte_v128u32_t)),
2480 "invalid Ethernet Segment data size");
2481 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2483 sizeof(struct rte_vlan_hdr) +
2484 2 * RTE_ETHER_ADDR_LEN),
2485 "invalid Ethernet Segment data size");
2486 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
2487 es->inline_hdr_sz = rte_cpu_to_be_16(inlen);
2488 es->inline_data = *(unaligned_uint16_t *)psrc;
2489 psrc += sizeof(uint16_t);
2490 pdst = (uint8_t *)(es + 1);
2491 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2492 /* Implement VLAN tag insertion as part of the inline data. */
2493 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
2494 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2495 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2496 /* Insert VLAN ethertype + VLAN tag. */
2497 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2498 ((RTE_ETHER_TYPE_VLAN << 16) |
2499 loc->mbuf->vlan_tci);
2500 pdst += sizeof(struct rte_vlan_hdr);
2501 /* Copy the remaining two bytes from the packet data. */
2502 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
2503 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
2504 psrc += sizeof(uint16_t);
2506 /* Fill the gap in the title WQEBB with inline data. */
2507 rte_mov16(pdst, psrc);
2508 psrc += sizeof(rte_v128u32_t);
2510 pdst = (uint8_t *)(es + 2);
2511 MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
2512 MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
2513 inlen -= MLX5_ESEG_MIN_INLINE_SIZE;
2515 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
2516 return (struct mlx5_wqe_dseg *)pdst;
2519 * The WQEBB space availability is checked by caller.
2520 * Here we should be aware of WQE ring buffer wraparound only.
2522 part = (uint8_t *)txq->wqes_end - pdst;
2523 part = RTE_MIN(part, inlen);
2525 rte_memcpy(pdst, psrc, part);
2527 if (likely(!inlen)) {
2529 * If return value is not used by the caller
2530 * the code below will be optimized out.
2533 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2534 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
2535 pdst = (uint8_t *)txq->wqes;
2536 return (struct mlx5_wqe_dseg *)pdst;
2538 pdst = (uint8_t *)txq->wqes;
2545 * Copy data from a chain of mbufs to the specified linear buffer.
2546 * If the data from an mbuf has been copied completely, the mbuf is
2547 * freed. A local structure is used to keep the byte
2548 * stream state across calls.
2551 * Pointer to the destination linear buffer.
2553 * Pointer to burst routine local context.
2555 * Length of data to be copied.
2557 * Length of data to be copied ignoring no inline hint.
2559 * Configured Tx offloads mask. It is fully defined at
2560 * compile time and may be used for optimization.
2563 * Number of actually copied data bytes. This is always greater than or
2564 * equal to the 'must' parameter and might be less than 'len' if the
2565 * no-inline hint flag is encountered.
2567 static __rte_always_inline unsigned int
2568 mlx5_tx_mseg_memcpy(uint8_t *pdst,
2569 struct mlx5_txq_local *__rte_restrict loc,
2572 unsigned int olx __rte_unused)
2574 struct rte_mbuf *mbuf;
2575 unsigned int part, dlen, copy = 0;
2579 MLX5_ASSERT(must <= len);
2581 /* Allow zero length packets, must check first. */
2582 dlen = rte_pktmbuf_data_len(loc->mbuf);
2583 if (dlen <= loc->mbuf_off) {
2584 /* Exhausted packet, just free. */
2586 loc->mbuf = mbuf->next;
2587 rte_pktmbuf_free_seg(mbuf);
2589 MLX5_ASSERT(loc->mbuf_nseg > 1);
2590 MLX5_ASSERT(loc->mbuf);
2592 if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
2597 * We already copied the minimal
2598 * requested amount of data.
2603 if (diff <= rte_pktmbuf_data_len(loc->mbuf)) {
2605 * Copy only the minimal required
2606 * part of the data buffer.
2613 dlen -= loc->mbuf_off;
2614 psrc = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
2616 part = RTE_MIN(len, dlen);
2617 rte_memcpy(pdst, psrc, part);
2619 loc->mbuf_off += part;
2622 if (loc->mbuf_off >= rte_pktmbuf_data_len(loc->mbuf)) {
2624 /* Exhausted packet, just free. */
2626 loc->mbuf = mbuf->next;
2627 rte_pktmbuf_free_seg(mbuf);
2629 MLX5_ASSERT(loc->mbuf_nseg >= 1);
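/*
 * loc->mbuf_off keeps the number of bytes already consumed from
 * the current mbuf, so the byte stream state survives across
 * repeated calls while the routine walks the segment chain.
 */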
2639 * Build the Ethernet Segment with inlined data from
2640 * multi-segment packet. Checks the boundary of WQEBB
2641 * and ring buffer wrapping, supports Software Parser,
2642 * Checksums and VLAN insertion Tx offload features.
2645 * Pointer to TX queue structure.
2647 * Pointer to burst routine local context.
2649 * Pointer to WQE to fill with built Ethernet Segment.
2651 * Length of VLAN tag insertion if any.
2653 * Length of data to inline (VLAN included, if any).
2655 * TSO flag, set mss field from the packet.
2657 * Configured Tx offloads mask. It is fully defined at
2658 * compile time and may be used for optimization.
2661 * Pointer to the next Data Segment (aligned and
2662 * possibly NOT wrapped around - the caller should do the
2663 * wrapping check on its own).
2665 static __rte_always_inline struct mlx5_wqe_dseg *
2666 mlx5_tx_eseg_mdat(struct mlx5_txq_data *__rte_restrict txq,
2667 struct mlx5_txq_local *__rte_restrict loc,
2668 struct mlx5_wqe *__rte_restrict wqe,
2674 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
2677 unsigned int part, tlen = 0;
2680 * Calculate and set checksum flags first, the uint32_t field
2681 * in the segment may be shared with Software Parser flags.
2683 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2684 if (MLX5_TXOFF_CONFIG(TSO) && tso) {
2685 csum <<= 24;
2686 csum |= loc->mbuf->tso_segsz;
2687 es->flags = rte_cpu_to_be_32(csum);
2688 } else {
2689 es->flags = rte_cpu_to_le_32(csum);
2690 }
2692 * Calculate and set Software Parser offsets and flags.
2693 * These flags are set for custom UDP and IP tunnel packets.
2695 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2696 /* Fill metadata field if needed. */
2697 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2698 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2699 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2700 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2702 sizeof(rte_v128u32_t)),
2703 "invalid Ethernet Segment data size");
2704 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2706 sizeof(struct rte_vlan_hdr) +
2707 2 * RTE_ETHER_ADDR_LEN),
2708 "invalid Ethernet Segment data size");
2709 MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
2710 pdst = (uint8_t *)&es->inline_data;
2711 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2712 /* Implement VLAN tag insertion as part of the inline data. */
2713 mlx5_tx_mseg_memcpy(pdst, loc,
2714 2 * RTE_ETHER_ADDR_LEN,
2715 2 * RTE_ETHER_ADDR_LEN, olx);
2716 pdst += 2 * RTE_ETHER_ADDR_LEN;
2717 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2718 ((RTE_ETHER_TYPE_VLAN << 16) |
2719 loc->mbuf->vlan_tci);
2720 pdst += sizeof(struct rte_vlan_hdr);
2721 tlen += 2 * RTE_ETHER_ADDR_LEN + sizeof(struct rte_vlan_hdr);
2723 MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
2725 * The WQEBB space availability is checked by caller.
2726 * Here we should be aware of WQE ring buffer wraparound only.
2728 part = (uint8_t *)txq->wqes_end - pdst;
2729 part = RTE_MIN(part, inlen - tlen);
2735 * Copying may be interrupted inside the routine
2736 * if it runs into the no-inline hint flag.
2738 copy = tlen >= txq->inlen_mode ? 0 : (txq->inlen_mode - tlen);
2739 copy = mlx5_tx_mseg_memcpy(pdst, loc, part, copy, olx);
2741 if (likely(inlen <= tlen) || copy < part) {
2742 es->inline_hdr_sz = rte_cpu_to_be_16(tlen);
2744 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2745 return (struct mlx5_wqe_dseg *)pdst;
2747 pdst = (uint8_t *)txq->wqes;
2748 part = inlen - tlen;
2753 * Build the Data Segment of pointer type.
2756 * Pointer to TX queue structure.
2758 * Pointer to burst routine local context.
2760 * Pointer to WQE to fill with built Data Segment.
2762 * Data buffer to point.
2764 * Data buffer length.
2766 * Configured Tx offloads mask. It is fully defined at
2767 * compile time and may be used for optimization.
2769 static __rte_always_inline void
2770 mlx5_tx_dseg_ptr(struct mlx5_txq_data *__rte_restrict txq,
2771 struct mlx5_txq_local *__rte_restrict loc,
2772 struct mlx5_wqe_dseg *__rte_restrict dseg,
2775 unsigned int olx __rte_unused)
2779 dseg->bcount = rte_cpu_to_be_32(len);
2780 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
2781 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
2785 * Build the Data Segment of pointer type or inline
2786 * if data length is less than buffer in minimal
2787 * Data Segment size.
2790 * Pointer to TX queue structure.
2792 * Pointer to burst routine local context.
2794 * Pointer to WQE to fill with built Data Segment.
2796 * Data buffer to point.
2798 * Data buffer length.
2800 * Configured Tx offloads mask. It is fully defined at
2801 * compile time and may be used for optimization.
2803 static __rte_always_inline void
2804 mlx5_tx_dseg_iptr(struct mlx5_txq_data *__rte_restrict txq,
2805 struct mlx5_txq_local *__rte_restrict loc,
2806 struct mlx5_wqe_dseg *__rte_restrict dseg,
2809 unsigned int olx __rte_unused)
2815 if (len > MLX5_DSEG_MIN_INLINE_SIZE) {
2816 dseg->bcount = rte_cpu_to_be_32(len);
2817 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
2818 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
2822 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
2823 /* Unrolled implementation of generic rte_memcpy. */
2824 dst = (uintptr_t)&dseg->inline_data[0];
2825 src = (uintptr_t)buf;
2827 #ifdef RTE_ARCH_STRICT_ALIGN
2828 MLX5_ASSERT(dst == RTE_PTR_ALIGN(dst, sizeof(uint32_t)));
2829 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
2830 dst += sizeof(uint32_t);
2831 src += sizeof(uint32_t);
2832 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
2833 dst += sizeof(uint32_t);
2834 src += sizeof(uint32_t);
2836 *(uint64_t *)dst = *(unaligned_uint64_t *)src;
2837 dst += sizeof(uint64_t);
2838 src += sizeof(uint64_t);
2842 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
2843 dst += sizeof(uint32_t);
2844 src += sizeof(uint32_t);
2847 *(uint16_t *)dst = *(unaligned_uint16_t *)src;
2848 dst += sizeof(uint16_t);
2849 src += sizeof(uint16_t);
2852 *(uint8_t *)dst = *(uint8_t *)src;
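/*
 * The word-by-word copy above handles at most
 * MLX5_DSEG_MIN_INLINE_SIZE bytes, where a generic memcpy call would
 * cost more than the copy itself; under RTE_ARCH_STRICT_ALIGN the
 * destination is written in 32-bit chunks so that every store stays
 * naturally aligned.
 */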
2856 * Build the Data Segment of inlined data from single
2857 * segment packet, no VLAN insertion.
2860 * Pointer to TX queue structure.
2862 * Pointer to burst routine local context.
2864 * Pointer to WQE to fill with built Data Segment.
2866 * Data buffer to point.
2868 * Data buffer length.
2870 * Configured Tx offloads mask. It is fully defined at
2871 * compile time and may be used for optimization.
2874 * Pointer to the next Data Segment after inlined data.
2875 * Ring buffer wraparound check is needed. We do not
2876 * do it here because it may not be needed for the
2877 * last packet in the eMPW session.
2879 static __rte_always_inline struct mlx5_wqe_dseg *
2880 mlx5_tx_dseg_empw(struct mlx5_txq_data *__rte_restrict txq,
2881 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
2882 struct mlx5_wqe_dseg *__rte_restrict dseg,
2885 unsigned int olx __rte_unused)
2890 if (!MLX5_TXOFF_CONFIG(MPW)) {
2891 /* Store the descriptor byte counter for eMPW sessions. */
2892 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
2893 pdst = &dseg->inline_data[0];
2895 /* The entire legacy MPW session counter is stored on close. */
2896 pdst = (uint8_t *)dseg;
2899 * The WQEBB space availability is checked by caller.
2900 * Here we should be aware of WQE ring buffer wraparound only.
2902 part = (uint8_t *)txq->wqes_end - pdst;
2903 part = RTE_MIN(part, len);
2905 rte_memcpy(pdst, buf, part);
2909 if (!MLX5_TXOFF_CONFIG(MPW))
2910 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2911 /* Note: no final wraparound check here. */
2912 return (struct mlx5_wqe_dseg *)pdst;
2914 pdst = (uint8_t *)txq->wqes;
2921 * Build the Data Segment of inlined data from single
2922 * segment packet with VLAN insertion.
2925 * Pointer to TX queue structure.
2927 * Pointer to burst routine local context.
2929 * Pointer to the dseg to fill with the built Data Segment.
2931 * Data buffer to point.
2933 * Data buffer length.
2935 * Configured Tx offloads mask. It is fully defined at
2936 * compile time and may be used for optimization.
2939 * Pointer to the next Data Segment after inlined data.
2940 * Ring buffer wraparound check is needed.
2942 static __rte_always_inline struct mlx5_wqe_dseg *
2943 mlx5_tx_dseg_vlan(struct mlx5_txq_data *__rte_restrict txq,
2944 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
2945 struct mlx5_wqe_dseg *__rte_restrict dseg,
2948 unsigned int olx __rte_unused)
2954 MLX5_ASSERT(len > MLX5_ESEG_MIN_INLINE_SIZE);
2955 static_assert(MLX5_DSEG_MIN_INLINE_SIZE ==
2956 (2 * RTE_ETHER_ADDR_LEN),
2957 "invalid Data Segment data size");
2958 if (!MLX5_TXOFF_CONFIG(MPW)) {
2959 /* Store the descriptor byte counter for eMPW sessions. */
2960 dseg->bcount = rte_cpu_to_be_32
2961 ((len + sizeof(struct rte_vlan_hdr)) |
2962 MLX5_ETH_WQE_DATA_INLINE);
2963 pdst = &dseg->inline_data[0];
2965 /* The entire legacy MPW session counter is stored on close. */
2966 pdst = (uint8_t *)dseg;
2968 memcpy(pdst, buf, MLX5_DSEG_MIN_INLINE_SIZE);
2969 buf += MLX5_DSEG_MIN_INLINE_SIZE;
2970 pdst += MLX5_DSEG_MIN_INLINE_SIZE;
2971 len -= MLX5_DSEG_MIN_INLINE_SIZE;
2972 /* Insert VLAN ethertype + VLAN tag. Pointer is aligned. */
2973 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
2974 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
2975 pdst = (uint8_t *)txq->wqes;
2976 *(uint32_t *)pdst = rte_cpu_to_be_32((RTE_ETHER_TYPE_VLAN << 16) |
2977 loc->mbuf->vlan_tci);
2978 pdst += sizeof(struct rte_vlan_hdr);
2980 * The WQEBB space availability is checked by caller.
2981 * Here we should be aware of WQE ring buffer wraparound only.
2983 part = (uint8_t *)txq->wqes_end - pdst;
2984 part = RTE_MIN(part, len);
2986 rte_memcpy(pdst, buf, part);
2990 if (!MLX5_TXOFF_CONFIG(MPW))
2991 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2992 /* Note: no final wraparound check here. */
2993 return (struct mlx5_wqe_dseg *)pdst;
2995 pdst = (uint8_t *)txq->wqes;
3002 * Build the Ethernet Segment with optionally inlined data with
3003 * VLAN insertion and following Data Segments (if any) from
3004 * multi-segment packet. Used by ordinary send and TSO.
3007 * Pointer to TX queue structure.
3009 * Pointer to burst routine local context.
3011 * Pointer to WQE to fill with built Ethernet/Data Segments.
3013 * Length of VLAN header to insert, 0 means no VLAN insertion.
3015 * Data length to inline. For TSO this parameter specifies the
3016 * exact value; for the ordinary send routine it can be aligned by
3017 * the caller to provide better WQE space saving and data buffer
3018 * start address alignment. This length includes the VLAN header
3019 * being inserted.
3021 * Zero means ordinary send, inlined data can be extended,
3022 * otherwise this is TSO, inlined data length is fixed.
3024 * Configured Tx offloads mask. It is fully defined at
3025 * compile time and may be used for optimization.
3028 * Actual size of built WQE in segments.
3030 static __rte_always_inline unsigned int
3031 mlx5_tx_mseg_build(struct mlx5_txq_data *__rte_restrict txq,
3032 struct mlx5_txq_local *__rte_restrict loc,
3033 struct mlx5_wqe *__rte_restrict wqe,
3037 unsigned int olx __rte_unused)
3039 struct mlx5_wqe_dseg *__rte_restrict dseg;
3042 MLX5_ASSERT((rte_pktmbuf_pkt_len(loc->mbuf) + vlan) >= inlen);
3043 loc->mbuf_nseg = NB_SEGS(loc->mbuf);
3046 dseg = mlx5_tx_eseg_mdat(txq, loc, wqe, vlan, inlen, tso, olx);
3047 if (!loc->mbuf_nseg)
3050 * There are still some mbufs remaining, not inlined.
3051 * The first mbuf may be partially inlined and we
3052 * must process the possible non-zero data offset.
3054 if (loc->mbuf_off) {
3059 * Exhausted packets must have been dropped before.
3060 * A non-zero offset means there is some data
3061 * remaining in the packet.
3063 MLX5_ASSERT(loc->mbuf_off < rte_pktmbuf_data_len(loc->mbuf));
3064 MLX5_ASSERT(rte_pktmbuf_data_len(loc->mbuf));
3065 dptr = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
3067 dlen = rte_pktmbuf_data_len(loc->mbuf) - loc->mbuf_off;
3069 * Build the pointer/minimal data Data Segment.
3070 * Do ring buffer wrapping check in advance.
3072 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3073 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3074 mlx5_tx_dseg_iptr(txq, loc, dseg, dptr, dlen, olx);
3075 /* Store the mbuf to be freed on completion. */
3076 MLX5_ASSERT(loc->elts_free);
3077 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3080 if (--loc->mbuf_nseg == 0)
3082 loc->mbuf = loc->mbuf->next;
3086 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
3087 struct rte_mbuf *mbuf;
3089 /* Zero length segment found, just skip. */
3091 loc->mbuf = loc->mbuf->next;
3092 rte_pktmbuf_free_seg(mbuf);
3093 if (--loc->mbuf_nseg == 0)
3096 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3097 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3100 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3101 rte_pktmbuf_data_len(loc->mbuf), olx);
3102 MLX5_ASSERT(loc->elts_free);
3103 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3106 if (--loc->mbuf_nseg == 0)
3108 loc->mbuf = loc->mbuf->next;
3113 /* Calculate actual segments used from the dseg pointer. */
3114 if ((uintptr_t)wqe < (uintptr_t)dseg)
3115 ds = ((uintptr_t)dseg - (uintptr_t)wqe) / MLX5_WSEG_SIZE;
3117 ds = (((uintptr_t)dseg - (uintptr_t)wqe) +
3118 txq->wqe_s * MLX5_WQE_SIZE) / MLX5_WSEG_SIZE;
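/*
 * If dseg has wrapped past the end of the WQE ring, the distance
 * back to the title WQE is taken modulo the ring size; either way
 * ds ends up as the WQE length in 16-byte segments, four of which
 * make up one WQEBB.
 */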
3123 * The routine checks the timestamp flag in the current packet,
3124 * and pushes a WAIT WQE into the queue if scheduling is required.
3127 * Pointer to TX queue structure.
3129 * Pointer to burst routine local context.
3131 * Configured Tx offloads mask. It is fully defined at
3132 * compile time and may be used for optimization.
3135 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3136 * MLX5_TXCMP_CODE_SINGLE - continue processing with the packet.
3137 * MLX5_TXCMP_CODE_MULTI - the WAIT inserted, continue processing.
3138 * Local context variables partially updated.
3140 static __rte_always_inline enum mlx5_txcmp_code
3141 mlx5_tx_schedule_send(struct mlx5_txq_data *restrict txq,
3142 struct mlx5_txq_local *restrict loc,
3145 if (MLX5_TXOFF_CONFIG(TXPP) &&
3146 loc->mbuf->ol_flags & txq->ts_mask) {
3147 struct mlx5_wqe *wqe;
3152 * Estimate the required space quickly and roughly.
3153 * We would like to ensure the packet can be pushed
3154 * to the queue and we won't get an orphan WAIT WQE.
3156 if (loc->wqe_free <= MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE ||
3157 loc->elts_free < NB_SEGS(loc->mbuf))
3158 return MLX5_TXCMP_CODE_EXIT;
3159 /* Convert the timestamp into completion to wait. */
3160 ts = *RTE_MBUF_DYNFIELD(loc->mbuf, txq->ts_offset, uint64_t *);
3161 wci = mlx5_txpp_convert_tx_ts(txq->sh, ts);
3162 if (unlikely(wci < 0))
3163 return MLX5_TXCMP_CODE_SINGLE;
3164 /* Build the WAIT WQE with specified completion. */
3165 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3166 mlx5_tx_cseg_init(txq, loc, wqe, 2, MLX5_OPCODE_WAIT, olx);
3167 mlx5_tx_wseg_init(txq, loc, wqe, wci, olx);
3170 return MLX5_TXCMP_CODE_MULTI;
3172 return MLX5_TXCMP_CODE_SINGLE;
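/*
 * The scheduling timestamp is read from the dynamic mbuf field at
 * txq->ts_offset and converted into a Clock Queue completion index.
 * A negative conversion result (the requested time has already
 * passed) falls back to immediate send via MLX5_TXCMP_CODE_SINGLE.
 */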
3176 * Tx one packet function for multi-segment TSO. Supports all
3177 * types of Tx offloads, uses MLX5_OPCODE_TSO to build WQEs,
3178 * sends one packet per WQE.
3180 * This routine is responsible for storing processed mbuf
3181 * into elts ring buffer and update elts_head.
3184 * Pointer to TX queue structure.
3186 * Pointer to burst routine local context.
3188 * Configured Tx offloads mask. It is fully defined at
3189 * compile time and may be used for optimization.
3192 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3193 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3194 * Local context variables partially updated.
3196 static __rte_always_inline enum mlx5_txcmp_code
3197 mlx5_tx_packet_multi_tso(struct mlx5_txq_data *__rte_restrict txq,
3198 struct mlx5_txq_local *__rte_restrict loc,
3201 struct mlx5_wqe *__rte_restrict wqe;
3202 unsigned int ds, dlen, inlen, ntcp, vlan = 0;
3204 if (MLX5_TXOFF_CONFIG(TXPP)) {
3205 enum mlx5_txcmp_code wret;
3207 /* Generate WAIT for scheduling if requested. */
3208 wret = mlx5_tx_schedule_send(txq, loc, olx);
3209 if (wret == MLX5_TXCMP_CODE_EXIT)
3210 return MLX5_TXCMP_CODE_EXIT;
3211 if (wret == MLX5_TXCMP_CODE_ERROR)
3212 return MLX5_TXCMP_CODE_ERROR;
3215 * Calculate data length to be inlined to estimate
3216 * the required space in WQE ring buffer.
3218 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
3219 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3220 vlan = sizeof(struct rte_vlan_hdr);
3221 inlen = loc->mbuf->l2_len + vlan +
3222 loc->mbuf->l3_len + loc->mbuf->l4_len;
3223 if (unlikely((!inlen || !loc->mbuf->tso_segsz)))
3224 return MLX5_TXCMP_CODE_ERROR;
3225 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
3226 inlen += loc->mbuf->outer_l2_len + loc->mbuf->outer_l3_len;
3227 /* Packet must contain all TSO headers. */
3228 if (unlikely(inlen > MLX5_MAX_TSO_HEADER ||
3229 inlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
3230 inlen > (dlen + vlan)))
3231 return MLX5_TXCMP_CODE_ERROR;
3232 MLX5_ASSERT(inlen >= txq->inlen_mode);
3234 * Check whether there are enough free WQEBBs:
3236 * - Ethernet Segment
3237 * - First Segment of inlined Ethernet data
3238 * - ... data continued ...
3239 * - Data Segments of pointer/min inline type
3241 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
3242 MLX5_ESEG_MIN_INLINE_SIZE +
3244 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
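/*
 * Breakdown of the estimate above: one Control and one Ethernet
 * Segment (the latter already holding the first
 * MLX5_ESEG_MIN_INLINE_SIZE header bytes), the remaining inlined
 * header bytes rounded up to whole 16-byte segments, plus one
 * pointer Data Segment per mbuf in the chain.
 */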
3245 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3246 return MLX5_TXCMP_CODE_EXIT;
3247 /* Check for maximal WQE size. */
3248 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3249 return MLX5_TXCMP_CODE_ERROR;
3250 #ifdef MLX5_PMD_SOFT_COUNTERS
3251 /* Update sent data bytes/packets counters. */
3252 ntcp = (dlen - (inlen - vlan) + loc->mbuf->tso_segsz - 1) /
3253 loc->mbuf->tso_segsz;
3255 * One will be added for the mbuf itself
3256 * at the end of the mlx5_tx_burst via the
3257 * loc->pkts_sent field.
3260 txq->stats.opackets += ntcp;
3261 txq->stats.obytes += dlen + vlan + ntcp * inlen;
3263 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3264 loc->wqe_last = wqe;
3265 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_TSO, olx);
3266 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 1, olx);
3267 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3268 txq->wqe_ci += (ds + 3) / 4;
3269 loc->wqe_free -= (ds + 3) / 4;
3270 return MLX5_TXCMP_CODE_MULTI;
3274 * Tx one packet function for multi-segment SEND. Supports all
3275 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
3276 * sends one packet per WQE, without any data inlining in
3277 * the Ethernet Segment.
3279 * This routine is responsible for storing processed mbuf
3280 * into elts ring buffer and update elts_head.
3283 * Pointer to TX queue structure.
3285 * Pointer to burst routine local context.
3287 * Configured Tx offloads mask. It is fully defined at
3288 * compile time and may be used for optimization.
3291 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3292 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3293 * Local context variables partially updated.
3295 static __rte_always_inline enum mlx5_txcmp_code
3296 mlx5_tx_packet_multi_send(struct mlx5_txq_data *__rte_restrict txq,
3297 struct mlx5_txq_local *__rte_restrict loc,
3300 struct mlx5_wqe_dseg *__rte_restrict dseg;
3301 struct mlx5_wqe *__rte_restrict wqe;
3302 unsigned int ds, nseg;
3304 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
3305 if (MLX5_TXOFF_CONFIG(TXPP)) {
3306 enum mlx5_txcmp_code wret;
3308 /* Generate WAIT for scheduling if requested. */
3309 wret = mlx5_tx_schedule_send(txq, loc, olx);
3310 if (wret == MLX5_TXCMP_CODE_EXIT)
3311 return MLX5_TXCMP_CODE_EXIT;
3312 if (wret == MLX5_TXCMP_CODE_ERROR)
3313 return MLX5_TXCMP_CODE_ERROR;
3316 * No inlining at all; this means saving CPU cycles
3317 * was prioritized at configuration time, so we should not
3318 * copy any packet data to the WQE.
3320 nseg = NB_SEGS(loc->mbuf);
3322 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3323 return MLX5_TXCMP_CODE_EXIT;
3324 /* Check for maximal WQE size. */
3325 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3326 return MLX5_TXCMP_CODE_ERROR;
3328 * Some Tx offloads may cause an error if the
3329 * packet is not long enough, check against the
3330 * assumed minimal length.
3332 if (rte_pktmbuf_pkt_len(loc->mbuf) <= MLX5_ESEG_MIN_INLINE_SIZE)
3333 return MLX5_TXCMP_CODE_ERROR;
3334 #ifdef MLX5_PMD_SOFT_COUNTERS
3335 /* Update sent data bytes counter. */
3336 txq->stats.obytes += rte_pktmbuf_pkt_len(loc->mbuf);
3337 if (MLX5_TXOFF_CONFIG(VLAN) &&
3338 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3339 txq->stats.obytes += sizeof(struct rte_vlan_hdr);
3342 * SEND WQE, one WQEBB:
3343 * - Control Segment, SEND opcode
3344 * - Ethernet Segment, optional VLAN, no inline
3345 * - Data Segments, pointer only type
3347 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3348 loc->wqe_last = wqe;
3349 mlx5_tx_cseg_init(txq, loc, wqe, ds, MLX5_OPCODE_SEND, olx);
3350 mlx5_tx_eseg_none(txq, loc, wqe, olx);
3351 dseg = &wqe->dseg[0];
3353 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
3354 struct rte_mbuf *mbuf;
3357 * Zero length segment found, have to
3358 * correct total size of WQE in segments.
3359 * It is supposed to be rare occasion, so
3360 * in normal case (no zero length segments)
3361 * we avoid extra writing to the Control
3362 * Segment.
3364 --ds;
3365 wqe->cseg.sq_ds -= RTE_BE32(1);
3367 loc->mbuf = mbuf->next;
3368 rte_pktmbuf_free_seg(mbuf);
3374 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3375 rte_pktmbuf_data_len(loc->mbuf), olx);
3376 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3381 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3382 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3383 loc->mbuf = loc->mbuf->next;
3386 txq->wqe_ci += (ds + 3) / 4;
3387 loc->wqe_free -= (ds + 3) / 4;
3388 return MLX5_TXCMP_CODE_MULTI;
3392 * Tx one packet function for multi-segment SEND. Supports all
3393 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
3394 * sends one packet per WQE, with data inlining in
3395 * Ethernet Segment and minimal Data Segments.
3397 * This routine is responsible for storing processed mbuf
3398 * into elts ring buffer and update elts_head.
3401 * Pointer to TX queue structure.
3403 * Pointer to burst routine local context.
3405 * Configured Tx offloads mask. It is fully defined at
3406 * compile time and may be used for optimization.
3409 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3410 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3411 * Local context variables partially updated.
3413 static __rte_always_inline enum mlx5_txcmp_code
3414 mlx5_tx_packet_multi_inline(struct mlx5_txq_data *__rte_restrict txq,
3415 struct mlx5_txq_local *__rte_restrict loc,
3418 struct mlx5_wqe *__rte_restrict wqe;
3419 unsigned int ds, inlen, dlen, vlan = 0;
3421 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
3422 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
3423 if (MLX5_TXOFF_CONFIG(TXPP)) {
3424 enum mlx5_txcmp_code wret;
3426 /* Generate WAIT for scheduling if requested. */
3427 wret = mlx5_tx_schedule_send(txq, loc, olx);
3428 if (wret == MLX5_TXCMP_CODE_EXIT)
3429 return MLX5_TXCMP_CODE_EXIT;
3430 if (wret == MLX5_TXCMP_CODE_ERROR)
3431 return MLX5_TXCMP_CODE_ERROR;
3434 * First calculate data length to be inlined
3435 * to estimate the required space for WQE.
3437 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
3438 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3439 vlan = sizeof(struct rte_vlan_hdr);
3440 inlen = dlen + vlan;
3441 /* Check against minimal length. */
3442 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
3443 return MLX5_TXCMP_CODE_ERROR;
3444 MLX5_ASSERT(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
3445 if (inlen > txq->inlen_send ||
3446 loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
3447 struct rte_mbuf *mbuf;
3452 * Packet length exceeds the allowed inline
3453 * data length, check whether the minimal
3454 * inlining is required.
3456 if (txq->inlen_mode) {
3457 MLX5_ASSERT(txq->inlen_mode >=
3458 MLX5_ESEG_MIN_INLINE_SIZE);
3459 MLX5_ASSERT(txq->inlen_mode <= txq->inlen_send);
3460 inlen = txq->inlen_mode;
3462 if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE ||
3463 !vlan || txq->vlan_en) {
3465 * VLAN insertion will be done by the HW itself.
3466 * It is not the most efficient path - the VLAN flag is
3467 * checked twice, but we must compute the
3468 * inlining length correctly and take into
3469 * account the VLAN header being inserted.
3471 return mlx5_tx_packet_multi_send
3474 inlen = MLX5_ESEG_MIN_INLINE_SIZE;
3477 * Now we know the minimal amount of data requested
3478 * to inline. Check whether we should inline the buffers
3479 * from the beginning of the chain to eliminate some mbufs.
3482 nxlen = rte_pktmbuf_data_len(mbuf);
3483 if (unlikely(nxlen <= txq->inlen_send)) {
3484 /* We can inline first mbuf at least. */
3485 if (nxlen < inlen) {
3488 /* Scan mbufs until inlen is filled. */
3493 nxlen = rte_pktmbuf_data_len(mbuf);
3495 } while (unlikely(nxlen < inlen));
3496 if (unlikely(nxlen > txq->inlen_send)) {
3497 /* We cannot inline entire mbuf. */
3498 smlen = inlen - smlen;
3499 start = rte_pktmbuf_mtod_offset
3500 (mbuf, uintptr_t, smlen);
3507 /* This must not be the end of the packet. */
3509 nxlen = inlen + rte_pktmbuf_data_len(mbuf);
3510 } while (unlikely(nxlen < txq->inlen_send));
3512 start = rte_pktmbuf_mtod(mbuf, uintptr_t);
3514 * Check whether we can extend the inline data to align
3515 * the start address of the data buffer to a cacheline.
3518 start = (~start + 1) & (RTE_CACHE_LINE_SIZE - 1);
3519 if (unlikely(start)) {
3521 if (start <= txq->inlen_send)
3526 * Check whether there are enough free WQEBBs:
3528 * - Ethernet Segment
3529 * - First Segment of inlined Ethernet data
3530 * - ... data continued ...
3531 * - Data Segments of pointer/min inline type
3533 * Estimate the number of Data Segments conservatively,
3534 * assuming no mbufs are freed during the inlining.
3536 MLX5_ASSERT(inlen <= txq->inlen_send);
3537 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
3538 MLX5_ESEG_MIN_INLINE_SIZE +
3540 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3541 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3542 return MLX5_TXCMP_CODE_EXIT;
3543 /* Check for maximal WQE size. */
3544 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3545 return MLX5_TXCMP_CODE_ERROR;
3546 #ifdef MLX5_PMD_SOFT_COUNTERS
3547 /* Update sent data bytes/packets counters. */
3548 txq->stats.obytes += dlen + vlan;
3550 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3551 loc->wqe_last = wqe;
3552 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_SEND, olx);
3553 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 0, olx);
3554 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3555 txq->wqe_ci += (ds + 3) / 4;
3556 loc->wqe_free -= (ds + 3) / 4;
3557 return MLX5_TXCMP_CODE_MULTI;
3561 * Tx burst function for multi-segment packets. Supports all
3562 * types of Tx offloads, uses MLX5_OPCODE_SEND/TSO to build WQEs,
3563 * sends one packet per WQE. The function stops sending if it
3564 * encounters a single-segment packet.
3566 * This routine is responsible for storing processed mbuf
3567 * into elts ring buffer and update elts_head.
3570 * Pointer to TX queue structure.
3572 * Packets to transmit.
3574 * Number of packets in array.
3576 * Pointer to burst routine local context.
3578 * Configured Tx offloads mask. It is fully defined at
3579 * compile time and may be used for optimization.
3582 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3583 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3584 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
3585 * MLX5_TXCMP_CODE_TSO - TSO single-segment packet encountered.
3586 * Local context variables updated.
3588 static __rte_always_inline enum mlx5_txcmp_code
3589 mlx5_tx_burst_mseg(struct mlx5_txq_data *__rte_restrict txq,
3590 struct rte_mbuf **__rte_restrict pkts,
3591 unsigned int pkts_n,
3592 struct mlx5_txq_local *__rte_restrict loc,
3595 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3596 MLX5_ASSERT(pkts_n > loc->pkts_sent);
3597 pkts += loc->pkts_sent + 1;
3598 pkts_n -= loc->pkts_sent;
3600 enum mlx5_txcmp_code ret;
3602 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
3604 * Estimate the number of free elts quickly but
3605 * conservatively. Some segment may be fully inlined
3606 * and freed, ignore this here - precise estimation
3607 * is costly.
3609 if (loc->elts_free < NB_SEGS(loc->mbuf))
3610 return MLX5_TXCMP_CODE_EXIT;
3611 if (MLX5_TXOFF_CONFIG(TSO) &&
3612 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)) {
3613 /* Proceed with multi-segment TSO. */
3614 ret = mlx5_tx_packet_multi_tso(txq, loc, olx);
3615 } else if (MLX5_TXOFF_CONFIG(INLINE)) {
3616 /* Proceed with multi-segment SEND with inlining. */
3617 ret = mlx5_tx_packet_multi_inline(txq, loc, olx);
3619 /* Proceed with multi-segment SEND w/o inlining. */
3620 ret = mlx5_tx_packet_multi_send(txq, loc, olx);
3622 if (ret == MLX5_TXCMP_CODE_EXIT)
3623 return MLX5_TXCMP_CODE_EXIT;
3624 if (ret == MLX5_TXCMP_CODE_ERROR)
3625 return MLX5_TXCMP_CODE_ERROR;
3626 /* WQE is built, go to the next packet. */
3629 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3630 return MLX5_TXCMP_CODE_EXIT;
3631 loc->mbuf = *pkts++;
3633 rte_prefetch0(*pkts);
3634 if (likely(NB_SEGS(loc->mbuf) > 1))
3636 /* Here ends the series of multi-segment packets. */
3637 if (MLX5_TXOFF_CONFIG(TSO) &&
3638 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
3639 return MLX5_TXCMP_CODE_TSO;
3640 return MLX5_TXCMP_CODE_SINGLE;
3646 * Tx burst function for single-segment packets with TSO.
3647 * Supports all types of Tx offloads, except multi-packets.
3648 * Uses MLX5_OPCODE_TSO to build WQEs, sends one packet per WQE.
3649 * The function stops sending if it encounters a multi-segment
3650 * packet or a packet without TSO requested.
3652 * The routine is responsible for storing processed mbufs
3653 * into the elts ring buffer and updating elts_head if inline
3654 * offload is requested, due to possible early freeing
3655 * of the inlined mbufs (the pkts array cannot be stored
3656 * in elts as a batch).
3659 * Pointer to TX queue structure.
3661 * Packets to transmit.
3663 * Number of packets in array.
3665 * Pointer to burst routine local context.
3667 * Configured Tx offloads mask. It is fully defined at
3668 * compile time and may be used for optimization.
3671 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3672 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3673 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
3674 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
3675 * Local context variables updated.
3677 static __rte_always_inline enum mlx5_txcmp_code
3678 mlx5_tx_burst_tso(struct mlx5_txq_data *__rte_restrict txq,
3679 struct rte_mbuf **__rte_restrict pkts,
3680 unsigned int pkts_n,
3681 struct mlx5_txq_local *__rte_restrict loc,
3684 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3685 MLX5_ASSERT(pkts_n > loc->pkts_sent);
3686 pkts += loc->pkts_sent + 1;
3687 pkts_n -= loc->pkts_sent;
3689 struct mlx5_wqe_dseg *__rte_restrict dseg;
3690 struct mlx5_wqe *__rte_restrict wqe;
3691 unsigned int ds, dlen, hlen, ntcp, vlan = 0;
3694 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
3695 if (MLX5_TXOFF_CONFIG(TXPP)) {
3696 enum mlx5_txcmp_code wret;
3698 /* Generate WAIT for scheduling if requested. */
3699 wret = mlx5_tx_schedule_send(txq, loc, olx);
3700 if (wret == MLX5_TXCMP_CODE_EXIT)
3701 return MLX5_TXCMP_CODE_EXIT;
3702 if (wret == MLX5_TXCMP_CODE_ERROR)
3703 return MLX5_TXCMP_CODE_ERROR;
3705 dlen = rte_pktmbuf_data_len(loc->mbuf);
3706 if (MLX5_TXOFF_CONFIG(VLAN) &&
3707 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
3708 vlan = sizeof(struct rte_vlan_hdr);
3711 * First calculate the WQE size to check
3712 * whether we have enough space in ring buffer.
3714 hlen = loc->mbuf->l2_len + vlan +
3715 loc->mbuf->l3_len + loc->mbuf->l4_len;
3716 if (unlikely((!hlen || !loc->mbuf->tso_segsz)))
3717 return MLX5_TXCMP_CODE_ERROR;
3718 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
3719 hlen += loc->mbuf->outer_l2_len +
3720 loc->mbuf->outer_l3_len;
3721 /* Segment must contain all TSO headers. */
3722 if (unlikely(hlen > MLX5_MAX_TSO_HEADER ||
3723 hlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
3724 hlen > (dlen + vlan)))
3725 return MLX5_TXCMP_CODE_ERROR;
3727 * Check whether there are enough free WQEBBs:
3729 * - Ethernet Segment
3730 * - First Segment of inlined Ethernet data
3731 * - ... data continued ...
3732 * - Finishing Data Segment of pointer type
3734 ds = 4 + (hlen - MLX5_ESEG_MIN_INLINE_SIZE +
3735 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3736 if (loc->wqe_free < ((ds + 3) / 4))
3737 return MLX5_TXCMP_CODE_EXIT;
3738 #ifdef MLX5_PMD_SOFT_COUNTERS
3739 /* Update sent data bytes/packets counters. */
3740 ntcp = (dlen + vlan - hlen +
3741 loc->mbuf->tso_segsz - 1) /
3742 loc->mbuf->tso_segsz;
3744 * One will be added for the mbuf itself at the end
3745 * of the mlx5_tx_burst via the loc->pkts_sent field.
3748 txq->stats.opackets += ntcp;
3749 txq->stats.obytes += dlen + vlan + ntcp * hlen;
3752 * Build the TSO WQE:
3754 * - Ethernet Segment with hlen bytes inlined
3755 * - Data Segment of pointer type
3757 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3758 loc->wqe_last = wqe;
3759 mlx5_tx_cseg_init(txq, loc, wqe, ds,
3760 MLX5_OPCODE_TSO, olx);
3761 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan, hlen, 1, olx);
3762 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) + hlen - vlan;
3763 dlen -= hlen - vlan;
3764 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
3766 * WQE is built, update the loop parameters
3767 * and go to the next packet.
3769 txq->wqe_ci += (ds + 3) / 4;
3770 loc->wqe_free -= (ds + 3) / 4;
3771 if (MLX5_TXOFF_CONFIG(INLINE))
3772 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3776 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3777 return MLX5_TXCMP_CODE_EXIT;
3778 loc->mbuf = *pkts++;
3780 rte_prefetch0(*pkts);
3781 if (MLX5_TXOFF_CONFIG(MULTI) &&
3782 unlikely(NB_SEGS(loc->mbuf) > 1))
3783 return MLX5_TXCMP_CODE_MULTI;
3784 if (likely(!(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)))
3785 return MLX5_TXCMP_CODE_SINGLE;
3786 /* Continue with the next TSO packet. */
3792 * Analyze the packet and select the best method to send.
3795 * Pointer to TX queue structure.
3797 * Pointer to burst routine local context.
3799 * Configured Tx offloads mask. It is fully defined at
3800 * compile time and may be used for optimization.
3802 * The predefined flag whether to do the complete check for
3803 * multi-segment packets and TSO.
3806 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
3807 * MLX5_TXCMP_CODE_TSO - TSO required, use TSO/LSO.
3808 * MLX5_TXCMP_CODE_SINGLE - single-segment packet, use SEND.
3809 * MLX5_TXCMP_CODE_EMPW - single-segment packet, use MPW.
3811 static __rte_always_inline enum mlx5_txcmp_code
3812 mlx5_tx_able_to_empw(struct mlx5_txq_data *__rte_restrict txq,
3813 struct mlx5_txq_local *__rte_restrict loc,
3817 /* Check for multi-segment packet. */
3819 MLX5_TXOFF_CONFIG(MULTI) &&
3820 unlikely(NB_SEGS(loc->mbuf) > 1))
3821 return MLX5_TXCMP_CODE_MULTI;
3822 /* Check for TSO packet. */
3824 MLX5_TXOFF_CONFIG(TSO) &&
3825 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
3826 return MLX5_TXCMP_CODE_TSO;
3827 /* Check if eMPW is enabled at all. */
3828 if (!MLX5_TXOFF_CONFIG(EMPW))
3829 return MLX5_TXCMP_CODE_SINGLE;
3830 /* Check if eMPW can be engaged. */
3831 if (MLX5_TXOFF_CONFIG(VLAN) &&
3832 unlikely(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) &&
3833 (!MLX5_TXOFF_CONFIG(INLINE) ||
3834 unlikely((rte_pktmbuf_data_len(loc->mbuf) +
3835 sizeof(struct rte_vlan_hdr)) > txq->inlen_empw))) {
3837 * eMPW does not support the VLAN insertion offload,
3838 * so we would have to inline the entire packet, but
3839 * the packet is too long for inlining.
3841 return MLX5_TXCMP_CODE_SINGLE;
3843 return MLX5_TXCMP_CODE_EMPW;
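/*
 * Note: the Ethernet Segment of an eMPW WQE is shared by the whole
 * batch, so per-packet VLAN insertion cannot be offloaded there;
 * it is only possible by inlining the tag into the packet data,
 * which the check above enforces.
 */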
3847 * Check the next packet attributes to match with the eMPW batch ones.
3848 * In addition, for legacy MPW the packet length is checked as well.
3851 * Pointer to TX queue structure.
3853 * Pointer to Ethernet Segment of eMPW batch.
3855 * Pointer to burst routine local context.
3857 * Length of previous packet in MPW descriptor.
3859 * Configured Tx offloads mask. It is fully defined at
3860 * compile time and may be used for optimization.
3863 * true - packet match with eMPW batch attributes.
3864 * false - no match, eMPW should be restarted.
3866 static __rte_always_inline bool
3867 mlx5_tx_match_empw(struct mlx5_txq_data *__rte_restrict txq,
3868 struct mlx5_wqe_eseg *__rte_restrict es,
3869 struct mlx5_txq_local *__rte_restrict loc,
3873 uint8_t swp_flags = 0;
3875 /* Compare the checksum flags, if any. */
3876 if (MLX5_TXOFF_CONFIG(CSUM) &&
3877 txq_ol_cksum_to_cs(loc->mbuf) != es->cs_flags)
3879 /* Compare the Software Parser offsets and flags. */
3880 if (MLX5_TXOFF_CONFIG(SWP) &&
3881 (es->swp_offs != txq_mbuf_to_swp(loc, &swp_flags, olx) ||
3882 es->swp_flags != swp_flags))
3884 /* Fill metadata field if needed. */
3885 if (MLX5_TXOFF_CONFIG(METADATA) &&
3886 es->metadata != (loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
3887 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0))
3889 * Legacy MPW can send packets with the same length only.
3890 if (MLX5_TXOFF_CONFIG(MPW) &&
3891 dlen != rte_pktmbuf_data_len(loc->mbuf))
3893 /* There must be no VLAN packets in eMPW loop. */
3894 if (MLX5_TXOFF_CONFIG(VLAN))
3895 MLX5_ASSERT(!(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT));
3896 /* Check if the scheduling is requested. */
3897 if (MLX5_TXOFF_CONFIG(TXPP) &&
3898 loc->mbuf->ol_flags & txq->ts_mask)
3904 * Update send loop variables and WQE for eMPW loop
3905 * without data inlining. Number of Data Segments is
3906 * equal to the number of sent packets.
3909 * Pointer to TX queue structure.
3911 * Pointer to burst routine local context.
3913 * Number of packets (equal to the number of Data Segments).
3915 * Accumulated statistics, data bytes sent.
3917 * Configured Tx offloads mask. It is fully defined at
3918 * compile time and may be used for optimization.
3924 static __rte_always_inline void
3925 mlx5_tx_sdone_empw(struct mlx5_txq_data *__rte_restrict txq,
3926 struct mlx5_txq_local *__rte_restrict loc,
3929 unsigned int olx __rte_unused)
3931 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
3932 #ifdef MLX5_PMD_SOFT_COUNTERS
3933 /* Update sent data bytes counter. */
3934 txq->stats.obytes += slen;
3938 loc->elts_free -= ds;
3939 loc->pkts_sent += ds;
3941 loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3942 txq->wqe_ci += (ds + 3) / 4;
3943 loc->wqe_free -= (ds + 3) / 4;
3947 * Update send loop variables and WQE for eMPW loop
3948 * with data inlining. Gets the size of pushed descriptors
3949 * and data to the WQE.
3952 * Pointer to TX queue structure.
3954 * Pointer to burst routine local context.
3956 * Total size of descriptor/data in bytes.
3958 * Accumulated statistics, data bytes sent.
3960 * The base WQE for the eMPW/MPW descriptor.
3962 * Configured Tx offloads mask. It is fully defined at
3963 * compile time and may be used for optimization.
3969 static __rte_always_inline void
3970 mlx5_tx_idone_empw(struct mlx5_txq_data *__rte_restrict txq,
3971 struct mlx5_txq_local *__rte_restrict loc,
3974 struct mlx5_wqe *__rte_restrict wqem,
3975 unsigned int olx __rte_unused)
3977 struct mlx5_wqe_dseg *dseg = &wqem->dseg[0];
3979 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
3980 #ifdef MLX5_PMD_SOFT_COUNTERS
3981 /* Update sent data bytes counter. */
3982 txq->stats.obytes += slen;
3986 if (MLX5_TXOFF_CONFIG(MPW) && dseg->bcount == RTE_BE32(0)) {
3988 * If the legacy MPW session contains inline packets,
3989 * we should set the length of its single inline data
3990 * segment and align the total length to the segment size.
3992 MLX5_ASSERT(len > sizeof(dseg->bcount));
3993 dseg->bcount = rte_cpu_to_be_32((len - sizeof(dseg->bcount)) |
3994 MLX5_ETH_WQE_DATA_INLINE);
3995 len = (len + MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE + 2;
3998 * The session is not a legacy MPW one, or it contains
3999 * data buffer pointer segments.
4001 MLX5_ASSERT((len % MLX5_WSEG_SIZE) == 0);
4002 len = len / MLX5_WSEG_SIZE + 2;
4004 wqem->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | len);
4005 txq->wqe_ci += (len + 3) / 4;
4006 loc->wqe_free -= (len + 3) / 4;
4007 loc->wqe_last = wqem;
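/*
 * In both branches above len ends up as the WQE length in 16-byte
 * segments: the Data Segments pushed by the caller plus 2 for the
 * Control and Ethernet Segments of the title WQEBB.
 */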
4011 * The set of Tx burst functions for single-segment packets
4012 * without TSO and with Multi-Packet Writing feature support.
4013 * Supports all types of Tx offloads, except multi-segment
4014 * packets and TSO.
4016 * Uses MLX5_OPCODE_EMPW to build WQEs if possible and sends
4017 * as many packets per WQE as it can. If eMPW is not configured
4018 * or a packet can not be sent with eMPW (VLAN insertion) the
4019 * ordinary SEND opcode is used and only one packet is placed
4020 * in the WQE.
4022 * The functions stop sending if they encounter a multi-segment
4023 * packet or a packet with TSO requested.
4025 * The routines are responsible for storing processed mbufs
4026 * into the elts ring buffer and updating elts_head if inlining
4027 * offload is requested. Otherwise the copying of mbufs to elts
4028 * can be postponed and completed at the end of the burst routine.
4031 * Pointer to TX queue structure.
4033 * Packets to transmit.
4035 * Number of packets in array.
4037 * Pointer to burst routine local context.
4039 * Configured Tx offloads mask. It is fully defined at
4040 * compile time and may be used for optimization.
4043 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
4044 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
4045 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
4046 * MLX5_TXCMP_CODE_TSO - TSO packet encountered.
4047 * MLX5_TXCMP_CODE_SINGLE - used inside functions set.
4048 * MLX5_TXCMP_CODE_EMPW - used inside functions set.
4050 * Local context variables updated.
4053 * The routine sends packets with MLX5_OPCODE_EMPW
4054 * without inlining; this is a dedicated optimized branch.
4055 * No VLAN insertion is supported.
4057 static __rte_always_inline enum mlx5_txcmp_code
4058 mlx5_tx_burst_empw_simple(struct mlx5_txq_data *__rte_restrict txq,
4059 struct rte_mbuf **__rte_restrict pkts,
4060 unsigned int pkts_n,
4061 struct mlx5_txq_local *__rte_restrict loc,
4065 * The subroutine is part of mlx5_tx_burst_single()
4066 * and sends single-segment packets with the eMPW opcode
4067 * without data inlining.
4069 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
4070 MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
4071 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
4072 MLX5_ASSERT(pkts_n > loc->pkts_sent);
4073 static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
4074 pkts += loc->pkts_sent + 1;
4075 pkts_n -= loc->pkts_sent;
4077 struct mlx5_wqe_dseg *__rte_restrict dseg;
4078 struct mlx5_wqe_eseg *__rte_restrict eseg;
4079 enum mlx5_txcmp_code ret;
4080 unsigned int part, loop;
4081 unsigned int slen = 0;
4084 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
4085 if (MLX5_TXOFF_CONFIG(TXPP)) {
4086 enum mlx5_txcmp_code wret;
4088 /* Generate WAIT for scheduling if requested. */
4089 wret = mlx5_tx_schedule_send(txq, loc, olx);
4090 if (wret == MLX5_TXCMP_CODE_EXIT)
4091 return MLX5_TXCMP_CODE_EXIT;
4092 if (wret == MLX5_TXCMP_CODE_ERROR)
4093 return MLX5_TXCMP_CODE_ERROR;
4095 part = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
4096 MLX5_MPW_MAX_PACKETS :
4097 MLX5_EMPW_MAX_PACKETS);
4098 if (unlikely(loc->elts_free < part)) {
4099 /* We do not have enough elts to save all mbufs. */
4100 if (unlikely(loc->elts_free < MLX5_EMPW_MIN_PACKETS))
4101 return MLX5_TXCMP_CODE_EXIT;
4102 /* But we are still able to send at least a minimal eMPW. */
4103 part = loc->elts_free;
4105 /* Check whether we have enough WQEs */
4106 if (unlikely(loc->wqe_free < ((2 + part + 3) / 4))) {
4107 if (unlikely(loc->wqe_free <
4108 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
4109 return MLX5_TXCMP_CODE_EXIT;
4110 part = (loc->wqe_free * 4) - 2;
4112 if (likely(part > 1))
4113 rte_prefetch0(*pkts);
4114 loc->wqe_last = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4116 * Build eMPW title WQEBB:
4117 * - Control Segment, eMPW opcode
4118 * - Ethernet Segment, no inline
4120 mlx5_tx_cseg_init(txq, loc, loc->wqe_last, part + 2,
4121 MLX5_OPCODE_ENHANCED_MPSW, olx);
4122 mlx5_tx_eseg_none(txq, loc, loc->wqe_last,
4123 olx & ~MLX5_TXOFF_CONFIG_VLAN);
4124 eseg = &loc->wqe_last->eseg;
4125 dseg = &loc->wqe_last->dseg[0];
4127 /* Store the packet length for legacy MPW. */
4128 if (MLX5_TXOFF_CONFIG(MPW))
4129 eseg->mss = rte_cpu_to_be_16
4130 (rte_pktmbuf_data_len(loc->mbuf));
4132 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
4133 #ifdef MLX5_PMD_SOFT_COUNTERS
4134 /* Update sent data bytes counter. */
4139 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
4141 if (unlikely(--loop == 0))
4143 loc->mbuf = *pkts++;
4144 if (likely(loop > 1))
4145 rte_prefetch0(*pkts);
4146 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4148 * Unroll the completion code to avoid
4149 * returning a variable value - it results in
4150 * unoptimized sequential checking in the caller.
4152 if (ret == MLX5_TXCMP_CODE_MULTI) {
4154 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4155 if (unlikely(!loc->elts_free ||
4157 return MLX5_TXCMP_CODE_EXIT;
4158 return MLX5_TXCMP_CODE_MULTI;
4160 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
4161 if (ret == MLX5_TXCMP_CODE_TSO) {
4163 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4164 if (unlikely(!loc->elts_free ||
4166 return MLX5_TXCMP_CODE_EXIT;
4167 return MLX5_TXCMP_CODE_TSO;
4169 if (ret == MLX5_TXCMP_CODE_SINGLE) {
4171 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4172 if (unlikely(!loc->elts_free ||
4174 return MLX5_TXCMP_CODE_EXIT;
4175 return MLX5_TXCMP_CODE_SINGLE;
4177 if (ret != MLX5_TXCMP_CODE_EMPW) {
4180 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4181 return MLX5_TXCMP_CODE_ERROR;
4184 * Check whether the packet parameters coincide
4185 * within the assumed eMPW batch:
4186 * - checksum settings
4187 * - metadata value
4188 * - software parser settings
4189 * - packet lengths (legacy MPW only)
4190 * - scheduling is not required
4192 if (!mlx5_tx_match_empw(txq, eseg, loc, dlen, olx)) {
4195 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4196 if (unlikely(!loc->elts_free ||
4198 return MLX5_TXCMP_CODE_EXIT;
4202 /* Packet attributes match, continue the same eMPW. */
4204 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
4205 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
4207 /* eMPW is built successfully, update loop parameters. */
4209 MLX5_ASSERT(pkts_n >= part);
4210 #ifdef MLX5_PMD_SOFT_COUNTERS
4211 /* Update sent data bytes counter. */
4212 txq->stats.obytes += slen;
4214 loc->elts_free -= part;
4215 loc->pkts_sent += part;
4216 txq->wqe_ci += (2 + part + 3) / 4;
4217 loc->wqe_free -= (2 + part + 3) / 4;
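/*
 * The accounting above reserves two segments for the Control and
 * Ethernet Segments plus one pointer Data Segment per packet,
 * rounded up to whole WQEBBs of four segments each - hence the
 * (2 + part + 3) / 4 expression.
 */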
4219 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
4220 return MLX5_TXCMP_CODE_EXIT;
4221 loc->mbuf = *pkts++;
4222 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4223 if (unlikely(ret != MLX5_TXCMP_CODE_EMPW))
4225 /* Continue sending eMPW batches. */
4231 * The routine sends packets with MLX5_OPCODE_EMPW
4232 * with inlining, optionally supports VLAN insertion.
4234 static __rte_always_inline enum mlx5_txcmp_code
4235 mlx5_tx_burst_empw_inline(struct mlx5_txq_data *__rte_restrict txq,
4236 struct rte_mbuf **__rte_restrict pkts,
4237 unsigned int pkts_n,
4238 struct mlx5_txq_local *__rte_restrict loc,
4242 * Subroutine is part of mlx5_tx_burst_single()
4243 * and sends single-segment packets with the eMPW
4244 * opcode and data inlining.
4246 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
4247 MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
4248 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
4249 MLX5_ASSERT(pkts_n > loc->pkts_sent);
4250 static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
4251 pkts += loc->pkts_sent + 1;
4252 pkts_n -= loc->pkts_sent;
4254 struct mlx5_wqe_dseg *__rte_restrict dseg;
4255 struct mlx5_wqe *__rte_restrict wqem;
4256 enum mlx5_txcmp_code ret;
4257 unsigned int room, part, nlim;
4258 unsigned int slen = 0;
4260 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
4261 if (MLX5_TXOFF_CONFIG(TXPP)) {
4262 enum mlx5_txcmp_code wret;
4264 /* Generate WAIT for scheduling if requested. */
4265 wret = mlx5_tx_schedule_send(txq, loc, olx);
4266 if (wret == MLX5_TXCMP_CODE_EXIT)
4267 return MLX5_TXCMP_CODE_EXIT;
4268 if (wret == MLX5_TXCMP_CODE_ERROR)
4269 return MLX5_TXCMP_CODE_ERROR;
4272 * Limit the number of packets in one WQE
4273 * to improve the CQE generation latency.
4275 nlim = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
4276 MLX5_MPW_INLINE_MAX_PACKETS :
4277 MLX5_EMPW_MAX_PACKETS);
4278 /* Check whether we have the minimal amount of WQEs. */
4279 if (unlikely(loc->wqe_free <
4280 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
4281 return MLX5_TXCMP_CODE_EXIT;
4282 if (likely(pkts_n > 1))
4283 rte_prefetch0(*pkts);
4284 wqem = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4286 * Build eMPW title WQEBB:
4287 * - Control Segment, eMPW opcode, zero DS
4288 * - Ethernet Segment, no inline
4290 mlx5_tx_cseg_init(txq, loc, wqem, 0,
4291 MLX5_OPCODE_ENHANCED_MPSW, olx);
4292 mlx5_tx_eseg_none(txq, loc, wqem,
4293 olx & ~MLX5_TXOFF_CONFIG_VLAN);
4294 dseg = &wqem->dseg[0];
4295 /* Store the packet length for legacy MPW. */
4296 if (MLX5_TXOFF_CONFIG(MPW))
4297 wqem->eseg.mss = rte_cpu_to_be_16
4298 (rte_pktmbuf_data_len(loc->mbuf));
4299 room = RTE_MIN(MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE,
4300 loc->wqe_free) * MLX5_WQE_SIZE -
4301 MLX5_WQE_CSEG_SIZE -
4303 /* Limit the room for legacy MPW sessions for performance. */
4304 if (MLX5_TXOFF_CONFIG(MPW))
4305 room = RTE_MIN(room,
4306 RTE_MAX(txq->inlen_empw +
4307 sizeof(dseg->bcount) +
4308 (MLX5_TXOFF_CONFIG(VLAN) ?
4309 sizeof(struct rte_vlan_hdr) : 0),
4310 MLX5_MPW_INLINE_MAX_PACKETS *
4311 MLX5_WQE_DSEG_SIZE));
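/*
 * Worked example (illustrative): with 64-byte WQEBBs and the 16-byte
 * Control and Ethernet Segments of the title WQEBB subtracted,
 * loc->wqe_free == 4 free WQEBBs yield room = 4 * 64 - 16 - 16 == 224
 * bytes for Data Segments, assuming the MLX5_WQE_SIZE_MAX cap is not
 * the limiting factor.
 */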
4312 /* Build WQEs while we have space, packets and resources. */
4315 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
4316 uint8_t *dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
4319 MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
4320 MLX5_ASSERT((room % MLX5_WQE_DSEG_SIZE) == 0);
4321 MLX5_ASSERT((uintptr_t)dseg < (uintptr_t)txq->wqes_end);
4323 * Some Tx offloads may cause an error if the
4324 * packet is not long enough; check against the
4325 * assumed minimal length.
4327 if (unlikely(dlen <= MLX5_ESEG_MIN_INLINE_SIZE)) {
4329 if (unlikely(!part))
4330 return MLX5_TXCMP_CODE_ERROR;
4332 * We have some successfully built
4333 * packet Data Segments to send.
4335 mlx5_tx_idone_empw(txq, loc, part,
4337 return MLX5_TXCMP_CODE_ERROR;
4339 /* Inline or not inline - that's the Question. */
4340 if (dlen > txq->inlen_empw ||
4341 loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE)
4343 if (MLX5_TXOFF_CONFIG(MPW)) {
4344 if (dlen > txq->inlen_send)
4348 /* Open new inline MPW session. */
4349 tlen += sizeof(dseg->bcount);
4350 dseg->bcount = RTE_BE32(0);
4352 (dseg, sizeof(dseg->bcount));
4355 * Pointer and inline descriptors must
4356 * not be mixed in legacy MPW sessions.
4358 if (wqem->dseg[0].bcount)
4362 tlen = sizeof(dseg->bcount) + dlen;
4364 /* Inline entire packet, optional VLAN insertion. */
4365 if (MLX5_TXOFF_CONFIG(VLAN) &&
4366 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
4368 * The packet length has been checked in
4369 * mlx5_tx_able_to_empw() and the packet
4370 * is guaranteed to fit into the inline length.
4373 sizeof(struct rte_vlan_hdr)) <=
4375 tlen += sizeof(struct rte_vlan_hdr);
4378 dseg = mlx5_tx_dseg_vlan(txq, loc, dseg,
4380 #ifdef MLX5_PMD_SOFT_COUNTERS
4381 /* Update sent data bytes counter. */
4382 slen += sizeof(struct rte_vlan_hdr);
4387 dseg = mlx5_tx_dseg_empw(txq, loc, dseg,
4390 if (!MLX5_TXOFF_CONFIG(MPW))
4391 tlen = RTE_ALIGN(tlen, MLX5_WSEG_SIZE);
4392 MLX5_ASSERT(room >= tlen);
4395 * Packet data are completely inlined,
4396 * free the packet immediately.
4398 rte_pktmbuf_free_seg(loc->mbuf);
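/*
 * Sizing example (illustrative): an inline entry is the 4-byte bcount
 * header plus the packet data, and for eMPW (non-legacy) sessions it
 * is padded up to the 16-byte WSEG boundary as done above. A 60-byte
 * frame takes RTE_ALIGN(4 + 60, 16) == 64 bytes of room, a 61-byte
 * frame takes RTE_ALIGN(4 + 61, 16) == 80 bytes.
 */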
4402 * Pointer and inline descriptors must
4403 * not be mixed in legacy MPW sessions.
4405 if (MLX5_TXOFF_CONFIG(MPW) &&
4407 wqem->dseg[0].bcount == RTE_BE32(0))
4410 * Non-inlinable VLAN packets are
4411 * processed outside of this routine.
4413 MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
4414 if (MLX5_TXOFF_CONFIG(VLAN))
4415 MLX5_ASSERT(!(loc->mbuf->ol_flags &
4417 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
4418 /* We have to store the mbuf in elts. */
4419 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
4420 room -= MLX5_WQE_DSEG_SIZE;
4421 /* Ring buffer wraparound is checked at the loop end. */
4424 #ifdef MLX5_PMD_SOFT_COUNTERS
4425 /* Update sent data bytes counter. */
4431 if (unlikely(!pkts_n || !loc->elts_free)) {
4433 * We have no resources/packets to
4434 * continue building descriptors.
4437 mlx5_tx_idone_empw(txq, loc, part,
4439 return MLX5_TXCMP_CODE_EXIT;
4441 loc->mbuf = *pkts++;
4442 if (likely(pkts_n > 1))
4443 rte_prefetch0(*pkts);
4444 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4446 * Unroll the completion code to avoid
4447 * returning a variable value - it results in
4448 * suboptimal subsequent checking in the caller.
4450 if (ret == MLX5_TXCMP_CODE_MULTI) {
4452 mlx5_tx_idone_empw(txq, loc, part,
4454 if (unlikely(!loc->elts_free ||
4456 return MLX5_TXCMP_CODE_EXIT;
4457 return MLX5_TXCMP_CODE_MULTI;
4459 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
4460 if (ret == MLX5_TXCMP_CODE_TSO) {
4462 mlx5_tx_idone_empw(txq, loc, part,
4464 if (unlikely(!loc->elts_free ||
4466 return MLX5_TXCMP_CODE_EXIT;
4467 return MLX5_TXCMP_CODE_TSO;
4469 if (ret == MLX5_TXCMP_CODE_SINGLE) {
4471 mlx5_tx_idone_empw(txq, loc, part,
4473 if (unlikely(!loc->elts_free ||
4475 return MLX5_TXCMP_CODE_EXIT;
4476 return MLX5_TXCMP_CODE_SINGLE;
4478 if (ret != MLX5_TXCMP_CODE_EMPW) {
4481 mlx5_tx_idone_empw(txq, loc, part,
4483 return MLX5_TXCMP_CODE_ERROR;
4485 /* Check if we have minimal room left. */
4487 if (unlikely(!nlim || room < MLX5_WQE_DSEG_SIZE))
4490 * Check whether the packet parameters coincide
4491 * within the assumed eMPW batch:
4492 * - checksum settings
4494 * - software parser settings
4495 * - packet lengths (legacy MPW only)
4496 * - scheduling is not required
4498 if (!mlx5_tx_match_empw(txq, &wqem->eseg,
4501 /* Packet attributes match, continue the same eMPW. */
4502 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
4503 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
4506 * We get here to close an existing eMPW
4507 * session and start a new one.
4509 MLX5_ASSERT(pkts_n);
4511 if (unlikely(!part))
4512 return MLX5_TXCMP_CODE_EXIT;
4513 mlx5_tx_idone_empw(txq, loc, part, slen, wqem, olx);
4514 if (unlikely(!loc->elts_free ||
4516 return MLX5_TXCMP_CODE_EXIT;
4517 /* Continue the loop with a new eMPW session. */
4523 * The routine sends packets with ordinary MLX5_OPCODE_SEND.
4524 * Data inlining and VLAN insertion are supported.
4526 static __rte_always_inline enum mlx5_txcmp_code
4527 mlx5_tx_burst_single_send(struct mlx5_txq_data *__rte_restrict txq,
4528 struct rte_mbuf **__rte_restrict pkts,
4529 unsigned int pkts_n,
4530 struct mlx5_txq_local *__rte_restrict loc,
4534 * Subroutine is part of mlx5_tx_burst_single()
4535 * and sends single-segment packets with the SEND opcode.
4537 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
4538 MLX5_ASSERT(pkts_n > loc->pkts_sent);
4539 pkts += loc->pkts_sent + 1;
4540 pkts_n -= loc->pkts_sent;
4542 struct mlx5_wqe *__rte_restrict wqe;
4543 enum mlx5_txcmp_code ret;
4545 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
4546 if (MLX5_TXOFF_CONFIG(TXPP)) {
4547 enum mlx5_txcmp_code wret;
4549 /* Generate WAIT for scheduling if requested. */
4550 wret = mlx5_tx_schedule_send(txq, loc, olx);
4551 if (wret == MLX5_TXCMP_CODE_EXIT)
4552 return MLX5_TXCMP_CODE_EXIT;
4553 if (wret == MLX5_TXCMP_CODE_ERROR)
4554 return MLX5_TXCMP_CODE_ERROR;
4556 if (MLX5_TXOFF_CONFIG(INLINE)) {
4557 unsigned int inlen, vlan = 0;
4559 inlen = rte_pktmbuf_data_len(loc->mbuf);
4560 if (MLX5_TXOFF_CONFIG(VLAN) &&
4561 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
4562 vlan = sizeof(struct rte_vlan_hdr);
4564 static_assert((sizeof(struct rte_vlan_hdr) +
4565 sizeof(struct rte_ether_hdr)) ==
4566 MLX5_ESEG_MIN_INLINE_SIZE,
4567 "invalid min inline data size");
4570 * If inlining is enabled at configuration time,
4571 * the limit must be not less than the minimal size.
4572 * Otherwise we would need an extra data size check
4573 * to avoid crashes due to length overflow.
4575 MLX5_ASSERT(txq->inlen_send >=
4576 MLX5_ESEG_MIN_INLINE_SIZE);
4577 if (inlen <= txq->inlen_send) {
4578 unsigned int seg_n, wqe_n;
4580 rte_prefetch0(rte_pktmbuf_mtod
4581 (loc->mbuf, uint8_t *));
4582 /* Check against minimal length. */
4583 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
4584 return MLX5_TXCMP_CODE_ERROR;
4585 if (loc->mbuf->ol_flags &
4586 PKT_TX_DYNF_NOINLINE) {
4588 * The hint flag not to inline packet
4589 * data is set. Check whether we can follow the hint.
4592 if ((!MLX5_TXOFF_CONFIG(EMPW) &&
4594 (MLX5_TXOFF_CONFIG(MPW) &&
4596 if (inlen <= txq->inlen_send)
4599 * The hardware requires the
4600 * minimal inline data header.
4602 goto single_min_inline;
4604 if (MLX5_TXOFF_CONFIG(VLAN) &&
4605 vlan && !txq->vlan_en) {
4607 * We must insert the VLAN tag
4608 * by software means.
4610 goto single_part_inline;
4612 goto single_no_inline;
4616 * Completely inlined packet data WQE:
4617 * - Control Segment, SEND opcode
4618 * - Ethernet Segment, no VLAN insertion
4619 * - Data inlined, VLAN optionally inserted
4620 * - Alignment to MLX5_WSEG_SIZE
4621 * Have to estimate the number of WQEBBs
4623 seg_n = (inlen + 3 * MLX5_WSEG_SIZE -
4624 MLX5_ESEG_MIN_INLINE_SIZE +
4625 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
4626 /* Check if there are enough WQEBBs. */
4627 wqe_n = (seg_n + 3) / 4;
4628 if (wqe_n > loc->wqe_free)
4629 return MLX5_TXCMP_CODE_EXIT;
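/*
 * Worked example (illustrative): inlen == 128 with no VLAN gives
 * seg_n = (128 + 3 * 16 - 18 + 15) / 16 == 10 sixteen-byte WSEGs,
 * hence wqe_n = (10 + 3) / 4 == 3 WQEBBs, which must fit into
 * loc->wqe_free.
 */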
4630 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4631 loc->wqe_last = wqe;
4632 mlx5_tx_cseg_init(txq, loc, wqe, seg_n,
4633 MLX5_OPCODE_SEND, olx);
4634 mlx5_tx_eseg_data(txq, loc, wqe,
4635 vlan, inlen, 0, olx);
4636 txq->wqe_ci += wqe_n;
4637 loc->wqe_free -= wqe_n;
4639 * Packet data are completely inlined,
4640 * free the packet immediately.
4642 rte_pktmbuf_free_seg(loc->mbuf);
4643 } else if ((!MLX5_TXOFF_CONFIG(EMPW) ||
4644 MLX5_TXOFF_CONFIG(MPW)) &&
4647 * If minimal inlining is requested, the eMPW
4648 * feature should be disabled because the data is
4649 * inlined into the Ethernet Segment, which can
4650 * not carry inlined data for eMPW since that
4651 * segment is shared by all packets.
4653 struct mlx5_wqe_dseg *__rte_restrict dseg;
4658 * The inline-mode settings require
4659 * inlining the specified amount of
4660 * data bytes into the Ethernet Segment.
4661 * We should check the free space in the
4662 * WQE ring buffer for the partial inlining.
4665 MLX5_ASSERT(txq->inlen_send >= txq->inlen_mode);
4666 MLX5_ASSERT(inlen > txq->inlen_mode);
4667 MLX5_ASSERT(txq->inlen_mode >=
4668 MLX5_ESEG_MIN_INLINE_SIZE);
4670 * Check whether there are enough free WQEBBs:
4672 * - Ethernet Segment
4673 * - First Segment of inlined Ethernet data
4674 * - ... data continued ...
4675 * - Finishing Data Segment of pointer type
4677 ds = (MLX5_WQE_CSEG_SIZE +
4678 MLX5_WQE_ESEG_SIZE +
4679 MLX5_WQE_DSEG_SIZE +
4680 txq->inlen_mode -
4681 MLX5_ESEG_MIN_INLINE_SIZE +
4682 MLX5_WQE_DSEG_SIZE +
4683 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
4684 if (loc->wqe_free < ((ds + 3) / 4))
4685 return MLX5_TXCMP_CODE_EXIT;
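/*
 * Worked example (illustrative): the minimal txq->inlen_mode == 18
 * gives ds = (16 + 16 + 16 + 18 - 18 + 16 + 15) / 16 == 4 WSEGs,
 * i.e. (4 + 3) / 4 == 1 WQEBB for the whole SEND WQE with the
 * minimal inline part and the trailing pointer Data Segment.
 */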
4687 * Build the ordinary SEND WQE:
4689 * - Ethernet Segment, inline inlen_mode bytes
4690 * - Data Segment of pointer type
4692 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4693 loc->wqe_last = wqe;
4694 mlx5_tx_cseg_init(txq, loc, wqe, ds,
4695 MLX5_OPCODE_SEND, olx);
4696 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan,
4699 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
4700 txq->inlen_mode - vlan;
4701 inlen -= txq->inlen_mode;
4702 mlx5_tx_dseg_ptr(txq, loc, dseg,
4705 * WQE is built, update the loop parameters
4706 * and go to the next packet.
4708 txq->wqe_ci += (ds + 3) / 4;
4709 loc->wqe_free -= (ds + 3) / 4;
4710 /* We have to store the mbuf in elts. */
4711 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
4712 txq->elts[txq->elts_head++ & txq->elts_m] =
4720 * Partially inlined packet data WQE: we have
4721 * some space in the title WQEBB, so we can fill
4722 * it with some packet data. It takes one WQEBB
4723 * and it is available - no extra space check:
4724 * - Control Segment, SEND opcode
4725 * - Ethernet Segment, no VLAN insertion
4726 * - MLX5_ESEG_MIN_INLINE_SIZE bytes of Data
4727 * - Data Segment, pointer type
4729 * We also get here if VLAN insertion is not
4730 * supported by the HW but inlining is enabled.
4733 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4734 loc->wqe_last = wqe;
4735 mlx5_tx_cseg_init(txq, loc, wqe, 4,
4736 MLX5_OPCODE_SEND, olx);
4737 mlx5_tx_eseg_dmin(txq, loc, wqe, vlan, olx);
4738 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
4739 MLX5_ESEG_MIN_INLINE_SIZE - vlan;
4741 * The length check is performed above, by
4742 * comparing with txq->inlen_send. We should
4743 * not get overflow here.
4745 MLX5_ASSERT(inlen > MLX5_ESEG_MIN_INLINE_SIZE);
4746 dlen = inlen - MLX5_ESEG_MIN_INLINE_SIZE;
4747 mlx5_tx_dseg_ptr(txq, loc, &wqe->dseg[1],
4751 /* We have to store the mbuf in elts. */
4752 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
4753 txq->elts[txq->elts_head++ & txq->elts_m] =
4757 #ifdef MLX5_PMD_SOFT_COUNTERS
4758 /* Update sent data bytes counter. */
4759 txq->stats.obytes += vlan +
4760 rte_pktmbuf_data_len(loc->mbuf);
4764 * No inlining at all - this means that saving CPU
4765 * cycles was prioritized at configuration time and
4766 * we should not copy any packet data into the WQE.
4768 * SEND WQE, one WQEBB:
4769 * - Control Segment, SEND opcode
4770 * - Ethernet Segment, optional VLAN, no inline
4771 * - Data Segment, pointer type
4774 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4775 loc->wqe_last = wqe;
4776 mlx5_tx_cseg_init(txq, loc, wqe, 3,
4777 MLX5_OPCODE_SEND, olx);
4778 mlx5_tx_eseg_none(txq, loc, wqe, olx);
4780 (txq, loc, &wqe->dseg[0],
4781 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
4782 rte_pktmbuf_data_len(loc->mbuf), olx);
4786 * We should not store the mbuf pointer in elts
4787 * if no inlining is configured; this is done
4788 * by the calling routine in a batch copy.
4790 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
4792 #ifdef MLX5_PMD_SOFT_COUNTERS
4793 /* Update sent data bytes counter. */
4794 txq->stats.obytes += rte_pktmbuf_data_len(loc->mbuf);
4795 if (MLX5_TXOFF_CONFIG(VLAN) &&
4796 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
4797 txq->stats.obytes +=
4798 sizeof(struct rte_vlan_hdr);
4803 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
4804 return MLX5_TXCMP_CODE_EXIT;
4805 loc->mbuf = *pkts++;
4807 rte_prefetch0(*pkts);
4808 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4809 if (unlikely(ret != MLX5_TXCMP_CODE_SINGLE))
4815 static __rte_always_inline enum mlx5_txcmp_code
4816 mlx5_tx_burst_single(struct mlx5_txq_data *__rte_restrict txq,
4817 struct rte_mbuf **__rte_restrict pkts,
4818 unsigned int pkts_n,
4819 struct mlx5_txq_local *__rte_restrict loc,
4822 enum mlx5_txcmp_code ret;
4824 ret = mlx5_tx_able_to_empw(txq, loc, olx, false);
4825 if (ret == MLX5_TXCMP_CODE_SINGLE)
4827 MLX5_ASSERT(ret == MLX5_TXCMP_CODE_EMPW);
4829 /* Optimize for inline/no inline eMPW send. */
4830 ret = (MLX5_TXOFF_CONFIG(INLINE)) ?
4831 mlx5_tx_burst_empw_inline
4832 (txq, pkts, pkts_n, loc, olx) :
4833 mlx5_tx_burst_empw_simple
4834 (txq, pkts, pkts_n, loc, olx);
4835 if (ret != MLX5_TXCMP_CODE_SINGLE)
4837 /* The resources to send one packet should remain. */
4838 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
4840 ret = mlx5_tx_burst_single_send(txq, pkts, pkts_n, loc, olx);
4841 MLX5_ASSERT(ret != MLX5_TXCMP_CODE_SINGLE);
4842 if (ret != MLX5_TXCMP_CODE_EMPW)
4844 /* The resources to send one packet should remain. */
4845 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
4850 * DPDK Tx callback template. This configured template is
4851 * used to generate routines optimized for the specified offload setup.
4852 * One of these generated functions is chosen at SQ configuration
4856 * Generic pointer to TX queue structure.
4858 * Packets to transmit.
4860 * Number of packets in array.
4862 * Configured offloads mask, representing the bits of MLX5_TXOFF_CONFIG_xxx
4863 * values. Should be static to take the compile-time static configuration
4867 * Number of packets successfully transmitted (<= pkts_n).
4869 static __rte_always_inline uint16_t
4870 mlx5_tx_burst_tmpl(struct mlx5_txq_data *__rte_restrict txq,
4871 struct rte_mbuf **__rte_restrict pkts,
4875 struct mlx5_txq_local loc;
4876 enum mlx5_txcmp_code ret;
4879 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
4880 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
4881 if (unlikely(!pkts_n))
4885 loc.wqe_last = NULL;
4888 loc.pkts_loop = loc.pkts_sent;
4890 * Check if there are some CQEs, if any:
4891 * - process encountered errors
4892 * - process the completed WQEs
4893 * - free related mbufs
4894 * - doorbell the NIC about processed CQEs
4896 rte_prefetch0(*(pkts + loc.pkts_sent));
4897 mlx5_tx_handle_completion(txq, olx);
4899 * Calculate the number of available resources - elts and WQEs.
4900 * There are two possible different scenarios:
4901 * - no data inlining into WQEs, one WQEBB may contain up to
4902 * four packets, in this case elts become the scarce resource
4903 * - data inlining into WQEs, one packet may require multiple
4904 * WQEBBs, the WQEs become the limiting factor.
4906 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
4907 loc.elts_free = txq->elts_s -
4908 (uint16_t)(txq->elts_head - txq->elts_tail);
4909 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
4910 loc.wqe_free = txq->wqe_s -
4911 (uint16_t)(txq->wqe_ci - txq->wqe_pi);
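/*
 * Worked example (illustrative): the indices are free-running
 * uint16_t counters, so the in-flight amount is their wrapped
 * difference. E.g. wqe_ci == 5 (after wrap) and wqe_pi == 65531
 * give (uint16_t)(5 - 65531) == 10 WQEBBs still owned by the
 * hardware, hence loc.wqe_free == txq->wqe_s - 10.
 */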
4912 if (unlikely(!loc.elts_free || !loc.wqe_free))
4916 * Fetch the packet from array. Usually this is
4917 * the first packet in a series of multi/single
4920 loc.mbuf = *(pkts + loc.pkts_sent);
4921 /* Dedicated branch for multi-segment packets. */
4922 if (MLX5_TXOFF_CONFIG(MULTI) &&
4923 unlikely(NB_SEGS(loc.mbuf) > 1)) {
4925 * Multi-segment packet encountered.
4926 * Hardware is able to process it only
4927 * with SEND/TSO opcodes, one packet
4928 * per WQE, so do it in the dedicated routine.
4931 MLX5_ASSERT(loc.pkts_sent >= loc.pkts_copy);
4932 part = loc.pkts_sent - loc.pkts_copy;
4933 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
4935 * There are some single-segment mbufs not
4936 * stored in elts. The mbufs must be in the
4937 * same order as WQEs, so we must copy the
4938 * mbufs to elts here, before the coming
4939 * multi-segment packet mbuf is appended.
4941 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy,
4943 loc.pkts_copy = loc.pkts_sent;
4945 MLX5_ASSERT(pkts_n > loc.pkts_sent);
4946 ret = mlx5_tx_burst_mseg(txq, pkts, pkts_n, &loc, olx);
4947 if (!MLX5_TXOFF_CONFIG(INLINE))
4948 loc.pkts_copy = loc.pkts_sent;
4950 * These return code checks are supposed
4951 * to be optimized out due to routine inlining.
4953 if (ret == MLX5_TXCMP_CODE_EXIT) {
4955 * The routine returns this code when
4956 * all packets are sent or there are not
4957 * enough resources to complete the request.
4961 if (ret == MLX5_TXCMP_CODE_ERROR) {
4963 * The routine returns this code when
4964 * an error occurred in the incoming packets
4967 txq->stats.oerrors++;
4970 if (ret == MLX5_TXCMP_CODE_SINGLE) {
4972 * The single-segment packet was encountered
4973 * in the array; try to send it in the most
4974 * optimized way, possibly engaging eMPW.
4976 goto enter_send_single;
4978 if (MLX5_TXOFF_CONFIG(TSO) &&
4979 ret == MLX5_TXCMP_CODE_TSO) {
4981 * The single-segment TSO packet was
4982 * encountered in the array.
4984 goto enter_send_tso;
4986 /* We must not get here. Something went wrong. */
4988 txq->stats.oerrors++;
4991 /* Dedicated branch for single-segment TSO packets. */
4992 if (MLX5_TXOFF_CONFIG(TSO) &&
4993 unlikely(loc.mbuf->ol_flags & PKT_TX_TCP_SEG)) {
4995 * TSO might require a special way of inlining
4996 * (dedicated parameters) and is sent with the
4997 * MLX5_OPCODE_TSO opcode only, so provide this
4998 * in a dedicated branch.
5001 MLX5_ASSERT(NB_SEGS(loc.mbuf) == 1);
5002 MLX5_ASSERT(pkts_n > loc.pkts_sent);
5003 ret = mlx5_tx_burst_tso(txq, pkts, pkts_n, &loc, olx);
5005 * These return code checks are supposed
5006 * to be optimized out due to routine inlining.
5008 if (ret == MLX5_TXCMP_CODE_EXIT)
5010 if (ret == MLX5_TXCMP_CODE_ERROR) {
5011 txq->stats.oerrors++;
5014 if (ret == MLX5_TXCMP_CODE_SINGLE)
5015 goto enter_send_single;
5016 if (MLX5_TXOFF_CONFIG(MULTI) &&
5017 ret == MLX5_TXCMP_CODE_MULTI) {
5019 * The multi-segment packet was
5020 * encountered in the array.
5022 goto enter_send_multi;
5024 /* We must not get here. Something went wrong. */
5026 txq->stats.oerrors++;
5030 * The dedicated branch for single-segment packets
5031 * without TSO. Often these can be sent using
5032 * MLX5_OPCODE_EMPW with multiple packets in one WQE.
5033 * The routine builds the WQEs until it encounters
5034 * a TSO or multi-segment packet (in case these
5035 * offloads are requested at SQ configuration time).
5038 MLX5_ASSERT(pkts_n > loc.pkts_sent);
5039 ret = mlx5_tx_burst_single(txq, pkts, pkts_n, &loc, olx);
5041 * These return code checks are supposed
5042 * to be optimized out due to routine inlining.
5044 if (ret == MLX5_TXCMP_CODE_EXIT)
5046 if (ret == MLX5_TXCMP_CODE_ERROR) {
5047 txq->stats.oerrors++;
5050 if (MLX5_TXOFF_CONFIG(MULTI) &&
5051 ret == MLX5_TXCMP_CODE_MULTI) {
5053 * The multi-segment packet was
5054 * encountered in the array.
5056 goto enter_send_multi;
5058 if (MLX5_TXOFF_CONFIG(TSO) &&
5059 ret == MLX5_TXCMP_CODE_TSO) {
5061 * The single-segment TSO packet was
5062 * encountered in the array.
5064 goto enter_send_tso;
5066 /* We must not get here. Something went wrong. */
5068 txq->stats.oerrors++;
5072 * Main Tx loop is completed, do the rest:
5073 * - set completion request if thresholds are reached
5074 * - doorbell the hardware
5075 * - copy the rest of mbufs to elts (if any)
5077 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE) ||
5078 loc.pkts_sent >= loc.pkts_copy);
5079 /* Take a shortcut if nothing was sent. */
5080 if (unlikely(loc.pkts_sent == loc.pkts_loop))
5082 /* Request CQE generation if limits are reached. */
5083 mlx5_tx_request_completion(txq, &loc, olx);
5085 * Ring the QP doorbell immediately after WQE building completion
5086 * to improve latencies. The purely software-related data treatment
5087 * can be completed after the doorbell. Tx CQEs for this SQ are
5088 * processed in this thread only, by polling.
5090 * The rdma core library can map doorbell register in two ways,
5091 * depending on the environment variable "MLX5_SHUT_UP_BF":
5093 * - as regular cached memory, the variable is either missing or
5094 * set to zero. This type of mapping may cause significant
5095 * doorbell register write latency and requires an explicit
5096 * memory write barrier to mitigate this issue and prevent
5099 * - as non-cached memory, the variable is present and set to a
5100 * non-"0" value. This type of mapping may cause a performance
5101 * impact under heavy load conditions but the explicit write
5102 * memory barrier is not required and it may improve core
5105 * - the legacy behaviour (prior to the 19.08 release) was to use
5106 * some heuristics to decide whether a write memory barrier should
5107 * be performed. This behaviour is supported by specifying
5108 * tx_db_nc=2; the write barrier is skipped if the application
5109 * provides the full recommended burst of packets, assuming
5110 * the next packets are coming and the write barrier
5111 * will be issued on the next burst (after descriptor writing,
5114 mlx5_tx_dbrec_cond_wmb(txq, loc.wqe_last, !txq->db_nc &&
5115 (!txq->db_heu || pkts_n % MLX5_TX_DEFAULT_BURST));
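/*
 * Illustration of the condition above: with the cached doorbell
 * mapping (txq->db_nc == 0), heuristics enabled (txq->db_heu != 0)
 * and a full recommended burst (pkts_n % MLX5_TX_DEFAULT_BURST == 0)
 * the third argument evaluates to false and the write memory barrier
 * is skipped; in all other cached-mapping cases it is issued.
 */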
5116 /* Not all of the mbufs may be stored into elts yet. */
5117 part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent - loc.pkts_copy;
5118 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
5120 * There are some single-segment mbufs not stored in elts.
5121 * This can happen only if the last packet was single-segment.
5122 * The copying is gathered into one place because it is
5123 * a good opportunity to optimize it with SIMD.
5124 * Unfortunately, if inlining is enabled, gaps in the
5125 * pointer array may happen due to early freeing of the
5128 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy, part, olx);
5129 loc.pkts_copy = loc.pkts_sent;
5131 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
5132 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
5133 if (pkts_n > loc.pkts_sent) {
5135 * If the burst size is large there might be not enough CQEs
5136 * fetched from the completion queue and not enough resources
5137 * freed to send all the packets.
5142 #ifdef MLX5_PMD_SOFT_COUNTERS
5143 /* Increment sent packets counter. */
5144 txq->stats.opackets += loc.pkts_sent;
5146 return loc.pkts_sent;
5149 /* Generate routines with Enhanced Multi-Packet Write support. */
5150 MLX5_TXOFF_DECL(full_empw,
5151 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_EMPW)
5153 MLX5_TXOFF_DECL(none_empw,
5154 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW)
5156 MLX5_TXOFF_DECL(md_empw,
5157 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5159 MLX5_TXOFF_DECL(mt_empw,
5160 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5161 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5163 MLX5_TXOFF_DECL(mtsc_empw,
5164 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5165 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5166 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5168 MLX5_TXOFF_DECL(mti_empw,
5169 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5170 MLX5_TXOFF_CONFIG_INLINE |
5171 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5173 MLX5_TXOFF_DECL(mtv_empw,
5174 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5175 MLX5_TXOFF_CONFIG_VLAN |
5176 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5178 MLX5_TXOFF_DECL(mtiv_empw,
5179 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5180 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5181 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5183 MLX5_TXOFF_DECL(sc_empw,
5184 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5185 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5187 MLX5_TXOFF_DECL(sci_empw,
5188 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5189 MLX5_TXOFF_CONFIG_INLINE |
5190 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5192 MLX5_TXOFF_DECL(scv_empw,
5193 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5194 MLX5_TXOFF_CONFIG_VLAN |
5195 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5197 MLX5_TXOFF_DECL(sciv_empw,
5198 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5199 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5200 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5202 MLX5_TXOFF_DECL(i_empw,
5203 MLX5_TXOFF_CONFIG_INLINE |
5204 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5206 MLX5_TXOFF_DECL(v_empw,
5207 MLX5_TXOFF_CONFIG_VLAN |
5208 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5210 MLX5_TXOFF_DECL(iv_empw,
5211 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5212 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5214 /* Generate routines without Enhanced Multi-Packet Write support. */
5215 MLX5_TXOFF_DECL(full,
5216 MLX5_TXOFF_CONFIG_FULL)
5218 MLX5_TXOFF_DECL(none,
5219 MLX5_TXOFF_CONFIG_NONE)
5222 MLX5_TXOFF_CONFIG_METADATA)
5225 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5226 MLX5_TXOFF_CONFIG_METADATA)
5228 MLX5_TXOFF_DECL(mtsc,
5229 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5230 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5231 MLX5_TXOFF_CONFIG_METADATA)
5233 MLX5_TXOFF_DECL(mti,
5234 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5235 MLX5_TXOFF_CONFIG_INLINE |
5236 MLX5_TXOFF_CONFIG_METADATA)
5239 MLX5_TXOFF_DECL(mtv,
5240 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5241 MLX5_TXOFF_CONFIG_VLAN |
5242 MLX5_TXOFF_CONFIG_METADATA)
5245 MLX5_TXOFF_DECL(mtiv,
5246 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5247 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5248 MLX5_TXOFF_CONFIG_METADATA)
5251 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5252 MLX5_TXOFF_CONFIG_METADATA)
5254 MLX5_TXOFF_DECL(sci,
5255 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5256 MLX5_TXOFF_CONFIG_INLINE |
5257 MLX5_TXOFF_CONFIG_METADATA)
5260 MLX5_TXOFF_DECL(scv,
5261 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5262 MLX5_TXOFF_CONFIG_VLAN |
5263 MLX5_TXOFF_CONFIG_METADATA)
5266 MLX5_TXOFF_DECL(sciv,
5267 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5268 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5269 MLX5_TXOFF_CONFIG_METADATA)
5272 MLX5_TXOFF_CONFIG_INLINE |
5273 MLX5_TXOFF_CONFIG_METADATA)
5276 MLX5_TXOFF_CONFIG_VLAN |
5277 MLX5_TXOFF_CONFIG_METADATA)
5280 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5281 MLX5_TXOFF_CONFIG_METADATA)
5283 /* Generate routines with timestamp scheduling. */
5284 MLX5_TXOFF_DECL(full_ts_nompw,
5285 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_TXPP)
5287 MLX5_TXOFF_DECL(full_ts_nompwi,
5288 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5289 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5290 MLX5_TXOFF_CONFIG_VLAN | MLX5_TXOFF_CONFIG_METADATA |
5291 MLX5_TXOFF_CONFIG_TXPP)
5293 MLX5_TXOFF_DECL(full_ts,
5294 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_TXPP |
5295 MLX5_TXOFF_CONFIG_EMPW)
5297 MLX5_TXOFF_DECL(full_ts_noi,
5298 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5299 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5300 MLX5_TXOFF_CONFIG_VLAN | MLX5_TXOFF_CONFIG_METADATA |
5301 MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)
5303 MLX5_TXOFF_DECL(none_ts,
5304 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_TXPP |
5305 MLX5_TXOFF_CONFIG_EMPW)
5307 MLX5_TXOFF_DECL(mdi_ts,
5308 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_METADATA |
5309 MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)
5311 MLX5_TXOFF_DECL(mti_ts,
5312 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5313 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_METADATA |
5314 MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)
5316 MLX5_TXOFF_DECL(mtiv_ts,
5317 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5318 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5319 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_TXPP |
5320 MLX5_TXOFF_CONFIG_EMPW)
5323 * Generate routines with Legacy Multi-Packet Write support.
5324 * This mode is supported by ConnectX-4 Lx only and imposes
5325 * offload limitations; the following are not supported:
5326 * - ACL/Flows (metadata are becoming meaningless)
5327 * - WQE Inline headers
5328 * - SRIOV (E-Switch offloads)
5330 * - tunnel encapsulation/decapsulation
5333 MLX5_TXOFF_DECL(none_mpw,
5334 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW |
5335 MLX5_TXOFF_CONFIG_MPW)
5337 MLX5_TXOFF_DECL(mci_mpw,
5338 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5339 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5340 MLX5_TXOFF_CONFIG_MPW)
5342 MLX5_TXOFF_DECL(mc_mpw,
5343 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5344 MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW)
5346 MLX5_TXOFF_DECL(i_mpw,
5347 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5348 MLX5_TXOFF_CONFIG_MPW)
5351 * Array of declared and compiled Tx burst functions and the
5352 * corresponding supported offload sets. The array is used to select the
5353 * Tx burst function for the offload set specified at Tx queue configuration time.
5356 eth_tx_burst_t func;
5359 MLX5_TXOFF_INFO(full_empw,
5360 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5361 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5362 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5363 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5365 MLX5_TXOFF_INFO(none_empw,
5366 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW)
5368 MLX5_TXOFF_INFO(md_empw,
5369 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5371 MLX5_TXOFF_INFO(mt_empw,
5372 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5373 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5375 MLX5_TXOFF_INFO(mtsc_empw,
5376 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5377 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5378 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5380 MLX5_TXOFF_INFO(mti_empw,
5381 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5382 MLX5_TXOFF_CONFIG_INLINE |
5383 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5385 MLX5_TXOFF_INFO(mtv_empw,
5386 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5387 MLX5_TXOFF_CONFIG_VLAN |
5388 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5390 MLX5_TXOFF_INFO(mtiv_empw,
5391 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5392 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5393 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5395 MLX5_TXOFF_INFO(sc_empw,
5396 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5397 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5399 MLX5_TXOFF_INFO(sci_empw,
5400 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5401 MLX5_TXOFF_CONFIG_INLINE |
5402 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5404 MLX5_TXOFF_INFO(scv_empw,
5405 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5406 MLX5_TXOFF_CONFIG_VLAN |
5407 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5409 MLX5_TXOFF_INFO(sciv_empw,
5410 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5411 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5412 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5414 MLX5_TXOFF_INFO(i_empw,
5415 MLX5_TXOFF_CONFIG_INLINE |
5416 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5418 MLX5_TXOFF_INFO(v_empw,
5419 MLX5_TXOFF_CONFIG_VLAN |
5420 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5422 MLX5_TXOFF_INFO(iv_empw,
5423 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5424 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5426 MLX5_TXOFF_INFO(full_ts_nompw,
5427 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_TXPP)
5429 MLX5_TXOFF_INFO(full_ts_nompwi,
5430 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5431 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5432 MLX5_TXOFF_CONFIG_VLAN | MLX5_TXOFF_CONFIG_METADATA |
5433 MLX5_TXOFF_CONFIG_TXPP)
5435 MLX5_TXOFF_INFO(full_ts,
5436 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_TXPP |
5437 MLX5_TXOFF_CONFIG_EMPW)
5439 MLX5_TXOFF_INFO(full_ts_noi,
5440 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5441 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5442 MLX5_TXOFF_CONFIG_VLAN | MLX5_TXOFF_CONFIG_METADATA |
5443 MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)
5445 MLX5_TXOFF_INFO(none_ts,
5446 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_TXPP |
5447 MLX5_TXOFF_CONFIG_EMPW)
5449 MLX5_TXOFF_INFO(mdi_ts,
5450 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_METADATA |
5451 MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)
5453 MLX5_TXOFF_INFO(mti_ts,
5454 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5455 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_METADATA |
5456 MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)
5458 MLX5_TXOFF_INFO(mtiv_ts,
5459 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5460 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5461 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_TXPP |
5462 MLX5_TXOFF_CONFIG_EMPW)
5464 MLX5_TXOFF_INFO(full,
5465 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5466 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5467 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5468 MLX5_TXOFF_CONFIG_METADATA)
5470 MLX5_TXOFF_INFO(none,
5471 MLX5_TXOFF_CONFIG_NONE)
5474 MLX5_TXOFF_CONFIG_METADATA)
5477 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5478 MLX5_TXOFF_CONFIG_METADATA)
5480 MLX5_TXOFF_INFO(mtsc,
5481 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5482 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5483 MLX5_TXOFF_CONFIG_METADATA)
5485 MLX5_TXOFF_INFO(mti,
5486 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5487 MLX5_TXOFF_CONFIG_INLINE |
5488 MLX5_TXOFF_CONFIG_METADATA)
5490 MLX5_TXOFF_INFO(mtv,
5491 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5492 MLX5_TXOFF_CONFIG_VLAN |
5493 MLX5_TXOFF_CONFIG_METADATA)
5495 MLX5_TXOFF_INFO(mtiv,
5496 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5497 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5498 MLX5_TXOFF_CONFIG_METADATA)
5501 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5502 MLX5_TXOFF_CONFIG_METADATA)
5504 MLX5_TXOFF_INFO(sci,
5505 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5506 MLX5_TXOFF_CONFIG_INLINE |
5507 MLX5_TXOFF_CONFIG_METADATA)
5509 MLX5_TXOFF_INFO(scv,
5510 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5511 MLX5_TXOFF_CONFIG_VLAN |
5512 MLX5_TXOFF_CONFIG_METADATA)
5514 MLX5_TXOFF_INFO(sciv,
5515 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5516 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5517 MLX5_TXOFF_CONFIG_METADATA)
5520 MLX5_TXOFF_CONFIG_INLINE |
5521 MLX5_TXOFF_CONFIG_METADATA)
5524 MLX5_TXOFF_CONFIG_VLAN |
5525 MLX5_TXOFF_CONFIG_METADATA)
5528 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5529 MLX5_TXOFF_CONFIG_METADATA)
5531 MLX5_TXOFF_INFO(none_mpw,
5532 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW |
5533 MLX5_TXOFF_CONFIG_MPW)
5535 MLX5_TXOFF_INFO(mci_mpw,
5536 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5537 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5538 MLX5_TXOFF_CONFIG_MPW)
5540 MLX5_TXOFF_INFO(mc_mpw,
5541 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5542 MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW)
5544 MLX5_TXOFF_INFO(i_mpw,
5545 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5546 MLX5_TXOFF_CONFIG_MPW)
5550 * Configure the Tx function to use. The routine checks the configured
5551 * Tx offloads for the device and selects the appropriate Tx burst
5552 * routine. There are multiple Tx burst routines compiled from
5553 * the same template in the most optimal way for the dedicated
5557 * Pointer to private data structure.
5560 * Pointer to selected Tx burst function.
5563 mlx5_select_tx_function(struct rte_eth_dev *dev)
5565 struct mlx5_priv *priv = dev->data->dev_private;
5566 struct mlx5_dev_config *config = &priv->config;
5567 uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
5568 unsigned int diff = 0, olx = 0, i, m;
5570 static_assert(MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE <=
5571 MLX5_DSEG_MAX, "invalid WQE max size");
5572 static_assert(MLX5_WQE_CSEG_SIZE == MLX5_WSEG_SIZE,
5573 "invalid WQE Control Segment size");
5574 static_assert(MLX5_WQE_ESEG_SIZE == MLX5_WSEG_SIZE,
5575 "invalid WQE Ethernet Segment size");
5576 static_assert(MLX5_WQE_DSEG_SIZE == MLX5_WSEG_SIZE,
5577 "invalid WQE Data Segment size");
5578 static_assert(MLX5_WQE_SIZE == 4 * MLX5_WSEG_SIZE,
5579 "invalid WQE size");
5581 if (tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
5582 /* We should support Multi-Segment Packets. */
5583 olx |= MLX5_TXOFF_CONFIG_MULTI;
5585 if (tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
5586 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
5587 DEV_TX_OFFLOAD_GRE_TNL_TSO |
5588 DEV_TX_OFFLOAD_IP_TNL_TSO |
5589 DEV_TX_OFFLOAD_UDP_TNL_TSO)) {
5590 /* We should support TCP Send Offload. */
5591 olx |= MLX5_TXOFF_CONFIG_TSO;
5593 if (tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO |
5594 DEV_TX_OFFLOAD_UDP_TNL_TSO |
5595 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
5596 /* We should support Software Parser for Tunnels. */
5597 olx |= MLX5_TXOFF_CONFIG_SWP;
5599 if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
5600 DEV_TX_OFFLOAD_UDP_CKSUM |
5601 DEV_TX_OFFLOAD_TCP_CKSUM |
5602 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
5603 /* We should support IP/TCP/UDP Checksums. */
5604 olx |= MLX5_TXOFF_CONFIG_CSUM;
5606 if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT) {
5607 /* We should support VLAN insertion. */
5608 olx |= MLX5_TXOFF_CONFIG_VLAN;
5610 if (tx_offloads & DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP &&
5611 rte_mbuf_dynflag_lookup
5612 (RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL) >= 0 &&
5613 rte_mbuf_dynfield_lookup
5614 (RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL) >= 0) {
5615 /* Offload configured, dynamic entities registered. */
5616 olx |= MLX5_TXOFF_CONFIG_TXPP;
5618 if (priv->txqs_n && (*priv->txqs)[0]) {
5619 struct mlx5_txq_data *txd = (*priv->txqs)[0];
5621 if (txd->inlen_send) {
5623 * Check the data inline requirements. Data inlining
5624 * is enabled on a per-device basis, so we can check
5625 * the first Tx queue only.
5627 * If the device does not support VLAN insertion in the WQE
5628 * and some queues are requested to perform VLAN
5629 * insertion offload, then inlining must be enabled.
5631 olx |= MLX5_TXOFF_CONFIG_INLINE;
5634 if (config->mps == MLX5_MPW_ENHANCED &&
5635 config->txq_inline_min <= 0) {
5637 * The NIC supports Enhanced Multi-Packet Write
5638 * and does not require minimal inline data.
5640 olx |= MLX5_TXOFF_CONFIG_EMPW;
5642 if (rte_flow_dynf_metadata_avail()) {
5643 /* We should support Flow metadata. */
5644 olx |= MLX5_TXOFF_CONFIG_METADATA;
5646 if (config->mps == MLX5_MPW) {
5648 * The NIC supports Legacy Multi-Packet Write.
5649 * The MLX5_TXOFF_CONFIG_MPW controls the
5650 * descriptor building method in combination
5651 * with MLX5_TXOFF_CONFIG_EMPW.
5653 if (!(olx & (MLX5_TXOFF_CONFIG_TSO |
5654 MLX5_TXOFF_CONFIG_SWP |
5655 MLX5_TXOFF_CONFIG_VLAN |
5656 MLX5_TXOFF_CONFIG_METADATA)))
5657 olx |= MLX5_TXOFF_CONFIG_EMPW |
5658 MLX5_TXOFF_CONFIG_MPW;
5661 * Scan the routine table to find the routine that
5662 * satisfies the requested offloads with minimal extras.
5664 m = RTE_DIM(txoff_func);
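/*
 * Selection example (illustrative): if olx requests only
 * MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW, the md_empw
 * entry matches exactly and wins immediately. Were it absent, a
 * superset entry such as mt_empw would be scored by
 * __builtin_popcountl() of its extra bits (MULTI and TSO, i.e. 2)
 * and the candidate with the fewest extras would be kept.
 */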
5665 for (i = 0; i < RTE_DIM(txoff_func); i++) {
5668 tmp = txoff_func[i].olx;
5670 /* Meets the requested offloads exactly. */
5674 if ((tmp & olx) != olx) {
5675 /* Does not meet requested offloads at all. */
5678 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_MPW)
5679 /* Do not enable legacy MPW if not configured. */
5681 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_EMPW)
5682 /* Do not enable eMPW if not configured. */
5684 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_INLINE)
5685 /* Do not enable inlining if not configured. */
5687 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_TXPP)
5688 /* Do not enable scheduling if not configured. */
5691 * Some routine meets the requirements.
5692 * Check whether it has the minimal amount
5693 * of not-requested offloads.
5695 tmp = __builtin_popcountl(tmp & ~olx);
5696 if (m >= RTE_DIM(txoff_func) || tmp < diff) {
5697 /* First or better match, save and continue. */
5703 tmp = txoff_func[i].olx ^ txoff_func[m].olx;
5704 if (__builtin_ffsl(txoff_func[i].olx & ~tmp) <
5705 __builtin_ffsl(txoff_func[m].olx & ~tmp)) {
5706 /* Lighter non-requested offload. */
5711 if (m >= RTE_DIM(txoff_func)) {
5712 DRV_LOG(DEBUG, "port %u has no selected Tx function"
5713 " for requested offloads %04X",
5714 dev->data->port_id, olx);
5717 DRV_LOG(DEBUG, "port %u has selected Tx function"
5718 " supporting offloads %04X/%04X",
5719 dev->data->port_id, olx, txoff_func[m].olx);
5720 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MULTI)
5721 DRV_LOG(DEBUG, "\tMULTI (multi segment)");
5722 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_TSO)
5723 DRV_LOG(DEBUG, "\tTSO (TCP send offload)");
5724 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_SWP)
5725 DRV_LOG(DEBUG, "\tSWP (software parser)");
5726 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_CSUM)
5727 DRV_LOG(DEBUG, "\tCSUM (checksum offload)");
5728 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_INLINE)
5729 DRV_LOG(DEBUG, "\tINLIN (inline data)");
5730 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_VLAN)
5731 DRV_LOG(DEBUG, "\tVLANI (VLAN insertion)");
5732 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_METADATA)
5733 DRV_LOG(DEBUG, "\tMETAD (tx Flow metadata)");
5734 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_TXPP)
5735 DRV_LOG(DEBUG, "\tTXPP (tx Scheduling)");
5736 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_EMPW) {
5737 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MPW)
5738 DRV_LOG(DEBUG, "\tMPW (Legacy MPW)");
5740 DRV_LOG(DEBUG, "\tEMPW (Enhanced MPW)");
5742 return txoff_func[m].func;
5746 * DPDK callback to get the TX queue information
5749 * Pointer to the device structure.
5751 * @param tx_queue_id
5752 * Tx queue identifier.
5755 * Pointer to the TX queue information structure.
5762 mlx5_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
5763 struct rte_eth_txq_info *qinfo)
5765 struct mlx5_priv *priv = dev->data->dev_private;
5766 struct mlx5_txq_data *txq = (*priv->txqs)[tx_queue_id];
5767 struct mlx5_txq_ctrl *txq_ctrl =
5768 container_of(txq, struct mlx5_txq_ctrl, txq);
5772 qinfo->nb_desc = txq->elts_s;
5773 qinfo->conf.tx_thresh.pthresh = 0;
5774 qinfo->conf.tx_thresh.hthresh = 0;
5775 qinfo->conf.tx_thresh.wthresh = 0;
5776 qinfo->conf.tx_rs_thresh = 0;
5777 qinfo->conf.tx_free_thresh = 0;
5778 qinfo->conf.tx_deferred_start = txq_ctrl ? 0 : 1;
5779 qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
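/*
 * Usage sketch (illustrative, not part of the driver): an
 * application retrieves this data through the generic ethdev API,
 * e.g.:
 *
 *	struct rte_eth_txq_info qinfo;
 *
 *	if (rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo) == 0)
 *		printf("Tx descriptors: %u\n", qinfo.nb_desc);
 */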
5783 * DPDK callback to get the TX packet burst mode information
5786 * Pointer to the device structure.
5788 * @param tx_queue_id
5789 * Tx queue identifier.
5792 * Pointer to the burst mode information.
5795 * 0 on success, -EINVAL on failure.
5799 mlx5_tx_burst_mode_get(struct rte_eth_dev *dev,
5800 uint16_t tx_queue_id __rte_unused,
5801 struct rte_eth_burst_mode *mode)
5803 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
5804 unsigned int i, olx;
5806 for (i = 0; i < RTE_DIM(txoff_func); i++) {
5807 if (pkt_burst == txoff_func[i].func) {
5808 olx = txoff_func[i].olx;
5809 snprintf(mode->info, sizeof(mode->info),
5810 "%s%s%s%s%s%s%s%s%s",
5811 (olx & MLX5_TXOFF_CONFIG_EMPW) ?
5812 ((olx & MLX5_TXOFF_CONFIG_MPW) ?
5813 "Legacy MPW" : "Enhanced MPW") : "No MPW",
5814 (olx & MLX5_TXOFF_CONFIG_MULTI) ?
5816 (olx & MLX5_TXOFF_CONFIG_TSO) ?
5818 (olx & MLX5_TXOFF_CONFIG_SWP) ?
5820 (olx & MLX5_TXOFF_CONFIG_CSUM) ?
5822 (olx & MLX5_TXOFF_CONFIG_INLINE) ?
5824 (olx & MLX5_TXOFF_CONFIG_VLAN) ?
5826 (olx & MLX5_TXOFF_CONFIG_METADATA) ?
5828 (olx & MLX5_TXOFF_CONFIG_TXPP) ?