1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015-2019 Mellanox Technologies, Ltd
12 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
14 #pragma GCC diagnostic ignored "-Wpedantic"
16 #include <infiniband/verbs.h>
17 #include <infiniband/mlx5dv.h>
19 #pragma GCC diagnostic error "-Wpedantic"
23 #include <rte_mempool.h>
24 #include <rte_prefetch.h>
25 #include <rte_common.h>
26 #include <rte_branch_prediction.h>
27 #include <rte_ether.h>
28 #include <rte_cycles.h>
31 #include <mlx5_devx_cmds.h>
33 #include <mlx5_common.h>
35 #include "mlx5_defs.h"
37 #include "mlx5_utils.h"
38 #include "mlx5_rxtx.h"
39 #include "mlx5_autoconf.h"
41 /* TX burst subroutines return codes. */
42 enum mlx5_txcmp_code {
43 MLX5_TXCMP_CODE_EXIT = 0,
44 MLX5_TXCMP_CODE_ERROR,
45 MLX5_TXCMP_CODE_SINGLE,
46 MLX5_TXCMP_CODE_MULTI,
52 * These defines are used to configure the Tx burst routine option set
53 * supported at compile time. Options that are not specified are
54 * optimized out because the related if conditions can be evaluated at
55 * compile time. Offloads with a larger runtime check overhead (i.e.
56 * requiring more CPU cycles to skip) should have a larger index - this
57 * helps select the best matching routine when there is no exact match
58 * and some offloads are not actually requested.
60 #define MLX5_TXOFF_CONFIG_MULTI (1u << 0) /* Multi-segment packets.*/
61 #define MLX5_TXOFF_CONFIG_TSO (1u << 1) /* TCP send offload supported.*/
62 #define MLX5_TXOFF_CONFIG_SWP (1u << 2) /* Tunnels/SW Parser offloads.*/
63 #define MLX5_TXOFF_CONFIG_CSUM (1u << 3) /* Check Sums offloaded. */
64 #define MLX5_TXOFF_CONFIG_INLINE (1u << 4) /* Data inlining supported. */
65 #define MLX5_TXOFF_CONFIG_VLAN (1u << 5) /* VLAN insertion supported.*/
66 #define MLX5_TXOFF_CONFIG_METADATA (1u << 6) /* Flow metadata. */
67 #define MLX5_TXOFF_CONFIG_EMPW (1u << 8) /* Enhanced MPW supported.*/
68 #define MLX5_TXOFF_CONFIG_MPW (1u << 9) /* Legacy MPW supported.*/
70 /* The most common offloads groups. */
71 #define MLX5_TXOFF_CONFIG_NONE 0
72 #define MLX5_TXOFF_CONFIG_FULL (MLX5_TXOFF_CONFIG_MULTI | \
73 MLX5_TXOFF_CONFIG_TSO | \
74 MLX5_TXOFF_CONFIG_SWP | \
75 MLX5_TXOFF_CONFIG_CSUM | \
76 MLX5_TXOFF_CONFIG_INLINE | \
77 MLX5_TXOFF_CONFIG_VLAN | \
78 MLX5_TXOFF_CONFIG_METADATA)
80 #define MLX5_TXOFF_CONFIG(mask) (olx & MLX5_TXOFF_CONFIG_##mask)
82 #define MLX5_TXOFF_DECL(func, olx) \
83 static uint16_t mlx5_tx_burst_##func(void *txq, \
84 struct rte_mbuf **pkts, \
87 return mlx5_tx_burst_tmpl((struct mlx5_txq_data *)txq, \
88 pkts, pkts_n, (olx)); \
91 #define MLX5_TXOFF_INFO(func, olx) {mlx5_tx_burst_##func, olx},
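/*
 * Illustration only (not in the original source): a specialized burst routine
 * could be generated and registered roughly as follows; because olx is a
 * compile-time constant, every MLX5_TXOFF_CONFIG(...) branch that is not
 * selected is optimized away by the compiler.
 *
 *   MLX5_TXOFF_DECL(full, MLX5_TXOFF_CONFIG_FULL)
 *
 *   static const struct {
 *       eth_tx_burst_t func;
 *       unsigned int olx;
 *   } txoff_func[] = {
 *       MLX5_TXOFF_INFO(full, MLX5_TXOFF_CONFIG_FULL)
 *   };
 */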
93 static __rte_always_inline uint32_t
94 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe);
96 static __rte_always_inline int
97 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
98 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe);
100 static __rte_always_inline uint32_t
101 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe);
103 static __rte_always_inline void
104 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
105 volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res);
107 static __rte_always_inline void
108 mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx,
109 const unsigned int strd_n);
112 mlx5_queue_state_modify(struct rte_eth_dev *dev,
113 struct mlx5_mp_arg_queue_state_modify *sm);
116 mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *restrict tcp,
117 volatile struct mlx5_cqe *restrict cqe,
121 mlx5_lro_update_hdr(uint8_t *restrict padd,
122 volatile struct mlx5_cqe *restrict cqe,
125 uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
126 [0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
129 uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned;
130 uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned;
132 uint64_t rte_net_mlx5_dynf_inline_mask;
133 #define PKT_TX_DYNF_NOINLINE rte_net_mlx5_dynf_inline_mask
136 * Build a table to translate Rx completion flags to packet type.
138 * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
141 mlx5_set_ptype_table(void)
144 uint32_t (*p)[RTE_DIM(mlx5_ptype_table)] = &mlx5_ptype_table;
146 /* Last entry must not be overwritten, reserved for errored packet. */
147 for (i = 0; i < RTE_DIM(mlx5_ptype_table) - 1; ++i)
148 (*p)[i] = RTE_PTYPE_UNKNOWN;
150 * The index to the array should have:
151 * bit[1:0] = l3_hdr_type
152 * bit[4:2] = l4_hdr_type
155 * bit[7] = outer_l3_type
158 (*p)[0x00] = RTE_PTYPE_L2_ETHER;
160 (*p)[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
161 RTE_PTYPE_L4_NONFRAG;
162 (*p)[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
163 RTE_PTYPE_L4_NONFRAG;
165 (*p)[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
167 (*p)[0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
170 (*p)[0x05] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
172 (*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
174 (*p)[0x0d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
176 (*p)[0x0e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
178 (*p)[0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
180 (*p)[0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
183 (*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
185 (*p)[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
187 /* Repeat with outer_l3_type being set. Just in case. */
188 (*p)[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
189 RTE_PTYPE_L4_NONFRAG;
190 (*p)[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
191 RTE_PTYPE_L4_NONFRAG;
192 (*p)[0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
194 (*p)[0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
196 (*p)[0x85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
198 (*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
200 (*p)[0x8d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
202 (*p)[0x8e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
204 (*p)[0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
206 (*p)[0x92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
208 (*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
210 (*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
213 (*p)[0x40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
214 (*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
215 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
216 RTE_PTYPE_INNER_L4_NONFRAG;
217 (*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
218 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
219 RTE_PTYPE_INNER_L4_NONFRAG;
220 (*p)[0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
221 (*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
222 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
223 RTE_PTYPE_INNER_L4_NONFRAG;
224 (*p)[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
225 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
226 RTE_PTYPE_INNER_L4_NONFRAG;
227 /* Tunneled - Fragmented */
228 (*p)[0x61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
229 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
230 RTE_PTYPE_INNER_L4_FRAG;
231 (*p)[0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
232 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
233 RTE_PTYPE_INNER_L4_FRAG;
234 (*p)[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
235 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
236 RTE_PTYPE_INNER_L4_FRAG;
237 (*p)[0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
238 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
239 RTE_PTYPE_INNER_L4_FRAG;
241 (*p)[0x45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
242 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
243 RTE_PTYPE_INNER_L4_TCP;
244 (*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
245 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
246 RTE_PTYPE_INNER_L4_TCP;
247 (*p)[0x4d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
248 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
249 RTE_PTYPE_INNER_L4_TCP;
250 (*p)[0x4e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
251 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
252 RTE_PTYPE_INNER_L4_TCP;
253 (*p)[0x51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
254 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
255 RTE_PTYPE_INNER_L4_TCP;
256 (*p)[0x52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
257 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
258 RTE_PTYPE_INNER_L4_TCP;
259 (*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
260 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
261 RTE_PTYPE_INNER_L4_TCP;
262 (*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
263 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
264 RTE_PTYPE_INNER_L4_TCP;
265 (*p)[0xcd] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
266 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
267 RTE_PTYPE_INNER_L4_TCP;
268 (*p)[0xce] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
269 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
270 RTE_PTYPE_INNER_L4_TCP;
271 (*p)[0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
272 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
273 RTE_PTYPE_INNER_L4_TCP;
274 (*p)[0xd2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
275 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
276 RTE_PTYPE_INNER_L4_TCP;
278 (*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
279 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
280 RTE_PTYPE_INNER_L4_UDP;
281 (*p)[0x4a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
282 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
283 RTE_PTYPE_INNER_L4_UDP;
284 (*p)[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
285 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
286 RTE_PTYPE_INNER_L4_UDP;
287 (*p)[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
288 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
289 RTE_PTYPE_INNER_L4_UDP;
293 * Build a table to translate packet checksum offload flags to Verbs checksum types.
296 mlx5_set_cksum_table(void)
302 * The index should have:
303 * bit[0] = PKT_TX_TCP_SEG
304 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
305 * bit[4] = PKT_TX_IP_CKSUM
306 * bit[8] = PKT_TX_OUTER_IP_CKSUM
309 for (i = 0; i < RTE_DIM(mlx5_cksum_table); ++i) {
312 /* Tunneled packet. */
313 if (i & (1 << 8)) /* Outer IP. */
314 v |= MLX5_ETH_WQE_L3_CSUM;
315 if (i & (1 << 4)) /* Inner IP. */
316 v |= MLX5_ETH_WQE_L3_INNER_CSUM;
317 if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
318 v |= MLX5_ETH_WQE_L4_INNER_CSUM;
321 if (i & (1 << 4)) /* IP. */
322 v |= MLX5_ETH_WQE_L3_CSUM;
323 if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
324 v |= MLX5_ETH_WQE_L4_CSUM;
326 mlx5_cksum_table[i] = v;
331 * Build a table to translate mbuf Tx offload flags to the Verbs SWP type.
334 mlx5_set_swp_types_table(void)
340 * The index should have:
341 * bit[0:1] = PKT_TX_L4_MASK
342 * bit[4] = PKT_TX_IPV6
343 * bit[8] = PKT_TX_OUTER_IPV6
344 * bit[9] = PKT_TX_OUTER_UDP
346 for (i = 0; i < RTE_DIM(mlx5_swp_types_table); ++i) {
349 v |= MLX5_ETH_WQE_L3_OUTER_IPV6;
351 v |= MLX5_ETH_WQE_L4_OUTER_UDP;
353 v |= MLX5_ETH_WQE_L3_INNER_IPV6;
354 if ((i & 3) == (PKT_TX_UDP_CKSUM >> 52))
355 v |= MLX5_ETH_WQE_L4_INNER_UDP;
356 mlx5_swp_types_table[i] = v;
361 * Set Software Parser flags and offsets in Ethernet Segment of WQE.
362 * Flags must be initialized to zero beforehand.
365 * Pointer to burst routine local context.
367 * Pointer to store Software Parser flags
369 * Configured Tx offloads mask. It is fully defined at
370 * compile time and may be used for optimization.
373 * Software Parser offsets packed in dword.
374 * Software Parser flags are set by pointer.
376 static __rte_always_inline uint32_t
377 txq_mbuf_to_swp(struct mlx5_txq_local *restrict loc,
382 unsigned int idx, off;
385 if (!MLX5_TXOFF_CONFIG(SWP))
387 ol = loc->mbuf->ol_flags;
388 tunnel = ol & PKT_TX_TUNNEL_MASK;
390 * Check whether Software Parser is required.
391 * Only customized tunnels may ask for it.
393 if (likely(tunnel != PKT_TX_TUNNEL_UDP && tunnel != PKT_TX_TUNNEL_IP))
396 * The index should have:
397 * bit[0:1] = PKT_TX_L4_MASK
398 * bit[4] = PKT_TX_IPV6
399 * bit[8] = PKT_TX_OUTER_IPV6
400 * bit[9] = PKT_TX_OUTER_UDP
402 idx = (ol & (PKT_TX_L4_MASK | PKT_TX_IPV6 | PKT_TX_OUTER_IPV6)) >> 52;
403 idx |= (tunnel == PKT_TX_TUNNEL_UDP) ? (1 << 9) : 0;
404 *swp_flags = mlx5_swp_types_table[idx];
406 * Set offsets for SW parser. Since ConnectX-5, SW parser just
407 * complements HW parser. SW parser starts to engage only if HW parser
408 * can't reach a header. For the older devices, HW parser will not kick
409 * in if any of the SWP offsets is set. Therefore, all of the L3 offsets
410 * should be set regardless of HW offload.
412 off = loc->mbuf->outer_l2_len;
413 if (MLX5_TXOFF_CONFIG(VLAN) && ol & PKT_TX_VLAN_PKT)
414 off += sizeof(struct rte_vlan_hdr);
415 set = (off >> 1) << 8; /* Outer L3 offset. */
416 off += loc->mbuf->outer_l3_len;
417 if (tunnel == PKT_TX_TUNNEL_UDP)
418 set |= off >> 1; /* Outer L4 offset. */
419 if (ol & (PKT_TX_IPV4 | PKT_TX_IPV6)) { /* Inner IP. */
420 const uint64_t csum = ol & PKT_TX_L4_MASK;
421 off += loc->mbuf->l2_len;
422 set |= (off >> 1) << 24; /* Inner L3 offset. */
423 if (csum == PKT_TX_TCP_CKSUM ||
424 csum == PKT_TX_UDP_CKSUM ||
425 (MLX5_TXOFF_CONFIG(TSO) && ol & PKT_TX_TCP_SEG)) {
426 off += loc->mbuf->l3_len;
427 set |= (off >> 1) << 16; /* Inner L4 offset. */
430 set = rte_cpu_to_le_32(set);
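/*
 * Worked example for txq_mbuf_to_swp() above (illustrative values): a packet
 * carrying PKT_TX_TUNNEL_UDP | PKT_TX_IPV6 | PKT_TX_UDP_CKSUM yields
 * idx = 3 | (1 << 4) | (1 << 9) = 0x213, so the SWP flags mark the outer L4
 * as UDP and the inner L3/L4 as IPv6/UDP. The returned dword packs the
 * offsets in 2-byte units: outer L3 in bits 15:8, outer L4 in bits 7:0,
 * inner L3 in bits 31:24 and inner L4 in bits 23:16.
 */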
435 * Convert the Checksum offloads to Verbs.
438 * Pointer to the mbuf.
441 * Converted checksum flags.
443 static __rte_always_inline uint8_t
444 txq_ol_cksum_to_cs(struct rte_mbuf *buf)
447 uint8_t is_tunnel = !!(buf->ol_flags & PKT_TX_TUNNEL_MASK);
448 const uint64_t ol_flags_mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK |
449 PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM;
452 * The index should have:
453 * bit[0] = PKT_TX_TCP_SEG
454 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
455 * bit[4] = PKT_TX_IP_CKSUM
456 * bit[8] = PKT_TX_OUTER_IP_CKSUM
459 idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (!!is_tunnel << 9);
460 return mlx5_cksum_table[idx];
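/*
 * Worked example for txq_ol_cksum_to_cs() above (illustrative values): a
 * non-tunneled packet with PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM gives
 * idx = (1 << 4) | (1 << 2) = 0x14, and mlx5_cksum_table[0x14] was built by
 * mlx5_set_cksum_table() as MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM.
 */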
464 * Internal function to compute the number of used descriptors in an RX queue
470 * The number of used Rx descriptors.
473 rx_queue_count(struct mlx5_rxq_data *rxq)
475 struct rxq_zip *zip = &rxq->zip;
476 volatile struct mlx5_cqe *cqe;
477 const unsigned int cqe_n = (1 << rxq->cqe_n);
478 const unsigned int cqe_cnt = cqe_n - 1;
482 /* if we are processing a compressed cqe */
484 used = zip->cqe_cnt - zip->ca;
490 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
491 while (check_cqe(cqe, cqe_n, cq_ci) != MLX5_CQE_STATUS_HW_OWN) {
495 op_own = cqe->op_own;
496 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
497 n = rte_be_to_cpu_32(cqe->byte_cnt);
502 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
504 used = RTE_MIN(used, (1U << rxq->elts_n) - 1);
509 * DPDK callback to check the status of an Rx descriptor.
514 * The index of the descriptor in the ring.
517 * The status of the Rx descriptor.
520 mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
522 struct mlx5_rxq_data *rxq = rx_queue;
523 struct mlx5_rxq_ctrl *rxq_ctrl =
524 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
525 struct rte_eth_dev *dev = ETH_DEV(rxq_ctrl->priv);
527 if (dev->rx_pkt_burst != mlx5_rx_burst) {
531 if (offset >= (1 << rxq->elts_n)) {
535 if (offset < rx_queue_count(rxq))
536 return RTE_ETH_RX_DESC_DONE;
537 return RTE_ETH_RX_DESC_AVAIL;
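/*
 * Illustrative usage (assumed application code, not part of the PMD): the
 * callback above is reached through the generic ethdev API, e.g.
 *
 *   int st = rte_eth_rx_descriptor_status(port_id, queue_id, offset);
 *
 * where RTE_ETH_RX_DESC_DONE means the descriptor holds a received packet
 * that the application has not processed yet.
 */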
541 * DPDK callback to get the number of used descriptors in an Rx queue
544 * Pointer to the device structure.
550 * The number of used Rx descriptors.
551 * -EINVAL if the queue is invalid
554 mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
556 struct mlx5_priv *priv = dev->data->dev_private;
557 struct mlx5_rxq_data *rxq;
559 if (dev->rx_pkt_burst != mlx5_rx_burst) {
563 rxq = (*priv->rxqs)[rx_queue_id];
568 return rx_queue_count(rxq);
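/*
 * Illustrative usage (assumed application code): this callback backs the
 * generic rte_eth_rx_queue_count() API, e.g.
 *
 *   int used = rte_eth_rx_queue_count(port_id, queue_id);
 *
 * where a non-negative result is the number of Rx descriptors currently in
 * use on the queue.
 */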
571 #define MLX5_SYSTEM_LOG_DIR "/var/log"
573 * Dump debug information to a log file.
578 * If not NULL this string is printed as a header to the output
579 * and the output will be in hexadecimal view.
581 * This is the buffer address to print out.
583 * The number of bytes to dump out.
586 mlx5_dump_debug_information(const char *fname, const char *hex_title,
587 const void *buf, unsigned int hex_len)
591 MKSTR(path, "%s/%s", MLX5_SYSTEM_LOG_DIR, fname);
592 fd = fopen(path, "a+");
594 DRV_LOG(WARNING, "cannot open %s for debug dump", path);
595 MKSTR(path2, "./%s", fname);
596 fd = fopen(path2, "a+");
598 DRV_LOG(ERR, "cannot open %s for debug dump", path2);
601 DRV_LOG(INFO, "New debug dump in file %s", path2);
603 DRV_LOG(INFO, "New debug dump in file %s", path);
606 rte_hexdump(fd, hex_title, buf, hex_len);
608 fprintf(fd, "%s", (const char *)buf);
609 fprintf(fd, "\n\n\n");
614 * Move QP from error state to running state and initialize indexes.
617 * Pointer to TX queue control structure.
620 * 0 on success, else -1.
623 tx_recover_qp(struct mlx5_txq_ctrl *txq_ctrl)
625 struct mlx5_mp_arg_queue_state_modify sm = {
627 .queue_id = txq_ctrl->txq.idx,
630 if (mlx5_queue_state_modify(ETH_DEV(txq_ctrl->priv), &sm))
632 txq_ctrl->txq.wqe_ci = 0;
633 txq_ctrl->txq.wqe_pi = 0;
634 txq_ctrl->txq.elts_comp = 0;
638 /* Return 1 if the error CQE is already signed, otherwise sign it and return 0. */
640 check_err_cqe_seen(volatile struct mlx5_err_cqe *err_cqe)
642 static const uint8_t magic[] = "seen";
646 for (i = 0; i < sizeof(magic); ++i)
647 if (!ret || err_cqe->rsvd1[i] != magic[i]) {
649 err_cqe->rsvd1[i] = magic[i];
658 * Pointer to TX queue structure.
660 * Pointer to the error CQE.
663 * A negative value if queue recovery failed, otherwise
664 * a non-negative value when the error completion entry was handled successfully.
667 mlx5_tx_error_cqe_handle(struct mlx5_txq_data *restrict txq,
668 volatile struct mlx5_err_cqe *err_cqe)
670 if (err_cqe->syndrome != MLX5_CQE_SYNDROME_WR_FLUSH_ERR) {
671 const uint16_t wqe_m = ((1 << txq->wqe_n) - 1);
672 struct mlx5_txq_ctrl *txq_ctrl =
673 container_of(txq, struct mlx5_txq_ctrl, txq);
674 uint16_t new_wqe_pi = rte_be_to_cpu_16(err_cqe->wqe_counter);
675 int seen = check_err_cqe_seen(err_cqe);
677 if (!seen && txq_ctrl->dump_file_n <
678 txq_ctrl->priv->config.max_dump_files_num) {
679 MKSTR(err_str, "Unexpected CQE error syndrome "
680 "0x%02x CQN = %u SQN = %u wqe_counter = %u "
681 "wq_ci = %u cq_ci = %u", err_cqe->syndrome,
682 txq->cqe_s, txq->qp_num_8s >> 8,
683 rte_be_to_cpu_16(err_cqe->wqe_counter),
684 txq->wqe_ci, txq->cq_ci);
685 MKSTR(name, "dpdk_mlx5_port_%u_txq_%u_index_%u_%u",
686 PORT_ID(txq_ctrl->priv), txq->idx,
687 txq_ctrl->dump_file_n, (uint32_t)rte_rdtsc());
688 mlx5_dump_debug_information(name, NULL, err_str, 0);
689 mlx5_dump_debug_information(name, "MLX5 Error CQ:",
690 (const void *)((uintptr_t)
694 mlx5_dump_debug_information(name, "MLX5 Error SQ:",
695 (const void *)((uintptr_t)
699 txq_ctrl->dump_file_n++;
703 * Count errors in units of WQEs.
704 * Later it can be improved to count error packets,
705 * for example, by SQ parsing to find how many packets
706 * should be counted for each WQE.
708 txq->stats.oerrors += ((txq->wqe_ci & wqe_m) -
710 if (tx_recover_qp(txq_ctrl)) {
711 /* Recovering failed - retry later on the same WQE. */
714 /* Release all the remaining buffers. */
715 txq_free_elts(txq_ctrl);
721 * Translate RX completion flags to packet type.
724 * Pointer to RX queue structure.
728 * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
731 * Packet type for struct rte_mbuf.
733 static inline uint32_t
734 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
737 uint8_t pinfo = cqe->pkt_info;
738 uint16_t ptype = cqe->hdr_type_etc;
741 * The index to the array should have:
742 * bit[1:0] = l3_hdr_type
743 * bit[4:2] = l4_hdr_type
746 * bit[7] = outer_l3_type
748 idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10);
749 return mlx5_ptype_table[idx] | rxq->tunnel * !!(idx & (1 << 6));
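/*
 * Worked example for rxq_cq_to_pkt_type() above (illustrative values): a CQE
 * whose pkt_info and hdr_type_etc bits produce idx = 0x45 is resolved through
 * the 0x45 entry of mlx5_ptype_table, i.e. a tunneled packet with outer IPv4,
 * inner IPv6 and inner TCP; bit 6 of idx is the tunnel indication that
 * enables the rxq->tunnel addition in the return expression.
 */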
753 * Initialize Rx WQ and indexes.
756 * Pointer to RX queue structure.
759 mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
761 const unsigned int wqe_n = 1 << rxq->elts_n;
764 for (i = 0; (i != wqe_n); ++i) {
765 volatile struct mlx5_wqe_data_seg *scat;
769 if (mlx5_rxq_mprq_enabled(rxq)) {
770 struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[i];
772 scat = &((volatile struct mlx5_wqe_mprq *)
774 addr = (uintptr_t)mlx5_mprq_buf_addr(buf,
775 1 << rxq->strd_num_n);
776 byte_count = (1 << rxq->strd_sz_n) *
777 (1 << rxq->strd_num_n);
779 struct rte_mbuf *buf = (*rxq->elts)[i];
781 scat = &((volatile struct mlx5_wqe_data_seg *)
783 addr = rte_pktmbuf_mtod(buf, uintptr_t);
784 byte_count = DATA_LEN(buf);
786 /* scat->addr must be able to store a pointer. */
787 assert(sizeof(scat->addr) >= sizeof(uintptr_t));
788 *scat = (struct mlx5_wqe_data_seg){
789 .addr = rte_cpu_to_be_64(addr),
790 .byte_count = rte_cpu_to_be_32(byte_count),
791 .lkey = mlx5_rx_addr2mr(rxq, addr),
794 rxq->consumed_strd = 0;
795 rxq->decompressed = 0;
797 rxq->zip = (struct rxq_zip){
800 /* Update doorbell counter. */
801 rxq->rq_ci = wqe_n >> rxq->sges_n;
803 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
807 * Modify a Verbs/DevX queue state.
808 * This must be called from the primary process.
811 * Pointer to Ethernet device.
813 * State modify request parameters.
816 * 0 in case of success else non-zero value and rte_errno is set.
819 mlx5_queue_state_modify_primary(struct rte_eth_dev *dev,
820 const struct mlx5_mp_arg_queue_state_modify *sm)
823 struct mlx5_priv *priv = dev->data->dev_private;
826 struct mlx5_rxq_data *rxq = (*priv->rxqs)[sm->queue_id];
827 struct mlx5_rxq_ctrl *rxq_ctrl =
828 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
830 if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV) {
831 struct ibv_wq_attr mod = {
832 .attr_mask = IBV_WQ_ATTR_STATE,
833 .wq_state = sm->state,
836 ret = mlx5_glue->modify_wq(rxq_ctrl->obj->wq, &mod);
837 } else { /* rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ. */
838 struct mlx5_devx_modify_rq_attr rq_attr;
840 memset(&rq_attr, 0, sizeof(rq_attr));
841 if (sm->state == IBV_WQS_RESET) {
842 rq_attr.rq_state = MLX5_RQC_STATE_ERR;
843 rq_attr.state = MLX5_RQC_STATE_RST;
844 } else if (sm->state == IBV_WQS_RDY) {
845 rq_attr.rq_state = MLX5_RQC_STATE_RST;
846 rq_attr.state = MLX5_RQC_STATE_RDY;
847 } else if (sm->state == IBV_WQS_ERR) {
848 rq_attr.rq_state = MLX5_RQC_STATE_RDY;
849 rq_attr.state = MLX5_RQC_STATE_ERR;
851 ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq,
855 DRV_LOG(ERR, "Cannot change Rx WQ state to %u - %s",
856 sm->state, strerror(errno));
861 struct mlx5_txq_data *txq = (*priv->txqs)[sm->queue_id];
862 struct mlx5_txq_ctrl *txq_ctrl =
863 container_of(txq, struct mlx5_txq_ctrl, txq);
864 struct ibv_qp_attr mod = {
865 .qp_state = IBV_QPS_RESET,
866 .port_num = (uint8_t)priv->ibv_port,
868 struct ibv_qp *qp = txq_ctrl->obj->qp;
870 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
872 DRV_LOG(ERR, "Cannot change the Tx QP state to RESET "
873 "%s", strerror(errno));
877 mod.qp_state = IBV_QPS_INIT;
878 ret = mlx5_glue->modify_qp(qp, &mod,
879 (IBV_QP_STATE | IBV_QP_PORT));
881 DRV_LOG(ERR, "Cannot change Tx QP state to INIT %s",
886 mod.qp_state = IBV_QPS_RTR;
887 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
889 DRV_LOG(ERR, "Cannot change Tx QP state to RTR %s",
894 mod.qp_state = IBV_QPS_RTS;
895 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
897 DRV_LOG(ERR, "Cannot change Tx QP state to RTS %s",
907 * Modify a Verbs queue state.
910 * Pointer to Ethernet device.
912 * State modify request parameters.
915 * 0 in case of success else non-zero value.
918 mlx5_queue_state_modify(struct rte_eth_dev *dev,
919 struct mlx5_mp_arg_queue_state_modify *sm)
923 switch (rte_eal_process_type()) {
924 case RTE_PROC_PRIMARY:
925 ret = mlx5_queue_state_modify_primary(dev, sm);
927 case RTE_PROC_SECONDARY:
928 ret = mlx5_mp_req_queue_state_modify(dev, sm);
938 * The function moves the RQ to the reset state when the first error CQE is
939 * seen, then the CQ is drained by the caller's loop. When the CQ is empty,
940 * the RQ is moved to the ready state and reinitialized.
941 * Identifying the next CQE and counting errors are the caller's responsibility.
944 * Pointer to RX queue structure.
946 * 1 when called from vectorized Rx burst, need to prepare mbufs for the RQ.
947 * 0 when called from non-vectorized Rx burst.
950 * -1 in case of recovery error, otherwise the CQE status.
953 mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
955 const uint16_t cqe_n = 1 << rxq->cqe_n;
956 const uint16_t cqe_mask = cqe_n - 1;
957 const unsigned int wqe_n = 1 << rxq->elts_n;
958 struct mlx5_rxq_ctrl *rxq_ctrl =
959 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
961 volatile struct mlx5_cqe *cqe;
962 volatile struct mlx5_err_cqe *err_cqe;
964 .cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask],
966 struct mlx5_mp_arg_queue_state_modify sm;
969 switch (rxq->err_state) {
970 case MLX5_RXQ_ERR_STATE_NO_ERROR:
971 rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_RESET;
973 case MLX5_RXQ_ERR_STATE_NEED_RESET:
975 sm.queue_id = rxq->idx;
976 sm.state = IBV_WQS_RESET;
977 if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv), &sm))
979 if (rxq_ctrl->dump_file_n <
980 rxq_ctrl->priv->config.max_dump_files_num) {
981 MKSTR(err_str, "Unexpected CQE error syndrome "
982 "0x%02x CQN = %u RQN = %u wqe_counter = %u"
983 " rq_ci = %u cq_ci = %u", u.err_cqe->syndrome,
984 rxq->cqn, rxq_ctrl->wqn,
985 rte_be_to_cpu_16(u.err_cqe->wqe_counter),
986 rxq->rq_ci << rxq->sges_n, rxq->cq_ci);
987 MKSTR(name, "dpdk_mlx5_port_%u_rxq_%u_%u",
988 rxq->port_id, rxq->idx, (uint32_t)rte_rdtsc());
989 mlx5_dump_debug_information(name, NULL, err_str, 0);
990 mlx5_dump_debug_information(name, "MLX5 Error CQ:",
991 (const void *)((uintptr_t)
993 sizeof(*u.cqe) * cqe_n);
994 mlx5_dump_debug_information(name, "MLX5 Error RQ:",
995 (const void *)((uintptr_t)
998 rxq_ctrl->dump_file_n++;
1000 rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_READY;
1002 case MLX5_RXQ_ERR_STATE_NEED_READY:
1003 ret = check_cqe(u.cqe, cqe_n, rxq->cq_ci);
1004 if (ret == MLX5_CQE_STATUS_HW_OWN) {
1006 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1009 * The RQ consumer index must be zeroed while moving
1010 * from RESET state to RDY state.
1012 *rxq->rq_db = rte_cpu_to_be_32(0);
1015 sm.queue_id = rxq->idx;
1016 sm.state = IBV_WQS_RDY;
1017 if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv),
1021 const uint16_t q_mask = wqe_n - 1;
1023 struct rte_mbuf **elt;
1025 unsigned int n = wqe_n - (rxq->rq_ci -
1028 for (i = 0; i < (int)n; ++i) {
1029 elt_idx = (rxq->rq_ci + i) & q_mask;
1030 elt = &(*rxq->elts)[elt_idx];
1031 *elt = rte_mbuf_raw_alloc(rxq->mp);
1033 for (i--; i >= 0; --i) {
1034 elt_idx = (rxq->rq_ci +
1038 rte_pktmbuf_free_seg
1044 for (i = 0; i < (int)wqe_n; ++i) {
1045 elt = &(*rxq->elts)[i];
1047 (uint16_t)((*elt)->buf_len -
1048 rte_pktmbuf_headroom(*elt));
1050 /* Padding with a fake mbuf for vec Rx. */
1051 for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
1052 (*rxq->elts)[wqe_n + i] =
1055 mlx5_rxq_initialize(rxq);
1056 rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
1065 * Get size of the next packet for a given CQE. For compressed CQEs, the
1066 * consumer index is updated only once all packets of the current one have
1070 * Pointer to RX queue.
1074 * Store pointer to mini-CQE if compressed. Otherwise, the pointer is not
1078 * 0 in case of empty CQE, otherwise the packet size in bytes.
1081 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
1082 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe)
1084 struct rxq_zip *zip = &rxq->zip;
1085 uint16_t cqe_n = cqe_cnt + 1;
1091 /* Process compressed data in the CQE and mini arrays. */
1093 volatile struct mlx5_mini_cqe8 (*mc)[8] =
1094 (volatile struct mlx5_mini_cqe8 (*)[8])
1095 (uintptr_t)(&(*rxq->cqes)[zip->ca &
1098 len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt);
1099 *mcqe = &(*mc)[zip->ai & 7];
1100 if ((++zip->ai & 7) == 0) {
1101 /* Invalidate consumed CQEs */
1104 while (idx != end) {
1105 (*rxq->cqes)[idx & cqe_cnt].op_own =
1106 MLX5_CQE_INVALIDATE;
1110 * Increment consumer index to skip the number
1111 * of CQEs consumed. Hardware leaves holes in
1112 * the CQ ring for software use.
1117 if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) {
1118 /* Invalidate the rest */
1122 while (idx != end) {
1123 (*rxq->cqes)[idx & cqe_cnt].op_own =
1124 MLX5_CQE_INVALIDATE;
1127 rxq->cq_ci = zip->cq_ci;
1131 * No compressed data, get next CQE and verify if it is
1138 ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
1139 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
1140 if (unlikely(ret == MLX5_CQE_STATUS_ERR ||
1142 ret = mlx5_rx_err_handle(rxq, 0);
1143 if (ret == MLX5_CQE_STATUS_HW_OWN ||
1151 op_own = cqe->op_own;
1152 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) {
1153 volatile struct mlx5_mini_cqe8 (*mc)[8] =
1154 (volatile struct mlx5_mini_cqe8 (*)[8])
1155 (uintptr_t)(&(*rxq->cqes)
1159 /* Fix endianness. */
1160 zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
1162 * Current mini array position is the one
1163 * returned by check_cqe64().
1165 * If completion comprises several mini arrays,
1166 * as a special case the second one is located
1167 * 7 CQEs after the initial CQE instead of 8
1168 * for subsequent ones.
1170 zip->ca = rxq->cq_ci;
1171 zip->na = zip->ca + 7;
1172 /* Compute the next non-compressed CQE. */
1174 zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
1175 /* Get packet size to return. */
1176 len = rte_be_to_cpu_32((*mc)[0].byte_cnt);
1179 /* Prefetch all to be invalidated */
1182 while (idx != end) {
1183 rte_prefetch0(&(*rxq->cqes)[(idx) &
1188 len = rte_be_to_cpu_32(cqe->byte_cnt);
1191 if (unlikely(rxq->err_state)) {
1192 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1193 ++rxq->stats.idropped;
1201 * Translate RX completion flags to offload flags.
1207 * Offload flags (ol_flags) for struct rte_mbuf.
1209 static inline uint32_t
1210 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe)
1212 uint32_t ol_flags = 0;
1213 uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);
1217 MLX5_CQE_RX_L3_HDR_VALID,
1218 PKT_RX_IP_CKSUM_GOOD) |
1220 MLX5_CQE_RX_L4_HDR_VALID,
1221 PKT_RX_L4_CKSUM_GOOD);
1226 * Fill in mbuf fields from RX completion flags.
1227 * Note that pkt->ol_flags should be initialized outside of this function.
1230 * Pointer to RX queue.
1235 * @param rss_hash_res
1236 * Packet RSS Hash result.
1239 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
1240 volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res)
1242 /* Update packet information. */
1243 pkt->packet_type = rxq_cq_to_pkt_type(rxq, cqe);
1244 if (rss_hash_res && rxq->rss_hash) {
1245 pkt->hash.rss = rss_hash_res;
1246 pkt->ol_flags |= PKT_RX_RSS_HASH;
1248 if (rxq->mark && MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) {
1249 pkt->ol_flags |= PKT_RX_FDIR;
1250 if (cqe->sop_drop_qpn !=
1251 rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) {
1252 uint32_t mark = cqe->sop_drop_qpn;
1254 pkt->ol_flags |= PKT_RX_FDIR_ID;
1255 pkt->hash.fdir.hi = mlx5_flow_mark_get(mark);
1258 if (rte_flow_dynf_metadata_avail() && cqe->flow_table_metadata) {
1259 pkt->ol_flags |= PKT_RX_DYNF_METADATA;
1260 *RTE_FLOW_DYNF_METADATA(pkt) = cqe->flow_table_metadata;
1263 pkt->ol_flags |= rxq_cq_to_ol_flags(cqe);
1264 if (rxq->vlan_strip &&
1265 (cqe->hdr_type_etc & rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) {
1266 pkt->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1267 pkt->vlan_tci = rte_be_to_cpu_16(cqe->vlan_info);
1269 if (rxq->hw_timestamp) {
1270 pkt->timestamp = rte_be_to_cpu_64(cqe->timestamp);
1271 pkt->ol_flags |= PKT_RX_TIMESTAMP;
1276 * DPDK callback for RX.
1279 * Generic pointer to RX queue structure.
1281 * Array to store received packets.
1283 * Maximum number of packets in array.
1286 * Number of packets successfully received (<= pkts_n).
1289 mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1291 struct mlx5_rxq_data *rxq = dpdk_rxq;
1292 const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1;
1293 const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1;
1294 const unsigned int sges_n = rxq->sges_n;
1295 struct rte_mbuf *pkt = NULL;
1296 struct rte_mbuf *seg = NULL;
1297 volatile struct mlx5_cqe *cqe =
1298 &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1300 unsigned int rq_ci = rxq->rq_ci << sges_n;
1301 int len = 0; /* keep its value across iterations. */
1304 unsigned int idx = rq_ci & wqe_cnt;
1305 volatile struct mlx5_wqe_data_seg *wqe =
1306 &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx];
1307 struct rte_mbuf *rep = (*rxq->elts)[idx];
1308 volatile struct mlx5_mini_cqe8 *mcqe = NULL;
1309 uint32_t rss_hash_res;
1317 rep = rte_mbuf_raw_alloc(rxq->mp);
1318 if (unlikely(rep == NULL)) {
1319 ++rxq->stats.rx_nombuf;
1322 * no buffers before we even started,
1323 * bail out silently.
1327 while (pkt != seg) {
1328 assert(pkt != (*rxq->elts)[idx]);
1332 rte_mbuf_raw_free(pkt);
1338 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1339 len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe);
1341 rte_mbuf_raw_free(rep);
1345 assert(len >= (rxq->crc_present << 2));
1346 pkt->ol_flags &= EXT_ATTACHED_MBUF;
1347 /* If compressed, take hash result from mini-CQE. */
1348 rss_hash_res = rte_be_to_cpu_32(mcqe == NULL ?
1350 mcqe->rx_hash_result);
1351 rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
1352 if (rxq->crc_present)
1353 len -= RTE_ETHER_CRC_LEN;
1355 if (cqe->lro_num_seg > 1) {
1357 (rte_pktmbuf_mtod(pkt, uint8_t *), cqe,
1359 pkt->ol_flags |= PKT_RX_LRO;
1360 pkt->tso_segsz = len / cqe->lro_num_seg;
1363 DATA_LEN(rep) = DATA_LEN(seg);
1364 PKT_LEN(rep) = PKT_LEN(seg);
1365 SET_DATA_OFF(rep, DATA_OFF(seg));
1366 PORT(rep) = PORT(seg);
1367 (*rxq->elts)[idx] = rep;
1369 * Fill NIC descriptor with the new buffer. The lkey and size
1370 * of the buffers are already known, only the buffer address
1373 wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
1374 /* If there's only one MR, no need to replace LKey in WQE. */
1375 if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
1376 wqe->lkey = mlx5_rx_mb2mr(rxq, rep);
1377 if (len > DATA_LEN(seg)) {
1378 len -= DATA_LEN(seg);
1383 DATA_LEN(seg) = len;
1384 #ifdef MLX5_PMD_SOFT_COUNTERS
1385 /* Increment bytes counter. */
1386 rxq->stats.ibytes += PKT_LEN(pkt);
1388 /* Return packet. */
1393 /* Align consumer index to the next stride. */
1398 if (unlikely((i == 0) && ((rq_ci >> sges_n) == rxq->rq_ci)))
1400 /* Update the consumer index. */
1401 rxq->rq_ci = rq_ci >> sges_n;
1403 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1405 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
1406 #ifdef MLX5_PMD_SOFT_COUNTERS
1407 /* Increment packets counter. */
1408 rxq->stats.ipackets += i;
1414 * Update LRO packet TCP header.
1415 * The HW LRO feature doesn't update the TCP header after coalescing the
1416 * TCP segments, but supplies information in the CQE so SW can fill it in.
1419 * Pointer to the TCP header.
1421 * Pointer to the completion entry.
1423 * The L3 pseudo-header checksum.
1426 mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *restrict tcp,
1427 volatile struct mlx5_cqe *restrict cqe,
1430 uint8_t l4_type = (rte_be_to_cpu_16(cqe->hdr_type_etc) &
1431 MLX5_CQE_L4_TYPE_MASK) >> MLX5_CQE_L4_TYPE_SHIFT;
1433 * The HW calculates only the TCP payload checksum; SW needs to complete
1434 * the TCP header checksum and the L3 pseudo-header checksum.
1436 uint32_t csum = phcsum + cqe->csum;
1438 if (l4_type == MLX5_L4_HDR_TYPE_TCP_EMPTY_ACK ||
1439 l4_type == MLX5_L4_HDR_TYPE_TCP_WITH_ACL) {
1440 tcp->tcp_flags |= RTE_TCP_ACK_FLAG;
1441 tcp->recv_ack = cqe->lro_ack_seq_num;
1442 tcp->rx_win = cqe->lro_tcp_win;
1444 if (cqe->lro_tcppsh_abort_dupack & MLX5_CQE_LRO_PUSH_MASK)
1445 tcp->tcp_flags |= RTE_TCP_PSH_FLAG;
1447 csum += rte_raw_cksum(tcp, (tcp->data_off >> 4) * 4);
1448 csum = ((csum & 0xffff0000) >> 16) + (csum & 0xffff);
1449 csum = (~csum) & 0xffff;
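/*
 * Note on the folding above: the 32-bit one's-complement sum is folded into
 * 16 bits and inverted. For example (illustrative number), a running sum of
 * 0x1abcd folds to 0x0001 + 0xabcd = 0xabce and the stored checksum becomes
 * (~0xabce) & 0xffff = 0x5431.
 */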
1456 * Update LRO packet headers.
1457 * The HW LRO feature doesn't update the L3/TCP headers after coalescing the
1458 * TCP segments, but supplies information in the CQE so SW can fill them in.
1461 * The packet address.
1463 * Pointer to the completion entry.
1465 * The packet length.
1468 mlx5_lro_update_hdr(uint8_t *restrict padd,
1469 volatile struct mlx5_cqe *restrict cqe,
1473 struct rte_ether_hdr *eth;
1474 struct rte_vlan_hdr *vlan;
1475 struct rte_ipv4_hdr *ipv4;
1476 struct rte_ipv6_hdr *ipv6;
1477 struct rte_tcp_hdr *tcp;
1482 uint16_t proto = h.eth->ether_type;
1486 while (proto == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
1487 proto == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
1488 proto = h.vlan->eth_proto;
1491 if (proto == RTE_BE16(RTE_ETHER_TYPE_IPV4)) {
1492 h.ipv4->time_to_live = cqe->lro_min_ttl;
1493 h.ipv4->total_length = rte_cpu_to_be_16(len - (h.hdr - padd));
1494 h.ipv4->hdr_checksum = 0;
1495 h.ipv4->hdr_checksum = rte_ipv4_cksum(h.ipv4);
1496 phcsum = rte_ipv4_phdr_cksum(h.ipv4, 0);
1499 h.ipv6->hop_limits = cqe->lro_min_ttl;
1500 h.ipv6->payload_len = rte_cpu_to_be_16(len - (h.hdr - padd) -
1502 phcsum = rte_ipv6_phdr_cksum(h.ipv6, 0);
1505 mlx5_lro_update_tcp_hdr(h.tcp, cqe, phcsum);
1509 mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
1511 struct mlx5_mprq_buf *buf = opaque;
1513 if (rte_atomic16_read(&buf->refcnt) == 1) {
1514 rte_mempool_put(buf->mp, buf);
1515 } else if (rte_atomic16_add_return(&buf->refcnt, -1) == 0) {
1516 rte_atomic16_set(&buf->refcnt, 1);
1517 rte_mempool_put(buf->mp, buf);
1522 mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf)
1524 mlx5_mprq_buf_free_cb(NULL, buf);
1528 mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx,
1529 const unsigned int strd_n)
1531 struct mlx5_mprq_buf *rep = rxq->mprq_repl;
1532 volatile struct mlx5_wqe_data_seg *wqe =
1533 &((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg;
1536 assert(rep != NULL);
1537 /* Replace MPRQ buf. */
1538 (*rxq->mprq_bufs)[rq_idx] = rep;
1540 addr = mlx5_mprq_buf_addr(rep, strd_n);
1541 wqe->addr = rte_cpu_to_be_64((uintptr_t)addr);
1542 /* If there's only one MR, no need to replace LKey in WQE. */
1543 if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
1544 wqe->lkey = mlx5_rx_addr2mr(rxq, (uintptr_t)addr);
1545 /* Stash a mbuf for next replacement. */
1546 if (likely(!rte_mempool_get(rxq->mprq_mp, (void **)&rep)))
1547 rxq->mprq_repl = rep;
1549 rxq->mprq_repl = NULL;
1553 * DPDK callback for RX with Multi-Packet RQ support.
1556 * Generic pointer to RX queue structure.
1558 * Array to store received packets.
1560 * Maximum number of packets in array.
1563 * Number of packets successfully received (<= pkts_n).
1566 mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1568 struct mlx5_rxq_data *rxq = dpdk_rxq;
1569 const unsigned int strd_n = 1 << rxq->strd_num_n;
1570 const unsigned int strd_sz = 1 << rxq->strd_sz_n;
1571 const unsigned int strd_shift =
1572 MLX5_MPRQ_STRIDE_SHIFT_BYTE * rxq->strd_shift_en;
1573 const unsigned int cq_mask = (1 << rxq->cqe_n) - 1;
1574 const unsigned int wq_mask = (1 << rxq->elts_n) - 1;
1575 volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
1577 uint32_t rq_ci = rxq->rq_ci;
1578 uint16_t consumed_strd = rxq->consumed_strd;
1579 uint16_t headroom_sz = rxq->strd_headroom_en * RTE_PKTMBUF_HEADROOM;
1580 struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
1582 while (i < pkts_n) {
1583 struct rte_mbuf *pkt;
1591 volatile struct mlx5_mini_cqe8 *mcqe = NULL;
1592 uint32_t rss_hash_res = 0;
1593 uint8_t lro_num_seg;
1595 if (consumed_strd == strd_n) {
1596 /* Replace WQE only if the buffer is still in use. */
1597 if (rte_atomic16_read(&buf->refcnt) > 1) {
1598 mprq_buf_replace(rxq, rq_ci & wq_mask, strd_n);
1599 /* Release the old buffer. */
1600 mlx5_mprq_buf_free(buf);
1601 } else if (unlikely(rxq->mprq_repl == NULL)) {
1602 struct mlx5_mprq_buf *rep;
1605 * Currently, the MPRQ mempool is out of buffers
1606 * and memcpy is done regardless of the size of the Rx
1607 * packet. Retry allocation to get back to
1610 if (!rte_mempool_get(rxq->mprq_mp,
1612 rxq->mprq_repl = rep;
1614 /* Advance to the next WQE. */
1617 buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
1619 cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
1620 ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe);
1624 strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
1625 MLX5_MPRQ_STRIDE_NUM_SHIFT;
1627 consumed_strd += strd_cnt;
1628 if (byte_cnt & MLX5_MPRQ_FILLER_MASK)
1631 rss_hash_res = rte_be_to_cpu_32(cqe->rx_hash_res);
1632 strd_idx = rte_be_to_cpu_16(cqe->wqe_counter);
1634 /* mini-CQE for MPRQ doesn't have hash result. */
1635 strd_idx = rte_be_to_cpu_16(mcqe->stride_idx);
1637 assert(strd_idx < strd_n);
1638 assert(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) & wq_mask));
1639 lro_num_seg = cqe->lro_num_seg;
1641 * Currently configured to receive a packet per stride. But if
1642 * the MTU is adjusted through the kernel interface, the device could
1643 * consume multiple strides without raising an error. In this
1644 * case, the packet should be dropped because it is bigger than
1645 * the max_rx_pkt_len.
1647 if (unlikely(!lro_num_seg && strd_cnt > 1)) {
1648 ++rxq->stats.idropped;
1651 pkt = rte_pktmbuf_alloc(rxq->mp);
1652 if (unlikely(pkt == NULL)) {
1653 ++rxq->stats.rx_nombuf;
1656 len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
1657 assert((int)len >= (rxq->crc_present << 2));
1658 if (rxq->crc_present)
1659 len -= RTE_ETHER_CRC_LEN;
1660 offset = strd_idx * strd_sz + strd_shift;
1661 addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf, strd_n), offset);
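/*
 * Address arithmetic example (illustrative values): with 2048-byte strides
 * (strd_sz) and strd_idx = 3, the packet starts 3 * 2048 = 6144 bytes into
 * the MPRQ buffer; strd_shift additionally adds MLX5_MPRQ_STRIDE_SHIFT_BYTE
 * only when the stride shift is enabled (strd_shift_en).
 */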
1663 * Memcpy packets to the target mbuf if:
1664 * - The size of the packet is smaller than mprq_max_memcpy_len.
1665 * - The mempool for Multi-Packet RQ is out of buffers.
1667 if (len <= rxq->mprq_max_memcpy_len || rxq->mprq_repl == NULL) {
1669 * When memcpy'ing the packet because the mempool is out of
1670 * buffers, the packet must be smaller than the target mbuf.
1672 if (unlikely(rte_pktmbuf_tailroom(pkt) < len)) {
1673 rte_pktmbuf_free_seg(pkt);
1674 ++rxq->stats.idropped;
1677 rte_memcpy(rte_pktmbuf_mtod(pkt, void *), addr, len);
1678 DATA_LEN(pkt) = len;
1680 rte_iova_t buf_iova;
1681 struct rte_mbuf_ext_shared_info *shinfo;
1682 uint16_t buf_len = strd_cnt * strd_sz;
1685 /* Increment the refcnt of the whole chunk. */
1686 rte_atomic16_add_return(&buf->refcnt, 1);
1687 assert((uint16_t)rte_atomic16_read(&buf->refcnt) <=
1689 buf_addr = RTE_PTR_SUB(addr, headroom_sz);
1691 * MLX5 device doesn't use iova but it is necessary in a
1692 * case where the Rx packet is transmitted via a
1695 buf_iova = rte_mempool_virt2iova(buf) +
1696 RTE_PTR_DIFF(buf_addr, buf);
1697 shinfo = &buf->shinfos[strd_idx];
1698 rte_mbuf_ext_refcnt_set(shinfo, 1);
1700 * EXT_ATTACHED_MBUF will be set in pkt->ol_flags when
1701 * attaching the stride to the mbuf and more offload flags
1702 * will be added below by calling rxq_cq_to_mbuf().
1703 * Other fields will be overwritten.
1705 rte_pktmbuf_attach_extbuf(pkt, buf_addr, buf_iova,
1707 /* Set mbuf head-room. */
1708 pkt->data_off = headroom_sz;
1709 assert(pkt->ol_flags == EXT_ATTACHED_MBUF);
1711 * Prevent potential overflow due to MTU change through
1714 if (unlikely(rte_pktmbuf_tailroom(pkt) < len)) {
1715 rte_pktmbuf_free_seg(pkt);
1716 ++rxq->stats.idropped;
1719 DATA_LEN(pkt) = len;
1721 * An LRO packet may consume all the stride memory; in this
1722 * case packet head-room space is not guaranteed, so an empty
1723 * mbuf must be added for the head-room.
1725 if (!rxq->strd_headroom_en) {
1726 struct rte_mbuf *headroom_mbuf =
1727 rte_pktmbuf_alloc(rxq->mp);
1729 if (unlikely(headroom_mbuf == NULL)) {
1730 rte_pktmbuf_free_seg(pkt);
1731 ++rxq->stats.rx_nombuf;
1734 PORT(pkt) = rxq->port_id;
1735 NEXT(headroom_mbuf) = pkt;
1736 pkt = headroom_mbuf;
1740 rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
1741 if (lro_num_seg > 1) {
1742 mlx5_lro_update_hdr(addr, cqe, len);
1743 pkt->ol_flags |= PKT_RX_LRO;
1744 pkt->tso_segsz = strd_sz;
1747 PORT(pkt) = rxq->port_id;
1748 #ifdef MLX5_PMD_SOFT_COUNTERS
1749 /* Increment bytes counter. */
1750 rxq->stats.ibytes += PKT_LEN(pkt);
1752 /* Return packet. */
1756 /* Update the consumer indexes. */
1757 rxq->consumed_strd = consumed_strd;
1759 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1760 if (rq_ci != rxq->rq_ci) {
1763 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
1765 #ifdef MLX5_PMD_SOFT_COUNTERS
1766 /* Increment packets counter. */
1767 rxq->stats.ipackets += i;
1773 * Dummy DPDK callback for TX.
1775 * This function is used to temporarily replace the real callback during
1776 * unsafe control operations on the queue, or in case of error.
1779 * Generic pointer to TX queue structure.
1781 * Packets to transmit.
1783 * Number of packets in array.
1786 * Number of packets successfully transmitted (<= pkts_n).
1789 removed_tx_burst(void *dpdk_txq __rte_unused,
1790 struct rte_mbuf **pkts __rte_unused,
1791 uint16_t pkts_n __rte_unused)
1798 * Dummy DPDK callback for RX.
1800 * This function is used to temporarily replace the real callback during
1801 * unsafe control operations on the queue, or in case of error.
1804 * Generic pointer to RX queue structure.
1806 * Array to store received packets.
1808 * Maximum number of packets in array.
1811 * Number of packets successfully received (<= pkts_n).
1814 removed_rx_burst(void *dpdk_rxq __rte_unused,
1815 struct rte_mbuf **pkts __rte_unused,
1816 uint16_t pkts_n __rte_unused)
1823 * Vectorized Rx/Tx routines are not compiled in when required vector
1824 * instructions are not supported on a target architecture. The following null
1825 * stubs are needed for linkage when the vectorized routines are not built
1826 * elsewhere (e.g. in mlx5_rxtx_vec_sse.c for x86).
1830 mlx5_rx_burst_vec(void *dpdk_rxq __rte_unused,
1831 struct rte_mbuf **pkts __rte_unused,
1832 uint16_t pkts_n __rte_unused)
1838 mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)
1844 mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused)
1850 * Free the mbufs from the linear array of pointers.
1853 * Pointer to the array of packets to be freed.
1855 * Number of packets to be freed.
1857 * Configured Tx offloads mask. It is fully defined at
1858 * compile time and may be used for optimization.
1860 static __rte_always_inline void
1861 mlx5_tx_free_mbuf(struct rte_mbuf **restrict pkts,
1862 unsigned int pkts_n,
1863 unsigned int olx __rte_unused)
1865 struct rte_mempool *pool = NULL;
1866 struct rte_mbuf **p_free = NULL;
1867 struct rte_mbuf *mbuf;
1868 unsigned int n_free = 0;
1871 * The implemented algorithm avoids
1872 * copying pointers to a temporary array
1873 * for rte_mempool_put_bulk() calls.
1880 * Decrement mbuf reference counter, detach
1881 * indirect and external buffers if needed.
1883 mbuf = rte_pktmbuf_prefree_seg(*pkts);
1884 if (likely(mbuf != NULL)) {
1885 assert(mbuf == *pkts);
1886 if (likely(n_free != 0)) {
1887 if (unlikely(pool != mbuf->pool))
1888 /* From different pool. */
1891 /* Start new scan array. */
1898 if (unlikely(pkts_n == 0)) {
1904 * This happens if mbuf is still referenced.
1905 * We can't put it back to the pool, skip.
1909 if (unlikely(n_free != 0))
1910 /* There is some array to free. */
1912 if (unlikely(pkts_n == 0))
1913 /* Last mbuf, nothing to free. */
1919 * This loop is implemented to avoid multiple
1920 * inlining of rte_mempool_put_bulk().
1926 * Free the array of pre-freed mbufs
1927 * belonging to the same memory pool.
1929 rte_mempool_put_bulk(pool, (void *)p_free, n_free);
1930 if (unlikely(mbuf != NULL)) {
1931 /* There is a request to start a new scan. */
1936 if (likely(pkts_n != 0))
1939 * This is the last mbuf to be freed.
1940 * Do one more loop iteration to complete.
1941 * This is the rare case of the last unique mbuf.
1946 if (likely(pkts_n == 0))
1955 * Free the mbufs from the elts ring buffer up to the new tail.
1958 * Pointer to Tx queue structure.
1960 * Index in elts to free up to, becomes the new elts tail.
1962 * Configured Tx offloads mask. It is fully defined at
1963 * compile time and may be used for optimization.
1965 static __rte_always_inline void
1966 mlx5_tx_free_elts(struct mlx5_txq_data *restrict txq,
1968 unsigned int olx __rte_unused)
1970 uint16_t n_elts = tail - txq->elts_tail;
1973 assert(n_elts <= txq->elts_s);
1975 * Implement a loop to support ring buffer wraparound
1976 * with single inlining of mlx5_tx_free_mbuf().
1981 part = txq->elts_s - (txq->elts_tail & txq->elts_m);
1982 part = RTE_MIN(part, n_elts);
1984 assert(part <= txq->elts_s);
1985 mlx5_tx_free_mbuf(&txq->elts[txq->elts_tail & txq->elts_m],
1987 txq->elts_tail += part;
1993 * Store the mbufs being sent into the elts ring buffer.
1994 * On Tx completion these mbufs will be freed.
1997 * Pointer to Tx queue structure.
1999 * Pointer to array of packets to be stored.
2001 * Number of packets to be stored.
2003 * Configured Tx offloads mask. It is fully defined at
2004 * compile time and may be used for optimization.
2006 static __rte_always_inline void
2007 mlx5_tx_copy_elts(struct mlx5_txq_data *restrict txq,
2008 struct rte_mbuf **restrict pkts,
2009 unsigned int pkts_n,
2010 unsigned int olx __rte_unused)
2013 struct rte_mbuf **elts = (struct rte_mbuf **)txq->elts;
2017 part = txq->elts_s - (txq->elts_head & txq->elts_m);
2019 assert(part <= txq->elts_s);
2020 /* This code is a good candidate for vectorizing with SIMD. */
2021 rte_memcpy((void *)(elts + (txq->elts_head & txq->elts_m)),
2023 RTE_MIN(part, pkts_n) * sizeof(struct rte_mbuf *));
2024 txq->elts_head += pkts_n;
2025 if (unlikely(part < pkts_n))
2026 /* The copy is wrapping around the elts array. */
2027 rte_memcpy((void *)elts, (void *)(pkts + part),
2028 (pkts_n - part) * sizeof(struct rte_mbuf *));
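/*
 * Wraparound example for mlx5_tx_copy_elts() above (illustrative values):
 * with elts_s = 256, elts_head = 250 and pkts_n = 10, part = 6, so the first
 * six mbuf pointers land at the tail of elts[] and the remaining four are
 * copied to its beginning before elts_head advances to 260.
 */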
2032 * Update completion queue consuming index via doorbell
2033 * and flush the completed data buffers.
2036 * Pointer to TX queue structure.
2037 * @param last_cqe
2038 * Pointer to the last valid CQE; if not NULL, update txq->wqe_pi and flush the buffers.
2040 * Configured Tx offloads mask. It is fully defined at
2041 * compile time and may be used for optimization.
2043 static __rte_always_inline void
2044 mlx5_tx_comp_flush(struct mlx5_txq_data *restrict txq,
2045 volatile struct mlx5_cqe *last_cqe,
2046 unsigned int olx __rte_unused)
2048 if (likely(last_cqe != NULL)) {
2051 txq->wqe_pi = rte_be_to_cpu_16(last_cqe->wqe_counter);
2052 tail = txq->fcqs[(txq->cq_ci - 1) & txq->cqe_m];
2053 if (likely(tail != txq->elts_tail)) {
2054 mlx5_tx_free_elts(txq, tail, olx);
2055 assert(tail == txq->elts_tail);
2061 * Manage TX completions. This routine checks the CQ for
2062 * arrived CQEs, deduces the last accomplished WQE in the SQ,
2063 * updates the SQ producer index and frees all completed mbufs.
2066 * Pointer to TX queue structure.
2068 * Configured Tx offloads mask. It is fully defined at
2069 * compile time and may be used for optimization.
2071 * NOTE: not inlined intentionally, it makes tx_burst
2072 * routine smaller, simpler and faster - from experiments.
2075 mlx5_tx_handle_completion(struct mlx5_txq_data *restrict txq,
2076 unsigned int olx __rte_unused)
2078 unsigned int count = MLX5_TX_COMP_MAX_CQE;
2079 volatile struct mlx5_cqe *last_cqe = NULL;
2080 uint16_t ci = txq->cq_ci;
2083 static_assert(MLX5_CQE_STATUS_HW_OWN < 0, "Must be negative value");
2084 static_assert(MLX5_CQE_STATUS_SW_OWN < 0, "Must be negative value");
2086 volatile struct mlx5_cqe *cqe;
2088 cqe = &txq->cqes[ci & txq->cqe_m];
2089 ret = check_cqe(cqe, txq->cqe_s, ci);
2090 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
2091 if (likely(ret != MLX5_CQE_STATUS_ERR)) {
2092 /* No new CQEs in completion queue. */
2093 assert(ret == MLX5_CQE_STATUS_HW_OWN);
2097 * Some error occurred, try to restart.
2098 * We have no barrier after the WQE related doorbell is
2099 * written, make sure all writes are completed
2100 * here, before we might perform the SQ reset.
2104 ret = mlx5_tx_error_cqe_handle
2105 (txq, (volatile struct mlx5_err_cqe *)cqe);
2106 if (unlikely(ret < 0)) {
2108 * Some error occurred on queue error
2109 * handling, we do not advance the index
2110 * here, so it can be retried on the next call.
2115 * We are going to fetch all entries with
2116 * MLX5_CQE_SYNDROME_WR_FLUSH_ERR status.
2117 * The send queue is supposed to be empty.
2124 /* Normal transmit completion. */
2125 assert(ci != txq->cq_pi);
2126 assert((txq->fcqs[ci & txq->cqe_m] >> 16) == cqe->wqe_counter);
2130 * We have to restrict the amount of processed CQEs
2131 * in one tx_burst routine call. The CQ may be large
2132 * and many CQEs may be updated by the NIC in one
2133 * transaction. Freeing buffers is time consuming,
2134 * multiple iterations may introduce significant
2137 if (likely(--count == 0))
2140 if (likely(ci != txq->cq_ci)) {
2142 * Update completion queue consuming index
2143 * and ring doorbell to notify hardware.
2145 rte_compiler_barrier();
2147 *txq->cq_db = rte_cpu_to_be_32(ci);
2148 mlx5_tx_comp_flush(txq, last_cqe, olx);
2153 * Check if the completion request flag should be set in the last WQE.
2154 * Both pushed mbufs and WQEs are monitored and the completion request
2155 * flag is set if any of the thresholds is reached.
2158 * Pointer to TX queue structure.
2160 * Pointer to burst routine local context.
2162 * Configured Tx offloads mask. It is fully defined at
2163 * compile time and may be used for optimization.
2165 static __rte_always_inline void
2166 mlx5_tx_request_completion(struct mlx5_txq_data *restrict txq,
2167 struct mlx5_txq_local *restrict loc,
2170 uint16_t head = txq->elts_head;
2173 part = MLX5_TXOFF_CONFIG(INLINE) ?
2174 0 : loc->pkts_sent - loc->pkts_copy;
2176 if ((uint16_t)(head - txq->elts_comp) >= MLX5_TX_COMP_THRESH ||
2177 (MLX5_TXOFF_CONFIG(INLINE) &&
2178 (uint16_t)(txq->wqe_ci - txq->wqe_comp) >= txq->wqe_thres)) {
2179 volatile struct mlx5_wqe *last = loc->wqe_last;
2181 txq->elts_comp = head;
2182 if (MLX5_TXOFF_CONFIG(INLINE))
2183 txq->wqe_comp = txq->wqe_ci;
2184 /* Request unconditional completion on last WQE. */
2185 last->cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
2186 MLX5_COMP_MODE_OFFSET);
2187 /* Save elts_head in the dedicated free-on-completion queue. */
2188 #ifdef RTE_LIBRTE_MLX5_DEBUG
2189 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head |
2190 (last->cseg.opcode >> 8) << 16;
2192 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head;
2194 /* A CQE slot must always be available. */
2195 assert((txq->cq_pi - txq->cq_ci) <= txq->cqe_s);
2200 * DPDK callback to check the status of a Tx descriptor.
2205 * The index of the descriptor in the ring.
2208 * The status of the Tx descriptor.
2211 mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
2213 struct mlx5_txq_data *restrict txq = tx_queue;
2216 mlx5_tx_handle_completion(txq, 0);
2217 used = txq->elts_head - txq->elts_tail;
2219 return RTE_ETH_TX_DESC_FULL;
2220 return RTE_ETH_TX_DESC_DONE;
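/*
 * Illustrative usage (assumed application code): this callback backs the
 * generic rte_eth_tx_descriptor_status() API, e.g.
 *
 *   int st = rte_eth_tx_descriptor_status(port_id, queue_id, offset);
 *
 * where RTE_ETH_TX_DESC_FULL means the descriptor is still in use by the
 * Tx queue and RTE_ETH_TX_DESC_DONE means it can be reused.
 */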
2224 * Build the Control Segment with specified opcode:
2225 * - MLX5_OPCODE_SEND
2226 * - MLX5_OPCODE_ENHANCED_MPSW
2230 * Pointer to TX queue structure.
2232 * Pointer to burst routine local context.
2234 * Pointer to WQE to fill with built Control Segment.
2236 * Supposed length of WQE in segments.
2238 * SQ WQE opcode to put into Control Segment.
2240 * Configured Tx offloads mask. It is fully defined at
2241 * compile time and may be used for optimization.
2243 static __rte_always_inline void
2244 mlx5_tx_cseg_init(struct mlx5_txq_data *restrict txq,
2245 struct mlx5_txq_local *restrict loc __rte_unused,
2246 struct mlx5_wqe *restrict wqe,
2248 unsigned int opcode,
2249 unsigned int olx __rte_unused)
2251 struct mlx5_wqe_cseg *restrict cs = &wqe->cseg;
2253 /* For legacy MPW replace the EMPW by TSO with modifier. */
2254 if (MLX5_TXOFF_CONFIG(MPW) && opcode == MLX5_OPCODE_ENHANCED_MPSW)
2255 opcode = MLX5_OPCODE_TSO | MLX5_OPC_MOD_MPW << 24;
2256 cs->opcode = rte_cpu_to_be_32((txq->wqe_ci << 8) | opcode);
2257 cs->sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
2258 cs->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
2259 MLX5_COMP_MODE_OFFSET);
2260 cs->misc = RTE_BE32(0);
2264 * Build the Ethernet Segment without inlined data.
2265 * Supports Software Parser, Checksums and VLAN
2266 * insertion Tx offload features.
2269 * Pointer to TX queue structure.
2271 * Pointer to burst routine local context.
2273 * Pointer to WQE to fill with built Ethernet Segment.
2275 * Configured Tx offloads mask. It is fully defined at
2276 * compile time and may be used for optimization.
2278 static __rte_always_inline void
2279 mlx5_tx_eseg_none(struct mlx5_txq_data *restrict txq __rte_unused,
2280 struct mlx5_txq_local *restrict loc,
2281 struct mlx5_wqe *restrict wqe,
2284 struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
2288 * Calculate and set checksum flags first, the dword field
2289 * in the segment may be shared with Software Parser flags.
2291 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2292 es->flags = rte_cpu_to_le_32(csum);
2294 * Calculate and set Software Parser offsets and flags.
2295 * These flags are set for custom UDP and IP tunnel packets.
2297 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2298 /* Fill metadata field if needed. */
2299 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2300 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2301 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2302 /* Engage VLAN tag insertion feature if requested. */
2303 if (MLX5_TXOFF_CONFIG(VLAN) &&
2304 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
2306 * We should get here only if the device supports
2307 * this feature correctly.
2309 assert(txq->vlan_en);
2310 es->inline_hdr = rte_cpu_to_be_32(MLX5_ETH_WQE_VLAN_INSERT |
2311 loc->mbuf->vlan_tci);
2313 es->inline_hdr = RTE_BE32(0);
2318 * Build the Ethernet Segment with minimal inlined data
2319 * of MLX5_ESEG_MIN_INLINE_SIZE bytes length. This is
2320 * used to fill the gap in single WQEBB WQEs.
2321 * Supports Software Parser, Checksums and VLAN
2322 * insertion Tx offload features.
2325 * Pointer to TX queue structure.
2327 * Pointer to burst routine local context.
2329 * Pointer to WQE to fill with built Ethernet Segment.
2331 * Length of VLAN tag insertion if any.
2333 * Configured Tx offloads mask. It is fully defined at
2334 * compile time and may be used for optimization.
2336 static __rte_always_inline void
2337 mlx5_tx_eseg_dmin(struct mlx5_txq_data *restrict txq __rte_unused,
2338 struct mlx5_txq_local *restrict loc,
2339 struct mlx5_wqe *restrict wqe,
2343 struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
2345 uint8_t *psrc, *pdst;
2348 * Calculate and set checksum flags first, the dword field
2349 * in the segment may be shared with the Software Parser flags.
2351 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2352 es->flags = rte_cpu_to_le_32(csum);
2354 * Calculate and set Software Parser offsets and flags.
2355 * These flags are set for custom UDP and IP tunnel packets.
2357 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2358 /* Fill metadata field if needed. */
2359 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2360 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2361 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2362 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2364 sizeof(rte_v128u32_t)),
2365 "invalid Ethernet Segment data size");
2366 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2368 sizeof(struct rte_vlan_hdr) +
2369 2 * RTE_ETHER_ADDR_LEN),
2370 "invalid Ethernet Segment data size");
2371 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
2372 es->inline_hdr_sz = RTE_BE16(MLX5_ESEG_MIN_INLINE_SIZE);
2373 es->inline_data = *(unaligned_uint16_t *)psrc;
2374 psrc += sizeof(uint16_t);
2375 pdst = (uint8_t *)(es + 1);
2376 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2377 /* Implement VLAN tag insertion as part of the inline data. */
2378 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
2379 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2380 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2381 /* Insert VLAN ethertype + VLAN tag. */
2382 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2383 ((RTE_ETHER_TYPE_VLAN << 16) |
2384 loc->mbuf->vlan_tci);
2385 pdst += sizeof(struct rte_vlan_hdr);
2386 /* Copy the remaining two bytes from the packet data. */
2387 assert(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
2388 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
2390 /* Fill the gap in the title WQEBB with inline data. */
2391 rte_mov16(pdst, psrc);
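/*
 * Worked layout of the 18-byte MLX5_ESEG_MIN_INLINE_SIZE gap fill
 * handled above: without VLAN insertion the first 18 bytes of the
 * frame are copied as-is (2 bytes into es->inline_data, 16 bytes into
 * the rest of the title WQEBB). With VLAN insertion the 12 bytes of
 * destination/source MAC addresses are copied, a 4-byte
 * RTE_ETHER_TYPE_VLAN (0x8100) + vlan_tci header is composed in
 * place, and the original 2-byte ethertype follows - again 18 bytes
 * in total, matching the static_asserts above.
 */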
2396 * Build the Ethernet Segment with entire packet
2397 * data inlining. Checks the boundary of WQEBB and
2398 * ring buffer wrapping, supports Software Parser,
2399 * Checksums and VLAN insertion Tx offload features.
2402 * Pointer to TX queue structure.
2404 * Pointer to burst routine local context.
2406 * Pointer to WQE to fill with built Ethernet Segment.
2408 * Length of VLAN tag insertion if any.
2410 * Length of data to inline (VLAN included, if any).
2412 * TSO flag, set mss field from the packet.
2414 * Configured Tx offloads mask. It is fully defined at
2415 * compile time and may be used for optimization.
2418 * Pointer to the next Data Segment (aligned and wrapped around).
2420 static __rte_always_inline struct mlx5_wqe_dseg *
2421 mlx5_tx_eseg_data(struct mlx5_txq_data *restrict txq,
2422 struct mlx5_txq_local *restrict loc,
2423 struct mlx5_wqe *restrict wqe,
2429 struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
2431 uint8_t *psrc, *pdst;
2435 * Calculate and set checksum flags first, the dword field
2436 * in the segment may be shared with the Software Parser flags.
2438 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2441 csum |= loc->mbuf->tso_segsz;
2442 es->flags = rte_cpu_to_be_32(csum);
2444 es->flags = rte_cpu_to_le_32(csum);
2447 * Calculate and set Software Parser offsets and flags.
2448 * These flags are set for custom UDP and IP tunnel packets.
2450 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2451 /* Fill metadata field if needed. */
2452 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2453 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2454 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2455 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2457 sizeof(rte_v128u32_t)),
2458 "invalid Ethernet Segment data size");
2459 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2461 sizeof(struct rte_vlan_hdr) +
2462 2 * RTE_ETHER_ADDR_LEN),
2463 "invalid Ethernet Segment data size");
2464 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
2465 es->inline_hdr_sz = rte_cpu_to_be_16(inlen);
2466 es->inline_data = *(unaligned_uint16_t *)psrc;
2467 psrc += sizeof(uint16_t);
2468 pdst = (uint8_t *)(es + 1);
2469 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2470 /* Implement VLAN tag insertion as part of the inline data. */
2471 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
2472 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2473 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2474 /* Insert VLAN ethertype + VLAN tag. */
2475 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2476 ((RTE_ETHER_TYPE_VLAN << 16) |
2477 loc->mbuf->vlan_tci);
2478 pdst += sizeof(struct rte_vlan_hdr);
2480 /* Copy the remaining two bytes from the packet data. */
2480 assert(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
2481 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
2482 psrc += sizeof(uint16_t);
2484 /* Fill the gap in the title WQEBB with inline data. */
2485 rte_mov16(pdst, psrc);
2486 psrc += sizeof(rte_v128u32_t);
2488 pdst = (uint8_t *)(es + 2);
2489 assert(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
2490 assert(pdst < (uint8_t *)txq->wqes_end);
2491 inlen -= MLX5_ESEG_MIN_INLINE_SIZE;
2493 assert(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
2494 return (struct mlx5_wqe_dseg *)pdst;
2497 * The WQEBB space availability is checked by caller.
2498 * Here we should be aware of WQE ring buffer wraparound only.
2500 part = (uint8_t *)txq->wqes_end - pdst;
2501 part = RTE_MIN(part, inlen);
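/*
 * Example of the split copy below: if 40 bytes are still to be
 * inlined but only 32 bytes remain before txq->wqes_end, part is
 * clamped to 32, those bytes are copied, pdst wraps to txq->wqes and
 * the remaining 8 bytes are copied on the next loop iteration.
 */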
2503 rte_memcpy(pdst, psrc, part);
2505 if (likely(!inlen)) {
2507 * If the return value is not used by the caller,
2508 * the code below will be optimized out.
2511 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2512 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
2513 pdst = (uint8_t *)txq->wqes;
2514 return (struct mlx5_wqe_dseg *)pdst;
2516 pdst = (uint8_t *)txq->wqes;
2523 * Copy data from the chain of mbufs to the specified linear buffer.
2524 * If the data from some mbuf is copied completely, this mbuf is
2525 * freed. The local structure is used to keep the byte stream
2526 * state.
2529 * Pointer to the destination linear buffer.
2531 * Pointer to burst routine local context.
2533 * Length of data to be copied.
2535 * Length of data to be copied ignoring no inline hint.
2537 * Configured Tx offloads mask. It is fully defined at
2538 * compile time and may be used for optimization.
2541 * Number of actually copied data bytes. This is always greater than
2542 * or equal to the must parameter and might be less than len if the
2543 * no-inline hint flag is encountered.
2545 static __rte_always_inline unsigned int
2546 mlx5_tx_mseg_memcpy(uint8_t *pdst,
2547 struct mlx5_txq_local *restrict loc,
2550 unsigned int olx __rte_unused)
2552 struct rte_mbuf *mbuf;
2553 unsigned int part, dlen, copy = 0;
2557 assert(must <= len);
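/*
 * Copy contract sketch: at least "must" bytes are always copied, up
 * to "len" bytes may be copied; if an mbuf carrying the
 * PKT_TX_DYNF_NOINLINE hint is reached after "must" is already
 * satisfied, the copy stops early. E.g. with len = 128 and must = 36,
 * hitting a no-inline mbuf after 48 copied bytes would return 48, not
 * 128.
 */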
2559 /* Allow zero length packets, must check first. */
2560 dlen = rte_pktmbuf_data_len(loc->mbuf);
2561 if (dlen <= loc->mbuf_off) {
2562 /* Exhausted packet, just free. */
2564 loc->mbuf = mbuf->next;
2565 rte_pktmbuf_free_seg(mbuf);
2567 assert(loc->mbuf_nseg > 1);
2570 if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
2575 * We already copied the minimal
2576 * requested amount of data.
2581 if (diff <= rte_pktmbuf_data_len(loc->mbuf)) {
2583 * Copy only the minimal required
2584 * part of the data buffer.
2591 dlen -= loc->mbuf_off;
2592 psrc = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
2594 part = RTE_MIN(len, dlen);
2595 rte_memcpy(pdst, psrc, part);
2597 loc->mbuf_off += part;
2600 if (loc->mbuf_off >= rte_pktmbuf_data_len(loc->mbuf)) {
2602 /* Exhausted packet, just free. */
2604 loc->mbuf = mbuf->next;
2605 rte_pktmbuf_free_seg(mbuf);
2607 assert(loc->mbuf_nseg >= 1);
2617 * Build the Ethernet Segment with inlined data from
2618 * multi-segment packet. Checks the boundary of WQEBB
2619 * and ring buffer wrapping, supports Software Parser,
2620 * Checksums and VLAN insertion Tx offload features.
2623 * Pointer to TX queue structure.
2625 * Pointer to burst routine local context.
2627 * Pointer to WQE to fill with built Ethernet Segment.
2629 * Length of VLAN tag insertion if any.
2631 * Length of data to inline (VLAN included, if any).
2633 * TSO flag, set mss field from the packet.
2635 * Configured Tx offloads mask. It is fully defined at
2636 * compile time and may be used for optimization.
2639 * Pointer to the next Data Segment (aligned and
2640 * possibly NOT wrapped around - the caller should do
2641 * the wrapping check on its own).
2643 static __rte_always_inline struct mlx5_wqe_dseg *
2644 mlx5_tx_eseg_mdat(struct mlx5_txq_data *restrict txq,
2645 struct mlx5_txq_local *restrict loc,
2646 struct mlx5_wqe *restrict wqe,
2652 struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
2655 unsigned int part, tlen = 0;
2658 * Calculate and set checksum flags first, the uint32_t field
2659 * in the segment may be shared with the Software Parser flags.
2661 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2664 csum |= loc->mbuf->tso_segsz;
2665 es->flags = rte_cpu_to_be_32(csum);
2667 es->flags = rte_cpu_to_le_32(csum);
2670 * Calculate and set Software Parser offsets and flags.
2671 * These flags are set for custom UDP and IP tunnel packets.
2673 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2674 /* Fill metadata field if needed. */
2675 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2676 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2677 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2678 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2680 sizeof(rte_v128u32_t)),
2681 "invalid Ethernet Segment data size");
2682 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2684 sizeof(struct rte_vlan_hdr) +
2685 2 * RTE_ETHER_ADDR_LEN),
2686 "invalid Ethernet Segment data size");
2687 assert(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
2688 pdst = (uint8_t *)&es->inline_data;
2689 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2690 /* Implement VLAN tag insertion as part of the inline data. */
2691 mlx5_tx_mseg_memcpy(pdst, loc,
2692 2 * RTE_ETHER_ADDR_LEN,
2693 2 * RTE_ETHER_ADDR_LEN, olx);
2694 pdst += 2 * RTE_ETHER_ADDR_LEN;
2695 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2696 ((RTE_ETHER_TYPE_VLAN << 16) |
2697 loc->mbuf->vlan_tci);
2698 pdst += sizeof(struct rte_vlan_hdr);
2699 tlen += 2 * RTE_ETHER_ADDR_LEN + sizeof(struct rte_vlan_hdr);
2701 assert(pdst < (uint8_t *)txq->wqes_end);
2703 * The WQEBB space availability is checked by caller.
2704 * Here we should be aware of WQE ring buffer wraparound only.
2706 part = (uint8_t *)txq->wqes_end - pdst;
2707 part = RTE_MIN(part, inlen - tlen);
2713 * Copying may be interrupted inside the routine
2714 * if it runs into the no-inline hint flag.
2716 copy = tlen >= txq->inlen_mode ? 0 : (txq->inlen_mode - tlen);
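/*
 * Example of the "must" computation above: with tlen bytes already
 * placed into the Ethernet Segment and txq->inlen_mode as the
 * mandatory inline length, copy is the number of bytes that still
 * have to be inlined - e.g. inlen_mode = 64 and tlen = 18 gives a
 * mandatory copy of 46 bytes, while tlen >= inlen_mode makes the rest
 * of the inlining best-effort (copy = 0), bounded by part.
 */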
2717 copy = mlx5_tx_mseg_memcpy(pdst, loc, part, copy, olx);
2719 if (likely(inlen <= tlen) || copy < part) {
2720 es->inline_hdr_sz = rte_cpu_to_be_16(tlen);
2722 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2723 return (struct mlx5_wqe_dseg *)pdst;
2725 pdst = (uint8_t *)txq->wqes;
2726 part = inlen - tlen;
2731 * Build the Data Segment of pointer type.
2734 * Pointer to TX queue structure.
2736 * Pointer to burst routine local context.
2738 * Pointer to WQE to fill with built Data Segment.
2740 * Data buffer to point.
2742 * Data buffer length.
2744 * Configured Tx offloads mask. It is fully defined at
2745 * compile time and may be used for optimization.
2747 static __rte_always_inline void
2748 mlx5_tx_dseg_ptr(struct mlx5_txq_data *restrict txq,
2749 struct mlx5_txq_local *restrict loc,
2750 struct mlx5_wqe_dseg *restrict dseg,
2753 unsigned int olx __rte_unused)
2757 dseg->bcount = rte_cpu_to_be_32(len);
2758 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
2759 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
2763 * Build the Data Segment of pointer type or inline
2764 * if data length is less than buffer in minimal
2765 * Data Segment size.
2768 * Pointer to TX queue structure.
2770 * Pointer to burst routine local context.
2772 * Pointer to WQE to fill with built Data Segment.
2774 * Data buffer to point.
2776 * Data buffer length.
2778 * Configured Tx offloads mask. It is fully defined at
2779 * compile time and may be used for optimization.
2781 static __rte_always_inline void
2782 mlx5_tx_dseg_iptr(struct mlx5_txq_data *restrict txq,
2783 struct mlx5_txq_local *restrict loc,
2784 struct mlx5_wqe_dseg *restrict dseg,
2787 unsigned int olx __rte_unused)
2793 if (len > MLX5_DSEG_MIN_INLINE_SIZE) {
2794 dseg->bcount = rte_cpu_to_be_32(len);
2795 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
2796 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
2800 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
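/*
 * In this branch the data is short enough to be placed directly into
 * the Data Segment: the MLX5_ETH_WQE_DATA_INLINE bit in the byte
 * count tells the hardware to treat the bytes following bcount as
 * immediate data instead of an lkey/address pair, e.g. len = 8 is
 * encoded as rte_cpu_to_be_32(8 | MLX5_ETH_WQE_DATA_INLINE).
 */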
2801 /* Unrolled implementation of generic rte_memcpy. */
2802 dst = (uintptr_t)&dseg->inline_data[0];
2803 src = (uintptr_t)buf;
2805 #ifdef RTE_ARCH_STRICT_ALIGN
2806 assert(dst == RTE_PTR_ALIGN(dst, sizeof(uint32_t)));
2807 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
2808 dst += sizeof(uint32_t);
2809 src += sizeof(uint32_t);
2810 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
2811 dst += sizeof(uint32_t);
2812 src += sizeof(uint32_t);
2814 *(uint64_t *)dst = *(unaligned_uint64_t *)src;
2815 dst += sizeof(uint64_t);
2816 src += sizeof(uint64_t);
2820 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
2821 dst += sizeof(uint32_t);
2822 src += sizeof(uint32_t);
2825 *(uint16_t *)dst = *(unaligned_uint16_t *)src;
2826 dst += sizeof(uint16_t);
2827 src += sizeof(uint16_t);
2830 *(uint8_t *)dst = *(uint8_t *)src;
2834 * Build the Data Segment of inlined data from single
2835 * segment packet, no VLAN insertion.
2838 * Pointer to TX queue structure.
2840 * Pointer to burst routine local context.
2842 * Pointer to WQE to fill with built Data Segment.
2844 * Data buffer to point.
2846 * Data buffer length.
2848 * Configured Tx offloads mask. It is fully defined at
2849 * compile time and may be used for optimization.
2852 * Pointer to the next Data Segment after inlined data.
2853 * Ring buffer wraparound check is needed. We do not
2854 * do it here because it may not be needed for the
2855 * last packet in the eMPW session.
2857 static __rte_always_inline struct mlx5_wqe_dseg *
2858 mlx5_tx_dseg_empw(struct mlx5_txq_data *restrict txq,
2859 struct mlx5_txq_local *restrict loc __rte_unused,
2860 struct mlx5_wqe_dseg *restrict dseg,
2863 unsigned int olx __rte_unused)
2868 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
2869 pdst = &dseg->inline_data[0];
2871 * The WQEBB space availability is checked by caller.
2872 * Here we should be aware of WQE ring buffer wraparound only.
2874 part = (uint8_t *)txq->wqes_end - pdst;
2875 part = RTE_MIN(part, len);
2877 rte_memcpy(pdst, buf, part);
2881 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2882 /* Note: no final wraparound check here. */
2883 return (struct mlx5_wqe_dseg *)pdst;
2885 pdst = (uint8_t *)txq->wqes;
2892 * Build the Data Segment of inlined data from single
2893 * segment packet with VLAN insertion.
2896 * Pointer to TX queue structure.
2898 * Pointer to burst routine local context.
2900 * Pointer to the dseg fill with built Data Segment.
2902 * Data buffer to point.
2904 * Data buffer length.
2906 * Configured Tx offloads mask. It is fully defined at
2907 * compile time and may be used for optimization.
2910 * Pointer to the next Data Segment after inlined data.
2911 * Ring buffer wraparound check is needed.
2913 static __rte_always_inline struct mlx5_wqe_dseg *
2914 mlx5_tx_dseg_vlan(struct mlx5_txq_data *restrict txq,
2915 struct mlx5_txq_local *restrict loc __rte_unused,
2916 struct mlx5_wqe_dseg *restrict dseg,
2919 unsigned int olx __rte_unused)
2925 assert(len > MLX5_ESEG_MIN_INLINE_SIZE);
2926 static_assert(MLX5_DSEG_MIN_INLINE_SIZE ==
2927 (2 * RTE_ETHER_ADDR_LEN),
2928 "invalid Data Segment data size");
2929 dseg->bcount = rte_cpu_to_be_32((len + sizeof(struct rte_vlan_hdr)) |
2930 MLX5_ETH_WQE_DATA_INLINE);
2931 pdst = &dseg->inline_data[0];
2932 memcpy(pdst, buf, MLX5_DSEG_MIN_INLINE_SIZE);
2933 buf += MLX5_DSEG_MIN_INLINE_SIZE;
2934 pdst += MLX5_DSEG_MIN_INLINE_SIZE;
2935 len -= MLX5_DSEG_MIN_INLINE_SIZE;
2936 /* Insert VLAN ethertype + VLAN tag. Pointer is aligned. */
2937 assert(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
2938 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
2939 pdst = (uint8_t *)txq->wqes;
2940 *(uint32_t *)pdst = rte_cpu_to_be_32((RTE_ETHER_TYPE_VLAN << 16) |
2941 loc->mbuf->vlan_tci);
2942 pdst += sizeof(struct rte_vlan_hdr);
2944 * The WQEBB space availability is checked by caller.
2945 * Here we should be aware of WQE ring buffer wraparound only.
2947 part = (uint8_t *)txq->wqes_end - pdst;
2948 part = RTE_MIN(part, len);
2950 rte_memcpy(pdst, buf, part);
2954 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2955 /* Note: no final wraparound check here. */
2956 return (struct mlx5_wqe_dseg *)pdst;
2958 pdst = (uint8_t *)txq->wqes;
2965 * Build the Ethernet Segment with optionally inlined data with
2966 * VLAN insertion and following Data Segments (if any) from
2967 * multi-segment packet. Used by ordinary send and TSO.
2970 * Pointer to TX queue structure.
2972 * Pointer to burst routine local context.
2974 * Pointer to WQE to fill with built Ethernet/Data Segments.
2976 * Length of VLAN header to insert, 0 means no VLAN insertion.
2978 * Data length to inline. For TSO this parameter specifies the
2979 * exact value; for the ordinary send routine it can be aligned by
2980 * the caller to provide better WQE space saving and data buffer
2981 * start address alignment. This length includes VLAN header
2984 * Zero means ordinary send, inlined data can be extended,
2985 * otherwise this is TSO, inlined data length is fixed.
2987 * Configured Tx offloads mask. It is fully defined at
2988 * compile time and may be used for optimization.
2991 * Actual size of built WQE in segments.
2993 static __rte_always_inline unsigned int
2994 mlx5_tx_mseg_build(struct mlx5_txq_data *restrict txq,
2995 struct mlx5_txq_local *restrict loc,
2996 struct mlx5_wqe *restrict wqe,
3000 unsigned int olx __rte_unused)
3002 struct mlx5_wqe_dseg *restrict dseg;
3005 assert((rte_pktmbuf_pkt_len(loc->mbuf) + vlan) >= inlen);
3006 loc->mbuf_nseg = NB_SEGS(loc->mbuf);
3009 dseg = mlx5_tx_eseg_mdat(txq, loc, wqe, vlan, inlen, tso, olx);
3010 if (!loc->mbuf_nseg)
3013 * There are still some mbufs remaining, not inlined.
3014 * The first mbuf may be partially inlined and we
3015 * must process the possible non-zero data offset.
3017 if (loc->mbuf_off) {
3022 * Exhausted packets must have been dropped before.
3023 * A non-zero offset means there is some data
3024 * remaining in the packet.
3026 assert(loc->mbuf_off < rte_pktmbuf_data_len(loc->mbuf));
3027 assert(rte_pktmbuf_data_len(loc->mbuf));
3028 dptr = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
3030 dlen = rte_pktmbuf_data_len(loc->mbuf) - loc->mbuf_off;
3032 * Build the pointer/minimal data Data Segment.
3033 * Do ring buffer wrapping check in advance.
3035 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3036 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3037 mlx5_tx_dseg_iptr(txq, loc, dseg, dptr, dlen, olx);
3038 /* Store the mbuf to be freed on completion. */
3039 assert(loc->elts_free);
3040 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3043 if (--loc->mbuf_nseg == 0)
3045 loc->mbuf = loc->mbuf->next;
3049 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
3050 struct rte_mbuf *mbuf;
3052 /* Zero length segment found, just skip. */
3054 loc->mbuf = loc->mbuf->next;
3055 rte_pktmbuf_free_seg(mbuf);
3056 if (--loc->mbuf_nseg == 0)
3059 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3060 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3063 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3064 rte_pktmbuf_data_len(loc->mbuf), olx);
3065 assert(loc->elts_free);
3066 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3069 if (--loc->mbuf_nseg == 0)
3071 loc->mbuf = loc->mbuf->next;
3076 /* Calculate actual segments used from the dseg pointer. */
3077 if ((uintptr_t)wqe < (uintptr_t)dseg)
3078 ds = ((uintptr_t)dseg - (uintptr_t)wqe) / MLX5_WSEG_SIZE;
3080 ds = (((uintptr_t)dseg - (uintptr_t)wqe) +
3081 txq->wqe_s * MLX5_WQE_SIZE) / MLX5_WSEG_SIZE;
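/*
 * Worked example of the wrapped-around case above, assuming a ring of
 * txq->wqe_s = 256 WQEBBs (64-byte MLX5_WQE_SIZE each): if wqe sits
 * at byte offset 16320 and dseg has wrapped to byte offset 32, then
 * ds = (32 - 16320 + 256 * 64) / 16 = 96 / 16 = 6 segments.
 */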
3086 * Tx one packet function for multi-segment TSO. Supports all
3087 * types of Tx offloads, uses MLX5_OPCODE_TSO to build WQEs,
3088 * sends one packet per WQE.
3090 * This routine is responsible for storing the processed mbuf
3091 * into the elts ring buffer and updating elts_head.
3094 * Pointer to TX queue structure.
3096 * Pointer to burst routine local context.
3098 * Configured Tx offloads mask. It is fully defined at
3099 * compile time and may be used for optimization.
3102 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3103 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3104 * Local context variables partially updated.
3106 static __rte_always_inline enum mlx5_txcmp_code
3107 mlx5_tx_packet_multi_tso(struct mlx5_txq_data *restrict txq,
3108 struct mlx5_txq_local *restrict loc,
3111 struct mlx5_wqe *restrict wqe;
3112 unsigned int ds, dlen, inlen, ntcp, vlan = 0;
3115 * Calculate data length to be inlined to estimate
3116 * the required space in WQE ring buffer.
3118 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
3119 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3120 vlan = sizeof(struct rte_vlan_hdr);
3121 inlen = loc->mbuf->l2_len + vlan +
3122 loc->mbuf->l3_len + loc->mbuf->l4_len;
3123 if (unlikely((!inlen || !loc->mbuf->tso_segsz)))
3124 return MLX5_TXCMP_CODE_ERROR;
3125 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
3126 inlen += loc->mbuf->outer_l2_len + loc->mbuf->outer_l3_len;
3127 /* Packet must contain all TSO headers. */
3128 if (unlikely(inlen > MLX5_MAX_TSO_HEADER ||
3129 inlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
3130 inlen > (dlen + vlan)))
3131 return MLX5_TXCMP_CODE_ERROR;
3132 assert(inlen >= txq->inlen_mode);
3134 * Check whether there are enough free WQEBBs:
3136 * - Ethernet Segment
3137 * - First Segment of inlined Ethernet data
3138 * - ... data continued ...
3139 * - Data Segments of pointer/min inline type
3141 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
3142 MLX5_ESEG_MIN_INLINE_SIZE +
3144 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
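/*
 * Rough accounting behind the conservative estimate above: the "+ 2"
 * covers the Control and Ethernet Segments, one segment is reserved
 * per mbuf pointer (NB_SEGS), and the inlined TSO header bytes beyond
 * the minimal 18-byte inline part consume additional 16-byte
 * MLX5_WSEG_SIZE units. The resulting ds value is converted to WQEBBs
 * as (ds + 3) / 4 for the free-space and maximal WQE size checks
 * below.
 */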
3145 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3146 return MLX5_TXCMP_CODE_EXIT;
3147 /* Check for maximal WQE size. */
3148 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3149 return MLX5_TXCMP_CODE_ERROR;
3150 #ifdef MLX5_PMD_SOFT_COUNTERS
3151 /* Update sent data bytes/packets counters. */
3152 ntcp = (dlen - (inlen - vlan) + loc->mbuf->tso_segsz - 1) /
3153 loc->mbuf->tso_segsz;
3155 * One more packet will be accounted for the mbuf itself
3156 * at the end of mlx5_tx_burst() from the
3157 * loc->pkts_sent field.
3160 txq->stats.opackets += ntcp;
3161 txq->stats.obytes += dlen + vlan + ntcp * inlen;
3163 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3164 loc->wqe_last = wqe;
3165 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_TSO, olx);
3166 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 1, olx);
3167 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3168 txq->wqe_ci += (ds + 3) / 4;
3169 loc->wqe_free -= (ds + 3) / 4;
3170 return MLX5_TXCMP_CODE_MULTI;
3174 * Tx one packet function for multi-segment SEND. Supports all
3175 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
3176 * sends one packet per WQE, without any data inlining in the Ethernet Segment.
3179 * This routine is responsible for storing the processed mbuf
3180 * into the elts ring buffer and updating elts_head.
3183 * Pointer to TX queue structure.
3185 * Pointer to burst routine local context.
3187 * Configured Tx offloads mask. It is fully defined at
3188 * compile time and may be used for optimization.
3191 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3192 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3193 * Local context variables partially updated.
3195 static __rte_always_inline enum mlx5_txcmp_code
3196 mlx5_tx_packet_multi_send(struct mlx5_txq_data *restrict txq,
3197 struct mlx5_txq_local *restrict loc,
3200 struct mlx5_wqe_dseg *restrict dseg;
3201 struct mlx5_wqe *restrict wqe;
3202 unsigned int ds, nseg;
3204 assert(NB_SEGS(loc->mbuf) > 1);
3206 * No inlining at all - this means that saving CPU cycles
3207 * was prioritized at configuration time, so we should not
3208 * copy any packet data into the WQE.
3210 nseg = NB_SEGS(loc->mbuf);
3212 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3213 return MLX5_TXCMP_CODE_EXIT;
3214 /* Check for maximal WQE size. */
3215 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3216 return MLX5_TXCMP_CODE_ERROR;
3218 * Some Tx offloads may cause an error if the
3219 * packet is not long enough, check against the
3220 * assumed minimal length.
3222 if (rte_pktmbuf_pkt_len(loc->mbuf) <= MLX5_ESEG_MIN_INLINE_SIZE)
3223 return MLX5_TXCMP_CODE_ERROR;
3224 #ifdef MLX5_PMD_SOFT_COUNTERS
3225 /* Update sent data bytes counter. */
3226 txq->stats.obytes += rte_pktmbuf_pkt_len(loc->mbuf);
3227 if (MLX5_TXOFF_CONFIG(VLAN) &&
3228 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3229 txq->stats.obytes += sizeof(struct rte_vlan_hdr);
3232 * SEND WQE, one WQEBB:
3233 * - Control Segment, SEND opcode
3234 * - Ethernet Segment, optional VLAN, no inline
3235 * - Data Segments, pointer only type
3237 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3238 loc->wqe_last = wqe;
3239 mlx5_tx_cseg_init(txq, loc, wqe, ds, MLX5_OPCODE_SEND, olx);
3240 mlx5_tx_eseg_none(txq, loc, wqe, olx);
3241 dseg = &wqe->dseg[0];
3243 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
3244 struct rte_mbuf *mbuf;
3247 * Zero-length segment found, have to
3248 * correct the total size of the WQE in segments.
3249 * It is supposed to be a rare occasion, so
3250 * in the normal case (no zero-length segments)
3251 * we avoid extra writes to the Control Segment.
3255 wqe->cseg.sq_ds -= RTE_BE32(1);
3257 loc->mbuf = mbuf->next;
3258 rte_pktmbuf_free_seg(mbuf);
3264 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3265 rte_pktmbuf_data_len(loc->mbuf), olx);
3266 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3271 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3272 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3273 loc->mbuf = loc->mbuf->next;
3276 txq->wqe_ci += (ds + 3) / 4;
3277 loc->wqe_free -= (ds + 3) / 4;
3278 return MLX5_TXCMP_CODE_MULTI;
3282 * Tx one packet function for multi-segment SEND. Supports all
3283 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
3284 * sends one packet per WQE, with data inlining in
3285 * Ethernet Segment and minimal Data Segments.
3287 * This routine is responsible for storing the processed mbuf
3288 * into the elts ring buffer and updating elts_head.
3291 * Pointer to TX queue structure.
3293 * Pointer to burst routine local context.
3295 * Configured Tx offloads mask. It is fully defined at
3296 * compile time and may be used for optimization.
3299 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3300 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3301 * Local context variables partially updated.
3303 static __rte_always_inline enum mlx5_txcmp_code
3304 mlx5_tx_packet_multi_inline(struct mlx5_txq_data *restrict txq,
3305 struct mlx5_txq_local *restrict loc,
3308 struct mlx5_wqe *restrict wqe;
3309 unsigned int ds, inlen, dlen, vlan = 0;
3311 assert(MLX5_TXOFF_CONFIG(INLINE));
3312 assert(NB_SEGS(loc->mbuf) > 1);
3314 * First calculate data length to be inlined
3315 * to estimate the required space for WQE.
3317 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
3318 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3319 vlan = sizeof(struct rte_vlan_hdr);
3320 inlen = dlen + vlan;
3321 /* Check against minimal length. */
3322 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
3323 return MLX5_TXCMP_CODE_ERROR;
3324 assert(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
3325 if (inlen > txq->inlen_send ||
3326 loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
3327 struct rte_mbuf *mbuf;
3332 * Packet length exceeds the allowed inline
3333 * data length, check whether the minimal
3334 * inlining is required.
3336 if (txq->inlen_mode) {
3337 assert(txq->inlen_mode >= MLX5_ESEG_MIN_INLINE_SIZE);
3338 assert(txq->inlen_mode <= txq->inlen_send);
3339 inlen = txq->inlen_mode;
3341 if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE ||
3342 !vlan || txq->vlan_en) {
3344 * VLAN insertion will be done by the HW.
3345 * It is not the most efficient way - the VLAN flag is
3346 * checked twice - but we must compute the
3347 * inlining length correctly and take into
3348 * account the VLAN header being inserted.
3350 return mlx5_tx_packet_multi_send
3353 inlen = MLX5_ESEG_MIN_INLINE_SIZE;
3356 * Now we know the minimal amount of data requested
3357 * to be inlined. Check whether we should inline the buffers
3358 * from the beginning of the chain to eliminate some mbufs.
3361 nxlen = rte_pktmbuf_data_len(mbuf);
3362 if (unlikely(nxlen <= txq->inlen_send)) {
3363 /* We can inline first mbuf at least. */
3364 if (nxlen < inlen) {
3367 /* Scan mbufs till inlen filled. */
3372 nxlen = rte_pktmbuf_data_len(mbuf);
3374 } while (unlikely(nxlen < inlen));
3375 if (unlikely(nxlen > txq->inlen_send)) {
3376 /* We cannot inline entire mbuf. */
3377 smlen = inlen - smlen;
3378 start = rte_pktmbuf_mtod_offset
3379 (mbuf, uintptr_t, smlen);
3386 /* We should not reach the end of the packet here. */
3388 nxlen = inlen + rte_pktmbuf_data_len(mbuf);
3389 } while (unlikely(nxlen < txq->inlen_send));
3391 start = rte_pktmbuf_mtod(mbuf, uintptr_t);
3393 * Check whether we can inline more data to align the start
3394 * address of the data buffer to a cacheline.
3397 start = (~start + 1) & (RTE_CACHE_LINE_SIZE - 1);
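/*
 * The expression above yields the distance from "start" to the next
 * cacheline boundary: e.g. if the not-yet-inlined data would start at
 * an address ending in 0x38, with 64-byte cachelines the result is
 * (~0x38 + 1) & 0x3f = 8, so extending the inline part by 8 bytes
 * (when it still fits into inlen_send) would leave the remainder
 * cacheline aligned.
 */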
3398 if (unlikely(start)) {
3400 if (start <= txq->inlen_send)
3405 * Check whether there are enough free WQEBBs:
3407 * - Ethernet Segment
3408 * - First Segment of inlined Ethernet data
3409 * - ... data continued ...
3410 * - Data Segments of pointer/min inline type
3412 * Estimate the number of Data Segments conservatively,
3413 * supposing no mbufs are being freed during inlining.
3415 assert(inlen <= txq->inlen_send);
3416 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
3417 MLX5_ESEG_MIN_INLINE_SIZE +
3419 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3420 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3421 return MLX5_TXCMP_CODE_EXIT;
3422 /* Check for maximal WQE size. */
3423 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3424 return MLX5_TXCMP_CODE_ERROR;
3425 #ifdef MLX5_PMD_SOFT_COUNTERS
3426 /* Update sent data bytes/packets counters. */
3427 txq->stats.obytes += dlen + vlan;
3429 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3430 loc->wqe_last = wqe;
3431 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_SEND, olx);
3432 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 0, olx);
3433 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3434 txq->wqe_ci += (ds + 3) / 4;
3435 loc->wqe_free -= (ds + 3) / 4;
3436 return MLX5_TXCMP_CODE_MULTI;
3440 * Tx burst function for multi-segment packets. Supports all
3441 * types of Tx offloads, uses MLX5_OPCODE_SEND/TSO to build WQEs,
3442 * sends one packet per WQE. The function stops sending if it
3443 * encounters a single-segment packet.
3445 * This routine is responsible for storing the processed mbuf
3446 * into the elts ring buffer and updating elts_head.
3449 * Pointer to TX queue structure.
3451 * Packets to transmit.
3453 * Number of packets in array.
3455 * Pointer to burst routine local context.
3457 * Configured Tx offloads mask. It is fully defined at
3458 * compile time and may be used for optimization.
3461 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3462 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3463 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
3464 * MLX5_TXCMP_CODE_TSO - TSO single-segment packet encountered.
3465 * Local context variables updated.
3467 static __rte_always_inline enum mlx5_txcmp_code
3468 mlx5_tx_burst_mseg(struct mlx5_txq_data *restrict txq,
3469 struct rte_mbuf **restrict pkts,
3470 unsigned int pkts_n,
3471 struct mlx5_txq_local *restrict loc,
3474 assert(loc->elts_free && loc->wqe_free);
3475 assert(pkts_n > loc->pkts_sent);
3476 pkts += loc->pkts_sent + 1;
3477 pkts_n -= loc->pkts_sent;
3479 enum mlx5_txcmp_code ret;
3481 assert(NB_SEGS(loc->mbuf) > 1);
3483 * Estimate the number of free elts quickly but
3484 * conservatively. Some segment may be fully inlined
3485 * and freed, ignore this here - a precise estimation is skipped.
3488 if (loc->elts_free < NB_SEGS(loc->mbuf))
3489 return MLX5_TXCMP_CODE_EXIT;
3490 if (MLX5_TXOFF_CONFIG(TSO) &&
3491 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)) {
3492 /* Proceed with multi-segment TSO. */
3493 ret = mlx5_tx_packet_multi_tso(txq, loc, olx);
3494 } else if (MLX5_TXOFF_CONFIG(INLINE)) {
3495 /* Proceed with multi-segment SEND with inlining. */
3496 ret = mlx5_tx_packet_multi_inline(txq, loc, olx);
3498 /* Proceed with multi-segment SEND w/o inlining. */
3499 ret = mlx5_tx_packet_multi_send(txq, loc, olx);
3501 if (ret == MLX5_TXCMP_CODE_EXIT)
3502 return MLX5_TXCMP_CODE_EXIT;
3503 if (ret == MLX5_TXCMP_CODE_ERROR)
3504 return MLX5_TXCMP_CODE_ERROR;
3505 /* WQE is built, go to the next packet. */
3508 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3509 return MLX5_TXCMP_CODE_EXIT;
3510 loc->mbuf = *pkts++;
3512 rte_prefetch0(*pkts);
3513 if (likely(NB_SEGS(loc->mbuf) > 1))
3515 /* Here ends the series of multi-segment packets. */
3516 if (MLX5_TXOFF_CONFIG(TSO) &&
3517 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
3518 return MLX5_TXCMP_CODE_TSO;
3519 return MLX5_TXCMP_CODE_SINGLE;
3525 * Tx burst function for single-segment packets with TSO.
3526 * Supports all types of Tx offloads, except multi-packets.
3527 * Uses MLX5_OPCODE_TSO to build WQEs, sends one packet per WQE.
3528 * The function stops sending if it encounters a multi-segment
3529 * packet or a packet without TSO requested.
3531 * The routine is responsible for storing the processed mbuf
3532 * into the elts ring buffer and updating elts_head if the inline
3533 * offload is requested, due to the possible early freeing
3534 * of the inlined mbufs (cannot store the pkts array in elts as a batch).
3538 * Pointer to TX queue structure.
3540 * Packets to transmit.
3542 * Number of packets in array.
3544 * Pointer to burst routine local context.
3546 * Configured Tx offloads mask. It is fully defined at
3547 * compile time and may be used for optimization.
3550 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3551 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3552 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
3553 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
3554 * Local context variables updated.
3556 static __rte_always_inline enum mlx5_txcmp_code
3557 mlx5_tx_burst_tso(struct mlx5_txq_data *restrict txq,
3558 struct rte_mbuf **restrict pkts,
3559 unsigned int pkts_n,
3560 struct mlx5_txq_local *restrict loc,
3563 assert(loc->elts_free && loc->wqe_free);
3564 assert(pkts_n > loc->pkts_sent);
3565 pkts += loc->pkts_sent + 1;
3566 pkts_n -= loc->pkts_sent;
3568 struct mlx5_wqe_dseg *restrict dseg;
3569 struct mlx5_wqe *restrict wqe;
3570 unsigned int ds, dlen, hlen, ntcp, vlan = 0;
3573 assert(NB_SEGS(loc->mbuf) == 1);
3574 dlen = rte_pktmbuf_data_len(loc->mbuf);
3575 if (MLX5_TXOFF_CONFIG(VLAN) &&
3576 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
3577 vlan = sizeof(struct rte_vlan_hdr);
3580 * First calculate the WQE size to check
3581 * whether we have enough space in ring buffer.
3583 hlen = loc->mbuf->l2_len + vlan +
3584 loc->mbuf->l3_len + loc->mbuf->l4_len;
3585 if (unlikely((!hlen || !loc->mbuf->tso_segsz)))
3586 return MLX5_TXCMP_CODE_ERROR;
3587 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
3588 hlen += loc->mbuf->outer_l2_len +
3589 loc->mbuf->outer_l3_len;
3590 /* Segment must contain all TSO headers. */
3591 if (unlikely(hlen > MLX5_MAX_TSO_HEADER ||
3592 hlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
3593 hlen > (dlen + vlan)))
3594 return MLX5_TXCMP_CODE_ERROR;
3596 * Check whether there are enough free WQEBBs:
3598 * - Ethernet Segment
3599 * - First Segment of inlined Ethernet data
3600 * - ... data continued ...
3601 * - Finishing Data Segment of pointer type
3603 ds = 4 + (hlen - MLX5_ESEG_MIN_INLINE_SIZE +
3604 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3605 if (loc->wqe_free < ((ds + 3) / 4))
3606 return MLX5_TXCMP_CODE_EXIT;
3607 #ifdef MLX5_PMD_SOFT_COUNTERS
3608 /* Update sent data bytes/packets counters. */
3609 ntcp = (dlen + vlan - hlen +
3610 loc->mbuf->tso_segsz - 1) /
3611 loc->mbuf->tso_segsz;
3613 * One more packet will be accounted for the mbuf itself at the
3614 * end of mlx5_tx_burst() from the loc->pkts_sent field.
3617 txq->stats.opackets += ntcp;
3618 txq->stats.obytes += dlen + vlan + ntcp * hlen;
3621 * Build the TSO WQE:
3623 * - Ethernet Segment with hlen bytes inlined
3624 * - Data Segment of pointer type
3626 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3627 loc->wqe_last = wqe;
3628 mlx5_tx_cseg_init(txq, loc, wqe, ds,
3629 MLX5_OPCODE_TSO, olx);
3630 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan, hlen, 1, olx);
3631 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) + hlen - vlan;
3632 dlen -= hlen - vlan;
3633 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
3635 * WQE is built, update the loop parameters
3636 * and go to the next packet.
3638 txq->wqe_ci += (ds + 3) / 4;
3639 loc->wqe_free -= (ds + 3) / 4;
3640 if (MLX5_TXOFF_CONFIG(INLINE))
3641 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3645 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3646 return MLX5_TXCMP_CODE_EXIT;
3647 loc->mbuf = *pkts++;
3649 rte_prefetch0(*pkts);
3650 if (MLX5_TXOFF_CONFIG(MULTI) &&
3651 unlikely(NB_SEGS(loc->mbuf) > 1))
3652 return MLX5_TXCMP_CODE_MULTI;
3653 if (likely(!(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)))
3654 return MLX5_TXCMP_CODE_SINGLE;
3655 /* Continue with the next TSO packet. */
3661 * Analyze the packet and select the best method to send.
3664 * Pointer to TX queue structure.
3666 * Pointer to burst routine local context.
3668 * Configured Tx offloads mask. It is fully defined at
3669 * compile time and may be used for optimization.
3671 * The predefined flag indicating whether to do the complete check
3672 * for multi-segment packets and TSO.
3675 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
3676 * MLX5_TXCMP_CODE_TSO - TSO required, use TSO/LSO.
3677 * MLX5_TXCMP_CODE_SINGLE - single-segment packet, use SEND.
3678 * MLX5_TXCMP_CODE_EMPW - single-segment packet, use MPW.
3680 static __rte_always_inline enum mlx5_txcmp_code
3681 mlx5_tx_able_to_empw(struct mlx5_txq_data *restrict txq,
3682 struct mlx5_txq_local *restrict loc,
3686 /* Check for multi-segment packet. */
3688 MLX5_TXOFF_CONFIG(MULTI) &&
3689 unlikely(NB_SEGS(loc->mbuf) > 1))
3690 return MLX5_TXCMP_CODE_MULTI;
3691 /* Check for TSO packet. */
3693 MLX5_TXOFF_CONFIG(TSO) &&
3694 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
3695 return MLX5_TXCMP_CODE_TSO;
3696 /* Check if eMPW is enabled at all. */
3697 if (!MLX5_TXOFF_CONFIG(EMPW))
3698 return MLX5_TXCMP_CODE_SINGLE;
3699 /* Check if eMPW can be engaged. */
3700 if (MLX5_TXOFF_CONFIG(VLAN) &&
3701 unlikely(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) &&
3702 (!MLX5_TXOFF_CONFIG(INLINE) ||
3703 unlikely((rte_pktmbuf_data_len(loc->mbuf) +
3704 sizeof(struct rte_vlan_hdr)) > txq->inlen_empw))) {
3706 * eMPW does not support the VLAN insertion offload,
3707 * we would have to inline the entire packet, but the
3708 * packet is too long for inlining.
3710 return MLX5_TXCMP_CODE_SINGLE;
3712 return MLX5_TXCMP_CODE_EMPW;
3716 * Check whether the next packet attributes match the eMPW batch ones.
3717 * In addition, for legacy MPW the packet length is checked as well.
3720 * Pointer to TX queue structure.
3722 * Pointer to Ethernet Segment of eMPW batch.
3724 * Pointer to burst routine local context.
3726 * Length of previous packet in MPW descriptor.
3728 * Configured Tx offloads mask. It is fully defined at
3729 * compile time and may be used for optimization.
3732 * true - the packet matches the eMPW batch attributes.
3733 * false - no match, eMPW should be restarted.
3735 static __rte_always_inline bool
3736 mlx5_tx_match_empw(struct mlx5_txq_data *restrict txq __rte_unused,
3737 struct mlx5_wqe_eseg *restrict es,
3738 struct mlx5_txq_local *restrict loc,
3742 uint8_t swp_flags = 0;
3744 /* Compare the checksum flags, if any. */
3745 if (MLX5_TXOFF_CONFIG(CSUM) &&
3746 txq_ol_cksum_to_cs(loc->mbuf) != es->cs_flags)
3748 /* Compare the Software Parser offsets and flags. */
3749 if (MLX5_TXOFF_CONFIG(SWP) &&
3750 (es->swp_offs != txq_mbuf_to_swp(loc, &swp_flags, olx) ||
3751 es->swp_flags != swp_flags))
3753 /* Fill metadata field if needed. */
3754 if (MLX5_TXOFF_CONFIG(METADATA) &&
3755 es->metadata != (loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
3756 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0))
3758 /* Legacy MPW can send packets with the same length only. */
3759 if (MLX5_TXOFF_CONFIG(MPW) &&
3760 dlen != rte_pktmbuf_data_len(loc->mbuf))
3762 /* There must be no VLAN packets in eMPW loop. */
3763 if (MLX5_TXOFF_CONFIG(VLAN))
3764 assert(!(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT));
3769 * Update send loop variables and WQE for eMPW loop
3770 * without data inlining. Number of Data Segments is
3771 * equal to the number of sent packets.
3774 * Pointer to TX queue structure.
3776 * Pointer to burst routine local context.
3778 * Number of packets/Data Segments.
3780 * Accumulated statistics, bytes sent.
3782 * Configured Tx offloads mask. It is fully defined at
3783 * compile time and may be used for optimization.
3789 static __rte_always_inline void
3790 mlx5_tx_sdone_empw(struct mlx5_txq_data *restrict txq,
3791 struct mlx5_txq_local *restrict loc,
3794 unsigned int olx __rte_unused)
3796 assert(!MLX5_TXOFF_CONFIG(INLINE));
3797 #ifdef MLX5_PMD_SOFT_COUNTERS
3798 /* Update sent data bytes counter. */
3799 txq->stats.obytes += slen;
3803 loc->elts_free -= ds;
3804 loc->pkts_sent += ds;
3806 loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3807 txq->wqe_ci += (ds + 3) / 4;
3808 loc->wqe_free -= (ds + 3) / 4;
3812 * Update send loop variables and WQE for eMPW loop
3813 * with data inlining. Gets the size of pushed descriptors
3814 * and data to the WQE.
3817 * Pointer to TX queue structure.
3819 * Pointer to burst routine local context.
3821 * Total size of descriptor/data in bytes.
3823 * Accumulated statistics, data bytes sent.
3825 * Configured Tx offloads mask. It is fully defined at
3826 * compile time and may be used for optimization.
3832 static __rte_always_inline void
3833 mlx5_tx_idone_empw(struct mlx5_txq_data *restrict txq,
3834 struct mlx5_txq_local *restrict loc,
3837 unsigned int olx __rte_unused)
3839 assert(MLX5_TXOFF_CONFIG(INLINE));
3840 assert((len % MLX5_WSEG_SIZE) == 0);
3841 #ifdef MLX5_PMD_SOFT_COUNTERS
3842 /* Update sent data bytes counter. */
3843 txq->stats.obytes += slen;
3847 len = len / MLX5_WSEG_SIZE + 2;
3848 loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | len);
3849 txq->wqe_ci += (len + 3) / 4;
3850 loc->wqe_free -= (len + 3) / 4;
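/*
 * Example of the conversion above: 96 bytes of inlined descriptors
 * and data give len / 16 + 2 = 8 segments (the extra 2 accounting for
 * the Control and Ethernet Segments of the title WQEBB), hence
 * (8 + 3) / 4 = 2 WQEBBs are consumed.
 */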
3854 * The set of Tx burst functions for single-segment packets
3855 * without TSO and with Multi-Packet Writing feature support.
3856 * Supports all types of Tx offloads, except multi-packets
3859 * Uses MLX5_OPCODE_EMPW to build WQEs if possible and sends
3860 * as many packets per WQE as it can. If eMPW is not configured
3861 * or the packet cannot be sent with eMPW (VLAN insertion), the
3862 * ordinary SEND opcode is used and only one packet is placed into the WQE.
3865 * The functions stop sending if they encounter a multi-segment
3866 * packet or a packet with TSO requested.
3868 * The routines are responsible for storing the processed mbuf
3869 * into the elts ring buffer and updating elts_head if the inlining
3870 * offload is requested. Otherwise copying mbufs to elts
3871 * can be postponed and completed at the end of the burst routine.
3874 * Pointer to TX queue structure.
3876 * Packets to transmit.
3878 * Number of packets in array.
3880 * Pointer to burst routine local context.
3882 * Configured Tx offloads mask. It is fully defined at
3883 * compile time and may be used for optimization.
3886 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3887 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3888 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
3889 * MLX5_TXCMP_CODE_TSO - TSO packet encountered.
3890 * MLX5_TXCMP_CODE_SINGLE - used inside functions set.
3891 * MLX5_TXCMP_CODE_EMPW - used inside functions set.
3893 * Local context variables updated.
3896 * The routine sends packets with MLX5_OPCODE_EMPW
3897 * without inlining, this is a dedicated optimized branch.
3898 * No VLAN insertion is supported.
3900 static __rte_always_inline enum mlx5_txcmp_code
3901 mlx5_tx_burst_empw_simple(struct mlx5_txq_data *restrict txq,
3902 struct rte_mbuf **restrict pkts,
3903 unsigned int pkts_n,
3904 struct mlx5_txq_local *restrict loc,
3908 * This subroutine is part of mlx5_tx_burst_single()
3909 * and sends single-segment packets with the eMPW opcode
3910 * without data inlining.
3912 assert(!MLX5_TXOFF_CONFIG(INLINE));
3913 assert(MLX5_TXOFF_CONFIG(EMPW));
3914 assert(loc->elts_free && loc->wqe_free);
3915 assert(pkts_n > loc->pkts_sent);
3916 static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
3917 pkts += loc->pkts_sent + 1;
3918 pkts_n -= loc->pkts_sent;
3920 struct mlx5_wqe_dseg *restrict dseg;
3921 struct mlx5_wqe_eseg *restrict eseg;
3922 enum mlx5_txcmp_code ret;
3923 unsigned int part, loop;
3924 unsigned int slen = 0;
3927 assert(NB_SEGS(loc->mbuf) == 1);
3928 part = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
3929 MLX5_MPW_MAX_PACKETS :
3930 MLX5_EMPW_MAX_PACKETS);
3931 if (unlikely(loc->elts_free < part)) {
3932 /* We do not have enough elts to save all mbufs. */
3933 if (unlikely(loc->elts_free < MLX5_EMPW_MIN_PACKETS))
3934 return MLX5_TXCMP_CODE_EXIT;
3935 /* But we are still able to send at least a minimal eMPW. */
3936 part = loc->elts_free;
3938 /* Check whether we have enough WQEs. */
3939 if (unlikely(loc->wqe_free < ((2 + part + 3) / 4))) {
3940 if (unlikely(loc->wqe_free <
3941 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
3942 return MLX5_TXCMP_CODE_EXIT;
3943 part = (loc->wqe_free * 4) - 2;
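/*
 * Capacity reasoning for the clamping above: the title WQEBB spends
 * two 16-byte segments on the Control and Ethernet Segments and every
 * packet adds one pointer Data Segment, so N free WQEBBs provide
 * 4 * N segments and at most 4 * N - 2 packets; e.g. wqe_free = 3
 * allows up to 10 packets, since (2 + 10 + 3) / 4 = 3 WQEBBs.
 */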
3945 if (likely(part > 1))
3946 rte_prefetch0(*pkts);
3947 loc->wqe_last = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3949 * Build eMPW title WQEBB:
3950 * - Control Segment, eMPW opcode
3951 * - Ethernet Segment, no inline
3953 mlx5_tx_cseg_init(txq, loc, loc->wqe_last, part + 2,
3954 MLX5_OPCODE_ENHANCED_MPSW, olx);
3955 mlx5_tx_eseg_none(txq, loc, loc->wqe_last,
3956 olx & ~MLX5_TXOFF_CONFIG_VLAN);
3957 eseg = &loc->wqe_last->eseg;
3958 dseg = &loc->wqe_last->dseg[0];
3960 /* Store the packet length for legacy MPW. */
3961 if (MLX5_TXOFF_CONFIG(MPW))
3962 eseg->mss = rte_cpu_to_be_16
3963 (rte_pktmbuf_data_len(loc->mbuf));
3965 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
3966 #ifdef MLX5_PMD_SOFT_COUNTERS
3967 /* Update sent data bytes counter. */
3972 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3974 if (unlikely(--loop == 0))
3976 loc->mbuf = *pkts++;
3977 if (likely(loop > 1))
3978 rte_prefetch0(*pkts);
3979 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
3981 * Unroll the completion code to avoid
3982 * returning a variable value - it results in
3983 * unoptimized subsequent checking in the caller.
3985 if (ret == MLX5_TXCMP_CODE_MULTI) {
3987 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3988 if (unlikely(!loc->elts_free ||
3990 return MLX5_TXCMP_CODE_EXIT;
3991 return MLX5_TXCMP_CODE_MULTI;
3993 assert(NB_SEGS(loc->mbuf) == 1);
3994 if (ret == MLX5_TXCMP_CODE_TSO) {
3996 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3997 if (unlikely(!loc->elts_free ||
3999 return MLX5_TXCMP_CODE_EXIT;
4000 return MLX5_TXCMP_CODE_TSO;
4002 if (ret == MLX5_TXCMP_CODE_SINGLE) {
4004 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4005 if (unlikely(!loc->elts_free ||
4007 return MLX5_TXCMP_CODE_EXIT;
4008 return MLX5_TXCMP_CODE_SINGLE;
4010 if (ret != MLX5_TXCMP_CODE_EMPW) {
4013 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4014 return MLX5_TXCMP_CODE_ERROR;
4017 * Check whether the packet parameters coincide
4018 * within the assumed eMPW batch:
4019 * - checksum settings
4021 * - software parser settings
4022 * - packet length (legacy MPW only)
4024 if (!mlx5_tx_match_empw(txq, eseg, loc, dlen, olx)) {
4027 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4028 if (unlikely(!loc->elts_free ||
4030 return MLX5_TXCMP_CODE_EXIT;
4034 /* Packet attributes match, continue the same eMPW. */
4036 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
4037 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
4039 /* eMPW is built successfully, update loop parameters. */
4041 assert(pkts_n >= part);
4042 #ifdef MLX5_PMD_SOFT_COUNTERS
4043 /* Update sent data bytes counter. */
4044 txq->stats.obytes += slen;
4046 loc->elts_free -= part;
4047 loc->pkts_sent += part;
4048 txq->wqe_ci += (2 + part + 3) / 4;
4049 loc->wqe_free -= (2 + part + 3) / 4;
4051 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
4052 return MLX5_TXCMP_CODE_EXIT;
4053 loc->mbuf = *pkts++;
4054 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4055 if (unlikely(ret != MLX5_TXCMP_CODE_EMPW))
4057 /* Continue sending eMPW batches. */
4063 * The routine sends packets with MLX5_OPCODE_EMPW
4064 * with inlining, optionally supports VLAN insertion.
4066 static __rte_always_inline enum mlx5_txcmp_code
4067 mlx5_tx_burst_empw_inline(struct mlx5_txq_data *restrict txq,
4068 struct rte_mbuf **restrict pkts,
4069 unsigned int pkts_n,
4070 struct mlx5_txq_local *restrict loc,
4074 * This subroutine is part of mlx5_tx_burst_single()
4075 * and sends single-segment packets with the eMPW opcode
4076 * with data inlining.
4078 assert(MLX5_TXOFF_CONFIG(INLINE));
4079 assert(MLX5_TXOFF_CONFIG(EMPW));
4080 assert(loc->elts_free && loc->wqe_free);
4081 assert(pkts_n > loc->pkts_sent);
4082 static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
4083 pkts += loc->pkts_sent + 1;
4084 pkts_n -= loc->pkts_sent;
4086 struct mlx5_wqe_dseg *restrict dseg;
4087 struct mlx5_wqe_eseg *restrict eseg;
4088 enum mlx5_txcmp_code ret;
4089 unsigned int room, part, nlim;
4090 unsigned int slen = 0;
4092 assert(NB_SEGS(loc->mbuf) == 1);
4094 * Limit the number of packets in one WQE
4095 * to improve CQE generation latency.
4097 nlim = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
4098 MLX5_MPW_INLINE_MAX_PACKETS :
4099 MLX5_EMPW_MAX_PACKETS);
4100 /* Check whether we have the minimal amount of WQEs. */
4101 if (unlikely(loc->wqe_free <
4102 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
4103 return MLX5_TXCMP_CODE_EXIT;
4104 if (likely(pkts_n > 1))
4105 rte_prefetch0(*pkts);
4106 loc->wqe_last = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4108 * Build eMPW title WQEBB:
4109 * - Control Segment, eMPW opcode, zero DS
4110 * - Ethernet Segment, no inline
4112 mlx5_tx_cseg_init(txq, loc, loc->wqe_last, 0,
4113 MLX5_OPCODE_ENHANCED_MPSW, olx);
4114 mlx5_tx_eseg_none(txq, loc, loc->wqe_last,
4115 olx & ~MLX5_TXOFF_CONFIG_VLAN);
4116 eseg = &loc->wqe_last->eseg;
4117 dseg = &loc->wqe_last->dseg[0];
4118 /* Store the packet length for legacy MPW. */
4119 if (MLX5_TXOFF_CONFIG(MPW))
4120 eseg->mss = rte_cpu_to_be_16
4121 (rte_pktmbuf_data_len(loc->mbuf));
4122 room = RTE_MIN(MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE,
4123 loc->wqe_free) * MLX5_WQE_SIZE -
4124 MLX5_WQE_CSEG_SIZE -
4126 /* Build the WQE while we have space, packets and resources. */
4129 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
4130 uint8_t *dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
4133 assert(room >= MLX5_WQE_DSEG_SIZE);
4134 assert((room % MLX5_WQE_DSEG_SIZE) == 0);
4135 assert((uintptr_t)dseg < (uintptr_t)txq->wqes_end);
4137 * Some Tx offloads may cause an error if the
4138 * packet is not long enough, check against the
4139 * assumed minimal length.
4141 if (unlikely(dlen <= MLX5_ESEG_MIN_INLINE_SIZE)) {
4143 if (unlikely(!part))
4144 return MLX5_TXCMP_CODE_ERROR;
4146 * We have some successfully built
4147 * packet Data Segments to send.
4149 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4150 return MLX5_TXCMP_CODE_ERROR;
4152 /* Inline or not inline - that's the Question. */
4153 if (dlen > txq->inlen_empw ||
4154 loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE)
4156 /* Inline entire packet, optional VLAN insertion. */
4157 tlen = sizeof(dseg->bcount) + dlen;
4158 if (MLX5_TXOFF_CONFIG(VLAN) &&
4159 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
4161 * The packet length has been checked in
4162 * mlx5_tx_able_to_empw() and the packet
4163 * is guaranteed to fit into the inline length.
4165 assert((dlen + sizeof(struct rte_vlan_hdr)) <=
4167 tlen += sizeof(struct rte_vlan_hdr);
4170 dseg = mlx5_tx_dseg_vlan(txq, loc, dseg,
4172 #ifdef MLX5_PMD_SOFT_COUNTERS
4173 /* Update sent data bytes counter. */
4174 slen += sizeof(struct rte_vlan_hdr);
4179 dseg = mlx5_tx_dseg_empw(txq, loc, dseg,
4182 tlen = RTE_ALIGN(tlen, MLX5_WSEG_SIZE);
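/*
 * Example of the inline accounting above: a 60-byte packet without
 * VLAN insertion gives tlen = 4 (bcount) + 60 = 64 bytes, already a
 * multiple of MLX5_WSEG_SIZE, i.e. four 16-byte segments of "room";
 * with VLAN insertion tlen = 4 + 60 + 4 = 68 is rounded up to 80
 * bytes (five segments).
 */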
4183 assert(room >= tlen);
4186 * Packet data are completely inlined,
4187 * free the packet immediately.
4189 rte_pktmbuf_free_seg(loc->mbuf);
4193 * Non-inlinable VLAN packets are
4194 * processed outside of this routine.
4196 assert(room >= MLX5_WQE_DSEG_SIZE);
4197 if (MLX5_TXOFF_CONFIG(VLAN))
4198 assert(!(loc->mbuf->ol_flags &
4200 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
4201 /* We have to store the mbuf in elts. */
4202 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
4203 room -= MLX5_WQE_DSEG_SIZE;
4204 /* Ring buffer wraparound is checked at the loop end.*/
4207 #ifdef MLX5_PMD_SOFT_COUNTERS
4208 /* Update sent data bytes counter. */
4214 if (unlikely(!pkts_n || !loc->elts_free)) {
4216 * We have no resources/packets to
4217 * continue building descriptors.
4220 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4221 return MLX5_TXCMP_CODE_EXIT;
4223 loc->mbuf = *pkts++;
4224 if (likely(pkts_n > 1))
4225 rte_prefetch0(*pkts);
4226 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4228 * Unroll the completion code to avoid
4229 * returning a variable value - it results in
4230 * unoptimized subsequent checking in the caller.
4232 if (ret == MLX5_TXCMP_CODE_MULTI) {
4234 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4235 if (unlikely(!loc->elts_free ||
4237 return MLX5_TXCMP_CODE_EXIT;
4238 return MLX5_TXCMP_CODE_MULTI;
4240 assert(NB_SEGS(loc->mbuf) == 1);
4241 if (ret == MLX5_TXCMP_CODE_TSO) {
4243 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4244 if (unlikely(!loc->elts_free ||
4246 return MLX5_TXCMP_CODE_EXIT;
4247 return MLX5_TXCMP_CODE_TSO;
4249 if (ret == MLX5_TXCMP_CODE_SINGLE) {
4251 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4252 if (unlikely(!loc->elts_free ||
4254 return MLX5_TXCMP_CODE_EXIT;
4255 return MLX5_TXCMP_CODE_SINGLE;
4257 if (ret != MLX5_TXCMP_CODE_EMPW) {
4260 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4261 return MLX5_TXCMP_CODE_ERROR;
4263 /* Check if we have minimal room left. */
4265 if (unlikely(!nlim || room < MLX5_WQE_DSEG_SIZE))
4268 * Check whether the packet parameters coincide
4269 * within the assumed eMPW batch:
4270 * - checksum settings
4272 * - software parser settings
4273 * - packet length (legacy MPW only)
4275 if (!mlx5_tx_match_empw(txq, eseg, loc, dlen, olx))
4277 /* Packet attributes match, continue the same eMPW. */
4278 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
4279 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
4282 * We get here to close an existing eMPW
4283 * session and start a new one.
4287 if (unlikely(!part))
4288 return MLX5_TXCMP_CODE_EXIT;
4289 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4290 if (unlikely(!loc->elts_free ||
4292 return MLX5_TXCMP_CODE_EXIT;
4293 /* Continue the loop with new eMPW session. */
4299 * The routine sends packets with ordinary MLX5_OPCODE_SEND.
4300 * Data inlining and VLAN insertion are supported.
4302 static __rte_always_inline enum mlx5_txcmp_code
4303 mlx5_tx_burst_single_send(struct mlx5_txq_data *restrict txq,
4304 struct rte_mbuf **restrict pkts,
4305 unsigned int pkts_n,
4306 struct mlx5_txq_local *restrict loc,
4310 * Subroutine is the part of mlx5_tx_burst_single()
4311 * and sends single-segment packet with SEND opcode.
4313 assert(loc->elts_free && loc->wqe_free);
4314 assert(pkts_n > loc->pkts_sent);
4315 pkts += loc->pkts_sent + 1;
4316 pkts_n -= loc->pkts_sent;
4318 struct mlx5_wqe *restrict wqe;
4319 enum mlx5_txcmp_code ret;
4321 assert(NB_SEGS(loc->mbuf) == 1);
4322 if (MLX5_TXOFF_CONFIG(INLINE)) {
4323 unsigned int inlen, vlan = 0;
4325 inlen = rte_pktmbuf_data_len(loc->mbuf);
4326 if (MLX5_TXOFF_CONFIG(VLAN) &&
4327 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
4328 vlan = sizeof(struct rte_vlan_hdr);
4330 static_assert((sizeof(struct rte_vlan_hdr) +
4331 sizeof(struct rte_ether_hdr)) ==
4332 MLX5_ESEG_MIN_INLINE_SIZE,
4333 "invalid min inline data size");
4336 * If inlining is enabled at configuration time
4337 * the limit must be not less than minimal size.
4338 * Otherwise we would do extra check for data
4339 * size to avoid crashes due to length overflow.
4341 assert(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
4342 if (inlen <= txq->inlen_send) {
4343 unsigned int seg_n, wqe_n;
4345 rte_prefetch0(rte_pktmbuf_mtod
4346 (loc->mbuf, uint8_t *));
4347 /* Check against minimal length. */
4348 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
4349 return MLX5_TXCMP_CODE_ERROR;
4350 if (loc->mbuf->ol_flags &
4351 PKT_TX_DYNF_NOINLINE) {
4353 * The hint flag not to inline packet
4354 * data is set. Check whether we can
4357 if ((!MLX5_TXOFF_CONFIG(EMPW) &&
4359 (MLX5_TXOFF_CONFIG(MPW) &&
4362 * The hardware requires the
4363 * minimal inline data header.
4365 goto single_min_inline;
4367 if (MLX5_TXOFF_CONFIG(VLAN) &&
4368 vlan && !txq->vlan_en) {
4370 * We must insert VLAN tag
4371 * by software means.
4373 goto single_part_inline;
4375 goto single_no_inline;
4378 * Completely inlined packet data WQE:
4379 * - Control Segment, SEND opcode
4380 * - Ethernet Segment, no VLAN insertion
4381 * - Data inlined, VLAN optionally inserted
4382 * - Alignment to MLX5_WSEG_SIZE
4383 * Have to estimate amount of WQEBBs
4385 seg_n = (inlen + 3 * MLX5_WSEG_SIZE -
4386 MLX5_ESEG_MIN_INLINE_SIZE +
4387 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
4388 /* Check if there are enough WQEBBs. */
4389 wqe_n = (seg_n + 3) / 4;
4390 if (wqe_n > loc->wqe_free)
4391 return MLX5_TXCMP_CODE_EXIT;
4392 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4393 loc->wqe_last = wqe;
4394 mlx5_tx_cseg_init(txq, loc, wqe, seg_n,
4395 MLX5_OPCODE_SEND, olx);
4396 mlx5_tx_eseg_data(txq, loc, wqe,
4397 vlan, inlen, 0, olx);
4398 txq->wqe_ci += wqe_n;
4399 loc->wqe_free -= wqe_n;
4401 * Packet data are completely inlined,
4402 * free the packet immediately.
4404 rte_pktmbuf_free_seg(loc->mbuf);
4405 } else if ((!MLX5_TXOFF_CONFIG(EMPW) ||
4406 MLX5_TXOFF_CONFIG(MPW)) &&
4409 * If minimal inlining is requested the eMPW
4410 * feature should be disabled due to data is
4411 * inlined into Ethernet Segment, which can
4412 * not contain inlined data for eMPW due to
4413 * segment shared for all packets.
4415 struct mlx5_wqe_dseg *restrict dseg;
4420 * The inline-mode settings require
4421 * to inline the specified amount of
4422 * data bytes to the Ethernet Segment.
4423 * We should check the free space in
4424 * WQE ring buffer to inline partially.
4427 assert(txq->inlen_send >= txq->inlen_mode);
4428 assert(inlen > txq->inlen_mode);
4429 assert(txq->inlen_mode >=
4430 MLX5_ESEG_MIN_INLINE_SIZE);
4432 * Check whether there are enough free WQEBBs:
4434 * - Ethernet Segment
4435 * - First Segment of inlined Ethernet data
4436 * - ... data continued ...
4437 * - Finishing Data Segment of pointer type
4439 ds = (MLX5_WQE_CSEG_SIZE +
4440 MLX5_WQE_ESEG_SIZE +
4441 MLX5_WQE_DSEG_SIZE +
4443 MLX5_ESEG_MIN_INLINE_SIZE +
4444 MLX5_WQE_DSEG_SIZE +
4445 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
4446 if (loc->wqe_free < ((ds + 3) / 4))
4447 return MLX5_TXCMP_CODE_EXIT;
4449 * Build the ordinary SEND WQE:
4451 * - Ethernet Segment, inline inlen_mode bytes
4452 * - Data Segment of pointer type
4454 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4455 loc->wqe_last = wqe;
4456 mlx5_tx_cseg_init(txq, loc, wqe, ds,
4457 MLX5_OPCODE_SEND, olx);
4458 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan,
4461 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
4462 txq->inlen_mode - vlan;
4463 inlen -= txq->inlen_mode;
4464 mlx5_tx_dseg_ptr(txq, loc, dseg,
4467 * WQE is built, update the loop parameters
4468 * and got to the next packet.
4470 txq->wqe_ci += (ds + 3) / 4;
4471 loc->wqe_free -= (ds + 3) / 4;
4472 /* We have to store mbuf in elts.*/
4473 assert(MLX5_TXOFF_CONFIG(INLINE));
4474 txq->elts[txq->elts_head++ & txq->elts_m] =
4482 * Partially inlined packet data WQE, we have
4483 * some space in title WQEBB, we can fill it
4484 * with some packet data. It takes one WQEBB,
4485 * it is available, no extra space check:
4486 * - Control Segment, SEND opcode
4487 * - Ethernet Segment, no VLAN insertion
4488 * - MLX5_ESEG_MIN_INLINE_SIZE bytes of Data
4489 * - Data Segment, pointer type
4491 * We also get here if VLAN insertion is not
4492 * supported by HW, the inline is enabled.
4495 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4496 loc->wqe_last = wqe;
4497 mlx5_tx_cseg_init(txq, loc, wqe, 4,
4498 MLX5_OPCODE_SEND, olx);
4499 mlx5_tx_eseg_dmin(txq, loc, wqe, vlan, olx);
4500 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
4501 MLX5_ESEG_MIN_INLINE_SIZE - vlan;
4503 * The length check is performed above, by
4504 * comparing with txq->inlen_send. We should
4505 * not get overflow here.
4507 assert(inlen > MLX5_ESEG_MIN_INLINE_SIZE);
4508 dlen = inlen - MLX5_ESEG_MIN_INLINE_SIZE;
4509 mlx5_tx_dseg_ptr(txq, loc, &wqe->dseg[1],
4513 /* We have to store mbuf in elts.*/
4514 assert(MLX5_TXOFF_CONFIG(INLINE));
4515 txq->elts[txq->elts_head++ & txq->elts_m] =
4519 #ifdef MLX5_PMD_SOFT_COUNTERS
4520 /* Update sent data bytes counter. */
4521 txq->stats.obytes += vlan +
4522 rte_pktmbuf_data_len(loc->mbuf);
4526 * No inline at all, it means the CPU cycles saving
4527 * is prioritized at configuration, we should not
4528 * copy any packet data to WQE.
4530 * SEND WQE, one WQEBB:
4531 * - Control Segment, SEND opcode
4532 * - Ethernet Segment, optional VLAN, no inline
4533 * - Data Segment, pointer type
4536 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4537 loc->wqe_last = wqe;
4538 mlx5_tx_cseg_init(txq, loc, wqe, 3,
4539 MLX5_OPCODE_SEND, olx);
4540 mlx5_tx_eseg_none(txq, loc, wqe, olx);
4542 (txq, loc, &wqe->dseg[0],
4543 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
4544 rte_pktmbuf_data_len(loc->mbuf), olx);
4548 * We should not store mbuf pointer in elts
4549 * if no inlining is configured, this is done
4550 * by calling routine in a batch copy.
4552 assert(!MLX5_TXOFF_CONFIG(INLINE));
4554 #ifdef MLX5_PMD_SOFT_COUNTERS
4555 /* Update sent data bytes counter. */
4556 txq->stats.obytes += rte_pktmbuf_data_len(loc->mbuf);
4557 if (MLX5_TXOFF_CONFIG(VLAN) &&
4558 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
4559 txq->stats.obytes +=
4560 sizeof(struct rte_vlan_hdr);
4565 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
4566 return MLX5_TXCMP_CODE_EXIT;
4567 loc->mbuf = *pkts++;
4569 rte_prefetch0(*pkts);
4570 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4571 if (unlikely(ret != MLX5_TXCMP_CODE_SINGLE))
4577 static __rte_always_inline enum mlx5_txcmp_code
4578 mlx5_tx_burst_single(struct mlx5_txq_data *restrict txq,
4579 struct rte_mbuf **restrict pkts,
4580 unsigned int pkts_n,
4581 struct mlx5_txq_local *restrict loc,
4584 enum mlx5_txcmp_code ret;
4586 ret = mlx5_tx_able_to_empw(txq, loc, olx, false);
4587 if (ret == MLX5_TXCMP_CODE_SINGLE)
4589 assert(ret == MLX5_TXCMP_CODE_EMPW);
4591 /* Optimize for inline/no inline eMPW send. */
4592 ret = (MLX5_TXOFF_CONFIG(INLINE)) ?
4593 mlx5_tx_burst_empw_inline
4594 (txq, pkts, pkts_n, loc, olx) :
4595 mlx5_tx_burst_empw_simple
4596 (txq, pkts, pkts_n, loc, olx);
4597 if (ret != MLX5_TXCMP_CODE_SINGLE)
4599 /* The resources to send one packet should remain. */
4600 assert(loc->elts_free && loc->wqe_free);
4602 ret = mlx5_tx_burst_single_send(txq, pkts, pkts_n, loc, olx);
4603 assert(ret != MLX5_TXCMP_CODE_SINGLE);
4604 if (ret != MLX5_TXCMP_CODE_EMPW)
4606 /* The resources to send one packet should remain. */
4607 assert(loc->elts_free && loc->wqe_free);
4612 * DPDK Tx callback template. This is configured template
4613 * used to generate routines optimized for specified offload setup.
4614 * One of this generated functions is chosen at SQ configuration
4618 * Generic pointer to TX queue structure.
4620 * Packets to transmit.
4622 * Number of packets in array.
4624 * Configured offloads mask, presents the bits of MLX5_TXOFF_CONFIG_xxx
4625 * values. Should be static to take compile time static configuration
4629 * Number of packets successfully transmitted (<= pkts_n).
4631 static __rte_always_inline uint16_t
4632 mlx5_tx_burst_tmpl(struct mlx5_txq_data *restrict txq,
4633 struct rte_mbuf **restrict pkts,
4637 struct mlx5_txq_local loc;
4638 enum mlx5_txcmp_code ret;
4641 assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
4642 assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
4643 if (unlikely(!pkts_n))
4647 loc.wqe_last = NULL;
4650 loc.pkts_loop = loc.pkts_sent;
4652 * Check if there are some CQEs, if any:
4653 * - process an encountered errors
4654 * - process the completed WQEs
4655 * - free related mbufs
4656 * - doorbell the NIC about processed CQEs
4658 rte_prefetch0(*(pkts + loc.pkts_sent));
4659 mlx5_tx_handle_completion(txq, olx);
4661 * Calculate the number of available resources - elts and WQEs.
4662 * There are two possible different scenarios:
4663 * - no data inlining into WQEs, one WQEBB may contains upto
4664 * four packets, in this case elts become scarce resource
4665 * - data inlining into WQEs, one packet may require multiple
4666 * WQEBBs, the WQEs become the limiting factor.
4668 assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
4669 loc.elts_free = txq->elts_s -
4670 (uint16_t)(txq->elts_head - txq->elts_tail);
4671 assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
4672 loc.wqe_free = txq->wqe_s -
4673 (uint16_t)(txq->wqe_ci - txq->wqe_pi);
4674 if (unlikely(!loc.elts_free || !loc.wqe_free))
4678 * Fetch the packet from array. Usually this is
4679 * the first packet in series of multi/single
4682 loc.mbuf = *(pkts + loc.pkts_sent);
4683 /* Dedicated branch for multi-segment packets. */
4684 if (MLX5_TXOFF_CONFIG(MULTI) &&
4685 unlikely(NB_SEGS(loc.mbuf) > 1)) {
4687 * Multi-segment packet encountered.
4688 * Hardware is able to process it only
4689 * with SEND/TSO opcodes, one packet
4690 * per WQE, do it in dedicated routine.
4693 assert(loc.pkts_sent >= loc.pkts_copy);
4694 part = loc.pkts_sent - loc.pkts_copy;
4695 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
4697 * There are some single-segment mbufs not
4698 * stored in elts. The mbufs must be in the
4699 * same order as WQEs, so we must copy the
4700 * mbufs to elts here, before the coming
4701 * multi-segment packet mbufs is appended.
4703 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy,
4705 loc.pkts_copy = loc.pkts_sent;
4707 assert(pkts_n > loc.pkts_sent);
4708 ret = mlx5_tx_burst_mseg(txq, pkts, pkts_n, &loc, olx);
4709 if (!MLX5_TXOFF_CONFIG(INLINE))
4710 loc.pkts_copy = loc.pkts_sent;
4712 * These returned code checks are supposed
4713 * to be optimized out due to routine inlining.
4715 if (ret == MLX5_TXCMP_CODE_EXIT) {
4717 * The routine returns this code when
4718 * all packets are sent or there is no
4719 * enough resources to complete request.
4723 if (ret == MLX5_TXCMP_CODE_ERROR) {
4725 * The routine returns this code when
4726 * some error in the incoming packets
4729 txq->stats.oerrors++;
4732 if (ret == MLX5_TXCMP_CODE_SINGLE) {
4734 * The single-segment packet was encountered
4735 * in the array, try to send it with the
4736 * best optimized way, possible engaging eMPW.
4738 goto enter_send_single;
4740 if (MLX5_TXOFF_CONFIG(TSO) &&
4741 ret == MLX5_TXCMP_CODE_TSO) {
4743 * The single-segment TSO packet was
4744 * encountered in the array.
4746 goto enter_send_tso;
4748 /* We must not get here. Something is going wrong. */
4750 txq->stats.oerrors++;
4753 /* Dedicated branch for single-segment TSO packets. */
4754 if (MLX5_TXOFF_CONFIG(TSO) &&
4755 unlikely(loc.mbuf->ol_flags & PKT_TX_TCP_SEG)) {
4757 * TSO might require special way for inlining
4758 * (dedicated parameters) and is sent with
4759 * MLX5_OPCODE_TSO opcode only, provide this
4760 * in dedicated branch.
4763 assert(NB_SEGS(loc.mbuf) == 1);
4764 assert(pkts_n > loc.pkts_sent);
4765 ret = mlx5_tx_burst_tso(txq, pkts, pkts_n, &loc, olx);
4767 * These returned code checks are supposed
4768 * to be optimized out due to routine inlining.
4770 if (ret == MLX5_TXCMP_CODE_EXIT)
4772 if (ret == MLX5_TXCMP_CODE_ERROR) {
4773 txq->stats.oerrors++;
4776 if (ret == MLX5_TXCMP_CODE_SINGLE)
4777 goto enter_send_single;
4778 if (MLX5_TXOFF_CONFIG(MULTI) &&
4779 ret == MLX5_TXCMP_CODE_MULTI) {
4781 * The multi-segment packet was
4782 * encountered in the array.
4784 goto enter_send_multi;
4786 /* We must not get here. Something is going wrong. */
4788 txq->stats.oerrors++;
4792 * The dedicated branch for the single-segment packets
4793 * without TSO. Often these ones can be sent using
4794 * MLX5_OPCODE_EMPW with multiple packets in one WQE.
4795 * The routine builds the WQEs till it encounters
4796 * the TSO or multi-segment packet (in case if these
4797 * offloads are requested at SQ configuration time).
4800 assert(pkts_n > loc.pkts_sent);
4801 ret = mlx5_tx_burst_single(txq, pkts, pkts_n, &loc, olx);
4803 * These returned code checks are supposed
4804 * to be optimized out due to routine inlining.
4806 if (ret == MLX5_TXCMP_CODE_EXIT)
4808 if (ret == MLX5_TXCMP_CODE_ERROR) {
4809 txq->stats.oerrors++;
4812 if (MLX5_TXOFF_CONFIG(MULTI) &&
4813 ret == MLX5_TXCMP_CODE_MULTI) {
4815 * The multi-segment packet was
4816 * encountered in the array.
4818 goto enter_send_multi;
4820 if (MLX5_TXOFF_CONFIG(TSO) &&
4821 ret == MLX5_TXCMP_CODE_TSO) {
4823 * The single-segment TSO packet was
4824 * encountered in the array.
4826 goto enter_send_tso;
4828 /* We must not get here. Something is going wrong. */
4830 txq->stats.oerrors++;
4834 * Main Tx loop is completed, do the rest:
4835 * - set completion request if thresholds are reached
4836 * - doorbell the hardware
4837 * - copy the rest of mbufs to elts (if any)
4839 assert(MLX5_TXOFF_CONFIG(INLINE) || loc.pkts_sent >= loc.pkts_copy);
4840 /* Take a shortcut if nothing is sent. */
4841 if (unlikely(loc.pkts_sent == loc.pkts_loop))
4843 /* Request CQE generation if limits are reached. */
4844 mlx5_tx_request_completion(txq, &loc, olx);
4846 * Ring QP doorbell immediately after WQE building completion
4847 * to improve latencies. The pure software related data treatment
4848 * can be completed after doorbell. Tx CQEs for this SQ are
4849 * processed in this thread only by the polling.
4851 * The rdma core library can map doorbell register in two ways,
4852 * depending on the environment variable "MLX5_SHUT_UP_BF":
4854 * - as regular cached memory, the variable is either missing or
4855 * set to zero. This type of mapping may cause the significant
4856 * doorbell register writing latency and requires explicit
4857 * memory write barrier to mitigate this issue and prevent
4860 * - as non-cached memory, the variable is present and set to
4861 * not "0" value. This type of mapping may cause performance
4862 * impact under heavy loading conditions but the explicit write
4863 * memory barrier is not required and it may improve core
4866 * - the legacy behaviour (prior 19.08 release) was to use some
4867 * heuristics to decide whether write memory barrier should
4868 * be performed. This behavior is supported with specifying
4869 * tx_db_nc=2, write barrier is skipped if application
4870 * provides the full recommended burst of packets, it
4871 * supposes the next packets are coming and the write barrier
4872 * will be issued on the next burst (after descriptor writing,
4875 mlx5_tx_dbrec_cond_wmb(txq, loc.wqe_last, !txq->db_nc &&
4876 (!txq->db_heu || pkts_n % MLX5_TX_DEFAULT_BURST));
4877 /* Not all of the mbufs may be stored into elts yet. */
4878 part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent - loc.pkts_copy;
4879 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
4881 * There are some single-segment mbufs not stored in elts.
4882 * It can be only if the last packet was single-segment.
4883 * The copying is gathered into one place due to it is
4884 * a good opportunity to optimize that with SIMD.
4885 * Unfortunately if inlining is enabled the gaps in
4886 * pointer array may happen due to early freeing of the
4889 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy, part, olx);
4890 loc.pkts_copy = loc.pkts_sent;
4892 assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
4893 assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
4894 if (pkts_n > loc.pkts_sent) {
4896 * If burst size is large there might be no enough CQE
4897 * fetched from completion queue and no enough resources
4898 * freed to send all the packets.
4903 #ifdef MLX5_PMD_SOFT_COUNTERS
4904 /* Increment sent packets counter. */
4905 txq->stats.opackets += loc.pkts_sent;
4907 return loc.pkts_sent;
4910 /* Generate routines with Enhanced Multi-Packet Write support. */
4911 MLX5_TXOFF_DECL(full_empw,
4912 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_EMPW)
4914 MLX5_TXOFF_DECL(none_empw,
4915 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW)
4917 MLX5_TXOFF_DECL(md_empw,
4918 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4920 MLX5_TXOFF_DECL(mt_empw,
4921 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4922 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4924 MLX5_TXOFF_DECL(mtsc_empw,
4925 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4926 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4927 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4929 MLX5_TXOFF_DECL(mti_empw,
4930 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4931 MLX5_TXOFF_CONFIG_INLINE |
4932 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4934 MLX5_TXOFF_DECL(mtv_empw,
4935 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4936 MLX5_TXOFF_CONFIG_VLAN |
4937 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4939 MLX5_TXOFF_DECL(mtiv_empw,
4940 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4941 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4942 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4944 MLX5_TXOFF_DECL(sc_empw,
4945 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4946 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4948 MLX5_TXOFF_DECL(sci_empw,
4949 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4950 MLX5_TXOFF_CONFIG_INLINE |
4951 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4953 MLX5_TXOFF_DECL(scv_empw,
4954 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4955 MLX5_TXOFF_CONFIG_VLAN |
4956 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4958 MLX5_TXOFF_DECL(sciv_empw,
4959 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4960 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4961 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4963 MLX5_TXOFF_DECL(i_empw,
4964 MLX5_TXOFF_CONFIG_INLINE |
4965 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4967 MLX5_TXOFF_DECL(v_empw,
4968 MLX5_TXOFF_CONFIG_VLAN |
4969 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4971 MLX5_TXOFF_DECL(iv_empw,
4972 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4973 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4975 /* Generate routines without Enhanced Multi-Packet Write support. */
4976 MLX5_TXOFF_DECL(full,
4977 MLX5_TXOFF_CONFIG_FULL)
4979 MLX5_TXOFF_DECL(none,
4980 MLX5_TXOFF_CONFIG_NONE)
4983 MLX5_TXOFF_CONFIG_METADATA)
4986 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4987 MLX5_TXOFF_CONFIG_METADATA)
4989 MLX5_TXOFF_DECL(mtsc,
4990 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4991 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4992 MLX5_TXOFF_CONFIG_METADATA)
4994 MLX5_TXOFF_DECL(mti,
4995 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4996 MLX5_TXOFF_CONFIG_INLINE |
4997 MLX5_TXOFF_CONFIG_METADATA)
5000 MLX5_TXOFF_DECL(mtv,
5001 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5002 MLX5_TXOFF_CONFIG_VLAN |
5003 MLX5_TXOFF_CONFIG_METADATA)
5006 MLX5_TXOFF_DECL(mtiv,
5007 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5008 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5009 MLX5_TXOFF_CONFIG_METADATA)
5012 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5013 MLX5_TXOFF_CONFIG_METADATA)
5015 MLX5_TXOFF_DECL(sci,
5016 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5017 MLX5_TXOFF_CONFIG_INLINE |
5018 MLX5_TXOFF_CONFIG_METADATA)
5021 MLX5_TXOFF_DECL(scv,
5022 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5023 MLX5_TXOFF_CONFIG_VLAN |
5024 MLX5_TXOFF_CONFIG_METADATA)
5027 MLX5_TXOFF_DECL(sciv,
5028 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5029 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5030 MLX5_TXOFF_CONFIG_METADATA)
5033 MLX5_TXOFF_CONFIG_INLINE |
5034 MLX5_TXOFF_CONFIG_METADATA)
5037 MLX5_TXOFF_CONFIG_VLAN |
5038 MLX5_TXOFF_CONFIG_METADATA)
5041 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5042 MLX5_TXOFF_CONFIG_METADATA)
5045 * Generate routines with Legacy Multi-Packet Write support.
5046 * This mode is supported by ConnectX-4LX only and imposes
5047 * offload limitations, not supported:
5048 * - ACL/Flows (metadata are becoming meaningless)
5049 * - WQE Inline headers
5050 * - SRIOV (E-Switch offloads)
5052 * - tunnel encapsulation/decapsulation
5055 MLX5_TXOFF_DECL(none_mpw,
5056 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW |
5057 MLX5_TXOFF_CONFIG_MPW)
5059 MLX5_TXOFF_DECL(mci_mpw,
5060 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5061 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5062 MLX5_TXOFF_CONFIG_MPW)
5064 MLX5_TXOFF_DECL(mc_mpw,
5065 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5066 MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW)
5068 MLX5_TXOFF_DECL(i_mpw,
5069 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5070 MLX5_TXOFF_CONFIG_MPW)
5073 * Array of declared and compiled Tx burst function and corresponding
5074 * supported offloads set. The array is used to select the Tx burst
5075 * function for specified offloads set at Tx queue configuration time.
5078 eth_tx_burst_t func;
5081 MLX5_TXOFF_INFO(full_empw,
5082 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5083 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5084 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5085 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5087 MLX5_TXOFF_INFO(none_empw,
5088 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW)
5090 MLX5_TXOFF_INFO(md_empw,
5091 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5093 MLX5_TXOFF_INFO(mt_empw,
5094 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5095 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5097 MLX5_TXOFF_INFO(mtsc_empw,
5098 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5099 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5100 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5102 MLX5_TXOFF_INFO(mti_empw,
5103 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5104 MLX5_TXOFF_CONFIG_INLINE |
5105 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5107 MLX5_TXOFF_INFO(mtv_empw,
5108 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5109 MLX5_TXOFF_CONFIG_VLAN |
5110 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5112 MLX5_TXOFF_INFO(mtiv_empw,
5113 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5114 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5115 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5117 MLX5_TXOFF_INFO(sc_empw,
5118 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5119 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5121 MLX5_TXOFF_INFO(sci_empw,
5122 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5123 MLX5_TXOFF_CONFIG_INLINE |
5124 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5126 MLX5_TXOFF_INFO(scv_empw,
5127 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5128 MLX5_TXOFF_CONFIG_VLAN |
5129 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5131 MLX5_TXOFF_INFO(sciv_empw,
5132 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5133 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5134 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5136 MLX5_TXOFF_INFO(i_empw,
5137 MLX5_TXOFF_CONFIG_INLINE |
5138 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5140 MLX5_TXOFF_INFO(v_empw,
5141 MLX5_TXOFF_CONFIG_VLAN |
5142 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5144 MLX5_TXOFF_INFO(iv_empw,
5145 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5146 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5148 MLX5_TXOFF_INFO(full,
5149 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5150 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5151 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5152 MLX5_TXOFF_CONFIG_METADATA)
5154 MLX5_TXOFF_INFO(none,
5155 MLX5_TXOFF_CONFIG_NONE)
5158 MLX5_TXOFF_CONFIG_METADATA)
5161 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5162 MLX5_TXOFF_CONFIG_METADATA)
5164 MLX5_TXOFF_INFO(mtsc,
5165 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5166 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5167 MLX5_TXOFF_CONFIG_METADATA)
5169 MLX5_TXOFF_INFO(mti,
5170 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5171 MLX5_TXOFF_CONFIG_INLINE |
5172 MLX5_TXOFF_CONFIG_METADATA)
5174 MLX5_TXOFF_INFO(mtv,
5175 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5176 MLX5_TXOFF_CONFIG_VLAN |
5177 MLX5_TXOFF_CONFIG_METADATA)
5179 MLX5_TXOFF_INFO(mtiv,
5180 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5181 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5182 MLX5_TXOFF_CONFIG_METADATA)
5185 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5186 MLX5_TXOFF_CONFIG_METADATA)
5188 MLX5_TXOFF_INFO(sci,
5189 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5190 MLX5_TXOFF_CONFIG_INLINE |
5191 MLX5_TXOFF_CONFIG_METADATA)
5193 MLX5_TXOFF_INFO(scv,
5194 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5195 MLX5_TXOFF_CONFIG_VLAN |
5196 MLX5_TXOFF_CONFIG_METADATA)
5198 MLX5_TXOFF_INFO(sciv,
5199 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5200 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5201 MLX5_TXOFF_CONFIG_METADATA)
5204 MLX5_TXOFF_CONFIG_INLINE |
5205 MLX5_TXOFF_CONFIG_METADATA)
5208 MLX5_TXOFF_CONFIG_VLAN |
5209 MLX5_TXOFF_CONFIG_METADATA)
5212 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5213 MLX5_TXOFF_CONFIG_METADATA)
5215 MLX5_TXOFF_INFO(none_mpw,
5216 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW |
5217 MLX5_TXOFF_CONFIG_MPW)
5219 MLX5_TXOFF_INFO(mci_mpw,
5220 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5221 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5222 MLX5_TXOFF_CONFIG_MPW)
5224 MLX5_TXOFF_INFO(mc_mpw,
5225 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5226 MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW)
5228 MLX5_TXOFF_INFO(i_mpw,
5229 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5230 MLX5_TXOFF_CONFIG_MPW)
5234 * Configure the Tx function to use. The routine checks configured
5235 * Tx offloads for the device and selects appropriate Tx burst
5236 * routine. There are multiple Tx burst routines compiled from
5237 * the same template in the most optimal way for the dedicated
5241 * Pointer to private data structure.
5244 * Pointer to selected Tx burst function.
5247 mlx5_select_tx_function(struct rte_eth_dev *dev)
5249 struct mlx5_priv *priv = dev->data->dev_private;
5250 struct mlx5_dev_config *config = &priv->config;
5251 uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
5252 unsigned int diff = 0, olx = 0, i, m;
5254 static_assert(MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE <=
5255 MLX5_DSEG_MAX, "invalid WQE max size");
5256 static_assert(MLX5_WQE_CSEG_SIZE == MLX5_WSEG_SIZE,
5257 "invalid WQE Control Segment size");
5258 static_assert(MLX5_WQE_ESEG_SIZE == MLX5_WSEG_SIZE,
5259 "invalid WQE Ethernet Segment size");
5260 static_assert(MLX5_WQE_DSEG_SIZE == MLX5_WSEG_SIZE,
5261 "invalid WQE Data Segment size");
5262 static_assert(MLX5_WQE_SIZE == 4 * MLX5_WSEG_SIZE,
5263 "invalid WQE size");
5265 if (tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
5266 /* We should support Multi-Segment Packets. */
5267 olx |= MLX5_TXOFF_CONFIG_MULTI;
5269 if (tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
5270 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
5271 DEV_TX_OFFLOAD_GRE_TNL_TSO |
5272 DEV_TX_OFFLOAD_IP_TNL_TSO |
5273 DEV_TX_OFFLOAD_UDP_TNL_TSO)) {
5274 /* We should support TCP Send Offload. */
5275 olx |= MLX5_TXOFF_CONFIG_TSO;
5277 if (tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO |
5278 DEV_TX_OFFLOAD_UDP_TNL_TSO |
5279 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
5280 /* We should support Software Parser for Tunnels. */
5281 olx |= MLX5_TXOFF_CONFIG_SWP;
5283 if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
5284 DEV_TX_OFFLOAD_UDP_CKSUM |
5285 DEV_TX_OFFLOAD_TCP_CKSUM |
5286 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
5287 /* We should support IP/TCP/UDP Checksums. */
5288 olx |= MLX5_TXOFF_CONFIG_CSUM;
5290 if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT) {
5291 /* We should support VLAN insertion. */
5292 olx |= MLX5_TXOFF_CONFIG_VLAN;
5294 if (priv->txqs_n && (*priv->txqs)[0]) {
5295 struct mlx5_txq_data *txd = (*priv->txqs)[0];
5297 if (txd->inlen_send) {
5299 * Check the data inline requirements. Data inline
5300 * is enabled on per device basis, we can check
5301 * the first Tx queue only.
5303 * If device does not support VLAN insertion in WQE
5304 * and some queues are requested to perform VLAN
5305 * insertion offload than inline must be enabled.
5307 olx |= MLX5_TXOFF_CONFIG_INLINE;
5310 if (config->mps == MLX5_MPW_ENHANCED &&
5311 config->txq_inline_min <= 0) {
5313 * The NIC supports Enhanced Multi-Packet Write
5314 * and does not require minimal inline data.
5316 olx |= MLX5_TXOFF_CONFIG_EMPW;
5318 if (rte_flow_dynf_metadata_avail()) {
5319 /* We should support Flow metadata. */
5320 olx |= MLX5_TXOFF_CONFIG_METADATA;
5322 if (config->mps == MLX5_MPW) {
5324 * The NIC supports Legacy Multi-Packet Write.
5325 * The MLX5_TXOFF_CONFIG_MPW controls the
5326 * descriptor building method in combination
5327 * with MLX5_TXOFF_CONFIG_EMPW.
5329 if (!(olx & (MLX5_TXOFF_CONFIG_TSO |
5330 MLX5_TXOFF_CONFIG_SWP |
5331 MLX5_TXOFF_CONFIG_VLAN |
5332 MLX5_TXOFF_CONFIG_METADATA)))
5333 olx |= MLX5_TXOFF_CONFIG_EMPW |
5334 MLX5_TXOFF_CONFIG_MPW;
5337 * Scan the routines table to find the minimal
5338 * satisfying routine with requested offloads.
5340 m = RTE_DIM(txoff_func);
5341 for (i = 0; i < RTE_DIM(txoff_func); i++) {
5344 tmp = txoff_func[i].olx;
5346 /* Meets requested offloads exactly.*/
5350 if ((tmp & olx) != olx) {
5351 /* Does not meet requested offloads at all. */
5354 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_EMPW)
5355 /* Do not enable eMPW if not configured. */
5357 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_INLINE)
5358 /* Do not enable inlining if not configured. */
5361 * Some routine meets the requirements.
5362 * Check whether it has minimal amount
5363 * of not requested offloads.
5365 tmp = __builtin_popcountl(tmp & ~olx);
5366 if (m >= RTE_DIM(txoff_func) || tmp < diff) {
5367 /* First or better match, save and continue. */
5373 tmp = txoff_func[i].olx ^ txoff_func[m].olx;
5374 if (__builtin_ffsl(txoff_func[i].olx & ~tmp) <
5375 __builtin_ffsl(txoff_func[m].olx & ~tmp)) {
5376 /* Lighter not requested offload. */
5381 if (m >= RTE_DIM(txoff_func)) {
5382 DRV_LOG(DEBUG, "port %u has no selected Tx function"
5383 " for requested offloads %04X",
5384 dev->data->port_id, olx);
5387 DRV_LOG(DEBUG, "port %u has selected Tx function"
5388 " supporting offloads %04X/%04X",
5389 dev->data->port_id, olx, txoff_func[m].olx);
5390 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MULTI)
5391 DRV_LOG(DEBUG, "\tMULTI (multi segment)");
5392 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_TSO)
5393 DRV_LOG(DEBUG, "\tTSO (TCP send offload)");
5394 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_SWP)
5395 DRV_LOG(DEBUG, "\tSWP (software parser)");
5396 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_CSUM)
5397 DRV_LOG(DEBUG, "\tCSUM (checksum offload)");
5398 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_INLINE)
5399 DRV_LOG(DEBUG, "\tINLIN (inline data)");
5400 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_VLAN)
5401 DRV_LOG(DEBUG, "\tVLANI (VLAN insertion)");
5402 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_METADATA)
5403 DRV_LOG(DEBUG, "\tMETAD (tx Flow metadata)");
5404 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_EMPW) {
5405 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MPW)
5406 DRV_LOG(DEBUG, "\tMPW (Legacy MPW)");
5408 DRV_LOG(DEBUG, "\tEMPW (Enhanced MPW)");
5410 return txoff_func[m].func;