1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015-2019 Mellanox Technologies, Ltd
11 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
13 #pragma GCC diagnostic ignored "-Wpedantic"
15 #include <infiniband/verbs.h>
16 #include <infiniband/mlx5dv.h>
18 #pragma GCC diagnostic error "-Wpedantic"
22 #include <rte_mempool.h>
23 #include <rte_prefetch.h>
24 #include <rte_common.h>
25 #include <rte_branch_prediction.h>
26 #include <rte_ether.h>
27 #include <rte_cycles.h>
30 #include <mlx5_devx_cmds.h>
32 #include <mlx5_common.h>
34 #include "mlx5_defs.h"
37 #include "mlx5_utils.h"
38 #include "mlx5_rxtx.h"
39 #include "mlx5_autoconf.h"
41 /* TX burst subroutines return codes. */
42 enum mlx5_txcmp_code {
43 MLX5_TXCMP_CODE_EXIT = 0,
44 MLX5_TXCMP_CODE_ERROR,
45 MLX5_TXCMP_CODE_SINGLE,
46 MLX5_TXCMP_CODE_MULTI,
52 * These defines are used to configure the Tx burst routine option set
53 * supported at compile time. Options that are not specified are optimized
54 * out, because the related if conditions can be evaluated at compile time.
55 * The offloads with a bigger runtime check overhead (more CPU cycles
56 * needed to skip them) should have a bigger index - this is needed to
57 * select the best matching routine when there is no exact match and
58 * some offloads are not actually requested.
60 #define MLX5_TXOFF_CONFIG_MULTI (1u << 0) /* Multi-segment packets.*/
61 #define MLX5_TXOFF_CONFIG_TSO (1u << 1) /* TCP send offload supported.*/
62 #define MLX5_TXOFF_CONFIG_SWP (1u << 2) /* Tunnels/SW Parser offloads.*/
63 #define MLX5_TXOFF_CONFIG_CSUM (1u << 3) /* Check Sums offloaded. */
64 #define MLX5_TXOFF_CONFIG_INLINE (1u << 4) /* Data inlining supported. */
65 #define MLX5_TXOFF_CONFIG_VLAN (1u << 5) /* VLAN insertion supported.*/
66 #define MLX5_TXOFF_CONFIG_METADATA (1u << 6) /* Flow metadata. */
67 #define MLX5_TXOFF_CONFIG_EMPW (1u << 8) /* Enhanced MPW supported.*/
68 #define MLX5_TXOFF_CONFIG_MPW (1u << 9) /* Legacy MPW supported.*/
70 /* The most common offload groups. */
71 #define MLX5_TXOFF_CONFIG_NONE 0
72 #define MLX5_TXOFF_CONFIG_FULL (MLX5_TXOFF_CONFIG_MULTI | \
73 MLX5_TXOFF_CONFIG_TSO | \
74 MLX5_TXOFF_CONFIG_SWP | \
75 MLX5_TXOFF_CONFIG_CSUM | \
76 MLX5_TXOFF_CONFIG_INLINE | \
77 MLX5_TXOFF_CONFIG_VLAN | \
78 MLX5_TXOFF_CONFIG_METADATA)
80 #define MLX5_TXOFF_CONFIG(mask) (olx & MLX5_TXOFF_CONFIG_##mask)
82 #define MLX5_TXOFF_DECL(func, olx) \
83 static uint16_t mlx5_tx_burst_##func(void *txq, \
84 struct rte_mbuf **pkts, \
87 return mlx5_tx_burst_tmpl((struct mlx5_txq_data *)txq, \
88 pkts, pkts_n, (olx)); \
91 #define MLX5_TXOFF_INFO(func, olx) {mlx5_tx_burst_##func, olx},
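/*
 * Illustrative (hypothetical) use of the two macros above, not an actual
 * declaration from this file: MLX5_TXOFF_DECL(csum_inline,
 * MLX5_TXOFF_CONFIG_CSUM | MLX5_TXOFF_CONFIG_INLINE) generates
 * mlx5_tx_burst_csum_inline(), which invokes mlx5_tx_burst_tmpl() with a
 * compile-time constant olx, so every MLX5_TXOFF_CONFIG(mask) check for an
 * offload absent from olx folds to zero and the related code is optimized
 * out. MLX5_TXOFF_INFO(csum_inline, ...) then lists the routine together
 * with its offload set in the selection table.
 */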
93 static __rte_always_inline uint32_t
94 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe);
96 static __rte_always_inline int
97 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
98 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe);
100 static __rte_always_inline uint32_t
101 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe);
103 static __rte_always_inline void
104 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
105 volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res);
107 static __rte_always_inline void
108 mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx,
109 const unsigned int strd_n);
112 mlx5_queue_state_modify(struct rte_eth_dev *dev,
113 struct mlx5_mp_arg_queue_state_modify *sm);
116 mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *__rte_restrict tcp,
117 volatile struct mlx5_cqe *__rte_restrict cqe,
121 mlx5_lro_update_hdr(uint8_t *__rte_restrict padd,
122 volatile struct mlx5_cqe *__rte_restrict cqe,
125 uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
126 [0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
129 uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned;
130 uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned;
132 uint64_t rte_net_mlx5_dynf_inline_mask;
133 #define PKT_TX_DYNF_NOINLINE rte_net_mlx5_dynf_inline_mask
136 * Build a table to translate Rx completion flags to packet type.
138 * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
141 mlx5_set_ptype_table(void)
144 uint32_t (*p)[RTE_DIM(mlx5_ptype_table)] = &mlx5_ptype_table;
146 /* Last entry must not be overwritten, reserved for errored packet. */
147 for (i = 0; i < RTE_DIM(mlx5_ptype_table) - 1; ++i)
148 (*p)[i] = RTE_PTYPE_UNKNOWN;
150 * The index to the array should have:
151 * bit[1:0] = l3_hdr_type
152 * bit[4:2] = l4_hdr_type
155 * bit[7] = outer_l3_type
158 (*p)[0x00] = RTE_PTYPE_L2_ETHER;
160 (*p)[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
161 RTE_PTYPE_L4_NONFRAG;
162 (*p)[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
163 RTE_PTYPE_L4_NONFRAG;
165 (*p)[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
167 (*p)[0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
170 (*p)[0x05] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
172 (*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
174 (*p)[0x0d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
176 (*p)[0x0e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
178 (*p)[0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
180 (*p)[0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
183 (*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
185 (*p)[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
187 /* Repeat with outer_l3_type being set. Just in case. */
188 (*p)[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
189 RTE_PTYPE_L4_NONFRAG;
190 (*p)[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
191 RTE_PTYPE_L4_NONFRAG;
192 (*p)[0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
194 (*p)[0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
196 (*p)[0x85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
198 (*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
200 (*p)[0x8d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
202 (*p)[0x8e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
204 (*p)[0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
206 (*p)[0x92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
208 (*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
210 (*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
213 (*p)[0x40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
214 (*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
215 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
216 RTE_PTYPE_INNER_L4_NONFRAG;
217 (*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
218 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
219 RTE_PTYPE_INNER_L4_NONFRAG;
220 (*p)[0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
221 (*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
222 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
223 RTE_PTYPE_INNER_L4_NONFRAG;
224 (*p)[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
225 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
226 RTE_PTYPE_INNER_L4_NONFRAG;
227 /* Tunneled - Fragmented */
228 (*p)[0x61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
229 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
230 RTE_PTYPE_INNER_L4_FRAG;
231 (*p)[0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
232 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
233 RTE_PTYPE_INNER_L4_FRAG;
234 (*p)[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
235 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
236 RTE_PTYPE_INNER_L4_FRAG;
237 (*p)[0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
238 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
239 RTE_PTYPE_INNER_L4_FRAG;
241 (*p)[0x45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
242 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
243 RTE_PTYPE_INNER_L4_TCP;
244 (*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
245 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
246 RTE_PTYPE_INNER_L4_TCP;
247 (*p)[0x4d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
248 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
249 RTE_PTYPE_INNER_L4_TCP;
250 (*p)[0x4e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
251 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
252 RTE_PTYPE_INNER_L4_TCP;
253 (*p)[0x51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
254 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
255 RTE_PTYPE_INNER_L4_TCP;
256 (*p)[0x52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
257 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
258 RTE_PTYPE_INNER_L4_TCP;
259 (*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
260 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
261 RTE_PTYPE_INNER_L4_TCP;
262 (*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
263 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
264 RTE_PTYPE_INNER_L4_TCP;
265 (*p)[0xcd] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
266 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
267 RTE_PTYPE_INNER_L4_TCP;
268 (*p)[0xce] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
269 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
270 RTE_PTYPE_INNER_L4_TCP;
271 (*p)[0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
272 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
273 RTE_PTYPE_INNER_L4_TCP;
274 (*p)[0xd2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
275 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
276 RTE_PTYPE_INNER_L4_TCP;
278 (*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
279 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
280 RTE_PTYPE_INNER_L4_UDP;
281 (*p)[0x4a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
282 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
283 RTE_PTYPE_INNER_L4_UDP;
284 (*p)[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
285 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
286 RTE_PTYPE_INNER_L4_UDP;
287 (*p)[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
288 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
289 RTE_PTYPE_INNER_L4_UDP;
293 * Build a table to translate packet to checksum type of Verbs.
296 mlx5_set_cksum_table(void)
302 * The index should have:
303 * bit[0] = PKT_TX_TCP_SEG
304 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
305 * bit[4] = PKT_TX_IP_CKSUM
306 * bit[8] = PKT_TX_OUTER_IP_CKSUM
309 for (i = 0; i < RTE_DIM(mlx5_cksum_table); ++i) {
312 /* Tunneled packet. */
313 if (i & (1 << 8)) /* Outer IP. */
314 v |= MLX5_ETH_WQE_L3_CSUM;
315 if (i & (1 << 4)) /* Inner IP. */
316 v |= MLX5_ETH_WQE_L3_INNER_CSUM;
317 if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
318 v |= MLX5_ETH_WQE_L4_INNER_CSUM;
321 if (i & (1 << 4)) /* IP. */
322 v |= MLX5_ETH_WQE_L3_CSUM;
323 if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
324 v |= MLX5_ETH_WQE_L4_CSUM;
326 mlx5_cksum_table[i] = v;
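/*
 * Worked example (illustrative): a plain, non-tunneled packet requesting
 * PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM maps to i = (1 << 4) | (1 << 2) =
 * 0x14, so mlx5_cksum_table[0x14] = MLX5_ETH_WQE_L3_CSUM |
 * MLX5_ETH_WQE_L4_CSUM; txq_ol_cksum_to_cs() below builds exactly this
 * index by shifting ol_flags right by 50 bits.
 */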
331 * Build a table to translate packet type of mbuf to SWP type of Verbs.
334 mlx5_set_swp_types_table(void)
340 * The index should have:
341 * bit[0:1] = PKT_TX_L4_MASK
342 * bit[4] = PKT_TX_IPV6
343 * bit[8] = PKT_TX_OUTER_IPV6
344 * bit[9] = PKT_TX_OUTER_UDP
346 for (i = 0; i < RTE_DIM(mlx5_swp_types_table); ++i) {
349 v |= MLX5_ETH_WQE_L3_OUTER_IPV6;
351 v |= MLX5_ETH_WQE_L4_OUTER_UDP;
353 v |= MLX5_ETH_WQE_L3_INNER_IPV6;
354 if ((i & 3) == (PKT_TX_UDP_CKSUM >> 52))
355 v |= MLX5_ETH_WQE_L4_INNER_UDP;
356 mlx5_swp_types_table[i] = v;
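/*
 * Worked example (illustrative): an inner IPv6/UDP packet sent over an
 * outer IPv4 UDP tunnel has bits [1:0] = 3 (PKT_TX_UDP_CKSUM), bit 4
 * (PKT_TX_IPV6) and bit 9 (outer UDP) set, i.e. i = 0x213, so the entry
 * combines MLX5_ETH_WQE_L4_OUTER_UDP, MLX5_ETH_WQE_L3_INNER_IPV6 and
 * MLX5_ETH_WQE_L4_INNER_UDP.
 */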
361 * Set Software Parser flags and offsets in Ethernet Segment of WQE.
362 * Flags must be initialized to zero beforehand.
365 * Pointer to burst routine local context.
367 * Pointer to store Software Parser flags
369 * Configured Tx offloads mask. It is fully defined at
370 * compile time and may be used for optimization.
373 * Software Parser offsets packed in dword.
374 * Software Parser flags are set by pointer.
376 static __rte_always_inline uint32_t
377 txq_mbuf_to_swp(struct mlx5_txq_local *__rte_restrict loc,
382 unsigned int idx, off;
385 if (!MLX5_TXOFF_CONFIG(SWP))
387 ol = loc->mbuf->ol_flags;
388 tunnel = ol & PKT_TX_TUNNEL_MASK;
390 * Check whether Software Parser is required.
391 * Only custom tunnels may request it.
393 if (likely(tunnel != PKT_TX_TUNNEL_UDP && tunnel != PKT_TX_TUNNEL_IP))
396 * The index should have:
397 * bit[0:1] = PKT_TX_L4_MASK
398 * bit[4] = PKT_TX_IPV6
399 * bit[8] = PKT_TX_OUTER_IPV6
400 * bit[9] = PKT_TX_OUTER_UDP
402 idx = (ol & (PKT_TX_L4_MASK | PKT_TX_IPV6 | PKT_TX_OUTER_IPV6)) >> 52;
403 idx |= (tunnel == PKT_TX_TUNNEL_UDP) ? (1 << 9) : 0;
404 *swp_flags = mlx5_swp_types_table[idx];
406 * Set offsets for SW parser. Since ConnectX-5, SW parser just
407 * complements HW parser. SW parser starts to engage only if HW parser
408 * can't reach a header. For the older devices, HW parser will not kick
409 * in if any of SWP offsets is set. Therefore, all of the L3 offsets
410 * should be set regardless of HW offload.
412 off = loc->mbuf->outer_l2_len;
413 if (MLX5_TXOFF_CONFIG(VLAN) && ol & PKT_TX_VLAN_PKT)
414 off += sizeof(struct rte_vlan_hdr);
415 set = (off >> 1) << 8; /* Outer L3 offset. */
416 off += loc->mbuf->outer_l3_len;
417 if (tunnel == PKT_TX_TUNNEL_UDP)
418 set |= off >> 1; /* Outer L4 offset. */
419 if (ol & (PKT_TX_IPV4 | PKT_TX_IPV6)) { /* Inner IP. */
420 const uint64_t csum = ol & PKT_TX_L4_MASK;
421 off += loc->mbuf->l2_len;
422 set |= (off >> 1) << 24; /* Inner L3 offset. */
423 if (csum == PKT_TX_TCP_CKSUM ||
424 csum == PKT_TX_UDP_CKSUM ||
425 (MLX5_TXOFF_CONFIG(TSO) && ol & PKT_TX_TCP_SEG)) {
426 off += loc->mbuf->l3_len;
427 set |= (off >> 1) << 16; /* Inner L4 offset. */
430 set = rte_cpu_to_le_32(set);
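/*
 * Worked example (header sizes hypothetical): outer_l2_len = 14,
 * outer_l3_len = 20 (IPv4), l2_len = 16 (UDP plus tunnel header),
 * l3_len = 20, inner IPv4 with inner UDP checksum over a UDP tunnel:
 * outer L3 offset = 14 B (7 words), outer L4 = 34 B (17), inner L3 =
 * 50 B (25), inner L4 = 70 B (35), so set = (25 << 24) | (35 << 16) |
 * (7 << 8) | 17 = 0x19230711 before the little-endian conversion.
 */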
435 * Convert the Checksum offloads to Verbs.
438 * Pointer to the mbuf.
441 * Converted checksum flags.
443 static __rte_always_inline uint8_t
444 txq_ol_cksum_to_cs(struct rte_mbuf *buf)
447 uint8_t is_tunnel = !!(buf->ol_flags & PKT_TX_TUNNEL_MASK);
448 const uint64_t ol_flags_mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK |
449 PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM;
452 * The index should have:
453 * bit[0] = PKT_TX_TCP_SEG
454 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
455 * bit[4] = PKT_TX_IP_CKSUM
456 * bit[8] = PKT_TX_OUTER_IP_CKSUM
459 idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (!!is_tunnel << 9);
460 return mlx5_cksum_table[idx];
464 * Internal function to compute the number of used descriptors in an RX queue
470 * The number of used Rx descriptors.
473 rx_queue_count(struct mlx5_rxq_data *rxq)
475 struct rxq_zip *zip = &rxq->zip;
476 volatile struct mlx5_cqe *cqe;
477 const unsigned int cqe_n = (1 << rxq->cqe_n);
478 const unsigned int cqe_cnt = cqe_n - 1;
482 /* if we are processing a compressed cqe */
484 used = zip->cqe_cnt - zip->ca;
490 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
491 while (check_cqe(cqe, cqe_n, cq_ci) != MLX5_CQE_STATUS_HW_OWN) {
495 op_own = cqe->op_own;
496 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
497 n = rte_be_to_cpu_32(cqe->byte_cnt);
502 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
504 used = RTE_MIN(used, (1U << rxq->elts_n) - 1);
509 * DPDK callback to check the status of a rx descriptor.
514 * The index of the descriptor in the ring.
517 * The status of the Rx descriptor.
520 mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
522 struct mlx5_rxq_data *rxq = rx_queue;
523 struct mlx5_rxq_ctrl *rxq_ctrl =
524 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
525 struct rte_eth_dev *dev = ETH_DEV(rxq_ctrl->priv);
527 if (dev->rx_pkt_burst != mlx5_rx_burst) {
531 if (offset >= (1 << rxq->elts_n)) {
535 if (offset < rx_queue_count(rxq))
536 return RTE_ETH_RX_DESC_DONE;
537 return RTE_ETH_RX_DESC_AVAIL;
541 * DPDK callback to get the RX queue information
544 * Pointer to the device structure.
547 * Rx queue identifier.
550 * Pointer to the RX queue information structure.
557 mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
558 struct rte_eth_rxq_info *qinfo)
560 struct mlx5_priv *priv = dev->data->dev_private;
561 struct mlx5_rxq_data *rxq = (*priv->rxqs)[rx_queue_id];
562 struct mlx5_rxq_ctrl *rxq_ctrl =
563 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
567 qinfo->mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
568 rxq->mprq_mp : rxq->mp;
569 qinfo->conf.rx_thresh.pthresh = 0;
570 qinfo->conf.rx_thresh.hthresh = 0;
571 qinfo->conf.rx_thresh.wthresh = 0;
572 qinfo->conf.rx_free_thresh = rxq->rq_repl_thresh;
573 qinfo->conf.rx_drop_en = 1;
574 qinfo->conf.rx_deferred_start = rxq_ctrl ? 0 : 1;
575 qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
576 qinfo->scattered_rx = dev->data->scattered_rx;
577 qinfo->nb_desc = 1 << rxq->elts_n;
581 * DPDK callback to get the RX packet burst mode information
584 * Pointer to the device structure.
587 * Rx queue identifier.
590 * Pointer to the burst mode information.
593 * 0 on success, -EINVAL on failure.
597 mlx5_rx_burst_mode_get(struct rte_eth_dev *dev,
598 uint16_t rx_queue_id __rte_unused,
599 struct rte_eth_burst_mode *mode)
601 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
603 if (pkt_burst == mlx5_rx_burst) {
604 snprintf(mode->info, sizeof(mode->info), "%s", "Scalar");
605 } else if (pkt_burst == mlx5_rx_burst_mprq) {
606 snprintf(mode->info, sizeof(mode->info), "%s", "Multi-Packet RQ");
607 } else if (pkt_burst == mlx5_rx_burst_vec) {
608 #if defined RTE_ARCH_X86_64
609 snprintf(mode->info, sizeof(mode->info), "%s", "Vector SSE");
610 #elif defined RTE_ARCH_ARM64
611 snprintf(mode->info, sizeof(mode->info), "%s", "Vector Neon");
612 #elif defined RTE_ARCH_PPC_64
613 snprintf(mode->info, sizeof(mode->info), "%s", "Vector AltiVec");
624 * DPDK callback to get the number of used descriptors in an Rx queue
627 * Pointer to the device structure.
633 * The number of used Rx descriptors on success,
634 * -EINVAL if the queue is invalid.
637 mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
639 struct mlx5_priv *priv = dev->data->dev_private;
640 struct mlx5_rxq_data *rxq;
642 if (dev->rx_pkt_burst != mlx5_rx_burst) {
646 rxq = (*priv->rxqs)[rx_queue_id];
651 return rx_queue_count(rxq);
654 #define MLX5_SYSTEM_LOG_DIR "/var/log"
656 * Dump debug information to log file.
661 * If not NULL this string is printed as a header to the output
662 * and the output will be in hexadecimal view.
664 * This is the buffer address to print out.
666 * The number of bytes to dump out.
669 mlx5_dump_debug_information(const char *fname, const char *hex_title,
670 const void *buf, unsigned int hex_len)
674 MKSTR(path, "%s/%s", MLX5_SYSTEM_LOG_DIR, fname);
675 fd = fopen(path, "a+");
677 DRV_LOG(WARNING, "cannot open %s for debug dump", path);
678 MKSTR(path2, "./%s", fname);
679 fd = fopen(path2, "a+");
681 DRV_LOG(ERR, "cannot open %s for debug dump", path2);
684 DRV_LOG(INFO, "New debug dump in file %s", path2);
686 DRV_LOG(INFO, "New debug dump in file %s", path);
689 rte_hexdump(fd, hex_title, buf, hex_len);
691 fprintf(fd, "%s", (const char *)buf);
692 fprintf(fd, "\n\n\n");
697 * Move QP from error state to running state and initialize indexes.
700 * Pointer to TX queue control structure.
703 * 0 on success, else -1.
706 tx_recover_qp(struct mlx5_txq_ctrl *txq_ctrl)
708 struct mlx5_mp_arg_queue_state_modify sm = {
710 .queue_id = txq_ctrl->txq.idx,
713 if (mlx5_queue_state_modify(ETH_DEV(txq_ctrl->priv), &sm))
715 txq_ctrl->txq.wqe_ci = 0;
716 txq_ctrl->txq.wqe_pi = 0;
717 txq_ctrl->txq.elts_comp = 0;
721 /* Return 1 if the error CQE is already marked as seen, otherwise mark it and return 0. */
723 check_err_cqe_seen(volatile struct mlx5_err_cqe *err_cqe)
725 static const uint8_t magic[] = "seen";
729 for (i = 0; i < sizeof(magic); ++i)
730 if (!ret || err_cqe->rsvd1[i] != magic[i]) {
732 err_cqe->rsvd1[i] = magic[i];
741 * Pointer to TX queue structure.
743 * Pointer to the error CQE.
746 * A negative value if queue recovery failed, otherwise
747 * the error completion entry was handled successfully.
750 mlx5_tx_error_cqe_handle(struct mlx5_txq_data *__rte_restrict txq,
751 volatile struct mlx5_err_cqe *err_cqe)
753 if (err_cqe->syndrome != MLX5_CQE_SYNDROME_WR_FLUSH_ERR) {
754 const uint16_t wqe_m = ((1 << txq->wqe_n) - 1);
755 struct mlx5_txq_ctrl *txq_ctrl =
756 container_of(txq, struct mlx5_txq_ctrl, txq);
757 uint16_t new_wqe_pi = rte_be_to_cpu_16(err_cqe->wqe_counter);
758 int seen = check_err_cqe_seen(err_cqe);
760 if (!seen && txq_ctrl->dump_file_n <
761 txq_ctrl->priv->config.max_dump_files_num) {
762 MKSTR(err_str, "Unexpected CQE error syndrome "
763 "0x%02x CQN = %u SQN = %u wqe_counter = %u "
764 "wq_ci = %u cq_ci = %u", err_cqe->syndrome,
765 txq->cqe_s, txq->qp_num_8s >> 8,
766 rte_be_to_cpu_16(err_cqe->wqe_counter),
767 txq->wqe_ci, txq->cq_ci);
768 MKSTR(name, "dpdk_mlx5_port_%u_txq_%u_index_%u_%u",
769 PORT_ID(txq_ctrl->priv), txq->idx,
770 txq_ctrl->dump_file_n, (uint32_t)rte_rdtsc());
771 mlx5_dump_debug_information(name, NULL, err_str, 0);
772 mlx5_dump_debug_information(name, "MLX5 Error CQ:",
773 (const void *)((uintptr_t)
777 mlx5_dump_debug_information(name, "MLX5 Error SQ:",
778 (const void *)((uintptr_t)
782 txq_ctrl->dump_file_n++;
786 * Count errors in WQEs units.
787 * Later it can be improved to count error packets,
788 * for example, by parsing the SQ to find how many packets
789 * should be counted for each WQE.
791 txq->stats.oerrors += ((txq->wqe_ci & wqe_m) -
793 if (tx_recover_qp(txq_ctrl)) {
794 /* Recovering failed - retry later on the same WQE. */
797 /* Release all the remaining buffers. */
798 txq_free_elts(txq_ctrl);
804 * Translate RX completion flags to packet type.
807 * Pointer to RX queue structure.
811 * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
814 * Packet type for struct rte_mbuf.
816 static inline uint32_t
817 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
820 uint8_t pinfo = cqe->pkt_info;
821 uint16_t ptype = cqe->hdr_type_etc;
824 * The index to the array should have:
825 * bit[1:0] = l3_hdr_type
826 * bit[4:2] = l4_hdr_type
829 * bit[7] = outer_l3_type
831 idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10);
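/*
 * Worked example (values illustrative): pkt_info = 0x1 with masked
 * header-type bits equal to 0x02 yields idx = (0x1 << 6) | 0x02 = 0x42,
 * the tunneled IPv4 over IPv4 non-fragmented entry of mlx5_ptype_table;
 * since bit 6 is set, rxq->tunnel is OR-ed into the returned packet type
 * below.
 */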
832 return mlx5_ptype_table[idx] | rxq->tunnel * !!(idx & (1 << 6));
836 * Initialize Rx WQ and indexes.
839 * Pointer to RX queue structure.
842 mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
844 const unsigned int wqe_n = 1 << rxq->elts_n;
847 for (i = 0; (i != wqe_n); ++i) {
848 volatile struct mlx5_wqe_data_seg *scat;
852 if (mlx5_rxq_mprq_enabled(rxq)) {
853 struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[i];
855 scat = &((volatile struct mlx5_wqe_mprq *)
857 addr = (uintptr_t)mlx5_mprq_buf_addr(buf,
858 1 << rxq->strd_num_n);
859 byte_count = (1 << rxq->strd_sz_n) *
860 (1 << rxq->strd_num_n);
862 struct rte_mbuf *buf = (*rxq->elts)[i];
864 scat = &((volatile struct mlx5_wqe_data_seg *)
866 addr = rte_pktmbuf_mtod(buf, uintptr_t);
867 byte_count = DATA_LEN(buf);
869 /* scat->addr must be able to store a pointer. */
870 MLX5_ASSERT(sizeof(scat->addr) >= sizeof(uintptr_t));
871 *scat = (struct mlx5_wqe_data_seg){
872 .addr = rte_cpu_to_be_64(addr),
873 .byte_count = rte_cpu_to_be_32(byte_count),
874 .lkey = mlx5_rx_addr2mr(rxq, addr),
877 rxq->consumed_strd = 0;
878 rxq->decompressed = 0;
880 rxq->zip = (struct rxq_zip){
883 /* Update doorbell counter. */
884 rxq->rq_ci = wqe_n >> rxq->sges_n;
886 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
890 * Modify a Verbs/DevX queue state.
891 * This must be called from the primary process.
894 * Pointer to Ethernet device.
896 * State modify request parameters.
899 * 0 in case of success else non-zero value and rte_errno is set.
902 mlx5_queue_state_modify_primary(struct rte_eth_dev *dev,
903 const struct mlx5_mp_arg_queue_state_modify *sm)
906 struct mlx5_priv *priv = dev->data->dev_private;
909 struct mlx5_rxq_data *rxq = (*priv->rxqs)[sm->queue_id];
910 struct mlx5_rxq_ctrl *rxq_ctrl =
911 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
913 if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV) {
914 struct ibv_wq_attr mod = {
915 .attr_mask = IBV_WQ_ATTR_STATE,
916 .wq_state = sm->state,
919 ret = mlx5_glue->modify_wq(rxq_ctrl->obj->wq, &mod);
920 } else { /* rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ. */
921 struct mlx5_devx_modify_rq_attr rq_attr;
923 memset(&rq_attr, 0, sizeof(rq_attr));
924 if (sm->state == IBV_WQS_RESET) {
925 rq_attr.rq_state = MLX5_RQC_STATE_ERR;
926 rq_attr.state = MLX5_RQC_STATE_RST;
927 } else if (sm->state == IBV_WQS_RDY) {
928 rq_attr.rq_state = MLX5_RQC_STATE_RST;
929 rq_attr.state = MLX5_RQC_STATE_RDY;
930 } else if (sm->state == IBV_WQS_ERR) {
931 rq_attr.rq_state = MLX5_RQC_STATE_RDY;
932 rq_attr.state = MLX5_RQC_STATE_ERR;
934 ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq,
938 DRV_LOG(ERR, "Cannot change Rx WQ state to %u - %s",
939 sm->state, strerror(errno));
944 struct mlx5_txq_data *txq = (*priv->txqs)[sm->queue_id];
945 struct mlx5_txq_ctrl *txq_ctrl =
946 container_of(txq, struct mlx5_txq_ctrl, txq);
948 if (txq_ctrl->obj->type == MLX5_TXQ_OBJ_TYPE_DEVX_SQ) {
949 struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
951 /* Change queue state to reset. */
952 msq_attr.sq_state = MLX5_SQC_STATE_ERR;
953 msq_attr.state = MLX5_SQC_STATE_RST;
954 ret = mlx5_devx_cmd_modify_sq(txq_ctrl->obj->sq_devx,
957 DRV_LOG(ERR, "Cannot change the "
958 "Tx QP state to RESET %s",
963 /* Change queue state to ready. */
964 msq_attr.sq_state = MLX5_SQC_STATE_RST;
965 msq_attr.state = MLX5_SQC_STATE_RDY;
966 ret = mlx5_devx_cmd_modify_sq(txq_ctrl->obj->sq_devx,
969 DRV_LOG(ERR, "Cannot change the "
970 "Tx QP state to READY %s",
976 struct ibv_qp_attr mod = {
977 .qp_state = IBV_QPS_RESET,
978 .port_num = (uint8_t)priv->dev_port,
980 struct ibv_qp *qp = txq_ctrl->obj->qp;
983 (txq_ctrl->obj->type == MLX5_TXQ_OBJ_TYPE_IBV);
985 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
987 DRV_LOG(ERR, "Cannot change the "
988 "Tx QP state to RESET %s",
993 mod.qp_state = IBV_QPS_INIT;
994 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
996 DRV_LOG(ERR, "Cannot change the "
997 "Tx QP state to INIT %s",
1002 mod.qp_state = IBV_QPS_RTR;
1003 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
1005 DRV_LOG(ERR, "Cannot change the "
1006 "Tx QP state to RTR %s",
1011 mod.qp_state = IBV_QPS_RTS;
1012 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
1014 DRV_LOG(ERR, "Cannot change the "
1015 "Tx QP state to RTS %s",
1026 * Modify a Verbs queue state.
1029 * Pointer to Ethernet device.
1031 * State modify request parameters.
1034 * 0 in case of success else non-zero value.
1037 mlx5_queue_state_modify(struct rte_eth_dev *dev,
1038 struct mlx5_mp_arg_queue_state_modify *sm)
1040 struct mlx5_priv *priv = dev->data->dev_private;
1043 switch (rte_eal_process_type()) {
1044 case RTE_PROC_PRIMARY:
1045 ret = mlx5_queue_state_modify_primary(dev, sm);
1047 case RTE_PROC_SECONDARY:
1048 ret = mlx5_mp_req_queue_state_modify(&priv->mp_id, sm);
1057 * Handle a Rx error.
1058 * The function moves the RQ state to reset when the first error CQE is
1059 * seen, then the caller's loop drains the CQ. When the CQ is empty,
1060 * it moves the RQ state to ready and reinitializes the RQ.
1061 * Identifying the next CQE and counting errors remain the caller's responsibility.
1064 * Pointer to RX queue structure.
1066 * 1 when called from a vectorized Rx burst; mbufs need to be prepared for the RQ.
1067 * 0 when called from non-vectorized Rx burst.
1070 * -1 in case of recovery error, otherwise the CQE status.
1073 mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
1075 const uint16_t cqe_n = 1 << rxq->cqe_n;
1076 const uint16_t cqe_mask = cqe_n - 1;
1077 const unsigned int wqe_n = 1 << rxq->elts_n;
1078 struct mlx5_rxq_ctrl *rxq_ctrl =
1079 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
1081 volatile struct mlx5_cqe *cqe;
1082 volatile struct mlx5_err_cqe *err_cqe;
1084 .cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask],
1086 struct mlx5_mp_arg_queue_state_modify sm;
1089 switch (rxq->err_state) {
1090 case MLX5_RXQ_ERR_STATE_NO_ERROR:
1091 rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_RESET;
1093 case MLX5_RXQ_ERR_STATE_NEED_RESET:
1095 sm.queue_id = rxq->idx;
1096 sm.state = IBV_WQS_RESET;
1097 if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv), &sm))
1099 if (rxq_ctrl->dump_file_n <
1100 rxq_ctrl->priv->config.max_dump_files_num) {
1101 MKSTR(err_str, "Unexpected CQE error syndrome "
1102 "0x%02x CQN = %u RQN = %u wqe_counter = %u"
1103 " rq_ci = %u cq_ci = %u", u.err_cqe->syndrome,
1104 rxq->cqn, rxq_ctrl->wqn,
1105 rte_be_to_cpu_16(u.err_cqe->wqe_counter),
1106 rxq->rq_ci << rxq->sges_n, rxq->cq_ci);
1107 MKSTR(name, "dpdk_mlx5_port_%u_rxq_%u_%u",
1108 rxq->port_id, rxq->idx, (uint32_t)rte_rdtsc());
1109 mlx5_dump_debug_information(name, NULL, err_str, 0);
1110 mlx5_dump_debug_information(name, "MLX5 Error CQ:",
1111 (const void *)((uintptr_t)
1113 sizeof(*u.cqe) * cqe_n);
1114 mlx5_dump_debug_information(name, "MLX5 Error RQ:",
1115 (const void *)((uintptr_t)
1118 rxq_ctrl->dump_file_n++;
1120 rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_READY;
1122 case MLX5_RXQ_ERR_STATE_NEED_READY:
1123 ret = check_cqe(u.cqe, cqe_n, rxq->cq_ci);
1124 if (ret == MLX5_CQE_STATUS_HW_OWN) {
1126 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1129 * The RQ consumer index must be zeroed while moving
1130 * from RESET state to RDY state.
1132 *rxq->rq_db = rte_cpu_to_be_32(0);
1135 sm.queue_id = rxq->idx;
1136 sm.state = IBV_WQS_RDY;
1137 if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv),
1141 const uint16_t q_mask = wqe_n - 1;
1143 struct rte_mbuf **elt;
1145 unsigned int n = wqe_n - (rxq->rq_ci -
1148 for (i = 0; i < (int)n; ++i) {
1149 elt_idx = (rxq->rq_ci + i) & q_mask;
1150 elt = &(*rxq->elts)[elt_idx];
1151 *elt = rte_mbuf_raw_alloc(rxq->mp);
1153 for (i--; i >= 0; --i) {
1154 elt_idx = (rxq->rq_ci +
1158 rte_pktmbuf_free_seg
1164 for (i = 0; i < (int)wqe_n; ++i) {
1165 elt = &(*rxq->elts)[i];
1167 (uint16_t)((*elt)->buf_len -
1168 rte_pktmbuf_headroom(*elt));
1170 /* Padding with a fake mbuf for vec Rx. */
1171 for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
1172 (*rxq->elts)[wqe_n + i] =
1175 mlx5_rxq_initialize(rxq);
1176 rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
1185 * Get size of the next packet for a given CQE. For compressed CQEs, the
1186 * consumer index is updated only once all packets of the current one have
1190 * Pointer to RX queue.
1194 * Store pointer to mini-CQE if compressed. Otherwise, the pointer is not
1198 * 0 in case of empty CQE, otherwise the packet size in bytes.
1201 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
1202 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe)
1204 struct rxq_zip *zip = &rxq->zip;
1205 uint16_t cqe_n = cqe_cnt + 1;
1211 /* Process compressed data in the CQE and mini arrays. */
1213 volatile struct mlx5_mini_cqe8 (*mc)[8] =
1214 (volatile struct mlx5_mini_cqe8 (*)[8])
1215 (uintptr_t)(&(*rxq->cqes)[zip->ca &
1218 len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt);
1219 *mcqe = &(*mc)[zip->ai & 7];
1220 if ((++zip->ai & 7) == 0) {
1221 /* Invalidate consumed CQEs */
1224 while (idx != end) {
1225 (*rxq->cqes)[idx & cqe_cnt].op_own =
1226 MLX5_CQE_INVALIDATE;
1230 * Increment consumer index to skip the number
1231 * of CQEs consumed. Hardware leaves holes in
1232 * the CQ ring for software use.
1237 if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) {
1238 /* Invalidate the rest */
1242 while (idx != end) {
1243 (*rxq->cqes)[idx & cqe_cnt].op_own =
1244 MLX5_CQE_INVALIDATE;
1247 rxq->cq_ci = zip->cq_ci;
1251 * No compressed data, get next CQE and verify if it is
1258 ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
1259 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
1260 if (unlikely(ret == MLX5_CQE_STATUS_ERR ||
1262 ret = mlx5_rx_err_handle(rxq, 0);
1263 if (ret == MLX5_CQE_STATUS_HW_OWN ||
1271 op_own = cqe->op_own;
1272 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) {
1273 volatile struct mlx5_mini_cqe8 (*mc)[8] =
1274 (volatile struct mlx5_mini_cqe8 (*)[8])
1275 (uintptr_t)(&(*rxq->cqes)
1279 /* Fix endianness. */
1280 zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
1282 * Current mini array position is the one
1283 * returned by check_cqe().
1285 * If completion comprises several mini arrays,
1286 * as a special case the second one is located
1287 * 7 CQEs after the initial CQE instead of 8
1288 * for subsequent ones.
1290 zip->ca = rxq->cq_ci;
1291 zip->na = zip->ca + 7;
1292 /* Compute the next non compressed CQE. */
1294 zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
1295 /* Get packet size to return. */
1296 len = rte_be_to_cpu_32((*mc)[0].byte_cnt);
1299 /* Prefetch all to be invalidated */
1302 while (idx != end) {
1303 rte_prefetch0(&(*rxq->cqes)[(idx) &
1308 len = rte_be_to_cpu_32(cqe->byte_cnt);
1311 if (unlikely(rxq->err_state)) {
1312 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1313 ++rxq->stats.idropped;
1321 * Translate RX completion flags to offload flags.
1327 * Offload flags (ol_flags) for struct rte_mbuf.
1329 static inline uint32_t
1330 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe)
1332 uint32_t ol_flags = 0;
1333 uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);
1337 MLX5_CQE_RX_L3_HDR_VALID,
1338 PKT_RX_IP_CKSUM_GOOD) |
1340 MLX5_CQE_RX_L4_HDR_VALID,
1341 PKT_RX_L4_CKSUM_GOOD);
1346 * Fill in mbuf fields from RX completion flags.
1347 * Note that pkt->ol_flags should be initialized outside of this function.
1350 * Pointer to RX queue.
1355 * @param rss_hash_res
1356 * Packet RSS Hash result.
1359 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
1360 volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res)
1362 /* Update packet information. */
1363 pkt->packet_type = rxq_cq_to_pkt_type(rxq, cqe);
1364 if (rss_hash_res && rxq->rss_hash) {
1365 pkt->hash.rss = rss_hash_res;
1366 pkt->ol_flags |= PKT_RX_RSS_HASH;
1368 if (rxq->mark && MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) {
1369 pkt->ol_flags |= PKT_RX_FDIR;
1370 if (cqe->sop_drop_qpn !=
1371 rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) {
1372 uint32_t mark = cqe->sop_drop_qpn;
1374 pkt->ol_flags |= PKT_RX_FDIR_ID;
1375 pkt->hash.fdir.hi = mlx5_flow_mark_get(mark);
1378 if (rxq->dynf_meta && cqe->flow_table_metadata) {
1379 pkt->ol_flags |= rxq->flow_meta_mask;
1380 *RTE_MBUF_DYNFIELD(pkt, rxq->flow_meta_offset, uint32_t *) =
1381 cqe->flow_table_metadata;
1384 pkt->ol_flags |= rxq_cq_to_ol_flags(cqe);
1385 if (rxq->vlan_strip &&
1386 (cqe->hdr_type_etc & rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) {
1387 pkt->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1388 pkt->vlan_tci = rte_be_to_cpu_16(cqe->vlan_info);
1390 if (rxq->hw_timestamp) {
1391 pkt->timestamp = rte_be_to_cpu_64(cqe->timestamp);
1392 pkt->ol_flags |= PKT_RX_TIMESTAMP;
1397 * DPDK callback for RX.
1400 * Generic pointer to RX queue structure.
1402 * Array to store received packets.
1404 * Maximum number of packets in array.
1407 * Number of packets successfully received (<= pkts_n).
1410 mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1412 struct mlx5_rxq_data *rxq = dpdk_rxq;
1413 const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1;
1414 const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1;
1415 const unsigned int sges_n = rxq->sges_n;
1416 struct rte_mbuf *pkt = NULL;
1417 struct rte_mbuf *seg = NULL;
1418 volatile struct mlx5_cqe *cqe =
1419 &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1421 unsigned int rq_ci = rxq->rq_ci << sges_n;
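/*
 * Note (illustrative): rq_ci here counts individual scatter segments,
 * e.g. with sges_n = 2 each packet slot holds up to 4 segments, so rq_ci
 * advances in units of segments and is converted back with >> sges_n when
 * rxq->rq_ci is updated at the end of the burst.
 */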
1422 int len = 0; /* keep its value across iterations. */
1425 unsigned int idx = rq_ci & wqe_cnt;
1426 volatile struct mlx5_wqe_data_seg *wqe =
1427 &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx];
1428 struct rte_mbuf *rep = (*rxq->elts)[idx];
1429 volatile struct mlx5_mini_cqe8 *mcqe = NULL;
1430 uint32_t rss_hash_res;
1438 rep = rte_mbuf_raw_alloc(rxq->mp);
1439 if (unlikely(rep == NULL)) {
1440 ++rxq->stats.rx_nombuf;
1443 * no buffers before we even started,
1444 * bail out silently.
1448 while (pkt != seg) {
1449 MLX5_ASSERT(pkt != (*rxq->elts)[idx]);
1453 rte_mbuf_raw_free(pkt);
1459 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1460 len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe);
1462 rte_mbuf_raw_free(rep);
1466 MLX5_ASSERT(len >= (rxq->crc_present << 2));
1467 pkt->ol_flags &= EXT_ATTACHED_MBUF;
1468 /* If compressed, take hash result from mini-CQE. */
1469 rss_hash_res = rte_be_to_cpu_32(mcqe == NULL ?
1471 mcqe->rx_hash_result);
1472 rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
1473 if (rxq->crc_present)
1474 len -= RTE_ETHER_CRC_LEN;
1476 if (cqe->lro_num_seg > 1) {
1478 (rte_pktmbuf_mtod(pkt, uint8_t *), cqe,
1480 pkt->ol_flags |= PKT_RX_LRO;
1481 pkt->tso_segsz = len / cqe->lro_num_seg;
1484 DATA_LEN(rep) = DATA_LEN(seg);
1485 PKT_LEN(rep) = PKT_LEN(seg);
1486 SET_DATA_OFF(rep, DATA_OFF(seg));
1487 PORT(rep) = PORT(seg);
1488 (*rxq->elts)[idx] = rep;
1490 * Fill NIC descriptor with the new buffer. The lkey and size
1491 * of the buffers are already known, only the buffer address
1494 wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
1495 /* If there's only one MR, no need to replace LKey in WQE. */
1496 if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
1497 wqe->lkey = mlx5_rx_mb2mr(rxq, rep);
1498 if (len > DATA_LEN(seg)) {
1499 len -= DATA_LEN(seg);
1504 DATA_LEN(seg) = len;
1505 #ifdef MLX5_PMD_SOFT_COUNTERS
1506 /* Increment bytes counter. */
1507 rxq->stats.ibytes += PKT_LEN(pkt);
1509 /* Return packet. */
1514 /* Align consumer index to the next stride. */
1519 if (unlikely((i == 0) && ((rq_ci >> sges_n) == rxq->rq_ci)))
1521 /* Update the consumer index. */
1522 rxq->rq_ci = rq_ci >> sges_n;
1524 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1526 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
1527 #ifdef MLX5_PMD_SOFT_COUNTERS
1528 /* Increment packets counter. */
1529 rxq->stats.ipackets += i;
1535 * Update LRO packet TCP header.
1536 * The HW LRO feature doesn't update the TCP header after coalescing the
1537 * TCP segments, but supplies information in the CQE so SW can fill it in.
1540 * Pointer to the TCP header.
1542 * Pointer to the completion entry.
1544 * The L3 pseudo-header checksum.
1547 mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *__rte_restrict tcp,
1548 volatile struct mlx5_cqe *__rte_restrict cqe,
1551 uint8_t l4_type = (rte_be_to_cpu_16(cqe->hdr_type_etc) &
1552 MLX5_CQE_L4_TYPE_MASK) >> MLX5_CQE_L4_TYPE_SHIFT;
1554 * The HW calculates only the TCP payload checksum; SW needs to complete
1555 * the TCP header checksum and the L3 pseudo-header checksum.
1557 uint32_t csum = phcsum + cqe->csum;
1559 if (l4_type == MLX5_L4_HDR_TYPE_TCP_EMPTY_ACK ||
1560 l4_type == MLX5_L4_HDR_TYPE_TCP_WITH_ACL) {
1561 tcp->tcp_flags |= RTE_TCP_ACK_FLAG;
1562 tcp->recv_ack = cqe->lro_ack_seq_num;
1563 tcp->rx_win = cqe->lro_tcp_win;
1565 if (cqe->lro_tcppsh_abort_dupack & MLX5_CQE_LRO_PUSH_MASK)
1566 tcp->tcp_flags |= RTE_TCP_PSH_FLAG;
1568 csum += rte_raw_cksum(tcp, (tcp->data_off >> 4) * 4);
1569 csum = ((csum & 0xffff0000) >> 16) + (csum & 0xffff);
1570 csum = (~csum) & 0xffff;
1577 * Update LRO packet headers.
1578 * The HW LRO feature doesn't update the L3/TCP headers after coalescing the
1579 * TCP segments, but supplies information in the CQE so SW can fill them in.
1582 * The packet address.
1584 * Pointer to the completion entry.
1586 * The packet length.
1589 mlx5_lro_update_hdr(uint8_t *__rte_restrict padd,
1590 volatile struct mlx5_cqe *__rte_restrict cqe,
1594 struct rte_ether_hdr *eth;
1595 struct rte_vlan_hdr *vlan;
1596 struct rte_ipv4_hdr *ipv4;
1597 struct rte_ipv6_hdr *ipv6;
1598 struct rte_tcp_hdr *tcp;
1603 uint16_t proto = h.eth->ether_type;
1607 while (proto == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
1608 proto == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
1609 proto = h.vlan->eth_proto;
1612 if (proto == RTE_BE16(RTE_ETHER_TYPE_IPV4)) {
1613 h.ipv4->time_to_live = cqe->lro_min_ttl;
1614 h.ipv4->total_length = rte_cpu_to_be_16(len - (h.hdr - padd));
1615 h.ipv4->hdr_checksum = 0;
1616 h.ipv4->hdr_checksum = rte_ipv4_cksum(h.ipv4);
1617 phcsum = rte_ipv4_phdr_cksum(h.ipv4, 0);
1620 h.ipv6->hop_limits = cqe->lro_min_ttl;
1621 h.ipv6->payload_len = rte_cpu_to_be_16(len - (h.hdr - padd) -
1623 phcsum = rte_ipv6_phdr_cksum(h.ipv6, 0);
1626 mlx5_lro_update_tcp_hdr(h.tcp, cqe, phcsum);
1630 mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
1632 struct mlx5_mprq_buf *buf = opaque;
1634 if (rte_atomic16_read(&buf->refcnt) == 1) {
1635 rte_mempool_put(buf->mp, buf);
1636 } else if (rte_atomic16_add_return(&buf->refcnt, -1) == 0) {
1637 rte_atomic16_set(&buf->refcnt, 1);
1638 rte_mempool_put(buf->mp, buf);
1643 mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf)
1645 mlx5_mprq_buf_free_cb(NULL, buf);
1649 mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx,
1650 const unsigned int strd_n)
1652 struct mlx5_mprq_buf *rep = rxq->mprq_repl;
1653 volatile struct mlx5_wqe_data_seg *wqe =
1654 &((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg;
1657 MLX5_ASSERT(rep != NULL);
1658 /* Replace MPRQ buf. */
1659 (*rxq->mprq_bufs)[rq_idx] = rep;
1661 addr = mlx5_mprq_buf_addr(rep, strd_n);
1662 wqe->addr = rte_cpu_to_be_64((uintptr_t)addr);
1663 /* If there's only one MR, no need to replace LKey in WQE. */
1664 if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
1665 wqe->lkey = mlx5_rx_addr2mr(rxq, (uintptr_t)addr);
1666 /* Stash a mbuf for next replacement. */
1667 if (likely(!rte_mempool_get(rxq->mprq_mp, (void **)&rep)))
1668 rxq->mprq_repl = rep;
1670 rxq->mprq_repl = NULL;
1674 * DPDK callback for RX with Multi-Packet RQ support.
1677 * Generic pointer to RX queue structure.
1679 * Array to store received packets.
1681 * Maximum number of packets in array.
1684 * Number of packets successfully received (<= pkts_n).
1687 mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1689 struct mlx5_rxq_data *rxq = dpdk_rxq;
1690 const unsigned int strd_n = 1 << rxq->strd_num_n;
1691 const unsigned int strd_sz = 1 << rxq->strd_sz_n;
1692 const unsigned int strd_shift =
1693 MLX5_MPRQ_STRIDE_SHIFT_BYTE * rxq->strd_shift_en;
1694 const unsigned int cq_mask = (1 << rxq->cqe_n) - 1;
1695 const unsigned int wq_mask = (1 << rxq->elts_n) - 1;
1696 volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
1698 uint32_t rq_ci = rxq->rq_ci;
1699 uint16_t consumed_strd = rxq->consumed_strd;
1700 struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
1702 while (i < pkts_n) {
1703 struct rte_mbuf *pkt;
1711 int32_t hdrm_overlap;
1712 volatile struct mlx5_mini_cqe8 *mcqe = NULL;
1713 uint32_t rss_hash_res = 0;
1715 if (consumed_strd == strd_n) {
1716 /* Replace WQE only if the buffer is still in use. */
1717 if (rte_atomic16_read(&buf->refcnt) > 1) {
1718 mprq_buf_replace(rxq, rq_ci & wq_mask, strd_n);
1719 /* Release the old buffer. */
1720 mlx5_mprq_buf_free(buf);
1721 } else if (unlikely(rxq->mprq_repl == NULL)) {
1722 struct mlx5_mprq_buf *rep;
1725 * Currently, the MPRQ mempool is out of buffers
1726 * and memcpy is done regardless of the Rx packet
1727 * size. Retry allocation to get back to
1730 if (!rte_mempool_get(rxq->mprq_mp,
1732 rxq->mprq_repl = rep;
1734 /* Advance to the next WQE. */
1737 buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
1739 cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
1740 ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe);
1744 strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
1745 MLX5_MPRQ_STRIDE_NUM_SHIFT;
1746 MLX5_ASSERT(strd_cnt);
1747 consumed_strd += strd_cnt;
1748 if (byte_cnt & MLX5_MPRQ_FILLER_MASK)
1751 rss_hash_res = rte_be_to_cpu_32(cqe->rx_hash_res);
1752 strd_idx = rte_be_to_cpu_16(cqe->wqe_counter);
1754 /* mini-CQE for MPRQ doesn't have hash result. */
1755 strd_idx = rte_be_to_cpu_16(mcqe->stride_idx);
1757 MLX5_ASSERT(strd_idx < strd_n);
1758 MLX5_ASSERT(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) &
1760 pkt = rte_pktmbuf_alloc(rxq->mp);
1761 if (unlikely(pkt == NULL)) {
1762 ++rxq->stats.rx_nombuf;
1765 len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
1766 MLX5_ASSERT((int)len >= (rxq->crc_present << 2));
1767 if (rxq->crc_present)
1768 len -= RTE_ETHER_CRC_LEN;
1769 offset = strd_idx * strd_sz + strd_shift;
1770 addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf, strd_n), offset);
1771 hdrm_overlap = len + RTE_PKTMBUF_HEADROOM - strd_cnt * strd_sz;
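/*
 * Illustrative numbers (hypothetical): with strd_sz = 2048, strd_shift = 0,
 * strd_idx = 3, strd_cnt = 2, len = 4000 and RTE_PKTMBUF_HEADROOM = 128,
 * the packet starts at offset 6144 in the buffer and hdrm_overlap =
 * 4000 + 128 - 4096 = 32, i.e. the last 32 bytes of the packet fall into
 * the area that serves as headroom when the next stride is attached, so
 * they are either copied below or carried in a separate seg mbuf.
 */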
1773 * Memcpy packets to the target mbuf if:
1774 * - The size of packet is smaller than mprq_max_memcpy_len.
1775 * - Out of buffer in the Mempool for Multi-Packet RQ.
1776 * - The packet's stride overlaps a headroom and scatter is off.
1778 if (len <= rxq->mprq_max_memcpy_len ||
1779 rxq->mprq_repl == NULL ||
1780 (hdrm_overlap > 0 && !rxq->strd_scatter_en)) {
1781 if (likely(rte_pktmbuf_tailroom(pkt) >= len)) {
1782 rte_memcpy(rte_pktmbuf_mtod(pkt, void *),
1784 DATA_LEN(pkt) = len;
1785 } else if (rxq->strd_scatter_en) {
1786 struct rte_mbuf *prev = pkt;
1788 RTE_MIN(rte_pktmbuf_tailroom(pkt), len);
1789 uint32_t rem_len = len - seg_len;
1791 rte_memcpy(rte_pktmbuf_mtod(pkt, void *),
1793 DATA_LEN(pkt) = seg_len;
1795 struct rte_mbuf *next =
1796 rte_pktmbuf_alloc(rxq->mp);
1798 if (unlikely(next == NULL)) {
1799 rte_pktmbuf_free(pkt);
1800 ++rxq->stats.rx_nombuf;
1804 SET_DATA_OFF(next, 0);
1805 addr = RTE_PTR_ADD(addr, seg_len);
1807 (rte_pktmbuf_tailroom(next),
1810 (rte_pktmbuf_mtod(next, void *),
1812 DATA_LEN(next) = seg_len;
1818 rte_pktmbuf_free_seg(pkt);
1819 ++rxq->stats.idropped;
1823 rte_iova_t buf_iova;
1824 struct rte_mbuf_ext_shared_info *shinfo;
1825 uint16_t buf_len = strd_cnt * strd_sz;
1828 /* Increment the refcnt of the whole chunk. */
1829 rte_atomic16_add_return(&buf->refcnt, 1);
1830 MLX5_ASSERT((uint16_t)rte_atomic16_read(&buf->refcnt) <=
1832 buf_addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM);
1834 * MLX5 device doesn't use iova but it is necessary in a
1835 * case where the Rx packet is transmitted via a
1838 buf_iova = rte_mempool_virt2iova(buf) +
1839 RTE_PTR_DIFF(buf_addr, buf);
1840 shinfo = &buf->shinfos[strd_idx];
1841 rte_mbuf_ext_refcnt_set(shinfo, 1);
1843 * EXT_ATTACHED_MBUF will be set to pkt->ol_flags when
1844 * attaching the stride to mbuf and more offload flags
1845 * will be added below by calling rxq_cq_to_mbuf().
1846 * Other fields will be overwritten.
1848 rte_pktmbuf_attach_extbuf(pkt, buf_addr, buf_iova,
1850 /* Set mbuf head-room. */
1851 SET_DATA_OFF(pkt, RTE_PKTMBUF_HEADROOM);
1852 MLX5_ASSERT(pkt->ol_flags == EXT_ATTACHED_MBUF);
1853 MLX5_ASSERT(rte_pktmbuf_tailroom(pkt) >=
1854 len - (hdrm_overlap > 0 ? hdrm_overlap : 0));
1855 DATA_LEN(pkt) = len;
1857 * Copy the last fragment of a packet (up to headroom
1858 * size bytes) in case there is a stride overlap with
1859 * a next packet's headroom. Allocate a separate mbuf
1860 * to store this fragment and link it. Scatter is on.
1862 if (hdrm_overlap > 0) {
1863 MLX5_ASSERT(rxq->strd_scatter_en);
1864 struct rte_mbuf *seg =
1865 rte_pktmbuf_alloc(rxq->mp);
1867 if (unlikely(seg == NULL)) {
1868 rte_pktmbuf_free_seg(pkt);
1869 ++rxq->stats.rx_nombuf;
1872 SET_DATA_OFF(seg, 0);
1873 rte_memcpy(rte_pktmbuf_mtod(seg, void *),
1874 RTE_PTR_ADD(addr, len - hdrm_overlap),
1876 DATA_LEN(seg) = hdrm_overlap;
1877 DATA_LEN(pkt) = len - hdrm_overlap;
1882 rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
1883 if (cqe->lro_num_seg > 1) {
1884 mlx5_lro_update_hdr(addr, cqe, len);
1885 pkt->ol_flags |= PKT_RX_LRO;
1886 pkt->tso_segsz = len / cqe->lro_num_seg;
1889 PORT(pkt) = rxq->port_id;
1890 #ifdef MLX5_PMD_SOFT_COUNTERS
1891 /* Increment bytes counter. */
1892 rxq->stats.ibytes += PKT_LEN(pkt);
1894 /* Return packet. */
1899 /* Update the consumer indexes. */
1900 rxq->consumed_strd = consumed_strd;
1902 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1903 if (rq_ci != rxq->rq_ci) {
1906 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
1908 #ifdef MLX5_PMD_SOFT_COUNTERS
1909 /* Increment packets counter. */
1910 rxq->stats.ipackets += i;
1916 * Dummy DPDK callback for TX.
1918 * This function is used to temporarily replace the real callback during
1919 * unsafe control operations on the queue, or in case of error.
1922 * Generic pointer to TX queue structure.
1924 * Packets to transmit.
1926 * Number of packets in array.
1929 * Number of packets successfully transmitted (<= pkts_n).
1932 removed_tx_burst(void *dpdk_txq __rte_unused,
1933 struct rte_mbuf **pkts __rte_unused,
1934 uint16_t pkts_n __rte_unused)
1941 * Dummy DPDK callback for RX.
1943 * This function is used to temporarily replace the real callback during
1944 * unsafe control operations on the queue, or in case of error.
1947 * Generic pointer to RX queue structure.
1949 * Array to store received packets.
1951 * Maximum number of packets in array.
1954 * Number of packets successfully received (<= pkts_n).
1957 removed_rx_burst(void *dpdk_rxq __rte_unused,
1958 struct rte_mbuf **pkts __rte_unused,
1959 uint16_t pkts_n __rte_unused)
1966 * Vectorized Rx/Tx routines are not compiled in when required vector
1967 * instructions are not supported on a target architecture. The following null
1968 * stubs are needed for linkage when those are not included outside of this file
1969 * (e.g. mlx5_rxtx_vec_sse.c for x86).
1973 mlx5_rx_burst_vec(void *dpdk_rxq __rte_unused,
1974 struct rte_mbuf **pkts __rte_unused,
1975 uint16_t pkts_n __rte_unused)
1981 mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)
1987 mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused)
1993 * Free the mbufs from the linear array of pointers.
1996 * Pointer to the array of packets to be freed.
1998 * Number of packets to be freed.
2000 * Configured Tx offloads mask. It is fully defined at
2001 * compile time and may be used for optimization.
2003 static __rte_always_inline void
2004 mlx5_tx_free_mbuf(struct rte_mbuf **__rte_restrict pkts,
2005 unsigned int pkts_n,
2006 unsigned int olx __rte_unused)
2008 struct rte_mempool *pool = NULL;
2009 struct rte_mbuf **p_free = NULL;
2010 struct rte_mbuf *mbuf;
2011 unsigned int n_free = 0;
2014 * The implemented algorithm eliminates
2015 * copying pointers to a temporary array
2016 * for rte_mempool_put_bulk() calls.
2019 MLX5_ASSERT(pkts_n);
2023 * Decrement mbuf reference counter, detach
2024 * indirect and external buffers if needed.
2026 mbuf = rte_pktmbuf_prefree_seg(*pkts);
2027 if (likely(mbuf != NULL)) {
2028 MLX5_ASSERT(mbuf == *pkts);
2029 if (likely(n_free != 0)) {
2030 if (unlikely(pool != mbuf->pool))
2031 /* From different pool. */
2034 /* Start new scan array. */
2041 if (unlikely(pkts_n == 0)) {
2047 * This happens if mbuf is still referenced.
2048 * We can't put it back to the pool, skip.
2052 if (unlikely(n_free != 0))
2053 /* There is some array to free. */
2055 if (unlikely(pkts_n == 0))
2056 /* Last mbuf, nothing to free. */
2062 * This loop is implemented to avoid multiple
2063 * inlining of rte_mempool_put_bulk().
2066 MLX5_ASSERT(p_free);
2067 MLX5_ASSERT(n_free);
2069 * Free the array of pre-freed mbufs
2070 * belonging to the same memory pool.
2072 rte_mempool_put_bulk(pool, (void *)p_free, n_free);
2073 if (unlikely(mbuf != NULL)) {
2074 /* There is the request to start new scan. */
2079 if (likely(pkts_n != 0))
2082 * This is the last mbuf to be freed.
2083 * Do one more loop iteration to complete.
2084 * This is the rare case of the last unique mbuf.
2089 if (likely(pkts_n == 0))
2098 * Free the mbufs from the elts ring buffer up to the new tail.
2101 * Pointer to Tx queue structure.
2103 * Index in elts to free up to; it becomes the new elts tail.
2105 * Configured Tx offloads mask. It is fully defined at
2106 * compile time and may be used for optimization.
2108 static __rte_always_inline void
2109 mlx5_tx_free_elts(struct mlx5_txq_data *__rte_restrict txq,
2111 unsigned int olx __rte_unused)
2113 uint16_t n_elts = tail - txq->elts_tail;
2115 MLX5_ASSERT(n_elts);
2116 MLX5_ASSERT(n_elts <= txq->elts_s);
2118 * Implement a loop to support ring buffer wraparound
2119 * with single inlining of mlx5_tx_free_mbuf().
2124 part = txq->elts_s - (txq->elts_tail & txq->elts_m);
2125 part = RTE_MIN(part, n_elts);
2127 MLX5_ASSERT(part <= txq->elts_s);
2128 mlx5_tx_free_mbuf(&txq->elts[txq->elts_tail & txq->elts_m],
2130 txq->elts_tail += part;
2136 * Store the mbuf being sent into elts ring buffer.
2137 * On Tx completion these mbufs will be freed.
2140 * Pointer to Tx queue structure.
2142 * Pointer to array of packets to be stored.
2144 * Number of packets to be stored.
2146 * Configured Tx offloads mask. It is fully defined at
2147 * compile time and may be used for optimization.
2149 static __rte_always_inline void
2150 mlx5_tx_copy_elts(struct mlx5_txq_data *__rte_restrict txq,
2151 struct rte_mbuf **__rte_restrict pkts,
2152 unsigned int pkts_n,
2153 unsigned int olx __rte_unused)
2156 struct rte_mbuf **elts = (struct rte_mbuf **)txq->elts;
2159 MLX5_ASSERT(pkts_n);
2160 part = txq->elts_s - (txq->elts_head & txq->elts_m);
2162 MLX5_ASSERT(part <= txq->elts_s);
2163 /* This code is a good candidate for vectorizing with SIMD. */
2164 rte_memcpy((void *)(elts + (txq->elts_head & txq->elts_m)),
2166 RTE_MIN(part, pkts_n) * sizeof(struct rte_mbuf *));
2167 txq->elts_head += pkts_n;
2168 if (unlikely(part < pkts_n))
2169 /* The copy is wrapping around the elts array. */
2170 rte_memcpy((void *)elts, (void *)(pkts + part),
2171 (pkts_n - part) * sizeof(struct rte_mbuf *));
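/*
 * Worked example (illustrative): with elts_s = 256, elts_head at slot 250
 * and pkts_n = 10, the first copy stores 6 pointers into elts[250..255]
 * and the wrap-around copy above places the remaining 4 into elts[0..3].
 */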
2175 * Update completion queue consuming index via doorbell
2176 * and flush the completed data buffers.
2179 * Pointer to TX queue structure.
2180 * @param last_cqe
2181 * Pointer to the last valid CQE; if not NULL, update txq->wqe_pi and flush the buffers.
2183 * Configured Tx offloads mask. It is fully defined at
2184 * compile time and may be used for optimization.
2186 static __rte_always_inline void
2187 mlx5_tx_comp_flush(struct mlx5_txq_data *__rte_restrict txq,
2188 volatile struct mlx5_cqe *last_cqe,
2189 unsigned int olx __rte_unused)
2191 if (likely(last_cqe != NULL)) {
2194 txq->wqe_pi = rte_be_to_cpu_16(last_cqe->wqe_counter);
2195 tail = txq->fcqs[(txq->cq_ci - 1) & txq->cqe_m];
2196 if (likely(tail != txq->elts_tail)) {
2197 mlx5_tx_free_elts(txq, tail, olx);
2198 MLX5_ASSERT(tail == txq->elts_tail);
2204 * Manage TX completions. This routine checks the CQ for
2205 * arrived CQEs, deduces the last accomplished WQE in SQ,
2206 * updates SQ producing index and frees all completed mbufs.
2209 * Pointer to TX queue structure.
2211 * Configured Tx offloads mask. It is fully defined at
2212 * compile time and may be used for optimization.
2214 * NOTE: not inlined intentionally, it makes the tx_burst
2215 * routine smaller, simpler and faster - verified by experiments.
2218 mlx5_tx_handle_completion(struct mlx5_txq_data *__rte_restrict txq,
2219 unsigned int olx __rte_unused)
2221 unsigned int count = MLX5_TX_COMP_MAX_CQE;
2222 volatile struct mlx5_cqe *last_cqe = NULL;
2223 bool ring_doorbell = false;
2226 static_assert(MLX5_CQE_STATUS_HW_OWN < 0, "Must be negative value");
2227 static_assert(MLX5_CQE_STATUS_SW_OWN < 0, "Must be negative value");
2229 volatile struct mlx5_cqe *cqe;
2231 cqe = &txq->cqes[txq->cq_ci & txq->cqe_m];
2232 ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci);
2233 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
2234 if (likely(ret != MLX5_CQE_STATUS_ERR)) {
2235 /* No new CQEs in completion queue. */
2236 MLX5_ASSERT(ret == MLX5_CQE_STATUS_HW_OWN);
2240 * Some error occurred, try to restart.
2241 * We have no barrier after WQE related Doorbell
2242 * written, make sure all writes are completed
2243 * here, before we might perform SQ reset.
2246 ret = mlx5_tx_error_cqe_handle
2247 (txq, (volatile struct mlx5_err_cqe *)cqe);
2248 if (unlikely(ret < 0)) {
2250 * Some error occurred on queue error
2251 * handling, we do not advance the index
2252 * here, allowing to retry on next call.
2257 * We are going to fetch all entries with
2258 * MLX5_CQE_SYNDROME_WR_FLUSH_ERR status.
2259 * The send queue is supposed to be empty.
2261 ring_doorbell = true;
2263 txq->cq_pi = txq->cq_ci;
2267 /* Normal transmit completion. */
2268 MLX5_ASSERT(txq->cq_ci != txq->cq_pi);
2269 MLX5_ASSERT((txq->fcqs[txq->cq_ci & txq->cqe_m] >> 16) ==
2271 ring_doorbell = true;
2275 * We have to restrict the amount of processed CQEs
2276 * in one tx_burst routine call. The CQ may be large
2277 * and many CQEs may be updated by the NIC in one
2278 * transaction. Freeing buffers is time consuming,
2279 * multiple iterations may introduce significant
2282 if (likely(--count == 0))
2285 if (likely(ring_doorbell)) {
2286 /* Ring doorbell to notify hardware. */
2287 rte_compiler_barrier();
2288 *txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
2289 mlx5_tx_comp_flush(txq, last_cqe, olx);
2294 * Check if the completion request flag should be set in the last WQE.
2295 * Both pushed mbufs and WQEs are monitored and the completion request
2296 * flag is set if any of the thresholds is reached.
2299 * Pointer to TX queue structure.
2301 * Pointer to burst routine local context.
2303 * Configured Tx offloads mask. It is fully defined at
2304 * compile time and may be used for optimization.
2306 static __rte_always_inline void
2307 mlx5_tx_request_completion(struct mlx5_txq_data *__rte_restrict txq,
2308 struct mlx5_txq_local *__rte_restrict loc,
2311 uint16_t head = txq->elts_head;
2314 part = MLX5_TXOFF_CONFIG(INLINE) ?
2315 0 : loc->pkts_sent - loc->pkts_copy;
2317 if ((uint16_t)(head - txq->elts_comp) >= MLX5_TX_COMP_THRESH ||
2318 (MLX5_TXOFF_CONFIG(INLINE) &&
2319 (uint16_t)(txq->wqe_ci - txq->wqe_comp) >= txq->wqe_thres)) {
2320 volatile struct mlx5_wqe *last = loc->wqe_last;
2323 txq->elts_comp = head;
2324 if (MLX5_TXOFF_CONFIG(INLINE))
2325 txq->wqe_comp = txq->wqe_ci;
2326 /* Request unconditional completion on last WQE. */
2327 last->cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
2328 MLX5_COMP_MODE_OFFSET);
2329 /* Save elts_head in dedicated free on completion queue. */
2330 #ifdef RTE_LIBRTE_MLX5_DEBUG
2331 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head |
2332 (last->cseg.opcode >> 8) << 16;
2334 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head;
2336 /* A CQE slot must always be available. */
2337 MLX5_ASSERT((txq->cq_pi - txq->cq_ci) <= txq->cqe_s);
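/*
 * Descriptive note (based on the stores above): each fcqs[] entry keeps
 * the elts_head value to free up to when the matching CQE arrives; in
 * debug builds the WQE opcode is additionally stored in the upper 16
 * bits so that completion handling can cross-check it.
 */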
2342 * DPDK callback to check the status of a tx descriptor.
2347 * The index of the descriptor in the ring.
2350 * The status of the tx descriptor.
2353 mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
2355 struct mlx5_txq_data *__rte_restrict txq = tx_queue;
2358 mlx5_tx_handle_completion(txq, 0);
2359 used = txq->elts_head - txq->elts_tail;
2361 return RTE_ETH_TX_DESC_FULL;
2362 return RTE_ETH_TX_DESC_DONE;
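/*
 * Usage sketch (illustrative, application side - not part of the PMD):
 * this callback is normally reached through the generic ethdev API:
 *
 *	int st = rte_eth_tx_descriptor_status(port_id, queue_id, offset);
 *	if (st == RTE_ETH_TX_DESC_DONE)
 *		handle_transmitted_descriptor();
 *
 * port_id, queue_id, offset and handle_transmitted_descriptor() are
 * application-chosen (hypothetical) names.
 */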
2366 * Build the Control Segment with specified opcode:
2367 * - MLX5_OPCODE_SEND
2368 * - MLX5_OPCODE_ENHANCED_MPSW
2372 * Pointer to TX queue structure.
2374 * Pointer to burst routine local context.
2376 * Pointer to WQE to fill with built Control Segment.
2378 * Supposed length of WQE in segments.
2380 * SQ WQE opcode to put into Control Segment.
2382 * Configured Tx offloads mask. It is fully defined at
2383 * compile time and may be used for optimization.
2385 static __rte_always_inline void
2386 mlx5_tx_cseg_init(struct mlx5_txq_data *__rte_restrict txq,
2387 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
2388 struct mlx5_wqe *__rte_restrict wqe,
2390 unsigned int opcode,
2391 unsigned int olx __rte_unused)
2393 struct mlx5_wqe_cseg *__rte_restrict cs = &wqe->cseg;
2395 /* For legacy MPW replace the EMPW by TSO with modifier. */
2396 if (MLX5_TXOFF_CONFIG(MPW) && opcode == MLX5_OPCODE_ENHANCED_MPSW)
2397 opcode = MLX5_OPCODE_TSO | MLX5_OPC_MOD_MPW << 24;
2398 cs->opcode = rte_cpu_to_be_32((txq->wqe_ci << 8) | opcode);
2399 cs->sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
2400 cs->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
2401 MLX5_COMP_MODE_OFFSET);
2402 cs->misc = RTE_BE32(0);
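/*
 * Illustrative note on the opcode field set above: the 32-bit word
 * packs the WQE index and the opcode as ((wqe_ci << 8) | opcode), so
 * e.g. wqe_ci = 0x1234 with MLX5_OPCODE_SEND gives 0x123400 | opcode
 * before the byte-order conversion; for legacy MPW the MLX5_OPC_MOD_MPW
 * modifier additionally occupies bits 24..31.
 */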
2406 * Build the Ethernet Segment without inlined data.
2407 * Supports Software Parser, Checksums and VLAN
2408 * insertion Tx offload features.
2411 * Pointer to TX queue structure.
2413 * Pointer to burst routine local context.
2415 * Pointer to WQE to fill with built Ethernet Segment.
2417 * Configured Tx offloads mask. It is fully defined at
2418 * compile time and may be used for optimization.
2420 static __rte_always_inline void
2421 mlx5_tx_eseg_none(struct mlx5_txq_data *__rte_restrict txq __rte_unused,
2422 struct mlx5_txq_local *__rte_restrict loc,
2423 struct mlx5_wqe *__rte_restrict wqe,
2426 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
2430 * Calculate and set check sum flags first, dword field
2431 * in segment may be shared with Software Parser flags.
2433 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2434 es->flags = rte_cpu_to_le_32(csum);
2436 * Calculate and set Software Parser offsets and flags.
2437 * These flags are set for custom UDP and IP tunnel packets.
2439 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2440 /* Fill metadata field if needed. */
2441 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2442 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2443 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
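/*
 * Note: the nested conditional above reads as - if the METADATA
 * offload is compiled in and the mbuf carries PKT_TX_DYNF_METADATA,
 * take the value from the dynamic mbuf field, otherwise write zero.
 */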
2444 /* Engage VLAN tag insertion feature if requested. */
2445 if (MLX5_TXOFF_CONFIG(VLAN) &&
2446 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
2448 * We should get here only if the device supports
2449 * this feature correctly.
2451 MLX5_ASSERT(txq->vlan_en);
2452 es->inline_hdr = rte_cpu_to_be_32(MLX5_ETH_WQE_VLAN_INSERT |
2453 loc->mbuf->vlan_tci);
2455 es->inline_hdr = RTE_BE32(0);
2460 * Build the Ethernet Segment with minimal inlined data
2461 * of MLX5_ESEG_MIN_INLINE_SIZE bytes length. This is
2462 * used to fill the gap in single WQEBB WQEs.
2463 * Supports Software Parser, Checksums and VLAN
2464 * insertion Tx offload features.
2467 * Pointer to TX queue structure.
2469 * Pointer to burst routine local context.
2471 * Pointer to WQE to fill with built Ethernet Segment.
2473 * Length of VLAN tag insertion if any.
2475 * Configured Tx offloads mask. It is fully defined at
2476 * compile time and may be used for optimization.
2478 static __rte_always_inline void
2479 mlx5_tx_eseg_dmin(struct mlx5_txq_data *__rte_restrict txq __rte_unused,
2480 struct mlx5_txq_local *__rte_restrict loc,
2481 struct mlx5_wqe *__rte_restrict wqe,
2485 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
2487 uint8_t *psrc, *pdst;
2490 * Calculate and set check sum flags first, dword field
2491 * in segment may be shared with Software Parser flags.
2493 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2494 es->flags = rte_cpu_to_le_32(csum);
2496 * Calculate and set Software Parser offsets and flags.
2497 * These flags are set for custom UDP and IP tunnel packets.
2499 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2500 /* Fill metadata field if needed. */
2501 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2502 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2503 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2504 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2506 sizeof(rte_v128u32_t)),
2507 "invalid Ethernet Segment data size");
2508 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2510 sizeof(struct rte_vlan_hdr) +
2511 2 * RTE_ETHER_ADDR_LEN),
2512 "invalid Ethernet Segment data size");
2513 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
2514 es->inline_hdr_sz = RTE_BE16(MLX5_ESEG_MIN_INLINE_SIZE);
2515 es->inline_data = *(unaligned_uint16_t *)psrc;
2516 psrc += sizeof(uint16_t);
2517 pdst = (uint8_t *)(es + 1);
2518 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2519 /* Implement VLAN tag insertion as part of the inline data. */
2520 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
2521 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2522 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2523 /* Insert VLAN ethertype + VLAN tag. */
2524 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2525 ((RTE_ETHER_TYPE_VLAN << 16) |
2526 loc->mbuf->vlan_tci);
2527 pdst += sizeof(struct rte_vlan_hdr);
2528 /* Copy the remaining two bytes from the packet data. */
2529 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
2530 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
2532 /* Fill the gap in the title WQEBB with inline data. */
2533 rte_mov16(pdst, psrc);
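/*
 * Resulting 18-byte inline header (illustrative, assuming
 * MLX5_ESEG_MIN_INLINE_SIZE is 18): with VLAN insertion it holds
 * DMAC(6) | SMAC(6) | 0x8100 + TCI(4) | first 2 bytes of the original
 * EtherType; without VLAN the first 18 bytes of the frame are copied
 * unchanged.
 */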
2538 * Build the Ethernet Segment with entire packet
2539 * data inlining. Checks the boundary of WQEBB and
2540 * ring buffer wrapping, supports Software Parser,
2541 * Checksums and VLAN insertion Tx offload features.
2544 * Pointer to TX queue structure.
2546 * Pointer to burst routine local context.
2548 * Pointer to WQE to fill with built Ethernet Segment.
2550 * Length of VLAN tag insertion if any.
2552 * Length of data to inline (VLAN included, if any).
2554 * TSO flag, set mss field from the packet.
2556 * Configured Tx offloads mask. It is fully defined at
2557 * compile time and may be used for optimization.
2560 * Pointer to the next Data Segment (aligned and wrapped around).
2562 static __rte_always_inline struct mlx5_wqe_dseg *
2563 mlx5_tx_eseg_data(struct mlx5_txq_data *__rte_restrict txq,
2564 struct mlx5_txq_local *__rte_restrict loc,
2565 struct mlx5_wqe *__rte_restrict wqe,
2571 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
2573 uint8_t *psrc, *pdst;
2577 * Calculate and set check sum flags first, dword field
2578 * in segment may be shared with Software Parser flags.
2580 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2583 csum |= loc->mbuf->tso_segsz;
2584 es->flags = rte_cpu_to_be_32(csum);
2586 es->flags = rte_cpu_to_le_32(csum);
2589 * Calculate and set Software Parser offsets and flags.
2590 * These flags are set for custom UDP and IP tunnel packets.
2592 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2593 /* Fill metadata field if needed. */
2594 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2595 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2596 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2597 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2599 sizeof(rte_v128u32_t)),
2600 "invalid Ethernet Segment data size");
2601 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2603 sizeof(struct rte_vlan_hdr) +
2604 2 * RTE_ETHER_ADDR_LEN),
2605 "invalid Ethernet Segment data size");
2606 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
2607 es->inline_hdr_sz = rte_cpu_to_be_16(inlen);
2608 es->inline_data = *(unaligned_uint16_t *)psrc;
2609 psrc += sizeof(uint16_t);
2610 pdst = (uint8_t *)(es + 1);
2611 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2612 /* Implement VLAN tag insertion as part of the inline data. */
2613 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
2614 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2615 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2616 /* Insert VLAN ethertype + VLAN tag. */
2617 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2618 ((RTE_ETHER_TYPE_VLAN << 16) |
2619 loc->mbuf->vlan_tci);
2620 pdst += sizeof(struct rte_vlan_hdr);
2621 /* Copy the remaining two bytes from the packet data. */
2622 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
2623 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
2624 psrc += sizeof(uint16_t);
2626 /* Fill the gap in the title WQEBB with inline data. */
2627 rte_mov16(pdst, psrc);
2628 psrc += sizeof(rte_v128u32_t);
2630 pdst = (uint8_t *)(es + 2);
2631 MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
2632 MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
2633 inlen -= MLX5_ESEG_MIN_INLINE_SIZE;
2635 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
2636 return (struct mlx5_wqe_dseg *)pdst;
2639 * The WQEBB space availability is checked by caller.
2640 * Here we should be aware of WQE ring buffer wraparound only.
2642 part = (uint8_t *)txq->wqes_end - pdst;
2643 part = RTE_MIN(part, inlen);
2645 rte_memcpy(pdst, psrc, part);
2647 if (likely(!inlen)) {
2649 * If return value is not used by the caller
2650 * the code below will be optimized out.
2653 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2654 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
2655 pdst = (uint8_t *)txq->wqes;
2656 return (struct mlx5_wqe_dseg *)pdst;
2658 pdst = (uint8_t *)txq->wqes;
2665 * Copy data from a chain of mbufs to the specified linear buffer.
2666 * If data from some mbuf is copied
2667 * completely, this mbuf is freed. A local
2668 * structure is used to keep the byte stream state.
2671 * Pointer to the destination linear buffer.
2673 * Pointer to burst routine local context.
2675 * Length of data to be copied.
2677 * Length of data to be copied ignoring no inline hint.
2679 * Configured Tx offloads mask. It is fully defined at
2680 * compile time and may be used for optimization.
2683 * Number of actually copied data bytes. This is always greater than or
2684 * equal to the must parameter and might be less than len if the no-inline
2685 * hint flag is encountered.
2687 static __rte_always_inline unsigned int
2688 mlx5_tx_mseg_memcpy(uint8_t *pdst,
2689 struct mlx5_txq_local *__rte_restrict loc,
2692 unsigned int olx __rte_unused)
2694 struct rte_mbuf *mbuf;
2695 unsigned int part, dlen, copy = 0;
2699 MLX5_ASSERT(must <= len);
2701 /* Allow zero length packets, must check first. */
2702 dlen = rte_pktmbuf_data_len(loc->mbuf);
2703 if (dlen <= loc->mbuf_off) {
2704 /* Exhausted packet, just free. */
2706 loc->mbuf = mbuf->next;
2707 rte_pktmbuf_free_seg(mbuf);
2709 MLX5_ASSERT(loc->mbuf_nseg > 1);
2710 MLX5_ASSERT(loc->mbuf);
2712 if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
2717 * We already copied the minimal
2718 * requested amount of data.
2723 if (diff <= rte_pktmbuf_data_len(loc->mbuf)) {
2725 * Copy only the minimal required
2726 * part of the data buffer.
2733 dlen -= loc->mbuf_off;
2734 psrc = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
2736 part = RTE_MIN(len, dlen);
2737 rte_memcpy(pdst, psrc, part);
2739 loc->mbuf_off += part;
2742 if (loc->mbuf_off >= rte_pktmbuf_data_len(loc->mbuf)) {
2744 /* Exhausted packet, just free. */
2746 loc->mbuf = mbuf->next;
2747 rte_pktmbuf_free_seg(mbuf);
2749 MLX5_ASSERT(loc->mbuf_nseg >= 1);
2759 * Build the Ethernet Segment with inlined data from
2760 * multi-segment packet. Checks the boundary of WQEBB
2761 * and ring buffer wrapping, supports Software Parser,
2762 * Checksums and VLAN insertion Tx offload features.
2765 * Pointer to TX queue structure.
2767 * Pointer to burst routine local context.
2769 * Pointer to WQE to fill with built Ethernet Segment.
2771 * Length of VLAN tag insertion if any.
2773 * Length of data to inline (VLAN included, if any).
2775 * TSO flag, set mss field from the packet.
2777 * Configured Tx offloads mask. It is fully defined at
2778 * compile time and may be used for optimization.
2781 * Pointer to the next Data Segment (aligned and
2782 * possibly NOT wrapped around - the caller should do
2783 * the wrapping check on its own).
2785 static __rte_always_inline struct mlx5_wqe_dseg *
2786 mlx5_tx_eseg_mdat(struct mlx5_txq_data *__rte_restrict txq,
2787 struct mlx5_txq_local *__rte_restrict loc,
2788 struct mlx5_wqe *__rte_restrict wqe,
2794 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
2797 unsigned int part, tlen = 0;
2800 * Calculate and set check sum flags first, uint32_t field
2801 * in segment may be shared with Software Parser flags.
2803 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2806 csum |= loc->mbuf->tso_segsz;
2807 es->flags = rte_cpu_to_be_32(csum);
2809 es->flags = rte_cpu_to_le_32(csum);
2812 * Calculate and set Software Parser offsets and flags.
2813 * These flags are set for custom UDP and IP tunnel packets.
2815 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2816 /* Fill metadata field if needed. */
2817 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2818 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2819 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2820 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2822 sizeof(rte_v128u32_t)),
2823 "invalid Ethernet Segment data size");
2824 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2826 sizeof(struct rte_vlan_hdr) +
2827 2 * RTE_ETHER_ADDR_LEN),
2828 "invalid Ethernet Segment data size");
2829 MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
2830 pdst = (uint8_t *)&es->inline_data;
2831 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2832 /* Implement VLAN tag insertion as part of the inline data. */
2833 mlx5_tx_mseg_memcpy(pdst, loc,
2834 2 * RTE_ETHER_ADDR_LEN,
2835 2 * RTE_ETHER_ADDR_LEN, olx);
2836 pdst += 2 * RTE_ETHER_ADDR_LEN;
2837 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2838 ((RTE_ETHER_TYPE_VLAN << 16) |
2839 loc->mbuf->vlan_tci);
2840 pdst += sizeof(struct rte_vlan_hdr);
2841 tlen += 2 * RTE_ETHER_ADDR_LEN + sizeof(struct rte_vlan_hdr);
2843 MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
2845 * The WQEBB space availability is checked by caller.
2846 * Here we should be aware of WQE ring buffer wraparound only.
2848 part = (uint8_t *)txq->wqes_end - pdst;
2849 part = RTE_MIN(part, inlen - tlen);
2855 * Copying may be interrupted inside the routine
2856 * if it runs into the no-inline hint flag.
2858 copy = tlen >= txq->inlen_mode ? 0 : (txq->inlen_mode - tlen);
2859 copy = mlx5_tx_mseg_memcpy(pdst, loc, part, copy, olx);
2861 if (likely(inlen <= tlen) || copy < part) {
2862 es->inline_hdr_sz = rte_cpu_to_be_16(tlen);
2864 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2865 return (struct mlx5_wqe_dseg *)pdst;
2867 pdst = (uint8_t *)txq->wqes;
2868 part = inlen - tlen;
2873 * Build the Data Segment of pointer type.
2876 * Pointer to TX queue structure.
2878 * Pointer to burst routine local context.
2880 * Pointer to WQE to fill with built Data Segment.
2882 * Data buffer to point.
2884 * Data buffer length.
2886 * Configured Tx offloads mask. It is fully defined at
2887 * compile time and may be used for optimization.
2889 static __rte_always_inline void
2890 mlx5_tx_dseg_ptr(struct mlx5_txq_data *__rte_restrict txq,
2891 struct mlx5_txq_local *__rte_restrict loc,
2892 struct mlx5_wqe_dseg *__rte_restrict dseg,
2895 unsigned int olx __rte_unused)
2899 dseg->bcount = rte_cpu_to_be_32(len);
2900 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
2901 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
2905 * Build the Data Segment of pointer type or inline
2906 * if data length is less than buffer in minimal
2907 * Data Segment size.
2910 * Pointer to TX queue structure.
2912 * Pointer to burst routine local context.
2914 * Pointer to WQE to fill with built Data Segment.
2916 * Data buffer to point.
2918 * Data buffer length.
2920 * Configured Tx offloads mask. It is fully defined at
2921 * compile time and may be used for optimization.
2923 static __rte_always_inline void
2924 mlx5_tx_dseg_iptr(struct mlx5_txq_data *__rte_restrict txq,
2925 struct mlx5_txq_local *__rte_restrict loc,
2926 struct mlx5_wqe_dseg *__rte_restrict dseg,
2929 unsigned int olx __rte_unused)
2935 if (len > MLX5_DSEG_MIN_INLINE_SIZE) {
2936 dseg->bcount = rte_cpu_to_be_32(len);
2937 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
2938 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
2942 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
2943 /* Unrolled implementation of generic rte_memcpy. */
2944 dst = (uintptr_t)&dseg->inline_data[0];
2945 src = (uintptr_t)buf;
2947 #ifdef RTE_ARCH_STRICT_ALIGN
2948 MLX5_ASSERT(dst == RTE_PTR_ALIGN(dst, sizeof(uint32_t)));
2949 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
2950 dst += sizeof(uint32_t);
2951 src += sizeof(uint32_t);
2952 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
2953 dst += sizeof(uint32_t);
2954 src += sizeof(uint32_t);
2956 *(uint64_t *)dst = *(unaligned_uint64_t *)src;
2957 dst += sizeof(uint64_t);
2958 src += sizeof(uint64_t);
2962 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
2963 dst += sizeof(uint32_t);
2964 src += sizeof(uint32_t);
2967 *(uint16_t *)dst = *(unaligned_uint16_t *)src;
2968 dst += sizeof(uint16_t);
2969 src += sizeof(uint16_t);
2972 *(uint8_t *)dst = *(uint8_t *)src;
2976 * Build the Data Segment of inlined data from single
2977 * segment packet, no VLAN insertion.
2980 * Pointer to TX queue structure.
2982 * Pointer to burst routine local context.
2984 * Pointer to WQE to fill with built Data Segment.
2986 * Data buffer to point.
2988 * Data buffer length.
2990 * Configured Tx offloads mask. It is fully defined at
2991 * compile time and may be used for optimization.
2994 * Pointer to the next Data Segment after inlined data.
2995 * Ring buffer wraparound check is needed. We do not
2996 * do it here because it may not be needed for the
2997 * last packet in the eMPW session.
2999 static __rte_always_inline struct mlx5_wqe_dseg *
3000 mlx5_tx_dseg_empw(struct mlx5_txq_data *__rte_restrict txq,
3001 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
3002 struct mlx5_wqe_dseg *__rte_restrict dseg,
3005 unsigned int olx __rte_unused)
3010 if (!MLX5_TXOFF_CONFIG(MPW)) {
3011 /* Store the descriptor byte counter for eMPW sessions. */
3012 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
3013 pdst = &dseg->inline_data[0];
3015 /* The entire legacy MPW session counter is stored on close. */
3016 pdst = (uint8_t *)dseg;
3019 * The WQEBB space availability is checked by caller.
3020 * Here we should be aware of WQE ring buffer wraparound only.
3022 part = (uint8_t *)txq->wqes_end - pdst;
3023 part = RTE_MIN(part, len);
3025 rte_memcpy(pdst, buf, part);
3029 if (!MLX5_TXOFF_CONFIG(MPW))
3030 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
3031 /* Note: no final wraparound check here. */
3032 return (struct mlx5_wqe_dseg *)pdst;
3034 pdst = (uint8_t *)txq->wqes;
3041 * Build the Data Segment of inlined data from single
3042 * segment packet with VLAN insertion.
3045 * Pointer to TX queue structure.
3047 * Pointer to burst routine local context.
3049 * Pointer to the dseg to be filled with the built Data Segment.
3051 * Data buffer to point.
3053 * Data buffer length.
3055 * Configured Tx offloads mask. It is fully defined at
3056 * compile time and may be used for optimization.
3059 * Pointer to the next Data Segment after inlined data.
3060 * Ring buffer wraparound check is needed.
3062 static __rte_always_inline struct mlx5_wqe_dseg *
3063 mlx5_tx_dseg_vlan(struct mlx5_txq_data *__rte_restrict txq,
3064 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
3065 struct mlx5_wqe_dseg *__rte_restrict dseg,
3068 unsigned int olx __rte_unused)
3074 MLX5_ASSERT(len > MLX5_ESEG_MIN_INLINE_SIZE);
3075 static_assert(MLX5_DSEG_MIN_INLINE_SIZE ==
3076 (2 * RTE_ETHER_ADDR_LEN),
3077 "invalid Data Segment data size");
3078 if (!MLX5_TXOFF_CONFIG(MPW)) {
3079 /* Store the descriptor byte counter for eMPW sessions. */
3080 dseg->bcount = rte_cpu_to_be_32
3081 ((len + sizeof(struct rte_vlan_hdr)) |
3082 MLX5_ETH_WQE_DATA_INLINE);
3083 pdst = &dseg->inline_data[0];
3085 /* The entire legacy MPW session counter is stored on close. */
3086 pdst = (uint8_t *)dseg;
3088 memcpy(pdst, buf, MLX5_DSEG_MIN_INLINE_SIZE);
3089 buf += MLX5_DSEG_MIN_INLINE_SIZE;
3090 pdst += MLX5_DSEG_MIN_INLINE_SIZE;
3091 len -= MLX5_DSEG_MIN_INLINE_SIZE;
3092 /* Insert VLAN ethertype + VLAN tag. Pointer is aligned. */
3093 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
3094 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
3095 pdst = (uint8_t *)txq->wqes;
3096 *(uint32_t *)pdst = rte_cpu_to_be_32((RTE_ETHER_TYPE_VLAN << 16) |
3097 loc->mbuf->vlan_tci);
3098 pdst += sizeof(struct rte_vlan_hdr);
3100 * The WQEBB space availability is checked by caller.
3101 * Here we should be aware of WQE ring buffer wraparound only.
3103 part = (uint8_t *)txq->wqes_end - pdst;
3104 part = RTE_MIN(part, len);
3106 rte_memcpy(pdst, buf, part);
3110 if (!MLX5_TXOFF_CONFIG(MPW))
3111 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
3112 /* Note: no final wraparound check here. */
3113 return (struct mlx5_wqe_dseg *)pdst;
3115 pdst = (uint8_t *)txq->wqes;
3122 * Build the Ethernet Segment with optionally inlined data with
3123 * VLAN insertion and following Data Segments (if any) from
3124 * multi-segment packet. Used by ordinary send and TSO.
3127 * Pointer to TX queue structure.
3129 * Pointer to burst routine local context.
3131 * Pointer to WQE to fill with built Ethernet/Data Segments.
3133 * Length of VLAN header to insert, 0 means no VLAN insertion.
3135 * Data length to inline. For TSO this parameter specifies the
3136 * exact value; for the ordinary send routine it can be aligned by the
3137 * caller to provide better WQE space saving and data buffer
3138 * start address alignment. This length includes the VLAN header
3141 * Zero means an ordinary send, the inlined data can be extended;
3142 * otherwise this is TSO and the inlined data length is fixed.
3144 * Configured Tx offloads mask. It is fully defined at
3145 * compile time and may be used for optimization.
3148 * Actual size of built WQE in segments.
3150 static __rte_always_inline unsigned int
3151 mlx5_tx_mseg_build(struct mlx5_txq_data *__rte_restrict txq,
3152 struct mlx5_txq_local *__rte_restrict loc,
3153 struct mlx5_wqe *__rte_restrict wqe,
3157 unsigned int olx __rte_unused)
3159 struct mlx5_wqe_dseg *__rte_restrict dseg;
3162 MLX5_ASSERT((rte_pktmbuf_pkt_len(loc->mbuf) + vlan) >= inlen);
3163 loc->mbuf_nseg = NB_SEGS(loc->mbuf);
3166 dseg = mlx5_tx_eseg_mdat(txq, loc, wqe, vlan, inlen, tso, olx);
3167 if (!loc->mbuf_nseg)
3170 * There are still some mbuf remaining, not inlined.
3171 * The first mbuf may be partially inlined and we
3172 * must process the possible non-zero data offset.
3174 if (loc->mbuf_off) {
3179 * Exhausted packets must have been dropped before.
3180 * A non-zero offset means there is some data
3181 * remaining in the packet.
3183 MLX5_ASSERT(loc->mbuf_off < rte_pktmbuf_data_len(loc->mbuf));
3184 MLX5_ASSERT(rte_pktmbuf_data_len(loc->mbuf));
3185 dptr = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
3187 dlen = rte_pktmbuf_data_len(loc->mbuf) - loc->mbuf_off;
3189 * Build the pointer/minimal data Data Segment.
3190 * Do ring buffer wrapping check in advance.
3192 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3193 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3194 mlx5_tx_dseg_iptr(txq, loc, dseg, dptr, dlen, olx);
3195 /* Store the mbuf to be freed on completion. */
3196 MLX5_ASSERT(loc->elts_free);
3197 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3200 if (--loc->mbuf_nseg == 0)
3202 loc->mbuf = loc->mbuf->next;
3206 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
3207 struct rte_mbuf *mbuf;
3209 /* Zero length segment found, just skip. */
3211 loc->mbuf = loc->mbuf->next;
3212 rte_pktmbuf_free_seg(mbuf);
3213 if (--loc->mbuf_nseg == 0)
3216 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3217 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3220 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3221 rte_pktmbuf_data_len(loc->mbuf), olx);
3222 MLX5_ASSERT(loc->elts_free);
3223 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3226 if (--loc->mbuf_nseg == 0)
3228 loc->mbuf = loc->mbuf->next;
3233 /* Calculate actual segments used from the dseg pointer. */
3234 if ((uintptr_t)wqe < (uintptr_t)dseg)
3235 ds = ((uintptr_t)dseg - (uintptr_t)wqe) / MLX5_WSEG_SIZE;
3237 ds = (((uintptr_t)dseg - (uintptr_t)wqe) +
3238 txq->wqe_s * MLX5_WQE_SIZE) / MLX5_WSEG_SIZE;
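/*
 * Worked example (illustrative, assuming MLX5_WSEG_SIZE is 16 and
 * MLX5_WQE_SIZE is 64): if dseg ended up 7 WSEGs past the WQE start,
 * ds = 7; if the ring wrapped around, the whole ring size expressed in
 * WSEGs (wqe_s * 64 / 16) is added to keep the distance positive.
 */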
3243 * Tx one packet function for multi-segment TSO. Supports all
3244 * types of Tx offloads, uses MLX5_OPCODE_TSO to build WQEs,
3245 * sends one packet per WQE.
3247 * This routine is responsible for storing the processed mbuf
3248 * into the elts ring buffer and updating elts_head.
3251 * Pointer to TX queue structure.
3253 * Pointer to burst routine local context.
3255 * Configured Tx offloads mask. It is fully defined at
3256 * compile time and may be used for optimization.
3259 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3260 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3261 * Local context variables partially updated.
3263 static __rte_always_inline enum mlx5_txcmp_code
3264 mlx5_tx_packet_multi_tso(struct mlx5_txq_data *__rte_restrict txq,
3265 struct mlx5_txq_local *__rte_restrict loc,
3268 struct mlx5_wqe *__rte_restrict wqe;
3269 unsigned int ds, dlen, inlen, ntcp, vlan = 0;
3272 * Calculate data length to be inlined to estimate
3273 * the required space in WQE ring buffer.
3275 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
3276 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3277 vlan = sizeof(struct rte_vlan_hdr);
3278 inlen = loc->mbuf->l2_len + vlan +
3279 loc->mbuf->l3_len + loc->mbuf->l4_len;
3280 if (unlikely((!inlen || !loc->mbuf->tso_segsz)))
3281 return MLX5_TXCMP_CODE_ERROR;
3282 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
3283 inlen += loc->mbuf->outer_l2_len + loc->mbuf->outer_l3_len;
3284 /* Packet must contain all TSO headers. */
3285 if (unlikely(inlen > MLX5_MAX_TSO_HEADER ||
3286 inlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
3287 inlen > (dlen + vlan)))
3288 return MLX5_TXCMP_CODE_ERROR;
3289 MLX5_ASSERT(inlen >= txq->inlen_mode);
3291 * Check whether there are enough free WQEBBs:
3293 * - Ethernet Segment
3294 * - First Segment of inlined Ethernet data
3295 * - ... data continued ...
3296 * - Data Segments of pointer/min inline type
3298 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
3299 MLX5_ESEG_MIN_INLINE_SIZE +
3301 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3302 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3303 return MLX5_TXCMP_CODE_EXIT;
3304 /* Check for maximal WQE size. */
3305 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3306 return MLX5_TXCMP_CODE_ERROR;
3307 #ifdef MLX5_PMD_SOFT_COUNTERS
3308 /* Update sent data bytes/packets counters. */
3309 ntcp = (dlen - (inlen - vlan) + loc->mbuf->tso_segsz - 1) /
3310 loc->mbuf->tso_segsz;
3312 * One packet will be added for the mbuf itself
3313 * at the end of mlx5_tx_burst, from the
3314 * loc->pkts_sent field.
3317 txq->stats.opackets += ntcp;
3318 txq->stats.obytes += dlen + vlan + ntcp * inlen;
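/*
 * Worked example (illustrative values): dlen = 9014, inlen = 54,
 * vlan = 0, tso_segsz = 1460 give ntcp = (9014 - 54 + 1459) / 1460 = 7
 * TCP segments, so obytes grows by 9014 + 7 * 54 = 9392 bytes, one
 * header replication per produced segment.
 */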
3320 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3321 loc->wqe_last = wqe;
3322 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_TSO, olx);
3323 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 1, olx);
3324 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3325 txq->wqe_ci += (ds + 3) / 4;
3326 loc->wqe_free -= (ds + 3) / 4;
3327 return MLX5_TXCMP_CODE_MULTI;
3331 * Tx one packet function for multi-segment SEND. Supports all
3332 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
3333 * sends one packet per WQE, without any data inlining in
3336 * This routine is responsible for storing the processed mbuf
3337 * into the elts ring buffer and updating elts_head.
3340 * Pointer to TX queue structure.
3342 * Pointer to burst routine local context.
3344 * Configured Tx offloads mask. It is fully defined at
3345 * compile time and may be used for optimization.
3348 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3349 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3350 * Local context variables partially updated.
3352 static __rte_always_inline enum mlx5_txcmp_code
3353 mlx5_tx_packet_multi_send(struct mlx5_txq_data *__rte_restrict txq,
3354 struct mlx5_txq_local *__rte_restrict loc,
3357 struct mlx5_wqe_dseg *__rte_restrict dseg;
3358 struct mlx5_wqe *__rte_restrict wqe;
3359 unsigned int ds, nseg;
3361 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
3363 * No inlining at all, which means that saving CPU cycles
3364 * was prioritized at configuration time; we should not
3365 * copy any packet data to the WQE.
3367 nseg = NB_SEGS(loc->mbuf);
3369 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3370 return MLX5_TXCMP_CODE_EXIT;
3371 /* Check for maximal WQE size. */
3372 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3373 return MLX5_TXCMP_CODE_ERROR;
3375 * Some Tx offloads may cause an error if the
3376 * packet is not long enough, check against the
3377 * assumed minimal length.
3379 if (rte_pktmbuf_pkt_len(loc->mbuf) <= MLX5_ESEG_MIN_INLINE_SIZE)
3380 return MLX5_TXCMP_CODE_ERROR;
3381 #ifdef MLX5_PMD_SOFT_COUNTERS
3382 /* Update sent data bytes counter. */
3383 txq->stats.obytes += rte_pktmbuf_pkt_len(loc->mbuf);
3384 if (MLX5_TXOFF_CONFIG(VLAN) &&
3385 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3386 txq->stats.obytes += sizeof(struct rte_vlan_hdr);
3389 * SEND WQE, one WQEBB:
3390 * - Control Segment, SEND opcode
3391 * - Ethernet Segment, optional VLAN, no inline
3392 * - Data Segments, pointer only type
3394 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3395 loc->wqe_last = wqe;
3396 mlx5_tx_cseg_init(txq, loc, wqe, ds, MLX5_OPCODE_SEND, olx);
3397 mlx5_tx_eseg_none(txq, loc, wqe, olx);
3398 dseg = &wqe->dseg[0];
3400 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
3401 struct rte_mbuf *mbuf;
3404 * Zero length segment found, we have to
3405 * correct the total size of the WQE in segments.
3406 * It is supposed to be a rare occasion, so
3407 * in the normal case (no zero length segments)
3408 * we avoid extra writes to the Control
3412 wqe->cseg.sq_ds -= RTE_BE32(1);
3414 loc->mbuf = mbuf->next;
3415 rte_pktmbuf_free_seg(mbuf);
3421 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3422 rte_pktmbuf_data_len(loc->mbuf), olx);
3423 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3428 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3429 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3430 loc->mbuf = loc->mbuf->next;
3433 txq->wqe_ci += (ds + 3) / 4;
3434 loc->wqe_free -= (ds + 3) / 4;
3435 return MLX5_TXCMP_CODE_MULTI;
3439 * Tx one packet function for multi-segment SEND. Supports all
3440 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
3441 * sends one packet per WQE, with data inlining in
3442 * Ethernet Segment and minimal Data Segments.
3444 * This routine is responsible for storing the processed mbuf
3445 * into the elts ring buffer and updating elts_head.
3448 * Pointer to TX queue structure.
3450 * Pointer to burst routine local context.
3452 * Configured Tx offloads mask. It is fully defined at
3453 * compile time and may be used for optimization.
3456 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3457 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3458 * Local context variables partially updated.
3460 static __rte_always_inline enum mlx5_txcmp_code
3461 mlx5_tx_packet_multi_inline(struct mlx5_txq_data *__rte_restrict txq,
3462 struct mlx5_txq_local *__rte_restrict loc,
3465 struct mlx5_wqe *__rte_restrict wqe;
3466 unsigned int ds, inlen, dlen, vlan = 0;
3468 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
3469 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
3471 * First calculate data length to be inlined
3472 * to estimate the required space for WQE.
3474 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
3475 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3476 vlan = sizeof(struct rte_vlan_hdr);
3477 inlen = dlen + vlan;
3478 /* Check against minimal length. */
3479 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
3480 return MLX5_TXCMP_CODE_ERROR;
3481 MLX5_ASSERT(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
3482 if (inlen > txq->inlen_send ||
3483 loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
3484 struct rte_mbuf *mbuf;
3489 * Packet length exceeds the allowed inline
3490 * data length, check whether the minimal
3491 * inlining is required.
3493 if (txq->inlen_mode) {
3494 MLX5_ASSERT(txq->inlen_mode >=
3495 MLX5_ESEG_MIN_INLINE_SIZE);
3496 MLX5_ASSERT(txq->inlen_mode <= txq->inlen_send);
3497 inlen = txq->inlen_mode;
3499 if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE ||
3500 !vlan || txq->vlan_en) {
3502 * VLAN insertion will be done by the HW.
3503 * It is not the most efficient way - the VLAN flag is
3504 * checked twice, but we must compute the
3505 * inlining length correctly and take into
3506 * account the VLAN header being inserted.
3508 return mlx5_tx_packet_multi_send
3511 inlen = MLX5_ESEG_MIN_INLINE_SIZE;
3514 * Now we know the minimal amount of data requested
3515 * to be inlined. Check whether we should inline the buffers
3516 * from the beginning of the chain to eliminate some mbufs.
3519 nxlen = rte_pktmbuf_data_len(mbuf);
3520 if (unlikely(nxlen <= txq->inlen_send)) {
3521 /* We can inline first mbuf at least. */
3522 if (nxlen < inlen) {
3525 /* Scan mbufs till inlen filled. */
3530 nxlen = rte_pktmbuf_data_len(mbuf);
3532 } while (unlikely(nxlen < inlen));
3533 if (unlikely(nxlen > txq->inlen_send)) {
3534 /* We cannot inline entire mbuf. */
3535 smlen = inlen - smlen;
3536 start = rte_pktmbuf_mtod_offset
3537 (mbuf, uintptr_t, smlen);
3544 /* There should be no end of packet here. */
3546 nxlen = inlen + rte_pktmbuf_data_len(mbuf);
3547 } while (unlikely(nxlen < txq->inlen_send));
3549 start = rte_pktmbuf_mtod(mbuf, uintptr_t);
3551 * Check whether we can use inlining to align the start
3552 * address of the data buffer to the cacheline size.
3555 start = (~start + 1) & (RTE_CACHE_LINE_SIZE - 1);
3556 if (unlikely(start)) {
3558 if (start <= txq->inlen_send)
3563 * Check whether there are enough free WQEBBs:
3565 * - Ethernet Segment
3566 * - First Segment of inlined Ethernet data
3567 * - ... data continued ...
3568 * - Data Segments of pointer/min inline type
3570 * Estimate the number of Data Segments conservatively,
3571 * supposing that no mbufs are freed during inlining.
3573 MLX5_ASSERT(inlen <= txq->inlen_send);
3574 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
3575 MLX5_ESEG_MIN_INLINE_SIZE +
3577 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3578 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3579 return MLX5_TXCMP_CODE_EXIT;
3580 /* Check for maximal WQE size. */
3581 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3582 return MLX5_TXCMP_CODE_ERROR;
3583 #ifdef MLX5_PMD_SOFT_COUNTERS
3584 /* Update sent data bytes/packets counters. */
3585 txq->stats.obytes += dlen + vlan;
3587 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3588 loc->wqe_last = wqe;
3589 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_SEND, olx);
3590 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 0, olx);
3591 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3592 txq->wqe_ci += (ds + 3) / 4;
3593 loc->wqe_free -= (ds + 3) / 4;
3594 return MLX5_TXCMP_CODE_MULTI;
3598 * Tx burst function for multi-segment packets. Supports all
3599 * types of Tx offloads, uses MLX5_OPCODE_SEND/TSO to build WQEs,
3600 * sends one packet per WQE. The function stops sending if it
3601 * encounters a single-segment packet.
3603 * This routine is responsible for storing the processed mbuf
3604 * into the elts ring buffer and updating elts_head.
3607 * Pointer to TX queue structure.
3609 * Packets to transmit.
3611 * Number of packets in array.
3613 * Pointer to burst routine local context.
3615 * Configured Tx offloads mask. It is fully defined at
3616 * compile time and may be used for optimization.
3619 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3620 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3621 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
3622 * MLX5_TXCMP_CODE_TSO - TSO single-segment packet encountered.
3623 * Local context variables updated.
3625 static __rte_always_inline enum mlx5_txcmp_code
3626 mlx5_tx_burst_mseg(struct mlx5_txq_data *__rte_restrict txq,
3627 struct rte_mbuf **__rte_restrict pkts,
3628 unsigned int pkts_n,
3629 struct mlx5_txq_local *__rte_restrict loc,
3632 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3633 MLX5_ASSERT(pkts_n > loc->pkts_sent);
3634 pkts += loc->pkts_sent + 1;
3635 pkts_n -= loc->pkts_sent;
3637 enum mlx5_txcmp_code ret;
3639 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
3641 * Estimate the number of free elts quickly but
3642 * conservatively. Some segment may be fully inlined
3643 * and freed, ignore this here - precise estimation
3646 if (loc->elts_free < NB_SEGS(loc->mbuf))
3647 return MLX5_TXCMP_CODE_EXIT;
3648 if (MLX5_TXOFF_CONFIG(TSO) &&
3649 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)) {
3650 /* Proceed with multi-segment TSO. */
3651 ret = mlx5_tx_packet_multi_tso(txq, loc, olx);
3652 } else if (MLX5_TXOFF_CONFIG(INLINE)) {
3653 /* Proceed with multi-segment SEND with inlining. */
3654 ret = mlx5_tx_packet_multi_inline(txq, loc, olx);
3656 /* Proceed with multi-segment SEND w/o inlining. */
3657 ret = mlx5_tx_packet_multi_send(txq, loc, olx);
3659 if (ret == MLX5_TXCMP_CODE_EXIT)
3660 return MLX5_TXCMP_CODE_EXIT;
3661 if (ret == MLX5_TXCMP_CODE_ERROR)
3662 return MLX5_TXCMP_CODE_ERROR;
3663 /* WQE is built, go to the next packet. */
3666 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3667 return MLX5_TXCMP_CODE_EXIT;
3668 loc->mbuf = *pkts++;
3670 rte_prefetch0(*pkts);
3671 if (likely(NB_SEGS(loc->mbuf) > 1))
3673 /* Here ends the series of multi-segment packets. */
3674 if (MLX5_TXOFF_CONFIG(TSO) &&
3675 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
3676 return MLX5_TXCMP_CODE_TSO;
3677 return MLX5_TXCMP_CODE_SINGLE;
3683 * Tx burst function for single-segment packets with TSO.
3684 * Supports all types of Tx offloads, except multi-packets.
3685 * Uses MLX5_OPCODE_TSO to build WQEs, sends one packet per WQE.
3686 * The function stops sending if it encounters a multi-segment
3687 * packet or a packet without TSO requested.
3689 * The routine is responsible for storing the processed mbuf
3690 * into the elts ring buffer and updating elts_head if the inline
3691 * offload is requested, due to possible early freeing
3692 * of the inlined mbufs (the pkts array cannot be stored in elts
3698 * Packets to transmit.
3700 * Number of packets in array.
3702 * Pointer to burst routine local context.
3704 * Configured Tx offloads mask. It is fully defined at
3705 * compile time and may be used for optimization.
3708 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3709 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3710 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
3711 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
3712 * Local context variables updated.
3714 static __rte_always_inline enum mlx5_txcmp_code
3715 mlx5_tx_burst_tso(struct mlx5_txq_data *__rte_restrict txq,
3716 struct rte_mbuf **__rte_restrict pkts,
3717 unsigned int pkts_n,
3718 struct mlx5_txq_local *__rte_restrict loc,
3721 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3722 MLX5_ASSERT(pkts_n > loc->pkts_sent);
3723 pkts += loc->pkts_sent + 1;
3724 pkts_n -= loc->pkts_sent;
3726 struct mlx5_wqe_dseg *__rte_restrict dseg;
3727 struct mlx5_wqe *__rte_restrict wqe;
3728 unsigned int ds, dlen, hlen, ntcp, vlan = 0;
3731 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
3732 dlen = rte_pktmbuf_data_len(loc->mbuf);
3733 if (MLX5_TXOFF_CONFIG(VLAN) &&
3734 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
3735 vlan = sizeof(struct rte_vlan_hdr);
3738 * First calculate the WQE size to check
3739 * whether we have enough space in ring buffer.
3741 hlen = loc->mbuf->l2_len + vlan +
3742 loc->mbuf->l3_len + loc->mbuf->l4_len;
3743 if (unlikely((!hlen || !loc->mbuf->tso_segsz)))
3744 return MLX5_TXCMP_CODE_ERROR;
3745 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
3746 hlen += loc->mbuf->outer_l2_len +
3747 loc->mbuf->outer_l3_len;
3748 /* Segment must contain all TSO headers. */
3749 if (unlikely(hlen > MLX5_MAX_TSO_HEADER ||
3750 hlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
3751 hlen > (dlen + vlan)))
3752 return MLX5_TXCMP_CODE_ERROR;
3754 * Check whether there are enough free WQEBBs:
3756 * - Ethernet Segment
3757 * - First Segment of inlined Ethernet data
3758 * - ... data continued ...
3759 * - Finishing Data Segment of pointer type
3761 ds = 4 + (hlen - MLX5_ESEG_MIN_INLINE_SIZE +
3762 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
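/*
 * Worked example (illustrative, assuming MLX5_ESEG_MIN_INLINE_SIZE is
 * 18 and MLX5_WSEG_SIZE is 16): hlen = 58 bytes of headers gives
 * ds = 4 + (58 - 18 + 15) / 16 = 7 segments, i.e. (7 + 3) / 4 = 2 free
 * WQEBBs are required.
 */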
3763 if (loc->wqe_free < ((ds + 3) / 4))
3764 return MLX5_TXCMP_CODE_EXIT;
3765 #ifdef MLX5_PMD_SOFT_COUNTERS
3766 /* Update sent data bytes/packets counters. */
3767 ntcp = (dlen + vlan - hlen +
3768 loc->mbuf->tso_segsz - 1) /
3769 loc->mbuf->tso_segsz;
3771 * One packet will be added for the mbuf itself at the end
3772 * of mlx5_tx_burst, from the loc->pkts_sent field.
3775 txq->stats.opackets += ntcp;
3776 txq->stats.obytes += dlen + vlan + ntcp * hlen;
3779 * Build the TSO WQE:
3781 * - Ethernet Segment with hlen bytes inlined
3782 * - Data Segment of pointer type
3784 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3785 loc->wqe_last = wqe;
3786 mlx5_tx_cseg_init(txq, loc, wqe, ds,
3787 MLX5_OPCODE_TSO, olx);
3788 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan, hlen, 1, olx);
3789 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) + hlen - vlan;
3790 dlen -= hlen - vlan;
3791 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
3793 * WQE is built, update the loop parameters
3794 * and go to the next packet.
3796 txq->wqe_ci += (ds + 3) / 4;
3797 loc->wqe_free -= (ds + 3) / 4;
3798 if (MLX5_TXOFF_CONFIG(INLINE))
3799 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3803 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3804 return MLX5_TXCMP_CODE_EXIT;
3805 loc->mbuf = *pkts++;
3807 rte_prefetch0(*pkts);
3808 if (MLX5_TXOFF_CONFIG(MULTI) &&
3809 unlikely(NB_SEGS(loc->mbuf) > 1))
3810 return MLX5_TXCMP_CODE_MULTI;
3811 if (likely(!(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)))
3812 return MLX5_TXCMP_CODE_SINGLE;
3813 /* Continue with the next TSO packet. */
3819 * Analyze the packet and select the best method to send.
3822 * Pointer to TX queue structure.
3824 * Pointer to burst routine local context.
3826 * Configured Tx offloads mask. It is fully defined at
3827 * compile time and may be used for optimization.
3829 * The predefined flag whether to do the complete check for
3830 * multi-segment packets and TSO.
3833 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
3834 * MLX5_TXCMP_CODE_TSO - TSO required, use TSO/LSO.
3835 * MLX5_TXCMP_CODE_SINGLE - single-segment packet, use SEND.
3836 * MLX5_TXCMP_CODE_EMPW - single-segment packet, use MPW.
3838 static __rte_always_inline enum mlx5_txcmp_code
3839 mlx5_tx_able_to_empw(struct mlx5_txq_data *__rte_restrict txq,
3840 struct mlx5_txq_local *__rte_restrict loc,
3844 /* Check for multi-segment packet. */
3846 MLX5_TXOFF_CONFIG(MULTI) &&
3847 unlikely(NB_SEGS(loc->mbuf) > 1))
3848 return MLX5_TXCMP_CODE_MULTI;
3849 /* Check for TSO packet. */
3851 MLX5_TXOFF_CONFIG(TSO) &&
3852 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
3853 return MLX5_TXCMP_CODE_TSO;
3854 /* Check if eMPW is enabled at all. */
3855 if (!MLX5_TXOFF_CONFIG(EMPW))
3856 return MLX5_TXCMP_CODE_SINGLE;
3857 /* Check if eMPW can be engaged. */
3858 if (MLX5_TXOFF_CONFIG(VLAN) &&
3859 unlikely(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) &&
3860 (!MLX5_TXOFF_CONFIG(INLINE) ||
3861 unlikely((rte_pktmbuf_data_len(loc->mbuf) +
3862 sizeof(struct rte_vlan_hdr)) > txq->inlen_empw))) {
3864 * eMPW does not support the VLAN insertion offload;
3865 * we would have to inline the entire packet, but the
3866 * packet is too long for inlining.
3868 return MLX5_TXCMP_CODE_SINGLE;
3870 return MLX5_TXCMP_CODE_EMPW;
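/*
 * Summary (illustrative): a single-segment packet without the TSO flag
 * and either without VLAN, or with VLAN but short enough to be fully
 * inlined, reaches this point and is reported as eligible for eMPW.
 */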
3874 * Check whether the next packet attributes match the eMPW batch ones.
3875 * In addition, for legacy MPW the packet length is checked as well.
3878 * Pointer to TX queue structure.
3880 * Pointer to Ethernet Segment of eMPW batch.
3882 * Pointer to burst routine local context.
3884 * Length of previous packet in MPW descriptor.
3886 * Configured Tx offloads mask. It is fully defined at
3887 * compile time and may be used for optimization.
3890 * true - packet match with eMPW batch attributes.
3891 * false - no match, eMPW should be restarted.
3893 static __rte_always_inline bool
3894 mlx5_tx_match_empw(struct mlx5_txq_data *__rte_restrict txq __rte_unused,
3895 struct mlx5_wqe_eseg *__rte_restrict es,
3896 struct mlx5_txq_local *__rte_restrict loc,
3900 uint8_t swp_flags = 0;
3902 /* Compare the checksum flags, if any. */
3903 if (MLX5_TXOFF_CONFIG(CSUM) &&
3904 txq_ol_cksum_to_cs(loc->mbuf) != es->cs_flags)
3906 /* Compare the Software Parser offsets and flags. */
3907 if (MLX5_TXOFF_CONFIG(SWP) &&
3908 (es->swp_offs != txq_mbuf_to_swp(loc, &swp_flags, olx) ||
3909 es->swp_flags != swp_flags))
3911 /* Fill metadata field if needed. */
3912 if (MLX5_TXOFF_CONFIG(METADATA) &&
3913 es->metadata != (loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
3914 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0))
3916 /* Legacy MPW can only send packets with the same length. */
3917 if (MLX5_TXOFF_CONFIG(MPW) &&
3918 dlen != rte_pktmbuf_data_len(loc->mbuf))
3920 /* There must be no VLAN packets in eMPW loop. */
3921 if (MLX5_TXOFF_CONFIG(VLAN))
3922 MLX5_ASSERT(!(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT));
3927 * Update send loop variables and WQE for eMPW loop
3928 * without data inlining. Number of Data Segments is
3929 * equal to the number of sent packets.
3932 * Pointer to TX queue structure.
3934 * Pointer to burst routine local context.
3936 * Number of packets (equal to the number of Data Segments).
3938 * Accumulated statistics, bytes sent.
3940 * Configured Tx offloads mask. It is fully defined at
3941 * compile time and may be used for optimization.
3947 static __rte_always_inline void
3948 mlx5_tx_sdone_empw(struct mlx5_txq_data *__rte_restrict txq,
3949 struct mlx5_txq_local *__rte_restrict loc,
3952 unsigned int olx __rte_unused)
3954 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
3955 #ifdef MLX5_PMD_SOFT_COUNTERS
3956 /* Update sent data bytes counter. */
3957 txq->stats.obytes += slen;
3961 loc->elts_free -= ds;
3962 loc->pkts_sent += ds;
3964 loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3965 txq->wqe_ci += (ds + 3) / 4;
3966 loc->wqe_free -= (ds + 3) / 4;
3970 * Update send loop variables and WQE for eMPW loop
3971 * with data inlining. Gets the size of pushed descriptors
3972 * and data to the WQE.
3975 * Pointer to TX queue structure.
3977 * Pointer to burst routine local context.
3979 * Total size of descriptor/data in bytes.
3981 * Accumulated statistics, data bytes sent.
3983 * The base WQE for the eMPW/MPW descriptor.
3985 * Configured Tx offloads mask. It is fully defined at
3986 * compile time and may be used for optimization.
3992 static __rte_always_inline void
3993 mlx5_tx_idone_empw(struct mlx5_txq_data *__rte_restrict txq,
3994 struct mlx5_txq_local *__rte_restrict loc,
3997 struct mlx5_wqe *__rte_restrict wqem,
3998 unsigned int olx __rte_unused)
4000 struct mlx5_wqe_dseg *dseg = &wqem->dseg[0];
4002 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
4003 #ifdef MLX5_PMD_SOFT_COUNTERS
4004 /* Update sent data bytes counter. */
4005 txq->stats.obytes += slen;
4009 if (MLX5_TXOFF_CONFIG(MPW) && dseg->bcount == RTE_BE32(0)) {
4011 * If the legacy MPW session contains inline packets,
4012 * we should set the length of the only inline data segment
4013 * and align the total length to the segment size.
4015 MLX5_ASSERT(len > sizeof(dseg->bcount));
4016 dseg->bcount = rte_cpu_to_be_32((len - sizeof(dseg->bcount)) |
4017 MLX5_ETH_WQE_DATA_INLINE);
4018 len = (len + MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE + 2;
4021 * The session is not a legacy MPW one or contains
4022 * data buffer pointer segments.
4024 MLX5_ASSERT((len % MLX5_WSEG_SIZE) == 0);
4025 len = len / MLX5_WSEG_SIZE + 2;
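/*
 * Worked example (illustrative, assuming MLX5_WSEG_SIZE is 16): a
 * legacy MPW inline session that accumulated len = 100 bytes (bcount
 * included) yields (100 + 15) / 16 + 2 = 9 segments - 7 data WSEGs plus
 * the Control and Ethernet Segments; a pointer-type session divides
 * exactly. Either way wqe_ci advances by (len + 3) / 4 WQEBBs below.
 */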
4027 wqem->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | len);
4028 txq->wqe_ci += (len + 3) / 4;
4029 loc->wqe_free -= (len + 3) / 4;
4030 loc->wqe_last = wqem;
4034 * The set of Tx burst functions for single-segment packets
4035 * without TSO and with Multi-Packet Writing feature support.
4036 * Supports all types of Tx offloads, except multi-packets
4039 * Uses MLX5_OPCODE_EMPW to build WQEs if possible and sends
4040 * as many packets per WQE as it can. If eMPW is not configured
4041 * or the packet cannot be sent with eMPW (VLAN insertion), the
4042 * ordinary SEND opcode is used and only one packet is placed
4045 * The functions stop sending if they encounter a multi-segment
4046 * packet or a packet with TSO requested.
4048 * The routines are responsible for storing the processed mbuf
4049 * into the elts ring buffer and updating elts_head if the inlining
4050 * offload is requested. Otherwise, copying the mbufs to elts
4051 * can be postponed and completed at the end of the burst routine.
4054 * Pointer to TX queue structure.
4056 * Packets to transmit.
4058 * Number of packets in array.
4060 * Pointer to burst routine local context.
4062 * Configured Tx offloads mask. It is fully defined at
4063 * compile time and may be used for optimization.
4066 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
4067 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
4068 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
4069 * MLX5_TXCMP_CODE_TSO - TSO packet encountered.
4070 * MLX5_TXCMP_CODE_SINGLE - used inside functions set.
4071 * MLX5_TXCMP_CODE_EMPW - used inside functions set.
4073 * Local context variables updated.
4076 * The routine sends packets with MLX5_OPCODE_EMPW
4077 * without inlining, this is dedicated optimized branch.
4078 * No VLAN insertion is supported.
4080 static __rte_always_inline enum mlx5_txcmp_code
4081 mlx5_tx_burst_empw_simple(struct mlx5_txq_data *__rte_restrict txq,
4082 struct rte_mbuf **__rte_restrict pkts,
4083 unsigned int pkts_n,
4084 struct mlx5_txq_local *__rte_restrict loc,
4088 * This subroutine is part of mlx5_tx_burst_single()
4089 * and sends single-segment packets with the eMPW opcode
4090 * without data inlining.
4092 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
4093 MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
4094 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
4095 MLX5_ASSERT(pkts_n > loc->pkts_sent);
4096 static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
4097 pkts += loc->pkts_sent + 1;
4098 pkts_n -= loc->pkts_sent;
4100 struct mlx5_wqe_dseg *__rte_restrict dseg;
4101 struct mlx5_wqe_eseg *__rte_restrict eseg;
4102 enum mlx5_txcmp_code ret;
4103 unsigned int part, loop;
4104 unsigned int slen = 0;
4107 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
4108 part = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
4109 MLX5_MPW_MAX_PACKETS :
4110 MLX5_EMPW_MAX_PACKETS);
4111 if (unlikely(loc->elts_free < part)) {
4112 /* We do not have enough elts to save all mbufs. */
4113 if (unlikely(loc->elts_free < MLX5_EMPW_MIN_PACKETS))
4114 return MLX5_TXCMP_CODE_EXIT;
4115 /* But we are still able to send at least a minimal eMPW. */
4116 part = loc->elts_free;
4118 /* Check whether we have enough WQEs */
4119 if (unlikely(loc->wqe_free < ((2 + part + 3) / 4))) {
4120 if (unlikely(loc->wqe_free <
4121 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
4122 return MLX5_TXCMP_CODE_EXIT;
4123 part = (loc->wqe_free * 4) - 2;
4125 if (likely(part > 1))
4126 rte_prefetch0(*pkts);
4127 loc->wqe_last = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4129 * Build eMPW title WQEBB:
4130 * - Control Segment, eMPW opcode
4131 * - Ethernet Segment, no inline
4133 mlx5_tx_cseg_init(txq, loc, loc->wqe_last, part + 2,
4134 MLX5_OPCODE_ENHANCED_MPSW, olx);
4135 mlx5_tx_eseg_none(txq, loc, loc->wqe_last,
4136 olx & ~MLX5_TXOFF_CONFIG_VLAN);
4137 eseg = &loc->wqe_last->eseg;
4138 dseg = &loc->wqe_last->dseg[0];
4140 /* Store the packet length for legacy MPW. */
4141 if (MLX5_TXOFF_CONFIG(MPW))
4142 eseg->mss = rte_cpu_to_be_16
4143 (rte_pktmbuf_data_len(loc->mbuf));
4145 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
4146 #ifdef MLX5_PMD_SOFT_COUNTERS
4147 /* Update sent data bytes counter. */
4152 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
4154 if (unlikely(--loop == 0))
4156 loc->mbuf = *pkts++;
4157 if (likely(loop > 1))
4158 rte_prefetch0(*pkts);
4159 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4161 * Unroll the completion code to avoid
4162 * returning a variable value - it results in
4163 * unoptimized subsequent checking in the caller.
4165 if (ret == MLX5_TXCMP_CODE_MULTI) {
4167 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4168 if (unlikely(!loc->elts_free ||
4170 return MLX5_TXCMP_CODE_EXIT;
4171 return MLX5_TXCMP_CODE_MULTI;
4173 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
4174 if (ret == MLX5_TXCMP_CODE_TSO) {
4176 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4177 if (unlikely(!loc->elts_free ||
4179 return MLX5_TXCMP_CODE_EXIT;
4180 return MLX5_TXCMP_CODE_TSO;
4182 if (ret == MLX5_TXCMP_CODE_SINGLE) {
4184 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4185 if (unlikely(!loc->elts_free ||
4187 return MLX5_TXCMP_CODE_EXIT;
4188 return MLX5_TXCMP_CODE_SINGLE;
4190 if (ret != MLX5_TXCMP_CODE_EMPW) {
4193 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4194 return MLX5_TXCMP_CODE_ERROR;
4197 * Check whether packet parameters coincide
4198 * within assumed eMPW batch:
4199 * - check sum settings
4201 * - software parser settings
4202 * - packet length (legacy MPW only)
4204 if (!mlx5_tx_match_empw(txq, eseg, loc, dlen, olx)) {
4207 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4208 if (unlikely(!loc->elts_free ||
4210 return MLX5_TXCMP_CODE_EXIT;
4214 /* Packet attributes match, continue the same eMPW. */
4216 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
4217 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
4219 /* eMPW is built successfully, update loop parameters. */
4221 MLX5_ASSERT(pkts_n >= part);
4222 #ifdef MLX5_PMD_SOFT_COUNTERS
4223 /* Update sent data bytes counter. */
4224 txq->stats.obytes += slen;
4226 loc->elts_free -= part;
4227 loc->pkts_sent += part;
4228 txq->wqe_ci += (2 + part + 3) / 4;
4229 loc->wqe_free -= (2 + part + 3) / 4;
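/*
 * Accounting example (illustrative): a closed eMPW batch of
 * part = 14 packets occupies the Control and Ethernet Segments plus
 * one pointer Data Segment per packet, i.e. (2 + 14 + 3) / 4 = 4
 * WQEBBs are consumed above.
 */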
4231 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
4232 return MLX5_TXCMP_CODE_EXIT;
4233 loc->mbuf = *pkts++;
4234 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4235 if (unlikely(ret != MLX5_TXCMP_CODE_EMPW))
4237 /* Continue sending eMPW batches. */
4243 * The routine sends packets with MLX5_OPCODE_EMPW
4244 * with inlining, optionally supports VLAN insertion.
4246 static __rte_always_inline enum mlx5_txcmp_code
4247 mlx5_tx_burst_empw_inline(struct mlx5_txq_data *__rte_restrict txq,
4248 struct rte_mbuf **__rte_restrict pkts,
4249 unsigned int pkts_n,
4250 struct mlx5_txq_local *__rte_restrict loc,
4254 * This subroutine is part of mlx5_tx_burst_single()
4255 * and sends single-segment packets with the eMPW opcode
4256 * with data inlining.
4258 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
4259 MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
4260 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
4261 MLX5_ASSERT(pkts_n > loc->pkts_sent);
4262 static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
4263 pkts += loc->pkts_sent + 1;
4264 pkts_n -= loc->pkts_sent;
4266 struct mlx5_wqe_dseg *__rte_restrict dseg;
4267 struct mlx5_wqe *__rte_restrict wqem;
4268 enum mlx5_txcmp_code ret;
4269 unsigned int room, part, nlim;
4270 unsigned int slen = 0;
4272 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
4274 * Limit the number of packets in one WQE
4275 * to improve CQE generation latency.
4277 nlim = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
4278 MLX5_MPW_INLINE_MAX_PACKETS :
4279 MLX5_EMPW_MAX_PACKETS);
4280 /* Check whether we have the minimal amount of WQEs. */
4281 if (unlikely(loc->wqe_free <
4282 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
4283 return MLX5_TXCMP_CODE_EXIT;
4284 if (likely(pkts_n > 1))
4285 rte_prefetch0(*pkts);
4286 wqem = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4288 * Build eMPW title WQEBB:
4289 * - Control Segment, eMPW opcode, zero DS
4290 * - Ethernet Segment, no inline
4292 mlx5_tx_cseg_init(txq, loc, wqem, 0,
4293 MLX5_OPCODE_ENHANCED_MPSW, olx);
4294 mlx5_tx_eseg_none(txq, loc, wqem,
4295 olx & ~MLX5_TXOFF_CONFIG_VLAN);
4296 dseg = &wqem->dseg[0];
4297 /* Store the packet length for legacy MPW. */
4298 if (MLX5_TXOFF_CONFIG(MPW))
4299 wqem->eseg.mss = rte_cpu_to_be_16
4300 (rte_pktmbuf_data_len(loc->mbuf));
4301 room = RTE_MIN(MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE,
4302 loc->wqe_free) * MLX5_WQE_SIZE -
4303 MLX5_WQE_CSEG_SIZE -
4305 /* Limit the room for legacy MPW sessions for performance. */
4306 if (MLX5_TXOFF_CONFIG(MPW))
4307 room = RTE_MIN(room,
4308 RTE_MAX(txq->inlen_empw +
4309 sizeof(dseg->bcount) +
4310 (MLX5_TXOFF_CONFIG(VLAN) ?
4311 sizeof(struct rte_vlan_hdr) : 0),
4312 MLX5_MPW_INLINE_MAX_PACKETS *
4313 MLX5_WQE_DSEG_SIZE));
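/*
 * 'room' is the remaining byte budget of the eMPW WQE
 * available for Data Segments and inlined packet data.
 */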
4314 /* Build WQE till we have space, packets and resources. */
4317 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
4318 uint8_t *dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
4321 MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
4322 MLX5_ASSERT((room % MLX5_WQE_DSEG_SIZE) == 0);
4323 MLX5_ASSERT((uintptr_t)dseg < (uintptr_t)txq->wqes_end);
4325 * Some Tx offloads may cause an error if the
4326 * packet is not long enough, check against the
4327 * assumed minimal length.
4329 if (unlikely(dlen <= MLX5_ESEG_MIN_INLINE_SIZE)) {
4331 if (unlikely(!part))
4332 return MLX5_TXCMP_CODE_ERROR;
4334 * We have some successfully built
4335 * packet Data Segments to send.
4337 mlx5_tx_idone_empw(txq, loc, part,
4339 return MLX5_TXCMP_CODE_ERROR;
4341 /* Inline or not inline - that's the Question. */
4342 if (dlen > txq->inlen_empw ||
4343 loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE)
4345 if (MLX5_TXOFF_CONFIG(MPW)) {
4346 if (dlen > txq->inlen_send)
4350 /* Open new inline MPW session. */
4351 tlen += sizeof(dseg->bcount);
4352 dseg->bcount = RTE_BE32(0);
4354 (dseg, sizeof(dseg->bcount));
4357 * Pointer and inline descriptors must not be
4358 * intermixed within a legacy MPW session.
4360 if (wqem->dseg[0].bcount)
4364 tlen = sizeof(dseg->bcount) + dlen;
4366 /* Inline entire packet, optional VLAN insertion. */
4367 if (MLX5_TXOFF_CONFIG(VLAN) &&
4368 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
4370 * The packet length has been checked in
4371 * mlx5_tx_able_to_empw() and the packet is
4372 * guaranteed to fit into the inline length.
4375 sizeof(struct rte_vlan_hdr)) <=
4377 tlen += sizeof(struct rte_vlan_hdr);
4380 dseg = mlx5_tx_dseg_vlan(txq, loc, dseg,
4382 #ifdef MLX5_PMD_SOFT_COUNTERS
4383 /* Update sent data bytes counter. */
4384 slen += sizeof(struct rte_vlan_hdr);
4389 dseg = mlx5_tx_dseg_empw(txq, loc, dseg,
4392 if (!MLX5_TXOFF_CONFIG(MPW))
4393 tlen = RTE_ALIGN(tlen, MLX5_WSEG_SIZE);
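/*
 * eMPW rounds each inlined Data Segment up to a whole 16-byte
 * WSEG; legacy MPW inline sessions keep the data packed
 * without padding.
 */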
4394 MLX5_ASSERT(room >= tlen);
4397 * Packet data are completely inlined,
4398 * free the packet immediately.
4400 rte_pktmbuf_free_seg(loc->mbuf);
4404 * Pointer and inline descriptors must not be
4405 * intermixed within a legacy MPW session.
4407 if (MLX5_TXOFF_CONFIG(MPW) &&
4409 wqem->dseg[0].bcount == RTE_BE32(0))
4412 * Non-inlinable VLAN packets are
4413 * processed outside of this routine.
4415 MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
4416 if (MLX5_TXOFF_CONFIG(VLAN))
4417 MLX5_ASSERT(!(loc->mbuf->ol_flags &
4419 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
4420 /* We have to store mbuf in elts.*/
4421 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
4422 room -= MLX5_WQE_DSEG_SIZE;
4423 /* Ring buffer wraparound is checked at the loop end.*/
4426 #ifdef MLX5_PMD_SOFT_COUNTERS
4427 /* Update sent data bytes counter. */
4433 if (unlikely(!pkts_n || !loc->elts_free)) {
4435 * We have no resources/packets to
4436 * continue building descriptors.
4439 mlx5_tx_idone_empw(txq, loc, part,
4441 return MLX5_TXCMP_CODE_EXIT;
4443 loc->mbuf = *pkts++;
4444 if (likely(pkts_n > 1))
4445 rte_prefetch0(*pkts);
4446 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4448 * Unroll the completion code to avoid
4449 * returning a variable value - it results in
4450 * unoptimized sequential checks in the caller.
4452 if (ret == MLX5_TXCMP_CODE_MULTI) {
4454 mlx5_tx_idone_empw(txq, loc, part,
4456 if (unlikely(!loc->elts_free ||
4458 return MLX5_TXCMP_CODE_EXIT;
4459 return MLX5_TXCMP_CODE_MULTI;
4461 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
4462 if (ret == MLX5_TXCMP_CODE_TSO) {
4464 mlx5_tx_idone_empw(txq, loc, part,
4466 if (unlikely(!loc->elts_free ||
4468 return MLX5_TXCMP_CODE_EXIT;
4469 return MLX5_TXCMP_CODE_TSO;
4471 if (ret == MLX5_TXCMP_CODE_SINGLE) {
4473 mlx5_tx_idone_empw(txq, loc, part,
4475 if (unlikely(!loc->elts_free ||
4477 return MLX5_TXCMP_CODE_EXIT;
4478 return MLX5_TXCMP_CODE_SINGLE;
4480 if (ret != MLX5_TXCMP_CODE_EMPW) {
4483 mlx5_tx_idone_empw(txq, loc, part,
4485 return MLX5_TXCMP_CODE_ERROR;
4487 /* Check if we have minimal room left. */
4489 if (unlikely(!nlim || room < MLX5_WQE_DSEG_SIZE))
4492 * Check whether the packet parameters match
4493 * within the assumed eMPW batch:
4494 * - checksum settings
4496 * - software parser settings
4497 * - packet lengths (legacy MPW only)
4499 if (!mlx5_tx_match_empw(txq, &wqem->eseg,
4502 /* Packet attributes match, continue the same eMPW. */
4503 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
4504 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
4507 * We get here to close an existing eMPW
4508 * session and start a new one.
4510 MLX5_ASSERT(pkts_n);
4512 if (unlikely(!part))
4513 return MLX5_TXCMP_CODE_EXIT;
4514 mlx5_tx_idone_empw(txq, loc, part, slen, wqem, olx);
4515 if (unlikely(!loc->elts_free ||
4517 return MLX5_TXCMP_CODE_EXIT;
4518 /* Continue the loop with new eMPW session. */
4524 * The routine sends packets with ordinary MLX5_OPCODE_SEND.
4525 * Data inlining and VLAN insertion are supported.
4527 static __rte_always_inline enum mlx5_txcmp_code
4528 mlx5_tx_burst_single_send(struct mlx5_txq_data *__rte_restrict txq,
4529 struct rte_mbuf **__rte_restrict pkts,
4530 unsigned int pkts_n,
4531 struct mlx5_txq_local *__rte_restrict loc,
4535 * This subroutine is part of mlx5_tx_burst_single()
4536 * and sends single-segment packets with the SEND opcode.
4538 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
4539 MLX5_ASSERT(pkts_n > loc->pkts_sent);
4540 pkts += loc->pkts_sent + 1;
4541 pkts_n -= loc->pkts_sent;
4543 struct mlx5_wqe *__rte_restrict wqe;
4544 enum mlx5_txcmp_code ret;
4546 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
4547 if (MLX5_TXOFF_CONFIG(INLINE)) {
4548 unsigned int inlen, vlan = 0;
4550 inlen = rte_pktmbuf_data_len(loc->mbuf);
4551 if (MLX5_TXOFF_CONFIG(VLAN) &&
4552 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
4553 vlan = sizeof(struct rte_vlan_hdr);
4555 static_assert((sizeof(struct rte_vlan_hdr) +
4556 sizeof(struct rte_ether_hdr)) ==
4557 MLX5_ESEG_MIN_INLINE_SIZE,
4558 "invalid min inline data size");
4561 * If inlining is enabled at configuration time,
4562 * the limit must not be less than the minimal size.
4563 * Otherwise we would need an extra data size check
4564 * to avoid crashes due to length overflow.
4566 MLX5_ASSERT(txq->inlen_send >=
4567 MLX5_ESEG_MIN_INLINE_SIZE);
4568 if (inlen <= txq->inlen_send) {
4569 unsigned int seg_n, wqe_n;
4571 rte_prefetch0(rte_pktmbuf_mtod
4572 (loc->mbuf, uint8_t *));
4573 /* Check against minimal length. */
4574 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
4575 return MLX5_TXCMP_CODE_ERROR;
4576 if (loc->mbuf->ol_flags &
4577 PKT_TX_DYNF_NOINLINE) {
4579 * The hint flag not to inline packet
4580 * data is set. Check whether we can
4583 if ((!MLX5_TXOFF_CONFIG(EMPW) &&
4585 (MLX5_TXOFF_CONFIG(MPW) &&
4588 * The hardware requires the
4589 * minimal inline data header.
4591 goto single_min_inline;
4593 if (MLX5_TXOFF_CONFIG(VLAN) &&
4594 vlan && !txq->vlan_en) {
4596 * We must insert VLAN tag
4597 * by software means.
4599 goto single_part_inline;
4601 goto single_no_inline;
4604 * Completely inlined packet data WQE:
4605 * - Control Segment, SEND opcode
4606 * - Ethernet Segment, no VLAN insertion
4607 * - Data inlined, VLAN optionally inserted
4608 * - Alignment to MLX5_WSEG_SIZE
4609 * We have to estimate the number of WQEBBs.
4611 seg_n = (inlen + 3 * MLX5_WSEG_SIZE -
4612 MLX5_ESEG_MIN_INLINE_SIZE +
4613 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
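/*
 * seg_n is the WQE size in 16-byte WSEGs: 3 WSEGs cover the
 * Control Segment and the Ethernet Segment carrying the first
 * MLX5_ESEG_MIN_INLINE_SIZE bytes of data, the remaining
 * inlined bytes are rounded up to whole WSEGs.
 */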
4614 /* Check if there are enough WQEBBs. */
4615 wqe_n = (seg_n + 3) / 4;
4616 if (wqe_n > loc->wqe_free)
4617 return MLX5_TXCMP_CODE_EXIT;
4618 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4619 loc->wqe_last = wqe;
4620 mlx5_tx_cseg_init(txq, loc, wqe, seg_n,
4621 MLX5_OPCODE_SEND, olx);
4622 mlx5_tx_eseg_data(txq, loc, wqe,
4623 vlan, inlen, 0, olx);
4624 txq->wqe_ci += wqe_n;
4625 loc->wqe_free -= wqe_n;
4627 * Packet data are completely inlined,
4628 * free the packet immediately.
4630 rte_pktmbuf_free_seg(loc->mbuf);
4631 } else if ((!MLX5_TXOFF_CONFIG(EMPW) ||
4632 MLX5_TXOFF_CONFIG(MPW)) &&
4635 * If minimal inlining is requested, the eMPW
4636 * feature should be disabled because the data is
4637 * inlined into the Ethernet Segment, which cannot
4638 * carry inlined data for eMPW since the segment
4639 * is shared by all packets.
4641 struct mlx5_wqe_dseg *__rte_restrict dseg;
4646 * The inline-mode settings require
4647 * inlining the specified amount of
4648 * data bytes into the Ethernet Segment.
4649 * We should check the free space in the
4650 * WQE ring buffer before inlining partially.
4653 MLX5_ASSERT(txq->inlen_send >= txq->inlen_mode);
4654 MLX5_ASSERT(inlen > txq->inlen_mode);
4655 MLX5_ASSERT(txq->inlen_mode >=
4656 MLX5_ESEG_MIN_INLINE_SIZE);
4658 * Check whether there are enough free WQEBBs:
4660 * - Ethernet Segment
4661 * - First Segment of inlined Ethernet data
4662 * - ... data continued ...
4663 * - Finishing Data Segment of pointer type
4665 ds = (MLX5_WQE_CSEG_SIZE +
4666 MLX5_WQE_ESEG_SIZE +
4667 MLX5_WQE_DSEG_SIZE +
4669 MLX5_ESEG_MIN_INLINE_SIZE +
4670 MLX5_WQE_DSEG_SIZE +
4671 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
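/*
 * ds is the WQE size in 16-byte WSEGs, it is passed to
 * mlx5_tx_cseg_init() below and (ds + 3) / 4 converts it
 * to WQEBBs.
 */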
4672 if (loc->wqe_free < ((ds + 3) / 4))
4673 return MLX5_TXCMP_CODE_EXIT;
4675 * Build the ordinary SEND WQE:
4677 * - Ethernet Segment, inline inlen_mode bytes
4678 * - Data Segment of pointer type
4680 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4681 loc->wqe_last = wqe;
4682 mlx5_tx_cseg_init(txq, loc, wqe, ds,
4683 MLX5_OPCODE_SEND, olx);
4684 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan,
4687 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
4688 txq->inlen_mode - vlan;
4689 inlen -= txq->inlen_mode;
4690 mlx5_tx_dseg_ptr(txq, loc, dseg,
4693 * The WQE is built, update the loop parameters
4694 * and go to the next packet.
4696 txq->wqe_ci += (ds + 3) / 4;
4697 loc->wqe_free -= (ds + 3) / 4;
4698 /* We have to store mbuf in elts.*/
4699 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
4700 txq->elts[txq->elts_head++ & txq->elts_m] =
4708 * Partially inlined packet data WQE: we have
4709 * some space in the title WQEBB to fill
4710 * with some packet data. It takes one WQEBB,
4711 * which is available - no extra space check:
4712 * - Control Segment, SEND opcode
4713 * - Ethernet Segment, no VLAN insertion
4714 * - MLX5_ESEG_MIN_INLINE_SIZE bytes of Data
4715 * - Data Segment, pointer type
4717 * We also get here if VLAN insertion is not
4718 * supported by HW but inlining is enabled.
4721 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4722 loc->wqe_last = wqe;
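/*
 * DS count 4: Control Segment, Ethernet Segment carrying
 * MLX5_ESEG_MIN_INLINE_SIZE bytes of inlined data, and one
 * pointer Data Segment - exactly one WQEBB.
 */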
4723 mlx5_tx_cseg_init(txq, loc, wqe, 4,
4724 MLX5_OPCODE_SEND, olx);
4725 mlx5_tx_eseg_dmin(txq, loc, wqe, vlan, olx);
4726 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
4727 MLX5_ESEG_MIN_INLINE_SIZE - vlan;
4729 * The length check is performed above, by
4730 * comparing with txq->inlen_send. We should
4731 * not get overflow here.
4733 MLX5_ASSERT(inlen > MLX5_ESEG_MIN_INLINE_SIZE);
4734 dlen = inlen - MLX5_ESEG_MIN_INLINE_SIZE;
4735 mlx5_tx_dseg_ptr(txq, loc, &wqe->dseg[1],
4739 /* We have to store mbuf in elts.*/
4740 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
4741 txq->elts[txq->elts_head++ & txq->elts_m] =
4745 #ifdef MLX5_PMD_SOFT_COUNTERS
4746 /* Update sent data bytes counter. */
4747 txq->stats.obytes += vlan +
4748 rte_pktmbuf_data_len(loc->mbuf);
4752 * No inlining at all - saving CPU cycles was
4753 * prioritized at configuration time, we should not
4754 * copy any packet data into the WQE.
4756 * SEND WQE, one WQEBB:
4757 * - Control Segment, SEND opcode
4758 * - Ethernet Segment, optional VLAN, no inline
4759 * - Data Segment, pointer type
4762 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4763 loc->wqe_last = wqe;
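/*
 * DS count 3: Control Segment, Ethernet Segment without
 * inline data, and one pointer Data Segment - the WQE fits
 * into a single WQEBB.
 */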
4764 mlx5_tx_cseg_init(txq, loc, wqe, 3,
4765 MLX5_OPCODE_SEND, olx);
4766 mlx5_tx_eseg_none(txq, loc, wqe, olx);
4768 (txq, loc, &wqe->dseg[0],
4769 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
4770 rte_pktmbuf_data_len(loc->mbuf), olx);
4774 * We should not store the mbuf pointer in elts
4775 * if no inlining is configured; this is done
4776 * by the calling routine in a batch copy.
4778 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
4780 #ifdef MLX5_PMD_SOFT_COUNTERS
4781 /* Update sent data bytes counter. */
4782 txq->stats.obytes += rte_pktmbuf_data_len(loc->mbuf);
4783 if (MLX5_TXOFF_CONFIG(VLAN) &&
4784 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
4785 txq->stats.obytes +=
4786 sizeof(struct rte_vlan_hdr);
4791 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
4792 return MLX5_TXCMP_CODE_EXIT;
4793 loc->mbuf = *pkts++;
4795 rte_prefetch0(*pkts);
4796 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4797 if (unlikely(ret != MLX5_TXCMP_CODE_SINGLE))
4803 static __rte_always_inline enum mlx5_txcmp_code
4804 mlx5_tx_burst_single(struct mlx5_txq_data *__rte_restrict txq,
4805 struct rte_mbuf **__rte_restrict pkts,
4806 unsigned int pkts_n,
4807 struct mlx5_txq_local *__rte_restrict loc,
4810 enum mlx5_txcmp_code ret;
4812 ret = mlx5_tx_able_to_empw(txq, loc, olx, false);
4813 if (ret == MLX5_TXCMP_CODE_SINGLE)
4815 MLX5_ASSERT(ret == MLX5_TXCMP_CODE_EMPW);
4817 /* Optimize for inline/no inline eMPW send. */
4818 ret = (MLX5_TXOFF_CONFIG(INLINE)) ?
4819 mlx5_tx_burst_empw_inline
4820 (txq, pkts, pkts_n, loc, olx) :
4821 mlx5_tx_burst_empw_simple
4822 (txq, pkts, pkts_n, loc, olx);
4823 if (ret != MLX5_TXCMP_CODE_SINGLE)
4825 /* The resources to send one packet should remain. */
4826 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
4828 ret = mlx5_tx_burst_single_send(txq, pkts, pkts_n, loc, olx);
4829 MLX5_ASSERT(ret != MLX5_TXCMP_CODE_SINGLE);
4830 if (ret != MLX5_TXCMP_CODE_EMPW)
4832 /* The resources to send one packet should remain. */
4833 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
4838 * DPDK Tx callback template. This configured template is
4839 * used to generate routines optimized for the specified offload setup.
4840 * One of these generated functions is chosen at SQ configuration
4844 * Generic pointer to TX queue structure.
4846 * Packets to transmit.
4848 * Number of packets in array.
4850 * Configured offloads mask, presents the bits of MLX5_TXOFF_CONFIG_xxx
4851 * values. Should be constant to take compile-time static configuration
4855 * Number of packets successfully transmitted (<= pkts_n).
4857 static __rte_always_inline uint16_t
4858 mlx5_tx_burst_tmpl(struct mlx5_txq_data *__rte_restrict txq,
4859 struct rte_mbuf **__rte_restrict pkts,
4863 struct mlx5_txq_local loc;
4864 enum mlx5_txcmp_code ret;
4867 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
4868 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
4869 if (unlikely(!pkts_n))
4873 loc.wqe_last = NULL;
4876 loc.pkts_loop = loc.pkts_sent;
4878 * Check if there are some CQEs and, if any:
4879 * - process encountered errors
4880 * - process the completed WQEs
4881 * - free related mbufs
4882 * - doorbell the NIC about processed CQEs
4884 rte_prefetch0(*(pkts + loc.pkts_sent));
4885 mlx5_tx_handle_completion(txq, olx);
4887 * Calculate the number of available resources - elts and WQEs.
4888 * There are two possible different scenarios:
4889 * - no data inlining into WQEs, one WQEBB may contain up to
4890 * four packets, in this case elts become the scarce resource
4891 * - data inlining into WQEs, one packet may require multiple
4892 * WQEBBs, so the WQEs become the limiting factor.
4894 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
4895 loc.elts_free = txq->elts_s -
4896 (uint16_t)(txq->elts_head - txq->elts_tail);
4897 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
4898 loc.wqe_free = txq->wqe_s -
4899 (uint16_t)(txq->wqe_ci - txq->wqe_pi);
4900 if (unlikely(!loc.elts_free || !loc.wqe_free))
4904 * Fetch the packet from the array. Usually this is
4905 * the first packet in a series of multi/single
4908 loc.mbuf = *(pkts + loc.pkts_sent);
4909 /* Dedicated branch for multi-segment packets. */
4910 if (MLX5_TXOFF_CONFIG(MULTI) &&
4911 unlikely(NB_SEGS(loc.mbuf) > 1)) {
4913 * Multi-segment packet encountered.
4914 * Hardware is able to process it only
4915 * with SEND/TSO opcodes, one packet
4916 * per WQE, so handle it in a dedicated routine.
4919 MLX5_ASSERT(loc.pkts_sent >= loc.pkts_copy);
4920 part = loc.pkts_sent - loc.pkts_copy;
4921 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
4923 * There are some single-segment mbufs not
4924 * stored in elts. The mbufs must be in the
4925 * same order as WQEs, so we must copy the
4926 * mbufs to elts here, before the coming
4927 * multi-segment packet's mbufs are appended.
4929 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy,
4931 loc.pkts_copy = loc.pkts_sent;
4933 MLX5_ASSERT(pkts_n > loc.pkts_sent);
4934 ret = mlx5_tx_burst_mseg(txq, pkts, pkts_n, &loc, olx);
4935 if (!MLX5_TXOFF_CONFIG(INLINE))
4936 loc.pkts_copy = loc.pkts_sent;
4938 * These return code checks are expected
4939 * to be optimized out due to routine inlining.
4941 if (ret == MLX5_TXCMP_CODE_EXIT) {
4943 * The routine returns this code when
4944 * all packets are sent or there are not
4945 * enough resources to complete the request.
4949 if (ret == MLX5_TXCMP_CODE_ERROR) {
4951 * The routine returns this code when
4952 * some error in the incoming packets
4955 txq->stats.oerrors++;
4958 if (ret == MLX5_TXCMP_CODE_SINGLE) {
4960 * A single-segment packet was encountered
4961 * in the array, try to send it in the
4962 * most optimized way, possibly engaging eMPW.
4964 goto enter_send_single;
4966 if (MLX5_TXOFF_CONFIG(TSO) &&
4967 ret == MLX5_TXCMP_CODE_TSO) {
4969 * The single-segment TSO packet was
4970 * encountered in the array.
4972 goto enter_send_tso;
4974 /* We must not get here. Something is going wrong. */
4976 txq->stats.oerrors++;
4979 /* Dedicated branch for single-segment TSO packets. */
4980 if (MLX5_TXOFF_CONFIG(TSO) &&
4981 unlikely(loc.mbuf->ol_flags & PKT_TX_TCP_SEG)) {
4983 * TSO might require a special way of inlining
4984 * (dedicated parameters) and is sent only with
4985 * the MLX5_OPCODE_TSO opcode, so handle it
4986 * in a dedicated branch.
4989 MLX5_ASSERT(NB_SEGS(loc.mbuf) == 1);
4990 MLX5_ASSERT(pkts_n > loc.pkts_sent);
4991 ret = mlx5_tx_burst_tso(txq, pkts, pkts_n, &loc, olx);
4993 * These return code checks are expected
4994 * to be optimized out due to routine inlining.
4996 if (ret == MLX5_TXCMP_CODE_EXIT)
4998 if (ret == MLX5_TXCMP_CODE_ERROR) {
4999 txq->stats.oerrors++;
5002 if (ret == MLX5_TXCMP_CODE_SINGLE)
5003 goto enter_send_single;
5004 if (MLX5_TXOFF_CONFIG(MULTI) &&
5005 ret == MLX5_TXCMP_CODE_MULTI) {
5007 * The multi-segment packet was
5008 * encountered in the array.
5010 goto enter_send_multi;
5012 /* We must not get here. Something is going wrong. */
5014 txq->stats.oerrors++;
5018 * The dedicated branch for single-segment packets
5019 * without TSO. Often these can be sent using
5020 * MLX5_OPCODE_EMPW with multiple packets in one WQE.
5021 * The routine builds WQEs until it encounters
5022 * a TSO or multi-segment packet (in case these
5023 * offloads are requested at SQ configuration time).
5026 MLX5_ASSERT(pkts_n > loc.pkts_sent);
5027 ret = mlx5_tx_burst_single(txq, pkts, pkts_n, &loc, olx);
5029 * These return code checks are expected
5030 * to be optimized out due to routine inlining.
5032 if (ret == MLX5_TXCMP_CODE_EXIT)
5034 if (ret == MLX5_TXCMP_CODE_ERROR) {
5035 txq->stats.oerrors++;
5038 if (MLX5_TXOFF_CONFIG(MULTI) &&
5039 ret == MLX5_TXCMP_CODE_MULTI) {
5041 * The multi-segment packet was
5042 * encountered in the array.
5044 goto enter_send_multi;
5046 if (MLX5_TXOFF_CONFIG(TSO) &&
5047 ret == MLX5_TXCMP_CODE_TSO) {
5049 * The single-segment TSO packet was
5050 * encountered in the array.
5052 goto enter_send_tso;
5054 /* We must not get here. Something is going wrong. */
5056 txq->stats.oerrors++;
5060 * Main Tx loop is completed, do the rest:
5061 * - set completion request if thresholds are reached
5062 * - doorbell the hardware
5063 * - copy the rest of mbufs to elts (if any)
5065 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE) ||
5066 loc.pkts_sent >= loc.pkts_copy);
5067 /* Take a shortcut if nothing is sent. */
5068 if (unlikely(loc.pkts_sent == loc.pkts_loop))
5070 /* Request CQE generation if limits are reached. */
5071 mlx5_tx_request_completion(txq, &loc, olx);
5073 * Ring QP doorbell immediately after WQE building completion
5074 * to improve latency. The purely software-related data treatment
5075 * can be completed after the doorbell. Tx CQEs for this SQ are
5076 * processed in this thread only by polling.
5078 * The rdma core library can map doorbell register in two ways,
5079 * depending on the environment variable "MLX5_SHUT_UP_BF":
5081 * - as regular cached memory, the variable is either missing or
5082 * set to zero. This type of mapping may cause significant
5083 * doorbell register write latency and requires an explicit
5084 * memory write barrier to mitigate this issue and prevent
5087 * - as non-cached memory, the variable is present and set to a
5088 * non-zero value. This type of mapping may cause a performance
5089 * impact under heavy load conditions but the explicit write
5090 * memory barrier is not required and it may improve core
5093 * - the legacy behaviour (prior to the 19.08 release) was to use
5094 * heuristics to decide whether the write memory barrier should
5095 * be performed. This behaviour is selected by specifying
5096 * tx_db_nc=2; the write barrier is skipped if the application
5097 * provides the full recommended burst of packets, on the
5098 * assumption that more packets are coming and the write barrier
5099 * will be issued on the next burst (after descriptor writing,
5102 mlx5_tx_dbrec_cond_wmb(txq, loc.wqe_last, !txq->db_nc &&
5103 (!txq->db_heu || pkts_n % MLX5_TX_DEFAULT_BURST));
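/*
 * The write barrier is requested only for the cached doorbell
 * mapping (db_nc == 0); with the heuristic mode it is also
 * skipped for full default-size bursts.
 */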
5104 /* Not all of the mbufs may be stored into elts yet. */
5105 part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent - loc.pkts_copy;
5106 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
5108 * There are some single-segment mbufs not stored in elts.
5109 * This can only happen if the last packet was single-segment.
5110 * The copying is gathered into one place because it is
5111 * a good opportunity to optimize it with SIMD.
5112 * Unfortunately, if inlining is enabled, gaps in the
5113 * pointer array may happen due to early freeing of the
5116 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy, part, olx);
5117 loc.pkts_copy = loc.pkts_sent;
5119 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
5120 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
5121 if (pkts_n > loc.pkts_sent) {
5123 * If the burst size is large there might be not enough CQEs
5124 * fetched from the completion queue and not enough resources
5125 * freed to send all the packets.
5130 #ifdef MLX5_PMD_SOFT_COUNTERS
5131 /* Increment sent packets counter. */
5132 txq->stats.opackets += loc.pkts_sent;
5134 return loc.pkts_sent;
5137 /* Generate routines with Enhanced Multi-Packet Write support. */
5138 MLX5_TXOFF_DECL(full_empw,
5139 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_EMPW)
5141 MLX5_TXOFF_DECL(none_empw,
5142 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW)
5144 MLX5_TXOFF_DECL(md_empw,
5145 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5147 MLX5_TXOFF_DECL(mt_empw,
5148 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5149 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5151 MLX5_TXOFF_DECL(mtsc_empw,
5152 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5153 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5154 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5156 MLX5_TXOFF_DECL(mti_empw,
5157 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5158 MLX5_TXOFF_CONFIG_INLINE |
5159 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5161 MLX5_TXOFF_DECL(mtv_empw,
5162 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5163 MLX5_TXOFF_CONFIG_VLAN |
5164 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5166 MLX5_TXOFF_DECL(mtiv_empw,
5167 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5168 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5169 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5171 MLX5_TXOFF_DECL(sc_empw,
5172 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5173 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5175 MLX5_TXOFF_DECL(sci_empw,
5176 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5177 MLX5_TXOFF_CONFIG_INLINE |
5178 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5180 MLX5_TXOFF_DECL(scv_empw,
5181 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5182 MLX5_TXOFF_CONFIG_VLAN |
5183 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5185 MLX5_TXOFF_DECL(sciv_empw,
5186 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5187 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5188 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5190 MLX5_TXOFF_DECL(i_empw,
5191 MLX5_TXOFF_CONFIG_INLINE |
5192 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5194 MLX5_TXOFF_DECL(v_empw,
5195 MLX5_TXOFF_CONFIG_VLAN |
5196 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5198 MLX5_TXOFF_DECL(iv_empw,
5199 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5200 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5202 /* Generate routines without Enhanced Multi-Packet Write support. */
5203 MLX5_TXOFF_DECL(full,
5204 MLX5_TXOFF_CONFIG_FULL)
5206 MLX5_TXOFF_DECL(none,
5207 MLX5_TXOFF_CONFIG_NONE)
5210 MLX5_TXOFF_CONFIG_METADATA)
5213 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5214 MLX5_TXOFF_CONFIG_METADATA)
5216 MLX5_TXOFF_DECL(mtsc,
5217 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5218 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5219 MLX5_TXOFF_CONFIG_METADATA)
5221 MLX5_TXOFF_DECL(mti,
5222 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5223 MLX5_TXOFF_CONFIG_INLINE |
5224 MLX5_TXOFF_CONFIG_METADATA)
5227 MLX5_TXOFF_DECL(mtv,
5228 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5229 MLX5_TXOFF_CONFIG_VLAN |
5230 MLX5_TXOFF_CONFIG_METADATA)
5233 MLX5_TXOFF_DECL(mtiv,
5234 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5235 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5236 MLX5_TXOFF_CONFIG_METADATA)
5239 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5240 MLX5_TXOFF_CONFIG_METADATA)
5242 MLX5_TXOFF_DECL(sci,
5243 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5244 MLX5_TXOFF_CONFIG_INLINE |
5245 MLX5_TXOFF_CONFIG_METADATA)
5248 MLX5_TXOFF_DECL(scv,
5249 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5250 MLX5_TXOFF_CONFIG_VLAN |
5251 MLX5_TXOFF_CONFIG_METADATA)
5254 MLX5_TXOFF_DECL(sciv,
5255 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5256 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5257 MLX5_TXOFF_CONFIG_METADATA)
5260 MLX5_TXOFF_CONFIG_INLINE |
5261 MLX5_TXOFF_CONFIG_METADATA)
5264 MLX5_TXOFF_CONFIG_VLAN |
5265 MLX5_TXOFF_CONFIG_METADATA)
5268 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5269 MLX5_TXOFF_CONFIG_METADATA)
5272 * Generate routines with Legacy Multi-Packet Write support.
5273 * This mode is supported by ConnectX-4 Lx only and imposes
5274 * offload limitations - the following are not supported:
5275 * - ACL/Flows (metadata becomes meaningless)
5276 * - WQE Inline headers
5277 * - SRIOV (E-Switch offloads)
5279 * - tunnel encapsulation/decapsulation
5282 MLX5_TXOFF_DECL(none_mpw,
5283 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW |
5284 MLX5_TXOFF_CONFIG_MPW)
5286 MLX5_TXOFF_DECL(mci_mpw,
5287 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5288 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5289 MLX5_TXOFF_CONFIG_MPW)
5291 MLX5_TXOFF_DECL(mc_mpw,
5292 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5293 MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW)
5295 MLX5_TXOFF_DECL(i_mpw,
5296 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5297 MLX5_TXOFF_CONFIG_MPW)
5300 * Array of declared and compiled Tx burst functions and the corresponding
5301 * supported offload sets. The array is used to select the Tx burst
5302 * function for the offload set specified at Tx queue configuration time.
5305 eth_tx_burst_t func;
5308 MLX5_TXOFF_INFO(full_empw,
5309 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5310 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5311 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5312 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5314 MLX5_TXOFF_INFO(none_empw,
5315 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW)
5317 MLX5_TXOFF_INFO(md_empw,
5318 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5320 MLX5_TXOFF_INFO(mt_empw,
5321 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5322 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5324 MLX5_TXOFF_INFO(mtsc_empw,
5325 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5326 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5327 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5329 MLX5_TXOFF_INFO(mti_empw,
5330 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5331 MLX5_TXOFF_CONFIG_INLINE |
5332 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5334 MLX5_TXOFF_INFO(mtv_empw,
5335 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5336 MLX5_TXOFF_CONFIG_VLAN |
5337 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5339 MLX5_TXOFF_INFO(mtiv_empw,
5340 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5341 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5342 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5344 MLX5_TXOFF_INFO(sc_empw,
5345 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5346 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5348 MLX5_TXOFF_INFO(sci_empw,
5349 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5350 MLX5_TXOFF_CONFIG_INLINE |
5351 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5353 MLX5_TXOFF_INFO(scv_empw,
5354 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5355 MLX5_TXOFF_CONFIG_VLAN |
5356 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5358 MLX5_TXOFF_INFO(sciv_empw,
5359 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5360 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5361 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5363 MLX5_TXOFF_INFO(i_empw,
5364 MLX5_TXOFF_CONFIG_INLINE |
5365 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5367 MLX5_TXOFF_INFO(v_empw,
5368 MLX5_TXOFF_CONFIG_VLAN |
5369 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5371 MLX5_TXOFF_INFO(iv_empw,
5372 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5373 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5375 MLX5_TXOFF_INFO(full,
5376 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5377 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5378 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5379 MLX5_TXOFF_CONFIG_METADATA)
5381 MLX5_TXOFF_INFO(none,
5382 MLX5_TXOFF_CONFIG_NONE)
5385 MLX5_TXOFF_CONFIG_METADATA)
5388 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5389 MLX5_TXOFF_CONFIG_METADATA)
5391 MLX5_TXOFF_INFO(mtsc,
5392 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5393 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5394 MLX5_TXOFF_CONFIG_METADATA)
5396 MLX5_TXOFF_INFO(mti,
5397 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5398 MLX5_TXOFF_CONFIG_INLINE |
5399 MLX5_TXOFF_CONFIG_METADATA)
5401 MLX5_TXOFF_INFO(mtv,
5402 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5403 MLX5_TXOFF_CONFIG_VLAN |
5404 MLX5_TXOFF_CONFIG_METADATA)
5406 MLX5_TXOFF_INFO(mtiv,
5407 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5408 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5409 MLX5_TXOFF_CONFIG_METADATA)
5412 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5413 MLX5_TXOFF_CONFIG_METADATA)
5415 MLX5_TXOFF_INFO(sci,
5416 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5417 MLX5_TXOFF_CONFIG_INLINE |
5418 MLX5_TXOFF_CONFIG_METADATA)
5420 MLX5_TXOFF_INFO(scv,
5421 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5422 MLX5_TXOFF_CONFIG_VLAN |
5423 MLX5_TXOFF_CONFIG_METADATA)
5425 MLX5_TXOFF_INFO(sciv,
5426 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5427 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5428 MLX5_TXOFF_CONFIG_METADATA)
5431 MLX5_TXOFF_CONFIG_INLINE |
5432 MLX5_TXOFF_CONFIG_METADATA)
5435 MLX5_TXOFF_CONFIG_VLAN |
5436 MLX5_TXOFF_CONFIG_METADATA)
5439 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5440 MLX5_TXOFF_CONFIG_METADATA)
5442 MLX5_TXOFF_INFO(none_mpw,
5443 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW |
5444 MLX5_TXOFF_CONFIG_MPW)
5446 MLX5_TXOFF_INFO(mci_mpw,
5447 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5448 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5449 MLX5_TXOFF_CONFIG_MPW)
5451 MLX5_TXOFF_INFO(mc_mpw,
5452 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5453 MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW)
5455 MLX5_TXOFF_INFO(i_mpw,
5456 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5457 MLX5_TXOFF_CONFIG_MPW)
5461 * Configure the Tx function to use. The routine checks the configured
5462 * Tx offloads for the device and selects the appropriate Tx burst
5463 * routine. There are multiple Tx burst routines compiled from
5464 * the same template in the most optimal way for the dedicated
5468 * Pointer to private data structure.
5471 * Pointer to selected Tx burst function.
5474 mlx5_select_tx_function(struct rte_eth_dev *dev)
5476 struct mlx5_priv *priv = dev->data->dev_private;
5477 struct mlx5_dev_config *config = &priv->config;
5478 uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
5479 unsigned int diff = 0, olx = 0, i, m;
5481 static_assert(MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE <=
5482 MLX5_DSEG_MAX, "invalid WQE max size");
5483 static_assert(MLX5_WQE_CSEG_SIZE == MLX5_WSEG_SIZE,
5484 "invalid WQE Control Segment size");
5485 static_assert(MLX5_WQE_ESEG_SIZE == MLX5_WSEG_SIZE,
5486 "invalid WQE Ethernet Segment size");
5487 static_assert(MLX5_WQE_DSEG_SIZE == MLX5_WSEG_SIZE,
5488 "invalid WQE Data Segment size");
5489 static_assert(MLX5_WQE_SIZE == 4 * MLX5_WSEG_SIZE,
5490 "invalid WQE size");
5492 if (tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
5493 /* We should support Multi-Segment Packets. */
5494 olx |= MLX5_TXOFF_CONFIG_MULTI;
5496 if (tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
5497 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
5498 DEV_TX_OFFLOAD_GRE_TNL_TSO |
5499 DEV_TX_OFFLOAD_IP_TNL_TSO |
5500 DEV_TX_OFFLOAD_UDP_TNL_TSO)) {
5501 /* We should support TCP Send Offload. */
5502 olx |= MLX5_TXOFF_CONFIG_TSO;
5504 if (tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO |
5505 DEV_TX_OFFLOAD_UDP_TNL_TSO |
5506 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
5507 /* We should support Software Parser for Tunnels. */
5508 olx |= MLX5_TXOFF_CONFIG_SWP;
5510 if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
5511 DEV_TX_OFFLOAD_UDP_CKSUM |
5512 DEV_TX_OFFLOAD_TCP_CKSUM |
5513 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
5514 /* We should support IP/TCP/UDP Checksums. */
5515 olx |= MLX5_TXOFF_CONFIG_CSUM;
5517 if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT) {
5518 /* We should support VLAN insertion. */
5519 olx |= MLX5_TXOFF_CONFIG_VLAN;
5521 if (priv->txqs_n && (*priv->txqs)[0]) {
5522 struct mlx5_txq_data *txd = (*priv->txqs)[0];
5524 if (txd->inlen_send) {
5526 * Check the data inline requirements. Data inlining
5527 * is enabled on a per-device basis, so we can check
5528 * the first Tx queue only.
5530 * If the device does not support VLAN insertion in the WQE
5531 * and some queues are requested to perform VLAN
5532 * insertion offload then inlining must be enabled.
5534 olx |= MLX5_TXOFF_CONFIG_INLINE;
5537 if (config->mps == MLX5_MPW_ENHANCED &&
5538 config->txq_inline_min <= 0) {
5540 * The NIC supports Enhanced Multi-Packet Write
5541 * and does not require minimal inline data.
5543 olx |= MLX5_TXOFF_CONFIG_EMPW;
5545 if (rte_flow_dynf_metadata_avail()) {
5546 /* We should support Flow metadata. */
5547 olx |= MLX5_TXOFF_CONFIG_METADATA;
5549 if (config->mps == MLX5_MPW) {
5551 * The NIC supports Legacy Multi-Packet Write.
5552 * The MLX5_TXOFF_CONFIG_MPW controls the
5553 * descriptor building method in combination
5554 * with MLX5_TXOFF_CONFIG_EMPW.
5556 if (!(olx & (MLX5_TXOFF_CONFIG_TSO |
5557 MLX5_TXOFF_CONFIG_SWP |
5558 MLX5_TXOFF_CONFIG_VLAN |
5559 MLX5_TXOFF_CONFIG_METADATA)))
5560 olx |= MLX5_TXOFF_CONFIG_EMPW |
5561 MLX5_TXOFF_CONFIG_MPW;
5564 * Scan the routines table to find the minimal
5565 * routine satisfying the requested offloads.
5567 m = RTE_DIM(txoff_func);
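/*
 * 'm' tracks the index of the best matching routine found so
 * far, RTE_DIM(txoff_func) means no match has been found yet.
 */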
5568 for (i = 0; i < RTE_DIM(txoff_func); i++) {
5571 tmp = txoff_func[i].olx;
5573 /* Meets requested offloads exactly.*/
5577 if ((tmp & olx) != olx) {
5578 /* Does not meet requested offloads at all. */
5581 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_MPW)
5582 /* Do not enable legacy MPW if not configured. */
5584 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_EMPW)
5585 /* Do not enable eMPW if not configured. */
5587 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_INLINE)
5588 /* Do not enable inlining if not configured. */
5591 * This routine meets the requirements.
5592 * Check whether it has the minimal amount
5593 * of not-requested offloads.
5595 tmp = __builtin_popcountl(tmp & ~olx);
5596 if (m >= RTE_DIM(txoff_func) || tmp < diff) {
5597 /* First or better match, save and continue. */
5603 tmp = txoff_func[i].olx ^ txoff_func[m].olx;
5604 if (__builtin_ffsl(txoff_func[i].olx & ~tmp) <
5605 __builtin_ffsl(txoff_func[m].olx & ~tmp)) {
5606 /* The candidate has a lighter not-requested offload. */
5611 if (m >= RTE_DIM(txoff_func)) {
5612 DRV_LOG(DEBUG, "port %u has no selected Tx function"
5613 " for requested offloads %04X",
5614 dev->data->port_id, olx);
5617 DRV_LOG(DEBUG, "port %u has selected Tx function"
5618 " supporting offloads %04X/%04X",
5619 dev->data->port_id, olx, txoff_func[m].olx);
5620 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MULTI)
5621 DRV_LOG(DEBUG, "\tMULTI (multi segment)");
5622 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_TSO)
5623 DRV_LOG(DEBUG, "\tTSO (TCP send offload)");
5624 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_SWP)
5625 DRV_LOG(DEBUG, "\tSWP (software parser)");
5626 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_CSUM)
5627 DRV_LOG(DEBUG, "\tCSUM (checksum offload)");
5628 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_INLINE)
5629 DRV_LOG(DEBUG, "\tINLIN (inline data)");
5630 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_VLAN)
5631 DRV_LOG(DEBUG, "\tVLANI (VLAN insertion)");
5632 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_METADATA)
5633 DRV_LOG(DEBUG, "\tMETAD (tx Flow metadata)");
5634 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_EMPW) {
5635 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MPW)
5636 DRV_LOG(DEBUG, "\tMPW (Legacy MPW)");
5638 DRV_LOG(DEBUG, "\tEMPW (Enhanced MPW)");
5640 return txoff_func[m].func;
5644 * DPDK callback to get the TX queue information
5647 * Pointer to the device structure.
5649 * @param tx_queue_id
5650 * Tx queue identifier.
5653 * Pointer to the TX queue information structure.
5660 mlx5_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
5661 struct rte_eth_txq_info *qinfo)
5663 struct mlx5_priv *priv = dev->data->dev_private;
5664 struct mlx5_txq_data *txq = (*priv->txqs)[tx_queue_id];
5665 struct mlx5_txq_ctrl *txq_ctrl =
5666 container_of(txq, struct mlx5_txq_ctrl, txq);
5670 qinfo->nb_desc = txq->elts_s;
5671 qinfo->conf.tx_thresh.pthresh = 0;
5672 qinfo->conf.tx_thresh.hthresh = 0;
5673 qinfo->conf.tx_thresh.wthresh = 0;
5674 qinfo->conf.tx_rs_thresh = 0;
5675 qinfo->conf.tx_free_thresh = 0;
5676 qinfo->conf.tx_deferred_start = txq_ctrl ? 0 : 1;
5677 qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
5681 * DPDK callback to get the TX packet burst mode information
5684 * Pointer to the device structure.
5686 * @param tx_queue_id
5687 * Tx queue identifier.
5690 * Pointer to the burst mode information.
5693 * 0 on success, -EINVAL on failure.
5697 mlx5_tx_burst_mode_get(struct rte_eth_dev *dev,
5698 uint16_t tx_queue_id __rte_unused,
5699 struct rte_eth_burst_mode *mode)
5701 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
5702 unsigned int i, olx;
5704 for (i = 0; i < RTE_DIM(txoff_func); i++) {
5705 if (pkt_burst == txoff_func[i].func) {
5706 olx = txoff_func[i].olx;
5707 snprintf(mode->info, sizeof(mode->info),
5709 (olx & MLX5_TXOFF_CONFIG_EMPW) ?
5710 ((olx & MLX5_TXOFF_CONFIG_MPW) ?
5711 "Legacy MPW" : "Enhanced MPW") : "No MPW",
5712 (olx & MLX5_TXOFF_CONFIG_MULTI) ?
5714 (olx & MLX5_TXOFF_CONFIG_TSO) ?
5716 (olx & MLX5_TXOFF_CONFIG_SWP) ?
5718 (olx & MLX5_TXOFF_CONFIG_CSUM) ?
5720 (olx & MLX5_TXOFF_CONFIG_INLINE) ?
5722 (olx & MLX5_TXOFF_CONFIG_VLAN) ?
5724 (olx & MLX5_TXOFF_CONFIG_METADATA) ?
5725 " + METADATA" : "");