1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015-2019 Mellanox Technologies, Ltd
12 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
14 #pragma GCC diagnostic ignored "-Wpedantic"
16 #include <infiniband/verbs.h>
17 #include <infiniband/mlx5dv.h>
19 #pragma GCC diagnostic error "-Wpedantic"
23 #include <rte_mempool.h>
24 #include <rte_prefetch.h>
25 #include <rte_common.h>
26 #include <rte_branch_prediction.h>
27 #include <rte_ether.h>
28 #include <rte_cycles.h>
32 #include "mlx5_devx_cmds.h"
33 #include "mlx5_utils.h"
34 #include "mlx5_rxtx.h"
35 #include "mlx5_autoconf.h"
36 #include "mlx5_defs.h"
39 /* TX burst subroutines return codes. */
40 enum mlx5_txcmp_code {
41 MLX5_TXCMP_CODE_EXIT = 0,
42 MLX5_TXCMP_CODE_ERROR,
43 MLX5_TXCMP_CODE_SINGLE,
44 MLX5_TXCMP_CODE_MULTI,
50 * These defines are used to configure the Tx burst routine option set
51 * supported at compile time. The options that are not specified are
52 * optimized out, because the related if conditions can be evaluated
53 * at compile time. Offloads with a bigger runtime check overhead
54 * (i.e. more CPU cycles to skip) should have a bigger index - this is
55 * needed to select the best matching routine when there is no exact
56 * match and some offloads are not actually requested.
58 #define MLX5_TXOFF_CONFIG_MULTI (1u << 0) /* Multi-segment packets.*/
59 #define MLX5_TXOFF_CONFIG_TSO (1u << 1) /* TCP send offload supported.*/
60 #define MLX5_TXOFF_CONFIG_SWP (1u << 2) /* Tunnels/SW Parser offloads.*/
61 #define MLX5_TXOFF_CONFIG_CSUM (1u << 3) /* Check Sums offloaded. */
62 #define MLX5_TXOFF_CONFIG_INLINE (1u << 4) /* Data inlining supported. */
63 #define MLX5_TXOFF_CONFIG_VLAN (1u << 5) /* VLAN insertion supported.*/
64 #define MLX5_TXOFF_CONFIG_METADATA (1u << 6) /* Flow metadata. */
65 #define MLX5_TXOFF_CONFIG_EMPW (1u << 8) /* Enhanced MPW supported.*/
66 #define MLX5_TXOFF_CONFIG_MPW (1u << 9) /* Legacy MPW supported.*/
68 /* The most common offloads groups. */
69 #define MLX5_TXOFF_CONFIG_NONE 0
70 #define MLX5_TXOFF_CONFIG_FULL (MLX5_TXOFF_CONFIG_MULTI | \
71 MLX5_TXOFF_CONFIG_TSO | \
72 MLX5_TXOFF_CONFIG_SWP | \
73 MLX5_TXOFF_CONFIG_CSUM | \
74 MLX5_TXOFF_CONFIG_INLINE | \
75 MLX5_TXOFF_CONFIG_VLAN | \
76 MLX5_TXOFF_CONFIG_METADATA)
78 #define MLX5_TXOFF_CONFIG(mask) (olx & MLX5_TXOFF_CONFIG_##mask)
80 #define MLX5_TXOFF_DECL(func, olx) \
81 static uint16_t mlx5_tx_burst_##func(void *txq, \
82 struct rte_mbuf **pkts, \
85 return mlx5_tx_burst_tmpl((struct mlx5_txq_data *)txq, \
86 pkts, pkts_n, (olx)); \
89 #define MLX5_TXOFF_INFO(func, olx) {mlx5_tx_burst_##func, olx},
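/*
 * Illustrative sketch (the helper below is hypothetical and not part of
 * the datapath): every tx_burst routine is instantiated with a
 * compile-time constant "olx", so each MLX5_TXOFF_CONFIG() test folds to
 * a constant and the compiler drops the branches of offloads that were
 * not configured.
 */
static __rte_always_inline unsigned int
mlx5_txoff_config_example(unsigned int olx)
{
        unsigned int n = 0;

        if (MLX5_TXOFF_CONFIG(CSUM))
                n++; /* Kept only when the CSUM bit is set in olx. */
        if (MLX5_TXOFF_CONFIG(TSO))
                n++; /* Optimized out when the TSO bit is clear in olx. */
        return n;
}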
91 static __rte_always_inline uint32_t
92 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe);
94 static __rte_always_inline int
95 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
96 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe);
98 static __rte_always_inline uint32_t
99 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe);
101 static __rte_always_inline void
102 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
103 volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res);
105 static __rte_always_inline void
106 mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx,
107 const unsigned int strd_n);
110 mlx5_queue_state_modify(struct rte_eth_dev *dev,
111 struct mlx5_mp_arg_queue_state_modify *sm);
114 mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *restrict tcp,
115 volatile struct mlx5_cqe *restrict cqe,
119 mlx5_lro_update_hdr(uint8_t *restrict padd,
120 volatile struct mlx5_cqe *restrict cqe,
123 uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
124 [0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
127 uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned;
128 uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned;
131 * Build a table to translate Rx completion flags to packet type.
133 * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
136 mlx5_set_ptype_table(void)
139 uint32_t (*p)[RTE_DIM(mlx5_ptype_table)] = &mlx5_ptype_table;
141 /* Last entry must not be overwritten, reserved for errored packet. */
142 for (i = 0; i < RTE_DIM(mlx5_ptype_table) - 1; ++i)
143 (*p)[i] = RTE_PTYPE_UNKNOWN;
145 * The index to the array should have:
146 * bit[1:0] = l3_hdr_type
147 * bit[4:2] = l4_hdr_type
150 * bit[7] = outer_l3_type
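* For instance, index 0x45 = tunneled (bit 6) | l4_hdr_type 1 | l3_hdr_type 1
* decodes in the table below to an IPv4 tunnel carrying an inner IPv6/TCP
* packet.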
153 (*p)[0x00] = RTE_PTYPE_L2_ETHER;
155 (*p)[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
156 RTE_PTYPE_L4_NONFRAG;
157 (*p)[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
158 RTE_PTYPE_L4_NONFRAG;
160 (*p)[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
162 (*p)[0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
165 (*p)[0x05] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
167 (*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
169 (*p)[0x0d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
171 (*p)[0x0e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
173 (*p)[0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
175 (*p)[0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
178 (*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
180 (*p)[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
182 /* Repeat with outer_l3_type being set. Just in case. */
183 (*p)[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
184 RTE_PTYPE_L4_NONFRAG;
185 (*p)[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
186 RTE_PTYPE_L4_NONFRAG;
187 (*p)[0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
189 (*p)[0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
191 (*p)[0x85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
193 (*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
195 (*p)[0x8d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
197 (*p)[0x8e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
199 (*p)[0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
201 (*p)[0x92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
203 (*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
205 (*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
208 (*p)[0x40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
209 (*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
210 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
211 RTE_PTYPE_INNER_L4_NONFRAG;
212 (*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
213 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
214 RTE_PTYPE_INNER_L4_NONFRAG;
215 (*p)[0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
216 (*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
217 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
218 RTE_PTYPE_INNER_L4_NONFRAG;
219 (*p)[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
220 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
221 RTE_PTYPE_INNER_L4_NONFRAG;
222 /* Tunneled - Fragmented */
223 (*p)[0x61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
224 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
225 RTE_PTYPE_INNER_L4_FRAG;
226 (*p)[0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
227 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
228 RTE_PTYPE_INNER_L4_FRAG;
229 (*p)[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
230 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
231 RTE_PTYPE_INNER_L4_FRAG;
232 (*p)[0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
233 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
234 RTE_PTYPE_INNER_L4_FRAG;
236 (*p)[0x45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
237 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
238 RTE_PTYPE_INNER_L4_TCP;
239 (*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
240 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
241 RTE_PTYPE_INNER_L4_TCP;
242 (*p)[0x4d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
243 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
244 RTE_PTYPE_INNER_L4_TCP;
245 (*p)[0x4e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
246 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
247 RTE_PTYPE_INNER_L4_TCP;
248 (*p)[0x51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
249 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
250 RTE_PTYPE_INNER_L4_TCP;
251 (*p)[0x52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
252 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
253 RTE_PTYPE_INNER_L4_TCP;
254 (*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
255 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
256 RTE_PTYPE_INNER_L4_TCP;
257 (*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
258 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
259 RTE_PTYPE_INNER_L4_TCP;
260 (*p)[0xcd] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
261 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
262 RTE_PTYPE_INNER_L4_TCP;
263 (*p)[0xce] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
264 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
265 RTE_PTYPE_INNER_L4_TCP;
266 (*p)[0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
267 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
268 RTE_PTYPE_INNER_L4_TCP;
269 (*p)[0xd2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
270 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
271 RTE_PTYPE_INNER_L4_TCP;
273 (*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
274 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
275 RTE_PTYPE_INNER_L4_UDP;
276 (*p)[0x4a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
277 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
278 RTE_PTYPE_INNER_L4_UDP;
279 (*p)[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
280 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
281 RTE_PTYPE_INNER_L4_UDP;
282 (*p)[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
283 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
284 RTE_PTYPE_INNER_L4_UDP;
288 * Build a table to translate mbuf checksum offload requests to the Verbs checksum type.
291 mlx5_set_cksum_table(void)
297 * The index should have:
298 * bit[0] = PKT_TX_TCP_SEG
299 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
300 * bit[4] = PKT_TX_IP_CKSUM
301 * bit[8] = PKT_TX_OUTER_IP_CKSUM
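* For instance, i = 0x15 (TCP_SEG | TCP_CKSUM | IP_CKSUM, no tunnel bit)
* resolves in the loop below to MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM.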
304 for (i = 0; i < RTE_DIM(mlx5_cksum_table); ++i) {
307 /* Tunneled packet. */
308 if (i & (1 << 8)) /* Outer IP. */
309 v |= MLX5_ETH_WQE_L3_CSUM;
310 if (i & (1 << 4)) /* Inner IP. */
311 v |= MLX5_ETH_WQE_L3_INNER_CSUM;
312 if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
313 v |= MLX5_ETH_WQE_L4_INNER_CSUM;
316 if (i & (1 << 4)) /* IP. */
317 v |= MLX5_ETH_WQE_L3_CSUM;
318 if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
319 v |= MLX5_ETH_WQE_L4_CSUM;
321 mlx5_cksum_table[i] = v;
326 * Build a table to translate the mbuf packet type to the Verbs SWP (Software Parser) type.
329 mlx5_set_swp_types_table(void)
335 * The index should have:
336 * bit[0:1] = PKT_TX_L4_MASK
337 * bit[4] = PKT_TX_IPV6
338 * bit[8] = PKT_TX_OUTER_IPV6
339 * bit[9] = PKT_TX_OUTER_UDP
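* For instance, i = 0x103 (PKT_TX_OUTER_IPV6 and PKT_TX_UDP_CKSUM shifted
* down) resolves below to MLX5_ETH_WQE_L3_OUTER_IPV6 | MLX5_ETH_WQE_L4_INNER_UDP.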
341 for (i = 0; i < RTE_DIM(mlx5_swp_types_table); ++i) {
344 v |= MLX5_ETH_WQE_L3_OUTER_IPV6;
346 v |= MLX5_ETH_WQE_L4_OUTER_UDP;
348 v |= MLX5_ETH_WQE_L3_INNER_IPV6;
349 if ((i & 3) == (PKT_TX_UDP_CKSUM >> 52))
350 v |= MLX5_ETH_WQE_L4_INNER_UDP;
351 mlx5_swp_types_table[i] = v;
356 * Set Software Parser flags and offsets in Ethernet Segment of WQE.
357 * Flags must be initialized to zero beforehand.
360 * Pointer to burst routine local context.
362 * Pointer to store Software Parser flags
364 * Configured Tx offloads mask. It is fully defined at
365 * compile time and may be used for optimization.
368 * Software Parser offsets packed in dword.
369 * Software Parser flags are set by pointer.
371 static __rte_always_inline uint32_t
372 txq_mbuf_to_swp(struct mlx5_txq_local *restrict loc,
377 unsigned int idx, off;
380 if (!MLX5_TXOFF_CONFIG(SWP))
382 ol = loc->mbuf->ol_flags;
383 tunnel = ol & PKT_TX_TUNNEL_MASK;
385 * Check whether Software Parser is required.
386 * Only custom tunnels may ask for it.
388 if (likely(tunnel != PKT_TX_TUNNEL_UDP && tunnel != PKT_TX_TUNNEL_IP))
391 * The index should have:
392 * bit[0:1] = PKT_TX_L4_MASK
393 * bit[4] = PKT_TX_IPV6
394 * bit[8] = PKT_TX_OUTER_IPV6
395 * bit[9] = PKT_TX_OUTER_UDP
397 idx = (ol & (PKT_TX_L4_MASK | PKT_TX_IPV6 | PKT_TX_OUTER_IPV6)) >> 52;
398 idx |= (tunnel == PKT_TX_TUNNEL_UDP) ? (1 << 9) : 0;
399 *swp_flags = mlx5_swp_types_table[idx];
401 * Set offsets for SW parser. Since ConnectX-5, SW parser just
402 * complements HW parser. SW parser starts to engage only if HW parser
403 * can't reach a header. For the older devices, HW parser will not kick
404 * in if any of SWP offsets is set. Therefore, all of the L3 offsets
405 * should be set regardless of HW offload.
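* For instance, with hypothetical outer_l2_len = 14 and outer_l3_len = 20
* bytes, the outer L3 offset below is 14 / 2 = 7 and, for a UDP tunnel,
* the outer L4 offset is (14 + 20) / 2 = 17, both in 2-byte units.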
407 off = loc->mbuf->outer_l2_len;
408 if (MLX5_TXOFF_CONFIG(VLAN) && ol & PKT_TX_VLAN_PKT)
409 off += sizeof(struct rte_vlan_hdr);
410 set = (off >> 1) << 8; /* Outer L3 offset. */
411 off += loc->mbuf->outer_l3_len;
412 if (tunnel == PKT_TX_TUNNEL_UDP)
413 set |= off >> 1; /* Outer L4 offset. */
414 if (ol & (PKT_TX_IPV4 | PKT_TX_IPV6)) { /* Inner IP. */
415 const uint64_t csum = ol & PKT_TX_L4_MASK;
416 off += loc->mbuf->l2_len;
417 set |= (off >> 1) << 24; /* Inner L3 offset. */
418 if (csum == PKT_TX_TCP_CKSUM ||
419 csum == PKT_TX_UDP_CKSUM ||
420 (MLX5_TXOFF_CONFIG(TSO) && ol & PKT_TX_TCP_SEG)) {
421 off += loc->mbuf->l3_len;
422 set |= (off >> 1) << 16; /* Inner L4 offset. */
425 set = rte_cpu_to_le_32(set);
430 * Convert the Checksum offloads to Verbs.
433 * Pointer to the mbuf.
436 * Converted checksum flags.
438 static __rte_always_inline uint8_t
439 txq_ol_cksum_to_cs(struct rte_mbuf *buf)
442 uint8_t is_tunnel = !!(buf->ol_flags & PKT_TX_TUNNEL_MASK);
443 const uint64_t ol_flags_mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK |
444 PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM;
447 * The index should have:
448 * bit[0] = PKT_TX_TCP_SEG
449 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
450 * bit[4] = PKT_TX_IP_CKSUM
451 * bit[8] = PKT_TX_OUTER_IP_CKSUM
454 idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (!!is_tunnel << 9);
455 return mlx5_cksum_table[idx];
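/*
 * Illustrative usage sketch (hypothetical helper, not called by the
 * driver, assuming mlx5_set_cksum_table() has already been run): a plain
 * TSO packet requesting IP and TCP checksums resolves to both L3 and L4
 * checksum flags for the WQE.
 */
static __rte_always_inline uint8_t
mlx5_cksum_to_cs_example(struct rte_mbuf *mbuf)
{
        mbuf->ol_flags = PKT_TX_TCP_SEG | PKT_TX_TCP_CKSUM | PKT_TX_IP_CKSUM;
        /* Equals MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM. */
        return txq_ol_cksum_to_cs(mbuf);
}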
459 * Internal function to compute the number of used descriptors in an RX queue
465 * The number of used Rx descriptors.
468 rx_queue_count(struct mlx5_rxq_data *rxq)
470 struct rxq_zip *zip = &rxq->zip;
471 volatile struct mlx5_cqe *cqe;
472 const unsigned int cqe_n = (1 << rxq->cqe_n);
473 const unsigned int cqe_cnt = cqe_n - 1;
477 /* If we are processing a compressed CQE. */
479 used = zip->cqe_cnt - zip->ca;
485 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
486 while (check_cqe(cqe, cqe_n, cq_ci) != MLX5_CQE_STATUS_HW_OWN) {
490 op_own = cqe->op_own;
491 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
492 n = rte_be_to_cpu_32(cqe->byte_cnt);
497 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
499 used = RTE_MIN(used, (1U << rxq->elts_n) - 1);
504 * DPDK callback to check the status of an Rx descriptor.
509 * The index of the descriptor in the ring.
512 * The status of the Rx descriptor.
515 mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
517 struct mlx5_rxq_data *rxq = rx_queue;
518 struct mlx5_rxq_ctrl *rxq_ctrl =
519 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
520 struct rte_eth_dev *dev = ETH_DEV(rxq_ctrl->priv);
522 if (dev->rx_pkt_burst != mlx5_rx_burst) {
526 if (offset >= (1 << rxq->elts_n)) {
530 if (offset < rx_queue_count(rxq))
531 return RTE_ETH_RX_DESC_DONE;
532 return RTE_ETH_RX_DESC_AVAIL;
536 * DPDK callback to get the number of used descriptors in an RX queue.
539 * Pointer to the device structure.
545 * The number of used Rx descriptors.
546 * -EINVAL if the queue is invalid
549 mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
551 struct mlx5_priv *priv = dev->data->dev_private;
552 struct mlx5_rxq_data *rxq;
554 if (dev->rx_pkt_burst != mlx5_rx_burst) {
558 rxq = (*priv->rxqs)[rx_queue_id];
563 return rx_queue_count(rxq);
566 #define MLX5_SYSTEM_LOG_DIR "/var/log"
568 * Dump debug information to log file.
573 * If not NULL this string is printed as a header to the output
574 * and the output will be in hexadecimal view.
576 * This is the buffer address to print out.
578 * The number of bytes to dump out.
581 mlx5_dump_debug_information(const char *fname, const char *hex_title,
582 const void *buf, unsigned int hex_len)
586 MKSTR(path, "%s/%s", MLX5_SYSTEM_LOG_DIR, fname);
587 fd = fopen(path, "a+");
589 DRV_LOG(WARNING, "cannot open %s for debug dump", path);
590 MKSTR(path2, "./%s", fname);
591 fd = fopen(path2, "a+");
593 DRV_LOG(ERR, "cannot open %s for debug dump", path2);
596 DRV_LOG(INFO, "New debug dump in file %s", path2);
598 DRV_LOG(INFO, "New debug dump in file %s", path);
601 rte_hexdump(fd, hex_title, buf, hex_len);
603 fprintf(fd, "%s", (const char *)buf);
604 fprintf(fd, "\n\n\n");
609 * Move QP from error state to running state and initialize indexes.
612 * Pointer to TX queue control structure.
615 * 0 on success, else -1.
618 tx_recover_qp(struct mlx5_txq_ctrl *txq_ctrl)
620 struct mlx5_mp_arg_queue_state_modify sm = {
622 .queue_id = txq_ctrl->txq.idx,
625 if (mlx5_queue_state_modify(ETH_DEV(txq_ctrl->priv), &sm))
627 txq_ctrl->txq.wqe_ci = 0;
628 txq_ctrl->txq.wqe_pi = 0;
629 txq_ctrl->txq.elts_comp = 0;
633 /* Return 1 if the error CQE has already been marked as seen, otherwise mark it and return 0. */
635 check_err_cqe_seen(volatile struct mlx5_err_cqe *err_cqe)
637 static const uint8_t magic[] = "seen";
641 for (i = 0; i < sizeof(magic); ++i)
642 if (!ret || err_cqe->rsvd1[i] != magic[i]) {
644 err_cqe->rsvd1[i] = magic[i];
653 * Pointer to TX queue structure.
655 * Pointer to the error CQE.
658 * Negative value if queue recovery failed, otherwise
659 * the error completion entry is handled successfully.
662 mlx5_tx_error_cqe_handle(struct mlx5_txq_data *restrict txq,
663 volatile struct mlx5_err_cqe *err_cqe)
665 if (err_cqe->syndrome != MLX5_CQE_SYNDROME_WR_FLUSH_ERR) {
666 const uint16_t wqe_m = ((1 << txq->wqe_n) - 1);
667 struct mlx5_txq_ctrl *txq_ctrl =
668 container_of(txq, struct mlx5_txq_ctrl, txq);
669 uint16_t new_wqe_pi = rte_be_to_cpu_16(err_cqe->wqe_counter);
670 int seen = check_err_cqe_seen(err_cqe);
672 if (!seen && txq_ctrl->dump_file_n <
673 txq_ctrl->priv->config.max_dump_files_num) {
674 MKSTR(err_str, "Unexpected CQE error syndrome "
675 "0x%02x CQN = %u SQN = %u wqe_counter = %u "
676 "wq_ci = %u cq_ci = %u", err_cqe->syndrome,
677 txq->cqe_s, txq->qp_num_8s >> 8,
678 rte_be_to_cpu_16(err_cqe->wqe_counter),
679 txq->wqe_ci, txq->cq_ci);
680 MKSTR(name, "dpdk_mlx5_port_%u_txq_%u_index_%u_%u",
681 PORT_ID(txq_ctrl->priv), txq->idx,
682 txq_ctrl->dump_file_n, (uint32_t)rte_rdtsc());
683 mlx5_dump_debug_information(name, NULL, err_str, 0);
684 mlx5_dump_debug_information(name, "MLX5 Error CQ:",
685 (const void *)((uintptr_t)
689 mlx5_dump_debug_information(name, "MLX5 Error SQ:",
690 (const void *)((uintptr_t)
694 txq_ctrl->dump_file_n++;
698 * Count errors in WQE units.
699 * Later this can be improved to count error packets,
700 * for example, by parsing the SQ to find how many packets
701 * should be counted for each WQE.
703 txq->stats.oerrors += ((txq->wqe_ci & wqe_m) -
705 if (tx_recover_qp(txq_ctrl)) {
706 /* Recovering failed - retry later on the same WQE. */
709 /* Release all the remaining buffers. */
710 txq_free_elts(txq_ctrl);
716 * Translate RX completion flags to packet type.
719 * Pointer to RX queue structure.
723 * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
726 * Packet type for struct rte_mbuf.
728 static inline uint32_t
729 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
732 uint8_t pinfo = cqe->pkt_info;
733 uint16_t ptype = cqe->hdr_type_etc;
736 * The index to the array should have:
737 * bit[1:0] = l3_hdr_type
738 * bit[4:2] = l4_hdr_type
741 * bit[7] = outer_l3_type
743 idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10);
744 return mlx5_ptype_table[idx] | rxq->tunnel * !!(idx & (1 << 6));
748 * Initialize Rx WQ and indexes.
751 * Pointer to RX queue structure.
754 mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
756 const unsigned int wqe_n = 1 << rxq->elts_n;
759 for (i = 0; (i != wqe_n); ++i) {
760 volatile struct mlx5_wqe_data_seg *scat;
764 if (mlx5_rxq_mprq_enabled(rxq)) {
765 struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[i];
767 scat = &((volatile struct mlx5_wqe_mprq *)
769 addr = (uintptr_t)mlx5_mprq_buf_addr(buf,
770 1 << rxq->strd_num_n);
771 byte_count = (1 << rxq->strd_sz_n) *
772 (1 << rxq->strd_num_n);
774 struct rte_mbuf *buf = (*rxq->elts)[i];
776 scat = &((volatile struct mlx5_wqe_data_seg *)
778 addr = rte_pktmbuf_mtod(buf, uintptr_t);
779 byte_count = DATA_LEN(buf);
781 /* scat->addr must be able to store a pointer. */
782 assert(sizeof(scat->addr) >= sizeof(uintptr_t));
783 *scat = (struct mlx5_wqe_data_seg){
784 .addr = rte_cpu_to_be_64(addr),
785 .byte_count = rte_cpu_to_be_32(byte_count),
786 .lkey = mlx5_rx_addr2mr(rxq, addr),
789 rxq->consumed_strd = 0;
790 rxq->decompressed = 0;
792 rxq->zip = (struct rxq_zip){
795 /* Update doorbell counter. */
796 rxq->rq_ci = wqe_n >> rxq->sges_n;
798 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
802 * Modify a Verbs/DevX queue state.
803 * This must be called from the primary process.
806 * Pointer to Ethernet device.
808 * State modify request parameters.
811 * 0 in case of success else non-zero value and rte_errno is set.
814 mlx5_queue_state_modify_primary(struct rte_eth_dev *dev,
815 const struct mlx5_mp_arg_queue_state_modify *sm)
818 struct mlx5_priv *priv = dev->data->dev_private;
821 struct mlx5_rxq_data *rxq = (*priv->rxqs)[sm->queue_id];
822 struct mlx5_rxq_ctrl *rxq_ctrl =
823 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
825 if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV) {
826 struct ibv_wq_attr mod = {
827 .attr_mask = IBV_WQ_ATTR_STATE,
828 .wq_state = sm->state,
831 ret = mlx5_glue->modify_wq(rxq_ctrl->obj->wq, &mod);
832 } else { /* rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ. */
833 struct mlx5_devx_modify_rq_attr rq_attr;
835 memset(&rq_attr, 0, sizeof(rq_attr));
836 if (sm->state == IBV_WQS_RESET) {
837 rq_attr.rq_state = MLX5_RQC_STATE_ERR;
838 rq_attr.state = MLX5_RQC_STATE_RST;
839 } else if (sm->state == IBV_WQS_RDY) {
840 rq_attr.rq_state = MLX5_RQC_STATE_RST;
841 rq_attr.state = MLX5_RQC_STATE_RDY;
842 } else if (sm->state == IBV_WQS_ERR) {
843 rq_attr.rq_state = MLX5_RQC_STATE_RDY;
844 rq_attr.state = MLX5_RQC_STATE_ERR;
846 ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq,
850 DRV_LOG(ERR, "Cannot change Rx WQ state to %u - %s",
851 sm->state, strerror(errno));
856 struct mlx5_txq_data *txq = (*priv->txqs)[sm->queue_id];
857 struct mlx5_txq_ctrl *txq_ctrl =
858 container_of(txq, struct mlx5_txq_ctrl, txq);
859 struct ibv_qp_attr mod = {
860 .qp_state = IBV_QPS_RESET,
861 .port_num = (uint8_t)priv->ibv_port,
863 struct ibv_qp *qp = txq_ctrl->obj->qp;
865 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
867 DRV_LOG(ERR, "Cannot change the Tx QP state to RESET "
868 "%s", strerror(errno));
872 mod.qp_state = IBV_QPS_INIT;
873 ret = mlx5_glue->modify_qp(qp, &mod,
874 (IBV_QP_STATE | IBV_QP_PORT));
876 DRV_LOG(ERR, "Cannot change Tx QP state to INIT %s",
881 mod.qp_state = IBV_QPS_RTR;
882 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
884 DRV_LOG(ERR, "Cannot change Tx QP state to RTR %s",
889 mod.qp_state = IBV_QPS_RTS;
890 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
892 DRV_LOG(ERR, "Cannot change Tx QP state to RTS %s",
902 * Modify a Verbs queue state.
905 * Pointer to Ethernet device.
907 * State modify request parameters.
910 * 0 in case of success else non-zero value.
913 mlx5_queue_state_modify(struct rte_eth_dev *dev,
914 struct mlx5_mp_arg_queue_state_modify *sm)
918 switch (rte_eal_process_type()) {
919 case RTE_PROC_PRIMARY:
920 ret = mlx5_queue_state_modify_primary(dev, sm);
922 case RTE_PROC_SECONDARY:
923 ret = mlx5_mp_req_queue_state_modify(dev, sm);
933 * The function moves the RQ state to RESET when the first error CQE is
934 * seen, then the caller's function loop drains the CQ. When the CQ is empty,
935 * it moves the RQ state to READY and reinitializes the RQ.
936 * Identifying the next CQE and counting errors are the caller's responsibility.
939 * Pointer to RX queue structure.
941 * 1 when called from vectorized Rx burst, need to prepare mbufs for the RQ.
942 * 0 when called from non-vectorized Rx burst.
945 * -1 in case of recovery error, otherwise the CQE status.
948 mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
950 const uint16_t cqe_n = 1 << rxq->cqe_n;
951 const uint16_t cqe_mask = cqe_n - 1;
952 const unsigned int wqe_n = 1 << rxq->elts_n;
953 struct mlx5_rxq_ctrl *rxq_ctrl =
954 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
956 volatile struct mlx5_cqe *cqe;
957 volatile struct mlx5_err_cqe *err_cqe;
959 .cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask],
961 struct mlx5_mp_arg_queue_state_modify sm;
964 switch (rxq->err_state) {
965 case MLX5_RXQ_ERR_STATE_NO_ERROR:
966 rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_RESET;
968 case MLX5_RXQ_ERR_STATE_NEED_RESET:
970 sm.queue_id = rxq->idx;
971 sm.state = IBV_WQS_RESET;
972 if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv), &sm))
974 if (rxq_ctrl->dump_file_n <
975 rxq_ctrl->priv->config.max_dump_files_num) {
976 MKSTR(err_str, "Unexpected CQE error syndrome "
977 "0x%02x CQN = %u RQN = %u wqe_counter = %u"
978 " rq_ci = %u cq_ci = %u", u.err_cqe->syndrome,
979 rxq->cqn, rxq_ctrl->wqn,
980 rte_be_to_cpu_16(u.err_cqe->wqe_counter),
981 rxq->rq_ci << rxq->sges_n, rxq->cq_ci);
982 MKSTR(name, "dpdk_mlx5_port_%u_rxq_%u_%u",
983 rxq->port_id, rxq->idx, (uint32_t)rte_rdtsc());
984 mlx5_dump_debug_information(name, NULL, err_str, 0);
985 mlx5_dump_debug_information(name, "MLX5 Error CQ:",
986 (const void *)((uintptr_t)
988 sizeof(*u.cqe) * cqe_n);
989 mlx5_dump_debug_information(name, "MLX5 Error RQ:",
990 (const void *)((uintptr_t)
993 rxq_ctrl->dump_file_n++;
995 rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_READY;
997 case MLX5_RXQ_ERR_STATE_NEED_READY:
998 ret = check_cqe(u.cqe, cqe_n, rxq->cq_ci);
999 if (ret == MLX5_CQE_STATUS_HW_OWN) {
1001 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1004 * The RQ consumer index must be zeroed while moving
1005 * from RESET state to RDY state.
1007 *rxq->rq_db = rte_cpu_to_be_32(0);
1010 sm.queue_id = rxq->idx;
1011 sm.state = IBV_WQS_RDY;
1012 if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv),
1016 const uint16_t q_mask = wqe_n - 1;
1018 struct rte_mbuf **elt;
1020 unsigned int n = wqe_n - (rxq->rq_ci -
1023 for (i = 0; i < (int)n; ++i) {
1024 elt_idx = (rxq->rq_ci + i) & q_mask;
1025 elt = &(*rxq->elts)[elt_idx];
1026 *elt = rte_mbuf_raw_alloc(rxq->mp);
1028 for (i--; i >= 0; --i) {
1029 elt_idx = (rxq->rq_ci +
1033 rte_pktmbuf_free_seg
1039 for (i = 0; i < (int)wqe_n; ++i) {
1040 elt = &(*rxq->elts)[i];
1042 (uint16_t)((*elt)->buf_len -
1043 rte_pktmbuf_headroom(*elt));
1045 /* Padding with a fake mbuf for vec Rx. */
1046 for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
1047 (*rxq->elts)[wqe_n + i] =
1050 mlx5_rxq_initialize(rxq);
1051 rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
1060 * Get size of the next packet for a given CQE. For compressed CQEs, the
1061 * consumer index is updated only once all packets of the current one have
1065 * Pointer to RX queue.
1069 * Store pointer to mini-CQE if compressed. Otherwise, the pointer is not
1073 * 0 in case of empty CQE, otherwise the packet size in bytes.
1076 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
1077 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe)
1079 struct rxq_zip *zip = &rxq->zip;
1080 uint16_t cqe_n = cqe_cnt + 1;
1086 /* Process compressed data in the CQE and mini arrays. */
1088 volatile struct mlx5_mini_cqe8 (*mc)[8] =
1089 (volatile struct mlx5_mini_cqe8 (*)[8])
1090 (uintptr_t)(&(*rxq->cqes)[zip->ca &
1093 len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt);
1094 *mcqe = &(*mc)[zip->ai & 7];
1095 if ((++zip->ai & 7) == 0) {
1096 /* Invalidate consumed CQEs */
1099 while (idx != end) {
1100 (*rxq->cqes)[idx & cqe_cnt].op_own =
1101 MLX5_CQE_INVALIDATE;
1105 * Increment consumer index to skip the number
1106 * of CQEs consumed. Hardware leaves holes in
1107 * the CQ ring for software use.
1112 if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) {
1113 /* Invalidate the rest */
1117 while (idx != end) {
1118 (*rxq->cqes)[idx & cqe_cnt].op_own =
1119 MLX5_CQE_INVALIDATE;
1122 rxq->cq_ci = zip->cq_ci;
1126 * No compressed data, get next CQE and verify if it is
1133 ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
1134 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
1135 if (unlikely(ret == MLX5_CQE_STATUS_ERR ||
1137 ret = mlx5_rx_err_handle(rxq, 0);
1138 if (ret == MLX5_CQE_STATUS_HW_OWN ||
1146 op_own = cqe->op_own;
1147 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) {
1148 volatile struct mlx5_mini_cqe8 (*mc)[8] =
1149 (volatile struct mlx5_mini_cqe8 (*)[8])
1150 (uintptr_t)(&(*rxq->cqes)
1154 /* Fix endianness. */
1155 zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
1157 * Current mini array position is the one
1158 * returned by check_cqe().
1160 * If the completion comprises several mini arrays,
1161 * as a special case the second one is located
1162 * 7 CQEs after the initial CQE, instead of 8
1163 * as for the subsequent ones.
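* For instance, if the first mini array sits at CQ index N, the
* following ones are expected at N + 7, N + 15, N + 23 and so on.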
1165 zip->ca = rxq->cq_ci;
1166 zip->na = zip->ca + 7;
1167 /* Compute the next non compressed CQE. */
1169 zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
1170 /* Get packet size to return. */
1171 len = rte_be_to_cpu_32((*mc)[0].byte_cnt);
1174 /* Prefetch all to be invalidated */
1177 while (idx != end) {
1178 rte_prefetch0(&(*rxq->cqes)[(idx) &
1183 len = rte_be_to_cpu_32(cqe->byte_cnt);
1186 if (unlikely(rxq->err_state)) {
1187 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1188 ++rxq->stats.idropped;
1196 * Translate RX completion flags to offload flags.
1202 * Offload flags (ol_flags) for struct rte_mbuf.
1204 static inline uint32_t
1205 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe)
1207 uint32_t ol_flags = 0;
1208 uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);
1212 MLX5_CQE_RX_L3_HDR_VALID,
1213 PKT_RX_IP_CKSUM_GOOD) |
1215 MLX5_CQE_RX_L4_HDR_VALID,
1216 PKT_RX_L4_CKSUM_GOOD);
1221 * Fill in mbuf fields from RX completion flags.
1222 * Note that pkt->ol_flags should be initialized outside of this function.
1225 * Pointer to RX queue.
1230 * @param rss_hash_res
1231 * Packet RSS Hash result.
1234 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
1235 volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res)
1237 /* Update packet information. */
1238 pkt->packet_type = rxq_cq_to_pkt_type(rxq, cqe);
1239 if (rss_hash_res && rxq->rss_hash) {
1240 pkt->hash.rss = rss_hash_res;
1241 pkt->ol_flags |= PKT_RX_RSS_HASH;
1243 if (rxq->mark && MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) {
1244 pkt->ol_flags |= PKT_RX_FDIR;
1245 if (cqe->sop_drop_qpn !=
1246 rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) {
1247 uint32_t mark = cqe->sop_drop_qpn;
1249 pkt->ol_flags |= PKT_RX_FDIR_ID;
1250 pkt->hash.fdir.hi = mlx5_flow_mark_get(mark);
1253 if (rte_flow_dynf_metadata_avail() && cqe->flow_table_metadata) {
1254 pkt->ol_flags |= PKT_RX_DYNF_METADATA;
1255 *RTE_FLOW_DYNF_METADATA(pkt) = cqe->flow_table_metadata;
1258 pkt->ol_flags |= rxq_cq_to_ol_flags(cqe);
1259 if (rxq->vlan_strip &&
1260 (cqe->hdr_type_etc & rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) {
1261 pkt->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1262 pkt->vlan_tci = rte_be_to_cpu_16(cqe->vlan_info);
1264 if (rxq->hw_timestamp) {
1265 pkt->timestamp = rte_be_to_cpu_64(cqe->timestamp);
1266 pkt->ol_flags |= PKT_RX_TIMESTAMP;
1271 * DPDK callback for RX.
1274 * Generic pointer to RX queue structure.
1276 * Array to store received packets.
1278 * Maximum number of packets in array.
1281 * Number of packets successfully received (<= pkts_n).
1284 mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1286 struct mlx5_rxq_data *rxq = dpdk_rxq;
1287 const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1;
1288 const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1;
1289 const unsigned int sges_n = rxq->sges_n;
1290 struct rte_mbuf *pkt = NULL;
1291 struct rte_mbuf *seg = NULL;
1292 volatile struct mlx5_cqe *cqe =
1293 &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1295 unsigned int rq_ci = rxq->rq_ci << sges_n;
1296 int len = 0; /* keep its value across iterations. */
1299 unsigned int idx = rq_ci & wqe_cnt;
1300 volatile struct mlx5_wqe_data_seg *wqe =
1301 &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx];
1302 struct rte_mbuf *rep = (*rxq->elts)[idx];
1303 volatile struct mlx5_mini_cqe8 *mcqe = NULL;
1304 uint32_t rss_hash_res;
1312 rep = rte_mbuf_raw_alloc(rxq->mp);
1313 if (unlikely(rep == NULL)) {
1314 ++rxq->stats.rx_nombuf;
1317 * no buffers before we even started,
1318 * bail out silently.
1322 while (pkt != seg) {
1323 assert(pkt != (*rxq->elts)[idx]);
1327 rte_mbuf_raw_free(pkt);
1333 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1334 len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe);
1336 rte_mbuf_raw_free(rep);
1340 assert(len >= (rxq->crc_present << 2));
1341 pkt->ol_flags &= EXT_ATTACHED_MBUF;
1342 /* If compressed, take hash result from mini-CQE. */
1343 rss_hash_res = rte_be_to_cpu_32(mcqe == NULL ?
1345 mcqe->rx_hash_result);
1346 rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
1347 if (rxq->crc_present)
1348 len -= RTE_ETHER_CRC_LEN;
1350 if (cqe->lro_num_seg > 1) {
1352 (rte_pktmbuf_mtod(pkt, uint8_t *), cqe,
1354 pkt->ol_flags |= PKT_RX_LRO;
1355 pkt->tso_segsz = len / cqe->lro_num_seg;
1358 DATA_LEN(rep) = DATA_LEN(seg);
1359 PKT_LEN(rep) = PKT_LEN(seg);
1360 SET_DATA_OFF(rep, DATA_OFF(seg));
1361 PORT(rep) = PORT(seg);
1362 (*rxq->elts)[idx] = rep;
1364 * Fill NIC descriptor with the new buffer. The lkey and size
1365 * of the buffers are already known, only the buffer address
1368 wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
1369 /* If there's only one MR, no need to replace LKey in WQE. */
1370 if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
1371 wqe->lkey = mlx5_rx_mb2mr(rxq, rep);
1372 if (len > DATA_LEN(seg)) {
1373 len -= DATA_LEN(seg);
1378 DATA_LEN(seg) = len;
1379 #ifdef MLX5_PMD_SOFT_COUNTERS
1380 /* Increment bytes counter. */
1381 rxq->stats.ibytes += PKT_LEN(pkt);
1383 /* Return packet. */
1388 /* Align consumer index to the next stride. */
1393 if (unlikely((i == 0) && ((rq_ci >> sges_n) == rxq->rq_ci)))
1395 /* Update the consumer index. */
1396 rxq->rq_ci = rq_ci >> sges_n;
1398 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1400 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
1401 #ifdef MLX5_PMD_SOFT_COUNTERS
1402 /* Increment packets counter. */
1403 rxq->stats.ipackets += i;
1409 * Update LRO packet TCP header.
1410 * The HW LRO feature doesn't update the TCP header after coalescing the
1411 * TCP segments but supplies information in the CQE so SW can fill it in.
1414 * Pointer to the TCP header.
1416 * Pointer to the completion entry.
1418 * The L3 pseudo-header checksum.
1421 mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *restrict tcp,
1422 volatile struct mlx5_cqe *restrict cqe,
1425 uint8_t l4_type = (rte_be_to_cpu_16(cqe->hdr_type_etc) &
1426 MLX5_CQE_L4_TYPE_MASK) >> MLX5_CQE_L4_TYPE_SHIFT;
1428 * The HW calculates only the TCP payload checksum, so SW needs to
1429 * complete the TCP header checksum and the L3 pseudo-header checksum.
1431 uint32_t csum = phcsum + cqe->csum;
1433 if (l4_type == MLX5_L4_HDR_TYPE_TCP_EMPTY_ACK ||
1434 l4_type == MLX5_L4_HDR_TYPE_TCP_WITH_ACL) {
1435 tcp->tcp_flags |= RTE_TCP_ACK_FLAG;
1436 tcp->recv_ack = cqe->lro_ack_seq_num;
1437 tcp->rx_win = cqe->lro_tcp_win;
1439 if (cqe->lro_tcppsh_abort_dupack & MLX5_CQE_LRO_PUSH_MASK)
1440 tcp->tcp_flags |= RTE_TCP_PSH_FLAG;
1442 csum += rte_raw_cksum(tcp, (tcp->data_off & 0xF) * 4);
1443 csum = ((csum & 0xffff0000) >> 16) + (csum & 0xffff);
1444 csum = (~csum) & 0xffff;
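/*
 * For instance, a 32-bit running sum of 0x00013ffe folds to
 * 0x0001 + 0x3ffe = 0x3fff, whose one's complement is 0xc000.
 */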
1451 * Update LRO packet headers.
1452 * The HW LRO feature doesn't update the L3/TCP headers after coalescing
1453 * the TCP segments but supplies information in the CQE so SW can fill them in.
1456 * The packet address.
1458 * Pointer to the completion entry.
1460 * The packet length.
1463 mlx5_lro_update_hdr(uint8_t *restrict padd,
1464 volatile struct mlx5_cqe *restrict cqe,
1468 struct rte_ether_hdr *eth;
1469 struct rte_vlan_hdr *vlan;
1470 struct rte_ipv4_hdr *ipv4;
1471 struct rte_ipv6_hdr *ipv6;
1472 struct rte_tcp_hdr *tcp;
1477 uint16_t proto = h.eth->ether_type;
1481 while (proto == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
1482 proto == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
1483 proto = h.vlan->eth_proto;
1486 if (proto == RTE_BE16(RTE_ETHER_TYPE_IPV4)) {
1487 h.ipv4->time_to_live = cqe->lro_min_ttl;
1488 h.ipv4->total_length = rte_cpu_to_be_16(len - (h.hdr - padd));
1489 h.ipv4->hdr_checksum = 0;
1490 h.ipv4->hdr_checksum = rte_ipv4_cksum(h.ipv4);
1491 phcsum = rte_ipv4_phdr_cksum(h.ipv4, 0);
1494 h.ipv6->hop_limits = cqe->lro_min_ttl;
1495 h.ipv6->payload_len = rte_cpu_to_be_16(len - (h.hdr - padd) -
1497 phcsum = rte_ipv6_phdr_cksum(h.ipv6, 0);
1500 mlx5_lro_update_tcp_hdr(h.tcp, cqe, phcsum);
1504 mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
1506 struct mlx5_mprq_buf *buf = opaque;
1508 if (rte_atomic16_read(&buf->refcnt) == 1) {
1509 rte_mempool_put(buf->mp, buf);
1510 } else if (rte_atomic16_add_return(&buf->refcnt, -1) == 0) {
1511 rte_atomic16_set(&buf->refcnt, 1);
1512 rte_mempool_put(buf->mp, buf);
1517 mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf)
1519 mlx5_mprq_buf_free_cb(NULL, buf);
1523 mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx,
1524 const unsigned int strd_n)
1526 struct mlx5_mprq_buf *rep = rxq->mprq_repl;
1527 volatile struct mlx5_wqe_data_seg *wqe =
1528 &((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg;
1531 assert(rep != NULL);
1532 /* Replace MPRQ buf. */
1533 (*rxq->mprq_bufs)[rq_idx] = rep;
1535 addr = mlx5_mprq_buf_addr(rep, strd_n);
1536 wqe->addr = rte_cpu_to_be_64((uintptr_t)addr);
1537 /* If there's only one MR, no need to replace LKey in WQE. */
1538 if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
1539 wqe->lkey = mlx5_rx_addr2mr(rxq, (uintptr_t)addr);
1540 /* Stash a mbuf for next replacement. */
1541 if (likely(!rte_mempool_get(rxq->mprq_mp, (void **)&rep)))
1542 rxq->mprq_repl = rep;
1544 rxq->mprq_repl = NULL;
1548 * DPDK callback for RX with Multi-Packet RQ support.
1551 * Generic pointer to RX queue structure.
1553 * Array to store received packets.
1555 * Maximum number of packets in array.
1558 * Number of packets successfully received (<= pkts_n).
1561 mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1563 struct mlx5_rxq_data *rxq = dpdk_rxq;
1564 const unsigned int strd_n = 1 << rxq->strd_num_n;
1565 const unsigned int strd_sz = 1 << rxq->strd_sz_n;
1566 const unsigned int strd_shift =
1567 MLX5_MPRQ_STRIDE_SHIFT_BYTE * rxq->strd_shift_en;
1568 const unsigned int cq_mask = (1 << rxq->cqe_n) - 1;
1569 const unsigned int wq_mask = (1 << rxq->elts_n) - 1;
1570 volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
1572 uint32_t rq_ci = rxq->rq_ci;
1573 uint16_t consumed_strd = rxq->consumed_strd;
1574 uint16_t headroom_sz = rxq->strd_headroom_en * RTE_PKTMBUF_HEADROOM;
1575 struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
1577 while (i < pkts_n) {
1578 struct rte_mbuf *pkt;
1586 volatile struct mlx5_mini_cqe8 *mcqe = NULL;
1587 uint32_t rss_hash_res = 0;
1588 uint8_t lro_num_seg;
1590 if (consumed_strd == strd_n) {
1591 /* Replace WQE only if the buffer is still in use. */
1592 if (rte_atomic16_read(&buf->refcnt) > 1) {
1593 mprq_buf_replace(rxq, rq_ci & wq_mask, strd_n);
1594 /* Release the old buffer. */
1595 mlx5_mprq_buf_free(buf);
1596 } else if (unlikely(rxq->mprq_repl == NULL)) {
1597 struct mlx5_mprq_buf *rep;
1600 * Currently, the MPRQ mempool is out of buffers
1601 * and memcpy is done regardless of the size of the Rx
1602 * packet. Retry allocation to get back to
1605 if (!rte_mempool_get(rxq->mprq_mp,
1607 rxq->mprq_repl = rep;
1609 /* Advance to the next WQE. */
1612 buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
1614 cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
1615 ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe);
1619 strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
1620 MLX5_MPRQ_STRIDE_NUM_SHIFT;
1622 consumed_strd += strd_cnt;
1623 if (byte_cnt & MLX5_MPRQ_FILLER_MASK)
1626 rss_hash_res = rte_be_to_cpu_32(cqe->rx_hash_res);
1627 strd_idx = rte_be_to_cpu_16(cqe->wqe_counter);
1629 /* mini-CQE for MPRQ doesn't have hash result. */
1630 strd_idx = rte_be_to_cpu_16(mcqe->stride_idx);
1632 assert(strd_idx < strd_n);
1633 assert(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) & wq_mask));
1634 lro_num_seg = cqe->lro_num_seg;
1636 * Currently configured to receive a packet per stride. But if
1637 * MTU is adjusted through the kernel interface, the device could
1638 * consume multiple strides without raising an error. In this
1639 * case, the packet should be dropped because it is bigger than
1640 * the max_rx_pkt_len.
1642 if (unlikely(!lro_num_seg && strd_cnt > 1)) {
1643 ++rxq->stats.idropped;
1646 pkt = rte_pktmbuf_alloc(rxq->mp);
1647 if (unlikely(pkt == NULL)) {
1648 ++rxq->stats.rx_nombuf;
1651 len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
1652 assert((int)len >= (rxq->crc_present << 2));
1653 if (rxq->crc_present)
1654 len -= RTE_ETHER_CRC_LEN;
1655 offset = strd_idx * strd_sz + strd_shift;
1656 addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf, strd_n), offset);
1658 * Memcpy packets to the target mbuf if:
1659 * - the size of the packet is smaller than mprq_max_memcpy_len, or
1660 * - the Mempool for Multi-Packet RQ is out of buffers.
1662 if (len <= rxq->mprq_max_memcpy_len || rxq->mprq_repl == NULL) {
1664 * When memcpy'ing a packet because the mempool is out of buffers,
1665 * the packet must be smaller than the target mbuf.
1667 if (unlikely(rte_pktmbuf_tailroom(pkt) < len)) {
1668 rte_pktmbuf_free_seg(pkt);
1669 ++rxq->stats.idropped;
1672 rte_memcpy(rte_pktmbuf_mtod(pkt, void *), addr, len);
1673 DATA_LEN(pkt) = len;
1675 rte_iova_t buf_iova;
1676 struct rte_mbuf_ext_shared_info *shinfo;
1677 uint16_t buf_len = strd_cnt * strd_sz;
1680 /* Increment the refcnt of the whole chunk. */
1681 rte_atomic16_add_return(&buf->refcnt, 1);
1682 assert((uint16_t)rte_atomic16_read(&buf->refcnt) <=
1684 buf_addr = RTE_PTR_SUB(addr, headroom_sz);
1686 * MLX5 device doesn't use iova but it is necessary in a
1687 * case where the Rx packet is transmitted via a
1690 buf_iova = rte_mempool_virt2iova(buf) +
1691 RTE_PTR_DIFF(buf_addr, buf);
1692 shinfo = &buf->shinfos[strd_idx];
1693 rte_mbuf_ext_refcnt_set(shinfo, 1);
1695 * EXT_ATTACHED_MBUF will be set in pkt->ol_flags when
1696 * attaching the stride to the mbuf and more offload flags
1697 * will be added below by calling rxq_cq_to_mbuf().
1698 * Other fields will be overwritten.
1700 rte_pktmbuf_attach_extbuf(pkt, buf_addr, buf_iova,
1702 /* Set mbuf head-room. */
1703 pkt->data_off = headroom_sz;
1704 assert(pkt->ol_flags == EXT_ATTACHED_MBUF);
1706 * Prevent potential overflow due to MTU change through
1709 if (unlikely(rte_pktmbuf_tailroom(pkt) < len)) {
1710 rte_pktmbuf_free_seg(pkt);
1711 ++rxq->stats.idropped;
1714 DATA_LEN(pkt) = len;
1716 * An LRO packet may consume all the stride memory; in this
1717 * case packet head-room space is not guaranteed, so an empty
1718 * mbuf must be added for the head-room.
1720 if (!rxq->strd_headroom_en) {
1721 struct rte_mbuf *headroom_mbuf =
1722 rte_pktmbuf_alloc(rxq->mp);
1724 if (unlikely(headroom_mbuf == NULL)) {
1725 rte_pktmbuf_free_seg(pkt);
1726 ++rxq->stats.rx_nombuf;
1729 PORT(pkt) = rxq->port_id;
1730 NEXT(headroom_mbuf) = pkt;
1731 pkt = headroom_mbuf;
1735 rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
1736 if (lro_num_seg > 1) {
1737 mlx5_lro_update_hdr(addr, cqe, len);
1738 pkt->ol_flags |= PKT_RX_LRO;
1739 pkt->tso_segsz = strd_sz;
1742 PORT(pkt) = rxq->port_id;
1743 #ifdef MLX5_PMD_SOFT_COUNTERS
1744 /* Increment bytes counter. */
1745 rxq->stats.ibytes += PKT_LEN(pkt);
1747 /* Return packet. */
1751 /* Update the consumer indexes. */
1752 rxq->consumed_strd = consumed_strd;
1754 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1755 if (rq_ci != rxq->rq_ci) {
1758 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
1760 #ifdef MLX5_PMD_SOFT_COUNTERS
1761 /* Increment packets counter. */
1762 rxq->stats.ipackets += i;
1768 * Dummy DPDK callback for TX.
1770 * This function is used to temporarily replace the real callback during
1771 * unsafe control operations on the queue, or in case of error.
1774 * Generic pointer to TX queue structure.
1776 * Packets to transmit.
1778 * Number of packets in array.
1781 * Number of packets successfully transmitted (<= pkts_n).
1784 removed_tx_burst(void *dpdk_txq __rte_unused,
1785 struct rte_mbuf **pkts __rte_unused,
1786 uint16_t pkts_n __rte_unused)
1793 * Dummy DPDK callback for RX.
1795 * This function is used to temporarily replace the real callback during
1796 * unsafe control operations on the queue, or in case of error.
1799 * Generic pointer to RX queue structure.
1801 * Array to store received packets.
1803 * Maximum number of packets in array.
1806 * Number of packets successfully received (<= pkts_n).
1809 removed_rx_burst(void *dpdk_rxq __rte_unused,
1810 struct rte_mbuf **pkts __rte_unused,
1811 uint16_t pkts_n __rte_unused)
1818 * Vectorized Rx/Tx routines are not compiled in when required vector
1819 * instructions are not supported on a target architecture. The following null
1820 * stubs are needed for linkage when those are not included outside of this file
1821 * (e.g. mlx5_rxtx_vec_sse.c for x86).
1825 mlx5_rx_burst_vec(void *dpdk_rxq __rte_unused,
1826 struct rte_mbuf **pkts __rte_unused,
1827 uint16_t pkts_n __rte_unused)
1833 mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)
1839 mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused)
1845 * Free the mbufs from the linear array of pointers.
1848 * Pointer to the array of packets to be freed.
1850 * Number of packets to be freed.
1852 * Configured Tx offloads mask. It is fully defined at
1853 * compile time and may be used for optimization.
1855 static __rte_always_inline void
1856 mlx5_tx_free_mbuf(struct rte_mbuf **restrict pkts,
1857 unsigned int pkts_n,
1858 unsigned int olx __rte_unused)
1860 struct rte_mempool *pool = NULL;
1861 struct rte_mbuf **p_free = NULL;
1862 struct rte_mbuf *mbuf;
1863 unsigned int n_free = 0;
1866 * The implemented algorithm eliminates
1867 * copying pointers to a temporary array
1868 * for the rte_mempool_put_bulk() calls.
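* For instance, for pkts[] = {A(pool0), B(pool0), C(pool1)} the mbufs A
* and B are returned with a single rte_mempool_put_bulk(pool0, ...) call
* operating directly on the pkts[] array, and C then starts a new scan.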
1875 * Decrement mbuf reference counter, detach
1876 * indirect and external buffers if needed.
1878 mbuf = rte_pktmbuf_prefree_seg(*pkts);
1879 if (likely(mbuf != NULL)) {
1880 assert(mbuf == *pkts);
1881 if (likely(n_free != 0)) {
1882 if (unlikely(pool != mbuf->pool))
1883 /* From different pool. */
1886 /* Start new scan array. */
1893 if (unlikely(pkts_n == 0)) {
1899 * This happens if the mbuf is still referenced.
1900 * We can't put it back into the pool, skip it.
1904 if (unlikely(n_free != 0))
1905 /* There is some array to free.*/
1907 if (unlikely(pkts_n == 0))
1908 /* Last mbuf, nothing to free. */
1914 * This loop is implemented to avoid multiple
1915 * inlining of rte_mempool_put_bulk().
1921 * Free the array of pre-freed mbufs
1922 * belonging to the same memory pool.
1924 rte_mempool_put_bulk(pool, (void *)p_free, n_free);
1925 if (unlikely(mbuf != NULL)) {
1926 /* There is the request to start new scan. */
1931 if (likely(pkts_n != 0))
1934 * This is the last mbuf to be freed.
1935 * Do one more loop iteration to complete.
1936 * This is a rare case of the last unique mbuf.
1941 if (likely(pkts_n == 0))
1950 * Free the mbufs from the elts ring buffer up to the new tail.
1953 * Pointer to Tx queue structure.
1955 * Index in elts to free up to, becomes new elts tail.
1957 * Configured Tx offloads mask. It is fully defined at
1958 * compile time and may be used for optimization.
1960 static __rte_always_inline void
1961 mlx5_tx_free_elts(struct mlx5_txq_data *restrict txq,
1963 unsigned int olx __rte_unused)
1965 uint16_t n_elts = tail - txq->elts_tail;
1968 assert(n_elts <= txq->elts_s);
1970 * Implement a loop to support ring buffer wraparound
1971 * with single inlining of mlx5_tx_free_mbuf().
1976 part = txq->elts_s - (txq->elts_tail & txq->elts_m);
1977 part = RTE_MIN(part, n_elts);
1979 assert(part <= txq->elts_s);
1980 mlx5_tx_free_mbuf(&txq->elts[txq->elts_tail & txq->elts_m],
1982 txq->elts_tail += part;
1988 * Store the mbufs being sent into the elts ring buffer.
1989 * On Tx completion these mbufs will be freed.
1992 * Pointer to Tx queue structure.
1994 * Pointer to array of packets to be stored.
1996 * Number of packets to be stored.
1998 * Configured Tx offloads mask. It is fully defined at
1999 * compile time and may be used for optimization.
2001 static __rte_always_inline void
2002 mlx5_tx_copy_elts(struct mlx5_txq_data *restrict txq,
2003 struct rte_mbuf **restrict pkts,
2004 unsigned int pkts_n,
2005 unsigned int olx __rte_unused)
2008 struct rte_mbuf **elts = (struct rte_mbuf **)txq->elts;
2012 part = txq->elts_s - (txq->elts_head & txq->elts_m);
2014 assert(part <= txq->elts_s);
2015 /* This code is a good candidate for vectorizing with SIMD. */
2016 rte_memcpy((void *)(elts + (txq->elts_head & txq->elts_m)),
2018 RTE_MIN(part, pkts_n) * sizeof(struct rte_mbuf *));
2019 txq->elts_head += pkts_n;
2020 if (unlikely(part < pkts_n))
2021 /* The copy is wrapping around the elts array. */
2022 rte_memcpy((void *)elts, (void *)(pkts + part),
2023 (pkts_n - part) * sizeof(struct rte_mbuf *));
2027 * Update completion queue consuming index via doorbell
2028 * and flush the completed data buffers.
2031 * Pointer to TX queue structure.
2032 * @param last_cqe
2033 * Pointer to the last valid CQE; if not NULL, update txq->wqe_pi and flush the buffers.
2035 * Configured Tx offloads mask. It is fully defined at
2036 * compile time and may be used for optimization.
2038 static __rte_always_inline void
2039 mlx5_tx_comp_flush(struct mlx5_txq_data *restrict txq,
2040 volatile struct mlx5_cqe *last_cqe,
2041 unsigned int olx __rte_unused)
2043 if (likely(last_cqe != NULL)) {
2046 txq->wqe_pi = rte_be_to_cpu_16(last_cqe->wqe_counter);
2047 tail = txq->fcqs[(txq->cq_ci - 1) & txq->cqe_m];
2048 if (likely(tail != txq->elts_tail)) {
2049 mlx5_tx_free_elts(txq, tail, olx);
2050 assert(tail == txq->elts_tail);
2056 * Manage TX completions. This routine checks the CQ for
2057 * arrived CQEs, deduces the last accomplished WQE in SQ,
2058 * updates SQ producing index and frees all completed mbufs.
2061 * Pointer to TX queue structure.
2063 * Configured Tx offloads mask. It is fully defined at
2064 * compile time and may be used for optimization.
2066 * NOTE: not inlined intentionally, it makes the tx_burst
2067 * routine smaller, simpler and faster - from experiments.
2070 mlx5_tx_handle_completion(struct mlx5_txq_data *restrict txq,
2071 unsigned int olx __rte_unused)
2073 unsigned int count = MLX5_TX_COMP_MAX_CQE;
2074 volatile struct mlx5_cqe *last_cqe = NULL;
2075 uint16_t ci = txq->cq_ci;
2078 static_assert(MLX5_CQE_STATUS_HW_OWN < 0, "Must be negative value");
2079 static_assert(MLX5_CQE_STATUS_SW_OWN < 0, "Must be negative value");
2081 volatile struct mlx5_cqe *cqe;
2083 cqe = &txq->cqes[ci & txq->cqe_m];
2084 ret = check_cqe(cqe, txq->cqe_s, ci);
2085 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
2086 if (likely(ret != MLX5_CQE_STATUS_ERR)) {
2087 /* No new CQEs in completion queue. */
2088 assert(ret == MLX5_CQE_STATUS_HW_OWN);
2092 * Some error occurred, try to restart.
2093 * We have no barrier after the WQE related Doorbell
2094 * is written, so make sure all writes are completed
2095 * here before we might perform the SQ reset.
2099 ret = mlx5_tx_error_cqe_handle
2100 (txq, (volatile struct mlx5_err_cqe *)cqe);
2101 if (unlikely(ret < 0)) {
2103 * Some error occurred in queue error
2104 * handling, we do not advance the index
2105 * here, allowing a retry on the next call.
2110 * We are going to fetch all entries with
2111 * MLX5_CQE_SYNDROME_WR_FLUSH_ERR status.
2112 * The send queue is supposed to be empty.
2119 /* Normal transmit completion. */
2120 assert(ci != txq->cq_pi);
2121 assert((txq->fcqs[ci & txq->cqe_m] >> 16) == cqe->wqe_counter);
2125 * We have to restrict the number of processed CQEs
2126 * in one tx_burst routine call. The CQ may be large
2127 * and many CQEs may be updated by the NIC in one
2128 * transaction. Freeing buffers is time consuming,
2129 * and multiple iterations may introduce significant
2132 if (likely(--count == 0))
2135 if (likely(ci != txq->cq_ci)) {
2137 * Update completion queue consuming index
2138 * and ring doorbell to notify hardware.
2140 rte_compiler_barrier();
2142 *txq->cq_db = rte_cpu_to_be_32(ci);
2143 mlx5_tx_comp_flush(txq, last_cqe, olx);
2148 * Check if the completion request flag should be set in the last WQE.
2149 * Both pushed mbufs and WQEs are monitored and the completion request
2150 * flag is set if either threshold is reached.
2153 * Pointer to TX queue structure.
2155 * Pointer to burst routine local context.
2157 * Configured Tx offloads mask. It is fully defined at
2158 * compile time and may be used for optimization.
2160 static __rte_always_inline void
2161 mlx5_tx_request_completion(struct mlx5_txq_data *restrict txq,
2162 struct mlx5_txq_local *restrict loc,
2165 uint16_t head = txq->elts_head;
2168 part = MLX5_TXOFF_CONFIG(INLINE) ?
2169 0 : loc->pkts_sent - loc->pkts_copy;
2171 if ((uint16_t)(head - txq->elts_comp) >= MLX5_TX_COMP_THRESH ||
2172 (MLX5_TXOFF_CONFIG(INLINE) &&
2173 (uint16_t)(txq->wqe_ci - txq->wqe_comp) >= txq->wqe_thres)) {
2174 volatile struct mlx5_wqe *last = loc->wqe_last;
2176 txq->elts_comp = head;
2177 if (MLX5_TXOFF_CONFIG(INLINE))
2178 txq->wqe_comp = txq->wqe_ci;
2179 /* Request unconditional completion on last WQE. */
2180 last->cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
2181 MLX5_COMP_MODE_OFFSET);
2182 /* Save elts_head in dedicated free on completion queue. */
2184 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head;
2186 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head |
2187 (last->cseg.opcode >> 8) << 16;
2189 /* A CQE slot must always be available. */
2190 assert((txq->cq_pi - txq->cq_ci) <= txq->cqe_s);
2195 * DPDK callback to check the status of a tx descriptor.
2200 * The index of the descriptor in the ring.
2203 * The status of the tx descriptor.
2206 mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
2208 struct mlx5_txq_data *restrict txq = tx_queue;
2211 mlx5_tx_handle_completion(txq, 0);
2212 used = txq->elts_head - txq->elts_tail;
2214 return RTE_ETH_TX_DESC_FULL;
2215 return RTE_ETH_TX_DESC_DONE;
2219 * Build the Control Segment with specified opcode:
2220 * - MLX5_OPCODE_SEND
2221 * - MLX5_OPCODE_ENHANCED_MPSW
2225 * Pointer to TX queue structure.
2227 * Pointer to burst routine local context.
2229 * Pointer to WQE to fill with built Control Segment.
2231 * Supposed length of WQE in segments.
2233 * SQ WQE opcode to put into Control Segment.
2235 * Configured Tx offloads mask. It is fully defined at
2236 * compile time and may be used for optimization.
2238 static __rte_always_inline void
2239 mlx5_tx_cseg_init(struct mlx5_txq_data *restrict txq,
2240 struct mlx5_txq_local *restrict loc __rte_unused,
2241 struct mlx5_wqe *restrict wqe,
2243 unsigned int opcode,
2244 unsigned int olx __rte_unused)
2246 struct mlx5_wqe_cseg *restrict cs = &wqe->cseg;
2248 /* For legacy MPW replace the EMPW by TSO with modifier. */
2249 if (MLX5_TXOFF_CONFIG(MPW) && opcode == MLX5_OPCODE_ENHANCED_MPSW)
2250 opcode = MLX5_OPCODE_TSO | MLX5_OPC_MOD_MPW << 24;
2251 cs->opcode = rte_cpu_to_be_32((txq->wqe_ci << 8) | opcode);
2252 cs->sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
2253 cs->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
2254 MLX5_COMP_MODE_OFFSET);
2255 cs->misc = RTE_BE32(0);
2259 * Build the Ethernet Segment without inlined data.
2260 * Supports Software Parser, Checksums and VLAN
2261 * insertion Tx offload features.
2264 * Pointer to TX queue structure.
2266 * Pointer to burst routine local context.
2268 * Pointer to WQE to fill with built Ethernet Segment.
2270 * Configured Tx offloads mask. It is fully defined at
2271 * compile time and may be used for optimization.
2273 static __rte_always_inline void
2274 mlx5_tx_eseg_none(struct mlx5_txq_data *restrict txq __rte_unused,
2275 struct mlx5_txq_local *restrict loc,
2276 struct mlx5_wqe *restrict wqe,
2279 struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
2283 * Calculate and set checksum flags first, the dword field
2284 * in the segment may be shared with the Software Parser flags.
2286 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2287 es->flags = rte_cpu_to_le_32(csum);
2289 * Calculate and set Software Parser offsets and flags.
2290 * These flags are set for custom UDP and IP tunnel packets.
2292 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2293 /* Fill metadata field if needed. */
2294 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2295 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2296 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
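/*
 * The nested conditional above copies the mbuf dynamic metadata
 * only when the METADATA offload is configured and the mbuf
 * actually carries it; otherwise the field is zeroed.
 */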
2297 /* Engage VLAN tag insertion feature if requested. */
2298 if (MLX5_TXOFF_CONFIG(VLAN) &&
2299 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
2301 * We should get here only if the device supports
2302 * this feature correctly.
2304 assert(txq->vlan_en);
2305 es->inline_hdr = rte_cpu_to_be_32(MLX5_ETH_WQE_VLAN_INSERT |
2306 loc->mbuf->vlan_tci);
} else {
2308 es->inline_hdr = RTE_BE32(0);
2313 * Build the Ethernet Segment with minimal inlined data
2314 * of MLX5_ESEG_MIN_INLINE_SIZE bytes length. This is
2315 * used to fill the gap in single WQEBB WQEs.
2316 * Supports Software Parser, Checksums and VLAN
2317 * insertion Tx offload features.
2320 * Pointer to TX queue structure.
2322 * Pointer to burst routine local context.
2324 * Pointer to WQE to fill with built Ethernet Segment.
2326 * Length of VLAN tag insertion if any.
2328 * Configured Tx offloads mask. It is fully defined at
2329 * compile time and may be used for optimization.
2331 static __rte_always_inline void
2332 mlx5_tx_eseg_dmin(struct mlx5_txq_data *restrict txq __rte_unused,
2333 struct mlx5_txq_local *restrict loc,
2334 struct mlx5_wqe *restrict wqe,
2338 struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
2340 uint8_t *psrc, *pdst;
2343 * Calculate and set check sum flags first, dword field
2344 * in segment may be shared with Software Parser flags.
2346 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2347 es->flags = rte_cpu_to_le_32(csum);
2349 * Calculate and set Software Parser offsets and flags.
2350 * These flags are set for custom UDP and IP tunnel packets.
2352 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2353 /* Fill metadata field if needed. */
2354 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2355 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2356 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2357 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
(sizeof(uint16_t) +
2359 sizeof(rte_v128u32_t)),
2360 "invalid Ethernet Segment data size");
2361 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
(sizeof(uint16_t) +
2363 sizeof(struct rte_vlan_hdr) +
2364 2 * RTE_ETHER_ADDR_LEN),
2365 "invalid Ethernet Segment data size");
2366 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
2367 es->inline_hdr_sz = RTE_BE16(MLX5_ESEG_MIN_INLINE_SIZE);
2368 es->inline_data = *(unaligned_uint16_t *)psrc;
2369 psrc += sizeof(uint16_t);
2370 pdst = (uint8_t *)(es + 1);
2371 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2372 /* Implement VLAN tag insertion as part of inline data. */
2373 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
2374 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2375 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2376 /* Insert VLAN ethertype + VLAN tag. */
2377 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2378 ((RTE_ETHER_TYPE_VLAN << 16) |
2379 loc->mbuf->vlan_tci);
2380 pdst += sizeof(struct rte_vlan_hdr);
2381 /* Copy the remaining two bytes from packet data. */
2382 assert(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
2383 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
} else {
2385 /* Fill the gap in the title WQEBB with inline data. */
2386 rte_mov16(pdst, psrc);
2391 * Build the Ethernet Segment with entire packet
2392 * data inlining. Checks the boundary of WQEBB and
2393 * ring buffer wrapping, supports Software Parser,
2394 * Checksums and VLAN insertion Tx offload features.
2397 * Pointer to TX queue structure.
2399 * Pointer to burst routine local context.
2401 * Pointer to WQE to fill with built Ethernet Segment.
2403 * Length of VLAN tag insertion if any.
2405 * Length of data to inline (VLAN included, if any).
2407 * TSO flag, set mss field from the packet.
2409 * Configured Tx offloads mask. It is fully defined at
2410 * compile time and may be used for optimization.
2413 * Pointer to the next Data Segment (aligned and wrapped around).
2415 static __rte_always_inline struct mlx5_wqe_dseg *
2416 mlx5_tx_eseg_data(struct mlx5_txq_data *restrict txq,
2417 struct mlx5_txq_local *restrict loc,
2418 struct mlx5_wqe *restrict wqe,
2424 struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
2426 uint8_t *psrc, *pdst;
2430 * Calculate and set check sum flags first, dword field
2431 * in segment may be shared with Software Parser flags.
2433 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
if (tso) {
csum <<= 24;
2436 csum |= loc->mbuf->tso_segsz;
2437 es->flags = rte_cpu_to_be_32(csum);
} else {
2439 es->flags = rte_cpu_to_le_32(csum);
}
2442 * Calculate and set Software Parser offsets and flags.
2443 * These flags are set for custom UDP and IP tunnel packets.
2445 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2446 /* Fill metadata field if needed. */
2447 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2448 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2449 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2450 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
(sizeof(uint16_t) +
2452 sizeof(rte_v128u32_t)),
2453 "invalid Ethernet Segment data size");
2454 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
(sizeof(uint16_t) +
2456 sizeof(struct rte_vlan_hdr) +
2457 2 * RTE_ETHER_ADDR_LEN),
2458 "invalid Ethernet Segment data size");
2459 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
2460 es->inline_hdr_sz = rte_cpu_to_be_16(inlen);
2461 es->inline_data = *(unaligned_uint16_t *)psrc;
2462 psrc += sizeof(uint16_t);
2463 pdst = (uint8_t *)(es + 1);
2464 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2466 /* Implement VLAN tag insertion as part of inline data. */
2466 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
2467 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2468 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2469 /* Insert VLAN ethertype + VLAN tag. */
2470 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2471 ((RTE_ETHER_TYPE_VLAN << 16) |
2472 loc->mbuf->vlan_tci);
2473 pdst += sizeof(struct rte_vlan_hdr);
2475 /* Copy the remaining two bytes from packet data. */
2475 assert(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
2476 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
2477 psrc += sizeof(uint16_t);
} else {
2479 /* Fill the gap in the title WQEBB with inline data. */
2480 rte_mov16(pdst, psrc);
2481 psrc += sizeof(rte_v128u32_t);
2483 pdst = (uint8_t *)(es + 2);
2484 assert(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
2485 assert(pdst < (uint8_t *)txq->wqes_end);
2486 inlen -= MLX5_ESEG_MIN_INLINE_SIZE;
2488 assert(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
2489 return (struct mlx5_wqe_dseg *)pdst;
2492 * The WQEBB space availability is checked by caller.
2493 * Here we should be aware of WQE ring buffer wraparound only.
2495 part = (uint8_t *)txq->wqes_end - pdst;
2496 part = RTE_MIN(part, inlen);
2498 rte_memcpy(pdst, psrc, part);
2500 if (likely(!inlen)) {
2502 * If return value is not used by the caller
2503 * the code below will be optimized out.
2506 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2507 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
2508 pdst = (uint8_t *)txq->wqes;
2509 return (struct mlx5_wqe_dseg *)pdst;
2511 pdst = (uint8_t *)txq->wqes;
2518 * Copy data from a chain of mbufs to the specified linear buffer.
2519 * If data from some mbuf is copied completely, this mbuf is
2520 * freed. Local structure is used to keep the byte stream
2521 * state.
2524 * Pointer to the destination linear buffer.
2526 * Pointer to burst routine local context.
2528 * Length of data to be copied.
2530 * Configured Tx offloads mask. It is fully defined at
2531 * compile time and may be used for optimization.
2533 static __rte_always_inline void
2534 mlx5_tx_mseg_memcpy(uint8_t *pdst,
2535 struct mlx5_txq_local *restrict loc,
2537 unsigned int olx __rte_unused)
2539 struct rte_mbuf *mbuf;
2540 unsigned int part, dlen;
2545 /* Allow zero length packets, must check first. */
2546 dlen = rte_pktmbuf_data_len(loc->mbuf);
2547 if (dlen <= loc->mbuf_off) {
2548 /* Exhausted packet, just free. */
2550 loc->mbuf = mbuf->next;
2551 rte_pktmbuf_free_seg(mbuf);
2553 assert(loc->mbuf_nseg > 1);
2558 dlen -= loc->mbuf_off;
2559 psrc = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
2561 part = RTE_MIN(len, dlen);
2562 rte_memcpy(pdst, psrc, part);
2563 loc->mbuf_off += part;
2566 if (loc->mbuf_off >= rte_pktmbuf_data_len(loc->mbuf)) {
2568 /* Exhausted packet, just free. */
2570 loc->mbuf = mbuf->next;
2571 rte_pktmbuf_free_seg(mbuf);
2573 assert(loc->mbuf_nseg >= 1);
2583 * Build the Ethernet Segment with inlined data from
2584 * multi-segment packet. Checks the boundary of WQEBB
2585 * and ring buffer wrapping, supports Software Parser,
2586 * Checksums and VLAN insertion Tx offload features.
2589 * Pointer to TX queue structure.
2591 * Pointer to burst routine local context.
2593 * Pointer to WQE to fill with built Ethernet Segment.
2595 * Length of VLAN tag insertion if any.
2597 * Length of data to inline (VLAN included, if any).
2599 * TSO flag, set mss field from the packet.
2601 * Configured Tx offloads mask. It is fully defined at
2602 * compile time and may be used for optimization.
2605 * Pointer to the next Data Segment (aligned and
2606 * possibly NOT wrapped around - caller should do
2607 * wrapping check on its own).
2609 static __rte_always_inline struct mlx5_wqe_dseg *
2610 mlx5_tx_eseg_mdat(struct mlx5_txq_data *restrict txq,
2611 struct mlx5_txq_local *restrict loc,
2612 struct mlx5_wqe *restrict wqe,
2618 struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
2624 * Calculate and set check sum flags first, uint32_t field
2625 * in segment may be shared with Software Parser flags.
2627 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
if (tso) {
csum <<= 24;
2630 csum |= loc->mbuf->tso_segsz;
2631 es->flags = rte_cpu_to_be_32(csum);
} else {
2633 es->flags = rte_cpu_to_le_32(csum);
}
2636 * Calculate and set Software Parser offsets and flags.
2637 * These flags are set for custom UDP and IP tunnel packets.
2639 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2640 /* Fill metadata field if needed. */
2641 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2642 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2643 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2644 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
(sizeof(uint16_t) +
2646 sizeof(rte_v128u32_t)),
2647 "invalid Ethernet Segment data size");
2648 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
(sizeof(uint16_t) +
2650 sizeof(struct rte_vlan_hdr) +
2651 2 * RTE_ETHER_ADDR_LEN),
2652 "invalid Ethernet Segment data size");
2653 assert(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
2654 es->inline_hdr_sz = rte_cpu_to_be_16(inlen);
2655 pdst = (uint8_t *)&es->inline_data;
2656 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2657 /* Implement VLAN tag insertion as part of inline data. */
2658 mlx5_tx_mseg_memcpy(pdst, loc, 2 * RTE_ETHER_ADDR_LEN, olx);
2659 pdst += 2 * RTE_ETHER_ADDR_LEN;
2660 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2661 ((RTE_ETHER_TYPE_VLAN << 16) |
2662 loc->mbuf->vlan_tci);
2663 pdst += sizeof(struct rte_vlan_hdr);
2664 inlen -= 2 * RTE_ETHER_ADDR_LEN + sizeof(struct rte_vlan_hdr);
2666 assert(pdst < (uint8_t *)txq->wqes_end);
2668 * The WQEBB space availability is checked by caller.
2669 * Here we should be aware of WQE ring buffer wraparound only.
2671 part = (uint8_t *)txq->wqes_end - pdst;
2672 part = RTE_MIN(part, inlen);
2675 mlx5_tx_mseg_memcpy(pdst, loc, part, olx);
2677 if (likely(!inlen)) {
2679 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2680 return (struct mlx5_wqe_dseg *)pdst;
2682 pdst = (uint8_t *)txq->wqes;
2688 * Build the Data Segment of pointer type.
2691 * Pointer to TX queue structure.
2693 * Pointer to burst routine local context.
2695 * Pointer to WQE to fill with built Data Segment.
2697 * Data buffer to point.
2699 * Data buffer length.
2701 * Configured Tx offloads mask. It is fully defined at
2702 * compile time and may be used for optimization.
2704 static __rte_always_inline void
2705 mlx5_tx_dseg_ptr(struct mlx5_txq_data *restrict txq,
2706 struct mlx5_txq_local *restrict loc,
2707 struct mlx5_wqe_dseg *restrict dseg,
2710 unsigned int olx __rte_unused)
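/*
 * Pointer-type Data Segment: the byte count and the buffer address
 * are converted to big-endian as required by the HW, the lkey is
 * looked up via mlx5_tx_mb2mr() in the per-queue MR cache for the
 * mbuf being sent.
 */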
2714 dseg->bcount = rte_cpu_to_be_32(len);
2715 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
2716 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
2720 * Build the Data Segment of pointer type or inline
2721 * type if the data length is less than the inline buffer
2722 * of the minimal Data Segment.
2725 * Pointer to TX queue structure.
2727 * Pointer to burst routine local context.
2729 * Pointer to WQE to fill with built Data Segment.
2731 * Data buffer to point.
2733 * Data buffer length.
2735 * Configured Tx offloads mask. It is fully defined at
2736 * compile time and may be used for optimization.
2738 static __rte_always_inline void
2739 mlx5_tx_dseg_iptr(struct mlx5_txq_data *restrict txq,
2740 struct mlx5_txq_local *restrict loc,
2741 struct mlx5_wqe_dseg *restrict dseg,
2744 unsigned int olx __rte_unused)
2750 if (len > MLX5_DSEG_MIN_INLINE_SIZE) {
2751 dseg->bcount = rte_cpu_to_be_32(len);
2752 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
2753 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
2757 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
2758 /* Unrolled implementation of generic rte_memcpy. */
2759 dst = (uintptr_t)&dseg->inline_data[0];
2760 src = (uintptr_t)buf;
if (len & 0x08) {
2762 #ifdef RTE_ARCH_STRICT_ALIGN
2763 assert(dst == RTE_PTR_ALIGN(dst, sizeof(uint32_t)));
2764 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
2765 dst += sizeof(uint32_t);
2766 src += sizeof(uint32_t);
2767 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
2768 dst += sizeof(uint32_t);
2769 src += sizeof(uint32_t);
#else
2771 *(uint64_t *)dst = *(unaligned_uint64_t *)src;
2772 dst += sizeof(uint64_t);
2773 src += sizeof(uint64_t);
#endif
}
if (len & 0x04) {
2777 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
2778 dst += sizeof(uint32_t);
2779 src += sizeof(uint32_t);
}
if (len & 0x02) {
2782 *(uint16_t *)dst = *(unaligned_uint16_t *)src;
2783 dst += sizeof(uint16_t);
2784 src += sizeof(uint16_t);
}
if (len & 0x01)
2787 *(uint8_t *)dst = *(uint8_t *)src;
2791 * Build the Data Segment of inlined data from single
2792 * segment packet, no VLAN insertion.
2795 * Pointer to TX queue structure.
2797 * Pointer to burst routine local context.
2799 * Pointer to WQE to fill with built Data Segment.
2801 * Data buffer to point.
2803 * Data buffer length.
2805 * Configured Tx offloads mask. It is fully defined at
2806 * compile time and may be used for optimization.
2809 * Pointer to the next Data Segment after inlined data.
2810 * Ring buffer wraparound check is needed. We do not
2811 * do it here because it may not be needed for the
2812 * last packet in the eMPW session.
2814 static __rte_always_inline struct mlx5_wqe_dseg *
2815 mlx5_tx_dseg_empw(struct mlx5_txq_data *restrict txq,
2816 struct mlx5_txq_local *restrict loc __rte_unused,
2817 struct mlx5_wqe_dseg *restrict dseg,
2820 unsigned int olx __rte_unused)
2825 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
2826 pdst = &dseg->inline_data[0];
2828 * The WQEBB space availability is checked by caller.
2829 * Here we should be aware of WQE ring buffer wraparound only.
2831 part = (uint8_t *)txq->wqes_end - pdst;
2832 part = RTE_MIN(part, len);
2834 rte_memcpy(pdst, buf, part);
2838 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2839 /* Note: no final wraparound check here. */
2840 return (struct mlx5_wqe_dseg *)pdst;
2842 pdst = (uint8_t *)txq->wqes;
2849 * Build the Data Segment of inlined data from single
2850 * segment packet with VLAN insertion.
2853 * Pointer to TX queue structure.
2855 * Pointer to burst routine local context.
2857 * Pointer to the dseg fill with built Data Segment.
2859 * Data buffer to point.
2861 * Data buffer length.
2863 * Configured Tx offloads mask. It is fully defined at
2864 * compile time and may be used for optimization.
2867 * Pointer to the next Data Segment after inlined data.
2868 * Ring buffer wraparound check is needed.
2870 static __rte_always_inline struct mlx5_wqe_dseg *
2871 mlx5_tx_dseg_vlan(struct mlx5_txq_data *restrict txq,
2872 struct mlx5_txq_local *restrict loc __rte_unused,
2873 struct mlx5_wqe_dseg *restrict dseg,
2876 unsigned int olx __rte_unused)
2882 assert(len > MLX5_ESEG_MIN_INLINE_SIZE);
2883 static_assert(MLX5_DSEG_MIN_INLINE_SIZE ==
2884 (2 * RTE_ETHER_ADDR_LEN),
2885 "invalid Data Segment data size");
2886 dseg->bcount = rte_cpu_to_be_32((len + sizeof(struct rte_vlan_hdr)) |
2887 MLX5_ETH_WQE_DATA_INLINE);
2888 pdst = &dseg->inline_data[0];
2889 memcpy(pdst, buf, MLX5_DSEG_MIN_INLINE_SIZE);
2890 buf += MLX5_DSEG_MIN_INLINE_SIZE;
2891 pdst += MLX5_DSEG_MIN_INLINE_SIZE;
2892 len -= MLX5_DSEG_MIN_INLINE_SIZE;
2893 /* Insert VLAN ethertype + VLAN tag. Pointer is aligned. */
2894 assert(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
2895 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
2896 pdst = (uint8_t *)txq->wqes;
2897 *(uint32_t *)pdst = rte_cpu_to_be_32((RTE_ETHER_TYPE_VLAN << 16) |
2898 loc->mbuf->vlan_tci);
2899 pdst += sizeof(struct rte_vlan_hdr);
2901 * The WQEBB space availability is checked by caller.
2902 * Here we should be aware of WQE ring buffer wraparound only.
2904 part = (uint8_t *)txq->wqes_end - pdst;
2905 part = RTE_MIN(part, len);
2907 rte_memcpy(pdst, buf, part);
2911 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2912 /* Note: no final wraparound check here. */
2913 return (struct mlx5_wqe_dseg *)pdst;
2915 pdst = (uint8_t *)txq->wqes;
2922 * Build the Ethernet Segment with optionally inlined data with
2923 * VLAN insertion and following Data Segments (if any) from
2924 * multi-segment packet. Used by ordinary send and TSO.
2927 * Pointer to TX queue structure.
2929 * Pointer to burst routine local context.
2931 * Pointer to WQE to fill with built Ethernet/Data Segments.
2933 * Length of VLAN header to insert, 0 means no VLAN insertion.
2935 * Data length to inline. For TSO this parameter specifies
2936 * exact value, for the ordinary send routine it can be aligned
2937 * by the caller to provide better WQE space saving and data
2938 * buffer start address alignment. This length includes the VLAN
* header being inserted.
2941 * Zero means ordinary send, inlined data can be extended,
2942 * otherwise this is TSO, inlined data length is fixed.
2944 * Configured Tx offloads mask. It is fully defined at
2945 * compile time and may be used for optimization.
2948 * Actual size of built WQE in segments.
2950 static __rte_always_inline unsigned int
2951 mlx5_tx_mseg_build(struct mlx5_txq_data *restrict txq,
2952 struct mlx5_txq_local *restrict loc,
2953 struct mlx5_wqe *restrict wqe,
2957 unsigned int olx __rte_unused)
2959 struct mlx5_wqe_dseg *restrict dseg;
2962 assert((rte_pktmbuf_pkt_len(loc->mbuf) + vlan) >= inlen);
2963 loc->mbuf_nseg = NB_SEGS(loc->mbuf);
2966 dseg = mlx5_tx_eseg_mdat(txq, loc, wqe, vlan, inlen, tso, olx);
2967 if (!loc->mbuf_nseg)
2970 * There are still some mbufs remaining, not inlined.
2971 * The first mbuf may be partially inlined and we
2972 * must process the possible non-zero data offset.
2974 if (loc->mbuf_off) {
2979 * Exhausted packets must be dropped before.
2980 * Non-zero offset means there are some data
2981 * remaining in the packet.
2983 assert(loc->mbuf_off < rte_pktmbuf_data_len(loc->mbuf));
2984 assert(rte_pktmbuf_data_len(loc->mbuf));
2985 dptr = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
2987 dlen = rte_pktmbuf_data_len(loc->mbuf) - loc->mbuf_off;
2989 * Build the pointer/minimal data Data Segment.
2990 * Do ring buffer wrapping check in advance.
2992 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
2993 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
2994 mlx5_tx_dseg_iptr(txq, loc, dseg, dptr, dlen, olx);
2995 /* Store the mbuf to be freed on completion. */
2996 assert(loc->elts_free);
2997 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3000 if (--loc->mbuf_nseg == 0)
3002 loc->mbuf = loc->mbuf->next;
3006 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
3007 struct rte_mbuf *mbuf;
3009 /* Zero length segment found, just skip. */
3011 loc->mbuf = loc->mbuf->next;
3012 rte_pktmbuf_free_seg(mbuf);
3013 if (--loc->mbuf_nseg == 0)
3016 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3017 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3020 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3021 rte_pktmbuf_data_len(loc->mbuf), olx);
3022 assert(loc->elts_free);
3023 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3026 if (--loc->mbuf_nseg == 0)
3028 loc->mbuf = loc->mbuf->next;
3033 /* Calculate actual segments used from the dseg pointer. */
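/*
 * The segment count is the distance from the WQE start to the next
 * free Data Segment in 16-byte units; if the dseg pointer has
 * wrapped around behind the WQE, the ring size is added to get the
 * positive distance.
 */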
3034 if ((uintptr_t)wqe < (uintptr_t)dseg)
3035 ds = ((uintptr_t)dseg - (uintptr_t)wqe) / MLX5_WSEG_SIZE;
3037 ds = (((uintptr_t)dseg - (uintptr_t)wqe) +
3038 txq->wqe_s * MLX5_WQE_SIZE) / MLX5_WSEG_SIZE;
3043 * Tx one packet function for multi-segment TSO. Supports all
3044 * types of Tx offloads, uses MLX5_OPCODE_TSO to build WQEs,
3045 * sends one packet per WQE.
3047 * This routine is responsible for storing the processed mbufs
3048 * into the elts ring buffer and updating elts_head.
3051 * Pointer to TX queue structure.
3053 * Pointer to burst routine local context.
3055 * Configured Tx offloads mask. It is fully defined at
3056 * compile time and may be used for optimization.
3059 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3060 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3061 * Local context variables partially updated.
3063 static __rte_always_inline enum mlx5_txcmp_code
3064 mlx5_tx_packet_multi_tso(struct mlx5_txq_data *restrict txq,
3065 struct mlx5_txq_local *restrict loc,
3068 struct mlx5_wqe *restrict wqe;
3069 unsigned int ds, dlen, inlen, ntcp, vlan = 0;
3072 * Calculate data length to be inlined to estimate
3073 * the required space in WQE ring buffer.
3075 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
3076 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3077 vlan = sizeof(struct rte_vlan_hdr);
3078 inlen = loc->mbuf->l2_len + vlan +
3079 loc->mbuf->l3_len + loc->mbuf->l4_len;
3080 if (unlikely((!inlen || !loc->mbuf->tso_segsz)))
3081 return MLX5_TXCMP_CODE_ERROR;
3082 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
3083 inlen += loc->mbuf->outer_l2_len + loc->mbuf->outer_l3_len;
3084 /* Packet must contain all TSO headers. */
3085 if (unlikely(inlen > MLX5_MAX_TSO_HEADER ||
3086 inlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
3087 inlen > (dlen + vlan)))
3088 return MLX5_TXCMP_CODE_ERROR;
3089 assert(inlen >= txq->inlen_mode);
3091 * Check whether there are enough free WQEBBs:
3093 * - Ethernet Segment
3094 * - First Segment of inlined Ethernet data
3095 * - ... data continued ...
3096 * - Data Segments of pointer/min inline type
3098 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
3099 MLX5_ESEG_MIN_INLINE_SIZE +
3101 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
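/*
 * The estimate above counts the Control and Ethernet Segments (2),
 * one Data Segment per mbuf segment, and the extra 16-byte chunks
 * needed for the inlined headers beyond the minimal inline part,
 * rounded up.
 */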
3102 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3103 return MLX5_TXCMP_CODE_EXIT;
3104 /* Check for maximal WQE size. */
3105 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3106 return MLX5_TXCMP_CODE_ERROR;
3107 #ifdef MLX5_PMD_SOFT_COUNTERS
3108 /* Update sent data bytes/packets counters. */
3109 ntcp = (dlen - (inlen - vlan) + loc->mbuf->tso_segsz - 1) /
3110 loc->mbuf->tso_segsz;
3112 * One packet is added for the mbuf itself
3113 * at the end of mlx5_tx_burst() from the
3114 * loc->pkts_sent field.
3117 txq->stats.opackets += ntcp;
3118 txq->stats.obytes += dlen + vlan + ntcp * inlen;
3120 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3121 loc->wqe_last = wqe;
3122 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_TSO, olx);
3123 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 1, olx);
3124 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3125 txq->wqe_ci += (ds + 3) / 4;
3126 loc->wqe_free -= (ds + 3) / 4;
3127 return MLX5_TXCMP_CODE_MULTI;
3131 * Tx one packet function for multi-segment SEND. Supports all
3132 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
3133 * sends one packet per WQE, without any data inlining in
3136 * This routine is responsible for storing the processed mbufs
3137 * into the elts ring buffer and updating elts_head.
3140 * Pointer to TX queue structure.
3142 * Pointer to burst routine local context.
3144 * Configured Tx offloads mask. It is fully defined at
3145 * compile time and may be used for optimization.
3148 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3149 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3150 * Local context variables partially updated.
3152 static __rte_always_inline enum mlx5_txcmp_code
3153 mlx5_tx_packet_multi_send(struct mlx5_txq_data *restrict txq,
3154 struct mlx5_txq_local *restrict loc,
3157 struct mlx5_wqe_dseg *restrict dseg;
3158 struct mlx5_wqe *restrict wqe;
3159 unsigned int ds, nseg;
3161 assert(NB_SEGS(loc->mbuf) > 1);
3163 * No inlining at all - it means that saving CPU cycles
3164 * was prioritized at configuration time, we should not
3165 * copy any packet data into the WQE.
3167 nseg = NB_SEGS(loc->mbuf);
ds = 2 + nseg;
3169 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3170 return MLX5_TXCMP_CODE_EXIT;
3171 /* Check for maximal WQE size. */
3172 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3173 return MLX5_TXCMP_CODE_ERROR;
3175 * Some Tx offloads may cause an error if
3176 * packet is not long enough, check against
3177 * assumed minimal length.
3179 if (rte_pktmbuf_pkt_len(loc->mbuf) <= MLX5_ESEG_MIN_INLINE_SIZE)
3180 return MLX5_TXCMP_CODE_ERROR;
3181 #ifdef MLX5_PMD_SOFT_COUNTERS
3182 /* Update sent data bytes counter. */
3183 txq->stats.obytes += rte_pktmbuf_pkt_len(loc->mbuf);
3184 if (MLX5_TXOFF_CONFIG(VLAN) &&
3185 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3186 txq->stats.obytes += sizeof(struct rte_vlan_hdr);
3189 * SEND WQE, one WQEBB:
3190 * - Control Segment, SEND opcode
3191 * - Ethernet Segment, optional VLAN, no inline
3192 * - Data Segments, pointer only type
3194 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3195 loc->wqe_last = wqe;
3196 mlx5_tx_cseg_init(txq, loc, wqe, ds, MLX5_OPCODE_SEND, olx);
3197 mlx5_tx_eseg_none(txq, loc, wqe, olx);
3198 dseg = &wqe->dseg[0];
3200 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
3201 struct rte_mbuf *mbuf;
3204 * Zero length segment found, have to
3205 * correct total size of WQE in segments.
3206 * It is supposed to be a rare occasion, so
3207 * in the normal case (no zero length segments)
3208 * we avoid extra writing to the Control Segment.
3212 wqe->cseg.sq_ds -= RTE_BE32(1);
3214 loc->mbuf = mbuf->next;
3215 rte_pktmbuf_free_seg(mbuf);
3221 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3222 rte_pktmbuf_data_len(loc->mbuf), olx);
3223 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3228 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3229 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3230 loc->mbuf = loc->mbuf->next;
3233 txq->wqe_ci += (ds + 3) / 4;
3234 loc->wqe_free -= (ds + 3) / 4;
3235 return MLX5_TXCMP_CODE_MULTI;
3239 * Tx one packet function for multi-segment SEND. Supports all
3240 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
3241 * sends one packet per WQE, with data inlining in
3242 * Ethernet Segment and minimal Data Segments.
3244 * This routine is responsible for storing the processed mbufs
3245 * into the elts ring buffer and updating elts_head.
3248 * Pointer to TX queue structure.
3250 * Pointer to burst routine local context.
3252 * Configured Tx offloads mask. It is fully defined at
3253 * compile time and may be used for optimization.
3256 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3257 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3258 * Local context variables partially updated.
3260 static __rte_always_inline enum mlx5_txcmp_code
3261 mlx5_tx_packet_multi_inline(struct mlx5_txq_data *restrict txq,
3262 struct mlx5_txq_local *restrict loc,
3265 struct mlx5_wqe *restrict wqe;
3266 unsigned int ds, inlen, dlen, vlan = 0;
3268 assert(MLX5_TXOFF_CONFIG(INLINE));
3269 assert(NB_SEGS(loc->mbuf) > 1);
3271 * First calculate data length to be inlined
3272 * to estimate the required space for WQE.
3274 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
3275 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3276 vlan = sizeof(struct rte_vlan_hdr);
3277 inlen = dlen + vlan;
3278 /* Check against minimal length. */
3279 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
3280 return MLX5_TXCMP_CODE_ERROR;
3281 assert(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
3282 if (inlen > txq->inlen_send) {
3283 struct rte_mbuf *mbuf;
3288 * Packet length exceeds the allowed inline
3289 * data length, check whether the minimal
3290 * inlining is required.
3292 if (txq->inlen_mode) {
3293 assert(txq->inlen_mode >= MLX5_ESEG_MIN_INLINE_SIZE);
3294 assert(txq->inlen_mode <= txq->inlen_send);
3295 inlen = txq->inlen_mode;
3297 if (!vlan || txq->vlan_en) {
3299 * VLAN insertion will be done by the HW internally.
3300 * It is not the most efficient way - the VLAN flag is
3301 * checked twice, but we have to compute the
3302 * inlining length correctly and take into
3303 * account the VLAN header being inserted.
3305 return mlx5_tx_packet_multi_send
3308 inlen = MLX5_ESEG_MIN_INLINE_SIZE;
3311 * Now we know the minimal amount of data to be inlined.
3312 * Check whether we should inline the buffers from the
3313 * beginning of the chain to eliminate some mbufs.
3316 nxlen = rte_pktmbuf_data_len(mbuf);
3317 if (unlikely(nxlen <= txq->inlen_send)) {
3318 /* We can inline first mbuf at least. */
3319 if (nxlen < inlen) {
3322 /* Scan mbufs till inlen filled. */
3327 nxlen = rte_pktmbuf_data_len(mbuf);
3329 } while (unlikely(nxlen < inlen));
3330 if (unlikely(nxlen > txq->inlen_send)) {
3331 /* We cannot inline entire mbuf. */
3332 smlen = inlen - smlen;
3333 start = rte_pktmbuf_mtod_offset
3334 (mbuf, uintptr_t, smlen);
3341 /* This should not be the end of the packet. */
3343 nxlen = inlen + rte_pktmbuf_data_len(mbuf);
3344 } while (unlikely(nxlen < txq->inlen_send));
3346 start = rte_pktmbuf_mtod(mbuf, uintptr_t);
3348 * Check whether we can do inline to align start
3349 * address of data buffer to cacheline.
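* The expression below is (-start) modulo the cacheline size, i.e.
* the number of bytes needed to reach the next cacheline boundary;
* if extending the inline part by this amount still fits, the
* non-inlined remainder of the data starts cacheline-aligned.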
3352 start = (~start + 1) & (RTE_CACHE_LINE_SIZE - 1);
3353 if (unlikely(start)) {
3355 if (start <= txq->inlen_send)
3360 * Check whether there are enough free WQEBBs:
3362 * - Ethernet Segment
3363 * - First Segment of inlined Ethernet data
3364 * - ... data continued ...
3365 * - Data Segments of pointer/min inline type
3367 * Estimate the number of Data Segments conservatively,
3368 * supposing no mbufs are being freed during inlining.
3370 assert(inlen <= txq->inlen_send);
3371 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
3372 MLX5_ESEG_MIN_INLINE_SIZE +
3374 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3375 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3376 return MLX5_TXCMP_CODE_EXIT;
3377 /* Check for maximal WQE size. */
3378 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3379 return MLX5_TXCMP_CODE_ERROR;
3380 #ifdef MLX5_PMD_SOFT_COUNTERS
3381 /* Update sent data bytes/packets counters. */
3382 txq->stats.obytes += dlen + vlan;
3384 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3385 loc->wqe_last = wqe;
3386 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_SEND, olx);
3387 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 0, olx);
3388 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3389 txq->wqe_ci += (ds + 3) / 4;
3390 loc->wqe_free -= (ds + 3) / 4;
3391 return MLX5_TXCMP_CODE_MULTI;
3395 * Tx burst function for multi-segment packets. Supports all
3396 * types of Tx offloads, uses MLX5_OPCODE_SEND/TSO to build WQEs,
3397 * sends one packet per WQE. Function stops sending if it
3398 * encounters the single-segment packet.
3400 * This routine is responsible for storing the processed mbufs
3401 * into the elts ring buffer and updating elts_head.
3404 * Pointer to TX queue structure.
3406 * Packets to transmit.
3408 * Number of packets in array.
3410 * Pointer to burst routine local context.
3412 * Configured Tx offloads mask. It is fully defined at
3413 * compile time and may be used for optimization.
3416 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3417 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3418 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
3419 * MLX5_TXCMP_CODE_TSO - TSO single-segment packet encountered.
3420 * Local context variables updated.
3422 static __rte_always_inline enum mlx5_txcmp_code
3423 mlx5_tx_burst_mseg(struct mlx5_txq_data *restrict txq,
3424 struct rte_mbuf **restrict pkts,
3425 unsigned int pkts_n,
3426 struct mlx5_txq_local *restrict loc,
3429 assert(loc->elts_free && loc->wqe_free);
3430 assert(pkts_n > loc->pkts_sent);
3431 pkts += loc->pkts_sent + 1;
3432 pkts_n -= loc->pkts_sent;
3434 enum mlx5_txcmp_code ret;
3436 assert(NB_SEGS(loc->mbuf) > 1);
3438 * Estimate the number of free elts quickly but
3439 * conservatively. Some segment may be fully inlined
3440 * and freed, ignore this here - precise estimation is impossible.
3443 if (loc->elts_free < NB_SEGS(loc->mbuf))
3444 return MLX5_TXCMP_CODE_EXIT;
3445 if (MLX5_TXOFF_CONFIG(TSO) &&
3446 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)) {
3447 /* Proceed with multi-segment TSO. */
3448 ret = mlx5_tx_packet_multi_tso(txq, loc, olx);
3449 } else if (MLX5_TXOFF_CONFIG(INLINE)) {
3450 /* Proceed with multi-segment SEND with inlining. */
3451 ret = mlx5_tx_packet_multi_inline(txq, loc, olx);
3453 /* Proceed with multi-segment SEND w/o inlining. */
3454 ret = mlx5_tx_packet_multi_send(txq, loc, olx);
3456 if (ret == MLX5_TXCMP_CODE_EXIT)
3457 return MLX5_TXCMP_CODE_EXIT;
3458 if (ret == MLX5_TXCMP_CODE_ERROR)
3459 return MLX5_TXCMP_CODE_ERROR;
3460 /* WQE is built, go to the next packet. */
3463 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3464 return MLX5_TXCMP_CODE_EXIT;
3465 loc->mbuf = *pkts++;
3467 rte_prefetch0(*pkts);
3468 if (likely(NB_SEGS(loc->mbuf) > 1))
3470 /* Here ends the series of multi-segment packets. */
3471 if (MLX5_TXOFF_CONFIG(TSO) &&
3472 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
3473 return MLX5_TXCMP_CODE_TSO;
3474 return MLX5_TXCMP_CODE_SINGLE;
3480 * Tx burst function for single-segment packets with TSO.
3481 * Supports all types of Tx offloads, except multi-packets.
3482 * Uses MLX5_OPCODE_TSO to build WQEs, sends one packet per WQE.
3483 * Function stops sending if it encounters the multi-segment
3484 * packet or packet without TSO requested.
3486 * The routine is responsible for storing the processed mbufs
3487 * into the elts ring buffer and updating elts_head if inline
3488 * offload is requested, due to possible early freeing
3489 * of the inlined mbufs (the pkts array can not be stored in
* elts as a batch).
3493 * Pointer to TX queue structure.
3495 * Packets to transmit.
3497 * Number of packets in array.
3499 * Pointer to burst routine local context.
3501 * Configured Tx offloads mask. It is fully defined at
3502 * compile time and may be used for optimization.
3505 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3506 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3507 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
3508 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
3509 * Local context variables updated.
3511 static __rte_always_inline enum mlx5_txcmp_code
3512 mlx5_tx_burst_tso(struct mlx5_txq_data *restrict txq,
3513 struct rte_mbuf **restrict pkts,
3514 unsigned int pkts_n,
3515 struct mlx5_txq_local *restrict loc,
3518 assert(loc->elts_free && loc->wqe_free);
3519 assert(pkts_n > loc->pkts_sent);
3520 pkts += loc->pkts_sent + 1;
3521 pkts_n -= loc->pkts_sent;
3523 struct mlx5_wqe_dseg *restrict dseg;
3524 struct mlx5_wqe *restrict wqe;
3525 unsigned int ds, dlen, hlen, ntcp, vlan = 0;
3528 assert(NB_SEGS(loc->mbuf) == 1);
3529 dlen = rte_pktmbuf_data_len(loc->mbuf);
3530 if (MLX5_TXOFF_CONFIG(VLAN) &&
3531 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
3532 vlan = sizeof(struct rte_vlan_hdr);
3535 * First calculate the WQE size to check
3536 * whether we have enough space in ring buffer.
3538 hlen = loc->mbuf->l2_len + vlan +
3539 loc->mbuf->l3_len + loc->mbuf->l4_len;
3540 if (unlikely((!hlen || !loc->mbuf->tso_segsz)))
3541 return MLX5_TXCMP_CODE_ERROR;
3542 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
3543 hlen += loc->mbuf->outer_l2_len +
3544 loc->mbuf->outer_l3_len;
3545 /* Segment must contain all TSO headers. */
3546 if (unlikely(hlen > MLX5_MAX_TSO_HEADER ||
3547 hlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
3548 hlen > (dlen + vlan)))
3549 return MLX5_TXCMP_CODE_ERROR;
3551 * Check whether there are enough free WQEBBs:
3553 * - Ethernet Segment
3554 * - First Segment of inlined Ethernet data
3555 * - ... data continued ...
3556 * - Finishing Data Segment of pointer type
3558 ds = 4 + (hlen - MLX5_ESEG_MIN_INLINE_SIZE +
3559 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3560 if (loc->wqe_free < ((ds + 3) / 4))
3561 return MLX5_TXCMP_CODE_EXIT;
3562 #ifdef MLX5_PMD_SOFT_COUNTERS
3563 /* Update sent data bytes/packets counters. */
3564 ntcp = (dlen + vlan - hlen +
3565 loc->mbuf->tso_segsz - 1) /
3566 loc->mbuf->tso_segsz;
3568 * One packet is added for the mbuf itself at the end
3569 * of mlx5_tx_burst() from the loc->pkts_sent field.
3572 txq->stats.opackets += ntcp;
3573 txq->stats.obytes += dlen + vlan + ntcp * hlen;
3576 * Build the TSO WQE:
3578 * - Ethernet Segment with hlen bytes inlined
3579 * - Data Segment of pointer type
3581 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3582 loc->wqe_last = wqe;
3583 mlx5_tx_cseg_init(txq, loc, wqe, ds,
3584 MLX5_OPCODE_TSO, olx);
3585 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan, hlen, 1, olx);
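/*
 * The headers were inlined into the Ethernet Segment; point the
 * finishing Data Segment at the remaining payload. The VLAN header
 * length is subtracted back since it exists only in the WQE, not
 * in the mbuf data.
 */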
3586 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) + hlen - vlan;
3587 dlen -= hlen - vlan;
3588 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
3590 * WQE is built, update the loop parameters
3591 * and go to the next packet.
3593 txq->wqe_ci += (ds + 3) / 4;
3594 loc->wqe_free -= (ds + 3) / 4;
3595 if (MLX5_TXOFF_CONFIG(INLINE))
3596 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3600 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3601 return MLX5_TXCMP_CODE_EXIT;
3602 loc->mbuf = *pkts++;
3604 rte_prefetch0(*pkts);
3605 if (MLX5_TXOFF_CONFIG(MULTI) &&
3606 unlikely(NB_SEGS(loc->mbuf) > 1))
3607 return MLX5_TXCMP_CODE_MULTI;
3608 if (likely(!(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)))
3609 return MLX5_TXCMP_CODE_SINGLE;
3610 /* Continue with the next TSO packet. */
3616 * Analyze the packet and select the best method to send.
3619 * Pointer to TX queue structure.
3621 * Pointer to burst routine local context.
3623 * Configured Tx offloads mask. It is fully defined at
3624 * compile time and may be used for optimization.
3626 * The predefined flag whether to do the complete check for
3627 * multi-segment packets and TSO.
3630 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
3631 * MLX5_TXCMP_CODE_TSO - TSO required, use TSO/LSO.
3632 * MLX5_TXCMP_CODE_SINGLE - single-segment packet, use SEND.
3633 * MLX5_TXCMP_CODE_EMPW - single-segment packet, use MPW.
3635 static __rte_always_inline enum mlx5_txcmp_code
3636 mlx5_tx_able_to_empw(struct mlx5_txq_data *restrict txq,
3637 struct mlx5_txq_local *restrict loc,
3641 /* Check for multi-segment packet. */
3643 MLX5_TXOFF_CONFIG(MULTI) &&
3644 unlikely(NB_SEGS(loc->mbuf) > 1))
3645 return MLX5_TXCMP_CODE_MULTI;
3646 /* Check for TSO packet. */
3648 MLX5_TXOFF_CONFIG(TSO) &&
3649 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
3650 return MLX5_TXCMP_CODE_TSO;
3651 /* Check if eMPW is enabled at all. */
3652 if (!MLX5_TXOFF_CONFIG(EMPW))
3653 return MLX5_TXCMP_CODE_SINGLE;
3654 /* Check if eMPW can be engaged. */
3655 if (MLX5_TXOFF_CONFIG(VLAN) &&
3656 unlikely(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) &&
3657 (!MLX5_TXOFF_CONFIG(INLINE) ||
3658 unlikely((rte_pktmbuf_data_len(loc->mbuf) +
3659 sizeof(struct rte_vlan_hdr)) > txq->inlen_empw))) {
3661 * eMPW does not support VLAN insertion offload,
3662 * we have to inline the entire packet but
3663 * the packet is too long for inlining.
3665 return MLX5_TXCMP_CODE_SINGLE;
3667 return MLX5_TXCMP_CODE_EMPW;
3671 * Check the next packet attributes to match with the eMPW batch ones.
3672 * In addition, for legacy MPW the packet length is checked as well.
3675 * Pointer to TX queue structure.
3677 * Pointer to Ethernet Segment of eMPW batch.
3679 * Pointer to burst routine local context.
3681 * Length of previous packet in MPW descriptor.
3683 * Configured Tx offloads mask. It is fully defined at
3684 * compile time and may be used for optimization.
3687 * true - packet match with eMPW batch attributes.
3688 * false - no match, eMPW should be restarted.
3690 static __rte_always_inline bool
3691 mlx5_tx_match_empw(struct mlx5_txq_data *restrict txq __rte_unused,
3692 struct mlx5_wqe_eseg *restrict es,
3693 struct mlx5_txq_local *restrict loc,
3697 uint8_t swp_flags = 0;
3699 /* Compare the checksum flags, if any. */
3700 if (MLX5_TXOFF_CONFIG(CSUM) &&
3701 txq_ol_cksum_to_cs(loc->mbuf) != es->cs_flags)
3703 /* Compare the Software Parser offsets and flags. */
3704 if (MLX5_TXOFF_CONFIG(SWP) &&
3705 (es->swp_offs != txq_mbuf_to_swp(loc, &swp_flags, olx) ||
3706 es->swp_flags != swp_flags))
3708 /* Fill metadata field if needed. */
3709 if (MLX5_TXOFF_CONFIG(METADATA) &&
3710 es->metadata != (loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
3711 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0))
3713 /* Legacy MPW can send packets with the same length only. */
3714 if (MLX5_TXOFF_CONFIG(MPW) &&
3715 dlen != rte_pktmbuf_data_len(loc->mbuf))
3717 /* There must be no VLAN packets in eMPW loop. */
3718 if (MLX5_TXOFF_CONFIG(VLAN))
3719 assert(!(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT));
3724 * Update send loop variables and WQE for eMPW loop
3725 * without data inlining. Number of Data Segments is
3726 * equal to the number of sent packets.
3729 * Pointer to TX queue structure.
3731 * Pointer to burst routine local context.
3733 * Number of packets (one Data Segment per packet).
3735 * Accumulated statistics, bytes sent.
3737 * Configured Tx offloads mask. It is fully defined at
3738 * compile time and may be used for optimization.
3741 * true - packet match with eMPW batch attributes.
3742 * false - no match, eMPW should be restarted.
3744 static __rte_always_inline void
3745 mlx5_tx_sdone_empw(struct mlx5_txq_data *restrict txq,
3746 struct mlx5_txq_local *restrict loc,
3749 unsigned int olx __rte_unused)
3751 assert(!MLX5_TXOFF_CONFIG(INLINE));
3752 #ifdef MLX5_PMD_SOFT_COUNTERS
3753 /* Update sent data bytes counter. */
3754 txq->stats.obytes += slen;
3758 loc->elts_free -= ds;
3759 loc->pkts_sent += ds;
3761 loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3762 txq->wqe_ci += (ds + 3) / 4;
3763 loc->wqe_free -= (ds + 3) / 4;
3767 * Update send loop variables and WQE for eMPW loop
3768 * with data inlining. Gets the size of the descriptors
3769 * and data pushed to the WQE.
3772 * Pointer to TX queue structure.
3774 * Pointer to burst routine local context.
3776 * Total size of descriptor/data in bytes.
3778 * Accumulated statistics, data bytes sent.
3780 * Configured Tx offloads mask. It is fully defined at
3781 * compile time and may be used for optimization.
3784 * true - packet match with eMPW batch attributes.
3785 * false - no match, eMPW should be restarted.
3787 static __rte_always_inline void
3788 mlx5_tx_idone_empw(struct mlx5_txq_data *restrict txq,
3789 struct mlx5_txq_local *restrict loc,
3792 unsigned int olx __rte_unused)
3794 assert(MLX5_TXOFF_CONFIG(INLINE));
3795 assert((len % MLX5_WSEG_SIZE) == 0);
3796 #ifdef MLX5_PMD_SOFT_COUNTERS
3797 /* Update sent data bytes counter. */
3798 txq->stats.obytes += slen;
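/*
 * Convert the inlined data size (a multiple of MLX5_WSEG_SIZE)
 * into the number of 16-byte segments and add two for the Control
 * and Ethernet Segments of the title WQEBB.
 */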
3802 len = len / MLX5_WSEG_SIZE + 2;
3803 loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | len);
3804 txq->wqe_ci += (len + 3) / 4;
3805 loc->wqe_free -= (len + 3) / 4;
3809 * The set of Tx burst functions for single-segment packets
3810 * without TSO and with Multi-Packet Writing feature support.
3811 * Supports all types of Tx offloads, except multi-packets
3814 * Uses MLX5_OPCODE_EMPW to build WQEs if possible and sends
3815 * as many packets per WQE as it can. If eMPW is not configured
3816 * or the packet can not be sent with eMPW (VLAN insertion) the
3817 * ordinary SEND opcode is used and only one packet is placed
* into the WQE.
3820 * Functions stop sending if they encounter a multi-segment
3821 * packet or a packet with TSO requested.
3823 * The routines are responsible for storing processed mbufs
3824 * into the elts ring buffer and updating elts_head if inlining
3825 * offload is requested. Otherwise the copying of mbufs to elts
3826 * can be postponed and completed at the end of the burst routine.
3829 * Pointer to TX queue structure.
3831 * Packets to transmit.
3833 * Number of packets in array.
3835 * Pointer to burst routine local context.
3837 * Configured Tx offloads mask. It is fully defined at
3838 * compile time and may be used for optimization.
3841 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3842 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3843 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
3844 * MLX5_TXCMP_CODE_TSO - TSO packet encountered.
3845 * MLX5_TXCMP_CODE_SINGLE - used inside functions set.
3846 * MLX5_TXCMP_CODE_EMPW - used inside functions set.
3848 * Local context variables updated.
3851 * The routine sends packets with MLX5_OPCODE_EMPW
3852 * without inlining, this is a dedicated optimized branch.
3853 * No VLAN insertion is supported.
3855 static __rte_always_inline enum mlx5_txcmp_code
3856 mlx5_tx_burst_empw_simple(struct mlx5_txq_data *restrict txq,
3857 struct rte_mbuf **restrict pkts,
3858 unsigned int pkts_n,
3859 struct mlx5_txq_local *restrict loc,
3863 * Subroutine is the part of mlx5_tx_burst_single()
3864 * and sends single-segment packet with eMPW opcode
3865 * without data inlining.
3867 assert(!MLX5_TXOFF_CONFIG(INLINE));
3868 assert(MLX5_TXOFF_CONFIG(EMPW));
3869 assert(loc->elts_free && loc->wqe_free);
3870 assert(pkts_n > loc->pkts_sent);
3871 static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
3872 pkts += loc->pkts_sent + 1;
3873 pkts_n -= loc->pkts_sent;
3875 struct mlx5_wqe_dseg *restrict dseg;
3876 struct mlx5_wqe_eseg *restrict eseg;
3877 enum mlx5_txcmp_code ret;
3878 unsigned int part, loop;
3879 unsigned int slen = 0;
3882 assert(NB_SEGS(loc->mbuf) == 1);
3883 part = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
3884 MLX5_MPW_MAX_PACKETS :
3885 MLX5_EMPW_MAX_PACKETS);
3886 if (unlikely(loc->elts_free < part)) {
3887 /* We do not have enough elts to store all mbufs. */
3888 if (unlikely(loc->elts_free < MLX5_EMPW_MIN_PACKETS))
3889 return MLX5_TXCMP_CODE_EXIT;
3890 /* But we are still able to send at least a minimal eMPW. */
3891 part = loc->elts_free;
3893 /* Check whether we have enough WQEs */
3894 if (unlikely(loc->wqe_free < ((2 + part + 3) / 4))) {
3895 if (unlikely(loc->wqe_free <
3896 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
3897 return MLX5_TXCMP_CODE_EXIT;
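/*
 * Each WQEBB provides four 16-byte segments; two of them are
 * taken by the Control and Ethernet Segments of the eMPW title,
 * the rest can carry one pointer Data Segment per packet.
 */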
3898 part = (loc->wqe_free * 4) - 2;
3900 if (likely(part > 1))
3901 rte_prefetch0(*pkts);
3902 loc->wqe_last = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3904 * Build eMPW title WQEBB:
3905 * - Control Segment, eMPW opcode
3906 * - Ethernet Segment, no inline
3908 mlx5_tx_cseg_init(txq, loc, loc->wqe_last, part + 2,
3909 MLX5_OPCODE_ENHANCED_MPSW, olx);
3910 mlx5_tx_eseg_none(txq, loc, loc->wqe_last,
3911 olx & ~MLX5_TXOFF_CONFIG_VLAN);
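/*
 * The VLAN configuration bit is masked out above: the Ethernet
 * Segment is shared by all packets of the eMPW batch, so no
 * per-packet VLAN insertion can be requested through it.
 */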
3912 eseg = &loc->wqe_last->eseg;
3913 dseg = &loc->wqe_last->dseg[0];
3915 /* Store the packet length for legacy MPW. */
3916 if (MLX5_TXOFF_CONFIG(MPW))
3917 eseg->mss = rte_cpu_to_be_16
3918 (rte_pktmbuf_data_len(loc->mbuf));
3920 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
3921 #ifdef MLX5_PMD_SOFT_COUNTERS
3922 /* Update sent data bytes counter. */
3927 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3929 if (unlikely(--loop == 0))
3931 loc->mbuf = *pkts++;
3932 if (likely(loop > 1))
3933 rte_prefetch0(*pkts);
3934 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
3936 * Unroll the completion code to avoid
3937 * returning variable value - it results in
3938 * unoptimized subsequent checking in the caller.
3940 if (ret == MLX5_TXCMP_CODE_MULTI) {
3942 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3943 if (unlikely(!loc->elts_free ||
3945 return MLX5_TXCMP_CODE_EXIT;
3946 return MLX5_TXCMP_CODE_MULTI;
3948 assert(NB_SEGS(loc->mbuf) == 1);
3949 if (ret == MLX5_TXCMP_CODE_TSO) {
3951 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3952 if (unlikely(!loc->elts_free ||
3954 return MLX5_TXCMP_CODE_EXIT;
3955 return MLX5_TXCMP_CODE_TSO;
3957 if (ret == MLX5_TXCMP_CODE_SINGLE) {
3959 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3960 if (unlikely(!loc->elts_free ||
3962 return MLX5_TXCMP_CODE_EXIT;
3963 return MLX5_TXCMP_CODE_SINGLE;
3965 if (ret != MLX5_TXCMP_CODE_EMPW) {
3968 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3969 return MLX5_TXCMP_CODE_ERROR;
3972 * Check whether packet parameters coincide
3973 * within assumed eMPW batch:
3974 * - check sum settings
3976 * - software parser settings
3977 * - packets length (legacy MPW only)
3979 if (!mlx5_tx_match_empw(txq, eseg, loc, dlen, olx)) {
3982 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3983 if (unlikely(!loc->elts_free ||
3985 return MLX5_TXCMP_CODE_EXIT;
3989 /* Packet attributes match, continue the same eMPW. */
3991 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3992 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3994 /* eMPW is built successfully, update loop parameters. */
3996 assert(pkts_n >= part);
3997 #ifdef MLX5_PMD_SOFT_COUNTERS
3998 /* Update sent data bytes counter. */
3999 txq->stats.obytes += slen;
4001 loc->elts_free -= part;
4002 loc->pkts_sent += part;
4003 txq->wqe_ci += (2 + part + 3) / 4;
4004 loc->wqe_free -= (2 + part + 3) / 4;
4006 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
4007 return MLX5_TXCMP_CODE_EXIT;
4008 loc->mbuf = *pkts++;
4009 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4010 if (unlikely(ret != MLX5_TXCMP_CODE_EMPW))
4012 /* Continue sending eMPW batches. */
4018 * The routine sends packets with MLX5_OPCODE_EMPW
4019 * with inlining, optionally supports VLAN insertion.
4021 static __rte_always_inline enum mlx5_txcmp_code
4022 mlx5_tx_burst_empw_inline(struct mlx5_txq_data *restrict txq,
4023 struct rte_mbuf **restrict pkts,
4024 unsigned int pkts_n,
4025 struct mlx5_txq_local *restrict loc,
4029 * Subroutine is the part of mlx5_tx_burst_single()
4030 * and sends single-segment packet with eMPW opcode
4031 * with data inlining.
4033 assert(MLX5_TXOFF_CONFIG(INLINE));
4034 assert(MLX5_TXOFF_CONFIG(EMPW));
4035 assert(loc->elts_free && loc->wqe_free);
4036 assert(pkts_n > loc->pkts_sent);
4037 static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
4038 pkts += loc->pkts_sent + 1;
4039 pkts_n -= loc->pkts_sent;
4041 struct mlx5_wqe_dseg *restrict dseg;
4042 struct mlx5_wqe_eseg *restrict eseg;
4043 enum mlx5_txcmp_code ret;
4044 unsigned int room, part, nlim;
4045 unsigned int slen = 0;
4047 assert(NB_SEGS(loc->mbuf) == 1);
4049 * Limits the amount of packets in one WQE
4050 * to improve the latency of CQE generation.
4052 nlim = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
4053 MLX5_MPW_INLINE_MAX_PACKETS :
4054 MLX5_EMPW_MAX_PACKETS);
4055 /* Check whether we have the minimal amount of WQEs. */
4056 if (unlikely(loc->wqe_free <
4057 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
4058 return MLX5_TXCMP_CODE_EXIT;
4059 if (likely(pkts_n > 1))
4060 rte_prefetch0(*pkts);
4061 loc->wqe_last = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4063 * Build eMPW title WQEBB:
4064 * - Control Segment, eMPW opcode, zero DS
4065 * - Ethernet Segment, no inline
4067 mlx5_tx_cseg_init(txq, loc, loc->wqe_last, 0,
4068 MLX5_OPCODE_ENHANCED_MPSW, olx);
4069 mlx5_tx_eseg_none(txq, loc, loc->wqe_last,
4070 olx & ~MLX5_TXOFF_CONFIG_VLAN);
4071 eseg = &loc->wqe_last->eseg;
4072 dseg = &loc->wqe_last->dseg[0];
4073 /* Store the packet length for legacy MPW. */
4074 if (MLX5_TXOFF_CONFIG(MPW))
4075 eseg->mss = rte_cpu_to_be_16
4076 (rte_pktmbuf_data_len(loc->mbuf));
4077 room = RTE_MIN(MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE,
4078 loc->wqe_free) * MLX5_WQE_SIZE -
4079 MLX5_WQE_CSEG_SIZE -
MLX5_WQE_ESEG_SIZE;
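/*
 * The remaining room is the byte budget for Data Segments of this
 * eMPW: the free WQEBBs capped by the maximal WQE size, minus the
 * Control and Ethernet Segments of the title WQEBB.
 */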
4081 /* Build WQE till we have space, packets and resources. */
4084 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
4085 uint8_t *dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
4088 assert(room >= MLX5_WQE_DSEG_SIZE);
4089 assert((room % MLX5_WQE_DSEG_SIZE) == 0);
4090 assert((uintptr_t)dseg < (uintptr_t)txq->wqes_end);
4092 * Some Tx offloads may cause an error if
4093 * packet is not long enough, check against
4094 * assumed minimal length.
4096 if (unlikely(dlen <= MLX5_ESEG_MIN_INLINE_SIZE)) {
4098 if (unlikely(!part))
4099 return MLX5_TXCMP_CODE_ERROR;
4101 * We have some successfully built
4102 * packet Data Segments to send.
4104 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4105 return MLX5_TXCMP_CODE_ERROR;
4107 /* Inline or not inline - that's the Question. */
4108 if (dlen > txq->inlen_empw)
4110 /* Inline entire packet, optional VLAN insertion. */
4111 tlen = sizeof(dseg->bcount) + dlen;
4112 if (MLX5_TXOFF_CONFIG(VLAN) &&
4113 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
4115 * The packet length must be checked in
4116 * mlx5_tx_able_to_empw() and the packet is
4117 * guaranteed to fit into the inline length.
4119 assert((dlen + sizeof(struct rte_vlan_hdr)) <=
4121 tlen += sizeof(struct rte_vlan_hdr);
4124 dseg = mlx5_tx_dseg_vlan(txq, loc, dseg,
4126 #ifdef MLX5_PMD_SOFT_COUNTERS
4127 /* Update sent data bytes counter. */
4128 slen += sizeof(struct rte_vlan_hdr);
4133 dseg = mlx5_tx_dseg_empw(txq, loc, dseg,
4136 tlen = RTE_ALIGN(tlen, MLX5_WSEG_SIZE);
4137 assert(room >= tlen);
4140 * Packet data are completely inlined,
4141 * free the packet immediately.
4143 rte_pktmbuf_free_seg(loc->mbuf);
4147 * Not inlinable VLAN packets are
4148 * processed outside of this routine.
4150 assert(room >= MLX5_WQE_DSEG_SIZE);
4151 if (MLX5_TXOFF_CONFIG(VLAN))
4152 assert(!(loc->mbuf->ol_flags &
4154 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
4155 /* We have to store mbuf in elts.*/
4156 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
4157 room -= MLX5_WQE_DSEG_SIZE;
4158 /* Ring buffer wraparound is checked at the loop end.*/
4161 #ifdef MLX5_PMD_SOFT_COUNTERS
4162 /* Update sent data bytes counter. */
4168 if (unlikely(!pkts_n || !loc->elts_free)) {
4170 * We have no resources/packets to
4171 * continue building descriptors.
4174 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4175 return MLX5_TXCMP_CODE_EXIT;
4177 loc->mbuf = *pkts++;
4178 if (likely(pkts_n > 1))
4179 rte_prefetch0(*pkts);
4180 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4182 * Unroll the completion code to avoid
4183 * returning variable value - it results in
4184 * unoptimized subsequent checking in the caller.
4186 if (ret == MLX5_TXCMP_CODE_MULTI) {
4188 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4189 if (unlikely(!loc->elts_free ||
4191 return MLX5_TXCMP_CODE_EXIT;
4192 return MLX5_TXCMP_CODE_MULTI;
4194 assert(NB_SEGS(loc->mbuf) == 1);
4195 if (ret == MLX5_TXCMP_CODE_TSO) {
4197 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4198 if (unlikely(!loc->elts_free ||
4200 return MLX5_TXCMP_CODE_EXIT;
4201 return MLX5_TXCMP_CODE_TSO;
4203 if (ret == MLX5_TXCMP_CODE_SINGLE) {
4205 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4206 if (unlikely(!loc->elts_free ||
4208 return MLX5_TXCMP_CODE_EXIT;
4209 return MLX5_TXCMP_CODE_SINGLE;
4211 if (ret != MLX5_TXCMP_CODE_EMPW) {
4214 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4215 return MLX5_TXCMP_CODE_ERROR;
4217 /* Check if we have minimal room left. */
4219 if (unlikely(!nlim || room < MLX5_WQE_DSEG_SIZE))
4222 * Check whether packet parameters coincide
4223 * within assumed eMPW batch:
4224 * - check sum settings
4226 * - software parser settings
4227 * - packets length (legacy MPW only)
4229 if (!mlx5_tx_match_empw(txq, eseg, loc, dlen, olx))
4231 /* Packet attributes match, continue the same eMPW. */
4232 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
4233 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
4236 * We get here to close an existing eMPW
4237 * session and start the new one.
4241 if (unlikely(!part))
4242 return MLX5_TXCMP_CODE_EXIT;
4243 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4244 if (unlikely(!loc->elts_free ||
4246 return MLX5_TXCMP_CODE_EXIT;
4247 /* Continue the loop with new eMPW session. */
4253 * The routine sends packets with ordinary MLX5_OPCODE_SEND.
4254 * Data inlining and VLAN insertion are supported.
4256 static __rte_always_inline enum mlx5_txcmp_code
4257 mlx5_tx_burst_single_send(struct mlx5_txq_data *restrict txq,
4258 struct rte_mbuf **restrict pkts,
4259 unsigned int pkts_n,
4260 struct mlx5_txq_local *restrict loc,
4264 * Subroutine is the part of mlx5_tx_burst_single()
4265 * and sends single-segment packet with SEND opcode.
4267 assert(loc->elts_free && loc->wqe_free);
4268 assert(pkts_n > loc->pkts_sent);
4269 pkts += loc->pkts_sent + 1;
4270 pkts_n -= loc->pkts_sent;
4272 struct mlx5_wqe *restrict wqe;
4273 enum mlx5_txcmp_code ret;
4275 assert(NB_SEGS(loc->mbuf) == 1);
4276 if (MLX5_TXOFF_CONFIG(INLINE)) {
4277 unsigned int inlen, vlan = 0;
4279 inlen = rte_pktmbuf_data_len(loc->mbuf);
4280 if (MLX5_TXOFF_CONFIG(VLAN) &&
4281 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
4282 vlan = sizeof(struct rte_vlan_hdr);
4284 static_assert((sizeof(struct rte_vlan_hdr) +
4285 sizeof(struct rte_ether_hdr)) ==
4286 MLX5_ESEG_MIN_INLINE_SIZE,
4287 "invalid min inline data size");
4290 * If inlining is enabled at configuration time,
4291 * the limit must be no less than the minimal size.
4292 * Otherwise we would have to add an extra check on the
4293 * data size to avoid crashes due to length overflow.
4295 assert(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
4296 if (inlen <= txq->inlen_send) {
4297 unsigned int seg_n, wqe_n;
4299 rte_prefetch0(rte_pktmbuf_mtod
4300 (loc->mbuf, uint8_t *));
4301 /* Check against minimal length. */
4302 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
4303 return MLX5_TXCMP_CODE_ERROR;
4305 * Completely inlined packet data WQE:
4306 * - Control Segment, SEND opcode
4307 * - Ethernet Segment, no VLAN insertion
4308 * - Data inlined, VLAN optionally inserted
4309 * - Alignment to MLX5_WSEG_SIZE
4310 * We have to estimate the number of WQEBBs
4312 seg_n = (inlen + 3 * MLX5_WSEG_SIZE -
4313 MLX5_ESEG_MIN_INLINE_SIZE +
4314 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
4315 /* Check if there are enough WQEBBs. */
4316 wqe_n = (seg_n + 3) / 4;
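/*
 * Editor's sizing sketch (illustrative only; it assumes
 * MLX5_WSEG_SIZE == 16, MLX5_ESEG_MIN_INLINE_SIZE == 18 and
 * four WSEGs per 64-byte WQEBB): for a fully inlined packet
 * with inlen == 128 bytes
 *   seg_n = (128 + 3 * 16 - 18 + 16 - 1) / 16 = 173 / 16 = 10 WSEGs
 *   wqe_n = (10 + 3) / 4 = 3 WQEBBs
 * so the check below needs at least 3 free WQEBBs in the SQ.
 */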
4317 if (wqe_n > loc->wqe_free)
4318 return MLX5_TXCMP_CODE_EXIT;
4319 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4320 loc->wqe_last = wqe;
4321 mlx5_tx_cseg_init(txq, loc, wqe, seg_n,
4322 MLX5_OPCODE_SEND, olx);
4323 mlx5_tx_eseg_data(txq, loc, wqe,
4324 vlan, inlen, 0, olx);
4325 txq->wqe_ci += wqe_n;
4326 loc->wqe_free -= wqe_n;
4328 * Packet data are completely inlined,
4329 * free the packet immediately.
4331 rte_pktmbuf_free_seg(loc->mbuf);
4332 } else if ((!MLX5_TXOFF_CONFIG(EMPW) ||
4333 MLX5_TXOFF_CONFIG(MPW)) &&
4336 * If minimal inlining is requested, the eMPW
4337 * feature should be disabled because the data is
4338 * inlined into the Ethernet Segment, which
4339 * cannot hold inlined data for eMPW since that
4340 * segment is shared by all packets.
4342 struct mlx5_wqe_dseg *restrict dseg;
4347 * The inline-mode settings require
4348 * inlining the specified amount of
4349 * data bytes into the Ethernet Segment.
4350 * We should check the free space in the
4351 * WQE ring buffer to inline only partially.
4353 assert(txq->inlen_send >= txq->inlen_mode);
4354 assert(inlen > txq->inlen_mode);
4355 assert(txq->inlen_mode >=
4356 MLX5_ESEG_MIN_INLINE_SIZE);
4358 * Check whether there are enough free WQEBBs:
4359 * - Control Segment
4360 * - Ethernet Segment
4361 * - First Segment of inlined Ethernet data
4362 * - ... data continued ...
4363 * - Finishing Data Segment of pointer type
4365 ds = (MLX5_WQE_CSEG_SIZE +
4366 MLX5_WQE_ESEG_SIZE +
4367 MLX5_WQE_DSEG_SIZE +
4368 txq->inlen_mode -
4369 MLX5_ESEG_MIN_INLINE_SIZE +
4370 MLX5_WQE_DSEG_SIZE +
4371 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
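/*
 * Editor's sizing sketch (illustrative only; it assumes all the
 * segment sizes above are 16 bytes and MLX5_ESEG_MIN_INLINE_SIZE
 * == 18): with txq->inlen_mode == 64 bytes to be inlined
 *   ds = (16 + 16 + 16 + 64 - 18 + 16 + 16 - 1) / 16 = 125 / 16 = 7
 * WSEGs, i.e. (7 + 3) / 4 = 2 WQEBBs checked against loc->wqe_free
 * right below.
 */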
4372 if (loc->wqe_free < ((ds + 3) / 4))
4373 return MLX5_TXCMP_CODE_EXIT;
4375 * Build the ordinary SEND WQE:
4376 * - Control Segment, SEND opcode
4377 * - Ethernet Segment, inline inlen_mode bytes
4378 * - Data Segment of pointer type
4380 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4381 loc->wqe_last = wqe;
4382 mlx5_tx_cseg_init(txq, loc, wqe, ds,
4383 MLX5_OPCODE_SEND, olx);
4384 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan,
4387 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
4388 txq->inlen_mode - vlan;
4389 inlen -= txq->inlen_mode;
4390 mlx5_tx_dseg_ptr(txq, loc, dseg,
4393 * The WQE is built, update the loop parameters
4394 * and go to the next packet.
4396 txq->wqe_ci += (ds + 3) / 4;
4397 loc->wqe_free -= (ds + 3) / 4;
4398 /* We have to store mbuf in elts.*/
4399 assert(MLX5_TXOFF_CONFIG(INLINE));
4400 txq->elts[txq->elts_head++ & txq->elts_m] =
4401 loc->mbuf;
4408 * Partially inlined packet data WQE: we have
4409 * some space in the title WQEBB, so we can fill it
4410 * with some packet data. It takes one WQEBB,
4411 * which is available, so no extra space check is needed:
4412 * - Control Segment, SEND opcode
4413 * - Ethernet Segment, no VLAN insertion
4414 * - MLX5_ESEG_MIN_INLINE_SIZE bytes of Data
4415 * - Data Segment, pointer type
4417 * We also get here if VLAN insertion is not
4418 * supported by the HW but inlining is enabled.
4420 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4421 loc->wqe_last = wqe;
4422 mlx5_tx_cseg_init(txq, loc, wqe, 4,
4423 MLX5_OPCODE_SEND, olx);
4424 mlx5_tx_eseg_dmin(txq, loc, wqe, vlan, olx);
4425 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
4426 MLX5_ESEG_MIN_INLINE_SIZE - vlan;
4428 * The length check is performed above, by
4429 * comparing with txq->inlen_send. We should
4430 * not get overflow here.
4432 assert(inlen > MLX5_ESEG_MIN_INLINE_SIZE);
4433 dlen = inlen - MLX5_ESEG_MIN_INLINE_SIZE;
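/*
 * Editor's worked example (illustrative only): the 18-byte minimal
 * inline part equals a 14-byte Ethernet header plus a 4-byte VLAN
 * tag (see the static_assert above). For a packet with
 * data_len == 100 and VLAN insertion requested (vlan == 4, so
 * inlen accounting for the tag is 104): the Ethernet Segment
 * carries the first 18 bytes of the resulting frame, dptr points
 * at data + 18 - 4 = data + 14, and dlen = 104 - 18 = 86 bytes
 * are referenced by the pointer Data Segment.
 */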
4434 mlx5_tx_dseg_ptr(txq, loc, &wqe->dseg[1],
4438 /* We have to store mbuf in elts.*/
4439 assert(MLX5_TXOFF_CONFIG(INLINE));
4440 txq->elts[txq->elts_head++ & txq->elts_m] =
4441 loc->mbuf;
4444 #ifdef MLX5_PMD_SOFT_COUNTERS
4445 /* Update sent data bytes counter. */
4446 txq->stats.obytes += vlan +
4447 rte_pktmbuf_data_len(loc->mbuf);
4451 * No inlining at all; it means that saving CPU cycles
4452 * was prioritized at configuration time, so we should not
4453 * copy any packet data into the WQE.
4455 * SEND WQE, one WQEBB:
4456 * - Control Segment, SEND opcode
4457 * - Ethernet Segment, optional VLAN, no inline
4458 * - Data Segment, pointer type
4460 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4461 loc->wqe_last = wqe;
4462 mlx5_tx_cseg_init(txq, loc, wqe, 3,
4463 MLX5_OPCODE_SEND, olx);
4464 mlx5_tx_eseg_none(txq, loc, wqe, olx);
4465 mlx5_tx_dseg_ptr
4466 (txq, loc, &wqe->dseg[0],
4467 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
4468 rte_pktmbuf_data_len(loc->mbuf), olx);
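/*
 * Editor's note (illustrative only): ds == 3 here - Control,
 * Ethernet and one pointer-type Data Segment - so this SEND WQE
 * occupies a single WQEBB (four WSEGs, cf. the static_assert on
 * MLX5_WQE_SIZE in mlx5_select_tx_function below).
 */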
4472 * We should not store the mbuf pointer in elts
4473 * if no inlining is configured; this is done
4474 * by the calling routine in a batch copy.
4476 assert(!MLX5_TXOFF_CONFIG(INLINE));
4478 #ifdef MLX5_PMD_SOFT_COUNTERS
4479 /* Update sent data bytes counter. */
4480 txq->stats.obytes += rte_pktmbuf_data_len(loc->mbuf);
4481 if (MLX5_TXOFF_CONFIG(VLAN) &&
4482 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
4483 txq->stats.obytes +=
4484 sizeof(struct rte_vlan_hdr);
4489 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
4490 return MLX5_TXCMP_CODE_EXIT;
4491 loc->mbuf = *pkts++;
4493 rte_prefetch0(*pkts);
4494 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4495 if (unlikely(ret != MLX5_TXCMP_CODE_SINGLE))
4501 static __rte_always_inline enum mlx5_txcmp_code
4502 mlx5_tx_burst_single(struct mlx5_txq_data *restrict txq,
4503 struct rte_mbuf **restrict pkts,
4504 unsigned int pkts_n,
4505 struct mlx5_txq_local *restrict loc,
4508 enum mlx5_txcmp_code ret;
4510 ret = mlx5_tx_able_to_empw(txq, loc, olx, false);
4511 if (ret == MLX5_TXCMP_CODE_SINGLE)
4513 assert(ret == MLX5_TXCMP_CODE_EMPW);
4515 /* Optimize for inline/no inline eMPW send. */
4516 ret = (MLX5_TXOFF_CONFIG(INLINE)) ?
4517 mlx5_tx_burst_empw_inline
4518 (txq, pkts, pkts_n, loc, olx) :
4519 mlx5_tx_burst_empw_simple
4520 (txq, pkts, pkts_n, loc, olx);
4521 if (ret != MLX5_TXCMP_CODE_SINGLE)
4523 /* The resources to send one packet should remain. */
4524 assert(loc->elts_free && loc->wqe_free);
4526 ret = mlx5_tx_burst_single_send(txq, pkts, pkts_n, loc, olx);
4527 assert(ret != MLX5_TXCMP_CODE_SINGLE);
4528 if (ret != MLX5_TXCMP_CODE_EMPW)
4530 /* The resources to send one packet should remain. */
4531 assert(loc->elts_free && loc->wqe_free);
4536 * DPDK Tx callback template. This is a configured template
4537 * used to generate routines optimized for a specified offload setup.
4538 * One of these generated functions is chosen at SQ configuration
4539 * time.
4541 * @param txq
4542 * Generic pointer to TX queue structure.
4543 * @param pkts
4544 * Packets to transmit.
4545 * @param pkts_n
4546 * Number of packets in array.
4547 * @param olx
4548 * Configured offloads mask, presents the bits of MLX5_TXOFF_CONFIG_xxx
4549 * values. Should be static to take compile-time static configuration
4550 * advantages.
4552 * @return
4553 * Number of packets successfully transmitted (<= pkts_n).
4555 static __rte_always_inline uint16_t
4556 mlx5_tx_burst_tmpl(struct mlx5_txq_data *restrict txq,
4557 struct rte_mbuf **restrict pkts,
4561 struct mlx5_txq_local loc;
4562 enum mlx5_txcmp_code ret;
4565 assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
4566 assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
4567 if (unlikely(!pkts_n))
4571 loc.wqe_last = NULL;
4574 loc.pkts_loop = loc.pkts_sent;
4576 * Check if there are some CQEs, if any:
4577 * - process any encountered errors
4578 * - process the completed WQEs
4579 * - free related mbufs
4580 * - doorbell the NIC about processed CQEs
4582 rte_prefetch0(*(pkts + loc.pkts_sent));
4583 mlx5_tx_handle_completion(txq, olx);
4585 * Calculate the number of available resources - elts and WQEs.
4586 * There are two possible different scenarios:
4587 * - no data inlining into WQEs, one WQEBB may contain up to
4588 * four packets, in this case elts become the scarce resource
4589 * - data inlining into WQEs, one packet may require multiple
4590 * WQEBBs, so the WQEs become the limiting factor.
4592 assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
4593 loc.elts_free = txq->elts_s -
4594 (uint16_t)(txq->elts_head - txq->elts_tail);
4595 assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
4596 loc.wqe_free = txq->wqe_s -
4597 (uint16_t)(txq->wqe_ci - txq->wqe_pi);
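/*
 * Editor's note (illustrative only): the uint16_t cast keeps the
 * head/tail difference correct across counter wraparound. For
 * example, with txq->elts_s == 512, elts_head == 3 (already
 * wrapped) and elts_tail == 65532, (uint16_t)(3 - 65532) == 7
 * entries are in flight, so loc.elts_free == 512 - 7 == 505.
 * The same applies to the wqe_ci/wqe_pi computation above.
 */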
4598 if (unlikely(!loc.elts_free || !loc.wqe_free))
4602 * Fetch the packet from the array. Usually this is
4603 * the first packet in a series of multi/single
4604 * segment packets.
4606 loc.mbuf = *(pkts + loc.pkts_sent);
4607 /* Dedicated branch for multi-segment packets. */
4608 if (MLX5_TXOFF_CONFIG(MULTI) &&
4609 unlikely(NB_SEGS(loc.mbuf) > 1)) {
4611 * Multi-segment packet encountered.
4612 * Hardware is able to process it only
4613 * with SEND/TSO opcodes, one packet
4614 * per WQE, do it in a dedicated routine.
4617 assert(loc.pkts_sent >= loc.pkts_copy);
4618 part = loc.pkts_sent - loc.pkts_copy;
4619 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
4621 * There are some single-segment mbufs not
4622 * stored in elts. The mbufs must be in the
4623 * same order as the WQEs, so we must copy the
4624 * mbufs to elts here, before the mbufs of the
4625 * coming multi-segment packet are appended.
4627 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy,
4629 loc.pkts_copy = loc.pkts_sent;
4631 assert(pkts_n > loc.pkts_sent);
4632 ret = mlx5_tx_burst_mseg(txq, pkts, pkts_n, &loc, olx);
4633 if (!MLX5_TXOFF_CONFIG(INLINE))
4634 loc.pkts_copy = loc.pkts_sent;
4636 * These return code checks are supposed
4637 * to be optimized out due to routine inlining.
4639 if (ret == MLX5_TXCMP_CODE_EXIT) {
4641 * The routine returns this code when
4642 * all packets are sent or there are not
4643 * enough resources to complete the request.
4647 if (ret == MLX5_TXCMP_CODE_ERROR) {
4649 * The routine returns this code when
4650 * some error in the incoming packet format was detected.
4653 txq->stats.oerrors++;
4656 if (ret == MLX5_TXCMP_CODE_SINGLE) {
4658 * A single-segment packet was encountered
4659 * in the array, try to send it in the
4660 * most optimized way, possibly engaging eMPW.
4662 goto enter_send_single;
4664 if (MLX5_TXOFF_CONFIG(TSO) &&
4665 ret == MLX5_TXCMP_CODE_TSO) {
4667 * The single-segment TSO packet was
4668 * encountered in the array.
4670 goto enter_send_tso;
4672 /* We must not get here. Something is going wrong. */
4674 txq->stats.oerrors++;
4677 /* Dedicated branch for single-segment TSO packets. */
4678 if (MLX5_TXOFF_CONFIG(TSO) &&
4679 unlikely(loc.mbuf->ol_flags & PKT_TX_TCP_SEG)) {
4681 * TSO might require a special way of inlining
4682 * (dedicated parameters) and is sent with
4683 * the MLX5_OPCODE_TSO opcode only, so handle it
4684 * in a dedicated branch.
4687 assert(NB_SEGS(loc.mbuf) == 1);
4688 assert(pkts_n > loc.pkts_sent);
4689 ret = mlx5_tx_burst_tso(txq, pkts, pkts_n, &loc, olx);
4691 * These return code checks are supposed
4692 * to be optimized out due to routine inlining.
4694 if (ret == MLX5_TXCMP_CODE_EXIT)
4696 if (ret == MLX5_TXCMP_CODE_ERROR) {
4697 txq->stats.oerrors++;
4700 if (ret == MLX5_TXCMP_CODE_SINGLE)
4701 goto enter_send_single;
4702 if (MLX5_TXOFF_CONFIG(MULTI) &&
4703 ret == MLX5_TXCMP_CODE_MULTI) {
4705 * The multi-segment packet was
4706 * encountered in the array.
4708 goto enter_send_multi;
4710 /* We must not get here. Something is going wrong. */
4712 txq->stats.oerrors++;
4716 * The dedicated branch for single-segment packets
4717 * without TSO. Often these can be sent using
4718 * MLX5_OPCODE_EMPW with multiple packets in one WQE.
4719 * The routine builds the WQEs until it encounters
4720 * a TSO or multi-segment packet (in case these
4721 * offloads are requested at SQ configuration time).
4724 assert(pkts_n > loc.pkts_sent);
4725 ret = mlx5_tx_burst_single(txq, pkts, pkts_n, &loc, olx);
4727 * These return code checks are supposed
4728 * to be optimized out due to routine inlining.
4730 if (ret == MLX5_TXCMP_CODE_EXIT)
4732 if (ret == MLX5_TXCMP_CODE_ERROR) {
4733 txq->stats.oerrors++;
4736 if (MLX5_TXOFF_CONFIG(MULTI) &&
4737 ret == MLX5_TXCMP_CODE_MULTI) {
4739 * The multi-segment packet was
4740 * encountered in the array.
4742 goto enter_send_multi;
4744 if (MLX5_TXOFF_CONFIG(TSO) &&
4745 ret == MLX5_TXCMP_CODE_TSO) {
4747 * The single-segment TSO packet was
4748 * encountered in the array.
4750 goto enter_send_tso;
4752 /* We must not get here. Something is going wrong. */
4754 txq->stats.oerrors++;
4758 * The main Tx loop is completed, do the rest:
4759 * - set the completion request if thresholds are reached
4760 * - doorbell the hardware
4761 * - copy the rest of the mbufs to elts (if any)
4763 assert(MLX5_TXOFF_CONFIG(INLINE) || loc.pkts_sent >= loc.pkts_copy);
4764 /* Take a shortcut if nothing is sent. */
4765 if (unlikely(loc.pkts_sent == loc.pkts_loop))
4767 /* Request CQE generation if limits are reached. */
4768 mlx5_tx_request_completion(txq, &loc, olx);
4770 * Ring the QP doorbell immediately after WQE building completion
4771 * to improve latencies. The purely software-related data treatment
4772 * can be completed after the doorbell. Tx CQEs for this SQ are
4773 * processed in this thread only by polling.
4775 * The rdma core library can map the doorbell register in two ways,
4776 * depending on the environment variable "MLX5_SHUT_UP_BF":
4778 * - as regular cached memory, if the variable is either missing or
4779 * set to zero. This type of mapping may cause significant
4780 * doorbell register writing latency and requires an explicit
4781 * memory write barrier to mitigate this issue and prevent
4782 * write combining.
4784 * - as non-cached memory, if the variable is present and set to
4785 * a non-zero value. This type of mapping may cause a performance
4786 * impact under heavy loading conditions but the explicit write
4787 * memory barrier is not required and it may improve core
4788 * performance.
4790 * - the legacy behaviour (prior to the 19.08 release) was to use some
4791 * heuristics to decide whether a write memory barrier should
4792 * be performed. This behaviour is supported by specifying
4793 * tx_db_nc=2; the write barrier is skipped if the application
4794 * provides the full recommended burst of packets, as it
4795 * supposes the next packets are coming and the write barrier
4796 * will be issued on the next burst (after descriptor writing,
4797 * before doorbell ringing).
4799 mlx5_tx_dbrec_cond_wmb(txq, loc.wqe_last, !txq->db_nc &&
4800 (!txq->db_heu || pkts_n % MLX5_TX_DEFAULT_BURST));
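/*
 * Editor's note (illustrative only): the last argument above decides
 * whether the explicit write memory barrier is used:
 * - txq->db_nc != 0 (non-cached doorbell mapping): the condition is
 *   false, no explicit barrier is issued;
 * - txq->db_nc == 0 and txq->db_heu == 0: the condition is true, the
 *   barrier is always issued;
 * - txq->db_nc == 0 and txq->db_heu != 0 (the tx_db_nc=2 heuristic):
 *   the barrier is skipped only when pkts_n is an exact multiple of
 *   MLX5_TX_DEFAULT_BURST, i.e. the application provided the full
 *   recommended burst and more packets are expected to follow.
 */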
4801 /* Not all of the mbufs may be stored into elts yet. */
4802 part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent - loc.pkts_copy;
4803 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
4805 * There are some single-segment mbufs not stored in elts.
4806 * This can happen only if the last packet was single-segment.
4807 * The copying is gathered into one place because it is
4808 * a good opportunity to optimize it with SIMD.
4809 * Unfortunately, if inlining is enabled, gaps in the
4810 * pointer array may happen due to early freeing of the
4811 * single-segment mbufs.
4814 loc.pkts_copy = loc.pkts_sent;
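/*
 * Editor's example (illustrative only): with inlining disabled,
 * suppose 7 single-segment packets were sent since the last copy
 * point; then part == 7 and the single mlx5_tx_copy_elts() call
 * above stores those 7 mbuf pointers into txq->elts[] in one
 * batch, after which pkts_copy catches up with pkts_sent.
 */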
4816 assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
4817 assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
4818 if (pkts_n > loc.pkts_sent) {
4820 * If the burst size is large there might not be enough CQEs
4821 * fetched from the completion queue and not enough resources
4822 * freed to send all the packets.
4827 #ifdef MLX5_PMD_SOFT_COUNTERS
4828 /* Increment sent packets counter. */
4829 txq->stats.opackets += loc.pkts_sent;
4831 return loc.pkts_sent;
4834 /* Generate routines with Enhanced Multi-Packet Write support. */
4835 MLX5_TXOFF_DECL(full_empw,
4836 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_EMPW)
4838 MLX5_TXOFF_DECL(none_empw,
4839 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW)
4841 MLX5_TXOFF_DECL(md_empw,
4842 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4844 MLX5_TXOFF_DECL(mt_empw,
4845 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4846 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4848 MLX5_TXOFF_DECL(mtsc_empw,
4849 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4850 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4851 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4853 MLX5_TXOFF_DECL(mti_empw,
4854 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4855 MLX5_TXOFF_CONFIG_INLINE |
4856 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4858 MLX5_TXOFF_DECL(mtv_empw,
4859 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4860 MLX5_TXOFF_CONFIG_VLAN |
4861 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4863 MLX5_TXOFF_DECL(mtiv_empw,
4864 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4865 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4866 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4868 MLX5_TXOFF_DECL(sc_empw,
4869 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4870 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4872 MLX5_TXOFF_DECL(sci_empw,
4873 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4874 MLX5_TXOFF_CONFIG_INLINE |
4875 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4877 MLX5_TXOFF_DECL(scv_empw,
4878 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4879 MLX5_TXOFF_CONFIG_VLAN |
4880 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4882 MLX5_TXOFF_DECL(sciv_empw,
4883 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4884 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4885 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4887 MLX5_TXOFF_DECL(i_empw,
4888 MLX5_TXOFF_CONFIG_INLINE |
4889 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4891 MLX5_TXOFF_DECL(v_empw,
4892 MLX5_TXOFF_CONFIG_VLAN |
4893 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4895 MLX5_TXOFF_DECL(iv_empw,
4896 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4897 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4899 /* Generate routines without Enhanced Multi-Packet Write support. */
4900 MLX5_TXOFF_DECL(full,
4901 MLX5_TXOFF_CONFIG_FULL)
4903 MLX5_TXOFF_DECL(none,
4904 MLX5_TXOFF_CONFIG_NONE)
4907 MLX5_TXOFF_CONFIG_METADATA)
4910 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4911 MLX5_TXOFF_CONFIG_METADATA)
4913 MLX5_TXOFF_DECL(mtsc,
4914 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4915 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4916 MLX5_TXOFF_CONFIG_METADATA)
4918 MLX5_TXOFF_DECL(mti,
4919 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4920 MLX5_TXOFF_CONFIG_INLINE |
4921 MLX5_TXOFF_CONFIG_METADATA)
4924 MLX5_TXOFF_DECL(mtv,
4925 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4926 MLX5_TXOFF_CONFIG_VLAN |
4927 MLX5_TXOFF_CONFIG_METADATA)
4930 MLX5_TXOFF_DECL(mtiv,
4931 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4932 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4933 MLX5_TXOFF_CONFIG_METADATA)
4936 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4937 MLX5_TXOFF_CONFIG_METADATA)
4939 MLX5_TXOFF_DECL(sci,
4940 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4941 MLX5_TXOFF_CONFIG_INLINE |
4942 MLX5_TXOFF_CONFIG_METADATA)
4945 MLX5_TXOFF_DECL(scv,
4946 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4947 MLX5_TXOFF_CONFIG_VLAN |
4948 MLX5_TXOFF_CONFIG_METADATA)
4951 MLX5_TXOFF_DECL(sciv,
4952 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4953 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4954 MLX5_TXOFF_CONFIG_METADATA)
4957 MLX5_TXOFF_CONFIG_INLINE |
4958 MLX5_TXOFF_CONFIG_METADATA)
4961 MLX5_TXOFF_CONFIG_VLAN |
4962 MLX5_TXOFF_CONFIG_METADATA)
4965 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4966 MLX5_TXOFF_CONFIG_METADATA)
4969 * Generate routines with Legacy Multi-Packet Write support.
4970 * This mode is supported by ConnectX-4 Lx only and imposes
4971 * offload limitations; the following are not supported:
4972 * - ACL/Flows (metadata are becoming meaningless)
4973 * - WQE Inline headers
4974 * - SRIOV (E-Switch offloads)
4976 * - tunnel encapsulation/decapsulation
4979 MLX5_TXOFF_DECL(none_mpw,
4980 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW |
4981 MLX5_TXOFF_CONFIG_MPW)
4983 MLX5_TXOFF_DECL(mci_mpw,
4984 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
4985 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
4986 MLX5_TXOFF_CONFIG_MPW)
4988 MLX5_TXOFF_DECL(mc_mpw,
4989 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
4990 MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW)
4992 MLX5_TXOFF_DECL(i_mpw,
4993 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
4994 MLX5_TXOFF_CONFIG_MPW)
4997 * Array of declared and compiled Tx burst functions and the corresponding
4998 * supported offload sets. The array is used to select the Tx burst
4999 * function for the specified offload set at Tx queue configuration time.
5002 eth_tx_burst_t func;
5005 MLX5_TXOFF_INFO(full_empw,
5006 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5007 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5008 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5009 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5011 MLX5_TXOFF_INFO(none_empw,
5012 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW)
5014 MLX5_TXOFF_INFO(md_empw,
5015 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5017 MLX5_TXOFF_INFO(mt_empw,
5018 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5019 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5021 MLX5_TXOFF_INFO(mtsc_empw,
5022 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5023 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5024 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5026 MLX5_TXOFF_INFO(mti_empw,
5027 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5028 MLX5_TXOFF_CONFIG_INLINE |
5029 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5031 MLX5_TXOFF_INFO(mtv_empw,
5032 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5033 MLX5_TXOFF_CONFIG_VLAN |
5034 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5036 MLX5_TXOFF_INFO(mtiv_empw,
5037 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5038 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5039 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5041 MLX5_TXOFF_INFO(sc_empw,
5042 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5043 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5045 MLX5_TXOFF_INFO(sci_empw,
5046 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5047 MLX5_TXOFF_CONFIG_INLINE |
5048 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5050 MLX5_TXOFF_INFO(scv_empw,
5051 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5052 MLX5_TXOFF_CONFIG_VLAN |
5053 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5055 MLX5_TXOFF_INFO(sciv_empw,
5056 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5057 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5058 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5060 MLX5_TXOFF_INFO(i_empw,
5061 MLX5_TXOFF_CONFIG_INLINE |
5062 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5064 MLX5_TXOFF_INFO(v_empw,
5065 MLX5_TXOFF_CONFIG_VLAN |
5066 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5068 MLX5_TXOFF_INFO(iv_empw,
5069 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5070 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5072 MLX5_TXOFF_INFO(full,
5073 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5074 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5075 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5076 MLX5_TXOFF_CONFIG_METADATA)
5078 MLX5_TXOFF_INFO(none,
5079 MLX5_TXOFF_CONFIG_NONE)
5082 MLX5_TXOFF_CONFIG_METADATA)
5085 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5086 MLX5_TXOFF_CONFIG_METADATA)
5088 MLX5_TXOFF_INFO(mtsc,
5089 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5090 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5091 MLX5_TXOFF_CONFIG_METADATA)
5093 MLX5_TXOFF_INFO(mti,
5094 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5095 MLX5_TXOFF_CONFIG_INLINE |
5096 MLX5_TXOFF_CONFIG_METADATA)
5098 MLX5_TXOFF_INFO(mtv,
5099 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5100 MLX5_TXOFF_CONFIG_VLAN |
5101 MLX5_TXOFF_CONFIG_METADATA)
5103 MLX5_TXOFF_INFO(mtiv,
5104 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5105 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5106 MLX5_TXOFF_CONFIG_METADATA)
5109 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5110 MLX5_TXOFF_CONFIG_METADATA)
5112 MLX5_TXOFF_INFO(sci,
5113 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5114 MLX5_TXOFF_CONFIG_INLINE |
5115 MLX5_TXOFF_CONFIG_METADATA)
5117 MLX5_TXOFF_INFO(scv,
5118 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5119 MLX5_TXOFF_CONFIG_VLAN |
5120 MLX5_TXOFF_CONFIG_METADATA)
5122 MLX5_TXOFF_INFO(sciv,
5123 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5124 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5125 MLX5_TXOFF_CONFIG_METADATA)
5128 MLX5_TXOFF_CONFIG_INLINE |
5129 MLX5_TXOFF_CONFIG_METADATA)
5132 MLX5_TXOFF_CONFIG_VLAN |
5133 MLX5_TXOFF_CONFIG_METADATA)
5136 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5137 MLX5_TXOFF_CONFIG_METADATA)
5139 MLX5_TXOFF_INFO(none_mpw,
5140 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW |
5141 MLX5_TXOFF_CONFIG_MPW)
5143 MLX5_TXOFF_INFO(mci_mpw,
5144 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5145 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5146 MLX5_TXOFF_CONFIG_MPW)
5148 MLX5_TXOFF_INFO(mc_mpw,
5149 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5150 MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW)
5152 MLX5_TXOFF_INFO(i_mpw,
5153 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5154 MLX5_TXOFF_CONFIG_MPW)
5158 * Configure the Tx function to use. The routine checks the configured
5159 * Tx offloads for the device and selects the appropriate Tx burst
5160 * routine. There are multiple Tx burst routines compiled from
5161 * the same template in the most optimal way for the dedicated
5162 * preconfigured offload set.
5164 * @param dev
5165 * Pointer to the Ethernet device structure.
5167 * @return
5168 * Pointer to the selected Tx burst function.
5171 mlx5_select_tx_function(struct rte_eth_dev *dev)
5173 struct mlx5_priv *priv = dev->data->dev_private;
5174 struct mlx5_dev_config *config = &priv->config;
5175 uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
5176 unsigned int diff = 0, olx = 0, i, m;
5178 static_assert(MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE <=
5179 MLX5_DSEG_MAX, "invalid WQE max size");
5180 static_assert(MLX5_WQE_CSEG_SIZE == MLX5_WSEG_SIZE,
5181 "invalid WQE Control Segment size");
5182 static_assert(MLX5_WQE_ESEG_SIZE == MLX5_WSEG_SIZE,
5183 "invalid WQE Ethernet Segment size");
5184 static_assert(MLX5_WQE_DSEG_SIZE == MLX5_WSEG_SIZE,
5185 "invalid WQE Data Segment size");
5186 static_assert(MLX5_WQE_SIZE == 4 * MLX5_WSEG_SIZE,
5187 "invalid WQE size");
5189 if (tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
5190 /* We should support Multi-Segment Packets. */
5191 olx |= MLX5_TXOFF_CONFIG_MULTI;
5193 if (tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
5194 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
5195 DEV_TX_OFFLOAD_GRE_TNL_TSO |
5196 DEV_TX_OFFLOAD_IP_TNL_TSO |
5197 DEV_TX_OFFLOAD_UDP_TNL_TSO)) {
5198 /* We should support TCP Send Offload. */
5199 olx |= MLX5_TXOFF_CONFIG_TSO;
5201 if (tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO |
5202 DEV_TX_OFFLOAD_UDP_TNL_TSO |
5203 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
5204 /* We should support Software Parser for Tunnels. */
5205 olx |= MLX5_TXOFF_CONFIG_SWP;
5207 if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
5208 DEV_TX_OFFLOAD_UDP_CKSUM |
5209 DEV_TX_OFFLOAD_TCP_CKSUM |
5210 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
5211 /* We should support IP/TCP/UDP Checksums. */
5212 olx |= MLX5_TXOFF_CONFIG_CSUM;
5214 if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT) {
5215 /* We should support VLAN insertion. */
5216 olx |= MLX5_TXOFF_CONFIG_VLAN;
5218 if (priv->txqs_n && (*priv->txqs)[0]) {
5219 struct mlx5_txq_data *txd = (*priv->txqs)[0];
5221 if (txd->inlen_send) {
5223 * Check the data inline requirements. Data inlining
5224 * is enabled on a per-device basis, so we can check
5225 * the first Tx queue only.
5227 * If the device does not support VLAN insertion in the WQE
5228 * and some queues are requested to perform VLAN
5229 * insertion offload, then inlining must be enabled.
5231 olx |= MLX5_TXOFF_CONFIG_INLINE;
5234 if (config->mps == MLX5_MPW_ENHANCED &&
5235 config->txq_inline_min <= 0) {
5237 * The NIC supports Enhanced Multi-Packet Write
5238 * and does not require minimal inline data.
5240 olx |= MLX5_TXOFF_CONFIG_EMPW;
5242 if (rte_flow_dynf_metadata_avail()) {
5243 /* We should support Flow metadata. */
5244 olx |= MLX5_TXOFF_CONFIG_METADATA;
5246 if (config->mps == MLX5_MPW) {
5248 * The NIC supports Legacy Multi-Packet Write.
5249 * The MLX5_TXOFF_CONFIG_MPW controls the
5250 * descriptor building method in combination
5251 * with MLX5_TXOFF_CONFIG_EMPW.
5253 if (!(olx & (MLX5_TXOFF_CONFIG_TSO |
5254 MLX5_TXOFF_CONFIG_SWP |
5255 MLX5_TXOFF_CONFIG_VLAN |
5256 MLX5_TXOFF_CONFIG_METADATA)))
5257 olx |= MLX5_TXOFF_CONFIG_EMPW |
5258 MLX5_TXOFF_CONFIG_MPW;
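/*
 * Editor's worked example (illustrative only; it assumes a device
 * supporting eMPW, inlining disabled and the flow metadata dynamic
 * field not registered): requesting DEV_TX_OFFLOAD_IPV4_CKSUM |
 * DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_VLAN_INSERT yields
 *   olx = CSUM | VLAN | EMPW.
 * The scan below rejects entries that lack CSUM or VLAN and entries
 * that add INLINE, so scv_empw (SWP | CSUM | VLAN | METADATA | EMPW)
 * is selected with the fewest (two) not-requested offloads.
 */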
5261 * Scan the routine table to find the minimal
5262 * routine satisfying the requested offloads.
5264 m = RTE_DIM(txoff_func);
5265 for (i = 0; i < RTE_DIM(txoff_func); i++) {
5268 tmp = txoff_func[i].olx;
5270 /* Meets requested offloads exactly.*/
5274 if ((tmp & olx) != olx) {
5275 /* Does not meet requested offloads at all. */
5278 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_EMPW)
5279 /* Do not enable eMPW if not configured. */
5281 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_INLINE)
5282 /* Do not enable inlining if not configured. */
5285 * Some routine meets the requirements.
5286 * Check whether it has the minimal number
5287 * of not-requested offloads.
5289 tmp = __builtin_popcountl(tmp & ~olx);
5290 if (m >= RTE_DIM(txoff_func) || tmp < diff) {
5291 /* First or better match, save and continue. */
5297 tmp = txoff_func[i].olx ^ txoff_func[m].olx;
5298 if (__builtin_ffsl(txoff_func[i].olx & tmp) <
5299 __builtin_ffsl(txoff_func[m].olx & tmp)) {
5300 /* Lighter not requested offload. */
5305 if (m >= RTE_DIM(txoff_func)) {
5306 DRV_LOG(DEBUG, "port %u has no selected Tx function"
5307 " for requested offloads %04X",
5308 dev->data->port_id, olx);
5311 DRV_LOG(DEBUG, "port %u has selected Tx function"
5312 " supporting offloads %04X/%04X",
5313 dev->data->port_id, olx, txoff_func[m].olx);
5314 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MULTI)
5315 DRV_LOG(DEBUG, "\tMULTI (multi segment)");
5316 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_TSO)
5317 DRV_LOG(DEBUG, "\tTSO (TCP send offload)");
5318 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_SWP)
5319 DRV_LOG(DEBUG, "\tSWP (software parser)");
5320 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_CSUM)
5321 DRV_LOG(DEBUG, "\tCSUM (checksum offload)");
5322 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_INLINE)
5323 DRV_LOG(DEBUG, "\tINLIN (inline data)");
5324 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_VLAN)
5325 DRV_LOG(DEBUG, "\tVLANI (VLAN insertion)");
5326 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_METADATA)
5327 DRV_LOG(DEBUG, "\tMETAD (tx Flow metadata)");
5328 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_EMPW) {
5329 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MPW)
5330 DRV_LOG(DEBUG, "\tMPW (Legacy MPW)");
5332 DRV_LOG(DEBUG, "\tEMPW (Enhanced MPW)");
5334 return txoff_func[m].func;