1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015-2019 Mellanox Technologies, Ltd
11 #include <rte_mempool.h>
12 #include <rte_prefetch.h>
13 #include <rte_common.h>
14 #include <rte_branch_prediction.h>
15 #include <rte_ether.h>
16 #include <rte_cycles.h>
19 #include <mlx5_glue.h>
20 #include <mlx5_devx_cmds.h>
22 #include <mlx5_common.h>
24 #include "mlx5_defs.h"
27 #include "mlx5_utils.h"
28 #include "mlx5_rxtx.h"
29 #include "mlx5_autoconf.h"
31 /* TX burst subroutines return codes. */
32 enum mlx5_txcmp_code {
33 MLX5_TXCMP_CODE_EXIT = 0,
34 MLX5_TXCMP_CODE_ERROR,
35 MLX5_TXCMP_CODE_SINGLE,
36 MLX5_TXCMP_CODE_MULTI,
42 * These defines are used to configure the Tx burst routine option set
43 * supported at compile time. Options that are not specified are optimized
44 * out because the corresponding if conditions can be evaluated at compile
45 * time. Offloads with a bigger runtime check overhead (i.e. requiring more
46 * CPU cycles to skip) should have a bigger index - this allows selecting
47 * the best matching routine when there is no exact match and some offloads
48 * are not actually requested.
50 #define MLX5_TXOFF_CONFIG_MULTI (1u << 0) /* Multi-segment packets.*/
51 #define MLX5_TXOFF_CONFIG_TSO (1u << 1) /* TCP Segmentation Offload (TSO) supported. */
52 #define MLX5_TXOFF_CONFIG_SWP (1u << 2) /* Tunnels/SW Parser offloads.*/
53 #define MLX5_TXOFF_CONFIG_CSUM (1u << 3) /* Check Sums offloaded. */
54 #define MLX5_TXOFF_CONFIG_INLINE (1u << 4) /* Data inlining supported. */
55 #define MLX5_TXOFF_CONFIG_VLAN (1u << 5) /* VLAN insertion supported.*/
56 #define MLX5_TXOFF_CONFIG_METADATA (1u << 6) /* Flow metadata. */
57 #define MLX5_TXOFF_CONFIG_EMPW (1u << 8) /* Enhanced MPW supported.*/
58 #define MLX5_TXOFF_CONFIG_MPW (1u << 9) /* Legacy MPW supported.*/
59 #define MLX5_TXOFF_CONFIG_TXPP (1u << 10) /* Scheduling on timestamp.*/
61 /* The most common offload groups. */
62 #define MLX5_TXOFF_CONFIG_NONE 0
63 #define MLX5_TXOFF_CONFIG_FULL (MLX5_TXOFF_CONFIG_MULTI | \
64 MLX5_TXOFF_CONFIG_TSO | \
65 MLX5_TXOFF_CONFIG_SWP | \
66 MLX5_TXOFF_CONFIG_CSUM | \
67 MLX5_TXOFF_CONFIG_INLINE | \
68 MLX5_TXOFF_CONFIG_VLAN | \
69 MLX5_TXOFF_CONFIG_METADATA)
71 #define MLX5_TXOFF_CONFIG(mask) (olx & MLX5_TXOFF_CONFIG_##mask)
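/*
 * Illustrative note (not from the original sources): every MLX5_TXOFF_DECL()
 * instance passes its literal 'olx' mask straight into mlx5_tx_burst_tmpl(),
 * so inside each generated routine a guard such as
 *
 *	if (MLX5_TXOFF_CONFIG(TSO))	// expands to (olx & MLX5_TXOFF_CONFIG_TSO)
 *		... TSO handling ...
 *
 * folds to a compile-time constant and the whole branch is dropped for
 * routines declared without the TSO bit. A hypothetical declaration like
 * MLX5_TXOFF_DECL(tso_only, MLX5_TXOFF_CONFIG_TSO) (the name is made up for
 * this example) would keep only the TSO paths and optimize everything else out.
 */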
73 #define MLX5_TXOFF_DECL(func, olx) \
74 static uint16_t mlx5_tx_burst_##func(void *txq, \
75 struct rte_mbuf **pkts, \
78 return mlx5_tx_burst_tmpl((struct mlx5_txq_data *)txq, \
79 pkts, pkts_n, (olx)); \
82 #define MLX5_TXOFF_INFO(func, olx) {mlx5_tx_burst_##func, olx},
84 static __rte_always_inline uint32_t
85 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe);
87 static __rte_always_inline int
88 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
89 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe);
91 static __rte_always_inline uint32_t
92 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe);
94 static __rte_always_inline void
95 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
96 volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res);
98 static __rte_always_inline void
99 mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx,
100 const unsigned int strd_n);
103 mlx5_queue_state_modify(struct rte_eth_dev *dev,
104 struct mlx5_mp_arg_queue_state_modify *sm);
107 mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *__rte_restrict tcp,
108 volatile struct mlx5_cqe *__rte_restrict cqe,
112 mlx5_lro_update_hdr(uint8_t *__rte_restrict padd,
113 volatile struct mlx5_cqe *__rte_restrict cqe,
116 uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
117 [0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
120 uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned;
121 uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned;
123 uint64_t rte_net_mlx5_dynf_inline_mask;
124 #define PKT_TX_DYNF_NOINLINE rte_net_mlx5_dynf_inline_mask
127 * Build a table to translate Rx completion flags to packet type.
129 * @note: fix mlx5_dev_supported_ptypes_get() if any change is made here.
132 mlx5_set_ptype_table(void)
135 uint32_t (*p)[RTE_DIM(mlx5_ptype_table)] = &mlx5_ptype_table;
137 /* Last entry must not be overwritten, reserved for errored packet. */
138 for (i = 0; i < RTE_DIM(mlx5_ptype_table) - 1; ++i)
139 (*p)[i] = RTE_PTYPE_UNKNOWN;
141 * The index to the array should have:
142 * bit[1:0] = l3_hdr_type
143 * bit[4:2] = l4_hdr_type
146 * bit[7] = outer_l3_type
149 (*p)[0x00] = RTE_PTYPE_L2_ETHER;
151 (*p)[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
152 RTE_PTYPE_L4_NONFRAG;
153 (*p)[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
154 RTE_PTYPE_L4_NONFRAG;
156 (*p)[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
158 (*p)[0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
161 (*p)[0x05] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
163 (*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
165 (*p)[0x0d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
167 (*p)[0x0e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
169 (*p)[0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
171 (*p)[0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
174 (*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
176 (*p)[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
178 /* Repeat with outer_l3_type being set. Just in case. */
179 (*p)[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
180 RTE_PTYPE_L4_NONFRAG;
181 (*p)[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
182 RTE_PTYPE_L4_NONFRAG;
183 (*p)[0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
185 (*p)[0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
187 (*p)[0x85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
189 (*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
191 (*p)[0x8d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
193 (*p)[0x8e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
195 (*p)[0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
197 (*p)[0x92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
199 (*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
201 (*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
204 (*p)[0x40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
205 (*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
206 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
207 RTE_PTYPE_INNER_L4_NONFRAG;
208 (*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
209 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
210 RTE_PTYPE_INNER_L4_NONFRAG;
211 (*p)[0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
212 (*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
213 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
214 RTE_PTYPE_INNER_L4_NONFRAG;
215 (*p)[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
216 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
217 RTE_PTYPE_INNER_L4_NONFRAG;
218 /* Tunneled - Fragmented */
219 (*p)[0x61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
220 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
221 RTE_PTYPE_INNER_L4_FRAG;
222 (*p)[0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
223 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
224 RTE_PTYPE_INNER_L4_FRAG;
225 (*p)[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
226 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
227 RTE_PTYPE_INNER_L4_FRAG;
228 (*p)[0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
229 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
230 RTE_PTYPE_INNER_L4_FRAG;
232 (*p)[0x45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
233 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
234 RTE_PTYPE_INNER_L4_TCP;
235 (*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
236 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
237 RTE_PTYPE_INNER_L4_TCP;
238 (*p)[0x4d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
239 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
240 RTE_PTYPE_INNER_L4_TCP;
241 (*p)[0x4e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
242 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
243 RTE_PTYPE_INNER_L4_TCP;
244 (*p)[0x51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
245 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
246 RTE_PTYPE_INNER_L4_TCP;
247 (*p)[0x52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
248 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
249 RTE_PTYPE_INNER_L4_TCP;
250 (*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
251 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
252 RTE_PTYPE_INNER_L4_TCP;
253 (*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
254 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
255 RTE_PTYPE_INNER_L4_TCP;
256 (*p)[0xcd] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
257 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
258 RTE_PTYPE_INNER_L4_TCP;
259 (*p)[0xce] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
260 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
261 RTE_PTYPE_INNER_L4_TCP;
262 (*p)[0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
263 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
264 RTE_PTYPE_INNER_L4_TCP;
265 (*p)[0xd2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
266 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
267 RTE_PTYPE_INNER_L4_TCP;
269 (*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
270 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
271 RTE_PTYPE_INNER_L4_UDP;
272 (*p)[0x4a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
273 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
274 RTE_PTYPE_INNER_L4_UDP;
275 (*p)[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
276 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
277 RTE_PTYPE_INNER_L4_UDP;
278 (*p)[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
279 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
280 RTE_PTYPE_INNER_L4_UDP;
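/*
 * Worked example for the index layout above (illustrative only): entry 0x46
 * (binary 0100 0110) decodes as bit[1:0] = 2 (inner IPv4), bit[4:2] = 1 (TCP,
 * as in the tunneled TCP entries above), bit[6] = 1 (tunneled) and
 * bit[7] = 0 (outer IPv4), which is why (*p)[0x46] carries outer IPv4 plus
 * INNER_L3_IPV4 | INNER_L4_TCP, while its bit[7] counterpart 0xc6 differs
 * only in the outer L3 type being IPv6.
 */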
284 * Build a table to translate packet to checksum type of Verbs.
287 mlx5_set_cksum_table(void)
293 * The index should have:
294 * bit[0] = PKT_TX_TCP_SEG
295 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
296 * bit[4] = PKT_TX_IP_CKSUM
297 * bit[8] = PKT_TX_OUTER_IP_CKSUM
300 for (i = 0; i < RTE_DIM(mlx5_cksum_table); ++i) {
303 /* Tunneled packet. */
304 if (i & (1 << 8)) /* Outer IP. */
305 v |= MLX5_ETH_WQE_L3_CSUM;
306 if (i & (1 << 4)) /* Inner IP. */
307 v |= MLX5_ETH_WQE_L3_INNER_CSUM;
308 if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
309 v |= MLX5_ETH_WQE_L4_INNER_CSUM;
312 if (i & (1 << 4)) /* IP. */
313 v |= MLX5_ETH_WQE_L3_CSUM;
314 if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
315 v |= MLX5_ETH_WQE_L4_CSUM;
317 mlx5_cksum_table[i] = v;
322 * Build a table to translate packet type of mbuf to SWP type of Verbs.
325 mlx5_set_swp_types_table(void)
331 * The index should have:
332 * bit[0:1] = PKT_TX_L4_MASK
333 * bit[4] = PKT_TX_IPV6
334 * bit[8] = PKT_TX_OUTER_IPV6
335 * bit[9] = PKT_TX_OUTER_UDP
337 for (i = 0; i < RTE_DIM(mlx5_swp_types_table); ++i) {
340 v |= MLX5_ETH_WQE_L3_OUTER_IPV6;
342 v |= MLX5_ETH_WQE_L4_OUTER_UDP;
344 v |= MLX5_ETH_WQE_L3_INNER_IPV6;
345 if ((i & 3) == (PKT_TX_UDP_CKSUM >> 52))
346 v |= MLX5_ETH_WQE_L4_INNER_UDP;
347 mlx5_swp_types_table[i] = v;
352 * Set Software Parser flags and offsets in Ethernet Segment of WQE.
353 * Flags must be initialized to zero beforehand.
356 * Pointer to burst routine local context.
358 * Pointer to store Software Parser flags
360 * Configured Tx offloads mask. It is fully defined at
361 * compile time and may be used for optimization.
364 * Software Parser offsets packed in dword.
365 * Software Parser flags are set by pointer.
367 static __rte_always_inline uint32_t
368 txq_mbuf_to_swp(struct mlx5_txq_local *__rte_restrict loc,
373 unsigned int idx, off;
376 if (!MLX5_TXOFF_CONFIG(SWP))
378 ol = loc->mbuf->ol_flags;
379 tunnel = ol & PKT_TX_TUNNEL_MASK;
381 * Check whether Software Parser is required.
382 * Only customized tunnels may require it.
384 if (likely(tunnel != PKT_TX_TUNNEL_UDP && tunnel != PKT_TX_TUNNEL_IP))
387 * The index should have:
388 * bit[0:1] = PKT_TX_L4_MASK
389 * bit[4] = PKT_TX_IPV6
390 * bit[8] = PKT_TX_OUTER_IPV6
391 * bit[9] = PKT_TX_OUTER_UDP
393 idx = (ol & (PKT_TX_L4_MASK | PKT_TX_IPV6 | PKT_TX_OUTER_IPV6)) >> 52;
394 idx |= (tunnel == PKT_TX_TUNNEL_UDP) ? (1 << 9) : 0;
395 *swp_flags = mlx5_swp_types_table[idx];
397 * Set offsets for SW parser. Since ConnectX-5, SW parser just
398 * complements HW parser. SW parser starts to engage only if HW parser
399 * can't reach a header. For the older devices, HW parser will not kick
400 * in if any of SWP offsets is set. Therefore, all of the L3 offsets
401 * should be set regardless of HW offload.
403 off = loc->mbuf->outer_l2_len;
404 if (MLX5_TXOFF_CONFIG(VLAN) && ol & PKT_TX_VLAN_PKT)
405 off += sizeof(struct rte_vlan_hdr);
406 set = (off >> 1) << 8; /* Outer L3 offset. */
407 off += loc->mbuf->outer_l3_len;
408 if (tunnel == PKT_TX_TUNNEL_UDP)
409 set |= off >> 1; /* Outer L4 offset. */
410 if (ol & (PKT_TX_IPV4 | PKT_TX_IPV6)) { /* Inner IP. */
411 const uint64_t csum = ol & PKT_TX_L4_MASK;
412 off += loc->mbuf->l2_len;
413 set |= (off >> 1) << 24; /* Inner L3 offset. */
414 if (csum == PKT_TX_TCP_CKSUM ||
415 csum == PKT_TX_UDP_CKSUM ||
416 (MLX5_TXOFF_CONFIG(TSO) && ol & PKT_TX_TCP_SEG)) {
417 off += loc->mbuf->l3_len;
418 set |= (off >> 1) << 16; /* Inner L4 offset. */
421 set = rte_cpu_to_le_32(set);
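/*
 * Worked example with hypothetical header lengths (not from the original
 * sources): a generic UDP tunnel packet with outer_l2_len = 14 (no VLAN),
 * outer_l3_len = 20, l2_len = 16 (UDP plus tunnel header) and l3_len = 20,
 * requesting inner TCP checksum, gives offsets in 2-byte words of
 * outer L3 = 7, outer L4 = 17, inner L3 = 25 and inner L4 = 35, i.e.
 * set = (25 << 24) | (35 << 16) | (7 << 8) | 17 = 0x19230711 before the
 * little-endian conversion above.
 */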
426 * Convert the Checksum offloads to Verbs.
429 * Pointer to the mbuf.
432 * Converted checksum flags.
434 static __rte_always_inline uint8_t
435 txq_ol_cksum_to_cs(struct rte_mbuf *buf)
438 uint8_t is_tunnel = !!(buf->ol_flags & PKT_TX_TUNNEL_MASK);
439 const uint64_t ol_flags_mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK |
440 PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM;
443 * The index should have:
444 * bit[0] = PKT_TX_TCP_SEG
445 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
446 * bit[4] = PKT_TX_IP_CKSUM
447 * bit[8] = PKT_TX_OUTER_IP_CKSUM
450 idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (!!is_tunnel << 9);
451 return mlx5_cksum_table[idx];
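/*
 * Worked example (illustrative, assuming the standard rte_mbuf flag bit
 * positions): a plain non-tunneled mbuf requesting
 * PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM yields, after masking and shifting
 * right by 50, idx = (1 << 4) | (1 << 2) = 0x14, for which
 * mlx5_set_cksum_table() stored MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM.
 */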
455 * Internal function to compute the number of used descriptors in an RX queue
461 * The number of used Rx descriptors.
464 rx_queue_count(struct mlx5_rxq_data *rxq)
466 struct rxq_zip *zip = &rxq->zip;
467 volatile struct mlx5_cqe *cqe;
468 unsigned int cq_ci = rxq->cq_ci;
469 const unsigned int cqe_n = (1 << rxq->cqe_n);
470 const unsigned int cqe_cnt = cqe_n - 1;
471 unsigned int used = 0;
473 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
474 while (check_cqe(cqe, cqe_n, cq_ci) != MLX5_CQE_STATUS_HW_OWN) {
478 op_own = cqe->op_own;
479 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
480 if (unlikely(zip->ai))
481 n = zip->cqe_cnt - zip->ai;
483 n = rte_be_to_cpu_32(cqe->byte_cnt);
488 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
490 used = RTE_MIN(used, cqe_n);
495 * DPDK callback to check the status of an Rx descriptor.
500 * The index of the descriptor in the ring.
503 * The status of the Rx descriptor.
506 mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
508 struct mlx5_rxq_data *rxq = rx_queue;
509 struct mlx5_rxq_ctrl *rxq_ctrl =
510 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
511 struct rte_eth_dev *dev = ETH_DEV(rxq_ctrl->priv);
513 if (dev->rx_pkt_burst == NULL ||
514 dev->rx_pkt_burst == removed_rx_burst) {
518 if (offset >= (1 << rxq->cqe_n)) {
522 if (offset < rx_queue_count(rxq))
523 return RTE_ETH_RX_DESC_DONE;
524 return RTE_ETH_RX_DESC_AVAIL;
528 * DPDK callback to get the RX queue information
531 * Pointer to the device structure.
534 * Rx queue identifier.
537 * Pointer to the RX queue information structure.
544 mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
545 struct rte_eth_rxq_info *qinfo)
547 struct mlx5_priv *priv = dev->data->dev_private;
548 struct mlx5_rxq_data *rxq = (*priv->rxqs)[rx_queue_id];
549 struct mlx5_rxq_ctrl *rxq_ctrl =
550 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
554 qinfo->mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
555 rxq->mprq_mp : rxq->mp;
556 qinfo->conf.rx_thresh.pthresh = 0;
557 qinfo->conf.rx_thresh.hthresh = 0;
558 qinfo->conf.rx_thresh.wthresh = 0;
559 qinfo->conf.rx_free_thresh = rxq->rq_repl_thresh;
560 qinfo->conf.rx_drop_en = 1;
561 qinfo->conf.rx_deferred_start = rxq_ctrl ? 0 : 1;
562 qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
563 qinfo->scattered_rx = dev->data->scattered_rx;
564 qinfo->nb_desc = 1 << rxq->elts_n;
568 * DPDK callback to get the RX packet burst mode information
571 * Pointer to the device structure.
574 * Rx queue identifier.
577 * Pointer to the burst mode information.
580 * 0 on success, -EINVAL on failure.
584 mlx5_rx_burst_mode_get(struct rte_eth_dev *dev,
585 uint16_t rx_queue_id __rte_unused,
586 struct rte_eth_burst_mode *mode)
588 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
590 if (pkt_burst == mlx5_rx_burst) {
591 snprintf(mode->info, sizeof(mode->info), "%s", "Scalar");
592 } else if (pkt_burst == mlx5_rx_burst_mprq) {
593 snprintf(mode->info, sizeof(mode->info), "%s", "Multi-Packet RQ");
594 } else if (pkt_burst == mlx5_rx_burst_vec) {
595 #if defined RTE_ARCH_X86_64
596 snprintf(mode->info, sizeof(mode->info), "%s", "Vector SSE");
597 #elif defined RTE_ARCH_ARM64
598 snprintf(mode->info, sizeof(mode->info), "%s", "Vector Neon");
599 #elif defined RTE_ARCH_PPC_64
600 snprintf(mode->info, sizeof(mode->info), "%s", "Vector AltiVec");
611 * DPDK callback to get the number of used descriptors in a RX queue
614 * Pointer to the device structure.
620 * The number of used Rx descriptors.
621 * -EINVAL if the queue is invalid.
624 mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
626 struct mlx5_priv *priv = dev->data->dev_private;
627 struct mlx5_rxq_data *rxq;
629 if (dev->rx_pkt_burst == NULL ||
630 dev->rx_pkt_burst == removed_rx_burst) {
634 rxq = (*priv->rxqs)[rx_queue_id];
639 return rx_queue_count(rxq);
642 #define MLX5_SYSTEM_LOG_DIR "/var/log"
644 * Dump debug information to log file.
649 * If not NULL, this string is printed as a header to the output
650 * and the output will be in hexadecimal view.
652 * This is the buffer address to print out.
654 * The number of bytes to dump out.
657 mlx5_dump_debug_information(const char *fname, const char *hex_title,
658 const void *buf, unsigned int hex_len)
662 MKSTR(path, "%s/%s", MLX5_SYSTEM_LOG_DIR, fname);
663 fd = fopen(path, "a+");
665 DRV_LOG(WARNING, "cannot open %s for debug dump", path);
666 MKSTR(path2, "./%s", fname);
667 fd = fopen(path2, "a+");
669 DRV_LOG(ERR, "cannot open %s for debug dump", path2);
672 DRV_LOG(INFO, "New debug dump in file %s", path2);
674 DRV_LOG(INFO, "New debug dump in file %s", path);
677 rte_hexdump(fd, hex_title, buf, hex_len);
679 fprintf(fd, "%s", (const char *)buf);
680 fprintf(fd, "\n\n\n");
685 * Move QP from error state to running state and initialize indexes.
688 * Pointer to TX queue control structure.
691 * 0 on success, else -1.
694 tx_recover_qp(struct mlx5_txq_ctrl *txq_ctrl)
696 struct mlx5_mp_arg_queue_state_modify sm = {
698 .queue_id = txq_ctrl->txq.idx,
701 if (mlx5_queue_state_modify(ETH_DEV(txq_ctrl->priv), &sm))
703 txq_ctrl->txq.wqe_ci = 0;
704 txq_ctrl->txq.wqe_pi = 0;
705 txq_ctrl->txq.elts_comp = 0;
709 /* Return 1 if the error CQE was already marked as seen, otherwise mark it and return 0. */
711 check_err_cqe_seen(volatile struct mlx5_err_cqe *err_cqe)
713 static const uint8_t magic[] = "seen";
717 for (i = 0; i < sizeof(magic); ++i)
718 if (!ret || err_cqe->rsvd1[i] != magic[i]) {
720 err_cqe->rsvd1[i] = magic[i];
729 * Pointer to TX queue structure.
731 * Pointer to the error CQE.
734 * A negative value if queue recovery failed, otherwise
735 * the error completion entry was handled successfully.
738 mlx5_tx_error_cqe_handle(struct mlx5_txq_data *__rte_restrict txq,
739 volatile struct mlx5_err_cqe *err_cqe)
741 if (err_cqe->syndrome != MLX5_CQE_SYNDROME_WR_FLUSH_ERR) {
742 const uint16_t wqe_m = ((1 << txq->wqe_n) - 1);
743 struct mlx5_txq_ctrl *txq_ctrl =
744 container_of(txq, struct mlx5_txq_ctrl, txq);
745 uint16_t new_wqe_pi = rte_be_to_cpu_16(err_cqe->wqe_counter);
746 int seen = check_err_cqe_seen(err_cqe);
748 if (!seen && txq_ctrl->dump_file_n <
749 txq_ctrl->priv->config.max_dump_files_num) {
750 MKSTR(err_str, "Unexpected CQE error syndrome "
751 "0x%02x CQN = %u SQN = %u wqe_counter = %u "
752 "wq_ci = %u cq_ci = %u", err_cqe->syndrome,
753 txq->cqe_s, txq->qp_num_8s >> 8,
754 rte_be_to_cpu_16(err_cqe->wqe_counter),
755 txq->wqe_ci, txq->cq_ci);
756 MKSTR(name, "dpdk_mlx5_port_%u_txq_%u_index_%u_%u",
757 PORT_ID(txq_ctrl->priv), txq->idx,
758 txq_ctrl->dump_file_n, (uint32_t)rte_rdtsc());
759 mlx5_dump_debug_information(name, NULL, err_str, 0);
760 mlx5_dump_debug_information(name, "MLX5 Error CQ:",
761 (const void *)((uintptr_t)
765 mlx5_dump_debug_information(name, "MLX5 Error SQ:",
766 (const void *)((uintptr_t)
770 txq_ctrl->dump_file_n++;
774 * Count errors in units of WQEs.
775 * Later this can be improved to count error packets,
776 * for example, by parsing the SQ to find how many packets
777 * should be counted for each WQE.
779 txq->stats.oerrors += ((txq->wqe_ci & wqe_m) -
781 if (tx_recover_qp(txq_ctrl)) {
782 /* Recovering failed - retry later on the same WQE. */
785 /* Release all the remaining buffers. */
786 txq_free_elts(txq_ctrl);
792 * Translate RX completion flags to packet type.
795 * Pointer to RX queue structure.
799 * @note: fix mlx5_dev_supported_ptypes_get() if any change is made here.
802 * Packet type for struct rte_mbuf.
804 static inline uint32_t
805 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
808 uint8_t pinfo = cqe->pkt_info;
809 uint16_t ptype = cqe->hdr_type_etc;
812 * The index to the array should have:
813 * bit[1:0] = l3_hdr_type
814 * bit[4:2] = l4_hdr_type
817 * bit[7] = outer_l3_type
819 idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10);
820 return mlx5_ptype_table[idx] | rxq->tunnel * !!(idx & (1 << 6));
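/*
 * Illustrative decode (hypothetical CQE values): pkt_info = 0x01 and
 * hdr_type_etc = 0x1800 give idx = (0x1 << 6) | (0x1800 >> 10) = 0x46;
 * since bit 6 (tunneled) is set, the rxq->tunnel ptype bits are OR-ed
 * into the value looked up in mlx5_ptype_table[].
 */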
824 * Initialize Rx WQ and indexes.
827 * Pointer to RX queue structure.
830 mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
832 const unsigned int wqe_n = 1 << rxq->elts_n;
835 for (i = 0; (i != wqe_n); ++i) {
836 volatile struct mlx5_wqe_data_seg *scat;
840 if (mlx5_rxq_mprq_enabled(rxq)) {
841 struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[i];
843 scat = &((volatile struct mlx5_wqe_mprq *)
845 addr = (uintptr_t)mlx5_mprq_buf_addr(buf,
846 1 << rxq->strd_num_n);
847 byte_count = (1 << rxq->strd_sz_n) *
848 (1 << rxq->strd_num_n);
850 struct rte_mbuf *buf = (*rxq->elts)[i];
852 scat = &((volatile struct mlx5_wqe_data_seg *)
854 addr = rte_pktmbuf_mtod(buf, uintptr_t);
855 byte_count = DATA_LEN(buf);
857 /* scat->addr must be able to store a pointer. */
858 MLX5_ASSERT(sizeof(scat->addr) >= sizeof(uintptr_t));
859 *scat = (struct mlx5_wqe_data_seg){
860 .addr = rte_cpu_to_be_64(addr),
861 .byte_count = rte_cpu_to_be_32(byte_count),
862 .lkey = mlx5_rx_addr2mr(rxq, addr),
865 rxq->consumed_strd = 0;
866 rxq->decompressed = 0;
868 rxq->zip = (struct rxq_zip){
871 /* Update doorbell counter. */
872 rxq->rq_ci = wqe_n >> rxq->sges_n;
874 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
878 * Modify a Verbs/DevX queue state.
879 * This must be called from the primary process.
882 * Pointer to Ethernet device.
884 * State modify request parameters.
887 * 0 in case of success else non-zero value and rte_errno is set.
890 mlx5_queue_state_modify_primary(struct rte_eth_dev *dev,
891 const struct mlx5_mp_arg_queue_state_modify *sm)
894 struct mlx5_priv *priv = dev->data->dev_private;
897 struct mlx5_rxq_data *rxq = (*priv->rxqs)[sm->queue_id];
898 struct mlx5_rxq_ctrl *rxq_ctrl =
899 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
901 if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV) {
902 struct ibv_wq_attr mod = {
903 .attr_mask = IBV_WQ_ATTR_STATE,
904 .wq_state = sm->state,
907 ret = mlx5_glue->modify_wq(rxq_ctrl->obj->wq, &mod);
908 } else { /* rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ. */
909 struct mlx5_devx_modify_rq_attr rq_attr;
911 memset(&rq_attr, 0, sizeof(rq_attr));
912 if (sm->state == IBV_WQS_RESET) {
913 rq_attr.rq_state = MLX5_RQC_STATE_ERR;
914 rq_attr.state = MLX5_RQC_STATE_RST;
915 } else if (sm->state == IBV_WQS_RDY) {
916 rq_attr.rq_state = MLX5_RQC_STATE_RST;
917 rq_attr.state = MLX5_RQC_STATE_RDY;
918 } else if (sm->state == IBV_WQS_ERR) {
919 rq_attr.rq_state = MLX5_RQC_STATE_RDY;
920 rq_attr.state = MLX5_RQC_STATE_ERR;
922 ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq,
926 DRV_LOG(ERR, "Cannot change Rx WQ state to %u - %s",
927 sm->state, strerror(errno));
932 struct mlx5_txq_data *txq = (*priv->txqs)[sm->queue_id];
933 struct mlx5_txq_ctrl *txq_ctrl =
934 container_of(txq, struct mlx5_txq_ctrl, txq);
936 ret = priv->obj_ops.txq_obj_modify(txq_ctrl->obj,
937 MLX5_TXQ_MOD_ERR2RDY,
938 (uint8_t)priv->dev_port);
946 * Modify a Verbs queue state.
949 * Pointer to Ethernet device.
951 * State modify request parameters.
954 * 0 in case of success else non-zero value.
957 mlx5_queue_state_modify(struct rte_eth_dev *dev,
958 struct mlx5_mp_arg_queue_state_modify *sm)
960 struct mlx5_priv *priv = dev->data->dev_private;
963 switch (rte_eal_process_type()) {
964 case RTE_PROC_PRIMARY:
965 ret = mlx5_queue_state_modify_primary(dev, sm);
967 case RTE_PROC_SECONDARY:
968 ret = mlx5_mp_req_queue_state_modify(&priv->mp_id, sm);
978 * The function moves the RQ state to reset when the first error CQE is
979 * seen, then the CQ is drained by the caller's burst loop. When the CQ is
980 * empty, it moves the RQ state to ready and reinitializes the RQ.
981 * Identifying the next CQE and counting errors remain the caller's responsibility.
984 * Pointer to RX queue structure.
986 * 1 when called from vectorized Rx burst, need to prepare mbufs for the RQ.
987 * 0 when called from non-vectorized Rx burst.
990 * -1 in case of recovery error, otherwise the CQE status.
993 mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
995 const uint16_t cqe_n = 1 << rxq->cqe_n;
996 const uint16_t cqe_mask = cqe_n - 1;
997 const unsigned int wqe_n = 1 << rxq->elts_n;
998 struct mlx5_rxq_ctrl *rxq_ctrl =
999 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
1001 volatile struct mlx5_cqe *cqe;
1002 volatile struct mlx5_err_cqe *err_cqe;
1004 .cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask],
1006 struct mlx5_mp_arg_queue_state_modify sm;
1009 switch (rxq->err_state) {
1010 case MLX5_RXQ_ERR_STATE_NO_ERROR:
1011 rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_RESET;
1013 case MLX5_RXQ_ERR_STATE_NEED_RESET:
1015 sm.queue_id = rxq->idx;
1016 sm.state = IBV_WQS_RESET;
1017 if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv), &sm))
1019 if (rxq_ctrl->dump_file_n <
1020 rxq_ctrl->priv->config.max_dump_files_num) {
1021 MKSTR(err_str, "Unexpected CQE error syndrome "
1022 "0x%02x CQN = %u RQN = %u wqe_counter = %u"
1023 " rq_ci = %u cq_ci = %u", u.err_cqe->syndrome,
1024 rxq->cqn, rxq_ctrl->wqn,
1025 rte_be_to_cpu_16(u.err_cqe->wqe_counter),
1026 rxq->rq_ci << rxq->sges_n, rxq->cq_ci);
1027 MKSTR(name, "dpdk_mlx5_port_%u_rxq_%u_%u",
1028 rxq->port_id, rxq->idx, (uint32_t)rte_rdtsc());
1029 mlx5_dump_debug_information(name, NULL, err_str, 0);
1030 mlx5_dump_debug_information(name, "MLX5 Error CQ:",
1031 (const void *)((uintptr_t)
1033 sizeof(*u.cqe) * cqe_n);
1034 mlx5_dump_debug_information(name, "MLX5 Error RQ:",
1035 (const void *)((uintptr_t)
1038 rxq_ctrl->dump_file_n++;
1040 rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_READY;
1042 case MLX5_RXQ_ERR_STATE_NEED_READY:
1043 ret = check_cqe(u.cqe, cqe_n, rxq->cq_ci);
1044 if (ret == MLX5_CQE_STATUS_HW_OWN) {
1046 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1049 * The RQ consumer index must be zeroed while moving
1050 * from RESET state to RDY state.
1052 *rxq->rq_db = rte_cpu_to_be_32(0);
1055 sm.queue_id = rxq->idx;
1056 sm.state = IBV_WQS_RDY;
1057 if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv),
1061 const uint16_t q_mask = wqe_n - 1;
1063 struct rte_mbuf **elt;
1065 unsigned int n = wqe_n - (rxq->rq_ci -
1068 for (i = 0; i < (int)n; ++i) {
1069 elt_idx = (rxq->rq_ci + i) & q_mask;
1070 elt = &(*rxq->elts)[elt_idx];
1071 *elt = rte_mbuf_raw_alloc(rxq->mp);
1073 for (i--; i >= 0; --i) {
1074 elt_idx = (rxq->rq_ci +
1078 rte_pktmbuf_free_seg
1084 for (i = 0; i < (int)wqe_n; ++i) {
1085 elt = &(*rxq->elts)[i];
1087 (uint16_t)((*elt)->buf_len -
1088 rte_pktmbuf_headroom(*elt));
1090 /* Padding with a fake mbuf for vec Rx. */
1091 for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
1092 (*rxq->elts)[wqe_n + i] =
1095 mlx5_rxq_initialize(rxq);
1096 rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
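/*
 * Recovery sequence sketch (summary of the switch above, for clarity):
 * NO_ERROR -> NEED_RESET on the first bad CQE, the WQ is moved to RESET
 * and optionally dumped -> NEED_READY while the caller keeps draining the
 * CQ -> once the CQ is empty the RQ doorbell is zeroed, the WQ is moved
 * back to RDY, the elts array is refilled if needed and
 * mlx5_rxq_initialize() returns the queue to NO_ERROR.
 */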
1105 * Get size of the next packet for a given CQE. For compressed CQEs, the
1106 * consumer index is updated only once all packets of the current one have
1110 * Pointer to RX queue.
1114 * Store pointer to mini-CQE if compressed. Otherwise, the pointer is not
1118 * 0 in case of empty CQE, otherwise the packet size in bytes.
1121 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
1122 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe)
1124 struct rxq_zip *zip = &rxq->zip;
1125 uint16_t cqe_n = cqe_cnt + 1;
1131 /* Process compressed data in the CQE and mini arrays. */
1133 volatile struct mlx5_mini_cqe8 (*mc)[8] =
1134 (volatile struct mlx5_mini_cqe8 (*)[8])
1135 (uintptr_t)(&(*rxq->cqes)[zip->ca &
1138 len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt);
1139 *mcqe = &(*mc)[zip->ai & 7];
1140 if ((++zip->ai & 7) == 0) {
1141 /* Invalidate consumed CQEs */
1144 while (idx != end) {
1145 (*rxq->cqes)[idx & cqe_cnt].op_own =
1146 MLX5_CQE_INVALIDATE;
1150 * Increment consumer index to skip the number
1151 * of CQEs consumed. Hardware leaves holes in
1152 * the CQ ring for software use.
1157 if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) {
1158 /* Invalidate the rest */
1162 while (idx != end) {
1163 (*rxq->cqes)[idx & cqe_cnt].op_own =
1164 MLX5_CQE_INVALIDATE;
1167 rxq->cq_ci = zip->cq_ci;
1171 * No compressed data, get next CQE and verify if it is
1178 ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
1179 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
1180 if (unlikely(ret == MLX5_CQE_STATUS_ERR ||
1182 ret = mlx5_rx_err_handle(rxq, 0);
1183 if (ret == MLX5_CQE_STATUS_HW_OWN ||
1191 op_own = cqe->op_own;
1192 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) {
1193 volatile struct mlx5_mini_cqe8 (*mc)[8] =
1194 (volatile struct mlx5_mini_cqe8 (*)[8])
1195 (uintptr_t)(&(*rxq->cqes)
1199 /* Fix endianness. */
1200 zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
1202 * Current mini array position is the one
1203 * returned by check_cqe64().
1205 * If completion comprises several mini arrays,
1206 * as a special case the second one is located
1207 * 7 CQEs after the initial CQE instead of 8
1208 * for subsequent ones.
1210 zip->ca = rxq->cq_ci;
1211 zip->na = zip->ca + 7;
1212 /* Compute the next non compressed CQE. */
1214 zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
1215 /* Get packet size to return. */
1216 len = rte_be_to_cpu_32((*mc)[0].byte_cnt);
1219 /* Prefetch all to be invalidated */
1222 while (idx != end) {
1223 rte_prefetch0(&(*rxq->cqes)[(idx) &
1228 len = rte_be_to_cpu_32(cqe->byte_cnt);
1231 if (unlikely(rxq->err_state)) {
1232 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1233 ++rxq->stats.idropped;
1241 * Translate RX completion flags to offload flags.
1247 * Offload flags (ol_flags) for struct rte_mbuf.
1249 static inline uint32_t
1250 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe)
1252 uint32_t ol_flags = 0;
1253 uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);
1257 MLX5_CQE_RX_L3_HDR_VALID,
1258 PKT_RX_IP_CKSUM_GOOD) |
1260 MLX5_CQE_RX_L4_HDR_VALID,
1261 PKT_RX_L4_CKSUM_GOOD);
1266 * Fill in mbuf fields from RX completion flags.
1267 * Note that pkt->ol_flags should be initialized outside of this function.
1270 * Pointer to RX queue.
1275 * @param rss_hash_res
1276 * Packet RSS Hash result.
1279 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
1280 volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res)
1282 /* Update packet information. */
1283 pkt->packet_type = rxq_cq_to_pkt_type(rxq, cqe);
1284 if (rss_hash_res && rxq->rss_hash) {
1285 pkt->hash.rss = rss_hash_res;
1286 pkt->ol_flags |= PKT_RX_RSS_HASH;
1288 if (rxq->mark && MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) {
1289 pkt->ol_flags |= PKT_RX_FDIR;
1290 if (cqe->sop_drop_qpn !=
1291 rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) {
1292 uint32_t mark = cqe->sop_drop_qpn;
1294 pkt->ol_flags |= PKT_RX_FDIR_ID;
1295 pkt->hash.fdir.hi = mlx5_flow_mark_get(mark);
1298 if (rxq->dynf_meta && cqe->flow_table_metadata) {
1299 pkt->ol_flags |= rxq->flow_meta_mask;
1300 *RTE_MBUF_DYNFIELD(pkt, rxq->flow_meta_offset, uint32_t *) =
1301 cqe->flow_table_metadata;
1304 pkt->ol_flags |= rxq_cq_to_ol_flags(cqe);
1305 if (rxq->vlan_strip &&
1306 (cqe->hdr_type_etc & rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) {
1307 pkt->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1308 pkt->vlan_tci = rte_be_to_cpu_16(cqe->vlan_info);
1310 if (rxq->hw_timestamp) {
1311 uint64_t ts = rte_be_to_cpu_64(cqe->timestamp);
1313 if (rxq->rt_timestamp)
1314 ts = mlx5_txpp_convert_rx_ts(rxq->sh, ts);
1315 pkt->timestamp = ts;
1316 pkt->ol_flags |= PKT_RX_TIMESTAMP;
1321 * DPDK callback for RX.
1324 * Generic pointer to RX queue structure.
1326 * Array to store received packets.
1328 * Maximum number of packets in array.
1331 * Number of packets successfully received (<= pkts_n).
1334 mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1336 struct mlx5_rxq_data *rxq = dpdk_rxq;
1337 const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1;
1338 const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1;
1339 const unsigned int sges_n = rxq->sges_n;
1340 struct rte_mbuf *pkt = NULL;
1341 struct rte_mbuf *seg = NULL;
1342 volatile struct mlx5_cqe *cqe =
1343 &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1345 unsigned int rq_ci = rxq->rq_ci << sges_n;
1346 int len = 0; /* keep its value across iterations. */
1349 unsigned int idx = rq_ci & wqe_cnt;
1350 volatile struct mlx5_wqe_data_seg *wqe =
1351 &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx];
1352 struct rte_mbuf *rep = (*rxq->elts)[idx];
1353 volatile struct mlx5_mini_cqe8 *mcqe = NULL;
1354 uint32_t rss_hash_res;
1362 rep = rte_mbuf_raw_alloc(rxq->mp);
1363 if (unlikely(rep == NULL)) {
1364 ++rxq->stats.rx_nombuf;
1367 * no buffers before we even started,
1368 * bail out silently.
1372 while (pkt != seg) {
1373 MLX5_ASSERT(pkt != (*rxq->elts)[idx]);
1377 rte_mbuf_raw_free(pkt);
1383 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1384 len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe);
1386 rte_mbuf_raw_free(rep);
1390 MLX5_ASSERT(len >= (rxq->crc_present << 2));
1391 pkt->ol_flags &= EXT_ATTACHED_MBUF;
1392 /* If compressed, take hash result from mini-CQE. */
1393 rss_hash_res = rte_be_to_cpu_32(mcqe == NULL ?
1395 mcqe->rx_hash_result);
1396 rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
1397 if (rxq->crc_present)
1398 len -= RTE_ETHER_CRC_LEN;
1400 if (cqe->lro_num_seg > 1) {
1402 (rte_pktmbuf_mtod(pkt, uint8_t *), cqe,
1404 pkt->ol_flags |= PKT_RX_LRO;
1405 pkt->tso_segsz = len / cqe->lro_num_seg;
1408 DATA_LEN(rep) = DATA_LEN(seg);
1409 PKT_LEN(rep) = PKT_LEN(seg);
1410 SET_DATA_OFF(rep, DATA_OFF(seg));
1411 PORT(rep) = PORT(seg);
1412 (*rxq->elts)[idx] = rep;
1414 * Fill NIC descriptor with the new buffer. The lkey and size
1415 * of the buffers are already known, only the buffer address
1418 wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
1419 /* If there's only one MR, no need to replace LKey in WQE. */
1420 if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
1421 wqe->lkey = mlx5_rx_mb2mr(rxq, rep);
1422 if (len > DATA_LEN(seg)) {
1423 len -= DATA_LEN(seg);
1428 DATA_LEN(seg) = len;
1429 #ifdef MLX5_PMD_SOFT_COUNTERS
1430 /* Increment bytes counter. */
1431 rxq->stats.ibytes += PKT_LEN(pkt);
1433 /* Return packet. */
1438 /* Align consumer index to the next stride. */
1443 if (unlikely((i == 0) && ((rq_ci >> sges_n) == rxq->rq_ci)))
1445 /* Update the consumer index. */
1446 rxq->rq_ci = rq_ci >> sges_n;
1448 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1450 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
1451 #ifdef MLX5_PMD_SOFT_COUNTERS
1452 /* Increment packets counter. */
1453 rxq->stats.ipackets += i;
1459 * Update LRO packet TCP header.
1460 * The HW LRO feature doesn't update the TCP header after coalescing the
1461 * TCP segments, but supplies information in the CQE for SW to fill it in.
1464 * Pointer to the TCP header.
1466 * Pointer to the completion entry.
1468 * The L3 pseudo-header checksum.
1471 mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *__rte_restrict tcp,
1472 volatile struct mlx5_cqe *__rte_restrict cqe,
1475 uint8_t l4_type = (rte_be_to_cpu_16(cqe->hdr_type_etc) &
1476 MLX5_CQE_L4_TYPE_MASK) >> MLX5_CQE_L4_TYPE_SHIFT;
1478 * The HW calculates only the TCP payload checksum; SW needs to complete
1479 * the TCP header checksum and the L3 pseudo-header checksum.
1481 uint32_t csum = phcsum + cqe->csum;
1483 if (l4_type == MLX5_L4_HDR_TYPE_TCP_EMPTY_ACK ||
1484 l4_type == MLX5_L4_HDR_TYPE_TCP_WITH_ACL) {
1485 tcp->tcp_flags |= RTE_TCP_ACK_FLAG;
1486 tcp->recv_ack = cqe->lro_ack_seq_num;
1487 tcp->rx_win = cqe->lro_tcp_win;
1489 if (cqe->lro_tcppsh_abort_dupack & MLX5_CQE_LRO_PUSH_MASK)
1490 tcp->tcp_flags |= RTE_TCP_PSH_FLAG;
1492 csum += rte_raw_cksum(tcp, (tcp->data_off >> 4) * 4);
1493 csum = ((csum & 0xffff0000) >> 16) + (csum & 0xffff);
1494 csum = (~csum) & 0xffff;
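/*
 * Worked example of the fold above (illustrative numbers): if the 32-bit
 * accumulator holds 0x0001b5e3, the carry is added back as
 * 0x0001 + 0xb5e3 = 0xb5e4 and its one's complement 0x4a1b is the final
 * 16-bit checksum value.
 */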
1501 * Update LRO packet headers.
1502 * The HW LRO feature doesn't update the L3/TCP headers after coalescing the
1503 * TCP segments, but supplies information in the CQE for SW to fill them in.
1506 * The packet address.
1508 * Pointer to the completion entry.
1510 * The packet length.
1513 mlx5_lro_update_hdr(uint8_t *__rte_restrict padd,
1514 volatile struct mlx5_cqe *__rte_restrict cqe,
1518 struct rte_ether_hdr *eth;
1519 struct rte_vlan_hdr *vlan;
1520 struct rte_ipv4_hdr *ipv4;
1521 struct rte_ipv6_hdr *ipv6;
1522 struct rte_tcp_hdr *tcp;
1527 uint16_t proto = h.eth->ether_type;
1531 while (proto == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
1532 proto == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
1533 proto = h.vlan->eth_proto;
1536 if (proto == RTE_BE16(RTE_ETHER_TYPE_IPV4)) {
1537 h.ipv4->time_to_live = cqe->lro_min_ttl;
1538 h.ipv4->total_length = rte_cpu_to_be_16(len - (h.hdr - padd));
1539 h.ipv4->hdr_checksum = 0;
1540 h.ipv4->hdr_checksum = rte_ipv4_cksum(h.ipv4);
1541 phcsum = rte_ipv4_phdr_cksum(h.ipv4, 0);
1544 h.ipv6->hop_limits = cqe->lro_min_ttl;
1545 h.ipv6->payload_len = rte_cpu_to_be_16(len - (h.hdr - padd) -
1547 phcsum = rte_ipv6_phdr_cksum(h.ipv6, 0);
1550 mlx5_lro_update_tcp_hdr(h.tcp, cqe, phcsum);
1554 mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
1556 struct mlx5_mprq_buf *buf = opaque;
1558 if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) == 1) {
1559 rte_mempool_put(buf->mp, buf);
1560 } else if (unlikely(__atomic_sub_fetch(&buf->refcnt, 1,
1561 __ATOMIC_RELAXED) == 0)) {
1562 __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
1563 rte_mempool_put(buf->mp, buf);
1568 mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf)
1570 mlx5_mprq_buf_free_cb(NULL, buf);
1574 mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx,
1575 const unsigned int strd_n)
1577 struct mlx5_mprq_buf *rep = rxq->mprq_repl;
1578 volatile struct mlx5_wqe_data_seg *wqe =
1579 &((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg;
1582 MLX5_ASSERT(rep != NULL);
1583 /* Replace MPRQ buf. */
1584 (*rxq->mprq_bufs)[rq_idx] = rep;
1586 addr = mlx5_mprq_buf_addr(rep, strd_n);
1587 wqe->addr = rte_cpu_to_be_64((uintptr_t)addr);
1588 /* If there's only one MR, no need to replace LKey in WQE. */
1589 if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
1590 wqe->lkey = mlx5_rx_addr2mr(rxq, (uintptr_t)addr);
1591 /* Stash an MPRQ buffer for the next replacement. */
1592 if (likely(!rte_mempool_get(rxq->mprq_mp, (void **)&rep)))
1593 rxq->mprq_repl = rep;
1595 rxq->mprq_repl = NULL;
1599 * DPDK callback for RX with Multi-Packet RQ support.
1602 * Generic pointer to RX queue structure.
1604 * Array to store received packets.
1606 * Maximum number of packets in array.
1609 * Number of packets successfully received (<= pkts_n).
1612 mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1614 struct mlx5_rxq_data *rxq = dpdk_rxq;
1615 const unsigned int strd_n = 1 << rxq->strd_num_n;
1616 const unsigned int strd_sz = 1 << rxq->strd_sz_n;
1617 const unsigned int strd_shift =
1618 MLX5_MPRQ_STRIDE_SHIFT_BYTE * rxq->strd_shift_en;
1619 const unsigned int cq_mask = (1 << rxq->cqe_n) - 1;
1620 const unsigned int wq_mask = (1 << rxq->elts_n) - 1;
1621 volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
1623 uint32_t rq_ci = rxq->rq_ci;
1624 uint16_t consumed_strd = rxq->consumed_strd;
1625 struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
1627 while (i < pkts_n) {
1628 struct rte_mbuf *pkt;
1636 int32_t hdrm_overlap;
1637 volatile struct mlx5_mini_cqe8 *mcqe = NULL;
1638 uint32_t rss_hash_res = 0;
1640 if (consumed_strd == strd_n) {
1641 /* Replace WQE only if the buffer is still in use. */
1642 if (__atomic_load_n(&buf->refcnt,
1643 __ATOMIC_RELAXED) > 1) {
1644 mprq_buf_replace(rxq, rq_ci & wq_mask, strd_n);
1645 /* Release the old buffer. */
1646 mlx5_mprq_buf_free(buf);
1647 } else if (unlikely(rxq->mprq_repl == NULL)) {
1648 struct mlx5_mprq_buf *rep;
1651 * Currently, the MPRQ mempool is out of buffers
1652 * and memcpy is done regardless of the size of the Rx
1653 * packet. Retry the allocation to get back to
1656 if (!rte_mempool_get(rxq->mprq_mp,
1658 rxq->mprq_repl = rep;
1660 /* Advance to the next WQE. */
1663 buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
1665 cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
1666 ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe);
1670 strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
1671 MLX5_MPRQ_STRIDE_NUM_SHIFT;
1672 MLX5_ASSERT(strd_cnt);
1673 consumed_strd += strd_cnt;
1674 if (byte_cnt & MLX5_MPRQ_FILLER_MASK)
1677 rss_hash_res = rte_be_to_cpu_32(cqe->rx_hash_res);
1678 strd_idx = rte_be_to_cpu_16(cqe->wqe_counter);
1680 /* The mini-CQE for MPRQ doesn't have a hash result. */
1681 strd_idx = rte_be_to_cpu_16(mcqe->stride_idx);
1683 MLX5_ASSERT(strd_idx < strd_n);
1684 MLX5_ASSERT(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) &
1686 pkt = rte_pktmbuf_alloc(rxq->mp);
1687 if (unlikely(pkt == NULL)) {
1688 ++rxq->stats.rx_nombuf;
1691 len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
1692 MLX5_ASSERT((int)len >= (rxq->crc_present << 2));
1693 if (rxq->crc_present)
1694 len -= RTE_ETHER_CRC_LEN;
1695 offset = strd_idx * strd_sz + strd_shift;
1696 addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf, strd_n), offset);
1697 hdrm_overlap = len + RTE_PKTMBUF_HEADROOM - strd_cnt * strd_sz;
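/*
 * Illustrative numbers: with a 2048-byte stride size, a single consumed
 * stride, len = 2000 and the default 128-byte RTE_PKTMBUF_HEADROOM,
 * hdrm_overlap = 2000 + 128 - 2048 = 80, i.e. the last 80 bytes of the
 * packet share the stride with the next packet's headroom and need the
 * special handling below.
 */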
1699 * Memcpy the packet to the target mbuf if:
1700 * - The size of the packet is smaller than mprq_max_memcpy_len.
1701 * - The mempool for Multi-Packet RQ is out of buffers.
1702 * - The packet's stride overlaps the next packet's headroom and scatter is off.
1704 if (len <= rxq->mprq_max_memcpy_len ||
1705 rxq->mprq_repl == NULL ||
1706 (hdrm_overlap > 0 && !rxq->strd_scatter_en)) {
1707 if (likely(rte_pktmbuf_tailroom(pkt) >= len)) {
1708 rte_memcpy(rte_pktmbuf_mtod(pkt, void *),
1710 DATA_LEN(pkt) = len;
1711 } else if (rxq->strd_scatter_en) {
1712 struct rte_mbuf *prev = pkt;
1714 RTE_MIN(rte_pktmbuf_tailroom(pkt), len);
1715 uint32_t rem_len = len - seg_len;
1717 rte_memcpy(rte_pktmbuf_mtod(pkt, void *),
1719 DATA_LEN(pkt) = seg_len;
1721 struct rte_mbuf *next =
1722 rte_pktmbuf_alloc(rxq->mp);
1724 if (unlikely(next == NULL)) {
1725 rte_pktmbuf_free(pkt);
1726 ++rxq->stats.rx_nombuf;
1730 SET_DATA_OFF(next, 0);
1731 addr = RTE_PTR_ADD(addr, seg_len);
1733 (rte_pktmbuf_tailroom(next),
1736 (rte_pktmbuf_mtod(next, void *),
1738 DATA_LEN(next) = seg_len;
1744 rte_pktmbuf_free_seg(pkt);
1745 ++rxq->stats.idropped;
1749 rte_iova_t buf_iova;
1750 struct rte_mbuf_ext_shared_info *shinfo;
1751 uint16_t buf_len = strd_cnt * strd_sz;
1754 /* Increment the refcnt of the whole chunk. */
1755 __atomic_add_fetch(&buf->refcnt, 1, __ATOMIC_RELAXED);
1756 MLX5_ASSERT(__atomic_load_n(&buf->refcnt,
1757 __ATOMIC_RELAXED) <= strd_n + 1);
1758 buf_addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM);
1760 * MLX5 device doesn't use iova but it is necessary in a
1761 * case where the Rx packet is transmitted via a
1764 buf_iova = rte_mempool_virt2iova(buf) +
1765 RTE_PTR_DIFF(buf_addr, buf);
1766 shinfo = &buf->shinfos[strd_idx];
1767 rte_mbuf_ext_refcnt_set(shinfo, 1);
1769 * EXT_ATTACHED_MBUF will be set to pkt->ol_flags when
1770 * attaching the stride to mbuf and more offload flags
1771 * will be added below by calling rxq_cq_to_mbuf().
1772 * Other fields will be overwritten.
1774 rte_pktmbuf_attach_extbuf(pkt, buf_addr, buf_iova,
1776 /* Set mbuf head-room. */
1777 SET_DATA_OFF(pkt, RTE_PKTMBUF_HEADROOM);
1778 MLX5_ASSERT(pkt->ol_flags == EXT_ATTACHED_MBUF);
1779 MLX5_ASSERT(rte_pktmbuf_tailroom(pkt) >=
1780 len - (hdrm_overlap > 0 ? hdrm_overlap : 0));
1781 DATA_LEN(pkt) = len;
1783 * Copy the last fragment of a packet (up to headroom
1784 * size bytes) in case there is a stride overlap with
1785 * a next packet's headroom. Allocate a separate mbuf
1786 * to store this fragment and link it. Scatter is on.
1788 if (hdrm_overlap > 0) {
1789 MLX5_ASSERT(rxq->strd_scatter_en);
1790 struct rte_mbuf *seg =
1791 rte_pktmbuf_alloc(rxq->mp);
1793 if (unlikely(seg == NULL)) {
1794 rte_pktmbuf_free_seg(pkt);
1795 ++rxq->stats.rx_nombuf;
1798 SET_DATA_OFF(seg, 0);
1799 rte_memcpy(rte_pktmbuf_mtod(seg, void *),
1800 RTE_PTR_ADD(addr, len - hdrm_overlap),
1802 DATA_LEN(seg) = hdrm_overlap;
1803 DATA_LEN(pkt) = len - hdrm_overlap;
1808 rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
1809 if (cqe->lro_num_seg > 1) {
1810 mlx5_lro_update_hdr(addr, cqe, len);
1811 pkt->ol_flags |= PKT_RX_LRO;
1812 pkt->tso_segsz = len / cqe->lro_num_seg;
1815 PORT(pkt) = rxq->port_id;
1816 #ifdef MLX5_PMD_SOFT_COUNTERS
1817 /* Increment bytes counter. */
1818 rxq->stats.ibytes += PKT_LEN(pkt);
1820 /* Return packet. */
1825 /* Update the consumer indexes. */
1826 rxq->consumed_strd = consumed_strd;
1828 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1829 if (rq_ci != rxq->rq_ci) {
1832 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
1834 #ifdef MLX5_PMD_SOFT_COUNTERS
1835 /* Increment packets counter. */
1836 rxq->stats.ipackets += i;
1842 * Dummy DPDK callback for TX.
1844 * This function is used to temporarily replace the real callback during
1845 * unsafe control operations on the queue, or in case of error.
1848 * Generic pointer to TX queue structure.
1850 * Packets to transmit.
1852 * Number of packets in array.
1855 * Number of packets successfully transmitted (<= pkts_n).
1858 removed_tx_burst(void *dpdk_txq __rte_unused,
1859 struct rte_mbuf **pkts __rte_unused,
1860 uint16_t pkts_n __rte_unused)
1867 * Dummy DPDK callback for RX.
1869 * This function is used to temporarily replace the real callback during
1870 * unsafe control operations on the queue, or in case of error.
1873 * Generic pointer to RX queue structure.
1875 * Array to store received packets.
1877 * Maximum number of packets in array.
1880 * Number of packets successfully received (<= pkts_n).
1883 removed_rx_burst(void *dpdk_txq __rte_unused,
1884 struct rte_mbuf **pkts __rte_unused,
1885 uint16_t pkts_n __rte_unused)
1892 * Vectorized Rx/Tx routines are not compiled in when required vector
1893 * instructions are not supported on a target architecture. The following null
1894 * stubs are needed for linkage when those are not included outside of this file
1895 * (e.g. mlx5_rxtx_vec_sse.c for x86).
1899 mlx5_rx_burst_vec(void *dpdk_txq __rte_unused,
1900 struct rte_mbuf **pkts __rte_unused,
1901 uint16_t pkts_n __rte_unused)
1907 mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)
1913 mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused)
1919 * Free the mbufs from the linear array of pointers.
1922 * Pointer to the array of packets to be freed.
1924 * Number of packets to be freed.
1926 * Configured Tx offloads mask. It is fully defined at
1927 * compile time and may be used for optimization.
1929 static __rte_always_inline void
1930 mlx5_tx_free_mbuf(struct rte_mbuf **__rte_restrict pkts,
1931 unsigned int pkts_n,
1932 unsigned int olx __rte_unused)
1934 struct rte_mempool *pool = NULL;
1935 struct rte_mbuf **p_free = NULL;
1936 struct rte_mbuf *mbuf;
1937 unsigned int n_free = 0;
1940 * The implemented algorithm eliminates
1941 * copying pointers to a temporary array
1942 * for the rte_mempool_put_bulk() calls.
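 * As a rough illustration (not from the original sources): for
 * pkts = {m0(pool A), m1(pool A), m2(pool B), m3(pool B)} the loop below
 * extends one contiguous run per pool and ends up issuing just two calls,
 * rte_mempool_put_bulk(A, &pkts[0], 2) and rte_mempool_put_bulk(B, &pkts[2], 2),
 * instead of staging the pointers in a temporary array first.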
1945 MLX5_ASSERT(pkts_n);
1949 * Decrement mbuf reference counter, detach
1950 * indirect and external buffers if needed.
1952 mbuf = rte_pktmbuf_prefree_seg(*pkts);
1953 if (likely(mbuf != NULL)) {
1954 MLX5_ASSERT(mbuf == *pkts);
1955 if (likely(n_free != 0)) {
1956 if (unlikely(pool != mbuf->pool))
1957 /* From different pool. */
1960 /* Start new scan array. */
1967 if (unlikely(pkts_n == 0)) {
1973 * This happens if mbuf is still referenced.
1974 * We can't put it back to the pool, skip.
1978 if (unlikely(n_free != 0))
1979 /* There is some array to free.*/
1981 if (unlikely(pkts_n == 0))
1982 /* Last mbuf, nothing to free. */
1988 * This loop is implemented to avoid multiple
1989 * inlining of rte_mempool_put_bulk().
1992 MLX5_ASSERT(p_free);
1993 MLX5_ASSERT(n_free);
1995 * Free the array of pre-freed mbufs
1996 * belonging to the same memory pool.
1998 rte_mempool_put_bulk(pool, (void *)p_free, n_free);
1999 if (unlikely(mbuf != NULL)) {
2000 /* There is the request to start new scan. */
2005 if (likely(pkts_n != 0))
2008 * This is the last mbuf to be freed.
2009 * Do one more loop iteration to complete.
2010 * This is a rare case of the last unique mbuf.
2015 if (likely(pkts_n == 0))
2024 * Free the mbufs from the elts ring buffer up to the new tail.
2027 * Pointer to Tx queue structure.
2029 * Index in elts to free up to, becomes new elts tail.
2031 * Configured Tx offloads mask. It is fully defined at
2032 * compile time and may be used for optimization.
2034 static __rte_always_inline void
2035 mlx5_tx_free_elts(struct mlx5_txq_data *__rte_restrict txq,
2037 unsigned int olx __rte_unused)
2039 uint16_t n_elts = tail - txq->elts_tail;
2041 MLX5_ASSERT(n_elts);
2042 MLX5_ASSERT(n_elts <= txq->elts_s);
2044 * Implement a loop to support ring buffer wraparound
2045 * with single inlining of mlx5_tx_free_mbuf().
2050 part = txq->elts_s - (txq->elts_tail & txq->elts_m);
2051 part = RTE_MIN(part, n_elts);
2053 MLX5_ASSERT(part <= txq->elts_s);
2054 mlx5_tx_free_mbuf(&txq->elts[txq->elts_tail & txq->elts_m],
2056 txq->elts_tail += part;
2062 * Store the mbufs being sent into the elts ring buffer.
2063 * On Tx completion these mbufs will be freed.
2066 * Pointer to Tx queue structure.
2068 * Pointer to array of packets to be stored.
2070 * Number of packets to be stored.
2072 * Configured Tx offloads mask. It is fully defined at
2073 * compile time and may be used for optimization.
2075 static __rte_always_inline void
2076 mlx5_tx_copy_elts(struct mlx5_txq_data *__rte_restrict txq,
2077 struct rte_mbuf **__rte_restrict pkts,
2078 unsigned int pkts_n,
2079 unsigned int olx __rte_unused)
2082 struct rte_mbuf **elts = (struct rte_mbuf **)txq->elts;
2085 MLX5_ASSERT(pkts_n);
2086 part = txq->elts_s - (txq->elts_head & txq->elts_m);
2088 MLX5_ASSERT(part <= txq->elts_s);
2089 /* This code is a good candidate for vectorizing with SIMD. */
2090 rte_memcpy((void *)(elts + (txq->elts_head & txq->elts_m)),
2092 RTE_MIN(part, pkts_n) * sizeof(struct rte_mbuf *));
2093 txq->elts_head += pkts_n;
2094 if (unlikely(part < pkts_n))
2095 /* The copy is wrapping around the elts array. */
2096 rte_memcpy((void *)elts, (void *)(pkts + part),
2097 (pkts_n - part) * sizeof(struct rte_mbuf *));
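/*
 * Illustrative example (hypothetical sizes): with elts_s = 256 and
 * (elts_head & elts_m) == 250, storing pkts_n = 10 mbufs copies the first
 * part = 6 pointers to elts[250..255] and the remaining 4 to elts[0..3]
 * via the wrap-around rte_memcpy() above; elts_head keeps growing
 * monotonically and is masked on access.
 */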
2101 * Update completion queue consuming index via doorbell
2102 * and flush the completed data buffers.
2105 * Pointer to TX queue structure.
2106 * @param last_cqe
2107 *   Valid CQE pointer; if not NULL, update txq->wqe_pi and flush the buffers.
2109 * Configured Tx offloads mask. It is fully defined at
2110 * compile time and may be used for optimization.
2112 static __rte_always_inline void
2113 mlx5_tx_comp_flush(struct mlx5_txq_data *__rte_restrict txq,
2114 volatile struct mlx5_cqe *last_cqe,
2115 unsigned int olx __rte_unused)
2117 if (likely(last_cqe != NULL)) {
2120 txq->wqe_pi = rte_be_to_cpu_16(last_cqe->wqe_counter);
2121 tail = txq->fcqs[(txq->cq_ci - 1) & txq->cqe_m];
2122 if (likely(tail != txq->elts_tail)) {
2123 mlx5_tx_free_elts(txq, tail, olx);
2124 MLX5_ASSERT(tail == txq->elts_tail);
2130 * Manage TX completions. This routine checks the CQ for
2131 * arrived CQEs, deduces the last accomplished WQE in SQ,
2132 * updates SQ producing index and frees all completed mbufs.
2135 * Pointer to TX queue structure.
2137 * Configured Tx offloads mask. It is fully defined at
2138 * compile time and may be used for optimization.
2140 * NOTE: not inlined intentionally, it makes the tx_burst
2141 * routine smaller, simpler and faster - per experiments.
2144 mlx5_tx_handle_completion(struct mlx5_txq_data *__rte_restrict txq,
2145 unsigned int olx __rte_unused)
2147 unsigned int count = MLX5_TX_COMP_MAX_CQE;
2148 volatile struct mlx5_cqe *last_cqe = NULL;
2149 bool ring_doorbell = false;
2152 static_assert(MLX5_CQE_STATUS_HW_OWN < 0, "Must be negative value");
2153 static_assert(MLX5_CQE_STATUS_SW_OWN < 0, "Must be negative value");
2155 volatile struct mlx5_cqe *cqe;
2157 cqe = &txq->cqes[txq->cq_ci & txq->cqe_m];
2158 ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci);
2159 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
2160 if (likely(ret != MLX5_CQE_STATUS_ERR)) {
2161 /* No new CQEs in completion queue. */
2162 MLX5_ASSERT(ret == MLX5_CQE_STATUS_HW_OWN);
2166 * Some error occurred, try to restart.
2167 * We have no barrier after the WQE related doorbell
2168 * is written, so make sure all writes are completed
2169 * here before we might perform an SQ reset.
2172 ret = mlx5_tx_error_cqe_handle
2173 (txq, (volatile struct mlx5_err_cqe *)cqe);
2174 if (unlikely(ret < 0)) {
2176 * Some error occurred during queue error
2177 * handling; we do not advance the index
2178 * here, allowing a retry on the next call.
2183 * We are going to fetch all entries with
2184 * MLX5_CQE_SYNDROME_WR_FLUSH_ERR status.
2185 * The send queue is supposed to be empty.
2187 ring_doorbell = true;
2189 txq->cq_pi = txq->cq_ci;
2193 /* Normal transmit completion. */
2194 MLX5_ASSERT(txq->cq_ci != txq->cq_pi);
2195 MLX5_ASSERT((txq->fcqs[txq->cq_ci & txq->cqe_m] >> 16) ==
2197 ring_doorbell = true;
2201 * We have to restrict the number of processed CQEs
2202 * per tx_burst routine call. The CQ may be large
2203 * and many CQEs may be updated by the NIC in one
2204 * transaction. Freeing buffers is time consuming, and
2205 * multiple iterations may introduce significant
2208 if (likely(--count == 0))
2211 if (likely(ring_doorbell)) {
2212 /* Ring doorbell to notify hardware. */
2213 rte_compiler_barrier();
2214 *txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
2215 mlx5_tx_comp_flush(txq, last_cqe, olx);
2220 * Check if the completion request flag should be set in the last WQE.
2221 * Both pushed mbufs and WQEs are monitored and the completion request
2222 * flag is set if any of the thresholds is reached.
2225 * Pointer to TX queue structure.
2227 * Pointer to burst routine local context.
2229 * Configured Tx offloads mask. It is fully defined at
2230 * compile time and may be used for optimization.
2232 static __rte_always_inline void
2233 mlx5_tx_request_completion(struct mlx5_txq_data *__rte_restrict txq,
2234 struct mlx5_txq_local *__rte_restrict loc,
2237 uint16_t head = txq->elts_head;
2240 part = MLX5_TXOFF_CONFIG(INLINE) ?
2241 0 : loc->pkts_sent - loc->pkts_copy;
2243 if ((uint16_t)(head - txq->elts_comp) >= MLX5_TX_COMP_THRESH ||
2244 (MLX5_TXOFF_CONFIG(INLINE) &&
2245 (uint16_t)(txq->wqe_ci - txq->wqe_comp) >= txq->wqe_thres)) {
2246 volatile struct mlx5_wqe *last = loc->wqe_last;
2249 txq->elts_comp = head;
2250 if (MLX5_TXOFF_CONFIG(INLINE))
2251 txq->wqe_comp = txq->wqe_ci;
2252 /* Request unconditional completion on last WQE. */
2253 last->cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
2254 MLX5_COMP_MODE_OFFSET);
2255 /* Save elts_head in the dedicated free-on-completion queue. */
2256 #ifdef RTE_LIBRTE_MLX5_DEBUG
2257 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head |
2258 (last->cseg.opcode >> 8) << 16;
2260 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head;
2262 /* A CQE slot must always be available. */
2263 MLX5_ASSERT((txq->cq_pi - txq->cq_ci) <= txq->cqe_s);
2268 * DPDK callback to check the status of a tx descriptor.
2273 * The index of the descriptor in the ring.
2276 * The status of the tx descriptor.
2279 mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
2281 struct mlx5_txq_data *__rte_restrict txq = tx_queue;
2284 mlx5_tx_handle_completion(txq, 0);
2285 used = txq->elts_head - txq->elts_tail;
2287 return RTE_ETH_TX_DESC_FULL;
2288 return RTE_ETH_TX_DESC_DONE;
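/*
 * Usage sketch (application side, not part of the PMD): this callback is
 * reached through the generic ethdev API, for example:
 *
 *	int st = rte_eth_tx_descriptor_status(port_id, queue_id, offset);
 *
 *	if (st == RTE_ETH_TX_DESC_DONE)
 *		;	/* descriptor was processed, its mbuf may be reused */
 *	else if (st == RTE_ETH_TX_DESC_FULL)
 *		;	/* descriptor is still held by the hardware */
 *
 * port_id, queue_id and offset are assumed to be application-provided.
 */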
2292 * Build the Control Segment with specified opcode:
2293 * - MLX5_OPCODE_SEND
2294 * - MLX5_OPCODE_ENHANCED_MPSW
2298 * Pointer to TX queue structure.
2300 * Pointer to burst routine local context.
2302 * Pointer to WQE to fill with built Control Segment.
2304 * Supposed length of WQE in segments.
2306 * SQ WQE opcode to put into Control Segment.
2308 * Configured Tx offloads mask. It is fully defined at
2309 * compile time and may be used for optimization.
2311 static __rte_always_inline void
2312 mlx5_tx_cseg_init(struct mlx5_txq_data *__rte_restrict txq,
2313 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
2314 struct mlx5_wqe *__rte_restrict wqe,
2316 unsigned int opcode,
2317 unsigned int olx __rte_unused)
2319 struct mlx5_wqe_cseg *__rte_restrict cs = &wqe->cseg;
2321 /* For legacy MPW replace the EMPW by TSO with modifier. */
2322 if (MLX5_TXOFF_CONFIG(MPW) && opcode == MLX5_OPCODE_ENHANCED_MPSW)
2323 opcode = MLX5_OPCODE_TSO | MLX5_OPC_MOD_MPW << 24;
2324 cs->opcode = rte_cpu_to_be_32((txq->wqe_ci << 8) | opcode);
2325 cs->sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
2326 cs->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
2327 MLX5_COMP_MODE_OFFSET);
2328 cs->misc = RTE_BE32(0);
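/*
 * Layout sketch of the Control Segment opcode dword built above; the field
 * widths follow from the shifts used in this function, nothing new is
 * introduced here:
 *
 *	bits  0..7	opcode (MLX5_OPCODE_SEND/TSO/ENHANCED_MPSW/...)
 *	bits  8..23	WQE index, lower 16 bits of txq->wqe_ci
 *	bits 24..31	opcode modifier (MLX5_OPC_MOD_MPW for legacy MPW)
 *
 * For example, a hypothetical wqe_ci of 0x1234 with MLX5_OPCODE_SEND is
 * encoded as rte_cpu_to_be_32((0x1234 << 8) | MLX5_OPCODE_SEND).
 */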
2332 * Build the Synchronize Queue Segment with specified completion index.
2335 * Pointer to TX queue structure.
2337 * Pointer to burst routine local context.
2339 * Pointer to WQE to fill with built Control Segment.
2341 * Completion index in Clock Queue to wait.
2343 * Configured Tx offloads mask. It is fully defined at
2344 * compile time and may be used for optimization.
2346 static __rte_always_inline void
2347 mlx5_tx_wseg_init(struct mlx5_txq_data *restrict txq,
2348 struct mlx5_txq_local *restrict loc __rte_unused,
2349 struct mlx5_wqe *restrict wqe,
2351 unsigned int olx __rte_unused)
2353 struct mlx5_wqe_qseg *qs;
2355 qs = RTE_PTR_ADD(wqe, MLX5_WSEG_SIZE);
2356 qs->max_index = rte_cpu_to_be_32(wci);
2357 qs->qpn_cqn = rte_cpu_to_be_32(txq->sh->txpp.clock_queue.cq->id);
2358 qs->reserved0 = RTE_BE32(0);
2359 qs->reserved1 = RTE_BE32(0);
2363 * Build the Ethernet Segment without inlined data.
2364 * Supports Software Parser, Checksums and VLAN
2365 * insertion Tx offload features.
2368 * Pointer to TX queue structure.
2370 * Pointer to burst routine local context.
2372 * Pointer to WQE to fill with built Ethernet Segment.
2374 * Configured Tx offloads mask. It is fully defined at
2375 * compile time and may be used for optimization.
2377 static __rte_always_inline void
2378 mlx5_tx_eseg_none(struct mlx5_txq_data *__rte_restrict txq __rte_unused,
2379 struct mlx5_txq_local *__rte_restrict loc,
2380 struct mlx5_wqe *__rte_restrict wqe,
2383 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
2387 * Calculate and set check sum flags first, dword field
2388 * in segment may be shared with Software Parser flags.
2390 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2391 es->flags = rte_cpu_to_le_32(csum);
2393 * Calculate and set Software Parser offsets and flags.
2394 * These flags are set for custom UDP and IP tunnel packets.
2396 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2397 /* Fill metadata field if needed. */
2398 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2399 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2400 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2401 /* Engage VLAN tag insertion feature if requested. */
2402 if (MLX5_TXOFF_CONFIG(VLAN) &&
2403 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
2405 * We should get here only if the device supports
2406 * this feature correctly.
2408 MLX5_ASSERT(txq->vlan_en);
2409 es->inline_hdr = rte_cpu_to_be_32(MLX5_ETH_WQE_VLAN_INSERT |
2410 loc->mbuf->vlan_tci);
2412 es->inline_hdr = RTE_BE32(0);
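/*
 * Reading aid, not part of the transmit path: the nested conditional used
 * for the metadata field above expands to this equivalent, more verbose
 * form, where the outer branch is resolved at compile time from olx:
 *
 *	if (MLX5_TXOFF_CONFIG(METADATA)) {
 *		if (loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA)
 *			es->metadata = *RTE_FLOW_DYNF_METADATA(loc->mbuf);
 *		else
 *			es->metadata = 0;
 *	} else {
 *		es->metadata = 0;
 *	}
 */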
2417 * Build the Ethernet Segment with minimal inlined data
2418 * of MLX5_ESEG_MIN_INLINE_SIZE bytes length. This is
2419 * used to fill the gap in single WQEBB WQEs.
2420 * Supports Software Parser, Checksums and VLAN
2421 * insertion Tx offload features.
2424 * Pointer to TX queue structure.
2426 * Pointer to burst routine local context.
2428 * Pointer to WQE to fill with built Ethernet Segment.
2430 * Length of VLAN tag insertion if any.
2432 * Configured Tx offloads mask. It is fully defined at
2433 * compile time and may be used for optimization.
2435 static __rte_always_inline void
2436 mlx5_tx_eseg_dmin(struct mlx5_txq_data *__rte_restrict txq __rte_unused,
2437 struct mlx5_txq_local *__rte_restrict loc,
2438 struct mlx5_wqe *__rte_restrict wqe,
2442 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
2444 uint8_t *psrc, *pdst;
2447 * Calculate and set check sum flags first, dword field
2448 * in segment may be shared with Software Parser flags.
2450 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2451 es->flags = rte_cpu_to_le_32(csum);
2453 * Calculate and set Software Parser offsets and flags.
2454 * These flags are set for custom UDP and IP tunnel packets.
2456 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2457 /* Fill metadata field if needed. */
2458 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2459 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2460 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2461 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2463 sizeof(rte_v128u32_t)),
2464 "invalid Ethernet Segment data size");
2465 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2467 sizeof(struct rte_vlan_hdr) +
2468 2 * RTE_ETHER_ADDR_LEN),
2469 "invalid Ethernet Segment data size");
2470 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
2471 es->inline_hdr_sz = RTE_BE16(MLX5_ESEG_MIN_INLINE_SIZE);
2472 es->inline_data = *(unaligned_uint16_t *)psrc;
2473 psrc += sizeof(uint16_t);
2474 pdst = (uint8_t *)(es + 1);
2475 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2476 /* Implement VLAN tag insertion as part of the inline data. */
2477 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
2478 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2479 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2480 /* Insert VLAN ethertype + VLAN tag. */
2481 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2482 ((RTE_ETHER_TYPE_VLAN << 16) |
2483 loc->mbuf->vlan_tci);
2484 pdst += sizeof(struct rte_vlan_hdr);
2485 /* Copy the remaining two bytes from the packet data. */
2486 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
2487 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
2489 /* Fill the gap in the title WQEBB with inline data. */
2490 rte_mov16(pdst, psrc);
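/*
 * Byte layout sketch of the MLX5_ESEG_MIN_INLINE_SIZE bytes of title
 * inline data built above, derived from the static_asserts and the copy
 * calls in this function (no new sizes are assumed):
 *
 *	without VLAN insertion:
 *		[2 bytes in es->inline_data][16 bytes via rte_mov16()]
 *	with VLAN insertion:
 *		[2 bytes][10 remaining MAC address bytes]
 *		[4-byte VLAN ethertype + tag][2 more packet bytes]
 *
 * i.e. sizeof(uint16_t) + sizeof(rte_v128u32_t) equals
 * sizeof(uint16_t) + 2 * RTE_ETHER_ADDR_LEN + sizeof(struct rte_vlan_hdr).
 */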
2495 * Build the Ethernet Segment with entire packet
2496 * data inlining. Checks the boundary of WQEBB and
2497 * ring buffer wrapping, supports Software Parser,
2498 * Checksums and VLAN insertion Tx offload features.
2501 * Pointer to TX queue structure.
2503 * Pointer to burst routine local context.
2505 * Pointer to WQE to fill with built Ethernet Segment.
2507 * Length of VLAN tag insertion if any.
2509 * Length of data to inline (VLAN included, if any).
2511 * TSO flag, set mss field from the packet.
2513 * Configured Tx offloads mask. It is fully defined at
2514 * compile time and may be used for optimization.
2517 * Pointer to the next Data Segment (aligned and wrapped around).
2519 static __rte_always_inline struct mlx5_wqe_dseg *
2520 mlx5_tx_eseg_data(struct mlx5_txq_data *__rte_restrict txq,
2521 struct mlx5_txq_local *__rte_restrict loc,
2522 struct mlx5_wqe *__rte_restrict wqe,
2528 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
2530 uint8_t *psrc, *pdst;
2534 * Calculate and set check sum flags first, dword field
2535 * in segment may be shared with Software Parser flags.
2537 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2540 csum |= loc->mbuf->tso_segsz;
2541 es->flags = rte_cpu_to_be_32(csum);
2543 es->flags = rte_cpu_to_le_32(csum);
2546 * Calculate and set Software Parser offsets and flags.
2547 * These flags are set for custom UDP and IP tunnel packets.
2549 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2550 /* Fill metadata field if needed. */
2551 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2552 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2553 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2554 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2556 sizeof(rte_v128u32_t)),
2557 "invalid Ethernet Segment data size");
2558 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2560 sizeof(struct rte_vlan_hdr) +
2561 2 * RTE_ETHER_ADDR_LEN),
2562 "invalid Ethernet Segment data size");
2563 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
2564 es->inline_hdr_sz = rte_cpu_to_be_16(inlen);
2565 es->inline_data = *(unaligned_uint16_t *)psrc;
2566 psrc += sizeof(uint16_t);
2567 pdst = (uint8_t *)(es + 1);
2568 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2569 /* Implement VLAN tag insertion as part of the inline data. */
2570 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
2571 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2572 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2573 /* Insert VLAN ethertype + VLAN tag. */
2574 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2575 ((RTE_ETHER_TYPE_VLAN << 16) |
2576 loc->mbuf->vlan_tci);
2577 pdst += sizeof(struct rte_vlan_hdr);
2579 /* Copy the remaining two bytes from the packet data. */
2579 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
2580 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
2581 psrc += sizeof(uint16_t);
2583 /* Fill the gap in the title WQEBB with inline data. */
2584 rte_mov16(pdst, psrc);
2585 psrc += sizeof(rte_v128u32_t);
2587 pdst = (uint8_t *)(es + 2);
2588 MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
2589 MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
2590 inlen -= MLX5_ESEG_MIN_INLINE_SIZE;
2592 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
2593 return (struct mlx5_wqe_dseg *)pdst;
2596 * The WQEBB space availability is checked by caller.
2597 * Here we should be aware of WQE ring buffer wraparound only.
2599 part = (uint8_t *)txq->wqes_end - pdst;
2600 part = RTE_MIN(part, inlen);
2602 rte_memcpy(pdst, psrc, part);
2604 if (likely(!inlen)) {
2606 * If return value is not used by the caller
2607 * the code below will be optimized out.
2610 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2611 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
2612 pdst = (uint8_t *)txq->wqes;
2613 return (struct mlx5_wqe_dseg *)pdst;
2615 pdst = (uint8_t *)txq->wqes;
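/*
 * Simplified sketch, not part of the driver, of the wraparound copy
 * pattern used above: the inline data is written in at most two chunks,
 * first up to the end of the WQE ring buffer, then from its beginning.
 *
 *	part = RTE_MIN((unsigned int)((uint8_t *)txq->wqes_end - pdst), inlen);
 *	rte_memcpy(pdst, psrc, part);
 *	inlen -= part;
 *	if (inlen) {
 *		psrc += part;
 *		pdst = (uint8_t *)txq->wqes;	// wrap to the ring start
 *		rte_memcpy(pdst, psrc, inlen);
 *	}
 *	// the returned pointer is then aligned up to MLX5_WSEG_SIZE
 */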
2622 * Copy data from a chain of mbufs to the specified linear buffer.
2623 * If the data from some mbuf is copied completely,
2624 * this mbuf is freed. The local
2625 * structure is used to keep the byte stream state.
2628 * Pointer to the destination linear buffer.
2630 * Pointer to burst routine local context.
2632 * Length of data to be copied.
2634 * Length of data to be copied ignoring no inline hint.
2636 * Configured Tx offloads mask. It is fully defined at
2637 * compile time and may be used for optimization.
2640 * Number of data bytes actually copied. This is always greater than or
2641 * equal to the must parameter and might be less than len if the no-inline
2642 * hint flag is encountered.
2644 static __rte_always_inline unsigned int
2645 mlx5_tx_mseg_memcpy(uint8_t *pdst,
2646 struct mlx5_txq_local *__rte_restrict loc,
2649 unsigned int olx __rte_unused)
2651 struct rte_mbuf *mbuf;
2652 unsigned int part, dlen, copy = 0;
2656 MLX5_ASSERT(must <= len);
2658 /* Allow zero length packets, must check first. */
2659 dlen = rte_pktmbuf_data_len(loc->mbuf);
2660 if (dlen <= loc->mbuf_off) {
2661 /* Exhausted packet, just free. */
2663 loc->mbuf = mbuf->next;
2664 rte_pktmbuf_free_seg(mbuf);
2666 MLX5_ASSERT(loc->mbuf_nseg > 1);
2667 MLX5_ASSERT(loc->mbuf);
2669 if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
2674 * We already copied the minimal
2675 * requested amount of data.
2680 if (diff <= rte_pktmbuf_data_len(loc->mbuf)) {
2682 * Copy only the minimal required
2683 * part of the data buffer.
2690 dlen -= loc->mbuf_off;
2691 psrc = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
2693 part = RTE_MIN(len, dlen);
2694 rte_memcpy(pdst, psrc, part);
2696 loc->mbuf_off += part;
2699 if (loc->mbuf_off >= rte_pktmbuf_data_len(loc->mbuf)) {
2701 /* Exhausted packet, just free. */
2703 loc->mbuf = mbuf->next;
2704 rte_pktmbuf_free_seg(mbuf);
2706 MLX5_ASSERT(loc->mbuf_nseg >= 1);
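/*
 * State sketch, no new logic: loc->mbuf, loc->mbuf_off and loc->mbuf_nseg
 * form a cursor over the mbuf chain, so the copy may stop in the middle of
 * a segment and be resumed by the next call with the same local context:
 *
 *	current segment: [0 .. mbuf_off) already consumed
 *	                 [mbuf_off .. data_len) still pending
 *
 * Fully consumed segments are released immediately with
 * rte_pktmbuf_free_seg() and mbuf_nseg is decremented.
 */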
2716 * Build the Ethernet Segment with inlined data from
2717 * multi-segment packet. Checks the boundary of WQEBB
2718 * and ring buffer wrapping, supports Software Parser,
2719 * Checksums and VLAN insertion Tx offload features.
2722 * Pointer to TX queue structure.
2724 * Pointer to burst routine local context.
2726 * Pointer to WQE to fill with built Ethernet Segment.
2728 * Length of VLAN tag insertion if any.
2730 * Length of data to inline (VLAN included, if any).
2732 * TSO flag, set mss field from the packet.
2734 * Configured Tx offloads mask. It is fully defined at
2735 * compile time and may be used for optimization.
2738 * Pointer to the next Data Segment (aligned and
2739 * possibly NOT wrapped around - caller should do
2740 * wrapping check on its own).
2742 static __rte_always_inline struct mlx5_wqe_dseg *
2743 mlx5_tx_eseg_mdat(struct mlx5_txq_data *__rte_restrict txq,
2744 struct mlx5_txq_local *__rte_restrict loc,
2745 struct mlx5_wqe *__rte_restrict wqe,
2751 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
2754 unsigned int part, tlen = 0;
2757 * Calculate and set check sum flags first, uint32_t field
2758 * in segment may be shared with Software Parser flags.
2760 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2763 csum |= loc->mbuf->tso_segsz;
2764 es->flags = rte_cpu_to_be_32(csum);
2766 es->flags = rte_cpu_to_le_32(csum);
2769 * Calculate and set Software Parser offsets and flags.
2770 * These flags are set for custom UDP and IP tunnel packets.
2772 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2773 /* Fill metadata field if needed. */
2774 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2775 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2776 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2777 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2779 sizeof(rte_v128u32_t)),
2780 "invalid Ethernet Segment data size");
2781 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2783 sizeof(struct rte_vlan_hdr) +
2784 2 * RTE_ETHER_ADDR_LEN),
2785 "invalid Ethernet Segment data size");
2786 MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
2787 pdst = (uint8_t *)&es->inline_data;
2788 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2789 /* Implement VLAN tag insertion as part of the inline data. */
2790 mlx5_tx_mseg_memcpy(pdst, loc,
2791 2 * RTE_ETHER_ADDR_LEN,
2792 2 * RTE_ETHER_ADDR_LEN, olx);
2793 pdst += 2 * RTE_ETHER_ADDR_LEN;
2794 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2795 ((RTE_ETHER_TYPE_VLAN << 16) |
2796 loc->mbuf->vlan_tci);
2797 pdst += sizeof(struct rte_vlan_hdr);
2798 tlen += 2 * RTE_ETHER_ADDR_LEN + sizeof(struct rte_vlan_hdr);
2800 MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
2802 * The WQEBB space availability is checked by caller.
2803 * Here we should be aware of WQE ring buffer wraparound only.
2805 part = (uint8_t *)txq->wqes_end - pdst;
2806 part = RTE_MIN(part, inlen - tlen);
2812 * Copying may be interrupted inside the routine
2813 * if it runs into the no-inline hint flag.
2815 copy = tlen >= txq->inlen_mode ? 0 : (txq->inlen_mode - tlen);
2816 copy = mlx5_tx_mseg_memcpy(pdst, loc, part, copy, olx);
2818 if (likely(inlen <= tlen) || copy < part) {
2819 es->inline_hdr_sz = rte_cpu_to_be_16(tlen);
2821 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2822 return (struct mlx5_wqe_dseg *)pdst;
2824 pdst = (uint8_t *)txq->wqes;
2825 part = inlen - tlen;
2830 * Build the Data Segment of pointer type.
2833 * Pointer to TX queue structure.
2835 * Pointer to burst routine local context.
2837 * Pointer to WQE to fill with built Data Segment.
2839 * Data buffer to point.
2841 * Data buffer length.
2843 * Configured Tx offloads mask. It is fully defined at
2844 * compile time and may be used for optimization.
2846 static __rte_always_inline void
2847 mlx5_tx_dseg_ptr(struct mlx5_txq_data *__rte_restrict txq,
2848 struct mlx5_txq_local *__rte_restrict loc,
2849 struct mlx5_wqe_dseg *__rte_restrict dseg,
2852 unsigned int olx __rte_unused)
2856 dseg->bcount = rte_cpu_to_be_32(len);
2857 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
2858 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
2862 * Build the Data Segment of pointer type, or of inline
2863 * type if the data length does not exceed the minimal
2864 * Data Segment size.
2867 * Pointer to TX queue structure.
2869 * Pointer to burst routine local context.
2871 * Pointer to WQE to fill with built Data Segment.
2873 * Data buffer to point.
2875 * Data buffer length.
2877 * Configured Tx offloads mask. It is fully defined at
2878 * compile time and may be used for optimization.
2880 static __rte_always_inline void
2881 mlx5_tx_dseg_iptr(struct mlx5_txq_data *__rte_restrict txq,
2882 struct mlx5_txq_local *__rte_restrict loc,
2883 struct mlx5_wqe_dseg *__rte_restrict dseg,
2886 unsigned int olx __rte_unused)
2892 if (len > MLX5_DSEG_MIN_INLINE_SIZE) {
2893 dseg->bcount = rte_cpu_to_be_32(len);
2894 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
2895 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
2899 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
2900 /* Unrolled implementation of generic rte_memcpy. */
2901 dst = (uintptr_t)&dseg->inline_data[0];
2902 src = (uintptr_t)buf;
2904 #ifdef RTE_ARCH_STRICT_ALIGN
2905 MLX5_ASSERT(dst == RTE_PTR_ALIGN(dst, sizeof(uint32_t)));
2906 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
2907 dst += sizeof(uint32_t);
2908 src += sizeof(uint32_t);
2909 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
2910 dst += sizeof(uint32_t);
2911 src += sizeof(uint32_t);
2913 *(uint64_t *)dst = *(unaligned_uint64_t *)src;
2914 dst += sizeof(uint64_t);
2915 src += sizeof(uint64_t);
2919 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
2920 dst += sizeof(uint32_t);
2921 src += sizeof(uint32_t);
2924 *(uint16_t *)dst = *(unaligned_uint16_t *)src;
2925 dst += sizeof(uint16_t);
2926 src += sizeof(uint16_t);
2929 *(uint8_t *)dst = *(uint8_t *)src;
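/*
 * Encoding sketch derived from the code above, with a hypothetical length:
 * for short buffers the Data Segment byte count carries the inline flag,
 * e.g. a 12-byte chunk placed inline would be written as
 *
 *	dseg->bcount = rte_cpu_to_be_32(12 | MLX5_ETH_WQE_DATA_INLINE);
 *
 * while the pointer-type branch keeps the plain length together with the
 * memory key (lkey) and the buffer address.
 */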
2933 * Build the Data Segment of inlined data from single
2934 * segment packet, no VLAN insertion.
2937 * Pointer to TX queue structure.
2939 * Pointer to burst routine local context.
2941 * Pointer to WQE to fill with built Data Segment.
2943 * Data buffer to point.
2945 * Data buffer length.
2947 * Configured Tx offloads mask. It is fully defined at
2948 * compile time and may be used for optimization.
2951 * Pointer to the next Data Segment after inlined data.
2952 * Ring buffer wraparound check is needed. We do not
2953 * do it here because it may not be needed for the
2954 * last packet in the eMPW session.
2956 static __rte_always_inline struct mlx5_wqe_dseg *
2957 mlx5_tx_dseg_empw(struct mlx5_txq_data *__rte_restrict txq,
2958 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
2959 struct mlx5_wqe_dseg *__rte_restrict dseg,
2962 unsigned int olx __rte_unused)
2967 if (!MLX5_TXOFF_CONFIG(MPW)) {
2968 /* Store the descriptor byte counter for eMPW sessions. */
2969 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
2970 pdst = &dseg->inline_data[0];
2972 /* The entire legacy MPW session counter is stored on close. */
2973 pdst = (uint8_t *)dseg;
2976 * The WQEBB space availability is checked by caller.
2977 * Here we should be aware of WQE ring buffer wraparound only.
2979 part = (uint8_t *)txq->wqes_end - pdst;
2980 part = RTE_MIN(part, len);
2982 rte_memcpy(pdst, buf, part);
2986 if (!MLX5_TXOFF_CONFIG(MPW))
2987 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2988 /* Note: no final wraparound check here. */
2989 return (struct mlx5_wqe_dseg *)pdst;
2991 pdst = (uint8_t *)txq->wqes;
2998 * Build the Data Segment of inlined data from single
2999 * segment packet with VLAN insertion.
3002 * Pointer to TX queue structure.
3004 * Pointer to burst routine local context.
3006 * Pointer to the dseg to fill with the built Data Segment.
3008 * Data buffer to point.
3010 * Data buffer length.
3012 * Configured Tx offloads mask. It is fully defined at
3013 * compile time and may be used for optimization.
3016 * Pointer to the next Data Segment after inlined data.
3017 * Ring buffer wraparound check is needed.
3019 static __rte_always_inline struct mlx5_wqe_dseg *
3020 mlx5_tx_dseg_vlan(struct mlx5_txq_data *__rte_restrict txq,
3021 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
3022 struct mlx5_wqe_dseg *__rte_restrict dseg,
3025 unsigned int olx __rte_unused)
3031 MLX5_ASSERT(len > MLX5_ESEG_MIN_INLINE_SIZE);
3032 static_assert(MLX5_DSEG_MIN_INLINE_SIZE ==
3033 (2 * RTE_ETHER_ADDR_LEN),
3034 "invalid Data Segment data size");
3035 if (!MLX5_TXOFF_CONFIG(MPW)) {
3036 /* Store the descriptor byte counter for eMPW sessions. */
3037 dseg->bcount = rte_cpu_to_be_32
3038 ((len + sizeof(struct rte_vlan_hdr)) |
3039 MLX5_ETH_WQE_DATA_INLINE);
3040 pdst = &dseg->inline_data[0];
3042 /* The entire legacy MPW session counter is stored on close. */
3043 pdst = (uint8_t *)dseg;
3045 memcpy(pdst, buf, MLX5_DSEG_MIN_INLINE_SIZE);
3046 buf += MLX5_DSEG_MIN_INLINE_SIZE;
3047 pdst += MLX5_DSEG_MIN_INLINE_SIZE;
3048 len -= MLX5_DSEG_MIN_INLINE_SIZE;
3049 /* Insert VLAN ethertype + VLAN tag. Pointer is aligned. */
3050 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
3051 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
3052 pdst = (uint8_t *)txq->wqes;
3053 *(uint32_t *)pdst = rte_cpu_to_be_32((RTE_ETHER_TYPE_VLAN << 16) |
3054 loc->mbuf->vlan_tci);
3055 pdst += sizeof(struct rte_vlan_hdr);
3057 * The WQEBB space availability is checked by caller.
3058 * Here we should be aware of WQE ring buffer wraparound only.
3060 part = (uint8_t *)txq->wqes_end - pdst;
3061 part = RTE_MIN(part, len);
3063 rte_memcpy(pdst, buf, part);
3067 if (!MLX5_TXOFF_CONFIG(MPW))
3068 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
3069 /* Note: no final wraparound check here. */
3070 return (struct mlx5_wqe_dseg *)pdst;
3072 pdst = (uint8_t *)txq->wqes;
3079 * Build the Ethernet Segment with optionally inlined data with
3080 * VLAN insertion and following Data Segments (if any) from
3081 * multi-segment packet. Used by ordinary send and TSO.
3084 * Pointer to TX queue structure.
3086 * Pointer to burst routine local context.
3088 * Pointer to WQE to fill with built Ethernet/Data Segments.
3090 * Length of VLAN header to insert, 0 means no VLAN insertion.
3092 * Data length to inline. For TSO this parameter specifies
3093 * the exact value; for the ordinary send routine it can be aligned by the
3094 * caller to provide better WQE space saving and data buffer
3095 * start address alignment. This length includes VLAN header
3098 * Zero means ordinary send, the inlined data can be extended;
3099 * otherwise this is TSO and the inlined data length is fixed.
3101 * Configured Tx offloads mask. It is fully defined at
3102 * compile time and may be used for optimization.
3105 * Actual size of built WQE in segments.
3107 static __rte_always_inline unsigned int
3108 mlx5_tx_mseg_build(struct mlx5_txq_data *__rte_restrict txq,
3109 struct mlx5_txq_local *__rte_restrict loc,
3110 struct mlx5_wqe *__rte_restrict wqe,
3114 unsigned int olx __rte_unused)
3116 struct mlx5_wqe_dseg *__rte_restrict dseg;
3119 MLX5_ASSERT((rte_pktmbuf_pkt_len(loc->mbuf) + vlan) >= inlen);
3120 loc->mbuf_nseg = NB_SEGS(loc->mbuf);
3123 dseg = mlx5_tx_eseg_mdat(txq, loc, wqe, vlan, inlen, tso, olx);
3124 if (!loc->mbuf_nseg)
3127 * There are still some mbufs remaining, not inlined.
3128 * The first mbuf may be partially inlined and we
3129 * must process the possible non-zero data offset.
3131 if (loc->mbuf_off) {
3136 * Exhausted packets must be dropped before.
3137 * A non-zero offset means there is some data
3138 * remaining in the packet.
3140 MLX5_ASSERT(loc->mbuf_off < rte_pktmbuf_data_len(loc->mbuf));
3141 MLX5_ASSERT(rte_pktmbuf_data_len(loc->mbuf));
3142 dptr = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
3144 dlen = rte_pktmbuf_data_len(loc->mbuf) - loc->mbuf_off;
3146 * Build the pointer/minimal data Data Segment.
3147 * Do ring buffer wrapping check in advance.
3149 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3150 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3151 mlx5_tx_dseg_iptr(txq, loc, dseg, dptr, dlen, olx);
3152 /* Store the mbuf to be freed on completion. */
3153 MLX5_ASSERT(loc->elts_free);
3154 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3157 if (--loc->mbuf_nseg == 0)
3159 loc->mbuf = loc->mbuf->next;
3163 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
3164 struct rte_mbuf *mbuf;
3166 /* Zero length segment found, just skip. */
3168 loc->mbuf = loc->mbuf->next;
3169 rte_pktmbuf_free_seg(mbuf);
3170 if (--loc->mbuf_nseg == 0)
3173 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3174 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3177 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3178 rte_pktmbuf_data_len(loc->mbuf), olx);
3179 MLX5_ASSERT(loc->elts_free);
3180 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3183 if (--loc->mbuf_nseg == 0)
3185 loc->mbuf = loc->mbuf->next;
3190 /* Calculate actual segments used from the dseg pointer. */
3191 if ((uintptr_t)wqe < (uintptr_t)dseg)
3192 ds = ((uintptr_t)dseg - (uintptr_t)wqe) / MLX5_WSEG_SIZE;
3194 ds = (((uintptr_t)dseg - (uintptr_t)wqe) +
3195 txq->wqe_s * MLX5_WQE_SIZE) / MLX5_WSEG_SIZE;
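/*
 * Worked example with hypothetical numbers, following the arithmetic
 * above: if the dseg pointer ends up seven MLX5_WSEG_SIZE-sized segments
 * past the WQE start, ds = 7; if dseg wrapped below wqe, the ring size
 * txq->wqe_s * MLX5_WQE_SIZE is added back before dividing:
 *
 *	ds = (((uintptr_t)dseg - (uintptr_t)wqe) +
 *	      txq->wqe_s * MLX5_WQE_SIZE) / MLX5_WSEG_SIZE;
 *
 * The callers convert ds to WQEBBs with (ds + 3) / 4, since one WQEBB
 * holds four work queue segments.
 */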
3200 * The routine checks the timestamp flag in the current packet
3201 * and pushes a WAIT WQE into the queue if scheduling is required.
3204 * Pointer to TX queue structure.
3206 * Pointer to burst routine local context.
3208 * Configured Tx offloads mask. It is fully defined at
3209 * compile time and may be used for optimization.
3212 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3213 * MLX5_TXCMP_CODE_SINGLE - continue processing with the packet.
3214 * MLX5_TXCMP_CODE_MULTI - the WAIT WQE was inserted, continue processing.
3215 * Local context variables partially updated.
3217 static __rte_always_inline enum mlx5_txcmp_code
3218 mlx5_tx_schedule_send(struct mlx5_txq_data *restrict txq,
3219 struct mlx5_txq_local *restrict loc,
3222 if (MLX5_TXOFF_CONFIG(TXPP) &&
3223 loc->mbuf->ol_flags & txq->ts_mask) {
3224 struct mlx5_wqe *wqe;
3229 * Estimate the required space quickly and roughly.
3230 * We would like to ensure the packet can be pushed
3231 * to the queue and we won't get the orphan WAIT WQE.
3233 if (loc->wqe_free <= MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE ||
3234 loc->elts_free < NB_SEGS(loc->mbuf))
3235 return MLX5_TXCMP_CODE_EXIT;
3236 /* Convert the timestamp into the completion index to wait for. */
3237 ts = *RTE_MBUF_DYNFIELD(loc->mbuf, txq->ts_offset, uint64_t *);
3238 wci = mlx5_txpp_convert_tx_ts(txq->sh, ts);
3239 if (unlikely(wci < 0))
3240 return MLX5_TXCMP_CODE_SINGLE;
3241 /* Build the WAIT WQE with specified completion. */
3242 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3243 mlx5_tx_cseg_init(txq, loc, wqe, 2, MLX5_OPCODE_WAIT, olx);
3244 mlx5_tx_wseg_init(txq, loc, wqe, wci, olx);
3247 return MLX5_TXCMP_CODE_MULTI;
3249 return MLX5_TXCMP_CODE_SINGLE;
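/*
 * Resulting WQE sketch, following directly from the two init calls above:
 * the WAIT WQE consists of two work queue segments within one WQEBB,
 *
 *	+------------------------------+ Control Segment, MLX5_OPCODE_WAIT,
 *	| cseg (mlx5_tx_cseg_init)     | ds = 2
 *	+------------------------------+ Queue/Sync Segment referencing the
 *	| qseg (mlx5_tx_wseg_init)     | Clock Queue CQ and the completion
 *	+------------------------------+ index (wci) to wait for
 */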
3253 * Tx one packet function for multi-segment TSO. Supports all
3254 * types of Tx offloads, uses MLX5_OPCODE_TSO to build WQEs,
3255 * sends one packet per WQE.
3257 * This routine is responsible for storing the processed mbuf
3258 * into the elts ring buffer and updating elts_head.
3261 * Pointer to TX queue structure.
3263 * Pointer to burst routine local context.
3265 * Configured Tx offloads mask. It is fully defined at
3266 * compile time and may be used for optimization.
3269 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3270 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3271 * Local context variables partially updated.
3273 static __rte_always_inline enum mlx5_txcmp_code
3274 mlx5_tx_packet_multi_tso(struct mlx5_txq_data *__rte_restrict txq,
3275 struct mlx5_txq_local *__rte_restrict loc,
3278 struct mlx5_wqe *__rte_restrict wqe;
3279 unsigned int ds, dlen, inlen, ntcp, vlan = 0;
3281 if (MLX5_TXOFF_CONFIG(TXPP)) {
3282 enum mlx5_txcmp_code wret;
3284 /* Generate WAIT for scheduling if requested. */
3285 wret = mlx5_tx_schedule_send(txq, loc, olx);
3286 if (wret == MLX5_TXCMP_CODE_EXIT)
3287 return MLX5_TXCMP_CODE_EXIT;
3288 if (wret == MLX5_TXCMP_CODE_ERROR)
3289 return MLX5_TXCMP_CODE_ERROR;
3292 * Calculate data length to be inlined to estimate
3293 * the required space in WQE ring buffer.
3295 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
3296 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3297 vlan = sizeof(struct rte_vlan_hdr);
3298 inlen = loc->mbuf->l2_len + vlan +
3299 loc->mbuf->l3_len + loc->mbuf->l4_len;
3300 if (unlikely((!inlen || !loc->mbuf->tso_segsz)))
3301 return MLX5_TXCMP_CODE_ERROR;
3302 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
3303 inlen += loc->mbuf->outer_l2_len + loc->mbuf->outer_l3_len;
3304 /* Packet must contain all TSO headers. */
3305 if (unlikely(inlen > MLX5_MAX_TSO_HEADER ||
3306 inlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
3307 inlen > (dlen + vlan)))
3308 return MLX5_TXCMP_CODE_ERROR;
3309 MLX5_ASSERT(inlen >= txq->inlen_mode);
3311 * Check whether there are enough free WQEBBs:
3313 * - Ethernet Segment
3314 * - First Segment of inlined Ethernet data
3315 * - ... data continued ...
3316 * - Data Segments of pointer/min inline type
3318 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
3319 MLX5_ESEG_MIN_INLINE_SIZE +
3321 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3322 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3323 return MLX5_TXCMP_CODE_EXIT;
3324 /* Check for maximal WQE size. */
3325 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3326 return MLX5_TXCMP_CODE_ERROR;
3327 #ifdef MLX5_PMD_SOFT_COUNTERS
3328 /* Update sent data bytes/packets counters. */
3329 ntcp = (dlen - (inlen - vlan) + loc->mbuf->tso_segsz - 1) /
3330 loc->mbuf->tso_segsz;
3332 * One more packet will be counted for the mbuf itself
3333 * at the end of mlx5_tx_burst, from the
3334 * loc->pkts_sent field.
3337 txq->stats.opackets += ntcp;
3338 txq->stats.obytes += dlen + vlan + ntcp * inlen;
3340 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3341 loc->wqe_last = wqe;
3342 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_TSO, olx);
3343 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 1, olx);
3344 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3345 txq->wqe_ci += (ds + 3) / 4;
3346 loc->wqe_free -= (ds + 3) / 4;
3347 return MLX5_TXCMP_CODE_MULTI;
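/*
 * Worked example for the software counters above (hypothetical packet, not
 * from a trace): dlen = 9000 bytes, inlen = 54 header bytes, no VLAN,
 * tso_segsz = 1460:
 *
 *	ntcp = (9000 - 54 + 1460 - 1) / 1460 = 7 TCP segments on the wire;
 *	txq->stats.obytes += 9000 + 0 + 7 * 54;
 *
 * i.e. the inlined header bytes are counted once per generated segment in
 * addition to the packet payload.
 */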
3351 * Tx one packet function for multi-segment SEND. Supports all
3352 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
3353 * sends one packet per WQE, without any data inlining in
3356 * This routine is responsible for storing the processed mbuf
3357 * into the elts ring buffer and updating elts_head.
3360 * Pointer to TX queue structure.
3362 * Pointer to burst routine local context.
3364 * Configured Tx offloads mask. It is fully defined at
3365 * compile time and may be used for optimization.
3368 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3369 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3370 * Local context variables partially updated.
3372 static __rte_always_inline enum mlx5_txcmp_code
3373 mlx5_tx_packet_multi_send(struct mlx5_txq_data *__rte_restrict txq,
3374 struct mlx5_txq_local *__rte_restrict loc,
3377 struct mlx5_wqe_dseg *__rte_restrict dseg;
3378 struct mlx5_wqe *__rte_restrict wqe;
3379 unsigned int ds, nseg;
3381 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
3382 if (MLX5_TXOFF_CONFIG(TXPP)) {
3383 enum mlx5_txcmp_code wret;
3385 /* Generate WAIT for scheduling if requested. */
3386 wret = mlx5_tx_schedule_send(txq, loc, olx);
3387 if (wret == MLX5_TXCMP_CODE_EXIT)
3388 return MLX5_TXCMP_CODE_EXIT;
3389 if (wret == MLX5_TXCMP_CODE_ERROR)
3390 return MLX5_TXCMP_CODE_ERROR;
3393 * No inlining at all, which means that saving CPU cycles
3394 * was prioritized at configuration time, so we should not
3395 * copy any packet data to the WQE.
3397 nseg = NB_SEGS(loc->mbuf);
3399 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3400 return MLX5_TXCMP_CODE_EXIT;
3401 /* Check for maximal WQE size. */
3402 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3403 return MLX5_TXCMP_CODE_ERROR;
3405 * Some Tx offloads may cause an error if
3406 * packet is not long enough, check against
3407 * assumed minimal length.
3409 if (rte_pktmbuf_pkt_len(loc->mbuf) <= MLX5_ESEG_MIN_INLINE_SIZE)
3410 return MLX5_TXCMP_CODE_ERROR;
3411 #ifdef MLX5_PMD_SOFT_COUNTERS
3412 /* Update sent data bytes counter. */
3413 txq->stats.obytes += rte_pktmbuf_pkt_len(loc->mbuf);
3414 if (MLX5_TXOFF_CONFIG(VLAN) &&
3415 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3416 txq->stats.obytes += sizeof(struct rte_vlan_hdr);
3419 * SEND WQE, one WQEBB:
3420 * - Control Segment, SEND opcode
3421 * - Ethernet Segment, optional VLAN, no inline
3422 * - Data Segments, pointer only type
3424 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3425 loc->wqe_last = wqe;
3426 mlx5_tx_cseg_init(txq, loc, wqe, ds, MLX5_OPCODE_SEND, olx);
3427 mlx5_tx_eseg_none(txq, loc, wqe, olx);
3428 dseg = &wqe->dseg[0];
3430 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
3431 struct rte_mbuf *mbuf;
3434 * Zero length segment found, we have to
3435 * correct the total size of the WQE in segments.
3436 * It is supposed to be a rare occasion, so
3437 * in the normal case (no zero length segments)
3438 * we avoid extra writing to the Control
3442 wqe->cseg.sq_ds -= RTE_BE32(1);
3444 loc->mbuf = mbuf->next;
3445 rte_pktmbuf_free_seg(mbuf);
3451 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3452 rte_pktmbuf_data_len(loc->mbuf), olx);
3453 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3458 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3459 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3460 loc->mbuf = loc->mbuf->next;
3463 txq->wqe_ci += (ds + 3) / 4;
3464 loc->wqe_free -= (ds + 3) / 4;
3465 return MLX5_TXCMP_CODE_MULTI;
3469 * Tx one packet function for multi-segment SEND. Supports all
3470 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
3471 * sends one packet per WQE, with data inlining in
3472 * Ethernet Segment and minimal Data Segments.
3474 * This routine is responsible for storing the processed mbuf
3475 * into the elts ring buffer and updating elts_head.
3478 * Pointer to TX queue structure.
3480 * Pointer to burst routine local context.
3482 * Configured Tx offloads mask. It is fully defined at
3483 * compile time and may be used for optimization.
3486 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3487 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3488 * Local context variables partially updated.
3490 static __rte_always_inline enum mlx5_txcmp_code
3491 mlx5_tx_packet_multi_inline(struct mlx5_txq_data *__rte_restrict txq,
3492 struct mlx5_txq_local *__rte_restrict loc,
3495 struct mlx5_wqe *__rte_restrict wqe;
3496 unsigned int ds, inlen, dlen, vlan = 0;
3498 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
3499 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
3500 if (MLX5_TXOFF_CONFIG(TXPP)) {
3501 enum mlx5_txcmp_code wret;
3503 /* Generate WAIT for scheduling if requested. */
3504 wret = mlx5_tx_schedule_send(txq, loc, olx);
3505 if (wret == MLX5_TXCMP_CODE_EXIT)
3506 return MLX5_TXCMP_CODE_EXIT;
3507 if (wret == MLX5_TXCMP_CODE_ERROR)
3508 return MLX5_TXCMP_CODE_ERROR;
3511 * First calculate data length to be inlined
3512 * to estimate the required space for WQE.
3514 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
3515 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3516 vlan = sizeof(struct rte_vlan_hdr);
3517 inlen = dlen + vlan;
3518 /* Check against minimal length. */
3519 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
3520 return MLX5_TXCMP_CODE_ERROR;
3521 MLX5_ASSERT(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
3522 if (inlen > txq->inlen_send ||
3523 loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
3524 struct rte_mbuf *mbuf;
3529 * Packet length exceeds the allowed inline
3530 * data length, check whether the minimal
3531 * inlining is required.
3533 if (txq->inlen_mode) {
3534 MLX5_ASSERT(txq->inlen_mode >=
3535 MLX5_ESEG_MIN_INLINE_SIZE);
3536 MLX5_ASSERT(txq->inlen_mode <= txq->inlen_send);
3537 inlen = txq->inlen_mode;
3539 if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE ||
3540 !vlan || txq->vlan_en) {
3542 * VLAN insertion will be done inside by HW.
3543 * It is not the most efficient way - the VLAN flag is
3544 * checked twice, but we should compute the
3545 * inlining length correctly and take into
3546 * account the VLAN header being inserted.
3548 return mlx5_tx_packet_multi_send
3551 inlen = MLX5_ESEG_MIN_INLINE_SIZE;
3554 * Now we know the minimal amount of data requested
3555 * to inline. Check whether we should inline the buffers
3556 * from the beginning of the chain to eliminate some mbufs.
3559 nxlen = rte_pktmbuf_data_len(mbuf);
3560 if (unlikely(nxlen <= txq->inlen_send)) {
3561 /* We can inline first mbuf at least. */
3562 if (nxlen < inlen) {
3565 /* Scan mbufs till inlen filled. */
3570 nxlen = rte_pktmbuf_data_len(mbuf);
3572 } while (unlikely(nxlen < inlen));
3573 if (unlikely(nxlen > txq->inlen_send)) {
3574 /* We cannot inline entire mbuf. */
3575 smlen = inlen - smlen;
3576 start = rte_pktmbuf_mtod_offset
3577 (mbuf, uintptr_t, smlen);
3584 /* There should be no end of packet here. */
3586 nxlen = inlen + rte_pktmbuf_data_len(mbuf);
3587 } while (unlikely(nxlen < txq->inlen_send));
3589 start = rte_pktmbuf_mtod(mbuf, uintptr_t);
3591 * Check whether we can do inlining to align the start
3592 * address of the data buffer to a cacheline.
3595 start = (~start + 1) & (RTE_CACHE_LINE_SIZE - 1);
3596 if (unlikely(start)) {
3598 if (start <= txq->inlen_send)
3603 * Check whether there are enough free WQEBBs:
3605 * - Ethernet Segment
3606 * - First Segment of inlined Ethernet data
3607 * - ... data continued ...
3608 * - Data Segments of pointer/min inline type
3610 * Estimate the number of Data Segments conservatively,
3611 * assuming that no mbufs are freed during inlining.
3613 MLX5_ASSERT(inlen <= txq->inlen_send);
3614 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
3615 MLX5_ESEG_MIN_INLINE_SIZE +
3617 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3618 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3619 return MLX5_TXCMP_CODE_EXIT;
3620 /* Check for maximal WQE size. */
3621 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3622 return MLX5_TXCMP_CODE_ERROR;
3623 #ifdef MLX5_PMD_SOFT_COUNTERS
3624 /* Update sent data bytes/packets counters. */
3625 txq->stats.obytes += dlen + vlan;
3627 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3628 loc->wqe_last = wqe;
3629 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_SEND, olx);
3630 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 0, olx);
3631 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3632 txq->wqe_ci += (ds + 3) / 4;
3633 loc->wqe_free -= (ds + 3) / 4;
3634 return MLX5_TXCMP_CODE_MULTI;
3638 * Tx burst function for multi-segment packets. Supports all
3639 * types of Tx offloads, uses MLX5_OPCODE_SEND/TSO to build WQEs,
3640 * sends one packet per WQE. Function stops sending if it
3641 * encounters a single-segment packet.
3643 * This routine is responsible for storing the processed mbuf
3644 * into the elts ring buffer and updating elts_head.
3647 * Pointer to TX queue structure.
3649 * Packets to transmit.
3651 * Number of packets in array.
3653 * Pointer to burst routine local context.
3655 * Configured Tx offloads mask. It is fully defined at
3656 * compile time and may be used for optimization.
3659 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3660 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3661 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
3662 * MLX5_TXCMP_CODE_TSO - TSO single-segment packet encountered.
3663 * Local context variables updated.
3665 static __rte_always_inline enum mlx5_txcmp_code
3666 mlx5_tx_burst_mseg(struct mlx5_txq_data *__rte_restrict txq,
3667 struct rte_mbuf **__rte_restrict pkts,
3668 unsigned int pkts_n,
3669 struct mlx5_txq_local *__rte_restrict loc,
3672 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3673 MLX5_ASSERT(pkts_n > loc->pkts_sent);
3674 pkts += loc->pkts_sent + 1;
3675 pkts_n -= loc->pkts_sent;
3677 enum mlx5_txcmp_code ret;
3679 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
3681 * Estimate the number of free elts quickly but
3682 * conservatively. Some segment may be fully inlined
3683 * and freed, ignore this here - precise estimation
3686 if (loc->elts_free < NB_SEGS(loc->mbuf))
3687 return MLX5_TXCMP_CODE_EXIT;
3688 if (MLX5_TXOFF_CONFIG(TSO) &&
3689 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)) {
3690 /* Proceed with multi-segment TSO. */
3691 ret = mlx5_tx_packet_multi_tso(txq, loc, olx);
3692 } else if (MLX5_TXOFF_CONFIG(INLINE)) {
3693 /* Proceed with multi-segment SEND with inlining. */
3694 ret = mlx5_tx_packet_multi_inline(txq, loc, olx);
3696 /* Proceed with multi-segment SEND w/o inlining. */
3697 ret = mlx5_tx_packet_multi_send(txq, loc, olx);
3699 if (ret == MLX5_TXCMP_CODE_EXIT)
3700 return MLX5_TXCMP_CODE_EXIT;
3701 if (ret == MLX5_TXCMP_CODE_ERROR)
3702 return MLX5_TXCMP_CODE_ERROR;
3703 /* WQE is built, go to the next packet. */
3706 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3707 return MLX5_TXCMP_CODE_EXIT;
3708 loc->mbuf = *pkts++;
3710 rte_prefetch0(*pkts);
3711 if (likely(NB_SEGS(loc->mbuf) > 1))
3713 /* Here ends the series of multi-segment packets. */
3714 if (MLX5_TXOFF_CONFIG(TSO) &&
3715 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
3716 return MLX5_TXCMP_CODE_TSO;
3717 return MLX5_TXCMP_CODE_SINGLE;
3723 * Tx burst function for single-segment packets with TSO.
3724 * Supports all types of Tx offloads, except multi-packets.
3725 * Uses MLX5_OPCODE_TSO to build WQEs, sends one packet per WQE.
3726 * The function stops sending if it encounters a multi-segment
3727 * packet or a packet without TSO requested.
3729 * The routine is responsible for storing the processed mbuf
3730 * into the elts ring buffer and updating elts_head if the inline
3731 * offload is requested, due to possible early freeing
3732 * of the inlined mbufs (we can not store the pkts array in elts
3736 * Pointer to TX queue structure.
3738 * Packets to transmit.
3740 * Number of packets in array.
3742 * Pointer to burst routine local context.
3744 * Configured Tx offloads mask. It is fully defined at
3745 * compile time and may be used for optimization.
3748 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3749 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3750 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
3751 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
3752 * Local context variables updated.
3754 static __rte_always_inline enum mlx5_txcmp_code
3755 mlx5_tx_burst_tso(struct mlx5_txq_data *__rte_restrict txq,
3756 struct rte_mbuf **__rte_restrict pkts,
3757 unsigned int pkts_n,
3758 struct mlx5_txq_local *__rte_restrict loc,
3761 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3762 MLX5_ASSERT(pkts_n > loc->pkts_sent);
3763 pkts += loc->pkts_sent + 1;
3764 pkts_n -= loc->pkts_sent;
3766 struct mlx5_wqe_dseg *__rte_restrict dseg;
3767 struct mlx5_wqe *__rte_restrict wqe;
3768 unsigned int ds, dlen, hlen, ntcp, vlan = 0;
3771 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
3772 if (MLX5_TXOFF_CONFIG(TXPP)) {
3773 enum mlx5_txcmp_code wret;
3775 /* Generate WAIT for scheduling if requested. */
3776 wret = mlx5_tx_schedule_send(txq, loc, olx);
3777 if (wret == MLX5_TXCMP_CODE_EXIT)
3778 return MLX5_TXCMP_CODE_EXIT;
3779 if (wret == MLX5_TXCMP_CODE_ERROR)
3780 return MLX5_TXCMP_CODE_ERROR;
3782 dlen = rte_pktmbuf_data_len(loc->mbuf);
3783 if (MLX5_TXOFF_CONFIG(VLAN) &&
3784 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
3785 vlan = sizeof(struct rte_vlan_hdr);
3788 * First calculate the WQE size to check
3789 * whether we have enough space in ring buffer.
3791 hlen = loc->mbuf->l2_len + vlan +
3792 loc->mbuf->l3_len + loc->mbuf->l4_len;
3793 if (unlikely((!hlen || !loc->mbuf->tso_segsz)))
3794 return MLX5_TXCMP_CODE_ERROR;
3795 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
3796 hlen += loc->mbuf->outer_l2_len +
3797 loc->mbuf->outer_l3_len;
3798 /* Segment must contain all TSO headers. */
3799 if (unlikely(hlen > MLX5_MAX_TSO_HEADER ||
3800 hlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
3801 hlen > (dlen + vlan)))
3802 return MLX5_TXCMP_CODE_ERROR;
3804 * Check whether there are enough free WQEBBs:
3806 * - Ethernet Segment
3807 * - First Segment of inlined Ethernet data
3808 * - ... data continued ...
3809 * - Finishing Data Segment of pointer type
3811 ds = 4 + (hlen - MLX5_ESEG_MIN_INLINE_SIZE +
3812 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3813 if (loc->wqe_free < ((ds + 3) / 4))
3814 return MLX5_TXCMP_CODE_EXIT;
3815 #ifdef MLX5_PMD_SOFT_COUNTERS
3816 /* Update sent data bytes/packets counters. */
3817 ntcp = (dlen + vlan - hlen +
3818 loc->mbuf->tso_segsz - 1) /
3819 loc->mbuf->tso_segsz;
3821 * One more packet will be counted for the mbuf itself at the end
3822 * of mlx5_tx_burst, from the loc->pkts_sent field.
3825 txq->stats.opackets += ntcp;
3826 txq->stats.obytes += dlen + vlan + ntcp * hlen;
3829 * Build the TSO WQE:
3831 * - Ethernet Segment with hlen bytes inlined
3832 * - Data Segment of pointer type
3834 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3835 loc->wqe_last = wqe;
3836 mlx5_tx_cseg_init(txq, loc, wqe, ds,
3837 MLX5_OPCODE_TSO, olx);
3838 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan, hlen, 1, olx);
3839 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) + hlen - vlan;
3840 dlen -= hlen - vlan;
3841 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
3843 * WQE is built, update the loop parameters
3844 * and go to the next packet.
3846 txq->wqe_ci += (ds + 3) / 4;
3847 loc->wqe_free -= (ds + 3) / 4;
3848 if (MLX5_TXOFF_CONFIG(INLINE))
3849 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3853 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3854 return MLX5_TXCMP_CODE_EXIT;
3855 loc->mbuf = *pkts++;
3857 rte_prefetch0(*pkts);
3858 if (MLX5_TXOFF_CONFIG(MULTI) &&
3859 unlikely(NB_SEGS(loc->mbuf) > 1))
3860 return MLX5_TXCMP_CODE_MULTI;
3861 if (likely(!(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)))
3862 return MLX5_TXCMP_CODE_SINGLE;
3863 /* Continue with the next TSO packet. */
3869 * Analyze the packet and select the best method to send.
3872 * Pointer to TX queue structure.
3874 * Pointer to burst routine local context.
3876 * Configured Tx offloads mask. It is fully defined at
3877 * compile time and may be used for optimization.
3879 * The predefined flag whether to do the complete check for
3880 * multi-segment packets and TSO.
3883 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
3884 * MLX5_TXCMP_CODE_TSO - TSO required, use TSO/LSO.
3885 * MLX5_TXCMP_CODE_SINGLE - single-segment packet, use SEND.
3886 * MLX5_TXCMP_CODE_EMPW - single-segment packet, use MPW.
3888 static __rte_always_inline enum mlx5_txcmp_code
3889 mlx5_tx_able_to_empw(struct mlx5_txq_data *__rte_restrict txq,
3890 struct mlx5_txq_local *__rte_restrict loc,
3894 /* Check for multi-segment packet. */
3896 MLX5_TXOFF_CONFIG(MULTI) &&
3897 unlikely(NB_SEGS(loc->mbuf) > 1))
3898 return MLX5_TXCMP_CODE_MULTI;
3899 /* Check for TSO packet. */
3901 MLX5_TXOFF_CONFIG(TSO) &&
3902 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
3903 return MLX5_TXCMP_CODE_TSO;
3904 /* Check if eMPW is enabled at all. */
3905 if (!MLX5_TXOFF_CONFIG(EMPW))
3906 return MLX5_TXCMP_CODE_SINGLE;
3907 /* Check if eMPW can be engaged. */
3908 if (MLX5_TXOFF_CONFIG(VLAN) &&
3909 unlikely(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) &&
3910 (!MLX5_TXOFF_CONFIG(INLINE) ||
3911 unlikely((rte_pktmbuf_data_len(loc->mbuf) +
3912 sizeof(struct rte_vlan_hdr)) > txq->inlen_empw))) {
3914 * eMPW does not support VLAN insertion offload, so
3915 * we would have to inline the entire packet, but the
3916 * packet is too long for inlining.
3918 return MLX5_TXCMP_CODE_SINGLE;
3920 return MLX5_TXCMP_CODE_EMPW;
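/*
 * Decision summary; this only restates the checks above in one place:
 *
 *	NB_SEGS(mbuf) > 1                    -> MLX5_TXCMP_CODE_MULTI
 *	PKT_TX_TCP_SEG requested             -> MLX5_TXCMP_CODE_TSO
 *	eMPW not configured                  -> MLX5_TXCMP_CODE_SINGLE
 *	VLAN requested, cannot be inlined    -> MLX5_TXCMP_CODE_SINGLE
 *	otherwise                            -> MLX5_TXCMP_CODE_EMPW
 */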
3924 * Check the next packet attributes to match with the eMPW batch ones.
3925 * In addition, for legacy MPW the packet length is checked as well.
3928 * Pointer to TX queue structure.
3930 * Pointer to Ethernet Segment of eMPW batch.
3932 * Pointer to burst routine local context.
3934 * Length of previous packet in MPW descriptor.
3936 * Configured Tx offloads mask. It is fully defined at
3937 * compile time and may be used for optimization.
3940 * true - packet match with eMPW batch attributes.
3941 * false - no match, eMPW should be restarted.
3943 static __rte_always_inline bool
3944 mlx5_tx_match_empw(struct mlx5_txq_data *__rte_restrict txq,
3945 struct mlx5_wqe_eseg *__rte_restrict es,
3946 struct mlx5_txq_local *__rte_restrict loc,
3950 uint8_t swp_flags = 0;
3952 /* Compare the checksum flags, if any. */
3953 if (MLX5_TXOFF_CONFIG(CSUM) &&
3954 txq_ol_cksum_to_cs(loc->mbuf) != es->cs_flags)
3956 /* Compare the Software Parser offsets and flags. */
3957 if (MLX5_TXOFF_CONFIG(SWP) &&
3958 (es->swp_offs != txq_mbuf_to_swp(loc, &swp_flags, olx) ||
3959 es->swp_flags != swp_flags))
3961 /* Fill metadata field if needed. */
3962 if (MLX5_TXOFF_CONFIG(METADATA) &&
3963 es->metadata != (loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
3964 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0))
3966 /* Legacy MPW can send packets with the same length only. */
3967 if (MLX5_TXOFF_CONFIG(MPW) &&
3968 dlen != rte_pktmbuf_data_len(loc->mbuf))
3970 /* There must be no VLAN packets in eMPW loop. */
3971 if (MLX5_TXOFF_CONFIG(VLAN))
3972 MLX5_ASSERT(!(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT));
3973 /* Check if the scheduling is requested. */
3974 if (MLX5_TXOFF_CONFIG(TXPP) &&
3975 loc->mbuf->ol_flags & txq->ts_mask)
3981 * Update send loop variables and WQE for eMPW loop
3982 * without data inlining. Number of Data Segments is
3983 * equal to the number of sent packets.
3986 * Pointer to TX queue structure.
3988 * Pointer to burst routine local context.
3990 * Number of packets (one pointer Data Segment is built per packet).
3992 * Accumulated statistics, bytes sent
3994 * Configured Tx offloads mask. It is fully defined at
3995 * compile time and may be used for optimization.
3998 * Nothing is returned; the Tx queue counters and the local
3999 * context are updated in place.
4001 static __rte_always_inline void
4002 mlx5_tx_sdone_empw(struct mlx5_txq_data *__rte_restrict txq,
4003 struct mlx5_txq_local *__rte_restrict loc,
4006 unsigned int olx __rte_unused)
4008 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
4009 #ifdef MLX5_PMD_SOFT_COUNTERS
4010 /* Update sent data bytes counter. */
4011 txq->stats.obytes += slen;
4015 loc->elts_free -= ds;
4016 loc->pkts_sent += ds;
4018 loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
4019 txq->wqe_ci += (ds + 3) / 4;
4020 loc->wqe_free -= (ds + 3) / 4;
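/*
 * Sizing sketch with a hypothetical batch, mirroring the arithmetic above:
 * a non-inline eMPW WQE spends one pointer Data Segment per packet plus
 * two segments for the Control and Ethernet Segments, so for a batch of
 * 30 packets (ds = 32 by the time sq_ds is written):
 *
 *	cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | 32);
 *	wqebbs     = (32 + 3) / 4 = 8;	// taken from wqe_ci/wqe_free
 */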
4024 * Update send loop variables and WQE for eMPW loop
4025 * with data inlining. Gets the total size of the descriptors
4026 * and data pushed to the WQE.
4029 * Pointer to TX queue structure.
4031 * Pointer to burst routine local context.
4033 * Total size of descriptor/data in bytes.
4035 * Accumulated statistics, data bytes sent.
4037 * The base WQE for the eMPW/MPW descriptor.
4039 * Configured Tx offloads mask. It is fully defined at
4040 * compile time and may be used for optimization.
4043 * Nothing is returned; the Tx queue counters and the local
4044 * context are updated in place.
4046 static __rte_always_inline void
4047 mlx5_tx_idone_empw(struct mlx5_txq_data *__rte_restrict txq,
4048 struct mlx5_txq_local *__rte_restrict loc,
4051 struct mlx5_wqe *__rte_restrict wqem,
4052 unsigned int olx __rte_unused)
4054 struct mlx5_wqe_dseg *dseg = &wqem->dseg[0];
4056 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
4057 #ifdef MLX5_PMD_SOFT_COUNTERS
4058 /* Update sent data bytes counter. */
4059 txq->stats.obytes += slen;
4063 if (MLX5_TXOFF_CONFIG(MPW) && dseg->bcount == RTE_BE32(0)) {
4065 * If the legacy MPW session contains the inline packets
4066 * we should set the only inline data segment length
4067 * and align the total length to the segment size.
4069 MLX5_ASSERT(len > sizeof(dseg->bcount));
4070 dseg->bcount = rte_cpu_to_be_32((len - sizeof(dseg->bcount)) |
4071 MLX5_ETH_WQE_DATA_INLINE);
4072 len = (len + MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE + 2;
4075 * The session is not legacy MPW or contains the
4076 * data buffer pointer segments.
4078 MLX5_ASSERT((len % MLX5_WSEG_SIZE) == 0);
4079 len = len / MLX5_WSEG_SIZE + 2;
4081 wqem->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | len);
4082 txq->wqe_ci += (len + 3) / 4;
4083 loc->wqe_free -= (len + 3) / 4;
4084 loc->wqe_last = wqem;
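/*
 * Close-out sketch for an inline legacy MPW session with a hypothetical
 * accumulated length, mirroring the branch above and assuming the usual
 * 16-byte work queue segment size: with len = 100 bytes in the single
 * inline Data Segment,
 *
 *	dseg->bcount = rte_cpu_to_be_32((100 - sizeof(dseg->bcount)) |
 *					MLX5_ETH_WQE_DATA_INLINE);
 *	len = (100 + 15) / 16 + 2 = 9;	// segments incl. cseg and eseg
 *
 * and the WQE index advances by (9 + 3) / 4 = 3 WQEBBs.
 */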
4088 * The set of Tx burst functions for single-segment packets
4089 * without TSO and with Multi-Packet Writing feature support.
4090 * Supports all types of Tx offloads, except multi-packets
4093 * Uses MLX5_OPCODE_EMPW to build WQEs if possible and sends
4094 * as many packets per WQE as it can. If eMPW is not configured
4095 * or the packet can not be sent with eMPW (VLAN insertion), the
4096 * ordinary SEND opcode is used and only one packet is placed
4099 * The functions stop sending if they encounter a multi-segment
4100 * packet or a packet with TSO requested.
4102 * The routines are responsible for storing the processed mbuf
4103 * into the elts ring buffer and updating elts_head if the inlining
4104 * offload is requested. Otherwise copying mbufs to elts
4105 * can be postponed and completed at the end of the burst routine.
4108 * Pointer to TX queue structure.
4110 * Packets to transmit.
4112 * Number of packets in array.
4114 * Pointer to burst routine local context.
4116 * Configured Tx offloads mask. It is fully defined at
4117 * compile time and may be used for optimization.
4120 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
4121 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
4122 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
4123 * MLX5_TXCMP_CODE_TSO - TSO packet encountered.
4124 * MLX5_TXCMP_CODE_SINGLE - used inside functions set.
4125 * MLX5_TXCMP_CODE_EMPW - used inside functions set.
4127 * Local context variables updated.
4130 * The routine sends packets with MLX5_OPCODE_EMPW
4131 * without inlining; this is a dedicated optimized branch.
4132 * No VLAN insertion is supported.
4134 static __rte_always_inline enum mlx5_txcmp_code
4135 mlx5_tx_burst_empw_simple(struct mlx5_txq_data *__rte_restrict txq,
4136 struct rte_mbuf **__rte_restrict pkts,
4137 unsigned int pkts_n,
4138 struct mlx5_txq_local *__rte_restrict loc,
4142 * This subroutine is part of mlx5_tx_burst_single()
4143 * and sends single-segment packets with the eMPW opcode
4144 * without data inlining.
4146 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
4147 MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
4148 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
4149 MLX5_ASSERT(pkts_n > loc->pkts_sent);
4150 static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
4151 pkts += loc->pkts_sent + 1;
4152 pkts_n -= loc->pkts_sent;
4154 struct mlx5_wqe_dseg *__rte_restrict dseg;
4155 struct mlx5_wqe_eseg *__rte_restrict eseg;
4156 enum mlx5_txcmp_code ret;
4157 unsigned int part, loop;
4158 unsigned int slen = 0;
4161 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
4162 if (MLX5_TXOFF_CONFIG(TXPP)) {
4163 enum mlx5_txcmp_code wret;
4165 /* Generate WAIT for scheduling if requested. */
4166 wret = mlx5_tx_schedule_send(txq, loc, olx);
4167 if (wret == MLX5_TXCMP_CODE_EXIT)
4168 return MLX5_TXCMP_CODE_EXIT;
4169 if (wret == MLX5_TXCMP_CODE_ERROR)
4170 return MLX5_TXCMP_CODE_ERROR;
4172 part = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
4173 MLX5_MPW_MAX_PACKETS :
4174 MLX5_EMPW_MAX_PACKETS);
4175 if (unlikely(loc->elts_free < part)) {
4176 /* We do not have enough elts to save all mbufs. */
4177 if (unlikely(loc->elts_free < MLX5_EMPW_MIN_PACKETS))
4178 return MLX5_TXCMP_CODE_EXIT;
4179 /* But we are still able to send at least a minimal eMPW. */
4180 part = loc->elts_free;
4182 /* Check whether we have enough WQEs */
4183 if (unlikely(loc->wqe_free < ((2 + part + 3) / 4))) {
4184 if (unlikely(loc->wqe_free <
4185 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
4186 return MLX5_TXCMP_CODE_EXIT;
4187 part = (loc->wqe_free * 4) - 2;
4189 if (likely(part > 1))
4190 rte_prefetch0(*pkts);
4191 loc->wqe_last = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4193 * Build eMPW title WQEBB:
4194 * - Control Segment, eMPW opcode
4195 * - Ethernet Segment, no inline
4197 mlx5_tx_cseg_init(txq, loc, loc->wqe_last, part + 2,
4198 MLX5_OPCODE_ENHANCED_MPSW, olx);
4199 mlx5_tx_eseg_none(txq, loc, loc->wqe_last,
4200 olx & ~MLX5_TXOFF_CONFIG_VLAN);
4201 eseg = &loc->wqe_last->eseg;
4202 dseg = &loc->wqe_last->dseg[0];
4204 /* Store the packet length for legacy MPW. */
4205 if (MLX5_TXOFF_CONFIG(MPW))
4206 eseg->mss = rte_cpu_to_be_16
4207 (rte_pktmbuf_data_len(loc->mbuf));
4209 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
4210 #ifdef MLX5_PMD_SOFT_COUNTERS
4211 /* Update sent data bytes counter. */
4216 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
4218 if (unlikely(--loop == 0))
4220 loc->mbuf = *pkts++;
4221 if (likely(loop > 1))
4222 rte_prefetch0(*pkts);
4223 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4225 * Unroll the completion code to avoid
4226 * returning a variable value - it results in
4227 * unoptimized sequential checking in the caller.
4229 if (ret == MLX5_TXCMP_CODE_MULTI) {
4231 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4232 if (unlikely(!loc->elts_free ||
4234 return MLX5_TXCMP_CODE_EXIT;
4235 return MLX5_TXCMP_CODE_MULTI;
4237 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
4238 if (ret == MLX5_TXCMP_CODE_TSO) {
4240 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4241 if (unlikely(!loc->elts_free ||
4243 return MLX5_TXCMP_CODE_EXIT;
4244 return MLX5_TXCMP_CODE_TSO;
4246 if (ret == MLX5_TXCMP_CODE_SINGLE) {
4248 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4249 if (unlikely(!loc->elts_free ||
4251 return MLX5_TXCMP_CODE_EXIT;
4252 return MLX5_TXCMP_CODE_SINGLE;
4254 if (ret != MLX5_TXCMP_CODE_EMPW) {
4257 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4258 return MLX5_TXCMP_CODE_ERROR;
4261 * Check whether packet parameters coincide
4262 * within the assumed eMPW batch:
4263 * - checksum settings
4265 * - software parser settings
4266 * - packet length (legacy MPW only)
4267 * - scheduling is not required
4269 if (!mlx5_tx_match_empw(txq, eseg, loc, dlen, olx)) {
4272 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4273 if (unlikely(!loc->elts_free ||
4275 return MLX5_TXCMP_CODE_EXIT;
4279 /* Packet attributes match, continue the same eMPW. */
4281 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
4282 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
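/*
 * The send queue is a circular buffer, so an eMPW session may reach its
 * end; the pointer Data Segments simply wrap around to the ring start.
 */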
4284 /* eMPW is built successfully, update loop parameters. */
4286 MLX5_ASSERT(pkts_n >= part);
4287 #ifdef MLX5_PMD_SOFT_COUNTERS
4288 /* Update sent data bytes counter. */
4289 txq->stats.obytes += slen;
4291 loc->elts_free -= part;
4292 loc->pkts_sent += part;
4293 txq->wqe_ci += (2 + part + 3) / 4;
4294 loc->wqe_free -= (2 + part + 3) / 4;
4296 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
4297 return MLX5_TXCMP_CODE_EXIT;
4298 loc->mbuf = *pkts++;
4299 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4300 if (unlikely(ret != MLX5_TXCMP_CODE_EMPW))
4302 /* Continue sending eMPW batches. */
4308 * The routine sends packets with MLX5_OPCODE_EMPW
4309 * with inlining, optionally supporting VLAN insertion.
4311 static __rte_always_inline enum mlx5_txcmp_code
4312 mlx5_tx_burst_empw_inline(struct mlx5_txq_data *__rte_restrict txq,
4313 struct rte_mbuf **__rte_restrict pkts,
4314 unsigned int pkts_n,
4315 struct mlx5_txq_local *__rte_restrict loc,
4319 * This subroutine is part of mlx5_tx_burst_single()
4320 * and sends single-segment packets with the eMPW opcode
4321 * with data inlining.
4323 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
4324 MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
4325 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
4326 MLX5_ASSERT(pkts_n > loc->pkts_sent);
4327 static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
4328 pkts += loc->pkts_sent + 1;
4329 pkts_n -= loc->pkts_sent;
4331 struct mlx5_wqe_dseg *__rte_restrict dseg;
4332 struct mlx5_wqe *__rte_restrict wqem;
4333 enum mlx5_txcmp_code ret;
4334 unsigned int room, part, nlim;
4335 unsigned int slen = 0;
4337 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
4338 if (MLX5_TXOFF_CONFIG(TXPP)) {
4339 enum mlx5_txcmp_code wret;
4341 /* Generate WAIT for scheduling if requested. */
4342 wret = mlx5_tx_schedule_send(txq, loc, olx);
4343 if (wret == MLX5_TXCMP_CODE_EXIT)
4344 return MLX5_TXCMP_CODE_EXIT;
4345 if (wret == MLX5_TXCMP_CODE_ERROR)
4346 return MLX5_TXCMP_CODE_ERROR;
4349 * Limit the number of packets in one WQE
4350 * to improve CQE generation latency.
4352 nlim = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
4353 MLX5_MPW_INLINE_MAX_PACKETS :
4354 MLX5_EMPW_MAX_PACKETS);
4355 /* Check whether we have the minimal amount of WQEs. */
4356 if (unlikely(loc->wqe_free <
4357 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
4358 return MLX5_TXCMP_CODE_EXIT;
4359 if (likely(pkts_n > 1))
4360 rte_prefetch0(*pkts);
4361 wqem = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4363 * Build eMPW title WQEBB:
4364 * - Control Segment, eMPW opcode, zero DS
4365 * - Ethernet Segment, no inline
4367 mlx5_tx_cseg_init(txq, loc, wqem, 0,
4368 MLX5_OPCODE_ENHANCED_MPSW, olx);
4369 mlx5_tx_eseg_none(txq, loc, wqem,
4370 olx & ~MLX5_TXOFF_CONFIG_VLAN);
4371 dseg = &wqem->dseg[0];
4372 /* Store the packet length for legacy MPW. */
4373 if (MLX5_TXOFF_CONFIG(MPW))
4374 wqem->eseg.mss = rte_cpu_to_be_16
4375 (rte_pktmbuf_data_len(loc->mbuf));
4376 room = RTE_MIN(MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE,
4377 loc->wqe_free) * MLX5_WQE_SIZE -
4378 MLX5_WQE_CSEG_SIZE - MLX5_WQE_ESEG_SIZE;
4380 /* Limit the room for legacy MPW sessions for performance. */
4381 if (MLX5_TXOFF_CONFIG(MPW))
4382 room = RTE_MIN(room,
4383 RTE_MAX(txq->inlen_empw +
4384 sizeof(dseg->bcount) +
4385 (MLX5_TXOFF_CONFIG(VLAN) ?
4386 sizeof(struct rte_vlan_hdr) : 0),
4387 MLX5_MPW_INLINE_MAX_PACKETS *
4388 MLX5_WQE_DSEG_SIZE));
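/*
 * "room" is the byte budget remaining in this eMPW session after the title
 * Control and Ethernet segments. For legacy MPW it is further capped to the
 * larger of the inline budget (inlen_empw plus the bcount header and an
 * optional VLAN tag) and MLX5_MPW_INLINE_MAX_PACKETS pointer descriptors,
 * to keep sessions short for performance.
 */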
4389 /* Build WQEs while we have space, packets and resources. */
4392 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
4393 uint8_t *dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
4396 MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
4397 MLX5_ASSERT((room % MLX5_WQE_DSEG_SIZE) == 0);
4398 MLX5_ASSERT((uintptr_t)dseg < (uintptr_t)txq->wqes_end);
4400 * Some Tx offloads may cause an error if the
4401 * packet is not long enough, check against the
4402 * assumed minimal length.
4404 if (unlikely(dlen <= MLX5_ESEG_MIN_INLINE_SIZE)) {
4406 if (unlikely(!part))
4407 return MLX5_TXCMP_CODE_ERROR;
4409 * We have some successfully built
4410 * packet Data Segments to send.
4412 mlx5_tx_idone_empw(txq, loc, part,
4414 return MLX5_TXCMP_CODE_ERROR;
4416 /* Inline or not inline - that's the Question. */
4417 if (dlen > txq->inlen_empw ||
4418 loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE)
4420 if (MLX5_TXOFF_CONFIG(MPW)) {
4421 if (dlen > txq->inlen_send)
4425 /* Open new inline MPW session. */
4426 tlen += sizeof(dseg->bcount);
4427 dseg->bcount = RTE_BE32(0);
4429 dseg = RTE_PTR_ADD(dseg, sizeof(dseg->bcount));
4432 * Pointer and inline descriptors must not be
4433 * intermixed within legacy MPW sessions.
4435 if (wqem->dseg[0].bcount)
4439 tlen = sizeof(dseg->bcount) + dlen;
4441 /* Inline entire packet, optional VLAN insertion. */
4442 if (MLX5_TXOFF_CONFIG(VLAN) &&
4443 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
4445 * The packet length must have been checked in
4446 * mlx5_tx_able_to_empw(), so the packet is
4447 * guaranteed to fit into the inline length.
4450 sizeof(struct rte_vlan_hdr)) <=
4452 tlen += sizeof(struct rte_vlan_hdr);
4455 dseg = mlx5_tx_dseg_vlan(txq, loc, dseg, dptr, dlen, olx);
4457 #ifdef MLX5_PMD_SOFT_COUNTERS
4458 /* Update sent data bytes counter. */
4459 slen += sizeof(struct rte_vlan_hdr);
4464 dseg = mlx5_tx_dseg_empw(txq, loc, dseg, dptr, dlen, olx);
4467 if (!MLX5_TXOFF_CONFIG(MPW))
4468 tlen = RTE_ALIGN(tlen, MLX5_WSEG_SIZE);
4469 MLX5_ASSERT(room >= tlen);
4472 * Packet data are completely inlined,
4473 * free the packet immediately.
4475 rte_pktmbuf_free_seg(loc->mbuf);
4479 * Pointer and inline descriptors must not be
4480 * intermixed within legacy MPW sessions.
4482 if (MLX5_TXOFF_CONFIG(MPW) &&
4484 wqem->dseg[0].bcount == RTE_BE32(0))
4487 * Non-inlinable VLAN packets are
4488 * processed outside of this routine.
4490 MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
4491 if (MLX5_TXOFF_CONFIG(VLAN))
4492 MLX5_ASSERT(!(loc->mbuf->ol_flags &
4494 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
4495 /* We have to store mbuf in elts.*/
4496 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
4497 room -= MLX5_WQE_DSEG_SIZE;
4498 /* Ring buffer wraparound is checked at the loop end.*/
4501 #ifdef MLX5_PMD_SOFT_COUNTERS
4502 /* Update sent data bytes counter. */
4508 if (unlikely(!pkts_n || !loc->elts_free)) {
4510 * We have no resources/packets to
4511 * continue building descriptors.
4514 mlx5_tx_idone_empw(txq, loc, part,
4516 return MLX5_TXCMP_CODE_EXIT;
4518 loc->mbuf = *pkts++;
4519 if (likely(pkts_n > 1))
4520 rte_prefetch0(*pkts);
4521 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4523 * Unroll the completion code to avoid
4524 * returning a variable value - it results in
4525 * unoptimized sequential checking in the caller.
4527 if (ret == MLX5_TXCMP_CODE_MULTI) {
4529 mlx5_tx_idone_empw(txq, loc, part,
4531 if (unlikely(!loc->elts_free ||
4533 return MLX5_TXCMP_CODE_EXIT;
4534 return MLX5_TXCMP_CODE_MULTI;
4536 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
4537 if (ret == MLX5_TXCMP_CODE_TSO) {
4539 mlx5_tx_idone_empw(txq, loc, part,
4541 if (unlikely(!loc->elts_free ||
4543 return MLX5_TXCMP_CODE_EXIT;
4544 return MLX5_TXCMP_CODE_TSO;
4546 if (ret == MLX5_TXCMP_CODE_SINGLE) {
4548 mlx5_tx_idone_empw(txq, loc, part,
4550 if (unlikely(!loc->elts_free ||
4552 return MLX5_TXCMP_CODE_EXIT;
4553 return MLX5_TXCMP_CODE_SINGLE;
4555 if (ret != MLX5_TXCMP_CODE_EMPW) {
4558 mlx5_tx_idone_empw(txq, loc, part,
4560 return MLX5_TXCMP_CODE_ERROR;
4562 /* Check if we have minimal room left. */
4564 if (unlikely(!nlim || room < MLX5_WQE_DSEG_SIZE))
4567 * Check whether packet parameters coincide
4568 * within the assumed eMPW batch:
4569 * - checksum settings
4571 * - software parser settings
4572 * - packet length (legacy MPW only)
4573 * - scheduling is not required
4575 if (!mlx5_tx_match_empw(txq, &wqem->eseg,
4578 /* Packet attributes match, continue the same eMPW. */
4579 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
4580 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
4583 * We get here to close an existing eMPW
4584 * session and start a new one.
4586 MLX5_ASSERT(pkts_n);
4588 if (unlikely(!part))
4589 return MLX5_TXCMP_CODE_EXIT;
4590 mlx5_tx_idone_empw(txq, loc, part, slen, wqem, olx);
4591 if (unlikely(!loc->elts_free ||
4593 return MLX5_TXCMP_CODE_EXIT;
4594 /* Continue the loop with new eMPW session. */
4600 * The routine sends packets with ordinary MLX5_OPCODE_SEND.
4601 * Data inlining and VLAN insertion are supported.
4603 static __rte_always_inline enum mlx5_txcmp_code
4604 mlx5_tx_burst_single_send(struct mlx5_txq_data *__rte_restrict txq,
4605 struct rte_mbuf **__rte_restrict pkts,
4606 unsigned int pkts_n,
4607 struct mlx5_txq_local *__rte_restrict loc,
4611 * This subroutine is part of mlx5_tx_burst_single()
4612 * and sends single-segment packets with the SEND opcode.
4614 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
4615 MLX5_ASSERT(pkts_n > loc->pkts_sent);
4616 pkts += loc->pkts_sent + 1;
4617 pkts_n -= loc->pkts_sent;
4619 struct mlx5_wqe *__rte_restrict wqe;
4620 enum mlx5_txcmp_code ret;
4622 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
4623 if (MLX5_TXOFF_CONFIG(TXPP)) {
4624 enum mlx5_txcmp_code wret;
4626 /* Generate WAIT for scheduling if requested. */
4627 wret = mlx5_tx_schedule_send(txq, loc, olx);
4628 if (wret == MLX5_TXCMP_CODE_EXIT)
4629 return MLX5_TXCMP_CODE_EXIT;
4630 if (wret == MLX5_TXCMP_CODE_ERROR)
4631 return MLX5_TXCMP_CODE_ERROR;
4633 if (MLX5_TXOFF_CONFIG(INLINE)) {
4634 unsigned int inlen, vlan = 0;
4636 inlen = rte_pktmbuf_data_len(loc->mbuf);
4637 if (MLX5_TXOFF_CONFIG(VLAN) &&
4638 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
4639 vlan = sizeof(struct rte_vlan_hdr);
4641 static_assert((sizeof(struct rte_vlan_hdr) +
4642 sizeof(struct rte_ether_hdr)) ==
4643 MLX5_ESEG_MIN_INLINE_SIZE,
4644 "invalid min inline data size");
4647 * If inlining is enabled at configuration time,
4648 * the limit must not be less than the minimal size.
4649 * Otherwise we would have to do an extra check on the
4650 * data size to avoid crashes due to length overflow.
4652 MLX5_ASSERT(txq->inlen_send >=
4653 MLX5_ESEG_MIN_INLINE_SIZE);
4654 if (inlen <= txq->inlen_send) {
4655 unsigned int seg_n, wqe_n;
4657 rte_prefetch0(rte_pktmbuf_mtod
4658 (loc->mbuf, uint8_t *));
4659 /* Check against minimal length. */
4660 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
4661 return MLX5_TXCMP_CODE_ERROR;
4662 if (loc->mbuf->ol_flags &
4663 PKT_TX_DYNF_NOINLINE) {
4665 * The hint flag not to inline packet
4666 * data is set. Check whether we can follow the hint.
4669 if ((!MLX5_TXOFF_CONFIG(EMPW) &&
4671 (MLX5_TXOFF_CONFIG(MPW) &&
4673 if (inlen <= txq->inlen_send)
4676 * The hardware requires the
4677 * minimal inline data header.
4679 goto single_min_inline;
4681 if (MLX5_TXOFF_CONFIG(VLAN) &&
4682 vlan && !txq->vlan_en) {
4684 * We must insert the VLAN tag
4685 * by software means.
4687 goto single_part_inline;
4689 goto single_no_inline;
4693 * Completely inlined packet data WQE:
4694 * - Control Segment, SEND opcode
4695 * - Ethernet Segment, no VLAN insertion
4696 * - Data inlined, VLAN optionally inserted
4697 * - Alignment to MLX5_WSEG_SIZE
4698 * Have to estimate amount of WQEBBs
4700 seg_n = (inlen + 3 * MLX5_WSEG_SIZE -
4701 MLX5_ESEG_MIN_INLINE_SIZE +
4702 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
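/*
 * Worked example of the WQEBB estimate, assuming MLX5_WSEG_SIZE is 16
 * bytes and MLX5_ESEG_MIN_INLINE_SIZE is 18: inlen == 64 gives
 * seg_n == (64 + 48 - 18 + 15) / 16 == 6 WSEGs, hence wqe_n == 2 WQEBBs.
 * The "3 * MLX5_WSEG_SIZE - MLX5_ESEG_MIN_INLINE_SIZE" term accounts for
 * the Control and Ethernet segments that already carry the first inlined
 * bytes.
 */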
4703 /* Check if there are enough WQEBBs. */
4704 wqe_n = (seg_n + 3) / 4;
4705 if (wqe_n > loc->wqe_free)
4706 return MLX5_TXCMP_CODE_EXIT;
4707 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4708 loc->wqe_last = wqe;
4709 mlx5_tx_cseg_init(txq, loc, wqe, seg_n,
4710 MLX5_OPCODE_SEND, olx);
4711 mlx5_tx_eseg_data(txq, loc, wqe,
4712 vlan, inlen, 0, olx);
4713 txq->wqe_ci += wqe_n;
4714 loc->wqe_free -= wqe_n;
4716 * Packet data are completely inlined,
4717 * free the packet immediately.
4719 rte_pktmbuf_free_seg(loc->mbuf);
4720 } else if ((!MLX5_TXOFF_CONFIG(EMPW) ||
4721 MLX5_TXOFF_CONFIG(MPW)) &&
4724 * If minimal inlining is requested, the eMPW
4725 * feature should be disabled because data is
4726 * inlined into the Ethernet Segment, which can
4727 * not contain inlined data for eMPW since the
4728 * segment is shared by all packets.
4730 struct mlx5_wqe_dseg *__rte_restrict dseg;
4735 * The inline-mode settings require
4736 * inlining the specified amount of
4737 * data bytes into the Ethernet Segment.
4738 * We should check the free space in
4739 * the WQE ring buffer for the partial inlining.
4742 MLX5_ASSERT(txq->inlen_send >= txq->inlen_mode);
4743 MLX5_ASSERT(inlen > txq->inlen_mode);
4744 MLX5_ASSERT(txq->inlen_mode >=
4745 MLX5_ESEG_MIN_INLINE_SIZE);
4747 * Check whether there are enough free WQEBBs:
4749 * - Ethernet Segment
4750 * - First Segment of inlined Ethernet data
4751 * - ... data continued ...
4752 * - Finishing Data Segment of pointer type
4754 ds = (MLX5_WQE_CSEG_SIZE +
4755 MLX5_WQE_ESEG_SIZE +
4756 MLX5_WQE_DSEG_SIZE +
4758 txq->inlen_mode - MLX5_ESEG_MIN_INLINE_SIZE +
4759 MLX5_WQE_DSEG_SIZE +
4760 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
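/*
 * Worked example with 16-byte WSEGs and an 18-byte minimal inline area:
 * a hypothetical txq->inlen_mode of 66 bytes sums to
 * 16 + 16 + 16 + 66 - 18 + 16 + 15 == 127, so ds == 7 WSEGs and the
 * check below needs (7 + 3) / 4 == 2 free WQEBBs.
 */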
4761 if (loc->wqe_free < ((ds + 3) / 4))
4762 return MLX5_TXCMP_CODE_EXIT;
4764 * Build the ordinary SEND WQE:
4766 * - Ethernet Segment, inline inlen_mode bytes
4767 * - Data Segment of pointer type
4769 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4770 loc->wqe_last = wqe;
4771 mlx5_tx_cseg_init(txq, loc, wqe, ds,
4772 MLX5_OPCODE_SEND, olx);
4773 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan, txq->inlen_mode, 0, olx);
4776 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
4777 txq->inlen_mode - vlan;
4778 inlen -= txq->inlen_mode;
4779 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, inlen, olx);
4782 * WQE is built, update the loop parameters
4783 * and go to the next packet.
4785 txq->wqe_ci += (ds + 3) / 4;
4786 loc->wqe_free -= (ds + 3) / 4;
4787 /* We have to store mbuf in elts.*/
4788 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
4789 txq->elts[txq->elts_head++ & txq->elts_m] =
4797 * Partially inlined packet data WQE: we have
4798 * some space in the title WQEBB and can fill it
4799 * with some packet data. It takes one WQEBB,
4800 * which is available, so no extra space check is needed:
4801 * - Control Segment, SEND opcode
4802 * - Ethernet Segment, no VLAN insertion
4803 * - MLX5_ESEG_MIN_INLINE_SIZE bytes of Data
4804 * - Data Segment, pointer type
4806 * We also get here if VLAN insertion is not
4807 * supported by HW and inlining is enabled.
4810 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4811 loc->wqe_last = wqe;
4812 mlx5_tx_cseg_init(txq, loc, wqe, 4,
4813 MLX5_OPCODE_SEND, olx);
4814 mlx5_tx_eseg_dmin(txq, loc, wqe, vlan, olx);
4815 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
4816 MLX5_ESEG_MIN_INLINE_SIZE - vlan;
4818 * The length check is performed above, by
4819 * comparing with txq->inlen_send. We should
4820 * not get overflow here.
4822 MLX5_ASSERT(inlen > MLX5_ESEG_MIN_INLINE_SIZE);
4823 dlen = inlen - MLX5_ESEG_MIN_INLINE_SIZE;
4824 mlx5_tx_dseg_ptr(txq, loc, &wqe->dseg[1],
4828 /* We have to store mbuf in elts.*/
4829 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
4830 txq->elts[txq->elts_head++ & txq->elts_m] =
4834 #ifdef MLX5_PMD_SOFT_COUNTERS
4835 /* Update sent data bytes counter. */
4836 txq->stats.obytes += vlan +
4837 rte_pktmbuf_data_len(loc->mbuf);
4841 * No inlining at all; it means that saving CPU cycles
4842 * was prioritized at configuration time, so we should not
4843 * copy any packet data into the WQE.
4845 * SEND WQE, one WQEBB:
4846 * - Control Segment, SEND opcode
4847 * - Ethernet Segment, optional VLAN, no inline
4848 * - Data Segment, pointer type
4851 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4852 loc->wqe_last = wqe;
4853 mlx5_tx_cseg_init(txq, loc, wqe, 3,
4854 MLX5_OPCODE_SEND, olx);
4855 mlx5_tx_eseg_none(txq, loc, wqe, olx);
4857 mlx5_tx_dseg_ptr(txq, loc, &wqe->dseg[0],
4858 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
4859 rte_pktmbuf_data_len(loc->mbuf), olx);
4863 * We should not store the mbuf pointer in elts
4864 * if no inlining is configured; this is done
4865 * by the calling routine in a batch copy.
4867 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
4869 #ifdef MLX5_PMD_SOFT_COUNTERS
4870 /* Update sent data bytes counter. */
4871 txq->stats.obytes += rte_pktmbuf_data_len(loc->mbuf);
4872 if (MLX5_TXOFF_CONFIG(VLAN) &&
4873 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
4874 txq->stats.obytes +=
4875 sizeof(struct rte_vlan_hdr);
4880 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
4881 return MLX5_TXCMP_CODE_EXIT;
4882 loc->mbuf = *pkts++;
4884 rte_prefetch0(*pkts);
4885 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4886 if (unlikely(ret != MLX5_TXCMP_CODE_SINGLE))
4892 static __rte_always_inline enum mlx5_txcmp_code
4893 mlx5_tx_burst_single(struct mlx5_txq_data *__rte_restrict txq,
4894 struct rte_mbuf **__rte_restrict pkts,
4895 unsigned int pkts_n,
4896 struct mlx5_txq_local *__rte_restrict loc,
4899 enum mlx5_txcmp_code ret;
4901 ret = mlx5_tx_able_to_empw(txq, loc, olx, false);
4902 if (ret == MLX5_TXCMP_CODE_SINGLE)
4904 MLX5_ASSERT(ret == MLX5_TXCMP_CODE_EMPW);
4906 /* Optimize for inline/no inline eMPW send. */
4907 ret = (MLX5_TXOFF_CONFIG(INLINE)) ?
4908 mlx5_tx_burst_empw_inline
4909 (txq, pkts, pkts_n, loc, olx) :
4910 mlx5_tx_burst_empw_simple
4911 (txq, pkts, pkts_n, loc, olx);
4912 if (ret != MLX5_TXCMP_CODE_SINGLE)
4914 /* The resources to send one packet should remain. */
4915 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
4917 ret = mlx5_tx_burst_single_send(txq, pkts, pkts_n, loc, olx);
4918 MLX5_ASSERT(ret != MLX5_TXCMP_CODE_SINGLE);
4919 if (ret != MLX5_TXCMP_CODE_EMPW)
4921 /* The resources to send one packet should remain. */
4922 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
4927 * DPDK Tx callback template. This is a configured template
4928 * used to generate routines optimized for the specified offload setup.
4929 * One of these generated functions is chosen at SQ configuration
4933 * Generic pointer to TX queue structure.
4935 * Packets to transmit.
4937 * Number of packets in array.
4939 * Configured offloads mask, presents the bits of MLX5_TXOFF_CONFIG_xxx
4940 * values. Should be static to take compile time static configuration
4944 * Number of packets successfully transmitted (<= pkts_n).
4946 static __rte_always_inline uint16_t
4947 mlx5_tx_burst_tmpl(struct mlx5_txq_data *__rte_restrict txq,
4948 struct rte_mbuf **__rte_restrict pkts,
4952 struct mlx5_txq_local loc;
4953 enum mlx5_txcmp_code ret;
4956 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
4957 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
4958 if (unlikely(!pkts_n))
4962 loc.wqe_last = NULL;
4965 loc.pkts_loop = loc.pkts_sent;
4967 * Check if there are some CQEs, if any:
4968 * - process encountered errors
4969 * - process the completed WQEs
4970 * - free related mbufs
4971 * - doorbell the NIC about processed CQEs
4973 rte_prefetch0(*(pkts + loc.pkts_sent));
4974 mlx5_tx_handle_completion(txq, olx);
4976 * Calculate the number of available resources - elts and WQEs.
4977 * There are two possible different scenarios:
4978 * - no data inlining into WQEs, one WQEBB may contain up to
4979 * four packets, in this case elts become the scarce resource
4980 * - data inlining into WQEs, one packet may require multiple
4981 * WQEBBs, the WQEs become the limiting factor.
4983 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
4984 loc.elts_free = txq->elts_s -
4985 (uint16_t)(txq->elts_head - txq->elts_tail);
4986 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
4987 loc.wqe_free = txq->wqe_s -
4988 (uint16_t)(txq->wqe_ci - txq->wqe_pi);
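/*
 * elts_head/elts_tail and wqe_ci/wqe_pi are free-running 16-bit indexes,
 * so the unsigned subtractions above remain correct across wraparound and
 * yield the number of entries currently in use.
 */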
4989 if (unlikely(!loc.elts_free || !loc.wqe_free))
4993 * Fetch the packet from the array. Usually this is
4994 * the first packet in a series of multi/single
4997 loc.mbuf = *(pkts + loc.pkts_sent);
4998 /* Dedicated branch for multi-segment packets. */
4999 if (MLX5_TXOFF_CONFIG(MULTI) &&
5000 unlikely(NB_SEGS(loc.mbuf) > 1)) {
5002 * Multi-segment packet encountered.
5003 * Hardware is able to process it only
5004 * with SEND/TSO opcodes, one packet
5005 * per WQE, do it in a dedicated routine.
5008 MLX5_ASSERT(loc.pkts_sent >= loc.pkts_copy);
5009 part = loc.pkts_sent - loc.pkts_copy;
5010 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
5012 * There are some single-segment mbufs not
5013 * stored in elts. The mbufs must be in the
5014 * same order as WQEs, so we must copy the
5015 * mbufs to elts here, before the coming
5016 * multi-segment packet mbufs are appended.
5018 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy,
5020 loc.pkts_copy = loc.pkts_sent;
5022 MLX5_ASSERT(pkts_n > loc.pkts_sent);
5023 ret = mlx5_tx_burst_mseg(txq, pkts, pkts_n, &loc, olx);
5024 if (!MLX5_TXOFF_CONFIG(INLINE))
5025 loc.pkts_copy = loc.pkts_sent;
5027 * These return code checks are supposed
5028 * to be optimized out due to routine inlining.
5030 if (ret == MLX5_TXCMP_CODE_EXIT) {
5032 * The routine returns this code when
5033 * all packets are sent or there are not
5034 * enough resources to complete the request.
5038 if (ret == MLX5_TXCMP_CODE_ERROR) {
5040 * The routine returns this code when
5041 * some error in the incoming packets
5044 txq->stats.oerrors++;
5047 if (ret == MLX5_TXCMP_CODE_SINGLE) {
5049 * The single-segment packet was encountered
5050 * in the array, try to send it in the
5051 * most optimized way, possibly engaging eMPW.
5053 goto enter_send_single;
5055 if (MLX5_TXOFF_CONFIG(TSO) &&
5056 ret == MLX5_TXCMP_CODE_TSO) {
5058 * The single-segment TSO packet was
5059 * encountered in the array.
5061 goto enter_send_tso;
5063 /* We must not get here. Something is going wrong. */
5065 txq->stats.oerrors++;
5068 /* Dedicated branch for single-segment TSO packets. */
5069 if (MLX5_TXOFF_CONFIG(TSO) &&
5070 unlikely(loc.mbuf->ol_flags & PKT_TX_TCP_SEG)) {
5072 * TSO might require a special way of inlining
5073 * (dedicated parameters) and is sent with
5074 * the MLX5_OPCODE_TSO opcode only, provide this
5075 * in a dedicated branch.
5078 MLX5_ASSERT(NB_SEGS(loc.mbuf) == 1);
5079 MLX5_ASSERT(pkts_n > loc.pkts_sent);
5080 ret = mlx5_tx_burst_tso(txq, pkts, pkts_n, &loc, olx);
5082 * These return code checks are supposed
5083 * to be optimized out due to routine inlining.
5085 if (ret == MLX5_TXCMP_CODE_EXIT)
5087 if (ret == MLX5_TXCMP_CODE_ERROR) {
5088 txq->stats.oerrors++;
5091 if (ret == MLX5_TXCMP_CODE_SINGLE)
5092 goto enter_send_single;
5093 if (MLX5_TXOFF_CONFIG(MULTI) &&
5094 ret == MLX5_TXCMP_CODE_MULTI) {
5096 * The multi-segment packet was
5097 * encountered in the array.
5099 goto enter_send_multi;
5101 /* We must not get here. Something is going wrong. */
5103 txq->stats.oerrors++;
5107 * The dedicated branch for the single-segment packets
5108 * without TSO. Often these can be sent using
5109 * MLX5_OPCODE_EMPW with multiple packets in one WQE.
5110 * The routine builds the WQEs till it encounters
5111 * a TSO or multi-segment packet (in case these
5112 * offloads are requested at SQ configuration time).
5115 MLX5_ASSERT(pkts_n > loc.pkts_sent);
5116 ret = mlx5_tx_burst_single(txq, pkts, pkts_n, &loc, olx);
5118 * These return code checks are supposed
5119 * to be optimized out due to routine inlining.
5121 if (ret == MLX5_TXCMP_CODE_EXIT)
5123 if (ret == MLX5_TXCMP_CODE_ERROR) {
5124 txq->stats.oerrors++;
5127 if (MLX5_TXOFF_CONFIG(MULTI) &&
5128 ret == MLX5_TXCMP_CODE_MULTI) {
5130 * The multi-segment packet was
5131 * encountered in the array.
5133 goto enter_send_multi;
5135 if (MLX5_TXOFF_CONFIG(TSO) &&
5136 ret == MLX5_TXCMP_CODE_TSO) {
5138 * The single-segment TSO packet was
5139 * encountered in the array.
5141 goto enter_send_tso;
5143 /* We must not get here. Something is going wrong. */
5145 txq->stats.oerrors++;
5149 * Main Tx loop is completed, do the rest:
5150 * - set completion request if thresholds are reached
5151 * - doorbell the hardware
5152 * - copy the rest of mbufs to elts (if any)
5154 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE) ||
5155 loc.pkts_sent >= loc.pkts_copy);
5156 /* Take a shortcut if nothing is sent. */
5157 if (unlikely(loc.pkts_sent == loc.pkts_loop))
5159 /* Request CQE generation if limits are reached. */
5160 mlx5_tx_request_completion(txq, &loc, olx);
5162 * Ring the QP doorbell immediately after WQE building completes
5163 * to improve latency. The purely software-related data treatment
5164 * can be completed after the doorbell. Tx CQEs for this SQ are
5165 * processed in this thread only, by polling.
5167 * The rdma core library can map the doorbell register in two ways,
5168 * depending on the environment variable "MLX5_SHUT_UP_BF":
5170 * - as regular cached memory, when the variable is either missing or
5171 * set to zero. This type of mapping may cause significant
5172 * doorbell register write latency and requires an explicit
5173 * memory write barrier to mitigate this issue and prevent
5176 * - as non-cached memory, when the variable is present and set to
5177 * a non-zero value. This type of mapping may cause a performance
5178 * impact under heavy load conditions but the explicit write
5179 * memory barrier is not required and it may improve core
5182 * - the legacy behaviour (prior to the 19.08 release) was to use some
5183 * heuristics to decide whether the write memory barrier should
5184 * be performed. This behaviour is supported by specifying
5185 * tx_db_nc=2; the write barrier is skipped if the application
5186 * provides the full recommended burst of packets, it
5187 * supposes the next packets are coming and the write barrier
5188 * will be issued on the next burst (after descriptor writing,
5191 mlx5_tx_dbrec_cond_wmb(txq, loc.wqe_last, !txq->db_nc &&
5192 (!txq->db_heu || pkts_n % MLX5_TX_DEFAULT_BURST));
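/*
 * The last argument presumably requests the write barrier: it is true only
 * when the doorbell register is mapped as cached memory (db_nc == 0) and
 * either the heuristics are disabled or the burst is not an exact multiple
 * of MLX5_TX_DEFAULT_BURST, matching the description above.
 */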
5193 /* Not all of the mbufs may be stored into elts yet. */
5194 part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent - loc.pkts_copy;
5195 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
5197 * There are some single-segment mbufs not stored in elts.
5198 * This can only happen if the last packet was single-segment.
5199 * The copying is gathered into one place because it is
5200 * a good opportunity to optimize it with SIMD.
5201 * Unfortunately, if inlining is enabled, gaps in the
5202 * pointer array may happen due to early freeing of the
5205 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy, part, olx);
5206 loc.pkts_copy = loc.pkts_sent;
5208 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
5209 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
5210 if (pkts_n > loc.pkts_sent) {
5212 * If the burst size is large there might not be enough CQEs
5213 * fetched from the completion queue and not enough resources
5214 * freed to send all the packets.
5219 #ifdef MLX5_PMD_SOFT_COUNTERS
5220 /* Increment sent packets counter. */
5221 txq->stats.opackets += loc.pkts_sent;
5223 return loc.pkts_sent;
5226 /* Generate routines with Enhanced Multi-Packet Write support. */
5227 MLX5_TXOFF_DECL(full_empw,
5228 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_EMPW)
5230 MLX5_TXOFF_DECL(none_empw,
5231 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW)
5233 MLX5_TXOFF_DECL(md_empw,
5234 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5236 MLX5_TXOFF_DECL(mt_empw,
5237 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5238 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5240 MLX5_TXOFF_DECL(mtsc_empw,
5241 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5242 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5243 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5245 MLX5_TXOFF_DECL(mti_empw,
5246 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5247 MLX5_TXOFF_CONFIG_INLINE |
5248 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5250 MLX5_TXOFF_DECL(mtv_empw,
5251 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5252 MLX5_TXOFF_CONFIG_VLAN |
5253 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5255 MLX5_TXOFF_DECL(mtiv_empw,
5256 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5257 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5258 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5260 MLX5_TXOFF_DECL(sc_empw,
5261 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5262 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5264 MLX5_TXOFF_DECL(sci_empw,
5265 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5266 MLX5_TXOFF_CONFIG_INLINE |
5267 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5269 MLX5_TXOFF_DECL(scv_empw,
5270 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5271 MLX5_TXOFF_CONFIG_VLAN |
5272 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5274 MLX5_TXOFF_DECL(sciv_empw,
5275 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5276 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5277 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5279 MLX5_TXOFF_DECL(i_empw,
5280 MLX5_TXOFF_CONFIG_INLINE |
5281 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5283 MLX5_TXOFF_DECL(v_empw,
5284 MLX5_TXOFF_CONFIG_VLAN |
5285 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5287 MLX5_TXOFF_DECL(iv_empw,
5288 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5289 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5291 /* Generate routines without Enhanced Multi-Packet Write support. */
5292 MLX5_TXOFF_DECL(full,
5293 MLX5_TXOFF_CONFIG_FULL)
5295 MLX5_TXOFF_DECL(none,
5296 MLX5_TXOFF_CONFIG_NONE)
5299 MLX5_TXOFF_CONFIG_METADATA)
5302 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5303 MLX5_TXOFF_CONFIG_METADATA)
5305 MLX5_TXOFF_DECL(mtsc,
5306 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5307 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5308 MLX5_TXOFF_CONFIG_METADATA)
5310 MLX5_TXOFF_DECL(mti,
5311 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5312 MLX5_TXOFF_CONFIG_INLINE |
5313 MLX5_TXOFF_CONFIG_METADATA)
5316 MLX5_TXOFF_DECL(mtv,
5317 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5318 MLX5_TXOFF_CONFIG_VLAN |
5319 MLX5_TXOFF_CONFIG_METADATA)
5322 MLX5_TXOFF_DECL(mtiv,
5323 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5324 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5325 MLX5_TXOFF_CONFIG_METADATA)
5328 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5329 MLX5_TXOFF_CONFIG_METADATA)
5331 MLX5_TXOFF_DECL(sci,
5332 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5333 MLX5_TXOFF_CONFIG_INLINE |
5334 MLX5_TXOFF_CONFIG_METADATA)
5337 MLX5_TXOFF_DECL(scv,
5338 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5339 MLX5_TXOFF_CONFIG_VLAN |
5340 MLX5_TXOFF_CONFIG_METADATA)
5343 MLX5_TXOFF_DECL(sciv,
5344 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5345 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5346 MLX5_TXOFF_CONFIG_METADATA)
5349 MLX5_TXOFF_CONFIG_INLINE |
5350 MLX5_TXOFF_CONFIG_METADATA)
5353 MLX5_TXOFF_CONFIG_VLAN |
5354 MLX5_TXOFF_CONFIG_METADATA)
5357 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5358 MLX5_TXOFF_CONFIG_METADATA)
5360 /* Generate routines with timestamp scheduling. */
5361 MLX5_TXOFF_DECL(full_ts_nompw,
5362 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_TXPP)
5364 MLX5_TXOFF_DECL(full_ts_nompwi,
5365 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5366 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5367 MLX5_TXOFF_CONFIG_VLAN | MLX5_TXOFF_CONFIG_METADATA |
5368 MLX5_TXOFF_CONFIG_TXPP)
5370 MLX5_TXOFF_DECL(full_ts,
5371 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_TXPP |
5372 MLX5_TXOFF_CONFIG_EMPW)
5374 MLX5_TXOFF_DECL(full_ts_noi,
5375 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5376 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5377 MLX5_TXOFF_CONFIG_VLAN | MLX5_TXOFF_CONFIG_METADATA |
5378 MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)
5380 MLX5_TXOFF_DECL(none_ts,
5381 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_TXPP |
5382 MLX5_TXOFF_CONFIG_EMPW)
5384 MLX5_TXOFF_DECL(mdi_ts,
5385 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_METADATA |
5386 MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)
5388 MLX5_TXOFF_DECL(mti_ts,
5389 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5390 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_METADATA |
5391 MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)
5393 MLX5_TXOFF_DECL(mtiv_ts,
5394 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5395 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5396 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_TXPP |
5397 MLX5_TXOFF_CONFIG_EMPW)
5400 * Generate routines with Legacy Multi-Packet Write support.
5401 * This mode is supported by ConnectX-4 Lx only and imposes
5402 * offload limitations; not supported are:
5403 * - ACL/Flows (metadata become meaningless)
5404 * - WQE Inline headers
5405 * - SRIOV (E-Switch offloads)
5407 * - tunnel encapsulation/decapsulation
5410 MLX5_TXOFF_DECL(none_mpw,
5411 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW |
5412 MLX5_TXOFF_CONFIG_MPW)
5414 MLX5_TXOFF_DECL(mci_mpw,
5415 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5416 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5417 MLX5_TXOFF_CONFIG_MPW)
5419 MLX5_TXOFF_DECL(mc_mpw,
5420 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5421 MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW)
5423 MLX5_TXOFF_DECL(i_mpw,
5424 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5425 MLX5_TXOFF_CONFIG_MPW)
5428 * Array of declared and compiled Tx burst functions and the corresponding
5429 * supported offload sets. The array is used to select the Tx burst
5430 * function for the offload set specified at Tx queue configuration time.
5433 eth_tx_burst_t func;
5436 MLX5_TXOFF_INFO(full_empw,
5437 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5438 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5439 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5440 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5442 MLX5_TXOFF_INFO(none_empw,
5443 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW)
5445 MLX5_TXOFF_INFO(md_empw,
5446 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5448 MLX5_TXOFF_INFO(mt_empw,
5449 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5450 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5452 MLX5_TXOFF_INFO(mtsc_empw,
5453 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5454 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5455 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5457 MLX5_TXOFF_INFO(mti_empw,
5458 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5459 MLX5_TXOFF_CONFIG_INLINE |
5460 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5462 MLX5_TXOFF_INFO(mtv_empw,
5463 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5464 MLX5_TXOFF_CONFIG_VLAN |
5465 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5467 MLX5_TXOFF_INFO(mtiv_empw,
5468 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5469 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5470 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5472 MLX5_TXOFF_INFO(sc_empw,
5473 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5474 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5476 MLX5_TXOFF_INFO(sci_empw,
5477 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5478 MLX5_TXOFF_CONFIG_INLINE |
5479 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5481 MLX5_TXOFF_INFO(scv_empw,
5482 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5483 MLX5_TXOFF_CONFIG_VLAN |
5484 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5486 MLX5_TXOFF_INFO(sciv_empw,
5487 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5488 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5489 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5491 MLX5_TXOFF_INFO(i_empw,
5492 MLX5_TXOFF_CONFIG_INLINE |
5493 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5495 MLX5_TXOFF_INFO(v_empw,
5496 MLX5_TXOFF_CONFIG_VLAN |
5497 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5499 MLX5_TXOFF_INFO(iv_empw,
5500 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5501 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5503 MLX5_TXOFF_INFO(full_ts_nompw,
5504 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_TXPP)
5506 MLX5_TXOFF_INFO(full_ts_nompwi,
5507 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5508 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5509 MLX5_TXOFF_CONFIG_VLAN | MLX5_TXOFF_CONFIG_METADATA |
5510 MLX5_TXOFF_CONFIG_TXPP)
5512 MLX5_TXOFF_INFO(full_ts,
5513 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_TXPP |
5514 MLX5_TXOFF_CONFIG_EMPW)
5516 MLX5_TXOFF_INFO(full_ts_noi,
5517 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5518 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5519 MLX5_TXOFF_CONFIG_VLAN | MLX5_TXOFF_CONFIG_METADATA |
5520 MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)
5522 MLX5_TXOFF_INFO(none_ts,
5523 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_TXPP |
5524 MLX5_TXOFF_CONFIG_EMPW)
5526 MLX5_TXOFF_INFO(mdi_ts,
5527 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_METADATA |
5528 MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)
5530 MLX5_TXOFF_INFO(mti_ts,
5531 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5532 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_METADATA |
5533 MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)
5535 MLX5_TXOFF_INFO(mtiv_ts,
5536 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5537 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5538 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_TXPP |
5539 MLX5_TXOFF_CONFIG_EMPW)
5541 MLX5_TXOFF_INFO(full,
5542 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5543 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5544 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5545 MLX5_TXOFF_CONFIG_METADATA)
5547 MLX5_TXOFF_INFO(none,
5548 MLX5_TXOFF_CONFIG_NONE)
5551 MLX5_TXOFF_CONFIG_METADATA)
5554 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5555 MLX5_TXOFF_CONFIG_METADATA)
5557 MLX5_TXOFF_INFO(mtsc,
5558 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5559 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5560 MLX5_TXOFF_CONFIG_METADATA)
5562 MLX5_TXOFF_INFO(mti,
5563 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5564 MLX5_TXOFF_CONFIG_INLINE |
5565 MLX5_TXOFF_CONFIG_METADATA)
5567 MLX5_TXOFF_INFO(mtv,
5568 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5569 MLX5_TXOFF_CONFIG_VLAN |
5570 MLX5_TXOFF_CONFIG_METADATA)
5572 MLX5_TXOFF_INFO(mtiv,
5573 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5574 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5575 MLX5_TXOFF_CONFIG_METADATA)
5578 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5579 MLX5_TXOFF_CONFIG_METADATA)
5581 MLX5_TXOFF_INFO(sci,
5582 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5583 MLX5_TXOFF_CONFIG_INLINE |
5584 MLX5_TXOFF_CONFIG_METADATA)
5586 MLX5_TXOFF_INFO(scv,
5587 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5588 MLX5_TXOFF_CONFIG_VLAN |
5589 MLX5_TXOFF_CONFIG_METADATA)
5591 MLX5_TXOFF_INFO(sciv,
5592 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5593 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5594 MLX5_TXOFF_CONFIG_METADATA)
5597 MLX5_TXOFF_CONFIG_INLINE |
5598 MLX5_TXOFF_CONFIG_METADATA)
5601 MLX5_TXOFF_CONFIG_VLAN |
5602 MLX5_TXOFF_CONFIG_METADATA)
5605 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5606 MLX5_TXOFF_CONFIG_METADATA)
5608 MLX5_TXOFF_INFO(none_mpw,
5609 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW |
5610 MLX5_TXOFF_CONFIG_MPW)
5612 MLX5_TXOFF_INFO(mci_mpw,
5613 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5614 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5615 MLX5_TXOFF_CONFIG_MPW)
5617 MLX5_TXOFF_INFO(mc_mpw,
5618 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5619 MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW)
5621 MLX5_TXOFF_INFO(i_mpw,
5622 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5623 MLX5_TXOFF_CONFIG_MPW)
5627 * Configure the Tx function to use. The routine checks the configured
5628 * Tx offloads for the device and selects the appropriate Tx burst
5629 * routine. There are multiple Tx burst routines compiled from
5630 * the same template in the most optimal way for the dedicated
5634 * Pointer to private data structure.
5637 * Pointer to selected Tx burst function.
5640 mlx5_select_tx_function(struct rte_eth_dev *dev)
5642 struct mlx5_priv *priv = dev->data->dev_private;
5643 struct mlx5_dev_config *config = &priv->config;
5644 uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
5645 unsigned int diff = 0, olx = 0, i, m;
5647 static_assert(MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE <=
5648 MLX5_DSEG_MAX, "invalid WQE max size");
5649 static_assert(MLX5_WQE_CSEG_SIZE == MLX5_WSEG_SIZE,
5650 "invalid WQE Control Segment size");
5651 static_assert(MLX5_WQE_ESEG_SIZE == MLX5_WSEG_SIZE,
5652 "invalid WQE Ethernet Segment size");
5653 static_assert(MLX5_WQE_DSEG_SIZE == MLX5_WSEG_SIZE,
5654 "invalid WQE Data Segment size");
5655 static_assert(MLX5_WQE_SIZE == 4 * MLX5_WSEG_SIZE,
5656 "invalid WQE size");
5658 if (tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
5659 /* We should support Multi-Segment Packets. */
5660 olx |= MLX5_TXOFF_CONFIG_MULTI;
5662 if (tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
5663 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
5664 DEV_TX_OFFLOAD_GRE_TNL_TSO |
5665 DEV_TX_OFFLOAD_IP_TNL_TSO |
5666 DEV_TX_OFFLOAD_UDP_TNL_TSO)) {
5667 /* We should support TCP Send Offload. */
5668 olx |= MLX5_TXOFF_CONFIG_TSO;
5670 if (tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO |
5671 DEV_TX_OFFLOAD_UDP_TNL_TSO |
5672 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
5673 /* We should support Software Parser for Tunnels. */
5674 olx |= MLX5_TXOFF_CONFIG_SWP;
5676 if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
5677 DEV_TX_OFFLOAD_UDP_CKSUM |
5678 DEV_TX_OFFLOAD_TCP_CKSUM |
5679 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
5680 /* We should support IP/TCP/UDP Checksums. */
5681 olx |= MLX5_TXOFF_CONFIG_CSUM;
5683 if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT) {
5684 /* We should support VLAN insertion. */
5685 olx |= MLX5_TXOFF_CONFIG_VLAN;
5687 if (tx_offloads & DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP &&
5688 rte_mbuf_dynflag_lookup
5689 (RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL) > 0 &&
5690 rte_mbuf_dynfield_lookup
5691 (RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL) > 0) {
5692 /* Offload configured, dynamic entities registered. */
5693 olx |= MLX5_TXOFF_CONFIG_TXPP;
5695 if (priv->txqs_n && (*priv->txqs)[0]) {
5696 struct mlx5_txq_data *txd = (*priv->txqs)[0];
5698 if (txd->inlen_send) {
5700 * Check the data inline requirements. Data inlining
5701 * is enabled on a per-device basis, so we can check
5702 * the first Tx queue only.
5704 * If the device does not support VLAN insertion in the WQE
5705 * and some queues are requested to perform VLAN
5706 * insertion offload then inlining must be enabled.
5708 olx |= MLX5_TXOFF_CONFIG_INLINE;
5711 if (config->mps == MLX5_MPW_ENHANCED &&
5712 config->txq_inline_min <= 0) {
5714 * The NIC supports Enhanced Multi-Packet Write
5715 * and does not require minimal inline data.
5717 olx |= MLX5_TXOFF_CONFIG_EMPW;
5719 if (rte_flow_dynf_metadata_avail()) {
5720 /* We should support Flow metadata. */
5721 olx |= MLX5_TXOFF_CONFIG_METADATA;
5723 if (config->mps == MLX5_MPW) {
5725 * The NIC supports Legacy Multi-Packet Write.
5726 * The MLX5_TXOFF_CONFIG_MPW controls the
5727 * descriptor building method in combination
5728 * with MLX5_TXOFF_CONFIG_EMPW.
5730 if (!(olx & (MLX5_TXOFF_CONFIG_TSO |
5731 MLX5_TXOFF_CONFIG_SWP |
5732 MLX5_TXOFF_CONFIG_VLAN |
5733 MLX5_TXOFF_CONFIG_METADATA)))
5734 olx |= MLX5_TXOFF_CONFIG_EMPW |
5735 MLX5_TXOFF_CONFIG_MPW;
5738 * Scan the routine table to find the minimal
5739 * satisfying routine for the requested offloads.
5741 m = RTE_DIM(txoff_func);
5742 for (i = 0; i < RTE_DIM(txoff_func); i++) {
5745 tmp = txoff_func[i].olx;
5747 /* Meets requested offloads exactly.*/
5751 if ((tmp & olx) != olx) {
5752 /* Does not meet requested offloads at all. */
5755 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_MPW)
5756 /* Do not enable legacy MPW if not configured. */
5758 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_EMPW)
5759 /* Do not enable eMPW if not configured. */
5761 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_INLINE)
5762 /* Do not enable inlining if not configured. */
5764 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_TXPP)
5765 /* Do not enable scheduling if not configured. */
5768 * Some routine meets the requirements.
5769 * Check whether it has minimal amount
5770 * of not requested offloads.
5772 tmp = __builtin_popcountl(tmp & ~olx);
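/*
 * "tmp" now counts the offload bits the candidate supports beyond what was
 * requested; fewer extra bits means less runtime checking overhead, so the
 * smallest count wins. For example, if only CSUM is requested, a candidate
 * with CSUM|INLINE scores 1 while a CSUM-only candidate scores 0 and is
 * preferred.
 */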
5773 if (m >= RTE_DIM(txoff_func) || tmp < diff) {
5774 /* First or better match, save and continue. */
5780 tmp = txoff_func[i].olx ^ txoff_func[m].olx;
5781 if (__builtin_ffsl(txoff_func[i].olx & ~tmp) <
5782 __builtin_ffsl(txoff_func[m].olx & ~tmp)) {
5783 /* Lighter not requested offload. */
5788 if (m >= RTE_DIM(txoff_func)) {
5789 DRV_LOG(DEBUG, "port %u has no selected Tx function"
5790 " for requested offloads %04X",
5791 dev->data->port_id, olx);
5794 DRV_LOG(DEBUG, "port %u has selected Tx function"
5795 " supporting offloads %04X/%04X",
5796 dev->data->port_id, olx, txoff_func[m].olx);
5797 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MULTI)
5798 DRV_LOG(DEBUG, "\tMULTI (multi segment)");
5799 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_TSO)
5800 DRV_LOG(DEBUG, "\tTSO (TCP send offload)");
5801 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_SWP)
5802 DRV_LOG(DEBUG, "\tSWP (software parser)");
5803 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_CSUM)
5804 DRV_LOG(DEBUG, "\tCSUM (checksum offload)");
5805 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_INLINE)
5806 DRV_LOG(DEBUG, "\tINLIN (inline data)");
5807 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_VLAN)
5808 DRV_LOG(DEBUG, "\tVLANI (VLAN insertion)");
5809 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_METADATA)
5810 DRV_LOG(DEBUG, "\tMETAD (tx Flow metadata)");
5811 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_TXPP)
5812 DRV_LOG(DEBUG, "\tMETAD (tx Scheduling)");
5813 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_EMPW) {
5814 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MPW)
5815 DRV_LOG(DEBUG, "\tMPW (Legacy MPW)");
5817 DRV_LOG(DEBUG, "\tEMPW (Enhanced MPW)");
5819 return txoff_func[m].func;
5823 * DPDK callback to get the TX queue information
5826 * Pointer to the device structure.
5828 * @param tx_queue_id
5829 * Tx queue identifier.
5832 * Pointer to the TX queue information structure.
5839 mlx5_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
5840 struct rte_eth_txq_info *qinfo)
5842 struct mlx5_priv *priv = dev->data->dev_private;
5843 struct mlx5_txq_data *txq = (*priv->txqs)[tx_queue_id];
5844 struct mlx5_txq_ctrl *txq_ctrl =
5845 container_of(txq, struct mlx5_txq_ctrl, txq);
5849 qinfo->nb_desc = txq->elts_s;
5850 qinfo->conf.tx_thresh.pthresh = 0;
5851 qinfo->conf.tx_thresh.hthresh = 0;
5852 qinfo->conf.tx_thresh.wthresh = 0;
5853 qinfo->conf.tx_rs_thresh = 0;
5854 qinfo->conf.tx_free_thresh = 0;
5855 qinfo->conf.tx_deferred_start = txq_ctrl ? 0 : 1;
5856 qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
5860 * DPDK callback to get the TX packet burst mode information
5863 * Pointer to the device structure.
5865 * @param tx_queue_id
5866 * Tx queue identifier.
5869 * Pointer to the burst mode information.
5872 * 0 on success, -EINVAL on failure.
5876 mlx5_tx_burst_mode_get(struct rte_eth_dev *dev,
5877 uint16_t tx_queue_id __rte_unused,
5878 struct rte_eth_burst_mode *mode)
5880 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
5881 unsigned int i, olx;
5883 for (i = 0; i < RTE_DIM(txoff_func); i++) {
5884 if (pkt_burst == txoff_func[i].func) {
5885 olx = txoff_func[i].olx;
5886 snprintf(mode->info, sizeof(mode->info),
5887 "%s%s%s%s%s%s%s%s%s",
5888 (olx & MLX5_TXOFF_CONFIG_EMPW) ?
5889 ((olx & MLX5_TXOFF_CONFIG_MPW) ?
5890 "Legacy MPW" : "Enhanced MPW") : "No MPW",
5891 (olx & MLX5_TXOFF_CONFIG_MULTI) ?
5893 (olx & MLX5_TXOFF_CONFIG_TSO) ?
5895 (olx & MLX5_TXOFF_CONFIG_SWP) ?
5897 (olx & MLX5_TXOFF_CONFIG_CSUM) ?
5899 (olx & MLX5_TXOFF_CONFIG_INLINE) ?
5901 (olx & MLX5_TXOFF_CONFIG_VLAN) ?
5903 (olx & MLX5_TXOFF_CONFIG_METADATA) ?
5905 (olx & MLX5_TXOFF_CONFIG_TXPP) ?