1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015-2019 Mellanox Technologies, Ltd
12 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
14 #pragma GCC diagnostic ignored "-Wpedantic"
16 #include <infiniband/verbs.h>
17 #include <infiniband/mlx5dv.h>
19 #pragma GCC diagnostic error "-Wpedantic"
23 #include <rte_mempool.h>
24 #include <rte_prefetch.h>
25 #include <rte_common.h>
26 #include <rte_branch_prediction.h>
27 #include <rte_ether.h>
28 #include <rte_cycles.h>
31 #include "mlx5_utils.h"
32 #include "mlx5_rxtx.h"
33 #include "mlx5_autoconf.h"
34 #include "mlx5_defs.h"
37 /* TX burst subroutines return codes. */
38 enum mlx5_txcmp_code {
39 MLX5_TXCMP_CODE_EXIT = 0,
40 MLX5_TXCMP_CODE_ERROR,
41 MLX5_TXCMP_CODE_SINGLE,
42 MLX5_TXCMP_CODE_MULTI,
48 * These defines are used to configure the set of Tx burst routine options
49 * supported at compile time. Options that are not specified are optimized
50 * out because the corresponding if conditions can be evaluated at compile
51 * time. Offloads with a bigger runtime check overhead (requiring more CPU
52 * cycles to skip) should have a bigger index - this is needed to select the
53 * best matching routine when there is no exact match and some offloads are
54 * not actually requested.
56 #define MLX5_TXOFF_CONFIG_MULTI (1u << 0) /* Multi-segment packets.*/
57 #define MLX5_TXOFF_CONFIG_TSO (1u << 1) /* TCP send offload supported.*/
58 #define MLX5_TXOFF_CONFIG_SWP (1u << 2) /* Tunnels/SW Parser offloads.*/
59 #define MLX5_TXOFF_CONFIG_CSUM (1u << 3) /* Check Sums offloaded. */
60 #define MLX5_TXOFF_CONFIG_INLINE (1u << 4) /* Data inlining supported. */
61 #define MLX5_TXOFF_CONFIG_VLAN (1u << 5) /* VLAN insertion supported.*/
62 #define MLX5_TXOFF_CONFIG_METADATA (1u << 6) /* Flow metadata. */
63 #define MLX5_TXOFF_CONFIG_EMPW (1u << 8) /* Enhanced MPW supported.*/
65 /* The most common offloads groups. */
66 #define MLX5_TXOFF_CONFIG_NONE 0
67 #define MLX5_TXOFF_CONFIG_FULL (MLX5_TXOFF_CONFIG_MULTI | \
68 MLX5_TXOFF_CONFIG_TSO | \
69 MLX5_TXOFF_CONFIG_SWP | \
70 MLX5_TXOFF_CONFIG_CSUM | \
71 MLX5_TXOFF_CONFIG_INLINE | \
72 MLX5_TXOFF_CONFIG_VLAN | \
73 MLX5_TXOFF_CONFIG_METADATA)
75 #define MLX5_TXOFF_CONFIG(mask) (olx & MLX5_TXOFF_CONFIG_##mask)
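/*
 * Illustrative sketch (not part of the driver): because the olx mask passed
 * to MLX5_TXOFF_DECL() is a compile-time constant, a check such as
 *
 *	if (MLX5_TXOFF_CONFIG(TSO)) {
 *		... TSO handling ...
 *	}
 *
 * expands to "if (olx & MLX5_TXOFF_CONFIG_TSO)" and is folded by the compiler
 * into either always-taken or dead code, so unused offload branches are
 * optimized out of each generated mlx5_tx_burst_* routine.
 */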
77 #define MLX5_TXOFF_DECL(func, olx) \
78 static uint16_t mlx5_tx_burst_##func(void *txq, \
79 struct rte_mbuf **pkts, \
82 return mlx5_tx_burst_tmpl((struct mlx5_txq_data *)txq, \
83 pkts, pkts_n, (olx)); \
86 #define MLX5_TXOFF_INFO(func, olx) {mlx5_tx_burst_##func, olx},
88 static __rte_always_inline uint32_t
89 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe);
91 static __rte_always_inline int
92 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
93 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe);
95 static __rte_always_inline uint32_t
96 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe);
98 static __rte_always_inline void
99 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
100 volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res);
102 static __rte_always_inline void
103 mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx,
104 const unsigned int strd_n);
107 mlx5_queue_state_modify(struct rte_eth_dev *dev,
108 struct mlx5_mp_arg_queue_state_modify *sm);
110 uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
111 [0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
114 uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned;
115 uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned;
118 * Build a table to translate Rx completion flags to packet type.
120 * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
123 mlx5_set_ptype_table(void)
126 uint32_t (*p)[RTE_DIM(mlx5_ptype_table)] = &mlx5_ptype_table;
128 /* Last entry must not be overwritten, reserved for errored packet. */
129 for (i = 0; i < RTE_DIM(mlx5_ptype_table) - 1; ++i)
130 (*p)[i] = RTE_PTYPE_UNKNOWN;
132 * The index to the array should have:
133 * bit[1:0] = l3_hdr_type
134 * bit[4:2] = l4_hdr_type
137 * bit[7] = outer_l3_type
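/*
 * Worked example (illustrative only, based on the bit layout above): index
 * 0x01 has l3_hdr_type = 1 and l4_hdr_type = 0, which is why entry (*p)[0x01]
 * below is L2_ETHER | L3_IPV6_EXT_UNKNOWN | L4_NONFRAG; setting bit[7]
 * (outer_l3_type) gives the mirrored entry (*p)[0x81] with the same value.
 */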
140 (*p)[0x00] = RTE_PTYPE_L2_ETHER;
142 (*p)[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
143 RTE_PTYPE_L4_NONFRAG;
144 (*p)[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
145 RTE_PTYPE_L4_NONFRAG;
147 (*p)[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
149 (*p)[0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
152 (*p)[0x05] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
154 (*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
156 (*p)[0x0d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
158 (*p)[0x0e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
160 (*p)[0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
162 (*p)[0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
165 (*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
167 (*p)[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
169 /* Repeat with outer_l3_type being set. Just in case. */
170 (*p)[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
171 RTE_PTYPE_L4_NONFRAG;
172 (*p)[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
173 RTE_PTYPE_L4_NONFRAG;
174 (*p)[0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
176 (*p)[0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
178 (*p)[0x85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
180 (*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
182 (*p)[0x8d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
184 (*p)[0x8e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
186 (*p)[0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
188 (*p)[0x92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
190 (*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
192 (*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
195 (*p)[0x40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
196 (*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
197 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
198 RTE_PTYPE_INNER_L4_NONFRAG;
199 (*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
200 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
201 RTE_PTYPE_INNER_L4_NONFRAG;
202 (*p)[0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
203 (*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
204 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
205 RTE_PTYPE_INNER_L4_NONFRAG;
206 (*p)[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
207 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
208 RTE_PTYPE_INNER_L4_NONFRAG;
209 /* Tunneled - Fragmented */
210 (*p)[0x61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
211 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
212 RTE_PTYPE_INNER_L4_FRAG;
213 (*p)[0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
214 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
215 RTE_PTYPE_INNER_L4_FRAG;
216 (*p)[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
217 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
218 RTE_PTYPE_INNER_L4_FRAG;
219 (*p)[0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
220 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
221 RTE_PTYPE_INNER_L4_FRAG;
223 (*p)[0x45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
224 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
225 RTE_PTYPE_INNER_L4_TCP;
226 (*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
227 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
228 RTE_PTYPE_INNER_L4_TCP;
229 (*p)[0x4d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
230 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
231 RTE_PTYPE_INNER_L4_TCP;
232 (*p)[0x4e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
233 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
234 RTE_PTYPE_INNER_L4_TCP;
235 (*p)[0x51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
236 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
237 RTE_PTYPE_INNER_L4_TCP;
238 (*p)[0x52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
239 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
240 RTE_PTYPE_INNER_L4_TCP;
241 (*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
242 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
243 RTE_PTYPE_INNER_L4_TCP;
244 (*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
245 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
246 RTE_PTYPE_INNER_L4_TCP;
247 (*p)[0xcd] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
248 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
249 RTE_PTYPE_INNER_L4_TCP;
250 (*p)[0xce] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
251 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
252 RTE_PTYPE_INNER_L4_TCP;
253 (*p)[0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
254 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
255 RTE_PTYPE_INNER_L4_TCP;
256 (*p)[0xd2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
257 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
258 RTE_PTYPE_INNER_L4_TCP;
260 (*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
261 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
262 RTE_PTYPE_INNER_L4_UDP;
263 (*p)[0x4a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
264 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
265 RTE_PTYPE_INNER_L4_UDP;
266 (*p)[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
267 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
268 RTE_PTYPE_INNER_L4_UDP;
269 (*p)[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
270 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
271 RTE_PTYPE_INNER_L4_UDP;
275 * Build a table to translate packet checksum offload flags to the Verbs checksum type.
278 mlx5_set_cksum_table(void)
284 * The index should have:
285 * bit[0] = PKT_TX_TCP_SEG
286 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
287 * bit[4] = PKT_TX_IP_CKSUM
288 * bit[8] = PKT_TX_OUTER_IP_CKSUM
291 for (i = 0; i < RTE_DIM(mlx5_cksum_table); ++i) {
294 /* Tunneled packet. */
295 if (i & (1 << 8)) /* Outer IP. */
296 v |= MLX5_ETH_WQE_L3_CSUM;
297 if (i & (1 << 4)) /* Inner IP. */
298 v |= MLX5_ETH_WQE_L3_INNER_CSUM;
299 if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
300 v |= MLX5_ETH_WQE_L4_INNER_CSUM;
303 if (i & (1 << 4)) /* IP. */
304 v |= MLX5_ETH_WQE_L3_CSUM;
305 if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
306 v |= MLX5_ETH_WQE_L4_CSUM;
308 mlx5_cksum_table[i] = v;
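/*
 * Worked example (illustrative only): for a tunneled packet requesting
 * PKT_TX_OUTER_IP_CKSUM, PKT_TX_IP_CKSUM and an inner L4 checksum, bits [8],
 * [4] and one of [3:2] of the index are set, so the loop above stores
 * MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L3_INNER_CSUM |
 * MLX5_ETH_WQE_L4_INNER_CSUM for that index.
 */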
313 * Build a table to translate the mbuf packet type to the Verbs SWP type.
316 mlx5_set_swp_types_table(void)
322 * The index should have:
323 * bit[0:1] = PKT_TX_L4_MASK
324 * bit[4] = PKT_TX_IPV6
325 * bit[8] = PKT_TX_OUTER_IPV6
326 * bit[9] = PKT_TX_OUTER_UDP
328 for (i = 0; i < RTE_DIM(mlx5_swp_types_table); ++i) {
331 v |= MLX5_ETH_WQE_L3_OUTER_IPV6;
333 v |= MLX5_ETH_WQE_L4_OUTER_UDP;
335 v |= MLX5_ETH_WQE_L3_INNER_IPV6;
336 if ((i & 3) == (PKT_TX_UDP_CKSUM >> 52))
337 v |= MLX5_ETH_WQE_L4_INNER_UDP;
338 mlx5_swp_types_table[i] = v;
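/*
 * Worked example (illustrative only): a UDP-tunneled packet with an inner
 * IPv6 header and an inner UDP checksum request sets bit[9] (outer UDP),
 * bit[4] (inner IPv6) and the L4 bits, so the loop above stores
 * MLX5_ETH_WQE_L4_OUTER_UDP | MLX5_ETH_WQE_L3_INNER_IPV6 |
 * MLX5_ETH_WQE_L4_INNER_UDP for that index.
 */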
343 * Set Software Parser flags and offsets in the Ethernet Segment of the WQE.
344 * Flags must be initialized to zero beforehand.
347 * Pointer to burst routine local context.
349 * Pointer to store Software Parser flags
351 * Configured Tx offloads mask. It is fully defined at
352 * compile time and may be used for optimization.
355 * Software Parser offsets packed in dword.
356 * Software Parser flags are set by pointer.
358 static __rte_always_inline uint32_t
359 txq_mbuf_to_swp(struct mlx5_txq_local *restrict loc,
364 unsigned int idx, off;
367 if (!MLX5_TXOFF_CONFIG(SWP))
369 ol = loc->mbuf->ol_flags;
370 tunnel = ol & PKT_TX_TUNNEL_MASK;
372 * Check whether the Software Parser is required.
373 * Only customized tunnels may ask for it.
375 if (likely(tunnel != PKT_TX_TUNNEL_UDP && tunnel != PKT_TX_TUNNEL_IP))
378 * The index should have:
379 * bit[0:1] = PKT_TX_L4_MASK
380 * bit[4] = PKT_TX_IPV6
381 * bit[8] = PKT_TX_OUTER_IPV6
382 * bit[9] = PKT_TX_OUTER_UDP
384 idx = (ol & (PKT_TX_L4_MASK | PKT_TX_IPV6 | PKT_TX_OUTER_IPV6)) >> 52;
385 idx |= (tunnel == PKT_TX_TUNNEL_UDP) ? (1 << 9) : 0;
386 *swp_flags = mlx5_swp_types_table[idx];
388 * Set offsets for the SW parser. Since ConnectX-5, the SW parser just
389 * complements the HW parser. The SW parser starts to engage only if the
390 * HW parser can't reach a header. For older devices, the HW parser will
391 * not kick in if any of the SWP offsets is set. Therefore, all of the L3
392 * offsets should be set regardless of the HW offload.
394 off = loc->mbuf->outer_l2_len;
395 if (MLX5_TXOFF_CONFIG(VLAN) && ol & PKT_TX_VLAN_PKT)
396 off += sizeof(struct rte_vlan_hdr);
397 set = (off >> 1) << 8; /* Outer L3 offset. */
398 off += loc->mbuf->outer_l3_len;
399 if (tunnel == PKT_TX_TUNNEL_UDP)
400 set |= off >> 1; /* Outer L4 offset. */
401 if (ol & (PKT_TX_IPV4 | PKT_TX_IPV6)) { /* Inner IP. */
402 const uint64_t csum = ol & PKT_TX_L4_MASK;
403 off += loc->mbuf->l2_len;
404 set |= (off >> 1) << 24; /* Inner L3 offset. */
405 if (csum == PKT_TX_TCP_CKSUM ||
406 csum == PKT_TX_UDP_CKSUM ||
407 (MLX5_TXOFF_CONFIG(TSO) && ol & PKT_TX_TCP_SEG)) {
408 off += loc->mbuf->l3_len;
409 set |= (off >> 1) << 16; /* Inner L4 offset. */
412 set = rte_cpu_to_le_32(set);
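/*
 * Illustrative note on the dword layout built above: byte 0 holds the outer
 * L4 offset / 2, byte 1 the outer L3 offset / 2, byte 2 the inner L4
 * offset / 2 and byte 3 the inner L3 offset / 2. For example, with a plain
 * 14-byte outer Ethernet header the outer L3 offset byte is 14 / 2 = 7.
 */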
417 * Convert the Checksum offloads to Verbs.
420 * Pointer to the mbuf.
423 * Converted checksum flags.
425 static __rte_always_inline uint8_t
426 txq_ol_cksum_to_cs(struct rte_mbuf *buf)
429 uint8_t is_tunnel = !!(buf->ol_flags & PKT_TX_TUNNEL_MASK);
430 const uint64_t ol_flags_mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK |
431 PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM;
434 * The index should have:
435 * bit[0] = PKT_TX_TCP_SEG
436 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
437 * bit[4] = PKT_TX_IP_CKSUM
438 * bit[8] = PKT_TX_OUTER_IP_CKSUM
441 idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (!!is_tunnel << 9);
442 return mlx5_cksum_table[idx];
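/*
 * Worked example (illustrative only): with
 * ol_flags = PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM and no tunnel, the shift by
 * 50 above yields idx = (1 << 4) | (1 << 2) = 0x14, and mlx5_cksum_table[idx]
 * returns MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM as built by
 * mlx5_set_cksum_table().
 */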
446 * Internal function to compute the number of used descriptors in an RX queue
452 * The number of used Rx descriptors.
455 rx_queue_count(struct mlx5_rxq_data *rxq)
457 struct rxq_zip *zip = &rxq->zip;
458 volatile struct mlx5_cqe *cqe;
459 const unsigned int cqe_n = (1 << rxq->cqe_n);
460 const unsigned int cqe_cnt = cqe_n - 1;
464 /* if we are processing a compressed cqe */
466 used = zip->cqe_cnt - zip->ca;
472 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
473 while (check_cqe(cqe, cqe_n, cq_ci) != MLX5_CQE_STATUS_HW_OWN) {
477 op_own = cqe->op_own;
478 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
479 n = rte_be_to_cpu_32(cqe->byte_cnt);
484 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
486 used = RTE_MIN(used, (1U << rxq->elts_n) - 1);
491 * DPDK callback to check the status of an Rx descriptor.
496 * The index of the descriptor in the ring.
499 * The status of the Rx descriptor.
502 mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
504 struct mlx5_rxq_data *rxq = rx_queue;
505 struct mlx5_rxq_ctrl *rxq_ctrl =
506 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
507 struct rte_eth_dev *dev = ETH_DEV(rxq_ctrl->priv);
509 if (dev->rx_pkt_burst != mlx5_rx_burst) {
513 if (offset >= (1 << rxq->elts_n)) {
517 if (offset < rx_queue_count(rxq))
518 return RTE_ETH_RX_DESC_DONE;
519 return RTE_ETH_RX_DESC_AVAIL;
523 * DPDK callback to get the number of used descriptors in an Rx queue
526 * Pointer to the device structure.
532 * The number of used Rx descriptors.
533 * -EINVAL if the queue is invalid
536 mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
538 struct mlx5_priv *priv = dev->data->dev_private;
539 struct mlx5_rxq_data *rxq;
541 if (dev->rx_pkt_burst != mlx5_rx_burst) {
545 rxq = (*priv->rxqs)[rx_queue_id];
550 return rx_queue_count(rxq);
553 #define MLX5_SYSTEM_LOG_DIR "/var/log"
555 * Dump debug information to a log file.
560 * If not NULL, this string is printed as a header to the output
561 * and the output will be in hexadecimal view.
563 * This is the buffer address to print out.
565 * The number of bytes to dump out.
568 mlx5_dump_debug_information(const char *fname, const char *hex_title,
569 const void *buf, unsigned int hex_len)
573 MKSTR(path, "%s/%s", MLX5_SYSTEM_LOG_DIR, fname);
574 fd = fopen(path, "a+");
576 DRV_LOG(WARNING, "cannot open %s for debug dump\n",
578 MKSTR(path2, "./%s", fname);
579 fd = fopen(path2, "a+");
581 DRV_LOG(ERR, "cannot open %s for debug dump\n",
585 DRV_LOG(INFO, "New debug dump in file %s\n", path2);
587 DRV_LOG(INFO, "New debug dump in file %s\n", path);
590 rte_hexdump(fd, hex_title, buf, hex_len);
592 fprintf(fd, "%s", (const char *)buf);
593 fprintf(fd, "\n\n\n");
598 * Move QP from error state to running state and initialize indexes.
601 * Pointer to TX queue control structure.
604 * 0 on success, else -1.
607 tx_recover_qp(struct mlx5_txq_ctrl *txq_ctrl)
609 struct mlx5_mp_arg_queue_state_modify sm = {
611 .queue_id = txq_ctrl->txq.idx,
614 if (mlx5_queue_state_modify(ETH_DEV(txq_ctrl->priv), &sm))
616 txq_ctrl->txq.wqe_ci = 0;
617 txq_ctrl->txq.wqe_pi = 0;
618 txq_ctrl->txq.elts_comp = 0;
622 /* Return 1 if the error CQE is already signed, otherwise sign it and return 0. */
624 check_err_cqe_seen(volatile struct mlx5_err_cqe *err_cqe)
626 static const uint8_t magic[] = "seen";
630 for (i = 0; i < sizeof(magic); ++i)
631 if (!ret || err_cqe->rsvd1[i] != magic[i]) {
633 err_cqe->rsvd1[i] = magic[i];
642 * Pointer to TX queue structure.
644 * Pointer to the error CQE.
647 * The last Tx buffer element to free.
650 mlx5_tx_error_cqe_handle(struct mlx5_txq_data *restrict txq,
651 volatile struct mlx5_err_cqe *err_cqe)
653 if (err_cqe->syndrome != MLX5_CQE_SYNDROME_WR_FLUSH_ERR) {
654 const uint16_t wqe_m = ((1 << txq->wqe_n) - 1);
655 struct mlx5_txq_ctrl *txq_ctrl =
656 container_of(txq, struct mlx5_txq_ctrl, txq);
657 uint16_t new_wqe_pi = rte_be_to_cpu_16(err_cqe->wqe_counter);
658 int seen = check_err_cqe_seen(err_cqe);
660 if (!seen && txq_ctrl->dump_file_n <
661 txq_ctrl->priv->config.max_dump_files_num) {
662 MKSTR(err_str, "Unexpected CQE error syndrome "
663 "0x%02x CQN = %u SQN = %u wqe_counter = %u "
664 "wq_ci = %u cq_ci = %u", err_cqe->syndrome,
665 txq->cqe_s, txq->qp_num_8s >> 8,
666 rte_be_to_cpu_16(err_cqe->wqe_counter),
667 txq->wqe_ci, txq->cq_ci);
668 MKSTR(name, "dpdk_mlx5_port_%u_txq_%u_index_%u_%u",
669 PORT_ID(txq_ctrl->priv), txq->idx,
670 txq_ctrl->dump_file_n, (uint32_t)rte_rdtsc());
671 mlx5_dump_debug_information(name, NULL, err_str, 0);
672 mlx5_dump_debug_information(name, "MLX5 Error CQ:",
673 (const void *)((uintptr_t)
677 mlx5_dump_debug_information(name, "MLX5 Error SQ:",
678 (const void *)((uintptr_t)
682 txq_ctrl->dump_file_n++;
686 * Count errors in WQE units.
687 * Later this can be improved to count error packets,
688 * for example, by parsing the SQ to find how many packets
689 * should be counted for each WQE.
691 txq->stats.oerrors += ((txq->wqe_ci & wqe_m) -
693 if (tx_recover_qp(txq_ctrl) == 0) {
695 /* Release all the remaining buffers. */
696 return txq->elts_head;
698 /* Recovering failed - try again later on the same WQE. */
702 /* Do not release buffers. */
703 return txq->elts_tail;
707 * Translate RX completion flags to packet type.
710 * Pointer to RX queue structure.
714 * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
717 * Packet type for struct rte_mbuf.
719 static inline uint32_t
720 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
723 uint8_t pinfo = cqe->pkt_info;
724 uint16_t ptype = cqe->hdr_type_etc;
727 * The index to the array should have:
728 * bit[1:0] = l3_hdr_type
729 * bit[4:2] = l4_hdr_type
732 * bit[7] = outer_l3_type
734 idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10);
735 return mlx5_ptype_table[idx] | rxq->tunnel * !!(idx & (1 << 6));
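/*
 * Illustrative note: the index above is assembled from two CQE fields -
 * pkt_info bits [1:0] become idx bits [7:6] and hdr_type_etc bits [15:10]
 * become idx bits [5:0] - so it matches the layout documented for
 * mlx5_ptype_table; bit[6] indicates a tunneled packet, which is why the
 * rxq->tunnel type is OR-ed in only when that bit is set.
 */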
739 * Initialize Rx WQ and indexes.
742 * Pointer to RX queue structure.
745 mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
747 const unsigned int wqe_n = 1 << rxq->elts_n;
750 for (i = 0; (i != wqe_n); ++i) {
751 volatile struct mlx5_wqe_data_seg *scat;
755 if (mlx5_rxq_mprq_enabled(rxq)) {
756 struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[i];
758 scat = &((volatile struct mlx5_wqe_mprq *)
760 addr = (uintptr_t)mlx5_mprq_buf_addr(buf,
761 1 << rxq->strd_num_n);
762 byte_count = (1 << rxq->strd_sz_n) *
763 (1 << rxq->strd_num_n);
765 struct rte_mbuf *buf = (*rxq->elts)[i];
767 scat = &((volatile struct mlx5_wqe_data_seg *)
769 addr = rte_pktmbuf_mtod(buf, uintptr_t);
770 byte_count = DATA_LEN(buf);
772 /* scat->addr must be able to store a pointer. */
773 assert(sizeof(scat->addr) >= sizeof(uintptr_t));
774 *scat = (struct mlx5_wqe_data_seg){
775 .addr = rte_cpu_to_be_64(addr),
776 .byte_count = rte_cpu_to_be_32(byte_count),
777 .lkey = mlx5_rx_addr2mr(rxq, addr),
780 rxq->consumed_strd = 0;
781 rxq->decompressed = 0;
783 rxq->zip = (struct rxq_zip){
786 /* Update doorbell counter. */
787 rxq->rq_ci = wqe_n >> rxq->sges_n;
789 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
793 * Modify a Verbs/DevX queue state.
794 * This must be called from the primary process.
797 * Pointer to Ethernet device.
799 * State modify request parameters.
802 * 0 in case of success else non-zero value and rte_errno is set.
805 mlx5_queue_state_modify_primary(struct rte_eth_dev *dev,
806 const struct mlx5_mp_arg_queue_state_modify *sm)
809 struct mlx5_priv *priv = dev->data->dev_private;
812 struct mlx5_rxq_data *rxq = (*priv->rxqs)[sm->queue_id];
813 struct mlx5_rxq_ctrl *rxq_ctrl =
814 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
816 if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV) {
817 struct ibv_wq_attr mod = {
818 .attr_mask = IBV_WQ_ATTR_STATE,
819 .wq_state = sm->state,
822 ret = mlx5_glue->modify_wq(rxq_ctrl->obj->wq, &mod);
823 } else { /* rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ. */
824 struct mlx5_devx_modify_rq_attr rq_attr;
826 memset(&rq_attr, 0, sizeof(rq_attr));
827 if (sm->state == IBV_WQS_RESET) {
828 rq_attr.rq_state = MLX5_RQC_STATE_ERR;
829 rq_attr.state = MLX5_RQC_STATE_RST;
830 } else if (sm->state == IBV_WQS_RDY) {
831 rq_attr.rq_state = MLX5_RQC_STATE_RST;
832 rq_attr.state = MLX5_RQC_STATE_RDY;
833 } else if (sm->state == IBV_WQS_ERR) {
834 rq_attr.rq_state = MLX5_RQC_STATE_RDY;
835 rq_attr.state = MLX5_RQC_STATE_ERR;
837 ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq,
841 DRV_LOG(ERR, "Cannot change Rx WQ state to %u - %s\n",
842 sm->state, strerror(errno));
847 struct mlx5_txq_data *txq = (*priv->txqs)[sm->queue_id];
848 struct mlx5_txq_ctrl *txq_ctrl =
849 container_of(txq, struct mlx5_txq_ctrl, txq);
850 struct ibv_qp_attr mod = {
851 .qp_state = IBV_QPS_RESET,
852 .port_num = (uint8_t)priv->ibv_port,
854 struct ibv_qp *qp = txq_ctrl->ibv->qp;
856 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
858 DRV_LOG(ERR, "Cannot change the Tx QP state to RESET "
859 "%s\n", strerror(errno));
863 mod.qp_state = IBV_QPS_INIT;
864 ret = mlx5_glue->modify_qp(qp, &mod,
865 (IBV_QP_STATE | IBV_QP_PORT));
867 DRV_LOG(ERR, "Cannot change Tx QP state to INIT %s\n",
872 mod.qp_state = IBV_QPS_RTR;
873 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
875 DRV_LOG(ERR, "Cannot change Tx QP state to RTR %s\n",
880 mod.qp_state = IBV_QPS_RTS;
881 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
883 DRV_LOG(ERR, "Cannot change Tx QP state to RTS %s\n",
893 * Modify a Verbs queue state.
896 * Pointer to Ethernet device.
898 * State modify request parameters.
901 * 0 in case of success else non-zero value.
904 mlx5_queue_state_modify(struct rte_eth_dev *dev,
905 struct mlx5_mp_arg_queue_state_modify *sm)
909 switch (rte_eal_process_type()) {
910 case RTE_PROC_PRIMARY:
911 ret = mlx5_queue_state_modify_primary(dev, sm);
913 case RTE_PROC_SECONDARY:
914 ret = mlx5_mp_req_queue_state_modify(dev, sm);
924 * The function moves the RQ state to RESET when the first error CQE is
925 * seen, then lets the caller's loop drain the CQ. When the CQ is empty,
926 * it moves the RQ state back to READY and reinitializes the RQ.
927 * Identifying the next CQE and counting errors remain the caller's responsibility.
930 * Pointer to RX queue structure.
931 * @param[in] mbuf_prepare
932 * Whether to prepare mbufs for the RQ.
935 * -1 in case of recovery error, otherwise the CQE status.
938 mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t mbuf_prepare)
940 const uint16_t cqe_n = 1 << rxq->cqe_n;
941 const uint16_t cqe_mask = cqe_n - 1;
942 const unsigned int wqe_n = 1 << rxq->elts_n;
943 struct mlx5_rxq_ctrl *rxq_ctrl =
944 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
946 volatile struct mlx5_cqe *cqe;
947 volatile struct mlx5_err_cqe *err_cqe;
949 .cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask],
951 struct mlx5_mp_arg_queue_state_modify sm;
954 switch (rxq->err_state) {
955 case MLX5_RXQ_ERR_STATE_NO_ERROR:
956 rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_RESET;
958 case MLX5_RXQ_ERR_STATE_NEED_RESET:
960 sm.queue_id = rxq->idx;
961 sm.state = IBV_WQS_RESET;
962 if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv), &sm))
964 if (rxq_ctrl->dump_file_n <
965 rxq_ctrl->priv->config.max_dump_files_num) {
966 MKSTR(err_str, "Unexpected CQE error syndrome "
967 "0x%02x CQN = %u RQN = %u wqe_counter = %u"
968 " rq_ci = %u cq_ci = %u", u.err_cqe->syndrome,
969 rxq->cqn, rxq_ctrl->wqn,
970 rte_be_to_cpu_16(u.err_cqe->wqe_counter),
971 rxq->rq_ci << rxq->sges_n, rxq->cq_ci);
972 MKSTR(name, "dpdk_mlx5_port_%u_rxq_%u_%u",
973 rxq->port_id, rxq->idx, (uint32_t)rte_rdtsc());
974 mlx5_dump_debug_information(name, NULL, err_str, 0);
975 mlx5_dump_debug_information(name, "MLX5 Error CQ:",
976 (const void *)((uintptr_t)
978 sizeof(*u.cqe) * cqe_n);
979 mlx5_dump_debug_information(name, "MLX5 Error RQ:",
980 (const void *)((uintptr_t)
983 rxq_ctrl->dump_file_n++;
985 rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_READY;
987 case MLX5_RXQ_ERR_STATE_NEED_READY:
988 ret = check_cqe(u.cqe, cqe_n, rxq->cq_ci);
989 if (ret == MLX5_CQE_STATUS_HW_OWN) {
991 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
994 * The RQ consumer index must be zeroed while moving
995 * from RESET state to RDY state.
997 *rxq->rq_db = rte_cpu_to_be_32(0);
1000 sm.queue_id = rxq->idx;
1001 sm.state = IBV_WQS_RDY;
1002 if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv),
1006 const uint16_t q_mask = wqe_n - 1;
1008 struct rte_mbuf **elt;
1010 unsigned int n = wqe_n - (rxq->rq_ci -
1013 for (i = 0; i < (int)n; ++i) {
1014 elt_idx = (rxq->rq_ci + i) & q_mask;
1015 elt = &(*rxq->elts)[elt_idx];
1016 *elt = rte_mbuf_raw_alloc(rxq->mp);
1018 for (i--; i >= 0; --i) {
1019 elt_idx = (rxq->rq_ci +
1023 rte_pktmbuf_free_seg
1030 mlx5_rxq_initialize(rxq);
1031 rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
1040 * Get size of the next packet for a given CQE. For compressed CQEs, the
1041 * consumer index is updated only once all packets of the current one have been processed.
1045 * Pointer to RX queue.
1049 * Store pointer to mini-CQE if compressed. Otherwise, the pointer is not
1053 * 0 in case of empty CQE, otherwise the packet size in bytes.
1056 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
1057 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe)
1059 struct rxq_zip *zip = &rxq->zip;
1060 uint16_t cqe_n = cqe_cnt + 1;
1066 /* Process compressed data in the CQE and mini arrays. */
1068 volatile struct mlx5_mini_cqe8 (*mc)[8] =
1069 (volatile struct mlx5_mini_cqe8 (*)[8])
1070 (uintptr_t)(&(*rxq->cqes)[zip->ca &
1073 len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt);
1074 *mcqe = &(*mc)[zip->ai & 7];
1075 if ((++zip->ai & 7) == 0) {
1076 /* Invalidate consumed CQEs */
1079 while (idx != end) {
1080 (*rxq->cqes)[idx & cqe_cnt].op_own =
1081 MLX5_CQE_INVALIDATE;
1085 * Increment consumer index to skip the number
1086 * of CQEs consumed. Hardware leaves holes in
1087 * the CQ ring for software use.
1092 if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) {
1093 /* Invalidate the rest */
1097 while (idx != end) {
1098 (*rxq->cqes)[idx & cqe_cnt].op_own =
1099 MLX5_CQE_INVALIDATE;
1102 rxq->cq_ci = zip->cq_ci;
1106 * No compressed data, get next CQE and verify if it is
1113 ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
1114 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
1115 if (unlikely(ret == MLX5_CQE_STATUS_ERR ||
1117 ret = mlx5_rx_err_handle(rxq, 0);
1118 if (ret == MLX5_CQE_STATUS_HW_OWN ||
1126 op_own = cqe->op_own;
1127 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) {
1128 volatile struct mlx5_mini_cqe8 (*mc)[8] =
1129 (volatile struct mlx5_mini_cqe8 (*)[8])
1130 (uintptr_t)(&(*rxq->cqes)
1134 /* Fix endianness. */
1135 zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
1137 * Current mini array position is the one
1138 * returned by check_cqe().
1140 * If completion comprises several mini arrays,
1141 * as a special case the second one is located
1142 * 7 CQEs after the initial CQE instead of 8
1143 * for subsequent ones.
1145 zip->ca = rxq->cq_ci;
1146 zip->na = zip->ca + 7;
1147 /* Compute the next non compressed CQE. */
1149 zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
1150 /* Get packet size to return. */
1151 len = rte_be_to_cpu_32((*mc)[0].byte_cnt);
1154 /* Prefetch all to be invalidated */
1157 while (idx != end) {
1158 rte_prefetch0(&(*rxq->cqes)[(idx) &
1163 len = rte_be_to_cpu_32(cqe->byte_cnt);
1166 if (unlikely(rxq->err_state)) {
1167 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1168 ++rxq->stats.idropped;
1176 * Translate RX completion flags to offload flags.
1182 * Offload flags (ol_flags) for struct rte_mbuf.
1184 static inline uint32_t
1185 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe)
1187 uint32_t ol_flags = 0;
1188 uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);
1192 MLX5_CQE_RX_L3_HDR_VALID,
1193 PKT_RX_IP_CKSUM_GOOD) |
1195 MLX5_CQE_RX_L4_HDR_VALID,
1196 PKT_RX_L4_CKSUM_GOOD);
1201 * Fill in mbuf fields from RX completion flags.
1202 * Note that pkt->ol_flags should be initialized outside of this function.
1205 * Pointer to RX queue.
1210 * @param rss_hash_res
1211 * Packet RSS Hash result.
1214 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
1215 volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res)
1217 /* Update packet information. */
1218 pkt->packet_type = rxq_cq_to_pkt_type(rxq, cqe);
1219 if (rss_hash_res && rxq->rss_hash) {
1220 pkt->hash.rss = rss_hash_res;
1221 pkt->ol_flags |= PKT_RX_RSS_HASH;
1223 if (rxq->mark && MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) {
1224 pkt->ol_flags |= PKT_RX_FDIR;
1225 if (cqe->sop_drop_qpn !=
1226 rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) {
1227 uint32_t mark = cqe->sop_drop_qpn;
1229 pkt->ol_flags |= PKT_RX_FDIR_ID;
1230 pkt->hash.fdir.hi = mlx5_flow_mark_get(mark);
1234 pkt->ol_flags |= rxq_cq_to_ol_flags(cqe);
1235 if (rxq->vlan_strip &&
1236 (cqe->hdr_type_etc & rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) {
1237 pkt->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1238 pkt->vlan_tci = rte_be_to_cpu_16(cqe->vlan_info);
1240 if (rxq->hw_timestamp) {
1241 pkt->timestamp = rte_be_to_cpu_64(cqe->timestamp);
1242 pkt->ol_flags |= PKT_RX_TIMESTAMP;
1247 * DPDK callback for RX.
1250 * Generic pointer to RX queue structure.
1252 * Array to store received packets.
1254 * Maximum number of packets in array.
1257 * Number of packets successfully received (<= pkts_n).
1260 mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1262 struct mlx5_rxq_data *rxq = dpdk_rxq;
1263 const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1;
1264 const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1;
1265 const unsigned int sges_n = rxq->sges_n;
1266 struct rte_mbuf *pkt = NULL;
1267 struct rte_mbuf *seg = NULL;
1268 volatile struct mlx5_cqe *cqe =
1269 &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1271 unsigned int rq_ci = rxq->rq_ci << sges_n;
1272 int len = 0; /* keep its value across iterations. */
1275 unsigned int idx = rq_ci & wqe_cnt;
1276 volatile struct mlx5_wqe_data_seg *wqe =
1277 &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx];
1278 struct rte_mbuf *rep = (*rxq->elts)[idx];
1279 volatile struct mlx5_mini_cqe8 *mcqe = NULL;
1280 uint32_t rss_hash_res;
1288 rep = rte_mbuf_raw_alloc(rxq->mp);
1289 if (unlikely(rep == NULL)) {
1290 ++rxq->stats.rx_nombuf;
1293 * no buffers before we even started,
1294 * bail out silently.
1298 while (pkt != seg) {
1299 assert(pkt != (*rxq->elts)[idx]);
1303 rte_mbuf_raw_free(pkt);
1309 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1310 len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe);
1312 rte_mbuf_raw_free(rep);
1316 assert(len >= (rxq->crc_present << 2));
1318 /* If compressed, take hash result from mini-CQE. */
1319 rss_hash_res = rte_be_to_cpu_32(mcqe == NULL ?
1321 mcqe->rx_hash_result);
1322 rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
1323 if (rxq->crc_present)
1324 len -= RTE_ETHER_CRC_LEN;
1327 DATA_LEN(rep) = DATA_LEN(seg);
1328 PKT_LEN(rep) = PKT_LEN(seg);
1329 SET_DATA_OFF(rep, DATA_OFF(seg));
1330 PORT(rep) = PORT(seg);
1331 (*rxq->elts)[idx] = rep;
1333 * Fill NIC descriptor with the new buffer. The lkey and size
1334 * of the buffers are already known, only the buffer address
1337 wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
1338 /* If there's only one MR, no need to replace LKey in WQE. */
1339 if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
1340 wqe->lkey = mlx5_rx_mb2mr(rxq, rep);
1341 if (len > DATA_LEN(seg)) {
1342 len -= DATA_LEN(seg);
1347 DATA_LEN(seg) = len;
1348 #ifdef MLX5_PMD_SOFT_COUNTERS
1349 /* Increment bytes counter. */
1350 rxq->stats.ibytes += PKT_LEN(pkt);
1352 /* Return packet. */
1357 /* Align consumer index to the next stride. */
1362 if (unlikely((i == 0) && ((rq_ci >> sges_n) == rxq->rq_ci)))
1364 /* Update the consumer index. */
1365 rxq->rq_ci = rq_ci >> sges_n;
1367 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1369 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
1370 #ifdef MLX5_PMD_SOFT_COUNTERS
1371 /* Increment packets counter. */
1372 rxq->stats.ipackets += i;
1378 * Update LRO packet TCP header.
1379 * The HW LRO feature doesn't update the TCP header after coalescing the
1380 * TCP segments but supplies information in the CQE for the SW to fill it in.
1383 * Pointer to the TCP header.
1385 * Pointer to the completion entry.
1387 * The L3 pseudo-header checksum.
1390 mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *restrict tcp,
1391 volatile struct mlx5_cqe *restrict cqe,
1394 uint8_t l4_type = (rte_be_to_cpu_16(cqe->hdr_type_etc) &
1395 MLX5_CQE_L4_TYPE_MASK) >> MLX5_CQE_L4_TYPE_SHIFT;
1397 * The HW calculates only the TCP payload checksum; the SW needs to add
1398 * the TCP header checksum and the L3 pseudo-header checksum to complete it.
1400 uint32_t csum = phcsum + cqe->csum;
1402 if (l4_type == MLX5_L4_HDR_TYPE_TCP_EMPTY_ACK ||
1403 l4_type == MLX5_L4_HDR_TYPE_TCP_WITH_ACL) {
1404 tcp->tcp_flags |= RTE_TCP_ACK_FLAG;
1405 tcp->recv_ack = cqe->lro_ack_seq_num;
1406 tcp->rx_win = cqe->lro_tcp_win;
1408 if (cqe->lro_tcppsh_abort_dupack & MLX5_CQE_LRO_PUSH_MASK)
1409 tcp->tcp_flags |= RTE_TCP_PSH_FLAG;
1411 csum += rte_raw_cksum(tcp, (tcp->data_off & 0xF) * 4);
1412 csum = ((csum & 0xffff0000) >> 16) + (csum & 0xffff);
1413 csum = (~csum) & 0xffff;
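/*
 * Illustrative note: the two lines above perform a standard one's-complement
 * fold and inversion. For example, a 32-bit sum of 0x0001fffe folds to
 * 0x0001 + 0xfffe = 0xffff, and the final inversion yields 0x0000.
 */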
1420 * Update LRO packet headers.
1421 * The HW LRO feature doesn't update the L3/TCP headers after coalescing the
1422 * TCP segments but supplies information in the CQE for the SW to fill them in.
1425 * The packet address.
1427 * Pointer to the completion entry.
1429 * The packet length.
1432 mlx5_lro_update_hdr(uint8_t *restrict padd,
1433 volatile struct mlx5_cqe *restrict cqe,
1437 struct rte_ether_hdr *eth;
1438 struct rte_vlan_hdr *vlan;
1439 struct rte_ipv4_hdr *ipv4;
1440 struct rte_ipv6_hdr *ipv6;
1441 struct rte_tcp_hdr *tcp;
1446 uint16_t proto = h.eth->ether_type;
1450 while (proto == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
1451 proto == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
1452 proto = h.vlan->eth_proto;
1455 if (proto == RTE_BE16(RTE_ETHER_TYPE_IPV4)) {
1456 h.ipv4->time_to_live = cqe->lro_min_ttl;
1457 h.ipv4->total_length = rte_cpu_to_be_16(len - (h.hdr - padd));
1458 h.ipv4->hdr_checksum = 0;
1459 h.ipv4->hdr_checksum = rte_ipv4_cksum(h.ipv4);
1460 phcsum = rte_ipv4_phdr_cksum(h.ipv4, 0);
1463 h.ipv6->hop_limits = cqe->lro_min_ttl;
1464 h.ipv6->payload_len = rte_cpu_to_be_16(len - (h.hdr - padd) -
1466 phcsum = rte_ipv6_phdr_cksum(h.ipv6, 0);
1469 mlx5_lro_update_tcp_hdr(h.tcp, cqe, phcsum);
1473 mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
1475 struct mlx5_mprq_buf *buf = opaque;
1477 if (rte_atomic16_read(&buf->refcnt) == 1) {
1478 rte_mempool_put(buf->mp, buf);
1479 } else if (rte_atomic16_add_return(&buf->refcnt, -1) == 0) {
1480 rte_atomic16_set(&buf->refcnt, 1);
1481 rte_mempool_put(buf->mp, buf);
1486 mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf)
1488 mlx5_mprq_buf_free_cb(NULL, buf);
1492 mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx,
1493 const unsigned int strd_n)
1495 struct mlx5_mprq_buf *rep = rxq->mprq_repl;
1496 volatile struct mlx5_wqe_data_seg *wqe =
1497 &((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg;
1500 assert(rep != NULL);
1501 /* Replace MPRQ buf. */
1502 (*rxq->mprq_bufs)[rq_idx] = rep;
1504 addr = mlx5_mprq_buf_addr(rep, strd_n);
1505 wqe->addr = rte_cpu_to_be_64((uintptr_t)addr);
1506 /* If there's only one MR, no need to replace LKey in WQE. */
1507 if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
1508 wqe->lkey = mlx5_rx_addr2mr(rxq, (uintptr_t)addr);
1509 /* Stash a mbuf for next replacement. */
1510 if (likely(!rte_mempool_get(rxq->mprq_mp, (void **)&rep)))
1511 rxq->mprq_repl = rep;
1513 rxq->mprq_repl = NULL;
1517 * DPDK callback for RX with Multi-Packet RQ support.
1520 * Generic pointer to RX queue structure.
1522 * Array to store received packets.
1524 * Maximum number of packets in array.
1527 * Number of packets successfully received (<= pkts_n).
1530 mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1532 struct mlx5_rxq_data *rxq = dpdk_rxq;
1533 const unsigned int strd_n = 1 << rxq->strd_num_n;
1534 const unsigned int strd_sz = 1 << rxq->strd_sz_n;
1535 const unsigned int strd_shift =
1536 MLX5_MPRQ_STRIDE_SHIFT_BYTE * rxq->strd_shift_en;
1537 const unsigned int cq_mask = (1 << rxq->cqe_n) - 1;
1538 const unsigned int wq_mask = (1 << rxq->elts_n) - 1;
1539 volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
1541 uint32_t rq_ci = rxq->rq_ci;
1542 uint16_t consumed_strd = rxq->consumed_strd;
1543 uint16_t headroom_sz = rxq->strd_headroom_en * RTE_PKTMBUF_HEADROOM;
1544 struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
1546 while (i < pkts_n) {
1547 struct rte_mbuf *pkt;
1555 volatile struct mlx5_mini_cqe8 *mcqe = NULL;
1556 uint32_t rss_hash_res = 0;
1557 uint8_t lro_num_seg;
1559 if (consumed_strd == strd_n) {
1560 /* Replace WQE only if the buffer is still in use. */
1561 if (rte_atomic16_read(&buf->refcnt) > 1) {
1562 mprq_buf_replace(rxq, rq_ci & wq_mask, strd_n);
1563 /* Release the old buffer. */
1564 mlx5_mprq_buf_free(buf);
1565 } else if (unlikely(rxq->mprq_repl == NULL)) {
1566 struct mlx5_mprq_buf *rep;
1569 * Currently, the MPRQ mempool is out of buffers
1570 * and memcpy is done regardless of the Rx packet
1571 * size. Retry the allocation to get back to
1574 if (!rte_mempool_get(rxq->mprq_mp,
1576 rxq->mprq_repl = rep;
1578 /* Advance to the next WQE. */
1581 buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
1583 cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
1584 ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe);
1588 strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
1589 MLX5_MPRQ_STRIDE_NUM_SHIFT;
1591 consumed_strd += strd_cnt;
1592 if (byte_cnt & MLX5_MPRQ_FILLER_MASK)
1595 rss_hash_res = rte_be_to_cpu_32(cqe->rx_hash_res);
1596 strd_idx = rte_be_to_cpu_16(cqe->wqe_counter);
1598 /* mini-CQE for MPRQ doesn't have hash result. */
1599 strd_idx = rte_be_to_cpu_16(mcqe->stride_idx);
1601 assert(strd_idx < strd_n);
1602 assert(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) & wq_mask));
1603 lro_num_seg = cqe->lro_num_seg;
1605 * Currently configured to receive a packet per stride. But if
1606 * the MTU is adjusted through the kernel interface, the device
1607 * could consume multiple strides without raising an error. In
1608 * this case, the packet should be dropped because it is bigger
1609 * than the max_rx_pkt_len.
1611 if (unlikely(!lro_num_seg && strd_cnt > 1)) {
1612 ++rxq->stats.idropped;
1615 pkt = rte_pktmbuf_alloc(rxq->mp);
1616 if (unlikely(pkt == NULL)) {
1617 ++rxq->stats.rx_nombuf;
1620 len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
1621 assert((int)len >= (rxq->crc_present << 2));
1622 if (rxq->crc_present)
1623 len -= RTE_ETHER_CRC_LEN;
1624 offset = strd_idx * strd_sz + strd_shift;
1625 addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf, strd_n), offset);
1627 * Memcpy packets to the target mbuf if:
1628 * - The packet size is smaller than mprq_max_memcpy_len.
1629 * - The mempool for Multi-Packet RQ is out of buffers.
1631 if (len <= rxq->mprq_max_memcpy_len || rxq->mprq_repl == NULL) {
1633 * When memcpy'ing packet due to out-of-buffer, the
1634 * packet must be smaller than the target mbuf.
1636 if (unlikely(rte_pktmbuf_tailroom(pkt) < len)) {
1637 rte_pktmbuf_free_seg(pkt);
1638 ++rxq->stats.idropped;
1641 rte_memcpy(rte_pktmbuf_mtod(pkt, void *), addr, len);
1642 DATA_LEN(pkt) = len;
1644 rte_iova_t buf_iova;
1645 struct rte_mbuf_ext_shared_info *shinfo;
1646 uint16_t buf_len = strd_cnt * strd_sz;
1649 /* Increment the refcnt of the whole chunk. */
1650 rte_atomic16_add_return(&buf->refcnt, 1);
1651 assert((uint16_t)rte_atomic16_read(&buf->refcnt) <=
1653 buf_addr = RTE_PTR_SUB(addr, headroom_sz);
1655 * The MLX5 device doesn't use the iova but it is necessary in a
1656 * case where the Rx packet is transmitted via a
1659 buf_iova = rte_mempool_virt2iova(buf) +
1660 RTE_PTR_DIFF(buf_addr, buf);
1661 shinfo = &buf->shinfos[strd_idx];
1662 rte_mbuf_ext_refcnt_set(shinfo, 1);
1664 * EXT_ATTACHED_MBUF will be set in pkt->ol_flags when
1665 * attaching the stride to the mbuf, and more offload flags
1666 * will be added below by calling rxq_cq_to_mbuf().
1667 * Other fields will be overwritten.
1669 rte_pktmbuf_attach_extbuf(pkt, buf_addr, buf_iova,
1671 /* Set mbuf head-room. */
1672 pkt->data_off = headroom_sz;
1673 assert(pkt->ol_flags == EXT_ATTACHED_MBUF);
1675 * Prevent potential overflow due to MTU change through
1678 if (unlikely(rte_pktmbuf_tailroom(pkt) < len)) {
1679 rte_pktmbuf_free_seg(pkt);
1680 ++rxq->stats.idropped;
1683 DATA_LEN(pkt) = len;
1685 * An LRO packet may consume all the stride memory; in this
1686 * case the packet head-room space is not guaranteed, so an
1687 * empty mbuf must be added for the head-room.
1689 if (!rxq->strd_headroom_en) {
1690 struct rte_mbuf *headroom_mbuf =
1691 rte_pktmbuf_alloc(rxq->mp);
1693 if (unlikely(headroom_mbuf == NULL)) {
1694 rte_pktmbuf_free_seg(pkt);
1695 ++rxq->stats.rx_nombuf;
1698 PORT(pkt) = rxq->port_id;
1699 NEXT(headroom_mbuf) = pkt;
1700 pkt = headroom_mbuf;
1704 rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
1705 if (lro_num_seg > 1) {
1706 mlx5_lro_update_hdr(addr, cqe, len);
1707 pkt->ol_flags |= PKT_RX_LRO;
1708 pkt->tso_segsz = strd_sz;
1711 PORT(pkt) = rxq->port_id;
1712 #ifdef MLX5_PMD_SOFT_COUNTERS
1713 /* Increment bytes counter. */
1714 rxq->stats.ibytes += PKT_LEN(pkt);
1716 /* Return packet. */
1720 /* Update the consumer indexes. */
1721 rxq->consumed_strd = consumed_strd;
1723 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1724 if (rq_ci != rxq->rq_ci) {
1727 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
1729 #ifdef MLX5_PMD_SOFT_COUNTERS
1730 /* Increment packets counter. */
1731 rxq->stats.ipackets += i;
1737 * Dummy DPDK callback for TX.
1739 * This function is used to temporarily replace the real callback during
1740 * unsafe control operations on the queue, or in case of error.
1743 * Generic pointer to TX queue structure.
1745 * Packets to transmit.
1747 * Number of packets in array.
1750 * Number of packets successfully transmitted (<= pkts_n).
1753 removed_tx_burst(void *dpdk_txq __rte_unused,
1754 struct rte_mbuf **pkts __rte_unused,
1755 uint16_t pkts_n __rte_unused)
1762 * Dummy DPDK callback for RX.
1764 * This function is used to temporarily replace the real callback during
1765 * unsafe control operations on the queue, or in case of error.
1768 * Generic pointer to RX queue structure.
1770 * Array to store received packets.
1772 * Maximum number of packets in array.
1775 * Number of packets successfully received (<= pkts_n).
1778 removed_rx_burst(void *dpdk_txq __rte_unused,
1779 struct rte_mbuf **pkts __rte_unused,
1780 uint16_t pkts_n __rte_unused)
1787 * Vectorized Rx/Tx routines are not compiled in when required vector
1788 * instructions are not supported on a target architecture. The following null
1789 * stubs are needed for linkage when those are not included outside of this file
1790 * (e.g. mlx5_rxtx_vec_sse.c for x86).
1794 mlx5_rx_burst_vec(void *dpdk_txq __rte_unused,
1795 struct rte_mbuf **pkts __rte_unused,
1796 uint16_t pkts_n __rte_unused)
1802 mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)
1808 mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused)
1814 * Free the mbufs from the linear array of pointers.
1817 * Pointer to the array of packets to be freed.
1819 * Number of packets to be freed.
1821 * Configured Tx offloads mask. It is fully defined at
1822 * compile time and may be used for optimization.
1824 static __rte_always_inline void
1825 mlx5_tx_free_mbuf(struct rte_mbuf **restrict pkts,
1826 unsigned int pkts_n,
1827 unsigned int olx __rte_unused)
1829 struct rte_mempool *pool = NULL;
1830 struct rte_mbuf **p_free = NULL;
1831 struct rte_mbuf *mbuf;
1832 unsigned int n_free = 0;
1835 * The implemented algorithm eliminates
1836 * copying pointers to a temporary array
1837 * for rte_mempool_put_bulk() calls.
1844 * Decrement mbuf reference counter, detach
1845 * indirect and external buffers if needed.
1847 mbuf = rte_pktmbuf_prefree_seg(*pkts);
1848 if (likely(mbuf != NULL)) {
1849 assert(mbuf == *pkts);
1850 if (likely(n_free != 0)) {
1851 if (unlikely(pool != mbuf->pool))
1852 /* From different pool. */
1855 /* Start new scan array. */
1862 if (unlikely(pkts_n == 0)) {
1868 * This happens if mbuf is still referenced.
1869 * We can't put it back to the pool, skip.
1873 if (unlikely(n_free != 0))
1874 /* There is some array to free.*/
1876 if (unlikely(pkts_n == 0))
1877 /* Last mbuf, nothing to free. */
1883 * This loop is implemented to avoid multiple
1884 * inlining of rte_mempool_put_bulk().
1890 * Free the array of pre-freed mbufs
1891 * belonging to the same memory pool.
1893 rte_mempool_put_bulk(pool, (void *)p_free, n_free);
1894 if (unlikely(mbuf != NULL)) {
1895 /* There is the request to start new scan. */
1900 if (likely(pkts_n != 0))
1903 * This is the last mbuf to be freed.
1904 * Do one more loop iteration to complete.
1905 * This is a rare case of the last unique mbuf.
1910 if (likely(pkts_n == 0))
1919 * Free the mbufs from the elts ring buffer up to the new tail.
1922 * Pointer to Tx queue structure.
1924 * Index in elts to free up to, becomes new elts tail.
1926 * Configured Tx offloads mask. It is fully defined at
1927 * compile time and may be used for optimization.
1929 static __rte_always_inline void
1930 mlx5_tx_free_elts(struct mlx5_txq_data *restrict txq,
1932 unsigned int olx __rte_unused)
1934 uint16_t n_elts = tail - txq->elts_tail;
1937 assert(n_elts <= txq->elts_s);
1939 * Implement a loop to support ring buffer wraparound
1940 * with single inlining of mlx5_tx_free_mbuf().
1945 part = txq->elts_s - (txq->elts_tail & txq->elts_m);
1946 part = RTE_MIN(part, n_elts);
1948 assert(part <= txq->elts_s);
1949 mlx5_tx_free_mbuf(&txq->elts[txq->elts_tail & txq->elts_m],
1951 txq->elts_tail += part;
1957 * Store the mbufs being sent into the elts ring buffer.
1958 * On Tx completion these mbufs will be freed.
1961 * Pointer to Tx queue structure.
1963 * Pointer to array of packets to be stored.
1965 * Number of packets to be stored.
1967 * Configured Tx offloads mask. It is fully defined at
1968 * compile time and may be used for optimization.
1970 static __rte_always_inline void
1971 mlx5_tx_copy_elts(struct mlx5_txq_data *restrict txq,
1972 struct rte_mbuf **restrict pkts,
1973 unsigned int pkts_n,
1974 unsigned int olx __rte_unused)
1977 struct rte_mbuf **elts = (struct rte_mbuf **)txq->elts;
1981 part = txq->elts_s - (txq->elts_head & txq->elts_m);
1983 assert(part <= txq->elts_s);
1984 /* This code is a good candidate for vectorizing with SIMD. */
1985 rte_memcpy((void *)(elts + (txq->elts_head & txq->elts_m)),
1987 RTE_MIN(part, pkts_n) * sizeof(struct rte_mbuf *));
1988 txq->elts_head += pkts_n;
1989 if (unlikely(part < pkts_n))
1990 /* The copy is wrapping around the elts array. */
1991 rte_memcpy((void *)elts, (void *)(pkts + part),
1992 (pkts_n - part) * sizeof(struct rte_mbuf *));
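/*
 * Worked example (illustrative only): with elts_s = 256, elts_head = 250 and
 * pkts_n = 10, "part" above is 6, so 6 pointers are copied to the tail of the
 * elts array and the remaining 4 wrap around to its beginning.
 */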
1996 * Manage TX completions. This routine checks the CQ for
1997 * arrived CQEs, deduces the last completed WQE in the SQ,
1998 * updates the SQ producer index and frees all completed mbufs.
2001 * Pointer to TX queue structure.
2003 * Configured Tx offloads mask. It is fully defined at
2004 * compile time and may be used for optimization.
2006 * NOTE: not inlined intentionally, it makes the tx_burst
2007 * routine smaller, simpler and faster - as shown by experiments.
2010 mlx5_tx_handle_completion(struct mlx5_txq_data *restrict txq,
2011 unsigned int olx __rte_unused)
2013 bool update = false;
2017 volatile struct mlx5_wqe_cseg *cseg;
2018 volatile struct mlx5_cqe *cqe;
2021 cqe = &txq->cqes[txq->cq_ci & txq->cqe_m];
2022 ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci);
2023 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
2024 if (likely(ret != MLX5_CQE_STATUS_ERR)) {
2025 /* No new CQEs in completion queue. */
2026 assert(ret == MLX5_CQE_STATUS_HW_OWN);
2027 if (likely(update)) {
2028 /* Update the consumer index. */
2029 rte_compiler_barrier();
2031 rte_cpu_to_be_32(txq->cq_ci);
2035 /* Some error occurred, try to restart. */
2037 tail = mlx5_tx_error_cqe_handle
2038 (txq, (volatile struct mlx5_err_cqe *)cqe);
2040 /* Normal transmit completion. */
2043 txq->wqe_pi = rte_be_to_cpu_16(cqe->wqe_counter);
2044 cseg = (volatile struct mlx5_wqe_cseg *)
2045 (txq->wqes + (txq->wqe_pi & txq->wqe_m));
2052 if (likely(tail != txq->elts_tail)) {
2053 /* Free data buffers from elts. */
2054 mlx5_tx_free_elts(txq, tail, olx);
2055 assert(tail == txq->elts_tail);
2062 * Check if the completion request flag should be set in the last WQE.
2063 * Both pushed mbufs and WQEs are monitored and the completion request
2064 * flag is set if any of the thresholds is reached.
2067 * Pointer to TX queue structure.
2069 * Number of mbufs not yet stored in the elts array.
2071 * Pointer to burst routine local context.
2073 * Configured Tx offloads mask. It is fully defined at
2074 * compile time and may be used for optimization.
2076 static __rte_always_inline void
2077 mlx5_tx_request_completion(struct mlx5_txq_data *restrict txq,
2078 unsigned int n_mbuf,
2079 struct mlx5_txq_local *restrict loc,
2080 unsigned int olx __rte_unused)
2082 uint16_t head = txq->elts_head + n_mbuf;
2084 if ((uint16_t)(head - txq->elts_comp) >= MLX5_TX_COMP_THRESH ||
2085 (uint16_t)(txq->wqe_ci - txq->wqe_comp) >= txq->wqe_thres) {
2086 volatile struct mlx5_wqe *last = loc->wqe_last;
2088 txq->elts_comp = head;
2089 txq->wqe_comp = txq->wqe_ci;
2090 /* Request unconditional completion on last WQE. */
2091 last->cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
2092 MLX5_COMP_MODE_OFFSET);
2093 /* Save elts_head in unused "immediate" field of WQE. */
2094 last->cseg.misc = head;
2096 * A CQE slot must always be available. Count the
2097 * issued CQE "always" requests instead of the producer
2098 * index, because CQEs with errors can occur here and the
2099 * difference with the consumer index may become inconsistent.
2101 assert(txq->cqe_s > ++txq->cq_pi);
2106 * DPDK callback to check the status of a tx descriptor.
2111 * The index of the descriptor in the ring.
2114 * The status of the tx descriptor.
2117 mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
2119 struct mlx5_txq_data *restrict txq = tx_queue;
2122 mlx5_tx_handle_completion(txq, 0);
2123 used = txq->elts_head - txq->elts_tail;
2125 return RTE_ETH_TX_DESC_FULL;
2126 return RTE_ETH_TX_DESC_DONE;
2130 * Build the Control Segment with specified opcode:
2131 * - MLX5_OPCODE_SEND
2132 * - MLX5_OPCODE_ENHANCED_MPSW
2136 * Pointer to TX queue structure.
2138 * Pointer to burst routine local context.
2140 * Pointer to WQE to fill with built Control Segment.
2142 * Supposed length of WQE in segments.
2144 * SQ WQE opcode to put into Control Segment.
2146 * Configured Tx offloads mask. It is fully defined at
2147 * compile time and may be used for optimization.
2149 static __rte_always_inline void
2150 mlx5_tx_cseg_init(struct mlx5_txq_data *restrict txq,
2151 struct mlx5_txq_local *restrict loc __rte_unused,
2152 struct mlx5_wqe *restrict wqe,
2154 unsigned int opcode,
2155 unsigned int olx __rte_unused)
2157 struct mlx5_wqe_cseg *restrict cs = &wqe->cseg;
2159 cs->opcode = rte_cpu_to_be_32((txq->wqe_ci << 8) | opcode);
2160 cs->sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
2161 cs->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
2162 MLX5_COMP_MODE_OFFSET);
2163 cs->misc = RTE_BE32(0);
2167 * Build the Ethernet Segment without inlined data.
2168 * Supports Software Parser, Checksums and VLAN
2169 * insertion Tx offload features.
2172 * Pointer to TX queue structure.
2174 * Pointer to burst routine local context.
2176 * Pointer to WQE to fill with built Ethernet Segment.
2178 * Configured Tx offloads mask. It is fully defined at
2179 * compile time and may be used for optimization.
2181 static __rte_always_inline void
2182 mlx5_tx_eseg_none(struct mlx5_txq_data *restrict txq __rte_unused,
2183 struct mlx5_txq_local *restrict loc,
2184 struct mlx5_wqe *restrict wqe,
2187 struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
2191 * Calculate and set checksum flags first, the dword field
2192 * in the segment may be shared with the Software Parser flags.
2194 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2195 es->flags = rte_cpu_to_le_32(csum);
2197 * Calculate and set Software Parser offsets and flags.
2198 * These flags are set for custom UDP and IP tunnel packets.
2200 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2201 /* Fill metadata field if needed. */
2202 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2203 loc->mbuf->ol_flags & PKT_TX_METADATA ?
2204 loc->mbuf->tx_metadata : 0 : 0;
2205 /* Engage VLAN tag insertion feature if requested. */
2206 if (MLX5_TXOFF_CONFIG(VLAN) &&
2207 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
2209 * We should get here only if the device supports
2210 * this feature correctly.
2212 assert(txq->vlan_en);
2213 es->inline_hdr = rte_cpu_to_be_32(MLX5_ETH_WQE_VLAN_INSERT |
2214 loc->mbuf->vlan_tci);
2216 es->inline_hdr = RTE_BE32(0);
2221 * Build the Ethernet Segment with minimal inlined data
2222 * of MLX5_ESEG_MIN_INLINE_SIZE bytes in length. This is
2223 * used to fill the gap in single WQEBB WQEs.
2224 * Supports Software Parser, Checksums and VLAN
2225 * insertion Tx offload features.
2228 * Pointer to TX queue structure.
2230 * Pointer to burst routine local context.
2232 * Pointer to WQE to fill with built Ethernet Segment.
2234 * Length of VLAN tag insertion if any.
2236 * Configured Tx offloads mask. It is fully defined at
2237 * compile time and may be used for optimization.
2239 static __rte_always_inline void
2240 mlx5_tx_eseg_dmin(struct mlx5_txq_data *restrict txq __rte_unused,
2241 struct mlx5_txq_local *restrict loc,
2242 struct mlx5_wqe *restrict wqe,
2246 struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
2248 uint8_t *psrc, *pdst;
2251 * Calculate and set checksum flags first, the dword field
2252 * in the segment may be shared with the Software Parser flags.
2254 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2255 es->flags = rte_cpu_to_le_32(csum);
2257 * Calculate and set Software Parser offsets and flags.
2258 * These flags are set for custom UDP and IP tunnel packets.
2260 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2261 /* Fill metadata field if needed. */
2262 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2263 loc->mbuf->ol_flags & PKT_TX_METADATA ?
2264 loc->mbuf->tx_metadata : 0 : 0;
2265 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2267 sizeof(rte_v128u32_t)),
2268 "invalid Ethernet Segment data size");
2269 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2271 sizeof(struct rte_vlan_hdr) +
2272 2 * RTE_ETHER_ADDR_LEN),
2273 "invalid Ethernet Segment data size");
2274 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
2275 es->inline_hdr_sz = RTE_BE16(MLX5_ESEG_MIN_INLINE_SIZE);
2276 es->inline_data = *(unaligned_uint16_t *)psrc;
2277 psrc += sizeof(uint16_t);
2278 pdst = (uint8_t *)(es + 1);
2279 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2280 /* Implement VLAN tag insertion as part of the inline data. */
2281 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
2282 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2283 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2284 /* Insert VLAN ethertype + VLAN tag. */
2285 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2286 ((RTE_ETHER_TYPE_VLAN << 16) |
2287 loc->mbuf->vlan_tci);
2288 pdst += sizeof(struct rte_vlan_hdr);
2289 /* Copy the remaining two bytes from the packet data. */
2290 assert(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
2291 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
2293 /* Fill the gap in the title WQEBB with inline data. */
2294 rte_mov16(pdst, psrc);
2299 * Build the Ethernet Segment with entire packet
2300 * data inlining. Checks the boundary of WQEBB and
2301 * ring buffer wrapping, supports Software Parser,
2302 * Checksums and VLAN insertion Tx offload features.
2305 * Pointer to TX queue structure.
2307 * Pointer to burst routine local context.
2309 * Pointer to WQE to fill with built Ethernet Segment.
2311 * Length of VLAN tag insertion if any.
2313 * Length of data to inline (VLAN included, if any).
2315 * TSO flag, set mss field from the packet.
2317 * Configured Tx offloads mask. It is fully defined at
2318 * compile time and may be used for optimization.
2321 * Pointer to the next Data Segment (aligned and wrapped around).
2323 static __rte_always_inline struct mlx5_wqe_dseg *
2324 mlx5_tx_eseg_data(struct mlx5_txq_data *restrict txq,
2325 struct mlx5_txq_local *restrict loc,
2326 struct mlx5_wqe *restrict wqe,
2332 struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
2334 uint8_t *psrc, *pdst;
2338 * Calculate and set check sum flags first, dword field
2339 * in segment may be shared with Software Parser flags.
2341 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2344 csum |= loc->mbuf->tso_segsz;
2345 es->flags = rte_cpu_to_be_32(csum);
2347 es->flags = rte_cpu_to_le_32(csum);
2350 * Calculate and set Software Parser offsets and flags.
2351 * These flags are set for custom UDP and IP tunnel packets.
2353 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2354 /* Fill metadata field if needed. */
2355 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2356 loc->mbuf->ol_flags & PKT_TX_METADATA ?
2357 loc->mbuf->tx_metadata : 0 : 0;
2358 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2360 sizeof(rte_v128u32_t)),
2361 "invalid Ethernet Segment data size");
2362 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2364 sizeof(struct rte_vlan_hdr) +
2365 2 * RTE_ETHER_ADDR_LEN),
2366 "invalid Ethernet Segment data size");
2367 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
2368 es->inline_hdr_sz = rte_cpu_to_be_16(inlen);
2369 es->inline_data = *(unaligned_uint16_t *)psrc;
2370 psrc += sizeof(uint16_t);
2371 pdst = (uint8_t *)(es + 1);
2372 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2373 /* Implement VLAN tag insertion as part of the inline data. */
2374 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
2375 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2376 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2377 /* Insert VLAN ethertype + VLAN tag. */
2378 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2379 ((RTE_ETHER_TYPE_VLAN << 16) |
2380 loc->mbuf->vlan_tci);
2381 pdst += sizeof(struct rte_vlan_hdr);
2382 /* Copy the remaining two bytes from the packet data. */
2383 assert(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
2384 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
2385 psrc += sizeof(uint16_t);
2387 /* Fill the gap in the title WQEBB with inline data. */
2388 rte_mov16(pdst, psrc);
2389 psrc += sizeof(rte_v128u32_t);
2391 pdst = (uint8_t *)(es + 2);
2392 assert(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
2393 assert(pdst < (uint8_t *)txq->wqes_end);
2394 inlen -= MLX5_ESEG_MIN_INLINE_SIZE;
2396 assert(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
2397 return (struct mlx5_wqe_dseg *)pdst;
2400 * The WQEBB space availability is checked by caller.
2401 * Here we should be aware of WQE ring buffer wraparound only.
2403 part = (uint8_t *)txq->wqes_end - pdst;
2404 part = RTE_MIN(part, inlen);
2406 rte_memcpy(pdst, psrc, part);
2408 if (likely(!inlen)) {
2410 * If the return value is not used by the caller,
2411 * the code below will be optimized out.
2414 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2415 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
2416 pdst = (uint8_t *)txq->wqes;
2417 return (struct mlx5_wqe_dseg *)pdst;
2419 pdst = (uint8_t *)txq->wqes;
2426 * Copy data from a chain of mbufs to the specified linear buffer.
2427 * If the data from some mbuf is copied completely,
2428 * this mbuf is freed. The local
2429 * structure is used to keep the byte stream state.
2432 * Pointer to the destination linear buffer.
2434 * Pointer to burst routine local context.
2436 * Length of data to be copied.
2438 * Configured Tx offloads mask. It is fully defined at
2439 * compile time and may be used for optimization.
2441 static __rte_always_inline void
2442 mlx5_tx_mseg_memcpy(uint8_t *pdst,
2443 struct mlx5_txq_local *restrict loc,
2445 unsigned int olx __rte_unused)
2447 struct rte_mbuf *mbuf;
2448 unsigned int part, dlen;
2453 /* Allow zero length packets, must check first. */
2454 dlen = rte_pktmbuf_data_len(loc->mbuf);
2455 if (dlen <= loc->mbuf_off) {
2456 /* Exhausted packet, just free. */
2458 loc->mbuf = mbuf->next;
2459 rte_pktmbuf_free_seg(mbuf);
2461 assert(loc->mbuf_nseg > 1);
2466 dlen -= loc->mbuf_off;
2467 psrc = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
2469 part = RTE_MIN(len, dlen);
2470 rte_memcpy(pdst, psrc, part);
2471 loc->mbuf_off += part;
2474 if (loc->mbuf_off >= rte_pktmbuf_data_len(loc->mbuf)) {
2476 /* Exhausted packet, just free. */
2478 loc->mbuf = mbuf->next;
2479 rte_pktmbuf_free_seg(mbuf);
2481 assert(loc->mbuf_nseg >= 1);
2491 * Build the Ethernet Segment with inlined data from
2492 * multi-segment packet. Checks the boundary of WQEBB
2493 * and ring buffer wrapping, supports Software Parser,
2494 * Checksums and VLAN insertion Tx offload features.
2497 * Pointer to TX queue structure.
2499 * Pointer to burst routine local context.
2501 * Pointer to WQE to fill with built Ethernet Segment.
2503 * Length of VLAN tag insertion if any.
2505 * Length of data to inline (VLAN included, if any).
2507 * TSO flag, set mss field from the packet.
2509 * Configured Tx offloads mask. It is fully defined at
2510 * compile time and may be used for optimization.
2513 * Pointer to the next Data Segment (aligned and
2514 * possibly NOT wrapped around - the caller should do
2515 * the wrapping check on its own).
2517 static __rte_always_inline struct mlx5_wqe_dseg *
2518 mlx5_tx_eseg_mdat(struct mlx5_txq_data *restrict txq,
2519 struct mlx5_txq_local *restrict loc,
2520 struct mlx5_wqe *restrict wqe,
2526 struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
2532 * Calculate and set check sum flags first, uint32_t field
2533 * in segment may be shared with Software Parser flags.
2535 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2538 csum |= loc->mbuf->tso_segsz;
2539 es->flags = rte_cpu_to_be_32(csum);
2541 es->flags = rte_cpu_to_le_32(csum);
2544 * Calculate and set Software Parser offsets and flags.
2545 * These flags are set for custom UDP and IP tunnel packets.
2547 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2548 /* Fill metadata field if needed. */
2549 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2550 loc->mbuf->ol_flags & PKT_TX_METADATA ?
2551 loc->mbuf->tx_metadata : 0 : 0;
2552 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2554 sizeof(rte_v128u32_t)),
2555 "invalid Ethernet Segment data size");
2556 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2558 sizeof(struct rte_vlan_hdr) +
2559 2 * RTE_ETHER_ADDR_LEN),
2560 "invalid Ethernet Segment data size");
2561 assert(inlen > MLX5_ESEG_MIN_INLINE_SIZE);
2562 es->inline_hdr_sz = rte_cpu_to_be_16(inlen);
2563 pdst = (uint8_t *)&es->inline_data;
2564 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2565 /* Implement VLAN tag insertion as part of the inline data. */
2566 mlx5_tx_mseg_memcpy(pdst, loc, 2 * RTE_ETHER_ADDR_LEN, olx);
2567 pdst += 2 * RTE_ETHER_ADDR_LEN;
2568 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2569 ((RTE_ETHER_TYPE_VLAN << 16) |
2570 loc->mbuf->vlan_tci);
2571 pdst += sizeof(struct rte_vlan_hdr);
2572 inlen -= 2 * RTE_ETHER_ADDR_LEN + sizeof(struct rte_vlan_hdr);
2574 assert(pdst < (uint8_t *)txq->wqes_end);
2576 * The WQEBB space availability is checked by caller.
2577 * Here we should be aware of WQE ring buffer wraparound only.
2579 part = (uint8_t *)txq->wqes_end - pdst;
2580 part = RTE_MIN(part, inlen);
2583 mlx5_tx_mseg_memcpy(pdst, loc, part, olx);
2585 if (likely(!inlen)) {
2587 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2588 return (struct mlx5_wqe_dseg *)pdst;
2590 pdst = (uint8_t *)txq->wqes;
2596 * Build the Data Segment of pointer type.
2599 * Pointer to TX queue structure.
2601 * Pointer to burst routine local context.
2603 * Pointer to WQE to fill with built Data Segment.
2605 * Data buffer to point.
2607 * Data buffer length.
2609 * Configured Tx offloads mask. It is fully defined at
2610 * compile time and may be used for optimization.
2612 static __rte_always_inline void
2613 mlx5_tx_dseg_ptr(struct mlx5_txq_data *restrict txq,
2614 struct mlx5_txq_local *restrict loc,
2615 struct mlx5_wqe_dseg *restrict dseg,
2618 unsigned int olx __rte_unused)
2622 dseg->bcount = rte_cpu_to_be_32(len);
2623 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
2624 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
2628 * Build the Data Segment of pointer type, or inline
2629 * the data if its length fits into the inline buffer
2630 * of the minimal Data Segment size.
2633 * Pointer to TX queue structure.
2635 * Pointer to burst routine local context.
2637 * Pointer to WQE to fill with built Data Segment.
2639 * Data buffer to point.
2641 * Data buffer length.
2643 * Configured Tx offloads mask. It is fully defined at
2644 * compile time and may be used for optimization.
2646 static __rte_always_inline void
2647 mlx5_tx_dseg_iptr(struct mlx5_txq_data *restrict txq,
2648 struct mlx5_txq_local *restrict loc,
2649 struct mlx5_wqe_dseg *restrict dseg,
2652 unsigned int olx __rte_unused)
2658 if (len > MLX5_DSEG_MIN_INLINE_SIZE) {
2659 dseg->bcount = rte_cpu_to_be_32(len);
2660 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
2661 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
2665 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
2666 /* Unrolled implementation of generic rte_memcpy. */
2667 dst = (uintptr_t)&dseg->inline_data[0];
2668 src = (uintptr_t)buf;
2669 #ifdef RTE_ARCH_STRICT_ALIGN
2670 memcpy((void *)dst, (void *)src, len);
2673 *(uint64_t *)dst = *(uint64_t *)src;
2674 dst += sizeof(uint64_t);
2675 src += sizeof(uint64_t);
2678 *(uint32_t *)dst = *(uint32_t *)src;
2679 dst += sizeof(uint32_t);
2680 src += sizeof(uint32_t);
2683 *(uint16_t *)dst = *(uint16_t *)src;
2684 dst += sizeof(uint16_t);
2685 src += sizeof(uint16_t);
2688 *(uint8_t *)dst = *(uint8_t *)src;
2693 * Build the Data Segment of inlined data from single
2694 * segment packet, no VLAN insertion.
2697 * Pointer to TX queue structure.
2699 * Pointer to burst routine local context.
2701 * Pointer to WQE to fill with built Data Segment.
2703 * Data buffer to point.
2705 * Data buffer length.
2707 * Configured Tx offloads mask. It is fully defined at
2708 * compile time and may be used for optimization.
2711 * Pointer to the next Data Segment after inlined data.
2712 * Ring buffer wraparound check is needed. We do not
2713 * do it here because it may not be needed for the
2714 * last packet in the eMPW session.
2716 static __rte_always_inline struct mlx5_wqe_dseg *
2717 mlx5_tx_dseg_empw(struct mlx5_txq_data *restrict txq,
2718 struct mlx5_txq_local *restrict loc __rte_unused,
2719 struct mlx5_wqe_dseg *restrict dseg,
2722 unsigned int olx __rte_unused)
2727 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
2728 pdst = &dseg->inline_data[0];
2730 * The WQEBB space availability is checked by caller.
2731 * Here we should be aware of WQE ring buffer wraparound only.
2733 part = (uint8_t *)txq->wqes_end - pdst;
2734 part = RTE_MIN(part, len);
2736 rte_memcpy(pdst, buf, part);
2740 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2741 /* Note: no final wraparound check here. */
2742 return (struct mlx5_wqe_dseg *)pdst;
2744 pdst = (uint8_t *)txq->wqes;
2751 * Build the Data Segment of inlined data from single
2752 * segment packet with VLAN insertion.
2755 * Pointer to TX queue structure.
2757 * Pointer to burst routine local context.
2759 * Pointer to the dseg to fill with the built Data Segment.
2761 * Data buffer to point.
2763 * Data buffer length.
2765 * Configured Tx offloads mask. It is fully defined at
2766 * compile time and may be used for optimization.
2769 * Pointer to the next Data Segment after inlined data.
2770 * Ring buffer wraparound check is needed.
2772 static __rte_always_inline struct mlx5_wqe_dseg *
2773 mlx5_tx_dseg_vlan(struct mlx5_txq_data *restrict txq,
2774 struct mlx5_txq_local *restrict loc __rte_unused,
2775 struct mlx5_wqe_dseg *restrict dseg,
2778 unsigned int olx __rte_unused)
2784 assert(len > MLX5_ESEG_MIN_INLINE_SIZE);
2785 static_assert(MLX5_DSEG_MIN_INLINE_SIZE ==
2786 (2 * RTE_ETHER_ADDR_LEN),
2787 "invalid Data Segment data size");
2788 dseg->bcount = rte_cpu_to_be_32((len + sizeof(struct rte_vlan_hdr)) |
2789 MLX5_ETH_WQE_DATA_INLINE);
2790 pdst = &dseg->inline_data[0];
2791 memcpy(pdst, buf, MLX5_DSEG_MIN_INLINE_SIZE);
2792 buf += MLX5_DSEG_MIN_INLINE_SIZE;
2793 pdst += MLX5_DSEG_MIN_INLINE_SIZE;
2794 /* Insert VLAN ethertype + VLAN tag. Pointer is aligned. */
2795 assert(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
2796 *(uint32_t *)pdst = rte_cpu_to_be_32((RTE_ETHER_TYPE_VLAN << 16) |
2797 loc->mbuf->vlan_tci);
2798 pdst += sizeof(struct rte_vlan_hdr);
2799 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
2800 pdst = (uint8_t *)txq->wqes;
2802 * The WQEBB space availability is checked by caller.
2803 * Here we should be aware of WQE ring buffer wraparound only.
2805 part = (uint8_t *)txq->wqes_end - pdst;
2806 part = RTE_MIN(part, len);
2808 rte_memcpy(pdst, buf, part);
2812 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2813 /* Note: no final wraparound check here. */
2814 return (struct mlx5_wqe_dseg *)pdst;
2816 pdst = (uint8_t *)txq->wqes;
2823 * Build the Ethernet Segment with optionally inlined data with
2824 * VLAN insertion and following Data Segments (if any) from
2825 * multi-segment packet. Used by ordinary send and TSO.
2828 * Pointer to TX queue structure.
2830 * Pointer to burst routine local context.
2832 * Pointer to WQE to fill with built Ethernet/Data Segments.
2834 * Length of VLAN header to insert, 0 means no VLAN insertion.
2836 * Data length to inline. For TSO this parameter specifies
2837 * the exact value; for the ordinary send routine it can be aligned
2838 * up by the caller to provide better WQE space saving and data buffer
2839 * start address alignment. This length includes the VLAN header
2842 * Zero means ordinary send, inlined data can be extended,
2843 * otherwise this is TSO, inlined data length is fixed.
2845 * Configured Tx offloads mask. It is fully defined at
2846 * compile time and may be used for optimization.
2849 * Actual size of built WQE in segments.
2851 static __rte_always_inline unsigned int
2852 mlx5_tx_mseg_build(struct mlx5_txq_data *restrict txq,
2853 struct mlx5_txq_local *restrict loc,
2854 struct mlx5_wqe *restrict wqe,
2858 unsigned int olx __rte_unused)
2860 struct mlx5_wqe_dseg *restrict dseg;
2863 assert((rte_pktmbuf_pkt_len(loc->mbuf) + vlan) >= inlen);
2864 loc->mbuf_nseg = NB_SEGS(loc->mbuf);
2867 dseg = mlx5_tx_eseg_mdat(txq, loc, wqe, vlan, inlen, tso, olx);
2868 if (!loc->mbuf_nseg)
2871 * There are still some mbufs remaining, not inlined.
2872 * The first mbuf may be partially inlined and we
2873 * must process the possible non-zero data offset.
2875 if (loc->mbuf_off) {
2880 * Exhausted packets must be dropped before.
2881 * Non-zero offset means there is some data
2882 * remaining in the packet.
2884 assert(loc->mbuf_off < rte_pktmbuf_data_len(loc->mbuf));
2885 assert(rte_pktmbuf_data_len(loc->mbuf));
2886 dptr = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
2888 dlen = rte_pktmbuf_data_len(loc->mbuf) - loc->mbuf_off;
2890 * Build the pointer/minimal data Data Segment.
2891 * Do ring buffer wrapping check in advance.
2893 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
2894 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
2895 mlx5_tx_dseg_iptr(txq, loc, dseg, dptr, dlen, olx);
2896 /* Store the mbuf to be freed on completion. */
2897 assert(loc->elts_free);
2898 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
2901 if (--loc->mbuf_nseg == 0)
2903 loc->mbuf = loc->mbuf->next;
2907 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
2908 struct rte_mbuf *mbuf;
2910 /* Zero length segment found, just skip. */
2912 loc->mbuf = loc->mbuf->next;
2913 rte_pktmbuf_free_seg(mbuf);
2914 if (--loc->mbuf_nseg == 0)
2917 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
2918 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
2921 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
2922 rte_pktmbuf_data_len(loc->mbuf), olx);
2923 assert(loc->elts_free);
2924 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
2927 if (--loc->mbuf_nseg == 0)
2929 loc->mbuf = loc->mbuf->next;
2934 /* Calculate actual segments used from the dseg pointer. */
2935 if ((uintptr_t)wqe < (uintptr_t)dseg)
2936 ds = ((uintptr_t)dseg - (uintptr_t)wqe) / MLX5_WSEG_SIZE;
2938 ds = (((uintptr_t)dseg - (uintptr_t)wqe) +
2939 txq->wqe_s * MLX5_WQE_SIZE) / MLX5_WSEG_SIZE;
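/*
 * In the branch above the dseg pointer has wrapped past the end of the
 * WQE ring buffer, so the ring size in bytes (txq->wqe_s * MLX5_WQE_SIZE)
 * is added back before converting the byte distance into 16-byte
 * segment units.
 */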
2944 * Tx one packet function for multi-segment TSO. Supports all
2945 * types of Tx offloads, uses MLX5_OPCODE_TSO to build WQEs,
2946 * sends one packet per WQE.
2948 * This routine is responsible for storing processed mbufs
2949 * into the elts ring buffer and updating elts_head.
2952 * Pointer to TX queue structure.
2954 * Pointer to burst routine local context.
2956 * Configured Tx offloads mask. It is fully defined at
2957 * compile time and may be used for optimization.
2960 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
2961 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
2962 * Local context variables partially updated.
2964 static __rte_always_inline enum mlx5_txcmp_code
2965 mlx5_tx_packet_multi_tso(struct mlx5_txq_data *restrict txq,
2966 struct mlx5_txq_local *restrict loc,
2969 struct mlx5_wqe *restrict wqe;
2970 unsigned int ds, dlen, inlen, ntcp, vlan = 0;
2973 * Calculate data length to be inlined to estimate
2974 * the required space in WQE ring buffer.
2976 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
2977 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
2978 vlan = sizeof(struct rte_vlan_hdr);
2979 inlen = loc->mbuf->l2_len + vlan +
2980 loc->mbuf->l3_len + loc->mbuf->l4_len;
2981 if (unlikely((!inlen || !loc->mbuf->tso_segsz)))
2982 return MLX5_TXCMP_CODE_ERROR;
2983 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
2984 inlen += loc->mbuf->outer_l2_len + loc->mbuf->outer_l3_len;
2985 /* Packet must contain all TSO headers. */
2986 if (unlikely(inlen > MLX5_MAX_TSO_HEADER ||
2987 inlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
2988 inlen > (dlen + vlan)))
2989 return MLX5_TXCMP_CODE_ERROR;
2990 assert(inlen >= txq->inlen_mode);
2992 * Check whether there are enough free WQEBBs:
2994 * - Ethernet Segment
2995 * - First Segment of inlined Ethernet data
2996 * - ... data continued ...
2997 * - Data Segments of pointer/min inline type
2999 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
3000 MLX5_ESEG_MIN_INLINE_SIZE +
3002 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
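/*
 * Illustrative example (values assumed): with 18 bytes of Ethernet data
 * built into the Ethernet Segment (MLX5_ESEG_MIN_INLINE_SIZE) and
 * 16-byte WSEGs, a TSO header of inlen = 54 bytes on a 2-segment mbuf
 * gives ds = 2 + 2 + (54 - 18 + 15) / 16 = 7 segments, i.e.
 * (7 + 3) / 4 = 2 WQEBBs, since each WQEBB holds four 16-byte segments.
 */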
3003 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3004 return MLX5_TXCMP_CODE_EXIT;
3005 /* Check for maximal WQE size. */
3006 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3007 return MLX5_TXCMP_CODE_ERROR;
3008 #ifdef MLX5_PMD_SOFT_COUNTERS
3009 /* Update sent data bytes/packets counters. */
3010 ntcp = (dlen - (inlen - vlan) + loc->mbuf->tso_segsz - 1) /
3011 loc->mbuf->tso_segsz;
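/*
 * ntcp computed above is the number of TCP segments the NIC will
 * produce: the TSO payload (packet length minus the header bytes
 * present in the packet data) divided by the MSS, rounded up.
 */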
3013 * One will be added for mbuf itself
3014 * at the end of the mlx5_tx_burst from
3015 * loc->pkts_sent field.
3018 txq->stats.opackets += ntcp;
3019 txq->stats.obytes += dlen + vlan + ntcp * inlen;
3021 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3022 loc->wqe_last = wqe;
3023 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_TSO, olx);
3024 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 1, olx);
3025 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3026 txq->wqe_ci += (ds + 3) / 4;
3027 loc->wqe_free -= (ds + 3) / 4;
3028 return MLX5_TXCMP_CODE_MULTI;
3032 * Tx one packet function for multi-segment SEND. Supports all
3033 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
3034 * sends one packet per WQE, without any data inlining in
3037 * This routine is responsible for storing processed mbufs
3038 * into the elts ring buffer and updating elts_head.
3041 * Pointer to TX queue structure.
3043 * Pointer to burst routine local context.
3045 * Configured Tx offloads mask. It is fully defined at
3046 * compile time and may be used for optimization.
3049 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3050 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3051 * Local context variables partially updated.
3053 static __rte_always_inline enum mlx5_txcmp_code
3054 mlx5_tx_packet_multi_send(struct mlx5_txq_data *restrict txq,
3055 struct mlx5_txq_local *restrict loc,
3058 struct mlx5_wqe_dseg *restrict dseg;
3059 struct mlx5_wqe *restrict wqe;
3060 unsigned int ds, nseg;
3062 assert(NB_SEGS(loc->mbuf) > 1);
3064 * No inlining at all - this means that saving CPU cycles
3065 * was prioritized at configuration time, so we should not
3066 * copy any packet data to the WQE.
3068 nseg = NB_SEGS(loc->mbuf);
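/*
 * The WQE needs one Control Segment, one Ethernet Segment and one
 * pointer Data Segment per mbuf segment, i.e. nseg + 2 16-byte
 * segments in total.
 */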
3070 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3071 return MLX5_TXCMP_CODE_EXIT;
3072 /* Check for maximal WQE size. */
3073 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3074 return MLX5_TXCMP_CODE_ERROR;
3076 * Some Tx offloads may cause an error if the
3077 * packet is not long enough; check against the
3078 * assumed minimal length.
3080 if (rte_pktmbuf_pkt_len(loc->mbuf) <= MLX5_ESEG_MIN_INLINE_SIZE)
3081 return MLX5_TXCMP_CODE_ERROR;
3082 #ifdef MLX5_PMD_SOFT_COUNTERS
3083 /* Update sent data bytes counter. */
3084 txq->stats.obytes += rte_pktmbuf_pkt_len(loc->mbuf);
3085 if (MLX5_TXOFF_CONFIG(VLAN) &&
3086 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3087 txq->stats.obytes += sizeof(struct rte_vlan_hdr);
3090 * SEND WQE, one WQEBB:
3091 * - Control Segment, SEND opcode
3092 * - Ethernet Segment, optional VLAN, no inline
3093 * - Data Segments, pointer only type
3095 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3096 loc->wqe_last = wqe;
3097 mlx5_tx_cseg_init(txq, loc, wqe, ds, MLX5_OPCODE_SEND, olx);
3098 mlx5_tx_eseg_none(txq, loc, wqe, olx);
3099 dseg = &wqe->dseg[0];
3101 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
3102 struct rte_mbuf *mbuf;
3105 * Zero length segment found, we have to
3106 * correct the total size of the WQE in segments.
3107 * It is supposed to be a rare occasion, so
3108 * in the normal case (no zero length segments)
3109 * we avoid extra writes to the Control
3113 wqe->cseg.sq_ds -= RTE_BE32(1);
3115 loc->mbuf = mbuf->next;
3116 rte_pktmbuf_free_seg(mbuf);
3122 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3123 rte_pktmbuf_data_len(loc->mbuf), olx);
3124 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3129 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3130 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3131 loc->mbuf = loc->mbuf->next;
3134 txq->wqe_ci += (ds + 3) / 4;
3135 loc->wqe_free -= (ds + 3) / 4;
3136 return MLX5_TXCMP_CODE_MULTI;
3140 * Tx one packet function for multi-segment SEND. Supports all
3141 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
3142 * sends one packet per WQE, with data inlining in
3143 * Ethernet Segment and minimal Data Segments.
3145 * This routine is responsible for storing processed mbufs
3146 * into the elts ring buffer and updating elts_head.
3149 * Pointer to TX queue structure.
3151 * Pointer to burst routine local context.
3153 * Configured Tx offloads mask. It is fully defined at
3154 * compile time and may be used for optimization.
3157 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3158 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3159 * Local context variables partially updated.
3161 static __rte_always_inline enum mlx5_txcmp_code
3162 mlx5_tx_packet_multi_inline(struct mlx5_txq_data *restrict txq,
3163 struct mlx5_txq_local *restrict loc,
3166 struct mlx5_wqe *restrict wqe;
3167 unsigned int ds, inlen, dlen, vlan = 0;
3169 assert(MLX5_TXOFF_CONFIG(INLINE));
3170 assert(NB_SEGS(loc->mbuf) > 1);
3172 * First calculate data length to be inlined
3173 * to estimate the required space for WQE.
3175 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
3176 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3177 vlan = sizeof(struct rte_vlan_hdr);
3178 inlen = dlen + vlan;
3179 /* Check against minimal length. */
3180 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
3181 return MLX5_TXCMP_CODE_ERROR;
3182 assert(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
3183 if (inlen > txq->inlen_send) {
3184 struct rte_mbuf *mbuf;
3189 * Packet length exceeds the allowed inline
3190 * data length, check whether the minimal
3191 * inlining is required.
3193 if (txq->inlen_mode) {
3194 assert(txq->inlen_mode >= MLX5_ESEG_MIN_INLINE_SIZE);
3195 assert(txq->inlen_mode <= txq->inlen_send);
3196 inlen = txq->inlen_mode;
3198 if (!vlan || txq->vlan_en) {
3200 * VLAN insertion will be done inside by HW.
3201 * It is not the most efficient - the VLAN flag is
3202 * checked twice, but we must compute the
3203 * inlining length correctly and take into
3204 * account the VLAN header being inserted.
3206 return mlx5_tx_packet_multi_send
3209 inlen = MLX5_ESEG_MIN_INLINE_SIZE;
3212 * Now we know the minimal amount of data requested
3213 * to inline. Check whether we should inline the buffers
3214 * from the beginning of the chain to eliminate some mbufs.
3217 nxlen = rte_pktmbuf_data_len(mbuf);
3218 if (unlikely(nxlen <= txq->inlen_send)) {
3219 /* We can inline first mbuf at least. */
3220 if (nxlen < inlen) {
3223 /* Scan mbufs till inlen filled. */
3228 nxlen = rte_pktmbuf_data_len(mbuf);
3230 } while (unlikely(nxlen < inlen));
3231 if (unlikely(nxlen > txq->inlen_send)) {
3232 /* We cannot inline entire mbuf. */
3233 smlen = inlen - smlen;
3234 start = rte_pktmbuf_mtod_offset
3235 (mbuf, uintptr_t, smlen);
3242 /* There should be no end of packet here. */
3244 nxlen = inlen + rte_pktmbuf_data_len(mbuf);
3245 } while (unlikely(nxlen < txq->inlen_send));
3247 start = rte_pktmbuf_mtod(mbuf, uintptr_t);
3249 * Check whether we can do inline to align start
3250 * address of data buffer to cacheline.
3253 start = (~start + 1) & (RTE_CACHE_LINE_SIZE - 1);
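/*
 * In the line above, (~start + 1) is the two's-complement negation of
 * start, so masking it with (RTE_CACHE_LINE_SIZE - 1) yields the number
 * of bytes needed to reach the next cacheline boundary.
 */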
3254 if (unlikely(start)) {
3256 if (start <= txq->inlen_send)
3261 * Check whether there are enough free WQEBBs:
3263 * - Ethernet Segment
3264 * - First Segment of inlined Ethernet data
3265 * - ... data continued ...
3266 * - Data Segments of pointer/min inline type
3268 * Estimate the number of Data Segments conservatively,
3269 * supposing that no mbufs are freed during inlining.
3271 assert(inlen <= txq->inlen_send);
3272 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
3273 MLX5_ESEG_MIN_INLINE_SIZE +
3275 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3276 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3277 return MLX5_TXCMP_CODE_EXIT;
3278 /* Check for maximal WQE size. */
3279 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3280 return MLX5_TXCMP_CODE_ERROR;
3281 #ifdef MLX5_PMD_SOFT_COUNTERS
3282 /* Update sent data bytes/packets counters. */
3283 txq->stats.obytes += dlen + vlan;
3285 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3286 loc->wqe_last = wqe;
3287 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_SEND, olx);
3288 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 0, olx);
3289 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3290 txq->wqe_ci += (ds + 3) / 4;
3291 loc->wqe_free -= (ds + 3) / 4;
3292 return MLX5_TXCMP_CODE_MULTI;
3296 * Tx burst function for multi-segment packets. Supports all
3297 * types of Tx offloads, uses MLX5_OPCODE_SEND/TSO to build WQEs,
3298 * sends one packet per WQE. The function stops sending if it
3299 * encounters a single-segment packet.
3301 * This routine is responsible for storing processed mbufs
3302 * into the elts ring buffer and updating elts_head.
3305 * Pointer to TX queue structure.
3307 * Packets to transmit.
3309 * Number of packets in array.
3311 * Pointer to burst routine local context.
3313 * Configured Tx offloads mask. It is fully defined at
3314 * compile time and may be used for optimization.
3317 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3318 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3319 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
3320 * MLX5_TXCMP_CODE_TSO - TSO single-segment packet encountered.
3321 * Local context variables updated.
3323 static __rte_always_inline enum mlx5_txcmp_code
3324 mlx5_tx_burst_mseg(struct mlx5_txq_data *restrict txq,
3325 struct rte_mbuf **restrict pkts,
3326 unsigned int pkts_n,
3327 struct mlx5_txq_local *restrict loc,
3330 assert(loc->elts_free && loc->wqe_free);
3331 assert(pkts_n > loc->pkts_sent);
3332 pkts += loc->pkts_sent + 1;
3333 pkts_n -= loc->pkts_sent;
3335 enum mlx5_txcmp_code ret;
3337 assert(NB_SEGS(loc->mbuf) > 1);
3339 * Estimate the number of free elts quickly but
3340 * conservatively. Some segment may be fully inlined
3341 * and freed, ignore this here - precise estimation
3344 if (loc->elts_free < NB_SEGS(loc->mbuf))
3345 return MLX5_TXCMP_CODE_EXIT;
3346 if (MLX5_TXOFF_CONFIG(TSO) &&
3347 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)) {
3348 /* Proceed with multi-segment TSO. */
3349 ret = mlx5_tx_packet_multi_tso(txq, loc, olx);
3350 } else if (MLX5_TXOFF_CONFIG(INLINE)) {
3351 /* Proceed with multi-segment SEND with inlining. */
3352 ret = mlx5_tx_packet_multi_inline(txq, loc, olx);
3354 /* Proceed with multi-segment SEND w/o inlining. */
3355 ret = mlx5_tx_packet_multi_send(txq, loc, olx);
3357 if (ret == MLX5_TXCMP_CODE_EXIT)
3358 return MLX5_TXCMP_CODE_EXIT;
3359 if (ret == MLX5_TXCMP_CODE_ERROR)
3360 return MLX5_TXCMP_CODE_ERROR;
3361 /* WQE is built, go to the next packet. */
3364 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3365 return MLX5_TXCMP_CODE_EXIT;
3366 loc->mbuf = *pkts++;
3368 rte_prefetch0(*pkts);
3369 if (likely(NB_SEGS(loc->mbuf) > 1))
3371 /* Here ends the series of multi-segment packets. */
3372 if (MLX5_TXOFF_CONFIG(TSO) &&
3373 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
3374 return MLX5_TXCMP_CODE_TSO;
3375 return MLX5_TXCMP_CODE_SINGLE;
3381 * Tx burst function for single-segment packets with TSO.
3382 * Supports all types of Tx offloads, except multi-packets.
3383 * Uses MLX5_OPCODE_TSO to build WQEs, sends one packet per WQE.
3384 * The function stops sending if it encounters a multi-segment
3385 * packet or a packet without TSO requested.
3387 * The routine is responsible for storing processed mbufs
3388 * into the elts ring buffer and updating elts_head if inline
3389 * offload is requested, due to possible early freeing
3390 * of the inlined mbufs (cannot store the pkts array in elts
3394 * Pointer to TX queue structure.
3396 * Packets to transmit.
3398 * Number of packets in array.
3400 * Pointer to burst routine local context.
3402 * Configured Tx offloads mask. It is fully defined at
3403 * compile time and may be used for optimization.
3406 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3407 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3408 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
3409 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
3410 * Local context variables updated.
3412 static __rte_always_inline enum mlx5_txcmp_code
3413 mlx5_tx_burst_tso(struct mlx5_txq_data *restrict txq,
3414 struct rte_mbuf **restrict pkts,
3415 unsigned int pkts_n,
3416 struct mlx5_txq_local *restrict loc,
3419 assert(loc->elts_free && loc->wqe_free);
3420 assert(pkts_n > loc->pkts_sent);
3421 pkts += loc->pkts_sent + 1;
3422 pkts_n -= loc->pkts_sent;
3424 struct mlx5_wqe_dseg *restrict dseg;
3425 struct mlx5_wqe *restrict wqe;
3426 unsigned int ds, dlen, hlen, ntcp, vlan = 0;
3429 assert(NB_SEGS(loc->mbuf) == 1);
3430 dlen = rte_pktmbuf_data_len(loc->mbuf);
3431 if (MLX5_TXOFF_CONFIG(VLAN) &&
3432 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
3433 vlan = sizeof(struct rte_vlan_hdr);
3436 * First calculate the WQE size to check
3437 * whether we have enough space in ring buffer.
3439 hlen = loc->mbuf->l2_len + vlan +
3440 loc->mbuf->l3_len + loc->mbuf->l4_len;
3441 if (unlikely((!hlen || !loc->mbuf->tso_segsz)))
3442 return MLX5_TXCMP_CODE_ERROR;
3443 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
3444 hlen += loc->mbuf->outer_l2_len +
3445 loc->mbuf->outer_l3_len;
3446 /* Segment must contain all TSO headers. */
3447 if (unlikely(hlen > MLX5_MAX_TSO_HEADER ||
3448 hlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
3449 hlen > (dlen + vlan)))
3450 return MLX5_TXCMP_CODE_ERROR;
3452 * Check whether there are enough free WQEBBs:
3454 * - Ethernet Segment
3455 * - First Segment of inlined Ethernet data
3456 * - ... data continued ...
3457 * - Finishing Data Segment of pointer type
3459 ds = 4 + (hlen - MLX5_ESEG_MIN_INLINE_SIZE +
3460 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
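/*
 * For example (assumed values): hlen = 54 header bytes to inline gives
 * ds = 4 + (54 - 18 + 15) / 16 = 7 segments, i.e. 2 WQEBBs - Control
 * Segment, Ethernet Segment with the headers inlined, and the trailing
 * pointer Data Segment.
 */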
3461 if (loc->wqe_free < ((ds + 3) / 4))
3462 return MLX5_TXCMP_CODE_EXIT;
3463 #ifdef MLX5_PMD_SOFT_COUNTERS
3464 /* Update sent data bytes/packets counters. */
3465 ntcp = (dlen + vlan - hlen +
3466 loc->mbuf->tso_segsz - 1) /
3467 loc->mbuf->tso_segsz;
3469 * One will be added for mbuf itself at the end
3470 * of the mlx5_tx_burst from loc->pkts_sent field.
3473 txq->stats.opackets += ntcp;
3474 txq->stats.obytes += dlen + vlan + ntcp * hlen;
3477 * Build the TSO WQE:
3479 * - Ethernet Segment with hlen bytes inlined
3480 * - Data Segment of pointer type
3482 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3483 loc->wqe_last = wqe;
3484 mlx5_tx_cseg_init(txq, loc, wqe, ds,
3485 MLX5_OPCODE_TSO, olx);
3486 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan, hlen, 1, olx);
3487 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) + hlen - vlan;
3488 dlen -= hlen - vlan;
3489 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
3491 * WQE is built, update the loop parameters
3492 * and go to the next packet.
3494 txq->wqe_ci += (ds + 3) / 4;
3495 loc->wqe_free -= (ds + 3) / 4;
3496 if (MLX5_TXOFF_CONFIG(INLINE))
3497 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3501 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3502 return MLX5_TXCMP_CODE_EXIT;
3503 loc->mbuf = *pkts++;
3505 rte_prefetch0(*pkts);
3506 if (MLX5_TXOFF_CONFIG(MULTI) &&
3507 unlikely(NB_SEGS(loc->mbuf) > 1))
3508 return MLX5_TXCMP_CODE_MULTI;
3509 if (unlikely(!(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)))
3510 return MLX5_TXCMP_CODE_SINGLE;
3511 /* Continue with the next TSO packet. */
3517 * Analyze the packet and select the best method to send.
3520 * Pointer to TX queue structure.
3522 * Pointer to burst routine local context.
3524 * Configured Tx offloads mask. It is fully defined at
3525 * compile time and may be used for optimization.
3527 * The predefined flag whether to do the complete check for
3528 * multi-segment packets and TSO.
3531 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
3532 * MLX5_TXCMP_CODE_TSO - TSO required, use TSO/LSO.
3533 * MLX5_TXCMP_CODE_SINGLE - single-segment packet, use SEND.
3534 * MLX5_TXCMP_CODE_EMPW - single-segment packet, use MPW.
3536 static __rte_always_inline enum mlx5_txcmp_code
3537 mlx5_tx_able_to_empw(struct mlx5_txq_data *restrict txq,
3538 struct mlx5_txq_local *restrict loc,
3542 /* Check for multi-segment packet. */
3544 MLX5_TXOFF_CONFIG(MULTI) &&
3545 unlikely(NB_SEGS(loc->mbuf) > 1))
3546 return MLX5_TXCMP_CODE_MULTI;
3547 /* Check for TSO packet. */
3549 MLX5_TXOFF_CONFIG(TSO) &&
3550 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
3551 return MLX5_TXCMP_CODE_TSO;
3552 /* Check if eMPW is enabled at all. */
3553 if (!MLX5_TXOFF_CONFIG(EMPW))
3554 return MLX5_TXCMP_CODE_SINGLE;
3555 /* Check if eMPW can be engaged. */
3556 if (MLX5_TXOFF_CONFIG(VLAN) &&
3557 unlikely(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) &&
3558 (!MLX5_TXOFF_CONFIG(INLINE) ||
3559 unlikely((rte_pktmbuf_data_len(loc->mbuf) +
3560 sizeof(struct rte_vlan_hdr)) > txq->inlen_empw))) {
3562 * eMPW does not support the VLAN insertion offload,
3563 * so we would have to inline the entire packet, but
3564 * the packet is too long for inlining.
3566 return MLX5_TXCMP_CODE_SINGLE;
3568 return MLX5_TXCMP_CODE_EMPW;
3572 * Check whether the next packet attributes match the eMPW batch ones.
3575 * Pointer to TX queue structure.
3577 * Pointer to Ethernet Segment of eMPW batch.
3579 * Pointer to burst routine local context.
3581 * Configured Tx offloads mask. It is fully defined at
3582 * compile time and may be used for optimization.
3585 * true - packet matches the eMPW batch attributes.
3586 * false - no match, eMPW should be restarted.
3588 static __rte_always_inline bool
3589 mlx5_tx_match_empw(struct mlx5_txq_data *restrict txq __rte_unused,
3590 struct mlx5_wqe_eseg *restrict es,
3591 struct mlx5_txq_local *restrict loc,
3594 uint8_t swp_flags = 0;
3596 /* Compare the checksum flags, if any. */
3597 if (MLX5_TXOFF_CONFIG(CSUM) &&
3598 txq_ol_cksum_to_cs(loc->mbuf) != es->cs_flags)
3600 /* Compare the Software Parser offsets and flags. */
3601 if (MLX5_TXOFF_CONFIG(SWP) &&
3602 (es->swp_offs != txq_mbuf_to_swp(loc, &swp_flags, olx) ||
3603 es->swp_flags != swp_flags))
3605 /* Fill metadata field if needed. */
3606 if (MLX5_TXOFF_CONFIG(METADATA) &&
3607 es->metadata != (loc->mbuf->ol_flags & PKT_TX_METADATA ?
3608 loc->mbuf->tx_metadata : 0))
3610 /* There must be no VLAN packets in eMPW loop. */
3611 if (MLX5_TXOFF_CONFIG(VLAN))
3612 assert(!(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT));
3617 * Update send loop variables and WQE for eMPW loop
3618 * without data inlining. Number of Data Segments is
3619 * equal to the number of sent packets.
3622 * Pointer to TX queue structure.
3624 * Pointer to burst routine local context.
3626 * Number of packets/Data Segments (one Data Segment per packet).
3628 * Accumulated statistics, bytes sent.
3630 * Configured Tx offloads mask. It is fully defined at
3631 * compile time and may be used for optimization.
3634 * true - packet matches the eMPW batch attributes.
3635 * false - no match, eMPW should be restarted.
3637 static __rte_always_inline void
3638 mlx5_tx_sdone_empw(struct mlx5_txq_data *restrict txq,
3639 struct mlx5_txq_local *restrict loc,
3642 unsigned int olx __rte_unused)
3644 assert(!MLX5_TXOFF_CONFIG(INLINE));
3645 #ifdef MLX5_PMD_SOFT_COUNTERS
3646 /* Update sent data bytes counter. */
3647 txq->stats.obytes += slen;
3651 loc->elts_free -= ds;
3652 loc->pkts_sent += ds;
3654 loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3655 txq->wqe_ci += (ds + 3) / 4;
3656 loc->wqe_free -= (ds + 3) / 4;
3660 * Update send loop variables and WQE for eMPW loop
3661 * with data inlining. Takes the size of the descriptors
3662 * and data pushed to the WQE.
3665 * Pointer to TX queue structure.
3667 * Pointer to burst routine local context.
3669 * Total size of descriptor/data in bytes.
3671 * Accumulated statistics, data bytes sent.
3673 * Configured Tx offloads mask. It is fully defined at
3674 * compile time and may be used for optimization.
3677 * true - packet matches the eMPW batch attributes.
3678 * false - no match, eMPW should be restarted.
3680 static __rte_always_inline void
3681 mlx5_tx_idone_empw(struct mlx5_txq_data *restrict txq,
3682 struct mlx5_txq_local *restrict loc,
3685 unsigned int olx __rte_unused)
3687 assert(MLX5_TXOFF_CONFIG(INLINE));
3688 assert((len % MLX5_WSEG_SIZE) == 0);
3689 #ifdef MLX5_PMD_SOFT_COUNTERS
3690 /* Update sent data bytes counter. */
3691 txq->stats.obytes += slen;
3695 len = len / MLX5_WSEG_SIZE + 2;
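/*
 * The statement above converts the pushed descriptor/data bytes into a
 * count of 16-byte segments and adds 2 for the title Control and
 * Ethernet Segments of the eMPW WQE.
 */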
3696 loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | len);
3697 txq->wqe_ci += (len + 3) / 4;
3698 loc->wqe_free -= (len + 3) / 4;
3702 * The set of Tx burst functions for single-segment packets
3703 * without TSO and with Multi-Packet Writing feature support.
3704 * Supports all types of Tx offloads, except multi-packets
3707 * Uses MLX5_OPCODE_EMPW to build WQEs if possible and sends
3708 * as many packets per WQE as it can. If eMPW is not configured
3709 * or the packet cannot be sent with eMPW (VLAN insertion), the
3710 * ordinary SEND opcode is used and only one packet is placed
3713 * The functions stop sending if they encounter a multi-segment
3714 * packet or a packet with TSO requested.
3716 * The routines are responsible for storing processed mbufs
3717 * into the elts ring buffer and updating elts_head if inlining
3718 * offload is requested. Otherwise, copying the mbufs to elts
3719 * can be postponed and completed at the end of the burst routine.
3722 * Pointer to TX queue structure.
3724 * Packets to transmit.
3726 * Number of packets in array.
3728 * Pointer to burst routine local context.
3730 * Configured Tx offloads mask. It is fully defined at
3731 * compile time and may be used for optimization.
3734 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3735 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3736 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
3737 * MLX5_TXCMP_CODE_TSO - TSO packet encountered.
3738 * MLX5_TXCMP_CODE_SINGLE - used inside functions set.
3739 * MLX5_TXCMP_CODE_EMPW - used inside functions set.
3741 * Local context variables updated.
3744 * The routine sends packets with MLX5_OPCODE_EMPW
3745 * without inlining, this is a dedicated optimized branch.
3746 * No VLAN insertion is supported.
3748 static __rte_always_inline enum mlx5_txcmp_code
3749 mlx5_tx_burst_empw_simple(struct mlx5_txq_data *restrict txq,
3750 struct rte_mbuf **restrict pkts,
3751 unsigned int pkts_n,
3752 struct mlx5_txq_local *restrict loc,
3756 * This subroutine is part of mlx5_tx_burst_single()
3757 * and sends single-segment packets with the eMPW opcode
3758 * without data inlining.
3760 assert(!MLX5_TXOFF_CONFIG(INLINE));
3761 assert(MLX5_TXOFF_CONFIG(EMPW));
3762 assert(loc->elts_free && loc->wqe_free);
3763 assert(pkts_n > loc->pkts_sent);
3764 static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
3765 pkts += loc->pkts_sent + 1;
3766 pkts_n -= loc->pkts_sent;
3768 struct mlx5_wqe_dseg *restrict dseg;
3769 struct mlx5_wqe_eseg *restrict eseg;
3770 enum mlx5_txcmp_code ret;
3771 unsigned int part, loop;
3772 unsigned int slen = 0;
3775 part = RTE_MIN(pkts_n, MLX5_EMPW_MAX_PACKETS);
3776 if (unlikely(loc->elts_free < part)) {
3777 /* We do not have enough elts to store all mbufs. */
3778 if (unlikely(loc->elts_free < MLX5_EMPW_MIN_PACKETS))
3779 return MLX5_TXCMP_CODE_EXIT;
3780 /* But we are still able to send at least a minimal eMPW. */
3781 part = loc->elts_free;
3783 /* Check whether we have enough WQEs */
3784 if (unlikely(loc->wqe_free < ((2 + part + 3) / 4))) {
3785 if (unlikely(loc->wqe_free <
3786 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
3787 return MLX5_TXCMP_CODE_EXIT;
3788 part = (loc->wqe_free * 4) - 2;
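/*
 * Each free WQEBB provides four 16-byte segment slots; two of them are
 * taken by the title Control and Ethernet Segments, so the remainder
 * computed above is the number of packets (one pointer Data Segment
 * each) this eMPW can carry.
 */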
3790 if (likely(part > 1))
3791 rte_prefetch0(*pkts);
3792 loc->wqe_last = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3794 * Build eMPW title WQEBB:
3795 * - Control Segment, eMPW opcode
3796 * - Ethernet Segment, no inline
3798 mlx5_tx_cseg_init(txq, loc, loc->wqe_last, part + 2,
3799 MLX5_OPCODE_ENHANCED_MPSW, olx);
3800 mlx5_tx_eseg_none(txq, loc, loc->wqe_last,
3801 olx & ~MLX5_TXOFF_CONFIG_VLAN);
3802 eseg = &loc->wqe_last->eseg;
3803 dseg = &loc->wqe_last->dseg[0];
3806 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
3807 #ifdef MLX5_PMD_SOFT_COUNTERS
3808 /* Update sent data bytes counter. */
3813 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3815 if (unlikely(--loop == 0))
3817 loc->mbuf = *pkts++;
3818 if (likely(loop > 1))
3819 rte_prefetch0(*pkts);
3820 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
3822 * Unroll the completion code to avoid
3823 * returning a variable value - it results in
3824 * unoptimized sequential checking in the caller.
3826 if (ret == MLX5_TXCMP_CODE_MULTI) {
3828 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3829 if (unlikely(!loc->elts_free ||
3831 return MLX5_TXCMP_CODE_EXIT;
3832 return MLX5_TXCMP_CODE_MULTI;
3834 if (ret == MLX5_TXCMP_CODE_TSO) {
3836 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3837 if (unlikely(!loc->elts_free ||
3839 return MLX5_TXCMP_CODE_EXIT;
3840 return MLX5_TXCMP_CODE_TSO;
3842 if (ret == MLX5_TXCMP_CODE_SINGLE) {
3844 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3845 if (unlikely(!loc->elts_free ||
3847 return MLX5_TXCMP_CODE_EXIT;
3848 return MLX5_TXCMP_CODE_SINGLE;
3850 if (ret != MLX5_TXCMP_CODE_EMPW) {
3853 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3854 return MLX5_TXCMP_CODE_ERROR;
3857 * Check whether packet parameters coincide
3858 * within assumed eMPW batch:
3859 * - check sum settings
3861 * - software parser settings
3863 if (!mlx5_tx_match_empw(txq, eseg, loc, olx)) {
3866 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3867 if (unlikely(!loc->elts_free ||
3869 return MLX5_TXCMP_CODE_EXIT;
3872 /* Packet attributes match, continue the same eMPW. */
3874 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3875 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3877 /* eMPW is built successfully, update loop parameters. */
3879 assert(pkts_n >= part);
3880 #ifdef MLX5_PMD_SOFT_COUNTERS
3881 /* Update sent data bytes counter. */
3882 txq->stats.obytes += slen;
3884 loc->elts_free -= part;
3885 loc->pkts_sent += part;
3886 txq->wqe_ci += (2 + part + 3) / 4;
3887 loc->wqe_free -= (2 + part + 3) / 4;
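/*
 * For instance (illustrative): part = 10 packets gives 2 + 10 = 12
 * segments, i.e. (12 + 3) / 4 = 3 WQEBBs consumed by this eMPW.
 */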
3889 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3890 return MLX5_TXCMP_CODE_EXIT;
3891 loc->mbuf = *pkts++;
3892 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
3893 if (unlikely(ret != MLX5_TXCMP_CODE_EMPW))
3895 /* Continue sending eMPW batches. */
3901 * The routine sends packets with MLX5_OPCODE_EMPW
3902 * with inlining, optionally supports VLAN insertion.
3904 static __rte_always_inline enum mlx5_txcmp_code
3905 mlx5_tx_burst_empw_inline(struct mlx5_txq_data *restrict txq,
3906 struct rte_mbuf **restrict pkts,
3907 unsigned int pkts_n,
3908 struct mlx5_txq_local *restrict loc,
3912 * This subroutine is part of mlx5_tx_burst_single()
3913 * and sends single-segment packets with the eMPW opcode
3914 * with data inlining.
3916 assert(MLX5_TXOFF_CONFIG(INLINE));
3917 assert(MLX5_TXOFF_CONFIG(EMPW));
3918 assert(loc->elts_free && loc->wqe_free);
3919 assert(pkts_n > loc->pkts_sent);
3920 static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
3921 pkts += loc->pkts_sent + 1;
3922 pkts_n -= loc->pkts_sent;
3924 struct mlx5_wqe_dseg *restrict dseg;
3925 struct mlx5_wqe_eseg *restrict eseg;
3926 enum mlx5_txcmp_code ret;
3927 unsigned int room, part;
3928 unsigned int slen = 0;
3931 /* Check whether we have the minimal amount of WQEs. */
3932 if (unlikely(loc->wqe_free <
3933 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
3934 return MLX5_TXCMP_CODE_EXIT;
3935 if (likely(pkts_n > 1))
3936 rte_prefetch0(*pkts);
3937 loc->wqe_last = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3939 * Build eMPW title WQEBB:
3940 * - Control Segment, eMPW opcode, zero DS
3941 * - Ethernet Segment, no inline
3943 mlx5_tx_cseg_init(txq, loc, loc->wqe_last, 0,
3944 MLX5_OPCODE_ENHANCED_MPSW, olx);
3945 mlx5_tx_eseg_none(txq, loc, loc->wqe_last,
3946 olx & ~MLX5_TXOFF_CONFIG_VLAN);
3947 eseg = &loc->wqe_last->eseg;
3948 dseg = &loc->wqe_last->dseg[0];
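/*
 * Compute the room in bytes left for Data Segments in this eMPW: the
 * usable WQE space (capped by the maximal WQE size and by the free
 * WQEBBs) minus the title Control and Ethernet Segments.
 */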
3949 room = RTE_MIN(MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE,
3950 loc->wqe_free) * MLX5_WQE_SIZE -
3951 MLX5_WQE_CSEG_SIZE -
3953 /* Build WQE till we have space, packets and resources. */
3956 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
3957 uint8_t *dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
3960 assert(room >= MLX5_WQE_DSEG_SIZE);
3961 assert((room % MLX5_WQE_DSEG_SIZE) == 0);
3962 assert((uintptr_t)dseg < (uintptr_t)txq->wqes_end);
3964 * Some Tx offloads may cause an error if the
3965 * packet is not long enough; check against the
3966 * assumed minimal length.
3968 if (unlikely(dlen <= MLX5_ESEG_MIN_INLINE_SIZE)) {
3970 if (unlikely(!part))
3971 return MLX5_TXCMP_CODE_ERROR;
3973 * We have some successfully built
3974 * packet Data Segments to send.
3976 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
3977 return MLX5_TXCMP_CODE_ERROR;
3979 /* Inline or not inline - that's the Question. */
3980 if (dlen > txq->inlen_empw)
3982 /* Inline entire packet, optional VLAN insertion. */
3983 tlen = sizeof(dseg->bcount) + dlen;
3984 if (MLX5_TXOFF_CONFIG(VLAN) &&
3985 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
3987 * The packet length must be checked in
3988 * mlx5_tx_able_to_empw() and the packet is
3989 * guaranteed to fit into the inline length.
3991 assert((dlen + sizeof(struct rte_vlan_hdr)) <=
3993 tlen += sizeof(struct rte_vlan_hdr);
3996 dseg = mlx5_tx_dseg_vlan(txq, loc, dseg,
3998 #ifdef MLX5_PMD_SOFT_COUNTERS
3999 /* Update sent data bytes counter. */
4000 slen += sizeof(struct rte_vlan_hdr);
4005 dseg = mlx5_tx_dseg_empw(txq, loc, dseg,
4008 tlen = RTE_ALIGN(tlen, MLX5_WSEG_SIZE);
4009 assert(room >= tlen);
4012 * Packet data are completely inlined,
4013 * free the packet immediately.
4015 rte_pktmbuf_free_seg(loc->mbuf);
4019 * VLAN packets that cannot be inlined are
4020 * processed outside of this routine.
4022 assert(room >= MLX5_WQE_DSEG_SIZE);
4023 if (MLX5_TXOFF_CONFIG(VLAN))
4024 assert(!(loc->mbuf->ol_flags &
4026 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
4027 /* We have to store mbuf in elts.*/
4028 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
4029 room -= MLX5_WQE_DSEG_SIZE;
4030 /* Ring buffer wraparound is checked at the loop end.*/
4033 #ifdef MLX5_PMD_SOFT_COUNTERS
4034 /* Update sent data bytes counter. */
4040 if (unlikely(!pkts_n || !loc->elts_free)) {
4042 * We have no resources/packets to
4043 * continue building descriptors.
4046 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4047 return MLX5_TXCMP_CODE_EXIT;
4049 /* Check if we have minimal room left. */
4050 if (room < MLX5_WQE_DSEG_SIZE) {
4052 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4055 loc->mbuf = *pkts++;
4056 if (likely(pkts_n > 1))
4057 rte_prefetch0(*pkts);
4058 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4060 * Unroll the completion code to avoid
4061 * returning a variable value - it results in
4062 * unoptimized sequential checking in the caller.
4064 if (ret == MLX5_TXCMP_CODE_MULTI) {
4066 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4067 if (unlikely(!loc->elts_free ||
4069 return MLX5_TXCMP_CODE_EXIT;
4070 return MLX5_TXCMP_CODE_MULTI;
4072 if (ret == MLX5_TXCMP_CODE_TSO) {
4074 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4075 if (unlikely(!loc->elts_free ||
4077 return MLX5_TXCMP_CODE_EXIT;
4078 return MLX5_TXCMP_CODE_TSO;
4080 if (ret == MLX5_TXCMP_CODE_SINGLE) {
4082 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4083 if (unlikely(!loc->elts_free ||
4085 return MLX5_TXCMP_CODE_EXIT;
4086 return MLX5_TXCMP_CODE_SINGLE;
4088 if (ret != MLX5_TXCMP_CODE_EMPW) {
4091 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4092 return MLX5_TXCMP_CODE_ERROR;
4095 * Check whether packet parameters coincide
4096 * within assumed eMPW batch:
4097 * - check sum settings
4099 * - software parser settings
4101 if (!mlx5_tx_match_empw(txq, eseg, loc, olx))
4103 /* Packet attributes match, continue the same eMPW. */
4104 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
4105 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
4108 * We get here to close an existing eMPW
4109 * session and start the new one.
4113 if (unlikely(!part))
4114 return MLX5_TXCMP_CODE_EXIT;
4115 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
4116 if (unlikely(!loc->elts_free ||
4118 return MLX5_TXCMP_CODE_EXIT;
4125 * The routine sends packets with ordinary MLX5_OPCODE_SEND.
4126 * Data inlining and VLAN insertion are supported.
4128 static __rte_always_inline enum mlx5_txcmp_code
4129 mlx5_tx_burst_single_send(struct mlx5_txq_data *restrict txq,
4130 struct rte_mbuf **restrict pkts,
4131 unsigned int pkts_n,
4132 struct mlx5_txq_local *restrict loc,
4136 * This subroutine is part of mlx5_tx_burst_single()
4137 * and sends single-segment packets with the SEND opcode.
4139 assert(loc->elts_free && loc->wqe_free);
4140 assert(pkts_n > loc->pkts_sent);
4141 pkts += loc->pkts_sent + 1;
4142 pkts_n -= loc->pkts_sent;
4144 struct mlx5_wqe *restrict wqe;
4145 enum mlx5_txcmp_code ret;
4147 assert(NB_SEGS(loc->mbuf) == 1);
4148 if (MLX5_TXOFF_CONFIG(INLINE)) {
4149 unsigned int inlen, vlan = 0;
4151 inlen = rte_pktmbuf_data_len(loc->mbuf);
4152 if (MLX5_TXOFF_CONFIG(VLAN) &&
4153 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
4154 vlan = sizeof(struct rte_vlan_hdr);
4156 static_assert((sizeof(struct rte_vlan_hdr) +
4157 sizeof(struct rte_ether_hdr)) ==
4158 MLX5_ESEG_MIN_INLINE_SIZE,
4159 "invalid min inline data size");
4162 * If inlining is enabled at configuration time,
4163 * the limit must not be less than the minimal size.
4164 * Otherwise we would have to do an extra check on the
4165 * data size to avoid crashes due to length overflow.
4167 assert(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
4168 if (inlen <= txq->inlen_send) {
4169 unsigned int seg_n, wqe_n;
4171 rte_prefetch0(rte_pktmbuf_mtod
4172 (loc->mbuf, uint8_t *));
4173 /* Check against minimal length. */
4174 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
4175 return MLX5_TXCMP_CODE_ERROR;
4177 * Completely inlined packet data WQE:
4178 * - Control Segment, SEND opcode
4179 * - Ethernet Segment, no VLAN insertion
4180 * - Data inlined, VLAN optionally inserted
4181 * - Alignment to MLX5_WSEG_SIZE
4182 * Have to estimate amount of WQEBBs
4184 seg_n = (inlen + 3 * MLX5_WSEG_SIZE -
4185 MLX5_ESEG_MIN_INLINE_SIZE +
4186 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
4187 /* Check if there are enough WQEBBs. */
4188 wqe_n = (seg_n + 3) / 4;
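/*
 * Example (values assumed): inlen = 60 data bytes to inline completely
 * gives seg_n = (60 + 48 - 18 + 15) / 16 = 6 segments and
 * wqe_n = (6 + 3) / 4 = 2 WQEBBs.
 */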
4189 if (wqe_n > loc->wqe_free)
4190 return MLX5_TXCMP_CODE_EXIT;
4191 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4192 loc->wqe_last = wqe;
4193 mlx5_tx_cseg_init(txq, loc, wqe, seg_n,
4194 MLX5_OPCODE_SEND, olx);
4195 mlx5_tx_eseg_data(txq, loc, wqe,
4196 vlan, inlen, 0, olx);
4197 txq->wqe_ci += wqe_n;
4198 loc->wqe_free -= wqe_n;
4200 * Packet data are completely inlined,
4201 * free the packet immediately.
4203 rte_pktmbuf_free_seg(loc->mbuf);
4204 } else if (!MLX5_TXOFF_CONFIG(EMPW) &&
4207 * If minimal inlining is requested, the eMPW
4208 * feature should be disabled because the data is
4209 * inlined into the Ethernet Segment, which can
4210 * not contain inlined data for eMPW since the
4211 * segment is shared by all packets.
4213 struct mlx5_wqe_dseg *restrict dseg;
4218 * The inline-mode settings require
4219 * inlining the specified amount of
4220 * data bytes into the Ethernet Segment.
4221 * We should check the free space in the
4222 * WQE ring buffer to inline only partially.
4224 assert(txq->inlen_send >= txq->inlen_mode);
4225 assert(inlen > txq->inlen_mode);
4226 assert(txq->inlen_mode >=
4227 MLX5_ESEG_MIN_INLINE_SIZE);
4229 * Check whether there are enough free WQEBBs:
4231 * - Ethernet Segment
4232 * - First Segment of inlined Ethernet data
4233 * - ... data continued ...
4234 * - Finishing Data Segment of pointer type
4236 ds = (MLX5_WQE_CSEG_SIZE +
4237 MLX5_WQE_ESEG_SIZE +
4238 MLX5_WQE_DSEG_SIZE +
4240 MLX5_ESEG_MIN_INLINE_SIZE +
4241 MLX5_WQE_DSEG_SIZE +
4242 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
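/*
 * The estimate above accounts for the title Control and Ethernet
 * Segments (with the first MLX5_ESEG_MIN_INLINE_SIZE bytes of inline
 * data), the remaining inlen_mode bytes of inlined data, and the
 * trailing pointer Data Segment, all rounded up to 16-byte units.
 */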
4243 if (loc->wqe_free < ((ds + 3) / 4))
4244 return MLX5_TXCMP_CODE_EXIT;
4246 * Build the ordinary SEND WQE:
4248 * - Ethernet Segment, inline inlen_mode bytes
4249 * - Data Segment of pointer type
4251 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4252 loc->wqe_last = wqe;
4253 mlx5_tx_cseg_init(txq, loc, wqe, ds,
4254 MLX5_OPCODE_SEND, olx);
4255 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan,
4258 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
4259 txq->inlen_mode - vlan;
4260 inlen -= txq->inlen_mode;
4261 mlx5_tx_dseg_ptr(txq, loc, dseg,
4264 * WQE is built, update the loop parameters
4265 * and go to the next packet.
4267 txq->wqe_ci += (ds + 3) / 4;
4268 loc->wqe_free -= (ds + 3) / 4;
4269 /* We have to store mbuf in elts.*/
4270 assert(MLX5_TXOFF_CONFIG(INLINE));
4271 txq->elts[txq->elts_head++ & txq->elts_m] =
4279 * Partially inlined packet data WQE: we have
4280 * some space in the title WQEBB, so we can fill it
4281 * with some packet data. It takes one WQEBB,
4282 * which is available - no extra space check:
4283 * - Control Segment, SEND opcode
4284 * - Ethernet Segment, no VLAN insertion
4285 * - MLX5_ESEG_MIN_INLINE_SIZE bytes of Data
4286 * - Data Segment, pointer type
4288 * We also get here if VLAN insertion is not
4289 * supported by HW and inlining is enabled.
4291 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4292 loc->wqe_last = wqe;
4293 mlx5_tx_cseg_init(txq, loc, wqe, 4,
4294 MLX5_OPCODE_SEND, olx);
4295 mlx5_tx_eseg_dmin(txq, loc, wqe, vlan, olx);
4296 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
4297 MLX5_ESEG_MIN_INLINE_SIZE - vlan;
4299 * The length check is performed above, by
4300 * comparing with txq->inlen_send. We should
4301 * not get overflow here.
4303 assert(inlen > MLX5_ESEG_MIN_INLINE_SIZE);
4304 dlen = inlen - MLX5_ESEG_MIN_INLINE_SIZE;
4305 mlx5_tx_dseg_ptr(txq, loc, &wqe->dseg[1],
4309 /* We have to store mbuf in elts.*/
4310 assert(MLX5_TXOFF_CONFIG(INLINE));
4311 txq->elts[txq->elts_head++ & txq->elts_m] =
4312 loc->mbuf;
4315 #ifdef MLX5_PMD_SOFT_COUNTERS
4316 /* Update sent data bytes counter. */
4317 txq->stats.obytes += vlan +
4318 rte_pktmbuf_data_len(loc->mbuf);
4321 /*
4322 * No inlining at all: saving CPU cycles was
4323 * prioritized at configuration time, so we do
4324 * not copy any packet data into the WQE.
4325 *
4326 * SEND WQE, one WQEBB:
4327 * - Control Segment, SEND opcode
4328 * - Ethernet Segment, optional VLAN, no inline
4329 * - Data Segment, pointer type
4330 */
4331 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4332 loc->wqe_last = wqe;
4333 mlx5_tx_cseg_init(txq, loc, wqe, 3,
4334 MLX5_OPCODE_SEND, olx);
4335 mlx5_tx_eseg_none(txq, loc, wqe, olx);
4336 mlx5_tx_dseg_ptr
4337 (txq, loc, &wqe->dseg[0],
4338 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
4339 rte_pktmbuf_data_len(loc->mbuf), olx);
4342 /*
4343 * We should not store the mbuf pointer in elts
4344 * if no inlining is configured; this is done by
4345 * the calling routine in a batch copy.
4346 */
4347 assert(!MLX5_TXOFF_CONFIG(INLINE));
4349 #ifdef MLX5_PMD_SOFT_COUNTERS
4350 /* Update sent data bytes counter. */
4351 txq->stats.obytes += rte_pktmbuf_data_len(loc->mbuf);
4352 if (MLX5_TXOFF_CONFIG(VLAN) &&
4353 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
4354 txq->stats.obytes +=
4355 sizeof(struct rte_vlan_hdr);
4360 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
4361 return MLX5_TXCMP_CODE_EXIT;
4362 loc->mbuf = *pkts++;
4363 if (pkts_n > 1)
4364 rte_prefetch0(*pkts);
4365 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4366 if (unlikely(ret != MLX5_TXCMP_CODE_SINGLE))
4372 static __rte_always_inline enum mlx5_txcmp_code
4373 mlx5_tx_burst_single(struct mlx5_txq_data *restrict txq,
4374 struct rte_mbuf **restrict pkts,
4375 unsigned int pkts_n,
4376 struct mlx5_txq_local *restrict loc,
4379 enum mlx5_txcmp_code ret;
4381 ret = mlx5_tx_able_to_empw(txq, loc, olx, false);
4382 if (ret == MLX5_TXCMP_CODE_SINGLE)
4384 assert(ret == MLX5_TXCMP_CODE_EMPW);
4386 /* Optimize for inline/no inline eMPW send. */
4387 ret = (MLX5_TXOFF_CONFIG(INLINE)) ?
4388 mlx5_tx_burst_empw_inline
4389 (txq, pkts, pkts_n, loc, olx) :
4390 mlx5_tx_burst_empw_simple
4391 (txq, pkts, pkts_n, loc, olx);
4392 if (ret != MLX5_TXCMP_CODE_SINGLE)
4394 /* The resources to send one packet should remain. */
4395 assert(loc->elts_free && loc->wqe_free);
4397 ret = mlx5_tx_burst_single_send(txq, pkts, pkts_n, loc, olx);
4398 assert(ret != MLX5_TXCMP_CODE_SINGLE);
4399 if (ret != MLX5_TXCMP_CODE_EMPW)
4401 /* The resources to send one packet should remain. */
4402 assert(loc->elts_free && loc->wqe_free);
4406 /**
4407 * DPDK Tx callback template. This is the configured template
4408 * used to generate routines optimized for the specified offload setup.
4409 * One of the generated functions is chosen at SQ configuration
4410 * time.
4411 *
4412 * @param txq
4413 * Generic pointer to TX queue structure.
4414 * @param[in] pkts
4415 * Packets to transmit.
4416 * @param pkts_n
4417 * Number of packets in array.
4418 * @param olx
4419 * Configured offloads mask; presents the bits of the MLX5_TXOFF_CONFIG_xxx
4420 * values. Should be static to take compile time static configuration
4421 * advantages.
4422 *
4423 * @return
4424 * Number of packets successfully transmitted (<= pkts_n).
4425 */
4426 static __rte_always_inline uint16_t
4427 mlx5_tx_burst_tmpl(struct mlx5_txq_data *restrict txq,
4428 struct rte_mbuf **restrict pkts,
4429 unsigned int pkts_n,
4430 unsigned int olx)
4431 {
4432 struct mlx5_txq_local loc;
4433 enum mlx5_txcmp_code ret;
4436 assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
4437 assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
4438 /*
4439 * Check if there are any CQEs and, if so:
4440 * - process the encountered errors
4441 * - process the completed WQEs
4442 * - free related mbufs
4443 * - doorbell the NIC about processed CQEs
4444 */
4445 if (unlikely(!pkts_n))
4446 return 0;
4447 rte_prefetch0(*pkts);
4448 mlx5_tx_handle_completion(txq, olx);
4449 /*
4450 * Calculate the number of available resources - elts and WQEs.
4451 * There are two different scenarios:
4452 * - no data inlining into WQEs, one WQEBB may contain up to
4453 * four packets, in this case the elts become the scarce resource
4454 * - data inlining into WQEs, one packet may require multiple
4455 * WQEBBs, the WQEs become the limiting factor.
4456 */
4457 assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
4458 loc.elts_free = txq->elts_s -
4459 (uint16_t)(txq->elts_head - txq->elts_tail);
4460 assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
4461 loc.wqe_free = txq->wqe_s -
4462 (uint16_t)(txq->wqe_ci - txq->wqe_pi);
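/*
 * Illustration only, not part of the datapath: the free-resource math
 * above relies on wrap-safe unsigned 16-bit arithmetic. A self-contained
 * sketch of the same idea with hypothetical names:
 *
 *   static inline uint16_t
 *   ring_free(uint16_t size, uint16_t head, uint16_t tail)
 *   {
 *       return size - (uint16_t)(head - tail);
 *   }
 *
 * For example, size = 256, head = 2, tail = 65533 (the head counter has
 * wrapped): (uint16_t)(2 - 65533) = 5 entries in use, 251 free.
 */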
4463 if (unlikely(!loc.elts_free || !loc.wqe_free))
4467 loc.wqe_last = NULL;
4469 /*
4470 * Fetch the packet from the array. Usually this is
4471 * the first packet in a series of multi/single
4472 * segment packets.
4473 */
4474 loc.mbuf = *(pkts + loc.pkts_sent);
4475 /* Dedicated branch for multi-segment packets. */
4476 if (MLX5_TXOFF_CONFIG(MULTI) &&
4477 unlikely(NB_SEGS(loc.mbuf) > 1)) {
4478 /*
4479 * Multi-segment packet encountered.
4480 * Hardware is able to process it only
4481 * with SEND/TSO opcodes, one packet
4482 * per WQE, do it in a dedicated routine.
4483 */
4484 enter_send_multi:
4485 assert(loc.pkts_sent >= loc.pkts_copy);
4486 part = loc.pkts_sent - loc.pkts_copy;
4487 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
4488 /*
4489 * There are some single-segment mbufs not
4490 * stored in elts. The mbufs must be in the
4491 * same order as WQEs, so we must copy the
4492 * mbufs to elts here, before the mbuf of the
4493 * coming multi-segment packet is appended.
4494 */
4495 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy,
4496 part, olx);
4497 loc.pkts_copy = loc.pkts_sent;
4499 assert(pkts_n > loc.pkts_sent);
4500 ret = mlx5_tx_burst_mseg(txq, pkts, pkts_n, &loc, olx);
4501 if (!MLX5_TXOFF_CONFIG(INLINE))
4502 loc.pkts_copy = loc.pkts_sent;
4503 /*
4504 * These return code checks are supposed to
4505 * be optimized out due to routine inlining.
4506 */
4507 if (ret == MLX5_TXCMP_CODE_EXIT) {
4508 /*
4509 * The routine returns this code when
4510 * all packets are sent or there are not
4511 * enough resources to complete the request.
4512 */
4515 if (ret == MLX5_TXCMP_CODE_ERROR) {
4516 /*
4517 * The routine returns this code when an
4518 * error in the incoming packet format
4519 * is detected.
4520 */
4521 txq->stats.oerrors++;
4524 if (ret == MLX5_TXCMP_CODE_SINGLE) {
4525 /*
4526 * A single-segment packet was encountered
4527 * in the array, try to send it in the most
4528 * optimized way, possibly engaging eMPW.
4529 */
4530 goto enter_send_single;
4532 if (MLX5_TXOFF_CONFIG(TSO) &&
4533 ret == MLX5_TXCMP_CODE_TSO) {
4534 /*
4535 * The single-segment TSO packet was
4536 * encountered in the array.
4537 */
4538 goto enter_send_tso;
4540 /* We must not get here. Something is going wrong. */
4542 txq->stats.oerrors++;
4545 /* Dedicated branch for single-segment TSO packets. */
4546 if (MLX5_TXOFF_CONFIG(TSO) &&
4547 unlikely(loc.mbuf->ol_flags & PKT_TX_TCP_SEG)) {
4548 /*
4549 * TSO might require a special way of inlining
4550 * (dedicated parameters) and is sent with
4551 * the MLX5_OPCODE_TSO opcode only, handle it
4552 * in a dedicated branch.
4553 */
4554 enter_send_tso:
4555 assert(NB_SEGS(loc.mbuf) == 1);
4556 assert(pkts_n > loc.pkts_sent);
4557 ret = mlx5_tx_burst_tso(txq, pkts, pkts_n, &loc, olx);
4558 /*
4559 * These return code checks are supposed to
4560 * be optimized out due to routine inlining.
4561 */
4562 if (ret == MLX5_TXCMP_CODE_EXIT)
4564 if (ret == MLX5_TXCMP_CODE_ERROR) {
4565 txq->stats.oerrors++;
4568 if (ret == MLX5_TXCMP_CODE_SINGLE)
4569 goto enter_send_single;
4570 if (MLX5_TXOFF_CONFIG(MULTI) &&
4571 ret == MLX5_TXCMP_CODE_MULTI) {
4572 /*
4573 * The multi-segment packet was
4574 * encountered in the array.
4575 */
4576 goto enter_send_multi;
4578 /* We must not get here. Something is going wrong. */
4580 txq->stats.oerrors++;
4583 /*
4584 * The dedicated branch for single-segment packets
4585 * without TSO. Often these can be sent using
4586 * MLX5_OPCODE_EMPW with multiple packets in one WQE.
4587 * The routine builds the WQEs until it encounters
4588 * a TSO or multi-segment packet (if these offloads
4589 * are requested at SQ configuration time).
4590 */
4591 enter_send_single:
4592 assert(pkts_n > loc.pkts_sent);
4593 ret = mlx5_tx_burst_single(txq, pkts, pkts_n, &loc, olx);
4594 /*
4595 * These return code checks are supposed to
4596 * be optimized out due to routine inlining.
4597 */
4598 if (ret == MLX5_TXCMP_CODE_EXIT)
4600 if (ret == MLX5_TXCMP_CODE_ERROR) {
4601 txq->stats.oerrors++;
4604 if (MLX5_TXOFF_CONFIG(MULTI) &&
4605 ret == MLX5_TXCMP_CODE_MULTI) {
4606 /*
4607 * The multi-segment packet was
4608 * encountered in the array.
4609 */
4610 goto enter_send_multi;
4612 if (MLX5_TXOFF_CONFIG(TSO) &&
4613 ret == MLX5_TXCMP_CODE_TSO) {
4614 /*
4615 * The single-segment TSO packet was
4616 * encountered in the array.
4617 */
4618 goto enter_send_tso;
4620 /* We must not get here. Something is going wrong. */
4622 txq->stats.oerrors++;
4625 /*
4626 * Main Tx loop is completed, do the rest:
4627 * - set completion request if thresholds are reached
4628 * - doorbell the hardware
4629 * - copy the rest of the mbufs to elts (if any)
4630 */
4631 assert(MLX5_TXOFF_CONFIG(INLINE) || loc.pkts_sent >= loc.pkts_copy);
4632 /* Take a shortcut if nothing is sent. */
4633 if (unlikely(loc.pkts_sent == 0))
4635 /* Not all of the mbufs may be stored into elts yet. */
4636 part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent - loc.pkts_copy;
4637 mlx5_tx_request_completion(txq, part, &loc, olx);
4638 /*
4639 * Ring the QP doorbell immediately after WQE building completion
4640 * to improve latency. The purely software-related data treatment
4641 * can be completed after the doorbell. Tx CQEs for this SQ are
4642 * processed in this thread only, by polling.
4643 */
4644 mlx5_tx_dbrec_cond_wmb(txq, loc.wqe_last, 0);
4645 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
4646 /*
4647 * There are some single-segment mbufs not stored in elts.
4648 * This can only happen if the last packet was single-segment.
4649 * The copying is gathered into one place because it is
4650 * a good opportunity to optimize it with SIMD.
4651 * Unfortunately, if inlining is enabled, gaps in the
4652 * pointer array may appear due to the early freeing of the
4653 * inlined packet mbufs.
4654 */
4655 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy, part, olx);
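/*
 * Illustration only: a scalar sketch of what a batch copy into the elts
 * ring amounts to, with hypothetical names. The real mlx5_tx_copy_elts()
 * may use wider or vectorized stores, as the SIMD remark above suggests.
 *
 *   static inline void
 *   ring_store(struct rte_mbuf **elts, uint16_t mask, uint16_t head,
 *              struct rte_mbuf **pkts, unsigned int n)
 *   {
 *       unsigned int i;
 *
 *       for (i = 0; i < n; ++i)
 *           elts[(head + i) & mask] = pkts[i];
 *   }
 */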
4657 #ifdef MLX5_PMD_SOFT_COUNTERS
4658 /* Increment sent packets counter. */
4659 txq->stats.opackets += loc.pkts_sent;
4661 assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
4662 assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
4663 return loc.pkts_sent;
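/*
 * Illustration only: why 'olx' must be a compile-time constant. The
 * template is always inlined into the MLX5_TXOFF_DECL() wrappers with a
 * literal mask, so every MLX5_TXOFF_CONFIG(...) test folds to a constant
 * and the unused branches are removed. A generic sketch of the same
 * pattern with hypothetical names:
 *
 *   static __rte_always_inline int
 *   do_work(unsigned int flags, int x)
 *   {
 *       if (flags & 0x1)
 *           x += 1;
 *       return x;
 *   }
 *
 *   static int
 *   do_work_fast(int x)
 *   {
 *       return do_work(0, x);
 *   }
 *
 * In do_work_fast() the branch disappears entirely, which is what happens
 * to the offload checks in each generated Tx routine.
 */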
4666 /* Generate routines with Enhanced Multi-Packet Write support. */
4667 MLX5_TXOFF_DECL(full_empw,
4668 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_EMPW)
4670 MLX5_TXOFF_DECL(none_empw,
4671 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW)
4673 MLX5_TXOFF_DECL(md_empw,
4674 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4676 MLX5_TXOFF_DECL(mt_empw,
4677 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4678 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4680 MLX5_TXOFF_DECL(mtsc_empw,
4681 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4682 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4683 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4685 MLX5_TXOFF_DECL(mti_empw,
4686 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4687 MLX5_TXOFF_CONFIG_INLINE |
4688 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4690 MLX5_TXOFF_DECL(mtv_empw,
4691 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4692 MLX5_TXOFF_CONFIG_VLAN |
4693 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4695 MLX5_TXOFF_DECL(mtiv_empw,
4696 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4697 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4698 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4700 MLX5_TXOFF_DECL(sc_empw,
4701 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4702 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4704 MLX5_TXOFF_DECL(sci_empw,
4705 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4706 MLX5_TXOFF_CONFIG_INLINE |
4707 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4709 MLX5_TXOFF_DECL(scv_empw,
4710 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4711 MLX5_TXOFF_CONFIG_VLAN |
4712 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4714 MLX5_TXOFF_DECL(sciv_empw,
4715 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4716 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4717 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4719 MLX5_TXOFF_DECL(i_empw,
4720 MLX5_TXOFF_CONFIG_INLINE |
4721 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4723 MLX5_TXOFF_DECL(v_empw,
4724 MLX5_TXOFF_CONFIG_VLAN |
4725 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4727 MLX5_TXOFF_DECL(iv_empw,
4728 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4729 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4731 /* Generate routines without Enhanced Multi-Packet Write support. */
4732 MLX5_TXOFF_DECL(full,
4733 MLX5_TXOFF_CONFIG_FULL)
4735 MLX5_TXOFF_DECL(none,
4736 MLX5_TXOFF_CONFIG_NONE)
4738 MLX5_TXOFF_DECL(md,
4739 MLX5_TXOFF_CONFIG_METADATA)
4741 MLX5_TXOFF_DECL(mt,
4742 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4743 MLX5_TXOFF_CONFIG_METADATA)
4745 MLX5_TXOFF_DECL(mtsc,
4746 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4747 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4748 MLX5_TXOFF_CONFIG_METADATA)
4750 MLX5_TXOFF_DECL(mti,
4751 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4752 MLX5_TXOFF_CONFIG_INLINE |
4753 MLX5_TXOFF_CONFIG_METADATA)
4756 MLX5_TXOFF_DECL(mtv,
4757 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4758 MLX5_TXOFF_CONFIG_VLAN |
4759 MLX5_TXOFF_CONFIG_METADATA)
4762 MLX5_TXOFF_DECL(mtiv,
4763 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4764 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4765 MLX5_TXOFF_CONFIG_METADATA)
4767 MLX5_TXOFF_DECL(sc,
4768 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4769 MLX5_TXOFF_CONFIG_METADATA)
4771 MLX5_TXOFF_DECL(sci,
4772 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4773 MLX5_TXOFF_CONFIG_INLINE |
4774 MLX5_TXOFF_CONFIG_METADATA)
4777 MLX5_TXOFF_DECL(scv,
4778 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4779 MLX5_TXOFF_CONFIG_VLAN |
4780 MLX5_TXOFF_CONFIG_METADATA)
4783 MLX5_TXOFF_DECL(sciv,
4784 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4785 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4786 MLX5_TXOFF_CONFIG_METADATA)
4788 MLX5_TXOFF_DECL(i,
4789 MLX5_TXOFF_CONFIG_INLINE |
4790 MLX5_TXOFF_CONFIG_METADATA)
4792 MLX5_TXOFF_DECL(v,
4793 MLX5_TXOFF_CONFIG_VLAN |
4794 MLX5_TXOFF_CONFIG_METADATA)
4796 MLX5_TXOFF_DECL(iv,
4797 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4798 MLX5_TXOFF_CONFIG_METADATA)
4800 /*
4801 * Array of declared and compiled Tx burst functions and the corresponding
4802 * supported offloads sets. The array is used to select the Tx burst
4803 * function for the specified offloads set at Tx queue configuration time.
4804 */
4805 static struct {
4806 eth_tx_burst_t func;
4807 unsigned int olx;
4808 } txoff_func[] = {
4809 MLX5_TXOFF_INFO(full_empw,
4810 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4811 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4812 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4813 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4815 MLX5_TXOFF_INFO(none_empw,
4816 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW)
4818 MLX5_TXOFF_INFO(md_empw,
4819 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4821 MLX5_TXOFF_INFO(mt_empw,
4822 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4823 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4825 MLX5_TXOFF_INFO(mtsc_empw,
4826 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4827 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4828 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4830 MLX5_TXOFF_INFO(mti_empw,
4831 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4832 MLX5_TXOFF_CONFIG_INLINE |
4833 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4835 MLX5_TXOFF_INFO(mtv_empw,
4836 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4837 MLX5_TXOFF_CONFIG_VLAN |
4838 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4840 MLX5_TXOFF_INFO(mtiv_empw,
4841 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4842 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4843 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4845 MLX5_TXOFF_INFO(sc_empw,
4846 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4847 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4849 MLX5_TXOFF_INFO(sci_empw,
4850 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4851 MLX5_TXOFF_CONFIG_INLINE |
4852 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4854 MLX5_TXOFF_INFO(scv_empw,
4855 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4856 MLX5_TXOFF_CONFIG_VLAN |
4857 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4859 MLX5_TXOFF_INFO(sciv_empw,
4860 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4861 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4862 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4864 MLX5_TXOFF_INFO(i_empw,
4865 MLX5_TXOFF_CONFIG_INLINE |
4866 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4868 MLX5_TXOFF_INFO(v_empw,
4869 MLX5_TXOFF_CONFIG_VLAN |
4870 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4872 MLX5_TXOFF_INFO(iv_empw,
4873 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4874 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4876 MLX5_TXOFF_INFO(full,
4877 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4878 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4879 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4880 MLX5_TXOFF_CONFIG_METADATA)
4882 MLX5_TXOFF_INFO(none,
4883 MLX5_TXOFF_CONFIG_NONE)
4885 MLX5_TXOFF_INFO(md,
4886 MLX5_TXOFF_CONFIG_METADATA)
4888 MLX5_TXOFF_INFO(mt,
4889 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4890 MLX5_TXOFF_CONFIG_METADATA)
4892 MLX5_TXOFF_INFO(mtsc,
4893 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4894 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4895 MLX5_TXOFF_CONFIG_METADATA)
4897 MLX5_TXOFF_INFO(mti,
4898 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4899 MLX5_TXOFF_CONFIG_INLINE |
4900 MLX5_TXOFF_CONFIG_METADATA)
4903 MLX5_TXOFF_INFO(mtv,
4904 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4905 MLX5_TXOFF_CONFIG_VLAN |
4906 MLX5_TXOFF_CONFIG_METADATA)
4908 MLX5_TXOFF_INFO(mtiv,
4909 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4910 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4911 MLX5_TXOFF_CONFIG_METADATA)
4913 MLX5_TXOFF_INFO(sc,
4914 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4915 MLX5_TXOFF_CONFIG_METADATA)
4917 MLX5_TXOFF_INFO(sci,
4918 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4919 MLX5_TXOFF_CONFIG_INLINE |
4920 MLX5_TXOFF_CONFIG_METADATA)
4922 MLX5_TXOFF_INFO(scv,
4923 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4924 MLX5_TXOFF_CONFIG_VLAN |
4925 MLX5_TXOFF_CONFIG_METADATA)
4927 MLX5_TXOFF_INFO(sciv,
4928 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4929 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4930 MLX5_TXOFF_CONFIG_METADATA)
4932 MLX5_TXOFF_INFO(i,
4933 MLX5_TXOFF_CONFIG_INLINE |
4934 MLX5_TXOFF_CONFIG_METADATA)
4936 MLX5_TXOFF_INFO(v,
4937 MLX5_TXOFF_CONFIG_VLAN |
4938 MLX5_TXOFF_CONFIG_METADATA)
4940 MLX5_TXOFF_INFO(iv,
4941 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4942 MLX5_TXOFF_CONFIG_METADATA)
4943 };
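/*
 * For reference, each MLX5_TXOFF_INFO(func, olx) entry above expands, per
 * the macro defined near the top of this file, into an initializer that
 * pairs the generated routine with its offload mask, roughly:
 *
 *   { mlx5_tx_burst_mt_empw,
 *     MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
 *     MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW },
 *
 * The selection code below only compares the stored masks and calls
 * through the stored function pointer.
 */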
4945 /**
4946 * Configure the Tx function to use. The routine checks the configured
4947 * Tx offloads for the device and selects the appropriate Tx burst
4948 * routine. There are multiple Tx burst routines compiled from
4949 * the same template in the most optimal way for the dedicated
4950 * offload set.
4951 *
4952 * @param dev
4953 * Pointer to the Ethernet device structure.
4954 *
4955 * @return
4956 * Pointer to the selected Tx burst function.
4957 */
4958 eth_tx_burst_t
4959 mlx5_select_tx_function(struct rte_eth_dev *dev)
4961 struct mlx5_priv *priv = dev->data->dev_private;
4962 struct mlx5_dev_config *config = &priv->config;
4963 uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
4964 unsigned int diff = 0, olx = 0, i, m;
4966 static_assert(MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE <=
4967 MLX5_DSEG_MAX, "invalid WQE max size");
4968 static_assert(MLX5_WQE_CSEG_SIZE == MLX5_WSEG_SIZE,
4969 "invalid WQE Control Segment size");
4970 static_assert(MLX5_WQE_ESEG_SIZE == MLX5_WSEG_SIZE,
4971 "invalid WQE Ethernet Segment size");
4972 static_assert(MLX5_WQE_DSEG_SIZE == MLX5_WSEG_SIZE,
4973 "invalid WQE Data Segment size");
4974 static_assert(MLX5_WQE_SIZE == 4 * MLX5_WSEG_SIZE,
4975 "invalid WQE size");
4977 if (tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
4978 /* We should support Multi-Segment Packets. */
4979 olx |= MLX5_TXOFF_CONFIG_MULTI;
4981 if (tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
4982 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
4983 DEV_TX_OFFLOAD_GRE_TNL_TSO |
4984 DEV_TX_OFFLOAD_IP_TNL_TSO |
4985 DEV_TX_OFFLOAD_UDP_TNL_TSO)) {
4986 /* We should support TCP Send Offload. */
4987 olx |= MLX5_TXOFF_CONFIG_TSO;
4989 if (tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO |
4990 DEV_TX_OFFLOAD_UDP_TNL_TSO |
4991 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
4992 /* We should support Software Parser for Tunnels. */
4993 olx |= MLX5_TXOFF_CONFIG_SWP;
4995 if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
4996 DEV_TX_OFFLOAD_UDP_CKSUM |
4997 DEV_TX_OFFLOAD_TCP_CKSUM |
4998 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
4999 /* We should support IP/TCP/UDP Checksums. */
5000 olx |= MLX5_TXOFF_CONFIG_CSUM;
5002 if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT) {
5003 /* We should support VLAN insertion. */
5004 olx |= MLX5_TXOFF_CONFIG_VLAN;
5006 if (priv->txqs_n && (*priv->txqs)[0]) {
5007 struct mlx5_txq_data *txd = (*priv->txqs)[0];
5009 if (txd->inlen_send) {
5010 /*
5011 * Check the data inline requirements. Data inlining
5012 * is enabled on a per-device basis, so we can check
5013 * the first Tx queue only.
5014 *
5015 * If the device does not support VLAN insertion in
5016 * the WQE and some queues are requested to perform
5017 * VLAN insertion offload, then inlining must be enabled.
5018 */
5019 olx |= MLX5_TXOFF_CONFIG_INLINE;
5022 if (config->mps == MLX5_MPW_ENHANCED &&
5023 config->txq_inline_min <= 0) {
5024 /*
5025 * The NIC supports Enhanced Multi-Packet Write.
5026 * We do not support legacy MPW due to its
5027 * hardware related problems, so we just ignore
5028 * legacy MLX5_MPW settings. There should be no
5029 * minimal required inline data.
5030 */
5031 olx |= MLX5_TXOFF_CONFIG_EMPW;
5033 if (tx_offloads & DEV_TX_OFFLOAD_MATCH_METADATA) {
5034 /* We should support Flow metadata. */
5035 olx |= MLX5_TXOFF_CONFIG_METADATA;
5037 /*
5038 * Scan the routines table to find the minimal
5039 * routine satisfying the requested offloads.
5040 */
5041 m = RTE_DIM(txoff_func);
5042 for (i = 0; i < RTE_DIM(txoff_func); i++) {
5045 tmp = txoff_func[i].olx;
5046 if (tmp == olx) {
5047 /* Meets the requested offloads exactly. */
5051 if ((tmp & olx) != olx) {
5052 /* Does not meet requested offloads at all. */
5055 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_EMPW)
5056 /* Do not enable eMPW if not configured. */
5057 continue;
5058 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_INLINE)
5059 /* Do not enable inlining if not configured. */
5060 continue;
5061 /*
5062 * Some routine meets the requirements.
5063 * Check whether it has the minimal amount
5064 * of not-requested offloads.
5065 */
5066 tmp = __builtin_popcountl(tmp & ~olx);
5067 if (m >= RTE_DIM(txoff_func) || tmp < diff) {
5068 /* First or better match, save and continue. */
5069 m = i;
5070 diff = tmp;
5071 continue;
5072 }
5073 if (tmp == diff) {
5074 tmp = txoff_func[i].olx ^ txoff_func[m].olx;
5075 if (__builtin_ffsl(txoff_func[i].olx & ~tmp) <
5076 __builtin_ffsl(txoff_func[m].olx & ~tmp)) {
5077 /* Lighter not requested offload. */
5082 if (m >= RTE_DIM(txoff_func)) {
5083 DRV_LOG(DEBUG, "port %u has no selected Tx function"
5084 " for requested offloads %04X",
5085 dev->data->port_id, olx);
5086 return NULL;
5087 }
5088 DRV_LOG(DEBUG, "port %u has selected Tx function"
5089 " supporting offloads %04X/%04X",
5090 dev->data->port_id, olx, txoff_func[m].olx);
5091 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MULTI)
5092 DRV_LOG(DEBUG, "\tMULTI (multi segment)");
5093 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_TSO)
5094 DRV_LOG(DEBUG, "\tTSO (TCP send offload)");
5095 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_SWP)
5096 DRV_LOG(DEBUG, "\tSWP (software parser)");
5097 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_CSUM)
5098 DRV_LOG(DEBUG, "\tCSUM (checksum offload)");
5099 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_INLINE)
5100 DRV_LOG(DEBUG, "\tINLIN (inline data)");
5101 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_VLAN)
5102 DRV_LOG(DEBUG, "\tVLANI (VLAN insertion)");
5103 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_METADATA)
5104 DRV_LOG(DEBUG, "\tMETAD (tx Flow metadata)");
5105 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_EMPW)
5106 DRV_LOG(DEBUG, "\tEMPW (Enhanced MPW)");
5107 return txoff_func[m].func;
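/*
 * Illustration only: ignoring the EMPW/INLINE exclusions and the
 * __builtin_ffsl() tie-break, the scan above is a "smallest superset"
 * search over the offload masks. A hypothetical standalone helper
 * showing the core idea:
 *
 *   static int
 *   best_match(const unsigned int *masks, unsigned int n, unsigned int want)
 *   {
 *       unsigned int i, best = n, best_extra = 0;
 *
 *       for (i = 0; i < n; i++) {
 *           unsigned int extra;
 *
 *           if ((masks[i] & want) != want)
 *               continue;
 *           extra = __builtin_popcount(masks[i] & ~want);
 *           if (best == n || extra < best_extra) {
 *               best = i;
 *               best_extra = extra;
 *           }
 *       }
 *       return best == n ? -1 : (int)best;
 *   }
 *
 * For example, with want = CSUM | SWP | METADATA | EMPW the "sc_empw"
 * entry matches with zero extra offloads and wins over "full_empw",
 * which carries four offloads that were not requested.
 */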