1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015-2019 Mellanox Technologies, Ltd
12 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
14 #pragma GCC diagnostic ignored "-Wpedantic"
16 #include <infiniband/verbs.h>
17 #include <infiniband/mlx5dv.h>
19 #pragma GCC diagnostic error "-Wpedantic"
23 #include <rte_mempool.h>
24 #include <rte_prefetch.h>
25 #include <rte_common.h>
26 #include <rte_branch_prediction.h>
27 #include <rte_ether.h>
28 #include <rte_cycles.h>
31 #include "mlx5_utils.h"
32 #include "mlx5_rxtx.h"
33 #include "mlx5_autoconf.h"
34 #include "mlx5_defs.h"
37 /* TX burst subroutines return codes. */
38 enum mlx5_txcmp_code {
39 MLX5_TXCMP_CODE_EXIT = 0,
40 MLX5_TXCMP_CODE_ERROR,
41 MLX5_TXCMP_CODE_SINGLE,
42 MLX5_TXCMP_CODE_MULTI,
48 * These defines are used to configure the Tx burst routine option set
49 * supported at compile time. Options that are not specified are
50 * optimized out because the if conditions can be evaluated at compile time.
51 * The offloads with a bigger runtime check overhead (requiring more CPU
52 * cycles to skip) should have a bigger index - this is needed to
53 * select the best matching routine when there is no exact match and
54 * some offloads are not actually requested.
56 #define MLX5_TXOFF_CONFIG_MULTI (1u << 0) /* Multi-segment packets.*/
57 #define MLX5_TXOFF_CONFIG_TSO (1u << 1) /* TCP send offload supported.*/
58 #define MLX5_TXOFF_CONFIG_SWP (1u << 2) /* Tunnels/SW Parser offloads.*/
59 #define MLX5_TXOFF_CONFIG_CSUM (1u << 3) /* Check Sums offloaded. */
60 #define MLX5_TXOFF_CONFIG_INLINE (1u << 4) /* Data inlining supported. */
61 #define MLX5_TXOFF_CONFIG_VLAN (1u << 5) /* VLAN insertion supported.*/
62 #define MLX5_TXOFF_CONFIG_METADATA (1u << 6) /* Flow metadata. */
63 #define MLX5_TXOFF_CONFIG_EMPW (1u << 8) /* Enhanced MPW supported.*/
65 /* The most common offload groups. */
66 #define MLX5_TXOFF_CONFIG_NONE 0
67 #define MLX5_TXOFF_CONFIG_FULL (MLX5_TXOFF_CONFIG_MULTI | \
68 MLX5_TXOFF_CONFIG_TSO | \
69 MLX5_TXOFF_CONFIG_SWP | \
70 MLX5_TXOFF_CONFIG_CSUM | \
71 MLX5_TXOFF_CONFIG_INLINE | \
72 MLX5_TXOFF_CONFIG_VLAN | \
73 MLX5_TXOFF_CONFIG_METADATA)
75 #define MLX5_TXOFF_CONFIG(mask) (olx & MLX5_TXOFF_CONFIG_##mask)
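/*
 * Illustrative usage sketch (not part of the original code): "olx" is the
 * constant option mask passed to mlx5_tx_burst_tmpl() by the
 * MLX5_TXOFF_DECL() wrappers below, so a guard such as
 *
 *	if (MLX5_TXOFF_CONFIG(TSO) && loc->mbuf->ol_flags & PKT_TX_TCP_SEG)
 *		... TSO-specific path ...
 *
 * expands to a compile-time constant test and the whole branch is removed
 * from routines generated without the TSO option.
 */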
77 #define MLX5_TXOFF_DECL(func, olx) \
78 static uint16_t mlx5_tx_burst_##func(void *txq, \
79 struct rte_mbuf **pkts, \
82 return mlx5_tx_burst_tmpl((struct mlx5_txq_data *)txq, \
83 pkts, pkts_n, (olx)); \
86 #define MLX5_TXOFF_INFO(func, olx) {mlx5_tx_burst_##func, olx},
88 static __rte_always_inline uint32_t
89 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe);
91 static __rte_always_inline int
92 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
93 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe);
95 static __rte_always_inline uint32_t
96 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe);
98 static __rte_always_inline void
99 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
100 volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res);
102 static __rte_always_inline void
103 mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx);
106 mlx5_queue_state_modify(struct rte_eth_dev *dev,
107 struct mlx5_mp_arg_queue_state_modify *sm);
109 uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
110 [0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
113 uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned;
114 uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned;
117 * Build a table to translate Rx completion flags to packet type.
119 * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
122 mlx5_set_ptype_table(void)
125 uint32_t (*p)[RTE_DIM(mlx5_ptype_table)] = &mlx5_ptype_table;
127 /* Last entry must not be overwritten, reserved for errored packet. */
128 for (i = 0; i < RTE_DIM(mlx5_ptype_table) - 1; ++i)
129 (*p)[i] = RTE_PTYPE_UNKNOWN;
131 * The index to the array should have:
132 * bit[1:0] = l3_hdr_type
133 * bit[4:2] = l4_hdr_type
136 * bit[7] = outer_l3_type
139 (*p)[0x00] = RTE_PTYPE_L2_ETHER;
141 (*p)[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
142 RTE_PTYPE_L4_NONFRAG;
143 (*p)[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
144 RTE_PTYPE_L4_NONFRAG;
146 (*p)[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
148 (*p)[0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
151 (*p)[0x05] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
153 (*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
155 (*p)[0x0d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
157 (*p)[0x0e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
159 (*p)[0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
161 (*p)[0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
164 (*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
166 (*p)[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
168 /* Repeat with outer_l3_type being set. Just in case. */
169 (*p)[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
170 RTE_PTYPE_L4_NONFRAG;
171 (*p)[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
172 RTE_PTYPE_L4_NONFRAG;
173 (*p)[0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
175 (*p)[0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
177 (*p)[0x85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
179 (*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
181 (*p)[0x8d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
183 (*p)[0x8e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
185 (*p)[0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
187 (*p)[0x92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
189 (*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
191 (*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
194 (*p)[0x40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
195 (*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
196 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
197 RTE_PTYPE_INNER_L4_NONFRAG;
198 (*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
199 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
200 RTE_PTYPE_INNER_L4_NONFRAG;
201 (*p)[0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
202 (*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
203 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
204 RTE_PTYPE_INNER_L4_NONFRAG;
205 (*p)[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
206 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
207 RTE_PTYPE_INNER_L4_NONFRAG;
208 /* Tunneled - Fragmented */
209 (*p)[0x61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
210 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
211 RTE_PTYPE_INNER_L4_FRAG;
212 (*p)[0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
213 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
214 RTE_PTYPE_INNER_L4_FRAG;
215 (*p)[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
216 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
217 RTE_PTYPE_INNER_L4_FRAG;
218 (*p)[0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
219 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
220 RTE_PTYPE_INNER_L4_FRAG;
222 (*p)[0x45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
223 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
224 RTE_PTYPE_INNER_L4_TCP;
225 (*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
226 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
227 RTE_PTYPE_INNER_L4_TCP;
228 (*p)[0x4d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
229 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
230 RTE_PTYPE_INNER_L4_TCP;
231 (*p)[0x4e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
232 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
233 RTE_PTYPE_INNER_L4_TCP;
234 (*p)[0x51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
235 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
236 RTE_PTYPE_INNER_L4_TCP;
237 (*p)[0x52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
238 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
239 RTE_PTYPE_INNER_L4_TCP;
240 (*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
241 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
242 RTE_PTYPE_INNER_L4_TCP;
243 (*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
244 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
245 RTE_PTYPE_INNER_L4_TCP;
246 (*p)[0xcd] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
247 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
248 RTE_PTYPE_INNER_L4_TCP;
249 (*p)[0xce] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
250 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
251 RTE_PTYPE_INNER_L4_TCP;
252 (*p)[0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
253 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
254 RTE_PTYPE_INNER_L4_TCP;
255 (*p)[0xd2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
256 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
257 RTE_PTYPE_INNER_L4_TCP;
259 (*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
260 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
261 RTE_PTYPE_INNER_L4_UDP;
262 (*p)[0x4a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
263 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
264 RTE_PTYPE_INNER_L4_UDP;
265 (*p)[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
266 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
267 RTE_PTYPE_INNER_L4_UDP;
268 (*p)[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
269 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
270 RTE_PTYPE_INNER_L4_UDP;
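/*
 * Worked example (illustration only, values taken from the entries above):
 * index 0xc2 has the outer_l3_type bit (bit 7) and the tunnel bit (bit 6)
 * set with l3_hdr_type == 2 and no L4/fragment bits, so the lookup yields
 * RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
 * RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_NONFRAG,
 * i.e. a tunneled packet with an IPv6 outer and a non-fragmented IPv4
 * inner header.
 */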
274 * Build a table to translate packet checksum offloads to the checksum type of Verbs.
277 mlx5_set_cksum_table(void)
283 * The index should have:
284 * bit[0] = PKT_TX_TCP_SEG
285 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
286 * bit[4] = PKT_TX_IP_CKSUM
287 * bit[8] = PKT_TX_OUTER_IP_CKSUM
290 for (i = 0; i < RTE_DIM(mlx5_cksum_table); ++i) {
293 /* Tunneled packet. */
294 if (i & (1 << 8)) /* Outer IP. */
295 v |= MLX5_ETH_WQE_L3_CSUM;
296 if (i & (1 << 4)) /* Inner IP. */
297 v |= MLX5_ETH_WQE_L3_INNER_CSUM;
298 if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
299 v |= MLX5_ETH_WQE_L4_INNER_CSUM;
302 if (i & (1 << 4)) /* IP. */
303 v |= MLX5_ETH_WQE_L3_CSUM;
304 if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
305 v |= MLX5_ETH_WQE_L4_CSUM;
307 mlx5_cksum_table[i] = v;
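/*
 * Worked example (illustration only, follows directly from the branches
 * above): a plain non-tunneled TCP packet requesting
 * PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM sets index bit 4 and a non-zero
 * bit[3:2] field, so the stored value is
 * MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM.
 */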
312 * Build a table to translate packet type of mbuf to SWP type of Verbs.
315 mlx5_set_swp_types_table(void)
321 * The index should have:
322 * bit[0:1] = PKT_TX_L4_MASK
323 * bit[4] = PKT_TX_IPV6
324 * bit[8] = PKT_TX_OUTER_IPV6
325 * bit[9] = PKT_TX_OUTER_UDP
327 for (i = 0; i < RTE_DIM(mlx5_swp_types_table); ++i) {
330 v |= MLX5_ETH_WQE_L3_OUTER_IPV6;
332 v |= MLX5_ETH_WQE_L4_OUTER_UDP;
334 v |= MLX5_ETH_WQE_L3_INNER_IPV6;
335 if ((i & 3) == (PKT_TX_UDP_CKSUM >> 52))
336 v |= MLX5_ETH_WQE_L4_INNER_UDP;
337 mlx5_swp_types_table[i] = v;
342 * Set Software Parser flags and offsets in the Ethernet Segment of a WQE.
343 * Flags must be initialized to zero beforehand.
346 * Pointer to burst routine local context.
348 * Pointer to store Software Parser flags
350 * Configured Tx offloads mask. It is fully defined at
351 * compile time and may be used for optimization.
354 * Software Parser offsets packed in dword.
355 * Software Parser flags are set by pointer.
357 static __rte_always_inline uint32_t
358 txq_mbuf_to_swp(struct mlx5_txq_local *restrict loc,
363 unsigned int idx, off;
366 if (!MLX5_TXOFF_CONFIG(SWP))
368 ol = loc->mbuf->ol_flags;
369 tunnel = ol & PKT_TX_TUNNEL_MASK;
371 * Check whether Software Parser is required.
372 * Only customized tunnels may require it.
374 if (likely(tunnel != PKT_TX_TUNNEL_UDP && tunnel != PKT_TX_TUNNEL_IP))
377 * The index should have:
378 * bit[0:1] = PKT_TX_L4_MASK
379 * bit[4] = PKT_TX_IPV6
380 * bit[8] = PKT_TX_OUTER_IPV6
381 * bit[9] = PKT_TX_OUTER_UDP
383 idx = (ol & (PKT_TX_L4_MASK | PKT_TX_IPV6 | PKT_TX_OUTER_IPV6)) >> 52;
384 idx |= (tunnel == PKT_TX_TUNNEL_UDP) ? (1 << 9) : 0;
385 *swp_flags = mlx5_swp_types_table[idx];
387 * Set offsets for SW parser. Since ConnectX-5, SW parser just
388 * complements HW parser. SW parser starts to engage only if HW parser
389 * can't reach a header. For the older devices, HW parser will not kick
390 * in if any of SWP offsets is set. Therefore, all of the L3 offsets
391 * should be set regardless of HW offload.
393 off = loc->mbuf->outer_l2_len;
394 if (MLX5_TXOFF_CONFIG(VLAN) && ol & PKT_TX_VLAN_PKT)
395 off += sizeof(struct rte_vlan_hdr);
396 set = (off >> 1) << 8; /* Outer L3 offset. */
397 off += loc->mbuf->outer_l3_len;
398 if (tunnel == PKT_TX_TUNNEL_UDP)
399 set |= off >> 1; /* Outer L4 offset. */
400 if (ol & (PKT_TX_IPV4 | PKT_TX_IPV6)) { /* Inner IP. */
401 const uint64_t csum = ol & PKT_TX_L4_MASK;
402 off += loc->mbuf->l2_len;
403 set |= (off >> 1) << 24; /* Inner L3 offset. */
404 if (csum == PKT_TX_TCP_CKSUM ||
405 csum == PKT_TX_UDP_CKSUM ||
406 (MLX5_TXOFF_CONFIG(TSO) && ol & PKT_TX_TCP_SEG)) {
407 off += loc->mbuf->l3_len;
408 set |= (off >> 1) << 16; /* Inner L4 offset. */
411 set = rte_cpu_to_le_32(set);
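/*
 * Layout note (illustrative, derived from the shifts above): the returned
 * little-endian dword packs half-word offsets as byte 0 - outer L4,
 * byte 1 - outer L3, byte 2 - inner L4, byte 3 - inner L3. For example, a
 * plain 14-byte outer Ethernet header gives an outer L3 offset of 7
 * (14 / 2) in byte 1.
 */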
416 * Convert the Checksum offloads to Verbs.
419 * Pointer to the mbuf.
422 * Converted checksum flags.
424 static __rte_always_inline uint8_t
425 txq_ol_cksum_to_cs(struct rte_mbuf *buf)
428 uint8_t is_tunnel = !!(buf->ol_flags & PKT_TX_TUNNEL_MASK);
429 const uint64_t ol_flags_mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK |
430 PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM;
433 * The index should have:
434 * bit[0] = PKT_TX_TCP_SEG
435 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
436 * bit[4] = PKT_TX_IP_CKSUM
437 * bit[8] = PKT_TX_OUTER_IP_CKSUM
440 idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (!!is_tunnel << 9);
441 return mlx5_cksum_table[idx];
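/*
 * Note (illustrative, relies on the standard rte_mbuf flag positions):
 * PKT_TX_TCP_SEG is bit 50 of ol_flags, so the shift by 50 above lands it
 * in index bit 0, PKT_TX_L4_MASK (bits 52-53) in bits 2-3, PKT_TX_IP_CKSUM
 * (bit 54) in bit 4 and PKT_TX_OUTER_IP_CKSUM (bit 58) in bit 8, matching
 * the mlx5_cksum_table index layout.
 */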
445 * Internal function to compute the number of used descriptors in an RX queue
451 * The number of used Rx descriptors.
454 rx_queue_count(struct mlx5_rxq_data *rxq)
456 struct rxq_zip *zip = &rxq->zip;
457 volatile struct mlx5_cqe *cqe;
458 const unsigned int cqe_n = (1 << rxq->cqe_n);
459 const unsigned int cqe_cnt = cqe_n - 1;
463 /* if we are processing a compressed cqe */
465 used = zip->cqe_cnt - zip->ca;
471 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
472 while (check_cqe(cqe, cqe_n, cq_ci) != MLX5_CQE_STATUS_HW_OWN) {
476 op_own = cqe->op_own;
477 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
478 n = rte_be_to_cpu_32(cqe->byte_cnt);
483 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
485 used = RTE_MIN(used, (1U << rxq->elts_n) - 1);
490 * DPDK callback to check the status of an Rx descriptor.
495 * The index of the descriptor in the ring.
498 * The status of the Rx descriptor.
501 mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
503 struct mlx5_rxq_data *rxq = rx_queue;
504 struct mlx5_rxq_ctrl *rxq_ctrl =
505 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
506 struct rte_eth_dev *dev = ETH_DEV(rxq_ctrl->priv);
508 if (dev->rx_pkt_burst != mlx5_rx_burst) {
512 if (offset >= (1 << rxq->elts_n)) {
516 if (offset < rx_queue_count(rxq))
517 return RTE_ETH_RX_DESC_DONE;
518 return RTE_ETH_RX_DESC_AVAIL;
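/*
 * Application-side usage sketch (illustrative only, generic ethdev API):
 *
 *	if (rte_eth_rx_descriptor_status(port_id, queue_id, offset) ==
 *	    RTE_ETH_RX_DESC_DONE)
 *		... a completed packet is waiting at this offset ...
 */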
522 * DPDK callback to get the number of used descriptors in an Rx queue.
525 * Pointer to the device structure.
531 * The number of used Rx descriptors.
532 * -EINVAL if the queue is invalid.
535 mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
537 struct mlx5_priv *priv = dev->data->dev_private;
538 struct mlx5_rxq_data *rxq;
540 if (dev->rx_pkt_burst != mlx5_rx_burst) {
544 rxq = (*priv->rxqs)[rx_queue_id];
549 return rx_queue_count(rxq);
552 #define MLX5_SYSTEM_LOG_DIR "/var/log"
554 * Dump debug information to log file.
559 * If not NULL this string is printed as a header to the output
560 * and the output will be in hexadecimal view.
562 * This is the buffer address to print out.
564 * The number of bytes to dump out.
567 mlx5_dump_debug_information(const char *fname, const char *hex_title,
568 const void *buf, unsigned int hex_len)
572 MKSTR(path, "%s/%s", MLX5_SYSTEM_LOG_DIR, fname);
573 fd = fopen(path, "a+");
575 DRV_LOG(WARNING, "cannot open %s for debug dump\n",
577 MKSTR(path2, "./%s", fname);
578 fd = fopen(path2, "a+");
580 DRV_LOG(ERR, "cannot open %s for debug dump\n",
584 DRV_LOG(INFO, "New debug dump in file %s\n", path2);
586 DRV_LOG(INFO, "New debug dump in file %s\n", path);
589 rte_hexdump(fd, hex_title, buf, hex_len);
591 fprintf(fd, "%s", (const char *)buf);
592 fprintf(fd, "\n\n\n");
597 * Move QP from error state to running state and initialize indexes.
600 * Pointer to TX queue control structure.
603 * 0 on success, else -1.
606 tx_recover_qp(struct mlx5_txq_ctrl *txq_ctrl)
608 struct mlx5_mp_arg_queue_state_modify sm = {
610 .queue_id = txq_ctrl->txq.idx,
613 if (mlx5_queue_state_modify(ETH_DEV(txq_ctrl->priv), &sm))
615 txq_ctrl->txq.wqe_ci = 0;
616 txq_ctrl->txq.wqe_pi = 0;
617 txq_ctrl->txq.elts_comp = 0;
621 /* Return 1 if the error CQE is already marked as seen, otherwise mark it and return 0. */
623 check_err_cqe_seen(volatile struct mlx5_err_cqe *err_cqe)
625 static const uint8_t magic[] = "seen";
629 for (i = 0; i < sizeof(magic); ++i)
630 if (!ret || err_cqe->rsvd1[i] != magic[i]) {
632 err_cqe->rsvd1[i] = magic[i];
641 * Pointer to TX queue structure.
643 * Pointer to the error CQE.
646 * The last Tx buffer element to free.
649 mlx5_tx_error_cqe_handle(struct mlx5_txq_data *restrict txq,
650 volatile struct mlx5_err_cqe *err_cqe)
652 if (err_cqe->syndrome != MLX5_CQE_SYNDROME_WR_FLUSH_ERR) {
653 const uint16_t wqe_m = ((1 << txq->wqe_n) - 1);
654 struct mlx5_txq_ctrl *txq_ctrl =
655 container_of(txq, struct mlx5_txq_ctrl, txq);
656 uint16_t new_wqe_pi = rte_be_to_cpu_16(err_cqe->wqe_counter);
657 int seen = check_err_cqe_seen(err_cqe);
659 if (!seen && txq_ctrl->dump_file_n <
660 txq_ctrl->priv->config.max_dump_files_num) {
661 MKSTR(err_str, "Unexpected CQE error syndrome "
662 "0x%02x CQN = %u SQN = %u wqe_counter = %u "
663 "wq_ci = %u cq_ci = %u", err_cqe->syndrome,
664 txq->cqe_s, txq->qp_num_8s >> 8,
665 rte_be_to_cpu_16(err_cqe->wqe_counter),
666 txq->wqe_ci, txq->cq_ci);
667 MKSTR(name, "dpdk_mlx5_port_%u_txq_%u_index_%u_%u",
668 PORT_ID(txq_ctrl->priv), txq->idx,
669 txq_ctrl->dump_file_n, (uint32_t)rte_rdtsc());
670 mlx5_dump_debug_information(name, NULL, err_str, 0);
671 mlx5_dump_debug_information(name, "MLX5 Error CQ:",
672 (const void *)((uintptr_t)
676 mlx5_dump_debug_information(name, "MLX5 Error SQ:",
677 (const void *)((uintptr_t)
681 txq_ctrl->dump_file_n++;
685 * Count errors in units of WQEs.
686 * Later this can be improved to count error packets,
687 * for example, by parsing the SQ to find how many packets
688 * should be counted for each WQE.
690 txq->stats.oerrors += ((txq->wqe_ci & wqe_m) -
692 if (tx_recover_qp(txq_ctrl) == 0) {
694 /* Release all the remaining buffers. */
695 return txq->elts_head;
697 /* Recovering failed - try again later on the same WQE. */
701 /* Do not release buffers. */
702 return txq->elts_tail;
706 * Translate RX completion flags to packet type.
709 * Pointer to RX queue structure.
713 * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
716 * Packet type for struct rte_mbuf.
718 static inline uint32_t
719 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
722 uint8_t pinfo = cqe->pkt_info;
723 uint16_t ptype = cqe->hdr_type_etc;
726 * The index to the array should have:
727 * bit[1:0] = l3_hdr_type
728 * bit[4:2] = l4_hdr_type
731 * bit[7] = outer_l3_type
733 idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10);
734 return mlx5_ptype_table[idx] | rxq->tunnel * !!(idx & (1 << 6));
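/*
 * Decode note (illustrative): bits 7:6 of the index come from the CQE
 * pkt_info field and bits 5:0 from hdr_type_etc[15:10]. Bit 6 marks a
 * tunneled completion, so the !!(idx & (1 << 6)) factor ORs in the queue's
 * configured tunnel packet type only for tunneled packets.
 */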
738 * Initialize Rx WQ and indexes.
741 * Pointer to RX queue structure.
744 mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
746 const unsigned int wqe_n = 1 << rxq->elts_n;
749 for (i = 0; (i != wqe_n); ++i) {
750 volatile struct mlx5_wqe_data_seg *scat;
754 if (mlx5_rxq_mprq_enabled(rxq)) {
755 struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[i];
757 scat = &((volatile struct mlx5_wqe_mprq *)
759 addr = (uintptr_t)mlx5_mprq_buf_addr(buf);
760 byte_count = (1 << rxq->strd_sz_n) *
761 (1 << rxq->strd_num_n);
763 struct rte_mbuf *buf = (*rxq->elts)[i];
765 scat = &((volatile struct mlx5_wqe_data_seg *)
767 addr = rte_pktmbuf_mtod(buf, uintptr_t);
768 byte_count = DATA_LEN(buf);
770 /* scat->addr must be able to store a pointer. */
771 assert(sizeof(scat->addr) >= sizeof(uintptr_t));
772 *scat = (struct mlx5_wqe_data_seg){
773 .addr = rte_cpu_to_be_64(addr),
774 .byte_count = rte_cpu_to_be_32(byte_count),
775 .lkey = mlx5_rx_addr2mr(rxq, addr),
778 rxq->consumed_strd = 0;
779 rxq->decompressed = 0;
781 rxq->zip = (struct rxq_zip){
784 /* Update doorbell counter. */
785 rxq->rq_ci = wqe_n >> rxq->sges_n;
787 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
791 * Modify a Verbs/DevX queue state.
792 * This must be called from the primary process.
795 * Pointer to Ethernet device.
797 * State modify request parameters.
800 * 0 in case of success else non-zero value and rte_errno is set.
803 mlx5_queue_state_modify_primary(struct rte_eth_dev *dev,
804 const struct mlx5_mp_arg_queue_state_modify *sm)
807 struct mlx5_priv *priv = dev->data->dev_private;
810 struct mlx5_rxq_data *rxq = (*priv->rxqs)[sm->queue_id];
811 struct mlx5_rxq_ctrl *rxq_ctrl =
812 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
814 if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV) {
815 struct ibv_wq_attr mod = {
816 .attr_mask = IBV_WQ_ATTR_STATE,
817 .wq_state = sm->state,
820 ret = mlx5_glue->modify_wq(rxq_ctrl->obj->wq, &mod);
821 } else { /* rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ. */
822 struct mlx5_devx_modify_rq_attr rq_attr;
824 memset(&rq_attr, 0, sizeof(rq_attr));
825 if (sm->state == IBV_WQS_RESET) {
826 rq_attr.rq_state = MLX5_RQC_STATE_ERR;
827 rq_attr.state = MLX5_RQC_STATE_RST;
828 } else if (sm->state == IBV_WQS_RDY) {
829 rq_attr.rq_state = MLX5_RQC_STATE_RST;
830 rq_attr.state = MLX5_RQC_STATE_RDY;
831 } else if (sm->state == IBV_WQS_ERR) {
832 rq_attr.rq_state = MLX5_RQC_STATE_RDY;
833 rq_attr.state = MLX5_RQC_STATE_ERR;
835 ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq,
839 DRV_LOG(ERR, "Cannot change Rx WQ state to %u - %s\n",
840 sm->state, strerror(errno));
845 struct mlx5_txq_data *txq = (*priv->txqs)[sm->queue_id];
846 struct mlx5_txq_ctrl *txq_ctrl =
847 container_of(txq, struct mlx5_txq_ctrl, txq);
848 struct ibv_qp_attr mod = {
849 .qp_state = IBV_QPS_RESET,
850 .port_num = (uint8_t)priv->ibv_port,
852 struct ibv_qp *qp = txq_ctrl->ibv->qp;
854 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
856 DRV_LOG(ERR, "Cannot change the Tx QP state to RESET "
857 "%s\n", strerror(errno));
861 mod.qp_state = IBV_QPS_INIT;
862 ret = mlx5_glue->modify_qp(qp, &mod,
863 (IBV_QP_STATE | IBV_QP_PORT));
865 DRV_LOG(ERR, "Cannot change Tx QP state to INIT %s\n",
870 mod.qp_state = IBV_QPS_RTR;
871 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
873 DRV_LOG(ERR, "Cannot change Tx QP state to RTR %s\n",
878 mod.qp_state = IBV_QPS_RTS;
879 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
881 DRV_LOG(ERR, "Cannot change Tx QP state to RTS %s\n",
891 * Modify a Verbs/DevX queue state.
894 * Pointer to Ethernet device.
896 * State modify request parameters.
899 * 0 in case of success else non-zero value.
902 mlx5_queue_state_modify(struct rte_eth_dev *dev,
903 struct mlx5_mp_arg_queue_state_modify *sm)
907 switch (rte_eal_process_type()) {
908 case RTE_PROC_PRIMARY:
909 ret = mlx5_queue_state_modify_primary(dev, sm);
911 case RTE_PROC_SECONDARY:
912 ret = mlx5_mp_req_queue_state_modify(dev, sm);
922 * The function moves the RQ to the reset state when the first error CQE is
923 * seen, then the caller's loop drains the CQ. When the CQ is empty,
924 * it moves the RQ state to ready and re-initializes the RQ.
925 * Identifying the next CQE and counting errors are the caller's responsibility.
928 * Pointer to RX queue structure.
929 * @param[in] mbuf_prepare
930 * Whether to prepare mbufs for the RQ.
933 * -1 in case of recovery error, otherwise the CQE status.
936 mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t mbuf_prepare)
938 const uint16_t cqe_n = 1 << rxq->cqe_n;
939 const uint16_t cqe_mask = cqe_n - 1;
940 const unsigned int wqe_n = 1 << rxq->elts_n;
941 struct mlx5_rxq_ctrl *rxq_ctrl =
942 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
944 volatile struct mlx5_cqe *cqe;
945 volatile struct mlx5_err_cqe *err_cqe;
947 .cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask],
949 struct mlx5_mp_arg_queue_state_modify sm;
952 switch (rxq->err_state) {
953 case MLX5_RXQ_ERR_STATE_NO_ERROR:
954 rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_RESET;
956 case MLX5_RXQ_ERR_STATE_NEED_RESET:
958 sm.queue_id = rxq->idx;
959 sm.state = IBV_WQS_RESET;
960 if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv), &sm))
962 if (rxq_ctrl->dump_file_n <
963 rxq_ctrl->priv->config.max_dump_files_num) {
964 MKSTR(err_str, "Unexpected CQE error syndrome "
965 "0x%02x CQN = %u RQN = %u wqe_counter = %u"
966 " rq_ci = %u cq_ci = %u", u.err_cqe->syndrome,
967 rxq->cqn, rxq_ctrl->wqn,
968 rte_be_to_cpu_16(u.err_cqe->wqe_counter),
969 rxq->rq_ci << rxq->sges_n, rxq->cq_ci);
970 MKSTR(name, "dpdk_mlx5_port_%u_rxq_%u_%u",
971 rxq->port_id, rxq->idx, (uint32_t)rte_rdtsc());
972 mlx5_dump_debug_information(name, NULL, err_str, 0);
973 mlx5_dump_debug_information(name, "MLX5 Error CQ:",
974 (const void *)((uintptr_t)
976 sizeof(*u.cqe) * cqe_n);
977 mlx5_dump_debug_information(name, "MLX5 Error RQ:",
978 (const void *)((uintptr_t)
981 rxq_ctrl->dump_file_n++;
983 rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_READY;
985 case MLX5_RXQ_ERR_STATE_NEED_READY:
986 ret = check_cqe(u.cqe, cqe_n, rxq->cq_ci);
987 if (ret == MLX5_CQE_STATUS_HW_OWN) {
989 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
992 * The RQ consumer index must be zeroed while moving
993 * from RESET state to RDY state.
995 *rxq->rq_db = rte_cpu_to_be_32(0);
998 sm.queue_id = rxq->idx;
999 sm.state = IBV_WQS_RDY;
1000 if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv),
1004 const uint16_t q_mask = wqe_n - 1;
1006 struct rte_mbuf **elt;
1008 unsigned int n = wqe_n - (rxq->rq_ci -
1011 for (i = 0; i < (int)n; ++i) {
1012 elt_idx = (rxq->rq_ci + i) & q_mask;
1013 elt = &(*rxq->elts)[elt_idx];
1014 *elt = rte_mbuf_raw_alloc(rxq->mp);
1016 for (i--; i >= 0; --i) {
1017 elt_idx = (rxq->rq_ci +
1021 rte_pktmbuf_free_seg
1028 mlx5_rxq_initialize(rxq);
1029 rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
1038 * Get size of the next packet for a given CQE. For compressed CQEs, the
1039 * consumer index is updated only once all packets of the current one have
1043 * Pointer to RX queue.
1047 * Store pointer to mini-CQE if compressed. Otherwise, the pointer is not
1051 * 0 in case of empty CQE, otherwise the packet size in bytes.
1054 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
1055 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe)
1057 struct rxq_zip *zip = &rxq->zip;
1058 uint16_t cqe_n = cqe_cnt + 1;
1064 /* Process compressed data in the CQE and mini arrays. */
1066 volatile struct mlx5_mini_cqe8 (*mc)[8] =
1067 (volatile struct mlx5_mini_cqe8 (*)[8])
1068 (uintptr_t)(&(*rxq->cqes)[zip->ca &
1071 len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt);
1072 *mcqe = &(*mc)[zip->ai & 7];
1073 if ((++zip->ai & 7) == 0) {
1074 /* Invalidate consumed CQEs */
1077 while (idx != end) {
1078 (*rxq->cqes)[idx & cqe_cnt].op_own =
1079 MLX5_CQE_INVALIDATE;
1083 * Increment consumer index to skip the number
1084 * of CQEs consumed. Hardware leaves holes in
1085 * the CQ ring for software use.
1090 if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) {
1091 /* Invalidate the rest */
1095 while (idx != end) {
1096 (*rxq->cqes)[idx & cqe_cnt].op_own =
1097 MLX5_CQE_INVALIDATE;
1100 rxq->cq_ci = zip->cq_ci;
1104 * No compressed data, get next CQE and verify if it is
1111 ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
1112 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
1113 if (unlikely(ret == MLX5_CQE_STATUS_ERR ||
1115 ret = mlx5_rx_err_handle(rxq, 0);
1116 if (ret == MLX5_CQE_STATUS_HW_OWN ||
1124 op_own = cqe->op_own;
1125 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) {
1126 volatile struct mlx5_mini_cqe8 (*mc)[8] =
1127 (volatile struct mlx5_mini_cqe8 (*)[8])
1128 (uintptr_t)(&(*rxq->cqes)
1132 /* Fix endianness. */
1133 zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
1135 * Current mini array position is the one
1136 * returned by check_cqe64().
1138 * If completion comprises several mini arrays,
1139 * as a special case the second one is located
1140 * 7 CQEs after the initial CQE instead of 8
1141 * for subsequent ones.
1143 zip->ca = rxq->cq_ci;
1144 zip->na = zip->ca + 7;
1145 /* Compute the next non compressed CQE. */
1147 zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
1148 /* Get packet size to return. */
1149 len = rte_be_to_cpu_32((*mc)[0].byte_cnt);
1152 /* Prefetch all to be invalidated */
1155 while (idx != end) {
1156 rte_prefetch0(&(*rxq->cqes)[(idx) &
1161 len = rte_be_to_cpu_32(cqe->byte_cnt);
1164 if (unlikely(rxq->err_state)) {
1165 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1166 ++rxq->stats.idropped;
1174 * Translate RX completion flags to offload flags.
1180 * Offload flags (ol_flags) for struct rte_mbuf.
1182 static inline uint32_t
1183 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe)
1185 uint32_t ol_flags = 0;
1186 uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);
1190 MLX5_CQE_RX_L3_HDR_VALID,
1191 PKT_RX_IP_CKSUM_GOOD) |
1193 MLX5_CQE_RX_L4_HDR_VALID,
1194 PKT_RX_L4_CKSUM_GOOD);
1199 * Fill in mbuf fields from RX completion flags.
1200 * Note that pkt->ol_flags should be initialized outside of this function.
1203 * Pointer to RX queue.
1208 * @param rss_hash_res
1209 * Packet RSS Hash result.
1212 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
1213 volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res)
1215 /* Update packet information. */
1216 pkt->packet_type = rxq_cq_to_pkt_type(rxq, cqe);
1217 if (rss_hash_res && rxq->rss_hash) {
1218 pkt->hash.rss = rss_hash_res;
1219 pkt->ol_flags |= PKT_RX_RSS_HASH;
1221 if (rxq->mark && MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) {
1222 pkt->ol_flags |= PKT_RX_FDIR;
1223 if (cqe->sop_drop_qpn !=
1224 rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) {
1225 uint32_t mark = cqe->sop_drop_qpn;
1227 pkt->ol_flags |= PKT_RX_FDIR_ID;
1228 pkt->hash.fdir.hi = mlx5_flow_mark_get(mark);
1232 pkt->ol_flags |= rxq_cq_to_ol_flags(cqe);
1233 if (rxq->vlan_strip &&
1234 (cqe->hdr_type_etc & rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) {
1235 pkt->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1236 pkt->vlan_tci = rte_be_to_cpu_16(cqe->vlan_info);
1238 if (rxq->hw_timestamp) {
1239 pkt->timestamp = rte_be_to_cpu_64(cqe->timestamp);
1240 pkt->ol_flags |= PKT_RX_TIMESTAMP;
1245 * DPDK callback for RX.
1248 * Generic pointer to RX queue structure.
1250 * Array to store received packets.
1252 * Maximum number of packets in array.
1255 * Number of packets successfully received (<= pkts_n).
1258 mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1260 struct mlx5_rxq_data *rxq = dpdk_rxq;
1261 const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1;
1262 const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1;
1263 const unsigned int sges_n = rxq->sges_n;
1264 struct rte_mbuf *pkt = NULL;
1265 struct rte_mbuf *seg = NULL;
1266 volatile struct mlx5_cqe *cqe =
1267 &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1269 unsigned int rq_ci = rxq->rq_ci << sges_n;
1270 int len = 0; /* keep its value across iterations. */
1273 unsigned int idx = rq_ci & wqe_cnt;
1274 volatile struct mlx5_wqe_data_seg *wqe =
1275 &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx];
1276 struct rte_mbuf *rep = (*rxq->elts)[idx];
1277 volatile struct mlx5_mini_cqe8 *mcqe = NULL;
1278 uint32_t rss_hash_res;
1286 rep = rte_mbuf_raw_alloc(rxq->mp);
1287 if (unlikely(rep == NULL)) {
1288 ++rxq->stats.rx_nombuf;
1291 * no buffers before we even started,
1292 * bail out silently.
1296 while (pkt != seg) {
1297 assert(pkt != (*rxq->elts)[idx]);
1301 rte_mbuf_raw_free(pkt);
1307 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1308 len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe);
1310 rte_mbuf_raw_free(rep);
1314 assert(len >= (rxq->crc_present << 2));
1316 /* If compressed, take hash result from mini-CQE. */
1317 rss_hash_res = rte_be_to_cpu_32(mcqe == NULL ?
1319 mcqe->rx_hash_result);
1320 rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
1321 if (rxq->crc_present)
1322 len -= RTE_ETHER_CRC_LEN;
1325 DATA_LEN(rep) = DATA_LEN(seg);
1326 PKT_LEN(rep) = PKT_LEN(seg);
1327 SET_DATA_OFF(rep, DATA_OFF(seg));
1328 PORT(rep) = PORT(seg);
1329 (*rxq->elts)[idx] = rep;
1331 * Fill NIC descriptor with the new buffer. The lkey and size
1332 * of the buffers are already known, only the buffer address
1335 wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
1336 /* If there's only one MR, no need to replace LKey in WQE. */
1337 if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
1338 wqe->lkey = mlx5_rx_mb2mr(rxq, rep);
1339 if (len > DATA_LEN(seg)) {
1340 len -= DATA_LEN(seg);
1345 DATA_LEN(seg) = len;
1346 #ifdef MLX5_PMD_SOFT_COUNTERS
1347 /* Increment bytes counter. */
1348 rxq->stats.ibytes += PKT_LEN(pkt);
1350 /* Return packet. */
1355 /* Align consumer index to the next stride. */
1360 if (unlikely((i == 0) && ((rq_ci >> sges_n) == rxq->rq_ci)))
1362 /* Update the consumer index. */
1363 rxq->rq_ci = rq_ci >> sges_n;
1365 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1367 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
1368 #ifdef MLX5_PMD_SOFT_COUNTERS
1369 /* Increment packets counter. */
1370 rxq->stats.ipackets += i;
1376 mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
1378 struct mlx5_mprq_buf *buf = opaque;
1380 if (rte_atomic16_read(&buf->refcnt) == 1) {
1381 rte_mempool_put(buf->mp, buf);
1382 } else if (rte_atomic16_add_return(&buf->refcnt, -1) == 0) {
1383 rte_atomic16_set(&buf->refcnt, 1);
1384 rte_mempool_put(buf->mp, buf);
1389 mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf)
1391 mlx5_mprq_buf_free_cb(NULL, buf);
1395 mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx)
1397 struct mlx5_mprq_buf *rep = rxq->mprq_repl;
1398 volatile struct mlx5_wqe_data_seg *wqe =
1399 &((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg;
1402 assert(rep != NULL);
1403 /* Replace MPRQ buf. */
1404 (*rxq->mprq_bufs)[rq_idx] = rep;
1406 addr = mlx5_mprq_buf_addr(rep);
1407 wqe->addr = rte_cpu_to_be_64((uintptr_t)addr);
1408 /* If there's only one MR, no need to replace LKey in WQE. */
1409 if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
1410 wqe->lkey = mlx5_rx_addr2mr(rxq, (uintptr_t)addr);
1411 /* Stash a mbuf for next replacement. */
1412 if (likely(!rte_mempool_get(rxq->mprq_mp, (void **)&rep)))
1413 rxq->mprq_repl = rep;
1415 rxq->mprq_repl = NULL;
1419 * DPDK callback for RX with Multi-Packet RQ support.
1422 * Generic pointer to RX queue structure.
1424 * Array to store received packets.
1426 * Maximum number of packets in array.
1429 * Number of packets successfully received (<= pkts_n).
1432 mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1434 struct mlx5_rxq_data *rxq = dpdk_rxq;
1435 const unsigned int strd_n = 1 << rxq->strd_num_n;
1436 const unsigned int strd_sz = 1 << rxq->strd_sz_n;
1437 const unsigned int strd_shift =
1438 MLX5_MPRQ_STRIDE_SHIFT_BYTE * rxq->strd_shift_en;
1439 const unsigned int cq_mask = (1 << rxq->cqe_n) - 1;
1440 const unsigned int wq_mask = (1 << rxq->elts_n) - 1;
1441 volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
1443 uint32_t rq_ci = rxq->rq_ci;
1444 uint16_t consumed_strd = rxq->consumed_strd;
1445 struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
1447 while (i < pkts_n) {
1448 struct rte_mbuf *pkt;
1456 volatile struct mlx5_mini_cqe8 *mcqe = NULL;
1457 uint32_t rss_hash_res = 0;
1459 if (consumed_strd == strd_n) {
1460 /* Replace WQE only if the buffer is still in use. */
1461 if (rte_atomic16_read(&buf->refcnt) > 1) {
1462 mprq_buf_replace(rxq, rq_ci & wq_mask);
1463 /* Release the old buffer. */
1464 mlx5_mprq_buf_free(buf);
1465 } else if (unlikely(rxq->mprq_repl == NULL)) {
1466 struct mlx5_mprq_buf *rep;
1469 * Currently, the MPRQ mempool is out of buffers
1470 * and memcpy is done regardless of the size of the Rx
1471 * packet. Retry allocation to get back to
1474 if (!rte_mempool_get(rxq->mprq_mp,
1476 rxq->mprq_repl = rep;
1478 /* Advance to the next WQE. */
1481 buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
1483 cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
1484 ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe);
1488 strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
1489 MLX5_MPRQ_STRIDE_NUM_SHIFT;
1491 consumed_strd += strd_cnt;
1492 if (byte_cnt & MLX5_MPRQ_FILLER_MASK)
1495 rss_hash_res = rte_be_to_cpu_32(cqe->rx_hash_res);
1496 strd_idx = rte_be_to_cpu_16(cqe->wqe_counter);
1498 /* mini-CQE for MPRQ doesn't have hash result. */
1499 strd_idx = rte_be_to_cpu_16(mcqe->stride_idx);
1501 assert(strd_idx < strd_n);
1502 assert(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) & wq_mask));
1504 * Currently configured to receive a packet per stride. But if
1505 * the MTU is adjusted through the kernel interface, the device could
1506 * consume multiple strides without raising an error. In this
1507 * case, the packet should be dropped because it is bigger than
1508 * max_rx_pkt_len.
1510 if (unlikely(strd_cnt > 1)) {
1511 ++rxq->stats.idropped;
1514 pkt = rte_pktmbuf_alloc(rxq->mp);
1515 if (unlikely(pkt == NULL)) {
1516 ++rxq->stats.rx_nombuf;
1519 len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
1520 assert((int)len >= (rxq->crc_present << 2));
1521 if (rxq->crc_present)
1522 len -= RTE_ETHER_CRC_LEN;
1523 offset = strd_idx * strd_sz + strd_shift;
1524 addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf), offset);
1525 /* Initialize the offload flag. */
1528 * Memcpy packets to the target mbuf if:
1529 * - The size of the packet is smaller than mprq_max_memcpy_len.
1530 * - The Mempool for Multi-Packet RQ is out of buffers.
1532 if (len <= rxq->mprq_max_memcpy_len || rxq->mprq_repl == NULL) {
1534 * When memcpy'ing packet due to out-of-buffer, the
1535 * packet must be smaller than the target mbuf.
1537 if (unlikely(rte_pktmbuf_tailroom(pkt) < len)) {
1538 rte_pktmbuf_free_seg(pkt);
1539 ++rxq->stats.idropped;
1542 rte_memcpy(rte_pktmbuf_mtod(pkt, void *), addr, len);
1544 rte_iova_t buf_iova;
1545 struct rte_mbuf_ext_shared_info *shinfo;
1546 uint16_t buf_len = strd_cnt * strd_sz;
1548 /* Increment the refcnt of the whole chunk. */
1549 rte_atomic16_add_return(&buf->refcnt, 1);
1550 assert((uint16_t)rte_atomic16_read(&buf->refcnt) <=
1552 addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM);
1554 * MLX5 device doesn't use iova but it is necessary in a
1555 * case where the Rx packet is transmitted via a
1558 buf_iova = rte_mempool_virt2iova(buf) +
1559 RTE_PTR_DIFF(addr, buf);
1560 shinfo = rte_pktmbuf_ext_shinfo_init_helper(addr,
1561 &buf_len, mlx5_mprq_buf_free_cb, buf);
1563 * EXT_ATTACHED_MBUF will be set in pkt->ol_flags when
1564 * attaching the stride to the mbuf, and more offload flags
1565 * will be added below by calling rxq_cq_to_mbuf().
1566 * Other fields will be overwritten.
1568 rte_pktmbuf_attach_extbuf(pkt, addr, buf_iova, buf_len,
1570 rte_pktmbuf_reset_headroom(pkt);
1571 assert(pkt->ol_flags == EXT_ATTACHED_MBUF);
1573 * Prevent potential overflow due to MTU change through
1576 if (unlikely(rte_pktmbuf_tailroom(pkt) < len)) {
1577 rte_pktmbuf_free_seg(pkt);
1578 ++rxq->stats.idropped;
1582 rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
1584 DATA_LEN(pkt) = len;
1585 PORT(pkt) = rxq->port_id;
1586 #ifdef MLX5_PMD_SOFT_COUNTERS
1587 /* Increment bytes counter. */
1588 rxq->stats.ibytes += PKT_LEN(pkt);
1590 /* Return packet. */
1594 /* Update the consumer indexes. */
1595 rxq->consumed_strd = consumed_strd;
1597 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1598 if (rq_ci != rxq->rq_ci) {
1601 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
1603 #ifdef MLX5_PMD_SOFT_COUNTERS
1604 /* Increment packets counter. */
1605 rxq->stats.ipackets += i;
1611 * Dummy DPDK callback for TX.
1613 * This function is used to temporarily replace the real callback during
1614 * unsafe control operations on the queue, or in case of error.
1617 * Generic pointer to TX queue structure.
1619 * Packets to transmit.
1621 * Number of packets in array.
1624 * Number of packets successfully transmitted (<= pkts_n).
1627 removed_tx_burst(void *dpdk_txq __rte_unused,
1628 struct rte_mbuf **pkts __rte_unused,
1629 uint16_t pkts_n __rte_unused)
1636 * Dummy DPDK callback for RX.
1638 * This function is used to temporarily replace the real callback during
1639 * unsafe control operations on the queue, or in case of error.
1642 * Generic pointer to RX queue structure.
1644 * Array to store received packets.
1646 * Maximum number of packets in array.
1649 * Number of packets successfully received (<= pkts_n).
1652 removed_rx_burst(void *dpdk_txq __rte_unused,
1653 struct rte_mbuf **pkts __rte_unused,
1654 uint16_t pkts_n __rte_unused)
1661 * Vectorized Rx/Tx routines are not compiled in when required vector
1662 * instructions are not supported on a target architecture. The following null
1663 * stubs are needed for linkage when those are not included outside of this file
1664 * (e.g. mlx5_rxtx_vec_sse.c for x86).
1668 mlx5_rx_burst_vec(void *dpdk_txq __rte_unused,
1669 struct rte_mbuf **pkts __rte_unused,
1670 uint16_t pkts_n __rte_unused)
1676 mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)
1682 mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused)
1688 * Free the mbufs from the linear array of pointers.
1691 * Pointer to the array of packets to be freed.
1693 * Number of packets to be freed.
1695 * Configured Tx offloads mask. It is fully defined at
1696 * compile time and may be used for optimization.
1698 static __rte_always_inline void
1699 mlx5_tx_free_mbuf(struct rte_mbuf **restrict pkts,
1700 unsigned int pkts_n,
1701 unsigned int olx __rte_unused)
1703 struct rte_mempool *pool = NULL;
1704 struct rte_mbuf **p_free = NULL;
1705 struct rte_mbuf *mbuf;
1706 unsigned int n_free = 0;
1709 * The implemented algorithm eliminates
1710 * copying pointers to temporary array
1711 * for rte_mempool_put_bulk() calls.
1718 * Decrement mbuf reference counter, detach
1719 * indirect and external buffers if needed.
1721 mbuf = rte_pktmbuf_prefree_seg(*pkts);
1722 if (likely(mbuf != NULL)) {
1723 assert(mbuf == *pkts);
1724 if (likely(n_free != 0)) {
1725 if (unlikely(pool != mbuf->pool))
1726 /* From different pool. */
1729 /* Start new scan array. */
1736 if (unlikely(pkts_n == 0)) {
1742 * This happens if the mbuf is still referenced.
1743 * We can't put it back into the pool, so skip it.
1747 if (unlikely(n_free != 0))
1748 /* There is some array to free.*/
1750 if (unlikely(pkts_n == 0))
1751 /* Last mbuf, nothing to free. */
1757 * This loop is implemented to avoid multiple
1758 * inlining of rte_mempool_put_bulk().
1764 * Free the array of pre-freed mbufs
1765 * belonging to the same memory pool.
1767 rte_mempool_put_bulk(pool, (void *)p_free, n_free);
1768 if (unlikely(mbuf != NULL)) {
1769 /* There is a request to start a new scan. */
1774 if (likely(pkts_n != 0))
1777 * This is the last mbuf to be freed.
1778 * Do one more loop iteration to complete.
1779 * This is the rare case of the last unique mbuf.
1784 if (likely(pkts_n == 0))
1793 * Free the mbufs from the elts ring buffer up to the new tail.
1796 * Pointer to Tx queue structure.
1798 * Index in elts to free up to, becomes new elts tail.
1800 * Configured Tx offloads mask. It is fully defined at
1801 * compile time and may be used for optimization.
1803 static __rte_always_inline void
1804 mlx5_tx_free_elts(struct mlx5_txq_data *restrict txq,
1806 unsigned int olx __rte_unused)
1808 uint16_t n_elts = tail - txq->elts_tail;
1811 assert(n_elts <= txq->elts_s);
1813 * Implement a loop to support ring buffer wraparound
1814 * with single inlining of mlx5_tx_free_mbuf().
1819 part = txq->elts_s - (txq->elts_tail & txq->elts_m);
1820 part = RTE_MIN(part, n_elts);
1822 assert(part <= txq->elts_s);
1823 mlx5_tx_free_mbuf(&txq->elts[txq->elts_tail & txq->elts_m],
1825 txq->elts_tail += part;
1831 * Store the mbufs being sent into the elts ring buffer.
1832 * On Tx completion these mbufs will be freed.
1835 * Pointer to Tx queue structure.
1837 * Pointer to array of packets to be stored.
1839 * Number of packets to be stored.
1841 * Configured Tx offloads mask. It is fully defined at
1842 * compile time and may be used for optimization.
1844 static __rte_always_inline void
1845 mlx5_tx_copy_elts(struct mlx5_txq_data *restrict txq,
1846 struct rte_mbuf **restrict pkts,
1847 unsigned int pkts_n,
1848 unsigned int olx __rte_unused)
1851 struct rte_mbuf **elts = (struct rte_mbuf **)txq->elts;
1855 part = txq->elts_s - (txq->elts_head & txq->elts_m);
1857 assert(part <= txq->elts_s);
1858 /* This code is a good candidate for vectorizing with SIMD. */
1859 rte_memcpy((void *)(elts + (txq->elts_head & txq->elts_m)),
1861 RTE_MIN(part, pkts_n) * sizeof(struct rte_mbuf *));
1862 txq->elts_head += pkts_n;
1863 if (unlikely(part < pkts_n))
1864 /* The copy is wrapping around the elts array. */
1865 rte_memcpy((void *)elts, (void *)(pkts + part),
1866 (pkts_n - part) * sizeof(struct rte_mbuf *));
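/*
 * Worked example (hypothetical sizes, illustration only): with elts_s of
 * 256, elts_head & elts_m at 250 and pkts_n of 10, "part" is 6, so six
 * mbuf pointers are copied to the tail of the ring and the remaining four
 * wrap around to the beginning of the elts array.
 */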
1870 * Manage TX completions. This routine checks the CQ for
1871 * arrived CQEs, deduces the last completed WQE in the SQ,
1872 * updates the SQ producer index and frees all completed mbufs.
1875 * Pointer to TX queue structure.
1877 * Configured Tx offloads mask. It is fully defined at
1878 * compile time and may be used for optimization.
1880 * NOTE: not inlined intentionally, it makes the tx_burst
1881 * routine smaller, simpler and faster - based on experiments.
1884 mlx5_tx_handle_completion(struct mlx5_txq_data *restrict txq,
1885 unsigned int olx __rte_unused)
1887 bool update = false;
1891 volatile struct mlx5_wqe_cseg *cseg;
1892 volatile struct mlx5_cqe *cqe;
1895 cqe = &txq->cqes[txq->cq_ci & txq->cqe_m];
1896 ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci);
1897 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
1898 if (likely(ret != MLX5_CQE_STATUS_ERR)) {
1899 /* No new CQEs in completion queue. */
1900 assert(ret == MLX5_CQE_STATUS_HW_OWN);
1901 if (likely(update)) {
1902 /* Update the consumer index. */
1903 rte_compiler_barrier();
1905 rte_cpu_to_be_32(txq->cq_ci);
1909 /* Some error occurred, try to restart. */
1911 tail = mlx5_tx_error_cqe_handle
1912 (txq, (volatile struct mlx5_err_cqe *)cqe);
1914 /* Normal transmit completion. */
1917 txq->wqe_pi = rte_be_to_cpu_16(cqe->wqe_counter);
1918 cseg = (volatile struct mlx5_wqe_cseg *)
1919 (txq->wqes + (txq->wqe_pi & txq->wqe_m));
1926 if (likely(tail != txq->elts_tail)) {
1927 /* Free data buffers from elts. */
1928 mlx5_tx_free_elts(txq, tail, olx);
1929 assert(tail == txq->elts_tail);
1936 * Check if the completion request flag should be set in the last WQE.
1937 * Both pushed mbufs and WQEs are monitored and the completion request
1938 * flag is set if any of the thresholds is reached.
1941 * Pointer to TX queue structure.
1943 * Number of mbuf not stored yet in elts array.
1945 * Pointer to burst routine local context.
1947 * Configured Tx offloads mask. It is fully defined at
1948 * compile time and may be used for optimization.
1950 static __rte_always_inline void
1951 mlx5_tx_request_completion(struct mlx5_txq_data *restrict txq,
1952 unsigned int n_mbuf,
1953 struct mlx5_txq_local *restrict loc,
1954 unsigned int olx __rte_unused)
1956 uint16_t head = txq->elts_head + n_mbuf;
1958 if ((uint16_t)(head - txq->elts_comp) >= MLX5_TX_COMP_THRESH ||
1959 (uint16_t)(txq->wqe_ci - txq->wqe_comp) >= txq->wqe_thres) {
1960 volatile struct mlx5_wqe *last = loc->wqe_last;
1962 txq->elts_comp = head;
1963 txq->wqe_comp = txq->wqe_ci;
1964 /* Request unconditional completion on last WQE. */
1965 last->cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
1966 MLX5_COMP_MODE_OFFSET);
1967 /* Save elts_head in unused "immediate" field of WQE. */
1968 last->cseg.misc = head;
1970 * A CQE slot must always be available. Count the
1971 * issued CQE "always" requests instead of the producer
1972 * index, because there can be CQEs with errors and the
1973 * difference with ci may become inconsistent.
1975 assert(txq->cqe_s > ++txq->cq_pi);
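/*
 * Illustrative note (the threshold value is a build-time default and is
 * quoted here as an assumption): with MLX5_TX_COMP_THRESH at its usual
 * value of 32, a burst that accumulates 32 not-yet-completed mbufs since
 * the last request (or crosses the wqe_thres distance in WQEs) rewrites
 * the last WQE's Control Segment to request an unconditional completion
 * and rebases elts_comp/wqe_comp for the next round.
 */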
1980 * DPDK callback to check the status of a tx descriptor.
1985 * The index of the descriptor in the ring.
1988 * The status of the tx descriptor.
1991 mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
1993 struct mlx5_txq_data *restrict txq = tx_queue;
1996 mlx5_tx_handle_completion(txq, 0);
1997 used = txq->elts_head - txq->elts_tail;
1999 return RTE_ETH_TX_DESC_FULL;
2000 return RTE_ETH_TX_DESC_DONE;
2004 * Build the Control Segment with specified opcode:
2005 * - MLX5_OPCODE_SEND
2006 * - MLX5_OPCODE_ENHANCED_MPSW
2010 * Pointer to TX queue structure.
2012 * Pointer to burst routine local context.
2014 * Pointer to WQE to fill with built Control Segment.
2016 * Supposed length of WQE in segments.
2018 * SQ WQE opcode to put into Control Segment.
2020 * Configured Tx offloads mask. It is fully defined at
2021 * compile time and may be used for optimization.
2023 static __rte_always_inline void
2024 mlx5_tx_cseg_init(struct mlx5_txq_data *restrict txq,
2025 struct mlx5_txq_local *restrict loc __rte_unused,
2026 struct mlx5_wqe *restrict wqe,
2028 unsigned int opcode,
2029 unsigned int olx __rte_unused)
2031 struct mlx5_wqe_cseg *restrict cs = &wqe->cseg;
2033 cs->opcode = rte_cpu_to_be_32((txq->wqe_ci << 8) | opcode);
2034 cs->sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
2035 cs->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
2036 MLX5_COMP_MODE_OFFSET);
2037 cs->misc = RTE_BE32(0);
2041 * Build the Ethernet Segment without inlined data.
2042 * Supports Software Parser, Checksums and VLAN
2043 * insertion Tx offload features.
2046 * Pointer to TX queue structure.
2048 * Pointer to burst routine local context.
2050 * Pointer to WQE to fill with built Ethernet Segment.
2052 * Configured Tx offloads mask. It is fully defined at
2053 * compile time and may be used for optimization.
2055 static __rte_always_inline void
2056 mlx5_tx_eseg_none(struct mlx5_txq_data *restrict txq __rte_unused,
2057 struct mlx5_txq_local *restrict loc,
2058 struct mlx5_wqe *restrict wqe,
2061 struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
2065 * Calculate and set check sum flags first, dword field
2066 * in segment may be shared with Software Parser flags.
2068 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2069 es->flags = rte_cpu_to_le_32(csum);
2071 * Calculate and set Software Parser offsets and flags.
2072 * These flags are set for custom UDP and IP tunnel packets.
2074 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2075 /* Fill metadata field if needed. */
2076 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2077 loc->mbuf->ol_flags & PKT_TX_METADATA ?
2078 loc->mbuf->tx_metadata : 0 : 0;
2079 /* Engage VLAN tag insertion feature if requested. */
2080 if (MLX5_TXOFF_CONFIG(VLAN) &&
2081 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
2083 * We should get here only if the device supports
2084 * this feature correctly.
2086 assert(txq->vlan_en);
2087 es->inline_hdr = rte_cpu_to_be_32(MLX5_ETH_WQE_VLAN_INSERT |
2088 loc->mbuf->vlan_tci);
2090 es->inline_hdr = RTE_BE32(0);
2095 * Build the Ethernet Segment with minimal inlined data
2096 * of MLX5_ESEG_MIN_INLINE_SIZE bytes. This is
2097 * used to fill the gap in single WQEBB WQEs.
2098 * Supports Software Parser, Checksums and VLAN
2099 * insertion Tx offload features.
2102 * Pointer to TX queue structure.
2104 * Pointer to burst routine local context.
2106 * Pointer to WQE to fill with built Ethernet Segment.
2108 * Length of VLAN tag insertion if any.
2110 * Configured Tx offloads mask. It is fully defined at
2111 * compile time and may be used for optimization.
2113 static __rte_always_inline void
2114 mlx5_tx_eseg_dmin(struct mlx5_txq_data *restrict txq __rte_unused,
2115 struct mlx5_txq_local *restrict loc,
2116 struct mlx5_wqe *restrict wqe,
2120 struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
2122 uint8_t *psrc, *pdst;
2125 * Calculate and set check sum flags first, dword field
2126 * in segment may be shared with Software Parser flags.
2128 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2129 es->flags = rte_cpu_to_le_32(csum);
2131 * Calculate and set Software Parser offsets and flags.
2132 * These flags are set for custom UDP and IP tunnel packets.
2134 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2135 /* Fill metadata field if needed. */
2136 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2137 loc->mbuf->ol_flags & PKT_TX_METADATA ?
2138 loc->mbuf->tx_metadata : 0 : 0;
2139 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2141 sizeof(rte_v128u32_t)),
2142 "invalid Ethernet Segment data size");
2143 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2145 sizeof(struct rte_vlan_hdr) +
2146 2 * RTE_ETHER_ADDR_LEN),
2147 "invalid Ethernet Segment data size");
2148 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
2149 es->inline_hdr_sz = RTE_BE16(MLX5_ESEG_MIN_INLINE_SIZE);
2150 es->inline_data = *(unaligned_uint16_t *)psrc;
2151 psrc += sizeof(uint16_t);
2152 pdst = (uint8_t *)(es + 1);
2153 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2154 /* Implement VLAN tag insertion as part of the inline data. */
2155 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
2156 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2157 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2158 /* Insert VLAN ethertype + VLAN tag. */
2159 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2160 ((RTE_ETHER_TYPE_VLAN << 16) |
2161 loc->mbuf->vlan_tci);
2162 pdst += sizeof(struct rte_vlan_hdr);
2163 /* Copy the remaining two bytes from the packet data. */
2164 assert(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
2165 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
2167 /* Fill the gap in the title WQEBB with inline data. */
2168 rte_mov16(pdst, psrc);
2173 * Build the Ethernet Segment with entire packet
2174 * data inlining. Checks the boundary of WQEBB and
2175 * ring buffer wrapping, supports Software Parser,
2176 * Checksums and VLAN insertion Tx offload features.
2179 * Pointer to TX queue structure.
2181 * Pointer to burst routine local context.
2183 * Pointer to WQE to fill with built Ethernet Segment.
2185 * Length of VLAN tag insertion if any.
2187 * Length of data to inline (VLAN included, if any).
2189 * TSO flag, set mss field from the packet.
2191 * Configured Tx offloads mask. It is fully defined at
2192 * compile time and may be used for optimization.
2195 * Pointer to the next Data Segment (aligned and wrapped around).
2197 static __rte_always_inline struct mlx5_wqe_dseg *
2198 mlx5_tx_eseg_data(struct mlx5_txq_data *restrict txq,
2199 struct mlx5_txq_local *restrict loc,
2200 struct mlx5_wqe *restrict wqe,
2206 struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
2208 uint8_t *psrc, *pdst;
2212 * Calculate and set check sum flags first, dword field
2213 * in segment may be shared with Software Parser flags.
2215 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2218 csum |= loc->mbuf->tso_segsz;
2219 es->flags = rte_cpu_to_be_32(csum);
2221 es->flags = rte_cpu_to_le_32(csum);
2224 * Calculate and set Software Parser offsets and flags.
2225 * These flags are set for custom UDP and IP tunnel packets.
2227 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2228 /* Fill metadata field if needed. */
2229 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2230 loc->mbuf->ol_flags & PKT_TX_METADATA ?
2231 loc->mbuf->tx_metadata : 0 : 0;
2232 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2234 sizeof(rte_v128u32_t)),
2235 "invalid Ethernet Segment data size");
2236 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2238 sizeof(struct rte_vlan_hdr) +
2239 2 * RTE_ETHER_ADDR_LEN),
2240 "invalid Ethernet Segment data size");
2241 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
2242 es->inline_hdr_sz = rte_cpu_to_be_16(inlen);
2243 es->inline_data = *(unaligned_uint16_t *)psrc;
2244 psrc += sizeof(uint16_t);
2245 pdst = (uint8_t *)(es + 1);
2246 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2247 /* Implement VLAN tag insertion as part of the inline data. */
2248 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
2249 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2250 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2251 /* Insert VLAN ethertype + VLAN tag. */
2252 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2253 ((RTE_ETHER_TYPE_VLAN << 16) |
2254 loc->mbuf->vlan_tci);
2255 pdst += sizeof(struct rte_vlan_hdr);
2256 /* Copy the remaining two bytes from the packet data. */
2257 assert(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
2258 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
2259 psrc += sizeof(uint16_t);
2261 /* Fill the gap in the title WQEBB with inline data. */
2262 rte_mov16(pdst, psrc);
2263 psrc += sizeof(rte_v128u32_t);
2265 pdst = (uint8_t *)(es + 2);
2266 assert(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
2267 assert(pdst < (uint8_t *)txq->wqes_end);
2268 inlen -= MLX5_ESEG_MIN_INLINE_SIZE;
2270 assert(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
2271 return (struct mlx5_wqe_dseg *)pdst;
2274 * The WQEBB space availability is checked by the caller.
2275 * Here we should be aware of WQE ring buffer wraparound only.
2277 part = (uint8_t *)txq->wqes_end - pdst;
2278 part = RTE_MIN(part, inlen);
2280 rte_memcpy(pdst, psrc, part);
2282 if (likely(!inlen)) {
2284 * If return value is not used by the caller
2285 * the code below will be optimized out.
2288 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2289 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
2290 pdst = (uint8_t *)txq->wqes;
2291 return (struct mlx5_wqe_dseg *)pdst;
2293 pdst = (uint8_t *)txq->wqes;
2300 * Copy data from a chain of mbufs to the specified linear buffer.
2301 * If the data from an mbuf is copied completely, that mbuf is
2302 * freed. The local structure is used to keep the byte stream
2303 * state.
2306 * Pointer to the destination linear buffer.
2308 * Pointer to burst routine local context.
2310 * Length of data to be copied.
2312 * Configured Tx offloads mask. It is fully defined at
2313 * compile time and may be used for optimization.
2315 static __rte_always_inline void
2316 mlx5_tx_mseg_memcpy(uint8_t *pdst,
2317 struct mlx5_txq_local *restrict loc,
2319 unsigned int olx __rte_unused)
2321 struct rte_mbuf *mbuf;
2322 unsigned int part, dlen;
2327 /* Allow zero length packets, must check first. */
2328 dlen = rte_pktmbuf_data_len(loc->mbuf);
2329 if (dlen <= loc->mbuf_off) {
2330 /* Exhausted packet, just free. */
2332 loc->mbuf = mbuf->next;
2333 rte_pktmbuf_free_seg(mbuf);
2335 assert(loc->mbuf_nseg > 1);
2340 dlen -= loc->mbuf_off;
2341 psrc = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
2343 part = RTE_MIN(len, dlen);
2344 rte_memcpy(pdst, psrc, part);
2345 loc->mbuf_off += part;
2348 if (loc->mbuf_off >= rte_pktmbuf_data_len(loc->mbuf)) {
2350 /* Exhausted packet, just free. */
2352 loc->mbuf = mbuf->next;
2353 rte_pktmbuf_free_seg(mbuf);
2355 assert(loc->mbuf_nseg >= 1);
2365 * Build the Ethernet Segment with inlined data from
2366 * multi-segment packet. Checks the boundary of WQEBB
2367 * and ring buffer wrapping, supports Software Parser,
2368 * Checksums and VLAN insertion Tx offload features.
2371 * Pointer to TX queue structure.
2373 * Pointer to burst routine local context.
2375 * Pointer to WQE to fill with built Ethernet Segment.
2377 * Length of VLAN tag insertion if any.
2379 * Length of data to inline (VLAN included, if any).
2381 * TSO flag, set mss field from the packet.
2383 * Configured Tx offloads mask. It is fully defined at
2384 * compile time and may be used for optimization.
2387 * Pointer to the next Data Segment (aligned and
2388 * possibly NOT wrapped around - the caller should do
2389 * the wrapping check on its own).
2391 static __rte_always_inline struct mlx5_wqe_dseg *
2392 mlx5_tx_eseg_mdat(struct mlx5_txq_data *restrict txq,
2393 struct mlx5_txq_local *restrict loc,
2394 struct mlx5_wqe *restrict wqe,
2400 struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
2406 * Calculate and set checksum flags first, the uint32_t field
2407 * in the segment may be shared with Software Parser flags.
2409 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2412 csum |= loc->mbuf->tso_segsz;
2413 es->flags = rte_cpu_to_be_32(csum);
2415 es->flags = rte_cpu_to_le_32(csum);
2418 * Calculate and set Software Parser offsets and flags.
2419 * These flags are set for custom UDP and IP tunnel packets.
2421 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2422 /* Fill metadata field if needed. */
2423 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2424 loc->mbuf->ol_flags & PKT_TX_METADATA ?
2425 loc->mbuf->tx_metadata : 0 : 0;
2426 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2428 sizeof(rte_v128u32_t)),
2429 "invalid Ethernet Segment data size");
2430 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2432 sizeof(struct rte_vlan_hdr) +
2433 2 * RTE_ETHER_ADDR_LEN),
2434 "invalid Ethernet Segment data size");
2435 assert(inlen > MLX5_ESEG_MIN_INLINE_SIZE);
2436 es->inline_hdr_sz = rte_cpu_to_be_16(inlen);
2437 pdst = (uint8_t *)&es->inline_data;
2438 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2439 /* Implement VLAN tag insertion as part of the inline data. */
2440 mlx5_tx_mseg_memcpy(pdst, loc, 2 * RTE_ETHER_ADDR_LEN, olx);
2441 pdst += 2 * RTE_ETHER_ADDR_LEN;
2442 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2443 ((RTE_ETHER_TYPE_VLAN << 16) |
2444 loc->mbuf->vlan_tci);
2445 pdst += sizeof(struct rte_vlan_hdr);
2446 inlen -= 2 * RTE_ETHER_ADDR_LEN + sizeof(struct rte_vlan_hdr);
2448 assert(pdst < (uint8_t *)txq->wqes_end);
2450 * The WQEBB space availability is checked by the caller.
2451 * Here we should be aware of WQE ring buffer wraparound only.
2453 part = (uint8_t *)txq->wqes_end - pdst;
2454 part = RTE_MIN(part, inlen);
2457 mlx5_tx_mseg_memcpy(pdst, loc, part, olx);
2459 if (likely(!inlen)) {
2461 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2462 return (struct mlx5_wqe_dseg *)pdst;
2464 pdst = (uint8_t *)txq->wqes;
2470 * Build the Data Segment of pointer type.
2473 * Pointer to TX queue structure.
2475 * Pointer to burst routine local context.
2477 * Pointer to WQE to fill with built Data Segment.
2479 * Data buffer to point.
2481 * Data buffer length.
2483 * Configured Tx offloads mask. It is fully defined at
2484 * compile time and may be used for optimization.
2486 static __rte_always_inline void
2487 mlx5_tx_dseg_ptr(struct mlx5_txq_data *restrict txq,
2488 struct mlx5_txq_local *restrict loc,
2489 struct mlx5_wqe_dseg *restrict dseg,
2492 unsigned int olx __rte_unused)
2496 dseg->bcount = rte_cpu_to_be_32(len);
2497 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
2498 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
2502 * Build the Data Segment of pointer type, or of inline
2503 * type if the data length does not exceed the minimal
2504 * Data Segment inline size.
2507 * Pointer to TX queue structure.
2509 * Pointer to burst routine local context.
2511 * Pointer to WQE to fill with built Data Segment.
2513 * Data buffer to point.
2515 * Data buffer length.
2517 * Configured Tx offloads mask. It is fully defined at
2518 * compile time and may be used for optimization.
2520 static __rte_always_inline void
2521 mlx5_tx_dseg_iptr(struct mlx5_txq_data *restrict txq,
2522 struct mlx5_txq_local *restrict loc,
2523 struct mlx5_wqe_dseg *restrict dseg,
2526 unsigned int olx __rte_unused)
2532 if (len > MLX5_DSEG_MIN_INLINE_SIZE) {
2533 dseg->bcount = rte_cpu_to_be_32(len);
2534 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
2535 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
2539 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
2540 /* Unrolled implementation of generic rte_memcpy. */
2541 dst = (uintptr_t)&dseg->inline_data[0];
2542 src = (uintptr_t)buf;
2543 #ifdef RTE_ARCH_STRICT_ALIGN
2544 memcpy((void *)dst, (const void *)src, len);
2547 *(uint64_t *)dst = *(uint64_t *)src;
2548 dst += sizeof(uint64_t);
2549 src += sizeof(uint64_t);
2552 *(uint32_t *)dst = *(uint32_t *)src;
2553 dst += sizeof(uint32_t);
2554 src += sizeof(uint32_t);
2557 *(uint16_t *)dst = *(uint16_t *)src;
2558 dst += sizeof(uint16_t);
2559 src += sizeof(uint16_t);
2562 *(uint8_t *)dst = *(uint8_t *)src;
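/*
 * Illustrative example for the unrolled copy above (assuming the
 * elided conditions test the 8/4/2/1 bits of len and that
 * MLX5_DSEG_MIN_INLINE_SIZE is 12 bytes): a 12-byte inline copy is
 * performed as one 8-byte and one 4-byte store, an 11-byte copy as
 * 8 + 2 + 1 bytes.
 */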
2567 * Build the Data Segment of inlined data from single
2568 * segment packet, no VLAN insertion.
2571 * Pointer to TX queue structure.
2573 * Pointer to burst routine local context.
2575 * Pointer to WQE to fill with built Data Segment.
2577 * Data buffer to point.
2579 * Data buffer length.
2581 * Configured Tx offloads mask. It is fully defined at
2582 * compile time and may be used for optimization.
2585 * Pointer to the next Data Segment after inlined data.
2586 * Ring buffer wraparound check is needed. We do not
2587 * do it here because it may not be needed for the
2588 * last packet in the eMPW session.
2590 static __rte_always_inline struct mlx5_wqe_dseg *
2591 mlx5_tx_dseg_empw(struct mlx5_txq_data *restrict txq,
2592 struct mlx5_txq_local *restrict loc __rte_unused,
2593 struct mlx5_wqe_dseg *restrict dseg,
2596 unsigned int olx __rte_unused)
2601 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
2602 pdst = &dseg->inline_data[0];
2604 * The WQEBB space availability is checked by the caller.
2605 * Here we should be aware of WQE ring buffer wraparound only.
2607 part = (uint8_t *)txq->wqes_end - pdst;
2608 part = RTE_MIN(part, len);
2610 rte_memcpy(pdst, buf, part);
2614 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2615 /* Note: no final wraparound check here. */
2616 return (struct mlx5_wqe_dseg *)pdst;
2618 pdst = (uint8_t *)txq->wqes;
2625 * Build the Data Segment of inlined data from single
2626 * segment packet with VLAN insertion.
2629 * Pointer to TX queue structure.
2631 * Pointer to burst routine local context.
2633 * Pointer to the dseg to fill with the built Data Segment.
2635 * Data buffer to point.
2637 * Data buffer length.
2639 * Configured Tx offloads mask. It is fully defined at
2640 * compile time and may be used for optimization.
2643 * Pointer to the next Data Segment after inlined data.
2644 * Ring buffer wraparound check is needed.
2646 static __rte_always_inline struct mlx5_wqe_dseg *
2647 mlx5_tx_dseg_vlan(struct mlx5_txq_data *restrict txq,
2648 struct mlx5_txq_local *restrict loc __rte_unused,
2649 struct mlx5_wqe_dseg *restrict dseg,
2652 unsigned int olx __rte_unused)
2658 assert(len > MLX5_ESEG_MIN_INLINE_SIZE);
2659 static_assert(MLX5_DSEG_MIN_INLINE_SIZE ==
2660 (2 * RTE_ETHER_ADDR_LEN),
2661 "invalid Data Segment data size");
2662 dseg->bcount = rte_cpu_to_be_32((len + sizeof(struct rte_vlan_hdr)) |
2663 MLX5_ETH_WQE_DATA_INLINE);
2664 pdst = &dseg->inline_data[0];
2665 memcpy(pdst, buf, MLX5_DSEG_MIN_INLINE_SIZE);
2666 buf += MLX5_DSEG_MIN_INLINE_SIZE;
2667 pdst += MLX5_DSEG_MIN_INLINE_SIZE;
2668 /* Insert VLAN ethertype + VLAN tag. Pointer is aligned. */
2669 assert(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
2670 *(uint32_t *)pdst = rte_cpu_to_be_32((RTE_ETHER_TYPE_VLAN << 16) |
2671 loc->mbuf->vlan_tci);
2672 pdst += sizeof(struct rte_vlan_hdr);
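/*
 * Resulting inline layout sketch (assuming MLX5_DSEG_MIN_INLINE_SIZE
 * is 12 bytes): bytes 0..11 hold the two MAC addresses copied from
 * the packet, bytes 12..15 hold the inserted Ethertype 0x8100 plus
 * the VLAN TCI, and the rest of the packet (starting from its
 * original byte 12) follows; bcount therefore counts len + 4 bytes.
 */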
2673 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
2674 pdst = (uint8_t *)txq->wqes;
2676 * The WQEBB space availability is checked by the caller.
2677 * Here we should be aware of WQE ring buffer wraparound only.
2679 part = (uint8_t *)txq->wqes_end - pdst;
2680 part = RTE_MIN(part, len);
2682 rte_memcpy(pdst, buf, part);
2686 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2687 /* Note: no final wraparound check here. */
2688 return (struct mlx5_wqe_dseg *)pdst;
2690 pdst = (uint8_t *)txq->wqes;
2697 * Build the Ethernet Segment with optionally inlined data with
2698 * VLAN insertion and following Data Segments (if any) from
2699 * multi-segment packet. Used by ordinary send and TSO.
2702 * Pointer to TX queue structure.
2704 * Pointer to burst routine local context.
2706 * Pointer to WQE to fill with built Ethernet/Data Segments.
2708 * Length of VLAN header to insert, 0 means no VLAN insertion.
2710 * Data length to inline. For TSO this parameter specifies the
2711 * exact value, for the ordinary send routine it can be aligned by
2712 * the caller to provide better WQE space saving and data buffer
2713 * start address alignment. This length includes the VLAN header
2716 * Zero means ordinary send, the inlined data can be extended;
2717 * otherwise this is TSO and the inlined data length is fixed.
2719 * Configured Tx offloads mask. It is fully defined at
2720 * compile time and may be used for optimization.
2723 * Actual size of built WQE in segments.
2725 static __rte_always_inline unsigned int
2726 mlx5_tx_mseg_build(struct mlx5_txq_data *restrict txq,
2727 struct mlx5_txq_local *restrict loc,
2728 struct mlx5_wqe *restrict wqe,
2732 unsigned int olx __rte_unused)
2734 struct mlx5_wqe_dseg *restrict dseg;
2737 assert((rte_pktmbuf_pkt_len(loc->mbuf) + vlan) >= inlen);
2738 loc->mbuf_nseg = NB_SEGS(loc->mbuf);
2741 dseg = mlx5_tx_eseg_mdat(txq, loc, wqe, vlan, inlen, tso, olx);
2742 if (!loc->mbuf_nseg)
2745 * There are still some mbufs remaining, not inlined.
2746 * The first mbuf may be partially inlined and we
2747 * must process the possible non-zero data offset.
2749 if (loc->mbuf_off) {
2754 * Exhausted packets must have been dropped before.
2755 * A non-zero offset means there is some data
2756 * remaining in the packet.
2758 assert(loc->mbuf_off < rte_pktmbuf_data_len(loc->mbuf));
2759 assert(rte_pktmbuf_data_len(loc->mbuf));
2760 dptr = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
2762 dlen = rte_pktmbuf_data_len(loc->mbuf) - loc->mbuf_off;
2764 * Build the pointer/minimal data Data Segment.
2765 * Do ring buffer wrapping check in advance.
2767 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
2768 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
2769 mlx5_tx_dseg_iptr(txq, loc, dseg, dptr, dlen, olx);
2770 /* Store the mbuf to be freed on completion. */
2771 assert(loc->elts_free);
2772 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
2775 if (--loc->mbuf_nseg == 0)
2777 loc->mbuf = loc->mbuf->next;
2781 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
2782 struct rte_mbuf *mbuf;
2784 /* Zero length segment found, just skip. */
2786 loc->mbuf = loc->mbuf->next;
2787 rte_pktmbuf_free_seg(mbuf);
2788 if (--loc->mbuf_nseg == 0)
2791 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
2792 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
2795 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
2796 rte_pktmbuf_data_len(loc->mbuf), olx);
2797 assert(loc->elts_free);
2798 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
2801 if (--loc->mbuf_nseg == 0)
2803 loc->mbuf = loc->mbuf->next;
2808 /* Calculate actual segments used from the dseg pointer. */
2809 if ((uintptr_t)wqe < (uintptr_t)dseg)
2810 ds = ((uintptr_t)dseg - (uintptr_t)wqe) / MLX5_WSEG_SIZE;
2812 ds = (((uintptr_t)dseg - (uintptr_t)wqe) +
2813 txq->wqe_s * MLX5_WQE_SIZE) / MLX5_WSEG_SIZE;
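/*
 * Illustrative arithmetic (assuming MLX5_WSEG_SIZE is 16 and
 * MLX5_WQE_SIZE is 64): if dseg ends up 112 bytes past wqe the WQE
 * occupies 112 / 16 = 7 segments; if dseg wrapped around and now
 * lies below wqe, the ring size in bytes (wqe_s * 64) is added
 * before dividing so the difference stays positive.
 */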
2818 * Tx one packet function for multi-segment TSO. Supports all
2819 * types of Tx offloads, uses MLX5_OPCODE_TSO to build WQEs,
2820 * sends one packet per WQE.
2822 * This routine is responsible for storing the processed mbuf
2823 * into the elts ring buffer and updating elts_head.
2826 * Pointer to TX queue structure.
2828 * Pointer to burst routine local context.
2830 * Configured Tx offloads mask. It is fully defined at
2831 * compile time and may be used for optimization.
2834 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
2835 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
2836 * Local context variables partially updated.
2838 static __rte_always_inline enum mlx5_txcmp_code
2839 mlx5_tx_packet_multi_tso(struct mlx5_txq_data *restrict txq,
2840 struct mlx5_txq_local *restrict loc,
2843 struct mlx5_wqe *restrict wqe;
2844 unsigned int ds, dlen, inlen, ntcp, vlan = 0;
2847 * Calculate data length to be inlined to estimate
2848 * the required space in WQE ring buffer.
2850 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
2851 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
2852 vlan = sizeof(struct rte_vlan_hdr);
2853 inlen = loc->mbuf->l2_len + vlan +
2854 loc->mbuf->l3_len + loc->mbuf->l4_len;
2855 if (unlikely((!inlen || !loc->mbuf->tso_segsz)))
2856 return MLX5_TXCMP_CODE_ERROR;
2857 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
2858 inlen += loc->mbuf->outer_l2_len + loc->mbuf->outer_l3_len;
2859 /* Packet must contain all TSO headers. */
2860 if (unlikely(inlen > MLX5_MAX_TSO_HEADER ||
2861 inlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
2862 inlen > (dlen + vlan)))
2863 return MLX5_TXCMP_CODE_ERROR;
2864 assert(inlen >= txq->inlen_mode);
2866 * Check whether there are enough free WQEBBs:
2868 * - Ethernet Segment
2869 * - First Segment of inlined Ethernet data
2870 * - ... data continued ...
2871 * - Data Segments of pointer/min inline type
2873 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
2874 MLX5_ESEG_MIN_INLINE_SIZE +
2876 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
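/*
 * Illustrative estimate (assuming MLX5_ESEG_MIN_INLINE_SIZE is 18
 * and MLX5_WSEG_SIZE is 16): a 3-segment mbuf with inlen = 66 header
 * bytes gives ds = 3 + 2 + (66 - 18 + 15) / 16 = 8 segments, i.e.
 * (8 + 3) / 4 = 2 WQEBBs must be free.
 */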
2877 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
2878 return MLX5_TXCMP_CODE_EXIT;
2879 /* Check for maximal WQE size. */
2880 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
2881 return MLX5_TXCMP_CODE_ERROR;
2882 #ifdef MLX5_PMD_SOFT_COUNTERS
2883 /* Update sent data bytes/packets counters. */
2884 ntcp = (dlen - (inlen - vlan) + loc->mbuf->tso_segsz - 1) /
2885 loc->mbuf->tso_segsz;
2887 * One packet will be added for the mbuf itself
2888 * at the end of mlx5_tx_burst from the
2889 * loc->pkts_sent field.
2892 txq->stats.opackets += ntcp;
2893 txq->stats.obytes += dlen + vlan + ntcp * inlen;
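/*
 * Illustrative example: dlen = 4434, inlen = 54, vlan = 0 and
 * tso_segsz = 1460 give ntcp = (4380 + 1459) / 1460 = 3 produced
 * TCP segments; obytes accounts for the headers replicated by the
 * hardware for each produced segment (the ntcp * inlen term).
 */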
2895 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
2896 loc->wqe_last = wqe;
2897 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_TSO, olx);
2898 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 1, olx);
2899 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
2900 txq->wqe_ci += (ds + 3) / 4;
2901 loc->wqe_free -= (ds + 3) / 4;
2902 return MLX5_TXCMP_CODE_MULTI;
2906 * Tx one packet function for multi-segment SEND. Supports all
2907 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
2908 * sends one packet per WQE, without any data inlining in
2911 * This routine is responsible for storing the processed mbuf
2912 * into the elts ring buffer and updating elts_head.
2915 * Pointer to TX queue structure.
2917 * Pointer to burst routine local context.
2919 * Configured Tx offloads mask. It is fully defined at
2920 * compile time and may be used for optimization.
2923 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
2924 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
2925 * Local context variables partially updated.
2927 static __rte_always_inline enum mlx5_txcmp_code
2928 mlx5_tx_packet_multi_send(struct mlx5_txq_data *restrict txq,
2929 struct mlx5_txq_local *restrict loc,
2932 struct mlx5_wqe_dseg *restrict dseg;
2933 struct mlx5_wqe *restrict wqe;
2934 unsigned int ds, nseg;
2936 assert(NB_SEGS(loc->mbuf) > 1);
2938 * No inlining at all - it means that saving CPU cycles
2939 * was prioritized at configuration time, so we should not
2940 * copy any packet data to the WQE.
2942 nseg = NB_SEGS(loc->mbuf);
2944 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
2945 return MLX5_TXCMP_CODE_EXIT;
2946 /* Check for maximal WQE size. */
2947 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
2948 return MLX5_TXCMP_CODE_ERROR;
2950 * Some Tx offloads may cause an error if the
2951 * packet is not long enough, check against the
2952 * assumed minimal length.
2954 if (rte_pktmbuf_pkt_len(loc->mbuf) <= MLX5_ESEG_MIN_INLINE_SIZE)
2955 return MLX5_TXCMP_CODE_ERROR;
2956 #ifdef MLX5_PMD_SOFT_COUNTERS
2957 /* Update sent data bytes counter. */
2958 txq->stats.obytes += rte_pktmbuf_pkt_len(loc->mbuf);
2959 if (MLX5_TXOFF_CONFIG(VLAN) &&
2960 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
2961 txq->stats.obytes += sizeof(struct rte_vlan_hdr);
2964 * SEND WQE, one WQEBB:
2965 * - Control Segment, SEND opcode
2966 * - Ethernet Segment, optional VLAN, no inline
2967 * - Data Segments, pointer only type
2969 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
2970 loc->wqe_last = wqe;
2971 mlx5_tx_cseg_init(txq, loc, wqe, ds, MLX5_OPCODE_SEND, olx);
2972 mlx5_tx_eseg_none(txq, loc, wqe, olx);
2973 dseg = &wqe->dseg[0];
2975 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
2976 struct rte_mbuf *mbuf;
2979 * Zero length segment found, we have to
2980 * correct the total size of the WQE in segments.
2981 * It is supposed to be a rare occasion, so
2982 * in the normal case (no zero length segments)
2983 * we avoid an extra write to the Control Segment.
2987 wqe->cseg.sq_ds -= RTE_BE32(1);
2989 loc->mbuf = mbuf->next;
2990 rte_pktmbuf_free_seg(mbuf);
2996 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
2997 rte_pktmbuf_data_len(loc->mbuf), olx);
2998 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3003 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3004 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3005 loc->mbuf = loc->mbuf->next;
3008 txq->wqe_ci += (ds + 3) / 4;
3009 loc->wqe_free -= (ds + 3) / 4;
3010 return MLX5_TXCMP_CODE_MULTI;
3014 * Tx one packet function for multi-segment SEND. Supports all
3015 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
3016 * sends one packet per WQE, with data inlining in
3017 * Ethernet Segment and minimal Data Segments.
3019 * This routine is responsible for storing the processed mbuf
3020 * into the elts ring buffer and updating elts_head.
3023 * Pointer to TX queue structure.
3025 * Pointer to burst routine local context.
3027 * Configured Tx offloads mask. It is fully defined at
3028 * compile time and may be used for optimization.
3031 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3032 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3033 * Local context variables partially updated.
3035 static __rte_always_inline enum mlx5_txcmp_code
3036 mlx5_tx_packet_multi_inline(struct mlx5_txq_data *restrict txq,
3037 struct mlx5_txq_local *restrict loc,
3040 struct mlx5_wqe *restrict wqe;
3041 unsigned int ds, inlen, dlen, vlan = 0;
3043 assert(MLX5_TXOFF_CONFIG(INLINE));
3044 assert(NB_SEGS(loc->mbuf) > 1);
3046 * First calculate data length to be inlined
3047 * to estimate the required space for WQE.
3049 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
3050 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3051 vlan = sizeof(struct rte_vlan_hdr);
3052 inlen = dlen + vlan;
3053 /* Check against minimal length. */
3054 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
3055 return MLX5_TXCMP_CODE_ERROR;
3056 assert(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
3057 if (inlen > txq->inlen_send) {
3058 struct rte_mbuf *mbuf;
3063 * Packet length exceeds the allowed inline
3064 * data length, check whether the minimal
3065 * inlining is required.
3067 if (txq->inlen_mode) {
3068 assert(txq->inlen_mode >= MLX5_ESEG_MIN_INLINE_SIZE);
3069 assert(txq->inlen_mode <= txq->inlen_send);
3070 inlen = txq->inlen_mode;
3072 if (!vlan || txq->vlan_en) {
3074 * VLAN insertion will be done inside by HW.
3075 * It is not the most efficient way - the VLAN flag is
3076 * checked twice, but we should handle the
3077 * inlining length correctly and take into
3078 * account the VLAN header being inserted.
3080 return mlx5_tx_packet_multi_send
3083 inlen = MLX5_ESEG_MIN_INLINE_SIZE;
3086 * Now we know the minimal amount of data requested
3087 * to be inlined. Check whether we should inline the buffers
3088 * from the beginning of the chain to eliminate some mbufs.
3091 nxlen = rte_pktmbuf_data_len(mbuf);
3092 if (unlikely(nxlen <= txq->inlen_send)) {
3093 /* We can inline first mbuf at least. */
3094 if (nxlen < inlen) {
3097 /* Scan mbufs till inlen filled. */
3102 nxlen = rte_pktmbuf_data_len(mbuf);
3104 } while (unlikely(nxlen < inlen));
3105 if (unlikely(nxlen > txq->inlen_send)) {
3106 /* We cannot inline entire mbuf. */
3107 smlen = inlen - smlen;
3108 start = rte_pktmbuf_mtod_offset
3109 (mbuf, uintptr_t, smlen);
3116 /* This should not be the end of the packet. */
3118 nxlen = inlen + rte_pktmbuf_data_len(mbuf);
3119 } while (unlikely(nxlen < txq->inlen_send));
3121 start = rte_pktmbuf_mtod(mbuf, uintptr_t);
3123 * Check whether we can inline more data to align the start
3124 * address of the data buffer to a cacheline boundary.
3127 start = (~start + 1) & (RTE_CACHE_LINE_SIZE - 1);
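/*
 * Illustrative arithmetic (assuming RTE_CACHE_LINE_SIZE is 64): for
 * an address ending in 0x58 the expression (~addr + 1) & 63 yields
 * 0x28 = 40, the number of bytes left to the next 64-byte boundary;
 * inlining that many extra bytes would leave the remaining data
 * cacheline-aligned.
 */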
3128 if (unlikely(start)) {
3130 if (start <= txq->inlen_send)
3135 * Check whether there are enough free WQEBBs:
3137 * - Ethernet Segment
3138 * - First Segment of inlined Ethernet data
3139 * - ... data continued ...
3140 * - Data Segments of pointer/min inline type
3142 * Estimate the number of Data Segments conservatively,
3143 * assuming no mbufs are freed during inlining.
3145 assert(inlen <= txq->inlen_send);
3146 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
3147 MLX5_ESEG_MIN_INLINE_SIZE +
3149 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3150 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3151 return MLX5_TXCMP_CODE_EXIT;
3152 /* Check for maximal WQE size. */
3153 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3154 return MLX5_TXCMP_CODE_ERROR;
3155 #ifdef MLX5_PMD_SOFT_COUNTERS
3156 /* Update sent data bytes/packets counters. */
3157 txq->stats.obytes += dlen + vlan;
3159 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3160 loc->wqe_last = wqe;
3161 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_SEND, olx);
3162 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 0, olx);
3163 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3164 txq->wqe_ci += (ds + 3) / 4;
3165 loc->wqe_free -= (ds + 3) / 4;
3166 return MLX5_TXCMP_CODE_MULTI;
3170 * Tx burst function for multi-segment packets. Supports all
3171 * types of Tx offloads, uses MLX5_OPCODE_SEND/TSO to build WQEs,
3172 * sends one packet per WQE. The function stops sending when it
3173 * encounters a single-segment packet.
3175 * This routine is responsible for storing the processed mbuf
3176 * into the elts ring buffer and updating elts_head.
3179 * Pointer to TX queue structure.
3181 * Packets to transmit.
3183 * Number of packets in array.
3185 * Pointer to burst routine local context.
3187 * Configured Tx offloads mask. It is fully defined at
3188 * compile time and may be used for optimization.
3191 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3192 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3193 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
3194 * MLX5_TXCMP_CODE_TSO - TSO single-segment packet encountered.
3195 * Local context variables updated.
3197 static __rte_always_inline enum mlx5_txcmp_code
3198 mlx5_tx_burst_mseg(struct mlx5_txq_data *restrict txq,
3199 struct rte_mbuf **restrict pkts,
3200 unsigned int pkts_n,
3201 struct mlx5_txq_local *restrict loc,
3204 assert(loc->elts_free && loc->wqe_free);
3205 assert(pkts_n > loc->pkts_sent);
3206 pkts += loc->pkts_sent + 1;
3207 pkts_n -= loc->pkts_sent;
3209 enum mlx5_txcmp_code ret;
3211 assert(NB_SEGS(loc->mbuf) > 1);
3213 * Estimate the number of free elts quickly but
3214 * conservatively. Some segments may be fully inlined
3215 * and freed, ignore this here - no precise estimation is made.
3218 if (loc->elts_free < NB_SEGS(loc->mbuf))
3219 return MLX5_TXCMP_CODE_EXIT;
3220 if (MLX5_TXOFF_CONFIG(TSO) &&
3221 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)) {
3222 /* Proceed with multi-segment TSO. */
3223 ret = mlx5_tx_packet_multi_tso(txq, loc, olx);
3224 } else if (MLX5_TXOFF_CONFIG(INLINE)) {
3225 /* Proceed with multi-segment SEND with inlining. */
3226 ret = mlx5_tx_packet_multi_inline(txq, loc, olx);
3228 /* Proceed with multi-segment SEND w/o inlining. */
3229 ret = mlx5_tx_packet_multi_send(txq, loc, olx);
3231 if (ret == MLX5_TXCMP_CODE_EXIT)
3232 return MLX5_TXCMP_CODE_EXIT;
3233 if (ret == MLX5_TXCMP_CODE_ERROR)
3234 return MLX5_TXCMP_CODE_ERROR;
3235 /* WQE is built, go to the next packet. */
3238 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3239 return MLX5_TXCMP_CODE_EXIT;
3240 loc->mbuf = *pkts++;
3242 rte_prefetch0(*pkts);
3243 if (likely(NB_SEGS(loc->mbuf) > 1))
3245 /* Here ends the series of multi-segment packets. */
3246 if (MLX5_TXOFF_CONFIG(TSO) &&
3247 unlikely(!(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)))
3248 return MLX5_TXCMP_CODE_TSO;
3249 return MLX5_TXCMP_CODE_SINGLE;
3255 * Tx burst function for single-segment packets with TSO.
3256 * Supports all types of Tx offloads, except multi-packets.
3257 * Uses MLX5_OPCODE_TSO to build WQEs, sends one packet per WQE.
3258 * The function stops sending when it encounters a multi-segment
3259 * packet or a packet without TSO requested.
3261 * The routine is responsible for storing the processed mbuf
3262 * into the elts ring buffer and updating elts_head if the inline
3263 * offload is requested, due to possible early freeing
3264 * of the inlined mbufs (the pkts array can not be stored in elts
3268 * Pointer to TX queue structure.
3270 * Packets to transmit.
3272 * Number of packets in array.
3274 * Pointer to burst routine local context.
3276 * Configured Tx offloads mask. It is fully defined at
3277 * compile time and may be used for optimization.
3280 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3281 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3282 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
3283 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
3284 * Local context variables updated.
3286 static __rte_always_inline enum mlx5_txcmp_code
3287 mlx5_tx_burst_tso(struct mlx5_txq_data *restrict txq,
3288 struct rte_mbuf **restrict pkts,
3289 unsigned int pkts_n,
3290 struct mlx5_txq_local *restrict loc,
3293 assert(loc->elts_free && loc->wqe_free);
3294 assert(pkts_n > loc->pkts_sent);
3295 pkts += loc->pkts_sent + 1;
3296 pkts_n -= loc->pkts_sent;
3298 struct mlx5_wqe_dseg *restrict dseg;
3299 struct mlx5_wqe *restrict wqe;
3300 unsigned int ds, dlen, hlen, ntcp, vlan = 0;
3303 assert(NB_SEGS(loc->mbuf) == 1);
3304 dlen = rte_pktmbuf_data_len(loc->mbuf);
3305 if (MLX5_TXOFF_CONFIG(VLAN) &&
3306 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
3307 vlan = sizeof(struct rte_vlan_hdr);
3310 * First calculate the WQE size to check
3311 * whether we have enough space in the ring buffer.
3313 hlen = loc->mbuf->l2_len + vlan +
3314 loc->mbuf->l3_len + loc->mbuf->l4_len;
3315 if (unlikely((!hlen || !loc->mbuf->tso_segsz)))
3316 return MLX5_TXCMP_CODE_ERROR;
3317 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
3318 hlen += loc->mbuf->outer_l2_len +
3319 loc->mbuf->outer_l3_len;
3320 /* Segment must contain all TSO headers. */
3321 if (unlikely(hlen > MLX5_MAX_TSO_HEADER ||
3322 hlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
3323 hlen > (dlen + vlan)))
3324 return MLX5_TXCMP_CODE_ERROR;
3326 * Check whether there are enough free WQEBBs:
3328 * - Ethernet Segment
3329 * - First Segment of inlined Ethernet data
3330 * - ... data continued ...
3331 * - Finishing Data Segment of pointer type
3333 ds = 4 + (hlen - MLX5_ESEG_MIN_INLINE_SIZE +
3334 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3335 if (loc->wqe_free < ((ds + 3) / 4))
3336 return MLX5_TXCMP_CODE_EXIT;
3337 #ifdef MLX5_PMD_SOFT_COUNTERS
3338 /* Update sent data bytes/packets counters. */
3339 ntcp = (dlen + vlan - hlen +
3340 loc->mbuf->tso_segsz - 1) /
3341 loc->mbuf->tso_segsz;
3343 * One packet will be added for the mbuf itself at the end
3344 * of mlx5_tx_burst from the loc->pkts_sent field.
3347 txq->stats.opackets += ntcp;
3348 txq->stats.obytes += dlen + vlan + ntcp * hlen;
3351 * Build the TSO WQE:
3353 * - Ethernet Segment with hlen bytes inlined
3354 * - Data Segment of pointer type
3356 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3357 loc->wqe_last = wqe;
3358 mlx5_tx_cseg_init(txq, loc, wqe, ds,
3359 MLX5_OPCODE_TSO, olx);
3360 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan, hlen, 1, olx);
3361 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) + hlen - vlan;
3362 dlen -= hlen - vlan;
3363 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
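/*
 * Resulting TSO WQE layout example: with hlen = 54 and vlan = 0 the
 * 54 header bytes are inlined through the Ethernet Segment and the
 * pointer Data Segment above covers the remaining dlen - 54 payload
 * bytes starting at mtod(mbuf) + 54.
 */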
3365 * WQE is built, update the loop parameters
3366 * and go to the next packet.
3368 txq->wqe_ci += (ds + 3) / 4;
3369 loc->wqe_free -= (ds + 3) / 4;
3370 if (MLX5_TXOFF_CONFIG(INLINE))
3371 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3375 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3376 return MLX5_TXCMP_CODE_EXIT;
3377 loc->mbuf = *pkts++;
3379 rte_prefetch0(*pkts);
3380 if (MLX5_TXOFF_CONFIG(MULTI) &&
3381 unlikely(NB_SEGS(loc->mbuf) > 1))
3382 return MLX5_TXCMP_CODE_MULTI;
3383 if (unlikely(!(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)))
3384 return MLX5_TXCMP_CODE_SINGLE;
3385 /* Continue with the next TSO packet. */
3391 * Analyze the packet and select the best method to send.
3394 * Pointer to TX queue structure.
3396 * Pointer to burst routine local context.
3398 * Configured Tx offloads mask. It is fully defined at
3399 * compile time and may be used for optimization.
3401 * The predefined flag whether to do the complete check for
3402 * multi-segment packets and TSO.
3405 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
3406 * MLX5_TXCMP_CODE_TSO - TSO required, use TSO/LSO.
3407 * MLX5_TXCMP_CODE_SINGLE - single-segment packet, use SEND.
3408 * MLX5_TXCMP_CODE_EMPW - single-segment packet, use MPW.
3410 static __rte_always_inline enum mlx5_txcmp_code
3411 mlx5_tx_able_to_empw(struct mlx5_txq_data *restrict txq,
3412 struct mlx5_txq_local *restrict loc,
3416 /* Check for multi-segment packet. */
3418 MLX5_TXOFF_CONFIG(MULTI) &&
3419 unlikely(NB_SEGS(loc->mbuf) > 1))
3420 return MLX5_TXCMP_CODE_MULTI;
3421 /* Check for TSO packet. */
3423 MLX5_TXOFF_CONFIG(TSO) &&
3424 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
3425 return MLX5_TXCMP_CODE_TSO;
3426 /* Check if eMPW is enabled at all. */
3427 if (!MLX5_TXOFF_CONFIG(EMPW))
3428 return MLX5_TXCMP_CODE_SINGLE;
3429 /* Check if eMPW can be engaged. */
3430 if (MLX5_TXOFF_CONFIG(VLAN) &&
3431 unlikely(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) &&
3432 (!MLX5_TXOFF_CONFIG(INLINE) ||
3433 unlikely((rte_pktmbuf_data_len(loc->mbuf) +
3434 sizeof(struct rte_vlan_hdr)) > txq->inlen_empw))) {
3436 * eMPW does not support the VLAN insertion offload,
3437 * we would have to inline the entire packet, but the
3438 * packet is too long for inlining.
3440 return MLX5_TXCMP_CODE_SINGLE;
3442 return MLX5_TXCMP_CODE_EMPW;
3446 * Check the next packet attributes to match with the eMPW batch ones.
3449 * Pointer to TX queue structure.
3451 * Pointer to Ethernet Segment of eMPW batch.
3453 * Pointer to burst routine local context.
3455 * Configured Tx offloads mask. It is fully defined at
3456 * compile time and may be used for optimization.
3459 * true - the packet matches the eMPW batch attributes.
3460 * false - no match, eMPW should be restarted.
3462 static __rte_always_inline bool
3463 mlx5_tx_match_empw(struct mlx5_txq_data *restrict txq __rte_unused,
3464 struct mlx5_wqe_eseg *restrict es,
3465 struct mlx5_txq_local *restrict loc,
3468 uint8_t swp_flags = 0;
3470 /* Compare the checksum flags, if any. */
3471 if (MLX5_TXOFF_CONFIG(CSUM) &&
3472 txq_ol_cksum_to_cs(loc->mbuf) != es->cs_flags)
3474 /* Compare the Software Parser offsets and flags. */
3475 if (MLX5_TXOFF_CONFIG(SWP) &&
3476 (es->swp_offs != txq_mbuf_to_swp(loc, &swp_flags, olx) ||
3477 es->swp_flags != swp_flags))
3479 /* Fill metadata field if needed. */
3480 if (MLX5_TXOFF_CONFIG(METADATA) &&
3481 es->metadata != (loc->mbuf->ol_flags & PKT_TX_METADATA ?
3482 loc->mbuf->tx_metadata : 0))
3484 /* There must be no VLAN packets in eMPW loop. */
3485 if (MLX5_TXOFF_CONFIG(VLAN))
3486 assert(!(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT));
3491 * Update send loop variables and WQE for eMPW loop
3492 * without data inlining. Number of Data Segments is
3493 * equal to the number of sent packets.
3496 * Pointer to TX queue structure.
3498 * Pointer to burst routine local context.
3500 * Number of packets/Data Segments sent.
3502 * Accumulated statistics, data bytes sent.
3504 * Configured Tx offloads mask. It is fully defined at
3505 * compile time and may be used for optimization.
3511 static __rte_always_inline void
3512 mlx5_tx_sdone_empw(struct mlx5_txq_data *restrict txq,
3513 struct mlx5_txq_local *restrict loc,
3516 unsigned int olx __rte_unused)
3518 assert(!MLX5_TXOFF_CONFIG(INLINE));
3519 #ifdef MLX5_PMD_SOFT_COUNTERS
3520 /* Update sent data bytes counter. */
3521 txq->stats.obytes += slen;
3525 loc->elts_free -= ds;
3526 loc->pkts_sent += ds;
3528 loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3529 txq->wqe_ci += (ds + 3) / 4;
3530 loc->wqe_free -= (ds + 3) / 4;
3534 * Update send loop variables and WQE for eMPW loop
3535 * with data inlining. Takes the size of the descriptors
3536 * and data pushed to the WQE.
3539 * Pointer to TX queue structure.
3541 * Pointer to burst routine local context.
3543 * Total size of descriptor/data in bytes.
3545 * Accumulated statistics, data bytes sent.
3547 * Configured Tx offloads mask. It is fully defined at
3548 * compile time and may be used for optimization.
3554 static __rte_always_inline void
3555 mlx5_tx_idone_empw(struct mlx5_txq_data *restrict txq,
3556 struct mlx5_txq_local *restrict loc,
3559 unsigned int olx __rte_unused)
3561 assert(MLX5_TXOFF_CONFIG(INLINE));
3562 assert((len % MLX5_WSEG_SIZE) == 0);
3563 #ifdef MLX5_PMD_SOFT_COUNTERS
3564 /* Update sent data bytes counter. */
3565 txq->stats.obytes += slen;
3569 len = len / MLX5_WSEG_SIZE + 2;
3570 loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | len);
3571 txq->wqe_ci += (len + 3) / 4;
3572 loc->wqe_free -= (len + 3) / 4;
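/*
 * Illustrative accounting (assuming MLX5_WSEG_SIZE is 16): 176 bytes
 * of inline descriptors give len = 176 / 16 + 2 = 13 segments
 * (Control and Ethernet Segments included), i.e. (13 + 3) / 4 = 4
 * WQEBBs are consumed by this eMPW session.
 */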
3576 * The set of Tx burst functions for single-segment packets
3577 * without TSO and with Multi-Packet Writing feature support.
3578 * Supports all types of Tx offloads, except multi-packets
3581 * Uses MLX5_OPCODE_EMPW to build WQEs if possible and sends
3582 * as many packets per WQE as it can. If eMPW is not configured
3583 * or the packet can not be sent with eMPW (VLAN insertion) the
3584 * ordinary SEND opcode is used and only one packet is placed
3587 * The functions stop sending when they encounter a multi-segment
3588 * packet or a packet with TSO requested.
3590 * The routines are responsible for storing the processed mbuf
3591 * into the elts ring buffer and updating elts_head if the inlining
3592 * offload is requested. Otherwise copying the mbufs to elts
3593 * can be postponed and completed at the end of the burst routine.
3596 * Pointer to TX queue structure.
3598 * Packets to transmit.
3600 * Number of packets in array.
3602 * Pointer to burst routine local context.
3604 * Configured Tx offloads mask. It is fully defined at
3605 * compile time and may be used for optimization.
3608 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3609 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3610 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
3611 * MLX5_TXCMP_CODE_TSO - TSO packet encountered.
3612 * MLX5_TXCMP_CODE_SINGLE - used inside functions set.
3613 * MLX5_TXCMP_CODE_EMPW - used inside functions set.
3615 * Local context variables updated.
3618 * The routine sends packets with MLX5_OPCODE_EMPW
3619 * without inlining, this is a dedicated optimized branch.
3620 * No VLAN insertion is supported.
3622 static __rte_always_inline enum mlx5_txcmp_code
3623 mlx5_tx_burst_empw_simple(struct mlx5_txq_data *restrict txq,
3624 struct rte_mbuf **restrict pkts,
3625 unsigned int pkts_n,
3626 struct mlx5_txq_local *restrict loc,
3630 * The subroutine is a part of mlx5_tx_burst_single()
3631 * and sends single-segment packets with the eMPW opcode
3632 * without data inlining.
3634 assert(!MLX5_TXOFF_CONFIG(INLINE));
3635 assert(MLX5_TXOFF_CONFIG(EMPW));
3636 assert(loc->elts_free && loc->wqe_free);
3637 assert(pkts_n > loc->pkts_sent);
3638 static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
3639 pkts += loc->pkts_sent + 1;
3640 pkts_n -= loc->pkts_sent;
3642 struct mlx5_wqe_dseg *restrict dseg;
3643 struct mlx5_wqe_eseg *restrict eseg;
3644 enum mlx5_txcmp_code ret;
3645 unsigned int part, loop;
3646 unsigned int slen = 0;
3649 part = RTE_MIN(pkts_n, MLX5_EMPW_MAX_PACKETS);
3650 if (unlikely(loc->elts_free < part)) {
3651 /* We do not have enough elts to store all mbufs. */
3652 if (unlikely(loc->elts_free < MLX5_EMPW_MIN_PACKETS))
3653 return MLX5_TXCMP_CODE_EXIT;
3654 /* But we are still able to send at least a minimal eMPW. */
3655 part = loc->elts_free;
3657 /* Check whether we have enough WQEs */
3658 if (unlikely(loc->wqe_free < ((2 + part + 3) / 4))) {
3659 if (unlikely(loc->wqe_free <
3660 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
3661 return MLX5_TXCMP_CODE_EXIT;
3662 part = (loc->wqe_free * 4) - 2;
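/*
 * Illustrative limit: with loc->wqe_free = 4 WQEBBs there is room
 * for 4 * 4 = 16 segments, two of which are taken by the title
 * Control and Ethernet Segments, so at most 14 pointer Data Segments
 * (packets) fit into this eMPW session.
 */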
3664 if (likely(part > 1))
3665 rte_prefetch0(*pkts);
3666 loc->wqe_last = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3668 * Build eMPW title WQEBB:
3669 * - Control Segment, eMPW opcode
3670 * - Ethernet Segment, no inline
3672 mlx5_tx_cseg_init(txq, loc, loc->wqe_last, part + 2,
3673 MLX5_OPCODE_ENHANCED_MPSW, olx);
3674 mlx5_tx_eseg_none(txq, loc, loc->wqe_last,
3675 olx & ~MLX5_TXOFF_CONFIG_VLAN);
3676 eseg = &loc->wqe_last->eseg;
3677 dseg = &loc->wqe_last->dseg[0];
3680 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
3681 #ifdef MLX5_PMD_SOFT_COUNTERS
3682 /* Update sent data bytes counter. */
3687 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3689 if (unlikely(--loop == 0))
3691 loc->mbuf = *pkts++;
3692 if (likely(loop > 1))
3693 rte_prefetch0(*pkts);
3694 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
3696 * Unroll the completion code to avoid
3697 * returning a variable value - it results in
3698 * unoptimized sequential checking in the caller.
3700 if (ret == MLX5_TXCMP_CODE_MULTI) {
3702 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3703 if (unlikely(!loc->elts_free ||
3705 return MLX5_TXCMP_CODE_EXIT;
3706 return MLX5_TXCMP_CODE_MULTI;
3708 if (ret == MLX5_TXCMP_CODE_TSO) {
3710 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3711 if (unlikely(!loc->elts_free ||
3713 return MLX5_TXCMP_CODE_EXIT;
3714 return MLX5_TXCMP_CODE_TSO;
3716 if (ret == MLX5_TXCMP_CODE_SINGLE) {
3718 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3719 if (unlikely(!loc->elts_free ||
3721 return MLX5_TXCMP_CODE_EXIT;
3722 return MLX5_TXCMP_CODE_SINGLE;
3724 if (ret != MLX5_TXCMP_CODE_EMPW) {
3727 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3728 return MLX5_TXCMP_CODE_ERROR;
3731 * Check whether the packet parameters coincide
3732 * with the assumed eMPW batch attributes:
3733 * - checksum settings
3735 * - software parser settings
3737 if (!mlx5_tx_match_empw(txq, eseg, loc, olx)) {
3740 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
3741 if (unlikely(!loc->elts_free ||
3743 return MLX5_TXCMP_CODE_EXIT;
3746 /* Packet attributes match, continue the same eMPW. */
3748 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3749 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3751 /* eMPW is built successfully, update loop parameters. */
3753 assert(pkts_n >= part);
3754 #ifdef MLX5_PMD_SOFT_COUNTERS
3755 /* Update sent data bytes counter. */
3756 txq->stats.obytes += slen;
3758 loc->elts_free -= part;
3759 loc->pkts_sent += part;
3760 txq->wqe_ci += (2 + part + 3) / 4;
3761 loc->wqe_free -= (2 + part + 3) / 4;
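/*
 * Illustrative accounting: a batch of part = 6 packets occupies
 * 2 title segments plus 6 pointer Data Segments, i.e.
 * (2 + 6 + 3) / 4 = 2 WQEBBs.
 */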
3763 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3764 return MLX5_TXCMP_CODE_EXIT;
3765 loc->mbuf = *pkts++;
3766 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
3767 if (unlikely(ret != MLX5_TXCMP_CODE_EMPW))
3769 /* Continue sending eMPW batches. */
3775 * The routine sends packets with MLX5_OPCODE_EMPW
3776 * with inlining, optionally supports VLAN insertion.
3778 static __rte_always_inline enum mlx5_txcmp_code
3779 mlx5_tx_burst_empw_inline(struct mlx5_txq_data *restrict txq,
3780 struct rte_mbuf **restrict pkts,
3781 unsigned int pkts_n,
3782 struct mlx5_txq_local *restrict loc,
3786 * The subroutine is a part of mlx5_tx_burst_single()
3787 * and sends single-segment packets with the eMPW opcode
3788 * with data inlining.
3790 assert(MLX5_TXOFF_CONFIG(INLINE));
3791 assert(MLX5_TXOFF_CONFIG(EMPW));
3792 assert(loc->elts_free && loc->wqe_free);
3793 assert(pkts_n > loc->pkts_sent);
3794 static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
3795 pkts += loc->pkts_sent + 1;
3796 pkts_n -= loc->pkts_sent;
3798 struct mlx5_wqe_dseg *restrict dseg;
3799 struct mlx5_wqe_eseg *restrict eseg;
3800 enum mlx5_txcmp_code ret;
3801 unsigned int room, part;
3802 unsigned int slen = 0;
3805 /* Check whether we have the minimal amount of WQEs. */
3806 if (unlikely(loc->wqe_free <
3807 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
3808 return MLX5_TXCMP_CODE_EXIT;
3809 if (likely(pkts_n > 1))
3810 rte_prefetch0(*pkts);
3811 loc->wqe_last = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3813 * Build eMPW title WQEBB:
3814 * - Control Segment, eMPW opcode, zero DS
3815 * - Ethernet Segment, no inline
3817 mlx5_tx_cseg_init(txq, loc, loc->wqe_last, 0,
3818 MLX5_OPCODE_ENHANCED_MPSW, olx);
3819 mlx5_tx_eseg_none(txq, loc, loc->wqe_last,
3820 olx & ~MLX5_TXOFF_CONFIG_VLAN);
3821 eseg = &loc->wqe_last->eseg;
3822 dseg = &loc->wqe_last->dseg[0];
3823 room = RTE_MIN(MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE,
3824 loc->wqe_free) * MLX5_WQE_SIZE -
3825 MLX5_WQE_CSEG_SIZE -
3827 /* Build WQE till we have space, packets and resources. */
3830 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
3831 uint8_t *dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
3834 assert(room >= MLX5_WQE_DSEG_SIZE);
3835 assert((room % MLX5_WQE_DSEG_SIZE) == 0);
3836 assert((uintptr_t)dseg < (uintptr_t)txq->wqes_end);
3838 * Some Tx offloads may cause an error if the
3839 * packet is not long enough, check against the
3840 * assumed minimal length.
3842 if (unlikely(dlen <= MLX5_ESEG_MIN_INLINE_SIZE)) {
3844 if (unlikely(!part))
3845 return MLX5_TXCMP_CODE_ERROR;
3847 * We have some successfully built
3848 * packet Data Segments to send.
3850 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
3851 return MLX5_TXCMP_CODE_ERROR;
3853 /* Inline or not inline - that's the Question. */
3854 if (dlen > txq->inlen_empw)
3856 /* Inline entire packet, optional VLAN insertion. */
3857 tlen = sizeof(dseg->bcount) + dlen;
3858 if (MLX5_TXOFF_CONFIG(VLAN) &&
3859 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
3861 * The packet length must be checked in
3862 * mlx5_tx_able_to_empw() and the packet
3863 * is guaranteed to fit into the inline length.
3865 assert((dlen + sizeof(struct rte_vlan_hdr)) <=
3867 tlen += sizeof(struct rte_vlan_hdr);
3870 dseg = mlx5_tx_dseg_vlan(txq, loc, dseg,
3872 #ifdef MLX5_PMD_SOFT_COUNTERS
3873 /* Update sent data bytes counter. */
3874 slen += sizeof(struct rte_vlan_hdr);
3879 dseg = mlx5_tx_dseg_empw(txq, loc, dseg,
3882 tlen = RTE_ALIGN(tlen, MLX5_WSEG_SIZE);
3883 assert(room >= tlen);
3886 * Packet data are completely inlined,
3887 * free the packet immediately.
3889 rte_pktmbuf_free_seg(loc->mbuf);
3893 * Non-inlinable VLAN packets are
3894 * processed outside of this routine.
3896 assert(room >= MLX5_WQE_DSEG_SIZE);
3897 if (MLX5_TXOFF_CONFIG(VLAN))
3898 assert(!(loc->mbuf->ol_flags &
3900 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
3901 /* We have to store mbuf in elts.*/
3902 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3903 room -= MLX5_WQE_DSEG_SIZE;
3904 /* Ring buffer wraparound is checked at the loop end.*/
3907 #ifdef MLX5_PMD_SOFT_COUNTERS
3908 /* Update sent data bytes counter. */
3914 if (unlikely(!pkts_n || !loc->elts_free)) {
3916 * We have no resources/packets to
3917 * continue building descriptors.
3920 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
3921 return MLX5_TXCMP_CODE_EXIT;
3923 /* Check if we have minimal room left. */
3924 if (room < MLX5_WQE_DSEG_SIZE) {
3926 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
3929 loc->mbuf = *pkts++;
3930 if (likely(pkts_n > 1))
3931 rte_prefetch0(*pkts);
3932 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
3934 * Unroll the completion code to avoid
3935 * returning a variable value - it results in
3936 * unoptimized sequential checking in the caller.
3938 if (ret == MLX5_TXCMP_CODE_MULTI) {
3940 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
3941 if (unlikely(!loc->elts_free ||
3943 return MLX5_TXCMP_CODE_EXIT;
3944 return MLX5_TXCMP_CODE_MULTI;
3946 if (ret == MLX5_TXCMP_CODE_TSO) {
3948 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
3949 if (unlikely(!loc->elts_free ||
3951 return MLX5_TXCMP_CODE_EXIT;
3952 return MLX5_TXCMP_CODE_TSO;
3954 if (ret == MLX5_TXCMP_CODE_SINGLE) {
3956 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
3957 if (unlikely(!loc->elts_free ||
3959 return MLX5_TXCMP_CODE_EXIT;
3960 return MLX5_TXCMP_CODE_SINGLE;
3962 if (ret != MLX5_TXCMP_CODE_EMPW) {
3965 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
3966 return MLX5_TXCMP_CODE_ERROR;
3969 * Check whether the packet parameters coincide
3970 * with the assumed eMPW batch attributes:
3971 * - checksum settings
3973 * - software parser settings
3975 if (!mlx5_tx_match_empw(txq, eseg, loc, olx))
3977 /* Packet attributes match, continue the same eMPW. */
3978 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3979 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3982 * We get here to close an existing eMPW
3983 * session and start a new one.
3987 if (unlikely(!part))
3988 return MLX5_TXCMP_CODE_EXIT;
3989 mlx5_tx_idone_empw(txq, loc, part, slen, olx);
3990 if (unlikely(!loc->elts_free ||
3992 return MLX5_TXCMP_CODE_EXIT;
3999 * The routine sends packets with ordinary MLX5_OPCODE_SEND.
4000 * Data inlining and VLAN insertion are supported.
4002 static __rte_always_inline enum mlx5_txcmp_code
4003 mlx5_tx_burst_single_send(struct mlx5_txq_data *restrict txq,
4004 struct rte_mbuf **restrict pkts,
4005 unsigned int pkts_n,
4006 struct mlx5_txq_local *restrict loc,
4010 * The subroutine is a part of mlx5_tx_burst_single()
4011 * and sends single-segment packets with the SEND opcode.
4013 assert(loc->elts_free && loc->wqe_free);
4014 assert(pkts_n > loc->pkts_sent);
4015 pkts += loc->pkts_sent + 1;
4016 pkts_n -= loc->pkts_sent;
4018 struct mlx5_wqe *restrict wqe;
4019 enum mlx5_txcmp_code ret;
4021 assert(NB_SEGS(loc->mbuf) == 1);
4022 if (MLX5_TXOFF_CONFIG(INLINE)) {
4023 unsigned int inlen, vlan = 0;
4025 inlen = rte_pktmbuf_data_len(loc->mbuf);
4026 if (MLX5_TXOFF_CONFIG(VLAN) &&
4027 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
4028 vlan = sizeof(struct rte_vlan_hdr);
4030 static_assert((sizeof(struct rte_vlan_hdr) +
4031 sizeof(struct rte_ether_hdr)) ==
4032 MLX5_ESEG_MIN_INLINE_SIZE,
4033 "invalid min inline data size");
4036 * If inlining is enabled at configuration time
4037 * the limit must not be less than the minimal size.
4038 * Otherwise we would have to do an extra check for the data
4039 * size to avoid crashes due to length overflow.
4041 assert(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
4042 if (inlen <= txq->inlen_send) {
4043 unsigned int seg_n, wqe_n;
4045 rte_prefetch0(rte_pktmbuf_mtod
4046 (loc->mbuf, uint8_t *));
4047 /* Check against minimal length. */
4048 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
4049 return MLX5_TXCMP_CODE_ERROR;
4051 * Completely inlined packet data WQE:
4052 * - Control Segment, SEND opcode
4053 * - Ethernet Segment, no VLAN insertion
4054 * - Data inlined, VLAN optionally inserted
4055 * - Alignment to MLX5_WSEG_SIZE
4056 * We have to estimate the number of WQEBBs
4058 seg_n = (inlen + 3 * MLX5_WSEG_SIZE -
4059 MLX5_ESEG_MIN_INLINE_SIZE +
4060 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
4061 /* Check if there are enough WQEBBs. */
4062 wqe_n = (seg_n + 3) / 4;
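/*
 * Illustrative estimate (assuming MLX5_WSEG_SIZE is 16 and
 * MLX5_ESEG_MIN_INLINE_SIZE is 18): inlining inlen = 128 bytes gives
 * seg_n = (128 + 48 - 18 + 15) / 16 = 10 segments and
 * wqe_n = (10 + 3) / 4 = 3 WQEBBs.
 */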
4063 if (wqe_n > loc->wqe_free)
4064 return MLX5_TXCMP_CODE_EXIT;
4065 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4066 loc->wqe_last = wqe;
4067 mlx5_tx_cseg_init(txq, loc, wqe, seg_n,
4068 MLX5_OPCODE_SEND, olx);
4069 mlx5_tx_eseg_data(txq, loc, wqe,
4070 vlan, inlen, 0, olx);
4071 txq->wqe_ci += wqe_n;
4072 loc->wqe_free -= wqe_n;
4074 * Packet data are completely inlined,
4075 * free the packet immediately.
4077 rte_pktmbuf_free_seg(loc->mbuf);
4078 } else if (!MLX5_TXOFF_CONFIG(EMPW) &&
4081 * If minimal inlining is requested the eMPW
4082 * feature should be disabled because the data is
4083 * inlined into the Ethernet Segment, which can
4084 * not contain inlined data for eMPW since the
4085 * segment is shared by all packets.
4087 struct mlx5_wqe_dseg *restrict dseg;
4092 * The inline-mode settings require
4093 * inlining the specified amount of
4094 * data bytes into the Ethernet Segment.
4095 * We should check the free space in the
4096 * WQE ring buffer to inline only partially.
4098 assert(txq->inlen_send >= txq->inlen_mode);
4099 assert(inlen > txq->inlen_mode);
4100 assert(txq->inlen_mode >=
4101 MLX5_ESEG_MIN_INLINE_SIZE);
4103 * Check whether there are enough free WQEBBs:
4105 * - Ethernet Segment
4106 * - First Segment of inlined Ethernet data
4107 * - ... data continued ...
4108 * - Finishing Data Segment of pointer type
4110 ds = (MLX5_WQE_CSEG_SIZE +
4111 MLX5_WQE_ESEG_SIZE +
4112 MLX5_WQE_DSEG_SIZE +
4114 MLX5_ESEG_MIN_INLINE_SIZE +
4115 MLX5_WQE_DSEG_SIZE +
4116 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
4117 if (loc->wqe_free < ((ds + 3) / 4))
4118 return MLX5_TXCMP_CODE_EXIT;
4120 * Build the ordinary SEND WQE:
4122 * - Ethernet Segment, inline inlen_mode bytes
4123 * - Data Segment of pointer type
4125 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4126 loc->wqe_last = wqe;
4127 mlx5_tx_cseg_init(txq, loc, wqe, ds,
4128 MLX5_OPCODE_SEND, olx);
4129 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan,
4132 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
4133 txq->inlen_mode - vlan;
4134 inlen -= txq->inlen_mode;
4135 mlx5_tx_dseg_ptr(txq, loc, dseg,
4138 * WQE is built, update the loop parameters
4139 * and go to the next packet.
4141 txq->wqe_ci += (ds + 3) / 4;
4142 loc->wqe_free -= (ds + 3) / 4;
4143 /* We have to store mbuf in elts.*/
4144 assert(MLX5_TXOFF_CONFIG(INLINE));
4145 txq->elts[txq->elts_head++ & txq->elts_m] =
4153 * Partially inlined packet data WQE, we have
4154 * some space in the title WQEBB, we can fill it
4155 * with some packet data. It takes one WQEBB,
4156 * which is available, so no extra space check is needed:
4157 * - Control Segment, SEND opcode
4158 * - Ethernet Segment, no VLAN insertion
4159 * - MLX5_ESEG_MIN_INLINE_SIZE bytes of Data
4160 * - Data Segment, pointer type
4162 * We also get here if VLAN insertion is not
4163 * supported by the HW and inlining is enabled.
4165 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4166 loc->wqe_last = wqe;
4167 mlx5_tx_cseg_init(txq, loc, wqe, 4,
4168 MLX5_OPCODE_SEND, olx);
4169 mlx5_tx_eseg_dmin(txq, loc, wqe, vlan, olx);
4170 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
4171 MLX5_ESEG_MIN_INLINE_SIZE - vlan;
4173 * The length check is performed above, by
4174 * comparing with txq->inlen_send. We should
4175 * not get overflow here.
4177 assert(inlen > MLX5_ESEG_MIN_INLINE_SIZE);
4178 dlen = inlen - MLX5_ESEG_MIN_INLINE_SIZE;
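/*
 * Resulting layout example (assuming MLX5_ESEG_MIN_INLINE_SIZE is
 * 18): for inlen = 200 and vlan = 0 the first 18 packet bytes are
 * inlined by mlx5_tx_eseg_dmin() and the pointer Data Segment below
 * covers dlen = 182 bytes starting at mtod(mbuf) + 18.
 */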
4179 mlx5_tx_dseg_ptr(txq, loc, &wqe->dseg[1],
4183 /* We have to store mbuf in elts.*/
4184 assert(MLX5_TXOFF_CONFIG(INLINE));
4185 txq->elts[txq->elts_head++ & txq->elts_m] =
4189 #ifdef MLX5_PMD_SOFT_COUNTERS
4190 /* Update sent data bytes counter. */
4191 txq->stats.obytes += vlan +
4192 rte_pktmbuf_data_len(loc->mbuf);
4196 * No inlining at all - it means that saving CPU cycles
4197 * was prioritized at configuration time, so we should not
4198 * copy any packet data to the WQE.
4200 * SEND WQE, one WQEBB:
4201 * - Control Segment, SEND opcode
4202 * - Ethernet Segment, optional VLAN, no inline
4203 * - Data Segment, pointer type
4205 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4206 loc->wqe_last = wqe;
4207 mlx5_tx_cseg_init(txq, loc, wqe, 3,
4208 MLX5_OPCODE_SEND, olx);
4209 mlx5_tx_eseg_none(txq, loc, wqe, olx);
4211 (txq, loc, &wqe->dseg[0],
4212 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
4213 rte_pktmbuf_data_len(loc->mbuf), olx);
4217 * We should not store the mbuf pointer in elts
4218 * if no inlining is configured, this is done
4219 * by the calling routine in a batch copy.
4221 assert(!MLX5_TXOFF_CONFIG(INLINE));
4223 #ifdef MLX5_PMD_SOFT_COUNTERS
4224 /* Update sent data bytes counter. */
4225 txq->stats.obytes += rte_pktmbuf_data_len(loc->mbuf);
4226 if (MLX5_TXOFF_CONFIG(VLAN) &&
4227 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
4228 txq->stats.obytes +=
4229 sizeof(struct rte_vlan_hdr);
4234 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
4235 return MLX5_TXCMP_CODE_EXIT;
4236 loc->mbuf = *pkts++;
4238 rte_prefetch0(*pkts);
4239 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4240 if (unlikely(ret != MLX5_TXCMP_CODE_SINGLE))
4246 static __rte_always_inline enum mlx5_txcmp_code
4247 mlx5_tx_burst_single(struct mlx5_txq_data *restrict txq,
4248 struct rte_mbuf **restrict pkts,
4249 unsigned int pkts_n,
4250 struct mlx5_txq_local *restrict loc,
4253 enum mlx5_txcmp_code ret;
4255 ret = mlx5_tx_able_to_empw(txq, loc, olx, false);
4256 if (ret == MLX5_TXCMP_CODE_SINGLE)
4258 assert(ret == MLX5_TXCMP_CODE_EMPW);
4260 /* Optimize for inline/no inline eMPW send. */
4261 ret = (MLX5_TXOFF_CONFIG(INLINE)) ?
4262 mlx5_tx_burst_empw_inline
4263 (txq, pkts, pkts_n, loc, olx) :
4264 mlx5_tx_burst_empw_simple
4265 (txq, pkts, pkts_n, loc, olx);
4266 if (ret != MLX5_TXCMP_CODE_SINGLE)
4268 /* The resources to send one packet should remain. */
4269 assert(loc->elts_free && loc->wqe_free);
4271 ret = mlx5_tx_burst_single_send(txq, pkts, pkts_n, loc, olx);
4272 assert(ret != MLX5_TXCMP_CODE_SINGLE);
4273 if (ret != MLX5_TXCMP_CODE_EMPW)
4275 /* The resources to send one packet should remain. */
4276 assert(loc->elts_free && loc->wqe_free);
4281 * DPDK Tx callback template. This is a configured template
4282 * used to generate routines optimized for the specified offload setup.
4283 * One of these generated functions is chosen at SQ configuration
4287 * Generic pointer to TX queue structure.
4289 * Packets to transmit.
4291 * Number of packets in array.
4293 * Configured offloads mask, presenting the bits of MLX5_TXOFF_CONFIG_xxx
4294 * values. Should be static to take advantage of the compile-time static configuration
4298 * Number of packets successfully transmitted (<= pkts_n).
4300 static __rte_always_inline uint16_t
4301 mlx5_tx_burst_tmpl(struct mlx5_txq_data *restrict txq,
4302 struct rte_mbuf **restrict pkts,
4306 struct mlx5_txq_local loc;
4307 enum mlx5_txcmp_code ret;
4310 assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
4311 assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
4313 * Check if there are some CQEs, if any:
4314 * - process encountered errors
4315 * - process the completed WQEs
4316 * - free related mbufs
4317 * - doorbell the NIC about processed CQEs
4319 if (unlikely(!pkts_n))
4321 rte_prefetch0(*pkts);
4322 mlx5_tx_handle_completion(txq, olx);
4324 * Calculate the number of available resources - elts and WQEs.
4325 * There are two different scenarios:
4326 * - no data inlining into WQEs, one WQEBB may contain up to
4327 * four packets, in this case elts become the scarce resource
4328 * - data inlining into WQEs, one packet may require multiple
4329 * WQEBBs, so the WQEs become the limiting factor.
4331 assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
4332 loc.elts_free = txq->elts_s -
4333 (uint16_t)(txq->elts_head - txq->elts_tail);
4334 assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
4335 loc.wqe_free = txq->wqe_s -
4336 (uint16_t)(txq->wqe_ci - txq->wqe_pi);
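/*
 * Worked example of the unsigned 16-bit arithmetic above (illustrative
 * values only): with txq->elts_s == 512, txq->elts_head == 5 after the
 * index has wrapped around and txq->elts_tail == 65533, the difference
 * (uint16_t)(elts_head - elts_tail) evaluates to 8, hence
 * loc.elts_free == 504. Wrap-around of the ring indices is harmless.
 */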
4337 if (unlikely(!loc.elts_free || !loc.wqe_free))
4341 loc.wqe_last = NULL;
4344 * Fetch the packet from the array. Usually this is
4345 * the first packet in a series of multi/single
4348 loc.mbuf = *(pkts + loc.pkts_sent);
4349 /* Dedicated branch for multi-segment packets. */
4350 if (MLX5_TXOFF_CONFIG(MULTI) &&
4351 unlikely(NB_SEGS(loc.mbuf) > 1)) {
4353 * Multi-segment packet encountered.
4354 * Hardware is able to process it only
4355 * with SEND/TSO opcodes, one packet
4356 * per WQE, do it in dedicated routine.
4359 assert(loc.pkts_sent >= loc.pkts_copy);
4360 part = loc.pkts_sent - loc.pkts_copy;
4361 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
4363 * There are some single-segment mbufs not
4364 * stored in elts. The mbufs must be in the
4365 * same order as WQEs, so we must copy the
4366 * mbufs to elts here, before the mbufs of the
4367 * coming multi-segment packet are appended.
4369 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy,
4371 loc.pkts_copy = loc.pkts_sent;
4373 assert(pkts_n > loc.pkts_sent);
4374 ret = mlx5_tx_burst_mseg(txq, pkts, pkts_n, &loc, olx);
4375 if (!MLX5_TXOFF_CONFIG(INLINE))
4376 loc.pkts_copy = loc.pkts_sent;
4378 * These return code checks are supposed
4379 * to be optimized out due to routine inlining.
4381 if (ret == MLX5_TXCMP_CODE_EXIT) {
4383 * The routine returns this code when
4384 * all packets are sent or there are not
4385 * enough resources to complete the request.
4389 if (ret == MLX5_TXCMP_CODE_ERROR) {
4391 * The routine returns this code when
4392 * some error in the incoming packets
4395 txq->stats.oerrors++;
4398 if (ret == MLX5_TXCMP_CODE_SINGLE) {
4400 * A single-segment packet was encountered
4401 * in the array, try to send it in the most
4402 * optimized way, possibly engaging eMPW.
4404 goto enter_send_single;
4406 if (MLX5_TXOFF_CONFIG(TSO) &&
4407 ret == MLX5_TXCMP_CODE_TSO) {
4409 * The single-segment TSO packet was
4410 * encountered in the array.
4412 goto enter_send_tso;
4414 /* We must not get here. Something is going wrong. */
4416 txq->stats.oerrors++;
4419 /* Dedicated branch for single-segment TSO packets. */
4420 if (MLX5_TXOFF_CONFIG(TSO) &&
4421 unlikely(loc.mbuf->ol_flags & PKT_TX_TCP_SEG)) {
4423 * TSO might require a special way of inlining
4424 * (dedicated parameters) and is sent with the
4425 * MLX5_OPCODE_TSO opcode only, so handle it
4426 * in a dedicated branch.
4429 assert(NB_SEGS(loc.mbuf) == 1);
4430 assert(pkts_n > loc.pkts_sent);
4431 ret = mlx5_tx_burst_tso(txq, pkts, pkts_n, &loc, olx);
4433 * These return code checks are supposed
4434 * to be optimized out due to routine inlining.
4436 if (ret == MLX5_TXCMP_CODE_EXIT)
4438 if (ret == MLX5_TXCMP_CODE_ERROR) {
4439 txq->stats.oerrors++;
4442 if (ret == MLX5_TXCMP_CODE_SINGLE)
4443 goto enter_send_single;
4444 if (MLX5_TXOFF_CONFIG(MULTI) &&
4445 ret == MLX5_TXCMP_CODE_MULTI) {
4447 * The multi-segment packet was
4448 * encountered in the array.
4450 goto enter_send_multi;
4452 /* We must not get here. Something is going wrong. */
4454 txq->stats.oerrors++;
4458 * The dedicated branch for single-segment packets
4459 * without TSO. Often these can be sent using
4460 * MLX5_OPCODE_EMPW with multiple packets in one WQE.
4461 * The routine builds the WQEs till it encounters
4462 * a TSO or multi-segment packet (in case these
4463 * offloads are requested at SQ configuration time).
4466 assert(pkts_n > loc.pkts_sent);
4467 ret = mlx5_tx_burst_single(txq, pkts, pkts_n, &loc, olx);
4469 * These return code checks are supposed
4470 * to be optimized out due to routine inlining.
4472 if (ret == MLX5_TXCMP_CODE_EXIT)
4474 if (ret == MLX5_TXCMP_CODE_ERROR) {
4475 txq->stats.oerrors++;
4478 if (MLX5_TXOFF_CONFIG(MULTI) &&
4479 ret == MLX5_TXCMP_CODE_MULTI) {
4481 * The multi-segment packet was
4482 * encountered in the array.
4484 goto enter_send_multi;
4486 if (MLX5_TXOFF_CONFIG(TSO) &&
4487 ret == MLX5_TXCMP_CODE_TSO) {
4489 * The single-segment TSO packet was
4490 * encountered in the array.
4492 goto enter_send_tso;
4494 /* We must not get here. Something is going wrong. */
4496 txq->stats.oerrors++;
4500 * The main Tx loop is completed, do the rest:
4501 * - set the completion request if thresholds are reached
4502 * - doorbell the hardware
4503 * - copy the rest of the mbufs to elts (if any)
4505 assert(MLX5_TXOFF_CONFIG(INLINE) || loc.pkts_sent >= loc.pkts_copy);
4506 /* Take a shortcut if nothing is sent. */
4507 if (unlikely(loc.pkts_sent == 0))
4509 /* Not all of the mbufs may be stored into elts yet. */
4510 part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent - loc.pkts_copy;
4511 mlx5_tx_request_completion(txq, part, &loc, olx);
4513 * Ring the QP doorbell immediately after WQE building completion
4514 * to improve latency. The purely software-related data treatment
4515 * can be completed after the doorbell. Tx CQEs for this SQ are
4516 * processed in this thread only, by polling.
4518 mlx5_tx_dbrec_cond_wmb(txq, loc.wqe_last, 0);
4519 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
4521 * There are some single-segment mbufs not stored in elts.
4522 * This can happen only if the last packet was single-segment.
4523 * The copying is gathered into one place because it is
4524 * a good opportunity to optimize it with SIMD.
4525 * Unfortunately, if inlining is enabled, gaps in the
4526 * pointer array may happen due to early freeing of the
4529 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy, part, olx);
4531 #ifdef MLX5_PMD_SOFT_COUNTERS
4532 /* Increment sent packets counter. */
4533 txq->stats.opackets += loc.pkts_sent;
4535 assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
4536 assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
4537 return loc.pkts_sent;
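/*
 * Illustrative note on the generated routines below: every
 * MLX5_TXOFF_DECL() invocation instantiates the template above with a
 * constant offload mask, e.g. the routine declared with
 * MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW keeps only the
 * metadata and eMPW related paths, while the multi-segment, TSO, SWP,
 * checksum, inline and VLAN branches fold away at compile time.
 */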
4540 /* Generate routines with Enhanced Multi-Packet Write support. */
4541 MLX5_TXOFF_DECL(full_empw,
4542 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_EMPW)
4544 MLX5_TXOFF_DECL(none_empw,
4545 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW)
4547 MLX5_TXOFF_DECL(md_empw,
4548 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4550 MLX5_TXOFF_DECL(mt_empw,
4551 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4552 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4554 MLX5_TXOFF_DECL(mtsc_empw,
4555 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4556 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4557 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4559 MLX5_TXOFF_DECL(mti_empw,
4560 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4561 MLX5_TXOFF_CONFIG_INLINE |
4562 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4564 MLX5_TXOFF_DECL(mtv_empw,
4565 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4566 MLX5_TXOFF_CONFIG_VLAN |
4567 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4569 MLX5_TXOFF_DECL(mtiv_empw,
4570 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4571 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4572 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4574 MLX5_TXOFF_DECL(sc_empw,
4575 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4576 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4578 MLX5_TXOFF_DECL(sci_empw,
4579 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4580 MLX5_TXOFF_CONFIG_INLINE |
4581 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4583 MLX5_TXOFF_DECL(scv_empw,
4584 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4585 MLX5_TXOFF_CONFIG_VLAN |
4586 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4588 MLX5_TXOFF_DECL(sciv_empw,
4589 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4590 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4591 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4593 MLX5_TXOFF_DECL(i_empw,
4594 MLX5_TXOFF_CONFIG_INLINE |
4595 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4597 MLX5_TXOFF_DECL(v_empw,
4598 MLX5_TXOFF_CONFIG_VLAN |
4599 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4601 MLX5_TXOFF_DECL(iv_empw,
4602 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4603 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4605 /* Generate routines without Enhanced Multi-Packet Write support. */
4606 MLX5_TXOFF_DECL(full,
4607 MLX5_TXOFF_CONFIG_FULL)
4609 MLX5_TXOFF_DECL(none,
4610 MLX5_TXOFF_CONFIG_NONE)
4613 MLX5_TXOFF_CONFIG_METADATA)
4616 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4617 MLX5_TXOFF_CONFIG_METADATA)
4619 MLX5_TXOFF_DECL(mtsc,
4620 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4621 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4622 MLX5_TXOFF_CONFIG_METADATA)
4624 MLX5_TXOFF_DECL(mti,
4625 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4626 MLX5_TXOFF_CONFIG_INLINE |
4627 MLX5_TXOFF_CONFIG_METADATA)
4630 MLX5_TXOFF_DECL(mtv,
4631 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4632 MLX5_TXOFF_CONFIG_VLAN |
4633 MLX5_TXOFF_CONFIG_METADATA)
4636 MLX5_TXOFF_DECL(mtiv,
4637 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4638 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4639 MLX5_TXOFF_CONFIG_METADATA)
4642 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4643 MLX5_TXOFF_CONFIG_METADATA)
4645 MLX5_TXOFF_DECL(sci,
4646 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4647 MLX5_TXOFF_CONFIG_INLINE |
4648 MLX5_TXOFF_CONFIG_METADATA)
4651 MLX5_TXOFF_DECL(scv,
4652 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4653 MLX5_TXOFF_CONFIG_VLAN |
4654 MLX5_TXOFF_CONFIG_METADATA)
4657 MLX5_TXOFF_DECL(sciv,
4658 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4659 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4660 MLX5_TXOFF_CONFIG_METADATA)
4663 MLX5_TXOFF_CONFIG_INLINE |
4664 MLX5_TXOFF_CONFIG_METADATA)
4667 MLX5_TXOFF_CONFIG_VLAN |
4668 MLX5_TXOFF_CONFIG_METADATA)
4671 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4672 MLX5_TXOFF_CONFIG_METADATA)
4675 * Array of declared and compiled Tx burst functions and the corresponding
4676 * supported offload sets. The array is used to select the Tx burst
4677 * function for the offload set specified at Tx queue configuration time.
4680 eth_tx_burst_t func;
4683 MLX5_TXOFF_INFO(full_empw,
4684 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4685 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4686 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4687 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4689 MLX5_TXOFF_INFO(none_empw,
4690 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW)
4692 MLX5_TXOFF_INFO(md_empw,
4693 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4695 MLX5_TXOFF_INFO(mt_empw,
4696 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4697 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4699 MLX5_TXOFF_INFO(mtsc_empw,
4700 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4701 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4702 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4704 MLX5_TXOFF_INFO(mti_empw,
4705 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4706 MLX5_TXOFF_CONFIG_INLINE |
4707 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4709 MLX5_TXOFF_INFO(mtv_empw,
4710 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4711 MLX5_TXOFF_CONFIG_VLAN |
4712 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4714 MLX5_TXOFF_INFO(mtiv_empw,
4715 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4716 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4717 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4719 MLX5_TXOFF_INFO(sc_empw,
4720 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4721 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4723 MLX5_TXOFF_INFO(sci_empw,
4724 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4725 MLX5_TXOFF_CONFIG_INLINE |
4726 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4728 MLX5_TXOFF_INFO(scv_empw,
4729 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4730 MLX5_TXOFF_CONFIG_VLAN |
4731 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4733 MLX5_TXOFF_INFO(sciv_empw,
4734 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4735 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4736 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4738 MLX5_TXOFF_INFO(i_empw,
4739 MLX5_TXOFF_CONFIG_INLINE |
4740 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4742 MLX5_TXOFF_INFO(v_empw,
4743 MLX5_TXOFF_CONFIG_VLAN |
4744 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4746 MLX5_TXOFF_INFO(iv_empw,
4747 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4748 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
4750 MLX5_TXOFF_INFO(full,
4751 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4752 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4753 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4754 MLX5_TXOFF_CONFIG_METADATA)
4756 MLX5_TXOFF_INFO(none,
4757 MLX5_TXOFF_CONFIG_NONE)
4760 MLX5_TXOFF_CONFIG_METADATA)
4763 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4764 MLX5_TXOFF_CONFIG_METADATA)
4766 MLX5_TXOFF_INFO(mtsc,
4767 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4768 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4769 MLX5_TXOFF_CONFIG_METADATA)
4771 MLX5_TXOFF_INFO(mti,
4772 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4773 MLX5_TXOFF_CONFIG_INLINE |
4774 MLX5_TXOFF_CONFIG_METADATA)
4777 MLX5_TXOFF_INFO(mtv,
4778 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4779 MLX5_TXOFF_CONFIG_VLAN |
4780 MLX5_TXOFF_CONFIG_METADATA)
4782 MLX5_TXOFF_INFO(mtiv,
4783 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
4784 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4785 MLX5_TXOFF_CONFIG_METADATA)
4788 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4789 MLX5_TXOFF_CONFIG_METADATA)
4791 MLX5_TXOFF_INFO(sci,
4792 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4793 MLX5_TXOFF_CONFIG_INLINE |
4794 MLX5_TXOFF_CONFIG_METADATA)
4796 MLX5_TXOFF_INFO(scv,
4797 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4798 MLX5_TXOFF_CONFIG_VLAN |
4799 MLX5_TXOFF_CONFIG_METADATA)
4801 MLX5_TXOFF_INFO(sciv,
4802 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
4803 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4804 MLX5_TXOFF_CONFIG_METADATA)
4807 MLX5_TXOFF_CONFIG_INLINE |
4808 MLX5_TXOFF_CONFIG_METADATA)
4811 MLX5_TXOFF_CONFIG_VLAN |
4812 MLX5_TXOFF_CONFIG_METADATA)
4815 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
4816 MLX5_TXOFF_CONFIG_METADATA)
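/*
 * Worked example of the routine selection below (hypothetical request):
 * with olx == MLX5_TXOFF_CONFIG_CSUM | MLX5_TXOFF_CONFIG_METADATA |
 * MLX5_TXOFF_CONFIG_EMPW there is no exact match in txoff_func[], the
 * candidates are the entries containing all three requested bits, and
 * sc_empw wins - it adds only one not requested offload (SWP), i.e. the
 * smallest __builtin_popcountl(tmp & ~olx) among the candidates (entries
 * adding INLINE are skipped because inlining was not requested).
 */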
4820 * Configure the Tx function to use. The routine checks the configured
4821 * Tx offloads for the device and selects the appropriate Tx burst
4822 * routine. There are multiple Tx burst routines compiled from
4823 * the same template in the most optimal way for the dedicated
4827 * Pointer to private data structure.
4830 * Pointer to selected Tx burst function.
4833 mlx5_select_tx_function(struct rte_eth_dev *dev)
4835 struct mlx5_priv *priv = dev->data->dev_private;
4836 struct mlx5_dev_config *config = &priv->config;
4837 uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
4838 unsigned int diff = 0, olx = 0, i, m;
4840 static_assert(MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE <=
4841 MLX5_DSEG_MAX, "invalid WQE max size");
4842 static_assert(MLX5_WQE_CSEG_SIZE == MLX5_WSEG_SIZE,
4843 "invalid WQE Control Segment size");
4844 static_assert(MLX5_WQE_ESEG_SIZE == MLX5_WSEG_SIZE,
4845 "invalid WQE Ethernet Segment size");
4846 static_assert(MLX5_WQE_DSEG_SIZE == MLX5_WSEG_SIZE,
4847 "invalid WQE Data Segment size");
4848 static_assert(MLX5_WQE_SIZE == 4 * MLX5_WSEG_SIZE,
4849 "invalid WQE size");
4851 if (tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
4852 /* We should support Multi-Segment Packets. */
4853 olx |= MLX5_TXOFF_CONFIG_MULTI;
4855 if (tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
4856 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
4857 DEV_TX_OFFLOAD_GRE_TNL_TSO |
4858 DEV_TX_OFFLOAD_IP_TNL_TSO |
4859 DEV_TX_OFFLOAD_UDP_TNL_TSO)) {
4860 /* We should support TCP Send Offload. */
4861 olx |= MLX5_TXOFF_CONFIG_TSO;
4863 if (tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO |
4864 DEV_TX_OFFLOAD_UDP_TNL_TSO |
4865 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
4866 /* We should support Software Parser for Tunnels. */
4867 olx |= MLX5_TXOFF_CONFIG_SWP;
4869 if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
4870 DEV_TX_OFFLOAD_UDP_CKSUM |
4871 DEV_TX_OFFLOAD_TCP_CKSUM |
4872 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
4873 /* We should support IP/TCP/UDP Checksums. */
4874 olx |= MLX5_TXOFF_CONFIG_CSUM;
4876 if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT) {
4877 /* We should support VLAN insertion. */
4878 olx |= MLX5_TXOFF_CONFIG_VLAN;
4880 if (priv->txqs_n && (*priv->txqs)[0]) {
4881 struct mlx5_txq_data *txd = (*priv->txqs)[0];
4883 if (txd->inlen_send) {
4885 * Check the data inline requirements. Data inlining
4886 * is enabled on a per-device basis, so we can check
4887 * the first Tx queue only.
4889 * If the device does not support VLAN insertion in the WQE
4890 * and some queues are requested to perform VLAN
4891 * insertion offload, then inlining must be enabled.
4893 olx |= MLX5_TXOFF_CONFIG_INLINE;
4896 if (config->mps == MLX5_MPW_ENHANCED &&
4897 config->txq_inline_min <= 0) {
4899 * The NIC supports Enhanced Multi-Packet Write.
4900 * We do not support legacy MPW due to its
4901 * hardware-related problems, so we just ignore
4902 * legacy MLX5_MPW settings. There must be no
4903 * minimal required inline data.
4905 olx |= MLX5_TXOFF_CONFIG_EMPW;
4907 if (tx_offloads & DEV_TX_OFFLOAD_MATCH_METADATA) {
4908 /* We should support Flow metadata. */
4909 olx |= MLX5_TXOFF_CONFIG_METADATA;
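/*
 * Mapping example (illustrative only): a device configured with
 * DEV_TX_OFFLOAD_MULTI_SEGS, DEV_TX_OFFLOAD_TCP_TSO and
 * DEV_TX_OFFLOAD_TCP_CKSUM ends up here with
 * olx == MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
 * MLX5_TXOFF_CONFIG_CSUM, plus MLX5_TXOFF_CONFIG_INLINE and/or
 * MLX5_TXOFF_CONFIG_EMPW if the device settings above enable them.
 */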
4912 * Scan the routine table to find the routine that
4913 * satisfies the requested offloads with the minimal overhead.
4915 m = RTE_DIM(txoff_func);
4916 for (i = 0; i < RTE_DIM(txoff_func); i++) {
4919 tmp = txoff_func[i].olx;
4921 /* Meets requested offloads exactly.*/
4925 if ((tmp & olx) != olx) {
4926 /* Does not meet requested offloads at all. */
4929 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_EMPW)
4930 /* Do not enable eMPW if not configured. */
4932 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_INLINE)
4933 /* Do not enable inlining if not configured. */
4936 * Some routine meets the requirements.
4937 * Check whether it has the minimal amount
4938 * of non-requested offloads.
4940 tmp = __builtin_popcountl(tmp & ~olx);
4941 if (m >= RTE_DIM(txoff_func) || tmp < diff) {
4942 /* First or better match, save and continue. */
4948 tmp = txoff_func[i].olx ^ txoff_func[m].olx;
4949 if (__builtin_ffsl(txoff_func[i].olx & ~tmp) <
4950 __builtin_ffsl(txoff_func[m].olx & ~tmp)) {
4951 /* Lighter not requested offload. */
4956 if (m >= RTE_DIM(txoff_func)) {
4957 DRV_LOG(DEBUG, "port %u has no selected Tx function"
4958 " for requested offloads %04X",
4959 dev->data->port_id, olx);
4962 DRV_LOG(DEBUG, "port %u has selected Tx function"
4963 " supporting offloads %04X/%04X",
4964 dev->data->port_id, olx, txoff_func[m].olx);
4965 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MULTI)
4966 DRV_LOG(DEBUG, "\tMULTI (multi segment)");
4967 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_TSO)
4968 DRV_LOG(DEBUG, "\tTSO (TCP send offload)");
4969 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_SWP)
4970 DRV_LOG(DEBUG, "\tSWP (software parser)");
4971 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_CSUM)
4972 DRV_LOG(DEBUG, "\tCSUM (checksum offload)");
4973 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_INLINE)
4974 DRV_LOG(DEBUG, "\tINLIN (inline data)");
4975 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_VLAN)
4976 DRV_LOG(DEBUG, "\tVLANI (VLAN insertion)");
4977 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_METADATA)
4978 DRV_LOG(DEBUG, "\tMETAD (tx Flow metadata)");
4979 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_EMPW)
4980 DRV_LOG(DEBUG, "\tEMPW (Enhanced MPW)");
4981 return txoff_func[m].func;
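/*
 * Typical usage (an assumption, not shown in this file): the selected
 * routine is installed as the device Tx burst callback when the port
 * is started, e.g.:
 *
 *	dev->tx_pkt_burst = mlx5_select_tx_function(dev);
 *
 * Failure to find a matching routine (logged above) should be treated
 * as a configuration error by the caller.
 */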