1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015-2019 Mellanox Technologies, Ltd
11 #include <rte_mempool.h>
12 #include <rte_prefetch.h>
13 #include <rte_common.h>
14 #include <rte_branch_prediction.h>
15 #include <rte_ether.h>
16 #include <rte_cycles.h>
20 #include <mlx5_common.h>
22 #include "mlx5_autoconf.h"
23 #include "mlx5_defs.h"
26 #include "mlx5_utils.h"
27 #include "mlx5_rxtx.h"
29 /* TX burst subroutines return codes. */
30 enum mlx5_txcmp_code {
31 MLX5_TXCMP_CODE_EXIT = 0,
32 MLX5_TXCMP_CODE_ERROR,
33 MLX5_TXCMP_CODE_SINGLE,
34 MLX5_TXCMP_CODE_MULTI,
40 * These defines are used to configure the Tx burst routine option set
41 * supported at compile time. The options that are not specified are
42 * optimized out because the 'if' conditions can be evaluated at compile time.
43 * The offloads with the bigger runtime check overhead (requiring more CPU
44 * cycles to skip) should have the bigger index - this is needed to
45 * select the best matching routine when there is no exact match and
46 * some offloads are not actually requested.
48 #define MLX5_TXOFF_CONFIG_MULTI (1u << 0) /* Multi-segment packets.*/
49 #define MLX5_TXOFF_CONFIG_TSO (1u << 1) /* TCP send offload supported.*/
50 #define MLX5_TXOFF_CONFIG_SWP (1u << 2) /* Tunnels/SW Parser offloads.*/
51 #define MLX5_TXOFF_CONFIG_CSUM (1u << 3) /* Check Sums offloaded. */
52 #define MLX5_TXOFF_CONFIG_INLINE (1u << 4) /* Data inlining supported. */
53 #define MLX5_TXOFF_CONFIG_VLAN (1u << 5) /* VLAN insertion supported.*/
54 #define MLX5_TXOFF_CONFIG_METADATA (1u << 6) /* Flow metadata. */
55 #define MLX5_TXOFF_CONFIG_EMPW (1u << 8) /* Enhanced MPW supported.*/
56 #define MLX5_TXOFF_CONFIG_MPW (1u << 9) /* Legacy MPW supported.*/
57 #define MLX5_TXOFF_CONFIG_TXPP (1u << 10) /* Scheduling on timestamp.*/
59 /* The most common offload groups. */
60 #define MLX5_TXOFF_CONFIG_NONE 0
61 #define MLX5_TXOFF_CONFIG_FULL (MLX5_TXOFF_CONFIG_MULTI | \
62 MLX5_TXOFF_CONFIG_TSO | \
63 MLX5_TXOFF_CONFIG_SWP | \
64 MLX5_TXOFF_CONFIG_CSUM | \
65 MLX5_TXOFF_CONFIG_INLINE | \
66 MLX5_TXOFF_CONFIG_VLAN | \
67 MLX5_TXOFF_CONFIG_METADATA)
69 #define MLX5_TXOFF_CONFIG(mask) (olx & MLX5_TXOFF_CONFIG_##mask)
71 #define MLX5_TXOFF_DECL(func, olx) \
72 static uint16_t mlx5_tx_burst_##func(void *txq, \
73 struct rte_mbuf **pkts, \
76 return mlx5_tx_burst_tmpl((struct mlx5_txq_data *)txq, \
77 pkts, pkts_n, (olx)); \
80 #define MLX5_TXOFF_INFO(func, olx) {mlx5_tx_burst_##func, olx},
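/*
 * Illustration only (hypothetical routine name): a Tx burst variant for a
 * given offload set is generated and registered with the macros above,
 * letting the compiler evaluate every MLX5_TXOFF_CONFIG() check at compile
 * time and drop the branches the constant 'olx' rules out, e.g.:
 *
 *	MLX5_TXOFF_DECL(mc, MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM)
 *	...
 *	MLX5_TXOFF_INFO(mc, MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM)
 */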
82 static __rte_always_inline uint32_t
83 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
84 volatile struct mlx5_mini_cqe8 *mcqe);
86 static __rte_always_inline int
87 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
88 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe);
90 static __rte_always_inline uint32_t
91 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe);
93 static __rte_always_inline void
94 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
95 volatile struct mlx5_cqe *cqe,
96 volatile struct mlx5_mini_cqe8 *mcqe);
99 mlx5_queue_state_modify(struct rte_eth_dev *dev,
100 struct mlx5_mp_arg_queue_state_modify *sm);
103 mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *__rte_restrict tcp,
104 volatile struct mlx5_cqe *__rte_restrict cqe,
105 uint32_t phcsum, uint8_t l4_type);
108 mlx5_lro_update_hdr(uint8_t *__rte_restrict padd,
109 volatile struct mlx5_cqe *__rte_restrict cqe,
110 volatile struct mlx5_mini_cqe8 *mcqe,
111 struct mlx5_rxq_data *rxq, uint32_t len);
113 uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
114 [0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
117 uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned;
118 uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned;
120 uint64_t rte_net_mlx5_dynf_inline_mask;
121 #define PKT_TX_DYNF_NOINLINE rte_net_mlx5_dynf_inline_mask
124 * Build a table to translate Rx completion flags to packet type.
126 * @note: update mlx5_dev_supported_ptypes_get() if anything changes here.
129 mlx5_set_ptype_table(void)
132 uint32_t (*p)[RTE_DIM(mlx5_ptype_table)] = &mlx5_ptype_table;
134 /* Last entry must not be overwritten, reserved for errored packet. */
135 for (i = 0; i < RTE_DIM(mlx5_ptype_table) - 1; ++i)
136 (*p)[i] = RTE_PTYPE_UNKNOWN;
138 * The index to the array should have:
139 * bit[1:0] = l3_hdr_type
140 * bit[4:2] = l4_hdr_type
143 * bit[7] = outer_l3_type
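* For example, index 0x02 has only l3_hdr_type = 2 set, i.e. a plain
* non-fragmented IPv4 packet over Ethernet, and is mapped below to
* RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG.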
146 (*p)[0x00] = RTE_PTYPE_L2_ETHER;
148 (*p)[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
149 RTE_PTYPE_L4_NONFRAG;
150 (*p)[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
151 RTE_PTYPE_L4_NONFRAG;
153 (*p)[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
155 (*p)[0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
158 (*p)[0x05] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
160 (*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
162 (*p)[0x0d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
164 (*p)[0x0e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
166 (*p)[0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
168 (*p)[0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
171 (*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
173 (*p)[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
175 /* Repeat with outer_l3_type being set. Just in case. */
176 (*p)[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
177 RTE_PTYPE_L4_NONFRAG;
178 (*p)[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
179 RTE_PTYPE_L4_NONFRAG;
180 (*p)[0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
182 (*p)[0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
184 (*p)[0x85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
186 (*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
188 (*p)[0x8d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
190 (*p)[0x8e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
192 (*p)[0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
194 (*p)[0x92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
196 (*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
198 (*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
201 (*p)[0x40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
202 (*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
203 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
204 RTE_PTYPE_INNER_L4_NONFRAG;
205 (*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
206 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
207 RTE_PTYPE_INNER_L4_NONFRAG;
208 (*p)[0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
209 (*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
210 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
211 RTE_PTYPE_INNER_L4_NONFRAG;
212 (*p)[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
213 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
214 RTE_PTYPE_INNER_L4_NONFRAG;
215 /* Tunneled - Fragmented */
216 (*p)[0x61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
217 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
218 RTE_PTYPE_INNER_L4_FRAG;
219 (*p)[0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
220 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
221 RTE_PTYPE_INNER_L4_FRAG;
222 (*p)[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
223 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
224 RTE_PTYPE_INNER_L4_FRAG;
225 (*p)[0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
226 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
227 RTE_PTYPE_INNER_L4_FRAG;
229 (*p)[0x45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
230 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
231 RTE_PTYPE_INNER_L4_TCP;
232 (*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
233 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
234 RTE_PTYPE_INNER_L4_TCP;
235 (*p)[0x4d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
236 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
237 RTE_PTYPE_INNER_L4_TCP;
238 (*p)[0x4e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
239 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
240 RTE_PTYPE_INNER_L4_TCP;
241 (*p)[0x51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
242 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
243 RTE_PTYPE_INNER_L4_TCP;
244 (*p)[0x52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
245 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
246 RTE_PTYPE_INNER_L4_TCP;
247 (*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
248 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
249 RTE_PTYPE_INNER_L4_TCP;
250 (*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
251 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
252 RTE_PTYPE_INNER_L4_TCP;
253 (*p)[0xcd] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
254 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
255 RTE_PTYPE_INNER_L4_TCP;
256 (*p)[0xce] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
257 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
258 RTE_PTYPE_INNER_L4_TCP;
259 (*p)[0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
260 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
261 RTE_PTYPE_INNER_L4_TCP;
262 (*p)[0xd2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
263 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
264 RTE_PTYPE_INNER_L4_TCP;
266 (*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
267 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
268 RTE_PTYPE_INNER_L4_UDP;
269 (*p)[0x4a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
270 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
271 RTE_PTYPE_INNER_L4_UDP;
272 (*p)[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
273 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
274 RTE_PTYPE_INNER_L4_UDP;
275 (*p)[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
276 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
277 RTE_PTYPE_INNER_L4_UDP;
281 * Build a table to translate packet checksum offload flags to the Verbs checksum type.
284 mlx5_set_cksum_table(void)
290 * The index should have:
291 * bit[0] = PKT_TX_TCP_SEG
292 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
293 * bit[4] = PKT_TX_IP_CKSUM
294 * bit[8] = PKT_TX_OUTER_IP_CKSUM
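* For example (illustration), an index with the tunneled bit and bit[4] set
* (inner IP checksum requested over a tunnel) yields
* MLX5_ETH_WQE_L3_INNER_CSUM, and adding bit[8] also requests the outer
* MLX5_ETH_WQE_L3_CSUM, as computed below.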
297 for (i = 0; i < RTE_DIM(mlx5_cksum_table); ++i) {
300 /* Tunneled packet. */
301 if (i & (1 << 8)) /* Outer IP. */
302 v |= MLX5_ETH_WQE_L3_CSUM;
303 if (i & (1 << 4)) /* Inner IP. */
304 v |= MLX5_ETH_WQE_L3_INNER_CSUM;
305 if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
306 v |= MLX5_ETH_WQE_L4_INNER_CSUM;
309 if (i & (1 << 4)) /* IP. */
310 v |= MLX5_ETH_WQE_L3_CSUM;
311 if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
312 v |= MLX5_ETH_WQE_L4_CSUM;
314 mlx5_cksum_table[i] = v;
319 * Build a table to translate the mbuf packet type to the Verbs SWP type.
322 mlx5_set_swp_types_table(void)
328 * The index should have:
329 * bit[0:1] = PKT_TX_L4_MASK
330 * bit[4] = PKT_TX_IPV6
331 * bit[8] = PKT_TX_OUTER_IPV6
332 * bit[9] = PKT_TX_OUTER_UDP
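* For example (illustration), an index with only bit[8] set describes a
* packet with an outer IPv6 header and maps to MLX5_ETH_WQE_L3_OUTER_IPV6
* below.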
334 for (i = 0; i < RTE_DIM(mlx5_swp_types_table); ++i) {
337 v |= MLX5_ETH_WQE_L3_OUTER_IPV6;
339 v |= MLX5_ETH_WQE_L4_OUTER_UDP;
341 v |= MLX5_ETH_WQE_L3_INNER_IPV6;
342 if ((i & 3) == (PKT_TX_UDP_CKSUM >> 52))
343 v |= MLX5_ETH_WQE_L4_INNER_UDP;
344 mlx5_swp_types_table[i] = v;
349 * Set Software Parser flags and offsets in the Ethernet Segment of the WQE.
350 * Flags must be initialized to zero beforehand.
353 * Pointer to burst routine local context.
355 * Pointer to store Software Parser flags
357 * Configured Tx offloads mask. It is fully defined at
358 * compile time and may be used for optimization.
361 * Software Parser offsets packed in dword.
362 * Software Parser flags are set by pointer.
364 static __rte_always_inline uint32_t
365 txq_mbuf_to_swp(struct mlx5_txq_local *__rte_restrict loc,
370 unsigned int idx, off;
373 if (!MLX5_TXOFF_CONFIG(SWP))
375 ol = loc->mbuf->ol_flags;
376 tunnel = ol & PKT_TX_TUNNEL_MASK;
378 * Check whether the Software Parser is required.
379 * Only customized tunnels may request it.
381 if (likely(tunnel != PKT_TX_TUNNEL_UDP && tunnel != PKT_TX_TUNNEL_IP))
384 * The index should have:
385 * bit[0:1] = PKT_TX_L4_MASK
386 * bit[4] = PKT_TX_IPV6
387 * bit[8] = PKT_TX_OUTER_IPV6
388 * bit[9] = PKT_TX_OUTER_UDP
390 idx = (ol & (PKT_TX_L4_MASK | PKT_TX_IPV6 | PKT_TX_OUTER_IPV6)) >> 52;
391 idx |= (tunnel == PKT_TX_TUNNEL_UDP) ? (1 << 9) : 0;
392 *swp_flags = mlx5_swp_types_table[idx];
394 * Set offsets for SW parser. Since ConnectX-5, SW parser just
395 * complements HW parser. SW parser starts to engage only if HW parser
396 * can't reach a header. For the older devices, HW parser will not kick
397 * in if any of SWP offsets is set. Therefore, all of the L3 offsets
398 * should be set regardless of HW offload.
400 off = loc->mbuf->outer_l2_len;
401 if (MLX5_TXOFF_CONFIG(VLAN) && ol & PKT_TX_VLAN_PKT)
402 off += sizeof(struct rte_vlan_hdr);
403 set = (off >> 1) << 8; /* Outer L3 offset. */
404 off += loc->mbuf->outer_l3_len;
405 if (tunnel == PKT_TX_TUNNEL_UDP)
406 set |= off >> 1; /* Outer L4 offset. */
407 if (ol & (PKT_TX_IPV4 | PKT_TX_IPV6)) { /* Inner IP. */
408 const uint64_t csum = ol & PKT_TX_L4_MASK;
409 off += loc->mbuf->l2_len;
410 set |= (off >> 1) << 24; /* Inner L3 offset. */
411 if (csum == PKT_TX_TCP_CKSUM ||
412 csum == PKT_TX_UDP_CKSUM ||
413 (MLX5_TXOFF_CONFIG(TSO) && ol & PKT_TX_TCP_SEG)) {
414 off += loc->mbuf->l3_len;
415 set |= (off >> 1) << 16; /* Inner L4 offset. */
418 set = rte_cpu_to_le_32(set);
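/*
 * Worked example (illustration only, hypothetical header lengths): for an
 * IP-over-IP tunnel with outer_l2_len = 14, outer_l3_len = 20, l2_len = 0,
 * l3_len = 20 and an inner TCP checksum request, the packed dword carries
 * outer L3 offset 7, inner L3 offset 17 and inner L4 offset 27 (all in
 * 2-byte words); no outer L4 offset is set since the tunnel is not UDP.
 */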
423 * Convert the Checksum offloads to Verbs.
426 * Pointer to the mbuf.
429 * Converted checksum flags.
431 static __rte_always_inline uint8_t
432 txq_ol_cksum_to_cs(struct rte_mbuf *buf)
435 uint8_t is_tunnel = !!(buf->ol_flags & PKT_TX_TUNNEL_MASK);
436 const uint64_t ol_flags_mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK |
437 PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM;
440 * The index should have:
441 * bit[0] = PKT_TX_TCP_SEG
442 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
443 * bit[4] = PKT_TX_IP_CKSUM
444 * bit[8] = PKT_TX_OUTER_IP_CKSUM
447 idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (!!is_tunnel << 9);
448 return mlx5_cksum_table[idx];
452 * Internal function to compute the number of used descriptors in an RX queue.
458 * The number of used Rx descriptors.
461 rx_queue_count(struct mlx5_rxq_data *rxq)
463 struct rxq_zip *zip = &rxq->zip;
464 volatile struct mlx5_cqe *cqe;
465 unsigned int cq_ci = rxq->cq_ci;
466 const unsigned int cqe_n = (1 << rxq->cqe_n);
467 const unsigned int cqe_cnt = cqe_n - 1;
468 unsigned int used = 0;
470 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
471 while (check_cqe(cqe, cqe_n, cq_ci) != MLX5_CQE_STATUS_HW_OWN) {
475 op_own = cqe->op_own;
476 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
477 if (unlikely(zip->ai))
478 n = zip->cqe_cnt - zip->ai;
480 n = rte_be_to_cpu_32(cqe->byte_cnt);
485 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
487 used = RTE_MIN(used, cqe_n);
492 * DPDK callback to check the status of an Rx descriptor.
497 * The index of the descriptor in the ring.
500 * The status of the Rx descriptor.
503 mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
505 struct mlx5_rxq_data *rxq = rx_queue;
506 struct mlx5_rxq_ctrl *rxq_ctrl =
507 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
508 struct rte_eth_dev *dev = ETH_DEV(rxq_ctrl->priv);
510 if (dev->rx_pkt_burst == NULL ||
511 dev->rx_pkt_burst == removed_rx_burst) {
515 if (offset >= (1 << rxq->cqe_n)) {
519 if (offset < rx_queue_count(rxq))
520 return RTE_ETH_RX_DESC_DONE;
521 return RTE_ETH_RX_DESC_AVAIL;
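/*
 * Usage sketch (application side, illustration only): this callback is
 * reached through the generic ethdev API, e.g.
 *
 *	if (rte_eth_rx_descriptor_status(port_id, queue_id, offset) ==
 *	    RTE_ETH_RX_DESC_DONE)
 *		... the descriptor at 'offset' holds a received packet ...
 */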
525 * DPDK callback to get the RX queue information.
528 * Pointer to the device structure.
531 * Rx queue identifier.
534 * Pointer to the RX queue information structure.
541 mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
542 struct rte_eth_rxq_info *qinfo)
544 struct mlx5_priv *priv = dev->data->dev_private;
545 struct mlx5_rxq_data *rxq = (*priv->rxqs)[rx_queue_id];
546 struct mlx5_rxq_ctrl *rxq_ctrl =
547 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
551 qinfo->mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
552 rxq->mprq_mp : rxq->mp;
553 qinfo->conf.rx_thresh.pthresh = 0;
554 qinfo->conf.rx_thresh.hthresh = 0;
555 qinfo->conf.rx_thresh.wthresh = 0;
556 qinfo->conf.rx_free_thresh = rxq->rq_repl_thresh;
557 qinfo->conf.rx_drop_en = 1;
558 qinfo->conf.rx_deferred_start = rxq_ctrl ? 0 : 1;
559 qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
560 qinfo->scattered_rx = dev->data->scattered_rx;
561 qinfo->nb_desc = 1 << rxq->elts_n;
565 * DPDK callback to get the RX packet burst mode information.
568 * Pointer to the device structure.
571 * Rx queue identifier.
574 * Pointer to the burst mode information.
577 * 0 as success, -EINVAL as failure.
581 mlx5_rx_burst_mode_get(struct rte_eth_dev *dev,
582 uint16_t rx_queue_id __rte_unused,
583 struct rte_eth_burst_mode *mode)
585 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
586 struct mlx5_priv *priv = dev->data->dev_private;
587 struct mlx5_rxq_data *rxq;
589 rxq = (*priv->rxqs)[rx_queue_id];
594 if (pkt_burst == mlx5_rx_burst) {
595 snprintf(mode->info, sizeof(mode->info), "%s", "Scalar");
596 } else if (pkt_burst == mlx5_rx_burst_mprq) {
597 snprintf(mode->info, sizeof(mode->info), "%s", "Multi-Packet RQ");
598 } else if (pkt_burst == mlx5_rx_burst_vec) {
599 #if defined RTE_ARCH_X86_64
600 snprintf(mode->info, sizeof(mode->info), "%s", "Vector SSE");
601 #elif defined RTE_ARCH_ARM64
602 snprintf(mode->info, sizeof(mode->info), "%s", "Vector Neon");
603 #elif defined RTE_ARCH_PPC_64
604 snprintf(mode->info, sizeof(mode->info), "%s", "Vector AltiVec");
608 } else if (pkt_burst == mlx5_rx_burst_mprq_vec) {
609 #if defined RTE_ARCH_X86_64
610 snprintf(mode->info, sizeof(mode->info), "%s", "MPRQ Vector SSE");
611 #elif defined RTE_ARCH_ARM64
612 snprintf(mode->info, sizeof(mode->info), "%s", "MPRQ Vector Neon");
613 #elif defined RTE_ARCH_PPC_64
614 snprintf(mode->info, sizeof(mode->info), "%s", "MPRQ Vector AltiVec");
625 * DPDK callback to get the number of used descriptors in an Rx queue.
628 * Pointer to the device structure.
634 * The number of used Rx descriptors.
635 * -EINVAL if the queue is invalid.
638 mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
640 struct mlx5_priv *priv = dev->data->dev_private;
641 struct mlx5_rxq_data *rxq;
643 if (dev->rx_pkt_burst == NULL ||
644 dev->rx_pkt_burst == removed_rx_burst) {
648 rxq = (*priv->rxqs)[rx_queue_id];
653 return rx_queue_count(rxq);
656 #define MLX5_SYSTEM_LOG_DIR "/var/log"
658 * Dump debug information to a log file.
663 * If not NULL, this string is printed as a header to the output
664 * and the output will be in hexadecimal view.
666 * This is the buffer address to print out.
668 * The number of bytes to dump out.
671 mlx5_dump_debug_information(const char *fname, const char *hex_title,
672 const void *buf, unsigned int hex_len)
676 MKSTR(path, "%s/%s", MLX5_SYSTEM_LOG_DIR, fname);
677 fd = fopen(path, "a+");
679 DRV_LOG(WARNING, "cannot open %s for debug dump", path);
680 MKSTR(path2, "./%s", fname);
681 fd = fopen(path2, "a+");
683 DRV_LOG(ERR, "cannot open %s for debug dump", path2);
686 DRV_LOG(INFO, "New debug dump in file %s", path2);
688 DRV_LOG(INFO, "New debug dump in file %s", path);
691 rte_hexdump(fd, hex_title, buf, hex_len);
693 fprintf(fd, "%s", (const char *)buf);
694 fprintf(fd, "\n\n\n");
699 * Move QP from error state to running state and initialize indexes.
702 * Pointer to TX queue control structure.
705 * 0 on success, else -1.
708 tx_recover_qp(struct mlx5_txq_ctrl *txq_ctrl)
710 struct mlx5_mp_arg_queue_state_modify sm = {
712 .queue_id = txq_ctrl->txq.idx,
715 if (mlx5_queue_state_modify(ETH_DEV(txq_ctrl->priv), &sm))
717 txq_ctrl->txq.wqe_ci = 0;
718 txq_ctrl->txq.wqe_pi = 0;
719 txq_ctrl->txq.elts_comp = 0;
723 /* Return 1 if the error CQE has already been signed, otherwise sign it and return 0. */
725 check_err_cqe_seen(volatile struct mlx5_err_cqe *err_cqe)
727 static const uint8_t magic[] = "seen";
731 for (i = 0; i < sizeof(magic); ++i)
732 if (!ret || err_cqe->rsvd1[i] != magic[i]) {
734 err_cqe->rsvd1[i] = magic[i];
743 * Pointer to TX queue structure.
745 * Pointer to the error CQE.
748 * A negative value if queue recovery failed, otherwise
749 * zero when the error completion entry is handled successfully.
752 mlx5_tx_error_cqe_handle(struct mlx5_txq_data *__rte_restrict txq,
753 volatile struct mlx5_err_cqe *err_cqe)
755 if (err_cqe->syndrome != MLX5_CQE_SYNDROME_WR_FLUSH_ERR) {
756 const uint16_t wqe_m = ((1 << txq->wqe_n) - 1);
757 struct mlx5_txq_ctrl *txq_ctrl =
758 container_of(txq, struct mlx5_txq_ctrl, txq);
759 uint16_t new_wqe_pi = rte_be_to_cpu_16(err_cqe->wqe_counter);
760 int seen = check_err_cqe_seen(err_cqe);
762 if (!seen && txq_ctrl->dump_file_n <
763 txq_ctrl->priv->config.max_dump_files_num) {
764 MKSTR(err_str, "Unexpected CQE error syndrome "
765 "0x%02x CQN = %u SQN = %u wqe_counter = %u "
766 "wq_ci = %u cq_ci = %u", err_cqe->syndrome,
767 txq->cqe_s, txq->qp_num_8s >> 8,
768 rte_be_to_cpu_16(err_cqe->wqe_counter),
769 txq->wqe_ci, txq->cq_ci);
770 MKSTR(name, "dpdk_mlx5_port_%u_txq_%u_index_%u_%u",
771 PORT_ID(txq_ctrl->priv), txq->idx,
772 txq_ctrl->dump_file_n, (uint32_t)rte_rdtsc());
773 mlx5_dump_debug_information(name, NULL, err_str, 0);
774 mlx5_dump_debug_information(name, "MLX5 Error CQ:",
775 (const void *)((uintptr_t)
779 mlx5_dump_debug_information(name, "MLX5 Error SQ:",
780 (const void *)((uintptr_t)
784 txq_ctrl->dump_file_n++;
788 * Count errors in WQE units.
789 * Later this can be improved to count error packets,
790 * for example, by parsing the SQ to find how many packets
791 * should be counted for each WQE.
793 txq->stats.oerrors += ((txq->wqe_ci & wqe_m) -
795 if (tx_recover_qp(txq_ctrl)) {
796 /* Recovering failed - retry later on the same WQE. */
799 /* Release all the remaining buffers. */
800 txq_free_elts(txq_ctrl);
806 * Translate RX completion flags to packet type.
809 * Pointer to RX queue structure.
813 * @note: update mlx5_dev_supported_ptypes_get() if anything changes here.
816 * Packet type for struct rte_mbuf.
818 static inline uint32_t
819 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
820 volatile struct mlx5_mini_cqe8 *mcqe)
824 uint8_t pinfo = (cqe->pkt_info & 0x3) << 6;
826 /* Get the L3/L4 header type from the mini-CQE in case of the L3/L4 format. */
828 rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_L34H_STRIDX)
829 ptype = (cqe->hdr_type_etc & 0xfc00) >> 10;
831 ptype = mcqe->hdr_type >> 2;
833 * The index to the array should have:
834 * bit[1:0] = l3_hdr_type
835 * bit[4:2] = l4_hdr_type
838 * bit[7] = outer_l3_type
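* When the tunneled bit (bit[6]) of the index is set, the per-queue tunnel
* ptype value in rxq->tunnel is OR-ed into the result below; otherwise the
* multiplication by zero leaves the packet type untouched.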
841 return mlx5_ptype_table[idx] | rxq->tunnel * !!(idx & (1 << 6));
845 * Initialize Rx WQ and indexes.
848 * Pointer to RX queue structure.
851 mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
853 const unsigned int wqe_n = 1 << rxq->elts_n;
856 for (i = 0; (i != wqe_n); ++i) {
857 volatile struct mlx5_wqe_data_seg *scat;
861 if (mlx5_rxq_mprq_enabled(rxq)) {
862 struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[i];
864 scat = &((volatile struct mlx5_wqe_mprq *)
866 addr = (uintptr_t)mlx5_mprq_buf_addr(buf,
867 1 << rxq->strd_num_n);
868 byte_count = (1 << rxq->strd_sz_n) *
869 (1 << rxq->strd_num_n);
871 struct rte_mbuf *buf = (*rxq->elts)[i];
873 scat = &((volatile struct mlx5_wqe_data_seg *)
875 addr = rte_pktmbuf_mtod(buf, uintptr_t);
876 byte_count = DATA_LEN(buf);
878 /* scat->addr must be able to store a pointer. */
879 MLX5_ASSERT(sizeof(scat->addr) >= sizeof(uintptr_t));
880 *scat = (struct mlx5_wqe_data_seg){
881 .addr = rte_cpu_to_be_64(addr),
882 .byte_count = rte_cpu_to_be_32(byte_count),
883 .lkey = mlx5_rx_addr2mr(rxq, addr),
886 rxq->consumed_strd = 0;
887 rxq->decompressed = 0;
889 rxq->zip = (struct rxq_zip){
892 rxq->elts_ci = mlx5_rxq_mprq_enabled(rxq) ?
893 (wqe_n >> rxq->sges_n) * (1 << rxq->strd_num_n) : 0;
894 /* Update doorbell counter. */
895 rxq->rq_ci = wqe_n >> rxq->sges_n;
897 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
901 * Modify a Verbs/DevX queue state.
902 * This must be called from the primary process.
905 * Pointer to Ethernet device.
907 * State modify request parameters.
910 * 0 in case of success else non-zero value and rte_errno is set.
913 mlx5_queue_state_modify_primary(struct rte_eth_dev *dev,
914 const struct mlx5_mp_arg_queue_state_modify *sm)
917 struct mlx5_priv *priv = dev->data->dev_private;
920 struct mlx5_rxq_data *rxq = (*priv->rxqs)[sm->queue_id];
921 struct mlx5_rxq_ctrl *rxq_ctrl =
922 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
924 ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, sm->state);
926 DRV_LOG(ERR, "Cannot change Rx WQ state to %u - %s",
927 sm->state, strerror(errno));
932 struct mlx5_txq_data *txq = (*priv->txqs)[sm->queue_id];
933 struct mlx5_txq_ctrl *txq_ctrl =
934 container_of(txq, struct mlx5_txq_ctrl, txq);
936 ret = priv->obj_ops.txq_obj_modify(txq_ctrl->obj,
937 MLX5_TXQ_MOD_ERR2RDY,
938 (uint8_t)priv->dev_port);
946 * Modify a Verbs queue state.
949 * Pointer to Ethernet device.
951 * State modify request parameters.
954 * 0 in case of success else non-zero value.
957 mlx5_queue_state_modify(struct rte_eth_dev *dev,
958 struct mlx5_mp_arg_queue_state_modify *sm)
960 struct mlx5_priv *priv = dev->data->dev_private;
963 switch (rte_eal_process_type()) {
964 case RTE_PROC_PRIMARY:
965 ret = mlx5_queue_state_modify_primary(dev, sm);
967 case RTE_PROC_SECONDARY:
968 ret = mlx5_mp_req_queue_state_modify(&priv->mp_id, sm);
978 * The function moves the RQ state to reset when the first error CQE is
979 * seen, then the CQ is drained by the caller's loop. When the CQ is empty,
980 * it moves the RQ state to ready and reinitializes the RQ.
981 * Identifying the next CQE and counting errors are the caller's responsibility.
984 * Pointer to RX queue structure.
986 * 1 when called from vectorized Rx burst, need to prepare mbufs for the RQ.
987 * 0 when called from non-vectorized Rx burst.
990 * -1 in case of recovery error, otherwise the CQE status.
993 mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
995 const uint16_t cqe_n = 1 << rxq->cqe_n;
996 const uint16_t cqe_mask = cqe_n - 1;
997 const uint16_t wqe_n = 1 << rxq->elts_n;
998 const uint16_t strd_n = 1 << rxq->strd_num_n;
999 struct mlx5_rxq_ctrl *rxq_ctrl =
1000 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
1002 volatile struct mlx5_cqe *cqe;
1003 volatile struct mlx5_err_cqe *err_cqe;
1005 .cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask],
1007 struct mlx5_mp_arg_queue_state_modify sm;
1010 switch (rxq->err_state) {
1011 case MLX5_RXQ_ERR_STATE_NO_ERROR:
1012 rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_RESET;
1014 case MLX5_RXQ_ERR_STATE_NEED_RESET:
1016 sm.queue_id = rxq->idx;
1017 sm.state = IBV_WQS_RESET;
1018 if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv), &sm))
1020 if (rxq_ctrl->dump_file_n <
1021 rxq_ctrl->priv->config.max_dump_files_num) {
1022 MKSTR(err_str, "Unexpected CQE error syndrome "
1023 "0x%02x CQN = %u RQN = %u wqe_counter = %u"
1024 " rq_ci = %u cq_ci = %u", u.err_cqe->syndrome,
1025 rxq->cqn, rxq_ctrl->wqn,
1026 rte_be_to_cpu_16(u.err_cqe->wqe_counter),
1027 rxq->rq_ci << rxq->sges_n, rxq->cq_ci);
1028 MKSTR(name, "dpdk_mlx5_port_%u_rxq_%u_%u",
1029 rxq->port_id, rxq->idx, (uint32_t)rte_rdtsc());
1030 mlx5_dump_debug_information(name, NULL, err_str, 0);
1031 mlx5_dump_debug_information(name, "MLX5 Error CQ:",
1032 (const void *)((uintptr_t)
1034 sizeof(*u.cqe) * cqe_n);
1035 mlx5_dump_debug_information(name, "MLX5 Error RQ:",
1036 (const void *)((uintptr_t)
1039 rxq_ctrl->dump_file_n++;
1041 rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_READY;
1043 case MLX5_RXQ_ERR_STATE_NEED_READY:
1044 ret = check_cqe(u.cqe, cqe_n, rxq->cq_ci);
1045 if (ret == MLX5_CQE_STATUS_HW_OWN) {
1047 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1050 * The RQ consumer index must be zeroed while moving
1051 * from RESET state to RDY state.
1053 *rxq->rq_db = rte_cpu_to_be_32(0);
1056 sm.queue_id = rxq->idx;
1057 sm.state = IBV_WQS_RDY;
1058 if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv),
1062 const uint32_t elts_n =
1063 mlx5_rxq_mprq_enabled(rxq) ?
1064 wqe_n * strd_n : wqe_n;
1065 const uint32_t e_mask = elts_n - 1;
1067 mlx5_rxq_mprq_enabled(rxq) ?
1068 rxq->elts_ci : rxq->rq_ci;
1070 struct rte_mbuf **elt;
1072 unsigned int n = elts_n - (elts_ci -
1075 for (i = 0; i < (int)n; ++i) {
1076 elt_idx = (elts_ci + i) & e_mask;
1077 elt = &(*rxq->elts)[elt_idx];
1078 *elt = rte_mbuf_raw_alloc(rxq->mp);
1080 for (i--; i >= 0; --i) {
1081 elt_idx = (elts_ci +
1085 rte_pktmbuf_free_seg
1091 for (i = 0; i < (int)elts_n; ++i) {
1092 elt = &(*rxq->elts)[i];
1094 (uint16_t)((*elt)->buf_len -
1095 rte_pktmbuf_headroom(*elt));
1097 /* Padding with a fake mbuf for vec Rx. */
1098 for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
1099 (*rxq->elts)[elts_n + i] =
1102 mlx5_rxq_initialize(rxq);
1103 rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
1112 * Get the size of the next packet for a given CQE. For compressed CQEs, the
1113 * consumer index is updated only once all packets of the current one have been processed.
1117 * Pointer to RX queue.
1121 * Store pointer to mini-CQE if compressed. Otherwise, the pointer is not
1125 * 0 in case of empty CQE, otherwise the packet size in bytes.
1128 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
1129 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe)
1131 struct rxq_zip *zip = &rxq->zip;
1132 uint16_t cqe_n = cqe_cnt + 1;
1138 /* Process compressed data in the CQE and mini arrays. */
1140 volatile struct mlx5_mini_cqe8 (*mc)[8] =
1141 (volatile struct mlx5_mini_cqe8 (*)[8])
1142 (uintptr_t)(&(*rxq->cqes)[zip->ca &
1144 len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt &
1146 *mcqe = &(*mc)[zip->ai & 7];
1147 if ((++zip->ai & 7) == 0) {
1148 /* Invalidate consumed CQEs */
1151 while (idx != end) {
1152 (*rxq->cqes)[idx & cqe_cnt].op_own =
1153 MLX5_CQE_INVALIDATE;
1157 * Increment consumer index to skip the number
1158 * of CQEs consumed. Hardware leaves holes in
1159 * the CQ ring for software use.
1164 if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) {
1165 /* Invalidate the rest */
1169 while (idx != end) {
1170 (*rxq->cqes)[idx & cqe_cnt].op_own =
1171 MLX5_CQE_INVALIDATE;
1174 rxq->cq_ci = zip->cq_ci;
1178 * No compressed data, get next CQE and verify if it is
1185 ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
1186 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
1187 if (unlikely(ret == MLX5_CQE_STATUS_ERR ||
1189 ret = mlx5_rx_err_handle(rxq, 0);
1190 if (ret == MLX5_CQE_STATUS_HW_OWN ||
1198 op_own = cqe->op_own;
1199 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) {
1200 volatile struct mlx5_mini_cqe8 (*mc)[8] =
1201 (volatile struct mlx5_mini_cqe8 (*)[8])
1202 (uintptr_t)(&(*rxq->cqes)
1206 /* Fix endianness. */
1207 zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
1209 * Current mini array position is the one
1210 * returned by check_cqe64().
1212 * If completion comprises several mini arrays,
1213 * as a special case the second one is located
1214 * 7 CQEs after the initial CQE instead of 8
1215 * for subsequent ones.
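* For example (illustration only), if the compressed session starts at
* cq_ci = 100, the mini arrays are taken from CQE slots 100, 107, 115,
* 123 and so on.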
1217 zip->ca = rxq->cq_ci;
1218 zip->na = zip->ca + 7;
1219 /* Compute the next non compressed CQE. */
1221 zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
1222 /* Get packet size to return. */
1223 len = rte_be_to_cpu_32((*mc)[0].byte_cnt &
1227 /* Prefetch all to be invalidated */
1230 while (idx != end) {
1231 rte_prefetch0(&(*rxq->cqes)[(idx) &
1236 len = rte_be_to_cpu_32(cqe->byte_cnt);
1239 if (unlikely(rxq->err_state)) {
1240 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1241 ++rxq->stats.idropped;
1249 * Translate RX completion flags to offload flags.
1255 * Offload flags (ol_flags) for struct rte_mbuf.
1257 static inline uint32_t
1258 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe)
1260 uint32_t ol_flags = 0;
1261 uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);
1265 MLX5_CQE_RX_L3_HDR_VALID,
1266 PKT_RX_IP_CKSUM_GOOD) |
1268 MLX5_CQE_RX_L4_HDR_VALID,
1269 PKT_RX_L4_CKSUM_GOOD);
1274 * Fill in mbuf fields from RX completion flags.
1275 * Note that pkt->ol_flags should be initialized outside of this function.
1278 * Pointer to RX queue.
1283 * @param mcqe
1284 * Pointer to the mini-CQE, may be NULL.
1287 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
1288 volatile struct mlx5_cqe *cqe,
1289 volatile struct mlx5_mini_cqe8 *mcqe)
1291 /* Update packet information. */
1292 pkt->packet_type = rxq_cq_to_pkt_type(rxq, cqe, mcqe);
1294 if (rxq->rss_hash) {
1295 uint32_t rss_hash_res = 0;
1297 /* If compressed, take hash result from mini-CQE. */
1299 rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_HASH)
1300 rss_hash_res = rte_be_to_cpu_32(cqe->rx_hash_res);
1302 rss_hash_res = rte_be_to_cpu_32(mcqe->rx_hash_result);
1304 pkt->hash.rss = rss_hash_res;
1305 pkt->ol_flags |= PKT_RX_RSS_HASH;
1311 /* If compressed, take flow tag from mini-CQE. */
1313 rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_FTAG_STRIDX)
1314 mark = cqe->sop_drop_qpn;
1316 mark = ((mcqe->byte_cnt_flow & 0xff) << 8) |
1317 (mcqe->flow_tag_high << 16);
1318 if (MLX5_FLOW_MARK_IS_VALID(mark)) {
1319 pkt->ol_flags |= PKT_RX_FDIR;
1320 if (mark != RTE_BE32(MLX5_FLOW_MARK_DEFAULT)) {
1321 pkt->ol_flags |= PKT_RX_FDIR_ID;
1322 pkt->hash.fdir.hi = mlx5_flow_mark_get(mark);
1326 if (rxq->dynf_meta && cqe->flow_table_metadata) {
1327 pkt->ol_flags |= rxq->flow_meta_mask;
1328 *RTE_MBUF_DYNFIELD(pkt, rxq->flow_meta_offset, uint32_t *) =
1329 cqe->flow_table_metadata;
1332 pkt->ol_flags |= rxq_cq_to_ol_flags(cqe);
1333 if (rxq->vlan_strip) {
1337 rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_L34H_STRIDX)
1338 vlan_strip = cqe->hdr_type_etc &
1339 RTE_BE16(MLX5_CQE_VLAN_STRIPPED);
1341 vlan_strip = mcqe->hdr_type &
1342 RTE_BE16(MLX5_CQE_VLAN_STRIPPED);
1344 pkt->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1345 pkt->vlan_tci = rte_be_to_cpu_16(cqe->vlan_info);
1348 if (rxq->hw_timestamp) {
1349 uint64_t ts = rte_be_to_cpu_64(cqe->timestamp);
1351 if (rxq->rt_timestamp)
1352 ts = mlx5_txpp_convert_rx_ts(rxq->sh, ts);
1353 mlx5_timestamp_set(pkt, rxq->timestamp_offset, ts);
1354 pkt->ol_flags |= rxq->timestamp_rx_flag;
1359 * DPDK callback for RX.
1362 * Generic pointer to RX queue structure.
1364 * Array to store received packets.
1366 * Maximum number of packets in array.
1369 * Number of packets successfully received (<= pkts_n).
1372 mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1374 struct mlx5_rxq_data *rxq = dpdk_rxq;
1375 const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1;
1376 const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1;
1377 const unsigned int sges_n = rxq->sges_n;
1378 struct rte_mbuf *pkt = NULL;
1379 struct rte_mbuf *seg = NULL;
1380 volatile struct mlx5_cqe *cqe =
1381 &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1383 unsigned int rq_ci = rxq->rq_ci << sges_n;
1384 int len = 0; /* keep its value across iterations. */
1387 unsigned int idx = rq_ci & wqe_cnt;
1388 volatile struct mlx5_wqe_data_seg *wqe =
1389 &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx];
1390 struct rte_mbuf *rep = (*rxq->elts)[idx];
1391 volatile struct mlx5_mini_cqe8 *mcqe = NULL;
1399 /* Allocate the buf from the same pool. */
1400 rep = rte_mbuf_raw_alloc(seg->pool);
1401 if (unlikely(rep == NULL)) {
1402 ++rxq->stats.rx_nombuf;
1405 * no buffers before we even started,
1406 * bail out silently.
1410 while (pkt != seg) {
1411 MLX5_ASSERT(pkt != (*rxq->elts)[idx]);
1415 rte_mbuf_raw_free(pkt);
1421 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1422 len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe);
1424 rte_mbuf_raw_free(rep);
1428 MLX5_ASSERT(len >= (rxq->crc_present << 2));
1429 pkt->ol_flags &= EXT_ATTACHED_MBUF;
1430 rxq_cq_to_mbuf(rxq, pkt, cqe, mcqe);
1431 if (rxq->crc_present)
1432 len -= RTE_ETHER_CRC_LEN;
1434 if (cqe->lro_num_seg > 1) {
1436 (rte_pktmbuf_mtod(pkt, uint8_t *), cqe,
1438 pkt->ol_flags |= PKT_RX_LRO;
1439 pkt->tso_segsz = len / cqe->lro_num_seg;
1442 DATA_LEN(rep) = DATA_LEN(seg);
1443 PKT_LEN(rep) = PKT_LEN(seg);
1444 SET_DATA_OFF(rep, DATA_OFF(seg));
1445 PORT(rep) = PORT(seg);
1446 (*rxq->elts)[idx] = rep;
1448 * Fill NIC descriptor with the new buffer. The lkey and size
1449 * of the buffers are already known, only the buffer address
1452 wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
1453 /* If there's only one MR, no need to replace LKey in WQE. */
1454 if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
1455 wqe->lkey = mlx5_rx_mb2mr(rxq, rep);
1456 if (len > DATA_LEN(seg)) {
1457 len -= DATA_LEN(seg);
1462 DATA_LEN(seg) = len;
1463 #ifdef MLX5_PMD_SOFT_COUNTERS
1464 /* Increment bytes counter. */
1465 rxq->stats.ibytes += PKT_LEN(pkt);
1467 /* Return packet. */
1472 /* Align consumer index to the next stride. */
1477 if (unlikely((i == 0) && ((rq_ci >> sges_n) == rxq->rq_ci)))
1479 /* Update the consumer index. */
1480 rxq->rq_ci = rq_ci >> sges_n;
1482 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1484 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
1485 #ifdef MLX5_PMD_SOFT_COUNTERS
1486 /* Increment packets counter. */
1487 rxq->stats.ipackets += i;
1493 * Update LRO packet TCP header.
1494 * The HW LRO feature doesn't update the TCP header after coalescing the
1495 * TCP segments but supplies information in the CQE for SW to fill it in.
1498 * Pointer to the TCP header.
1500 * Pointer to the completion entry.
1502 * The L3 pseudo-header checksum.
1505 mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *__rte_restrict tcp,
1506 volatile struct mlx5_cqe *__rte_restrict cqe,
1507 uint32_t phcsum, uint8_t l4_type)
1510 * The HW calculates only the TCP payload checksum, need to complete
1511 * the TCP header checksum and the L3 pseudo-header checksum.
1513 uint32_t csum = phcsum + cqe->csum;
1515 if (l4_type == MLX5_L4_HDR_TYPE_TCP_EMPTY_ACK ||
1516 l4_type == MLX5_L4_HDR_TYPE_TCP_WITH_ACL) {
1517 tcp->tcp_flags |= RTE_TCP_ACK_FLAG;
1518 tcp->recv_ack = cqe->lro_ack_seq_num;
1519 tcp->rx_win = cqe->lro_tcp_win;
1521 if (cqe->lro_tcppsh_abort_dupack & MLX5_CQE_LRO_PUSH_MASK)
1522 tcp->tcp_flags |= RTE_TCP_PSH_FLAG;
1524 csum += rte_raw_cksum(tcp, (tcp->data_off >> 4) * 4);
1525 csum = ((csum & 0xffff0000) >> 16) + (csum & 0xffff);
1526 csum = (~csum) & 0xffff;
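/*
 * Worked example (illustration only): if the accumulated sum is 0x1a2b3,
 * the fold above gives 0x1 + 0xa2b3 = 0xa2b4 and the final one's
 * complement checksum is (~0xa2b4) & 0xffff = 0x5d4b.
 */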
1533 * Update LRO packet headers.
1534 * The HW LRO feature doesn't update the L3/TCP headers after coalescing the
1535 * TCP segments but supplies information in the CQE for SW to fill them in.
1538 * The packet address.
1540 * Pointer to the completion entry.
1542 * The packet length.
1545 mlx5_lro_update_hdr(uint8_t *__rte_restrict padd,
1546 volatile struct mlx5_cqe *__rte_restrict cqe,
1547 volatile struct mlx5_mini_cqe8 *mcqe,
1548 struct mlx5_rxq_data *rxq, uint32_t len)
1551 struct rte_ether_hdr *eth;
1552 struct rte_vlan_hdr *vlan;
1553 struct rte_ipv4_hdr *ipv4;
1554 struct rte_ipv6_hdr *ipv6;
1555 struct rte_tcp_hdr *tcp;
1560 uint16_t proto = h.eth->ether_type;
1565 while (proto == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
1566 proto == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
1567 proto = h.vlan->eth_proto;
1570 if (proto == RTE_BE16(RTE_ETHER_TYPE_IPV4)) {
1571 h.ipv4->time_to_live = cqe->lro_min_ttl;
1572 h.ipv4->total_length = rte_cpu_to_be_16(len - (h.hdr - padd));
1573 h.ipv4->hdr_checksum = 0;
1574 h.ipv4->hdr_checksum = rte_ipv4_cksum(h.ipv4);
1575 phcsum = rte_ipv4_phdr_cksum(h.ipv4, 0);
1578 h.ipv6->hop_limits = cqe->lro_min_ttl;
1579 h.ipv6->payload_len = rte_cpu_to_be_16(len - (h.hdr - padd) -
1581 phcsum = rte_ipv6_phdr_cksum(h.ipv6, 0);
1585 rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_L34H_STRIDX)
1586 l4_type = (rte_be_to_cpu_16(cqe->hdr_type_etc) &
1587 MLX5_CQE_L4_TYPE_MASK) >> MLX5_CQE_L4_TYPE_SHIFT;
1589 l4_type = (rte_be_to_cpu_16(mcqe->hdr_type) &
1590 MLX5_CQE_L4_TYPE_MASK) >> MLX5_CQE_L4_TYPE_SHIFT;
1591 mlx5_lro_update_tcp_hdr(h.tcp, cqe, phcsum, l4_type);
1595 mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
1597 struct mlx5_mprq_buf *buf = opaque;
1599 if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) == 1) {
1600 rte_mempool_put(buf->mp, buf);
1601 } else if (unlikely(__atomic_sub_fetch(&buf->refcnt, 1,
1602 __ATOMIC_RELAXED) == 0)) {
1603 __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
1604 rte_mempool_put(buf->mp, buf);
1609 mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf)
1611 mlx5_mprq_buf_free_cb(NULL, buf);
1615 * DPDK callback for RX with Multi-Packet RQ support.
1618 * Generic pointer to RX queue structure.
1620 * Array to store received packets.
1622 * Maximum number of packets in array.
1625 * Number of packets successfully received (<= pkts_n).
1628 mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1630 struct mlx5_rxq_data *rxq = dpdk_rxq;
1631 const uint32_t strd_n = 1 << rxq->strd_num_n;
1632 const uint32_t strd_sz = 1 << rxq->strd_sz_n;
1633 const uint32_t cq_mask = (1 << rxq->cqe_n) - 1;
1634 const uint32_t wq_mask = (1 << rxq->elts_n) - 1;
1635 volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
1637 uint32_t rq_ci = rxq->rq_ci;
1638 uint16_t consumed_strd = rxq->consumed_strd;
1639 struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
1641 while (i < pkts_n) {
1642 struct rte_mbuf *pkt;
1648 volatile struct mlx5_mini_cqe8 *mcqe = NULL;
1649 enum mlx5_rqx_code rxq_code;
1651 if (consumed_strd == strd_n) {
1652 /* Replace WQE if the buffer is still in use. */
1653 mprq_buf_replace(rxq, rq_ci & wq_mask);
1654 /* Advance to the next WQE. */
1657 buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
1659 cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
1660 ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe);
1664 len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
1665 MLX5_ASSERT((int)len >= (rxq->crc_present << 2));
1666 if (rxq->crc_present)
1667 len -= RTE_ETHER_CRC_LEN;
1669 rxq->mcqe_format == MLX5_CQE_RESP_FORMAT_FTAG_STRIDX)
1670 strd_cnt = (len / strd_sz) + !!(len % strd_sz);
1672 strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
1673 MLX5_MPRQ_STRIDE_NUM_SHIFT;
1674 MLX5_ASSERT(strd_cnt);
1675 consumed_strd += strd_cnt;
1676 if (byte_cnt & MLX5_MPRQ_FILLER_MASK)
1678 strd_idx = rte_be_to_cpu_16(mcqe == NULL ?
1681 MLX5_ASSERT(strd_idx < strd_n);
1682 MLX5_ASSERT(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) &
1684 pkt = rte_pktmbuf_alloc(rxq->mp);
1685 if (unlikely(pkt == NULL)) {
1686 ++rxq->stats.rx_nombuf;
1689 len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
1690 MLX5_ASSERT((int)len >= (rxq->crc_present << 2));
1691 if (rxq->crc_present)
1692 len -= RTE_ETHER_CRC_LEN;
1693 rxq_code = mprq_buf_to_pkt(rxq, pkt, len, buf,
1694 strd_idx, strd_cnt);
1695 if (unlikely(rxq_code != MLX5_RXQ_CODE_EXIT)) {
1696 rte_pktmbuf_free_seg(pkt);
1697 if (rxq_code == MLX5_RXQ_CODE_DROPPED) {
1698 ++rxq->stats.idropped;
1701 if (rxq_code == MLX5_RXQ_CODE_NOMBUF) {
1702 ++rxq->stats.rx_nombuf;
1706 rxq_cq_to_mbuf(rxq, pkt, cqe, mcqe);
1707 if (cqe->lro_num_seg > 1) {
1708 mlx5_lro_update_hdr(rte_pktmbuf_mtod(pkt, uint8_t *),
1709 cqe, mcqe, rxq, len);
1710 pkt->ol_flags |= PKT_RX_LRO;
1711 pkt->tso_segsz = len / cqe->lro_num_seg;
1714 PORT(pkt) = rxq->port_id;
1715 #ifdef MLX5_PMD_SOFT_COUNTERS
1716 /* Increment bytes counter. */
1717 rxq->stats.ibytes += PKT_LEN(pkt);
1719 /* Return packet. */
1723 /* Update the consumer indexes. */
1724 rxq->consumed_strd = consumed_strd;
1726 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1727 if (rq_ci != rxq->rq_ci) {
1730 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
1732 #ifdef MLX5_PMD_SOFT_COUNTERS
1733 /* Increment packets counter. */
1734 rxq->stats.ipackets += i;
1740 * Dummy DPDK callback for TX.
1742 * This function is used to temporarily replace the real callback during
1743 * unsafe control operations on the queue, or in case of error.
1746 * Generic pointer to TX queue structure.
1748 * Packets to transmit.
1750 * Number of packets in array.
1753 * Number of packets successfully transmitted (<= pkts_n).
1756 removed_tx_burst(void *dpdk_txq __rte_unused,
1757 struct rte_mbuf **pkts __rte_unused,
1758 uint16_t pkts_n __rte_unused)
1765 * Dummy DPDK callback for RX.
1767 * This function is used to temporarily replace the real callback during
1768 * unsafe control operations on the queue, or in case of error.
1771 * Generic pointer to RX queue structure.
1773 * Array to store received packets.
1775 * Maximum number of packets in array.
1778 * Number of packets successfully received (<= pkts_n).
1781 removed_rx_burst(void *dpdk_rxq __rte_unused,
1782 struct rte_mbuf **pkts __rte_unused,
1783 uint16_t pkts_n __rte_unused)
1790 * Vectorized Rx/Tx routines are not compiled in when required vector
1791 * instructions are not supported on a target architecture. The following null
1792 * stubs are needed for linkage when those are not included outside of this file
1793 * (e.g. mlx5_rxtx_vec_sse.c for x86).
1797 mlx5_rx_burst_vec(void *dpdk_rxq __rte_unused,
1798 struct rte_mbuf **pkts __rte_unused,
1799 uint16_t pkts_n __rte_unused)
1805 mlx5_rx_burst_mprq_vec(void *dpdk_rxq __rte_unused,
1806 struct rte_mbuf **pkts __rte_unused,
1807 uint16_t pkts_n __rte_unused)
1813 mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)
1819 mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused)
1825 * Free the mbufs from the linear array of pointers.
1828 * Pointer to the array of packets to be freed.
1830 * Number of packets to be freed.
1832 * Configured Tx offloads mask. It is fully defined at
1833 * compile time and may be used for optimization.
1835 static __rte_always_inline void
1836 mlx5_tx_free_mbuf(struct rte_mbuf **__rte_restrict pkts,
1837 unsigned int pkts_n,
1838 unsigned int olx __rte_unused)
1840 struct rte_mempool *pool = NULL;
1841 struct rte_mbuf **p_free = NULL;
1842 struct rte_mbuf *mbuf;
1843 unsigned int n_free = 0;
1846 * The implemented algorithm eliminates
1847 * copying pointers to a temporary array
1848 * for rte_mempool_put_bulk() calls.
1851 MLX5_ASSERT(pkts_n);
1855 * Decrement mbuf reference counter, detach
1856 * indirect and external buffers if needed.
1858 mbuf = rte_pktmbuf_prefree_seg(*pkts);
1859 if (likely(mbuf != NULL)) {
1860 MLX5_ASSERT(mbuf == *pkts);
1861 if (likely(n_free != 0)) {
1862 if (unlikely(pool != mbuf->pool))
1863 /* From different pool. */
1866 /* Start new scan array. */
1873 if (unlikely(pkts_n == 0)) {
1879 * This happens if the mbuf is still referenced.
1880 * We can't put it back into the pool, skip it.
1884 if (unlikely(n_free != 0))
1885 /* There is some array to free.*/
1887 if (unlikely(pkts_n == 0))
1888 /* Last mbuf, nothing to free. */
1894 * This loop is implemented to avoid multiple
1895 * inlining of rte_mempool_put_bulk().
1898 MLX5_ASSERT(p_free);
1899 MLX5_ASSERT(n_free);
1901 * Free the array of pre-freed mbufs
1902 * belonging to the same memory pool.
1904 rte_mempool_put_bulk(pool, (void *)p_free, n_free);
1905 if (unlikely(mbuf != NULL)) {
1906 /* There is the request to start new scan. */
1911 if (likely(pkts_n != 0))
1914 * This is the last mbuf to be freed.
1915 * Do one more loop iteration to complete.
1916 * This is the rare case of the last unique mbuf.
1921 if (likely(pkts_n == 0))
1930 * Free the mbufs from the elts ring buffer up to the new tail.
1933 * Pointer to Tx queue structure.
1935 * Index in elts to free up to, becomes new elts tail.
1937 * Configured Tx offloads mask. It is fully defined at
1938 * compile time and may be used for optimization.
1940 static __rte_always_inline void
1941 mlx5_tx_free_elts(struct mlx5_txq_data *__rte_restrict txq,
1943 unsigned int olx __rte_unused)
1945 uint16_t n_elts = tail - txq->elts_tail;
1947 MLX5_ASSERT(n_elts);
1948 MLX5_ASSERT(n_elts <= txq->elts_s);
1950 * Implement a loop to support ring buffer wraparound
1951 * with single inlining of mlx5_tx_free_mbuf().
1956 part = txq->elts_s - (txq->elts_tail & txq->elts_m);
1957 part = RTE_MIN(part, n_elts);
1959 MLX5_ASSERT(part <= txq->elts_s);
1960 mlx5_tx_free_mbuf(&txq->elts[txq->elts_tail & txq->elts_m],
1962 txq->elts_tail += part;
1968 * Store the mbufs being sent into the elts ring buffer.
1969 * On Tx completion these mbufs will be freed.
1972 * Pointer to Tx queue structure.
1974 * Pointer to array of packets to be stored.
1976 * Number of packets to be stored.
1978 * Configured Tx offloads mask. It is fully defined at
1979 * compile time and may be used for optimization.
1981 static __rte_always_inline void
1982 mlx5_tx_copy_elts(struct mlx5_txq_data *__rte_restrict txq,
1983 struct rte_mbuf **__rte_restrict pkts,
1984 unsigned int pkts_n,
1985 unsigned int olx __rte_unused)
1988 struct rte_mbuf **elts = (struct rte_mbuf **)txq->elts;
1991 MLX5_ASSERT(pkts_n);
1992 part = txq->elts_s - (txq->elts_head & txq->elts_m);
1994 MLX5_ASSERT(part <= txq->elts_s);
1995 /* This code is a good candidate for vectorizing with SIMD. */
1996 rte_memcpy((void *)(elts + (txq->elts_head & txq->elts_m)),
1998 RTE_MIN(part, pkts_n) * sizeof(struct rte_mbuf *));
1999 txq->elts_head += pkts_n;
2000 if (unlikely(part < pkts_n))
2001 /* The copy is wrapping around the elts array. */
2002 rte_memcpy((void *)elts, (void *)(pkts + part),
2003 (pkts_n - part) * sizeof(struct rte_mbuf *));
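/*
 * Illustration only (hypothetical sizes): with elts_s = 256, elts_head = 250
 * and pkts_n = 10, the first six mbuf pointers are copied to the tail of the
 * elts array and the remaining four wrap around to its beginning.
 */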
2007 * Update the completion queue consumer index via doorbell
2008 * and flush the completed data buffers.
2011 * Pointer to TX queue structure.
2012 * @param last_cqe
2013 * Valid CQE pointer; if not NULL, update txq->wqe_pi and flush the buffers.
2015 * Configured Tx offloads mask. It is fully defined at
2016 * compile time and may be used for optimization.
2018 static __rte_always_inline void
2019 mlx5_tx_comp_flush(struct mlx5_txq_data *__rte_restrict txq,
2020 volatile struct mlx5_cqe *last_cqe,
2021 unsigned int olx __rte_unused)
2023 if (likely(last_cqe != NULL)) {
2026 txq->wqe_pi = rte_be_to_cpu_16(last_cqe->wqe_counter);
2027 tail = txq->fcqs[(txq->cq_ci - 1) & txq->cqe_m];
2028 if (likely(tail != txq->elts_tail)) {
2029 mlx5_tx_free_elts(txq, tail, olx);
2030 MLX5_ASSERT(tail == txq->elts_tail);
2036 * Manage TX completions. This routine checks the CQ for
2037 * arrived CQEs, deduces the last accomplished WQE in the SQ,
2038 * updates the SQ producer index and frees all completed mbufs.
2041 * Pointer to TX queue structure.
2043 * Configured Tx offloads mask. It is fully defined at
2044 * compile time and may be used for optimization.
2046 * NOTE: intentionally not inlined, it makes the tx_burst
2047 * routine smaller, simpler and faster - per experiments.
2050 mlx5_tx_handle_completion(struct mlx5_txq_data *__rte_restrict txq,
2051 unsigned int olx __rte_unused)
2053 unsigned int count = MLX5_TX_COMP_MAX_CQE;
2054 volatile struct mlx5_cqe *last_cqe = NULL;
2055 bool ring_doorbell = false;
2058 static_assert(MLX5_CQE_STATUS_HW_OWN < 0, "Must be negative value");
2059 static_assert(MLX5_CQE_STATUS_SW_OWN < 0, "Must be negative value");
2061 volatile struct mlx5_cqe *cqe;
2063 cqe = &txq->cqes[txq->cq_ci & txq->cqe_m];
2064 ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci);
2065 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
2066 if (likely(ret != MLX5_CQE_STATUS_ERR)) {
2067 /* No new CQEs in completion queue. */
2068 MLX5_ASSERT(ret == MLX5_CQE_STATUS_HW_OWN);
2072 * Some error occurred, try to restart.
2073 * We have no barrier after the WQE related doorbell
2074 * is written, make sure all writes are completed
2075 * here, before we might perform an SQ reset.
2078 ret = mlx5_tx_error_cqe_handle
2079 (txq, (volatile struct mlx5_err_cqe *)cqe);
2080 if (unlikely(ret < 0)) {
2082 * Some error occurred on queue error
2083 * handling, we do not advance the index
2084 * here, allowing to retry on next call.
2089 * We are going to fetch all entries with
2090 * MLX5_CQE_SYNDROME_WR_FLUSH_ERR status.
2091 * The send queue is supposed to be empty.
2093 ring_doorbell = true;
2095 txq->cq_pi = txq->cq_ci;
2099 /* Normal transmit completion. */
2100 MLX5_ASSERT(txq->cq_ci != txq->cq_pi);
2101 MLX5_ASSERT((txq->fcqs[txq->cq_ci & txq->cqe_m] >> 16) ==
2103 ring_doorbell = true;
2107 * We have to restrict the number of processed CQEs
2108 * in one tx_burst routine call. The CQ may be large
2109 * and many CQEs may be updated by the NIC in one
2110 * transaction. Freeing buffers is time consuming,
2111 * multiple iterations may introduce significant
2114 if (likely(--count == 0))
2117 if (likely(ring_doorbell)) {
2118 /* Ring doorbell to notify hardware. */
2119 rte_compiler_barrier();
2120 *txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
2121 mlx5_tx_comp_flush(txq, last_cqe, olx);
2126 * Check if the completion request flag should be set in the last WQE.
2127 * Both pushed mbufs and WQEs are monitored and the completion request
2128 * flag is set if any of the thresholds is reached.
2131 * Pointer to TX queue structure.
2133 * Pointer to burst routine local context.
2135 * Configured Tx offloads mask. It is fully defined at
2136 * compile time and may be used for optimization.
2138 static __rte_always_inline void
2139 mlx5_tx_request_completion(struct mlx5_txq_data *__rte_restrict txq,
2140 struct mlx5_txq_local *__rte_restrict loc,
2143 uint16_t head = txq->elts_head;
2146 part = MLX5_TXOFF_CONFIG(INLINE) ?
2147 0 : loc->pkts_sent - loc->pkts_copy;
2149 if ((uint16_t)(head - txq->elts_comp) >= MLX5_TX_COMP_THRESH ||
2150 (MLX5_TXOFF_CONFIG(INLINE) &&
2151 (uint16_t)(txq->wqe_ci - txq->wqe_comp) >= txq->wqe_thres)) {
2152 volatile struct mlx5_wqe *last = loc->wqe_last;
2155 txq->elts_comp = head;
2156 if (MLX5_TXOFF_CONFIG(INLINE))
2157 txq->wqe_comp = txq->wqe_ci;
2158 /* Request unconditional completion on last WQE. */
2159 last->cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
2160 MLX5_COMP_MODE_OFFSET);
2161 /* Save elts_head in the dedicated free-on-completion queue. */
2162 #ifdef RTE_LIBRTE_MLX5_DEBUG
2163 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head |
2164 (last->cseg.opcode >> 8) << 16;
2166 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head;
2168 /* A CQE slot must always be available. */
2169 MLX5_ASSERT((txq->cq_pi - txq->cq_ci) <= txq->cqe_s);
2174 * DPDK callback to check the status of a tx descriptor.
2179 * The index of the descriptor in the ring.
2182 * The status of the tx descriptor.
2185 mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
2187 struct mlx5_txq_data *__rte_restrict txq = tx_queue;
2190 mlx5_tx_handle_completion(txq, 0);
2191 used = txq->elts_head - txq->elts_tail;
2193 return RTE_ETH_TX_DESC_FULL;
2194 return RTE_ETH_TX_DESC_DONE;
2198 * Build the Control Segment with specified opcode:
2199 * - MLX5_OPCODE_SEND
2200 * - MLX5_OPCODE_ENHANCED_MPSW
2204 * Pointer to TX queue structure.
2206 * Pointer to burst routine local context.
2208 * Pointer to WQE to fill with built Control Segment.
2210 * Supposed length of WQE in segments.
2212 * SQ WQE opcode to put into Control Segment.
2214 * Configured Tx offloads mask. It is fully defined at
2215 * compile time and may be used for optimization.
2217 static __rte_always_inline void
2218 mlx5_tx_cseg_init(struct mlx5_txq_data *__rte_restrict txq,
2219 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
2220 struct mlx5_wqe *__rte_restrict wqe,
2222 unsigned int opcode,
2223 unsigned int olx __rte_unused)
2225 struct mlx5_wqe_cseg *__rte_restrict cs = &wqe->cseg;
2227 /* For legacy MPW replace the EMPW by TSO with modifier. */
2228 if (MLX5_TXOFF_CONFIG(MPW) && opcode == MLX5_OPCODE_ENHANCED_MPSW)
2229 opcode = MLX5_OPCODE_TSO | MLX5_OPC_MOD_MPW << 24;
2230 cs->opcode = rte_cpu_to_be_32((txq->wqe_ci << 8) | opcode);
2231 cs->sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
2232 cs->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
2233 MLX5_COMP_MODE_OFFSET);
2234 cs->misc = RTE_BE32(0);
2238 * Build the Synchronize Queue Segment with specified completion index.
2241 * Pointer to TX queue structure.
2243 * Pointer to burst routine local context.
2245 * Pointer to WQE to fill with built Control Segment.
2247 * Completion index in Clock Queue to wait.
2249 * Configured Tx offloads mask. It is fully defined at
2250 * compile time and may be used for optimization.
2252 static __rte_always_inline void
2253 mlx5_tx_wseg_init(struct mlx5_txq_data *restrict txq,
2254 struct mlx5_txq_local *restrict loc __rte_unused,
2255 struct mlx5_wqe *restrict wqe,
2257 unsigned int olx __rte_unused)
2259 struct mlx5_wqe_qseg *qs;
2261 qs = RTE_PTR_ADD(wqe, MLX5_WSEG_SIZE);
2262 qs->max_index = rte_cpu_to_be_32(wci);
2263 qs->qpn_cqn = rte_cpu_to_be_32(txq->sh->txpp.clock_queue.cq->id);
2264 qs->reserved0 = RTE_BE32(0);
2265 qs->reserved1 = RTE_BE32(0);
2269 * Build the Ethernet Segment without inlined data.
2270 * Supports Software Parser, Checksums and VLAN
2271 * insertion Tx offload features.
2274 * Pointer to TX queue structure.
2276 * Pointer to burst routine local context.
2278 * Pointer to WQE to fill with built Ethernet Segment.
2280 * Configured Tx offloads mask. It is fully defined at
2281 * compile time and may be used for optimization.
2283 static __rte_always_inline void
2284 mlx5_tx_eseg_none(struct mlx5_txq_data *__rte_restrict txq __rte_unused,
2285 struct mlx5_txq_local *__rte_restrict loc,
2286 struct mlx5_wqe *__rte_restrict wqe,
2289 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
2293 * Calculate and set checksum flags first, the dword field
2294 * in the segment may be shared with Software Parser flags.
2296 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2297 es->flags = rte_cpu_to_le_32(csum);
2299 * Calculate and set Software Parser offsets and flags.
2300 * These flags are set for custom UDP and IP tunnel packets.
2302 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2303 /* Fill metadata field if needed. */
2304 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2305 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2306 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2307 /* Engage VLAN tag insertion feature if requested. */
2308 if (MLX5_TXOFF_CONFIG(VLAN) &&
2309 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
2311 * We should get here only if device support
2312 * this feature correctly.
2314 MLX5_ASSERT(txq->vlan_en);
2315 es->inline_hdr = rte_cpu_to_be_32(MLX5_ETH_WQE_VLAN_INSERT |
2316 loc->mbuf->vlan_tci);
2318 es->inline_hdr = RTE_BE32(0);
2323 * Build the Ethernet Segment with minimal inlined data
2324 * of MLX5_ESEG_MIN_INLINE_SIZE bytes length. This is
2325 * used to fill the gap in single WQEBB WQEs.
2326 * Supports Software Parser, Checksums and VLAN
2327 * insertion Tx offload features.
2330 * Pointer to TX queue structure.
2332 * Pointer to burst routine local context.
2334 * Pointer to WQE to fill with built Ethernet Segment.
2336 * Length of VLAN tag insertion if any.
2338 * Configured Tx offloads mask. It is fully defined at
2339 * compile time and may be used for optimization.
2341 static __rte_always_inline void
2342 mlx5_tx_eseg_dmin(struct mlx5_txq_data *__rte_restrict txq __rte_unused,
2343 struct mlx5_txq_local *__rte_restrict loc,
2344 struct mlx5_wqe *__rte_restrict wqe,
2348 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
2350 uint8_t *psrc, *pdst;
2353 * Calculate and set the checksum flags first, the dword field
2354 * in the segment may be shared with the Software Parser flags.
2356 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2357 es->flags = rte_cpu_to_le_32(csum);
2359 * Calculate and set Software Parser offsets and flags.
2360 * These flags are set for custom UDP and IP tunnel packets.
2362 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2363 /* Fill metadata field if needed. */
2364 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2365 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2366 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2367 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2369 sizeof(rte_v128u32_t)),
2370 "invalid Ethernet Segment data size");
2371 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2373 sizeof(struct rte_vlan_hdr) +
2374 2 * RTE_ETHER_ADDR_LEN),
2375 "invalid Ethernet Segment data size");
2376 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
2377 es->inline_hdr_sz = RTE_BE16(MLX5_ESEG_MIN_INLINE_SIZE);
2378 es->inline_data = *(unaligned_uint16_t *)psrc;
2379 psrc += sizeof(uint16_t);
2380 pdst = (uint8_t *)(es + 1);
2381 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2383 /* Implement VLAN tag insertion as part of the inline data. */
2383 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
2384 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2385 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2386 /* Insert VLAN ethertype + VLAN tag. */
2387 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2388 ((RTE_ETHER_TYPE_VLAN << 16) |
2389 loc->mbuf->vlan_tci);
2390 pdst += sizeof(struct rte_vlan_hdr);
2391 /* Copy the remaining two bytes of the packet data. */
2392 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
2393 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
2395 /* Fill the gap in the title WQEBB with inline data. */
2396 rte_mov16(pdst, psrc);
2401 * Build the Ethernet Segment with entire packet
2402 * data inlining. Checks the boundary of WQEBB and
2403 * ring buffer wrapping, supports Software Parser,
2404 * Checksums and VLAN insertion Tx offload features.
2407 * Pointer to TX queue structure.
2409 * Pointer to burst routine local context.
2411 * Pointer to WQE to fill with built Ethernet Segment.
2413 * Length of VLAN tag insertion if any.
2415 * Length of data to inline (VLAN included, if any).
2417 * TSO flag, set mss field from the packet.
2419 * Configured Tx offloads mask. It is fully defined at
2420 * compile time and may be used for optimization.
2423 * Pointer to the next Data Segment (aligned and wrapped around).
2425 static __rte_always_inline struct mlx5_wqe_dseg *
2426 mlx5_tx_eseg_data(struct mlx5_txq_data *__rte_restrict txq,
2427 struct mlx5_txq_local *__rte_restrict loc,
2428 struct mlx5_wqe *__rte_restrict wqe,
2434 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
2436 uint8_t *psrc, *pdst;
2440 * Calculate and set the checksum flags first, the dword field
2441 * in the segment may be shared with the Software Parser flags.
2443 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2446 csum |= loc->mbuf->tso_segsz;
2447 es->flags = rte_cpu_to_be_32(csum);
2449 es->flags = rte_cpu_to_le_32(csum);
2452 * Calculate and set Software Parser offsets and flags.
2453 * These flags are set for custom UDP and IP tunnel packets.
2455 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2456 /* Fill metadata field if needed. */
2457 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2458 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2459 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2460 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2462 sizeof(rte_v128u32_t)),
2463 "invalid Ethernet Segment data size");
2464 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2466 sizeof(struct rte_vlan_hdr) +
2467 2 * RTE_ETHER_ADDR_LEN),
2468 "invalid Ethernet Segment data size");
2469 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
2470 es->inline_hdr_sz = rte_cpu_to_be_16(inlen);
2471 es->inline_data = *(unaligned_uint16_t *)psrc;
2472 psrc += sizeof(uint16_t);
2473 pdst = (uint8_t *)(es + 1);
2474 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2476 /* Implement VLAN tag insertion as part of the inline data. */
2476 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
2477 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2478 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2479 /* Insert VLAN ethertype + VLAN tag. */
2480 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2481 ((RTE_ETHER_TYPE_VLAN << 16) |
2482 loc->mbuf->vlan_tci);
2483 pdst += sizeof(struct rte_vlan_hdr);
2485 /* Copy the remaining two bytes of the packet data. */
2485 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
2486 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
2487 psrc += sizeof(uint16_t);
2489 /* Fill the gap in the title WQEBB with inline data. */
2490 rte_mov16(pdst, psrc);
2491 psrc += sizeof(rte_v128u32_t);
2493 pdst = (uint8_t *)(es + 2);
2494 MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
2495 MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
2496 inlen -= MLX5_ESEG_MIN_INLINE_SIZE;
2498 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
2499 return (struct mlx5_wqe_dseg *)pdst;
2502 * The WQEBB space availability is checked by caller.
2503 * Here we should be aware of WQE ring buffer wraparound only.
2505 part = (uint8_t *)txq->wqes_end - pdst;
2506 part = RTE_MIN(part, inlen);
2508 rte_memcpy(pdst, psrc, part);
2510 if (likely(!inlen)) {
2512 * If return value is not used by the caller
2513 * the code below will be optimized out.
2516 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2517 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
2518 pdst = (uint8_t *)txq->wqes;
2519 return (struct mlx5_wqe_dseg *)pdst;
2521 pdst = (uint8_t *)txq->wqes;
2528 * Copy data from chain of mbuf to the specified linear buffer.
2529 * If data from some mbuf is copied out completely, this mbuf is
2530 * freed. The local structure is used to keep the byte stream state
2531 * (the current mbuf and the offset within it).
2534 * Pointer to the destination linear buffer.
2536 * Pointer to burst routine local context.
2538 * Length of data to be copied.
2540 * Length of data to be copied ignoring no inline hint.
2542 * Configured Tx offloads mask. It is fully defined at
2543 * compile time and may be used for optimization.
2546 * Number of actually copied data bytes. This is always greater than or
2547 * equal to the 'must' parameter and might be less than 'len' if the
2548 * no-inline hint flag is encountered.
2550 static __rte_always_inline unsigned int
2551 mlx5_tx_mseg_memcpy(uint8_t *pdst,
2552 struct mlx5_txq_local *__rte_restrict loc,
2555 unsigned int olx __rte_unused)
2557 struct rte_mbuf *mbuf;
2558 unsigned int part, dlen, copy = 0;
2562 MLX5_ASSERT(must <= len);
2564 /* Allow zero length packets, must check first. */
2565 dlen = rte_pktmbuf_data_len(loc->mbuf);
2566 if (dlen <= loc->mbuf_off) {
2567 /* Exhausted packet, just free. */
2569 loc->mbuf = mbuf->next;
2570 rte_pktmbuf_free_seg(mbuf);
2572 MLX5_ASSERT(loc->mbuf_nseg > 1);
2573 MLX5_ASSERT(loc->mbuf);
2575 if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
2580 * We already copied the minimal
2581 * requested amount of data.
2586 if (diff <= rte_pktmbuf_data_len(loc->mbuf)) {
2588 * Copy only the minimal required
2589 * part of the data buffer.
2596 dlen -= loc->mbuf_off;
2597 psrc = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
2599 part = RTE_MIN(len, dlen);
2600 rte_memcpy(pdst, psrc, part);
2602 loc->mbuf_off += part;
2605 if (loc->mbuf_off >= rte_pktmbuf_data_len(loc->mbuf)) {
2607 /* Exhausted packet, just free. */
2609 loc->mbuf = mbuf->next;
2610 rte_pktmbuf_free_seg(mbuf);
2612 MLX5_ASSERT(loc->mbuf_nseg >= 1);
2622 * Build the Ethernet Segment with inlined data from
2623 * multi-segment packet. Checks the boundary of WQEBB
2624 * and ring buffer wrapping, supports Software Parser,
2625 * Checksums and VLAN insertion Tx offload features.
2628 * Pointer to TX queue structure.
2630 * Pointer to burst routine local context.
2632 * Pointer to WQE to fill with built Ethernet Segment.
2634 * Length of VLAN tag insertion if any.
2636 * Length of data to inline (VLAN included, if any).
2638 * TSO flag, set mss field from the packet.
2640 * Configured Tx offloads mask. It is fully defined at
2641 * compile time and may be used for optimization.
2644 * Pointer to the next Data Segment (aligned and
2645 * possibly NOT wrapped around - the caller should do
2646 * wrapping check on its own).
2648 static __rte_always_inline struct mlx5_wqe_dseg *
2649 mlx5_tx_eseg_mdat(struct mlx5_txq_data *__rte_restrict txq,
2650 struct mlx5_txq_local *__rte_restrict loc,
2651 struct mlx5_wqe *__rte_restrict wqe,
2657 struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
2660 unsigned int part, tlen = 0;
2663 * Calculate and set the checksum flags first, the uint32_t field
2664 * in the segment may be shared with the Software Parser flags.
2666 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2669 csum |= loc->mbuf->tso_segsz;
2670 es->flags = rte_cpu_to_be_32(csum);
2672 es->flags = rte_cpu_to_le_32(csum);
2675 * Calculate and set Software Parser offsets and flags.
2676 * These flags are set for custom UDP and IP tunnel packets.
2678 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2679 /* Fill metadata field if needed. */
2680 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2681 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2682 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2683 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2685 sizeof(rte_v128u32_t)),
2686 "invalid Ethernet Segment data size");
2687 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2689 sizeof(struct rte_vlan_hdr) +
2690 2 * RTE_ETHER_ADDR_LEN),
2691 "invalid Ethernet Segment data size");
2692 MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
2693 pdst = (uint8_t *)&es->inline_data;
2694 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2695 /* Implement VLAN tag insertion as part of the inline data. */
2696 mlx5_tx_mseg_memcpy(pdst, loc,
2697 2 * RTE_ETHER_ADDR_LEN,
2698 2 * RTE_ETHER_ADDR_LEN, olx);
2699 pdst += 2 * RTE_ETHER_ADDR_LEN;
2700 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2701 ((RTE_ETHER_TYPE_VLAN << 16) |
2702 loc->mbuf->vlan_tci);
2703 pdst += sizeof(struct rte_vlan_hdr);
2704 tlen += 2 * RTE_ETHER_ADDR_LEN + sizeof(struct rte_vlan_hdr);
2706 MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
2708 * The WQEBB space availability is checked by caller.
2709 * Here we should be aware of WQE ring buffer wraparound only.
2711 part = (uint8_t *)txq->wqes_end - pdst;
2712 part = RTE_MIN(part, inlen - tlen);
2718 * Copying may be interrupted inside the routine
2719 * if the no-inline hint flag is encountered.
2721 copy = tlen >= txq->inlen_mode ? 0 : (txq->inlen_mode - tlen);
2722 copy = mlx5_tx_mseg_memcpy(pdst, loc, part, copy, olx);
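/*
 * In the call above 'part' limits the copy to the room left before the
 * end of the WQE ring, while the 'must' argument ('copy') requests at
 * least the mandatory txq->inlen_mode portion even if the no-inline
 * hint stops the copy earlier.
 */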
2724 if (likely(inlen <= tlen) || copy < part) {
2725 es->inline_hdr_sz = rte_cpu_to_be_16(tlen);
2727 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2728 return (struct mlx5_wqe_dseg *)pdst;
2730 pdst = (uint8_t *)txq->wqes;
2731 part = inlen - tlen;
2736 * Build the Data Segment of pointer type.
2739 * Pointer to TX queue structure.
2741 * Pointer to burst routine local context.
2743 * Pointer to WQE to fill with built Data Segment.
2745 * Data buffer to point.
2747 * Data buffer length.
2749 * Configured Tx offloads mask. It is fully defined at
2750 * compile time and may be used for optimization.
2752 static __rte_always_inline void
2753 mlx5_tx_dseg_ptr(struct mlx5_txq_data *__rte_restrict txq,
2754 struct mlx5_txq_local *__rte_restrict loc,
2755 struct mlx5_wqe_dseg *__rte_restrict dseg,
2758 unsigned int olx __rte_unused)
2762 dseg->bcount = rte_cpu_to_be_32(len);
2763 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
2764 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
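/*
 * A pointer-type Data Segment: big-endian byte count, the memory
 * region lkey resolved from the mbuf via mlx5_tx_mb2mr(), and the
 * 64-bit buffer address.
 */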
2768 * Build the Data Segment of pointer type or inline
2769 * if data length is less than buffer in minimal
2770 * Data Segment size.
2773 * Pointer to TX queue structure.
2775 * Pointer to burst routine local context.
2777 * Pointer to WQE to fill with built Data Segment.
2779 * Data buffer to point.
2781 * Data buffer length.
2783 * Configured Tx offloads mask. It is fully defined at
2784 * compile time and may be used for optimization.
2786 static __rte_always_inline void
2787 mlx5_tx_dseg_iptr(struct mlx5_txq_data *__rte_restrict txq,
2788 struct mlx5_txq_local *__rte_restrict loc,
2789 struct mlx5_wqe_dseg *__rte_restrict dseg,
2792 unsigned int olx __rte_unused)
2798 if (len > MLX5_DSEG_MIN_INLINE_SIZE) {
2799 dseg->bcount = rte_cpu_to_be_32(len);
2800 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
2801 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
2805 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
2806 /* Unrolled implementation of generic rte_memcpy. */
2807 dst = (uintptr_t)&dseg->inline_data[0];
2808 src = (uintptr_t)buf;
2810 #ifdef RTE_ARCH_STRICT_ALIGN
2811 MLX5_ASSERT(dst == RTE_PTR_ALIGN(dst, sizeof(uint32_t)));
2812 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
2813 dst += sizeof(uint32_t);
2814 src += sizeof(uint32_t);
2815 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
2816 dst += sizeof(uint32_t);
2817 src += sizeof(uint32_t);
2819 *(uint64_t *)dst = *(unaligned_uint64_t *)src;
2820 dst += sizeof(uint64_t);
2821 src += sizeof(uint64_t);
2825 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
2826 dst += sizeof(uint32_t);
2827 src += sizeof(uint32_t);
2830 *(uint16_t *)dst = *(unaligned_uint16_t *)src;
2831 dst += sizeof(uint16_t);
2832 src += sizeof(uint16_t);
2835 *(uint8_t *)dst = *(uint8_t *)src;
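/*
 * The unrolled copy above moves the short inline payload (at most
 * MLX5_DSEG_MIN_INLINE_SIZE bytes) directly into the Data Segment;
 * on strict-alignment architectures two 32-bit stores are used
 * instead of a single unaligned 64-bit store.
 */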
2839 * Build the Data Segment of inlined data from single
2840 * segment packet, no VLAN insertion.
2843 * Pointer to TX queue structure.
2845 * Pointer to burst routine local context.
2847 * Pointer to WQE to fill with built Data Segment.
2849 * Data buffer to point.
2851 * Data buffer length.
2853 * Configured Tx offloads mask. It is fully defined at
2854 * compile time and may be used for optimization.
2857 * Pointer to the next Data Segment after inlined data.
2858 * Ring buffer wraparound check is needed. We do not
2859 * do it here because it may not be needed for the
2860 * last packet in the eMPW session.
2862 static __rte_always_inline struct mlx5_wqe_dseg *
2863 mlx5_tx_dseg_empw(struct mlx5_txq_data *__rte_restrict txq,
2864 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
2865 struct mlx5_wqe_dseg *__rte_restrict dseg,
2868 unsigned int olx __rte_unused)
2873 if (!MLX5_TXOFF_CONFIG(MPW)) {
2874 /* Store the descriptor byte counter for eMPW sessions. */
2875 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
2876 pdst = &dseg->inline_data[0];
2878 /* The entire legacy MPW session counter is stored on close. */
2879 pdst = (uint8_t *)dseg;
2882 * The WQEBB space availability is checked by caller.
2883 * Here we should be aware of WQE ring buffer wraparound only.
2885 part = (uint8_t *)txq->wqes_end - pdst;
2886 part = RTE_MIN(part, len);
2888 rte_memcpy(pdst, buf, part);
2892 if (!MLX5_TXOFF_CONFIG(MPW))
2893 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2894 /* Note: no final wraparound check here. */
2895 return (struct mlx5_wqe_dseg *)pdst;
2897 pdst = (uint8_t *)txq->wqes;
2904 * Build the Data Segment of inlined data from single
2905 * segment packet with VLAN insertion.
2908 * Pointer to TX queue structure.
2910 * Pointer to burst routine local context.
2912 * Pointer to the dseg fill with built Data Segment.
2914 * Data buffer to point.
2916 * Data buffer length.
2918 * Configured Tx offloads mask. It is fully defined at
2919 * compile time and may be used for optimization.
2922 * Pointer to the next Data Segment after inlined data.
2923 * Ring buffer wraparound check is needed.
2925 static __rte_always_inline struct mlx5_wqe_dseg *
2926 mlx5_tx_dseg_vlan(struct mlx5_txq_data *__rte_restrict txq,
2927 struct mlx5_txq_local *__rte_restrict loc __rte_unused,
2928 struct mlx5_wqe_dseg *__rte_restrict dseg,
2931 unsigned int olx __rte_unused)
2937 MLX5_ASSERT(len > MLX5_ESEG_MIN_INLINE_SIZE);
2938 static_assert(MLX5_DSEG_MIN_INLINE_SIZE ==
2939 (2 * RTE_ETHER_ADDR_LEN),
2940 "invalid Data Segment data size");
2941 if (!MLX5_TXOFF_CONFIG(MPW)) {
2942 /* Store the descriptor byte counter for eMPW sessions. */
2943 dseg->bcount = rte_cpu_to_be_32
2944 ((len + sizeof(struct rte_vlan_hdr)) |
2945 MLX5_ETH_WQE_DATA_INLINE);
2946 pdst = &dseg->inline_data[0];
2948 /* The entire legacy MPW session counter is stored on close. */
2949 pdst = (uint8_t *)dseg;
2951 memcpy(pdst, buf, MLX5_DSEG_MIN_INLINE_SIZE);
2952 buf += MLX5_DSEG_MIN_INLINE_SIZE;
2953 pdst += MLX5_DSEG_MIN_INLINE_SIZE;
2954 len -= MLX5_DSEG_MIN_INLINE_SIZE;
2955 /* Insert VLAN ethertype + VLAN tag. Pointer is aligned. */
2956 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
2957 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
2958 pdst = (uint8_t *)txq->wqes;
2959 *(uint32_t *)pdst = rte_cpu_to_be_32((RTE_ETHER_TYPE_VLAN << 16) |
2960 loc->mbuf->vlan_tci);
2961 pdst += sizeof(struct rte_vlan_hdr);
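/*
 * At this point the inline data holds both MAC addresses
 * (MLX5_DSEG_MIN_INLINE_SIZE bytes) followed by the 4-byte VLAN
 * header built from the mbuf vlan_tci; the rest of the packet data
 * is appended below, wrapping around the WQE ring if needed.
 */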
2963 * The WQEBB space availability is checked by caller.
2964 * Here we should be aware of WQE ring buffer wraparound only.
2966 part = (uint8_t *)txq->wqes_end - pdst;
2967 part = RTE_MIN(part, len);
2969 rte_memcpy(pdst, buf, part);
2973 if (!MLX5_TXOFF_CONFIG(MPW))
2974 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2975 /* Note: no final wraparound check here. */
2976 return (struct mlx5_wqe_dseg *)pdst;
2978 pdst = (uint8_t *)txq->wqes;
2985 * Build the Ethernet Segment with optionally inlined data with
2986 * VLAN insertion and following Data Segments (if any) from
2987 * multi-segment packet. Used by ordinary send and TSO.
2990 * Pointer to TX queue structure.
2992 * Pointer to burst routine local context.
2994 * Pointer to WQE to fill with built Ethernet/Data Segments.
2996 * Length of VLAN header to insert, 0 means no VLAN insertion.
2998 * Data length to inline. For TSO this parameter specifies
2999 * exact value, for the ordinary send routine it can be aligned by
3000 * the caller to provide better WQE space saving and data buffer
3001 * start address alignment. This length includes the VLAN header, if any.
3004 * Zero means ordinary send, inlined data can be extended,
3005 * otherwise this is TSO, inlined data length is fixed.
3007 * Configured Tx offloads mask. It is fully defined at
3008 * compile time and may be used for optimization.
3011 * Actual size of built WQE in segments.
3013 static __rte_always_inline unsigned int
3014 mlx5_tx_mseg_build(struct mlx5_txq_data *__rte_restrict txq,
3015 struct mlx5_txq_local *__rte_restrict loc,
3016 struct mlx5_wqe *__rte_restrict wqe,
3020 unsigned int olx __rte_unused)
3022 struct mlx5_wqe_dseg *__rte_restrict dseg;
3025 MLX5_ASSERT((rte_pktmbuf_pkt_len(loc->mbuf) + vlan) >= inlen);
3026 loc->mbuf_nseg = NB_SEGS(loc->mbuf);
3029 dseg = mlx5_tx_eseg_mdat(txq, loc, wqe, vlan, inlen, tso, olx);
3030 if (!loc->mbuf_nseg)
3033 * There are still some mbuf remaining, not inlined.
3034 * The first mbuf may be partially inlined and we
3035 * must process the possible non-zero data offset.
3037 if (loc->mbuf_off) {
3042 * Exhausted packets must have been dropped before this point.
3043 * A non-zero offset means there is still some data
3044 * remaining in the packet.
3046 MLX5_ASSERT(loc->mbuf_off < rte_pktmbuf_data_len(loc->mbuf));
3047 MLX5_ASSERT(rte_pktmbuf_data_len(loc->mbuf));
3048 dptr = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
3050 dlen = rte_pktmbuf_data_len(loc->mbuf) - loc->mbuf_off;
3052 * Build the pointer/minimal data Data Segment.
3053 * Do ring buffer wrapping check in advance.
3055 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3056 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3057 mlx5_tx_dseg_iptr(txq, loc, dseg, dptr, dlen, olx);
3058 /* Store the mbuf to be freed on completion. */
3059 MLX5_ASSERT(loc->elts_free);
3060 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3063 if (--loc->mbuf_nseg == 0)
3065 loc->mbuf = loc->mbuf->next;
3069 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
3070 struct rte_mbuf *mbuf;
3072 /* Zero length segment found, just skip. */
3074 loc->mbuf = loc->mbuf->next;
3075 rte_pktmbuf_free_seg(mbuf);
3076 if (--loc->mbuf_nseg == 0)
3079 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3080 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3083 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3084 rte_pktmbuf_data_len(loc->mbuf), olx);
3085 MLX5_ASSERT(loc->elts_free);
3086 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3089 if (--loc->mbuf_nseg == 0)
3091 loc->mbuf = loc->mbuf->next;
3096 /* Calculate actual segments used from the dseg pointer. */
3097 if ((uintptr_t)wqe < (uintptr_t)dseg)
3098 ds = ((uintptr_t)dseg - (uintptr_t)wqe) / MLX5_WSEG_SIZE;
3100 ds = (((uintptr_t)dseg - (uintptr_t)wqe) +
3101 txq->wqe_s * MLX5_WQE_SIZE) / MLX5_WSEG_SIZE;
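/*
 * The second branch handles the case where dseg has wrapped around
 * and points below wqe: the full ring size (wqe_s * MLX5_WQE_SIZE) is
 * added back before dividing by the 16-byte segment size to get the
 * real number of segments used.
 */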
3106 * The routine checks timestamp flag in the current packet,
3107 * and push WAIT WQE into the queue if scheduling is required.
3110 * Pointer to TX queue structure.
3112 * Pointer to burst routine local context.
3114 * Configured Tx offloads mask. It is fully defined at
3115 * compile time and may be used for optimization.
3118 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3119 * MLX5_TXCMP_CODE_SINGLE - continue processing with the packet.
3120 * MLX5_TXCMP_CODE_MULTI - the WAIT inserted, continue processing.
3121 * Local context variables partially updated.
3123 static __rte_always_inline enum mlx5_txcmp_code
3124 mlx5_tx_schedule_send(struct mlx5_txq_data *restrict txq,
3125 struct mlx5_txq_local *restrict loc,
3128 if (MLX5_TXOFF_CONFIG(TXPP) &&
3129 loc->mbuf->ol_flags & txq->ts_mask) {
3130 struct mlx5_wqe *wqe;
3135 * Estimate the required space quickly and roughly.
3136 * We would like to ensure the packet can be pushed
3137 * to the queue and we won't get the orphan WAIT WQE.
3139 if (loc->wqe_free <= MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE ||
3140 loc->elts_free < NB_SEGS(loc->mbuf))
3141 return MLX5_TXCMP_CODE_EXIT;
3142 /* Convert the timestamp into completion to wait. */
3143 ts = *RTE_MBUF_DYNFIELD(loc->mbuf, txq->ts_offset, uint64_t *);
3144 wci = mlx5_txpp_convert_tx_ts(txq->sh, ts);
3145 if (unlikely(wci < 0))
3146 return MLX5_TXCMP_CODE_SINGLE;
3147 /* Build the WAIT WQE with specified completion. */
3148 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3149 mlx5_tx_cseg_init(txq, loc, wqe, 2, MLX5_OPCODE_WAIT, olx);
3150 mlx5_tx_wseg_init(txq, loc, wqe, wci, olx);
3153 return MLX5_TXCMP_CODE_MULTI;
3155 return MLX5_TXCMP_CODE_SINGLE;
3159 * Tx one packet function for multi-segment TSO. Supports all
3160 * types of Tx offloads, uses MLX5_OPCODE_TSO to build WQEs,
3161 * sends one packet per WQE.
3163 * This routine is responsible for storing processed mbuf
3164 * into elts ring buffer and update elts_head.
3167 * Pointer to TX queue structure.
3169 * Pointer to burst routine local context.
3171 * Configured Tx offloads mask. It is fully defined at
3172 * compile time and may be used for optimization.
3175 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3176 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3177 * Local context variables partially updated.
3179 static __rte_always_inline enum mlx5_txcmp_code
3180 mlx5_tx_packet_multi_tso(struct mlx5_txq_data *__rte_restrict txq,
3181 struct mlx5_txq_local *__rte_restrict loc,
3184 struct mlx5_wqe *__rte_restrict wqe;
3185 unsigned int ds, dlen, inlen, ntcp, vlan = 0;
3187 if (MLX5_TXOFF_CONFIG(TXPP)) {
3188 enum mlx5_txcmp_code wret;
3190 /* Generate WAIT for scheduling if requested. */
3191 wret = mlx5_tx_schedule_send(txq, loc, olx);
3192 if (wret == MLX5_TXCMP_CODE_EXIT)
3193 return MLX5_TXCMP_CODE_EXIT;
3194 if (wret == MLX5_TXCMP_CODE_ERROR)
3195 return MLX5_TXCMP_CODE_ERROR;
3198 * Calculate data length to be inlined to estimate
3199 * the required space in WQE ring buffer.
3201 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
3202 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3203 vlan = sizeof(struct rte_vlan_hdr);
3204 inlen = loc->mbuf->l2_len + vlan +
3205 loc->mbuf->l3_len + loc->mbuf->l4_len;
3206 if (unlikely((!inlen || !loc->mbuf->tso_segsz)))
3207 return MLX5_TXCMP_CODE_ERROR;
3208 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
3209 inlen += loc->mbuf->outer_l2_len + loc->mbuf->outer_l3_len;
3210 /* Packet must contain all TSO headers. */
3211 if (unlikely(inlen > MLX5_MAX_TSO_HEADER ||
3212 inlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
3213 inlen > (dlen + vlan)))
3214 return MLX5_TXCMP_CODE_ERROR;
3215 MLX5_ASSERT(inlen >= txq->inlen_mode);
3217 * Check whether there are enough free WQEBBs:
3219 * - Ethernet Segment
3220 * - First Segment of inlined Ethernet data
3221 * - ... data continued ...
3222 * - Data Segments of pointer/min inline type
3224 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
3225 MLX5_ESEG_MIN_INLINE_SIZE +
3227 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
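/*
 * Note: ds counts 16-byte WQE segments - the Control and Ethernet
 * Segments, the extra inlined header chunks beyond the minimal inline
 * size, and one Data Segment per mbuf; (ds + 3) / 4 below converts
 * segments into 64-byte WQEBBs, the units of wqe_free and wqe_ci.
 */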
3228 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3229 return MLX5_TXCMP_CODE_EXIT;
3230 /* Check for maximal WQE size. */
3231 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3232 return MLX5_TXCMP_CODE_ERROR;
3233 #ifdef MLX5_PMD_SOFT_COUNTERS
3234 /* Update sent data bytes/packets counters. */
3235 ntcp = (dlen - (inlen - vlan) + loc->mbuf->tso_segsz - 1) /
3236 loc->mbuf->tso_segsz;
3238 * One more packet will be accounted for the mbuf itself
3239 * at the end of mlx5_tx_burst() from the
3240 * loc->pkts_sent field.
3243 txq->stats.opackets += ntcp;
3244 txq->stats.obytes += dlen + vlan + ntcp * inlen;
3246 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3247 loc->wqe_last = wqe;
3248 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_TSO, olx);
3249 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 1, olx);
3250 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3251 txq->wqe_ci += (ds + 3) / 4;
3252 loc->wqe_free -= (ds + 3) / 4;
3253 return MLX5_TXCMP_CODE_MULTI;
3257 * Tx one packet function for multi-segment SEND. Supports all
3258 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
3259 * sends one packet per WQE, without any data inlining in
3262 * This routine is responsible for storing processed mbuf
3263 * into elts ring buffer and update elts_head.
3266 * Pointer to TX queue structure.
3268 * Pointer to burst routine local context.
3270 * Configured Tx offloads mask. It is fully defined at
3271 * compile time and may be used for optimization.
3274 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3275 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3276 * Local context variables partially updated.
3278 static __rte_always_inline enum mlx5_txcmp_code
3279 mlx5_tx_packet_multi_send(struct mlx5_txq_data *__rte_restrict txq,
3280 struct mlx5_txq_local *__rte_restrict loc,
3283 struct mlx5_wqe_dseg *__rte_restrict dseg;
3284 struct mlx5_wqe *__rte_restrict wqe;
3285 unsigned int ds, nseg;
3287 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
3288 if (MLX5_TXOFF_CONFIG(TXPP)) {
3289 enum mlx5_txcmp_code wret;
3291 /* Generate WAIT for scheduling if requested. */
3292 wret = mlx5_tx_schedule_send(txq, loc, olx);
3293 if (wret == MLX5_TXCMP_CODE_EXIT)
3294 return MLX5_TXCMP_CODE_EXIT;
3295 if (wret == MLX5_TXCMP_CODE_ERROR)
3296 return MLX5_TXCMP_CODE_ERROR;
3299 * No inlining at all - this means that saving CPU cycles was
3300 * prioritized at configuration time, so we should not
3301 * copy any packet data into the WQE.
3303 nseg = NB_SEGS(loc->mbuf);
3304 ds = 2 + nseg;
3305 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3306 return MLX5_TXCMP_CODE_EXIT;
3307 /* Check for maximal WQE size. */
3308 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3309 return MLX5_TXCMP_CODE_ERROR;
3311 * Some Tx offloads may cause an error if
3312 * packet is not long enough, check against
3313 * assumed minimal length.
3315 if (rte_pktmbuf_pkt_len(loc->mbuf) <= MLX5_ESEG_MIN_INLINE_SIZE)
3316 return MLX5_TXCMP_CODE_ERROR;
3317 #ifdef MLX5_PMD_SOFT_COUNTERS
3318 /* Update sent data bytes counter. */
3319 txq->stats.obytes += rte_pktmbuf_pkt_len(loc->mbuf);
3320 if (MLX5_TXOFF_CONFIG(VLAN) &&
3321 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3322 txq->stats.obytes += sizeof(struct rte_vlan_hdr);
3325 * SEND WQE, one WQEBB:
3326 * - Control Segment, SEND opcode
3327 * - Ethernet Segment, optional VLAN, no inline
3328 * - Data Segments, pointer only type
3330 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3331 loc->wqe_last = wqe;
3332 mlx5_tx_cseg_init(txq, loc, wqe, ds, MLX5_OPCODE_SEND, olx);
3333 mlx5_tx_eseg_none(txq, loc, wqe, olx);
3334 dseg = &wqe->dseg[0];
3336 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
3337 struct rte_mbuf *mbuf;
3340 * Zero length segment found, we have to
3341 * correct the total size of the WQE in segments.
3342 * It is supposed to be a rare occasion, so
3343 * in the normal case (no zero length segments)
3344 * we avoid extra writes to the Control Segment.
3348 wqe->cseg.sq_ds -= RTE_BE32(1);
3350 loc->mbuf = mbuf->next;
3351 rte_pktmbuf_free_seg(mbuf);
3357 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3358 rte_pktmbuf_data_len(loc->mbuf), olx);
3359 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3364 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3365 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3366 loc->mbuf = loc->mbuf->next;
3369 txq->wqe_ci += (ds + 3) / 4;
3370 loc->wqe_free -= (ds + 3) / 4;
3371 return MLX5_TXCMP_CODE_MULTI;
3375 * Tx one packet function for multi-segment SEND. Supports all
3376 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
3377 * sends one packet per WQE, with data inlining in
3378 * Ethernet Segment and minimal Data Segments.
3380 * This routine is responsible for storing processed mbuf
3381 * into elts ring buffer and update elts_head.
3384 * Pointer to TX queue structure.
3386 * Pointer to burst routine local context.
3388 * Configured Tx offloads mask. It is fully defined at
3389 * compile time and may be used for optimization.
3392 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3393 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3394 * Local context variables partially updated.
3396 static __rte_always_inline enum mlx5_txcmp_code
3397 mlx5_tx_packet_multi_inline(struct mlx5_txq_data *__rte_restrict txq,
3398 struct mlx5_txq_local *__rte_restrict loc,
3401 struct mlx5_wqe *__rte_restrict wqe;
3402 unsigned int ds, inlen, dlen, vlan = 0;
3404 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
3405 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
3406 if (MLX5_TXOFF_CONFIG(TXPP)) {
3407 enum mlx5_txcmp_code wret;
3409 /* Generate WAIT for scheduling if requested. */
3410 wret = mlx5_tx_schedule_send(txq, loc, olx);
3411 if (wret == MLX5_TXCMP_CODE_EXIT)
3412 return MLX5_TXCMP_CODE_EXIT;
3413 if (wret == MLX5_TXCMP_CODE_ERROR)
3414 return MLX5_TXCMP_CODE_ERROR;
3417 * First calculate data length to be inlined
3418 * to estimate the required space for WQE.
3420 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
3421 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3422 vlan = sizeof(struct rte_vlan_hdr);
3423 inlen = dlen + vlan;
3424 /* Check against minimal length. */
3425 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
3426 return MLX5_TXCMP_CODE_ERROR;
3427 MLX5_ASSERT(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
3428 if (inlen > txq->inlen_send ||
3429 loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
3430 struct rte_mbuf *mbuf;
3435 * Packet length exceeds the allowed inline
3436 * data length, check whether the minimal
3437 * inlining is required.
3439 if (txq->inlen_mode) {
3440 MLX5_ASSERT(txq->inlen_mode >=
3441 MLX5_ESEG_MIN_INLINE_SIZE);
3442 MLX5_ASSERT(txq->inlen_mode <= txq->inlen_send);
3443 inlen = txq->inlen_mode;
3445 if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE ||
3446 !vlan || txq->vlan_en) {
3448 * VLAN insertion will be done inside by HW.
3449 * It is not the most efficient way - the VLAN flag is
3450 * checked twice, but we have to handle the
3451 * inlining length correctly and take into
3452 * account the VLAN header being inserted.
3454 return mlx5_tx_packet_multi_send
3457 inlen = MLX5_ESEG_MIN_INLINE_SIZE;
3460 * Now we know the minimal amount of data requested
3461 * to inline. Check whether we should inline the buffers
3462 * from the beginning of the chain to eliminate some mbufs.
3465 nxlen = rte_pktmbuf_data_len(mbuf);
3466 if (unlikely(nxlen <= txq->inlen_send)) {
3467 /* We can inline first mbuf at least. */
3468 if (nxlen < inlen) {
3471 /* Scan mbufs till inlen filled. */
3476 nxlen = rte_pktmbuf_data_len(mbuf);
3478 } while (unlikely(nxlen < inlen));
3479 if (unlikely(nxlen > txq->inlen_send)) {
3480 /* We cannot inline entire mbuf. */
3481 smlen = inlen - smlen;
3482 start = rte_pktmbuf_mtod_offset
3483 (mbuf, uintptr_t, smlen);
3490 /* We must not reach the end of the packet chain here. */
3492 nxlen = inlen + rte_pktmbuf_data_len(mbuf);
3493 } while (unlikely(nxlen < txq->inlen_send));
3495 start = rte_pktmbuf_mtod(mbuf, uintptr_t);
3497 * Check whether we can extend the inlined data to align the
3498 * start address of the data buffer to a cache line.
3501 start = (~start + 1) & (RTE_CACHE_LINE_SIZE - 1);
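/*
 * (~start + 1) is the two's complement negation of the address, so
 * masking with (RTE_CACHE_LINE_SIZE - 1) yields how many bytes are
 * left from 'start' up to the next cache line boundary.
 */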
3502 if (unlikely(start)) {
3504 if (start <= txq->inlen_send)
3509 * Check whether there are enough free WQEBBs:
3511 * - Ethernet Segment
3512 * - First Segment of inlined Ethernet data
3513 * - ... data continued ...
3514 * - Data Segments of pointer/min inline type
3516 * Estimate the number of Data Segments conservatively,
3517 * supposing that no mbufs are freed during inlining.
3519 MLX5_ASSERT(inlen <= txq->inlen_send);
3520 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
3521 MLX5_ESEG_MIN_INLINE_SIZE +
3523 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3524 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3525 return MLX5_TXCMP_CODE_EXIT;
3526 /* Check for maximal WQE size. */
3527 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3528 return MLX5_TXCMP_CODE_ERROR;
3529 #ifdef MLX5_PMD_SOFT_COUNTERS
3530 /* Update sent data bytes/packets counters. */
3531 txq->stats.obytes += dlen + vlan;
3533 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3534 loc->wqe_last = wqe;
3535 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_SEND, olx);
3536 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 0, olx);
3537 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3538 txq->wqe_ci += (ds + 3) / 4;
3539 loc->wqe_free -= (ds + 3) / 4;
3540 return MLX5_TXCMP_CODE_MULTI;
3544 * Tx burst function for multi-segment packets. Supports all
3545 * types of Tx offloads, uses MLX5_OPCODE_SEND/TSO to build WQEs,
3546 * sends one packet per WQE. Function stops sending if it
3547 * encounters the single-segment packet.
3549 * This routine is responsible for storing processed mbuf
3550 * into elts ring buffer and update elts_head.
3553 * Pointer to TX queue structure.
3555 * Packets to transmit.
3557 * Number of packets in array.
3559 * Pointer to burst routine local context.
3561 * Configured Tx offloads mask. It is fully defined at
3562 * compile time and may be used for optimization.
3565 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3566 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3567 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
3568 * MLX5_TXCMP_CODE_TSO - TSO single-segment packet encountered.
3569 * Local context variables updated.
3571 static __rte_always_inline enum mlx5_txcmp_code
3572 mlx5_tx_burst_mseg(struct mlx5_txq_data *__rte_restrict txq,
3573 struct rte_mbuf **__rte_restrict pkts,
3574 unsigned int pkts_n,
3575 struct mlx5_txq_local *__rte_restrict loc,
3578 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3579 MLX5_ASSERT(pkts_n > loc->pkts_sent);
3580 pkts += loc->pkts_sent + 1;
3581 pkts_n -= loc->pkts_sent;
3583 enum mlx5_txcmp_code ret;
3585 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
3587 * Estimate the number of free elts quickly but
3588 * conservatively. Some segment may be fully inlined
3589 * and freed, ignore this here - precise estimation
3592 if (loc->elts_free < NB_SEGS(loc->mbuf))
3593 return MLX5_TXCMP_CODE_EXIT;
3594 if (MLX5_TXOFF_CONFIG(TSO) &&
3595 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)) {
3596 /* Proceed with multi-segment TSO. */
3597 ret = mlx5_tx_packet_multi_tso(txq, loc, olx);
3598 } else if (MLX5_TXOFF_CONFIG(INLINE)) {
3599 /* Proceed with multi-segment SEND with inlining. */
3600 ret = mlx5_tx_packet_multi_inline(txq, loc, olx);
3602 /* Proceed with multi-segment SEND w/o inlining. */
3603 ret = mlx5_tx_packet_multi_send(txq, loc, olx);
3605 if (ret == MLX5_TXCMP_CODE_EXIT)
3606 return MLX5_TXCMP_CODE_EXIT;
3607 if (ret == MLX5_TXCMP_CODE_ERROR)
3608 return MLX5_TXCMP_CODE_ERROR;
3609 /* WQE is built, go to the next packet. */
3612 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3613 return MLX5_TXCMP_CODE_EXIT;
3614 loc->mbuf = *pkts++;
3616 rte_prefetch0(*pkts);
3617 if (likely(NB_SEGS(loc->mbuf) > 1))
3619 /* Here ends the series of multi-segment packets. */
3620 if (MLX5_TXOFF_CONFIG(TSO) &&
3621 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
3622 return MLX5_TXCMP_CODE_TSO;
3623 return MLX5_TXCMP_CODE_SINGLE;
3629 * Tx burst function for single-segment packets with TSO.
3630 * Supports all types of Tx offloads, except multi-packets.
3631 * Uses MLX5_OPCODE_TSO to build WQEs, sends one packet per WQE.
3632 * Function stops sending if it encounters the multi-segment
3633 * packet or packet without TSO requested.
3635 * The routine is responsible for storing processed mbuf
3636 * into elts ring buffer and update elts_head if inline
3637 * offloads is requested due to possible early freeing
3638 * of the inlined mbufs (can not store pkts array in elts
3642 * Pointer to TX queue structure.
3644 * Packets to transmit.
3646 * Number of packets in array.
3648 * Pointer to burst routine local context.
3650 * Configured Tx offloads mask. It is fully defined at
3651 * compile time and may be used for optimization.
3654 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3655 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3656 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
3657 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
3658 * Local context variables updated.
3660 static __rte_always_inline enum mlx5_txcmp_code
3661 mlx5_tx_burst_tso(struct mlx5_txq_data *__rte_restrict txq,
3662 struct rte_mbuf **__rte_restrict pkts,
3663 unsigned int pkts_n,
3664 struct mlx5_txq_local *__rte_restrict loc,
3667 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3668 MLX5_ASSERT(pkts_n > loc->pkts_sent);
3669 pkts += loc->pkts_sent + 1;
3670 pkts_n -= loc->pkts_sent;
3672 struct mlx5_wqe_dseg *__rte_restrict dseg;
3673 struct mlx5_wqe *__rte_restrict wqe;
3674 unsigned int ds, dlen, hlen, ntcp, vlan = 0;
3677 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
3678 if (MLX5_TXOFF_CONFIG(TXPP)) {
3679 enum mlx5_txcmp_code wret;
3681 /* Generate WAIT for scheduling if requested. */
3682 wret = mlx5_tx_schedule_send(txq, loc, olx);
3683 if (wret == MLX5_TXCMP_CODE_EXIT)
3684 return MLX5_TXCMP_CODE_EXIT;
3685 if (wret == MLX5_TXCMP_CODE_ERROR)
3686 return MLX5_TXCMP_CODE_ERROR;
3688 dlen = rte_pktmbuf_data_len(loc->mbuf);
3689 if (MLX5_TXOFF_CONFIG(VLAN) &&
3690 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
3691 vlan = sizeof(struct rte_vlan_hdr);
3694 * First calculate the WQE size to check
3695 * whether we have enough space in ring buffer.
3697 hlen = loc->mbuf->l2_len + vlan +
3698 loc->mbuf->l3_len + loc->mbuf->l4_len;
3699 if (unlikely((!hlen || !loc->mbuf->tso_segsz)))
3700 return MLX5_TXCMP_CODE_ERROR;
3701 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
3702 hlen += loc->mbuf->outer_l2_len +
3703 loc->mbuf->outer_l3_len;
3704 /* Segment must contain all TSO headers. */
3705 if (unlikely(hlen > MLX5_MAX_TSO_HEADER ||
3706 hlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
3707 hlen > (dlen + vlan)))
3708 return MLX5_TXCMP_CODE_ERROR;
3710 * Check whether there are enough free WQEBBs:
3712 * - Ethernet Segment
3713 * - First Segment of inlined Ethernet data
3714 * - ... data continued ...
3715 * - Finishing Data Segment of pointer type
3717 ds = 4 + (hlen - MLX5_ESEG_MIN_INLINE_SIZE +
3718 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3719 if (loc->wqe_free < ((ds + 3) / 4))
3720 return MLX5_TXCMP_CODE_EXIT;
3721 #ifdef MLX5_PMD_SOFT_COUNTERS
3722 /* Update sent data bytes/packets counters. */
3723 ntcp = (dlen + vlan - hlen +
3724 loc->mbuf->tso_segsz - 1) /
3725 loc->mbuf->tso_segsz;
3727 * One more packet will be accounted for the mbuf itself at the end
3728 * of mlx5_tx_burst() from the loc->pkts_sent field.
3731 txq->stats.opackets += ntcp;
3732 txq->stats.obytes += dlen + vlan + ntcp * hlen;
3735 * Build the TSO WQE:
3737 * - Ethernet Segment with hlen bytes inlined
3738 * - Data Segment of pointer type
3740 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3741 loc->wqe_last = wqe;
3742 mlx5_tx_cseg_init(txq, loc, wqe, ds,
3743 MLX5_OPCODE_TSO, olx);
3744 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan, hlen, 1, olx);
3745 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) + hlen - vlan;
3746 dlen -= hlen - vlan;
3747 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
3749 * WQE is built, update the loop parameters
3750 * and go to the next packet.
3752 txq->wqe_ci += (ds + 3) / 4;
3753 loc->wqe_free -= (ds + 3) / 4;
3754 if (MLX5_TXOFF_CONFIG(INLINE))
3755 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3759 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3760 return MLX5_TXCMP_CODE_EXIT;
3761 loc->mbuf = *pkts++;
3763 rte_prefetch0(*pkts);
3764 if (MLX5_TXOFF_CONFIG(MULTI) &&
3765 unlikely(NB_SEGS(loc->mbuf) > 1))
3766 return MLX5_TXCMP_CODE_MULTI;
3767 if (likely(!(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)))
3768 return MLX5_TXCMP_CODE_SINGLE;
3769 /* Continue with the next TSO packet. */
3775 * Analyze the packet and select the best method to send.
3778 * Pointer to TX queue structure.
3780 * Pointer to burst routine local context.
3782 * Configured Tx offloads mask. It is fully defined at
3783 * compile time and may be used for optimization.
3785 * The predefined flag whether do complete check for
3786 * multi-segment packets and TSO.
3789 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
3790 * MLX5_TXCMP_CODE_TSO - TSO required, use TSO/LSO.
3791 * MLX5_TXCMP_CODE_SINGLE - single-segment packet, use SEND.
3792 * MLX5_TXCMP_CODE_EMPW - single-segment packet, use MPW.
3794 static __rte_always_inline enum mlx5_txcmp_code
3795 mlx5_tx_able_to_empw(struct mlx5_txq_data *__rte_restrict txq,
3796 struct mlx5_txq_local *__rte_restrict loc,
3800 /* Check for multi-segment packet. */
3802 MLX5_TXOFF_CONFIG(MULTI) &&
3803 unlikely(NB_SEGS(loc->mbuf) > 1))
3804 return MLX5_TXCMP_CODE_MULTI;
3805 /* Check for TSO packet. */
3807 MLX5_TXOFF_CONFIG(TSO) &&
3808 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
3809 return MLX5_TXCMP_CODE_TSO;
3810 /* Check if eMPW is enabled at all. */
3811 if (!MLX5_TXOFF_CONFIG(EMPW))
3812 return MLX5_TXCMP_CODE_SINGLE;
3813 /* Check if eMPW can be engaged. */
3814 if (MLX5_TXOFF_CONFIG(VLAN) &&
3815 unlikely(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) &&
3816 (!MLX5_TXOFF_CONFIG(INLINE) ||
3817 unlikely((rte_pktmbuf_data_len(loc->mbuf) +
3818 sizeof(struct rte_vlan_hdr)) > txq->inlen_empw))) {
3820 * eMPW does not support VLAN insertion offload,
3821 * we would have to inline the entire packet,
3822 * but the packet is too long for inlining.
3824 return MLX5_TXCMP_CODE_SINGLE;
3826 return MLX5_TXCMP_CODE_EMPW;
3830 * Check the next packet attributes to match with the eMPW batch ones.
3831 * In addition, for legacy MPW the packet length is checked as well.
3834 * Pointer to TX queue structure.
3836 * Pointer to Ethernet Segment of eMPW batch.
3838 * Pointer to burst routine local context.
3840 * Length of previous packet in MPW descriptor.
3842 * Configured Tx offloads mask. It is fully defined at
3843 * compile time and may be used for optimization.
3846 * true - the packet matches the eMPW batch attributes.
3847 * false - no match, the eMPW session should be restarted.
3849 static __rte_always_inline bool
3850 mlx5_tx_match_empw(struct mlx5_txq_data *__rte_restrict txq,
3851 struct mlx5_wqe_eseg *__rte_restrict es,
3852 struct mlx5_txq_local *__rte_restrict loc,
3856 uint8_t swp_flags = 0;
3858 /* Compare the checksum flags, if any. */
3859 if (MLX5_TXOFF_CONFIG(CSUM) &&
3860 txq_ol_cksum_to_cs(loc->mbuf) != es->cs_flags)
3862 /* Compare the Software Parser offsets and flags. */
3863 if (MLX5_TXOFF_CONFIG(SWP) &&
3864 (es->swp_offs != txq_mbuf_to_swp(loc, &swp_flags, olx) ||
3865 es->swp_flags != swp_flags))
3867 /* Fill metadata field if needed. */
3868 if (MLX5_TXOFF_CONFIG(METADATA) &&
3869 es->metadata != (loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
3870 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0))
3872 /* Legacy MPW can send packets with the same length only. */
3873 if (MLX5_TXOFF_CONFIG(MPW) &&
3874 dlen != rte_pktmbuf_data_len(loc->mbuf))
3876 /* There must be no VLAN packets in eMPW loop. */
3877 if (MLX5_TXOFF_CONFIG(VLAN))
3878 MLX5_ASSERT(!(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT));
3879 /* Check if the scheduling is requested. */
3880 if (MLX5_TXOFF_CONFIG(TXPP) &&
3881 loc->mbuf->ol_flags & txq->ts_mask)
3887 * Update send loop variables and WQE for eMPW loop
3888 * without data inlining. Number of Data Segments is
3889 * equal to the number of sent packets.
3892 * Pointer to TX queue structure.
3894 * Pointer to burst routine local context.
3896 * Number of packets (one Data Segment per packet).
3898 * Accumulated statistics, number of bytes sent.
3900 * Configured Tx offloads mask. It is fully defined at
3901 * compile time and may be used for optimization.
3904 * No return value. The routine updates the Data Segments count in
3905 * the WQE, the queue indexes, and the burst local context.
3907 static __rte_always_inline void
3908 mlx5_tx_sdone_empw(struct mlx5_txq_data *__rte_restrict txq,
3909 struct mlx5_txq_local *__rte_restrict loc,
3912 unsigned int olx __rte_unused)
3914 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
3915 #ifdef MLX5_PMD_SOFT_COUNTERS
3916 /* Update sent data bytes counter. */
3917 txq->stats.obytes += slen;
3921 loc->elts_free -= ds;
3922 loc->pkts_sent += ds;
3923 ds += 2;
3924 loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3925 txq->wqe_ci += (ds + 3) / 4;
3926 loc->wqe_free -= (ds + 3) / 4;
3930 * Update send loop variables and WQE for eMPW loop
3931 * with data inlining. Takes the total size of descriptors
3932 * and data pushed to the WQE.
3935 * Pointer to TX queue structure.
3937 * Pointer to burst routine local context.
3939 * Total size of descriptor/data in bytes.
3941 * Accumulated statistics, data bytes sent.
3943 * The base WQE for the eMPW/MPW descriptor.
3945 * Configured Tx offloads mask. It is fully defined at
3946 * compile time and may be used for optimization.
3949 * No return value. The routine closes the eMPW/MPW session by setting
3950 * the final Data Segments count and updating the queue indexes.
3952 static __rte_always_inline void
3953 mlx5_tx_idone_empw(struct mlx5_txq_data *__rte_restrict txq,
3954 struct mlx5_txq_local *__rte_restrict loc,
3957 struct mlx5_wqe *__rte_restrict wqem,
3958 unsigned int olx __rte_unused)
3960 struct mlx5_wqe_dseg *dseg = &wqem->dseg[0];
3962 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
3963 #ifdef MLX5_PMD_SOFT_COUNTERS
3964 /* Update sent data bytes counter. */
3965 txq->stats.obytes += slen;
3969 if (MLX5_TXOFF_CONFIG(MPW) && dseg->bcount == RTE_BE32(0)) {
3971 * If the legacy MPW session contains inline packets,
3972 * we should set the length of the single inline Data Segment
3973 * and align the total length to the segment size.
3975 MLX5_ASSERT(len > sizeof(dseg->bcount));
3976 dseg->bcount = rte_cpu_to_be_32((len - sizeof(dseg->bcount)) |
3977 MLX5_ETH_WQE_DATA_INLINE);
3978 len = (len + MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE + 2;
3981 * The session is not a legacy MPW one, or it contains
3982 * data buffer pointer segments.
3984 MLX5_ASSERT((len % MLX5_WSEG_SIZE) == 0);
3985 len = len / MLX5_WSEG_SIZE + 2;
3987 wqem->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | len);
3988 txq->wqe_ci += (len + 3) / 4;
3989 loc->wqe_free -= (len + 3) / 4;
3990 loc->wqe_last = wqem;
3994 * The set of Tx burst functions for single-segment packets
3995 * without TSO and with Multi-Packet Writing feature support.
3996 * Supports all types of Tx offloads, except multi-packets
3999 * Uses MLX5_OPCODE_EMPW to build WQEs if possible and sends
4000 * as many packets per WQE as it can. If eMPW is not configured
4001 * or the packet cannot be sent with eMPW (VLAN insertion), the
4002 * ordinary SEND opcode is used and only one packet is placed
4005 * The functions stop sending if they encounter a multi-segment
4006 * packet or a packet with TSO requested.
4008 * The routines are responsible for storing the processed mbuf
4009 * into the elts ring buffer and updating elts_head if the inlining
4010 * offload is requested. Otherwise copying the mbufs to elts
4011 * can be postponed and completed at the end of the burst routine.
4014 * Pointer to TX queue structure.
4016 * Packets to transmit.
4018 * Number of packets in array.
4020 * Pointer to burst routine local context.
4022 * Configured Tx offloads mask. It is fully defined at
4023 * compile time and may be used for optimization.
4026 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
4027 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
4028 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
4029 * MLX5_TXCMP_CODE_TSO - TSO packet encountered.
4030 * MLX5_TXCMP_CODE_SINGLE - used inside functions set.
4031 * MLX5_TXCMP_CODE_EMPW - used inside functions set.
4033 * Local context variables updated.
4036 * The routine sends packets with MLX5_OPCODE_EMPW
4037 * without inlining, this is a dedicated optimized branch.
4038 * No VLAN insertion is supported.
4040 static __rte_always_inline enum mlx5_txcmp_code
4041 mlx5_tx_burst_empw_simple(struct mlx5_txq_data *__rte_restrict txq,
4042 struct rte_mbuf **__rte_restrict pkts,
4043 unsigned int pkts_n,
4044 struct mlx5_txq_local *__rte_restrict loc,
4048 * Subroutine is the part of mlx5_tx_burst_single()
4049 * and sends single-segment packet with eMPW opcode
4050 * without data inlining.
4052 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
4053 MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
4054 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
4055 MLX5_ASSERT(pkts_n > loc->pkts_sent);
4056 static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
4057 pkts += loc->pkts_sent + 1;
4058 pkts_n -= loc->pkts_sent;
4060 struct mlx5_wqe_dseg *__rte_restrict dseg;
4061 struct mlx5_wqe_eseg *__rte_restrict eseg;
4062 enum mlx5_txcmp_code ret;
4063 unsigned int part, loop;
4064 unsigned int slen = 0;
4067 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
4068 if (MLX5_TXOFF_CONFIG(TXPP)) {
4069 enum mlx5_txcmp_code wret;
4071 /* Generate WAIT for scheduling if requested. */
4072 wret = mlx5_tx_schedule_send(txq, loc, olx);
4073 if (wret == MLX5_TXCMP_CODE_EXIT)
4074 return MLX5_TXCMP_CODE_EXIT;
4075 if (wret == MLX5_TXCMP_CODE_ERROR)
4076 return MLX5_TXCMP_CODE_ERROR;
4078 part = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
4079 MLX5_MPW_MAX_PACKETS :
4080 MLX5_EMPW_MAX_PACKETS);
4081 if (unlikely(loc->elts_free < part)) {
4082 /* We do not have enough elts to save all the mbufs. */
4083 if (unlikely(loc->elts_free < MLX5_EMPW_MIN_PACKETS))
4084 return MLX5_TXCMP_CODE_EXIT;
4085 /* But we are still able to send at least a minimal eMPW. */
4086 part = loc->elts_free;
4088 /* Check whether we have enough WQEs */
4089 if (unlikely(loc->wqe_free < ((2 + part + 3) / 4))) {
4090 if (unlikely(loc->wqe_free <
4091 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
4092 return MLX5_TXCMP_CODE_EXIT;
4093 part = (loc->wqe_free * 4) - 2;
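/*
 * Each WQEBB provides room for four 16-byte segments; two of them in
 * the title WQEBB are taken by the Control and Ethernet Segments, so
 * the remaining capacity in Data Segments (one per packet) is
 * loc->wqe_free * 4 - 2.
 */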
4095 if (likely(part > 1))
4096 rte_prefetch0(*pkts);
4097 loc->wqe_last = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4099 * Build eMPW title WQEBB:
4100 * - Control Segment, eMPW opcode
4101 * - Ethernet Segment, no inline
4103 mlx5_tx_cseg_init(txq, loc, loc->wqe_last, part + 2,
4104 MLX5_OPCODE_ENHANCED_MPSW, olx);
4105 mlx5_tx_eseg_none(txq, loc, loc->wqe_last,
4106 olx & ~MLX5_TXOFF_CONFIG_VLAN);
4107 eseg = &loc->wqe_last->eseg;
4108 dseg = &loc->wqe_last->dseg[0];
4110 /* Store the packet length for legacy MPW. */
4111 if (MLX5_TXOFF_CONFIG(MPW))
4112 eseg->mss = rte_cpu_to_be_16
4113 (rte_pktmbuf_data_len(loc->mbuf));
4115 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
4116 #ifdef MLX5_PMD_SOFT_COUNTERS
4117 /* Update sent data bytes counter. */
4122 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
4124 if (unlikely(--loop == 0))
4126 loc->mbuf = *pkts++;
4127 if (likely(loop > 1))
4128 rte_prefetch0(*pkts);
4129 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4131 * Unroll the completion code to avoid
4132 * returning a variable value - it results in
4133 * unoptimized subsequent checking in the caller.
4135 if (ret == MLX5_TXCMP_CODE_MULTI) {
4137 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4138 if (unlikely(!loc->elts_free ||
4140 return MLX5_TXCMP_CODE_EXIT;
4141 return MLX5_TXCMP_CODE_MULTI;
4143 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
4144 if (ret == MLX5_TXCMP_CODE_TSO) {
4146 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4147 if (unlikely(!loc->elts_free ||
4149 return MLX5_TXCMP_CODE_EXIT;
4150 return MLX5_TXCMP_CODE_TSO;
4152 if (ret == MLX5_TXCMP_CODE_SINGLE) {
4154 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4155 if (unlikely(!loc->elts_free ||
4157 return MLX5_TXCMP_CODE_EXIT;
4158 return MLX5_TXCMP_CODE_SINGLE;
4160 if (ret != MLX5_TXCMP_CODE_EMPW) {
4163 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4164 return MLX5_TXCMP_CODE_ERROR;
4167 * Check whether the packet parameters match
4168 * the assumed eMPW batch:
4169 * - check sum settings
4171 * - software parser settings
4172 * - packets length (legacy MPW only)
4173 * - scheduling is not required
4175 if (!mlx5_tx_match_empw(txq, eseg, loc, dlen, olx)) {
4178 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4179 if (unlikely(!loc->elts_free ||
4181 return MLX5_TXCMP_CODE_EXIT;
4185 /* Packet attributes match, continue the same eMPW. */
4187 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
4188 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
4190 /* eMPW is built successfully, update loop parameters. */
4192 MLX5_ASSERT(pkts_n >= part);
4193 #ifdef MLX5_PMD_SOFT_COUNTERS
4194 /* Update sent data bytes counter. */
4195 txq->stats.obytes += slen;
4197 loc->elts_free -= part;
4198 loc->pkts_sent += part;
4199 txq->wqe_ci += (2 + part + 3) / 4;
4200 loc->wqe_free -= (2 + part + 3) / 4;
4202 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
4203 return MLX5_TXCMP_CODE_EXIT;
4204 loc->mbuf = *pkts++;
4205 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4206 if (unlikely(ret != MLX5_TXCMP_CODE_EMPW))
4208 /* Continue sending eMPW batches. */
4214 * The routine sends packets with MLX5_OPCODE_EMPW
4215 * with inlining, optionally supports VLAN insertion.
4217 static __rte_always_inline enum mlx5_txcmp_code
4218 mlx5_tx_burst_empw_inline(struct mlx5_txq_data *__rte_restrict txq,
4219 struct rte_mbuf **__rte_restrict pkts,
4220 unsigned int pkts_n,
4221 struct mlx5_txq_local *__rte_restrict loc,
4225 * Subroutine is the part of mlx5_tx_burst_single()
4226 * and sends single-segment packet with eMPW opcode
4227 * with data inlining.
4229 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
4230 MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
4231 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
4232 MLX5_ASSERT(pkts_n > loc->pkts_sent);
4233 static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
4234 pkts += loc->pkts_sent + 1;
4235 pkts_n -= loc->pkts_sent;
4237 struct mlx5_wqe_dseg *__rte_restrict dseg;
4238 struct mlx5_wqe *__rte_restrict wqem;
4239 enum mlx5_txcmp_code ret;
4240 unsigned int room, part, nlim;
4241 unsigned int slen = 0;
4243 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
4244 if (MLX5_TXOFF_CONFIG(TXPP)) {
4245 enum mlx5_txcmp_code wret;
4247 /* Generate WAIT for scheduling if requested. */
4248 wret = mlx5_tx_schedule_send(txq, loc, olx);
4249 if (wret == MLX5_TXCMP_CODE_EXIT)
4250 return MLX5_TXCMP_CODE_EXIT;
4251 if (wret == MLX5_TXCMP_CODE_ERROR)
4252 return MLX5_TXCMP_CODE_ERROR;
* Limit the number of packets in one WQE
* to reduce the CQE generation latency.
4258 nlim = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
4259 MLX5_MPW_INLINE_MAX_PACKETS :
4260 MLX5_EMPW_MAX_PACKETS);
/* Check whether we have the minimal number of free WQEBBs. */
4262 if (unlikely(loc->wqe_free <
4263 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
4264 return MLX5_TXCMP_CODE_EXIT;
4265 if (likely(pkts_n > 1))
4266 rte_prefetch0(*pkts);
4267 wqem = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4269 * Build eMPW title WQEBB:
4270 * - Control Segment, eMPW opcode, zero DS
4271 * - Ethernet Segment, no inline
4273 mlx5_tx_cseg_init(txq, loc, wqem, 0,
4274 MLX5_OPCODE_ENHANCED_MPSW, olx);
4275 mlx5_tx_eseg_none(txq, loc, wqem,
4276 olx & ~MLX5_TXOFF_CONFIG_VLAN);
4277 dseg = &wqem->dseg[0];
4278 /* Store the packet length for legacy MPW. */
4279 if (MLX5_TXOFF_CONFIG(MPW))
4280 wqem->eseg.mss = rte_cpu_to_be_16
4281 (rte_pktmbuf_data_len(loc->mbuf));
4282 room = RTE_MIN(MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE,
4283 loc->wqe_free) * MLX5_WQE_SIZE -
4284 MLX5_WQE_CSEG_SIZE -
4286 /* Limit the room for legacy MPW sessions for performance. */
4287 if (MLX5_TXOFF_CONFIG(MPW))
4288 room = RTE_MIN(room,
4289 RTE_MAX(txq->inlen_empw +
4290 sizeof(dseg->bcount) +
4291 (MLX5_TXOFF_CONFIG(VLAN) ?
4292 sizeof(struct rte_vlan_hdr) : 0),
4293 MLX5_MPW_INLINE_MAX_PACKETS *
4294 MLX5_WQE_DSEG_SIZE));
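/*
 * Note: "room" is measured in bytes and tracks the space still
 * available in the opened eMPW session for Data Segments and
 * inlined packet data; it is checked and decremented for every
 * packet added below.
 */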
4295 /* Build WQE till we have space, packets and resources. */
4298 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
4299 uint8_t *dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
4302 MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
4303 MLX5_ASSERT((room % MLX5_WQE_DSEG_SIZE) == 0);
4304 MLX5_ASSERT((uintptr_t)dseg < (uintptr_t)txq->wqes_end);
4306 * Some Tx offloads may cause an error if
4307 * packet is not long enough, check against
4308 * assumed minimal length.
4310 if (unlikely(dlen <= MLX5_ESEG_MIN_INLINE_SIZE)) {
4312 if (unlikely(!part))
4313 return MLX5_TXCMP_CODE_ERROR;
4315 * We have some successfully built
4316 * packet Data Segments to send.
4318 mlx5_tx_idone_empw(txq, loc, part,
4320 return MLX5_TXCMP_CODE_ERROR;
4322 /* Inline or not inline - that's the Question. */
4323 if (dlen > txq->inlen_empw ||
4324 loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE)
4326 if (MLX5_TXOFF_CONFIG(MPW)) {
4327 if (dlen > txq->inlen_send)
4331 /* Open new inline MPW session. */
4332 tlen += sizeof(dseg->bcount);
4333 dseg->bcount = RTE_BE32(0);
4335 (dseg, sizeof(dseg->bcount));
* Pointer and inline descriptors must not be
* intermixed within a legacy MPW session.
4341 if (wqem->dseg[0].bcount)
4345 tlen = sizeof(dseg->bcount) + dlen;
4347 /* Inline entire packet, optional VLAN insertion. */
4348 if (MLX5_TXOFF_CONFIG(VLAN) &&
4349 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
* The packet length was checked in
* mlx5_tx_able_to_empw(), so the packet is
* guaranteed to fit into the inline length.
4356 sizeof(struct rte_vlan_hdr)) <=
4358 tlen += sizeof(struct rte_vlan_hdr);
4361 dseg = mlx5_tx_dseg_vlan(txq, loc, dseg,
4363 #ifdef MLX5_PMD_SOFT_COUNTERS
4364 /* Update sent data bytes counter. */
4365 slen += sizeof(struct rte_vlan_hdr);
4370 dseg = mlx5_tx_dseg_empw(txq, loc, dseg,
4373 if (!MLX5_TXOFF_CONFIG(MPW))
4374 tlen = RTE_ALIGN(tlen, MLX5_WSEG_SIZE);
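/*
 * For enhanced MPW the consumed room is rounded up to a whole
 * WSEG by the alignment above; legacy MPW packs the inlined
 * data without this padding.
 */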
4375 MLX5_ASSERT(room >= tlen);
4378 * Packet data are completely inlined,
4379 * free the packet immediately.
4381 rte_pktmbuf_free_seg(loc->mbuf);
* Pointer and inline descriptors must not be
* intermixed within a legacy MPW session.
4388 if (MLX5_TXOFF_CONFIG(MPW) &&
4390 wqem->dseg[0].bcount == RTE_BE32(0))
* Non-inlinable VLAN packets are
* processed outside of this routine.
4396 MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
4397 if (MLX5_TXOFF_CONFIG(VLAN))
4398 MLX5_ASSERT(!(loc->mbuf->ol_flags &
4400 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
4401 /* We have to store mbuf in elts.*/
4402 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
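/*
 * The mbuf is kept in elts because the NIC still references
 * the packet data until the WQE completion arrives; fully
 * inlined packets are freed immediately instead.
 */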
4403 room -= MLX5_WQE_DSEG_SIZE;
4404 /* Ring buffer wraparound is checked at the loop end.*/
4407 #ifdef MLX5_PMD_SOFT_COUNTERS
4408 /* Update sent data bytes counter. */
4414 if (unlikely(!pkts_n || !loc->elts_free)) {
* We have no resources/packets to
* continue building descriptors.
4420 mlx5_tx_idone_empw(txq, loc, part,
4422 return MLX5_TXCMP_CODE_EXIT;
4424 loc->mbuf = *pkts++;
4425 if (likely(pkts_n > 1))
4426 rte_prefetch0(*pkts);
4427 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
* Unroll the completion code to avoid
* returning a variable value - it results in
* unoptimized sequential checking in the caller.
4433 if (ret == MLX5_TXCMP_CODE_MULTI) {
4435 mlx5_tx_idone_empw(txq, loc, part,
4437 if (unlikely(!loc->elts_free ||
4439 return MLX5_TXCMP_CODE_EXIT;
4440 return MLX5_TXCMP_CODE_MULTI;
4442 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
4443 if (ret == MLX5_TXCMP_CODE_TSO) {
4445 mlx5_tx_idone_empw(txq, loc, part,
4447 if (unlikely(!loc->elts_free ||
4449 return MLX5_TXCMP_CODE_EXIT;
4450 return MLX5_TXCMP_CODE_TSO;
4452 if (ret == MLX5_TXCMP_CODE_SINGLE) {
4454 mlx5_tx_idone_empw(txq, loc, part,
4456 if (unlikely(!loc->elts_free ||
4458 return MLX5_TXCMP_CODE_EXIT;
4459 return MLX5_TXCMP_CODE_SINGLE;
4461 if (ret != MLX5_TXCMP_CODE_EMPW) {
4464 mlx5_tx_idone_empw(txq, loc, part,
4466 return MLX5_TXCMP_CODE_ERROR;
4468 /* Check if we have minimal room left. */
4470 if (unlikely(!nlim || room < MLX5_WQE_DSEG_SIZE))
* Check whether packet parameters coincide
* within the assumed eMPW batch:
* - checksum settings
* - software parser settings
* - packet length (legacy MPW only)
* - scheduling is not required
4481 if (!mlx5_tx_match_empw(txq, &wqem->eseg,
4484 /* Packet attributes match, continue the same eMPW. */
4485 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
4486 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
4489 * We get here to close an existing eMPW
4490 * session and start the new one.
4492 MLX5_ASSERT(pkts_n);
4494 if (unlikely(!part))
4495 return MLX5_TXCMP_CODE_EXIT;
4496 mlx5_tx_idone_empw(txq, loc, part, slen, wqem, olx);
4497 if (unlikely(!loc->elts_free ||
4499 return MLX5_TXCMP_CODE_EXIT;
4500 /* Continue the loop with new eMPW session. */
4506 * The routine sends packets with ordinary MLX5_OPCODE_SEND.
4507 * Data inlining and VLAN insertion are supported.
4509 static __rte_always_inline enum mlx5_txcmp_code
4510 mlx5_tx_burst_single_send(struct mlx5_txq_data *__rte_restrict txq,
4511 struct rte_mbuf **__rte_restrict pkts,
4512 unsigned int pkts_n,
4513 struct mlx5_txq_local *__rte_restrict loc,
* This subroutine is a part of mlx5_tx_burst_single()
* and sends single-segment packets with the SEND opcode.
4520 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
4521 MLX5_ASSERT(pkts_n > loc->pkts_sent);
4522 pkts += loc->pkts_sent + 1;
4523 pkts_n -= loc->pkts_sent;
4525 struct mlx5_wqe *__rte_restrict wqe;
4526 enum mlx5_txcmp_code ret;
4528 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
4529 if (MLX5_TXOFF_CONFIG(TXPP)) {
4530 enum mlx5_txcmp_code wret;
4532 /* Generate WAIT for scheduling if requested. */
4533 wret = mlx5_tx_schedule_send(txq, loc, olx);
4534 if (wret == MLX5_TXCMP_CODE_EXIT)
4535 return MLX5_TXCMP_CODE_EXIT;
4536 if (wret == MLX5_TXCMP_CODE_ERROR)
4537 return MLX5_TXCMP_CODE_ERROR;
4539 if (MLX5_TXOFF_CONFIG(INLINE)) {
4540 unsigned int inlen, vlan = 0;
4542 inlen = rte_pktmbuf_data_len(loc->mbuf);
4543 if (MLX5_TXOFF_CONFIG(VLAN) &&
4544 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
4545 vlan = sizeof(struct rte_vlan_hdr);
4547 static_assert((sizeof(struct rte_vlan_hdr) +
4548 sizeof(struct rte_ether_hdr)) ==
4549 MLX5_ESEG_MIN_INLINE_SIZE,
4550 "invalid min inline data size");
* If inlining is enabled at configuration time,
* the limit must not be less than the minimal size.
* Otherwise we would need an extra data size check
* to avoid crashes due to length overflow.
4558 MLX5_ASSERT(txq->inlen_send >=
4559 MLX5_ESEG_MIN_INLINE_SIZE);
4560 if (inlen <= txq->inlen_send) {
4561 unsigned int seg_n, wqe_n;
4563 rte_prefetch0(rte_pktmbuf_mtod
4564 (loc->mbuf, uint8_t *));
4565 /* Check against minimal length. */
4566 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
4567 return MLX5_TXCMP_CODE_ERROR;
4568 if (loc->mbuf->ol_flags &
4569 PKT_TX_DYNF_NOINLINE) {
* The hint flag not to inline the packet
* data is set. Check whether we can follow the hint.
4575 if ((!MLX5_TXOFF_CONFIG(EMPW) &&
4577 (MLX5_TXOFF_CONFIG(MPW) &&
4579 if (inlen <= txq->inlen_send)
4582 * The hardware requires the
4583 * minimal inline data header.
4585 goto single_min_inline;
4587 if (MLX5_TXOFF_CONFIG(VLAN) &&
4588 vlan && !txq->vlan_en) {
4590 * We must insert VLAN tag
4591 * by software means.
4593 goto single_part_inline;
4595 goto single_no_inline;
4599 * Completely inlined packet data WQE:
4600 * - Control Segment, SEND opcode
4601 * - Ethernet Segment, no VLAN insertion
4602 * - Data inlined, VLAN optionally inserted
4603 * - Alignment to MLX5_WSEG_SIZE
* We have to estimate the number of WQEBBs needed.
4606 seg_n = (inlen + 3 * MLX5_WSEG_SIZE -
4607 MLX5_ESEG_MIN_INLINE_SIZE +
4608 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
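/*
 * seg_n is the WQE size in 16-byte WSEG units: the Control and
 * Ethernet Segments together with the first
 * MLX5_ESEG_MIN_INLINE_SIZE bytes of packet data occupy the
 * leading WSEGs, the remaining data bytes follow, and the total
 * is rounded up to a whole WSEG.
 */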
4609 /* Check if there are enough WQEBBs. */
4610 wqe_n = (seg_n + 3) / 4;
4611 if (wqe_n > loc->wqe_free)
4612 return MLX5_TXCMP_CODE_EXIT;
4613 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4614 loc->wqe_last = wqe;
4615 mlx5_tx_cseg_init(txq, loc, wqe, seg_n,
4616 MLX5_OPCODE_SEND, olx);
4617 mlx5_tx_eseg_data(txq, loc, wqe,
4618 vlan, inlen, 0, olx);
4619 txq->wqe_ci += wqe_n;
4620 loc->wqe_free -= wqe_n;
4622 * Packet data are completely inlined,
4623 * free the packet immediately.
4625 rte_pktmbuf_free_seg(loc->mbuf);
4626 } else if ((!MLX5_TXOFF_CONFIG(EMPW) ||
4627 MLX5_TXOFF_CONFIG(MPW)) &&
* If minimal inlining is requested, the eMPW
* feature should be disabled because the data
* is inlined into the Ethernet Segment, which
* cannot carry inlined data for eMPW since the
* segment is shared by all packets.
4636 struct mlx5_wqe_dseg *__rte_restrict dseg;
* The inline-mode settings require the specified
* amount of data bytes to be inlined into the
* Ethernet Segment. We should check the free
* space in the WQE ring buffer for this
* partial inlining.
4648 MLX5_ASSERT(txq->inlen_send >= txq->inlen_mode);
4649 MLX5_ASSERT(inlen > txq->inlen_mode);
4650 MLX5_ASSERT(txq->inlen_mode >=
4651 MLX5_ESEG_MIN_INLINE_SIZE);
4653 * Check whether there are enough free WQEBBs:
4655 * - Ethernet Segment
4656 * - First Segment of inlined Ethernet data
4657 * - ... data continued ...
4658 * - Finishing Data Segment of pointer type
4660 ds = (MLX5_WQE_CSEG_SIZE +
4661 MLX5_WQE_ESEG_SIZE +
4662 MLX5_WQE_DSEG_SIZE +
txq->inlen_mode -
MLX5_ESEG_MIN_INLINE_SIZE +
4665 MLX5_WQE_DSEG_SIZE +
4666 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
4667 if (loc->wqe_free < ((ds + 3) / 4))
4668 return MLX5_TXCMP_CODE_EXIT;
4670 * Build the ordinary SEND WQE:
4672 * - Ethernet Segment, inline inlen_mode bytes
4673 * - Data Segment of pointer type
4675 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4676 loc->wqe_last = wqe;
4677 mlx5_tx_cseg_init(txq, loc, wqe, ds,
4678 MLX5_OPCODE_SEND, olx);
4679 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan,
4682 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
4683 txq->inlen_mode - vlan;
4684 inlen -= txq->inlen_mode;
4685 mlx5_tx_dseg_ptr(txq, loc, dseg,
* WQE is built, update the loop parameters
* and go to the next packet.
4691 txq->wqe_ci += (ds + 3) / 4;
4692 loc->wqe_free -= (ds + 3) / 4;
4693 /* We have to store mbuf in elts.*/
4694 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
4695 txq->elts[txq->elts_head++ & txq->elts_m] =
* Partially inlined packet data WQE: we have
* some space in the title WQEBB and can fill it
* with some packet data. It takes one WQEBB,
* which is available, so no extra space check is needed:
4707 * - Control Segment, SEND opcode
4708 * - Ethernet Segment, no VLAN insertion
4709 * - MLX5_ESEG_MIN_INLINE_SIZE bytes of Data
4710 * - Data Segment, pointer type
* We also get here if VLAN insertion is not
* supported by HW but inlining is enabled.
4716 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4717 loc->wqe_last = wqe;
4718 mlx5_tx_cseg_init(txq, loc, wqe, 4,
4719 MLX5_OPCODE_SEND, olx);
4720 mlx5_tx_eseg_dmin(txq, loc, wqe, vlan, olx);
4721 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
4722 MLX5_ESEG_MIN_INLINE_SIZE - vlan;
4724 * The length check is performed above, by
4725 * comparing with txq->inlen_send. We should
4726 * not get overflow here.
4728 MLX5_ASSERT(inlen > MLX5_ESEG_MIN_INLINE_SIZE);
4729 dlen = inlen - MLX5_ESEG_MIN_INLINE_SIZE;
4730 mlx5_tx_dseg_ptr(txq, loc, &wqe->dseg[1],
4734 /* We have to store mbuf in elts.*/
4735 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
4736 txq->elts[txq->elts_head++ & txq->elts_m] =
4740 #ifdef MLX5_PMD_SOFT_COUNTERS
4741 /* Update sent data bytes counter. */
4742 txq->stats.obytes += vlan +
4743 rte_pktmbuf_data_len(loc->mbuf);
* No inlining at all: saving CPU cycles was
* prioritized at configuration time, so we do not
* copy any packet data into the WQE.
4751 * SEND WQE, one WQEBB:
4752 * - Control Segment, SEND opcode
4753 * - Ethernet Segment, optional VLAN, no inline
4754 * - Data Segment, pointer type
4757 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4758 loc->wqe_last = wqe;
4759 mlx5_tx_cseg_init(txq, loc, wqe, 3,
4760 MLX5_OPCODE_SEND, olx);
4761 mlx5_tx_eseg_none(txq, loc, wqe, olx);
4763 (txq, loc, &wqe->dseg[0],
4764 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
4765 rte_pktmbuf_data_len(loc->mbuf), olx);
* We should not store the mbuf pointer in elts
* if no inlining is configured - this is done
* by the calling routine in a batch copy.
4773 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
4775 #ifdef MLX5_PMD_SOFT_COUNTERS
4776 /* Update sent data bytes counter. */
4777 txq->stats.obytes += rte_pktmbuf_data_len(loc->mbuf);
4778 if (MLX5_TXOFF_CONFIG(VLAN) &&
4779 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
4780 txq->stats.obytes +=
4781 sizeof(struct rte_vlan_hdr);
4786 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
4787 return MLX5_TXCMP_CODE_EXIT;
4788 loc->mbuf = *pkts++;
4790 rte_prefetch0(*pkts);
4791 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4792 if (unlikely(ret != MLX5_TXCMP_CODE_SINGLE))
4798 static __rte_always_inline enum mlx5_txcmp_code
4799 mlx5_tx_burst_single(struct mlx5_txq_data *__rte_restrict txq,
4800 struct rte_mbuf **__rte_restrict pkts,
4801 unsigned int pkts_n,
4802 struct mlx5_txq_local *__rte_restrict loc,
4805 enum mlx5_txcmp_code ret;
4807 ret = mlx5_tx_able_to_empw(txq, loc, olx, false);
4808 if (ret == MLX5_TXCMP_CODE_SINGLE)
4810 MLX5_ASSERT(ret == MLX5_TXCMP_CODE_EMPW);
4812 /* Optimize for inline/no inline eMPW send. */
4813 ret = (MLX5_TXOFF_CONFIG(INLINE)) ?
4814 mlx5_tx_burst_empw_inline
4815 (txq, pkts, pkts_n, loc, olx) :
4816 mlx5_tx_burst_empw_simple
4817 (txq, pkts, pkts_n, loc, olx);
4818 if (ret != MLX5_TXCMP_CODE_SINGLE)
4820 /* The resources to send one packet should remain. */
4821 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
4823 ret = mlx5_tx_burst_single_send(txq, pkts, pkts_n, loc, olx);
4824 MLX5_ASSERT(ret != MLX5_TXCMP_CODE_SINGLE);
4825 if (ret != MLX5_TXCMP_CODE_EMPW)
4827 /* The resources to send one packet should remain. */
4828 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
* DPDK Tx callback template. This is a configured template
* used to generate routines optimized for a specified offload setup.
* One of these generated functions is chosen at SQ configuration time.
4839 * Generic pointer to TX queue structure.
4841 * Packets to transmit.
4843 * Number of packets in array.
* Configured offloads mask, representing the bits of MLX5_TXOFF_CONFIG_xxx
* values. Should be static to take advantage of compile-time static
* configuration.
4850 * Number of packets successfully transmitted (<= pkts_n).
4852 static __rte_always_inline uint16_t
4853 mlx5_tx_burst_tmpl(struct mlx5_txq_data *__rte_restrict txq,
4854 struct rte_mbuf **__rte_restrict pkts,
4858 struct mlx5_txq_local loc;
4859 enum mlx5_txcmp_code ret;
4862 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
4863 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
4864 if (unlikely(!pkts_n))
4868 loc.wqe_last = NULL;
4871 loc.pkts_loop = loc.pkts_sent;
* Check if there are some CQEs, if any:
* - process encountered errors
4875 * - process the completed WQEs
4876 * - free related mbufs
4877 * - doorbell the NIC about processed CQEs
4879 rte_prefetch0(*(pkts + loc.pkts_sent));
4880 mlx5_tx_handle_completion(txq, olx);
* Calculate the number of available resources - elts and WQEs.
* There are two possible different scenarios:
* - no data inlining into WQEs, one WQEBB may contain up to
* four packets, in this case elts become the scarce resource
* - data inlining into WQEs, one packet may require multiple
* WQEBBs, the WQEs become the limiting factor.
4889 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
4890 loc.elts_free = txq->elts_s -
4891 (uint16_t)(txq->elts_head - txq->elts_tail);
4892 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
4893 loc.wqe_free = txq->wqe_s -
4894 (uint16_t)(txq->wqe_ci - txq->wqe_pi);
4895 if (unlikely(!loc.elts_free || !loc.wqe_free))
* Fetch the packet from the array. Usually this is
* the first packet in a series of multi/single-segment packets.
4903 loc.mbuf = *(pkts + loc.pkts_sent);
4904 /* Dedicated branch for multi-segment packets. */
4905 if (MLX5_TXOFF_CONFIG(MULTI) &&
4906 unlikely(NB_SEGS(loc.mbuf) > 1)) {
4908 * Multi-segment packet encountered.
4909 * Hardware is able to process it only
4910 * with SEND/TSO opcodes, one packet
* per WQE, so do it in a dedicated routine.
4914 MLX5_ASSERT(loc.pkts_sent >= loc.pkts_copy);
4915 part = loc.pkts_sent - loc.pkts_copy;
4916 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
4918 * There are some single-segment mbufs not
4919 * stored in elts. The mbufs must be in the
4920 * same order as WQEs, so we must copy the
4921 * mbufs to elts here, before the coming
* multi-segment packet mbufs are appended.
4924 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy,
4926 loc.pkts_copy = loc.pkts_sent;
4928 MLX5_ASSERT(pkts_n > loc.pkts_sent);
4929 ret = mlx5_tx_burst_mseg(txq, pkts, pkts_n, &loc, olx);
4930 if (!MLX5_TXOFF_CONFIG(INLINE))
4931 loc.pkts_copy = loc.pkts_sent;
* These return code checks are supposed
* to be optimized out due to routine inlining.
4936 if (ret == MLX5_TXCMP_CODE_EXIT) {
* The routine returns this code when
* all packets are sent or there are not
* enough resources to complete the request.
4944 if (ret == MLX5_TXCMP_CODE_ERROR) {
4946 * The routine returns this code when
* some error occurred in the incoming packet format.
4950 txq->stats.oerrors++;
4953 if (ret == MLX5_TXCMP_CODE_SINGLE) {
* The single-segment packet was encountered
* in the array, try to send it in the most
* optimized way, possibly engaging eMPW.
4959 goto enter_send_single;
4961 if (MLX5_TXOFF_CONFIG(TSO) &&
4962 ret == MLX5_TXCMP_CODE_TSO) {
4964 * The single-segment TSO packet was
4965 * encountered in the array.
4967 goto enter_send_tso;
4969 /* We must not get here. Something is going wrong. */
4971 txq->stats.oerrors++;
4974 /* Dedicated branch for single-segment TSO packets. */
4975 if (MLX5_TXOFF_CONFIG(TSO) &&
4976 unlikely(loc.mbuf->ol_flags & PKT_TX_TCP_SEG)) {
* TSO might require a special way of inlining
* (dedicated parameters) and is sent with the
* MLX5_OPCODE_TSO opcode only, so handle this
* in a dedicated branch.
4984 MLX5_ASSERT(NB_SEGS(loc.mbuf) == 1);
4985 MLX5_ASSERT(pkts_n > loc.pkts_sent);
4986 ret = mlx5_tx_burst_tso(txq, pkts, pkts_n, &loc, olx);
* These return code checks are supposed
* to be optimized out due to routine inlining.
4991 if (ret == MLX5_TXCMP_CODE_EXIT)
4993 if (ret == MLX5_TXCMP_CODE_ERROR) {
4994 txq->stats.oerrors++;
4997 if (ret == MLX5_TXCMP_CODE_SINGLE)
4998 goto enter_send_single;
4999 if (MLX5_TXOFF_CONFIG(MULTI) &&
5000 ret == MLX5_TXCMP_CODE_MULTI) {
5002 * The multi-segment packet was
5003 * encountered in the array.
5005 goto enter_send_multi;
5007 /* We must not get here. Something is going wrong. */
5009 txq->stats.oerrors++;
5013 * The dedicated branch for the single-segment packets
* without TSO. Often these can be sent using
* MLX5_OPCODE_EMPW with multiple packets in one WQE.
* The routine builds the WQEs till it encounters
* a TSO or multi-segment packet (if these
* offloads are requested at SQ configuration time).
5021 MLX5_ASSERT(pkts_n > loc.pkts_sent);
5022 ret = mlx5_tx_burst_single(txq, pkts, pkts_n, &loc, olx);
* These return code checks are supposed
* to be optimized out due to routine inlining.
5027 if (ret == MLX5_TXCMP_CODE_EXIT)
5029 if (ret == MLX5_TXCMP_CODE_ERROR) {
5030 txq->stats.oerrors++;
5033 if (MLX5_TXOFF_CONFIG(MULTI) &&
5034 ret == MLX5_TXCMP_CODE_MULTI) {
5036 * The multi-segment packet was
5037 * encountered in the array.
5039 goto enter_send_multi;
5041 if (MLX5_TXOFF_CONFIG(TSO) &&
5042 ret == MLX5_TXCMP_CODE_TSO) {
5044 * The single-segment TSO packet was
5045 * encountered in the array.
5047 goto enter_send_tso;
5049 /* We must not get here. Something is going wrong. */
5051 txq->stats.oerrors++;
5055 * Main Tx loop is completed, do the rest:
5056 * - set completion request if thresholds are reached
5057 * - doorbell the hardware
5058 * - copy the rest of mbufs to elts (if any)
5060 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE) ||
5061 loc.pkts_sent >= loc.pkts_copy);
5062 /* Take a shortcut if nothing is sent. */
5063 if (unlikely(loc.pkts_sent == loc.pkts_loop))
5065 /* Request CQE generation if limits are reached. */
5066 mlx5_tx_request_completion(txq, &loc, olx);
5068 * Ring QP doorbell immediately after WQE building completion
* to improve latencies. The purely software-related data treatment
* can be completed after the doorbell. Tx CQEs for this SQ are
* processed in this thread only by polling.
5073 * The rdma core library can map doorbell register in two ways,
5074 * depending on the environment variable "MLX5_SHUT_UP_BF":
* - as regular cached memory, the variable is either missing or
* set to zero. This type of mapping may cause significant
* doorbell register write latency and requires an explicit
* memory write barrier to mitigate this issue and prevent
* write combining.
* - as non-cached memory, the variable is present and set to
* a non-zero value. This type of mapping may cause a performance
* impact under heavy loading conditions but no explicit write
* memory barrier is required, which may improve core performance.
* - the legacy behaviour (prior to the 19.08 release) was to use
* heuristics to decide whether the write memory barrier should
* be performed. This behaviour is selected by specifying
* tx_db_nc=2; the write barrier is skipped if the application
* provides the full recommended burst of packets, on the
* assumption that more packets are coming and the write barrier
* will be issued on the next burst (after descriptor writing,
* at least).
5097 mlx5_tx_dbrec_cond_wmb(txq, loc.wqe_last, !txq->db_nc &&
5098 (!txq->db_heu || pkts_n % MLX5_TX_DEFAULT_BURST));
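/*
 * The last argument above requests the write memory barrier:
 * it is issued only when the doorbell register is mapped as
 * cached memory (db_nc is zero) and either the heuristics are
 * disabled or the burst is not the full recommended one.
 */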
5099 /* Not all of the mbufs may be stored into elts yet. */
5100 part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent - loc.pkts_copy;
5101 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
* There are some single-segment mbufs not stored in elts.
* This can happen only if the last packet was single-segment.
* The copying is gathered into one place because it is
* a good opportunity to optimize it with SIMD.
* Unfortunately, if inlining is enabled, gaps in the
* pointer array may happen due to early freeing of the
* inlined mbufs.
5111 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy, part, olx);
5112 loc.pkts_copy = loc.pkts_sent;
5114 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
5115 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
5116 if (pkts_n > loc.pkts_sent) {
* If the burst size is large there might not be enough CQEs
* fetched from the completion queue and not enough resources
* freed to send all the packets.
5125 #ifdef MLX5_PMD_SOFT_COUNTERS
5126 /* Increment sent packets counter. */
5127 txq->stats.opackets += loc.pkts_sent;
5129 return loc.pkts_sent;
5132 /* Generate routines with Enhanced Multi-Packet Write support. */
5133 MLX5_TXOFF_DECL(full_empw,
5134 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_EMPW)
5136 MLX5_TXOFF_DECL(none_empw,
5137 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW)
5139 MLX5_TXOFF_DECL(md_empw,
5140 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5142 MLX5_TXOFF_DECL(mt_empw,
5143 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5144 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5146 MLX5_TXOFF_DECL(mtsc_empw,
5147 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5148 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5149 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5151 MLX5_TXOFF_DECL(mti_empw,
5152 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5153 MLX5_TXOFF_CONFIG_INLINE |
5154 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5156 MLX5_TXOFF_DECL(mtv_empw,
5157 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5158 MLX5_TXOFF_CONFIG_VLAN |
5159 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5161 MLX5_TXOFF_DECL(mtiv_empw,
5162 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5163 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5164 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5166 MLX5_TXOFF_DECL(sc_empw,
5167 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5168 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5170 MLX5_TXOFF_DECL(sci_empw,
5171 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5172 MLX5_TXOFF_CONFIG_INLINE |
5173 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5175 MLX5_TXOFF_DECL(scv_empw,
5176 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5177 MLX5_TXOFF_CONFIG_VLAN |
5178 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5180 MLX5_TXOFF_DECL(sciv_empw,
5181 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5182 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5183 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5185 MLX5_TXOFF_DECL(i_empw,
5186 MLX5_TXOFF_CONFIG_INLINE |
5187 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5189 MLX5_TXOFF_DECL(v_empw,
5190 MLX5_TXOFF_CONFIG_VLAN |
5191 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5193 MLX5_TXOFF_DECL(iv_empw,
5194 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5195 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5197 /* Generate routines without Enhanced Multi-Packet Write support. */
5198 MLX5_TXOFF_DECL(full,
5199 MLX5_TXOFF_CONFIG_FULL)
5201 MLX5_TXOFF_DECL(none,
5202 MLX5_TXOFF_CONFIG_NONE)
5205 MLX5_TXOFF_CONFIG_METADATA)
5208 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5209 MLX5_TXOFF_CONFIG_METADATA)
5211 MLX5_TXOFF_DECL(mtsc,
5212 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5213 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5214 MLX5_TXOFF_CONFIG_METADATA)
5216 MLX5_TXOFF_DECL(mti,
5217 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5218 MLX5_TXOFF_CONFIG_INLINE |
5219 MLX5_TXOFF_CONFIG_METADATA)
5222 MLX5_TXOFF_DECL(mtv,
5223 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5224 MLX5_TXOFF_CONFIG_VLAN |
5225 MLX5_TXOFF_CONFIG_METADATA)
5228 MLX5_TXOFF_DECL(mtiv,
5229 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5230 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5231 MLX5_TXOFF_CONFIG_METADATA)
5234 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5235 MLX5_TXOFF_CONFIG_METADATA)
5237 MLX5_TXOFF_DECL(sci,
5238 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5239 MLX5_TXOFF_CONFIG_INLINE |
5240 MLX5_TXOFF_CONFIG_METADATA)
5243 MLX5_TXOFF_DECL(scv,
5244 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5245 MLX5_TXOFF_CONFIG_VLAN |
5246 MLX5_TXOFF_CONFIG_METADATA)
5249 MLX5_TXOFF_DECL(sciv,
5250 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5251 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5252 MLX5_TXOFF_CONFIG_METADATA)
5255 MLX5_TXOFF_CONFIG_INLINE |
5256 MLX5_TXOFF_CONFIG_METADATA)
5259 MLX5_TXOFF_CONFIG_VLAN |
5260 MLX5_TXOFF_CONFIG_METADATA)
5263 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5264 MLX5_TXOFF_CONFIG_METADATA)
5266 /* Generate routines with timestamp scheduling. */
5267 MLX5_TXOFF_DECL(full_ts_nompw,
5268 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_TXPP)
5270 MLX5_TXOFF_DECL(full_ts_nompwi,
5271 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5272 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5273 MLX5_TXOFF_CONFIG_VLAN | MLX5_TXOFF_CONFIG_METADATA |
5274 MLX5_TXOFF_CONFIG_TXPP)
5276 MLX5_TXOFF_DECL(full_ts,
5277 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_TXPP |
5278 MLX5_TXOFF_CONFIG_EMPW)
5280 MLX5_TXOFF_DECL(full_ts_noi,
5281 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5282 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5283 MLX5_TXOFF_CONFIG_VLAN | MLX5_TXOFF_CONFIG_METADATA |
5284 MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)
5286 MLX5_TXOFF_DECL(none_ts,
5287 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_TXPP |
5288 MLX5_TXOFF_CONFIG_EMPW)
5290 MLX5_TXOFF_DECL(mdi_ts,
5291 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_METADATA |
5292 MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)
5294 MLX5_TXOFF_DECL(mti_ts,
5295 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5296 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_METADATA |
5297 MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)
5299 MLX5_TXOFF_DECL(mtiv_ts,
5300 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5301 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5302 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_TXPP |
5303 MLX5_TXOFF_CONFIG_EMPW)
5306 * Generate routines with Legacy Multi-Packet Write support.
* This mode is supported by ConnectX-4 Lx only and imposes
* offload limitations; the following are not supported:
* - ACL/Flows (metadata becomes meaningless)
5310 * - WQE Inline headers
5311 * - SRIOV (E-Switch offloads)
5313 * - tunnel encapsulation/decapsulation
5316 MLX5_TXOFF_DECL(none_mpw,
5317 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW |
5318 MLX5_TXOFF_CONFIG_MPW)
5320 MLX5_TXOFF_DECL(mci_mpw,
5321 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5322 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5323 MLX5_TXOFF_CONFIG_MPW)
5325 MLX5_TXOFF_DECL(mc_mpw,
5326 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5327 MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW)
5329 MLX5_TXOFF_DECL(i_mpw,
5330 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5331 MLX5_TXOFF_CONFIG_MPW)
* Array of declared and compiled Tx burst functions and the
* corresponding supported offload sets. The array is used to select
* the Tx burst function for the offload set specified at Tx queue
* configuration time.
5339 eth_tx_burst_t func;
5342 MLX5_TXOFF_INFO(full_empw,
5343 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5344 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5345 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5346 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5348 MLX5_TXOFF_INFO(none_empw,
5349 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW)
5351 MLX5_TXOFF_INFO(md_empw,
5352 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5354 MLX5_TXOFF_INFO(mt_empw,
5355 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5356 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5358 MLX5_TXOFF_INFO(mtsc_empw,
5359 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5360 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5361 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5363 MLX5_TXOFF_INFO(mti_empw,
5364 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5365 MLX5_TXOFF_CONFIG_INLINE |
5366 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5368 MLX5_TXOFF_INFO(mtv_empw,
5369 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5370 MLX5_TXOFF_CONFIG_VLAN |
5371 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5373 MLX5_TXOFF_INFO(mtiv_empw,
5374 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5375 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5376 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5378 MLX5_TXOFF_INFO(sc_empw,
5379 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5380 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5382 MLX5_TXOFF_INFO(sci_empw,
5383 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5384 MLX5_TXOFF_CONFIG_INLINE |
5385 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5387 MLX5_TXOFF_INFO(scv_empw,
5388 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5389 MLX5_TXOFF_CONFIG_VLAN |
5390 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5392 MLX5_TXOFF_INFO(sciv_empw,
5393 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5394 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5395 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5397 MLX5_TXOFF_INFO(i_empw,
5398 MLX5_TXOFF_CONFIG_INLINE |
5399 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5401 MLX5_TXOFF_INFO(v_empw,
5402 MLX5_TXOFF_CONFIG_VLAN |
5403 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5405 MLX5_TXOFF_INFO(iv_empw,
5406 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5407 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5409 MLX5_TXOFF_INFO(full_ts_nompw,
5410 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_TXPP)
5412 MLX5_TXOFF_INFO(full_ts_nompwi,
5413 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5414 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5415 MLX5_TXOFF_CONFIG_VLAN | MLX5_TXOFF_CONFIG_METADATA |
5416 MLX5_TXOFF_CONFIG_TXPP)
5418 MLX5_TXOFF_INFO(full_ts,
5419 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_TXPP |
5420 MLX5_TXOFF_CONFIG_EMPW)
5422 MLX5_TXOFF_INFO(full_ts_noi,
5423 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5424 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5425 MLX5_TXOFF_CONFIG_VLAN | MLX5_TXOFF_CONFIG_METADATA |
5426 MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)
5428 MLX5_TXOFF_INFO(none_ts,
5429 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_TXPP |
5430 MLX5_TXOFF_CONFIG_EMPW)
5432 MLX5_TXOFF_INFO(mdi_ts,
5433 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_METADATA |
5434 MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)
5436 MLX5_TXOFF_INFO(mti_ts,
5437 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5438 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_METADATA |
5439 MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)
5441 MLX5_TXOFF_INFO(mtiv_ts,
5442 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5443 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5444 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_TXPP |
5445 MLX5_TXOFF_CONFIG_EMPW)
5447 MLX5_TXOFF_INFO(full,
5448 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5449 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5450 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5451 MLX5_TXOFF_CONFIG_METADATA)
5453 MLX5_TXOFF_INFO(none,
5454 MLX5_TXOFF_CONFIG_NONE)
5457 MLX5_TXOFF_CONFIG_METADATA)
5460 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5461 MLX5_TXOFF_CONFIG_METADATA)
5463 MLX5_TXOFF_INFO(mtsc,
5464 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5465 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5466 MLX5_TXOFF_CONFIG_METADATA)
5468 MLX5_TXOFF_INFO(mti,
5469 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5470 MLX5_TXOFF_CONFIG_INLINE |
5471 MLX5_TXOFF_CONFIG_METADATA)
5473 MLX5_TXOFF_INFO(mtv,
5474 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5475 MLX5_TXOFF_CONFIG_VLAN |
5476 MLX5_TXOFF_CONFIG_METADATA)
5478 MLX5_TXOFF_INFO(mtiv,
5479 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5480 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5481 MLX5_TXOFF_CONFIG_METADATA)
5484 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5485 MLX5_TXOFF_CONFIG_METADATA)
5487 MLX5_TXOFF_INFO(sci,
5488 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5489 MLX5_TXOFF_CONFIG_INLINE |
5490 MLX5_TXOFF_CONFIG_METADATA)
5492 MLX5_TXOFF_INFO(scv,
5493 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5494 MLX5_TXOFF_CONFIG_VLAN |
5495 MLX5_TXOFF_CONFIG_METADATA)
5497 MLX5_TXOFF_INFO(sciv,
5498 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5499 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5500 MLX5_TXOFF_CONFIG_METADATA)
5503 MLX5_TXOFF_CONFIG_INLINE |
5504 MLX5_TXOFF_CONFIG_METADATA)
5507 MLX5_TXOFF_CONFIG_VLAN |
5508 MLX5_TXOFF_CONFIG_METADATA)
5511 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5512 MLX5_TXOFF_CONFIG_METADATA)
5514 MLX5_TXOFF_INFO(none_mpw,
5515 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW |
5516 MLX5_TXOFF_CONFIG_MPW)
5518 MLX5_TXOFF_INFO(mci_mpw,
5519 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5520 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5521 MLX5_TXOFF_CONFIG_MPW)
5523 MLX5_TXOFF_INFO(mc_mpw,
5524 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5525 MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW)
5527 MLX5_TXOFF_INFO(i_mpw,
5528 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5529 MLX5_TXOFF_CONFIG_MPW)
* Configure the Tx function to use. The routine checks the configured
* Tx offloads for the device and selects the appropriate Tx burst
* routine. There are multiple Tx burst routines compiled from
* the same template, each optimized for a dedicated offload set.
5540 * Pointer to private data structure.
5543 * Pointer to selected Tx burst function.
5546 mlx5_select_tx_function(struct rte_eth_dev *dev)
5548 struct mlx5_priv *priv = dev->data->dev_private;
5549 struct mlx5_dev_config *config = &priv->config;
5550 uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
5551 unsigned int diff = 0, olx = 0, i, m;
5553 static_assert(MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE <=
5554 MLX5_DSEG_MAX, "invalid WQE max size");
5555 static_assert(MLX5_WQE_CSEG_SIZE == MLX5_WSEG_SIZE,
5556 "invalid WQE Control Segment size");
5557 static_assert(MLX5_WQE_ESEG_SIZE == MLX5_WSEG_SIZE,
5558 "invalid WQE Ethernet Segment size");
5559 static_assert(MLX5_WQE_DSEG_SIZE == MLX5_WSEG_SIZE,
5560 "invalid WQE Data Segment size");
5561 static_assert(MLX5_WQE_SIZE == 4 * MLX5_WSEG_SIZE,
5562 "invalid WQE size");
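/*
 * These compile-time checks guarantee that the WSEG-based size
 * arithmetic in the burst template (four WSEGs per WQEBB, one
 * WSEG per segment descriptor) stays valid.
 */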
5564 if (tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
5565 /* We should support Multi-Segment Packets. */
5566 olx |= MLX5_TXOFF_CONFIG_MULTI;
5568 if (tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
5569 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
5570 DEV_TX_OFFLOAD_GRE_TNL_TSO |
5571 DEV_TX_OFFLOAD_IP_TNL_TSO |
5572 DEV_TX_OFFLOAD_UDP_TNL_TSO)) {
5573 /* We should support TCP Send Offload. */
5574 olx |= MLX5_TXOFF_CONFIG_TSO;
5576 if (tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO |
5577 DEV_TX_OFFLOAD_UDP_TNL_TSO |
5578 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
5579 /* We should support Software Parser for Tunnels. */
5580 olx |= MLX5_TXOFF_CONFIG_SWP;
5582 if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
5583 DEV_TX_OFFLOAD_UDP_CKSUM |
5584 DEV_TX_OFFLOAD_TCP_CKSUM |
5585 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
5586 /* We should support IP/TCP/UDP Checksums. */
5587 olx |= MLX5_TXOFF_CONFIG_CSUM;
5589 if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT) {
5590 /* We should support VLAN insertion. */
5591 olx |= MLX5_TXOFF_CONFIG_VLAN;
5593 if (tx_offloads & DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP &&
5594 rte_mbuf_dynflag_lookup
5595 (RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL) >= 0 &&
5596 rte_mbuf_dynfield_lookup
5597 (RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL) >= 0) {
5598 /* Offload configured, dynamic entities registered. */
5599 olx |= MLX5_TXOFF_CONFIG_TXPP;
5601 if (priv->txqs_n && (*priv->txqs)[0]) {
5602 struct mlx5_txq_data *txd = (*priv->txqs)[0];
5604 if (txd->inlen_send) {
* Check the data inline requirements. Data inlining
* is enabled on a per-device basis, so we can check
* the first Tx queue only.
* If the device does not support VLAN insertion in the WQE
* and some queues are requested to perform VLAN
* insertion offload, then inlining must be enabled.
5614 olx |= MLX5_TXOFF_CONFIG_INLINE;
5617 if (config->mps == MLX5_MPW_ENHANCED &&
5618 config->txq_inline_min <= 0) {
5620 * The NIC supports Enhanced Multi-Packet Write
5621 * and does not require minimal inline data.
5623 olx |= MLX5_TXOFF_CONFIG_EMPW;
5625 if (rte_flow_dynf_metadata_avail()) {
5626 /* We should support Flow metadata. */
5627 olx |= MLX5_TXOFF_CONFIG_METADATA;
5629 if (config->mps == MLX5_MPW) {
5631 * The NIC supports Legacy Multi-Packet Write.
5632 * The MLX5_TXOFF_CONFIG_MPW controls the
5633 * descriptor building method in combination
5634 * with MLX5_TXOFF_CONFIG_EMPW.
5636 if (!(olx & (MLX5_TXOFF_CONFIG_TSO |
5637 MLX5_TXOFF_CONFIG_SWP |
5638 MLX5_TXOFF_CONFIG_VLAN |
5639 MLX5_TXOFF_CONFIG_METADATA)))
5640 olx |= MLX5_TXOFF_CONFIG_EMPW |
5641 MLX5_TXOFF_CONFIG_MPW;
* Scan the routine table to find the routine satisfying
* the requested offloads with the minimal set of extras.
5647 m = RTE_DIM(txoff_func);
5648 for (i = 0; i < RTE_DIM(txoff_func); i++) {
5651 tmp = txoff_func[i].olx;
5653 /* Meets requested offloads exactly.*/
5657 if ((tmp & olx) != olx) {
5658 /* Does not meet requested offloads at all. */
5661 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_MPW)
5662 /* Do not enable legacy MPW if not configured. */
5664 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_EMPW)
5665 /* Do not enable eMPW if not configured. */
5667 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_INLINE)
5668 /* Do not enable inlining if not configured. */
5670 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_TXPP)
5671 /* Do not enable scheduling if not configured. */
* Some routine meets the requirements.
* Check whether it has the minimal number
* of not-requested offloads.
5678 tmp = __builtin_popcountl(tmp & ~olx);
5679 if (m >= RTE_DIM(txoff_func) || tmp < diff) {
5680 /* First or better match, save and continue. */
5686 tmp = txoff_func[i].olx ^ txoff_func[m].olx;
5687 if (__builtin_ffsl(txoff_func[i].olx & ~tmp) <
5688 __builtin_ffsl(txoff_func[m].olx & ~tmp)) {
5689 /* Lighter not requested offload. */
5694 if (m >= RTE_DIM(txoff_func)) {
5695 DRV_LOG(DEBUG, "port %u has no selected Tx function"
5696 " for requested offloads %04X",
5697 dev->data->port_id, olx);
5700 DRV_LOG(DEBUG, "port %u has selected Tx function"
5701 " supporting offloads %04X/%04X",
5702 dev->data->port_id, olx, txoff_func[m].olx);
5703 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MULTI)
5704 DRV_LOG(DEBUG, "\tMULTI (multi segment)");
5705 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_TSO)
5706 DRV_LOG(DEBUG, "\tTSO (TCP send offload)");
5707 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_SWP)
5708 DRV_LOG(DEBUG, "\tSWP (software parser)");
5709 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_CSUM)
5710 DRV_LOG(DEBUG, "\tCSUM (checksum offload)");
5711 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_INLINE)
5712 DRV_LOG(DEBUG, "\tINLIN (inline data)");
5713 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_VLAN)
5714 DRV_LOG(DEBUG, "\tVLANI (VLAN insertion)");
5715 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_METADATA)
5716 DRV_LOG(DEBUG, "\tMETAD (tx Flow metadata)");
5717 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_TXPP)
DRV_LOG(DEBUG, "\tTXPP (Tx scheduling)");
5719 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_EMPW) {
5720 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MPW)
5721 DRV_LOG(DEBUG, "\tMPW (Legacy MPW)");
5723 DRV_LOG(DEBUG, "\tEMPW (Enhanced MPW)");
5725 return txoff_func[m].func;
5729 * DPDK callback to get the TX queue information
5732 * Pointer to the device structure.
5734 * @param tx_queue_id
* Tx queue identifier.
5738 * Pointer to the TX queue information structure.
5745 mlx5_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
5746 struct rte_eth_txq_info *qinfo)
5748 struct mlx5_priv *priv = dev->data->dev_private;
5749 struct mlx5_txq_data *txq = (*priv->txqs)[tx_queue_id];
5750 struct mlx5_txq_ctrl *txq_ctrl =
5751 container_of(txq, struct mlx5_txq_ctrl, txq);
5755 qinfo->nb_desc = txq->elts_s;
5756 qinfo->conf.tx_thresh.pthresh = 0;
5757 qinfo->conf.tx_thresh.hthresh = 0;
5758 qinfo->conf.tx_thresh.wthresh = 0;
5759 qinfo->conf.tx_rs_thresh = 0;
5760 qinfo->conf.tx_free_thresh = 0;
5761 qinfo->conf.tx_deferred_start = txq_ctrl ? 0 : 1;
5762 qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
5766 * DPDK callback to get the TX packet burst mode information
5769 * Pointer to the device structure.
5771 * @param tx_queue_id
* Tx queue identifier.
* Pointer to the burst mode information.
* 0 on success, -EINVAL on failure.
5782 mlx5_tx_burst_mode_get(struct rte_eth_dev *dev,
5783 uint16_t tx_queue_id __rte_unused,
5784 struct rte_eth_burst_mode *mode)
5786 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
5787 unsigned int i, olx;
5789 for (i = 0; i < RTE_DIM(txoff_func); i++) {
5790 if (pkt_burst == txoff_func[i].func) {
5791 olx = txoff_func[i].olx;
5792 snprintf(mode->info, sizeof(mode->info),
5793 "%s%s%s%s%s%s%s%s%s",
5794 (olx & MLX5_TXOFF_CONFIG_EMPW) ?
5795 ((olx & MLX5_TXOFF_CONFIG_MPW) ?
5796 "Legacy MPW" : "Enhanced MPW") : "No MPW",
5797 (olx & MLX5_TXOFF_CONFIG_MULTI) ?
5799 (olx & MLX5_TXOFF_CONFIG_TSO) ?
5801 (olx & MLX5_TXOFF_CONFIG_SWP) ?
5803 (olx & MLX5_TXOFF_CONFIG_CSUM) ?
5805 (olx & MLX5_TXOFF_CONFIG_INLINE) ?
5807 (olx & MLX5_TXOFF_CONFIG_VLAN) ?
5809 (olx & MLX5_TXOFF_CONFIG_METADATA) ?
5811 (olx & MLX5_TXOFF_CONFIG_TXPP) ?