1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015-2019 Mellanox Technologies, Ltd
12 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
14 #pragma GCC diagnostic ignored "-Wpedantic"
16 #include <infiniband/verbs.h>
17 #include <infiniband/mlx5dv.h>
19 #pragma GCC diagnostic error "-Wpedantic"
23 #include <rte_mempool.h>
24 #include <rte_prefetch.h>
25 #include <rte_common.h>
26 #include <rte_branch_prediction.h>
27 #include <rte_ether.h>
28 #include <rte_cycles.h>
31 #include "mlx5_utils.h"
32 #include "mlx5_rxtx.h"
33 #include "mlx5_autoconf.h"
34 #include "mlx5_defs.h"
37 /* TX burst subroutines return codes. */
38 enum mlx5_txcmp_code {
39 MLX5_TXCMP_CODE_EXIT = 0,
40 MLX5_TXCMP_CODE_ERROR,
41 MLX5_TXCMP_CODE_SINGLE,
42 MLX5_TXCMP_CODE_MULTI,
 * These defines are used to configure the set of Tx burst routine options
 * supported at compile time. Options that are not specified are optimized
 * out, because the corresponding if conditions can be evaluated at compile
 * time. Offloads with a bigger runtime check overhead (requiring more CPU
 * cycles to skip) should have a bigger index - this is needed to select
 * the better matching routine when there is no exact match and some
 * offloads are not actually requested.
56 #define MLX5_TXOFF_CONFIG_MULTI (1u << 0) /* Multi-segment packets.*/
57 #define MLX5_TXOFF_CONFIG_TSO (1u << 1) /* TCP send offload supported.*/
58 #define MLX5_TXOFF_CONFIG_SWP (1u << 2) /* Tunnels/SW Parser offloads.*/
59 #define MLX5_TXOFF_CONFIG_CSUM (1u << 3) /* Check Sums offloaded. */
60 #define MLX5_TXOFF_CONFIG_INLINE (1u << 4) /* Data inlining supported. */
61 #define MLX5_TXOFF_CONFIG_VLAN (1u << 5) /* VLAN insertion supported.*/
62 #define MLX5_TXOFF_CONFIG_METADATA (1u << 6) /* Flow metadata. */
63 #define MLX5_TXOFF_CONFIG_EMPW (1u << 8) /* Enhanced MPW supported.*/
65 /* The most common offloads groups. */
66 #define MLX5_TXOFF_CONFIG_NONE 0
67 #define MLX5_TXOFF_CONFIG_FULL (MLX5_TXOFF_CONFIG_MULTI | \
68 MLX5_TXOFF_CONFIG_TSO | \
69 MLX5_TXOFF_CONFIG_SWP | \
70 MLX5_TXOFF_CONFIG_CSUM | \
71 MLX5_TXOFF_CONFIG_INLINE | \
72 MLX5_TXOFF_CONFIG_VLAN | \
73 MLX5_TXOFF_CONFIG_METADATA)
75 #define MLX5_TXOFF_CONFIG(mask) (olx & MLX5_TXOFF_CONFIG_##mask)
77 #define MLX5_TXOFF_DECL(func, olx) \
78 static uint16_t mlx5_tx_burst_##func(void *txq, \
79 struct rte_mbuf **pkts, \
82 return mlx5_tx_burst_tmpl((struct mlx5_txq_data *)txq, \
83 pkts, pkts_n, (olx)); \
86 #define MLX5_TXOFF_INFO(func, olx) {mlx5_tx_burst_##func, olx},
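/*
 * For illustration only (a sketch, not part of the driver): given the
 * declaration macro above, a line such as
 *
 *	MLX5_TXOFF_DECL(mt_empw,
 *			MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
 *			MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
 *
 * expands to roughly:
 *
 *	static uint16_t
 *	mlx5_tx_burst_mt_empw(void *txq, struct rte_mbuf **pkts,
 *			      uint16_t pkts_n)
 *	{
 *		return mlx5_tx_burst_tmpl((struct mlx5_txq_data *)txq,
 *					  pkts, pkts_n,
 *					  MLX5_TXOFF_CONFIG_MULTI |
 *					  MLX5_TXOFF_CONFIG_TSO |
 *					  MLX5_TXOFF_CONFIG_METADATA |
 *					  MLX5_TXOFF_CONFIG_EMPW);
 *	}
 *
 * The offload mask is a compile-time constant, so every
 * MLX5_TXOFF_CONFIG(mask) test inside the template folds to a constant
 * and the compiler eliminates the branches that are not needed.
 */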
88 static __rte_always_inline uint32_t
89 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe);
91 static __rte_always_inline int
92 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
93 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe);
95 static __rte_always_inline uint32_t
96 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe);
98 static __rte_always_inline void
99 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
100 volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res);
102 static __rte_always_inline void
103 mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx);
106 mlx5_queue_state_modify(struct rte_eth_dev *dev,
107 struct mlx5_mp_arg_queue_state_modify *sm);
109 uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
110 [0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
113 uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned;
114 uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned;
117 * Build a table to translate Rx completion flags to packet type.
119 * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
122 mlx5_set_ptype_table(void)
125 uint32_t (*p)[RTE_DIM(mlx5_ptype_table)] = &mlx5_ptype_table;
127 /* Last entry must not be overwritten, reserved for errored packet. */
128 for (i = 0; i < RTE_DIM(mlx5_ptype_table) - 1; ++i)
129 (*p)[i] = RTE_PTYPE_UNKNOWN;
	 * The index to the array should have:
	 * bit[1:0] = l3_hdr_type
	 * bit[4:2] = l4_hdr_type
	 * bit[5] = ip_frag
	 * bit[6] = tunneled
	 * bit[7] = outer_l3_type
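	 *
	 * For example (illustrative): index 0xc5 = 0b11000101 decodes as
	 * outer_l3_type = 1 (IPv6), tunneled = 1, l4_hdr_type = 1 (TCP)
	 * and l3_hdr_type = 1 (IPv6), matching the 0xc5 entry below:
	 * outer IPv6, inner IPv6, inner TCP.
	 */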
139 (*p)[0x00] = RTE_PTYPE_L2_ETHER;
141 (*p)[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
142 RTE_PTYPE_L4_NONFRAG;
143 (*p)[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
144 RTE_PTYPE_L4_NONFRAG;
146 (*p)[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
148 (*p)[0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
151 (*p)[0x05] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
153 (*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
155 (*p)[0x0d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
157 (*p)[0x0e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
159 (*p)[0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
161 (*p)[0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
164 (*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
166 (*p)[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
168 /* Repeat with outer_l3_type being set. Just in case. */
169 (*p)[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
170 RTE_PTYPE_L4_NONFRAG;
171 (*p)[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
172 RTE_PTYPE_L4_NONFRAG;
173 (*p)[0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
175 (*p)[0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
177 (*p)[0x85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
179 (*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
181 (*p)[0x8d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
183 (*p)[0x8e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
185 (*p)[0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
187 (*p)[0x92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
189 (*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
191 (*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
194 (*p)[0x40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
195 (*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
196 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
197 RTE_PTYPE_INNER_L4_NONFRAG;
198 (*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
199 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
200 RTE_PTYPE_INNER_L4_NONFRAG;
201 (*p)[0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
202 (*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
203 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
204 RTE_PTYPE_INNER_L4_NONFRAG;
205 (*p)[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
206 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
207 RTE_PTYPE_INNER_L4_NONFRAG;
208 /* Tunneled - Fragmented */
209 (*p)[0x61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
210 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
211 RTE_PTYPE_INNER_L4_FRAG;
212 (*p)[0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
213 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
214 RTE_PTYPE_INNER_L4_FRAG;
215 (*p)[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
216 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
217 RTE_PTYPE_INNER_L4_FRAG;
218 (*p)[0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
219 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
220 RTE_PTYPE_INNER_L4_FRAG;
222 (*p)[0x45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
223 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
224 RTE_PTYPE_INNER_L4_TCP;
225 (*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
226 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
227 RTE_PTYPE_INNER_L4_TCP;
228 (*p)[0x4d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
229 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
230 RTE_PTYPE_INNER_L4_TCP;
231 (*p)[0x4e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
232 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
233 RTE_PTYPE_INNER_L4_TCP;
234 (*p)[0x51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
235 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
236 RTE_PTYPE_INNER_L4_TCP;
237 (*p)[0x52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
238 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
239 RTE_PTYPE_INNER_L4_TCP;
240 (*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
241 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
242 RTE_PTYPE_INNER_L4_TCP;
243 (*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
244 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
245 RTE_PTYPE_INNER_L4_TCP;
246 (*p)[0xcd] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
247 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
248 RTE_PTYPE_INNER_L4_TCP;
249 (*p)[0xce] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
250 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
251 RTE_PTYPE_INNER_L4_TCP;
252 (*p)[0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
253 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
254 RTE_PTYPE_INNER_L4_TCP;
255 (*p)[0xd2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
256 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
257 RTE_PTYPE_INNER_L4_TCP;
259 (*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
260 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
261 RTE_PTYPE_INNER_L4_UDP;
262 (*p)[0x4a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
263 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
264 RTE_PTYPE_INNER_L4_UDP;
265 (*p)[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
266 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
267 RTE_PTYPE_INNER_L4_UDP;
268 (*p)[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
269 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
270 RTE_PTYPE_INNER_L4_UDP;
 * Build a table to translate a packet's checksum requests to the checksum
 * type of Verbs.
277 mlx5_set_cksum_table(void)
283 * The index should have:
	 * bit[0] = PKT_TX_TCP_SEG
	 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
	 * bit[4] = PKT_TX_IP_CKSUM
	 * bit[8] = PKT_TX_OUTER_IP_CKSUM
	 * bit[9] = tunneled
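	 *
	 * Worked example (illustrative): requesting PKT_TX_IP_CKSUM |
	 * PKT_TX_TCP_CKSUM sets bit[4] and the TCP value in bit[2:3]
	 * while bit[9] stays clear, so the non-tunneled branch below
	 * stores MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM.
	 */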
290 for (i = 0; i < RTE_DIM(mlx5_cksum_table); ++i) {
293 /* Tunneled packet. */
294 if (i & (1 << 8)) /* Outer IP. */
295 v |= MLX5_ETH_WQE_L3_CSUM;
296 if (i & (1 << 4)) /* Inner IP. */
297 v |= MLX5_ETH_WQE_L3_INNER_CSUM;
298 if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
299 v |= MLX5_ETH_WQE_L4_INNER_CSUM;
302 if (i & (1 << 4)) /* IP. */
303 v |= MLX5_ETH_WQE_L3_CSUM;
304 if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
305 v |= MLX5_ETH_WQE_L4_CSUM;
307 mlx5_cksum_table[i] = v;
 * Build a table to translate the packet type of an mbuf to the SWP type
 * of Verbs.
315 mlx5_set_swp_types_table(void)
321 * The index should have:
322 * bit[0:1] = PKT_TX_L4_MASK
323 * bit[4] = PKT_TX_IPV6
324 * bit[8] = PKT_TX_OUTER_IPV6
325 * bit[9] = PKT_TX_OUTER_UDP
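	 *
	 * Illustrative example: an mbuf with PKT_TX_OUTER_IPV6 and an
	 * inner UDP checksum request sets bit[8] and the UDP value in
	 * bit[0:1], so the resulting entry combines
	 * MLX5_ETH_WQE_L3_OUTER_IPV6 with MLX5_ETH_WQE_L4_INNER_UDP.
	 */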
327 for (i = 0; i < RTE_DIM(mlx5_swp_types_table); ++i) {
330 v |= MLX5_ETH_WQE_L3_OUTER_IPV6;
332 v |= MLX5_ETH_WQE_L4_OUTER_UDP;
334 v |= MLX5_ETH_WQE_L3_INNER_IPV6;
335 if ((i & 3) == (PKT_TX_UDP_CKSUM >> 52))
336 v |= MLX5_ETH_WQE_L4_INNER_UDP;
337 mlx5_swp_types_table[i] = v;
342 * Internal function to compute the number of used descriptors in an RX queue
 *   The number of used Rx descriptors.
351 rx_queue_count(struct mlx5_rxq_data *rxq)
353 struct rxq_zip *zip = &rxq->zip;
354 volatile struct mlx5_cqe *cqe;
355 const unsigned int cqe_n = (1 << rxq->cqe_n);
356 const unsigned int cqe_cnt = cqe_n - 1;
	/* If we are processing a compressed CQE. */
362 used = zip->cqe_cnt - zip->ca;
368 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
369 while (check_cqe(cqe, cqe_n, cq_ci) != MLX5_CQE_STATUS_HW_OWN) {
373 op_own = cqe->op_own;
374 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
375 n = rte_be_to_cpu_32(cqe->byte_cnt);
380 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
382 used = RTE_MIN(used, (1U << rxq->elts_n) - 1);
 * DPDK callback to check the status of an Rx descriptor.
392 * The index of the descriptor in the ring.
 *   The status of the Rx descriptor.
398 mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
400 struct mlx5_rxq_data *rxq = rx_queue;
401 struct mlx5_rxq_ctrl *rxq_ctrl =
402 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
403 struct rte_eth_dev *dev = ETH_DEV(rxq_ctrl->priv);
405 if (dev->rx_pkt_burst != mlx5_rx_burst) {
409 if (offset >= (1 << rxq->elts_n)) {
413 if (offset < rx_queue_count(rxq))
414 return RTE_ETH_RX_DESC_DONE;
415 return RTE_ETH_RX_DESC_AVAIL;
 * DPDK callback to get the number of used descriptors in an Rx queue.
422 * Pointer to the device structure.
 *   The number of used Rx descriptors.
 *   -EINVAL if the queue is invalid.
432 mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
434 struct mlx5_priv *priv = dev->data->dev_private;
435 struct mlx5_rxq_data *rxq;
437 if (dev->rx_pkt_burst != mlx5_rx_burst) {
441 rxq = (*priv->rxqs)[rx_queue_id];
446 return rx_queue_count(rxq);
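/*
 * Usage sketch (application side, illustrative only): this callback is
 * reached through the generic ethdev API, e.g.
 *
 *	int used = rte_eth_rx_queue_count(port_id, queue_id);
 *
 *	if (used >= 0 && (unsigned int)used > watermark)
 *		; // hypothetical reaction: the queue is filling up
 *
 * where port_id, queue_id and watermark are hypothetical application
 * variables.
 */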
449 #define MLX5_SYSTEM_LOG_DIR "/var/log"
451 * Dump debug information to log file.
 *   If not NULL, this string is printed as a header to the output
 *   and the output will be in hexadecimal view.
459 * This is the buffer address to print out.
461 * The number of bytes to dump out.
464 mlx5_dump_debug_information(const char *fname, const char *hex_title,
465 const void *buf, unsigned int hex_len)
469 MKSTR(path, "%s/%s", MLX5_SYSTEM_LOG_DIR, fname);
470 fd = fopen(path, "a+");
472 DRV_LOG(WARNING, "cannot open %s for debug dump\n",
474 MKSTR(path2, "./%s", fname);
475 fd = fopen(path2, "a+");
477 DRV_LOG(ERR, "cannot open %s for debug dump\n",
481 DRV_LOG(INFO, "New debug dump in file %s\n", path2);
483 DRV_LOG(INFO, "New debug dump in file %s\n", path);
486 rte_hexdump(fd, hex_title, buf, hex_len);
488 fprintf(fd, "%s", (const char *)buf);
489 fprintf(fd, "\n\n\n");
494 * Move QP from error state to running state and initialize indexes.
497 * Pointer to TX queue control structure.
500 * 0 on success, else -1.
503 tx_recover_qp(struct mlx5_txq_ctrl *txq_ctrl)
505 struct mlx5_mp_arg_queue_state_modify sm = {
507 .queue_id = txq_ctrl->txq.idx,
510 if (mlx5_queue_state_modify(ETH_DEV(txq_ctrl->priv), &sm))
512 txq_ctrl->txq.wqe_ci = 0;
513 txq_ctrl->txq.wqe_pi = 0;
514 txq_ctrl->txq.elts_comp = 0;
/* Return 1 if the error CQE has already been signed; otherwise sign it and return 0. */
520 check_err_cqe_seen(volatile struct mlx5_err_cqe *err_cqe)
522 static const uint8_t magic[] = "seen";
526 for (i = 0; i < sizeof(magic); ++i)
527 if (!ret || err_cqe->rsvd1[i] != magic[i]) {
529 err_cqe->rsvd1[i] = magic[i];
538 * Pointer to TX queue structure.
540 * Pointer to the error CQE.
543 * The last Tx buffer element to free.
546 mlx5_tx_error_cqe_handle(struct mlx5_txq_data *txq,
547 volatile struct mlx5_err_cqe *err_cqe)
549 if (err_cqe->syndrome != MLX5_CQE_SYNDROME_WR_FLUSH_ERR) {
550 const uint16_t wqe_m = ((1 << txq->wqe_n) - 1);
551 struct mlx5_txq_ctrl *txq_ctrl =
552 container_of(txq, struct mlx5_txq_ctrl, txq);
553 uint16_t new_wqe_pi = rte_be_to_cpu_16(err_cqe->wqe_counter);
554 int seen = check_err_cqe_seen(err_cqe);
556 if (!seen && txq_ctrl->dump_file_n <
557 txq_ctrl->priv->config.max_dump_files_num) {
558 MKSTR(err_str, "Unexpected CQE error syndrome "
559 "0x%02x CQN = %u SQN = %u wqe_counter = %u "
560 "wq_ci = %u cq_ci = %u", err_cqe->syndrome,
561 txq->cqe_s, txq->qp_num_8s >> 8,
562 rte_be_to_cpu_16(err_cqe->wqe_counter),
563 txq->wqe_ci, txq->cq_ci);
564 MKSTR(name, "dpdk_mlx5_port_%u_txq_%u_index_%u_%u",
565 PORT_ID(txq_ctrl->priv), txq->idx,
566 txq_ctrl->dump_file_n, (uint32_t)rte_rdtsc());
567 mlx5_dump_debug_information(name, NULL, err_str, 0);
568 mlx5_dump_debug_information(name, "MLX5 Error CQ:",
569 (const void *)((uintptr_t)
573 mlx5_dump_debug_information(name, "MLX5 Error SQ:",
574 (const void *)((uintptr_t)
578 txq_ctrl->dump_file_n++;
		 * Count errors in units of WQEs.
		 * Later this can be improved to count error packets,
		 * for example, by parsing the SQ to find how many
		 * packets should be counted for each WQE.
587 txq->stats.oerrors += ((txq->wqe_ci & wqe_m) -
589 if (tx_recover_qp(txq_ctrl) == 0) {
591 /* Release all the remaining buffers. */
592 return txq->elts_head;
594 /* Recovering failed - try again later on the same WQE. */
598 /* Do not release buffers. */
599 return txq->elts_tail;
603 * Translate RX completion flags to packet type.
606 * Pointer to RX queue structure.
610 * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
613 * Packet type for struct rte_mbuf.
615 static inline uint32_t
616 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
619 uint8_t pinfo = cqe->pkt_info;
620 uint16_t ptype = cqe->hdr_type_etc;
	 * The index to the array should have:
	 * bit[1:0] = l3_hdr_type
	 * bit[4:2] = l4_hdr_type
	 * bit[5] = ip_frag
	 * bit[6] = tunneled
	 * bit[7] = outer_l3_type
	 */
630 idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10);
631 return mlx5_ptype_table[idx] | rxq->tunnel * !!(idx & (1 << 6));
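/*
 * Note on the return expression above: "rxq->tunnel * !!(idx & (1 << 6))"
 * is a branchless way to OR in the tunnel ptype only when the tunneled
 * bit (bit 6) of the index is set - the multiplier evaluates to 0 or 1,
 * so no conditional jump is taken on this hot path.
 */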
635 * Initialize Rx WQ and indexes.
638 * Pointer to RX queue structure.
641 mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
643 const unsigned int wqe_n = 1 << rxq->elts_n;
646 for (i = 0; (i != wqe_n); ++i) {
647 volatile struct mlx5_wqe_data_seg *scat;
651 if (mlx5_rxq_mprq_enabled(rxq)) {
652 struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[i];
654 scat = &((volatile struct mlx5_wqe_mprq *)
656 addr = (uintptr_t)mlx5_mprq_buf_addr(buf);
657 byte_count = (1 << rxq->strd_sz_n) *
658 (1 << rxq->strd_num_n);
660 struct rte_mbuf *buf = (*rxq->elts)[i];
662 scat = &((volatile struct mlx5_wqe_data_seg *)
664 addr = rte_pktmbuf_mtod(buf, uintptr_t);
665 byte_count = DATA_LEN(buf);
667 /* scat->addr must be able to store a pointer. */
668 assert(sizeof(scat->addr) >= sizeof(uintptr_t));
669 *scat = (struct mlx5_wqe_data_seg){
670 .addr = rte_cpu_to_be_64(addr),
671 .byte_count = rte_cpu_to_be_32(byte_count),
672 .lkey = mlx5_rx_addr2mr(rxq, addr),
675 rxq->consumed_strd = 0;
676 rxq->decompressed = 0;
678 rxq->zip = (struct rxq_zip){
681 /* Update doorbell counter. */
682 rxq->rq_ci = wqe_n >> rxq->sges_n;
684 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
688 * Modify a Verbs queue state.
689 * This must be called from the primary process.
692 * Pointer to Ethernet device.
694 * State modify request parameters.
697 * 0 in case of success else non-zero value and rte_errno is set.
700 mlx5_queue_state_modify_primary(struct rte_eth_dev *dev,
701 const struct mlx5_mp_arg_queue_state_modify *sm)
704 struct mlx5_priv *priv = dev->data->dev_private;
707 struct ibv_wq_attr mod = {
708 .attr_mask = IBV_WQ_ATTR_STATE,
709 .wq_state = sm->state,
711 struct mlx5_rxq_data *rxq = (*priv->rxqs)[sm->queue_id];
712 struct mlx5_rxq_ctrl *rxq_ctrl =
713 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
715 ret = mlx5_glue->modify_wq(rxq_ctrl->ibv->wq, &mod);
717 DRV_LOG(ERR, "Cannot change Rx WQ state to %u - %s\n",
718 sm->state, strerror(errno));
723 struct mlx5_txq_data *txq = (*priv->txqs)[sm->queue_id];
724 struct mlx5_txq_ctrl *txq_ctrl =
725 container_of(txq, struct mlx5_txq_ctrl, txq);
726 struct ibv_qp_attr mod = {
727 .qp_state = IBV_QPS_RESET,
728 .port_num = (uint8_t)priv->ibv_port,
730 struct ibv_qp *qp = txq_ctrl->ibv->qp;
732 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
734 DRV_LOG(ERR, "Cannot change the Tx QP state to RESET "
735 "%s\n", strerror(errno));
739 mod.qp_state = IBV_QPS_INIT;
740 ret = mlx5_glue->modify_qp(qp, &mod,
741 (IBV_QP_STATE | IBV_QP_PORT));
743 DRV_LOG(ERR, "Cannot change Tx QP state to INIT %s\n",
748 mod.qp_state = IBV_QPS_RTR;
749 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
751 DRV_LOG(ERR, "Cannot change Tx QP state to RTR %s\n",
756 mod.qp_state = IBV_QPS_RTS;
757 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
759 DRV_LOG(ERR, "Cannot change Tx QP state to RTS %s\n",
769 * Modify a Verbs queue state.
772 * Pointer to Ethernet device.
774 * State modify request parameters.
777 * 0 in case of success else non-zero value.
780 mlx5_queue_state_modify(struct rte_eth_dev *dev,
781 struct mlx5_mp_arg_queue_state_modify *sm)
785 switch (rte_eal_process_type()) {
786 case RTE_PROC_PRIMARY:
787 ret = mlx5_queue_state_modify_primary(dev, sm);
789 case RTE_PROC_SECONDARY:
790 ret = mlx5_mp_req_queue_state_modify(dev, sm);
 * The function moves the RQ state to RESET when the first error CQE is
 * seen, then lets the caller's loop drain the CQ. When the CQ is empty,
 * it moves the RQ state to READY and reinitializes the RQ.
 * Identifying the next CQE and counting errors remain the caller's
 * responsibility.
806 * Pointer to RX queue structure.
807 * @param[in] mbuf_prepare
808 * Whether to prepare mbufs for the RQ.
811 * -1 in case of recovery error, otherwise the CQE status.
814 mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t mbuf_prepare)
816 const uint16_t cqe_n = 1 << rxq->cqe_n;
817 const uint16_t cqe_mask = cqe_n - 1;
818 const unsigned int wqe_n = 1 << rxq->elts_n;
819 struct mlx5_rxq_ctrl *rxq_ctrl =
820 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
822 volatile struct mlx5_cqe *cqe;
823 volatile struct mlx5_err_cqe *err_cqe;
825 .cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask],
827 struct mlx5_mp_arg_queue_state_modify sm;
830 switch (rxq->err_state) {
831 case MLX5_RXQ_ERR_STATE_NO_ERROR:
832 rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_RESET;
834 case MLX5_RXQ_ERR_STATE_NEED_RESET:
836 sm.queue_id = rxq->idx;
837 sm.state = IBV_WQS_RESET;
838 if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv), &sm))
840 if (rxq_ctrl->dump_file_n <
841 rxq_ctrl->priv->config.max_dump_files_num) {
842 MKSTR(err_str, "Unexpected CQE error syndrome "
843 "0x%02x CQN = %u RQN = %u wqe_counter = %u"
844 " rq_ci = %u cq_ci = %u", u.err_cqe->syndrome,
845 rxq->cqn, rxq_ctrl->wqn,
846 rte_be_to_cpu_16(u.err_cqe->wqe_counter),
847 rxq->rq_ci << rxq->sges_n, rxq->cq_ci);
848 MKSTR(name, "dpdk_mlx5_port_%u_rxq_%u_%u",
849 rxq->port_id, rxq->idx, (uint32_t)rte_rdtsc());
850 mlx5_dump_debug_information(name, NULL, err_str, 0);
851 mlx5_dump_debug_information(name, "MLX5 Error CQ:",
852 (const void *)((uintptr_t)
854 sizeof(*u.cqe) * cqe_n);
855 mlx5_dump_debug_information(name, "MLX5 Error RQ:",
856 (const void *)((uintptr_t)
859 rxq_ctrl->dump_file_n++;
861 rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_READY;
863 case MLX5_RXQ_ERR_STATE_NEED_READY:
864 ret = check_cqe(u.cqe, cqe_n, rxq->cq_ci);
865 if (ret == MLX5_CQE_STATUS_HW_OWN) {
867 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
870 * The RQ consumer index must be zeroed while moving
871 * from RESET state to RDY state.
873 *rxq->rq_db = rte_cpu_to_be_32(0);
876 sm.queue_id = rxq->idx;
877 sm.state = IBV_WQS_RDY;
878 if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv),
882 const uint16_t q_mask = wqe_n - 1;
884 struct rte_mbuf **elt;
886 unsigned int n = wqe_n - (rxq->rq_ci -
889 for (i = 0; i < (int)n; ++i) {
890 elt_idx = (rxq->rq_ci + i) & q_mask;
891 elt = &(*rxq->elts)[elt_idx];
892 *elt = rte_mbuf_raw_alloc(rxq->mp);
894 for (i--; i >= 0; --i) {
895 elt_idx = (rxq->rq_ci +
906 mlx5_rxq_initialize(rxq);
907 rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
 * Get the size of the next packet for a given CQE. For compressed CQEs,
 * the consumer index is updated only once all packets of the current one
 * have been processed.
921 * Pointer to RX queue.
 *   Store a pointer to the mini-CQE if compressed. Otherwise, the pointer
 *   is not written.
929 * 0 in case of empty CQE, otherwise the packet size in bytes.
932 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
933 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe)
935 struct rxq_zip *zip = &rxq->zip;
936 uint16_t cqe_n = cqe_cnt + 1;
942 /* Process compressed data in the CQE and mini arrays. */
944 volatile struct mlx5_mini_cqe8 (*mc)[8] =
945 (volatile struct mlx5_mini_cqe8 (*)[8])
946 (uintptr_t)(&(*rxq->cqes)[zip->ca &
949 len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt);
950 *mcqe = &(*mc)[zip->ai & 7];
951 if ((++zip->ai & 7) == 0) {
952 /* Invalidate consumed CQEs */
956 (*rxq->cqes)[idx & cqe_cnt].op_own =
961 * Increment consumer index to skip the number
962 * of CQEs consumed. Hardware leaves holes in
963 * the CQ ring for software use.
968 if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) {
969 /* Invalidate the rest */
974 (*rxq->cqes)[idx & cqe_cnt].op_own =
978 rxq->cq_ci = zip->cq_ci;
		 * No compressed data, get the next CQE and verify if it is
		 * compressed.
989 ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
990 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
991 if (unlikely(ret == MLX5_CQE_STATUS_ERR ||
993 ret = mlx5_rx_err_handle(rxq, 0);
994 if (ret == MLX5_CQE_STATUS_HW_OWN ||
1002 op_own = cqe->op_own;
1003 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) {
1004 volatile struct mlx5_mini_cqe8 (*mc)[8] =
1005 (volatile struct mlx5_mini_cqe8 (*)[8])
1006 (uintptr_t)(&(*rxq->cqes)
1010 /* Fix endianness. */
1011 zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
1013 * Current mini array position is the one
			 * returned by check_cqe().
1016 * If completion comprises several mini arrays,
1017 * as a special case the second one is located
1018 * 7 CQEs after the initial CQE instead of 8
1019 * for subsequent ones.
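			 *
			 * For example (illustrative): with the initial CQE
			 * at index N, the mini-CQE arrays are found at
			 * indexes N, N + 7, N + 15, N + 23, ... - zip->ca
			 * and zip->na below track the current and the next
			 * array positions.
			 */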
1021 zip->ca = rxq->cq_ci;
1022 zip->na = zip->ca + 7;
			/* Compute the next non-compressed CQE. */
1025 zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
1026 /* Get packet size to return. */
1027 len = rte_be_to_cpu_32((*mc)[0].byte_cnt);
			/* Prefetch all CQEs to be invalidated. */
1033 while (idx != end) {
1034 rte_prefetch0(&(*rxq->cqes)[(idx) &
1039 len = rte_be_to_cpu_32(cqe->byte_cnt);
1042 if (unlikely(rxq->err_state)) {
1043 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1044 ++rxq->stats.idropped;
1052 * Translate RX completion flags to offload flags.
1058 * Offload flags (ol_flags) for struct rte_mbuf.
1060 static inline uint32_t
1061 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe)
1063 uint32_t ol_flags = 0;
1064 uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);
1068 MLX5_CQE_RX_L3_HDR_VALID,
1069 PKT_RX_IP_CKSUM_GOOD) |
1071 MLX5_CQE_RX_L4_HDR_VALID,
1072 PKT_RX_L4_CKSUM_GOOD);
1077 * Fill in mbuf fields from RX completion flags.
1078 * Note that pkt->ol_flags should be initialized outside of this function.
1081 * Pointer to RX queue.
1086 * @param rss_hash_res
1087 * Packet RSS Hash result.
1090 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
1091 volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res)
1093 /* Update packet information. */
1094 pkt->packet_type = rxq_cq_to_pkt_type(rxq, cqe);
1095 if (rss_hash_res && rxq->rss_hash) {
1096 pkt->hash.rss = rss_hash_res;
1097 pkt->ol_flags |= PKT_RX_RSS_HASH;
1099 if (rxq->mark && MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) {
1100 pkt->ol_flags |= PKT_RX_FDIR;
1101 if (cqe->sop_drop_qpn !=
1102 rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) {
1103 uint32_t mark = cqe->sop_drop_qpn;
1105 pkt->ol_flags |= PKT_RX_FDIR_ID;
1106 pkt->hash.fdir.hi = mlx5_flow_mark_get(mark);
1110 pkt->ol_flags |= rxq_cq_to_ol_flags(cqe);
1111 if (rxq->vlan_strip &&
1112 (cqe->hdr_type_etc & rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) {
1113 pkt->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1114 pkt->vlan_tci = rte_be_to_cpu_16(cqe->vlan_info);
1116 if (rxq->hw_timestamp) {
1117 pkt->timestamp = rte_be_to_cpu_64(cqe->timestamp);
1118 pkt->ol_flags |= PKT_RX_TIMESTAMP;
1123 * DPDK callback for RX.
1126 * Generic pointer to RX queue structure.
1128 * Array to store received packets.
1130 * Maximum number of packets in array.
1133 * Number of packets successfully received (<= pkts_n).
1136 mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1138 struct mlx5_rxq_data *rxq = dpdk_rxq;
1139 const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1;
1140 const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1;
1141 const unsigned int sges_n = rxq->sges_n;
1142 struct rte_mbuf *pkt = NULL;
1143 struct rte_mbuf *seg = NULL;
1144 volatile struct mlx5_cqe *cqe =
1145 &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1147 unsigned int rq_ci = rxq->rq_ci << sges_n;
1148 int len = 0; /* keep its value across iterations. */
1151 unsigned int idx = rq_ci & wqe_cnt;
1152 volatile struct mlx5_wqe_data_seg *wqe =
1153 &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx];
1154 struct rte_mbuf *rep = (*rxq->elts)[idx];
1155 volatile struct mlx5_mini_cqe8 *mcqe = NULL;
1156 uint32_t rss_hash_res;
1164 rep = rte_mbuf_raw_alloc(rxq->mp);
1165 if (unlikely(rep == NULL)) {
1166 ++rxq->stats.rx_nombuf;
1169 * no buffers before we even started,
1170 * bail out silently.
1174 while (pkt != seg) {
1175 assert(pkt != (*rxq->elts)[idx]);
1179 rte_mbuf_raw_free(pkt);
1185 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1186 len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe);
1188 rte_mbuf_raw_free(rep);
1192 assert(len >= (rxq->crc_present << 2));
1194 /* If compressed, take hash result from mini-CQE. */
1195 rss_hash_res = rte_be_to_cpu_32(mcqe == NULL ?
1197 mcqe->rx_hash_result);
1198 rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
1199 if (rxq->crc_present)
1200 len -= RTE_ETHER_CRC_LEN;
1203 DATA_LEN(rep) = DATA_LEN(seg);
1204 PKT_LEN(rep) = PKT_LEN(seg);
1205 SET_DATA_OFF(rep, DATA_OFF(seg));
1206 PORT(rep) = PORT(seg);
1207 (*rxq->elts)[idx] = rep;
1209 * Fill NIC descriptor with the new buffer. The lkey and size
		 * of the buffers are already known, only the buffer address
		 * changes.
1213 wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
1214 /* If there's only one MR, no need to replace LKey in WQE. */
1215 if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
1216 wqe->lkey = mlx5_rx_mb2mr(rxq, rep);
1217 if (len > DATA_LEN(seg)) {
1218 len -= DATA_LEN(seg);
1223 DATA_LEN(seg) = len;
1224 #ifdef MLX5_PMD_SOFT_COUNTERS
1225 /* Increment bytes counter. */
1226 rxq->stats.ibytes += PKT_LEN(pkt);
1228 /* Return packet. */
1233 /* Align consumer index to the next stride. */
1238 if (unlikely((i == 0) && ((rq_ci >> sges_n) == rxq->rq_ci)))
1240 /* Update the consumer index. */
1241 rxq->rq_ci = rq_ci >> sges_n;
1243 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1245 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
1246 #ifdef MLX5_PMD_SOFT_COUNTERS
1247 /* Increment packets counter. */
1248 rxq->stats.ipackets += i;
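/*
 * Usage sketch (application side, illustrative only): the burst routine
 * above is invoked through the generic ethdev receive API, e.g.
 *
 *	struct rte_mbuf *bufs[32];
 *	uint16_t nb = rte_eth_rx_burst(port_id, queue_id, bufs, 32);
 *
 *	for (uint16_t k = 0; k < nb; ++k)
 *		rte_pktmbuf_free(bufs[k]); // or hand off for processing
 *
 * where port_id and queue_id are hypothetical application variables.
 */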
1254 mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
1256 struct mlx5_mprq_buf *buf = opaque;
1258 if (rte_atomic16_read(&buf->refcnt) == 1) {
1259 rte_mempool_put(buf->mp, buf);
1260 } else if (rte_atomic16_add_return(&buf->refcnt, -1) == 0) {
1261 rte_atomic16_set(&buf->refcnt, 1);
1262 rte_mempool_put(buf->mp, buf);
1267 mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf)
1269 mlx5_mprq_buf_free_cb(NULL, buf);
1273 mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx)
1275 struct mlx5_mprq_buf *rep = rxq->mprq_repl;
1276 volatile struct mlx5_wqe_data_seg *wqe =
1277 &((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg;
1280 assert(rep != NULL);
1281 /* Replace MPRQ buf. */
1282 (*rxq->mprq_bufs)[rq_idx] = rep;
1284 addr = mlx5_mprq_buf_addr(rep);
1285 wqe->addr = rte_cpu_to_be_64((uintptr_t)addr);
1286 /* If there's only one MR, no need to replace LKey in WQE. */
1287 if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
1288 wqe->lkey = mlx5_rx_addr2mr(rxq, (uintptr_t)addr);
1289 /* Stash a mbuf for next replacement. */
1290 if (likely(!rte_mempool_get(rxq->mprq_mp, (void **)&rep)))
1291 rxq->mprq_repl = rep;
1293 rxq->mprq_repl = NULL;
1297 * DPDK callback for RX with Multi-Packet RQ support.
1300 * Generic pointer to RX queue structure.
1302 * Array to store received packets.
1304 * Maximum number of packets in array.
1307 * Number of packets successfully received (<= pkts_n).
1310 mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1312 struct mlx5_rxq_data *rxq = dpdk_rxq;
1313 const unsigned int strd_n = 1 << rxq->strd_num_n;
1314 const unsigned int strd_sz = 1 << rxq->strd_sz_n;
1315 const unsigned int strd_shift =
1316 MLX5_MPRQ_STRIDE_SHIFT_BYTE * rxq->strd_shift_en;
1317 const unsigned int cq_mask = (1 << rxq->cqe_n) - 1;
1318 const unsigned int wq_mask = (1 << rxq->elts_n) - 1;
1319 volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
1321 uint32_t rq_ci = rxq->rq_ci;
1322 uint16_t consumed_strd = rxq->consumed_strd;
1323 struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
1325 while (i < pkts_n) {
1326 struct rte_mbuf *pkt;
1334 volatile struct mlx5_mini_cqe8 *mcqe = NULL;
1335 uint32_t rss_hash_res = 0;
1337 if (consumed_strd == strd_n) {
1338 /* Replace WQE only if the buffer is still in use. */
1339 if (rte_atomic16_read(&buf->refcnt) > 1) {
1340 mprq_buf_replace(rxq, rq_ci & wq_mask);
1341 /* Release the old buffer. */
1342 mlx5_mprq_buf_free(buf);
1343 } else if (unlikely(rxq->mprq_repl == NULL)) {
1344 struct mlx5_mprq_buf *rep;
				 * Currently, the MPRQ mempool is out of
				 * buffers and memcpy is done regardless of
				 * the Rx packet size. Retry the allocation
				 * to get back to normal.
1352 if (!rte_mempool_get(rxq->mprq_mp,
1354 rxq->mprq_repl = rep;
1356 /* Advance to the next WQE. */
1359 buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
1361 cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
1362 ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe);
1366 strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
1367 MLX5_MPRQ_STRIDE_NUM_SHIFT;
1369 consumed_strd += strd_cnt;
1370 if (byte_cnt & MLX5_MPRQ_FILLER_MASK)
1373 rss_hash_res = rte_be_to_cpu_32(cqe->rx_hash_res);
1374 strd_idx = rte_be_to_cpu_16(cqe->wqe_counter);
1376 /* mini-CQE for MPRQ doesn't have hash result. */
1377 strd_idx = rte_be_to_cpu_16(mcqe->stride_idx);
1379 assert(strd_idx < strd_n);
1380 assert(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) & wq_mask));
		 * Currently configured to receive a packet per stride. But if
		 * MTU is adjusted through the kernel interface, the device
		 * could consume multiple strides without raising an error. In
		 * this case, the packet should be dropped because it is
		 * bigger than max_rx_pkt_len.
1388 if (unlikely(strd_cnt > 1)) {
1389 ++rxq->stats.idropped;
1392 pkt = rte_pktmbuf_alloc(rxq->mp);
1393 if (unlikely(pkt == NULL)) {
1394 ++rxq->stats.rx_nombuf;
1397 len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
1398 assert((int)len >= (rxq->crc_present << 2));
1399 if (rxq->crc_present)
1400 len -= RTE_ETHER_CRC_LEN;
1401 offset = strd_idx * strd_sz + strd_shift;
1402 addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf), offset);
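		/*
		 * Illustrative numbers: with 2048-byte strides and
		 * strd_idx == 3, the packet data starts 3 * 2048 bytes
		 * into the MPRQ buffer, plus MLX5_MPRQ_STRIDE_SHIFT_BYTE
		 * (2) bytes when the stride shift is enabled.
		 */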
1403 /* Initialize the offload flag. */
		 * Memcpy the packet to the target mbuf if:
		 * - The size of the packet is smaller than
		 *   mprq_max_memcpy_len.
		 * - The mempool for Multi-Packet RQ is out of buffers.
1410 if (len <= rxq->mprq_max_memcpy_len || rxq->mprq_repl == NULL) {
			 * When memcpy'ing a packet due to out-of-buffer, the
			 * packet must be smaller than the target mbuf.
1415 if (unlikely(rte_pktmbuf_tailroom(pkt) < len)) {
1416 rte_pktmbuf_free_seg(pkt);
1417 ++rxq->stats.idropped;
1420 rte_memcpy(rte_pktmbuf_mtod(pkt, void *), addr, len);
1422 rte_iova_t buf_iova;
1423 struct rte_mbuf_ext_shared_info *shinfo;
1424 uint16_t buf_len = strd_cnt * strd_sz;
1426 /* Increment the refcnt of the whole chunk. */
1427 rte_atomic16_add_return(&buf->refcnt, 1);
1428 assert((uint16_t)rte_atomic16_read(&buf->refcnt) <=
1430 addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM);
			 * The MLX5 device doesn't use iova, but it is
			 * necessary in a case where the Rx packet is
			 * transmitted via a different PMD.
1436 buf_iova = rte_mempool_virt2iova(buf) +
1437 RTE_PTR_DIFF(addr, buf);
1438 shinfo = rte_pktmbuf_ext_shinfo_init_helper(addr,
1439 &buf_len, mlx5_mprq_buf_free_cb, buf);
			 * EXT_ATTACHED_MBUF will be set in pkt->ol_flags when
			 * attaching the stride to the mbuf, and more offload
			 * flags will be added below by calling
			 * rxq_cq_to_mbuf(). Other fields will be overwritten.
1446 rte_pktmbuf_attach_extbuf(pkt, addr, buf_iova, buf_len,
1448 rte_pktmbuf_reset_headroom(pkt);
1449 assert(pkt->ol_flags == EXT_ATTACHED_MBUF);
			 * Prevent potential overflow due to MTU change
			 * through the kernel interface.
1454 if (unlikely(rte_pktmbuf_tailroom(pkt) < len)) {
1455 rte_pktmbuf_free_seg(pkt);
1456 ++rxq->stats.idropped;
1460 rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
1462 DATA_LEN(pkt) = len;
1463 PORT(pkt) = rxq->port_id;
1464 #ifdef MLX5_PMD_SOFT_COUNTERS
1465 /* Increment bytes counter. */
1466 rxq->stats.ibytes += PKT_LEN(pkt);
1468 /* Return packet. */
1472 /* Update the consumer indexes. */
1473 rxq->consumed_strd = consumed_strd;
1475 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1476 if (rq_ci != rxq->rq_ci) {
1479 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
1481 #ifdef MLX5_PMD_SOFT_COUNTERS
1482 /* Increment packets counter. */
1483 rxq->stats.ipackets += i;
1489 * Dummy DPDK callback for TX.
1491 * This function is used to temporarily replace the real callback during
1492 * unsafe control operations on the queue, or in case of error.
1495 * Generic pointer to TX queue structure.
1497 * Packets to transmit.
1499 * Number of packets in array.
1502 * Number of packets successfully transmitted (<= pkts_n).
1505 removed_tx_burst(void *dpdk_txq __rte_unused,
1506 struct rte_mbuf **pkts __rte_unused,
1507 uint16_t pkts_n __rte_unused)
1514 * Dummy DPDK callback for RX.
1516 * This function is used to temporarily replace the real callback during
1517 * unsafe control operations on the queue, or in case of error.
1520 * Generic pointer to RX queue structure.
1522 * Array to store received packets.
1524 * Maximum number of packets in array.
1527 * Number of packets successfully received (<= pkts_n).
removed_rx_burst(void *dpdk_rxq __rte_unused,
1531 struct rte_mbuf **pkts __rte_unused,
1532 uint16_t pkts_n __rte_unused)
1539 * Vectorized Rx/Tx routines are not compiled in when required vector
 * instructions are not supported on the target architecture. The following
 * null stubs are needed for linkage when the vectorized routines are not
 * compiled in from outside of this file (e.g. mlx5_rxtx_vec_sse.c for x86).
mlx5_rx_burst_vec(void *dpdk_rxq __rte_unused,
1547 struct rte_mbuf **pkts __rte_unused,
1548 uint16_t pkts_n __rte_unused)
1554 mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)
1560 mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused)
1566 * DPDK callback to check the status of a tx descriptor.
1571 * The index of the descriptor in the ring.
1574 * The status of the tx descriptor.
1577 mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
1581 return RTE_ETH_TX_DESC_FULL;
 * DPDK Tx callback template. This is a configured template,
 * used to generate routines optimized for the specified offload setup.
 * One of these generated functions is chosen at SQ configuration time.
1591 * Generic pointer to TX queue structure.
1593 * Packets to transmit.
1595 * Number of packets in array.
 *   Configured offloads mask, presenting the bits of the
 *   MLX5_TXOFF_CONFIG_xxx values. Should be static to take advantage of
 *   compile-time static configuration.
1602 * Number of packets successfully transmitted (<= pkts_n).
1604 static __rte_always_inline uint16_t
1605 mlx5_tx_burst_tmpl(struct mlx5_txq_data *restrict txq,
1606 struct rte_mbuf **restrict pkts,
1617 /* Generate routines with Enhanced Multi-Packet Write support. */
1618 MLX5_TXOFF_DECL(full_empw,
1619 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_EMPW)
1621 MLX5_TXOFF_DECL(none_empw,
1622 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW)
1624 MLX5_TXOFF_DECL(md_empw,
1625 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
1627 MLX5_TXOFF_DECL(mt_empw,
1628 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
1629 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
1631 MLX5_TXOFF_DECL(mtsc_empw,
1632 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
1633 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
1634 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
1636 MLX5_TXOFF_DECL(mti_empw,
1637 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
1638 MLX5_TXOFF_CONFIG_INLINE |
1639 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
1641 MLX5_TXOFF_DECL(mtv_empw,
1642 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
1643 MLX5_TXOFF_CONFIG_VLAN |
1644 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
1646 MLX5_TXOFF_DECL(mtiv_empw,
1647 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
1648 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
1649 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
1651 MLX5_TXOFF_DECL(sc_empw,
1652 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
1653 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
1655 MLX5_TXOFF_DECL(sci_empw,
1656 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
1657 MLX5_TXOFF_CONFIG_INLINE |
1658 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
1660 MLX5_TXOFF_DECL(scv_empw,
1661 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
1662 MLX5_TXOFF_CONFIG_VLAN |
1663 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
1665 MLX5_TXOFF_DECL(sciv_empw,
1666 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
1667 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
1668 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
1670 MLX5_TXOFF_DECL(i_empw,
1671 MLX5_TXOFF_CONFIG_INLINE |
1672 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
1674 MLX5_TXOFF_DECL(v_empw,
1675 MLX5_TXOFF_CONFIG_VLAN |
1676 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
1678 MLX5_TXOFF_DECL(iv_empw,
1679 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
1680 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
1682 /* Generate routines without Enhanced Multi-Packet Write support. */
1683 MLX5_TXOFF_DECL(full,
1684 MLX5_TXOFF_CONFIG_FULL)
1686 MLX5_TXOFF_DECL(none,
1687 MLX5_TXOFF_CONFIG_NONE)
1690 MLX5_TXOFF_CONFIG_METADATA)
1693 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
1694 MLX5_TXOFF_CONFIG_METADATA)
1696 MLX5_TXOFF_DECL(mtsc,
1697 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
1698 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
1699 MLX5_TXOFF_CONFIG_METADATA)
1701 MLX5_TXOFF_DECL(mti,
1702 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
1703 MLX5_TXOFF_CONFIG_INLINE |
1704 MLX5_TXOFF_CONFIG_METADATA)
1707 MLX5_TXOFF_DECL(mtv,
1708 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
1709 MLX5_TXOFF_CONFIG_VLAN |
1710 MLX5_TXOFF_CONFIG_METADATA)
1713 MLX5_TXOFF_DECL(mtiv,
1714 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
1715 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
1716 MLX5_TXOFF_CONFIG_METADATA)
1719 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
1720 MLX5_TXOFF_CONFIG_METADATA)
1722 MLX5_TXOFF_DECL(sci,
1723 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
1724 MLX5_TXOFF_CONFIG_INLINE |
1725 MLX5_TXOFF_CONFIG_METADATA)
1728 MLX5_TXOFF_DECL(scv,
1729 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
1730 MLX5_TXOFF_CONFIG_VLAN |
1731 MLX5_TXOFF_CONFIG_METADATA)
1734 MLX5_TXOFF_DECL(sciv,
1735 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
1736 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
1737 MLX5_TXOFF_CONFIG_METADATA)
1740 MLX5_TXOFF_CONFIG_INLINE |
1741 MLX5_TXOFF_CONFIG_METADATA)
1744 MLX5_TXOFF_CONFIG_VLAN |
1745 MLX5_TXOFF_CONFIG_METADATA)
1748 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
1749 MLX5_TXOFF_CONFIG_METADATA)
 * Array of declared and compiled Tx burst functions and the corresponding
 * supported offload sets. The array is used to select the Tx burst
 * function for the offload set specified at Tx queue configuration time.
1757 eth_tx_burst_t func;
1760 MLX5_TXOFF_INFO(full_empw,
1761 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
1762 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
1763 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
1764 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
1766 MLX5_TXOFF_INFO(none_empw,
1767 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW)
1769 MLX5_TXOFF_INFO(md_empw,
1770 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
1772 MLX5_TXOFF_INFO(mt_empw,
1773 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
1774 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
1776 MLX5_TXOFF_INFO(mtsc_empw,
1777 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
1778 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
1779 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
1781 MLX5_TXOFF_INFO(mti_empw,
1782 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
1783 MLX5_TXOFF_CONFIG_INLINE |
1784 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
1786 MLX5_TXOFF_INFO(mtv_empw,
1787 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
1788 MLX5_TXOFF_CONFIG_VLAN |
1789 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
1791 MLX5_TXOFF_INFO(mtiv_empw,
1792 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
1793 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
1794 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
1796 MLX5_TXOFF_INFO(sc_empw,
1797 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
1798 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
1800 MLX5_TXOFF_INFO(sci_empw,
1801 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
1802 MLX5_TXOFF_CONFIG_INLINE |
1803 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
1805 MLX5_TXOFF_INFO(scv_empw,
1806 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
1807 MLX5_TXOFF_CONFIG_VLAN |
1808 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
1810 MLX5_TXOFF_INFO(sciv_empw,
1811 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
1812 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
1813 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
1815 MLX5_TXOFF_INFO(i_empw,
1816 MLX5_TXOFF_CONFIG_INLINE |
1817 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
1819 MLX5_TXOFF_INFO(v_empw,
1820 MLX5_TXOFF_CONFIG_VLAN |
1821 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
1823 MLX5_TXOFF_INFO(iv_empw,
1824 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
1825 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
1827 MLX5_TXOFF_INFO(full,
1828 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
1829 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
1830 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
1831 MLX5_TXOFF_CONFIG_METADATA)
1833 MLX5_TXOFF_INFO(none,
1834 MLX5_TXOFF_CONFIG_NONE)
1837 MLX5_TXOFF_CONFIG_METADATA)
1840 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
1841 MLX5_TXOFF_CONFIG_METADATA)
1843 MLX5_TXOFF_INFO(mtsc,
1844 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
1845 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
1846 MLX5_TXOFF_CONFIG_METADATA)
1848 MLX5_TXOFF_INFO(mti,
1849 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
1850 MLX5_TXOFF_CONFIG_INLINE |
1851 MLX5_TXOFF_CONFIG_METADATA)
1854 MLX5_TXOFF_INFO(mtv,
1855 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
1856 MLX5_TXOFF_CONFIG_VLAN |
1857 MLX5_TXOFF_CONFIG_METADATA)
1859 MLX5_TXOFF_INFO(mtiv,
1860 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
1861 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
1862 MLX5_TXOFF_CONFIG_METADATA)
1865 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
1866 MLX5_TXOFF_CONFIG_METADATA)
1868 MLX5_TXOFF_INFO(sci,
1869 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
1870 MLX5_TXOFF_CONFIG_INLINE |
1871 MLX5_TXOFF_CONFIG_METADATA)
1873 MLX5_TXOFF_INFO(scv,
1874 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
1875 MLX5_TXOFF_CONFIG_VLAN |
1876 MLX5_TXOFF_CONFIG_METADATA)
1878 MLX5_TXOFF_INFO(sciv,
1879 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
1880 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
1881 MLX5_TXOFF_CONFIG_METADATA)
1884 MLX5_TXOFF_CONFIG_INLINE |
1885 MLX5_TXOFF_CONFIG_METADATA)
1888 MLX5_TXOFF_CONFIG_VLAN |
1889 MLX5_TXOFF_CONFIG_METADATA)
1892 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
1893 MLX5_TXOFF_CONFIG_METADATA)
 * Configure the Tx function to use. The routine checks configured
 * Tx offloads for the device and selects the appropriate Tx burst
 * routine. There are multiple Tx burst routines compiled from
 * the same template, each optimized for a dedicated offload set.
 *   Pointer to the Ethernet device structure.
1907 * Pointer to selected Tx burst function.
1910 mlx5_select_tx_function(struct rte_eth_dev *dev)
1912 struct mlx5_priv *priv = dev->data->dev_private;
1913 struct mlx5_dev_config *config = &priv->config;
1914 uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
1915 unsigned int diff = 0, olx = 0, i, m;
1917 static_assert(MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE <=
1918 MLX5_DSEG_MAX, "invalid WQE max size");
1919 static_assert(MLX5_WQE_CSEG_SIZE == MLX5_WSEG_SIZE,
1920 "invalid WQE Control Segment size");
1921 static_assert(MLX5_WQE_ESEG_SIZE == MLX5_WSEG_SIZE,
1922 "invalid WQE Ethernet Segment size");
1923 static_assert(MLX5_WQE_DSEG_SIZE == MLX5_WSEG_SIZE,
1924 "invalid WQE Data Segment size");
1925 static_assert(MLX5_WQE_SIZE == 4 * MLX5_WSEG_SIZE,
1926 "invalid WQE size");
1928 if (tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
1929 /* We should support Multi-Segment Packets. */
1930 olx |= MLX5_TXOFF_CONFIG_MULTI;
1932 if (tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
1933 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1934 DEV_TX_OFFLOAD_GRE_TNL_TSO |
1935 DEV_TX_OFFLOAD_IP_TNL_TSO |
1936 DEV_TX_OFFLOAD_UDP_TNL_TSO)) {
1937 /* We should support TCP Send Offload. */
1938 olx |= MLX5_TXOFF_CONFIG_TSO;
1940 if (tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO |
1941 DEV_TX_OFFLOAD_UDP_TNL_TSO |
1942 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
1943 /* We should support Software Parser for Tunnels. */
1944 olx |= MLX5_TXOFF_CONFIG_SWP;
1946 if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
1947 DEV_TX_OFFLOAD_UDP_CKSUM |
1948 DEV_TX_OFFLOAD_TCP_CKSUM |
1949 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
1950 /* We should support IP/TCP/UDP Checksums. */
1951 olx |= MLX5_TXOFF_CONFIG_CSUM;
1953 if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT) {
1954 /* We should support VLAN insertion. */
1955 olx |= MLX5_TXOFF_CONFIG_VLAN;
1957 if (priv->txqs_n && (*priv->txqs)[0]) {
1958 struct mlx5_txq_data *txd = (*priv->txqs)[0];
1960 if (txd->inlen_send) {
			 * Check the data inline requirements. Data inlining
			 * is enabled on a per-device basis, so we can check
			 * the first Tx queue only.
			 *
			 * If the device does not support VLAN insertion in
			 * the WQE and some queues are requested to perform
			 * VLAN insertion offload, then inlining must be
			 * enabled.
1970 olx |= MLX5_TXOFF_CONFIG_INLINE;
1973 if (config->mps == MLX5_MPW_ENHANCED &&
1974 config->txq_inline_min <= 0) {
		 * The NIC supports Enhanced Multi-Packet Write.
		 * We do not support legacy MPW due to its
		 * hardware-related problems, so we just ignore
		 * legacy MLX5_MPW settings. There should be no
		 * minimal required inline data.
1982 olx |= MLX5_TXOFF_CONFIG_EMPW;
1984 if (tx_offloads & DEV_TX_OFFLOAD_MATCH_METADATA) {
1985 /* We should support Flow metadata. */
1986 olx |= MLX5_TXOFF_CONFIG_METADATA;
	 * Scan the routine table to find the minimal
	 * routine satisfying the requested offloads.
1992 m = RTE_DIM(txoff_func);
1993 for (i = 0; i < RTE_DIM(txoff_func); i++) {
1996 tmp = txoff_func[i].olx;
			/* Meets the requested offloads exactly. */
2002 if ((tmp & olx) != olx) {
2003 /* Does not meet requested offloads at all. */
2006 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_EMPW)
2007 /* Do not enable eMPW if not configured. */
2009 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_INLINE)
2010 /* Do not enable inlining if not configured. */
		 * Some routine meets the requirements.
		 * Check whether it has the minimal amount
		 * of offloads that were not requested.
2017 tmp = __builtin_popcountl(tmp & ~olx);
2018 if (m >= RTE_DIM(txoff_func) || tmp < diff) {
2019 /* First or better match, save and continue. */
2025 tmp = txoff_func[i].olx ^ txoff_func[m].olx;
2026 if (__builtin_ffsl(txoff_func[i].olx & ~tmp) <
2027 __builtin_ffsl(txoff_func[m].olx & ~tmp)) {
			/* The surplus (not requested) offloads are lighter. */
2033 if (m >= RTE_DIM(txoff_func)) {
2034 DRV_LOG(DEBUG, "port %u has no selected Tx function"
2035 " for requested offloads %04X",
2036 dev->data->port_id, olx);
2039 DRV_LOG(DEBUG, "port %u has selected Tx function"
2040 " supporting offloads %04X/%04X",
2041 dev->data->port_id, olx, txoff_func[m].olx);
2042 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MULTI)
2043 DRV_LOG(DEBUG, "\tMULTI (multi segment)");
2044 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_TSO)
2045 DRV_LOG(DEBUG, "\tTSO (TCP send offload)");
2046 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_SWP)
2047 DRV_LOG(DEBUG, "\tSWP (software parser)");
2048 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_CSUM)
2049 DRV_LOG(DEBUG, "\tCSUM (checksum offload)");
2050 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_INLINE)
2051 DRV_LOG(DEBUG, "\tINLIN (inline data)");
2052 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_VLAN)
2053 DRV_LOG(DEBUG, "\tVLANI (VLAN insertion)");
2054 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_METADATA)
2055 DRV_LOG(DEBUG, "\tMETAD (tx Flow metadata)");
2056 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_EMPW)
2057 DRV_LOG(DEBUG, "\tEMPW (Enhanced MPW)");
2058 return txoff_func[m].func;
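/*
 * Selection example (illustrative, assuming the txoff_func[] table above):
 * with the VLAN insertion and checksum Tx offloads configured and eMPW
 * available, olx = CSUM | VLAN | EMPW. No exact match exists, candidates
 * with INLINE are rejected because inlining was not requested, and the
 * scan settles on scv_empw - the superset routine with the fewest surplus
 * offload bits (SWP and METADATA).
 */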