1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox Technologies, Ltd
12 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
14 #pragma GCC diagnostic ignored "-Wpedantic"
16 #include <infiniband/verbs.h>
17 #include <infiniband/mlx5dv.h>
19 #pragma GCC diagnostic error "-Wpedantic"
23 #include <rte_mempool.h>
24 #include <rte_prefetch.h>
25 #include <rte_common.h>
26 #include <rte_branch_prediction.h>
27 #include <rte_ether.h>
30 #include "mlx5_utils.h"
31 #include "mlx5_rxtx.h"
32 #include "mlx5_autoconf.h"
33 #include "mlx5_defs.h"
36 static __rte_always_inline uint32_t
37 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe);
39 static __rte_always_inline int
40 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
41 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe);
43 static __rte_always_inline uint32_t
44 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe);
46 static __rte_always_inline void
47 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
48 volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res);
50 static __rte_always_inline void
51 mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx);
53 uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
54 [0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
57 uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned;
58 uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned;
61 * Build a table to translate Rx completion flags to packet type.
63 * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
66 mlx5_set_ptype_table(void)
69 uint32_t (*p)[RTE_DIM(mlx5_ptype_table)] = &mlx5_ptype_table;
71 /* Last entry must not be overwritten, reserved for errored packet. */
72 for (i = 0; i < RTE_DIM(mlx5_ptype_table) - 1; ++i)
73 (*p)[i] = RTE_PTYPE_UNKNOWN;
75 * The index to the array should have:
76 * bit[1:0] = l3_hdr_type
77 * bit[4:2] = l4_hdr_type
80 * bit[7] = outer_l3_type
83 (*p)[0x00] = RTE_PTYPE_L2_ETHER;
85 (*p)[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
87 (*p)[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
90 (*p)[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
92 (*p)[0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
95 (*p)[0x05] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
97 (*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
99 (*p)[0x0d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
101 (*p)[0x0e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
103 (*p)[0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
105 (*p)[0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
108 (*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
110 (*p)[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
112 /* Repeat with outer_l3_type being set. Just in case. */
113 (*p)[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
114 RTE_PTYPE_L4_NONFRAG;
115 (*p)[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
116 RTE_PTYPE_L4_NONFRAG;
117 (*p)[0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
119 (*p)[0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
121 (*p)[0x85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
123 (*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
125 (*p)[0x8d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
127 (*p)[0x8e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
129 (*p)[0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
131 (*p)[0x92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
133 (*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
135 (*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
138 (*p)[0x40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
139 (*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
140 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
141 RTE_PTYPE_INNER_L4_NONFRAG;
142 (*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
143 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
144 RTE_PTYPE_INNER_L4_NONFRAG;
145 (*p)[0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
146 (*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
147 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
148 RTE_PTYPE_INNER_L4_NONFRAG;
149 (*p)[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
150 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
151 RTE_PTYPE_INNER_L4_NONFRAG;
152 /* Tunneled - Fragmented */
153 (*p)[0x61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
154 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
155 RTE_PTYPE_INNER_L4_FRAG;
156 (*p)[0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
157 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
158 RTE_PTYPE_INNER_L4_FRAG;
159 (*p)[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
160 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
161 RTE_PTYPE_INNER_L4_FRAG;
162 (*p)[0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
163 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
164 RTE_PTYPE_INNER_L4_FRAG;
166 (*p)[0x45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
167 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
168 RTE_PTYPE_INNER_L4_TCP;
169 (*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
170 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
171 RTE_PTYPE_INNER_L4_TCP;
172 (*p)[0x4d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
173 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
174 RTE_PTYPE_INNER_L4_TCP;
175 (*p)[0x4e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
176 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
177 RTE_PTYPE_INNER_L4_TCP;
178 (*p)[0x51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
179 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
180 RTE_PTYPE_INNER_L4_TCP;
181 (*p)[0x52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
182 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
183 RTE_PTYPE_INNER_L4_TCP;
184 (*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
185 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
186 RTE_PTYPE_INNER_L4_TCP;
187 (*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
188 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
189 RTE_PTYPE_INNER_L4_TCP;
190 (*p)[0xcd] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
191 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
192 RTE_PTYPE_INNER_L4_TCP;
193 (*p)[0xce] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
194 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
195 RTE_PTYPE_INNER_L4_TCP;
196 (*p)[0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
197 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
198 RTE_PTYPE_INNER_L4_TCP;
199 (*p)[0xd2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
200 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
201 RTE_PTYPE_INNER_L4_TCP;
203 (*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
204 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
205 RTE_PTYPE_INNER_L4_UDP;
206 (*p)[0x4a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
207 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
208 RTE_PTYPE_INNER_L4_UDP;
209 (*p)[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
210 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
211 RTE_PTYPE_INNER_L4_UDP;
212 (*p)[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
213 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
214 RTE_PTYPE_INNER_L4_UDP;
218 * Build a table to translate packet to checksum type of Verbs.
221 mlx5_set_cksum_table(void)
227 * The index should have:
228 * bit[0] = PKT_TX_TCP_SEG
229 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
230 * bit[4] = PKT_TX_IP_CKSUM
231 * bit[8] = PKT_TX_OUTER_IP_CKSUM
234 for (i = 0; i < RTE_DIM(mlx5_cksum_table); ++i) {
237 /* Tunneled packet. */
238 if (i & (1 << 8)) /* Outer IP. */
239 v |= MLX5_ETH_WQE_L3_CSUM;
240 if (i & (1 << 4)) /* Inner IP. */
241 v |= MLX5_ETH_WQE_L3_INNER_CSUM;
242 if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
243 v |= MLX5_ETH_WQE_L4_INNER_CSUM;
246 if (i & (1 << 4)) /* IP. */
247 v |= MLX5_ETH_WQE_L3_CSUM;
248 if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
249 v |= MLX5_ETH_WQE_L4_CSUM;
251 mlx5_cksum_table[i] = v;
256 * Build a table to translate packet type of mbuf to SWP type of Verbs.
259 mlx5_set_swp_types_table(void)
265 * The index should have:
266 * bit[0:1] = PKT_TX_L4_MASK
267 * bit[4] = PKT_TX_IPV6
268 * bit[8] = PKT_TX_OUTER_IPV6
269 * bit[9] = PKT_TX_OUTER_UDP
271 for (i = 0; i < RTE_DIM(mlx5_swp_types_table); ++i) {
274 v |= MLX5_ETH_WQE_L3_OUTER_IPV6;
276 v |= MLX5_ETH_WQE_L4_OUTER_UDP;
278 v |= MLX5_ETH_WQE_L3_INNER_IPV6;
279 if ((i & 3) == (PKT_TX_UDP_CKSUM >> 52))
280 v |= MLX5_ETH_WQE_L4_INNER_UDP;
281 mlx5_swp_types_table[i] = v;
286 * Return the size of tailroom of WQ.
289 * Pointer to TX queue structure.
291 * Pointer to tail of WQ.
297 tx_mlx5_wq_tailroom(struct mlx5_txq_data *txq, void *addr)
300 tailroom = (uintptr_t)(txq->wqes) +
301 (1 << txq->wqe_n) * MLX5_WQE_SIZE -
307 * Copy data to tailroom of circular queue.
310 * Pointer to destination.
314 * Number of bytes to copy.
316 * Pointer to head of queue.
318 * Size of tailroom from dst.
321 * Pointer after copied data.
324 mlx5_copy_to_wq(void *dst, const void *src, size_t n,
325 void *base, size_t tailroom)
330 rte_memcpy(dst, src, tailroom);
331 rte_memcpy(base, (void *)((uintptr_t)src + tailroom),
333 ret = (uint8_t *)base + n - tailroom;
335 rte_memcpy(dst, src, n);
336 ret = (n == tailroom) ? base : (uint8_t *)dst + n;
342 * Inline TSO headers into WQE.
345 * 0 on success, negative errno value on failure.
348 inline_tso(struct mlx5_txq_data *txq, struct rte_mbuf *buf,
351 uint16_t *pkt_inline_sz,
355 uint16_t *tso_header_sz)
357 uintptr_t end = (uintptr_t)(((uintptr_t)txq->wqes) +
358 (1 << txq->wqe_n) * MLX5_WQE_SIZE);
360 uint8_t vlan_sz = (buf->ol_flags & PKT_TX_VLAN_PKT) ? 4 : 0;
361 const uint8_t tunneled = txq->tunnel_en && (buf->ol_flags &
365 *tso_segsz = buf->tso_segsz;
366 *tso_header_sz = buf->l2_len + vlan_sz + buf->l3_len + buf->l4_len;
367 if (unlikely(*tso_segsz == 0 || *tso_header_sz == 0)) {
368 txq->stats.oerrors++;
372 *tso_header_sz += buf->outer_l2_len + buf->outer_l3_len;
373 /* First seg must contain all TSO headers. */
374 if (unlikely(*tso_header_sz > MLX5_MAX_TSO_HEADER) ||
375 *tso_header_sz > DATA_LEN(buf)) {
376 txq->stats.oerrors++;
379 copy_b = *tso_header_sz - *pkt_inline_sz;
380 if (!copy_b || ((end - (uintptr_t)*raw) < copy_b))
382 n_wqe = (MLX5_WQE_DS(copy_b) - 1 + 3) / 4;
383 if (unlikely(*max_wqe < n_wqe))
386 rte_memcpy((void *)*raw, (void *)*addr, copy_b);
389 copy_b = MLX5_WQE_DS(copy_b) * MLX5_WQE_DWORD_SIZE;
390 *pkt_inline_sz += copy_b;
396 * DPDK callback to check the status of a tx descriptor.
401 * The index of the descriptor in the ring.
404 * The status of the tx descriptor.
407 mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
409 struct mlx5_txq_data *txq = tx_queue;
412 mlx5_tx_complete(txq);
413 used = txq->elts_head - txq->elts_tail;
415 return RTE_ETH_TX_DESC_FULL;
416 return RTE_ETH_TX_DESC_DONE;
420 * DPDK callback to check the status of a rx descriptor.
425 * The index of the descriptor in the ring.
428 * The status of the tx descriptor.
431 mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
433 struct mlx5_rxq_data *rxq = rx_queue;
434 struct rxq_zip *zip = &rxq->zip;
435 volatile struct mlx5_cqe *cqe;
436 const unsigned int cqe_n = (1 << rxq->cqe_n);
437 const unsigned int cqe_cnt = cqe_n - 1;
441 /* if we are processing a compressed cqe */
443 used = zip->cqe_cnt - zip->ca;
449 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
450 while (check_cqe(cqe, cqe_n, cq_ci) == 0) {
454 op_own = cqe->op_own;
455 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
456 n = rte_be_to_cpu_32(cqe->byte_cnt);
461 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
463 used = RTE_MIN(used, (1U << rxq->elts_n) - 1);
465 return RTE_ETH_RX_DESC_DONE;
466 return RTE_ETH_RX_DESC_AVAIL;
470 * DPDK callback for TX.
473 * Generic pointer to TX queue structure.
475 * Packets to transmit.
477 * Number of packets in array.
480 * Number of packets successfully transmitted (<= pkts_n).
483 mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
485 struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
486 uint16_t elts_head = txq->elts_head;
487 const uint16_t elts_n = 1 << txq->elts_n;
488 const uint16_t elts_m = elts_n - 1;
495 volatile struct mlx5_wqe_ctrl *last_wqe = NULL;
496 unsigned int segs_n = 0;
497 const unsigned int max_inline = txq->max_inline;
500 if (unlikely(!pkts_n))
502 /* Prefetch first packet cacheline. */
503 rte_prefetch0(*pkts);
504 /* Start processing. */
505 mlx5_tx_complete(txq);
506 max_elts = (elts_n - (elts_head - txq->elts_tail));
507 max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
508 if (unlikely(!max_wqe))
511 struct rte_mbuf *buf = *pkts; /* First_seg. */
513 volatile struct mlx5_wqe_v *wqe = NULL;
514 volatile rte_v128u32_t *dseg = NULL;
517 unsigned int sg = 0; /* counter of additional segs attached. */
519 uint16_t pkt_inline_sz = MLX5_WQE_DWORD_SIZE + 2;
520 uint16_t tso_header_sz = 0;
523 uint8_t tso = txq->tso_en && (buf->ol_flags & PKT_TX_TCP_SEG);
524 uint32_t swp_offsets = 0;
525 uint8_t swp_types = 0;
526 uint16_t tso_segsz = 0;
527 #ifdef MLX5_PMD_SOFT_COUNTERS
528 uint32_t total_length = 0;
532 segs_n = buf->nb_segs;
534 * Make sure there is enough room to store this packet and
535 * that one ring entry remains unused.
538 if (max_elts < segs_n)
542 if (unlikely(--max_wqe == 0))
544 wqe = (volatile struct mlx5_wqe_v *)
545 tx_mlx5_wqe(txq, txq->wqe_ci);
546 rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
548 rte_prefetch0(*(pkts + 1));
549 addr = rte_pktmbuf_mtod(buf, uintptr_t);
550 length = DATA_LEN(buf);
551 ehdr = (((uint8_t *)addr)[1] << 8) |
552 ((uint8_t *)addr)[0];
553 #ifdef MLX5_PMD_SOFT_COUNTERS
554 total_length = length;
556 if (length < (MLX5_WQE_DWORD_SIZE + 2)) {
557 txq->stats.oerrors++;
560 /* Update element. */
561 (*txq->elts)[elts_head & elts_m] = buf;
562 /* Prefetch next buffer data. */
565 rte_pktmbuf_mtod(*(pkts + 1), volatile void *));
566 cs_flags = txq_ol_cksum_to_cs(buf);
567 txq_mbuf_to_swp(txq, buf, (uint8_t *)&swp_offsets, &swp_types);
568 raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE;
569 /* Replace the Ethernet type by the VLAN if necessary. */
570 if (buf->ol_flags & PKT_TX_VLAN_PKT) {
571 uint32_t vlan = rte_cpu_to_be_32(0x81000000 |
573 unsigned int len = 2 * ETHER_ADDR_LEN - 2;
577 /* Copy Destination and source mac address. */
578 memcpy((uint8_t *)raw, ((uint8_t *)addr), len);
580 memcpy((uint8_t *)raw + len, &vlan, sizeof(vlan));
581 /* Copy missing two bytes to end the DSeg. */
582 memcpy((uint8_t *)raw + len + sizeof(vlan),
583 ((uint8_t *)addr) + len, 2);
587 memcpy((uint8_t *)raw, ((uint8_t *)addr) + 2,
588 MLX5_WQE_DWORD_SIZE);
589 length -= pkt_inline_sz;
590 addr += pkt_inline_sz;
592 raw += MLX5_WQE_DWORD_SIZE;
594 ret = inline_tso(txq, buf, &length,
595 &addr, &pkt_inline_sz,
597 &tso_segsz, &tso_header_sz);
598 if (ret == -EINVAL) {
600 } else if (ret == -EAGAIN) {
602 wqe->ctrl = (rte_v128u32_t){
603 rte_cpu_to_be_32(txq->wqe_ci << 8),
604 rte_cpu_to_be_32(txq->qp_num_8s | 1),
609 #ifdef MLX5_PMD_SOFT_COUNTERS
616 /* Inline if enough room. */
617 if (max_inline || tso) {
619 uintptr_t end = (uintptr_t)
620 (((uintptr_t)txq->wqes) +
621 (1 << txq->wqe_n) * MLX5_WQE_SIZE);
622 unsigned int inline_room = max_inline *
623 RTE_CACHE_LINE_SIZE -
624 (pkt_inline_sz - 2) -
630 addr_end = RTE_ALIGN_FLOOR(addr + inline_room,
631 RTE_CACHE_LINE_SIZE);
632 copy_b = (addr_end > addr) ?
633 RTE_MIN((addr_end - addr), length) : 0;
634 if (copy_b && ((end - (uintptr_t)raw) > copy_b)) {
636 * One Dseg remains in the current WQE. To
637 * keep the computation positive, it is
638 * removed after the bytes to Dseg conversion.
640 uint16_t n = (MLX5_WQE_DS(copy_b) - 1 + 3) / 4;
642 if (unlikely(max_wqe < n))
647 inl = rte_cpu_to_be_32(copy_b |
649 rte_memcpy((void *)raw,
650 (void *)&inl, sizeof(inl));
652 pkt_inline_sz += sizeof(inl);
654 rte_memcpy((void *)raw, (void *)addr, copy_b);
657 pkt_inline_sz += copy_b;
660 * 2 DWORDs consumed by the WQE header + ETH segment +
661 * the size of the inline part of the packet.
663 ds = 2 + MLX5_WQE_DS(pkt_inline_sz - 2);
665 if (ds % (MLX5_WQE_SIZE /
666 MLX5_WQE_DWORD_SIZE) == 0) {
667 if (unlikely(--max_wqe == 0))
669 dseg = (volatile rte_v128u32_t *)
670 tx_mlx5_wqe(txq, txq->wqe_ci +
673 dseg = (volatile rte_v128u32_t *)
675 (ds * MLX5_WQE_DWORD_SIZE));
678 } else if (!segs_n) {
682 * Further inline the next segment only for
687 inline_room -= copy_b;
691 /* Move to the next segment. */
695 addr = rte_pktmbuf_mtod(buf, uintptr_t);
696 length = DATA_LEN(buf);
697 #ifdef MLX5_PMD_SOFT_COUNTERS
698 total_length += length;
700 (*txq->elts)[++elts_head & elts_m] = buf;
705 * No inline has been done in the packet, only the
706 * Ethernet Header as been stored.
708 dseg = (volatile rte_v128u32_t *)
709 ((uintptr_t)wqe + (3 * MLX5_WQE_DWORD_SIZE));
712 /* Add the remaining packet as a simple ds. */
713 addr_64 = rte_cpu_to_be_64(addr);
714 *dseg = (rte_v128u32_t){
715 rte_cpu_to_be_32(length),
716 mlx5_tx_mb2mr(txq, buf),
729 * Spill on next WQE when the current one does not have
730 * enough room left. Size of WQE must a be a multiple
731 * of data segment size.
733 assert(!(MLX5_WQE_SIZE % MLX5_WQE_DWORD_SIZE));
734 if (!(ds % (MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE))) {
735 if (unlikely(--max_wqe == 0))
737 dseg = (volatile rte_v128u32_t *)
738 tx_mlx5_wqe(txq, txq->wqe_ci + ds / 4);
739 rte_prefetch0(tx_mlx5_wqe(txq,
740 txq->wqe_ci + ds / 4 + 1));
747 length = DATA_LEN(buf);
748 #ifdef MLX5_PMD_SOFT_COUNTERS
749 total_length += length;
751 /* Store segment information. */
752 addr_64 = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t));
753 *dseg = (rte_v128u32_t){
754 rte_cpu_to_be_32(length),
755 mlx5_tx_mb2mr(txq, buf),
759 (*txq->elts)[++elts_head & elts_m] = buf;
763 if (ds > MLX5_DSEG_MAX) {
764 txq->stats.oerrors++;
771 /* Initialize known and common part of the WQE structure. */
773 wqe->ctrl = (rte_v128u32_t){
774 rte_cpu_to_be_32((txq->wqe_ci << 8) |
776 rte_cpu_to_be_32(txq->qp_num_8s | ds),
780 wqe->eseg = (rte_v128u32_t){
782 cs_flags | (swp_types << 8) |
783 (rte_cpu_to_be_16(tso_segsz) << 16),
785 (ehdr << 16) | rte_cpu_to_be_16(tso_header_sz),
788 wqe->ctrl = (rte_v128u32_t){
789 rte_cpu_to_be_32((txq->wqe_ci << 8) |
791 rte_cpu_to_be_32(txq->qp_num_8s | ds),
795 wqe->eseg = (rte_v128u32_t){
797 cs_flags | (swp_types << 8),
799 (ehdr << 16) | rte_cpu_to_be_16(pkt_inline_sz),
803 txq->wqe_ci += (ds + 3) / 4;
804 /* Save the last successful WQE for completion request */
805 last_wqe = (volatile struct mlx5_wqe_ctrl *)wqe;
806 #ifdef MLX5_PMD_SOFT_COUNTERS
807 /* Increment sent bytes counter. */
808 txq->stats.obytes += total_length;
810 } while (i < pkts_n);
811 /* Take a shortcut if nothing must be sent. */
812 if (unlikely((i + k) == 0))
814 txq->elts_head += (i + j);
815 /* Check whether completion threshold has been reached. */
816 comp = txq->elts_comp + i + j + k;
817 if (comp >= MLX5_TX_COMP_THRESH) {
818 /* A CQE slot must always be available. */
819 assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
820 /* Request completion on last WQE. */
821 last_wqe->ctrl2 = rte_cpu_to_be_32(8);
822 /* Save elts_head in unused "immediate" field of WQE. */
823 last_wqe->ctrl3 = txq->elts_head;
826 txq->elts_comp = comp;
828 #ifdef MLX5_PMD_SOFT_COUNTERS
829 /* Increment sent packets counter. */
830 txq->stats.opackets += i;
832 /* Ring QP doorbell. */
833 mlx5_tx_dbrec(txq, (volatile struct mlx5_wqe *)last_wqe);
838 * Open a MPW session.
841 * Pointer to TX queue structure.
843 * Pointer to MPW session structure.
848 mlx5_mpw_new(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw, uint32_t length)
850 uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
851 volatile struct mlx5_wqe_data_seg (*dseg)[MLX5_MPW_DSEG_MAX] =
852 (volatile struct mlx5_wqe_data_seg (*)[])
853 tx_mlx5_wqe(txq, idx + 1);
855 mpw->state = MLX5_MPW_STATE_OPENED;
859 mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
860 mpw->wqe->eseg.mss = rte_cpu_to_be_16(length);
861 mpw->wqe->eseg.inline_hdr_sz = 0;
862 mpw->wqe->eseg.rsvd0 = 0;
863 mpw->wqe->eseg.rsvd1 = 0;
864 mpw->wqe->eseg.rsvd2 = 0;
865 mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) |
868 mpw->wqe->ctrl[2] = 0;
869 mpw->wqe->ctrl[3] = 0;
870 mpw->data.dseg[0] = (volatile struct mlx5_wqe_data_seg *)
871 (((uintptr_t)mpw->wqe) + (2 * MLX5_WQE_DWORD_SIZE));
872 mpw->data.dseg[1] = (volatile struct mlx5_wqe_data_seg *)
873 (((uintptr_t)mpw->wqe) + (3 * MLX5_WQE_DWORD_SIZE));
874 mpw->data.dseg[2] = &(*dseg)[0];
875 mpw->data.dseg[3] = &(*dseg)[1];
876 mpw->data.dseg[4] = &(*dseg)[2];
880 * Close a MPW session.
883 * Pointer to TX queue structure.
885 * Pointer to MPW session structure.
888 mlx5_mpw_close(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw)
890 unsigned int num = mpw->pkts_n;
893 * Store size in multiple of 16 bytes. Control and Ethernet segments
896 mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s | (2 + num));
897 mpw->state = MLX5_MPW_STATE_CLOSED;
902 rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci));
903 rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
907 * DPDK callback for TX with MPW support.
910 * Generic pointer to TX queue structure.
912 * Packets to transmit.
914 * Number of packets in array.
917 * Number of packets successfully transmitted (<= pkts_n).
920 mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
922 struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
923 uint16_t elts_head = txq->elts_head;
924 const uint16_t elts_n = 1 << txq->elts_n;
925 const uint16_t elts_m = elts_n - 1;
931 struct mlx5_mpw mpw = {
932 .state = MLX5_MPW_STATE_CLOSED,
935 if (unlikely(!pkts_n))
937 /* Prefetch first packet cacheline. */
938 rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci));
939 rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
940 /* Start processing. */
941 mlx5_tx_complete(txq);
942 max_elts = (elts_n - (elts_head - txq->elts_tail));
943 max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
944 if (unlikely(!max_wqe))
947 struct rte_mbuf *buf = *(pkts++);
949 unsigned int segs_n = buf->nb_segs;
953 * Make sure there is enough room to store this packet and
954 * that one ring entry remains unused.
957 if (max_elts < segs_n)
959 /* Do not bother with large packets MPW cannot handle. */
960 if (segs_n > MLX5_MPW_DSEG_MAX) {
961 txq->stats.oerrors++;
966 cs_flags = txq_ol_cksum_to_cs(buf);
967 /* Retrieve packet information. */
968 length = PKT_LEN(buf);
970 /* Start new session if packet differs. */
971 if ((mpw.state == MLX5_MPW_STATE_OPENED) &&
972 ((mpw.len != length) ||
974 (mpw.wqe->eseg.cs_flags != cs_flags)))
975 mlx5_mpw_close(txq, &mpw);
976 if (mpw.state == MLX5_MPW_STATE_CLOSED) {
978 * Multi-Packet WQE consumes at most two WQE.
979 * mlx5_mpw_new() expects to be able to use such
982 if (unlikely(max_wqe < 2))
985 mlx5_mpw_new(txq, &mpw, length);
986 mpw.wqe->eseg.cs_flags = cs_flags;
988 /* Multi-segment packets must be alone in their MPW. */
989 assert((segs_n == 1) || (mpw.pkts_n == 0));
990 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
994 volatile struct mlx5_wqe_data_seg *dseg;
998 (*txq->elts)[elts_head++ & elts_m] = buf;
999 dseg = mpw.data.dseg[mpw.pkts_n];
1000 addr = rte_pktmbuf_mtod(buf, uintptr_t);
1001 *dseg = (struct mlx5_wqe_data_seg){
1002 .byte_count = rte_cpu_to_be_32(DATA_LEN(buf)),
1003 .lkey = mlx5_tx_mb2mr(txq, buf),
1004 .addr = rte_cpu_to_be_64(addr),
1006 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
1007 length += DATA_LEN(buf);
1013 assert(length == mpw.len);
1014 if (mpw.pkts_n == MLX5_MPW_DSEG_MAX)
1015 mlx5_mpw_close(txq, &mpw);
1016 #ifdef MLX5_PMD_SOFT_COUNTERS
1017 /* Increment sent bytes counter. */
1018 txq->stats.obytes += length;
1022 /* Take a shortcut if nothing must be sent. */
1023 if (unlikely(i == 0))
1025 /* Check whether completion threshold has been reached. */
1026 /* "j" includes both packets and segments. */
1027 comp = txq->elts_comp + j;
1028 if (comp >= MLX5_TX_COMP_THRESH) {
1029 volatile struct mlx5_wqe *wqe = mpw.wqe;
1031 /* A CQE slot must always be available. */
1032 assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
1033 /* Request completion on last WQE. */
1034 wqe->ctrl[2] = rte_cpu_to_be_32(8);
1035 /* Save elts_head in unused "immediate" field of WQE. */
1036 wqe->ctrl[3] = elts_head;
1039 txq->elts_comp = comp;
1041 #ifdef MLX5_PMD_SOFT_COUNTERS
1042 /* Increment sent packets counter. */
1043 txq->stats.opackets += i;
1045 /* Ring QP doorbell. */
1046 if (mpw.state == MLX5_MPW_STATE_OPENED)
1047 mlx5_mpw_close(txq, &mpw);
1048 mlx5_tx_dbrec(txq, mpw.wqe);
1049 txq->elts_head = elts_head;
1054 * Open a MPW inline session.
1057 * Pointer to TX queue structure.
1059 * Pointer to MPW session structure.
1064 mlx5_mpw_inline_new(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw,
1067 uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
1068 struct mlx5_wqe_inl_small *inl;
1070 mpw->state = MLX5_MPW_INL_STATE_OPENED;
1074 mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
1075 mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) |
1076 (txq->wqe_ci << 8) |
1078 mpw->wqe->ctrl[2] = 0;
1079 mpw->wqe->ctrl[3] = 0;
1080 mpw->wqe->eseg.mss = rte_cpu_to_be_16(length);
1081 mpw->wqe->eseg.inline_hdr_sz = 0;
1082 mpw->wqe->eseg.cs_flags = 0;
1083 mpw->wqe->eseg.rsvd0 = 0;
1084 mpw->wqe->eseg.rsvd1 = 0;
1085 mpw->wqe->eseg.rsvd2 = 0;
1086 inl = (struct mlx5_wqe_inl_small *)
1087 (((uintptr_t)mpw->wqe) + 2 * MLX5_WQE_DWORD_SIZE);
1088 mpw->data.raw = (uint8_t *)&inl->raw;
1092 * Close a MPW inline session.
1095 * Pointer to TX queue structure.
1097 * Pointer to MPW session structure.
1100 mlx5_mpw_inline_close(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw)
1103 struct mlx5_wqe_inl_small *inl = (struct mlx5_wqe_inl_small *)
1104 (((uintptr_t)mpw->wqe) + (2 * MLX5_WQE_DWORD_SIZE));
1106 size = MLX5_WQE_SIZE - MLX5_MWQE64_INL_DATA + mpw->total_len;
1108 * Store size in multiple of 16 bytes. Control and Ethernet segments
1111 mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s |
1113 mpw->state = MLX5_MPW_STATE_CLOSED;
1114 inl->byte_cnt = rte_cpu_to_be_32(mpw->total_len | MLX5_INLINE_SEG);
1115 txq->wqe_ci += (size + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE;
1119 * DPDK callback for TX with MPW inline support.
1122 * Generic pointer to TX queue structure.
1124 * Packets to transmit.
1126 * Number of packets in array.
1129 * Number of packets successfully transmitted (<= pkts_n).
1132 mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
1135 struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
1136 uint16_t elts_head = txq->elts_head;
1137 const uint16_t elts_n = 1 << txq->elts_n;
1138 const uint16_t elts_m = elts_n - 1;
1144 unsigned int inline_room = txq->max_inline * RTE_CACHE_LINE_SIZE;
1145 struct mlx5_mpw mpw = {
1146 .state = MLX5_MPW_STATE_CLOSED,
1149 * Compute the maximum number of WQE which can be consumed by inline
1152 * - 1 control segment,
1153 * - 1 Ethernet segment,
1154 * - N Dseg from the inline request.
1156 const unsigned int wqe_inl_n =
1157 ((2 * MLX5_WQE_DWORD_SIZE +
1158 txq->max_inline * RTE_CACHE_LINE_SIZE) +
1159 RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
1161 if (unlikely(!pkts_n))
1163 /* Prefetch first packet cacheline. */
1164 rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci));
1165 rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
1166 /* Start processing. */
1167 mlx5_tx_complete(txq);
1168 max_elts = (elts_n - (elts_head - txq->elts_tail));
1170 struct rte_mbuf *buf = *(pkts++);
1173 unsigned int segs_n = buf->nb_segs;
1177 * Make sure there is enough room to store this packet and
1178 * that one ring entry remains unused.
1181 if (max_elts < segs_n)
1183 /* Do not bother with large packets MPW cannot handle. */
1184 if (segs_n > MLX5_MPW_DSEG_MAX) {
1185 txq->stats.oerrors++;
1191 * Compute max_wqe in case less WQE were consumed in previous
1194 max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
1195 cs_flags = txq_ol_cksum_to_cs(buf);
1196 /* Retrieve packet information. */
1197 length = PKT_LEN(buf);
1198 /* Start new session if packet differs. */
1199 if (mpw.state == MLX5_MPW_STATE_OPENED) {
1200 if ((mpw.len != length) ||
1202 (mpw.wqe->eseg.cs_flags != cs_flags))
1203 mlx5_mpw_close(txq, &mpw);
1204 } else if (mpw.state == MLX5_MPW_INL_STATE_OPENED) {
1205 if ((mpw.len != length) ||
1207 (length > inline_room) ||
1208 (mpw.wqe->eseg.cs_flags != cs_flags)) {
1209 mlx5_mpw_inline_close(txq, &mpw);
1211 txq->max_inline * RTE_CACHE_LINE_SIZE;
1214 if (mpw.state == MLX5_MPW_STATE_CLOSED) {
1215 if ((segs_n != 1) ||
1216 (length > inline_room)) {
1218 * Multi-Packet WQE consumes at most two WQE.
1219 * mlx5_mpw_new() expects to be able to use
1222 if (unlikely(max_wqe < 2))
1225 mlx5_mpw_new(txq, &mpw, length);
1226 mpw.wqe->eseg.cs_flags = cs_flags;
1228 if (unlikely(max_wqe < wqe_inl_n))
1230 max_wqe -= wqe_inl_n;
1231 mlx5_mpw_inline_new(txq, &mpw, length);
1232 mpw.wqe->eseg.cs_flags = cs_flags;
1235 /* Multi-segment packets must be alone in their MPW. */
1236 assert((segs_n == 1) || (mpw.pkts_n == 0));
1237 if (mpw.state == MLX5_MPW_STATE_OPENED) {
1238 assert(inline_room ==
1239 txq->max_inline * RTE_CACHE_LINE_SIZE);
1240 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
1244 volatile struct mlx5_wqe_data_seg *dseg;
1247 (*txq->elts)[elts_head++ & elts_m] = buf;
1248 dseg = mpw.data.dseg[mpw.pkts_n];
1249 addr = rte_pktmbuf_mtod(buf, uintptr_t);
1250 *dseg = (struct mlx5_wqe_data_seg){
1252 rte_cpu_to_be_32(DATA_LEN(buf)),
1253 .lkey = mlx5_tx_mb2mr(txq, buf),
1254 .addr = rte_cpu_to_be_64(addr),
1256 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
1257 length += DATA_LEN(buf);
1263 assert(length == mpw.len);
1264 if (mpw.pkts_n == MLX5_MPW_DSEG_MAX)
1265 mlx5_mpw_close(txq, &mpw);
1269 assert(mpw.state == MLX5_MPW_INL_STATE_OPENED);
1270 assert(length <= inline_room);
1271 assert(length == DATA_LEN(buf));
1272 addr = rte_pktmbuf_mtod(buf, uintptr_t);
1273 (*txq->elts)[elts_head++ & elts_m] = buf;
1274 /* Maximum number of bytes before wrapping. */
1275 max = ((((uintptr_t)(txq->wqes)) +
1278 (uintptr_t)mpw.data.raw);
1280 rte_memcpy((void *)(uintptr_t)mpw.data.raw,
1283 mpw.data.raw = (volatile void *)txq->wqes;
1284 rte_memcpy((void *)(uintptr_t)mpw.data.raw,
1285 (void *)(addr + max),
1287 mpw.data.raw += length - max;
1289 rte_memcpy((void *)(uintptr_t)mpw.data.raw,
1295 (volatile void *)txq->wqes;
1297 mpw.data.raw += length;
1300 mpw.total_len += length;
1302 if (mpw.pkts_n == MLX5_MPW_DSEG_MAX) {
1303 mlx5_mpw_inline_close(txq, &mpw);
1305 txq->max_inline * RTE_CACHE_LINE_SIZE;
1307 inline_room -= length;
1310 #ifdef MLX5_PMD_SOFT_COUNTERS
1311 /* Increment sent bytes counter. */
1312 txq->stats.obytes += length;
1316 /* Take a shortcut if nothing must be sent. */
1317 if (unlikely(i == 0))
1319 /* Check whether completion threshold has been reached. */
1320 /* "j" includes both packets and segments. */
1321 comp = txq->elts_comp + j;
1322 if (comp >= MLX5_TX_COMP_THRESH) {
1323 volatile struct mlx5_wqe *wqe = mpw.wqe;
1325 /* A CQE slot must always be available. */
1326 assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
1327 /* Request completion on last WQE. */
1328 wqe->ctrl[2] = rte_cpu_to_be_32(8);
1329 /* Save elts_head in unused "immediate" field of WQE. */
1330 wqe->ctrl[3] = elts_head;
1333 txq->elts_comp = comp;
1335 #ifdef MLX5_PMD_SOFT_COUNTERS
1336 /* Increment sent packets counter. */
1337 txq->stats.opackets += i;
1339 /* Ring QP doorbell. */
1340 if (mpw.state == MLX5_MPW_INL_STATE_OPENED)
1341 mlx5_mpw_inline_close(txq, &mpw);
1342 else if (mpw.state == MLX5_MPW_STATE_OPENED)
1343 mlx5_mpw_close(txq, &mpw);
1344 mlx5_tx_dbrec(txq, mpw.wqe);
1345 txq->elts_head = elts_head;
1350 * Open an Enhanced MPW session.
1353 * Pointer to TX queue structure.
1355 * Pointer to MPW session structure.
1360 mlx5_empw_new(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw, int padding)
1362 uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
1364 mpw->state = MLX5_MPW_ENHANCED_STATE_OPENED;
1366 mpw->total_len = sizeof(struct mlx5_wqe);
1367 mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
1369 rte_cpu_to_be_32((MLX5_OPC_MOD_ENHANCED_MPSW << 24) |
1370 (txq->wqe_ci << 8) |
1371 MLX5_OPCODE_ENHANCED_MPSW);
1372 mpw->wqe->ctrl[2] = 0;
1373 mpw->wqe->ctrl[3] = 0;
1374 memset((void *)(uintptr_t)&mpw->wqe->eseg, 0, MLX5_WQE_DWORD_SIZE);
1375 if (unlikely(padding)) {
1376 uintptr_t addr = (uintptr_t)(mpw->wqe + 1);
1378 /* Pad the first 2 DWORDs with zero-length inline header. */
1379 *(volatile uint32_t *)addr = rte_cpu_to_be_32(MLX5_INLINE_SEG);
1380 *(volatile uint32_t *)(addr + MLX5_WQE_DWORD_SIZE) =
1381 rte_cpu_to_be_32(MLX5_INLINE_SEG);
1382 mpw->total_len += 2 * MLX5_WQE_DWORD_SIZE;
1383 /* Start from the next WQEBB. */
1384 mpw->data.raw = (volatile void *)(tx_mlx5_wqe(txq, idx + 1));
1386 mpw->data.raw = (volatile void *)(mpw->wqe + 1);
1391 * Close an Enhanced MPW session.
1394 * Pointer to TX queue structure.
1396 * Pointer to MPW session structure.
1399 * Number of consumed WQEs.
1401 static inline uint16_t
1402 mlx5_empw_close(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw)
1406 /* Store size in multiple of 16 bytes. Control and Ethernet segments
1409 mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s |
1410 MLX5_WQE_DS(mpw->total_len));
1411 mpw->state = MLX5_MPW_STATE_CLOSED;
1412 ret = (mpw->total_len + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE;
1418 * TX with Enhanced MPW support.
1421 * Pointer to TX queue structure.
1423 * Packets to transmit.
1425 * Number of packets in array.
1428 * Number of packets successfully transmitted (<= pkts_n).
1430 static inline uint16_t
1431 txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
1434 uint16_t elts_head = txq->elts_head;
1435 const uint16_t elts_n = 1 << txq->elts_n;
1436 const uint16_t elts_m = elts_n - 1;
1441 unsigned int max_inline = txq->max_inline * RTE_CACHE_LINE_SIZE;
1442 unsigned int mpw_room = 0;
1443 unsigned int inl_pad = 0;
1446 struct mlx5_mpw mpw = {
1447 .state = MLX5_MPW_STATE_CLOSED,
1450 if (unlikely(!pkts_n))
1452 /* Start processing. */
1453 mlx5_tx_complete(txq);
1454 max_elts = (elts_n - (elts_head - txq->elts_tail));
1455 max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
1456 if (unlikely(!max_wqe))
1459 struct rte_mbuf *buf = *(pkts++);
1461 unsigned int do_inline = 0; /* Whether inline is possible. */
1465 /* Multi-segmented packet is handled in slow-path outside. */
1466 assert(NB_SEGS(buf) == 1);
1467 /* Make sure there is enough room to store this packet. */
1468 if (max_elts - j == 0)
1470 cs_flags = txq_ol_cksum_to_cs(buf);
1471 /* Retrieve packet information. */
1472 length = PKT_LEN(buf);
1473 /* Start new session if:
1474 * - multi-segment packet
1475 * - no space left even for a dseg
1476 * - next packet can be inlined with a new WQE
1479 if (mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED) {
1480 if ((inl_pad + sizeof(struct mlx5_wqe_data_seg) >
1482 (length <= txq->inline_max_packet_sz &&
1483 inl_pad + sizeof(inl_hdr) + length >
1485 (mpw.wqe->eseg.cs_flags != cs_flags))
1486 max_wqe -= mlx5_empw_close(txq, &mpw);
1488 if (unlikely(mpw.state == MLX5_MPW_STATE_CLOSED)) {
1489 /* In Enhanced MPW, inline as much as the budget is
1490 * allowed. The remaining space is to be filled with
1491 * dsegs. If the title WQEBB isn't padded, it will have
1494 mpw_room = RTE_MIN(MLX5_WQE_SIZE_MAX,
1495 (max_inline ? max_inline :
1496 pkts_n * MLX5_WQE_DWORD_SIZE) +
1498 if (unlikely(max_wqe * MLX5_WQE_SIZE < mpw_room))
1500 /* Don't pad the title WQEBB to not waste WQ. */
1501 mlx5_empw_new(txq, &mpw, 0);
1502 mpw_room -= mpw.total_len;
1504 do_inline = length <= txq->inline_max_packet_sz &&
1505 sizeof(inl_hdr) + length <= mpw_room &&
1507 mpw.wqe->eseg.cs_flags = cs_flags;
1509 /* Evaluate whether the next packet can be inlined.
1510 * Inlininig is possible when:
1511 * - length is less than configured value
1512 * - length fits for remaining space
1513 * - not required to fill the title WQEBB with dsegs
1516 length <= txq->inline_max_packet_sz &&
1517 inl_pad + sizeof(inl_hdr) + length <=
1519 (!txq->mpw_hdr_dseg ||
1520 mpw.total_len >= MLX5_WQE_SIZE);
1522 if (max_inline && do_inline) {
1523 /* Inline packet into WQE. */
1526 assert(mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED);
1527 assert(length == DATA_LEN(buf));
1528 inl_hdr = rte_cpu_to_be_32(length | MLX5_INLINE_SEG);
1529 addr = rte_pktmbuf_mtod(buf, uintptr_t);
1530 mpw.data.raw = (volatile void *)
1531 ((uintptr_t)mpw.data.raw + inl_pad);
1532 max = tx_mlx5_wq_tailroom(txq,
1533 (void *)(uintptr_t)mpw.data.raw);
1534 /* Copy inline header. */
1535 mpw.data.raw = (volatile void *)
1537 (void *)(uintptr_t)mpw.data.raw,
1540 (void *)(uintptr_t)txq->wqes,
1542 max = tx_mlx5_wq_tailroom(txq,
1543 (void *)(uintptr_t)mpw.data.raw);
1544 /* Copy packet data. */
1545 mpw.data.raw = (volatile void *)
1547 (void *)(uintptr_t)mpw.data.raw,
1550 (void *)(uintptr_t)txq->wqes,
1553 mpw.total_len += (inl_pad + sizeof(inl_hdr) + length);
1554 /* No need to get completion as the entire packet is
1555 * copied to WQ. Free the buf right away.
1557 rte_pktmbuf_free_seg(buf);
1558 mpw_room -= (inl_pad + sizeof(inl_hdr) + length);
1559 /* Add pad in the next packet if any. */
1560 inl_pad = (((uintptr_t)mpw.data.raw +
1561 (MLX5_WQE_DWORD_SIZE - 1)) &
1562 ~(MLX5_WQE_DWORD_SIZE - 1)) -
1563 (uintptr_t)mpw.data.raw;
1565 /* No inline. Load a dseg of packet pointer. */
1566 volatile rte_v128u32_t *dseg;
1568 assert(mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED);
1569 assert((inl_pad + sizeof(*dseg)) <= mpw_room);
1570 assert(length == DATA_LEN(buf));
1571 if (!tx_mlx5_wq_tailroom(txq,
1572 (void *)((uintptr_t)mpw.data.raw
1574 dseg = (volatile void *)txq->wqes;
1576 dseg = (volatile void *)
1577 ((uintptr_t)mpw.data.raw +
1579 (*txq->elts)[elts_head++ & elts_m] = buf;
1580 addr_64 = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf,
1582 *dseg = (rte_v128u32_t) {
1583 rte_cpu_to_be_32(length),
1584 mlx5_tx_mb2mr(txq, buf),
1588 mpw.data.raw = (volatile void *)(dseg + 1);
1589 mpw.total_len += (inl_pad + sizeof(*dseg));
1592 mpw_room -= (inl_pad + sizeof(*dseg));
1595 #ifdef MLX5_PMD_SOFT_COUNTERS
1596 /* Increment sent bytes counter. */
1597 txq->stats.obytes += length;
1600 } while (i < pkts_n);
1601 /* Take a shortcut if nothing must be sent. */
1602 if (unlikely(i == 0))
1604 /* Check whether completion threshold has been reached. */
1605 if (txq->elts_comp + j >= MLX5_TX_COMP_THRESH ||
1606 (uint16_t)(txq->wqe_ci - txq->mpw_comp) >=
1607 (1 << txq->wqe_n) / MLX5_TX_COMP_THRESH_INLINE_DIV) {
1608 volatile struct mlx5_wqe *wqe = mpw.wqe;
1610 /* A CQE slot must always be available. */
1611 assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
1612 /* Request completion on last WQE. */
1613 wqe->ctrl[2] = rte_cpu_to_be_32(8);
1614 /* Save elts_head in unused "immediate" field of WQE. */
1615 wqe->ctrl[3] = elts_head;
1617 txq->mpw_comp = txq->wqe_ci;
1619 txq->elts_comp += j;
1621 #ifdef MLX5_PMD_SOFT_COUNTERS
1622 /* Increment sent packets counter. */
1623 txq->stats.opackets += i;
1625 if (mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED)
1626 mlx5_empw_close(txq, &mpw);
1627 /* Ring QP doorbell. */
1628 mlx5_tx_dbrec(txq, mpw.wqe);
1629 txq->elts_head = elts_head;
1634 * DPDK callback for TX with Enhanced MPW support.
1637 * Generic pointer to TX queue structure.
1639 * Packets to transmit.
1641 * Number of packets in array.
1644 * Number of packets successfully transmitted (<= pkts_n).
1647 mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
1649 struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
1652 while (pkts_n > nb_tx) {
1656 n = txq_count_contig_multi_seg(&pkts[nb_tx], pkts_n - nb_tx);
1658 ret = mlx5_tx_burst(dpdk_txq, &pkts[nb_tx], n);
1663 n = txq_count_contig_single_seg(&pkts[nb_tx], pkts_n - nb_tx);
1665 ret = txq_burst_empw(txq, &pkts[nb_tx], n);
1675 * Translate RX completion flags to packet type.
1678 * Pointer to RX queue structure.
1682 * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
1685 * Packet type for struct rte_mbuf.
1687 static inline uint32_t
1688 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
1691 uint8_t pinfo = cqe->pkt_info;
1692 uint16_t ptype = cqe->hdr_type_etc;
1695 * The index to the array should have:
1696 * bit[1:0] = l3_hdr_type
1697 * bit[4:2] = l4_hdr_type
1700 * bit[7] = outer_l3_type
1702 idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10);
1703 return mlx5_ptype_table[idx] | rxq->tunnel * !!(idx & (1 << 6));
1707 * Get size of the next packet for a given CQE. For compressed CQEs, the
1708 * consumer index is updated only once all packets of the current one have
1712 * Pointer to RX queue.
1716 * Store pointer to mini-CQE if compressed. Otherwise, the pointer is not
1720 * Packet size in bytes (0 if there is none), -1 in case of completion
1724 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
1725 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe)
1727 struct rxq_zip *zip = &rxq->zip;
1728 uint16_t cqe_n = cqe_cnt + 1;
1732 /* Process compressed data in the CQE and mini arrays. */
1734 volatile struct mlx5_mini_cqe8 (*mc)[8] =
1735 (volatile struct mlx5_mini_cqe8 (*)[8])
1736 (uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt].pkt_info);
1738 len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt);
1739 *mcqe = &(*mc)[zip->ai & 7];
1740 if ((++zip->ai & 7) == 0) {
1741 /* Invalidate consumed CQEs */
1744 while (idx != end) {
1745 (*rxq->cqes)[idx & cqe_cnt].op_own =
1746 MLX5_CQE_INVALIDATE;
1750 * Increment consumer index to skip the number of
1751 * CQEs consumed. Hardware leaves holes in the CQ
1752 * ring for software use.
1757 if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) {
1758 /* Invalidate the rest */
1762 while (idx != end) {
1763 (*rxq->cqes)[idx & cqe_cnt].op_own =
1764 MLX5_CQE_INVALIDATE;
1767 rxq->cq_ci = zip->cq_ci;
1770 /* No compressed data, get next CQE and verify if it is compressed. */
1775 ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
1776 if (unlikely(ret == 1))
1779 op_own = cqe->op_own;
1781 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) {
1782 volatile struct mlx5_mini_cqe8 (*mc)[8] =
1783 (volatile struct mlx5_mini_cqe8 (*)[8])
1784 (uintptr_t)(&(*rxq->cqes)[rxq->cq_ci &
1787 /* Fix endianness. */
1788 zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
1790 * Current mini array position is the one returned by
1793 * If completion comprises several mini arrays, as a
1794 * special case the second one is located 7 CQEs after
1795 * the initial CQE instead of 8 for subsequent ones.
1797 zip->ca = rxq->cq_ci;
1798 zip->na = zip->ca + 7;
1799 /* Compute the next non compressed CQE. */
1801 zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
1802 /* Get packet size to return. */
1803 len = rte_be_to_cpu_32((*mc)[0].byte_cnt);
1806 /* Prefetch all the entries to be invalidated */
1809 while (idx != end) {
1810 rte_prefetch0(&(*rxq->cqes)[(idx) & cqe_cnt]);
1814 len = rte_be_to_cpu_32(cqe->byte_cnt);
1816 /* Error while receiving packet. */
1817 if (unlikely(MLX5_CQE_OPCODE(op_own) == MLX5_CQE_RESP_ERR))
1824 * Translate RX completion flags to offload flags.
1830 * Offload flags (ol_flags) for struct rte_mbuf.
1832 static inline uint32_t
1833 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe)
1835 uint32_t ol_flags = 0;
1836 uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);
1840 MLX5_CQE_RX_L3_HDR_VALID,
1841 PKT_RX_IP_CKSUM_GOOD) |
1843 MLX5_CQE_RX_L4_HDR_VALID,
1844 PKT_RX_L4_CKSUM_GOOD);
1849 * Fill in mbuf fields from RX completion flags.
1850 * Note that pkt->ol_flags should be initialized outside of this function.
1853 * Pointer to RX queue.
1858 * @param rss_hash_res
1859 * Packet RSS Hash result.
1862 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
1863 volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res)
1865 /* Update packet information. */
1866 pkt->packet_type = rxq_cq_to_pkt_type(rxq, cqe);
1867 if (rss_hash_res && rxq->rss_hash) {
1868 pkt->hash.rss = rss_hash_res;
1869 pkt->ol_flags |= PKT_RX_RSS_HASH;
1871 if (rxq->mark && MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) {
1872 pkt->ol_flags |= PKT_RX_FDIR;
1873 if (cqe->sop_drop_qpn !=
1874 rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) {
1875 uint32_t mark = cqe->sop_drop_qpn;
1877 pkt->ol_flags |= PKT_RX_FDIR_ID;
1878 pkt->hash.fdir.hi = mlx5_flow_mark_get(mark);
1882 pkt->ol_flags |= rxq_cq_to_ol_flags(cqe);
1883 if (rxq->vlan_strip &&
1884 (cqe->hdr_type_etc & rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) {
1885 pkt->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1886 pkt->vlan_tci = rte_be_to_cpu_16(cqe->vlan_info);
1888 if (rxq->hw_timestamp) {
1889 pkt->timestamp = rte_be_to_cpu_64(cqe->timestamp);
1890 pkt->ol_flags |= PKT_RX_TIMESTAMP;
1895 * DPDK callback for RX.
1898 * Generic pointer to RX queue structure.
1900 * Array to store received packets.
1902 * Maximum number of packets in array.
1905 * Number of packets successfully received (<= pkts_n).
1908 mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1910 struct mlx5_rxq_data *rxq = dpdk_rxq;
1911 const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1;
1912 const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1;
1913 const unsigned int sges_n = rxq->sges_n;
1914 struct rte_mbuf *pkt = NULL;
1915 struct rte_mbuf *seg = NULL;
1916 volatile struct mlx5_cqe *cqe =
1917 &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1919 unsigned int rq_ci = rxq->rq_ci << sges_n;
1920 int len = 0; /* keep its value across iterations. */
1923 unsigned int idx = rq_ci & wqe_cnt;
1924 volatile struct mlx5_wqe_data_seg *wqe =
1925 &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx];
1926 struct rte_mbuf *rep = (*rxq->elts)[idx];
1927 volatile struct mlx5_mini_cqe8 *mcqe = NULL;
1928 uint32_t rss_hash_res;
1936 rep = rte_mbuf_raw_alloc(rxq->mp);
1937 if (unlikely(rep == NULL)) {
1938 ++rxq->stats.rx_nombuf;
1941 * no buffers before we even started,
1942 * bail out silently.
1946 while (pkt != seg) {
1947 assert(pkt != (*rxq->elts)[idx]);
1951 rte_mbuf_raw_free(pkt);
1957 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1958 len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe);
1960 rte_mbuf_raw_free(rep);
1963 if (unlikely(len == -1)) {
1964 /* RX error, packet is likely too large. */
1965 rte_mbuf_raw_free(rep);
1966 ++rxq->stats.idropped;
1970 assert(len >= (rxq->crc_present << 2));
1972 /* If compressed, take hash result from mini-CQE. */
1973 rss_hash_res = rte_be_to_cpu_32(mcqe == NULL ?
1975 mcqe->rx_hash_result);
1976 rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
1977 if (rxq->crc_present)
1978 len -= ETHER_CRC_LEN;
1981 DATA_LEN(rep) = DATA_LEN(seg);
1982 PKT_LEN(rep) = PKT_LEN(seg);
1983 SET_DATA_OFF(rep, DATA_OFF(seg));
1984 PORT(rep) = PORT(seg);
1985 (*rxq->elts)[idx] = rep;
1987 * Fill NIC descriptor with the new buffer. The lkey and size
1988 * of the buffers are already known, only the buffer address
1991 wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
1992 /* If there's only one MR, no need to replace LKey in WQE. */
1993 if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
1994 wqe->lkey = mlx5_rx_mb2mr(rxq, rep);
1995 if (len > DATA_LEN(seg)) {
1996 len -= DATA_LEN(seg);
2001 DATA_LEN(seg) = len;
2002 #ifdef MLX5_PMD_SOFT_COUNTERS
2003 /* Increment bytes counter. */
2004 rxq->stats.ibytes += PKT_LEN(pkt);
2006 /* Return packet. */
2012 /* Align consumer index to the next stride. */
2017 if (unlikely((i == 0) && ((rq_ci >> sges_n) == rxq->rq_ci)))
2019 /* Update the consumer index. */
2020 rxq->rq_ci = rq_ci >> sges_n;
2022 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
2024 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
2025 #ifdef MLX5_PMD_SOFT_COUNTERS
2026 /* Increment packets counter. */
2027 rxq->stats.ipackets += i;
2033 mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
2035 struct mlx5_mprq_buf *buf = opaque;
2037 if (rte_atomic16_read(&buf->refcnt) == 1) {
2038 rte_mempool_put(buf->mp, buf);
2039 } else if (rte_atomic16_add_return(&buf->refcnt, -1) == 0) {
2040 rte_atomic16_set(&buf->refcnt, 1);
2041 rte_mempool_put(buf->mp, buf);
2046 mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf)
2048 mlx5_mprq_buf_free_cb(NULL, buf);
2052 mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx)
2054 struct mlx5_mprq_buf *rep = rxq->mprq_repl;
2055 volatile struct mlx5_wqe_data_seg *wqe =
2056 &((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg;
2059 assert(rep != NULL);
2060 /* Replace MPRQ buf. */
2061 (*rxq->mprq_bufs)[rq_idx] = rep;
2063 addr = mlx5_mprq_buf_addr(rep);
2064 wqe->addr = rte_cpu_to_be_64((uintptr_t)addr);
2065 /* If there's only one MR, no need to replace LKey in WQE. */
2066 if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
2067 wqe->lkey = mlx5_rx_addr2mr(rxq, (uintptr_t)addr);
2068 /* Stash a mbuf for next replacement. */
2069 if (likely(!rte_mempool_get(rxq->mprq_mp, (void **)&rep)))
2070 rxq->mprq_repl = rep;
2072 rxq->mprq_repl = NULL;
2076 * DPDK callback for RX with Multi-Packet RQ support.
2079 * Generic pointer to RX queue structure.
2081 * Array to store received packets.
2083 * Maximum number of packets in array.
2086 * Number of packets successfully received (<= pkts_n).
2089 mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
2091 struct mlx5_rxq_data *rxq = dpdk_rxq;
2092 const unsigned int strd_n = 1 << rxq->strd_num_n;
2093 const unsigned int strd_sz = 1 << rxq->strd_sz_n;
2094 const unsigned int strd_shift =
2095 MLX5_MPRQ_STRIDE_SHIFT_BYTE * rxq->strd_shift_en;
2096 const unsigned int cq_mask = (1 << rxq->cqe_n) - 1;
2097 const unsigned int wq_mask = (1 << rxq->elts_n) - 1;
2098 volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
2100 uint32_t rq_ci = rxq->rq_ci;
2101 uint16_t consumed_strd = rxq->consumed_strd;
2102 struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
2104 while (i < pkts_n) {
2105 struct rte_mbuf *pkt;
2113 volatile struct mlx5_mini_cqe8 *mcqe = NULL;
2114 uint32_t rss_hash_res = 0;
2116 if (consumed_strd == strd_n) {
2117 /* Replace WQE only if the buffer is still in use. */
2118 if (rte_atomic16_read(&buf->refcnt) > 1) {
2119 mprq_buf_replace(rxq, rq_ci & wq_mask);
2120 /* Release the old buffer. */
2121 mlx5_mprq_buf_free(buf);
2122 } else if (unlikely(rxq->mprq_repl == NULL)) {
2123 struct mlx5_mprq_buf *rep;
2126 * Currently, the MPRQ mempool is out of buffer
2127 * and doing memcpy regardless of the size of Rx
2128 * packet. Retry allocation to get back to
2131 if (!rte_mempool_get(rxq->mprq_mp,
2133 rxq->mprq_repl = rep;
2135 /* Advance to the next WQE. */
2138 buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
2140 cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
2141 ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe);
2144 if (unlikely(ret == -1)) {
2145 /* RX error, packet is likely too large. */
2146 ++rxq->stats.idropped;
2150 strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
2151 MLX5_MPRQ_STRIDE_NUM_SHIFT;
2153 consumed_strd += strd_cnt;
2154 if (byte_cnt & MLX5_MPRQ_FILLER_MASK)
2157 rss_hash_res = rte_be_to_cpu_32(cqe->rx_hash_res);
2158 strd_idx = rte_be_to_cpu_16(cqe->wqe_counter);
2160 /* mini-CQE for MPRQ doesn't have hash result. */
2161 strd_idx = rte_be_to_cpu_16(mcqe->stride_idx);
2163 assert(strd_idx < strd_n);
2164 assert(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) & wq_mask));
2166 * Currently configured to receive a packet per a stride. But if
2167 * MTU is adjusted through kernel interface, device could
2168 * consume multiple strides without raising an error. In this
2169 * case, the packet should be dropped because it is bigger than
2170 * the max_rx_pkt_len.
2172 if (unlikely(strd_cnt > 1)) {
2173 ++rxq->stats.idropped;
2176 pkt = rte_pktmbuf_alloc(rxq->mp);
2177 if (unlikely(pkt == NULL)) {
2178 ++rxq->stats.rx_nombuf;
2181 len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
2182 assert((int)len >= (rxq->crc_present << 2));
2183 if (rxq->crc_present)
2184 len -= ETHER_CRC_LEN;
2185 offset = strd_idx * strd_sz + strd_shift;
2186 addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf), offset);
2187 /* Initialize the offload flag. */
2190 * Memcpy packets to the target mbuf if:
2191 * - The size of packet is smaller than mprq_max_memcpy_len.
2192 * - Out of buffer in the Mempool for Multi-Packet RQ.
2194 if (len <= rxq->mprq_max_memcpy_len || rxq->mprq_repl == NULL) {
2196 * When memcpy'ing packet due to out-of-buffer, the
2197 * packet must be smaller than the target mbuf.
2199 if (unlikely(rte_pktmbuf_tailroom(pkt) < len)) {
2200 rte_pktmbuf_free_seg(pkt);
2201 ++rxq->stats.idropped;
2204 rte_memcpy(rte_pktmbuf_mtod(pkt, void *), addr, len);
2206 rte_iova_t buf_iova;
2207 struct rte_mbuf_ext_shared_info *shinfo;
2208 uint16_t buf_len = strd_cnt * strd_sz;
2210 /* Increment the refcnt of the whole chunk. */
2211 rte_atomic16_add_return(&buf->refcnt, 1);
2212 assert((uint16_t)rte_atomic16_read(&buf->refcnt) <=
2214 addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM);
2216 * MLX5 device doesn't use iova but it is necessary in a
2217 * case where the Rx packet is transmitted via a
2220 buf_iova = rte_mempool_virt2iova(buf) +
2221 RTE_PTR_DIFF(addr, buf);
2222 shinfo = rte_pktmbuf_ext_shinfo_init_helper(addr,
2223 &buf_len, mlx5_mprq_buf_free_cb, buf);
2225 * EXT_ATTACHED_MBUF will be set to pkt->ol_flags when
2226 * attaching the stride to mbuf and more offload flags
2227 * will be added below by calling rxq_cq_to_mbuf().
2228 * Other fields will be overwritten.
2230 rte_pktmbuf_attach_extbuf(pkt, addr, buf_iova, buf_len,
2232 rte_pktmbuf_reset_headroom(pkt);
2233 assert(pkt->ol_flags == EXT_ATTACHED_MBUF);
2235 * Prevent potential overflow due to MTU change through
2238 if (unlikely(rte_pktmbuf_tailroom(pkt) < len)) {
2239 rte_pktmbuf_free_seg(pkt);
2240 ++rxq->stats.idropped;
2244 rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
2246 DATA_LEN(pkt) = len;
2247 PORT(pkt) = rxq->port_id;
2248 #ifdef MLX5_PMD_SOFT_COUNTERS
2249 /* Increment bytes counter. */
2250 rxq->stats.ibytes += PKT_LEN(pkt);
2252 /* Return packet. */
2256 /* Update the consumer indexes. */
2257 rxq->consumed_strd = consumed_strd;
2259 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
2260 if (rq_ci != rxq->rq_ci) {
2263 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
2265 #ifdef MLX5_PMD_SOFT_COUNTERS
2266 /* Increment packets counter. */
2267 rxq->stats.ipackets += i;
2273 * Dummy DPDK callback for TX.
2275 * This function is used to temporarily replace the real callback during
2276 * unsafe control operations on the queue, or in case of error.
2279 * Generic pointer to TX queue structure.
2281 * Packets to transmit.
2283 * Number of packets in array.
2286 * Number of packets successfully transmitted (<= pkts_n).
2289 removed_tx_burst(void *dpdk_txq __rte_unused,
2290 struct rte_mbuf **pkts __rte_unused,
2291 uint16_t pkts_n __rte_unused)
2297 * Dummy DPDK callback for RX.
2299 * This function is used to temporarily replace the real callback during
2300 * unsafe control operations on the queue, or in case of error.
2303 * Generic pointer to RX queue structure.
2305 * Array to store received packets.
2307 * Maximum number of packets in array.
2310 * Number of packets successfully received (<= pkts_n).
2313 removed_rx_burst(void *dpdk_txq __rte_unused,
2314 struct rte_mbuf **pkts __rte_unused,
2315 uint16_t pkts_n __rte_unused)
2321 * Vectorized Rx/Tx routines are not compiled in when required vector
2322 * instructions are not supported on a target architecture. The following null
2323 * stubs are needed for linkage when those are not included outside of this file
2324 * (e.g. mlx5_rxtx_vec_sse.c for x86).
2327 uint16_t __attribute__((weak))
2328 mlx5_tx_burst_raw_vec(void *dpdk_txq __rte_unused,
2329 struct rte_mbuf **pkts __rte_unused,
2330 uint16_t pkts_n __rte_unused)
2335 uint16_t __attribute__((weak))
2336 mlx5_tx_burst_vec(void *dpdk_txq __rte_unused,
2337 struct rte_mbuf **pkts __rte_unused,
2338 uint16_t pkts_n __rte_unused)
2343 uint16_t __attribute__((weak))
2344 mlx5_rx_burst_vec(void *dpdk_txq __rte_unused,
2345 struct rte_mbuf **pkts __rte_unused,
2346 uint16_t pkts_n __rte_unused)
2351 int __attribute__((weak))
2352 mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev __rte_unused)
2357 int __attribute__((weak))
2358 mlx5_check_vec_tx_support(struct rte_eth_dev *dev __rte_unused)
2363 int __attribute__((weak))
2364 mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)
2369 int __attribute__((weak))
2370 mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused)