4 * Copyright 2017 6WIND S.A.
5 * Copyright 2017 Mellanox.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of 6WIND S.A. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38 #include <smmintrin.h>
41 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
43 #pragma GCC diagnostic ignored "-Wpedantic"
45 #include <infiniband/verbs.h>
46 #include <infiniband/mlx5dv.h>
48 #pragma GCC diagnostic error "-Wpedantic"
52 #include <rte_mempool.h>
53 #include <rte_prefetch.h>
56 #include "mlx5_utils.h"
57 #include "mlx5_rxtx.h"
58 #include "mlx5_autoconf.h"
59 #include "mlx5_defs.h"
62 #ifndef __INTEL_COMPILER
63 #pragma GCC diagnostic ignored "-Wcast-qual"
67 * Fill in buffer descriptors in a multi-packet send descriptor.
70 * Pointer to TX queue structure.
72 * Pointer to buffer descriptor to be written.
74 * Pointer to array of packets to be sent.
76 * Number of packets to be filled.
79 txq_wr_dseg_v(struct mlx5_txq_data *txq, __m128i *dseg,
80 struct rte_mbuf **pkts, unsigned int n)
84 const __m128i shuf_mask_dseg =
85 _mm_set_epi8(8, 9, 10, 11, /* addr, bswap64 */
87 7, 6, 5, 4, /* lkey */
88 0, 1, 2, 3 /* length, bswap32 */);
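/*
 * A 16B send data segment is {byte_count, lkey, addr}, big-endian on the
 * wire; the shuffle above byte-swaps the length and address fields into
 * that layout while the lkey is copied as-is.
 */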
89 #ifdef MLX5_PMD_SOFT_COUNTERS
93 for (pos = 0; pos < n; ++pos, ++dseg) {
95 struct rte_mbuf *pkt = pkts[pos];
97 addr = rte_pktmbuf_mtod(pkt, uintptr_t);
98 desc = _mm_set_epi32(addr >> 32, addr,
100 mlx5_tx_mb2mr(txq, pkt), DATA_LEN(pkt));
102 desc = _mm_shuffle_epi8(desc, shuf_mask_dseg);
103 _mm_store_si128(dseg, desc);
104 #ifdef MLX5_PMD_SOFT_COUNTERS
105 tx_byte += DATA_LEN(pkt);
108 #ifdef MLX5_PMD_SOFT_COUNTERS
109 txq->stats.obytes += tx_byte;
114 * Count the number of consecutive single segment packets.
117 * Pointer to array of packets.
122 * Number of consecutive single segment packets.
124 static inline unsigned int
125 txq_check_multiseg(struct rte_mbuf **pkts, uint16_t pkts_n)
132 /* Count the number of consecutive single segment packets. */
132 for (pos = 0; pos < pkts_n; ++pos)
133 if (NB_SEGS(pkts[pos]) > 1)
139 * Count the number of packets having the same ol_flags and calculate cs_flags.
142 * Pointer to TX queue structure.
144 * Pointer to array of packets.
148 * Pointer of flags to be returned.
151 * Number of packets having the same ol_flags.
153 static inline unsigned int
154 txq_calc_offload(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
155 uint16_t pkts_n, uint8_t *cs_flags)
158 const uint64_t ol_mask =
159 PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM |
160 PKT_TX_UDP_CKSUM | PKT_TX_TUNNEL_GRE |
161 PKT_TX_TUNNEL_VXLAN | PKT_TX_OUTER_IP_CKSUM;
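/*
 * Only the bits in ol_mask influence cs_flags, so packets that match on
 * this mask can share a single MPW session.
 */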
165 /* Count the number of packets having the same ol_flags. */
166 for (pos = 1; pos < pkts_n; ++pos)
167 if ((pkts[pos]->ol_flags ^ pkts[0]->ol_flags) & ol_mask)
169 /* Should open another MPW session for the rest. */
170 if (pkts[0]->ol_flags &
171 (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
172 const uint64_t is_tunneled =
175 PKT_TX_TUNNEL_VXLAN);
177 if (is_tunneled && txq->tunnel_en) {
178 *cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
179 MLX5_ETH_WQE_L4_INNER_CSUM;
180 if (pkts[0]->ol_flags & PKT_TX_OUTER_IP_CKSUM)
181 *cs_flags |= MLX5_ETH_WQE_L3_CSUM;
183 *cs_flags = MLX5_ETH_WQE_L3_CSUM |
184 MLX5_ETH_WQE_L4_CSUM;
191 * Send multi-segmented packets until it encounters a single segment packet in the pkts list.
195 * Pointer to TX queue structure.
197 * Pointer to array of packets to be sent.
199 * Number of packets to be sent.
202 * Number of packets successfully transmitted (<= pkts_n).
205 txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
208 uint16_t elts_head = txq->elts_head;
209 const uint16_t elts_n = 1 << txq->elts_n;
210 const uint16_t elts_m = elts_n - 1;
211 const uint16_t wq_n = 1 << txq->wqe_n;
212 const uint16_t wq_mask = wq_n - 1;
213 const unsigned int nb_dword_per_wqebb =
214 MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE;
215 const unsigned int nb_dword_in_hdr =
216 sizeof(struct mlx5_wqe) / MLX5_WQE_DWORD_SIZE;
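/*
 * A 64B WQEBB holds four 16B slots; the WQE header (control + eth
 * segments) takes nb_dword_in_hdr of them and the data segments fill the
 * rest, spilling into the following WQEBBs.
 */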
218 volatile struct mlx5_wqe *wqe = NULL;
220 assert(elts_n > pkts_n);
221 mlx5_tx_complete(txq);
222 if (unlikely(!pkts_n))
224 for (n = 0; n < pkts_n; ++n) {
225 struct rte_mbuf *buf = pkts[n];
226 unsigned int segs_n = buf->nb_segs;
227 unsigned int ds = nb_dword_in_hdr;
228 unsigned int len = PKT_LEN(buf);
229 uint16_t wqe_ci = txq->wqe_ci;
230 const __m128i shuf_mask_ctrl =
231 _mm_set_epi8(15, 14, 13, 12,
232 8, 9, 10, 11, /* bswap32 */
233 4, 5, 6, 7, /* bswap32 */
234 0, 1, 2, 3 /* bswap32 */);
235 uint8_t cs_flags = 0;
238 __m128i *t_wqe, *dseg;
242 max_elts = elts_n - (elts_head - txq->elts_tail);
243 max_wqe = wq_n - (txq->wqe_ci - txq->wqe_pi);
245 * An MPW session consumes at most 2 WQEs to
246 * include MLX5_MPW_DSEG_MAX pointers.
249 max_elts < segs_n || max_wqe < 2)
251 if (segs_n > MLX5_MPW_DSEG_MAX) {
252 txq->stats.oerrors++;
255 wqe = &((volatile struct mlx5_wqe64 *)
256 txq->wqes)[wqe_ci & wq_mask].hdr;
258 (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
259 const uint64_t is_tunneled = buf->ol_flags &
261 PKT_TX_TUNNEL_VXLAN);
263 if (is_tunneled && txq->tunnel_en) {
264 cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
265 MLX5_ETH_WQE_L4_INNER_CSUM;
266 if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
267 cs_flags |= MLX5_ETH_WQE_L3_CSUM;
269 cs_flags = MLX5_ETH_WQE_L3_CSUM |
270 MLX5_ETH_WQE_L4_CSUM;
273 /* Title WQEBB pointer. */
274 t_wqe = (__m128i *)wqe;
275 dseg = (__m128i *)(wqe + 1);
277 if (!(ds++ % nb_dword_per_wqebb)) {
279 &((volatile struct mlx5_wqe64 *)
280 txq->wqes)[++wqe_ci & wq_mask];
282 txq_wr_dseg_v(txq, dseg++, &buf, 1);
283 (*txq->elts)[elts_head++ & elts_m] = buf;
287 /* Fill CTRL in the header. */
288 ctrl = _mm_set_epi32(0, 0, txq->qp_num_8s | ds,
289 MLX5_OPC_MOD_MPW << 24 |
290 txq->wqe_ci << 8 | MLX5_OPCODE_TSO);
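/*
 * The first two 32b words carry the opcode/WQE index and the QP number
 * with the DS count; the shuffle below converts them to big-endian.
 */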
291 ctrl = _mm_shuffle_epi8(ctrl, shuf_mask_ctrl);
292 _mm_store_si128(t_wqe, ctrl);
293 /* Fill ESEG in the header. */
294 _mm_store_si128(t_wqe + 1,
295 _mm_set_epi16(0, 0, 0, 0,
296 rte_cpu_to_be_16(len), cs_flags,
298 txq->wqe_ci = wqe_ci;
302 txq->elts_comp += (uint16_t)(elts_head - txq->elts_head);
303 txq->elts_head = elts_head;
304 if (txq->elts_comp >= MLX5_TX_COMP_THRESH) {
305 wqe->ctrl[2] = rte_cpu_to_be_32(8);
306 wqe->ctrl[3] = txq->elts_head;
310 #ifdef MLX5_PMD_SOFT_COUNTERS
311 txq->stats.opackets += n;
313 mlx5_tx_dbrec(txq, wqe);
318 * Send a burst of packets with Enhanced MPW. If a multi-seg packet is
319 * encountered, stop and return so it can be processed by txq_scatter_v().
320 * All packets in the pkts list should be single segment packets having the
321 * same offload flags, as ensured by txq_check_multiseg() and txq_calc_offload().
324 * Pointer to TX queue structure.
326 * Pointer to array of packets to be sent.
328 * Number of packets to be sent (<= MLX5_VPMD_TX_MAX_BURST).
330 * Checksum offload flags to be written in the descriptor.
333 * Number of packets successfully transmitted (<= pkts_n).
335 static inline uint16_t
336 txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
339 struct rte_mbuf **elts;
340 uint16_t elts_head = txq->elts_head;
341 const uint16_t elts_n = 1 << txq->elts_n;
342 const uint16_t elts_m = elts_n - 1;
343 const unsigned int nb_dword_per_wqebb =
344 MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE;
345 const unsigned int nb_dword_in_hdr =
346 sizeof(struct mlx5_wqe) / MLX5_WQE_DWORD_SIZE;
351 uint32_t comp_req = 0;
352 const uint16_t wq_n = 1 << txq->wqe_n;
353 const uint16_t wq_mask = wq_n - 1;
354 uint16_t wq_idx = txq->wqe_ci & wq_mask;
355 volatile struct mlx5_wqe64 *wq =
356 &((volatile struct mlx5_wqe64 *)txq->wqes)[wq_idx];
357 volatile struct mlx5_wqe *wqe = (volatile struct mlx5_wqe *)wq;
358 const __m128i shuf_mask_ctrl =
359 _mm_set_epi8(15, 14, 13, 12,
360 8, 9, 10, 11, /* bswap32 */
361 4, 5, 6, 7, /* bswap32 */
362 0, 1, 2, 3 /* bswap32 */);
363 __m128i *t_wqe, *dseg;
366 /* Make sure all packets can fit into a single WQE. */
367 assert(elts_n > pkts_n);
368 mlx5_tx_complete(txq);
369 max_elts = (elts_n - (elts_head - txq->elts_tail));
370 max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
371 pkts_n = RTE_MIN((unsigned int)RTE_MIN(pkts_n, max_wqe), max_elts);
372 assert(pkts_n <= MLX5_DSEG_MAX - nb_dword_in_hdr);
373 if (unlikely(!pkts_n))
375 elts = &(*txq->elts)[elts_head & elts_m];
376 /* Loop for available tailroom first. */
377 n = RTE_MIN(elts_n - (elts_head & elts_m), pkts_n);
378 for (pos = 0; pos < (n & -2); pos += 2)
379 _mm_storeu_si128((__m128i *)&elts[pos],
380 _mm_loadu_si128((__m128i *)&pkts[pos]));
382 elts[pos] = pkts[pos];
383 /* Check if it crosses the end of the queue. */
384 if (unlikely(n < pkts_n)) {
385 elts = &(*txq->elts)[0];
386 for (pos = 0; pos < pkts_n - n; ++pos)
387 elts[pos] = pkts[n + pos];
389 txq->elts_head += pkts_n;
390 /* Save title WQEBB pointer. */
391 t_wqe = (__m128i *)wqe;
392 dseg = (__m128i *)(wqe + 1);
393 /* Calculate the number of entries to the end. */
395 (wq_n - wq_idx) * nb_dword_per_wqebb - nb_dword_in_hdr,
398 txq_wr_dseg_v(txq, dseg, pkts, n);
399 /* Check if it crosses the end of the queue. */
401 dseg = (__m128i *)txq->wqes;
402 txq_wr_dseg_v(txq, dseg, &pkts[n], pkts_n - n);
404 if (txq->elts_comp + pkts_n < MLX5_TX_COMP_THRESH) {
405 txq->elts_comp += pkts_n;
407 /* Request a completion. */
412 /* Fill CTRL in the header. */
413 ctrl = _mm_set_epi32(txq->elts_head, comp_req,
414 txq->qp_num_8s | (pkts_n + 2),
415 MLX5_OPC_MOD_ENHANCED_MPSW << 24 |
416 txq->wqe_ci << 8 | MLX5_OPCODE_ENHANCED_MPSW);
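/*
 * The DS count (pkts_n + 2) is one 16B data segment per packet plus the
 * control and eth segments of the header.
 */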
417 ctrl = _mm_shuffle_epi8(ctrl, shuf_mask_ctrl);
418 _mm_store_si128(t_wqe, ctrl);
419 /* Fill ESEG in the header. */
420 _mm_store_si128(t_wqe + 1,
421 _mm_set_epi8(0, 0, 0, 0,
425 #ifdef MLX5_PMD_SOFT_COUNTERS
426 txq->stats.opackets += pkts_n;
428 txq->wqe_ci += (nb_dword_in_hdr + pkts_n + (nb_dword_per_wqebb - 1)) / nb_dword_per_wqebb;
430 /* Ring QP doorbell. */
431 mlx5_tx_dbrec(txq, wqe);
436 * DPDK callback for vectorized TX.
439 * Generic pointer to TX queue structure.
441 * Packets to transmit.
443 * Number of packets in array.
446 * Number of packets successfully transmitted (<= pkts_n).
449 mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts,
452 struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
455 while (pkts_n > nb_tx) {
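/* Send at most MLX5_VPMD_TX_MAX_BURST packets per txq_burst_v() call. */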
459 n = RTE_MIN((uint16_t)(pkts_n - nb_tx), MLX5_VPMD_TX_MAX_BURST);
460 ret = txq_burst_v(txq, &pkts[nb_tx], n, 0);
469 * DPDK callback for vectorized TX with multi-seg packets and offload.
472 * Generic pointer to TX queue structure.
474 * Packets to transmit.
476 * Number of packets in array.
479 * Number of packets successfully transmitted (<= pkts_n).
482 mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
484 struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
487 while (pkts_n > nb_tx) {
488 uint8_t cs_flags = 0;
492 /* Transmit multi-seg packets in the head of pkts list. */
493 if (!(txq->flags & ETH_TXQ_FLAGS_NOMULTSEGS) &&
494 NB_SEGS(pkts[nb_tx]) > 1)
495 nb_tx += txq_scatter_v(txq,
498 n = RTE_MIN((uint16_t)(pkts_n - nb_tx), MLX5_VPMD_TX_MAX_BURST);
499 if (!(txq->flags & ETH_TXQ_FLAGS_NOMULTSEGS))
500 n = txq_check_multiseg(&pkts[nb_tx], n);
501 if (!(txq->flags & ETH_TXQ_FLAGS_NOOFFLOADS))
502 n = txq_calc_offload(txq, &pkts[nb_tx], n, &cs_flags);
503 ret = txq_burst_v(txq, &pkts[nb_tx], n, cs_flags);
512 * Copy mbufs from the RX SW ring to the array of packets to be returned.
515 * Pointer to RX queue structure.
517 * Pointer to array where packets are to be stored.
519 * Number of packets to be copied.
522 rxq_copy_mbuf_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t n)
524 const uint16_t q_mask = (1 << rxq->elts_n) - 1;
525 struct rte_mbuf **elts = &(*rxq->elts)[rxq->rq_pi & q_mask];
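/* Copy pointers two at a time: each 16B load/store moves two 8B mbuf pointers. */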
529 for (pos = 0; pos < p; pos += 2) {
532 mbp = _mm_loadu_si128((__m128i *)&elts[pos]);
533 _mm_storeu_si128((__m128i *)&pkts[pos], mbp);
536 pkts[pos] = elts[pos];
540 * Replenish buffers for RX in bulk.
543 * Pointer to RX queue structure.
545 * Number of buffers to be replenished.
548 rxq_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n)
550 const uint16_t q_n = 1 << rxq->elts_n;
551 const uint16_t q_mask = q_n - 1;
552 const uint16_t elts_idx = rxq->rq_ci & q_mask;
553 struct rte_mbuf **elts = &(*rxq->elts)[elts_idx];
554 volatile struct mlx5_wqe_data_seg *wq = &(*rxq->wqes)[elts_idx];
557 assert(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH);
558 assert(n <= (uint16_t)(q_n - (rxq->rq_ci - rxq->rq_pi)));
559 assert(MLX5_VPMD_RXQ_RPLNSH_THRESH > MLX5_VPMD_DESCS_PER_LOOP);
560 /* Not to cross queue end. */
561 n = RTE_MIN(n - MLX5_VPMD_DESCS_PER_LOOP, q_n - elts_idx);
562 if (rte_mempool_get_bulk(rxq->mp, (void *)elts, n) < 0) {
563 rxq->stats.rx_nombuf += n;
566 for (i = 0; i < n; ++i)
567 wq[i].addr = rte_cpu_to_be_64((uintptr_t)elts[i]->buf_addr +
568 RTE_PKTMBUF_HEADROOM);
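/* Update the doorbell record so the HW sees the newly posted buffers. */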
571 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
575 * Decompress a compressed completion and fill in mbufs in RX SW ring with data
576 * extracted from the title completion descriptor.
579 * Pointer to RX queue structure.
581 * Pointer to completion array having a compressed completion at first.
583 * Pointer to SW ring to be filled. The first mbuf has to be pre-built from
584 * the title completion descriptor to be copied to the rest of mbufs.
587 rxq_cq_decompress_v(struct mlx5_rxq_data *rxq,
588 volatile struct mlx5_cqe *cq,
589 struct rte_mbuf **elts)
591 volatile struct mlx5_mini_cqe8 *mcq = (void *)(cq + 1);
592 struct rte_mbuf *t_pkt = elts[0]; /* Title packet is pre-built. */
595 unsigned int inv = 0;
596 /* Mask to shuffle from extracted mini CQE to mbuf. */
597 const __m128i shuf_mask1 =
598 _mm_set_epi8(0, 1, 2, 3, /* rss, bswap32 */
599 -1, -1, /* skip vlan_tci */
600 6, 7, /* data_len, bswap16 */
601 -1, -1, 6, 7, /* pkt_len, bswap16 */
602 -1, -1, -1, -1 /* skip packet_type */);
603 const __m128i shuf_mask2 =
604 _mm_set_epi8(8, 9, 10, 11, /* rss, bswap32 */
605 -1, -1, /* skip vlan_tci */
606 14, 15, /* data_len, bswap16 */
607 -1, -1, 14, 15, /* pkt_len, bswap16 */
608 -1, -1, -1, -1 /* skip packet_type */);
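/*
 * Mini-CQEs are packed eight per 64B CQE slot, 8B each, carrying the byte
 * count and RSS hash; everything else is inherited from the title packet.
 */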
609 /* Restore the compressed count. Must be 16 bits. */
610 const uint16_t mcqe_n = t_pkt->data_len +
611 (rxq->crc_present * ETHER_CRC_LEN);
612 const __m128i rearm =
613 _mm_loadu_si128((__m128i *)&t_pkt->rearm_data);
615 _mm_loadu_si128((__m128i *)&t_pkt->rx_descriptor_fields1);
616 const __m128i crc_adj =
617 _mm_set_epi16(0, 0, 0,
618 rxq->crc_present * ETHER_CRC_LEN,
620 rxq->crc_present * ETHER_CRC_LEN,
622 const uint32_t flow_tag = t_pkt->hash.fdir.hi;
623 #ifdef MLX5_PMD_SOFT_COUNTERS
624 const __m128i zero = _mm_setzero_si128();
625 const __m128i ones = _mm_cmpeq_epi32(zero, zero);
626 uint32_t rcvd_byte = 0;
627 /* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */
628 const __m128i len_shuf_mask =
629 _mm_set_epi8(-1, -1, -1, -1,
635 /* Compile time sanity check for this function. */
636 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
637 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
638 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
639 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
640 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
641 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
643 * Not to overflow elts array. Decompress next time after mbuf
646 if (unlikely(mcqe_n + MLX5_VPMD_DESCS_PER_LOOP >
647 (uint16_t)(rxq->rq_ci - rxq->cq_ci)))
650 * A. load mCQEs into a 128bit register.
651 * B. store rearm data to mbuf.
652 * C. combine data from mCQEs with rx_descriptor_fields1.
653 * D. store rx_descriptor_fields1.
654 * E. store flow tag (rte_flow mark).
656 for (pos = 0; pos < mcqe_n; ) {
657 __m128i mcqe1, mcqe2;
658 __m128i rxdf1, rxdf2;
659 #ifdef MLX5_PMD_SOFT_COUNTERS
660 __m128i byte_cnt, invalid_mask;
663 if (!(pos & 0x7) && pos + 8 < mcqe_n)
664 rte_prefetch0((void *)(cq + pos + 8));
665 /* A.1 load mCQEs into a 128bit register. */
666 mcqe1 = _mm_loadu_si128((__m128i *)&mcq[pos % 8]);
667 mcqe2 = _mm_loadu_si128((__m128i *)&mcq[pos % 8 + 2]);
668 /* B.1 store rearm data to mbuf. */
669 _mm_storeu_si128((__m128i *)&elts[pos]->rearm_data, rearm);
670 _mm_storeu_si128((__m128i *)&elts[pos + 1]->rearm_data, rearm);
671 /* C.1 combine data from mCQEs with rx_descriptor_fields1. */
672 rxdf1 = _mm_shuffle_epi8(mcqe1, shuf_mask1);
673 rxdf2 = _mm_shuffle_epi8(mcqe1, shuf_mask2);
674 rxdf1 = _mm_sub_epi16(rxdf1, crc_adj);
675 rxdf2 = _mm_sub_epi16(rxdf2, crc_adj);
676 rxdf1 = _mm_blend_epi16(rxdf1, rxdf, 0x23);
677 rxdf2 = _mm_blend_epi16(rxdf2, rxdf, 0x23);
678 /* D.1 store rx_descriptor_fields1. */
679 _mm_storeu_si128((__m128i *)
680 &elts[pos]->rx_descriptor_fields1,
682 _mm_storeu_si128((__m128i *)
683 &elts[pos + 1]->rx_descriptor_fields1,
685 /* B.1 store rearm data to mbuf. */
686 _mm_storeu_si128((__m128i *)&elts[pos + 2]->rearm_data, rearm);
687 _mm_storeu_si128((__m128i *)&elts[pos + 3]->rearm_data, rearm);
688 /* C.1 combine data from mCQEs with rx_descriptor_fields1. */
689 rxdf1 = _mm_shuffle_epi8(mcqe2, shuf_mask1);
690 rxdf2 = _mm_shuffle_epi8(mcqe2, shuf_mask2);
691 rxdf1 = _mm_sub_epi16(rxdf1, crc_adj);
692 rxdf2 = _mm_sub_epi16(rxdf2, crc_adj);
693 rxdf1 = _mm_blend_epi16(rxdf1, rxdf, 0x23);
694 rxdf2 = _mm_blend_epi16(rxdf2, rxdf, 0x23);
695 /* D.1 store rx_descriptor_fields1. */
696 _mm_storeu_si128((__m128i *)
697 &elts[pos + 2]->rx_descriptor_fields1,
699 _mm_storeu_si128((__m128i *)
700 &elts[pos + 3]->rx_descriptor_fields1,
702 #ifdef MLX5_PMD_SOFT_COUNTERS
703 invalid_mask = _mm_set_epi64x(0,
705 sizeof(uint16_t) * 8);
706 invalid_mask = _mm_sll_epi64(ones, invalid_mask);
707 mcqe1 = _mm_srli_si128(mcqe1, 4);
708 byte_cnt = _mm_blend_epi16(mcqe1, mcqe2, 0xcc);
709 byte_cnt = _mm_shuffle_epi8(byte_cnt, len_shuf_mask);
710 byte_cnt = _mm_andnot_si128(invalid_mask, byte_cnt);
711 byte_cnt = _mm_hadd_epi16(byte_cnt, zero);
712 rcvd_byte += _mm_cvtsi128_si64(_mm_hadd_epi16(byte_cnt, zero));
715 /* E.1 store flow tag (rte_flow mark). */
716 elts[pos]->hash.fdir.hi = flow_tag;
717 elts[pos + 1]->hash.fdir.hi = flow_tag;
718 elts[pos + 2]->hash.fdir.hi = flow_tag;
719 elts[pos + 3]->hash.fdir.hi = flow_tag;
721 pos += MLX5_VPMD_DESCS_PER_LOOP;
722 /* Move to next CQE and invalidate consumed CQEs. */
723 if (!(pos & 0x7) && pos < mcqe_n) {
724 mcq = (void *)(cq + pos);
725 for (i = 0; i < 8; ++i)
726 cq[inv++].op_own = MLX5_CQE_INVALIDATE;
729 /* Invalidate the rest of CQEs. */
730 for (; inv < mcqe_n; ++inv)
731 cq[inv].op_own = MLX5_CQE_INVALIDATE;
732 #ifdef MLX5_PMD_SOFT_COUNTERS
733 rxq->stats.ipackets += mcqe_n;
734 rxq->stats.ibytes += rcvd_byte;
736 rxq->cq_ci += mcqe_n;
740 * Calculate the packet type and offload flags for each mbuf and store them.
743 * Pointer to RX queue structure.
745 * Array of four 16-byte completions extracted from the original completion descriptor.
748 * Opcode vector having responder error status. Each field is 4B.
750 * Pointer to array of packets to be filled.
753 rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4],
754 __m128i op_err, struct rte_mbuf **pkts)
756 __m128i pinfo0, pinfo1;
757 __m128i pinfo, ptype;
758 __m128i ol_flags = _mm_set1_epi32(rxq->rss_hash * PKT_RX_RSS_HASH);
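/* rxq->rss_hash is 0 or 1, so PKT_RX_RSS_HASH is set only when hashing is enabled. */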
760 const __m128i zero = _mm_setzero_si128();
761 const __m128i ptype_mask =
762 _mm_set_epi32(0xfd06, 0xfd06, 0xfd06, 0xfd06);
763 const __m128i ptype_ol_mask =
764 _mm_set_epi32(0x106, 0x106, 0x106, 0x106);
765 const __m128i pinfo_mask =
766 _mm_set_epi32(0x3, 0x3, 0x3, 0x3);
767 const __m128i cv_flag_sel =
768 _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0,
769 (uint8_t)((PKT_RX_IP_CKSUM_GOOD |
770 PKT_RX_L4_CKSUM_GOOD) >> 1),
772 (uint8_t)(PKT_RX_L4_CKSUM_GOOD >> 1),
774 (uint8_t)(PKT_RX_IP_CKSUM_GOOD >> 1),
775 (uint8_t)(PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED),
777 const __m128i cv_mask =
778 _mm_set_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
779 PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED,
780 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
781 PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED,
782 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
783 PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED,
784 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
785 PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED);
786 const __m128i mbuf_init =
787 _mm_loadl_epi64((__m128i *)&rxq->mbuf_initializer);
788 __m128i rearm0, rearm1, rearm2, rearm3;
790 /* Extract pkt_info field. */
791 pinfo0 = _mm_unpacklo_epi32(cqes[0], cqes[1]);
792 pinfo1 = _mm_unpacklo_epi32(cqes[2], cqes[3]);
793 pinfo = _mm_unpacklo_epi64(pinfo0, pinfo1);
794 /* Extract hdr_type_etc field. */
795 pinfo0 = _mm_unpackhi_epi32(cqes[0], cqes[1]);
796 pinfo1 = _mm_unpackhi_epi32(cqes[2], cqes[3]);
797 ptype = _mm_unpacklo_epi64(pinfo0, pinfo1);
799 const __m128i pinfo_ft_mask =
800 _mm_set_epi32(0xffffff00, 0xffffff00,
801 0xffffff00, 0xffffff00);
802 const __m128i fdir_flags = _mm_set1_epi32(PKT_RX_FDIR);
803 const __m128i fdir_id_flags = _mm_set1_epi32(PKT_RX_FDIR_ID);
804 __m128i flow_tag, invalid_mask;
806 flow_tag = _mm_and_si128(pinfo, pinfo_ft_mask);
807 /* Check if flow tag is non-zero then set PKT_RX_FDIR. */
808 invalid_mask = _mm_cmpeq_epi32(flow_tag, zero);
809 ol_flags = _mm_or_si128(ol_flags,
810 _mm_andnot_si128(invalid_mask,
812 /* Mask out invalid entries. */
813 flow_tag = _mm_andnot_si128(invalid_mask, flow_tag);
814 /* Check if flow tag MLX5_FLOW_MARK_DEFAULT. */
815 ol_flags = _mm_or_si128(ol_flags,
817 _mm_cmpeq_epi32(flow_tag,
822 * Merge the two fields to generate the following:
826 * bit[11:10] = l3_hdr_type
827 * bit[14:12] = l4_hdr_type
830 * bit[17] = outer_l3_type
832 ptype = _mm_and_si128(ptype, ptype_mask);
833 pinfo = _mm_and_si128(pinfo, pinfo_mask);
834 pinfo = _mm_slli_epi32(pinfo, 16);
835 /* Merge the two fields into pinfo for the ol_flags calculation. */
836 pinfo = _mm_or_si128(ptype, pinfo);
837 ptype = _mm_srli_epi32(pinfo, 10);
838 ptype = _mm_packs_epi32(ptype, zero);
839 /* Errored packets will have RTE_PTYPE_ALL_MASK. */
840 op_err = _mm_srli_epi16(op_err, 8);
841 ptype = _mm_or_si128(ptype, op_err);
842 pkts[0]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 0)];
843 pkts[1]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 2)];
844 pkts[2]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 4)];
845 pkts[3]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 6)];
846 /* Fill flags for checksum and VLAN. */
847 pinfo = _mm_and_si128(pinfo, ptype_ol_mask);
848 pinfo = _mm_shuffle_epi8(cv_flag_sel, pinfo);
849 /* Locate checksum flags at byte[2:1] and merge with VLAN flags. */
850 cv_flags = _mm_slli_epi32(pinfo, 9);
851 cv_flags = _mm_or_si128(pinfo, cv_flags);
852 /* Move back flags to start from byte[0]. */
853 cv_flags = _mm_srli_epi32(cv_flags, 8);
854 /* Mask out garbage bits. */
855 cv_flags = _mm_and_si128(cv_flags, cv_mask);
856 /* Merge to ol_flags. */
857 ol_flags = _mm_or_si128(ol_flags, cv_flags);
858 /* Merge mbuf_init and ol_flags. */
859 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
860 offsetof(struct rte_mbuf, rearm_data) + 8);
861 rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(ol_flags, 8), 0x30);
862 rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(ol_flags, 4), 0x30);
863 rearm2 = _mm_blend_epi16(mbuf_init, ol_flags, 0x30);
864 rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(ol_flags, 4), 0x30);
865 /* Write 8B rearm_data and 8B ol_flags. */
866 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
867 RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
868 _mm_store_si128((__m128i *)&pkts[0]->rearm_data, rearm0);
869 _mm_store_si128((__m128i *)&pkts[1]->rearm_data, rearm1);
870 _mm_store_si128((__m128i *)&pkts[2]->rearm_data, rearm2);
871 _mm_store_si128((__m128i *)&pkts[3]->rearm_data, rearm3);
875 * Skip error packets.
878 * Pointer to RX queue structure.
880 * Array to store received packets.
882 * Maximum number of packets in array.
885 * Number of packets successfully received (<= pkts_n).
888 rxq_handle_pending_error(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
893 #ifdef MLX5_PMD_SOFT_COUNTERS
894 uint32_t err_bytes = 0;
897 for (i = 0; i < pkts_n; ++i) {
898 struct rte_mbuf *pkt = pkts[i];
900 if (pkt->packet_type == RTE_PTYPE_ALL_MASK) {
901 #ifdef MLX5_PMD_SOFT_COUNTERS
902 err_bytes += PKT_LEN(pkt);
904 rte_pktmbuf_free_seg(pkt);
909 rxq->stats.idropped += (pkts_n - n);
910 #ifdef MLX5_PMD_SOFT_COUNTERS
911 /* Correct counters of errored completions. */
912 rxq->stats.ipackets -= (pkts_n - n);
913 rxq->stats.ibytes -= err_bytes;
915 rxq->pending_err = 0;
920 * Receive burst of packets. An errored completion also consumes a mbuf, but the
921 * packet_type is set to RTE_PTYPE_ALL_MASK. Marked mbufs should be freed
922 * before returning to the application.
925 * Pointer to RX queue structure.
927 * Array to store received packets.
929 * Maximum number of packets in array.
932 * Number of packets received including errors (<= pkts_n).
934 static inline uint16_t
935 rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
937 const uint16_t q_n = 1 << rxq->cqe_n;
938 const uint16_t q_mask = q_n - 1;
939 volatile struct mlx5_cqe *cq;
940 struct rte_mbuf **elts;
944 uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP;
945 uint16_t nocmp_n = 0;
946 uint16_t rcvd_pkt = 0;
947 unsigned int cq_idx = rxq->cq_ci & q_mask;
948 unsigned int elts_idx;
949 unsigned int ownership = !!(rxq->cq_ci & (q_mask + 1));
950 const __m128i owner_check =
951 _mm_set_epi64x(0x0100000001000000LL, 0x0100000001000000LL);
952 const __m128i opcode_check =
953 _mm_set_epi64x(0xf0000000f0000000LL, 0xf0000000f0000000LL);
954 const __m128i format_check =
955 _mm_set_epi64x(0x0c0000000c000000LL, 0x0c0000000c000000LL);
956 const __m128i resp_err_check =
957 _mm_set_epi64x(0xe0000000e0000000LL, 0xe0000000e0000000LL);
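/*
 * The four constants above test the op_own byte of each CQE once gathered
 * into one vector: ownership bit, opcode, compressed format and
 * responder-error opcode, respectively.
 */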
958 #ifdef MLX5_PMD_SOFT_COUNTERS
959 uint32_t rcvd_byte = 0;
960 /* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */
961 const __m128i len_shuf_mask =
962 _mm_set_epi8(-1, -1, -1, -1,
967 /* Mask to shuffle from extracted CQE to mbuf. */
968 const __m128i shuf_mask =
969 _mm_set_epi8(-1, 3, 2, 1, /* fdir.hi */
970 12, 13, 14, 15, /* rss, bswap32 */
971 10, 11, /* vlan_tci, bswap16 */
972 4, 5, /* data_len, bswap16 */
973 -1, -1, /* zero out 2nd half of pkt_len */
974 4, 5 /* pkt_len, bswap16 */);
975 /* Mask to blend from the last Qword to the first DQword. */
976 const __m128i blend_mask =
977 _mm_set_epi8(-1, -1, -1, -1,
981 const __m128i zero = _mm_setzero_si128();
982 const __m128i ones = _mm_cmpeq_epi32(zero, zero);
983 const __m128i crc_adj =
984 _mm_set_epi16(0, 0, 0, 0, 0,
985 rxq->crc_present * ETHER_CRC_LEN,
987 rxq->crc_present * ETHER_CRC_LEN);
988 const __m128i flow_mark_adj = _mm_set_epi32(rxq->mark * (-1), 0, 0, 0);
990 /* Compile time sanity check for this function. */
991 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
992 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
993 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
994 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
995 RTE_BUILD_BUG_ON(offsetof(struct mlx5_cqe, pkt_info) != 0);
996 RTE_BUILD_BUG_ON(offsetof(struct mlx5_cqe, rx_hash_res) !=
997 offsetof(struct mlx5_cqe, pkt_info) + 12);
998 RTE_BUILD_BUG_ON(offsetof(struct mlx5_cqe, rsvd1) +
999 sizeof(((struct mlx5_cqe *)0)->rsvd1) !=
1000 offsetof(struct mlx5_cqe, hdr_type_etc));
1001 RTE_BUILD_BUG_ON(offsetof(struct mlx5_cqe, vlan_info) !=
1002 offsetof(struct mlx5_cqe, hdr_type_etc) + 2);
1003 RTE_BUILD_BUG_ON(offsetof(struct mlx5_cqe, rsvd2) +
1004 sizeof(((struct mlx5_cqe *)0)->rsvd2) !=
1005 offsetof(struct mlx5_cqe, byte_cnt));
1006 RTE_BUILD_BUG_ON(offsetof(struct mlx5_cqe, sop_drop_qpn) !=
1007 RTE_ALIGN(offsetof(struct mlx5_cqe, sop_drop_qpn), 8));
1008 RTE_BUILD_BUG_ON(offsetof(struct mlx5_cqe, op_own) !=
1009 offsetof(struct mlx5_cqe, sop_drop_qpn) + 7);
1010 assert(rxq->sges_n == 0);
1011 assert(rxq->cqe_n == rxq->elts_n);
1012 cq = &(*rxq->cqes)[cq_idx];
1014 rte_prefetch0(cq + 1);
1015 rte_prefetch0(cq + 2);
1016 rte_prefetch0(cq + 3);
1017 pkts_n = RTE_MIN(pkts_n, MLX5_VPMD_RX_MAX_BURST);
1020 * rq_ci >= cq_ci >= rq_pi
1021 * Definition of indexes:
1022 * rq_ci - cq_ci := # of buffers owned by HW (posted).
1023 * cq_ci - rq_pi := # of buffers not returned to app (decompressed).
1024 * N - (rq_ci - rq_pi) := # of buffers consumed (to be replenished).
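 *   Example: rq_ci=20, cq_ci=15, rq_pi=12 gives 5 buffers owned by HW,
 *   3 decompressed but not yet returned, and N - 8 free to replenish.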
1026 repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
1027 if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH)
1028 rxq_replenish_bulk_mbuf(rxq, repl_n);
1029 /* See if there are unreturned mbufs from the compressed CQE. */
1030 rcvd_pkt = rxq->cq_ci - rxq->rq_pi;
1032 rcvd_pkt = RTE_MIN(rcvd_pkt, pkts_n);
1033 rxq_copy_mbuf_v(rxq, pkts, rcvd_pkt);
1034 rxq->rq_pi += rcvd_pkt;
1037 elts_idx = rxq->rq_pi & q_mask;
1038 elts = &(*rxq->elts)[elts_idx];
1039 pkts_n = RTE_MIN(pkts_n - rcvd_pkt,
1040 (uint16_t)(rxq->rq_ci - rxq->cq_ci));
1041 /* Not to overflow pkts/elts array. */
1042 pkts_n = RTE_ALIGN_FLOOR(pkts_n, MLX5_VPMD_DESCS_PER_LOOP);
1043 /* Not to cross queue end. */
1044 pkts_n = RTE_MIN(pkts_n, q_n - elts_idx);
1047 /* At this point, there shouldn't be any remaining packets. */
1048 assert(rxq->rq_pi == rxq->cq_ci);
1050 * A. load first Qword (8bytes) in one loop.
1051 * B. copy 4 mbuf pointers from elts ring to returning pkts.
1052 * C. load remaining CQE data and extract necessary fields.
1053 * Final 16-byte cqes[] extracted from the original 64-byte CQE has the
1054 * following structure:
1057 * uint8_t flow_tag[3];
1058 * uint16_t byte_cnt;
1061 * uint16_t hdr_type_etc;
1062 * uint16_t vlan_info;
1063 * uint32_t rx_hash_res;
1066 * E. get valid CQEs.
1067 * F. find compressed CQE.
1071 pos += MLX5_VPMD_DESCS_PER_LOOP) {
1072 __m128i cqes[MLX5_VPMD_DESCS_PER_LOOP];
1073 __m128i cqe_tmp1, cqe_tmp2;
1074 __m128i pkt_mb0, pkt_mb1, pkt_mb2, pkt_mb3;
1075 __m128i op_own, op_own_tmp1, op_own_tmp2;
1076 __m128i opcode, owner_mask, invalid_mask;
1079 #ifdef MLX5_PMD_SOFT_COUNTERS
1083 __m128i p = _mm_set_epi16(0, 0, 0, 0, 3, 2, 1, 0);
1084 unsigned int p1, p2, p3;
1086 /* Prefetch next 4 CQEs. */
1087 if (pkts_n - pos >= 2 * MLX5_VPMD_DESCS_PER_LOOP) {
1088 rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP]);
1089 rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 1]);
1090 rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 2]);
1091 rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 3]);
1093 /* A.0 do not cross the end of CQ. */
1094 mask = _mm_set_epi64x(0, (pkts_n - pos) * sizeof(uint16_t) * 8);
1095 mask = _mm_sll_epi64(ones, mask);
1096 p = _mm_andnot_si128(mask, p);
1097 /* A.1 load cqes. */
1098 p3 = _mm_extract_epi16(p, 3);
1099 cqes[3] = _mm_loadl_epi64((__m128i *)
1100 &cq[pos + p3].sop_drop_qpn);
1101 rte_compiler_barrier();
1102 p2 = _mm_extract_epi16(p, 2);
1103 cqes[2] = _mm_loadl_epi64((__m128i *)
1104 &cq[pos + p2].sop_drop_qpn);
1105 rte_compiler_barrier();
1106 /* B.1 load mbuf pointers. */
1107 mbp1 = _mm_loadu_si128((__m128i *)&elts[pos]);
1108 mbp2 = _mm_loadu_si128((__m128i *)&elts[pos + 2]);
1109 /* A.1 load a block having op_own. */
1110 p1 = _mm_extract_epi16(p, 1);
1111 cqes[1] = _mm_loadl_epi64((__m128i *)
1112 &cq[pos + p1].sop_drop_qpn);
1113 rte_compiler_barrier();
1114 cqes[0] = _mm_loadl_epi64((__m128i *)
1115 &cq[pos].sop_drop_qpn);
1116 /* B.2 copy mbuf pointers. */
1117 _mm_storeu_si128((__m128i *)&pkts[pos], mbp1);
1118 _mm_storeu_si128((__m128i *)&pkts[pos + 2], mbp2);
1119 rte_compiler_barrier();
1120 /* C.1 load remaining CQE data and extract necessary fields. */
1121 cqe_tmp2 = _mm_load_si128((__m128i *)&cq[pos + p3]);
1122 cqe_tmp1 = _mm_load_si128((__m128i *)&cq[pos + p2]);
1123 cqes[3] = _mm_blendv_epi8(cqes[3], cqe_tmp2, blend_mask);
1124 cqes[2] = _mm_blendv_epi8(cqes[2], cqe_tmp1, blend_mask);
1125 cqe_tmp2 = _mm_loadu_si128((__m128i *)&cq[pos + p3].rsvd1[3]);
1126 cqe_tmp1 = _mm_loadu_si128((__m128i *)&cq[pos + p2].rsvd1[3]);
1127 cqes[3] = _mm_blend_epi16(cqes[3], cqe_tmp2, 0x30);
1128 cqes[2] = _mm_blend_epi16(cqes[2], cqe_tmp1, 0x30);
1129 cqe_tmp2 = _mm_loadl_epi64((__m128i *)&cq[pos + p3].rsvd2[10]);
1130 cqe_tmp1 = _mm_loadl_epi64((__m128i *)&cq[pos + p2].rsvd2[10]);
1131 cqes[3] = _mm_blend_epi16(cqes[3], cqe_tmp2, 0x04);
1132 cqes[2] = _mm_blend_epi16(cqes[2], cqe_tmp1, 0x04);
1133 /* C.2 generate final structure for mbuf with swapping bytes. */
1134 pkt_mb3 = _mm_shuffle_epi8(cqes[3], shuf_mask);
1135 pkt_mb2 = _mm_shuffle_epi8(cqes[2], shuf_mask);
1136 /* C.3 adjust CRC length. */
1137 pkt_mb3 = _mm_sub_epi16(pkt_mb3, crc_adj);
1138 pkt_mb2 = _mm_sub_epi16(pkt_mb2, crc_adj);
1139 /* C.4 adjust flow mark. */
1140 pkt_mb3 = _mm_add_epi32(pkt_mb3, flow_mark_adj);
1141 pkt_mb2 = _mm_add_epi32(pkt_mb2, flow_mark_adj);
1142 /* D.1 fill in mbuf - rx_descriptor_fields1. */
1143 _mm_storeu_si128((void *)&pkts[pos + 3]->pkt_len, pkt_mb3);
1144 _mm_storeu_si128((void *)&pkts[pos + 2]->pkt_len, pkt_mb2);
1145 /* E.1 extract op_own field. */
1146 op_own_tmp2 = _mm_unpacklo_epi32(cqes[2], cqes[3]);
1147 /* C.1 load remaining CQE data and extract necessary fields. */
1148 cqe_tmp2 = _mm_load_si128((__m128i *)&cq[pos + p1]);
1149 cqe_tmp1 = _mm_load_si128((__m128i *)&cq[pos]);
1150 cqes[1] = _mm_blendv_epi8(cqes[1], cqe_tmp2, blend_mask);
1151 cqes[0] = _mm_blendv_epi8(cqes[0], cqe_tmp1, blend_mask);
1152 cqe_tmp2 = _mm_loadu_si128((__m128i *)&cq[pos + p1].rsvd1[3]);
1153 cqe_tmp1 = _mm_loadu_si128((__m128i *)&cq[pos].rsvd1[3]);
1154 cqes[1] = _mm_blend_epi16(cqes[1], cqe_tmp2, 0x30);
1155 cqes[0] = _mm_blend_epi16(cqes[0], cqe_tmp1, 0x30);
1156 cqe_tmp2 = _mm_loadl_epi64((__m128i *)&cq[pos + p1].rsvd2[10]);
1157 cqe_tmp1 = _mm_loadl_epi64((__m128i *)&cq[pos].rsvd2[10]);
1158 cqes[1] = _mm_blend_epi16(cqes[1], cqe_tmp2, 0x04);
1159 cqes[0] = _mm_blend_epi16(cqes[0], cqe_tmp1, 0x04);
1160 /* C.2 generate final structure for mbuf with swapping bytes. */
1161 pkt_mb1 = _mm_shuffle_epi8(cqes[1], shuf_mask);
1162 pkt_mb0 = _mm_shuffle_epi8(cqes[0], shuf_mask);
1163 /* C.3 adjust CRC length. */
1164 pkt_mb1 = _mm_sub_epi16(pkt_mb1, crc_adj);
1165 pkt_mb0 = _mm_sub_epi16(pkt_mb0, crc_adj);
1166 /* C.4 adjust flow mark. */
1167 pkt_mb1 = _mm_add_epi32(pkt_mb1, flow_mark_adj);
1168 pkt_mb0 = _mm_add_epi32(pkt_mb0, flow_mark_adj);
1169 /* E.1 extract op_own byte. */
1170 op_own_tmp1 = _mm_unpacklo_epi32(cqes[0], cqes[1]);
1171 op_own = _mm_unpackhi_epi64(op_own_tmp1, op_own_tmp2);
1172 /* D.1 fill in mbuf - rx_descriptor_fields1. */
1173 _mm_storeu_si128((void *)&pkts[pos + 1]->pkt_len, pkt_mb1);
1174 _mm_storeu_si128((void *)&pkts[pos]->pkt_len, pkt_mb0);
1175 /* E.2 flip owner bit to mark CQEs from last round. */
1176 owner_mask = _mm_and_si128(op_own, owner_check);
1178 owner_mask = _mm_xor_si128(owner_mask, owner_check);
1179 owner_mask = _mm_cmpeq_epi32(owner_mask, owner_check);
1180 owner_mask = _mm_packs_epi32(owner_mask, zero);
1181 /* E.3 get mask for invalidated CQEs. */
1182 opcode = _mm_and_si128(op_own, opcode_check);
1183 invalid_mask = _mm_cmpeq_epi32(opcode_check, opcode);
1184 invalid_mask = _mm_packs_epi32(invalid_mask, zero);
1185 /* E.4 mask out beyond boundary. */
1186 invalid_mask = _mm_or_si128(invalid_mask, mask);
1187 /* E.5 merge invalid_mask with invalid owner. */
1188 invalid_mask = _mm_or_si128(invalid_mask, owner_mask);
1189 /* F.1 find compressed CQE format. */
1190 comp_mask = _mm_and_si128(op_own, format_check);
1191 comp_mask = _mm_cmpeq_epi32(comp_mask, format_check);
1192 comp_mask = _mm_packs_epi32(comp_mask, zero);
1193 /* F.2 mask out invalid entries. */
1194 comp_mask = _mm_andnot_si128(invalid_mask, comp_mask);
1195 comp_idx = _mm_cvtsi128_si64(comp_mask);
1196 /* F.3 get the first compressed CQE. */
1197 comp_idx = comp_idx ?
1198 __builtin_ctzll(comp_idx) /
1199 (sizeof(uint16_t) * 8) :
1200 MLX5_VPMD_DESCS_PER_LOOP;
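/*
 * comp_idx is the lane of the first compressed CQE among the four, or
 * MLX5_VPMD_DESCS_PER_LOOP when none was found.
 */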
1201 /* E.6 mask out entries after the compressed CQE. */
1202 mask = _mm_set_epi64x(0, comp_idx * sizeof(uint16_t) * 8);
1203 mask = _mm_sll_epi64(ones, mask);
1204 invalid_mask = _mm_or_si128(invalid_mask, mask);
1205 /* E.7 count non-compressed valid CQEs. */
1206 n = _mm_cvtsi128_si64(invalid_mask);
1207 n = n ? __builtin_ctzll(n) / (sizeof(uint16_t) * 8) :
1208 MLX5_VPMD_DESCS_PER_LOOP;
1210 /* D.2 get the final invalid mask. */
1211 mask = _mm_set_epi64x(0, n * sizeof(uint16_t) * 8);
1212 mask = _mm_sll_epi64(ones, mask);
1213 invalid_mask = _mm_or_si128(invalid_mask, mask);
1214 /* D.3 check error in opcode. */
1215 opcode = _mm_cmpeq_epi32(resp_err_check, opcode);
1216 opcode = _mm_packs_epi32(opcode, zero);
1217 opcode = _mm_andnot_si128(invalid_mask, opcode);
1218 /* D.4 mark if any error is set */
1219 rxq->pending_err |= !!_mm_cvtsi128_si64(opcode);
1220 /* D.5 fill in mbuf - rearm_data and packet_type. */
1221 rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]);
1222 #ifdef MLX5_PMD_SOFT_COUNTERS
1223 /* Add up received bytes count. */
1224 byte_cnt = _mm_shuffle_epi8(op_own, len_shuf_mask);
1225 byte_cnt = _mm_andnot_si128(invalid_mask, byte_cnt);
1226 byte_cnt = _mm_hadd_epi16(byte_cnt, zero);
1227 rcvd_byte += _mm_cvtsi128_si64(_mm_hadd_epi16(byte_cnt, zero));
1230 * Break the loop unless more valid CQEs are expected, or if
1231 * there's a compressed CQE.
1233 if (n != MLX5_VPMD_DESCS_PER_LOOP)
1236 /* If no new CQE seen, return without updating cq_db. */
1237 if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP))
1239 /* Update the consumer indexes for non-compressed CQEs. */
1240 assert(nocmp_n <= pkts_n);
1241 rxq->cq_ci += nocmp_n;
1242 rxq->rq_pi += nocmp_n;
1243 rcvd_pkt += nocmp_n;
1244 #ifdef MLX5_PMD_SOFT_COUNTERS
1245 rxq->stats.ipackets += nocmp_n;
1246 rxq->stats.ibytes += rcvd_byte;
1248 /* Decompress the last CQE if compressed. */
1249 if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n) {
1250 assert(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
1251 rxq_cq_decompress_v(rxq, &cq[nocmp_n], &elts[nocmp_n]);
1252 /* Return more packets if needed. */
1253 if (nocmp_n < pkts_n) {
1254 uint16_t n = rxq->cq_ci - rxq->rq_pi;
1256 n = RTE_MIN(n, pkts_n - nocmp_n);
1257 rxq_copy_mbuf_v(rxq, &pkts[nocmp_n], n);
1263 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1268 * DPDK callback for vectorized RX.
1271 * Generic pointer to RX queue structure.
1273 * Array to store received packets.
1275 * Maximum number of packets in array.
1278 * Number of packets successfully received (<= pkts_n).
1281 mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1283 struct mlx5_rxq_data *rxq = dpdk_rxq;
1286 nb_rx = rxq_burst_v(rxq, pkts, pkts_n);
1287 if (unlikely(rxq->pending_err))
1288 nb_rx = rxq_handle_pending_error(rxq, pkts, nb_rx);
1293 * Check whether Tx queue flags are set for raw vectorized Tx.
1296 * Pointer to private structure.
1299 * 1 if supported, negative errno value if not.
1301 int __attribute__((cold))
1302 priv_check_raw_vec_tx_support(struct priv *priv)
1306 /* All the configured queues should support it. */
1307 for (i = 0; i < priv->txqs_n; ++i) {
1308 struct mlx5_txq_data *txq = (*priv->txqs)[i];
1310 if (!(txq->flags & ETH_TXQ_FLAGS_NOMULTSEGS) ||
1311 !(txq->flags & ETH_TXQ_FLAGS_NOOFFLOADS))
1314 if (i != priv->txqs_n)
1320 * Check whether a device can support vectorized TX.
1323 * Pointer to private structure.
1326 * 1 if supported, negative errno value if not.
1328 int __attribute__((cold))
1329 priv_check_vec_tx_support(struct priv *priv)
1331 if (!priv->tx_vec_en ||
1332 priv->txqs_n > MLX5_VPMD_MIN_TXQS ||
1333 priv->mps != MLX5_MPW_ENHANCED ||
1340 * Check whether an RX queue can support vectorized RX.
1343 * Pointer to RX queue.
1346 * 1 if supported, negative errno value if not.
1348 int __attribute__((cold))
1349 rxq_check_vec_support(struct mlx5_rxq_data *rxq)
1351 struct mlx5_rxq_ctrl *ctrl =
1352 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
1354 if (!ctrl->priv->rx_vec_en || rxq->sges_n != 0)
1360 * Check whether a device can support vectorized RX.
1363 * Pointer to private structure.
1366 * 1 if supported, negative errno value if not.
1368 int __attribute__((cold))
1369 priv_check_vec_rx_support(struct priv *priv)
1373 if (!priv->rx_vec_en)
1375 /* All the configured queues should support it. */
1376 for (i = 0; i < priv->rxqs_n; ++i) {
1377 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1381 if (rxq_check_vec_support(rxq) < 0)
1384 if (i != priv->rxqs_n)