/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_RXTX_VEC_SSE_H_
#define RTE_PMD_MLX5_RXTX_VEC_SSE_H_
#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include <smmintrin.h>

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>
#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rxtx_vec.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
/**
 * Fill in buffer descriptors in a multi-packet send descriptor.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param dseg
 *   Pointer to buffer descriptor to be written.
 * @param pkts
 *   Pointer to array of packets to be sent.
 * @param n
 *   Number of packets to be filled.
 */
static inline void
txq_wr_dseg_v(struct mlx5_txq_data *txq, __m128i *dseg,
	      struct rte_mbuf **pkts, unsigned int n)
{
	unsigned int pos;
	uintptr_t addr;
	const __m128i shuf_mask_dseg =
		_mm_set_epi8(8, 9, 10, 11, /* addr, bswap64 */
			     12, 13, 14, 15,
			     7, 6, 5, 4, /* lkey */
			     0, 1, 2, 3 /* length, bswap32 */);
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint32_t tx_byte = 0;
#endif

	for (pos = 0; pos < n; ++pos, ++dseg) {
		__m128i desc;
		struct rte_mbuf *pkt = pkts[pos];

		addr = rte_pktmbuf_mtod(pkt, uintptr_t);
		desc = _mm_set_epi32(addr >> 32,
				     addr,
				     mlx5_tx_mb2mr(txq, pkt),
				     DATA_LEN(pkt));
		desc = _mm_shuffle_epi8(desc, shuf_mask_dseg);
		_mm_store_si128(dseg, desc);
#ifdef MLX5_PMD_SOFT_COUNTERS
		tx_byte += DATA_LEN(pkt);
#endif
	}
#ifdef MLX5_PMD_SOFT_COUNTERS
	txq->stats.obytes += tx_byte;
#endif
}
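
/*
 * Worked example (added commentary; the concrete values are hypothetical):
 * the shuffle in txq_wr_dseg_v() turns the host-endian scratch vector into
 * the big-endian mlx5 data segment layout { be32 byte_cnt; be32 lkey;
 * be64 addr }. For a 60-byte segment at address 0x00007f0012345678,
 * _mm_set_epi32(addr >> 32, addr, lkey, 60) holds, low byte first:
 *     3c 00 00 00 | <lkey> | 78 56 34 12 00 7f 00 00
 * and after _mm_shuffle_epi8(desc, shuf_mask_dseg):
 *     00 00 00 3c | <lkey> | 00 00 7f 00 12 34 56 78
 * i.e. byte_cnt and addr are byte-swapped, while the lkey, which
 * mlx5_tx_mb2mr() already returns in big-endian, passes through unchanged
 * (mask bytes 7, 6, 5, 4 select source bytes 4..7 in place).
 */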
/**
 * Send multi-segmented packets until it encounters a single segment packet in
 * the pkts list.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param pkts
 *   Pointer to array of packets to be sent.
 * @param pkts_n
 *   Number of packets to be sent.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
static uint16_t
txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
	      uint16_t pkts_n)
{
	uint16_t elts_head = txq->elts_head;
	const uint16_t elts_n = 1 << txq->elts_n;
	const uint16_t elts_m = elts_n - 1;
	const uint16_t wq_n = 1 << txq->wqe_n;
	const uint16_t wq_mask = wq_n - 1;
	const unsigned int nb_dword_per_wqebb =
		MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE;
	const unsigned int nb_dword_in_hdr =
		sizeof(struct mlx5_wqe) / MLX5_WQE_DWORD_SIZE;
	unsigned int n;
	volatile struct mlx5_wqe *wqe = NULL;
	bool metadata_ol =
		txq->offloads & DEV_TX_OFFLOAD_MATCH_METADATA ? true : false;

	assert(elts_n > pkts_n);
	mlx5_tx_complete(txq);
	if (unlikely(!pkts_n))
		return 0;
	for (n = 0; n < pkts_n; ++n) {
		struct rte_mbuf *buf = pkts[n];
		unsigned int segs_n = buf->nb_segs;
		unsigned int ds = nb_dword_in_hdr;
		unsigned int len = PKT_LEN(buf);
		uint16_t wqe_ci = txq->wqe_ci;
		const __m128i shuf_mask_ctrl =
			_mm_set_epi8(15, 14, 13, 12,
				     8, 9, 10, 11, /* bswap32 */
				     4, 5, 6, 7, /* bswap32 */
				     0, 1, 2, 3 /* bswap32 */);
		uint8_t cs_flags;
		uint16_t max_elts;
		uint16_t max_wqe;
		__m128i *t_wqe, *dseg;
		__m128i ctrl;
		rte_be32_t metadata =
			metadata_ol && (buf->ol_flags & PKT_TX_METADATA) ?
			buf->tx_metadata : 0;

		assert(segs_n);
		max_elts = elts_n - (elts_head - txq->elts_tail);
		max_wqe = wq_n - (txq->wqe_ci - txq->wqe_pi);
		/*
		 * An MPW session consumes 2 WQEs at most to
		 * include MLX5_MPW_DSEG_MAX pointers.
		 */
		if (segs_n == 1 ||
		    max_elts < segs_n || max_wqe < 2)
			break;
		if (segs_n > MLX5_MPW_DSEG_MAX) {
			txq->stats.oerrors++;
			break;
		}
		wqe = &((volatile struct mlx5_wqe64 *)
			 txq->wqes)[wqe_ci & wq_mask].hdr;
		cs_flags = txq_ol_cksum_to_cs(buf);
		/* Title WQEBB pointer. */
		t_wqe = (__m128i *)wqe;
		dseg = (__m128i *)(wqe + 1);
		do {
			if (!(ds++ % nb_dword_per_wqebb)) {
				dseg = (__m128i *)
					&((volatile struct mlx5_wqe64 *)
					   txq->wqes)[++wqe_ci & wq_mask];
			}
			txq_wr_dseg_v(txq, dseg++, &buf, 1);
			(*txq->elts)[elts_head++ & elts_m] = buf;
			buf = buf->next;
		} while (--segs_n);
		++wqe_ci;
		/* Fill CTRL in the header. */
		ctrl = _mm_set_epi32(0, 4, txq->qp_num_8s | ds,
				     MLX5_OPC_MOD_MPW << 24 |
				     txq->wqe_ci << 8 | MLX5_OPCODE_TSO);
		ctrl = _mm_shuffle_epi8(ctrl, shuf_mask_ctrl);
		_mm_store_si128(t_wqe, ctrl);
		/* Fill ESEG in the header. */
		_mm_store_si128(t_wqe + 1,
				_mm_set_epi32(0, metadata,
					      (rte_cpu_to_be_16(len) << 16) |
					      cs_flags, 0));
		txq->wqe_ci = wqe_ci;
	}
	if (!n)
		return 0;
	txq->elts_comp += (uint16_t)(elts_head - txq->elts_head);
	txq->elts_head = elts_head;
	if (txq->elts_comp >= MLX5_TX_COMP_THRESH) {
		/* A CQE slot must always be available. */
		assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
		wqe->ctrl[2] = rte_cpu_to_be_32(MLX5_COMP_ALWAYS <<
						MLX5_COMP_MODE_OFFSET);
		wqe->ctrl[3] = txq->elts_head;
		txq->elts_comp = 0;
	}
#ifdef MLX5_PMD_SOFT_COUNTERS
	txq->stats.opackets += n;
#endif
	mlx5_tx_dbrec(txq, wqe);
	return n;
}
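
/*
 * Sizing note (added commentary; the segment count is a hypothetical
 * example): with 64B WQEBBs (MLX5_WQE_SIZE) and 16B dwords
 * (MLX5_WQE_DWORD_SIZE), nb_dword_per_wqebb is 4 and the MPW header
 * (CTRL + ESEG) takes nb_dword_in_hdr = 2 dwords of the title WQEBB.
 * A 5-segment packet thus needs ds = 2 + 5 = 7 dwords: the header plus
 * two dsegs in the title WQEBB and three more dsegs in the next WQEBB.
 * Since MLX5_MPW_DSEG_MAX pointers always fit in two WQEBBs, the session
 * only has to check max_wqe < 2 above; the DS count itself is encoded in
 * the second CTRL dword as txq->qp_num_8s | ds before being byte-swapped
 * to big-endian by shuf_mask_ctrl.
 */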
/**
 * Send burst of packets with Enhanced MPW. If it encounters a multi-seg
 * packet, it stops and returns so the packet is processed by
 * txq_scatter_v(). All the packets in the pkts list should be single
 * segment packets having same offload flags.
 * This must be checked by txq_count_contig_single_seg() and txq_calc_offload().
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param pkts
 *   Pointer to array of packets to be sent.
 * @param pkts_n
 *   Number of packets to be sent (<= MLX5_VPMD_TX_MAX_BURST).
 * @param cs_flags
 *   Checksum offload flags to be written in the descriptor.
 * @param metadata
 *   Metadata value to be written in the descriptor.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
static inline uint16_t
txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
	    uint8_t cs_flags, rte_be32_t metadata)
{
	struct rte_mbuf **elts;
	uint16_t elts_head = txq->elts_head;
	const uint16_t elts_n = 1 << txq->elts_n;
	const uint16_t elts_m = elts_n - 1;
	const unsigned int nb_dword_per_wqebb =
		MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE;
	const unsigned int nb_dword_in_hdr =
		sizeof(struct mlx5_wqe) / MLX5_WQE_DWORD_SIZE;
	unsigned int n = 0;
	unsigned int pos;
	uint16_t max_elts;
	uint16_t max_wqe;
	uint32_t comp_req;
	const uint16_t wq_n = 1 << txq->wqe_n;
	const uint16_t wq_mask = wq_n - 1;
	uint16_t wq_idx = txq->wqe_ci & wq_mask;
	volatile struct mlx5_wqe64 *wq =
		&((volatile struct mlx5_wqe64 *)txq->wqes)[wq_idx];
	volatile struct mlx5_wqe *wqe = (volatile struct mlx5_wqe *)wq;
	const __m128i shuf_mask_ctrl =
		_mm_set_epi8(15, 14, 13, 12,
			     8, 9, 10, 11, /* bswap32 */
			     4, 5, 6, 7, /* bswap32 */
			     0, 1, 2, 3 /* bswap32 */);
	__m128i *t_wqe, *dseg;
	__m128i ctrl;

	/* Make sure all packets can fit into a single WQE. */
	assert(elts_n > pkts_n);
	mlx5_tx_complete(txq);
	max_elts = (elts_n - (elts_head - txq->elts_tail));
	max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
	pkts_n = RTE_MIN((unsigned int)RTE_MIN(pkts_n, max_wqe), max_elts);
	assert(pkts_n <= MLX5_DSEG_MAX - nb_dword_in_hdr);
	if (unlikely(!pkts_n))
		return 0;
	elts = &(*txq->elts)[elts_head & elts_m];
	/* Loop for available tailroom first. */
	n = RTE_MIN(elts_n - (elts_head & elts_m), pkts_n);
	for (pos = 0; pos < (n & -2); pos += 2)
		_mm_storeu_si128((__m128i *)&elts[pos],
				 _mm_loadu_si128((__m128i *)&pkts[pos]));
	if (n & 1)
		elts[pos] = pkts[pos];
	/* Check if it crosses the end of the queue. */
	if (unlikely(n < pkts_n)) {
		elts = &(*txq->elts)[0];
		for (pos = 0; pos < pkts_n - n; ++pos)
			elts[pos] = pkts[n + pos];
	}
	txq->elts_head += pkts_n;
	/* Save title WQEBB pointer. */
	t_wqe = (__m128i *)wqe;
	dseg = (__m128i *)(wqe + 1);
	/* Calculate the number of entries to the end. */
	n = RTE_MIN(
		(wq_n - wq_idx) * nb_dword_per_wqebb - nb_dword_in_hdr,
		pkts_n);
	/* Fill DSEGs. */
	txq_wr_dseg_v(txq, dseg, pkts, n);
	/* Check if it crosses the end of the queue. */
	if (n < pkts_n) {
		dseg = (__m128i *)txq->wqes;
		txq_wr_dseg_v(txq, dseg, &pkts[n], pkts_n - n);
	}
	if (txq->elts_comp + pkts_n < MLX5_TX_COMP_THRESH) {
		txq->elts_comp += pkts_n;
		comp_req = MLX5_COMP_ONLY_FIRST_ERR << MLX5_COMP_MODE_OFFSET;
	} else {
		/* A CQE slot must always be available. */
		assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
		/* Request a completion. */
		txq->elts_comp = 0;
		comp_req = MLX5_COMP_ALWAYS << MLX5_COMP_MODE_OFFSET;
	}
	/* Fill CTRL in the header. */
	ctrl = _mm_set_epi32(txq->elts_head, comp_req,
			     txq->qp_num_8s | (pkts_n + 2),
			     MLX5_OPC_MOD_ENHANCED_MPSW << 24 |
			     txq->wqe_ci << 8 | MLX5_OPCODE_ENHANCED_MPSW);
	ctrl = _mm_shuffle_epi8(ctrl, shuf_mask_ctrl);
	_mm_store_si128(t_wqe, ctrl);
	/* Fill ESEG in the header. */
	_mm_store_si128(t_wqe + 1, _mm_set_epi32(0, metadata, cs_flags, 0));
#ifdef MLX5_PMD_SOFT_COUNTERS
	txq->stats.opackets += pkts_n;
#endif
	txq->wqe_ci += (nb_dword_in_hdr + pkts_n + (nb_dword_per_wqebb - 1)) /
		       nb_dword_per_wqebb;
	/* Ring QP doorbell. */
	mlx5_tx_dbrec_cond_wmb(txq, wqe, pkts_n < MLX5_VPMD_TX_MAX_BURST);
	return pkts_n;
}
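
/*
 * Sizing note (added commentary; the burst size is a hypothetical
 * example): the wqe_ci advance above is a ceiling division over 4-dword
 * WQEBBs. A burst of pkts_n = 32 single-segment packets occupies
 * 2 header dwords + 32 dsegs = 34 dwords, so wqe_ci advances by
 * (2 + 32 + 3) / 4 = 9 WQEBBs. The third argument of
 * mlx5_tx_dbrec_cond_wmb() appears intended to defer the final doorbell
 * barrier while full-size bursts keep arriving, flushing only when
 * pkts_n < MLX5_VPMD_TX_MAX_BURST suggests the tail of a burst sequence.
 */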
/**
 * Store free buffers to RX SW ring.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param pkts
 *   Pointer to array of packets to be stored.
 * @param n
 *   Number of packets to be stored.
 */
static inline void
rxq_copy_mbuf_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t n)
{
	const uint16_t q_mask = (1 << rxq->elts_n) - 1;
	struct rte_mbuf **elts = &(*rxq->elts)[rxq->rq_pi & q_mask];
	unsigned int pos;
	uint16_t p = n & -2;

	for (pos = 0; pos < p; pos += 2) {
		__m128i mbp;

		mbp = _mm_loadu_si128((__m128i *)&elts[pos]);
		_mm_storeu_si128((__m128i *)&pkts[pos], mbp);
	}
	if (n & 1)
		pkts[pos] = elts[pos];
}
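
/*
 * Note (added commentary): on 64-bit targets two mbuf pointers fit in one
 * 128-bit register, so the loop above moves pointer pairs with a single
 * unaligned load/store each, and the trailing scalar copy handles an odd
 * count. E.g. for n = 5, pairs (0,1) and (2,3) are copied vectorized and
 * elts[4] is assigned directly.
 */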
/**
 * Decompress a compressed completion and fill in mbufs in RX SW ring with data
 * extracted from the title completion descriptor.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param cq
 *   Pointer to completion array having a compressed completion at first.
 * @param elts
 *   Pointer to SW ring to be filled. The first mbuf has to be pre-built from
 *   the title completion descriptor to be copied to the rest of mbufs.
 *
 * @return
 *   Number of mini-CQEs successfully decompressed.
 */
static inline uint16_t
rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
		    struct rte_mbuf **elts)
{
	volatile struct mlx5_mini_cqe8 *mcq = (void *)(cq + 1);
	struct rte_mbuf *t_pkt = elts[0]; /* Title packet is pre-built. */
	unsigned int pos;
	unsigned int i;
	unsigned int inv = 0;
	/* Mask to shuffle from extracted mini CQE to mbuf. */
	const __m128i shuf_mask1 =
		_mm_set_epi8(0, 1, 2, 3, /* rss, bswap32 */
			     -1, -1, /* skip vlan_tci */
			     6, 7, /* data_len, bswap16 */
			     -1, -1, 6, 7, /* pkt_len, bswap16 */
			     -1, -1, -1, -1 /* skip packet_type */);
	const __m128i shuf_mask2 =
		_mm_set_epi8(8, 9, 10, 11, /* rss, bswap32 */
			     -1, -1, /* skip vlan_tci */
			     14, 15, /* data_len, bswap16 */
			     -1, -1, 14, 15, /* pkt_len, bswap16 */
			     -1, -1, -1, -1 /* skip packet_type */);
	/* Restore the compressed count. Must be 16 bits. */
	const uint16_t mcqe_n = t_pkt->data_len +
				(rxq->crc_present * RTE_ETHER_CRC_LEN);
	const __m128i rearm =
		_mm_loadu_si128((__m128i *)&t_pkt->rearm_data);
	const __m128i rxdf =
		_mm_loadu_si128((__m128i *)&t_pkt->rx_descriptor_fields1);
	const __m128i crc_adj =
		_mm_set_epi16(0, 0, 0,
			      rxq->crc_present * RTE_ETHER_CRC_LEN,
			      0,
			      rxq->crc_present * RTE_ETHER_CRC_LEN,
			      0, 0);
	const uint32_t flow_tag = t_pkt->hash.fdir.hi;
#ifdef MLX5_PMD_SOFT_COUNTERS
	const __m128i zero = _mm_setzero_si128();
	const __m128i ones = _mm_cmpeq_epi32(zero, zero);
	uint32_t rcvd_byte = 0;
	/* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */
	const __m128i len_shuf_mask =
		_mm_set_epi8(-1, -1, -1, -1,
			     -1, -1, -1, -1,
			     14, 15, 6, 7,
			     10, 11, 2, 3);
#endif

	/*
	 * A. load mCQEs into a 128bit register.
	 * B. store rearm data to mbuf.
	 * C. combine data from mCQEs with rx_descriptor_fields1.
	 * D. store rx_descriptor_fields1.
	 * E. store flow tag (rte_flow mark).
	 */
	for (pos = 0; pos < mcqe_n; ) {
		__m128i mcqe1, mcqe2;
		__m128i rxdf1, rxdf2;
#ifdef MLX5_PMD_SOFT_COUNTERS
		__m128i byte_cnt, invalid_mask;
#endif

		if (!(pos & 0x7) && pos + 8 < mcqe_n)
			rte_prefetch0((void *)(cq + pos + 8));
		/* A.1 load mCQEs into a 128bit register. */
		mcqe1 = _mm_loadu_si128((__m128i *)&mcq[pos % 8]);
		mcqe2 = _mm_loadu_si128((__m128i *)&mcq[pos % 8 + 2]);
		/* B.1 store rearm data to mbuf. */
		_mm_storeu_si128((__m128i *)&elts[pos]->rearm_data, rearm);
		_mm_storeu_si128((__m128i *)&elts[pos + 1]->rearm_data, rearm);
		/* C.1 combine data from mCQEs with rx_descriptor_fields1. */
		rxdf1 = _mm_shuffle_epi8(mcqe1, shuf_mask1);
		rxdf2 = _mm_shuffle_epi8(mcqe1, shuf_mask2);
		rxdf1 = _mm_sub_epi16(rxdf1, crc_adj);
		rxdf2 = _mm_sub_epi16(rxdf2, crc_adj);
		rxdf1 = _mm_blend_epi16(rxdf1, rxdf, 0x23);
		rxdf2 = _mm_blend_epi16(rxdf2, rxdf, 0x23);
		/* D.1 store rx_descriptor_fields1. */
		_mm_storeu_si128((__m128i *)
				 &elts[pos]->rx_descriptor_fields1,
				 rxdf1);
		_mm_storeu_si128((__m128i *)
				 &elts[pos + 1]->rx_descriptor_fields1,
				 rxdf2);
		/* B.1 store rearm data to mbuf. */
		_mm_storeu_si128((__m128i *)&elts[pos + 2]->rearm_data, rearm);
		_mm_storeu_si128((__m128i *)&elts[pos + 3]->rearm_data, rearm);
		/* C.1 combine data from mCQEs with rx_descriptor_fields1. */
		rxdf1 = _mm_shuffle_epi8(mcqe2, shuf_mask1);
		rxdf2 = _mm_shuffle_epi8(mcqe2, shuf_mask2);
		rxdf1 = _mm_sub_epi16(rxdf1, crc_adj);
		rxdf2 = _mm_sub_epi16(rxdf2, crc_adj);
		rxdf1 = _mm_blend_epi16(rxdf1, rxdf, 0x23);
		rxdf2 = _mm_blend_epi16(rxdf2, rxdf, 0x23);
		/* D.1 store rx_descriptor_fields1. */
		_mm_storeu_si128((__m128i *)
				 &elts[pos + 2]->rx_descriptor_fields1,
				 rxdf1);
		_mm_storeu_si128((__m128i *)
				 &elts[pos + 3]->rx_descriptor_fields1,
				 rxdf2);
#ifdef MLX5_PMD_SOFT_COUNTERS
		invalid_mask = _mm_set_epi64x(0,
					      (mcqe_n - pos) *
					      sizeof(uint16_t) * 8);
		invalid_mask = _mm_sll_epi64(ones, invalid_mask);
		mcqe1 = _mm_srli_si128(mcqe1, 4);
		byte_cnt = _mm_blend_epi16(mcqe1, mcqe2, 0xcc);
		byte_cnt = _mm_shuffle_epi8(byte_cnt, len_shuf_mask);
		byte_cnt = _mm_andnot_si128(invalid_mask, byte_cnt);
		byte_cnt = _mm_hadd_epi16(byte_cnt, zero);
		rcvd_byte += _mm_cvtsi128_si64(_mm_hadd_epi16(byte_cnt, zero));
#endif
		if (rxq->mark) {
			/* E.1 store flow tag (rte_flow mark). */
			elts[pos]->hash.fdir.hi = flow_tag;
			elts[pos + 1]->hash.fdir.hi = flow_tag;
			elts[pos + 2]->hash.fdir.hi = flow_tag;
			elts[pos + 3]->hash.fdir.hi = flow_tag;
		}
		pos += MLX5_VPMD_DESCS_PER_LOOP;
		/* Move to next CQE and invalidate consumed CQEs. */
		if (!(pos & 0x7) && pos < mcqe_n) {
			mcq = (void *)(cq + pos);
			for (i = 0; i < 8; ++i)
				cq[inv++].op_own = MLX5_CQE_INVALIDATE;
		}
	}
	/* Invalidate the rest of CQEs. */
	for (; inv < mcqe_n; ++inv)
		cq[inv].op_own = MLX5_CQE_INVALIDATE;
#ifdef MLX5_PMD_SOFT_COUNTERS
	rxq->stats.ipackets += mcqe_n;
	rxq->stats.ibytes += rcvd_byte;
#endif
	rxq->cq_ci += mcqe_n;
	return mcqe_n;
}
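
/*
 * Layout note (added commentary; mcqe_n = 13 is a hypothetical example):
 * a compressed session packs eight 8B mini-CQEs into each 64B CQE slot,
 * which is why the loads above index mcq[pos % 8] and why mcq is re-based
 * onto the next slot (cq + pos) whenever pos crosses a multiple of 8.
 * For mcqe_n = 13, positions 0..7 are read from cq + 1 and positions
 * 8..12 from cq + 8, while consumed entries get MLX5_CQE_INVALIDATE in
 * op_own so they cannot be mistaken for valid completions on a later scan.
 */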
/**
 * Calculate packet type and offload flag for mbuf and store it.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param cqes[4]
 *   Array of four 16bytes completions extracted from the original completion
 *   descriptor.
 * @param op_err
 *   Opcode vector having responder error status. Each field is 4B.
 * @param pkts
 *   Pointer to array of packets to be filled.
 */
static inline void
rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4],
			 __m128i op_err, struct rte_mbuf **pkts)
{
	__m128i pinfo0, pinfo1;
	__m128i pinfo, ptype;
	__m128i ol_flags = _mm_set1_epi32(rxq->rss_hash * PKT_RX_RSS_HASH |
					  rxq->hw_timestamp * PKT_RX_TIMESTAMP);
	__m128i cv_flags;
	const __m128i zero = _mm_setzero_si128();
	const __m128i ptype_mask =
		_mm_set_epi32(0xfd06, 0xfd06, 0xfd06, 0xfd06);
	const __m128i ptype_ol_mask =
		_mm_set_epi32(0x106, 0x106, 0x106, 0x106);
	const __m128i pinfo_mask =
		_mm_set_epi32(0x3, 0x3, 0x3, 0x3);
	const __m128i cv_flag_sel =
		_mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0,
			     (uint8_t)((PKT_RX_IP_CKSUM_GOOD |
					PKT_RX_L4_CKSUM_GOOD) >> 1),
			     0,
			     (uint8_t)(PKT_RX_L4_CKSUM_GOOD >> 1),
			     0,
			     (uint8_t)(PKT_RX_IP_CKSUM_GOOD >> 1),
			     (uint8_t)(PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED),
			     0);
	const __m128i cv_mask =
		_mm_set_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
			      PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
			      PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
			      PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
			      PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
			      PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
			      PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
			      PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
	const __m128i mbuf_init =
		_mm_loadl_epi64((__m128i *)&rxq->mbuf_initializer);
	__m128i rearm0, rearm1, rearm2, rearm3;
	uint8_t pt_idx0, pt_idx1, pt_idx2, pt_idx3;

	/* Extract pkt_info field. */
	pinfo0 = _mm_unpacklo_epi32(cqes[0], cqes[1]);
	pinfo1 = _mm_unpacklo_epi32(cqes[2], cqes[3]);
	pinfo = _mm_unpacklo_epi64(pinfo0, pinfo1);
	/* Extract hdr_type_etc field. */
	pinfo0 = _mm_unpackhi_epi32(cqes[0], cqes[1]);
	pinfo1 = _mm_unpackhi_epi32(cqes[2], cqes[3]);
	ptype = _mm_unpacklo_epi64(pinfo0, pinfo1);
	if (rxq->mark) {
		const __m128i pinfo_ft_mask =
			_mm_set_epi32(0xffffff00, 0xffffff00,
				      0xffffff00, 0xffffff00);
		const __m128i fdir_flags = _mm_set1_epi32(PKT_RX_FDIR);
		__m128i fdir_id_flags = _mm_set1_epi32(PKT_RX_FDIR_ID);
		__m128i flow_tag, invalid_mask;

		flow_tag = _mm_and_si128(pinfo, pinfo_ft_mask);
		/* Check if flow tag is non-zero then set PKT_RX_FDIR. */
		invalid_mask = _mm_cmpeq_epi32(flow_tag, zero);
		ol_flags = _mm_or_si128(ol_flags,
					_mm_andnot_si128(invalid_mask,
							 fdir_flags));
		/* Mask out invalid entries. */
		fdir_id_flags = _mm_andnot_si128(invalid_mask, fdir_id_flags);
		/* Check if flow tag equals MLX5_FLOW_MARK_DEFAULT. */
		ol_flags = _mm_or_si128(ol_flags,
					_mm_andnot_si128(
						_mm_cmpeq_epi32(flow_tag,
								pinfo_ft_mask),
						fdir_id_flags));
	}
	/*
	 * Merge the two fields to generate the following:
	 * bit[1]     = l3_ok
	 * bit[2]     = l4_ok
	 * bit[8]     = cv
	 * bit[11:10] = l3_hdr_type
	 * bit[14:12] = l4_hdr_type
	 * bit[15]    = ip_frag
	 * bit[16]    = tunneled
	 * bit[17]    = outer_l3_type
	 */
	ptype = _mm_and_si128(ptype, ptype_mask);
	pinfo = _mm_and_si128(pinfo, pinfo_mask);
	pinfo = _mm_slli_epi32(pinfo, 16);
	/* Make pinfo carry the merged fields for ol_flags calculation. */
	pinfo = _mm_or_si128(ptype, pinfo);
	ptype = _mm_srli_epi32(pinfo, 10);
	ptype = _mm_packs_epi32(ptype, zero);
	/* Errored packets will have RTE_PTYPE_ALL_MASK. */
	op_err = _mm_srli_epi16(op_err, 8);
	ptype = _mm_or_si128(ptype, op_err);
	pt_idx0 = _mm_extract_epi8(ptype, 0);
	pt_idx1 = _mm_extract_epi8(ptype, 2);
	pt_idx2 = _mm_extract_epi8(ptype, 4);
	pt_idx3 = _mm_extract_epi8(ptype, 6);
	pkts[0]->packet_type = mlx5_ptype_table[pt_idx0] |
			       !!(pt_idx0 & (1 << 6)) * rxq->tunnel;
	pkts[1]->packet_type = mlx5_ptype_table[pt_idx1] |
			       !!(pt_idx1 & (1 << 6)) * rxq->tunnel;
	pkts[2]->packet_type = mlx5_ptype_table[pt_idx2] |
			       !!(pt_idx2 & (1 << 6)) * rxq->tunnel;
	pkts[3]->packet_type = mlx5_ptype_table[pt_idx3] |
			       !!(pt_idx3 & (1 << 6)) * rxq->tunnel;
	/* Fill flags for checksum and VLAN. */
	pinfo = _mm_and_si128(pinfo, ptype_ol_mask);
	pinfo = _mm_shuffle_epi8(cv_flag_sel, pinfo);
	/* Locate checksum flags at byte[2:1] and merge with VLAN flags. */
	cv_flags = _mm_slli_epi32(pinfo, 9);
	cv_flags = _mm_or_si128(pinfo, cv_flags);
	/* Move back flags to start from byte[0]. */
	cv_flags = _mm_srli_epi32(cv_flags, 8);
	/* Mask out garbage bits. */
	cv_flags = _mm_and_si128(cv_flags, cv_mask);
	/* Merge to ol_flags. */
	ol_flags = _mm_or_si128(ol_flags, cv_flags);
	/* Merge mbuf_init and ol_flags. */
	rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(ol_flags, 8), 0x30);
	rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(ol_flags, 4), 0x30);
	rearm2 = _mm_blend_epi16(mbuf_init, ol_flags, 0x30);
	rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(ol_flags, 4), 0x30);
	/* Write 8B rearm_data and 8B ol_flags. */
	_mm_store_si128((__m128i *)&pkts[0]->rearm_data, rearm0);
	_mm_store_si128((__m128i *)&pkts[1]->rearm_data, rearm1);
	_mm_store_si128((__m128i *)&pkts[2]->rearm_data, rearm2);
	_mm_store_si128((__m128i *)&pkts[3]->rearm_data, rearm3);
}
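
/*
 * Index note (added commentary; the packet type is a hypothetical
 * example): after the merge above, shifting the combined field right by
 * 10 and packing leaves an 8-bit table index per completion: bits [1:0]
 * l3_hdr_type, [4:2] l4_hdr_type, bit 5 ip_frag, bit 6 tunneled, bit 7
 * outer_l3_type. A plain IPv4/TCP completion would then pick a
 * mlx5_ptype_table[] entry such as RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
 * RTE_PTYPE_L4_TCP, and bit 6 of the index is what gates OR-ing
 * rxq->tunnel into packet_type via !!(pt_idx & (1 << 6)).
 */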
/**
 * Receive burst of packets. An errored completion also consumes an mbuf, but
 * the packet_type is set to be RTE_PTYPE_ALL_MASK. Marked mbufs should be
 * freed before returning to application.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 * @param[out] err
 *   Pointer to a flag. Set non-zero value if pkts array has at least one error
 *   packet to handle.
 *
 * @return
 *   Number of packets received including errors (<= pkts_n).
 */
static inline uint16_t
rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
	    uint64_t *err)
{
	const uint16_t q_n = 1 << rxq->cqe_n;
	const uint16_t q_mask = q_n - 1;
	volatile struct mlx5_cqe *cq;
	struct rte_mbuf **elts;
	unsigned int pos;
	uint64_t n;
	uint16_t repl_n;
	uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP;
	uint16_t nocmp_n = 0;
	uint16_t rcvd_pkt = 0;
	unsigned int cq_idx = rxq->cq_ci & q_mask;
	unsigned int elts_idx;
	unsigned int ownership = !!(rxq->cq_ci & (q_mask + 1));
	const __m128i owner_check =
		_mm_set_epi64x(0x0100000001000000LL, 0x0100000001000000LL);
	const __m128i opcode_check =
		_mm_set_epi64x(0xf0000000f0000000LL, 0xf0000000f0000000LL);
	const __m128i format_check =
		_mm_set_epi64x(0x0c0000000c000000LL, 0x0c0000000c000000LL);
	const __m128i resp_err_check =
		_mm_set_epi64x(0xe0000000e0000000LL, 0xe0000000e0000000LL);
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint32_t rcvd_byte = 0;
	/* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */
	const __m128i len_shuf_mask =
		_mm_set_epi8(-1, -1, -1, -1,
			     -1, -1, -1, -1,
			     12, 13, 8, 9,
			     4, 5, 0, 1);
#endif
	/* Mask to shuffle from extracted CQE to mbuf. */
	const __m128i shuf_mask =
		_mm_set_epi8(-1, 3, 2, 1, /* fdir.hi */
			     12, 13, 14, 15, /* rss, bswap32 */
			     10, 11, /* vlan_tci, bswap16 */
			     4, 5, /* data_len, bswap16 */
			     -1, -1, /* zero out 2nd half of pkt_len */
			     4, 5 /* pkt_len, bswap16 */);
	/* Mask to blend from the last Qword to the first DQword. */
	const __m128i blend_mask =
		_mm_set_epi8(-1, -1, -1, -1,
			     -1, -1, -1, -1,
			     0, 0, 0, 0,
			     0, 0, 0, -1);
	const __m128i zero = _mm_setzero_si128();
	const __m128i ones = _mm_cmpeq_epi32(zero, zero);
	const __m128i crc_adj =
		_mm_set_epi16(0, 0, 0, 0, 0,
			      rxq->crc_present * RTE_ETHER_CRC_LEN,
			      0,
			      rxq->crc_present * RTE_ETHER_CRC_LEN);
	const __m128i flow_mark_adj = _mm_set_epi32(rxq->mark * (-1), 0, 0, 0);

	assert(rxq->sges_n == 0);
	assert(rxq->cqe_n == rxq->elts_n);
	cq = &(*rxq->cqes)[cq_idx];
	rte_prefetch0(cq);
	rte_prefetch0(cq + 1);
	rte_prefetch0(cq + 2);
	rte_prefetch0(cq + 3);
	pkts_n = RTE_MIN(pkts_n, MLX5_VPMD_RX_MAX_BURST);
	repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
	if (repl_n >= rxq->rq_repl_thresh)
		mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
	/* See if there are unreturned mbufs from compressed CQE. */
	rcvd_pkt = rxq->decompressed;
	if (rcvd_pkt > 0) {
		rcvd_pkt = RTE_MIN(rcvd_pkt, pkts_n);
		rxq_copy_mbuf_v(rxq, pkts, rcvd_pkt);
		rxq->rq_pi += rcvd_pkt;
		rxq->decompressed -= rcvd_pkt;
		pkts += rcvd_pkt;
	}
	elts_idx = rxq->rq_pi & q_mask;
	elts = &(*rxq->elts)[elts_idx];
	/* Not to overflow pkts array. */
	pkts_n = RTE_ALIGN_FLOOR(pkts_n - rcvd_pkt, MLX5_VPMD_DESCS_PER_LOOP);
	/* Not to cross queue end. */
	pkts_n = RTE_MIN(pkts_n, q_n - elts_idx);
	pkts_n = RTE_MIN(pkts_n, q_n - cq_idx);
	if (!pkts_n)
		return rcvd_pkt;
	/* At this point, there shouldn't be any remaining packets. */
	assert(rxq->decompressed == 0);
	/*
	 * A. load first Qword (8bytes) in one loop.
	 * B. copy 4 mbuf pointers from elts ring to returning pkts.
	 * C. load remaining CQE data and extract necessary fields.
	 *    Final 16bytes cqes[] extracted from original 64bytes CQE has the
	 *    following structure:
	 *        struct {
	 *            uint8_t  pkt_info;
	 *            uint8_t  flow_tag[3];
	 *            uint16_t byte_cnt;
	 *            uint8_t  rsvd4;
	 *            uint8_t  op_own;
	 *            uint16_t hdr_type_etc;
	 *            uint16_t vlan_info;
	 *            uint32_t rx_has_res;
	 *        } c;
	 * D. fill in mbuf.
	 * E. get valid CQEs.
	 * F. find compressed CQE.
	 */
	for (pos = 0;
	     pos < pkts_n;
	     pos += MLX5_VPMD_DESCS_PER_LOOP) {
		__m128i cqes[MLX5_VPMD_DESCS_PER_LOOP];
		__m128i cqe_tmp1, cqe_tmp2;
		__m128i pkt_mb0, pkt_mb1, pkt_mb2, pkt_mb3;
		__m128i op_own, op_own_tmp1, op_own_tmp2;
		__m128i opcode, owner_mask, invalid_mask;
		__m128i comp_mask;
		__m128i mask;
#ifdef MLX5_PMD_SOFT_COUNTERS
		__m128i byte_cnt;
#endif
		__m128i mbp1, mbp2;
		__m128i p = _mm_set_epi16(0, 0, 0, 0, 3, 2, 1, 0);
		unsigned int p1, p2, p3;

		/* Prefetch next 4 CQEs. */
		if (pkts_n - pos >= 2 * MLX5_VPMD_DESCS_PER_LOOP) {
			rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP]);
			rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 1]);
			rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 2]);
			rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 3]);
		}
		/* A.0 do not cross the end of CQ. */
		mask = _mm_set_epi64x(0, (pkts_n - pos) * sizeof(uint16_t) * 8);
		mask = _mm_sll_epi64(ones, mask);
		p = _mm_andnot_si128(mask, p);
		/* A.1 load cqes. */
		p3 = _mm_extract_epi16(p, 3);
		cqes[3] = _mm_loadl_epi64((__m128i *)
					   &cq[pos + p3].sop_drop_qpn);
		rte_compiler_barrier();
		p2 = _mm_extract_epi16(p, 2);
		cqes[2] = _mm_loadl_epi64((__m128i *)
					   &cq[pos + p2].sop_drop_qpn);
		rte_compiler_barrier();
		/* B.1 load mbuf pointers. */
		mbp1 = _mm_loadu_si128((__m128i *)&elts[pos]);
		mbp2 = _mm_loadu_si128((__m128i *)&elts[pos + 2]);
		/* A.1 load a block having op_own. */
		p1 = _mm_extract_epi16(p, 1);
		cqes[1] = _mm_loadl_epi64((__m128i *)
					   &cq[pos + p1].sop_drop_qpn);
		rte_compiler_barrier();
		cqes[0] = _mm_loadl_epi64((__m128i *)
					   &cq[pos].sop_drop_qpn);
		/* B.2 copy mbuf pointers. */
		_mm_storeu_si128((__m128i *)&pkts[pos], mbp1);
		_mm_storeu_si128((__m128i *)&pkts[pos + 2], mbp2);
		rte_cio_rmb();
		/* C.1 load remaining CQE data and extract necessary fields. */
		cqe_tmp2 = _mm_load_si128((__m128i *)&cq[pos + p3]);
		cqe_tmp1 = _mm_load_si128((__m128i *)&cq[pos + p2]);
		cqes[3] = _mm_blendv_epi8(cqes[3], cqe_tmp2, blend_mask);
		cqes[2] = _mm_blendv_epi8(cqes[2], cqe_tmp1, blend_mask);
		cqe_tmp2 = _mm_loadu_si128((__m128i *)&cq[pos + p3].rsvd1[3]);
		cqe_tmp1 = _mm_loadu_si128((__m128i *)&cq[pos + p2].rsvd1[3]);
		cqes[3] = _mm_blend_epi16(cqes[3], cqe_tmp2, 0x30);
		cqes[2] = _mm_blend_epi16(cqes[2], cqe_tmp1, 0x30);
		cqe_tmp2 = _mm_loadl_epi64((__m128i *)&cq[pos + p3].rsvd2[10]);
		cqe_tmp1 = _mm_loadl_epi64((__m128i *)&cq[pos + p2].rsvd2[10]);
		cqes[3] = _mm_blend_epi16(cqes[3], cqe_tmp2, 0x04);
		cqes[2] = _mm_blend_epi16(cqes[2], cqe_tmp1, 0x04);
		/* C.2 generate final structure for mbuf with swapping bytes. */
		pkt_mb3 = _mm_shuffle_epi8(cqes[3], shuf_mask);
		pkt_mb2 = _mm_shuffle_epi8(cqes[2], shuf_mask);
		/* C.3 adjust CRC length. */
		pkt_mb3 = _mm_sub_epi16(pkt_mb3, crc_adj);
		pkt_mb2 = _mm_sub_epi16(pkt_mb2, crc_adj);
		/* C.4 adjust flow mark. */
		pkt_mb3 = _mm_add_epi32(pkt_mb3, flow_mark_adj);
		pkt_mb2 = _mm_add_epi32(pkt_mb2, flow_mark_adj);
		/* D.1 fill in mbuf - rx_descriptor_fields1. */
		_mm_storeu_si128((void *)&pkts[pos + 3]->pkt_len, pkt_mb3);
		_mm_storeu_si128((void *)&pkts[pos + 2]->pkt_len, pkt_mb2);
		/* E.1 extract op_own field. */
		op_own_tmp2 = _mm_unpacklo_epi32(cqes[2], cqes[3]);
		/* C.1 load remaining CQE data and extract necessary fields. */
		cqe_tmp2 = _mm_load_si128((__m128i *)&cq[pos + p1]);
		cqe_tmp1 = _mm_load_si128((__m128i *)&cq[pos]);
		cqes[1] = _mm_blendv_epi8(cqes[1], cqe_tmp2, blend_mask);
		cqes[0] = _mm_blendv_epi8(cqes[0], cqe_tmp1, blend_mask);
		cqe_tmp2 = _mm_loadu_si128((__m128i *)&cq[pos + p1].rsvd1[3]);
		cqe_tmp1 = _mm_loadu_si128((__m128i *)&cq[pos].rsvd1[3]);
		cqes[1] = _mm_blend_epi16(cqes[1], cqe_tmp2, 0x30);
		cqes[0] = _mm_blend_epi16(cqes[0], cqe_tmp1, 0x30);
		cqe_tmp2 = _mm_loadl_epi64((__m128i *)&cq[pos + p1].rsvd2[10]);
		cqe_tmp1 = _mm_loadl_epi64((__m128i *)&cq[pos].rsvd2[10]);
		cqes[1] = _mm_blend_epi16(cqes[1], cqe_tmp2, 0x04);
		cqes[0] = _mm_blend_epi16(cqes[0], cqe_tmp1, 0x04);
		/* C.2 generate final structure for mbuf with swapping bytes. */
		pkt_mb1 = _mm_shuffle_epi8(cqes[1], shuf_mask);
		pkt_mb0 = _mm_shuffle_epi8(cqes[0], shuf_mask);
		/* C.3 adjust CRC length. */
		pkt_mb1 = _mm_sub_epi16(pkt_mb1, crc_adj);
		pkt_mb0 = _mm_sub_epi16(pkt_mb0, crc_adj);
		/* C.4 adjust flow mark. */
		pkt_mb1 = _mm_add_epi32(pkt_mb1, flow_mark_adj);
		pkt_mb0 = _mm_add_epi32(pkt_mb0, flow_mark_adj);
		/* E.1 extract op_own byte. */
		op_own_tmp1 = _mm_unpacklo_epi32(cqes[0], cqes[1]);
		op_own = _mm_unpackhi_epi64(op_own_tmp1, op_own_tmp2);
		/* D.1 fill in mbuf - rx_descriptor_fields1. */
		_mm_storeu_si128((void *)&pkts[pos + 1]->pkt_len, pkt_mb1);
		_mm_storeu_si128((void *)&pkts[pos]->pkt_len, pkt_mb0);
		/* E.2 flip owner bit to mark CQEs from last round. */
		owner_mask = _mm_and_si128(op_own, owner_check);
		if (ownership)
			owner_mask = _mm_xor_si128(owner_mask, owner_check);
		owner_mask = _mm_cmpeq_epi32(owner_mask, owner_check);
		owner_mask = _mm_packs_epi32(owner_mask, zero);
		/* E.3 get mask for invalidated CQEs. */
		opcode = _mm_and_si128(op_own, opcode_check);
		invalid_mask = _mm_cmpeq_epi32(opcode_check, opcode);
		invalid_mask = _mm_packs_epi32(invalid_mask, zero);
		/* E.4 mask out beyond boundary. */
		invalid_mask = _mm_or_si128(invalid_mask, mask);
		/* E.5 merge invalid_mask with invalid owner. */
		invalid_mask = _mm_or_si128(invalid_mask, owner_mask);
		/* F.1 find compressed CQE format. */
		comp_mask = _mm_and_si128(op_own, format_check);
		comp_mask = _mm_cmpeq_epi32(comp_mask, format_check);
		comp_mask = _mm_packs_epi32(comp_mask, zero);
		/* F.2 mask out invalid entries. */
		comp_mask = _mm_andnot_si128(invalid_mask, comp_mask);
		comp_idx = _mm_cvtsi128_si64(comp_mask);
		/* F.3 get the first compressed CQE. */
		comp_idx = comp_idx ?
			   __builtin_ctzll(comp_idx) /
			   (sizeof(uint16_t) * 8) :
			   MLX5_VPMD_DESCS_PER_LOOP;
		/* E.6 mask out entries after the compressed CQE. */
		mask = _mm_set_epi64x(0, comp_idx * sizeof(uint16_t) * 8);
		mask = _mm_sll_epi64(ones, mask);
		invalid_mask = _mm_or_si128(invalid_mask, mask);
		/* E.7 count non-compressed valid CQEs. */
		n = _mm_cvtsi128_si64(invalid_mask);
		n = n ? __builtin_ctzll(n) / (sizeof(uint16_t) * 8) :
		       MLX5_VPMD_DESCS_PER_LOOP;
		nocmp_n += n;
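		/*
		 * Counting note (added commentary; the lane values are a
		 * hypothetical example): invalid_mask holds one 16-bit lane
		 * per CQE in its low 64 bits, 0xffff for invalid and 0x0000
		 * for valid. Counting trailing zeros and dividing by 16
		 * yields the number of leading valid CQEs, e.g. lanes
		 * {0x0000, 0x0000, 0xffff, 0xffff} read as
		 * 0xffffffff00000000, so n = ctz / 16 = 32 / 16 = 2.
		 */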
		/* D.2 get the final invalid mask. */
		mask = _mm_set_epi64x(0, n * sizeof(uint16_t) * 8);
		mask = _mm_sll_epi64(ones, mask);
		invalid_mask = _mm_or_si128(invalid_mask, mask);
		/* D.3 check error in opcode. */
		opcode = _mm_cmpeq_epi32(resp_err_check, opcode);
		opcode = _mm_packs_epi32(opcode, zero);
		opcode = _mm_andnot_si128(invalid_mask, opcode);
		/* D.4 mark if any error is set. */
		*err |= _mm_cvtsi128_si64(opcode);
		/* D.5 fill in mbuf - rearm_data and packet_type. */
		rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]);
		if (rxq->hw_timestamp) {
			pkts[pos]->timestamp =
				rte_be_to_cpu_64(cq[pos].timestamp);
			pkts[pos + 1]->timestamp =
				rte_be_to_cpu_64(cq[pos + p1].timestamp);
			pkts[pos + 2]->timestamp =
				rte_be_to_cpu_64(cq[pos + p2].timestamp);
			pkts[pos + 3]->timestamp =
				rte_be_to_cpu_64(cq[pos + p3].timestamp);
		}
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Add up received bytes count. */
		byte_cnt = _mm_shuffle_epi8(op_own, len_shuf_mask);
		byte_cnt = _mm_andnot_si128(invalid_mask, byte_cnt);
		byte_cnt = _mm_hadd_epi16(byte_cnt, zero);
		rcvd_byte += _mm_cvtsi128_si64(_mm_hadd_epi16(byte_cnt, zero));
#endif
		/*
		 * Break the loop unless more valid CQEs are expected, or if
		 * there's a compressed CQE.
		 */
		if (n != MLX5_VPMD_DESCS_PER_LOOP)
			break;
	}
	/* If no new CQE seen, return without updating cq_db. */
	if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP))
		return rcvd_pkt;
	/* Update the consumer indexes for non-compressed CQEs. */
	assert(nocmp_n <= pkts_n);
	rxq->cq_ci += nocmp_n;
	rxq->rq_pi += nocmp_n;
	rcvd_pkt += nocmp_n;
#ifdef MLX5_PMD_SOFT_COUNTERS
	rxq->stats.ipackets += nocmp_n;
	rxq->stats.ibytes += rcvd_byte;
#endif
	/* Decompress the last CQE if compressed. */
	if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n) {
		assert(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
		rxq->decompressed = rxq_cq_decompress_v(rxq, &cq[nocmp_n],
							&elts[nocmp_n]);
		/* Return more packets if needed. */
		if (nocmp_n < pkts_n) {
			uint16_t n = rxq->decompressed;

			n = RTE_MIN(n, pkts_n - nocmp_n);
			rxq_copy_mbuf_v(rxq, &pkts[nocmp_n], n);
			rxq->rq_pi += n;
			rcvd_pkt += n;
			rxq->decompressed -= n;
		}
	}
	rte_compiler_barrier();
	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
	return rcvd_pkt;
}

#endif /* RTE_PMD_MLX5_RXTX_VEC_SSE_H_ */