/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_RXTX_VEC_SSE_H_
#define RTE_PMD_MLX5_RXTX_VEC_SSE_H_

#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include <smmintrin.h>

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>

#include <mlx5_prm.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rxtx_vec.h"
#include "mlx5_autoconf.h"

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif

/**
 * Store free buffers to RX SW ring.
 *
 * @param elts
 *   Pointer to SW ring to be filled.
 * @param pkts
 *   Pointer to array of packets to be stored.
 * @param n
 *   Number of packets to be stored.
 */
static inline void
rxq_copy_mbuf_v(struct rte_mbuf **elts, struct rte_mbuf **pkts, uint16_t n)
{
        unsigned int pos;
        uint16_t p = n & -2;

        for (pos = 0; pos < p; pos += 2) {
                __m128i mbp;

                mbp = _mm_loadu_si128((__m128i *)&elts[pos]);
                _mm_storeu_si128((__m128i *)&pkts[pos], mbp);
        }
        if (n & 1)
                pkts[pos] = elts[pos];
}

/**
 * Decompress a compressed completion and fill in mbufs in RX SW ring with data
 * extracted from the title completion descriptor.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param cq
 *   Pointer to completion array having a compressed completion at first.
 * @param elts
 *   Pointer to SW ring to be filled. The first mbuf has to be pre-built from
 *   the title completion descriptor to be copied to the rest of mbufs.
 *
 * @return
 *   Number of mini-CQEs successfully decompressed.
 */
static inline uint16_t
rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
                    struct rte_mbuf **elts)
{
        volatile struct mlx5_mini_cqe8 *mcq = (void *)(cq + 1);
        struct rte_mbuf *t_pkt = elts[0]; /* Title packet is pre-built. */
        unsigned int pos;
        unsigned int i;
        unsigned int inv = 0;
        /* Mask to shuffle from extracted mini CQE to mbuf. */
        const __m128i shuf_mask1 =
                _mm_set_epi8(0,  1,  2,  3, /* rss, bswap32 */
                             -1, -1,        /* skip vlan_tci */
                              6,  7,        /* data_len, bswap16 */
                             -1, -1,  6, 7, /* pkt_len, bswap16 */
                             -1, -1, -1, -1 /* skip packet_type */);
        const __m128i shuf_mask2 =
                _mm_set_epi8(8,  9, 10, 11, /* rss, bswap32 */
                             -1, -1,        /* skip vlan_tci */
                             14, 15,        /* data_len, bswap16 */
                             -1, -1, 14, 15,/* pkt_len, bswap16 */
                             -1, -1, -1, -1 /* skip packet_type */);
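        /*
         * Each 128-bit load in the loop below covers a pair of 8-byte
         * mini-CQEs. shuf_mask1 expands the first mini-CQE of the pair and
         * shuf_mask2 the second one into the rx_descriptor_fields1 layout,
         * byte-swapping the 16-bit lengths and the 32-bit RSS hash on the way.
         */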
        /* Restore the compressed count. Must be 16 bits. */
        const uint16_t mcqe_n = t_pkt->data_len +
                                (rxq->crc_present * RTE_ETHER_CRC_LEN);
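        /*
         * The title packet was pre-built from the compressed CQE, whose byte
         * count field carries the number of mini-CQEs rather than a packet
         * length; adding back the CRC adjustment applied at pre-build time
         * restores that count.
         */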
        const __m128i rearm =
                _mm_loadu_si128((__m128i *)&t_pkt->rearm_data);
        const __m128i rxdf =
                _mm_loadu_si128((__m128i *)&t_pkt->rx_descriptor_fields1);
        const __m128i crc_adj =
                _mm_set_epi16(0, 0, 0,
                              rxq->crc_present * RTE_ETHER_CRC_LEN,
                              0,
                              rxq->crc_present * RTE_ETHER_CRC_LEN,
                              0, 0);
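        /*
         * crc_adj lines up with the pkt_len and data_len words of
         * rx_descriptor_fields1, so one 16-bit subtraction strips the CRC
         * from both length fields when the HW leaves the CRC in place.
         */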
        const uint32_t flow_tag = t_pkt->hash.fdir.hi;
#ifdef MLX5_PMD_SOFT_COUNTERS
        const __m128i zero = _mm_setzero_si128();
        const __m128i ones = _mm_cmpeq_epi32(zero, zero);
        uint32_t rcvd_byte = 0;
        /* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */
        const __m128i len_shuf_mask =
                _mm_set_epi8(-1, -1, -1, -1,
                             -1, -1, -1, -1,
                             14, 15,  6,  7,
                             10, 11,  2,  3);
#endif
        /*
         * A. load mCQEs into a 128bit register.
         * B. store rearm data to mbuf.
         * C. combine data from mCQEs with rx_descriptor_fields1.
         * D. store rx_descriptor_fields1.
         * E. store flow tag (rte_flow mark).
         */
        for (pos = 0; pos < mcqe_n; ) {
                __m128i mcqe1, mcqe2;
                __m128i rxdf1, rxdf2;
#ifdef MLX5_PMD_SOFT_COUNTERS
                __m128i byte_cnt, invalid_mask;
#endif

                for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
                        if (likely(pos + i < mcqe_n))
                                rte_prefetch0((void *)(cq + pos + i));
                /* A.1 load mCQEs into a 128bit register. */
                mcqe1 = _mm_loadu_si128((__m128i *)&mcq[pos % 8]);
                mcqe2 = _mm_loadu_si128((__m128i *)&mcq[pos % 8 + 2]);
                /* B.1 store rearm data to mbuf. */
                _mm_storeu_si128((__m128i *)&elts[pos]->rearm_data, rearm);
                _mm_storeu_si128((__m128i *)&elts[pos + 1]->rearm_data, rearm);
                /* C.1 combine data from mCQEs with rx_descriptor_fields1. */
                rxdf1 = _mm_shuffle_epi8(mcqe1, shuf_mask1);
                rxdf2 = _mm_shuffle_epi8(mcqe1, shuf_mask2);
                rxdf1 = _mm_sub_epi16(rxdf1, crc_adj);
                rxdf2 = _mm_sub_epi16(rxdf2, crc_adj);
                rxdf1 = _mm_blend_epi16(rxdf1, rxdf, 0x23);
                rxdf2 = _mm_blend_epi16(rxdf2, rxdf, 0x23);
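                /*
                 * The 0x23 blend keeps words 0-1 (packet_type) and word 5
                 * (vlan_tci) from the title descriptor and takes pkt_len,
                 * data_len and the RSS hash from the shuffled mini-CQE.
                 */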
                /* D.1 store rx_descriptor_fields1. */
                _mm_storeu_si128((__m128i *)
                                 &elts[pos]->rx_descriptor_fields1,
                                 rxdf1);
                _mm_storeu_si128((__m128i *)
                                 &elts[pos + 1]->rx_descriptor_fields1,
                                 rxdf2);
                /* B.1 store rearm data to mbuf. */
                _mm_storeu_si128((__m128i *)&elts[pos + 2]->rearm_data, rearm);
                _mm_storeu_si128((__m128i *)&elts[pos + 3]->rearm_data, rearm);
                /* C.1 combine data from mCQEs with rx_descriptor_fields1. */
                rxdf1 = _mm_shuffle_epi8(mcqe2, shuf_mask1);
                rxdf2 = _mm_shuffle_epi8(mcqe2, shuf_mask2);
                rxdf1 = _mm_sub_epi16(rxdf1, crc_adj);
                rxdf2 = _mm_sub_epi16(rxdf2, crc_adj);
                rxdf1 = _mm_blend_epi16(rxdf1, rxdf, 0x23);
                rxdf2 = _mm_blend_epi16(rxdf2, rxdf, 0x23);
                /* D.1 store rx_descriptor_fields1. */
                _mm_storeu_si128((__m128i *)
                                 &elts[pos + 2]->rx_descriptor_fields1,
                                 rxdf1);
                _mm_storeu_si128((__m128i *)
                                 &elts[pos + 3]->rx_descriptor_fields1,
                                 rxdf2);
#ifdef MLX5_PMD_SOFT_COUNTERS
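                /*
                 * Gather the four mini-CQE byte counts, byte-swap them, mask
                 * off entries past mcqe_n (the all-ones pattern shifted left
                 * by the number of remaining valid entries times 16 bits),
                 * and accumulate them with two horizontal adds.
                 */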
                invalid_mask = _mm_set_epi64x(0,
                                              (mcqe_n - pos) *
                                              sizeof(uint16_t) * 8);
                invalid_mask = _mm_sll_epi64(ones, invalid_mask);
                mcqe1 = _mm_srli_si128(mcqe1, 4);
                byte_cnt = _mm_blend_epi16(mcqe1, mcqe2, 0xcc);
                byte_cnt = _mm_shuffle_epi8(byte_cnt, len_shuf_mask);
                byte_cnt = _mm_andnot_si128(invalid_mask, byte_cnt);
                byte_cnt = _mm_hadd_epi16(byte_cnt, zero);
                rcvd_byte += _mm_cvtsi128_si64(_mm_hadd_epi16(byte_cnt, zero));
#endif
                /* E.1 store flow tag (rte_flow mark). */
                elts[pos]->hash.fdir.hi = flow_tag;
                elts[pos + 1]->hash.fdir.hi = flow_tag;
                elts[pos + 2]->hash.fdir.hi = flow_tag;
                elts[pos + 3]->hash.fdir.hi = flow_tag;
                if (rxq->dynf_meta) {
                        int32_t offs = rxq->flow_meta_offset;
                        const uint32_t meta =
                                *RTE_MBUF_DYNFIELD(t_pkt, offs, uint32_t *);

                        /* Check if title packet has valid metadata. */
                        if (meta) {
                                MLX5_ASSERT(t_pkt->ol_flags &
                                            rxq->flow_meta_mask);
                                *RTE_MBUF_DYNFIELD(elts[pos], offs,
                                                   uint32_t *) = meta;
                                *RTE_MBUF_DYNFIELD(elts[pos + 1], offs,
                                                   uint32_t *) = meta;
                                *RTE_MBUF_DYNFIELD(elts[pos + 2], offs,
                                                   uint32_t *) = meta;
                                *RTE_MBUF_DYNFIELD(elts[pos + 3], offs,
                                                   uint32_t *) = meta;
                        }
                }
                pos += MLX5_VPMD_DESCS_PER_LOOP;
                /* Move to next CQE and invalidate consumed CQEs. */
                if (!(pos & 0x7) && pos < mcqe_n) {
                        if (pos + 8 < mcqe_n)
                                rte_prefetch0((void *)(cq + pos + 8));
                        mcq = (void *)(cq + pos);
                        for (i = 0; i < 8; ++i)
                                cq[inv++].op_own = MLX5_CQE_INVALIDATE;
                }
        }
        /* Invalidate the rest of CQEs. */
        for (; inv < mcqe_n; ++inv)
                cq[inv].op_own = MLX5_CQE_INVALIDATE;
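        /*
         * All mcqe_n CQ slots covered by the compressed session are
         * overwritten with MLX5_CQE_INVALIDATE so that later polls do not
         * mistake the stale entries for valid completions.
         */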
#ifdef MLX5_PMD_SOFT_COUNTERS
        rxq->stats.ipackets += mcqe_n;
        rxq->stats.ibytes += rcvd_byte;
#endif
        return mcqe_n;
}

/**
 * Calculate packet type and offload flag for mbuf and store it.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param cqes[4]
 *   Array of four 16-byte completions extracted from the original completion
 *   descriptor.
 * @param op_err
 *   Opcode vector having responder error status. Each field is 4B.
 * @param pkts
 *   Pointer to array of packets to be filled.
 */
static inline void
rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4],
                         __m128i op_err, struct rte_mbuf **pkts)
{
        __m128i pinfo0, pinfo1;
        __m128i pinfo, ptype;
        __m128i ol_flags = _mm_set1_epi32(rxq->rss_hash * PKT_RX_RSS_HASH |
                                          rxq->hw_timestamp * rxq->timestamp_rx_flag);
        __m128i cv_flags;
        const __m128i zero = _mm_setzero_si128();
        const __m128i ptype_mask =
                _mm_set_epi32(0xfd06, 0xfd06, 0xfd06, 0xfd06);
        const __m128i ptype_ol_mask =
                _mm_set_epi32(0x106, 0x106, 0x106, 0x106);
        const __m128i pinfo_mask =
                _mm_set_epi32(0x3, 0x3, 0x3, 0x3);
        const __m128i cv_flag_sel =
                _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0,
                             (uint8_t)((PKT_RX_IP_CKSUM_GOOD |
                                        PKT_RX_L4_CKSUM_GOOD) >> 1),
                             0,
                             (uint8_t)(PKT_RX_L4_CKSUM_GOOD >> 1),
                             0,
                             (uint8_t)(PKT_RX_IP_CKSUM_GOOD >> 1),
                             (uint8_t)(PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED),
                             0);
        const __m128i cv_mask =
                _mm_set_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
                              PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
                              PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
                              PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
                              PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
                              PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
                              PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
                              PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
        const __m128i mbuf_init =
                _mm_load_si128((__m128i *)&rxq->mbuf_initializer);
        __m128i rearm0, rearm1, rearm2, rearm3;
        uint8_t pt_idx0, pt_idx1, pt_idx2, pt_idx3;

        /* Extract pkt_info field. */
        pinfo0 = _mm_unpacklo_epi32(cqes[0], cqes[1]);
        pinfo1 = _mm_unpacklo_epi32(cqes[2], cqes[3]);
        pinfo = _mm_unpacklo_epi64(pinfo0, pinfo1);
        /* Extract hdr_type_etc field. */
        pinfo0 = _mm_unpackhi_epi32(cqes[0], cqes[1]);
        pinfo1 = _mm_unpackhi_epi32(cqes[2], cqes[3]);
        ptype = _mm_unpacklo_epi64(pinfo0, pinfo1);
        if (rxq->mark) {
                const __m128i pinfo_ft_mask = _mm_set1_epi32(0xffffff00);
                const __m128i fdir_flags = _mm_set1_epi32(PKT_RX_FDIR);
                __m128i fdir_id_flags = _mm_set1_epi32(PKT_RX_FDIR_ID);
                __m128i flow_tag, invalid_mask;

                flow_tag = _mm_and_si128(pinfo, pinfo_ft_mask);
                /* Check if flow tag is non-zero then set PKT_RX_FDIR. */
                invalid_mask = _mm_cmpeq_epi32(flow_tag, zero);
                ol_flags = _mm_or_si128(ol_flags,
                                        _mm_andnot_si128(invalid_mask,
                                                         fdir_flags));
                /* Mask out invalid entries. */
                fdir_id_flags = _mm_andnot_si128(invalid_mask, fdir_id_flags);
                /* Check if flow tag is MLX5_FLOW_MARK_DEFAULT. */
                ol_flags = _mm_or_si128(ol_flags,
                                        _mm_andnot_si128(
                                                _mm_cmpeq_epi32(flow_tag,
                                                                pinfo_ft_mask),
                                                fdir_id_flags));
        }
        /*
         * Merge the two fields to generate the following:
         * bit[1]     = l3_ok
         * bit[2]     = l4_ok
         * bit[8]     = cv
         * bit[11:10] = l3_hdr_type
         * bit[14:12] = l4_hdr_type
         * bit[15]    = ip_frag
         * bit[16]    = tunneled
         * bit[17]    = outer_l3_type
         */
        ptype = _mm_and_si128(ptype, ptype_mask);
        pinfo = _mm_and_si128(pinfo, pinfo_mask);
        pinfo = _mm_slli_epi32(pinfo, 16);
        /* Make pinfo hold merged fields for ol_flags calculation. */
        pinfo = _mm_or_si128(ptype, pinfo);
        ptype = _mm_srli_epi32(pinfo, 10);
        ptype = _mm_packs_epi32(ptype, zero);
        /* Errored packets will have RTE_PTYPE_ALL_MASK. */
        op_err = _mm_srli_epi16(op_err, 8);
        ptype = _mm_or_si128(ptype, op_err);
        pt_idx0 = _mm_extract_epi8(ptype, 0);
        pt_idx1 = _mm_extract_epi8(ptype, 2);
        pt_idx2 = _mm_extract_epi8(ptype, 4);
        pt_idx3 = _mm_extract_epi8(ptype, 6);
        pkts[0]->packet_type = mlx5_ptype_table[pt_idx0] |
                               !!(pt_idx0 & (1 << 6)) * rxq->tunnel;
        pkts[1]->packet_type = mlx5_ptype_table[pt_idx1] |
                               !!(pt_idx1 & (1 << 6)) * rxq->tunnel;
        pkts[2]->packet_type = mlx5_ptype_table[pt_idx2] |
                               !!(pt_idx2 & (1 << 6)) * rxq->tunnel;
        pkts[3]->packet_type = mlx5_ptype_table[pt_idx3] |
                               !!(pt_idx3 & (1 << 6)) * rxq->tunnel;
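        /*
         * Bit 6 of the table index is the tunneled bit merged above, so
         * tunneled packets additionally get the tunnel ptype configured for
         * the queue OR-ed into packet_type.
         */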
        /* Fill flags for checksum and VLAN. */
        pinfo = _mm_and_si128(pinfo, ptype_ol_mask);
        pinfo = _mm_shuffle_epi8(cv_flag_sel, pinfo);
        /* Locate checksum flags at byte[2:1] and merge with VLAN flags. */
        cv_flags = _mm_slli_epi32(pinfo, 9);
        cv_flags = _mm_or_si128(pinfo, cv_flags);
        /* Move back flags to start from byte[0]. */
        cv_flags = _mm_srli_epi32(cv_flags, 8);
        /* Mask out garbage bits. */
        cv_flags = _mm_and_si128(cv_flags, cv_mask);
        /* Merge to ol_flags. */
        ol_flags = _mm_or_si128(ol_flags, cv_flags);
        /* Merge mbuf_init and ol_flags. */
        rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(ol_flags, 8), 0x30);
        rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(ol_flags, 4), 0x30);
        rearm2 = _mm_blend_epi16(mbuf_init, ol_flags, 0x30);
        rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(ol_flags, 4), 0x30);
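        /*
         * ol_flags holds one 32-bit flag word per packet; the byte shifts
         * move the right word to bytes 8-11 and the 0x30 blend patches only
         * those bytes of mbuf_init, i.e. the lower half of each mbuf's
         * 64-bit ol_flags, leaving the rest of the rearm template intact.
         */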
        /* Write 8B rearm_data and 8B ol_flags. */
        _mm_store_si128((__m128i *)&pkts[0]->rearm_data, rearm0);
        _mm_store_si128((__m128i *)&pkts[1]->rearm_data, rearm1);
        _mm_store_si128((__m128i *)&pkts[2]->rearm_data, rearm2);
        _mm_store_si128((__m128i *)&pkts[3]->rearm_data, rearm3);
}

/**
 * Process a non-compressed completion and fill in mbufs in RX SW ring
 * with data extracted from the title completion descriptor.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param cq
 *   Pointer to completion array having a non-compressed completion at first.
 * @param elts
 *   Pointer to SW ring to be filled. The first mbuf has to be pre-built from
 *   the title completion descriptor to be copied to the rest of mbufs.
 * @param pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 * @param err
 *   Pointer to a flag. Set to a non-zero value if the pkts array has at least
 *   one error packet to handle.
 * @param comp
 *   Pointer to an index. Set it to the first compressed completion if any.
 *
 * @return
 *   Number of CQEs successfully processed.
 */
static inline uint16_t
rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
                 struct rte_mbuf **elts, struct rte_mbuf **pkts,
                 uint16_t pkts_n, uint64_t *err, uint64_t *comp)
{
        const uint16_t q_n = 1 << rxq->cqe_n;
        const uint16_t q_mask = q_n - 1;
        unsigned int pos;
        uint64_t n = 0;
        uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP;
        uint16_t nocmp_n = 0;
        unsigned int ownership = !!(rxq->cq_ci & (q_mask + 1));
        const __m128i owner_check = _mm_set1_epi64x(0x0100000001000000LL);
        const __m128i opcode_check = _mm_set1_epi64x(0xf0000000f0000000LL);
        const __m128i format_check = _mm_set1_epi64x(0x0c0000000c000000LL);
        const __m128i resp_err_check = _mm_set1_epi64x(0xe0000000e0000000LL);
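        /*
         * op_own is later assembled so that each 32-bit lane carries one
         * CQE's op_own byte in its most significant byte: owner_check picks
         * the ownership bit, opcode_check the opcode nibble, format_check the
         * compressed-CQE format bits and resp_err_check the responder-error
         * opcode pattern.
         */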
#ifdef MLX5_PMD_SOFT_COUNTERS
        uint32_t rcvd_byte = 0;
        /* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */
        const __m128i len_shuf_mask =
                _mm_set_epi8(-1, -1, -1, -1,
                             -1, -1, -1, -1,
                             12, 13,  8,  9,
                              4,  5,  0,  1);
#endif
        /* Mask to shuffle from extracted CQE to mbuf. */
        const __m128i shuf_mask =
                _mm_set_epi8(-1,  3,  2,  1, /* fdir.hi */
                             12, 13, 14, 15, /* rss, bswap32 */
                             10, 11,         /* vlan_tci, bswap16 */
                              4,  5,         /* data_len, bswap16 */
                             -1, -1,         /* zero out 2nd half of pkt_len */
                              4,  5          /* pkt_len, bswap16 */);
        /* Mask to blend from the last Qword to the first DQword. */
        const __m128i blend_mask =
                _mm_set_epi8(-1, -1, -1, -1,
                             -1, -1, -1, -1,
                              0,  0,  0,  0,
                              0,  0,  0, -1);
        const __m128i zero = _mm_setzero_si128();
        const __m128i ones = _mm_cmpeq_epi32(zero, zero);
        const __m128i crc_adj =
                _mm_set_epi16(0, 0, 0, 0, 0,
                              rxq->crc_present * RTE_ETHER_CRC_LEN,
                              0,
                              rxq->crc_present * RTE_ETHER_CRC_LEN);
        const __m128i flow_mark_adj = _mm_set_epi32(rxq->mark * (-1), 0, 0, 0);
        /*
         * A. load first Qword (8 bytes) in one loop.
         * B. copy 4 mbuf pointers from elts ring to returning pkts.
         * C. load remaining CQE data and extract necessary fields.
         *    Final 16-byte cqes[] extracted from the original 64-byte CQE has
         *    the following structure:
         *        struct {
         *          uint8_t  pkt_info;
         *          uint8_t  flow_tag[3];
         *          uint16_t byte_cnt;
         *          uint8_t  rsvd4;
         *          uint8_t  op_own;
         *          uint16_t hdr_type_etc;
         *          uint16_t vlan_info;
         *          uint32_t rx_hash_res;
         *        } c;
         * D. fill in mbuf.
         * E. get valid CQEs.
         * F. find compressed CQE.
         */
        for (pos = 0;
             pos < pkts_n;
             pos += MLX5_VPMD_DESCS_PER_LOOP) {
                __m128i cqes[MLX5_VPMD_DESCS_PER_LOOP];
                __m128i cqe_tmp1, cqe_tmp2;
                __m128i pkt_mb0, pkt_mb1, pkt_mb2, pkt_mb3;
                __m128i op_own, op_own_tmp1, op_own_tmp2;
                __m128i opcode, owner_mask, invalid_mask;
                __m128i comp_mask;
                __m128i mask;
#ifdef MLX5_PMD_SOFT_COUNTERS
                __m128i byte_cnt;
#endif
                __m128i mbp1, mbp2;
                __m128i p = _mm_set_epi16(0, 0, 0, 0, 3, 2, 1, 0);
                unsigned int p1, p2, p3;

                /* Prefetch next 4 CQEs. */
                if (pkts_n - pos >= 2 * MLX5_VPMD_DESCS_PER_LOOP) {
                        rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP]);
                        rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 1]);
                        rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 2]);
                        rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 3]);
                }
                /* A.0 do not cross the end of CQ. */
                mask = _mm_set_epi64x(0, (pkts_n - pos) * sizeof(uint16_t) * 8);
                mask = _mm_sll_epi64(ones, mask);
                p = _mm_andnot_si128(mask, p);
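                /*
                 * p holds the per-slot CQE offsets {0, 1, 2, 3}; when fewer
                 * than four CQEs remain before pkts_n, the offsets of the
                 * out-of-range slots are cleared so their loads simply
                 * re-read cq[pos] instead of running past the valid range.
                 */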
                /* A.1 load cqes. */
                p3 = _mm_extract_epi16(p, 3);
                cqes[3] = _mm_loadl_epi64((__m128i *)
                                          &cq[pos + p3].sop_drop_qpn);
                rte_compiler_barrier();
                p2 = _mm_extract_epi16(p, 2);
                cqes[2] = _mm_loadl_epi64((__m128i *)
                                          &cq[pos + p2].sop_drop_qpn);
                rte_compiler_barrier();
                /* B.1 load mbuf pointers. */
                mbp1 = _mm_loadu_si128((__m128i *)&elts[pos]);
                mbp2 = _mm_loadu_si128((__m128i *)&elts[pos + 2]);
                /* A.1 load a block having op_own. */
                p1 = _mm_extract_epi16(p, 1);
                cqes[1] = _mm_loadl_epi64((__m128i *)
                                          &cq[pos + p1].sop_drop_qpn);
                rte_compiler_barrier();
                cqes[0] = _mm_loadl_epi64((__m128i *)
                                          &cq[pos].sop_drop_qpn);
                /* B.2 copy mbuf pointers. */
                _mm_storeu_si128((__m128i *)&pkts[pos], mbp1);
                _mm_storeu_si128((__m128i *)&pkts[pos + 2], mbp2);

                /* C.1 load remaining CQE data and extract necessary fields. */
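                /*
                 * The scattered CQE fields are gathered with three loads per
                 * CQE (the leading 16 bytes, the csum area and rsvd4[2]
                 * onward) and blended into the compact cqes[] layout
                 * described above.
                 */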
                cqe_tmp2 = _mm_load_si128((__m128i *)&cq[pos + p3]);
                cqe_tmp1 = _mm_load_si128((__m128i *)&cq[pos + p2]);
                cqes[3] = _mm_blendv_epi8(cqes[3], cqe_tmp2, blend_mask);
                cqes[2] = _mm_blendv_epi8(cqes[2], cqe_tmp1, blend_mask);
                cqe_tmp2 = _mm_loadu_si128((__m128i *)&cq[pos + p3].csum);
                cqe_tmp1 = _mm_loadu_si128((__m128i *)&cq[pos + p2].csum);
                cqes[3] = _mm_blend_epi16(cqes[3], cqe_tmp2, 0x30);
                cqes[2] = _mm_blend_epi16(cqes[2], cqe_tmp1, 0x30);
                cqe_tmp2 = _mm_loadl_epi64((__m128i *)&cq[pos + p3].rsvd4[2]);
                cqe_tmp1 = _mm_loadl_epi64((__m128i *)&cq[pos + p2].rsvd4[2]);
                cqes[3] = _mm_blend_epi16(cqes[3], cqe_tmp2, 0x04);
                cqes[2] = _mm_blend_epi16(cqes[2], cqe_tmp1, 0x04);
                /* C.2 generate final structure for mbuf with swapping bytes. */
                pkt_mb3 = _mm_shuffle_epi8(cqes[3], shuf_mask);
                pkt_mb2 = _mm_shuffle_epi8(cqes[2], shuf_mask);
                /* C.3 adjust CRC length. */
                pkt_mb3 = _mm_sub_epi16(pkt_mb3, crc_adj);
                pkt_mb2 = _mm_sub_epi16(pkt_mb2, crc_adj);
                /* C.4 adjust flow mark. */
                pkt_mb3 = _mm_add_epi32(pkt_mb3, flow_mark_adj);
                pkt_mb2 = _mm_add_epi32(pkt_mb2, flow_mark_adj);
                /* D.1 fill in mbuf - rx_descriptor_fields1. */
                _mm_storeu_si128((void *)&pkts[pos + 3]->pkt_len, pkt_mb3);
                _mm_storeu_si128((void *)&pkts[pos + 2]->pkt_len, pkt_mb2);
                /* E.1 extract op_own field. */
                op_own_tmp2 = _mm_unpacklo_epi32(cqes[2], cqes[3]);
                /* C.1 load remaining CQE data and extract necessary fields. */
                cqe_tmp2 = _mm_load_si128((__m128i *)&cq[pos + p1]);
                cqe_tmp1 = _mm_load_si128((__m128i *)&cq[pos]);
                cqes[1] = _mm_blendv_epi8(cqes[1], cqe_tmp2, blend_mask);
                cqes[0] = _mm_blendv_epi8(cqes[0], cqe_tmp1, blend_mask);
                cqe_tmp2 = _mm_loadu_si128((__m128i *)&cq[pos + p1].csum);
                cqe_tmp1 = _mm_loadu_si128((__m128i *)&cq[pos].csum);
                cqes[1] = _mm_blend_epi16(cqes[1], cqe_tmp2, 0x30);
                cqes[0] = _mm_blend_epi16(cqes[0], cqe_tmp1, 0x30);
                cqe_tmp2 = _mm_loadl_epi64((__m128i *)&cq[pos + p1].rsvd4[2]);
                cqe_tmp1 = _mm_loadl_epi64((__m128i *)&cq[pos].rsvd4[2]);
                cqes[1] = _mm_blend_epi16(cqes[1], cqe_tmp2, 0x04);
                cqes[0] = _mm_blend_epi16(cqes[0], cqe_tmp1, 0x04);
                /* C.2 generate final structure for mbuf with swapping bytes. */
                pkt_mb1 = _mm_shuffle_epi8(cqes[1], shuf_mask);
                pkt_mb0 = _mm_shuffle_epi8(cqes[0], shuf_mask);
                /* C.3 adjust CRC length. */
                pkt_mb1 = _mm_sub_epi16(pkt_mb1, crc_adj);
                pkt_mb0 = _mm_sub_epi16(pkt_mb0, crc_adj);
                /* C.4 adjust flow mark. */
                pkt_mb1 = _mm_add_epi32(pkt_mb1, flow_mark_adj);
                pkt_mb0 = _mm_add_epi32(pkt_mb0, flow_mark_adj);
                /* E.1 extract op_own byte. */
                op_own_tmp1 = _mm_unpacklo_epi32(cqes[0], cqes[1]);
                op_own = _mm_unpackhi_epi64(op_own_tmp1, op_own_tmp2);
                /* D.1 fill in mbuf - rx_descriptor_fields1. */
                _mm_storeu_si128((void *)&pkts[pos + 1]->pkt_len, pkt_mb1);
                _mm_storeu_si128((void *)&pkts[pos]->pkt_len, pkt_mb0);
                /* E.2 flip owner bit to mark CQEs from last round. */
                owner_mask = _mm_and_si128(op_own, owner_check);
                if (ownership)
                        owner_mask = _mm_xor_si128(owner_mask, owner_check);
                owner_mask = _mm_cmpeq_epi32(owner_mask, owner_check);
                owner_mask = _mm_packs_epi32(owner_mask, zero);
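                /*
                 * The ownership bit written by HW flips on every wrap of the
                 * CQ ring; owner_mask ends up flagging CQEs whose bit does
                 * not match the value expected for the current pass, so stale
                 * entries from the previous lap are treated as invalid below.
                 */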
                /* E.3 get mask for invalidated CQEs. */
                opcode = _mm_and_si128(op_own, opcode_check);
                invalid_mask = _mm_cmpeq_epi32(opcode_check, opcode);
                invalid_mask = _mm_packs_epi32(invalid_mask, zero);
                /* E.4 mask out beyond boundary. */
                invalid_mask = _mm_or_si128(invalid_mask, mask);
                /* E.5 merge invalid_mask with invalid owner. */
                invalid_mask = _mm_or_si128(invalid_mask, owner_mask);
                /* F.1 find compressed CQE format. */
                comp_mask = _mm_and_si128(op_own, format_check);
                comp_mask = _mm_cmpeq_epi32(comp_mask, format_check);
                comp_mask = _mm_packs_epi32(comp_mask, zero);
                /* F.2 mask out invalid entries. */
                comp_mask = _mm_andnot_si128(invalid_mask, comp_mask);
                comp_idx = _mm_cvtsi128_si64(comp_mask);
                /* F.3 get the first compressed CQE. */
                comp_idx = comp_idx ?
                           __builtin_ctzll(comp_idx) /
                           (sizeof(uint16_t) * 8) :
                           MLX5_VPMD_DESCS_PER_LOOP;
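                /*
                 * After packing, each CQE is represented by a 16-bit lane in
                 * the low 64 bits, so the number of trailing zero bits
                 * divided by 16 is the index of the first compressed CQE
                 * (or MLX5_VPMD_DESCS_PER_LOOP when there is none).
                 */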
                /* E.6 mask out entries after the compressed CQE. */
                mask = _mm_set_epi64x(0, comp_idx * sizeof(uint16_t) * 8);
                mask = _mm_sll_epi64(ones, mask);
                invalid_mask = _mm_or_si128(invalid_mask, mask);
                /* E.7 count non-compressed valid CQEs. */
                n = _mm_cvtsi128_si64(invalid_mask);
                n = n ? __builtin_ctzll(n) / (sizeof(uint16_t) * 8) :
                        MLX5_VPMD_DESCS_PER_LOOP;
                nocmp_n += n;
                /* D.2 get the final invalid mask. */
                mask = _mm_set_epi64x(0, n * sizeof(uint16_t) * 8);
                mask = _mm_sll_epi64(ones, mask);
                invalid_mask = _mm_or_si128(invalid_mask, mask);
                /* D.3 check error in opcode. */
                opcode = _mm_cmpeq_epi32(resp_err_check, opcode);
                opcode = _mm_packs_epi32(opcode, zero);
                opcode = _mm_andnot_si128(invalid_mask, opcode);
                /* D.4 mark if any error is set. */
                *err |= _mm_cvtsi128_si64(opcode);
                /* D.5 fill in mbuf - rearm_data and packet_type. */
                rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]);
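                /*
                 * When HW timestamping is on, the CQE timestamp is stored
                 * into the dynamic mbuf field at rxq->timestamp_offset; with
                 * rt_timestamp the raw counter value is first converted to
                 * realtime via mlx5_txpp_convert_rx_ts().
                 */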
                if (rxq->hw_timestamp) {
                        int offset = rxq->timestamp_offset;
                        if (rxq->rt_timestamp) {
                                struct mlx5_dev_ctx_shared *sh = rxq->sh;
                                uint64_t ts;

                                ts = rte_be_to_cpu_64(cq[pos].timestamp);
                                mlx5_timestamp_set(pkts[pos], offset,
                                        mlx5_txpp_convert_rx_ts(sh, ts));
                                ts = rte_be_to_cpu_64(cq[pos + p1].timestamp);
                                mlx5_timestamp_set(pkts[pos + 1], offset,
                                        mlx5_txpp_convert_rx_ts(sh, ts));
                                ts = rte_be_to_cpu_64(cq[pos + p2].timestamp);
                                mlx5_timestamp_set(pkts[pos + 2], offset,
                                        mlx5_txpp_convert_rx_ts(sh, ts));
                                ts = rte_be_to_cpu_64(cq[pos + p3].timestamp);
                                mlx5_timestamp_set(pkts[pos + 3], offset,
                                        mlx5_txpp_convert_rx_ts(sh, ts));
                        } else {
                                mlx5_timestamp_set(pkts[pos], offset,
                                        rte_be_to_cpu_64(cq[pos].timestamp));
                                mlx5_timestamp_set(pkts[pos + 1], offset,
                                        rte_be_to_cpu_64(cq[pos + p1].timestamp));
                                mlx5_timestamp_set(pkts[pos + 2], offset,
                                        rte_be_to_cpu_64(cq[pos + p2].timestamp));
                                mlx5_timestamp_set(pkts[pos + 3], offset,
                                        rte_be_to_cpu_64(cq[pos + p3].timestamp));
                        }
                }
                if (rxq->dynf_meta) {
                        /* This code is subject to further optimization. */
                        int32_t offs = rxq->flow_meta_offset;

                        *RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *) =
                                cq[pos].flow_table_metadata;
                        *RTE_MBUF_DYNFIELD(pkts[pos + 1], offs, uint32_t *) =
                                cq[pos + p1].flow_table_metadata;
                        *RTE_MBUF_DYNFIELD(pkts[pos + 2], offs, uint32_t *) =
                                cq[pos + p2].flow_table_metadata;
                        *RTE_MBUF_DYNFIELD(pkts[pos + 3], offs, uint32_t *) =
                                cq[pos + p3].flow_table_metadata;
                        if (*RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *))
                                pkts[pos]->ol_flags |= rxq->flow_meta_mask;
                        if (*RTE_MBUF_DYNFIELD(pkts[pos + 1], offs, uint32_t *))
                                pkts[pos + 1]->ol_flags |= rxq->flow_meta_mask;
                        if (*RTE_MBUF_DYNFIELD(pkts[pos + 2], offs, uint32_t *))
                                pkts[pos + 2]->ol_flags |= rxq->flow_meta_mask;
                        if (*RTE_MBUF_DYNFIELD(pkts[pos + 3], offs, uint32_t *))
                                pkts[pos + 3]->ol_flags |= rxq->flow_meta_mask;
                }
#ifdef MLX5_PMD_SOFT_COUNTERS
                /* Add up received bytes count. */
                byte_cnt = _mm_shuffle_epi8(op_own, len_shuf_mask);
                byte_cnt = _mm_andnot_si128(invalid_mask, byte_cnt);
                byte_cnt = _mm_hadd_epi16(byte_cnt, zero);
                rcvd_byte += _mm_cvtsi128_si64(_mm_hadd_epi16(byte_cnt, zero));
#endif
                /*
                 * Break the loop unless more valid CQEs are expected, or if
                 * there is a compressed CQE.
                 */
                if (n != MLX5_VPMD_DESCS_PER_LOOP)
                        break;
        }
#ifdef MLX5_PMD_SOFT_COUNTERS
        rxq->stats.ipackets += nocmp_n;
        rxq->stats.ibytes += rcvd_byte;
#endif
        if (comp_idx == n)
                *comp = comp_idx;
        return nocmp_n;
}

#endif /* RTE_PMD_MLX5_RXTX_VEC_SSE_H_ */