/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_RXTX_VEC_SSE_H_
#define RTE_PMD_MLX5_RXTX_VEC_SSE_H_

#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include <smmintrin.h>

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>

#include "mlx5.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rxtx_vec.h"
#include "mlx5_autoconf.h"

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif

/**
 * Store free buffers to RX SW ring.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param pkts
 *   Pointer to array of packets to be stored.
 * @param n
 *   Number of packets to be stored.
 */
static inline void
rxq_copy_mbuf_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t n)
{
	const uint16_t q_mask = (1 << rxq->elts_n) - 1;
	struct rte_mbuf **elts = &(*rxq->elts)[rxq->rq_pi & q_mask];
	unsigned int pos;
	uint16_t p = n & -2;

	/* Copy mbuf pointers two at a time with 16B loads/stores. */
	for (pos = 0; pos < p; pos += 2) {
		__m128i mbp;

		mbp = _mm_loadu_si128((__m128i *)&elts[pos]);
		_mm_storeu_si128((__m128i *)&pkts[pos], mbp);
	}
	/* Copy the odd trailing pointer, if any, as a scalar. */
	if (n & 1)
		pkts[pos] = elts[pos];
}

/**
 * Decompress a compressed completion and fill in mbufs in RX SW ring with data
 * extracted from the title completion descriptor.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param cq
 *   Pointer to completion array having a compressed completion at first.
 * @param elts
 *   Pointer to SW ring to be filled. The first mbuf has to be pre-built from
 *   the title completion descriptor to be copied to the rest of mbufs.
 *
 * @return
 *   Number of mini-CQEs successfully decompressed.
 */
static inline uint16_t
rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
		    struct rte_mbuf **elts)
{
	volatile struct mlx5_mini_cqe8 *mcq = (void *)(cq + 1);
	struct rte_mbuf *t_pkt = elts[0]; /* Title packet is pre-built. */
	unsigned int pos;
	unsigned int i;
	unsigned int inv = 0;
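	/*
	 * A compressed session consists of one "title" CQE carrying the
	 * fields shared by all packets of the session, followed by 8-byte
	 * mini-CQEs (packed eight per 64B CQE slot) that carry only the
	 * per-packet byte count and RSS hash/checksum. The caller has
	 * already unpacked the title CQE into elts[0]; its fields are
	 * replicated into the remaining mbufs below.
	 */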
	/* Mask to shuffle from extracted mini CQE to mbuf. */
	const __m128i shuf_mask1 =
		_mm_set_epi8(0,  1,  2,  3, /* rss, bswap32 */
			    -1, -1,         /* skip vlan_tci */
			     6,  7,         /* data_len, bswap16 */
			    -1, -1,  6,  7, /* pkt_len, bswap16 */
			    -1, -1, -1, -1  /* skip packet_type */);
	const __m128i shuf_mask2 =
		_mm_set_epi8(8,  9, 10, 11, /* rss, bswap32 */
			    -1, -1,         /* skip vlan_tci */
			    14, 15,         /* data_len, bswap16 */
			    -1, -1, 14, 15, /* pkt_len, bswap16 */
			    -1, -1, -1, -1  /* skip packet_type */);
	/* Restore the compressed count. Must be 16 bits. */
	const uint16_t mcqe_n = t_pkt->data_len +
				(rxq->crc_present * RTE_ETHER_CRC_LEN);
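	/*
	 * For the title CQE the byte count field holds the number of
	 * mini-CQEs in the session rather than a packet length, so the count
	 * is read back from the data_len the burst routine stored, undoing
	 * the CRC adjustment it applied.
	 */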
	const __m128i rearm =
		_mm_loadu_si128((__m128i *)&t_pkt->rearm_data);
	const __m128i rxdf =
		_mm_loadu_si128((__m128i *)&t_pkt->rx_descriptor_fields1);
	const __m128i crc_adj =
		_mm_set_epi16(0, 0, 0,
			      rxq->crc_present * RTE_ETHER_CRC_LEN,
			      0,
			      rxq->crc_present * RTE_ETHER_CRC_LEN,
			      0, 0);
	const uint32_t flow_tag = t_pkt->hash.fdir.hi;
#ifdef MLX5_PMD_SOFT_COUNTERS
	const __m128i zero = _mm_setzero_si128();
	const __m128i ones = _mm_cmpeq_epi32(zero, zero);
	uint32_t rcvd_byte = 0;
	/* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */
	const __m128i len_shuf_mask =
		_mm_set_epi8(-1, -1, -1, -1,
			     -1, -1, -1, -1,
			     14, 15,  6,  7,
			     10, 11,  2,  3);
#endif

	/*
	 * A. load mCQEs into a 128bit register.
	 * B. store rearm data to mbuf.
	 * C. combine data from mCQEs with rx_descriptor_fields1.
	 * D. store rx_descriptor_fields1.
	 * E. store flow tag (rte_flow mark).
	 */
	for (pos = 0; pos < mcqe_n; ) {
		__m128i mcqe1, mcqe2;
		__m128i rxdf1, rxdf2;
#ifdef MLX5_PMD_SOFT_COUNTERS
		__m128i byte_cnt, invalid_mask;
#endif

		if (!(pos & 0x7) && pos + 8 < mcqe_n)
			rte_prefetch0((void *)(cq + pos + 8));
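		/*
		 * Each CQE slot after the title CQE holds eight 8-byte
		 * mini-CQEs, so two mini-CQEs are pulled per 128-bit load and
		 * mcq is re-pointed to the next slot every eight entries.
		 */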
		/* A.1 load mCQEs into a 128bit register. */
		mcqe1 = _mm_loadu_si128((__m128i *)&mcq[pos % 8]);
		mcqe2 = _mm_loadu_si128((__m128i *)&mcq[pos % 8 + 2]);
		/* B.1 store rearm data to mbuf. */
		_mm_storeu_si128((__m128i *)&elts[pos]->rearm_data, rearm);
		_mm_storeu_si128((__m128i *)&elts[pos + 1]->rearm_data, rearm);
		/* C.1 combine data from mCQEs with rx_descriptor_fields1. */
		rxdf1 = _mm_shuffle_epi8(mcqe1, shuf_mask1);
		rxdf2 = _mm_shuffle_epi8(mcqe1, shuf_mask2);
		rxdf1 = _mm_sub_epi16(rxdf1, crc_adj);
		rxdf2 = _mm_sub_epi16(rxdf2, crc_adj);
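		/*
		 * Blend mask 0x23 keeps the 16-bit lanes holding packet_type
		 * and vlan_tci from the title descriptor and takes pkt_len,
		 * data_len and the RSS hash from the mini-CQE shuffle.
		 */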
		rxdf1 = _mm_blend_epi16(rxdf1, rxdf, 0x23);
		rxdf2 = _mm_blend_epi16(rxdf2, rxdf, 0x23);
		/* D.1 store rx_descriptor_fields1. */
		_mm_storeu_si128((__m128i *)
				 &elts[pos]->rx_descriptor_fields1,
				 rxdf1);
		_mm_storeu_si128((__m128i *)
				 &elts[pos + 1]->rx_descriptor_fields1,
				 rxdf2);
		/* B.1 store rearm data to mbuf. */
		_mm_storeu_si128((__m128i *)&elts[pos + 2]->rearm_data, rearm);
		_mm_storeu_si128((__m128i *)&elts[pos + 3]->rearm_data, rearm);
		/* C.1 combine data from mCQEs with rx_descriptor_fields1. */
		rxdf1 = _mm_shuffle_epi8(mcqe2, shuf_mask1);
		rxdf2 = _mm_shuffle_epi8(mcqe2, shuf_mask2);
		rxdf1 = _mm_sub_epi16(rxdf1, crc_adj);
		rxdf2 = _mm_sub_epi16(rxdf2, crc_adj);
		rxdf1 = _mm_blend_epi16(rxdf1, rxdf, 0x23);
		rxdf2 = _mm_blend_epi16(rxdf2, rxdf, 0x23);
		/* D.1 store rx_descriptor_fields1. */
		_mm_storeu_si128((__m128i *)
				 &elts[pos + 2]->rx_descriptor_fields1,
				 rxdf1);
		_mm_storeu_si128((__m128i *)
				 &elts[pos + 3]->rx_descriptor_fields1,
				 rxdf2);
#ifdef MLX5_PMD_SOFT_COUNTERS
		invalid_mask = _mm_set_epi64x(0,
					      (mcqe_n - pos) *
					      sizeof(uint16_t) * 8);
		invalid_mask = _mm_sll_epi64(ones, invalid_mask);
		mcqe1 = _mm_srli_si128(mcqe1, 4);
		byte_cnt = _mm_blend_epi16(mcqe1, mcqe2, 0xcc);
		byte_cnt = _mm_shuffle_epi8(byte_cnt, len_shuf_mask);
		byte_cnt = _mm_andnot_si128(invalid_mask, byte_cnt);
		byte_cnt = _mm_hadd_epi16(byte_cnt, zero);
		rcvd_byte += _mm_cvtsi128_si64(_mm_hadd_epi16(byte_cnt, zero));
#endif
		if (rxq->mark) {
			/* E.1 store flow tag (rte_flow mark). */
			elts[pos]->hash.fdir.hi = flow_tag;
			elts[pos + 1]->hash.fdir.hi = flow_tag;
			elts[pos + 2]->hash.fdir.hi = flow_tag;
			elts[pos + 3]->hash.fdir.hi = flow_tag;
		}
		pos += MLX5_VPMD_DESCS_PER_LOOP;
		/* Move to next CQE and invalidate consumed CQEs. */
		if (!(pos & 0x7) && pos < mcqe_n) {
			mcq = (void *)(cq + pos);
			for (i = 0; i < 8; ++i)
				cq[inv++].op_own = MLX5_CQE_INVALIDATE;
		}
	}
	/* Invalidate the rest of CQEs. */
	for (; inv < mcqe_n; ++inv)
		cq[inv].op_own = MLX5_CQE_INVALIDATE;
#ifdef MLX5_PMD_SOFT_COUNTERS
	rxq->stats.ipackets += mcqe_n;
	rxq->stats.ibytes += rcvd_byte;
#endif
	rxq->cq_ci += mcqe_n;
	return mcqe_n;
}

/**
 * Calculate packet type and offload flag for mbuf and store it.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param cqes[4]
 *   Array of four 16bytes completions extracted from the original completion
 *   descriptor.
 * @param op_err
 *   Opcode vector having responder error status. Each field is 4B.
 * @param pkts
 *   Pointer to array of packets to be filled.
 */
static inline void
rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4],
			 __m128i op_err, struct rte_mbuf **pkts)
{
	__m128i pinfo0, pinfo1;
	__m128i pinfo, ptype;
	__m128i ol_flags = _mm_set1_epi32(rxq->rss_hash * PKT_RX_RSS_HASH |
					  rxq->hw_timestamp * PKT_RX_TIMESTAMP);
	__m128i cv_flags;
	const __m128i zero = _mm_setzero_si128();
	const __m128i ptype_mask =
		_mm_set_epi32(0xfd06, 0xfd06, 0xfd06, 0xfd06);
	const __m128i ptype_ol_mask =
		_mm_set_epi32(0x106, 0x106, 0x106, 0x106);
	const __m128i pinfo_mask =
		_mm_set_epi32(0x3, 0x3, 0x3, 0x3);
	const __m128i cv_flag_sel =
		_mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0,
			     (uint8_t)((PKT_RX_IP_CKSUM_GOOD |
					PKT_RX_L4_CKSUM_GOOD) >> 1),
			     0,
			     (uint8_t)(PKT_RX_L4_CKSUM_GOOD >> 1),
			     0,
			     (uint8_t)(PKT_RX_IP_CKSUM_GOOD >> 1),
			     (uint8_t)(PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED),
			     0);
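	/*
	 * The checksum entries above are stored shifted right by one so that
	 * PKT_RX_L4_CKSUM_GOOD (bit 8) fits into a byte; they are shifted
	 * back into place when cv_flags is assembled further down.
	 */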
	const __m128i cv_mask =
		_mm_set_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
			      PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
			      PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
			      PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
			      PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
			      PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
			      PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
			      PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
	const __m128i mbuf_init =
		_mm_load_si128((__m128i *)&rxq->mbuf_initializer);
	__m128i rearm0, rearm1, rearm2, rearm3;
	uint8_t pt_idx0, pt_idx1, pt_idx2, pt_idx3;

	/* Extract pkt_info field. */
	pinfo0 = _mm_unpacklo_epi32(cqes[0], cqes[1]);
	pinfo1 = _mm_unpacklo_epi32(cqes[2], cqes[3]);
	pinfo = _mm_unpacklo_epi64(pinfo0, pinfo1);
	/* Extract hdr_type_etc field. */
	pinfo0 = _mm_unpackhi_epi32(cqes[0], cqes[1]);
	pinfo1 = _mm_unpackhi_epi32(cqes[2], cqes[3]);
	ptype = _mm_unpacklo_epi64(pinfo0, pinfo1);
	if (rxq->mark) {
		const __m128i pinfo_ft_mask =
			_mm_set_epi32(0xffffff00, 0xffffff00,
				      0xffffff00, 0xffffff00);
		const __m128i fdir_flags = _mm_set1_epi32(PKT_RX_FDIR);
		__m128i fdir_id_flags = _mm_set1_epi32(PKT_RX_FDIR_ID);
		__m128i flow_tag, invalid_mask;

		flow_tag = _mm_and_si128(pinfo, pinfo_ft_mask);
		/* Check if flow tag is non-zero then set PKT_RX_FDIR. */
		invalid_mask = _mm_cmpeq_epi32(flow_tag, zero);
		ol_flags = _mm_or_si128(ol_flags,
					_mm_andnot_si128(invalid_mask,
							 fdir_flags));
		/* Mask out invalid entries. */
		fdir_id_flags = _mm_andnot_si128(invalid_mask, fdir_id_flags);
		/* Check if flow tag is MLX5_FLOW_MARK_DEFAULT. */
		ol_flags = _mm_or_si128(ol_flags,
					_mm_andnot_si128(
						_mm_cmpeq_epi32(flow_tag,
								pinfo_ft_mask),
						fdir_id_flags));
	}
	/*
	 * Merge the two fields to generate the following:
	 * bit[1]     = l3_ok
	 * bit[2]     = l4_ok
	 * bit[8]     = cv
	 * bit[11:10] = l3_hdr_type
	 * bit[14:12] = l4_hdr_type
	 * bit[15]    = ip_frag
	 * bit[16]    = tunneled
	 * bit[17]    = outer_l3_type
	 */
	ptype = _mm_and_si128(ptype, ptype_mask);
	pinfo = _mm_and_si128(pinfo, pinfo_mask);
	pinfo = _mm_slli_epi32(pinfo, 16);
	/* Make pinfo carry the merged fields for the ol_flags calculation. */
	pinfo = _mm_or_si128(ptype, pinfo);
	ptype = _mm_srli_epi32(pinfo, 10);
	ptype = _mm_packs_epi32(ptype, zero);
	/* Errored packets will have RTE_PTYPE_ALL_MASK. */
	op_err = _mm_srli_epi16(op_err, 8);
	ptype = _mm_or_si128(ptype, op_err);
	pt_idx0 = _mm_extract_epi8(ptype, 0);
	pt_idx1 = _mm_extract_epi8(ptype, 2);
	pt_idx2 = _mm_extract_epi8(ptype, 4);
	pt_idx3 = _mm_extract_epi8(ptype, 6);
	pkts[0]->packet_type = mlx5_ptype_table[pt_idx0] |
			       !!(pt_idx0 & (1 << 6)) * rxq->tunnel;
	pkts[1]->packet_type = mlx5_ptype_table[pt_idx1] |
			       !!(pt_idx1 & (1 << 6)) * rxq->tunnel;
	pkts[2]->packet_type = mlx5_ptype_table[pt_idx2] |
			       !!(pt_idx2 & (1 << 6)) * rxq->tunnel;
	pkts[3]->packet_type = mlx5_ptype_table[pt_idx3] |
			       !!(pt_idx3 & (1 << 6)) * rxq->tunnel;
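	/*
	 * Bit 6 of the table index is the tunneled bit (bit 16 before the
	 * shift by 10 above); when set, the tunnel packet type configured
	 * for the queue is merged into the resolved packet_type.
	 */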
	/* Fill flags for checksum and VLAN. */
	pinfo = _mm_and_si128(pinfo, ptype_ol_mask);
	pinfo = _mm_shuffle_epi8(cv_flag_sel, pinfo);
	/* Locate checksum flags at byte[2:1] and merge with VLAN flags. */
	cv_flags = _mm_slli_epi32(pinfo, 9);
	cv_flags = _mm_or_si128(pinfo, cv_flags);
	/* Move back flags to start from byte[0]. */
	cv_flags = _mm_srli_epi32(cv_flags, 8);
	/* Mask out garbage bits. */
	cv_flags = _mm_and_si128(cv_flags, cv_mask);
	/* Merge to ol_flags. */
	ol_flags = _mm_or_si128(ol_flags, cv_flags);
	/* Merge mbuf_init and ol_flags. */
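	/*
	 * ol_flags holds one 32-bit flag word per packet. The shifts below
	 * move each packet's word to byte offset 8, and blend mask 0x30
	 * (16-bit lanes 4-5) writes it into the lower half of the 64-bit
	 * ol_flags field that follows the 8-byte rearm_data in the mbuf.
	 */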
	rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(ol_flags, 8), 0x30);
	rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(ol_flags, 4), 0x30);
	rearm2 = _mm_blend_epi16(mbuf_init, ol_flags, 0x30);
	rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(ol_flags, 4), 0x30);
	/* Write 8B rearm_data and 8B ol_flags. */
	_mm_store_si128((__m128i *)&pkts[0]->rearm_data, rearm0);
	_mm_store_si128((__m128i *)&pkts[1]->rearm_data, rearm1);
	_mm_store_si128((__m128i *)&pkts[2]->rearm_data, rearm2);
	_mm_store_si128((__m128i *)&pkts[3]->rearm_data, rearm3);
}

/**
 * Receive burst of packets. An errored completion also consumes a mbuf, but the
 * packet_type is set to be RTE_PTYPE_ALL_MASK. Marked mbufs should be freed
 * before returning to application.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 * @param[out] err
 *   Pointer to a flag. Set non-zero value if pkts array has at least one error
 *   packet to handle.
 *
 * @return
 *   Number of packets received including errors (<= pkts_n).
 */
static inline uint16_t
rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
	    uint64_t *err)
{
	const uint16_t q_n = 1 << rxq->cqe_n;
	const uint16_t q_mask = q_n - 1;
	volatile struct mlx5_cqe *cq;
	struct rte_mbuf **elts;
	unsigned int pos;
	uint64_t n;
	uint16_t repl_n;
	uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP;
	uint16_t nocmp_n = 0;
	uint16_t rcvd_pkt = 0;
	unsigned int cq_idx = rxq->cq_ci & q_mask;
	unsigned int elts_idx;
	unsigned int ownership = !!(rxq->cq_ci & (q_mask + 1));
	const __m128i owner_check =
		_mm_set_epi64x(0x0100000001000000LL, 0x0100000001000000LL);
	const __m128i opcode_check =
		_mm_set_epi64x(0xf0000000f0000000LL, 0xf0000000f0000000LL);
	const __m128i format_check =
		_mm_set_epi64x(0x0c0000000c000000LL, 0x0c0000000c000000LL);
	const __m128i resp_err_check =
		_mm_set_epi64x(0xe0000000e0000000LL, 0xe0000000e0000000LL);
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint32_t rcvd_byte = 0;
	/* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */
	const __m128i len_shuf_mask =
		_mm_set_epi8(-1, -1, -1, -1,
			     -1, -1, -1, -1,
			     12, 13,  8,  9,
			      4,  5,  0,  1);
#endif
	/* Mask to shuffle from extracted CQE to mbuf. */
	const __m128i shuf_mask =
		_mm_set_epi8(-1,  3,  2,  1, /* fdir.hi */
			     12, 13, 14, 15, /* rss, bswap32 */
			     10, 11,         /* vlan_tci, bswap16 */
			      4,  5,         /* data_len, bswap16 */
			     -1, -1,         /* zero out 2nd half of pkt_len */
			      4,  5          /* pkt_len, bswap16 */);
	/* Mask to blend from the last Qword to the first DQword. */
	const __m128i blend_mask =
		_mm_set_epi8(-1, -1, -1, -1,
			     -1, -1, -1, -1,
			      0,  0,  0,  0,
			      0,  0,  0, -1);
	const __m128i zero = _mm_setzero_si128();
	const __m128i ones = _mm_cmpeq_epi32(zero, zero);
	const __m128i crc_adj =
		_mm_set_epi16(0, 0, 0, 0, 0,
			      rxq->crc_present * RTE_ETHER_CRC_LEN,
			      0,
			      rxq->crc_present * RTE_ETHER_CRC_LEN);
	const __m128i flow_mark_adj = _mm_set_epi32(rxq->mark * (-1), 0, 0, 0);

	assert(rxq->sges_n == 0);
	assert(rxq->cqe_n == rxq->elts_n);
	cq = &(*rxq->cqes)[cq_idx];
	rte_prefetch0(cq);
	rte_prefetch0(cq + 1);
	rte_prefetch0(cq + 2);
	rte_prefetch0(cq + 3);
	pkts_n = RTE_MIN(pkts_n, MLX5_VPMD_RX_MAX_BURST);
	repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
	if (repl_n >= rxq->rq_repl_thresh)
		mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
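	/*
	 * Buffers are replenished only when a sufficiently large batch can
	 * be posted at once, so the refill cost is amortized over many
	 * packets instead of being paid on every burst.
	 */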
	/* See if there are unreturned mbufs from compressed CQE. */
	rcvd_pkt = rxq->decompressed;
	if (rcvd_pkt > 0) {
		rcvd_pkt = RTE_MIN(rcvd_pkt, pkts_n);
		rxq_copy_mbuf_v(rxq, pkts, rcvd_pkt);
		rxq->rq_pi += rcvd_pkt;
		rxq->decompressed -= rcvd_pkt;
		pkts += rcvd_pkt;
	}
	elts_idx = rxq->rq_pi & q_mask;
	elts = &(*rxq->elts)[elts_idx];
	/* Not to overflow pkts array. */
	pkts_n = RTE_ALIGN_FLOOR(pkts_n - rcvd_pkt, MLX5_VPMD_DESCS_PER_LOOP);
	/* Not to cross queue end. */
	pkts_n = RTE_MIN(pkts_n, q_n - elts_idx);
	pkts_n = RTE_MIN(pkts_n, q_n - cq_idx);
	if (!pkts_n)
		return rcvd_pkt;
	/* At this point, there shouldn't be any remaining packets. */
	assert(rxq->decompressed == 0);
	/*
	 * A. load first Qword (8bytes) in one loop.
	 * B. copy 4 mbuf pointers from elts ring to returning pkts.
	 * C. load remaining CQE data and extract necessary fields.
	 *    Final 16bytes cqes[] extracted from original 64bytes CQE has the
	 *    following structure:
	 *        struct {
	 *          uint8_t  pkt_info;
	 *          uint8_t  flow_tag[3];
	 *          uint16_t byte_cnt;
	 *          uint8_t  rsvd4;
	 *          uint8_t  op_own;
	 *          uint16_t hdr_type_etc;
	 *          uint16_t vlan_info;
	 *          uint32_t rx_has_res;
	 *        } c;
	 * D. fill in mbuf.
	 * E. get valid CQEs.
	 * F. find compressed CQE.
	 */
	for (pos = 0;
	     pos < pkts_n;
	     pos += MLX5_VPMD_DESCS_PER_LOOP) {
		__m128i cqes[MLX5_VPMD_DESCS_PER_LOOP];
		__m128i cqe_tmp1, cqe_tmp2;
		__m128i pkt_mb0, pkt_mb1, pkt_mb2, pkt_mb3;
		__m128i op_own, op_own_tmp1, op_own_tmp2;
		__m128i opcode, owner_mask, invalid_mask;
		__m128i comp_mask;
		__m128i mask;
#ifdef MLX5_PMD_SOFT_COUNTERS
		__m128i byte_cnt;
#endif
		__m128i mbp1, mbp2;
		__m128i p = _mm_set_epi16(0, 0, 0, 0, 3, 2, 1, 0);
		unsigned int p1, p2, p3;

		/* Prefetch next 4 CQEs. */
		if (pkts_n - pos >= 2 * MLX5_VPMD_DESCS_PER_LOOP) {
			rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP]);
			rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 1]);
			rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 2]);
			rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 3]);
		}
		/* A.0 do not cross the end of CQ. */
		mask = _mm_set_epi64x(0, (pkts_n - pos) * sizeof(uint16_t) * 8);
		mask = _mm_sll_epi64(ones, mask);
		p = _mm_andnot_si128(mask, p);
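		/*
		 * Offsets in p that fall beyond the remaining CQE count are
		 * zeroed, so the tail iterations below harmlessly re-read
		 * CQE[pos]; those lanes are masked out as invalid later on.
		 */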
		p3 = _mm_extract_epi16(p, 3);
		cqes[3] = _mm_loadl_epi64((__m128i *)
					  &cq[pos + p3].sop_drop_qpn);
		rte_compiler_barrier();
		p2 = _mm_extract_epi16(p, 2);
		cqes[2] = _mm_loadl_epi64((__m128i *)
					  &cq[pos + p2].sop_drop_qpn);
		rte_compiler_barrier();
		/* B.1 load mbuf pointers. */
		mbp1 = _mm_loadu_si128((__m128i *)&elts[pos]);
		mbp2 = _mm_loadu_si128((__m128i *)&elts[pos + 2]);
		/* A.1 load a block having op_own. */
		p1 = _mm_extract_epi16(p, 1);
		cqes[1] = _mm_loadl_epi64((__m128i *)
					  &cq[pos + p1].sop_drop_qpn);
		rte_compiler_barrier();
		cqes[0] = _mm_loadl_epi64((__m128i *)
					  &cq[pos].sop_drop_qpn);
		/* B.2 copy mbuf pointers. */
		_mm_storeu_si128((__m128i *)&pkts[pos], mbp1);
		_mm_storeu_si128((__m128i *)&pkts[pos + 2], mbp2);

		/* C.1 load remaining CQE data and extract necessary fields. */
		cqe_tmp2 = _mm_load_si128((__m128i *)&cq[pos + p3]);
		cqe_tmp1 = _mm_load_si128((__m128i *)&cq[pos + p2]);
		cqes[3] = _mm_blendv_epi8(cqes[3], cqe_tmp2, blend_mask);
		cqes[2] = _mm_blendv_epi8(cqes[2], cqe_tmp1, blend_mask);
		cqe_tmp2 = _mm_loadu_si128((__m128i *)&cq[pos + p3].csum);
		cqe_tmp1 = _mm_loadu_si128((__m128i *)&cq[pos + p2].csum);
		cqes[3] = _mm_blend_epi16(cqes[3], cqe_tmp2, 0x30);
		cqes[2] = _mm_blend_epi16(cqes[2], cqe_tmp1, 0x30);
		cqe_tmp2 = _mm_loadl_epi64((__m128i *)&cq[pos + p3].rsvd4[2]);
		cqe_tmp1 = _mm_loadl_epi64((__m128i *)&cq[pos + p2].rsvd4[2]);
		cqes[3] = _mm_blend_epi16(cqes[3], cqe_tmp2, 0x04);
		cqes[2] = _mm_blend_epi16(cqes[2], cqe_tmp1, 0x04);
		/* C.2 generate final structure for mbuf with swapping bytes. */
		pkt_mb3 = _mm_shuffle_epi8(cqes[3], shuf_mask);
		pkt_mb2 = _mm_shuffle_epi8(cqes[2], shuf_mask);
		/* C.3 adjust CRC length. */
		pkt_mb3 = _mm_sub_epi16(pkt_mb3, crc_adj);
		pkt_mb2 = _mm_sub_epi16(pkt_mb2, crc_adj);
		/* C.4 adjust flow mark. */
		pkt_mb3 = _mm_add_epi32(pkt_mb3, flow_mark_adj);
		pkt_mb2 = _mm_add_epi32(pkt_mb2, flow_mark_adj);
		/* D.1 fill in mbuf - rx_descriptor_fields1. */
		_mm_storeu_si128((void *)&pkts[pos + 3]->pkt_len, pkt_mb3);
		_mm_storeu_si128((void *)&pkts[pos + 2]->pkt_len, pkt_mb2);
		/* E.1 extract op_own field. */
		op_own_tmp2 = _mm_unpacklo_epi32(cqes[2], cqes[3]);
		/* C.1 load remaining CQE data and extract necessary fields. */
		cqe_tmp2 = _mm_load_si128((__m128i *)&cq[pos + p1]);
		cqe_tmp1 = _mm_load_si128((__m128i *)&cq[pos]);
		cqes[1] = _mm_blendv_epi8(cqes[1], cqe_tmp2, blend_mask);
		cqes[0] = _mm_blendv_epi8(cqes[0], cqe_tmp1, blend_mask);
		cqe_tmp2 = _mm_loadu_si128((__m128i *)&cq[pos + p1].csum);
		cqe_tmp1 = _mm_loadu_si128((__m128i *)&cq[pos].csum);
		cqes[1] = _mm_blend_epi16(cqes[1], cqe_tmp2, 0x30);
		cqes[0] = _mm_blend_epi16(cqes[0], cqe_tmp1, 0x30);
		cqe_tmp2 = _mm_loadl_epi64((__m128i *)&cq[pos + p1].rsvd4[2]);
		cqe_tmp1 = _mm_loadl_epi64((__m128i *)&cq[pos].rsvd4[2]);
		cqes[1] = _mm_blend_epi16(cqes[1], cqe_tmp2, 0x04);
		cqes[0] = _mm_blend_epi16(cqes[0], cqe_tmp1, 0x04);
		/* C.2 generate final structure for mbuf with swapping bytes. */
		pkt_mb1 = _mm_shuffle_epi8(cqes[1], shuf_mask);
		pkt_mb0 = _mm_shuffle_epi8(cqes[0], shuf_mask);
		/* C.3 adjust CRC length. */
		pkt_mb1 = _mm_sub_epi16(pkt_mb1, crc_adj);
		pkt_mb0 = _mm_sub_epi16(pkt_mb0, crc_adj);
		/* C.4 adjust flow mark. */
		pkt_mb1 = _mm_add_epi32(pkt_mb1, flow_mark_adj);
		pkt_mb0 = _mm_add_epi32(pkt_mb0, flow_mark_adj);
		/* E.1 extract op_own byte. */
		op_own_tmp1 = _mm_unpacklo_epi32(cqes[0], cqes[1]);
		op_own = _mm_unpackhi_epi64(op_own_tmp1, op_own_tmp2);
		/* D.1 fill in mbuf - rx_descriptor_fields1. */
		_mm_storeu_si128((void *)&pkts[pos + 1]->pkt_len, pkt_mb1);
		_mm_storeu_si128((void *)&pkts[pos]->pkt_len, pkt_mb0);
		/* E.2 flip owner bit to mark CQEs from last round. */
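		/*
		 * The hardware toggles the CQE owner bit on every wrap of the
		 * CQ; "ownership" holds the value expected for the current
		 * pass, so entries whose owner bit does not match are not yet
		 * valid and get folded into invalid_mask below.
		 */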
		owner_mask = _mm_and_si128(op_own, owner_check);
		if (ownership)
			owner_mask = _mm_xor_si128(owner_mask, owner_check);
		owner_mask = _mm_cmpeq_epi32(owner_mask, owner_check);
		owner_mask = _mm_packs_epi32(owner_mask, zero);
		/* E.3 get mask for invalidated CQEs. */
		opcode = _mm_and_si128(op_own, opcode_check);
		invalid_mask = _mm_cmpeq_epi32(opcode_check, opcode);
		invalid_mask = _mm_packs_epi32(invalid_mask, zero);
		/* E.4 mask out beyond boundary. */
		invalid_mask = _mm_or_si128(invalid_mask, mask);
		/* E.5 merge invalid_mask with invalid owner. */
		invalid_mask = _mm_or_si128(invalid_mask, owner_mask);
		/* F.1 find compressed CQE format. */
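		/*
		 * The format bits live in the op_own byte (top byte of each
		 * 32-bit lane); both bits set marks the first CQE of a
		 * compressed session, which ends the regular pass.
		 */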
		comp_mask = _mm_and_si128(op_own, format_check);
		comp_mask = _mm_cmpeq_epi32(comp_mask, format_check);
		comp_mask = _mm_packs_epi32(comp_mask, zero);
		/* F.2 mask out invalid entries. */
		comp_mask = _mm_andnot_si128(invalid_mask, comp_mask);
		comp_idx = _mm_cvtsi128_si64(comp_mask);
		/* F.3 get the first compressed CQE. */
		comp_idx = comp_idx ?
			   __builtin_ctzll(comp_idx) /
			   (sizeof(uint16_t) * 8) :
			   MLX5_VPMD_DESCS_PER_LOOP;
		/* E.6 mask out entries after the compressed CQE. */
		mask = _mm_set_epi64x(0, comp_idx * sizeof(uint16_t) * 8);
		mask = _mm_sll_epi64(ones, mask);
		invalid_mask = _mm_or_si128(invalid_mask, mask);
		/* E.7 count non-compressed valid CQEs. */
		n = _mm_cvtsi128_si64(invalid_mask);
		n = n ? __builtin_ctzll(n) / (sizeof(uint16_t) * 8) :
			MLX5_VPMD_DESCS_PER_LOOP;
		nocmp_n += n;
		/* D.2 get the final invalid mask. */
		mask = _mm_set_epi64x(0, n * sizeof(uint16_t) * 8);
		mask = _mm_sll_epi64(ones, mask);
		invalid_mask = _mm_or_si128(invalid_mask, mask);
		/* D.3 check error in opcode. */
		opcode = _mm_cmpeq_epi32(resp_err_check, opcode);
		opcode = _mm_packs_epi32(opcode, zero);
		opcode = _mm_andnot_si128(invalid_mask, opcode);
		/* D.4 mark if any error is set */
		*err |= _mm_cvtsi128_si64(opcode);
		/* D.5 fill in mbuf - rearm_data and packet_type. */
		rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]);
		if (rxq->hw_timestamp) {
			pkts[pos]->timestamp =
				rte_be_to_cpu_64(cq[pos].timestamp);
			pkts[pos + 1]->timestamp =
				rte_be_to_cpu_64(cq[pos + p1].timestamp);
			pkts[pos + 2]->timestamp =
				rte_be_to_cpu_64(cq[pos + p2].timestamp);
			pkts[pos + 3]->timestamp =
				rte_be_to_cpu_64(cq[pos + p3].timestamp);
		}
		if (rte_flow_dynf_metadata_avail()) {
			/* This code is subject for further optimization. */
			*RTE_FLOW_DYNF_METADATA(pkts[pos]) =
				cq[pos].flow_table_metadata;
			*RTE_FLOW_DYNF_METADATA(pkts[pos + 1]) =
				cq[pos + p1].flow_table_metadata;
			*RTE_FLOW_DYNF_METADATA(pkts[pos + 2]) =
				cq[pos + p2].flow_table_metadata;
			*RTE_FLOW_DYNF_METADATA(pkts[pos + 3]) =
				cq[pos + p3].flow_table_metadata;
			if (*RTE_FLOW_DYNF_METADATA(pkts[pos]))
				pkts[pos]->ol_flags |= PKT_RX_DYNF_METADATA;
			if (*RTE_FLOW_DYNF_METADATA(pkts[pos + 1]))
				pkts[pos + 1]->ol_flags |= PKT_RX_DYNF_METADATA;
			if (*RTE_FLOW_DYNF_METADATA(pkts[pos + 2]))
				pkts[pos + 2]->ol_flags |= PKT_RX_DYNF_METADATA;
			if (*RTE_FLOW_DYNF_METADATA(pkts[pos + 3]))
				pkts[pos + 3]->ol_flags |= PKT_RX_DYNF_METADATA;
		}
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Add up received bytes count. */
		byte_cnt = _mm_shuffle_epi8(op_own, len_shuf_mask);
		byte_cnt = _mm_andnot_si128(invalid_mask, byte_cnt);
		byte_cnt = _mm_hadd_epi16(byte_cnt, zero);
		rcvd_byte += _mm_cvtsi128_si64(_mm_hadd_epi16(byte_cnt, zero));
#endif
		/*
		 * Break the loop unless more valid CQE is expected, or if
		 * there's a compressed CQE.
		 */
		if (n != MLX5_VPMD_DESCS_PER_LOOP)
			break;
	}
	/* If no new CQE seen, return without updating cq_db. */
	if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP))
		return rcvd_pkt;
	/* Update the consumer indexes for non-compressed CQEs. */
	assert(nocmp_n <= pkts_n);
	rxq->cq_ci += nocmp_n;
	rxq->rq_pi += nocmp_n;
	rcvd_pkt += nocmp_n;
#ifdef MLX5_PMD_SOFT_COUNTERS
	rxq->stats.ipackets += nocmp_n;
	rxq->stats.ibytes += rcvd_byte;
#endif
	/* Decompress the last CQE if compressed. */
	if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n) {
		assert(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
		rxq->decompressed = rxq_cq_decompress_v(rxq, &cq[nocmp_n],
							&elts[nocmp_n]);
		/* Return more packets if needed. */
		if (nocmp_n < pkts_n) {
			uint16_t n = rxq->decompressed;

			n = RTE_MIN(n, pkts_n - nocmp_n);
			rxq_copy_mbuf_v(rxq, &pkts[nocmp_n], n);
			rxq->rq_pi += n;
			rcvd_pkt += n;
			rxq->decompressed -= n;
		}
	}
	rte_compiler_barrier();
	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
	return rcvd_pkt;
}

#endif /* RTE_PMD_MLX5_RXTX_VEC_SSE_H_ */