/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_RXTX_VEC_NEON_H_
#define RTE_PMD_MLX5_RXTX_VEC_NEON_H_
#include <stdint.h>
#include <arm_neon.h>

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>

#include "mlx5_defs.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rxtx_vec.h"
#include "mlx5_autoconf.h"
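
/* Vector loads/stores below cast away the volatile qualifier of CQE pointers. */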
#pragma GCC diagnostic ignored "-Wcast-qual"

/**
 * Store free buffers to RX SW ring.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param pkts
 *   Pointer to array of packets to be stored.
 * @param n
 *   Number of packets to be stored.
 */
static inline void
rxq_copy_mbuf_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t n)
{
	const uint16_t q_mask = (1 << rxq->elts_n) - 1;
	struct rte_mbuf **elts = &(*rxq->elts)[rxq->rq_pi & q_mask];
	unsigned int pos;
	uint16_t p = n & -2;

	for (pos = 0; pos < p; pos += 2) {
		uint64x2_t mbp = vld1q_u64((void *)&elts[pos]);

		vst1q_u64((void *)&pkts[pos], mbp);
	}
	if (n & 1)
		pkts[pos] = elts[pos];
}

/**
 * Decompress a compressed completion and fill in mbufs in RX SW ring with data
 * extracted from the title completion descriptor.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param cq
 *   Pointer to completion array having a compressed completion at first.
 * @param elts
 *   Pointer to SW ring to be filled. The first mbuf has to be pre-built from
 *   the title completion descriptor to be copied to the rest of mbufs.
 * @return
 *   Number of mini-CQEs successfully decompressed.
 */
static inline uint16_t
rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
		    struct rte_mbuf **elts)
{
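	/* Mini-CQEs are packed eight per CQE slot, starting right after the
	 * title CQE.
	 */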
	volatile struct mlx5_mini_cqe8 *mcq = (void *)&(cq + 1)->pkt_info;
	struct rte_mbuf *t_pkt = elts[0]; /* Title packet is pre-built. */
	unsigned int pos;
	unsigned int i;
	unsigned int inv = 0;
	/* Mask to shuffle from extracted mini CQE to mbuf. */
	const uint8x16_t mcqe_shuf_m1 = {
		-1, -1, -1, -1, /* skip packet_type */
		 7,  6, -1, -1, /* pkt_len, bswap16 */
		 7,  6,         /* data_len, bswap16 */
		-1, -1,         /* skip vlan_tci */
		 3,  2,  1,  0  /* hash.rss, bswap32 */
	};
	const uint8x16_t mcqe_shuf_m2 = {
		-1, -1, -1, -1, /* skip packet_type */
		15, 14, -1, -1, /* pkt_len, bswap16 */
		15, 14,         /* data_len, bswap16 */
		-1, -1,         /* skip vlan_tci */
		11, 10,  9,  8  /* hash.rss, bswap32 */
	};
	/* Restore the compressed count. Must be 16 bits. */
	const uint16_t mcqe_n = t_pkt->data_len +
				(rxq->crc_present * RTE_ETHER_CRC_LEN);
	const uint64x2_t rearm =
		vld1q_u64((void *)&t_pkt->rearm_data);
	const uint32x4_t rxdf_mask = {
		0xffffffff, /* packet_type */
		0,          /* skip pkt_len */
		0xffff0000, /* vlan_tci, skip data_len */
		0,          /* skip hash.rss */
	};
	const uint8x16_t rxdf =
		vandq_u8(vld1q_u8((void *)&t_pkt->rx_descriptor_fields1),
			 vreinterpretq_u8_u32(rxdf_mask));
	const uint16x8_t crc_adj = {
		0, 0,
		rxq->crc_present * RTE_ETHER_CRC_LEN, 0,
		rxq->crc_present * RTE_ETHER_CRC_LEN, 0,
		0, 0
	};
	const uint32_t flow_tag = t_pkt->hash.fdir.hi;
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint32_t rcvd_byte = 0;
#endif
	/* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */
	const uint8x8_t len_shuf_m = {
		 7,  6,  /* 1st mCQE */
		15, 14,  /* 2nd mCQE */
		23, 22,  /* 3rd mCQE */
		31, 30   /* 4th mCQE */
	};

	/*
	 * A. load mCQEs into a 128bit register.
	 * B. store rearm data to mbuf.
	 * C. combine data from mCQEs with rx_descriptor_fields1.
	 * D. store rx_descriptor_fields1.
	 * E. store flow tag (rte_flow mark).
	 */
	for (pos = 0; pos < mcqe_n; ) {
		uint8_t *p = (void *)&mcq[pos % 8];
		uint8_t *e0 = (void *)&elts[pos]->rearm_data;
		uint8_t *e1 = (void *)&elts[pos + 1]->rearm_data;
		uint8_t *e2 = (void *)&elts[pos + 2]->rearm_data;
		uint8_t *e3 = (void *)&elts[pos + 3]->rearm_data;
		uint16x4_t byte_cnt;
#ifdef MLX5_PMD_SOFT_COUNTERS
		uint16x4_t invalid_mask =
			vcreate_u16(mcqe_n - pos < MLX5_VPMD_DESCS_PER_LOOP ?
				    -1UL << ((mcqe_n - pos) *
					     sizeof(uint16_t) * 8) : 0);
#endif

		for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
			if (likely(pos + i < mcqe_n))
				rte_prefetch0((void *)(cq + pos + i));
		__asm__ volatile (
		/* A.1 load mCQEs into a 128bit register. */
		"ld1 {v16.16b - v17.16b}, [%[mcq]] \n\t"
		/* B.1 store rearm data to mbuf. */
		"st1 {%[rearm].2d}, [%[e0]] \n\t"
		"add %[e0], %[e0], #16 \n\t"
		"st1 {%[rearm].2d}, [%[e1]] \n\t"
		"add %[e1], %[e1], #16 \n\t"
		/* C.1 combine data from mCQEs with rx_descriptor_fields1. */
		"tbl v18.16b, {v16.16b}, %[mcqe_shuf_m1].16b \n\t"
		"tbl v19.16b, {v16.16b}, %[mcqe_shuf_m2].16b \n\t"
		"sub v18.8h, v18.8h, %[crc_adj].8h \n\t"
		"sub v19.8h, v19.8h, %[crc_adj].8h \n\t"
		"orr v18.16b, v18.16b, %[rxdf].16b \n\t"
		"orr v19.16b, v19.16b, %[rxdf].16b \n\t"
		/* D.1 store rx_descriptor_fields1. */
		"st1 {v18.2d}, [%[e0]] \n\t"
		"st1 {v19.2d}, [%[e1]] \n\t"
		/* B.1 store rearm data to mbuf. */
		"st1 {%[rearm].2d}, [%[e2]] \n\t"
		"add %[e2], %[e2], #16 \n\t"
		"st1 {%[rearm].2d}, [%[e3]] \n\t"
		"add %[e3], %[e3], #16 \n\t"
		/* C.1 combine data from mCQEs with rx_descriptor_fields1. */
		"tbl v18.16b, {v17.16b}, %[mcqe_shuf_m1].16b \n\t"
		"tbl v19.16b, {v17.16b}, %[mcqe_shuf_m2].16b \n\t"
		"sub v18.8h, v18.8h, %[crc_adj].8h \n\t"
		"sub v19.8h, v19.8h, %[crc_adj].8h \n\t"
		"orr v18.16b, v18.16b, %[rxdf].16b \n\t"
		"orr v19.16b, v19.16b, %[rxdf].16b \n\t"
		/* D.1 store rx_descriptor_fields1. */
		"st1 {v18.2d}, [%[e2]] \n\t"
		"st1 {v19.2d}, [%[e3]] \n\t"
#ifdef MLX5_PMD_SOFT_COUNTERS
		"tbl %[byte_cnt].8b, {v16.16b - v17.16b}, %[len_shuf_m].8b \n\t"
#endif
		:[byte_cnt]"=&w"(byte_cnt)
		:[mcq]"r"(p),
		 [rxdf]"w"(rxdf),
		 [rearm]"w"(rearm),
		 [e3]"r"(e3), [e2]"r"(e2), [e1]"r"(e1), [e0]"r"(e0),
		 [mcqe_shuf_m1]"w"(mcqe_shuf_m1),
		 [mcqe_shuf_m2]"w"(mcqe_shuf_m2),
		 [crc_adj]"w"(crc_adj),
		 [len_shuf_m]"w"(len_shuf_m)
		:"memory", "v16", "v17", "v18", "v19");
#ifdef MLX5_PMD_SOFT_COUNTERS
		byte_cnt = vbic_u16(byte_cnt, invalid_mask);
		rcvd_byte += vget_lane_u64(vpaddl_u32(vpaddl_u16(byte_cnt)), 0);
#endif
		/* E.1 store flow tag (rte_flow mark). */
		elts[pos]->hash.fdir.hi = flow_tag;
		elts[pos + 1]->hash.fdir.hi = flow_tag;
		elts[pos + 2]->hash.fdir.hi = flow_tag;
		elts[pos + 3]->hash.fdir.hi = flow_tag;
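		/* Mini-CQEs carry no metadata; replicate the title packet's
		 * dynamic metadata field to the decompressed mbufs.
		 */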
		if (!!rxq->flow_meta_mask) {
			int32_t offs = rxq->flow_meta_offset;
			const uint32_t meta =
				*RTE_MBUF_DYNFIELD(t_pkt, offs, uint32_t *);

			/* Check if title packet has valid metadata. */
			if (meta) {
				MLX5_ASSERT(t_pkt->ol_flags &
					    rxq->flow_meta_mask);
				*RTE_MBUF_DYNFIELD(elts[pos], offs,
						   uint32_t *) = meta;
				*RTE_MBUF_DYNFIELD(elts[pos + 1], offs,
						   uint32_t *) = meta;
				*RTE_MBUF_DYNFIELD(elts[pos + 2], offs,
						   uint32_t *) = meta;
				*RTE_MBUF_DYNFIELD(elts[pos + 3], offs,
						   uint32_t *) = meta;
			}
		}
		pos += MLX5_VPMD_DESCS_PER_LOOP;
		/* Move to next CQE and invalidate consumed CQEs. */
		if (!(pos & 0x7) && pos < mcqe_n) {
			mcq = (void *)&(cq + pos)->pkt_info;
			for (i = 0; i < 8; ++i)
				cq[inv++].op_own = MLX5_CQE_INVALIDATE;
		}
	}
	/* Invalidate the rest of CQEs. */
	for (; inv < mcqe_n; ++inv)
		cq[inv].op_own = MLX5_CQE_INVALIDATE;
#ifdef MLX5_PMD_SOFT_COUNTERS
	rxq->stats.ipackets += mcqe_n;
	rxq->stats.ibytes += rcvd_byte;
#endif
	rxq->cq_ci += mcqe_n;
	return mcqe_n;
}

/**
 * Calculate packet type and offload flag for mbuf and store it.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param ptype_info
 *   Array of four 4bytes packet type info extracted from the original
 *   completion descriptor.
 * @param flow_tag
 *   Array of four 4bytes flow ID extracted from the original completion
 *   descriptor.
 * @param op_err
 *   Opcode vector having responder error status. Each field is 4B.
 * @param pkts
 *   Pointer to array of packets to be filled.
 */
static inline void
rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
			 uint32x4_t ptype_info, uint32x4_t flow_tag,
			 uint16x4_t op_err, struct rte_mbuf **pkts)
{
	uint16x4_t ptype;
	uint32x4_t pinfo, cv_flags;
	uint32x4_t ol_flags =
		vdupq_n_u32(rxq->rss_hash * PKT_RX_RSS_HASH |
			    rxq->hw_timestamp * PKT_RX_TIMESTAMP);
	const uint32x4_t ptype_ol_mask = { 0x106, 0x106, 0x106, 0x106 };
	const uint8x16_t cv_flag_sel = {
		0,
		(uint8_t)(PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED),
		(uint8_t)(PKT_RX_IP_CKSUM_GOOD >> 1),
		0,
		(uint8_t)(PKT_RX_L4_CKSUM_GOOD >> 1),
		0,
		(uint8_t)((PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1),
		0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	const uint32x4_t cv_mask =
		vdupq_n_u32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
			    PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
	const uint64x2_t mbuf_init = vld1q_u64
				((const uint64_t *)&rxq->mbuf_initializer);
	uint64x2_t rearm0, rearm1, rearm2, rearm3;
	uint8_t pt_idx0, pt_idx1, pt_idx2, pt_idx3;

	if (rxq->mark) {
		const uint32x4_t ft_def = vdupq_n_u32(MLX5_FLOW_MARK_DEFAULT);
		const uint32x4_t fdir_flags = vdupq_n_u32(PKT_RX_FDIR);
		uint32x4_t fdir_id_flags = vdupq_n_u32(PKT_RX_FDIR_ID);
		uint32x4_t invalid_mask;

		/* Check if flow tag is non-zero then set PKT_RX_FDIR. */
		invalid_mask = vceqzq_u32(flow_tag);
		ol_flags = vorrq_u32(ol_flags,
				     vbicq_u32(fdir_flags, invalid_mask));
		/* Mask out invalid entries. */
		fdir_id_flags = vbicq_u32(fdir_id_flags, invalid_mask);
		/* Check if flow tag equals MLX5_FLOW_MARK_DEFAULT. */
		ol_flags = vorrq_u32(ol_flags,
				     vbicq_u32(fdir_id_flags,
					       vceqq_u32(flow_tag, ft_def)));
	}
	/*
	 * ptype_info has the following:
	 * bit[1] = l3_ok, bit[2] = l4_ok, bit[8] = cv,
	 * bit[11:10] = l3_hdr_type,
	 * bit[14:12] = l4_hdr_type,
	 * bit[15] = ip_frag, bit[16] = tunneled, bit[17] = outer_l3_type.
	 */
	ptype = vshrn_n_u32(ptype_info, 10);
	/* Errored packets will have RTE_PTYPE_ALL_MASK. */
	ptype = vorr_u16(ptype, op_err);
	pt_idx0 = vget_lane_u8(vreinterpret_u8_u16(ptype), 6);
	pt_idx1 = vget_lane_u8(vreinterpret_u8_u16(ptype), 4);
	pt_idx2 = vget_lane_u8(vreinterpret_u8_u16(ptype), 2);
	pt_idx3 = vget_lane_u8(vreinterpret_u8_u16(ptype), 0);
	pkts[0]->packet_type = mlx5_ptype_table[pt_idx0] |
			       !!(pt_idx0 & (1 << 6)) * rxq->tunnel;
	pkts[1]->packet_type = mlx5_ptype_table[pt_idx1] |
			       !!(pt_idx1 & (1 << 6)) * rxq->tunnel;
	pkts[2]->packet_type = mlx5_ptype_table[pt_idx2] |
			       !!(pt_idx2 & (1 << 6)) * rxq->tunnel;
	pkts[3]->packet_type = mlx5_ptype_table[pt_idx3] |
			       !!(pt_idx3 & (1 << 6)) * rxq->tunnel;
	/* Fill flags for checksum and VLAN. */
	pinfo = vandq_u32(ptype_info, ptype_ol_mask);
	pinfo = vreinterpretq_u32_u8(
		vqtbl1q_u8(cv_flag_sel, vreinterpretq_u8_u32(pinfo)));
	/* Locate checksum flags at byte[2:1] and merge with VLAN flags. */
	cv_flags = vshlq_n_u32(pinfo, 9);
	cv_flags = vorrq_u32(pinfo, cv_flags);
	/* Move back flags to start from byte[0]. */
	cv_flags = vshrq_n_u32(cv_flags, 8);
	/* Mask out garbage bits. */
	cv_flags = vandq_u32(cv_flags, cv_mask);
	/* Merge to ol_flags. */
	ol_flags = vorrq_u32(ol_flags, cv_flags);
	/* Merge mbuf_init and ol_flags, and store. */
	rearm0 = vreinterpretq_u64_u32(vsetq_lane_u32
					(vgetq_lane_u32(ol_flags, 3),
					 vreinterpretq_u32_u64(mbuf_init), 2));
	rearm1 = vreinterpretq_u64_u32(vsetq_lane_u32
					(vgetq_lane_u32(ol_flags, 2),
					 vreinterpretq_u32_u64(mbuf_init), 2));
	rearm2 = vreinterpretq_u64_u32(vsetq_lane_u32
					(vgetq_lane_u32(ol_flags, 1),
					 vreinterpretq_u32_u64(mbuf_init), 2));
	rearm3 = vreinterpretq_u64_u32(vsetq_lane_u32
					(vgetq_lane_u32(ol_flags, 0),
					 vreinterpretq_u32_u64(mbuf_init), 2));

	vst1q_u64((void *)&pkts[0]->rearm_data, rearm0);
	vst1q_u64((void *)&pkts[1]->rearm_data, rearm1);
	vst1q_u64((void *)&pkts[2]->rearm_data, rearm2);
	vst1q_u64((void *)&pkts[3]->rearm_data, rearm3);
}

/**
 * Receive burst of packets. An errored completion also consumes a mbuf, but the
 * packet_type is set to be RTE_PTYPE_ALL_MASK. Marked mbufs should be freed
 * before returning to application.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 * @param[out] err
 *   Pointer to a flag. Set non-zero value if pkts array has at least one error
 *   packet to handle.
 * @return
 *   Number of packets received including errors (<= pkts_n).
 */
static inline uint16_t
rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
	    uint64_t *err)
{
	const uint16_t q_n = 1 << rxq->cqe_n;
	const uint16_t q_mask = q_n - 1;
	volatile struct mlx5_cqe *cq;
	struct rte_mbuf **elts;
	unsigned int pos;
	uint64_t n;
	uint16_t repl_n;
	uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP;
	uint16_t nocmp_n = 0;
	uint16_t rcvd_pkt = 0;
	unsigned int cq_idx = rxq->cq_ci & q_mask;
	unsigned int elts_idx;
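	/* Owner bit value that marks CQEs left over from the previous pass;
	 * it flips each time cq_ci wraps around the CQ ring.
	 */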
	const uint16x4_t ownership = vdup_n_u16(!(rxq->cq_ci & (q_mask + 1)));
	const uint16x4_t owner_check = vcreate_u16(0x0001000100010001);
	const uint16x4_t opcode_check = vcreate_u16(0x00f000f000f000f0);
	const uint16x4_t format_check = vcreate_u16(0x000c000c000c000c);
	const uint16x4_t resp_err_check = vcreate_u16(0x00e000e000e000e0);
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint32_t rcvd_byte = 0;
#endif
	/* Mask to generate 16B length vector. */
	const uint8x8_t len_shuf_m = {
		52, 53,         /* 4th CQE */
		36, 37,         /* 3rd CQE */
		20, 21,         /* 2nd CQE */
		 4,  5          /* 1st CQE */
	};
	/* Mask to extract 16B data from a 64B CQE. */
	const uint8x16_t cqe_shuf_m = {
		28, 29,         /* hdr_type_etc */
		 0,             /* pkt_info */
		-1,             /* null */
		47, 46,         /* byte_cnt, bswap16 */
		31, 30,         /* vlan_info, bswap16 */
		15, 14, 13, 12, /* rx_hash_res, bswap32 */
		57, 58, 59,     /* flow_tag */
		63              /* op_own */
	};
	/* Mask to generate 16B data for mbuf. */
	const uint8x16_t mb_shuf_m = {
		 4,  5, -1, -1, /* pkt_len */
		 4,  5,         /* data_len */
		 6,  7,         /* vlan_tci */
		 8,  9, 10, 11, /* hash.rss */
		12, 13, 14, -1  /* hash.fdir.hi */
	};
	/* Mask to generate 16B owner vector. */
	const uint8x8_t owner_shuf_m = {
		63, -1,         /* 4th CQE */
		47, -1,         /* 3rd CQE */
		31, -1,         /* 2nd CQE */
		15, -1          /* 1st CQE */
	};
	/* Mask to generate a vector having packet_type/ol_flags. */
	const uint8x16_t ptype_shuf_m = {
		48, 49, 50, -1, /* 4th CQE */
		32, 33, 34, -1, /* 3rd CQE */
		16, 17, 18, -1, /* 2nd CQE */
		 0,  1,  2, -1  /* 1st CQE */
	};
	/* Mask to generate a vector having flow tags. */
	const uint8x16_t ftag_shuf_m = {
		60, 61, 62, -1, /* 4th CQE */
		44, 45, 46, -1, /* 3rd CQE */
		28, 29, 30, -1, /* 2nd CQE */
		12, 13, 14, -1  /* 1st CQE */
	};
	const uint16x8_t crc_adj = {
		0, 0, rxq->crc_present * RTE_ETHER_CRC_LEN, 0, 0, 0, 0, 0
	};
	const uint32x4_t flow_mark_adj = { 0, 0, 0, rxq->mark * (-1) };

	MLX5_ASSERT(rxq->sges_n == 0);
	MLX5_ASSERT(rxq->cqe_n == rxq->elts_n);
	cq = &(*rxq->cqes)[cq_idx];
	rte_prefetch_non_temporal(cq);
	rte_prefetch_non_temporal(cq + 1);
	rte_prefetch_non_temporal(cq + 2);
	rte_prefetch_non_temporal(cq + 3);
	pkts_n = RTE_MIN(pkts_n, MLX5_VPMD_RX_MAX_BURST);
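	/* Number of RX ring entries already consumed and free to replenish. */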
	repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
	if (repl_n >= rxq->rq_repl_thresh)
		mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
	/* See if there are unreturned mbufs from a compressed CQE. */
	rcvd_pkt = rxq->decompressed;
	if (rcvd_pkt > 0) {
		rcvd_pkt = RTE_MIN(rcvd_pkt, pkts_n);
		rxq_copy_mbuf_v(rxq, pkts, rcvd_pkt);
		rxq->rq_pi += rcvd_pkt;
		pkts += rcvd_pkt;
		rxq->decompressed -= rcvd_pkt;
	}
	elts_idx = rxq->rq_pi & q_mask;
	elts = &(*rxq->elts)[elts_idx];
	/* Not to overflow pkts array. */
	pkts_n = RTE_ALIGN_FLOOR(pkts_n - rcvd_pkt, MLX5_VPMD_DESCS_PER_LOOP);
	/* Not to cross queue end. */
	pkts_n = RTE_MIN(pkts_n, q_n - elts_idx);
	pkts_n = RTE_MIN(pkts_n, q_n - cq_idx);
	if (!pkts_n)
		return rcvd_pkt;
	/* At this point, there shouldn't be any remaining packets. */
	MLX5_ASSERT(rxq->decompressed == 0);
	/*
	 * Note that vectors have reverse order - {v3, v2, v1, v0}, because
	 * there's no instruction to count trailing zeros. __builtin_clzl() is
	 * used instead.
	 *
	 * A. copy 4 mbuf pointers from elts ring to returning pkts.
	 * B. load 64B CQE and extract necessary fields.
	 *    Final 16bytes cqes[] extracted from original 64bytes CQE has the
	 *    following structure:
	 *        struct {
	 *          uint16_t hdr_type_etc;
	 *          uint8_t  pkt_info;
	 *          uint8_t  rsvd;
	 *          uint16_t byte_cnt;
	 *          uint16_t vlan_info;
	 *          uint32_t rx_hash_res;
	 *          uint8_t  flow_tag[3];
	 *          uint8_t  op_own;
	 *        } c;
	 * C. fill in mbuf.
	 * D. get valid CQEs.
	 * E. find compressed CQE.
	 */
	for (pos = 0;
	     pos < pkts_n;
	     pos += MLX5_VPMD_DESCS_PER_LOOP) {
		uint16x4_t op_own;
		uint16x4_t opcode, owner_mask, invalid_mask;
		uint16x4_t comp_mask;
		uint16x4_t mask;
		uint16x4_t byte_cnt;
		uint32x4_t ptype_info, flow_tag;
		register uint64x2_t c0, c1, c2, c3;
		uint8_t *p0, *p1, *p2, *p3;
		uint8_t *e0 = (void *)&elts[pos]->pkt_len;
		uint8_t *e1 = (void *)&elts[pos + 1]->pkt_len;
		uint8_t *e2 = (void *)&elts[pos + 2]->pkt_len;
		uint8_t *e3 = (void *)&elts[pos + 3]->pkt_len;
		void *elts_p = (void *)&elts[pos];
		void *pkts_p = (void *)&pkts[pos];

		/* A.0 do not cross the end of CQ. */
		mask = vcreate_u16(pkts_n - pos < MLX5_VPMD_DESCS_PER_LOOP ?
				   -1UL >> ((pkts_n - pos) *
					    sizeof(uint16_t) * 8) : 0);
		p0 = (void *)&cq[pos].pkt_info;
		p1 = p0 + (pkts_n - pos > 1) * sizeof(struct mlx5_cqe);
		p2 = p1 + (pkts_n - pos > 2) * sizeof(struct mlx5_cqe);
		p3 = p2 + (pkts_n - pos > 3) * sizeof(struct mlx5_cqe);
		/* B.0 (CQE 3) load a block having op_own. */
		c3 = vld1q_u64((uint64_t *)(p3 + 48));
		/* B.0 (CQE 2) load a block having op_own. */
		c2 = vld1q_u64((uint64_t *)(p2 + 48));
		/* B.0 (CQE 1) load a block having op_own. */
		c1 = vld1q_u64((uint64_t *)(p1 + 48));
		/* B.0 (CQE 0) load a block having op_own. */
		c0 = vld1q_u64((uint64_t *)(p0 + 48));
		/* Synchronize for loading the rest of blocks. */
		rte_io_rmb();
		/* Prefetch next 4 CQEs. */
		if (pkts_n - pos >= 2 * MLX5_VPMD_DESCS_PER_LOOP) {
			unsigned int next = pos + MLX5_VPMD_DESCS_PER_LOOP;
			rte_prefetch_non_temporal(&cq[next]);
			rte_prefetch_non_temporal(&cq[next + 1]);
			rte_prefetch_non_temporal(&cq[next + 2]);
			rte_prefetch_non_temporal(&cq[next + 3]);
		}
		__asm__ volatile (
		/* B.1 (CQE 3) load the rest of blocks. */
560 "ld1 {v16.16b - v18.16b}, [%[p3]] \n\t"
561 /* B.2 (CQE 3) move the block having op_own. */
562 "mov v19.16b, %[c3].16b \n\t"
563 /* B.3 (CQE 3) extract 16B fields. */
564 "tbl v23.16b, {v16.16b - v19.16b}, %[cqe_shuf_m].16b \n\t"
565 /* B.1 (CQE 2) load the rest of blocks. */
566 "ld1 {v16.16b - v18.16b}, [%[p2]] \n\t"
567 /* B.4 (CQE 3) adjust CRC length. */
568 "sub v23.8h, v23.8h, %[crc_adj].8h \n\t"
569 /* C.1 (CQE 3) generate final structure for mbuf. */
570 "tbl v15.16b, {v23.16b}, %[mb_shuf_m].16b \n\t"
571 /* B.2 (CQE 2) move the block having op_own. */
572 "mov v19.16b, %[c2].16b \n\t"
573 /* B.3 (CQE 2) extract 16B fields. */
574 "tbl v22.16b, {v16.16b - v19.16b}, %[cqe_shuf_m].16b \n\t"
575 /* B.1 (CQE 1) load the rest of blocks. */
576 "ld1 {v16.16b - v18.16b}, [%[p1]] \n\t"
577 /* B.4 (CQE 2) adjust CRC length. */
578 "sub v22.8h, v22.8h, %[crc_adj].8h \n\t"
579 /* C.1 (CQE 2) generate final structure for mbuf. */
580 "tbl v14.16b, {v22.16b}, %[mb_shuf_m].16b \n\t"
581 /* B.2 (CQE 1) move the block having op_own. */
582 "mov v19.16b, %[c1].16b \n\t"
583 /* B.3 (CQE 1) extract 16B fields. */
584 "tbl v21.16b, {v16.16b - v19.16b}, %[cqe_shuf_m].16b \n\t"
585 /* B.1 (CQE 0) load the rest of blocks. */
586 "ld1 {v16.16b - v18.16b}, [%[p0]] \n\t"
587 /* B.4 (CQE 1) adjust CRC length. */
588 "sub v21.8h, v21.8h, %[crc_adj].8h \n\t"
589 /* C.1 (CQE 1) generate final structure for mbuf. */
590 "tbl v13.16b, {v21.16b}, %[mb_shuf_m].16b \n\t"
591 /* B.2 (CQE 0) move the block having op_own. */
592 "mov v19.16b, %[c0].16b \n\t"
593 /* A.1 load mbuf pointers. */
594 "ld1 {v24.2d - v25.2d}, [%[elts_p]] \n\t"
595 /* B.3 (CQE 0) extract 16B fields. */
596 "tbl v20.16b, {v16.16b - v19.16b}, %[cqe_shuf_m].16b \n\t"
597 /* B.4 (CQE 0) adjust CRC length. */
598 "sub v20.8h, v20.8h, %[crc_adj].8h \n\t"
599 /* D.1 extract op_own byte. */
600 "tbl %[op_own].8b, {v20.16b - v23.16b}, %[owner_shuf_m].8b \n\t"
601 /* C.2 (CQE 3) adjust flow mark. */
602 "add v15.4s, v15.4s, %[flow_mark_adj].4s \n\t"
603 /* C.3 (CQE 3) fill in mbuf - rx_descriptor_fields1. */
604 "st1 {v15.2d}, [%[e3]] \n\t"
605 /* C.2 (CQE 2) adjust flow mark. */
606 "add v14.4s, v14.4s, %[flow_mark_adj].4s \n\t"
607 /* C.3 (CQE 2) fill in mbuf - rx_descriptor_fields1. */
608 "st1 {v14.2d}, [%[e2]] \n\t"
609 /* C.1 (CQE 0) generate final structure for mbuf. */
610 "tbl v12.16b, {v20.16b}, %[mb_shuf_m].16b \n\t"
611 /* C.2 (CQE 1) adjust flow mark. */
612 "add v13.4s, v13.4s, %[flow_mark_adj].4s \n\t"
613 /* C.3 (CQE 1) fill in mbuf - rx_descriptor_fields1. */
614 "st1 {v13.2d}, [%[e1]] \n\t"
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Extract byte_cnt. */
		"tbl %[byte_cnt].8b, {v20.16b - v23.16b}, %[len_shuf_m].8b \n\t"
#endif
		/* Extract ptype_info. */
		"tbl %[ptype_info].16b, {v20.16b - v23.16b}, %[ptype_shuf_m].16b \n\t"
		/* Extract flow_tag. */
		"tbl %[flow_tag].16b, {v20.16b - v23.16b}, %[ftag_shuf_m].16b \n\t"
		/* A.2 copy mbuf pointers. */
		"st1 {v24.2d - v25.2d}, [%[pkts_p]] \n\t"
		/* C.2 (CQE 0) adjust flow mark. */
		"add v12.4s, v12.4s, %[flow_mark_adj].4s \n\t"
		/* C.3 (CQE 0) fill in mbuf - rx_descriptor_fields1. */
628 "st1 {v12.2d}, [%[e0]] \n\t"
629 :[op_own]"=&w"(op_own),
630 [byte_cnt]"=&w"(byte_cnt),
631 [ptype_info]"=&w"(ptype_info),
632 [flow_tag]"=&w"(flow_tag)
633 :[p3]"r"(p3), [p2]"r"(p2), [p1]"r"(p1), [p0]"r"(p0),
634 [e3]"r"(e3), [e2]"r"(e2), [e1]"r"(e1), [e0]"r"(e0),
635 [c3]"w"(c3), [c2]"w"(c2), [c1]"w"(c1), [c0]"w"(c0),
		 [cqe_shuf_m]"w"(cqe_shuf_m),
		 [mb_shuf_m]"w"(mb_shuf_m),
		 [owner_shuf_m]"w"(owner_shuf_m),
		 [len_shuf_m]"w"(len_shuf_m),
		 [ptype_shuf_m]"w"(ptype_shuf_m),
		 [ftag_shuf_m]"w"(ftag_shuf_m),
		 [crc_adj]"w"(crc_adj),
		 [flow_mark_adj]"w"(flow_mark_adj)
		:"memory",
		 "v12", "v13", "v14", "v15",
		 "v16", "v17", "v18", "v19",
		 "v20", "v21", "v22", "v23",
		 "v24", "v25");
		/* D.2 flip owner bit to mark CQEs from last round. */
		owner_mask = vand_u16(op_own, owner_check);
		owner_mask = vceq_u16(owner_mask, ownership);
		/* D.3 get mask for invalidated CQEs. */
		opcode = vand_u16(op_own, opcode_check);
		invalid_mask = vceq_u16(opcode_check, opcode);
		/* E.1 find compressed CQE format. */
		comp_mask = vand_u16(op_own, format_check);
		comp_mask = vceq_u16(comp_mask, format_check);
		/* D.4 mask out beyond boundary. */
		invalid_mask = vorr_u16(invalid_mask, mask);
		/* D.5 merge invalid_mask with invalid owner. */
		invalid_mask = vorr_u16(invalid_mask, owner_mask);
		/* E.2 mask out invalid entries. */
		comp_mask = vbic_u16(comp_mask, invalid_mask);
		/* E.3 get the first compressed CQE. */
		comp_idx = __builtin_clzl(vget_lane_u64(vreinterpret_u64_u16(
					  comp_mask), 0)) /
					  (sizeof(uint16_t) * 8);
		/* D.6 mask out entries after the compressed CQE. */
		mask = vcreate_u16(comp_idx < MLX5_VPMD_DESCS_PER_LOOP ?
				   -1UL >> (comp_idx * sizeof(uint16_t) * 8) :
				   0);
		invalid_mask = vorr_u16(invalid_mask, mask);
		/* D.7 count non-compressed valid CQEs. */
		n = __builtin_clzl(vget_lane_u64(vreinterpret_u64_u16(
				   invalid_mask), 0)) / (sizeof(uint16_t) * 8);
		nocmp_n += n;
		/* D.2 get the final invalid mask. */
		mask = vcreate_u16(n < MLX5_VPMD_DESCS_PER_LOOP ?
				   -1UL >> (n * sizeof(uint16_t) * 8) : 0);
		invalid_mask = vorr_u16(invalid_mask, mask);
		/* D.3 check error in opcode. */
		opcode = vceq_u16(resp_err_check, opcode);
		opcode = vbic_u16(opcode, invalid_mask);
		/* D.4 mark if any error is set */
		*err |= vget_lane_u64(vreinterpret_u64_u16(opcode), 0);
		/* C.4 fill in mbuf - rearm_data and packet_type. */
		rxq_cq_to_ptype_oflags_v(rxq, ptype_info, flow_tag,
					 opcode, &elts[pos]);
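		/* CQE timestamps are big-endian; convert per mbuf. */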
		if (rxq->hw_timestamp) {
			elts[pos]->timestamp =
				rte_be_to_cpu_64(
					container_of(p0, struct mlx5_cqe,
						     pkt_info)->timestamp);
			elts[pos + 1]->timestamp =
				rte_be_to_cpu_64(
					container_of(p1, struct mlx5_cqe,
						     pkt_info)->timestamp);
			elts[pos + 2]->timestamp =
				rte_be_to_cpu_64(
					container_of(p2, struct mlx5_cqe,
						     pkt_info)->timestamp);
			elts[pos + 3]->timestamp =
				rte_be_to_cpu_64(
					container_of(p3, struct mlx5_cqe,
						     pkt_info)->timestamp);
		}
		if (!!rxq->flow_meta_mask) {
			/* This code is subject to further optimization. */
			int32_t offs = rxq->flow_meta_offset;

			*RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *) =
				container_of(p0, struct mlx5_cqe,
					     pkt_info)->flow_table_metadata;
			*RTE_MBUF_DYNFIELD(pkts[pos + 1], offs, uint32_t *) =
				container_of(p1, struct mlx5_cqe,
					     pkt_info)->flow_table_metadata;
			*RTE_MBUF_DYNFIELD(pkts[pos + 2], offs, uint32_t *) =
				container_of(p2, struct mlx5_cqe,
					     pkt_info)->flow_table_metadata;
			*RTE_MBUF_DYNFIELD(pkts[pos + 3], offs, uint32_t *) =
				container_of(p3, struct mlx5_cqe,
					     pkt_info)->flow_table_metadata;
			if (*RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *))
				elts[pos]->ol_flags |= rxq->flow_meta_mask;
			if (*RTE_MBUF_DYNFIELD(pkts[pos + 1], offs, uint32_t *))
				elts[pos + 1]->ol_flags |= rxq->flow_meta_mask;
			if (*RTE_MBUF_DYNFIELD(pkts[pos + 2], offs, uint32_t *))
				elts[pos + 2]->ol_flags |= rxq->flow_meta_mask;
			if (*RTE_MBUF_DYNFIELD(pkts[pos + 3], offs, uint32_t *))
				elts[pos + 3]->ol_flags |= rxq->flow_meta_mask;
		}
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Add up received bytes count. */
		byte_cnt = vbic_u16(byte_cnt, invalid_mask);
		rcvd_byte += vget_lane_u64(vpaddl_u32(vpaddl_u16(byte_cnt)), 0);
#endif
		/*
		 * Break the loop unless more valid CQE is expected, or if
		 * there's a compressed CQE.
		 */
		if (n != MLX5_VPMD_DESCS_PER_LOOP)
			break;
	}
	/* If no new CQE seen, return without updating cq_db. */
	if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP))
		return rcvd_pkt;
	/* Update the consumer indexes for non-compressed CQEs. */
	MLX5_ASSERT(nocmp_n <= pkts_n);
	rxq->cq_ci += nocmp_n;
	rxq->rq_pi += nocmp_n;
	rcvd_pkt += nocmp_n;
#ifdef MLX5_PMD_SOFT_COUNTERS
	rxq->stats.ipackets += nocmp_n;
	rxq->stats.ibytes += rcvd_byte;
#endif
	/* Decompress the last CQE if compressed. */
	if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n) {
		MLX5_ASSERT(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
		rxq->decompressed = rxq_cq_decompress_v(rxq, &cq[nocmp_n],
							&elts[nocmp_n]);
		/* Return more packets if needed. */
		if (nocmp_n < pkts_n) {
			uint16_t n = rxq->decompressed;

			n = RTE_MIN(n, pkts_n - nocmp_n);
			rxq_copy_mbuf_v(rxq, &pkts[nocmp_n], n);
			rxq->rq_pi += n;
			rcvd_pkt += n;
			rxq->decompressed -= n;
		}
	}
	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
	return rcvd_pkt;
}

#endif /* RTE_PMD_MLX5_RXTX_VEC_NEON_H_ */