/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_RXTX_VEC_ALTIVEC_H_
#define RTE_PMD_MLX5_RXTX_VEC_ALTIVEC_H_
#include <rte_mempool.h>
#include <rte_prefetch.h>

#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rxtx_vec.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#endif
/**
 * Store free buffers to RX SW ring.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param pkts
 *   Pointer to array of packets to be stored.
 * @param n
 *   Number of packets to be stored.
 */
static inline void
rxq_copy_mbuf_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t n)
{
	const uint16_t q_mask = (1 << rxq->elts_n) - 1;
	struct rte_mbuf **elts = &(*rxq->elts)[rxq->rq_pi & q_mask];
	unsigned int pos;
	uint16_t p = n & -2;

	for (pos = 0; pos < p; pos += 2) {
		vector unsigned char mbp;

		mbp = (vector unsigned char)vec_vsx_ld(0,
			(signed int const *)&elts[pos]);
		*(vector unsigned char *)&pkts[pos] = mbp;
	}
	if (n & 1)
		pkts[pos] = elts[pos];
}
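
/*
 * Note: each 16B VSX load/store above moves a pair of 64-bit mbuf
 * pointers at once, so the loop copies two ring entries per iteration
 * and the scalar tail handles an odd trailing element (e.g. n == 5
 * copies two vector pairs, then elts[4]).
 */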

/**
 * Decompress a compressed completion and fill in mbufs in RX SW ring with data
 * extracted from the title completion descriptor.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param cq
 *   Pointer to completion array having a compressed completion at first.
 * @param elts
 *   Pointer to SW ring to be filled. The first mbuf has to be pre-built from
 *   the title completion descriptor to be copied to the rest of mbufs.
 *
 * @return
 *   Number of mini-CQEs successfully decompressed.
 */
static inline uint16_t
rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
		    struct rte_mbuf **elts)
{
	volatile struct mlx5_mini_cqe8 *mcq = (void *)&(cq + 1)->pkt_info;
	struct rte_mbuf *t_pkt = elts[0]; /* Title packet is pre-built. */
	const vector unsigned char zero = (vector unsigned char){0};
	/* Mask to shuffle from extracted mini CQE to mbuf. */
	const vector unsigned char shuf_mask1 = (vector unsigned char){
		-1, -1, -1, -1, /* skip packet_type */
		 7,  6, -1, -1, /* bswap16, pkt_len */
		 7,  6,         /* bswap16, data_len */
		-1, -1,         /* skip vlan_tci */
		 3,  2,  1,  0};/* bswap32, rss */
	const vector unsigned char shuf_mask2 = (vector unsigned char){
		-1, -1, -1, -1, /* skip packet_type */
		15, 14, -1, -1, /* bswap16, pkt_len */
		15, 14,         /* bswap16, data_len */
		-1, -1,         /* skip vlan_tci */
		11, 10,  9,  8};/* bswap32, rss */
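	/*
	 * Each mask byte above is a vec_perm() source index: indexes 0-15
	 * pick bytes from the mini CQE pair, and -1 (31 after the 5-bit
	 * masking) picks a byte from the second, all-zero operand. Listing
	 * a 16-bit field's bytes in reverse order (e.g. 7, 6) byte-swaps
	 * it from big-endian CQE layout to CPU order.
	 */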
	/* Restore the compressed count. Must be 16 bits. */
	const uint16_t mcqe_n = t_pkt->data_len +
		(rxq->crc_present * RTE_ETHER_CRC_LEN);
	const vector unsigned char rearm =
		(vector unsigned char)vec_vsx_ld(0,
		(signed int const *)&t_pkt->rearm_data);
	const vector unsigned char rxdf =
		(vector unsigned char)vec_vsx_ld(0,
		(signed int const *)&t_pkt->rx_descriptor_fields1);
	const vector unsigned char crc_adj =
		(vector unsigned char)(vector unsigned short){
		0, 0, rxq->crc_present * RTE_ETHER_CRC_LEN, 0,
		rxq->crc_present * RTE_ETHER_CRC_LEN, 0, 0, 0};
	const vector unsigned short rxdf_sel_mask =
		(vector unsigned short){
		0xffff, 0xffff, 0, 0, 0, 0xffff, 0, 0};
	const uint32_t flow_tag = t_pkt->hash.fdir.hi;
	unsigned int pos;
	unsigned int i;
	unsigned int inv = 0;
#ifdef MLX5_PMD_SOFT_COUNTERS
	const vector unsigned char ones = vec_splat_u8(-1);
	uint32_t rcvd_byte = 0;
	/* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */
	const vector unsigned char len_shuf_mask = (vector unsigned char){

	/*
	 * A. load mCQEs into a 128bit register.
	 * B. store rearm data to mbuf.
	 * C. combine data from mCQEs with rx_descriptor_fields1.
	 * D. store rx_descriptor_fields1.
	 * E. store flow tag (rte_flow mark).
	 */
	for (pos = 0; pos < mcqe_n; ) {
		vector unsigned char mcqe1, mcqe2;
		vector unsigned char rxdf1, rxdf2;
#ifdef MLX5_PMD_SOFT_COUNTERS
		const vector unsigned short mcqe_sel_mask =
			(vector unsigned short){0, 0, 0xffff, 0xffff,
			0, 0, 0xffff, 0xffff};
		const vector unsigned char lower_half = {
			0, 1, 4, 5, 8, 9, 12, 13,
			16, 17, 20, 21, 24, 25, 28, 29};
		const vector unsigned char upper_half = {
			2, 3, 6, 7, 10, 11, 14, 15,
			18, 19, 22, 23, 26, 27, 30, 31};
		vector unsigned short left, right;
		vector unsigned char byte_cnt, invalid_mask;
		vector unsigned long lshift;
		__attribute__((altivec(vector__)))
			__attribute__((altivec(bool__)))
			unsigned long long shmask;
		const vector unsigned long shmax = {64, 64};
#endif
		if (!(pos & 0x7) && pos + 8 < mcqe_n)
			rte_prefetch0((void *)(cq + pos + 8));

		/* A.1 load mCQEs into a 128bit register. */
		mcqe1 = (vector unsigned char)vec_vsx_ld(0,
			(signed int const *)&mcq[pos % 8]);
		mcqe2 = (vector unsigned char)vec_vsx_ld(0,
			(signed int const *)&mcq[pos % 8 + 2]);

		/* B.1 store rearm data to mbuf. */
		*(vector unsigned char *)
			&elts[pos]->rearm_data = rearm;
		*(vector unsigned char *)
			&elts[pos + 1]->rearm_data = rearm;

		/* C.1 combine data from mCQEs with rx_descriptor_fields1. */
		rxdf1 = vec_perm(mcqe1, zero, shuf_mask1);
		rxdf2 = vec_perm(mcqe1, zero, shuf_mask2);
		rxdf1 = (vector unsigned char)
			((vector unsigned short)rxdf1 -
			(vector unsigned short)crc_adj);
		rxdf2 = (vector unsigned char)
			((vector unsigned short)rxdf2 -
			(vector unsigned short)crc_adj);
		rxdf1 = (vector unsigned char)
			vec_sel((vector unsigned short)rxdf1,
			(vector unsigned short)rxdf, rxdf_sel_mask);
		rxdf2 = (vector unsigned char)
			vec_sel((vector unsigned short)rxdf2,
			(vector unsigned short)rxdf, rxdf_sel_mask);

		/* D.1 store rx_descriptor_fields1. */
		*(vector unsigned char *)
			&elts[pos]->rx_descriptor_fields1 = rxdf1;
		*(vector unsigned char *)
			&elts[pos + 1]->rx_descriptor_fields1 = rxdf2;

		/* B.1 store rearm data to mbuf. */
		*(vector unsigned char *)
			&elts[pos + 2]->rearm_data = rearm;
		*(vector unsigned char *)
			&elts[pos + 3]->rearm_data = rearm;

		/* C.1 combine data from mCQEs with rx_descriptor_fields1. */
		rxdf1 = vec_perm(mcqe2, zero, shuf_mask1);
		rxdf2 = vec_perm(mcqe2, zero, shuf_mask2);
		rxdf1 = (vector unsigned char)
			((vector unsigned short)rxdf1 -
			(vector unsigned short)crc_adj);
		rxdf2 = (vector unsigned char)
			((vector unsigned short)rxdf2 -
			(vector unsigned short)crc_adj);
		rxdf1 = (vector unsigned char)
			vec_sel((vector unsigned short)rxdf1,
			(vector unsigned short)rxdf, rxdf_sel_mask);
		rxdf2 = (vector unsigned char)
			vec_sel((vector unsigned short)rxdf2,
			(vector unsigned short)rxdf, rxdf_sel_mask);

		/* D.1 store rx_descriptor_fields1. */
		*(vector unsigned char *)
			&elts[pos + 2]->rx_descriptor_fields1 = rxdf1;
		*(vector unsigned char *)
			&elts[pos + 3]->rx_descriptor_fields1 = rxdf2;

#ifdef MLX5_PMD_SOFT_COUNTERS
		invalid_mask = (vector unsigned char)(vector unsigned long){
			(mcqe_n - pos) * sizeof(uint16_t) * 8, 0};
		lshift =
			vec_splat((vector unsigned long)invalid_mask, 0);
		shmask = vec_cmpgt(shmax, lshift);
		invalid_mask = (vector unsigned char)
			vec_sl((vector unsigned long)ones, lshift);
		invalid_mask = (vector unsigned char)
			vec_sel((vector unsigned long)shmask,
			(vector unsigned long)invalid_mask, shmask);
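		/*
		 * The select above guards against a shift count of 64 or
		 * more, which is undefined for vec_sl: lanes where
		 * shmax > lshift keep the shifted all-ones mask, while the
		 * remaining lanes fall back to the all-zero shmask lanes.
		 */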
		mcqe1 = (vector unsigned char)
			vec_sro((vector unsigned short)mcqe1,
			(vector unsigned char){32});
		byte_cnt = (vector unsigned char)
			vec_sel((vector unsigned short)mcqe1,
			(vector unsigned short)mcqe2, mcqe_sel_mask);
		byte_cnt = vec_perm(byte_cnt, zero, len_shuf_mask);
		byte_cnt = (vector unsigned char)
			vec_andc((vector unsigned long)byte_cnt,
			(vector unsigned long)invalid_mask);
		left = vec_perm((vector unsigned short)byte_cnt,
			(vector unsigned short)zero, lower_half);
		right = vec_perm((vector unsigned short)byte_cnt,
			(vector unsigned short)zero, upper_half);
		byte_cnt = (vector unsigned char)vec_add(left, right);
		left = vec_perm((vector unsigned short)byte_cnt,
			(vector unsigned short)zero, lower_half);
		right = vec_perm((vector unsigned short)byte_cnt,
			(vector unsigned short)zero, upper_half);
		byte_cnt = (vector unsigned char)vec_add(left, right);
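		/*
		 * The two perm/add rounds above are a horizontal add,
		 * folding the four 16-bit byte counts into a single sum
		 * accumulated into rcvd_byte below.
		 */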
		rcvd_byte += ((vector unsigned long)byte_cnt)[0];
#endif

		/* E.1 store flow tag (rte_flow mark). */
		elts[pos]->hash.fdir.hi = flow_tag;
		elts[pos + 1]->hash.fdir.hi = flow_tag;
		elts[pos + 2]->hash.fdir.hi = flow_tag;
		elts[pos + 3]->hash.fdir.hi = flow_tag;

		pos += MLX5_VPMD_DESCS_PER_LOOP;
		/* Move to next CQE and invalidate consumed CQEs. */
		if (!(pos & 0x7) && pos < mcqe_n) {
			mcq = (void *)&(cq + pos)->pkt_info;
			for (i = 0; i < 8; ++i)
				cq[inv++].op_own = MLX5_CQE_INVALIDATE;
		}
	}

	/* Invalidate the rest of CQEs. */
	for (; inv < mcqe_n; ++inv)
		cq[inv].op_own = MLX5_CQE_INVALIDATE;

#ifdef MLX5_PMD_SOFT_COUNTERS
	rxq->stats.ipackets += mcqe_n;
	rxq->stats.ibytes += rcvd_byte;
#endif

	rxq->cq_ci += mcqe_n;
	return mcqe_n;
}
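
/*
 * For reference only: a scalar sketch of what the vector loop above does
 * per mini-CQE (field names per struct mlx5_mini_cqe8; rearm data and the
 * remaining descriptor fields are copied from the title packet t_pkt):
 *
 *	uint16_t crc = rxq->crc_present * RTE_ETHER_CRC_LEN;
 *
 *	for (i = 0; i < mcqe_n; ++i) {
 *		struct rte_mbuf *m = elts[i];
 *		uint32_t len = rte_be_to_cpu_32(mcq[i % 8].byte_cnt);
 *
 *		m->pkt_len = len - crc;
 *		m->data_len = len - crc;
 *		m->hash.rss = rte_be_to_cpu_32(mcq[i % 8].rx_hash_result);
 *		m->hash.fdir.hi = flow_tag;
 *	}
 */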

/**
 * Calculate packet type and offload flag for mbuf and store it.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param cqes[4]
 *   Array of four 16bytes completions extracted from the original completion
 *   descriptor.
 * @param op_err
 *   Opcode vector having responder error status. Each field is 4B.
 * @param pkts
 *   Pointer to array of packets to be filled.
 */
static inline void
rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
			 vector unsigned char cqes[4], vector unsigned char op_err,
			 struct rte_mbuf **pkts)
{
	vector unsigned char pinfo0, pinfo1;
	vector unsigned char pinfo, ptype;
	vector unsigned char ol_flags = (vector unsigned char)
		(vector unsigned int){
		rxq->rss_hash * PKT_RX_RSS_HASH |
			rxq->hw_timestamp * PKT_RX_TIMESTAMP,
		rxq->rss_hash * PKT_RX_RSS_HASH |
			rxq->hw_timestamp * PKT_RX_TIMESTAMP,
		rxq->rss_hash * PKT_RX_RSS_HASH |
			rxq->hw_timestamp * PKT_RX_TIMESTAMP,
		rxq->rss_hash * PKT_RX_RSS_HASH |
			rxq->hw_timestamp * PKT_RX_TIMESTAMP};
	vector unsigned char cv_flags;
	const vector unsigned char zero = (vector unsigned char){0};
	const vector unsigned char ptype_mask =
		(vector unsigned char)(vector unsigned int){
		0x0000fd06, 0x0000fd06, 0x0000fd06, 0x0000fd06};
	const vector unsigned char ptype_ol_mask =
		(vector unsigned char)(vector unsigned int){
		0x00000106, 0x00000106, 0x00000106, 0x00000106};
	const vector unsigned char pinfo_mask =
		(vector unsigned char)(vector unsigned int){
		0x00000003, 0x00000003, 0x00000003, 0x00000003};
	const vector unsigned char cv_flag_sel = (vector unsigned char){
		0, (uint8_t)(PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED),
		(uint8_t)(PKT_RX_IP_CKSUM_GOOD >> 1), 0,
		(uint8_t)(PKT_RX_L4_CKSUM_GOOD >> 1), 0,
		(uint8_t)((PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1),
		0, 0, 0, 0, 0, 0, 0, 0, 0};
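	/*
	 * cv_flag_sel acts as a byte lookup table: the masked
	 * l3_ok/l4_ok/cv bits of each CQE index into it via vec_perm()
	 * below, yielding the matching PKT_RX_* flags. The checksum flags
	 * are stored pre-shifted right by one bit so they fit in a byte
	 * and are shifted back afterwards.
	 */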
	const vector unsigned char cv_mask =
		(vector unsigned char)(vector unsigned int){
		PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
			PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
		PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
			PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
		PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
			PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
		PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
			PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED};
	const vector unsigned char mbuf_init =
		(vector unsigned char)(vector unsigned long){
		*(__attribute__((__aligned__(8))) unsigned long *)
		&rxq->mbuf_initializer, 0LL};
	const vector unsigned short rearm_sel_mask =
		(vector unsigned short){0, 0, 0, 0, 0xffff, 0xffff, 0, 0};
	vector unsigned char rearm0, rearm1, rearm2, rearm3;
	uint8_t pt_idx0, pt_idx1, pt_idx2, pt_idx3;

	/* Extract pkt_info field. */
	pinfo0 = (vector unsigned char)
		vec_mergeh((vector unsigned int)cqes[0],
		(vector unsigned int)cqes[1]);
	pinfo1 = (vector unsigned char)
		vec_mergeh((vector unsigned int)cqes[2],
		(vector unsigned int)cqes[3]);
	pinfo = (vector unsigned char)
		vec_mergeh((vector unsigned long)pinfo0,
		(vector unsigned long)pinfo1);

	/* Extract hdr_type_etc field. */
	pinfo0 = (vector unsigned char)
		vec_mergel((vector unsigned int)cqes[0],
		(vector unsigned int)cqes[1]);
	pinfo1 = (vector unsigned char)
		vec_mergel((vector unsigned int)cqes[2],
		(vector unsigned int)cqes[3]);
	ptype = (vector unsigned char)
		vec_mergeh((vector unsigned long)pinfo0,
		(vector unsigned long)pinfo1);

	if (rxq->mark) {
		const vector unsigned char pinfo_ft_mask =
			(vector unsigned char)(vector unsigned int){
			0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00};
		const vector unsigned char fdir_flags =
			(vector unsigned char)(vector unsigned int){
			PKT_RX_FDIR, PKT_RX_FDIR,
			PKT_RX_FDIR, PKT_RX_FDIR};
		vector unsigned char fdir_id_flags =
			(vector unsigned char)(vector unsigned int){
			PKT_RX_FDIR_ID, PKT_RX_FDIR_ID,
			PKT_RX_FDIR_ID, PKT_RX_FDIR_ID};
		vector unsigned char flow_tag, invalid_mask;

		flow_tag = (vector unsigned char)
			vec_and((vector unsigned long)pinfo,
			(vector unsigned long)pinfo_ft_mask);

		/* Check if flow tag is non-zero then set PKT_RX_FDIR. */
		invalid_mask = (vector unsigned char)
			vec_cmpeq((vector unsigned int)flow_tag,
			(vector unsigned int)zero);
		ol_flags = (vector unsigned char)
			vec_or((vector unsigned long)ol_flags,
			(vector unsigned long)
			vec_andc((vector unsigned long)fdir_flags,
			(vector unsigned long)invalid_mask));

		/* Mask out invalid entries. */
		fdir_id_flags = (vector unsigned char)
			vec_andc((vector unsigned long)fdir_id_flags,
			(vector unsigned long)invalid_mask);

		/* Check if flow tag is MLX5_FLOW_MARK_DEFAULT. */
		ol_flags = (vector unsigned char)
			vec_or((vector unsigned long)ol_flags,
			(vector unsigned long)
			vec_andc((vector unsigned long)fdir_id_flags,
			(vector unsigned long)
			vec_cmpeq((vector unsigned int)flow_tag,
			(vector unsigned int)pinfo_ft_mask)));
	}

	/*
	 * Merge the two fields to generate the following:
	 * bit[11:10] = l3_hdr_type
	 * bit[14:12] = l4_hdr_type
	 * bit[17]    = outer_l3_type
	 */
	ptype = (vector unsigned char)
		vec_and((vector unsigned long)ptype,
		(vector unsigned long)ptype_mask);
	pinfo = (vector unsigned char)
		vec_and((vector unsigned long)pinfo,
		(vector unsigned long)pinfo_mask);
	pinfo = (vector unsigned char)
		vec_sl((vector unsigned int)pinfo,
		(vector unsigned int){16, 16, 16, 16});

	/* Merge ptype and pinfo fields for ol_flags calculation. */
	pinfo = (vector unsigned char)
		vec_or((vector unsigned long)ptype,
		(vector unsigned long)pinfo);
	ptype = (vector unsigned char)
		vec_sr((vector unsigned int)pinfo,
		(vector unsigned int){10, 10, 10, 10});
	ptype = (vector unsigned char)
		vec_packs((vector unsigned int)ptype,
		(vector unsigned int)zero);

	/* Errored packets will have RTE_PTYPE_ALL_MASK. */
	op_err = (vector unsigned char)
		vec_sr((vector unsigned short)op_err,
		(vector unsigned short){8, 8, 8, 8, 8, 8, 8, 8});
	ptype = (vector unsigned char)
		vec_or((vector unsigned long)ptype,
		(vector unsigned long)op_err);

	pt_idx0 = (uint8_t)((vector unsigned char)ptype)[0];
	pt_idx1 = (uint8_t)((vector unsigned char)ptype)[2];
	pt_idx2 = (uint8_t)((vector unsigned char)ptype)[4];
	pt_idx3 = (uint8_t)((vector unsigned char)ptype)[6];

	pkts[0]->packet_type = mlx5_ptype_table[pt_idx0] |
		!!(pt_idx0 & (1 << 6)) * rxq->tunnel;
	pkts[1]->packet_type = mlx5_ptype_table[pt_idx1] |
		!!(pt_idx1 & (1 << 6)) * rxq->tunnel;
	pkts[2]->packet_type = mlx5_ptype_table[pt_idx2] |
		!!(pt_idx2 & (1 << 6)) * rxq->tunnel;
	pkts[3]->packet_type = mlx5_ptype_table[pt_idx3] |
		!!(pt_idx3 & (1 << 6)) * rxq->tunnel;
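	/*
	 * Bit 6 of each ptype index above marks a tunneled packet, and
	 * rxq->tunnel is zero unless tunnel offload is configured, so the
	 * multiplication ORs in the tunnel ptype only when both hold.
	 */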

	/* Fill flags for checksum and VLAN. */
	pinfo = (vector unsigned char)
		vec_and((vector unsigned long)pinfo,
		(vector unsigned long)ptype_ol_mask);
	pinfo = vec_perm(cv_flag_sel, zero, pinfo);

	/* Locate checksum flags at byte[2:1] and merge with VLAN flags. */
	cv_flags = (vector unsigned char)
		vec_sl((vector unsigned int)pinfo,
		(vector unsigned int){9, 9, 9, 9});
	cv_flags = (vector unsigned char)
		vec_or((vector unsigned long)pinfo,
		(vector unsigned long)cv_flags);

	/* Move back flags to start from byte[0]. */
	cv_flags = (vector unsigned char)
		vec_sr((vector unsigned int)cv_flags,
		(vector unsigned int){8, 8, 8, 8});

	/* Mask out garbage bits. */
	cv_flags = (vector unsigned char)
		vec_and((vector unsigned long)cv_flags,
		(vector unsigned long)cv_mask);

	/* Merge to ol_flags. */
	ol_flags = (vector unsigned char)
		vec_or((vector unsigned long)ol_flags,
		(vector unsigned long)cv_flags);

	/* Merge mbuf_init and ol_flags. */
	rearm0 = (vector unsigned char)
		vec_sel((vector unsigned short)mbuf_init,
		(vector unsigned short)
		vec_slo((vector unsigned short)ol_flags,
		(vector unsigned char){64}), rearm_sel_mask);
	rearm1 = (vector unsigned char)
		vec_sel((vector unsigned short)mbuf_init,
		(vector unsigned short)
		vec_slo((vector unsigned short)ol_flags,
		(vector unsigned char){32}), rearm_sel_mask);
	rearm2 = (vector unsigned char)
		vec_sel((vector unsigned short)mbuf_init,
		(vector unsigned short)ol_flags, rearm_sel_mask);
	rearm3 = (vector unsigned char)
		vec_sel((vector unsigned short)mbuf_init,
		(vector unsigned short)
		vec_sro((vector unsigned short)ol_flags,
		(vector unsigned char){32}), rearm_sel_mask);
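	/*
	 * vec_slo()/vec_sro() above shift the packed per-CQE ol_flags by
	 * whole bytes, so that each CQE's 32-bit lane lines up with the
	 * ol_flags half of the 16B rearm word selected by rearm_sel_mask.
	 */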

	/* Write 8B rearm_data and 8B ol_flags. */
	vec_vsx_st(rearm0, 0,
		(vector unsigned char *)&pkts[0]->rearm_data);
	vec_vsx_st(rearm1, 0,
		(vector unsigned char *)&pkts[1]->rearm_data);
	vec_vsx_st(rearm2, 0,
		(vector unsigned char *)&pkts[2]->rearm_data);
	vec_vsx_st(rearm3, 0,
		(vector unsigned char *)&pkts[3]->rearm_data);
}

/**
 * Receive burst of packets. An errored completion also consumes a mbuf, but the
 * packet_type is set to be RTE_PTYPE_ALL_MASK. Marked mbufs should be freed
 * before returning to application.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 * @param err
 *   Pointer to a flag. Set non-zero value if pkts array has at least one error
 *   packet to handle.
 *
 * @return
 *   Number of packets received including errors (<= pkts_n).
 */
static inline uint16_t
rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
	    uint64_t *err)
{
	const uint16_t q_n = 1 << rxq->cqe_n;
	const uint16_t q_mask = q_n - 1;
	volatile struct mlx5_cqe *cq;
	struct rte_mbuf **elts;
	unsigned int pos;
	uint64_t n;
	uint16_t repl_n;
	uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP;
	uint16_t nocmp_n = 0;
	uint16_t rcvd_pkt = 0;
	unsigned int cq_idx = rxq->cq_ci & q_mask;
	unsigned int elts_idx;
	unsigned int ownership = !!(rxq->cq_ci & (q_mask + 1));
	const vector unsigned char zero = (vector unsigned char){0};
	const vector unsigned char ones = vec_splat_u8(-1);
	const vector unsigned char owner_check =
		(vector unsigned char)(vector unsigned long){
		0x0100000001000000LL, 0x0100000001000000LL};
	const vector unsigned char opcode_check =
		(vector unsigned char)(vector unsigned long){
		0xf0000000f0000000LL, 0xf0000000f0000000LL};
	const vector unsigned char format_check =
		(vector unsigned char)(vector unsigned long){
		0x0c0000000c000000LL, 0x0c0000000c000000LL};
	const vector unsigned char resp_err_check =
		(vector unsigned char)(vector unsigned long){
		0xe0000000e0000000LL, 0xe0000000e0000000LL};
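	/*
	 * Each check constant above replicates a single op_own byte test
	 * across four 32-bit lanes, one lane per CQE: the ownership bit,
	 * the opcode nibble, the compressed-CQE format bits and the
	 * responder-error opcode respectively.
	 */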
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint32_t rcvd_byte = 0;
	/* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */
	const vector unsigned char len_shuf_mask = (vector unsigned char){
	/* Mask to shuffle from extracted CQE to mbuf. */
	const vector unsigned char shuf_mask = (vector unsigned char){
		 5,  4,         /* bswap16, pkt_len */
		-1, -1,         /* zero out 2nd half of pkt_len */
		 5,  4,         /* bswap16, data_len */
		11, 10,         /* bswap16, vlan_tci */
		15, 14, 13, 12, /* bswap32, rss */
		 1,  2,  3, -1};/* fdir.hi */
	/* Mask to blend from the last Qword to the first DQword. */
	const vector unsigned char blend_mask = (vector unsigned char){
	const vector unsigned char crc_adj =
		(vector unsigned char)(vector unsigned short){
		rxq->crc_present * RTE_ETHER_CRC_LEN, 0,
		rxq->crc_present * RTE_ETHER_CRC_LEN, 0, 0, 0, 0, 0};
	const vector unsigned char flow_mark_adj =
		(vector unsigned char)(vector unsigned int){
		0, 0, 0, rxq->mark * (-1)};
	const vector unsigned short cqe_sel_mask1 =
		(vector unsigned short){0, 0, 0, 0, 0xffff, 0xffff, 0, 0};
	const vector unsigned short cqe_sel_mask2 =
		(vector unsigned short){0, 0, 0xffff, 0, 0, 0, 0, 0};

	assert(rxq->sges_n == 0);
	assert(rxq->cqe_n == rxq->elts_n);
	cq = &(*rxq->cqes)[cq_idx];
	rte_prefetch0(cq);
	rte_prefetch0(cq + 1);
	rte_prefetch0(cq + 2);
	rte_prefetch0(cq + 3);
	pkts_n = RTE_MIN(pkts_n, MLX5_VPMD_RX_MAX_BURST);
	repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
	if (repl_n >= rxq->rq_repl_thresh)
		mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
	/* See if there are unreturned mbufs from compressed CQE. */
	rcvd_pkt = rxq->decompressed;
	if (rcvd_pkt > 0) {
		rcvd_pkt = RTE_MIN(rcvd_pkt, pkts_n);
		rxq_copy_mbuf_v(rxq, pkts, rcvd_pkt);
		rxq->rq_pi += rcvd_pkt;
		rxq->decompressed -= rcvd_pkt;
		pkts += rcvd_pkt;
	}
	elts_idx = rxq->rq_pi & q_mask;
	elts = &(*rxq->elts)[elts_idx];
	/* Not to overflow pkts array. */
	pkts_n = RTE_ALIGN_FLOOR(pkts_n - rcvd_pkt, MLX5_VPMD_DESCS_PER_LOOP);
	/* Not to cross queue end. */
	pkts_n = RTE_MIN(pkts_n, q_n - elts_idx);
	pkts_n = RTE_MIN(pkts_n, q_n - cq_idx);
	if (!pkts_n)
		return rcvd_pkt;
	/* At this point, there shouldn't be any remaining packets. */
	assert(rxq->decompressed == 0);

	/*
	 * A. load first Qword (8bytes) in one loop.
	 * B. copy 4 mbuf pointers from elts ring to returning pkts.
	 * C. load remaining CQE data and extract necessary fields.
	 *    Final 16bytes cqes[] extracted from original 64bytes CQE has the
	 *    following structure:
	 *        uint8_t  flow_tag[3];
	 *        uint16_t hdr_type_etc;
	 *        uint16_t vlan_info;
	 *        uint32_t rx_has_res;
	 * D. fill in mbuf.
	 * E. get valid CQEs.
	 * F. find compressed CQE.
	 */
	for (pos = 0;
	     pos < pkts_n;
	     pos += MLX5_VPMD_DESCS_PER_LOOP) {
		vector unsigned char cqes[MLX5_VPMD_DESCS_PER_LOOP];
		vector unsigned char cqe_tmp1, cqe_tmp2;
		vector unsigned char pkt_mb0, pkt_mb1, pkt_mb2, pkt_mb3;
		vector unsigned char op_own, op_own_tmp1, op_own_tmp2;
		vector unsigned char opcode, owner_mask, invalid_mask;
		vector unsigned char comp_mask;
		vector unsigned char mask;
#ifdef MLX5_PMD_SOFT_COUNTERS
		const vector unsigned char lower_half = {
			0, 1, 4, 5, 8, 9, 12, 13,
			16, 17, 20, 21, 24, 25, 28, 29};
		const vector unsigned char upper_half = {
			2, 3, 6, 7, 10, 11, 14, 15,
			18, 19, 22, 23, 26, 27, 30, 31};
		const vector unsigned long shmax = {64, 64};
		vector unsigned char byte_cnt;
		vector unsigned short left, right;
		vector unsigned long lshift;
		vector __attribute__((altivec(bool__)))
			unsigned long shmask;
#endif
		vector unsigned char mbp1, mbp2;
		vector unsigned char p =
			(vector unsigned char)(vector unsigned short){
				0, 1, 2, 3, 0, 0, 0, 0};
		unsigned int p1, p2, p3;

		/* Prefetch next 4 CQEs. */
		if (pkts_n - pos >= 2 * MLX5_VPMD_DESCS_PER_LOOP) {
			rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP]);
			rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 1]);
			rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 2]);
			rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 3]);
		}

		/* A.0 do not cross the end of CQ. */
		mask = (vector unsigned char)(vector unsigned long){
			(pkts_n - pos) * sizeof(uint16_t) * 8, 0};

		{
			vector unsigned long lshift;
			vector __attribute__((altivec(bool__)))
				unsigned long shmask;
			const vector unsigned long shmax = {64, 64};

			lshift = vec_splat((vector unsigned long)mask, 0);
			shmask = vec_cmpgt(shmax, lshift);
			mask = (vector unsigned char)
				vec_sl((vector unsigned long)ones, lshift);
			mask = (vector unsigned char)
				vec_sel((vector unsigned long)shmask,
				(vector unsigned long)mask, shmask);
		}

		p = (vector unsigned char)
			vec_andc((vector unsigned long)p,
			(vector unsigned long)mask);
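		/*
		 * After the clamp above, offsets in p that would reach past
		 * the queue end are zeroed, so the speculative CQE loads
		 * below stay inside the ring; the surplus entries are
		 * discarded later via the invalid mask.
		 */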
		p3 = (unsigned int)((vector unsigned short)p)[3];
		cqes[3] = (vector unsigned char)(vector unsigned long){
			*(__attribute__((__aligned__(8))) unsigned long *)
			&cq[pos + p3].sop_drop_qpn, 0LL};
		rte_compiler_barrier();

		p2 = (unsigned int)((vector unsigned short)p)[2];
		cqes[2] = (vector unsigned char)(vector unsigned long){
			*(__attribute__((__aligned__(8))) unsigned long *)
			&cq[pos + p2].sop_drop_qpn, 0LL};
		rte_compiler_barrier();

		/* B.1 load mbuf pointers. */
		mbp1 = (vector unsigned char)vec_vsx_ld(0,
			(signed int const *)&elts[pos]);
		mbp2 = (vector unsigned char)vec_vsx_ld(0,
			(signed int const *)&elts[pos + 2]);

		/* A.1 load a block having op_own. */
		p1 = (unsigned int)((vector unsigned short)p)[1];
		cqes[1] = (vector unsigned char)(vector unsigned long){
			*(__attribute__((__aligned__(8))) unsigned long *)
			&cq[pos + p1].sop_drop_qpn, 0LL};
		rte_compiler_barrier();

		cqes[0] = (vector unsigned char)(vector unsigned long){
			*(__attribute__((__aligned__(8))) unsigned long *)
			&cq[pos].sop_drop_qpn, 0LL};
		rte_compiler_barrier();
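		/*
		 * The compiler barriers keep these volatile CQE reads in
		 * program order so the compiler cannot merge or reorder
		 * them; validity of what was read is judged afterwards by
		 * the op_own checks below.
		 */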

		/* B.2 copy mbuf pointers. */
		*(vector unsigned char *)&pkts[pos] = mbp1;
		*(vector unsigned char *)&pkts[pos + 2] = mbp2;

		/* C.1 load remaining CQE data and extract necessary fields. */
		cqe_tmp2 = *(vector unsigned char *)
			&cq[pos + p3].pkt_info;
		cqe_tmp1 = *(vector unsigned char *)
			&cq[pos + p2].pkt_info;
		cqes[3] = vec_sel(cqes[3], cqe_tmp2, blend_mask);
		cqes[2] = vec_sel(cqes[2], cqe_tmp1, blend_mask);
		cqe_tmp2 = (vector unsigned char)vec_vsx_ld(0,
			(signed int const *)&cq[pos + p3].csum);
		cqe_tmp1 = (vector unsigned char)vec_vsx_ld(0,
			(signed int const *)&cq[pos + p2].csum);
		cqes[3] = (vector unsigned char)
			vec_sel((vector unsigned short)cqes[3],
			(vector unsigned short)cqe_tmp2, cqe_sel_mask1);
		cqes[2] = (vector unsigned char)
			vec_sel((vector unsigned short)cqes[2],
			(vector unsigned short)cqe_tmp1, cqe_sel_mask1);
		cqe_tmp2 = (vector unsigned char)(vector unsigned long){
			*(__attribute__((__aligned__(8))) unsigned long *)
			&cq[pos + p3].rsvd3[9], 0LL};
		cqe_tmp1 = (vector unsigned char)(vector unsigned long){
			*(__attribute__((__aligned__(8))) unsigned long *)
			&cq[pos + p2].rsvd3[9], 0LL};
		cqes[3] = (vector unsigned char)
			vec_sel((vector unsigned short)cqes[3],
			(vector unsigned short)cqe_tmp2,
			(vector unsigned short)cqe_sel_mask2);
		cqes[2] = (vector unsigned char)
			vec_sel((vector unsigned short)cqes[2],
			(vector unsigned short)cqe_tmp1,
			(vector unsigned short)cqe_sel_mask2);

		/* C.2 generate final structure for mbuf with swapping bytes. */
		pkt_mb3 = vec_perm(cqes[3], zero, shuf_mask);
		pkt_mb2 = vec_perm(cqes[2], zero, shuf_mask);

		/* C.3 adjust CRC length. */
		pkt_mb3 = (vector unsigned char)
			((vector unsigned short)pkt_mb3 -
			(vector unsigned short)crc_adj);
		pkt_mb2 = (vector unsigned char)
			((vector unsigned short)pkt_mb2 -
			(vector unsigned short)crc_adj);

		/* C.4 adjust flow mark. */
		pkt_mb3 = (vector unsigned char)
			((vector unsigned int)pkt_mb3 +
			(vector unsigned int)flow_mark_adj);
		pkt_mb2 = (vector unsigned char)
			((vector unsigned int)pkt_mb2 +
			(vector unsigned int)flow_mark_adj);

		/* D.1 fill in mbuf - rx_descriptor_fields1. */
		*(vector unsigned char *)
			&pkts[pos + 3]->pkt_len = pkt_mb3;
		*(vector unsigned char *)
			&pkts[pos + 2]->pkt_len = pkt_mb2;

		/* E.1 extract op_own field. */
		op_own_tmp2 = (vector unsigned char)
			vec_mergeh((vector unsigned int)cqes[2],
			(vector unsigned int)cqes[3]);

		/* C.1 load remaining CQE data and extract necessary fields. */
		cqe_tmp2 = *(vector unsigned char *)
			&cq[pos + p1].pkt_info;
		cqe_tmp1 = *(vector unsigned char *)
			&cq[pos].pkt_info;
		cqes[1] = vec_sel(cqes[1], cqe_tmp2, blend_mask);
		cqes[0] = vec_sel(cqes[0], cqe_tmp1, blend_mask);
		cqe_tmp2 = (vector unsigned char)vec_vsx_ld(0,
			(signed int const *)&cq[pos + p1].csum);
		cqe_tmp1 = (vector unsigned char)vec_vsx_ld(0,
			(signed int const *)&cq[pos].csum);
		cqes[1] = (vector unsigned char)
			vec_sel((vector unsigned short)cqes[1],
			(vector unsigned short)cqe_tmp2, cqe_sel_mask1);
		cqes[0] = (vector unsigned char)
			vec_sel((vector unsigned short)cqes[0],
			(vector unsigned short)cqe_tmp1, cqe_sel_mask1);
		cqe_tmp2 = (vector unsigned char)(vector unsigned long){
			*(__attribute__((__aligned__(8))) unsigned long *)
			&cq[pos + p1].rsvd3[9], 0LL};
		cqe_tmp1 = (vector unsigned char)(vector unsigned long){
			*(__attribute__((__aligned__(8))) unsigned long *)
			&cq[pos].rsvd3[9], 0LL};
		cqes[1] = (vector unsigned char)
			vec_sel((vector unsigned short)cqes[1],
			(vector unsigned short)cqe_tmp2, cqe_sel_mask2);
		cqes[0] = (vector unsigned char)
			vec_sel((vector unsigned short)cqes[0],
			(vector unsigned short)cqe_tmp1, cqe_sel_mask2);

		/* C.2 generate final structure for mbuf with swapping bytes. */
		pkt_mb1 = vec_perm(cqes[1], zero, shuf_mask);
		pkt_mb0 = vec_perm(cqes[0], zero, shuf_mask);

		/* C.3 adjust CRC length. */
		pkt_mb1 = (vector unsigned char)
			((vector unsigned short)pkt_mb1 -
			(vector unsigned short)crc_adj);
		pkt_mb0 = (vector unsigned char)
			((vector unsigned short)pkt_mb0 -
			(vector unsigned short)crc_adj);

		/* C.4 adjust flow mark. */
		pkt_mb1 = (vector unsigned char)
			((vector unsigned int)pkt_mb1 +
			(vector unsigned int)flow_mark_adj);
		pkt_mb0 = (vector unsigned char)
			((vector unsigned int)pkt_mb0 +
			(vector unsigned int)flow_mark_adj);

		/* E.1 extract op_own byte. */
		op_own_tmp1 = (vector unsigned char)
			vec_mergeh((vector unsigned int)cqes[0],
			(vector unsigned int)cqes[1]);
		op_own = (vector unsigned char)
			vec_mergel((vector unsigned long)op_own_tmp1,
			(vector unsigned long)op_own_tmp2);

		/* D.1 fill in mbuf - rx_descriptor_fields1. */
		*(vector unsigned char *)
			&pkts[pos + 1]->pkt_len = pkt_mb1;
		*(vector unsigned char *)
			&pkts[pos]->pkt_len = pkt_mb0;

		/* E.2 flip owner bit to mark CQEs from last round. */
		owner_mask = (vector unsigned char)
			vec_and((vector unsigned long)op_own,
			(vector unsigned long)owner_check);
		if (ownership)
			owner_mask = (vector unsigned char)
				vec_xor((vector unsigned long)owner_mask,
				(vector unsigned long)owner_check);
		owner_mask = (vector unsigned char)
			vec_cmpeq((vector unsigned int)owner_mask,
			(vector unsigned int)owner_check);
		owner_mask = (vector unsigned char)
			vec_packs((vector unsigned int)owner_mask,
			(vector unsigned int)zero);
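		/*
		 * The CQ owner bit alternates on every wrap of the ring;
		 * `ownership` holds the value expected for the current
		 * pass, so lanes whose owner bit still carries the previous
		 * round's value end up in the invalid mask below.
		 */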

		/* E.3 get mask for invalidated CQEs. */
		opcode = (vector unsigned char)
			vec_and((vector unsigned long)op_own,
			(vector unsigned long)opcode_check);
		invalid_mask = (vector unsigned char)
			vec_cmpeq((vector unsigned int)opcode_check,
			(vector unsigned int)opcode);
		invalid_mask = (vector unsigned char)
			vec_packs((vector unsigned int)invalid_mask,
			(vector unsigned int)zero);

		/* E.4 mask out beyond boundary. */
		invalid_mask = (vector unsigned char)
			vec_or((vector unsigned long)invalid_mask,
			(vector unsigned long)mask);

		/* E.5 merge invalid_mask with invalid owner. */
		invalid_mask = (vector unsigned char)
			vec_or((vector unsigned long)invalid_mask,
			(vector unsigned long)owner_mask);

		/* F.1 find compressed CQE format. */
		comp_mask = (vector unsigned char)
			vec_and((vector unsigned long)op_own,
			(vector unsigned long)format_check);
		comp_mask = (vector unsigned char)
			vec_cmpeq((vector unsigned int)comp_mask,
			(vector unsigned int)format_check);
		comp_mask = (vector unsigned char)
			vec_packs((vector unsigned int)comp_mask,
			(vector unsigned int)zero);

		/* F.2 mask out invalid entries. */
		comp_mask = (vector unsigned char)
			vec_andc((vector unsigned long)comp_mask,
			(vector unsigned long)invalid_mask);
		comp_idx = ((vector unsigned long)comp_mask)[0];

		/* F.3 get the first compressed CQE. */
		comp_idx = comp_idx ? __builtin_ctzll(comp_idx) /
			(sizeof(uint16_t) * 8) : MLX5_VPMD_DESCS_PER_LOOP;

		/* E.6 mask out entries after the compressed CQE. */
		mask = (vector unsigned char)(vector unsigned long){
			(comp_idx * sizeof(uint16_t) * 8), 0};
		lshift = vec_splat((vector unsigned long)mask, 0);
		shmask = vec_cmpgt(shmax, lshift);
		mask = (vector unsigned char)
			vec_sl((vector unsigned long)ones, lshift);
		mask = (vector unsigned char)
			vec_sel((vector unsigned long)shmask,
			(vector unsigned long)mask, shmask);
		invalid_mask = (vector unsigned char)
			vec_or((vector unsigned long)invalid_mask,
			(vector unsigned long)mask);

		/* E.7 count non-compressed valid CQEs. */
		n = ((vector unsigned long)invalid_mask)[0];
		n = n ? __builtin_ctzll(n) / (sizeof(uint16_t) * 8) :
			MLX5_VPMD_DESCS_PER_LOOP;
		nocmp_n += n;

		/* D.2 get the final invalid mask. */
		mask = (vector unsigned char)(vector unsigned long){
			(n * sizeof(uint16_t) * 8), 0};
		lshift = vec_splat((vector unsigned long)mask, 0);
		shmask = vec_cmpgt(shmax, lshift);
		mask = (vector unsigned char)
			vec_sl((vector unsigned long)ones, lshift);
		mask = (vector unsigned char)
			vec_sel((vector unsigned long)shmask,
			(vector unsigned long)mask, shmask);
		invalid_mask = (vector unsigned char)
			vec_or((vector unsigned long)invalid_mask,
			(vector unsigned long)mask);

		/* D.3 check error in opcode. */
		opcode = (vector unsigned char)
			vec_cmpeq((vector unsigned int)resp_err_check,
			(vector unsigned int)opcode);
		opcode = (vector unsigned char)
			vec_packs((vector unsigned int)opcode,
			(vector unsigned int)zero);
		opcode = (vector unsigned char)
			vec_andc((vector unsigned long)opcode,
			(vector unsigned long)invalid_mask);

		/* D.4 mark if any error is set. */
		*err |= ((vector unsigned long)opcode)[0];

		/* D.5 fill in mbuf - rearm_data and packet_type. */
		rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]);
		if (rxq->hw_timestamp) {
			pkts[pos]->timestamp =
				rte_be_to_cpu_64(cq[pos].timestamp);
			pkts[pos + 1]->timestamp =
				rte_be_to_cpu_64(cq[pos + p1].timestamp);
			pkts[pos + 2]->timestamp =
				rte_be_to_cpu_64(cq[pos + p2].timestamp);
			pkts[pos + 3]->timestamp =
				rte_be_to_cpu_64(cq[pos + p3].timestamp);
		}

#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Add up received bytes count. */
		byte_cnt = vec_perm(op_own, zero, len_shuf_mask);
		byte_cnt = (vector unsigned char)
			vec_andc((vector unsigned long)byte_cnt,
			(vector unsigned long)invalid_mask);
		left = vec_perm((vector unsigned short)byte_cnt,
			(vector unsigned short)zero, lower_half);
		right = vec_perm((vector unsigned short)byte_cnt,
			(vector unsigned short)zero, upper_half);
		byte_cnt = (vector unsigned char)vec_add(left, right);
		left = vec_perm((vector unsigned short)byte_cnt,
			(vector unsigned short)zero, lower_half);
		right = vec_perm((vector unsigned short)byte_cnt,
			(vector unsigned short)zero, upper_half);
		byte_cnt = (vector unsigned char)vec_add(left, right);
		rcvd_byte += ((vector unsigned long)byte_cnt)[0];
#endif

		/*
		 * Break the loop unless more valid CQE is expected, or if
		 * there's a compressed CQE.
		 */
		if (n != MLX5_VPMD_DESCS_PER_LOOP)
			break;
	}
	/* If no new CQE seen, return without updating cq_db. */
	if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP))
		return rcvd_pkt;
	/* Update the consumer indexes for non-compressed CQEs. */
	assert(nocmp_n <= pkts_n);
	rxq->cq_ci += nocmp_n;
	rxq->rq_pi += nocmp_n;
	rcvd_pkt += nocmp_n;
#ifdef MLX5_PMD_SOFT_COUNTERS
	rxq->stats.ipackets += nocmp_n;
	rxq->stats.ibytes += rcvd_byte;
#endif
	/* Decompress the last CQE if compressed. */
	if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n) {
		assert(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
		rxq->decompressed =
			rxq_cq_decompress_v(rxq, &cq[nocmp_n], &elts[nocmp_n]);
		/* Return more packets if needed. */
		if (nocmp_n < pkts_n) {
			uint16_t n = rxq->decompressed;

			n = RTE_MIN(n, pkts_n - nocmp_n);
			rxq_copy_mbuf_v(rxq, &pkts[nocmp_n], n);
			rxq->rq_pi += n;
			rcvd_pkt += n;
			rxq->decompressed -= n;
		}
	}
	rte_compiler_barrier();
	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
	return rcvd_pkt;
}

#endif /* RTE_PMD_MLX5_RXTX_VEC_ALTIVEC_H_ */