/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_RXTX_VEC_NEON_H_
#define RTE_PMD_MLX5_RXTX_VEC_NEON_H_

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include <arm_neon.h>

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>

#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rxtx_vec.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"

#pragma GCC diagnostic ignored "-Wcast-qual"
/**
 * Fill in buffer descriptors in a multi-packet send descriptor.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param dseg
 *   Pointer to buffer descriptor to be written.
 * @param pkts
 *   Pointer to array of packets to be sent.
 * @param n
 *   Number of packets to be filled.
 */
static inline void
txq_wr_dseg_v(struct mlx5_txq_data *txq, uint8_t *dseg,
	      struct rte_mbuf **pkts, unsigned int n)
{
	unsigned int pos;
	uintptr_t addr;
	const uint8x16_t dseg_shuf_m = {
		3, 2, 1, 0, /* length, bswap32 */
		4, 5, 6, 7, /* lkey */
		15, 14, 13, 12, /* addr, bswap64 */
		11, 10, 9, 8 /* addr, bswap64 */
	};
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint32_t tx_byte = 0;
#endif

	for (pos = 0; pos < n; ++pos, dseg += MLX5_WQE_DWORD_SIZE) {
		uint8x16_t desc;
		struct rte_mbuf *pkt = pkts[pos];

		addr = rte_pktmbuf_mtod(pkt, uintptr_t);
		desc = vreinterpretq_u8_u32((uint32x4_t) {
				DATA_LEN(pkt),
				mlx5_tx_mb2mr(txq, pkt),
				addr,
				addr >> 32 });
		desc = vqtbl1q_u8(desc, dseg_shuf_m);
		vst1q_u8(dseg, desc);
#ifdef MLX5_PMD_SOFT_COUNTERS
		tx_byte += DATA_LEN(pkt);
#endif
	}
#ifdef MLX5_PMD_SOFT_COUNTERS
	txq->stats.obytes += tx_byte;
#endif
}
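
/*
 * Illustrative sketch (not part of the upstream driver): a scalar equivalent
 * of one iteration of the shuffle above, assuming a little-endian host.
 * struct mlx5_wqe_data_seg comes from mlx5_prm.h; mlx5_tx_mb2mr() already
 * returns the lkey in network byte order, which is why the vector path
 * leaves bytes 4-7 unswapped.
 */
static __rte_unused void
txq_wr_dseg_scalar_sketch(struct mlx5_txq_data *txq, uint8_t *dseg,
			  struct rte_mbuf *pkt)
{
	struct mlx5_wqe_data_seg *ds = (struct mlx5_wqe_data_seg *)dseg;

	ds->byte_count = rte_cpu_to_be_32(DATA_LEN(pkt)); /* bswap32 */
	ds->lkey = mlx5_tx_mb2mr(txq, pkt); /* already big-endian */
	ds->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(pkt, uintptr_t));
}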

/**
 * Send multi-segmented packets until it encounters a single segment packet in
 * the pkts list.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param pkts
 *   Pointer to array of packets to be sent.
 * @param pkts_n
 *   Number of packets to be sent.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
static uint16_t
txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
	      uint16_t pkts_n)
{
	uint16_t elts_head = txq->elts_head;
	const uint16_t elts_n = 1 << txq->elts_n;
	const uint16_t elts_m = elts_n - 1;
	const uint16_t wq_n = 1 << txq->wqe_n;
	const uint16_t wq_mask = wq_n - 1;
	const unsigned int nb_dword_per_wqebb =
		MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE;
	const unsigned int nb_dword_in_hdr =
		sizeof(struct mlx5_wqe) / MLX5_WQE_DWORD_SIZE;
	unsigned int n;
	volatile struct mlx5_wqe *wqe = NULL;
	bool metadata_ol =
		txq->offloads & DEV_TX_OFFLOAD_MATCH_METADATA ? true : false;

	assert(elts_n > pkts_n);
	mlx5_tx_complete(txq);
	if (unlikely(!pkts_n))
		return 0;
	for (n = 0; n < pkts_n; ++n) {
		struct rte_mbuf *buf = pkts[n];
		unsigned int segs_n = buf->nb_segs;
		unsigned int ds = nb_dword_in_hdr;
		unsigned int len = PKT_LEN(buf);
		uint16_t wqe_ci = txq->wqe_ci;
		const uint8x16_t ctrl_shuf_m = {
			3, 2, 1, 0, /* bswap32 */
			7, 6, 5, 4, /* bswap32 */
			11, 10, 9, 8, /* bswap32 */
			12, 13, 14, 15 /* immediate data */
		};
		uint8_t cs_flags;
		uint16_t max_elts;
		uint16_t max_wqe;
		uint8x16_t *t_wqe;
		uint8_t *dseg;
		uint8x16_t ctrl;
		rte_be32_t metadata =
			metadata_ol && (buf->ol_flags & PKT_TX_METADATA) ?
			buf->tx_metadata : 0;

		assert(segs_n);
		max_elts = elts_n - (elts_head - txq->elts_tail);
		max_wqe = wq_n - (txq->wqe_ci - txq->wqe_pi);
		/*
		 * A MPW session consumes 2 WQEs at most to
		 * include MLX5_MPW_DSEG_MAX pointers.
		 */
		if (segs_n == 1 ||
		    max_elts < segs_n || max_wqe < 2)
			break;
		wqe = &((volatile struct mlx5_wqe64 *)
			 txq->wqes)[wqe_ci & wq_mask].hdr;
		cs_flags = txq_ol_cksum_to_cs(buf);
		/* Title WQEBB pointer. */
		t_wqe = (uint8x16_t *)wqe;
		dseg = (uint8_t *)(wqe + 1);
		do {
			if (!(ds++ % nb_dword_per_wqebb)) {
				dseg = (uint8_t *)
					&((volatile struct mlx5_wqe64 *)
					   txq->wqes)[++wqe_ci & wq_mask];
			}
			txq_wr_dseg_v(txq, dseg, &buf, 1);
			dseg += MLX5_WQE_DWORD_SIZE;
			(*txq->elts)[elts_head++ & elts_m] = buf;
			buf = buf->next;
		} while (--segs_n);
		++wqe_ci;
		/* Fill CTRL in the header. */
		ctrl = vreinterpretq_u8_u32((uint32x4_t) {
				MLX5_OPC_MOD_MPW << 24 |
				txq->wqe_ci << 8 | MLX5_OPCODE_TSO,
				txq->qp_num_8s | ds, 4, 0});
		ctrl = vqtbl1q_u8(ctrl, ctrl_shuf_m);
		vst1q_u8((void *)t_wqe, ctrl);
		/* Fill ESEG in the header. */
		vst1q_u32((void *)(t_wqe + 1),
			  ((uint32x4_t) { 0,
					  rte_cpu_to_be_16(len) << 16 | cs_flags,
					  metadata, 0 }));
		txq->wqe_ci = wqe_ci;
	}
	if (!n)
		return 0;
	txq->elts_comp += (uint16_t)(elts_head - txq->elts_head);
	txq->elts_head = elts_head;
	if (txq->elts_comp >= MLX5_TX_COMP_THRESH) {
		/* A CQE slot must always be available. */
		assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
		wqe->ctrl[2] = rte_cpu_to_be_32(MLX5_COMP_ALWAYS <<
						MLX5_COMP_MODE_OFFSET);
		wqe->ctrl[3] = txq->elts_head;
		txq->elts_comp = 0;
	}
#ifdef MLX5_PMD_SOFT_COUNTERS
	txq->stats.opackets += n;
#endif
	mlx5_tx_dbrec(txq, wqe);
	return n;
}
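
/*
 * Note: with MLX5_MPW_DSEG_MAX == 5 (mlx5_defs.h), a full MPW session is a
 * 2-dword header (assuming sizeof(struct mlx5_wqe) == 32) plus 5 data
 * segments, i.e. 7 dwords of 16B, which fits in 2 WQEBBs of 64B; hence the
 * "max_wqe < 2" check above.
 */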

/**
 * Send burst of packets with Enhanced MPW. If it encounters a multi-seg packet,
 * it returns to make it processed by txq_scatter_v(). All the packets in
 * the pkts list should be single segment packets having same offload flags.
 * This must be checked by txq_count_contig_single_seg() and txq_calc_offload().
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param pkts
 *   Pointer to array of packets to be sent.
 * @param pkts_n
 *   Number of packets to be sent (<= MLX5_VPMD_TX_MAX_BURST).
 * @param cs_flags
 *   Checksum offload flags to be written in the descriptor.
 * @param metadata
 *   Metadata value to be written in the descriptor.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
static inline uint16_t
txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
	    uint8_t cs_flags, rte_be32_t metadata)
{
	struct rte_mbuf **elts;
	uint16_t elts_head = txq->elts_head;
	const uint16_t elts_n = 1 << txq->elts_n;
	const uint16_t elts_m = elts_n - 1;
	const unsigned int nb_dword_per_wqebb =
		MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE;
	const unsigned int nb_dword_in_hdr =
		sizeof(struct mlx5_wqe) / MLX5_WQE_DWORD_SIZE;
	unsigned int n = 0;
	unsigned int pos;
	uint16_t max_elts;
	uint16_t max_wqe;
	uint32_t comp_req;
	const uint16_t wq_n = 1 << txq->wqe_n;
	const uint16_t wq_mask = wq_n - 1;
	uint16_t wq_idx = txq->wqe_ci & wq_mask;
	volatile struct mlx5_wqe64 *wq =
		&((volatile struct mlx5_wqe64 *)txq->wqes)[wq_idx];
	volatile struct mlx5_wqe *wqe = (volatile struct mlx5_wqe *)wq;
	const uint8x16_t ctrl_shuf_m = {
		3, 2, 1, 0, /* bswap32 */
		7, 6, 5, 4, /* bswap32 */
		11, 10, 9, 8, /* bswap32 */
		12, 13, 14, 15 /* immediate data */
	};
	uint8x16_t *t_wqe;
	uint8_t *dseg;
	uint8x16_t ctrl;

	/* Make sure all packets can fit into a single WQE. */
	assert(elts_n > pkts_n);
	mlx5_tx_complete(txq);
	max_elts = (elts_n - (elts_head - txq->elts_tail));
	max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
	pkts_n = RTE_MIN((unsigned int)RTE_MIN(pkts_n, max_wqe), max_elts);
	if (unlikely(!pkts_n))
		return 0;
	elts = &(*txq->elts)[elts_head & elts_m];
	/* Loop for available tailroom first. */
	n = RTE_MIN(elts_n - (elts_head & elts_m), pkts_n);
	for (pos = 0; pos < (n & -2); pos += 2)
		vst1q_u64((void *)&elts[pos], vld1q_u64((void *)&pkts[pos]));
	if (n & 1)
		elts[pos] = pkts[pos];
	/* Check if it crosses the end of the queue. */
	if (unlikely(n < pkts_n)) {
		elts = &(*txq->elts)[0];
		for (pos = 0; pos < pkts_n - n; ++pos)
			elts[pos] = pkts[n + pos];
	}
	txq->elts_head += pkts_n;
	/* Save title WQEBB pointer. */
	t_wqe = (uint8x16_t *)wqe;
	dseg = (uint8_t *)(wqe + 1);
	/* Calculate the number of entries to the end. */
	n = RTE_MIN(
		(wq_n - wq_idx) * nb_dword_per_wqebb - nb_dword_in_hdr,
		pkts_n);
	/* Fill DSEGs. */
	txq_wr_dseg_v(txq, dseg, pkts, n);
	/* Check if it crosses the end of the queue. */
	if (n < pkts_n) {
		dseg = (uint8_t *)txq->wqes;
		txq_wr_dseg_v(txq, dseg, &pkts[n], pkts_n - n);
	}
	if (txq->elts_comp + pkts_n < MLX5_TX_COMP_THRESH) {
		txq->elts_comp += pkts_n;
		comp_req = MLX5_COMP_ONLY_FIRST_ERR << MLX5_COMP_MODE_OFFSET;
	} else {
		/* A CQE slot must always be available. */
		assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
		/* Request a completion. */
		txq->elts_comp = 0;
		comp_req = MLX5_COMP_ALWAYS << MLX5_COMP_MODE_OFFSET;
	}
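	/*
	 * Note: a full completion (MLX5_COMP_ALWAYS) is requested only once
	 * per MLX5_TX_COMP_THRESH packets; bursts in between ask the NIC to
	 * report only the first error (MLX5_COMP_ONLY_FIRST_ERR), keeping CQ
	 * traffic low.
	 */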
	/* Fill CTRL in the header. */
	ctrl = vreinterpretq_u8_u32((uint32x4_t) {
			MLX5_OPC_MOD_ENHANCED_MPSW << 24 |
			txq->wqe_ci << 8 | MLX5_OPCODE_ENHANCED_MPSW,
			txq->qp_num_8s | (pkts_n + 2),
			comp_req,
			txq->elts_head });
	ctrl = vqtbl1q_u8(ctrl, ctrl_shuf_m);
	vst1q_u8((void *)t_wqe, ctrl);
	/* Fill ESEG in the header. */
	vst1q_u32((void *)(t_wqe + 1),
		  ((uint32x4_t) { 0, cs_flags, metadata, 0 }));
#ifdef MLX5_PMD_SOFT_COUNTERS
	txq->stats.opackets += pkts_n;
#endif
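	/*
	 * Note: the session consumed nb_dword_in_hdr + pkts_n dwords of 16B
	 * each; the division below rounds that up to whole 64B WQEBBs. E.g.,
	 * with a 2-dword header and 32 packets, wqe_ci advances by
	 * (2 + 32 + 3) / 4 = 9.
	 */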
	txq->wqe_ci += (nb_dword_in_hdr + pkts_n + (nb_dword_per_wqebb - 1)) /
		       nb_dword_per_wqebb;
	/* Ring QP doorbell. */
	mlx5_tx_dbrec_cond_wmb(txq, wqe, pkts_n < MLX5_VPMD_TX_MAX_BURST);
	return pkts_n;
}
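
/*
 * Illustrative sketch (not part of the upstream driver): how a caller can
 * combine the two TX paths above. The real entry point in mlx5_rxtx_vec.c
 * additionally derives per-group cs_flags/metadata with txq_calc_offload();
 * this simplified version passes empty values, so it is for illustration
 * only.
 */
static __rte_unused uint16_t
tx_burst_vec_sketch(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
		    uint16_t pkts_n)
{
	uint16_t nb_tx = 0;

	while (pkts_n > nb_tx) {
		uint16_t n = pkts_n - nb_tx;
		uint16_t ret;

		if (pkts[nb_tx]->nb_segs > 1) {
			/* Leading multi-segment packets: scatter path. */
			ret = txq_scatter_v(txq, &pkts[nb_tx], n);
		} else {
			/* Count leading single-segment packets. */
			uint16_t k = 0;

			while (k < n && pkts[nb_tx + k]->nb_segs == 1)
				++k;
			ret = txq_burst_v(txq, &pkts[nb_tx],
					  RTE_MIN(k, MLX5_VPMD_TX_MAX_BURST),
					  0, 0);
		}
		if (!ret)
			break;
		nb_tx += ret;
	}
	return nb_tx;
}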

/**
 * Store free buffers to RX SW ring.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param pkts
 *   Pointer to array of packets to be stored.
 * @param n
 *   Number of packets to be stored.
 */
static inline void
rxq_copy_mbuf_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t n)
{
	const uint16_t q_mask = (1 << rxq->elts_n) - 1;
	struct rte_mbuf **elts = &(*rxq->elts)[rxq->rq_pi & q_mask];
	unsigned int pos;
	uint16_t p = n & -2;

	for (pos = 0; pos < p; pos += 2) {
		uint64x2_t mbp;

		mbp = vld1q_u64((void *)&elts[pos]);
		vst1q_u64((void *)&pkts[pos], mbp);
	}
	if (n & 1)
		pkts[pos] = elts[pos];
}

/**
 * Decompress a compressed completion and fill in mbufs in RX SW ring with data
 * extracted from the title completion descriptor.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param cq
 *   Pointer to completion array having a compressed completion at first.
 * @param elts
 *   Pointer to SW ring to be filled. The first mbuf has to be pre-built from
 *   the title completion descriptor to be copied to the rest of mbufs.
 *
 * @return
 *   Number of mini-CQEs successfully decompressed.
 */
static inline uint16_t
rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
		    struct rte_mbuf **elts)
{
	volatile struct mlx5_mini_cqe8 *mcq = (void *)&(cq + 1)->pkt_info;
	struct rte_mbuf *t_pkt = elts[0]; /* Title packet is pre-built. */
	unsigned int pos;
	unsigned int i;
	unsigned int inv = 0;
	/* Mask to shuffle from extracted mini CQE to mbuf. */
	const uint8x16_t mcqe_shuf_m1 = {
		-1, -1, -1, -1, /* skip packet_type */
		7, 6, -1, -1, /* pkt_len, bswap16 */
		7, 6, /* data_len, bswap16 */
		-1, -1, /* skip vlan_tci */
		3, 2, 1, 0 /* hash.rss, bswap32 */
	};
	const uint8x16_t mcqe_shuf_m2 = {
		-1, -1, -1, -1, /* skip packet_type */
		15, 14, -1, -1, /* pkt_len, bswap16 */
		15, 14, /* data_len, bswap16 */
		-1, -1, /* skip vlan_tci */
		11, 10, 9, 8 /* hash.rss, bswap32 */
	};
	/* Restore the compressed count. Must be 16 bits. */
	const uint16_t mcqe_n = t_pkt->data_len +
				(rxq->crc_present * RTE_ETHER_CRC_LEN);
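	/*
	 * Note: for a compressed CQE, the byte_cnt field of the title CQE
	 * carries the number of mini-CQEs rather than a byte count. The title
	 * mbuf's data_len was built from that field (minus any CRC
	 * adjustment), so adding the CRC length back recovers the raw count.
	 */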
	const uint64x2_t rearm =
		vld1q_u64((void *)&t_pkt->rearm_data);
	const uint32x4_t rxdf_mask = {
		0xffffffff, /* packet_type */
		0, /* skip pkt_len */
		0xffff0000, /* vlan_tci, skip data_len */
		0, /* skip hash.rss */
	};
	const uint8x16_t rxdf =
		vandq_u8(vld1q_u8((void *)&t_pkt->rx_descriptor_fields1),
			 vreinterpretq_u8_u32(rxdf_mask));
	const uint16x8_t crc_adj = {
		0, 0,
		rxq->crc_present * RTE_ETHER_CRC_LEN, 0,
		rxq->crc_present * RTE_ETHER_CRC_LEN, 0,
		0, 0
	};
	const uint32_t flow_tag = t_pkt->hash.fdir.hi;
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint32_t rcvd_byte = 0;
#endif
	/* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */
	const uint8x8_t len_shuf_m = {
		7, 6, /* 1st mCQE */
		15, 14, /* 2nd mCQE */
		23, 22, /* 3rd mCQE */
		31, 30 /* 4th mCQE */
	};

	/*
	 * A. load mCQEs into a 128bit register.
	 * B. store rearm data to mbuf.
	 * C. combine data from mCQEs with rx_descriptor_fields1.
	 * D. store rx_descriptor_fields1.
	 * E. store flow tag (rte_flow mark).
	 */
	for (pos = 0; pos < mcqe_n; ) {
		uint8_t *p = (void *)&mcq[pos % 8];
		uint8_t *e0 = (void *)&elts[pos]->rearm_data;
		uint8_t *e1 = (void *)&elts[pos + 1]->rearm_data;
		uint8_t *e2 = (void *)&elts[pos + 2]->rearm_data;
		uint8_t *e3 = (void *)&elts[pos + 3]->rearm_data;
		uint16x4_t byte_cnt;
#ifdef MLX5_PMD_SOFT_COUNTERS
		uint16x4_t invalid_mask =
			vcreate_u16(mcqe_n - pos < MLX5_VPMD_DESCS_PER_LOOP ?
				    -1UL << ((mcqe_n - pos) *
					     sizeof(uint16_t) * 8) : 0);
#endif

		if (!(pos & 0x7) && pos + 8 < mcqe_n)
			rte_prefetch0((void *)(cq + pos + 8));
		__asm__ volatile (
		/* A.1 load mCQEs into a 128bit register. */
		"ld1 {v16.16b - v17.16b}, [%[mcq]] \n\t"
		/* B.1 store rearm data to mbuf. */
		"st1 {%[rearm].2d}, [%[e0]] \n\t"
		"add %[e0], %[e0], #16 \n\t"
		"st1 {%[rearm].2d}, [%[e1]] \n\t"
		"add %[e1], %[e1], #16 \n\t"
		/* C.1 combine data from mCQEs with rx_descriptor_fields1. */
		"tbl v18.16b, {v16.16b}, %[mcqe_shuf_m1].16b \n\t"
		"tbl v19.16b, {v16.16b}, %[mcqe_shuf_m2].16b \n\t"
		"sub v18.8h, v18.8h, %[crc_adj].8h \n\t"
		"sub v19.8h, v19.8h, %[crc_adj].8h \n\t"
		"orr v18.16b, v18.16b, %[rxdf].16b \n\t"
		"orr v19.16b, v19.16b, %[rxdf].16b \n\t"
		/* D.1 store rx_descriptor_fields1. */
		"st1 {v18.2d}, [%[e0]] \n\t"
		"st1 {v19.2d}, [%[e1]] \n\t"
		/* B.1 store rearm data to mbuf. */
		"st1 {%[rearm].2d}, [%[e2]] \n\t"
		"add %[e2], %[e2], #16 \n\t"
		"st1 {%[rearm].2d}, [%[e3]] \n\t"
		"add %[e3], %[e3], #16 \n\t"
		/* C.1 combine data from mCQEs with rx_descriptor_fields1. */
		"tbl v18.16b, {v17.16b}, %[mcqe_shuf_m1].16b \n\t"
		"tbl v19.16b, {v17.16b}, %[mcqe_shuf_m2].16b \n\t"
		"sub v18.8h, v18.8h, %[crc_adj].8h \n\t"
		"sub v19.8h, v19.8h, %[crc_adj].8h \n\t"
		"orr v18.16b, v18.16b, %[rxdf].16b \n\t"
		"orr v19.16b, v19.16b, %[rxdf].16b \n\t"
		/* D.1 store rx_descriptor_fields1. */
		"st1 {v18.2d}, [%[e2]] \n\t"
		"st1 {v19.2d}, [%[e3]] \n\t"
#ifdef MLX5_PMD_SOFT_COUNTERS
		"tbl %[byte_cnt].8b, {v16.16b - v17.16b}, %[len_shuf_m].8b \n\t"
#endif
		:[byte_cnt]"=&w"(byte_cnt)
		:[mcq]"r"(p),
		 [rxdf]"w"(rxdf),
		 [rearm]"w"(rearm),
		 [e3]"r"(e3), [e2]"r"(e2), [e1]"r"(e1), [e0]"r"(e0),
		 [mcqe_shuf_m1]"w"(mcqe_shuf_m1),
		 [mcqe_shuf_m2]"w"(mcqe_shuf_m2),
		 [crc_adj]"w"(crc_adj),
		 [len_shuf_m]"w"(len_shuf_m)
		:"memory", "v16", "v17", "v18", "v19");
#ifdef MLX5_PMD_SOFT_COUNTERS
		byte_cnt = vbic_u16(byte_cnt, invalid_mask);
		rcvd_byte += vget_lane_u64(vpaddl_u32(vpaddl_u16(byte_cnt)), 0);
#endif
		if (rxq->mark) {
			/* E.1 store flow tag (rte_flow mark). */
			elts[pos]->hash.fdir.hi = flow_tag;
			elts[pos + 1]->hash.fdir.hi = flow_tag;
			elts[pos + 2]->hash.fdir.hi = flow_tag;
			elts[pos + 3]->hash.fdir.hi = flow_tag;
		}
		pos += MLX5_VPMD_DESCS_PER_LOOP;
		/* Move to next CQE and invalidate consumed CQEs. */
		if (!(pos & 0x7) && pos < mcqe_n) {
			mcq = (void *)&(cq + pos)->pkt_info;
			for (i = 0; i < 8; ++i)
				cq[inv++].op_own = MLX5_CQE_INVALIDATE;
		}
	}
	/* Invalidate the rest of CQEs. */
	for (; inv < mcqe_n; ++inv)
		cq[inv].op_own = MLX5_CQE_INVALIDATE;
#ifdef MLX5_PMD_SOFT_COUNTERS
	rxq->stats.ipackets += mcqe_n;
	rxq->stats.ibytes += rcvd_byte;
#endif
	rxq->cq_ci += mcqe_n;
	return mcqe_n;
}
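
/*
 * Illustrative sketch (not part of the upstream driver): a scalar equivalent
 * of the mCQE merge above for one mbuf. Fields the shuffle masks skip
 * (packet_type, vlan_tci) are inherited from the pre-built title packet;
 * struct mlx5_mini_cqe8 comes from mlx5_prm.h.
 */
static __rte_unused void
rxq_mcqe_to_mbuf_sketch(struct mlx5_rxq_data *rxq,
			volatile struct mlx5_mini_cqe8 *mcqe,
			struct rte_mbuf *t_pkt, struct rte_mbuf *pkt)
{
	uint16_t len = rte_be_to_cpu_32(mcqe->byte_cnt) -
		       rxq->crc_present * RTE_ETHER_CRC_LEN;

	pkt->packet_type = t_pkt->packet_type; /* from the title packet */
	pkt->pkt_len = len;
	pkt->data_len = len;
	pkt->vlan_tci = t_pkt->vlan_tci; /* from the title packet */
	pkt->hash.rss = rte_be_to_cpu_32(mcqe->rx_hash_result);
}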

/**
 * Calculate packet type and offload flag for mbuf and store it.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param ptype_info
 *   Array of four 4-byte packet type info extracted from the original
 *   completion descriptor.
 * @param flow_tag
 *   Array of four 4-byte flow ID extracted from the original completion
 *   descriptor.
 * @param op_err
 *   Opcode vector having responder error status. Each field is 4B.
 * @param pkts
 *   Pointer to array of packets to be filled.
 */
static inline void
rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
			 uint32x4_t ptype_info, uint32x4_t flow_tag,
			 uint16x4_t op_err, struct rte_mbuf **pkts)
{
	uint16x4_t ptype;
	uint32x4_t pinfo, cv_flags;
	uint32x4_t ol_flags =
		vdupq_n_u32(rxq->rss_hash * PKT_RX_RSS_HASH |
			    rxq->hw_timestamp * PKT_RX_TIMESTAMP);
	const uint32x4_t ptype_ol_mask = { 0x106, 0x106, 0x106, 0x106 };
	const uint8x16_t cv_flag_sel = {
		0,
		(uint8_t)(PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED),
		(uint8_t)(PKT_RX_IP_CKSUM_GOOD >> 1),
		0,
		(uint8_t)(PKT_RX_L4_CKSUM_GOOD >> 1),
		0,
		(uint8_t)((PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1),
		0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	const uint32x4_t cv_mask =
		vdupq_n_u32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
			    PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
	const uint64x1_t mbuf_init = vld1_u64(&rxq->mbuf_initializer);
	const uint64x1_t r32_mask = vcreate_u64(0xffffffff);
	uint64x2_t rearm0, rearm1, rearm2, rearm3;
	uint8_t pt_idx0, pt_idx1, pt_idx2, pt_idx3;

	if (rxq->mark) {
		const uint32x4_t ft_def = vdupq_n_u32(MLX5_FLOW_MARK_DEFAULT);
		const uint32x4_t fdir_flags = vdupq_n_u32(PKT_RX_FDIR);
		uint32x4_t fdir_id_flags = vdupq_n_u32(PKT_RX_FDIR_ID);
		uint32x4_t invalid_mask;

		/* Check if flow tag is non-zero then set PKT_RX_FDIR. */
		invalid_mask = vceqzq_u32(flow_tag);
		ol_flags = vorrq_u32(ol_flags,
				     vbicq_u32(fdir_flags, invalid_mask));
		/* Mask out invalid entries. */
		fdir_id_flags = vbicq_u32(fdir_id_flags, invalid_mask);
		/* Check if flow tag is MLX5_FLOW_MARK_DEFAULT. */
		ol_flags = vorrq_u32(ol_flags,
				     vbicq_u32(fdir_id_flags,
					       vceqq_u32(flow_tag, ft_def)));
	}
	/*
	 * ptype_info has the following:
	 * bit[1] = l3_ok
	 * bit[2] = l4_ok
	 * bit[8] = cv
	 * bit[11:10] = l3_hdr_type
	 * bit[14:12] = l4_hdr_type
	 * bit[15] = ip_frag
	 * bit[16] = tunneled
	 * bit[17] = outer_l3_type
	 */
	ptype = vshrn_n_u32(ptype_info, 10);
	/* Errored packets will have RTE_PTYPE_ALL_MASK. */
	ptype = vorr_u16(ptype, op_err);
	pt_idx0 = vget_lane_u8(vreinterpret_u8_u16(ptype), 6);
	pt_idx1 = vget_lane_u8(vreinterpret_u8_u16(ptype), 4);
	pt_idx2 = vget_lane_u8(vreinterpret_u8_u16(ptype), 2);
	pt_idx3 = vget_lane_u8(vreinterpret_u8_u16(ptype), 0);
	pkts[0]->packet_type = mlx5_ptype_table[pt_idx0] |
			       !!(pt_idx0 & (1 << 6)) * rxq->tunnel;
	pkts[1]->packet_type = mlx5_ptype_table[pt_idx1] |
			       !!(pt_idx1 & (1 << 6)) * rxq->tunnel;
	pkts[2]->packet_type = mlx5_ptype_table[pt_idx2] |
			       !!(pt_idx2 & (1 << 6)) * rxq->tunnel;
	pkts[3]->packet_type = mlx5_ptype_table[pt_idx3] |
			       !!(pt_idx3 & (1 << 6)) * rxq->tunnel;
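	/*
	 * Note: after the narrowing shift above, the low byte of each lane
	 * holds bits 10-17 of ptype_info, so it indexes mlx5_ptype_table
	 * directly; (1 << 6) is the tunneled bit, which selects the tunnel
	 * ptype contribution.
	 */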
	/* Fill flags for checksum and VLAN. */
	pinfo = vandq_u32(ptype_info, ptype_ol_mask);
	pinfo = vreinterpretq_u32_u8(
		vqtbl1q_u8(cv_flag_sel, vreinterpretq_u8_u32(pinfo)));
	/* Locate checksum flags at byte[2:1] and merge with VLAN flags. */
	cv_flags = vshlq_n_u32(pinfo, 9);
	cv_flags = vorrq_u32(pinfo, cv_flags);
	/* Move back flags to start from byte[0]. */
	cv_flags = vshrq_n_u32(cv_flags, 8);
	/* Mask out garbage bits. */
	cv_flags = vandq_u32(cv_flags, cv_mask);
	/* Merge to ol_flags. */
	ol_flags = vorrq_u32(ol_flags, cv_flags);
	/* Merge mbuf_init and ol_flags, and store. */
	rearm0 = vcombine_u64(mbuf_init,
			      vshr_n_u64(vget_high_u64(vreinterpretq_u64_u32(
					 ol_flags)), 32));
	rearm1 = vcombine_u64(mbuf_init,
			      vand_u64(vget_high_u64(vreinterpretq_u64_u32(
				       ol_flags)), r32_mask));
	rearm2 = vcombine_u64(mbuf_init,
			      vshr_n_u64(vget_low_u64(vreinterpretq_u64_u32(
					 ol_flags)), 32));
	rearm3 = vcombine_u64(mbuf_init,
			      vand_u64(vget_low_u64(vreinterpretq_u64_u32(
				       ol_flags)), r32_mask));
	vst1q_u64((void *)&pkts[0]->rearm_data, rearm0);
	vst1q_u64((void *)&pkts[1]->rearm_data, rearm1);
	vst1q_u64((void *)&pkts[2]->rearm_data, rearm2);
	vst1q_u64((void *)&pkts[3]->rearm_data, rearm3);
}
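
/*
 * Note on the rearm stores above: mbuf_init covers the first 8B of
 * rearm_data (data_off, refcnt, nb_segs, port) and the second 8B lands on
 * ol_flags, so each 16B store initializes one mbuf and writes one 32-bit
 * lane of ol_flags zero-extended to 64 bits.
 */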

/**
 * Receive burst of packets. An errored completion also consumes a mbuf, but the
 * packet_type is set to be RTE_PTYPE_ALL_MASK. Marked mbufs should be freed
 * before returning to application.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 * @param[out] err
 *   Pointer to a flag. Set non-zero value if pkts array has at least one error
 *   packet to handle.
 *
 * @return
 *   Number of packets received including errors (<= pkts_n).
 */
static inline uint16_t
rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
	    uint64_t *err)
{
	const uint16_t q_n = 1 << rxq->cqe_n;
	const uint16_t q_mask = q_n - 1;
	volatile struct mlx5_cqe *cq;
	struct rte_mbuf **elts;
	unsigned int pos;
	uint64_t n;
	uint16_t repl_n;
	uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP;
	uint16_t nocmp_n = 0;
	uint16_t rcvd_pkt = 0;
	unsigned int cq_idx = rxq->cq_ci & q_mask;
	unsigned int elts_idx;
	const uint16x4_t ownership = vdup_n_u16(!(rxq->cq_ci & (q_mask + 1)));
	const uint16x4_t owner_check = vcreate_u16(0x0001000100010001);
	const uint16x4_t opcode_check = vcreate_u16(0x00f000f000f000f0);
	const uint16x4_t format_check = vcreate_u16(0x000c000c000c000c);
	const uint16x4_t resp_err_check = vcreate_u16(0x00e000e000e000e0);
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint32_t rcvd_byte = 0;
#endif
	/* Mask to generate 16B length vector. */
	const uint8x8_t len_shuf_m = {
		52, 53, /* 4th CQE */
		36, 37, /* 3rd CQE */
		20, 21, /* 2nd CQE */
		4, 5 /* 1st CQE */
	};
	/* Mask to extract 16B data from a 64B CQE. */
	const uint8x16_t cqe_shuf_m = {
		28, 29, /* hdr_type_etc */
		0, /* pkt_info */
		-1, /* null */
		47, 46, /* byte_cnt, bswap16 */
		31, 30, /* vlan_info, bswap16 */
		15, 14, 13, 12, /* rx_hash_res, bswap32 */
		57, 58, 59, /* flow_tag */
		63 /* op_own */
	};
	/* Mask to generate 16B data for mbuf. */
	const uint8x16_t mb_shuf_m = {
		4, 5, -1, -1, /* pkt_len */
		4, 5, /* data_len */
		6, 7, /* vlan_tci */
		8, 9, 10, 11, /* hash.rss */
		12, 13, 14, -1 /* hash.fdir.hi */
	};
	/* Mask to generate 16B owner vector. */
	const uint8x8_t owner_shuf_m = {
		63, -1, /* 4th CQE */
		47, -1, /* 3rd CQE */
		31, -1, /* 2nd CQE */
		15, -1 /* 1st CQE */
	};
	/* Mask to generate a vector having packet_type/ol_flags. */
	const uint8x16_t ptype_shuf_m = {
		48, 49, 50, -1, /* 4th CQE */
		32, 33, 34, -1, /* 3rd CQE */
		16, 17, 18, -1, /* 2nd CQE */
		0, 1, 2, -1 /* 1st CQE */
	};
	/* Mask to generate a vector having flow tags. */
	const uint8x16_t ftag_shuf_m = {
		60, 61, 62, -1, /* 4th CQE */
		44, 45, 46, -1, /* 3rd CQE */
		28, 29, 30, -1, /* 2nd CQE */
		12, 13, 14, -1 /* 1st CQE */
	};
	const uint16x8_t crc_adj = {
		0, 0, rxq->crc_present * RTE_ETHER_CRC_LEN, 0, 0, 0, 0, 0
	};
	const uint32x4_t flow_mark_adj = { 0, 0, 0, rxq->mark * (-1) };

	assert(rxq->sges_n == 0);
	assert(rxq->cqe_n == rxq->elts_n);
	cq = &(*rxq->cqes)[cq_idx];
	rte_prefetch_non_temporal(cq);
	rte_prefetch_non_temporal(cq + 1);
	rte_prefetch_non_temporal(cq + 2);
	rte_prefetch_non_temporal(cq + 3);
	pkts_n = RTE_MIN(pkts_n, MLX5_VPMD_RX_MAX_BURST);
	repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
	if (repl_n >= rxq->rq_repl_thresh)
		mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
	/* See if there are unreturned mbufs from a compressed CQE. */
	rcvd_pkt = rxq->decompressed;
	if (rcvd_pkt > 0) {
		rcvd_pkt = RTE_MIN(rcvd_pkt, pkts_n);
		rxq_copy_mbuf_v(rxq, pkts, rcvd_pkt);
		rxq->rq_pi += rcvd_pkt;
		pkts += rcvd_pkt;
		rxq->decompressed -= rcvd_pkt;
	}
	elts_idx = rxq->rq_pi & q_mask;
	elts = &(*rxq->elts)[elts_idx];
	/* Not to overflow pkts array. */
	pkts_n = RTE_ALIGN_FLOOR(pkts_n - rcvd_pkt, MLX5_VPMD_DESCS_PER_LOOP);
	/* Not to cross queue end. */
	pkts_n = RTE_MIN(pkts_n, q_n - elts_idx);
	pkts_n = RTE_MIN(pkts_n, q_n - cq_idx);
	if (!pkts_n)
		return rcvd_pkt;
	/* At this point, there shouldn't be any remaining packets. */
	assert(rxq->decompressed == 0);
	/*
	 * Note that vectors have reverse order - {v3, v2, v1, v0}, because
	 * there's no instruction to count trailing zeros. __builtin_clzl() is
	 * used instead.
	 *
	 * A. copy 4 mbuf pointers from elts ring to returning pkts.
	 * B. load 64B CQE and extract necessary fields
	 *    Final 16bytes cqes[] extracted from original 64bytes CQE has the
	 *    following structure:
	 *        struct {
	 *            uint16_t hdr_type_etc;
	 *            uint8_t  pkt_info;
	 *            uint8_t  rsvd;
	 *            uint16_t byte_cnt;
	 *            uint16_t vlan_info;
	 *            uint32_t rx_hash_res;
	 *            uint8_t  flow_tag[3];
	 *            uint8_t  op_own;
	 *        } c;
	 * C. fill in mbuf.
	 * D. get valid CQEs.
	 * E. find compressed CQE.
	 */
	for (pos = 0;
	     pos < pkts_n;
	     pos += MLX5_VPMD_DESCS_PER_LOOP) {
		uint16x4_t op_own;
		uint16x4_t opcode, owner_mask, invalid_mask;
		uint16x4_t comp_mask;
		uint16x4_t mask;
		uint16x4_t byte_cnt;
		uint32x4_t ptype_info, flow_tag;
		register uint64x2_t c0, c1, c2, c3;
		uint8_t *p0, *p1, *p2, *p3;
		uint8_t *e0 = (void *)&elts[pos]->pkt_len;
		uint8_t *e1 = (void *)&elts[pos + 1]->pkt_len;
		uint8_t *e2 = (void *)&elts[pos + 2]->pkt_len;
		uint8_t *e3 = (void *)&elts[pos + 3]->pkt_len;
		void *elts_p = (void *)&elts[pos];
		void *pkts_p = (void *)&pkts[pos];

		/* A.0 do not cross the end of CQ. */
		mask = vcreate_u16(pkts_n - pos < MLX5_VPMD_DESCS_PER_LOOP ?
				   -1UL >> ((pkts_n - pos) *
					    sizeof(uint16_t) * 8) : 0);
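		/*
		 * Note: lanes are in reverse order (lane 0 = 4th CQE), so
		 * when fewer than 4 CQEs remain, the right shift leaves ones
		 * only in the lanes past the tail; those are folded into
		 * invalid_mask below.
		 */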
		p0 = (void *)&cq[pos].pkt_info;
		p1 = p0 + (pkts_n - pos > 1) * sizeof(struct mlx5_cqe);
		p2 = p1 + (pkts_n - pos > 2) * sizeof(struct mlx5_cqe);
		p3 = p2 + (pkts_n - pos > 3) * sizeof(struct mlx5_cqe);
		/* B.0 (CQE 3) load a block having op_own. */
		c3 = vld1q_u64((uint64_t *)(p3 + 48));
		/* B.0 (CQE 2) load a block having op_own. */
		c2 = vld1q_u64((uint64_t *)(p2 + 48));
		/* B.0 (CQE 1) load a block having op_own. */
		c1 = vld1q_u64((uint64_t *)(p1 + 48));
		/* B.0 (CQE 0) load a block having op_own. */
		c0 = vld1q_u64((uint64_t *)(p0 + 48));
		/* Synchronize for loading the rest of blocks. */
		rte_cio_rmb();
		/* Prefetch next 4 CQEs. */
		if (pkts_n - pos >= 2 * MLX5_VPMD_DESCS_PER_LOOP) {
			unsigned int next = pos + MLX5_VPMD_DESCS_PER_LOOP;
			rte_prefetch_non_temporal(&cq[next]);
			rte_prefetch_non_temporal(&cq[next + 1]);
			rte_prefetch_non_temporal(&cq[next + 2]);
			rte_prefetch_non_temporal(&cq[next + 3]);
		}
		__asm__ volatile (
		/* B.1 (CQE 3) load the rest of blocks. */
		"ld1 {v16.16b - v18.16b}, [%[p3]] \n\t"
		/* B.2 (CQE 3) move the block having op_own. */
		"mov v19.16b, %[c3].16b \n\t"
		/* B.3 (CQE 3) extract 16B fields. */
		"tbl v23.16b, {v16.16b - v19.16b}, %[cqe_shuf_m].16b \n\t"
		/* B.1 (CQE 2) load the rest of blocks. */
		"ld1 {v16.16b - v18.16b}, [%[p2]] \n\t"
		/* B.4 (CQE 3) adjust CRC length. */
		"sub v23.8h, v23.8h, %[crc_adj].8h \n\t"
		/* C.1 (CQE 3) generate final structure for mbuf. */
		"tbl v15.16b, {v23.16b}, %[mb_shuf_m].16b \n\t"
		/* B.2 (CQE 2) move the block having op_own. */
		"mov v19.16b, %[c2].16b \n\t"
		/* B.3 (CQE 2) extract 16B fields. */
		"tbl v22.16b, {v16.16b - v19.16b}, %[cqe_shuf_m].16b \n\t"
		/* B.1 (CQE 1) load the rest of blocks. */
		"ld1 {v16.16b - v18.16b}, [%[p1]] \n\t"
		/* B.4 (CQE 2) adjust CRC length. */
		"sub v22.8h, v22.8h, %[crc_adj].8h \n\t"
		/* C.1 (CQE 2) generate final structure for mbuf. */
		"tbl v14.16b, {v22.16b}, %[mb_shuf_m].16b \n\t"
		/* B.2 (CQE 1) move the block having op_own. */
		"mov v19.16b, %[c1].16b \n\t"
		/* B.3 (CQE 1) extract 16B fields. */
		"tbl v21.16b, {v16.16b - v19.16b}, %[cqe_shuf_m].16b \n\t"
		/* B.1 (CQE 0) load the rest of blocks. */
		"ld1 {v16.16b - v18.16b}, [%[p0]] \n\t"
		/* B.4 (CQE 1) adjust CRC length. */
		"sub v21.8h, v21.8h, %[crc_adj].8h \n\t"
		/* C.1 (CQE 1) generate final structure for mbuf. */
		"tbl v13.16b, {v21.16b}, %[mb_shuf_m].16b \n\t"
		/* B.2 (CQE 0) move the block having op_own. */
		"mov v19.16b, %[c0].16b \n\t"
		/* A.1 load mbuf pointers. */
		"ld1 {v24.2d - v25.2d}, [%[elts_p]] \n\t"
		/* B.3 (CQE 0) extract 16B fields. */
		"tbl v20.16b, {v16.16b - v19.16b}, %[cqe_shuf_m].16b \n\t"
		/* B.4 (CQE 0) adjust CRC length. */
		"sub v20.8h, v20.8h, %[crc_adj].8h \n\t"
		/* D.1 extract op_own byte. */
		"tbl %[op_own].8b, {v20.16b - v23.16b}, %[owner_shuf_m].8b \n\t"
		/* C.2 (CQE 3) adjust flow mark. */
		"add v15.4s, v15.4s, %[flow_mark_adj].4s \n\t"
		/* C.3 (CQE 3) fill in mbuf - rx_descriptor_fields1. */
		"st1 {v15.2d}, [%[e3]] \n\t"
		/* C.2 (CQE 2) adjust flow mark. */
		"add v14.4s, v14.4s, %[flow_mark_adj].4s \n\t"
		/* C.3 (CQE 2) fill in mbuf - rx_descriptor_fields1. */
		"st1 {v14.2d}, [%[e2]] \n\t"
		/* C.1 (CQE 0) generate final structure for mbuf. */
		"tbl v12.16b, {v20.16b}, %[mb_shuf_m].16b \n\t"
		/* C.2 (CQE 1) adjust flow mark. */
		"add v13.4s, v13.4s, %[flow_mark_adj].4s \n\t"
		/* C.3 (CQE 1) fill in mbuf - rx_descriptor_fields1. */
		"st1 {v13.2d}, [%[e1]] \n\t"
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Extract byte_cnt. */
		"tbl %[byte_cnt].8b, {v20.16b - v23.16b}, %[len_shuf_m].8b \n\t"
#endif
		/* Extract ptype_info. */
		"tbl %[ptype_info].16b, {v20.16b - v23.16b}, %[ptype_shuf_m].16b \n\t"
		/* Extract flow_tag. */
		"tbl %[flow_tag].16b, {v20.16b - v23.16b}, %[ftag_shuf_m].16b \n\t"
		/* A.2 copy mbuf pointers. */
		"st1 {v24.2d - v25.2d}, [%[pkts_p]] \n\t"
		/* C.2 (CQE 0) adjust flow mark. */
		"add v12.4s, v12.4s, %[flow_mark_adj].4s \n\t"
		/* C.3 (CQE 0) fill in mbuf - rx_descriptor_fields1. */
		"st1 {v12.2d}, [%[e0]] \n\t"
		:[op_own]"=&w"(op_own),
		 [byte_cnt]"=&w"(byte_cnt),
		 [ptype_info]"=&w"(ptype_info),
		 [flow_tag]"=&w"(flow_tag)
		:[p3]"r"(p3), [p2]"r"(p2), [p1]"r"(p1), [p0]"r"(p0),
		 [e3]"r"(e3), [e2]"r"(e2), [e1]"r"(e1), [e0]"r"(e0),
		 [c3]"w"(c3), [c2]"w"(c2), [c1]"w"(c1), [c0]"w"(c0),
		 [elts_p]"r"(elts_p),
		 [pkts_p]"r"(pkts_p),
		 [cqe_shuf_m]"w"(cqe_shuf_m),
		 [mb_shuf_m]"w"(mb_shuf_m),
		 [owner_shuf_m]"w"(owner_shuf_m),
		 [len_shuf_m]"w"(len_shuf_m),
		 [ptype_shuf_m]"w"(ptype_shuf_m),
		 [ftag_shuf_m]"w"(ftag_shuf_m),
		 [crc_adj]"w"(crc_adj),
		 [flow_mark_adj]"w"(flow_mark_adj)
		:"memory",
		 "v12", "v13", "v14", "v15",
		 "v16", "v17", "v18", "v19",
		 "v20", "v21", "v22", "v23",
		 "v24", "v25");
		/* D.2 flip owner bit to mark CQEs from last round. */
		owner_mask = vand_u16(op_own, owner_check);
		owner_mask = vceq_u16(owner_mask, ownership);
		/* D.3 get mask for invalidated CQEs. */
		opcode = vand_u16(op_own, opcode_check);
		invalid_mask = vceq_u16(opcode_check, opcode);
		/* E.1 find compressed CQE format. */
		comp_mask = vand_u16(op_own, format_check);
		comp_mask = vceq_u16(comp_mask, format_check);
		/* D.4 mask out beyond boundary. */
		invalid_mask = vorr_u16(invalid_mask, mask);
		/* D.5 merge invalid_mask with invalid owner. */
		invalid_mask = vorr_u16(invalid_mask, owner_mask);
		/* E.2 mask out invalid entries. */
		comp_mask = vbic_u16(comp_mask, invalid_mask);
		/* E.3 get the first compressed CQE. */
		comp_idx = __builtin_clzl(vget_lane_u64(vreinterpret_u64_u16(
					  comp_mask), 0)) /
					  (sizeof(uint16_t) * 8);
		/* D.6 mask out entries after the compressed CQE. */
		mask = vcreate_u16(comp_idx < MLX5_VPMD_DESCS_PER_LOOP ?
				   -1UL >> (comp_idx * sizeof(uint16_t) * 8) :
				   0);
		invalid_mask = vorr_u16(invalid_mask, mask);
		/* D.7 count non-compressed valid CQEs. */
		n = __builtin_clzl(vget_lane_u64(vreinterpret_u64_u16(
				   invalid_mask), 0)) / (sizeof(uint16_t) * 8);
		nocmp_n += n;
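		/*
		 * Worked example: if only the first two CQEs are valid,
		 * invalid_mask = {-1, -1, 0, 0} with lane 3 holding the 1st
		 * CQE, i.e. 0x00000000ffffffff as a 64-bit scalar, so
		 * __builtin_clzl() returns 32 and n = 32 / 16 = 2.
		 */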
		/* D.2 get the final invalid mask. */
		mask = vcreate_u16(n < MLX5_VPMD_DESCS_PER_LOOP ?
				   -1UL >> (n * sizeof(uint16_t) * 8) : 0);
		invalid_mask = vorr_u16(invalid_mask, mask);
		/* D.3 check error in opcode. */
		opcode = vceq_u16(resp_err_check, opcode);
		opcode = vbic_u16(opcode, invalid_mask);
		/* D.4 mark if any error is set. */
		*err |= vget_lane_u64(vreinterpret_u64_u16(opcode), 0);
		/* C.4 fill in mbuf - rearm_data and packet_type. */
		rxq_cq_to_ptype_oflags_v(rxq, ptype_info, flow_tag,
					 opcode, &elts[pos]);
		if (rxq->hw_timestamp) {
			elts[pos]->timestamp =
				rte_be_to_cpu_64(
					container_of(p0, struct mlx5_cqe,
						     pkt_info)->timestamp);
			elts[pos + 1]->timestamp =
				rte_be_to_cpu_64(
					container_of(p1, struct mlx5_cqe,
						     pkt_info)->timestamp);
			elts[pos + 2]->timestamp =
				rte_be_to_cpu_64(
					container_of(p2, struct mlx5_cqe,
						     pkt_info)->timestamp);
			elts[pos + 3]->timestamp =
				rte_be_to_cpu_64(
					container_of(p3, struct mlx5_cqe,
						     pkt_info)->timestamp);
		}
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Add up received bytes count. */
		byte_cnt = vbic_u16(byte_cnt, invalid_mask);
		rcvd_byte += vget_lane_u64(vpaddl_u32(vpaddl_u16(byte_cnt)), 0);
#endif
		/*
		 * Break the loop unless more valid CQEs are expected, or if
		 * there's a compressed CQE.
		 */
		if (n != MLX5_VPMD_DESCS_PER_LOOP)
			break;
	}
	/* If no new CQE seen, return without updating cq_db. */
	if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP))
		return rcvd_pkt;
	/* Update the consumer indexes for non-compressed CQEs. */
	assert(nocmp_n <= pkts_n);
	rxq->cq_ci += nocmp_n;
	rxq->rq_pi += nocmp_n;
	rcvd_pkt += nocmp_n;
#ifdef MLX5_PMD_SOFT_COUNTERS
	rxq->stats.ipackets += nocmp_n;
	rxq->stats.ibytes += rcvd_byte;
#endif
	/* Decompress the last CQE if compressed. */
	if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n) {
		assert(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
		rxq->decompressed = rxq_cq_decompress_v(rxq, &cq[nocmp_n],
							&elts[nocmp_n]);
		/* Return more packets if needed. */
		if (nocmp_n < pkts_n) {
			uint16_t n = rxq->decompressed;

			n = RTE_MIN(n, pkts_n - nocmp_n);
			rxq_copy_mbuf_v(rxq, &pkts[nocmp_n], n);
			rxq->rq_pi += n;
			rcvd_pkt += n;
			rxq->decompressed -= n;
		}
	}
	rte_compiler_barrier();
	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
	return rcvd_pkt;
}

#endif /* RTE_PMD_MLX5_RXTX_VEC_NEON_H_ */