/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef RTE_PMD_MLX5_RXTX_VEC_NEON_H_
#define RTE_PMD_MLX5_RXTX_VEC_NEON_H_

#include <assert.h>
#include <stdint.h>
#include <arm_neon.h>

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>

#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rxtx_vec.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"

#pragma GCC diagnostic ignored "-Wcast-qual"

/**
 * Fill in buffer descriptors in a multi-packet send descriptor.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param dseg
 *   Pointer to buffer descriptor to be written.
 * @param pkts
 *   Pointer to array of packets to be sent.
 * @param n
 *   Number of packets to be filled.
 */
static inline void
txq_wr_dseg_v(struct mlx5_txq_data *txq, uint8_t *dseg,
	      struct rte_mbuf **pkts, unsigned int n)
{
	unsigned int pos;
	uintptr_t addr;
	const uint8x16_t dseg_shuf_m = {
		 3,  2,  1,  0, /* length, bswap32 */
		 4,  5,  6,  7, /* lkey */
		15, 14, 13, 12, /* addr, bswap64 */
		11, 10,  9,  8
	};
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint32_t tx_byte = 0;
#endif
	for (pos = 0; pos < n; ++pos, dseg += MLX5_WQE_DWORD_SIZE) {
		uint8x16_t desc;
		struct rte_mbuf *pkt = pkts[pos];

		addr = rte_pktmbuf_mtod(pkt, uintptr_t);
		desc = vreinterpretq_u8_u32((uint32x4_t) {
				DATA_LEN(pkt),
				mlx5_tx_mb2mr(txq, pkt),
				addr,
				addr >> 32 });
		desc = vqtbl1q_u8(desc, dseg_shuf_m);
		vst1q_u8(dseg, desc);
#ifdef MLX5_PMD_SOFT_COUNTERS
		tx_byte += DATA_LEN(pkt);
#endif
	}
#ifdef MLX5_PMD_SOFT_COUNTERS
	txq->stats.obytes += tx_byte;
#endif
}
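
/*
 * Added note (not in the original source): each 16B slot written by
 * txq_wr_dseg_v() is expected to match the MLX5 data segment wire layout
 * sketched below; dseg_shuf_m turns the host-endian vector
 * { length, lkey, addr_lo, addr_hi } into this big-endian form in place.
 * The lkey bytes are not swapped because mlx5_tx_mb2mr() already returns
 * the key in network byte order.
 *
 *	struct dseg_layout {		sketch, little-endian host assumed
 *		uint32_t byte_count;	big-endian, from DATA_LEN(pkt)
 *		uint32_t lkey;		memory region key
 *		uint64_t addr;		big-endian packet buffer address
 *	};
 */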

/**
 * Send multi-segmented packets until a single-segment packet is encountered
 * in the pkts list.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param pkts
 *   Pointer to array of packets to be sent.
 * @param pkts_n
 *   Number of packets to be sent.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
	      uint16_t pkts_n)
{
	uint16_t elts_head = txq->elts_head;
	const uint16_t elts_n = 1 << txq->elts_n;
	const uint16_t elts_m = elts_n - 1;
	const uint16_t wq_n = 1 << txq->wqe_n;
	const uint16_t wq_mask = wq_n - 1;
	const unsigned int nb_dword_per_wqebb =
		MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE;
	const unsigned int nb_dword_in_hdr =
		sizeof(struct mlx5_wqe) / MLX5_WQE_DWORD_SIZE;
	unsigned int n;
	volatile struct mlx5_wqe *wqe = NULL;

	assert(elts_n > pkts_n);
	mlx5_tx_complete(txq);
	if (unlikely(!pkts_n))
		return 0;
	for (n = 0; n < pkts_n; ++n) {
		struct rte_mbuf *buf = pkts[n];
		unsigned int segs_n = buf->nb_segs;
		unsigned int ds = nb_dword_in_hdr;
		unsigned int len = PKT_LEN(buf);
		uint16_t wqe_ci = txq->wqe_ci;
		const uint8x16_t ctrl_shuf_m = {
			 3,  2,  1,  0, /* bswap32 */
			 7,  6,  5,  4, /* bswap32 */
			11, 10,  9,  8, /* bswap32 */
			15, 14, 13, 12  /* bswap32 */
		};
		uint8_t cs_flags = 0;
		uint16_t max_elts;
		uint16_t max_wqe;
		uint8x16_t *t_wqe;
		uint8_t *dseg;
		uint8x16_t ctrl;

		max_elts = elts_n - (elts_head - txq->elts_tail);
		max_wqe = wq_n - (txq->wqe_ci - txq->wqe_pi);
		/*
		 * An MPW session consumes 2 WQEs at most to
		 * include MLX5_MPW_DSEG_MAX pointers.
		 */
		if (segs_n == 1 ||
		    max_elts < segs_n || max_wqe < 2)
			break;
		wqe = &((volatile struct mlx5_wqe64 *)
			 txq->wqes)[wqe_ci & wq_mask].hdr;
		if (buf->ol_flags &
		     (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
			const uint64_t is_tunneled =
				buf->ol_flags & (PKT_TX_TUNNEL_GRE |
						 PKT_TX_TUNNEL_VXLAN);

			if (is_tunneled && txq->tunnel_en) {
				cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
					   MLX5_ETH_WQE_L4_INNER_CSUM;
				if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
					cs_flags |= MLX5_ETH_WQE_L3_CSUM;
			} else {
				cs_flags = MLX5_ETH_WQE_L3_CSUM |
					   MLX5_ETH_WQE_L4_CSUM;
			}
		}
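		/*
		 * Added note on the checksum flags chosen above: tunneled
		 * packets request the inner L3/L4 checksum offloads and add
		 * the outer L3 checksum only when PKT_TX_OUTER_IP_CKSUM is
		 * set; non-tunneled packets get the plain L3/L4 flags.
		 */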
		/* Title WQEBB pointer. */
		t_wqe = (uint8x16_t *)wqe;
		dseg = (uint8_t *)(wqe + 1);
		do {
			if (!(ds++ % nb_dword_per_wqebb)) {
				dseg = (uint8_t *)
					&((volatile struct mlx5_wqe64 *)
					   txq->wqes)[++wqe_ci & wq_mask];
			}
			txq_wr_dseg_v(txq, dseg, &buf, 1);
			dseg += MLX5_WQE_DWORD_SIZE;
			(*txq->elts)[elts_head++ & elts_m] = buf;
			buf = buf->next;
		} while (--segs_n);

		/* Fill CTRL in the header. */
		ctrl = vreinterpretq_u8_u32((uint32x4_t) {
				MLX5_OPC_MOD_MPW << 24 |
				txq->wqe_ci << 8 | MLX5_OPCODE_TSO,
				txq->qp_num_8s | ds, 0, 0});
		ctrl = vqtbl1q_u8(ctrl, ctrl_shuf_m);
		vst1q_u8((void *)t_wqe, ctrl);
		/* Fill ESEG in the header. */
		vst1q_u16((void *)(t_wqe + 1),
			  (uint16x8_t) { 0, 0, cs_flags, rte_cpu_to_be_16(len),
					 0, 0, 0, 0 });
		txq->wqe_ci = wqe_ci;
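		/*
		 * Added note: the CTRL segment built above packs four
		 * big-endian dwords { opmod:wqe_index:opcode, qpn:ds,
		 * flags, immediate }; ds counts the 16B header and data
		 * segments consumed by this MPW WQE, and ctrl_shuf_m only
		 * byte-swaps each 32-bit field into network order.
		 */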
	txq->elts_comp += (uint16_t)(elts_head - txq->elts_head);
	txq->elts_head = elts_head;
	if (txq->elts_comp >= MLX5_TX_COMP_THRESH) {
		wqe->ctrl[2] = rte_cpu_to_be_32(8);
		wqe->ctrl[3] = txq->elts_head;
		txq->elts_comp = 0;
	}
#ifdef MLX5_PMD_SOFT_COUNTERS
	txq->stats.opackets += n;
#endif
	mlx5_tx_dbrec(txq, wqe);
	return n;
}

/**
 * Send a burst of packets with Enhanced MPW. If a multi-segment packet is
 * encountered, it returns so that the packet can be processed by
 * txq_scatter_v(). All the packets in the pkts list must be single-segment
 * packets with the same offload flags. This is checked beforehand by
 * txq_check_multiseg() and txq_calc_offload().
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param pkts
 *   Pointer to array of packets to be sent.
 * @param pkts_n
 *   Number of packets to be sent (<= MLX5_VPMD_TX_MAX_BURST).
 * @param cs_flags
 *   Checksum offload flags to be written in the descriptor.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
static inline uint16_t
txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
	    uint8_t cs_flags)
{
	struct rte_mbuf **elts;
	uint16_t elts_head = txq->elts_head;
	const uint16_t elts_n = 1 << txq->elts_n;
	const uint16_t elts_m = elts_n - 1;
	const unsigned int nb_dword_per_wqebb =
		MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE;
	const unsigned int nb_dword_in_hdr =
		sizeof(struct mlx5_wqe) / MLX5_WQE_DWORD_SIZE;
	unsigned int n;
	unsigned int pos;
	uint16_t max_elts;
	uint16_t max_wqe;
	uint32_t comp_req = 0;
	const uint16_t wq_n = 1 << txq->wqe_n;
	const uint16_t wq_mask = wq_n - 1;
	uint16_t wq_idx = txq->wqe_ci & wq_mask;
	volatile struct mlx5_wqe64 *wq =
		&((volatile struct mlx5_wqe64 *)txq->wqes)[wq_idx];
	volatile struct mlx5_wqe *wqe = (volatile struct mlx5_wqe *)wq;
	const uint8x16_t ctrl_shuf_m = {
		 3,  2,  1,  0, /* bswap32 */
		 7,  6,  5,  4, /* bswap32 */
		11, 10,  9,  8, /* bswap32 */
		15, 14, 13, 12  /* bswap32 */
	};
	uint8x16_t *t_wqe;
	uint8_t *dseg;
	uint8x16_t ctrl;

	/* Make sure all packets can fit into a single WQE. */
	assert(elts_n > pkts_n);
	mlx5_tx_complete(txq);
	max_elts = (elts_n - (elts_head - txq->elts_tail));
	max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
	pkts_n = RTE_MIN((unsigned int)RTE_MIN(pkts_n, max_wqe), max_elts);
	if (unlikely(!pkts_n))
		return 0;
	elts = &(*txq->elts)[elts_head & elts_m];
	/* Loop for available tailroom first. */
	n = RTE_MIN(elts_n - (elts_head & elts_m), pkts_n);
	for (pos = 0; pos < (n & -2); pos += 2)
		vst1q_u64((void *)&elts[pos], vld1q_u64((void *)&pkts[pos]));
	if (n & 1)
		elts[pos] = pkts[pos];
	/* Check if it crosses the end of the queue. */
	if (unlikely(n < pkts_n)) {
		elts = &(*txq->elts)[0];
		for (pos = 0; pos < pkts_n - n; ++pos)
			elts[pos] = pkts[n + pos];
	}
	txq->elts_head += pkts_n;
	/* Save title WQEBB pointer. */
	t_wqe = (uint8x16_t *)wqe;
	dseg = (uint8_t *)(wqe + 1);
	/* Calculate the number of entries to the end. */
	n = RTE_MIN(
		(wq_n - wq_idx) * nb_dword_per_wqebb - nb_dword_in_hdr,
		pkts_n);
	/* Fill DSEGs. */
	txq_wr_dseg_v(txq, dseg, pkts, n);
	/* Check if it crosses the end of the queue. */
	if (n < pkts_n) {
		dseg = (uint8_t *)txq->wqes;
		txq_wr_dseg_v(txq, dseg, &pkts[n], pkts_n - n);
	}
	if (txq->elts_comp + pkts_n < MLX5_TX_COMP_THRESH) {
		txq->elts_comp += pkts_n;
	} else {
		/* Request a completion. */
		txq->elts_comp = 0;
		comp_req = 8;
	}
	/* Fill CTRL in the header. */
	ctrl = vreinterpretq_u8_u32((uint32x4_t) {
			MLX5_OPC_MOD_ENHANCED_MPSW << 24 |
			txq->wqe_ci << 8 | MLX5_OPCODE_ENHANCED_MPSW,
			txq->qp_num_8s | (pkts_n + 2),
			comp_req,
			txq->elts_head });
	ctrl = vqtbl1q_u8(ctrl, ctrl_shuf_m);
	vst1q_u8((void *)t_wqe, ctrl);
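	/*
	 * Added note: the descriptor count written above (pkts_n + 2)
	 * covers the two 16B header slots of the Enhanced MPW WQE (CTRL
	 * and ESEG) plus one 16B data segment per packet.
	 */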
	/* Fill ESEG in the header. */
	vst1q_u8((void *)(t_wqe + 1),
		 (uint8x16_t) { 0, 0, 0, 0,
				cs_flags, 0, 0, 0,
				0, 0, 0, 0,
				0, 0, 0, 0 });
#ifdef MLX5_PMD_SOFT_COUNTERS
	txq->stats.opackets += pkts_n;
#endif
	txq->wqe_ci += (nb_dword_in_hdr + pkts_n + (nb_dword_per_wqebb - 1)) /
		       nb_dword_per_wqebb;
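	/*
	 * Added note: the expression above is the usual (x + d - 1) / d
	 * round-up, i.e. the header dwords plus one data segment per packet
	 * are converted into the number of whole WQEBBs consumed, and
	 * wqe_ci is advanced by that amount.
	 */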
	/* Ring QP doorbell. */
	mlx5_tx_dbrec(txq, wqe);
	return pkts_n;
}

/**
 * Copy mbufs from the RX SW ring to the array of packets to be returned to
 * the application.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param pkts
 *   Pointer to array of packets to be stored.
 * @param n
 *   Number of packets to be stored.
 */
static inline void
rxq_copy_mbuf_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t n)
{
	const uint16_t q_mask = (1 << rxq->elts_n) - 1;
	struct rte_mbuf **elts = &(*rxq->elts)[rxq->rq_pi & q_mask];
	unsigned int pos;
	uint16_t p = n & -2;

	for (pos = 0; pos < p; pos += 2) {
		uint64x2_t mbp;

		mbp = vld1q_u64((void *)&elts[pos]);
		vst1q_u64((void *)&pkts[pos], mbp);
	}
	if (n & 1)
		pkts[pos] = elts[pos];
}
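
/*
 * Added note: the vector copy above moves two 8-byte mbuf pointers per
 * iteration (assuming 64-bit pointers); a scalar copy handles the odd
 * trailing entry.
 */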

/**
 * Decompress a compressed completion and fill in mbufs in the RX SW ring
 * with data extracted from the title completion descriptor.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param cq
 *   Pointer to completion array having a compressed completion at first.
 * @param elts
 *   Pointer to SW ring to be filled. The first mbuf has to be pre-built from
 *   the title completion descriptor to be copied to the rest of the mbufs.
 */
static inline void
rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
		    struct rte_mbuf **elts)
{
	volatile struct mlx5_mini_cqe8 *mcq = (void *)&(cq + 1)->pkt_info;
	struct rte_mbuf *t_pkt = elts[0]; /* Title packet is pre-built. */
	unsigned int pos;
	unsigned int i;
	unsigned int inv = 0;
	/* Mask to shuffle from extracted mini CQE to mbuf. */
	const uint8x16_t mcqe_shuf_m1 = {
		-1, -1, -1, -1, /* skip packet_type */
		 7,  6, -1, -1, /* pkt_len, bswap16 */
		 7,  6,         /* data_len, bswap16 */
		-1, -1,         /* skip vlan_tci */
		 3,  2,  1,  0  /* hash.rss, bswap32 */
	};
	const uint8x16_t mcqe_shuf_m2 = {
		-1, -1, -1, -1, /* skip packet_type */
		15, 14, -1, -1, /* pkt_len, bswap16 */
		15, 14,         /* data_len, bswap16 */
		-1, -1,         /* skip vlan_tci */
		11, 10,  9,  8  /* hash.rss, bswap32 */
	};
	/* Restore the compressed count. Must be 16 bits. */
	const uint16_t mcqe_n = t_pkt->data_len +
				(rxq->crc_present * ETHER_CRC_LEN);
	const uint64x2_t rearm =
		vld1q_u64((void *)&t_pkt->rearm_data);
	const uint32x4_t rxdf_mask = {
		0xffffffff, /* packet_type */
		0,          /* skip pkt_len */
		0xffff0000, /* vlan_tci, skip data_len */
		0,          /* skip hash.rss */
	};
	const uint8x16_t rxdf =
		vandq_u8(vld1q_u8((void *)&t_pkt->rx_descriptor_fields1),
			 vreinterpretq_u8_u32(rxdf_mask));
	const uint16x8_t crc_adj = {
		0, 0,
		rxq->crc_present * ETHER_CRC_LEN, 0,
		rxq->crc_present * ETHER_CRC_LEN, 0,
		0, 0
	};
	const uint32_t flow_tag = t_pkt->hash.fdir.hi;
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint32_t rcvd_byte = 0;
#endif
	/* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */
	const uint8x8_t len_shuf_m = {
		 7,  6, /* 1st mCQE */
		15, 14, /* 2nd mCQE */
		23, 22, /* 3rd mCQE */
		31, 30  /* 4th mCQE */
	};

	/*
	 * A. load mCQEs into a 128bit register.
	 * B. store rearm data to mbuf.
	 * C. combine data from mCQEs with rx_descriptor_fields1.
	 * D. store rx_descriptor_fields1.
	 * E. store flow tag (rte_flow mark).
	 */
	for (pos = 0; pos < mcqe_n; ) {
		uint8_t *p = (void *)&mcq[pos % 8];
		uint8_t *e0 = (void *)&elts[pos]->rearm_data;
		uint8_t *e1 = (void *)&elts[pos + 1]->rearm_data;
		uint8_t *e2 = (void *)&elts[pos + 2]->rearm_data;
		uint8_t *e3 = (void *)&elts[pos + 3]->rearm_data;
		uint16x4_t byte_cnt;
#ifdef MLX5_PMD_SOFT_COUNTERS
		uint16x4_t invalid_mask =
			vcreate_u16(mcqe_n - pos < MLX5_VPMD_DESCS_PER_LOOP ?
				    -1UL << ((mcqe_n - pos) *
					     sizeof(uint16_t) * 8) : 0);
#endif
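		/*
		 * Added example: invalid_mask only guards the byte counter.
		 * Lanes follow mCQE order here, so with two mCQEs left
		 * (mcqe_n - pos == 2) the value -1UL << 32 marks lanes 2-3
		 * (the 3rd and 4th slots) and their byte counts are dropped
		 * by the vbic below.
		 */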
		if (!(pos & 0x7) && pos + 8 < mcqe_n)
			rte_prefetch0((void *)(cq + pos + 8));
		__asm__ volatile (
		/* A.1 load mCQEs into a 128bit register. */
		"ld1 {v16.16b - v17.16b}, [%[mcq]] \n\t"
		/* B.1 store rearm data to mbuf. */
		"st1 {%[rearm].2d}, [%[e0]] \n\t"
		"add %[e0], %[e0], #16 \n\t"
		"st1 {%[rearm].2d}, [%[e1]] \n\t"
		"add %[e1], %[e1], #16 \n\t"
		/* C.1 combine data from mCQEs with rx_descriptor_fields1. */
		"tbl v18.16b, {v16.16b}, %[mcqe_shuf_m1].16b \n\t"
		"tbl v19.16b, {v16.16b}, %[mcqe_shuf_m2].16b \n\t"
		"sub v18.8h, v18.8h, %[crc_adj].8h \n\t"
		"sub v19.8h, v19.8h, %[crc_adj].8h \n\t"
		"orr v18.16b, v18.16b, %[rxdf].16b \n\t"
		"orr v19.16b, v19.16b, %[rxdf].16b \n\t"
		/* D.1 store rx_descriptor_fields1. */
		"st1 {v18.2d}, [%[e0]] \n\t"
		"st1 {v19.2d}, [%[e1]] \n\t"
		/* B.1 store rearm data to mbuf. */
		"st1 {%[rearm].2d}, [%[e2]] \n\t"
		"add %[e2], %[e2], #16 \n\t"
		"st1 {%[rearm].2d}, [%[e3]] \n\t"
		"add %[e3], %[e3], #16 \n\t"
		/* C.1 combine data from mCQEs with rx_descriptor_fields1. */
		"tbl v18.16b, {v17.16b}, %[mcqe_shuf_m1].16b \n\t"
		"tbl v19.16b, {v17.16b}, %[mcqe_shuf_m2].16b \n\t"
		"sub v18.8h, v18.8h, %[crc_adj].8h \n\t"
		"sub v19.8h, v19.8h, %[crc_adj].8h \n\t"
		"orr v18.16b, v18.16b, %[rxdf].16b \n\t"
		"orr v19.16b, v19.16b, %[rxdf].16b \n\t"
		/* D.1 store rx_descriptor_fields1. */
		"st1 {v18.2d}, [%[e2]] \n\t"
		"st1 {v19.2d}, [%[e3]] \n\t"
#ifdef MLX5_PMD_SOFT_COUNTERS
		"tbl %[byte_cnt].8b, {v16.16b - v17.16b}, %[len_shuf_m].8b \n\t"
#endif
		:[byte_cnt]"=&w"(byte_cnt)
		:[mcq]"r"(p),
		 [rxdf]"w"(rxdf),
		 [rearm]"w"(rearm),
		 [e3]"r"(e3), [e2]"r"(e2), [e1]"r"(e1), [e0]"r"(e0),
		 [mcqe_shuf_m1]"w"(mcqe_shuf_m1),
		 [mcqe_shuf_m2]"w"(mcqe_shuf_m2),
		 [crc_adj]"w"(crc_adj),
		 [len_shuf_m]"w"(len_shuf_m)
		:"memory", "v16", "v17", "v18", "v19");
#ifdef MLX5_PMD_SOFT_COUNTERS
		byte_cnt = vbic_u16(byte_cnt, invalid_mask);
		rcvd_byte += vget_lane_u64(vpaddl_u32(vpaddl_u16(byte_cnt)), 0);
#endif
		/* E.1 store flow tag (rte_flow mark). */
		elts[pos]->hash.fdir.hi = flow_tag;
		elts[pos + 1]->hash.fdir.hi = flow_tag;
		elts[pos + 2]->hash.fdir.hi = flow_tag;
		elts[pos + 3]->hash.fdir.hi = flow_tag;

		pos += MLX5_VPMD_DESCS_PER_LOOP;
		/* Move to next CQE and invalidate consumed CQEs. */
		if (!(pos & 0x7) && pos < mcqe_n) {
			mcq = (void *)&(cq + pos)->pkt_info;
			for (i = 0; i < 8; ++i)
				cq[inv++].op_own = MLX5_CQE_INVALIDATE;
		}
	}
	/* Invalidate the rest of the CQEs. */
	for (; inv < mcqe_n; ++inv)
		cq[inv].op_own = MLX5_CQE_INVALIDATE;
#ifdef MLX5_PMD_SOFT_COUNTERS
	rxq->stats.ipackets += mcqe_n;
	rxq->stats.ibytes += rcvd_byte;
#endif
	rxq->cq_ci += mcqe_n;
}

/**
 * Calculate packet type and offload flags for an mbuf and store them.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param ptype_info
 *   Array of four 4-byte packet type info fields extracted from the original
 *   completion descriptor.
 * @param flow_tag
 *   Array of four 4-byte flow IDs extracted from the original completion
 *   descriptor.
 * @param op_err
 *   Opcode vector having responder error status. Each field is 4B.
 * @param pkts
 *   Pointer to array of packets to be filled.
 */
static inline void
rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
			 uint32x4_t ptype_info, uint32x4_t flow_tag,
			 uint16x4_t op_err, struct rte_mbuf **pkts)
{
	uint16x4_t ptype;
	uint32x4_t pinfo, cv_flags;
	uint32x4_t ol_flags =
		vdupq_n_u32(rxq->rss_hash * PKT_RX_RSS_HASH |
			    rxq->hw_timestamp * PKT_RX_TIMESTAMP);
	const uint32x4_t ptype_ol_mask = { 0x106, 0x106, 0x106, 0x106 };
	const uint8x16_t cv_flag_sel = {
		0,
		(uint8_t)(PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED),
		(uint8_t)(PKT_RX_IP_CKSUM_GOOD >> 1),
		0,
		(uint8_t)(PKT_RX_L4_CKSUM_GOOD >> 1),
		0,
		(uint8_t)((PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1),
		0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	const uint32x4_t cv_mask =
		vdupq_n_u32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
			    PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED);
	const uint64x1_t mbuf_init = vld1_u64(&rxq->mbuf_initializer);
	const uint64x1_t r32_mask = vcreate_u64(0xffffffff);
	uint64x2_t rearm0, rearm1, rearm2, rearm3;

	if (rxq->mark) {
		const uint32x4_t ft_def = vdupq_n_u32(MLX5_FLOW_MARK_DEFAULT);
		const uint32x4_t fdir_flags = vdupq_n_u32(PKT_RX_FDIR);
		const uint32x4_t fdir_id_flags = vdupq_n_u32(PKT_RX_FDIR_ID);

		/* Check if flow tag is non-zero then set PKT_RX_FDIR. */
		ol_flags = vorrq_u32(ol_flags, vbicq_u32(fdir_flags,
							 vceqzq_u32(flow_tag)));
		/* Set PKT_RX_FDIR_ID if flow tag is not MLX5_FLOW_MARK_DEFAULT. */
		ol_flags = vorrq_u32(ol_flags,
				     vbicq_u32(fdir_id_flags,
					       vceqq_u32(flow_tag, ft_def)));
	}
	/*
	 * ptype_info has the following:
	 * bit[11:10] = l3_hdr_type
	 * bit[14:12] = l4_hdr_type
	 * bit[17] = outer_l3_type
	 */
	ptype = vshrn_n_u32(ptype_info, 10);
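	/*
	 * Added note: vshrn_n_u32() shifts each 32-bit lane right by 10 and
	 * narrows it to 16 bits, so the l3_hdr_type/l4_hdr_type bits above
	 * land in the low bits of each 16-bit lane and can be used below as
	 * an index into mlx5_ptype_table[].
	 */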
	/* Errored packets will have RTE_PTYPE_ALL_MASK. */
	ptype = vorr_u16(ptype, op_err);
	pkts[0]->packet_type =
		mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 6)];
	pkts[1]->packet_type =
		mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 4)];
	pkts[2]->packet_type =
		mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 2)];
	pkts[3]->packet_type =
		mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 0)];
	/* Fill flags for checksum and VLAN. */
	pinfo = vandq_u32(ptype_info, ptype_ol_mask);
	pinfo = vreinterpretq_u32_u8(
		vqtbl1q_u8(cv_flag_sel, vreinterpretq_u8_u32(pinfo)));
	/* Locate checksum flags at byte[2:1] and merge with VLAN flags. */
	cv_flags = vshlq_n_u32(pinfo, 9);
	cv_flags = vorrq_u32(pinfo, cv_flags);
	/* Move back flags to start from byte[0]. */
	cv_flags = vshrq_n_u32(cv_flags, 8);
	/* Mask out garbage bits. */
	cv_flags = vandq_u32(cv_flags, cv_mask);
	/* Merge to ol_flags. */
	ol_flags = vorrq_u32(ol_flags, cv_flags);
	/* Merge mbuf_init and ol_flags, and store. */
	rearm0 = vcombine_u64(mbuf_init,
			      vshr_n_u64(vget_high_u64(vreinterpretq_u64_u32(
						       ol_flags)), 32));
	rearm1 = vcombine_u64(mbuf_init,
			      vand_u64(vget_high_u64(vreinterpretq_u64_u32(
						     ol_flags)), r32_mask));
	rearm2 = vcombine_u64(mbuf_init,
			      vshr_n_u64(vget_low_u64(vreinterpretq_u64_u32(
						      ol_flags)), 32));
	rearm3 = vcombine_u64(mbuf_init,
			      vand_u64(vget_low_u64(vreinterpretq_u64_u32(
						    ol_flags)), r32_mask));
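	/*
	 * Added note: rearm0..rearm3 pair the 8B rearm template (mbuf_init)
	 * with the per-packet 32-bit ol_flags lane; since lanes are kept in
	 * reverse order, the high half of ol_flags feeds packets 0-1 and the
	 * low half feeds packets 2-3, extracted by a 64-bit shift or mask.
	 */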
	vst1q_u64((void *)&pkts[0]->rearm_data, rearm0);
	vst1q_u64((void *)&pkts[1]->rearm_data, rearm1);
	vst1q_u64((void *)&pkts[2]->rearm_data, rearm2);
	vst1q_u64((void *)&pkts[3]->rearm_data, rearm3);
}

/**
 * Receive a burst of packets. An errored completion also consumes an mbuf,
 * but the packet_type is set to RTE_PTYPE_ALL_MASK. Marked mbufs should be
 * freed before returning to the application.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets received including errors (<= pkts_n).
 */
static inline uint16_t
rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	const uint16_t q_n = 1 << rxq->cqe_n;
	const uint16_t q_mask = q_n - 1;
	volatile struct mlx5_cqe *cq;
	struct rte_mbuf **elts;
	unsigned int pos;
	uint64_t n;
	uint16_t repl_n;
	uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP;
	uint16_t nocmp_n = 0;
	uint16_t rcvd_pkt = 0;
	unsigned int cq_idx = rxq->cq_ci & q_mask;
	unsigned int elts_idx;
	const uint16x4_t ownership = vdup_n_u16(!(rxq->cq_ci & (q_mask + 1)));
	const uint16x4_t owner_check = vcreate_u16(0x0001000100010001);
	const uint16x4_t opcode_check = vcreate_u16(0x00f000f000f000f0);
	const uint16x4_t format_check = vcreate_u16(0x000c000c000c000c);
	const uint16x4_t resp_err_check = vcreate_u16(0x00e000e000e000e0);
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint32_t rcvd_byte = 0;
#endif
	/* Mask to generate 16B length vector. */
	const uint8x8_t len_shuf_m = {
		52, 53, /* 4th CQE */
		36, 37, /* 3rd CQE */
		20, 21, /* 2nd CQE */
		 4,  5  /* 1st CQE */
	};
	/* Mask to extract 16B data from a 64B CQE. */
	const uint8x16_t cqe_shuf_m = {
		28, 29,         /* hdr_type_etc */
		 0,             /* pkt_info */
		-1,             /* null */
		47, 46,         /* byte_cnt, bswap16 */
		31, 30,         /* vlan_info, bswap16 */
		15, 14, 13, 12, /* rx_hash_res, bswap32 */
		57, 58, 59,     /* flow_tag */
		63              /* op_own */
	};
	/* Mask to generate 16B data for mbuf. */
	const uint8x16_t mb_shuf_m = {
		 4,  5, -1, -1, /* pkt_len */
		 4,  5,         /* data_len */
		 6,  7,         /* vlan_tci */
		 8,  9, 10, 11, /* hash.rss */
		12, 13, 14, -1  /* hash.fdir.hi */
	};
	/* Mask to generate 16B owner vector. */
	const uint8x8_t owner_shuf_m = {
		63, -1, /* 4th CQE */
		47, -1, /* 3rd CQE */
		31, -1, /* 2nd CQE */
		15, -1  /* 1st CQE */
	};
	/* Mask to generate a vector having packet_type/ol_flags. */
	const uint8x16_t ptype_shuf_m = {
		48, 49, 50, -1, /* 4th CQE */
		32, 33, 34, -1, /* 3rd CQE */
		16, 17, 18, -1, /* 2nd CQE */
		 0,  1,  2, -1  /* 1st CQE */
	};
	/* Mask to generate a vector having flow tags. */
	const uint8x16_t ftag_shuf_m = {
		60, 61, 62, -1, /* 4th CQE */
		44, 45, 46, -1, /* 3rd CQE */
		28, 29, 30, -1, /* 2nd CQE */
		12, 13, 14, -1  /* 1st CQE */
	};
	const uint16x8_t crc_adj = {
		0, 0, rxq->crc_present * ETHER_CRC_LEN, 0, 0, 0, 0, 0
	};
	const uint32x4_t flow_mark_adj = { 0, 0, 0, rxq->mark * (-1) };

	assert(rxq->sges_n == 0);
	assert(rxq->cqe_n == rxq->elts_n);
	cq = &(*rxq->cqes)[cq_idx];
	rte_prefetch_non_temporal(cq);
	rte_prefetch_non_temporal(cq + 1);
	rte_prefetch_non_temporal(cq + 2);
	rte_prefetch_non_temporal(cq + 3);
	pkts_n = RTE_MIN(pkts_n, MLX5_VPMD_RX_MAX_BURST);
	/*
	 * Order of indexes:
	 *   rq_ci >= cq_ci >= rq_pi
	 * Definition of indexes:
	 *   rq_ci - cq_ci := # of buffers owned by HW (posted).
	 *   cq_ci - rq_pi := # of buffers not returned to app (decompressed).
	 *   N - (rq_ci - rq_pi) := # of buffers consumed (to be replenished).
	 */
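	/*
	 * Added example: with N = 256, rq_ci = 500, cq_ci = 490 and
	 * rq_pi = 480, 10 buffers are posted to HW, 10 decompressed buffers
	 * are still to be returned to the app, and 256 - 20 = 236 ring slots
	 * are eligible for replenishment below.
	 */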
	repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
	if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH)
		mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
	/* See if there are any unreturned mbufs from a compressed CQE. */
	rcvd_pkt = rxq->cq_ci - rxq->rq_pi;
	if (rcvd_pkt > 0) {
		rcvd_pkt = RTE_MIN(rcvd_pkt, pkts_n);
		rxq_copy_mbuf_v(rxq, pkts, rcvd_pkt);
		rxq->rq_pi += rcvd_pkt;
		pkts += rcvd_pkt;
	}
	elts_idx = rxq->rq_pi & q_mask;
	elts = &(*rxq->elts)[elts_idx];
	/* Do not overflow the pkts array. */
	pkts_n = RTE_ALIGN_FLOOR(pkts_n - rcvd_pkt, MLX5_VPMD_DESCS_PER_LOOP);
	/* Do not cross the queue end. */
	pkts_n = RTE_MIN(pkts_n, q_n - elts_idx);
	if (!pkts_n)
		return rcvd_pkt;
	/* At this point, there shouldn't be any remaining packets. */
	assert(rxq->rq_pi == rxq->cq_ci);
	/*
	 * Note that vectors have reverse order - {v3, v2, v1, v0}, because
	 * there's no instruction to count trailing zeros. __builtin_clzl() is
	 * used instead.
	 *
	 * A. copy 4 mbuf pointers from elts ring to returning pkts.
	 * B. load 64B CQE and extract necessary fields
	 *    Final 16bytes cqes[] extracted from original 64bytes CQE has the
	 *    following structure:
	 *        struct {
	 *                uint16_t hdr_type_etc;
	 *                uint8_t  pkt_info;
	 *                uint8_t  rsvd;
	 *                uint16_t byte_cnt;
	 *                uint16_t vlan_info;
	 *                uint32_t rx_hash_res;
	 *                uint8_t  flow_tag[3];
	 *                uint8_t  op_own;
	 *        } c;
	 * C. fill in mbuf.
	 * D. extract op_own field.
	 * E. find compressed CQE.
	 */
	for (pos = 0;
	     pos < pkts_n;
	     pos += MLX5_VPMD_DESCS_PER_LOOP) {
		uint16x4_t op_own;
		uint16x4_t opcode, owner_mask, invalid_mask;
		uint16x4_t comp_mask;
		uint16x4_t mask;
		uint16x4_t byte_cnt;
		uint32x4_t ptype_info, flow_tag;
		uint8_t *p0, *p1, *p2, *p3;
		uint8_t *e0 = (void *)&elts[pos]->pkt_len;
		uint8_t *e1 = (void *)&elts[pos + 1]->pkt_len;
		uint8_t *e2 = (void *)&elts[pos + 2]->pkt_len;
		uint8_t *e3 = (void *)&elts[pos + 3]->pkt_len;
		void *elts_p = (void *)&elts[pos];
		void *pkts_p = (void *)&pkts[pos];

		/* A.0 do not cross the end of CQ. */
		mask = vcreate_u16(pkts_n - pos < MLX5_VPMD_DESCS_PER_LOOP ?
				   -1UL >> ((pkts_n - pos) *
					    sizeof(uint16_t) * 8) : 0);
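		/*
		 * Added example: lanes are kept in reverse CQE order, so if
		 * only two CQEs remain (pkts_n - pos == 2) the value
		 * -1UL >> 32 sets the two low 16-bit lanes, which stand for
		 * the non-existent 3rd and 4th CQEs and are folded into
		 * invalid_mask below.
		 */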
		p0 = (void *)&cq[pos].pkt_info;
		p1 = p0 + (pkts_n - pos > 1) * sizeof(struct mlx5_cqe);
		p2 = p1 + (pkts_n - pos > 2) * sizeof(struct mlx5_cqe);
		p3 = p2 + (pkts_n - pos > 3) * sizeof(struct mlx5_cqe);
		/* Prefetch next 4 CQEs. */
		if (pkts_n - pos >= 2 * MLX5_VPMD_DESCS_PER_LOOP) {
			unsigned int next = pos + MLX5_VPMD_DESCS_PER_LOOP;
			rte_prefetch_non_temporal(&cq[next]);
			rte_prefetch_non_temporal(&cq[next + 1]);
			rte_prefetch_non_temporal(&cq[next + 2]);
			rte_prefetch_non_temporal(&cq[next + 3]);
		}
		__asm__ volatile (
		/* B.1 (CQE 3) load a block having op_own. */
		"ld1 {v19.16b}, [%[p3]] \n\t"
		"sub %[p3], %[p3], #48 \n\t"
		/* B.2 (CQE 3) load the rest blocks. */
		"ld1 {v16.16b - v18.16b}, [%[p3]] \n\t"
		/* B.3 (CQE 3) extract 16B fields. */
		"tbl v23.16b, {v16.16b - v19.16b}, %[cqe_shuf_m].16b \n\t"
		/* B.4 (CQE 3) adjust CRC length. */
		"sub v23.8h, v23.8h, %[crc_adj].8h \n\t"
		/* B.1 (CQE 2) load a block having op_own. */
		"ld1 {v19.16b}, [%[p2]] \n\t"
		"sub %[p2], %[p2], #48 \n\t"
		/* C.1 (CQE 3) generate final structure for mbuf. */
		"tbl v15.16b, {v23.16b}, %[mb_shuf_m].16b \n\t"
		/* B.2 (CQE 2) load the rest blocks. */
		"ld1 {v16.16b - v18.16b}, [%[p2]] \n\t"
		/* B.3 (CQE 2) extract 16B fields. */
		"tbl v22.16b, {v16.16b - v19.16b}, %[cqe_shuf_m].16b \n\t"
		/* B.4 (CQE 2) adjust CRC length. */
		"sub v22.8h, v22.8h, %[crc_adj].8h \n\t"
		/* B.1 (CQE 1) load a block having op_own. */
		"ld1 {v19.16b}, [%[p1]] \n\t"
		"sub %[p1], %[p1], #48 \n\t"
		/* C.1 (CQE 2) generate final structure for mbuf. */
		"tbl v14.16b, {v22.16b}, %[mb_shuf_m].16b \n\t"
		/* B.2 (CQE 1) load the rest blocks. */
		"ld1 {v16.16b - v18.16b}, [%[p1]] \n\t"
		/* B.3 (CQE 1) extract 16B fields. */
		"tbl v21.16b, {v16.16b - v19.16b}, %[cqe_shuf_m].16b \n\t"
		/* B.4 (CQE 1) adjust CRC length. */
		"sub v21.8h, v21.8h, %[crc_adj].8h \n\t"
		/* B.1 (CQE 0) load a block having op_own. */
		"ld1 {v19.16b}, [%[p0]] \n\t"
		"sub %[p0], %[p0], #48 \n\t"
		/* C.1 (CQE 1) generate final structure for mbuf. */
		"tbl v13.16b, {v21.16b}, %[mb_shuf_m].16b \n\t"
		/* B.2 (CQE 0) load the rest blocks. */
		"ld1 {v16.16b - v18.16b}, [%[p0]] \n\t"
		/* B.3 (CQE 0) extract 16B fields. */
		"tbl v20.16b, {v16.16b - v19.16b}, %[cqe_shuf_m].16b \n\t"
		/* B.4 (CQE 0) adjust CRC length. */
		"sub v20.8h, v20.8h, %[crc_adj].8h \n\t"
		/* A.1 load mbuf pointers. */
		"ld1 {v24.2d - v25.2d}, [%[elts_p]] \n\t"
		/* D.1 extract op_own byte. */
		"tbl %[op_own].8b, {v20.16b - v23.16b}, %[owner_shuf_m].8b \n\t"
		/* C.2 (CQE 3) adjust flow mark. */
		"add v15.4s, v15.4s, %[flow_mark_adj].4s \n\t"
		/* C.3 (CQE 3) fill in mbuf - rx_descriptor_fields1. */
		"st1 {v15.2d}, [%[e3]] \n\t"
		/* C.2 (CQE 2) adjust flow mark. */
		"add v14.4s, v14.4s, %[flow_mark_adj].4s \n\t"
		/* C.3 (CQE 2) fill in mbuf - rx_descriptor_fields1. */
		"st1 {v14.2d}, [%[e2]] \n\t"
		/* C.1 (CQE 0) generate final structure for mbuf. */
		"tbl v12.16b, {v20.16b}, %[mb_shuf_m].16b \n\t"
		/* C.2 (CQE 1) adjust flow mark. */
		"add v13.4s, v13.4s, %[flow_mark_adj].4s \n\t"
		/* C.3 (CQE 1) fill in mbuf - rx_descriptor_fields1. */
		"st1 {v13.2d}, [%[e1]] \n\t"
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Extract byte_cnt. */
		"tbl %[byte_cnt].8b, {v20.16b - v23.16b}, %[len_shuf_m].8b \n\t"
#endif
		/* Extract ptype_info. */
		"tbl %[ptype_info].16b, {v20.16b - v23.16b}, %[ptype_shuf_m].16b \n\t"
		/* Extract flow_tag. */
		"tbl %[flow_tag].16b, {v20.16b - v23.16b}, %[ftag_shuf_m].16b \n\t"
		/* A.2 copy mbuf pointers. */
		"st1 {v24.2d - v25.2d}, [%[pkts_p]] \n\t"
		/* C.2 (CQE 0) adjust flow mark. */
		"add v12.4s, v12.4s, %[flow_mark_adj].4s \n\t"
		/* C.3 (CQE 0) fill in mbuf - rx_descriptor_fields1. */
		"st1 {v12.2d}, [%[e0]] \n\t"
		:[op_own]"=&w"(op_own),
		 [byte_cnt]"=&w"(byte_cnt),
		 [ptype_info]"=&w"(ptype_info),
		 [flow_tag]"=&w"(flow_tag)
		:[p3]"r"(p3 + 48), [p2]"r"(p2 + 48),
		 [p1]"r"(p1 + 48), [p0]"r"(p0 + 48),
		 [e3]"r"(e3), [e2]"r"(e2), [e1]"r"(e1), [e0]"r"(e0),
		 [elts_p]"r"(elts_p),
		 [pkts_p]"r"(pkts_p),
		 [cqe_shuf_m]"w"(cqe_shuf_m),
		 [mb_shuf_m]"w"(mb_shuf_m),
		 [owner_shuf_m]"w"(owner_shuf_m),
		 [len_shuf_m]"w"(len_shuf_m),
		 [ptype_shuf_m]"w"(ptype_shuf_m),
		 [ftag_shuf_m]"w"(ftag_shuf_m),
		 [crc_adj]"w"(crc_adj),
		 [flow_mark_adj]"w"(flow_mark_adj)
		:"memory",
		 "v12", "v13", "v14", "v15",
		 "v16", "v17", "v18", "v19",
		 "v20", "v21", "v22", "v23",
		 "v24", "v25");
		/* D.2 flip owner bit to mark CQEs from last round. */
		owner_mask = vand_u16(op_own, owner_check);
		owner_mask = vceq_u16(owner_mask, ownership);
		/* D.3 get mask for invalidated CQEs. */
		opcode = vand_u16(op_own, opcode_check);
		invalid_mask = vceq_u16(opcode_check, opcode);
		/* E.1 find compressed CQE format. */
		comp_mask = vand_u16(op_own, format_check);
		comp_mask = vceq_u16(comp_mask, format_check);
		/* D.4 mask out beyond boundary. */
		invalid_mask = vorr_u16(invalid_mask, mask);
		/* D.5 merge invalid_mask with invalid owner. */
		invalid_mask = vorr_u16(invalid_mask, owner_mask);
		/* E.2 mask out invalid entries. */
		comp_mask = vbic_u16(comp_mask, invalid_mask);
		/* E.3 get the first compressed CQE. */
		comp_idx = __builtin_clzl(vget_lane_u64(vreinterpret_u64_u16(
					  comp_mask), 0)) /
					  (sizeof(uint16_t) * 8);
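		/*
		 * Added note: because lanes are stored in reverse CQE order,
		 * counting leading zeros of the 64-bit comp_mask and dividing
		 * by 16 yields the index (0..3) of the first compressed CQE,
		 * and effectively MLX5_VPMD_DESCS_PER_LOOP when no lane is
		 * set.
		 */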
		/* D.6 mask out entries after the compressed CQE. */
		mask = vcreate_u16(comp_idx < MLX5_VPMD_DESCS_PER_LOOP ?
				   -1UL >> (comp_idx * sizeof(uint16_t) * 8) :
				   0);
		invalid_mask = vorr_u16(invalid_mask, mask);
		/* D.7 count non-compressed valid CQEs. */
		n = __builtin_clzl(vget_lane_u64(vreinterpret_u64_u16(
				   invalid_mask), 0)) / (sizeof(uint16_t) * 8);
		nocmp_n += n;
		/* D.2 get the final invalid mask. */
		mask = vcreate_u16(n < MLX5_VPMD_DESCS_PER_LOOP ?
				   -1UL >> (n * sizeof(uint16_t) * 8) : 0);
		invalid_mask = vorr_u16(invalid_mask, mask);
		/* D.3 check error in opcode. */
		opcode = vceq_u16(resp_err_check, opcode);
		opcode = vbic_u16(opcode, invalid_mask);
		/* D.4 mark if any error is set */
		rxq->pending_err |=
			!!vget_lane_u64(vreinterpret_u64_u16(opcode), 0);
		/* C.4 fill in mbuf - rearm_data and packet_type. */
		rxq_cq_to_ptype_oflags_v(rxq, ptype_info, flow_tag,
					 opcode, &elts[pos]);
		if (rxq->hw_timestamp) {
			elts[pos]->timestamp =
				rte_be_to_cpu_64(
					container_of(p0, struct mlx5_cqe,
						     pkt_info)->timestamp);
			elts[pos + 1]->timestamp =
				rte_be_to_cpu_64(
					container_of(p1, struct mlx5_cqe,
						     pkt_info)->timestamp);
			elts[pos + 2]->timestamp =
				rte_be_to_cpu_64(
					container_of(p2, struct mlx5_cqe,
						     pkt_info)->timestamp);
			elts[pos + 3]->timestamp =
				rte_be_to_cpu_64(
					container_of(p3, struct mlx5_cqe,
						     pkt_info)->timestamp);
		}
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Add up received bytes count. */
		byte_cnt = vbic_u16(byte_cnt, invalid_mask);
		rcvd_byte += vget_lane_u64(vpaddl_u32(vpaddl_u16(byte_cnt)), 0);
#endif
		/*
		 * Break the loop unless more valid CQEs are expected, or if
		 * there's a compressed CQE.
		 */
		if (n != MLX5_VPMD_DESCS_PER_LOOP)
			break;
	}
	/* If no new CQE is seen, return without updating cq_db. */
	if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP))
		return rcvd_pkt;
	/* Update the consumer indexes for non-compressed CQEs. */
	assert(nocmp_n <= pkts_n);
	rxq->cq_ci += nocmp_n;
	rxq->rq_pi += nocmp_n;
	rcvd_pkt += nocmp_n;
#ifdef MLX5_PMD_SOFT_COUNTERS
	rxq->stats.ipackets += nocmp_n;
	rxq->stats.ibytes += rcvd_byte;
#endif
	/* Decompress the last CQE if compressed. */
	if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n) {
		assert(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
		rxq_cq_decompress_v(rxq, &cq[nocmp_n], &elts[nocmp_n]);
		/* Return more packets if needed. */
		if (nocmp_n < pkts_n) {
			uint16_t n = rxq->cq_ci - rxq->rq_pi;

			n = RTE_MIN(n, pkts_n - nocmp_n);
			rxq_copy_mbuf_v(rxq, &pkts[nocmp_n], n);
			rxq->rq_pi += n;
			rcvd_pkt += n;
		}
	}
	rte_compiler_barrier();
	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
	return rcvd_pkt;
}

#endif /* RTE_PMD_MLX5_RXTX_VEC_NEON_H_ */