/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef RTE_PMD_MLX5_RXTX_VEC_SSE_H_
#define RTE_PMD_MLX5_RXTX_VEC_SSE_H_

#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include <smmintrin.h>

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>

#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rxtx_vec.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"
#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
/**
 * Fill in buffer descriptors in a multi-packet send descriptor.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param dseg
 *   Pointer to buffer descriptor to be written.
 * @param pkts
 *   Pointer to array of packets to be sent.
 * @param n
 *   Number of packets to be filled.
 */
static inline void
txq_wr_dseg_v(struct mlx5_txq_data *txq, __m128i *dseg,
	      struct rte_mbuf **pkts, unsigned int n)
{
	unsigned int pos;
	uintptr_t addr;
	const __m128i shuf_mask_dseg =
		_mm_set_epi8(8,  9, 10, 11, /* addr, bswap64 */
			    12, 13, 14, 15,
			     7,  6,  5,  4, /* lkey */
			     0,  1,  2,  3  /* length, bswap32 */);
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint32_t tx_byte = 0;
#endif

	for (pos = 0; pos < n; ++pos, ++dseg) {
		__m128i desc;
		struct rte_mbuf *pkt = pkts[pos];

		addr = rte_pktmbuf_mtod(pkt, uintptr_t);
		desc = _mm_set_epi32(addr >> 32,
				     addr,
				     mlx5_tx_mb2mr(txq, pkt),
				     DATA_LEN(pkt));
		desc = _mm_shuffle_epi8(desc, shuf_mask_dseg);
		_mm_store_si128(dseg, desc);
#ifdef MLX5_PMD_SOFT_COUNTERS
		tx_byte += DATA_LEN(pkt);
#endif
	}
#ifdef MLX5_PMD_SOFT_COUNTERS
	txq->stats.obytes += tx_byte;
#endif
}
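
/*
 * Illustrative scalar equivalent of txq_wr_dseg_v() for a single packet
 * (hypothetical helper, not used by the driver): the vector shuffle above
 * yields the same big-endian MLX5 data segment layout with one 16B store.
 * Note that mlx5_tx_mb2mr() already returns the lkey in network byte order,
 * which is why shuf_mask_dseg leaves those four bytes unswapped.
 */
static inline void
txq_wr_dseg_scalar_sketch(struct mlx5_txq_data *txq,
			  volatile struct mlx5_wqe_data_seg *dseg,
			  struct rte_mbuf *pkt)
{
	dseg->byte_count = rte_cpu_to_be_32(DATA_LEN(pkt));
	dseg->lkey = mlx5_tx_mb2mr(txq, pkt); /* Already big-endian. */
	dseg->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(pkt, uintptr_t));
}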

/**
 * Send multi-segmented packets until a single-segment packet is encountered
 * in the pkts list.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param pkts
 *   Pointer to array of packets to be sent.
 * @param pkts_n
 *   Number of packets to be sent.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
static inline uint16_t
txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
	      uint16_t pkts_n)
{
	uint16_t elts_head = txq->elts_head;
	const uint16_t elts_n = 1 << txq->elts_n;
	const uint16_t elts_m = elts_n - 1;
	const uint16_t wq_n = 1 << txq->wqe_n;
	const uint16_t wq_mask = wq_n - 1;
	const unsigned int nb_dword_per_wqebb =
		MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE;
	const unsigned int nb_dword_in_hdr =
		sizeof(struct mlx5_wqe) / MLX5_WQE_DWORD_SIZE;
	unsigned int n;
	volatile struct mlx5_wqe *wqe = NULL;

	assert(elts_n > pkts_n);
	mlx5_tx_complete(txq);
	if (unlikely(!pkts_n))
		return 0;
	for (n = 0; n < pkts_n; ++n) {
		struct rte_mbuf *buf = pkts[n];
		unsigned int segs_n = buf->nb_segs;
		unsigned int ds = nb_dword_in_hdr;
		unsigned int len = PKT_LEN(buf);
		uint16_t wqe_ci = txq->wqe_ci;
		const __m128i shuf_mask_ctrl =
			_mm_set_epi8(15, 14, 13, 12,
				      8,  9, 10, 11, /* bswap32 */
				      4,  5,  6,  7, /* bswap32 */
				      0,  1,  2,  3  /* bswap32 */);
		uint8_t cs_flags = 0;
		uint16_t max_elts;
		uint16_t max_wqe;
		__m128i *t_wqe, *dseg;
		__m128i ctrl;

		assert(segs_n);
		max_elts = elts_n - (elts_head - txq->elts_tail);
		max_wqe = wq_n - (txq->wqe_ci - txq->wqe_pi);
		/*
		 * A MPW session consumes 2 WQEs at most to
		 * include MLX5_MPW_DSEG_MAX pointers.
		 */
		if (segs_n == 1 ||
		    max_elts < segs_n || max_wqe < 2)
			break;
		if (segs_n > MLX5_MPW_DSEG_MAX) {
			txq->stats.oerrors++;
			break;
		}
		wqe = &((volatile struct mlx5_wqe64 *)
			 txq->wqes)[wqe_ci & wq_mask].hdr;
		if (buf->ol_flags &
		     (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
			const uint64_t is_tunneled =
				buf->ol_flags & (PKT_TX_TUNNEL_GRE |
						 PKT_TX_TUNNEL_VXLAN);

			if (is_tunneled && txq->tunnel_en) {
				cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
					   MLX5_ETH_WQE_L4_INNER_CSUM;
				if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
					cs_flags |= MLX5_ETH_WQE_L3_CSUM;
			} else {
				cs_flags = MLX5_ETH_WQE_L3_CSUM |
					   MLX5_ETH_WQE_L4_CSUM;
			}
		}
		/* Title WQEBB pointer. */
		t_wqe = (__m128i *)wqe;
		dseg = (__m128i *)(wqe + 1);
		do {
			if (!(ds++ % nb_dword_per_wqebb)) {
				dseg = (__m128i *)
					&((volatile struct mlx5_wqe64 *)
					   txq->wqes)[++wqe_ci & wq_mask];
			}
			txq_wr_dseg_v(txq, dseg++, &buf, 1);
			(*txq->elts)[elts_head++ & elts_m] = buf;
			buf = buf->next;
		} while (--segs_n);
		++wqe_ci;
		/* Fill CTRL in the header. */
		ctrl = _mm_set_epi32(0, 0, txq->qp_num_8s | ds,
				     MLX5_OPC_MOD_MPW << 24 |
				     txq->wqe_ci << 8 | MLX5_OPCODE_TSO);
		ctrl = _mm_shuffle_epi8(ctrl, shuf_mask_ctrl);
		_mm_store_si128(t_wqe, ctrl);
		/* Fill ESEG in the header. */
		_mm_store_si128(t_wqe + 1,
				_mm_set_epi16(0, 0, 0, 0,
					      rte_cpu_to_be_16(len), cs_flags,
					      0, 0));
		txq->wqe_ci = wqe_ci;
	}
	if (!n)
		return 0;
	txq->elts_comp += (uint16_t)(elts_head - txq->elts_head);
	txq->elts_head = elts_head;
	if (txq->elts_comp >= MLX5_TX_COMP_THRESH) {
		wqe->ctrl[2] = rte_cpu_to_be_32(8);
		wqe->ctrl[3] = txq->elts_head;
		txq->elts_comp = 0;
	}
#ifdef MLX5_PMD_SOFT_COUNTERS
	txq->stats.opackets += n;
#endif
	mlx5_tx_dbrec(txq, wqe);
	return n;
}
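
/*
 * Note on the completion logic above: a CQE is requested at most once per
 * MLX5_TX_COMP_THRESH accumulated mbufs by patching the last control
 * segment; ctrl[2] = BE(8) sets the CQ-update (completion requested) flag
 * and ctrl[3] carries elts_head, which mlx5_tx_complete() later uses as the
 * new elts_tail to know how many mbufs can be freed.
 */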

/**
 * Send a burst of packets with Enhanced MPW. If a multi-segment packet is
 * encountered, it returns so that the packet can be processed by
 * txq_scatter_v(). All packets in the pkts list must be single-segment
 * packets with the same offload flags, which is checked beforehand by
 * txq_check_multiseg() and txq_calc_offload().
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param pkts
 *   Pointer to array of packets to be sent.
 * @param pkts_n
 *   Number of packets to be sent (<= MLX5_VPMD_TX_MAX_BURST).
 * @param cs_flags
 *   Checksum offload flags to be written in the descriptor.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
static inline uint16_t
txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
	    uint8_t cs_flags)
{
	struct rte_mbuf **elts;
	uint16_t elts_head = txq->elts_head;
	const uint16_t elts_n = 1 << txq->elts_n;
	const uint16_t elts_m = elts_n - 1;
	const unsigned int nb_dword_per_wqebb =
		MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE;
	const unsigned int nb_dword_in_hdr =
		sizeof(struct mlx5_wqe) / MLX5_WQE_DWORD_SIZE;
	unsigned int n = 0;
	unsigned int pos;
	uint16_t max_elts;
	uint16_t max_wqe;
	uint32_t comp_req = 0;
	const uint16_t wq_n = 1 << txq->wqe_n;
	const uint16_t wq_mask = wq_n - 1;
	uint16_t wq_idx = txq->wqe_ci & wq_mask;
	volatile struct mlx5_wqe64 *wq =
		&((volatile struct mlx5_wqe64 *)txq->wqes)[wq_idx];
	volatile struct mlx5_wqe *wqe = (volatile struct mlx5_wqe *)wq;
	const __m128i shuf_mask_ctrl =
		_mm_set_epi8(15, 14, 13, 12,
			      8,  9, 10, 11, /* bswap32 */
			      4,  5,  6,  7, /* bswap32 */
			      0,  1,  2,  3  /* bswap32 */);
	__m128i *t_wqe, *dseg;
	__m128i ctrl;

	/* Make sure all packets can fit into a single WQE. */
	assert(elts_n > pkts_n);
	mlx5_tx_complete(txq);
	max_elts = (elts_n - (elts_head - txq->elts_tail));
	max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
	pkts_n = RTE_MIN((unsigned int)RTE_MIN(pkts_n, max_wqe), max_elts);
	assert(pkts_n <= MLX5_DSEG_MAX - nb_dword_in_hdr);
	if (unlikely(!pkts_n))
		return 0;
	elts = &(*txq->elts)[elts_head & elts_m];
	/* Loop for available tailroom first. */
	n = RTE_MIN(elts_n - (elts_head & elts_m), pkts_n);
	for (pos = 0; pos < (n & -2); pos += 2)
		_mm_storeu_si128((__m128i *)&elts[pos],
				 _mm_loadu_si128((__m128i *)&pkts[pos]));
	if (n & 1)
		elts[pos] = pkts[pos];
	/* Check if it crosses the end of the queue. */
	if (unlikely(n < pkts_n)) {
		elts = &(*txq->elts)[0];
		for (pos = 0; pos < pkts_n - n; ++pos)
			elts[pos] = pkts[n + pos];
	}
	txq->elts_head += pkts_n;
	/* Save title WQEBB pointer. */
	t_wqe = (__m128i *)wqe;
	dseg = (__m128i *)(wqe + 1);
	/* Calculate the number of entries to the end. */
	n = RTE_MIN(
		(wq_n - wq_idx) * nb_dword_per_wqebb - nb_dword_in_hdr,
		pkts_n);
	/* Fill DSEGs. */
	txq_wr_dseg_v(txq, dseg, pkts, n);
	/* Check if it crosses the end of the queue. */
	if (n < pkts_n) {
		dseg = (__m128i *)txq->wqes;
		txq_wr_dseg_v(txq, dseg, &pkts[n], pkts_n - n);
	}
	if (txq->elts_comp + pkts_n < MLX5_TX_COMP_THRESH) {
		txq->elts_comp += pkts_n;
	} else {
		/* Request a completion. */
		txq->elts_comp = 0;
		comp_req = 8;
	}
	/* Fill CTRL in the header. */
	ctrl = _mm_set_epi32(txq->elts_head, comp_req,
			     txq->qp_num_8s | (pkts_n + 2),
			     MLX5_OPC_MOD_ENHANCED_MPSW << 24 |
			     txq->wqe_ci << 8 | MLX5_OPCODE_ENHANCED_MPSW);
	ctrl = _mm_shuffle_epi8(ctrl, shuf_mask_ctrl);
	_mm_store_si128(t_wqe, ctrl);
	/* Fill ESEG in the header. */
	_mm_store_si128(t_wqe + 1,
			_mm_set_epi8(0, 0, 0, 0,
				     0, 0, 0, 0,
				     0, 0, 0, cs_flags,
				     0, 0, 0, 0));
#ifdef MLX5_PMD_SOFT_COUNTERS
	txq->stats.opackets += pkts_n;
#endif
	txq->wqe_ci += (nb_dword_in_hdr + pkts_n + (nb_dword_per_wqebb - 1)) /
		       nb_dword_per_wqebb;
	/* Ring QP doorbell. */
	mlx5_tx_dbrec(txq, wqe);
	return pkts_n;
}
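
/*
 * The Enhanced MPW WQE built above is one header (control + ethernet
 * segments) followed by a 16B data segment per packet, hence the
 * (pkts_n + 2) DWORD count advertised in the control segment and the
 * rounded-up WQEBB consumption added to wqe_ci right before ringing the
 * doorbell.
 */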

/**
 * Copy mbufs from the RX SW ring to the burst of packets to be returned.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param pkts
 *   Pointer to array of packets to be filled.
 * @param n
 *   Number of packets to be copied.
 */
static inline void
rxq_copy_mbuf_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t n)
{
	const uint16_t q_mask = (1 << rxq->elts_n) - 1;
	struct rte_mbuf **elts = &(*rxq->elts)[rxq->rq_pi & q_mask];
	unsigned int pos;
	uint16_t p = n & -2;

	for (pos = 0; pos < p; pos += 2) {
		__m128i mbp;

		mbp = _mm_loadu_si128((__m128i *)&elts[pos]);
		_mm_storeu_si128((__m128i *)&pkts[pos], mbp);
	}
	if (n & 1)
		pkts[pos] = elts[pos];
}
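
/*
 * The copy loop above moves mbuf pointers two at a time with unaligned 16B
 * vector loads/stores (two 8B pointers per __m128i); an odd trailing
 * pointer is copied by the scalar tail.
 */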

/**
 * Decompress a compressed completion and fill in the mbufs in the RX SW ring
 * with data extracted from the title completion descriptor.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param cq
 *   Pointer to completion array having a compressed completion at first.
 * @param elts
 *   Pointer to SW ring to be filled. The first mbuf has to be pre-built from
 *   the title completion descriptor and is copied to the rest of the mbufs.
 */
static inline void
rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
		    struct rte_mbuf **elts)
{
	volatile struct mlx5_mini_cqe8 *mcq = (void *)(cq + 1);
	struct rte_mbuf *t_pkt = elts[0]; /* Title packet is pre-built. */
	unsigned int pos;
	unsigned int i;
	unsigned int inv = 0;
	/* Mask to shuffle from extracted mini CQE to mbuf. */
	const __m128i shuf_mask1 =
		_mm_set_epi8(0,  1,  2,  3, /* rss, bswap32 */
			    -1, -1,         /* skip vlan_tci */
			     6,  7,         /* data_len, bswap16 */
			    -1, -1,  6,  7, /* pkt_len, bswap16 */
			    -1, -1, -1, -1  /* skip packet_type */);
	const __m128i shuf_mask2 =
		_mm_set_epi8(8,  9, 10, 11, /* rss, bswap32 */
			    -1, -1,         /* skip vlan_tci */
			    14, 15,         /* data_len, bswap16 */
			    -1, -1, 14, 15, /* pkt_len, bswap16 */
			    -1, -1, -1, -1  /* skip packet_type */);
	/* Restore the compressed count. Must be 16 bits. */
	const uint16_t mcqe_n = t_pkt->data_len +
				(rxq->crc_present * ETHER_CRC_LEN);
	const __m128i rearm =
		_mm_loadu_si128((__m128i *)&t_pkt->rearm_data);
	const __m128i rxdf =
		_mm_loadu_si128((__m128i *)&t_pkt->rx_descriptor_fields1);
	const __m128i crc_adj =
		_mm_set_epi16(0, 0, 0,
			      rxq->crc_present * ETHER_CRC_LEN,
			      0,
			      rxq->crc_present * ETHER_CRC_LEN,
			      0, 0);
	const uint32_t flow_tag = t_pkt->hash.fdir.hi;
#ifdef MLX5_PMD_SOFT_COUNTERS
	const __m128i zero = _mm_setzero_si128();
	const __m128i ones = _mm_cmpeq_epi32(zero, zero);
	uint32_t rcvd_byte = 0;
	/* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */
	const __m128i len_shuf_mask =
		_mm_set_epi8(-1, -1, -1, -1,
			     -1, -1, -1, -1,
			     14, 15,  6,  7,
			     10, 11,  2,  3);
#endif

	/*
	 * Not to overflow the elts array: decompress next time, once the
	 * mbufs about to be overwritten have been returned.
	 */
	if (unlikely(mcqe_n + MLX5_VPMD_DESCS_PER_LOOP >
		     (uint16_t)(rxq->rq_ci - rxq->cq_ci)))
		return;
	/*
	 * A. load mCQEs into a 128bit register.
	 * B. store rearm data to mbuf.
	 * C. combine data from mCQEs with rx_descriptor_fields1.
	 * D. store rx_descriptor_fields1.
	 * E. store flow tag (rte_flow mark).
	 */
	for (pos = 0; pos < mcqe_n; ) {
		__m128i mcqe1, mcqe2;
		__m128i rxdf1, rxdf2;
#ifdef MLX5_PMD_SOFT_COUNTERS
		__m128i byte_cnt, invalid_mask;
#endif

		if (!(pos & 0x7) && pos + 8 < mcqe_n)
			rte_prefetch0((void *)(cq + pos + 8));
		/* A.1 load mCQEs into a 128bit register. */
		mcqe1 = _mm_loadu_si128((__m128i *)&mcq[pos % 8]);
		mcqe2 = _mm_loadu_si128((__m128i *)&mcq[pos % 8 + 2]);
		/* B.1 store rearm data to mbuf. */
		_mm_storeu_si128((__m128i *)&elts[pos]->rearm_data, rearm);
		_mm_storeu_si128((__m128i *)&elts[pos + 1]->rearm_data, rearm);
		/* C.1 combine data from mCQEs with rx_descriptor_fields1. */
		rxdf1 = _mm_shuffle_epi8(mcqe1, shuf_mask1);
		rxdf2 = _mm_shuffle_epi8(mcqe1, shuf_mask2);
		rxdf1 = _mm_sub_epi16(rxdf1, crc_adj);
		rxdf2 = _mm_sub_epi16(rxdf2, crc_adj);
		rxdf1 = _mm_blend_epi16(rxdf1, rxdf, 0x23);
		rxdf2 = _mm_blend_epi16(rxdf2, rxdf, 0x23);
		/* D.1 store rx_descriptor_fields1. */
		_mm_storeu_si128((__m128i *)
				 &elts[pos]->rx_descriptor_fields1,
				 rxdf1);
		_mm_storeu_si128((__m128i *)
				 &elts[pos + 1]->rx_descriptor_fields1,
				 rxdf2);
		/* B.1 store rearm data to mbuf. */
		_mm_storeu_si128((__m128i *)&elts[pos + 2]->rearm_data, rearm);
		_mm_storeu_si128((__m128i *)&elts[pos + 3]->rearm_data, rearm);
		/* C.1 combine data from mCQEs with rx_descriptor_fields1. */
		rxdf1 = _mm_shuffle_epi8(mcqe2, shuf_mask1);
		rxdf2 = _mm_shuffle_epi8(mcqe2, shuf_mask2);
		rxdf1 = _mm_sub_epi16(rxdf1, crc_adj);
		rxdf2 = _mm_sub_epi16(rxdf2, crc_adj);
		rxdf1 = _mm_blend_epi16(rxdf1, rxdf, 0x23);
		rxdf2 = _mm_blend_epi16(rxdf2, rxdf, 0x23);
		/* D.1 store rx_descriptor_fields1. */
		_mm_storeu_si128((__m128i *)
				 &elts[pos + 2]->rx_descriptor_fields1,
				 rxdf1);
		_mm_storeu_si128((__m128i *)
				 &elts[pos + 3]->rx_descriptor_fields1,
				 rxdf2);
#ifdef MLX5_PMD_SOFT_COUNTERS
		invalid_mask = _mm_set_epi64x(0,
					      (mcqe_n - pos) *
					      sizeof(uint16_t) * 8);
		invalid_mask = _mm_sll_epi64(ones, invalid_mask);
		mcqe1 = _mm_srli_si128(mcqe1, 4);
		byte_cnt = _mm_blend_epi16(mcqe1, mcqe2, 0xcc);
		byte_cnt = _mm_shuffle_epi8(byte_cnt, len_shuf_mask);
		byte_cnt = _mm_andnot_si128(invalid_mask, byte_cnt);
		byte_cnt = _mm_hadd_epi16(byte_cnt, zero);
		rcvd_byte += _mm_cvtsi128_si64(_mm_hadd_epi16(byte_cnt, zero));
#endif

		/* E.1 store flow tag (rte_flow mark). */
		elts[pos]->hash.fdir.hi = flow_tag;
		elts[pos + 1]->hash.fdir.hi = flow_tag;
		elts[pos + 2]->hash.fdir.hi = flow_tag;
		elts[pos + 3]->hash.fdir.hi = flow_tag;
		pos += MLX5_VPMD_DESCS_PER_LOOP;
		/* Move to next CQE and invalidate consumed CQEs. */
		if (!(pos & 0x7) && pos < mcqe_n) {
			mcq = (void *)(cq + pos);
			for (i = 0; i < 8; ++i)
				cq[inv++].op_own = MLX5_CQE_INVALIDATE;
		}
	}
	/* Invalidate the rest of CQEs. */
	for (; inv < mcqe_n; ++inv)
		cq[inv].op_own = MLX5_CQE_INVALIDATE;
#ifdef MLX5_PMD_SOFT_COUNTERS
	rxq->stats.ipackets += mcqe_n;
	rxq->stats.ibytes += rcvd_byte;
#endif
	rxq->cq_ci += mcqe_n;
}
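
/*
 * In a compressed session the title CQE (already turned into t_pkt by the
 * caller) carries the fields common to every packet, while each mini CQE
 * supplies only the per-packet byte count and RSS hash used here. The
 * decompression therefore replays the title packet's rearm data, flow tag
 * and rx_descriptor_fields1 into every mbuf and patches in the per-packet
 * values shuffled out of the mini CQE array.
 */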

/**
 * Calculate packet type and offload flags for an mbuf and store them.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param cqes[4]
 *   Array of four 16-byte completions extracted from the original completion
 *   descriptor.
 * @param op_err
 *   Opcode vector having responder error status. Each field is 4B.
 * @param pkts
 *   Pointer to array of packets to be filled.
 */
static inline void
rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4],
			 __m128i op_err, struct rte_mbuf **pkts)
{
	__m128i pinfo0, pinfo1;
	__m128i pinfo, ptype;
	__m128i ol_flags = _mm_set1_epi32(rxq->rss_hash * PKT_RX_RSS_HASH);
	__m128i cv_flags;
	const __m128i zero = _mm_setzero_si128();
	const __m128i ptype_mask =
		_mm_set_epi32(0xfd06, 0xfd06, 0xfd06, 0xfd06);
	const __m128i ptype_ol_mask =
		_mm_set_epi32(0x106, 0x106, 0x106, 0x106);
	const __m128i pinfo_mask =
		_mm_set_epi32(0x3, 0x3, 0x3, 0x3);
	const __m128i cv_flag_sel =
		_mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0,
			     (uint8_t)((PKT_RX_IP_CKSUM_GOOD |
					PKT_RX_L4_CKSUM_GOOD) >> 1),
			     0,
			     (uint8_t)(PKT_RX_L4_CKSUM_GOOD >> 1),
			     0,
			     (uint8_t)(PKT_RX_IP_CKSUM_GOOD >> 1),
			     (uint8_t)(PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED),
			     0);
	const __m128i cv_mask =
		_mm_set_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
			      PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED,
			      PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
			      PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED,
			      PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
			      PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED,
			      PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
			      PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED);
	const __m128i mbuf_init =
		_mm_loadl_epi64((__m128i *)&rxq->mbuf_initializer);
	__m128i rearm0, rearm1, rearm2, rearm3;

	/* Extract pkt_info field. */
	pinfo0 = _mm_unpacklo_epi32(cqes[0], cqes[1]);
	pinfo1 = _mm_unpacklo_epi32(cqes[2], cqes[3]);
	pinfo = _mm_unpacklo_epi64(pinfo0, pinfo1);
	/* Extract hdr_type_etc field. */
	pinfo0 = _mm_unpackhi_epi32(cqes[0], cqes[1]);
	pinfo1 = _mm_unpackhi_epi32(cqes[2], cqes[3]);
	ptype = _mm_unpacklo_epi64(pinfo0, pinfo1);
	if (rxq->mark) {
		const __m128i pinfo_ft_mask =
			_mm_set_epi32(0xffffff00, 0xffffff00,
				      0xffffff00, 0xffffff00);
		const __m128i fdir_flags = _mm_set1_epi32(PKT_RX_FDIR);
		const __m128i fdir_id_flags = _mm_set1_epi32(PKT_RX_FDIR_ID);
		__m128i flow_tag, invalid_mask;

		flow_tag = _mm_and_si128(pinfo, pinfo_ft_mask);
		/* Check if flow tag is non-zero then set PKT_RX_FDIR. */
		invalid_mask = _mm_cmpeq_epi32(flow_tag, zero);
		ol_flags = _mm_or_si128(ol_flags,
					_mm_andnot_si128(invalid_mask,
							 fdir_flags));
		/* Mask out invalid entries. */
		flow_tag = _mm_andnot_si128(invalid_mask, flow_tag);
		/* Check if flow tag equals MLX5_FLOW_MARK_DEFAULT. */
		ol_flags = _mm_or_si128(ol_flags,
					_mm_andnot_si128(
						_mm_cmpeq_epi32(flow_tag,
								pinfo_ft_mask),
						fdir_id_flags));
	}
	/*
	 * Merge the two fields to generate the following:
	 * bit[1]     = l3_ok
	 * bit[2]     = l4_ok
	 * bit[8]     = cv
	 * bit[11:10] = l3_hdr_type
	 * bit[14:12] = l4_hdr_type
	 * bit[15]    = ip_frag
	 * bit[16]    = tunneled
	 * bit[17]    = outer_l3_type
	 */
	ptype = _mm_and_si128(ptype, ptype_mask);
	pinfo = _mm_and_si128(pinfo, pinfo_mask);
	pinfo = _mm_slli_epi32(pinfo, 16);
	/* Make pinfo carry the merged fields for the ol_flags calculation. */
	pinfo = _mm_or_si128(ptype, pinfo);
	ptype = _mm_srli_epi32(pinfo, 10);
	ptype = _mm_packs_epi32(ptype, zero);
	/* Errored packets will have RTE_PTYPE_ALL_MASK. */
	op_err = _mm_srli_epi16(op_err, 8);
	ptype = _mm_or_si128(ptype, op_err);
	pkts[0]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 0)];
	pkts[1]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 2)];
	pkts[2]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 4)];
	pkts[3]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 6)];
	/* Fill flags for checksum and VLAN. */
	pinfo = _mm_and_si128(pinfo, ptype_ol_mask);
	pinfo = _mm_shuffle_epi8(cv_flag_sel, pinfo);
	/* Locate checksum flags at byte[2:1] and merge with VLAN flags. */
	cv_flags = _mm_slli_epi32(pinfo, 9);
	cv_flags = _mm_or_si128(pinfo, cv_flags);
	/* Move back flags to start from byte[0]. */
	cv_flags = _mm_srli_epi32(cv_flags, 8);
	/* Mask out garbage bits. */
	cv_flags = _mm_and_si128(cv_flags, cv_mask);
	/* Merge to ol_flags. */
	ol_flags = _mm_or_si128(ol_flags, cv_flags);
	/* Merge mbuf_init and ol_flags. */
	rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(ol_flags, 8), 0x30);
	rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(ol_flags, 4), 0x30);
	rearm2 = _mm_blend_epi16(mbuf_init, ol_flags, 0x30);
	rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(ol_flags, 4), 0x30);
	/* Write 8B rearm_data and 8B ol_flags. */
	_mm_store_si128((__m128i *)&pkts[0]->rearm_data, rearm0);
	_mm_store_si128((__m128i *)&pkts[1]->rearm_data, rearm1);
	_mm_store_si128((__m128i *)&pkts[2]->rearm_data, rearm2);
	_mm_store_si128((__m128i *)&pkts[3]->rearm_data, rearm3);
}
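
/*
 * The rearm blends above rely on the mbuf layout: rearm_data is immediately
 * followed by the 64-bit ol_flags, so a single 16B store per mbuf writes
 * both. _mm_blend_epi16(..., 0x30) keeps bytes 0-7 and 12-15 from mbuf_init
 * (rearm data, upper half of ol_flags zeroed) and takes bytes 8-11, i.e.
 * the 32-bit flags computed here, from the shifted ol_flags vector.
 */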

/**
 * Receive a burst of packets. An errored completion also consumes an mbuf,
 * but its packet_type is set to RTE_PTYPE_ALL_MASK. Marked mbufs should be
 * freed before returning to the application.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets received including errors (<= pkts_n).
 */
static inline uint16_t
rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	const uint16_t q_n = 1 << rxq->cqe_n;
	const uint16_t q_mask = q_n - 1;
	volatile struct mlx5_cqe *cq;
	struct rte_mbuf **elts;
	unsigned int pos;
	uint64_t n;
	uint16_t repl_n;
	uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP;
	uint16_t nocmp_n = 0;
	uint16_t rcvd_pkt = 0;
	unsigned int cq_idx = rxq->cq_ci & q_mask;
	unsigned int elts_idx;
	unsigned int ownership = !!(rxq->cq_ci & (q_mask + 1));
	const __m128i owner_check =
		_mm_set_epi64x(0x0100000001000000LL, 0x0100000001000000LL);
	const __m128i opcode_check =
		_mm_set_epi64x(0xf0000000f0000000LL, 0xf0000000f0000000LL);
	const __m128i format_check =
		_mm_set_epi64x(0x0c0000000c000000LL, 0x0c0000000c000000LL);
	const __m128i resp_err_check =
		_mm_set_epi64x(0xe0000000e0000000LL, 0xe0000000e0000000LL);
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint32_t rcvd_byte = 0;
	/* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */
	const __m128i len_shuf_mask =
		_mm_set_epi8(-1, -1, -1, -1,
			     -1, -1, -1, -1,
			     12, 13,  8,  9,
			      4,  5,  0,  1);
#endif
	/* Mask to shuffle from extracted CQE to mbuf. */
	const __m128i shuf_mask =
		_mm_set_epi8(-1,  3,  2,  1, /* fdir.hi */
			     12, 13, 14, 15, /* rss, bswap32 */
			     10, 11,         /* vlan_tci, bswap16 */
			      4,  5,         /* data_len, bswap16 */
			     -1, -1,         /* zero out 2nd half of pkt_len */
			      4,  5          /* pkt_len, bswap16 */);
	/* Mask to blend from the last Qword to the first DQword. */
	const __m128i blend_mask =
		_mm_set_epi8(-1, -1, -1, -1,
			     -1, -1, -1, -1,
			      0,  0,  0,  0,
			      0,  0,  0, -1);
	const __m128i zero = _mm_setzero_si128();
	const __m128i ones = _mm_cmpeq_epi32(zero, zero);
	const __m128i crc_adj =
		_mm_set_epi16(0, 0, 0, 0, 0,
			      rxq->crc_present * ETHER_CRC_LEN,
			      0,
			      rxq->crc_present * ETHER_CRC_LEN);
	const __m128i flow_mark_adj = _mm_set_epi32(rxq->mark * (-1), 0, 0, 0);

	assert(rxq->sges_n == 0);
	assert(rxq->cqe_n == rxq->elts_n);
	cq = &(*rxq->cqes)[cq_idx];
	rte_prefetch0(cq);
	rte_prefetch0(cq + 1);
	rte_prefetch0(cq + 2);
	rte_prefetch0(cq + 3);
	pkts_n = RTE_MIN(pkts_n, MLX5_VPMD_RX_MAX_BURST);
	/*
	 * Order of indexes:
	 *   rq_ci >= cq_ci >= rq_pi
	 * Definition of indexes:
	 *   rq_ci - cq_ci := # of buffers owned by HW (posted).
	 *   cq_ci - rq_pi := # of buffers not returned to app (decompressed).
	 *   N - (rq_ci - rq_pi) := # of buffers consumed (to be replenished).
	 */
	repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
	if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH)
		mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
	/* See if there are unreturned mbufs from a compressed CQE. */
	rcvd_pkt = rxq->cq_ci - rxq->rq_pi;
	if (rcvd_pkt > 0) {
		rcvd_pkt = RTE_MIN(rcvd_pkt, pkts_n);
		rxq_copy_mbuf_v(rxq, pkts, rcvd_pkt);
		rxq->rq_pi += rcvd_pkt;
		pkts += rcvd_pkt;
	}
	elts_idx = rxq->rq_pi & q_mask;
	elts = &(*rxq->elts)[elts_idx];
	pkts_n = RTE_MIN(pkts_n - rcvd_pkt,
			 (uint16_t)(rxq->rq_ci - rxq->cq_ci));
	/* Not to overflow pkts/elts array. */
	pkts_n = RTE_ALIGN_FLOOR(pkts_n, MLX5_VPMD_DESCS_PER_LOOP);
	/* Not to cross queue end. */
	pkts_n = RTE_MIN(pkts_n, q_n - elts_idx);
	if (!pkts_n)
		return rcvd_pkt;
	/* At this point, there shouldn't be any remaining packets. */
	assert(rxq->rq_pi == rxq->cq_ci);
	/*
	 * A. load first Qword (8bytes) in one loop.
	 * B. copy 4 mbuf pointers from elts ring to returning pkts.
	 * C. load remaining CQE data and extract necessary fields.
	 *    Final 16bytes cqes[] extracted from original 64bytes CQE has the
	 *    following structure:
	 *        struct {
	 *          uint8_t  pkt_info;
	 *          uint8_t  flow_tag[3];
	 *          uint16_t byte_cnt;
	 *          uint8_t  rsvd4;
	 *          uint8_t  op_own;
	 *          uint16_t hdr_type_etc;
	 *          uint16_t vlan_info;
	 *          uint32_t rx_has_res;
	 *        } c;
	 * D. fill in mbuf.
	 * E. get valid CQEs.
	 * F. find compressed CQE.
	 */
	for (pos = 0;
	     pos < pkts_n;
	     pos += MLX5_VPMD_DESCS_PER_LOOP) {
		__m128i cqes[MLX5_VPMD_DESCS_PER_LOOP];
		__m128i cqe_tmp1, cqe_tmp2;
		__m128i pkt_mb0, pkt_mb1, pkt_mb2, pkt_mb3;
		__m128i op_own, op_own_tmp1, op_own_tmp2;
		__m128i opcode, owner_mask, invalid_mask;
		__m128i comp_mask;
		__m128i mask;
#ifdef MLX5_PMD_SOFT_COUNTERS
		__m128i byte_cnt;
#endif
		__m128i mbp1, mbp2;
		__m128i p = _mm_set_epi16(0, 0, 0, 0, 3, 2, 1, 0);
		unsigned int p1, p2, p3;

		/* Prefetch next 4 CQEs. */
		if (pkts_n - pos >= 2 * MLX5_VPMD_DESCS_PER_LOOP) {
			rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP]);
			rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 1]);
			rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 2]);
			rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 3]);
		}
		/* A.0 do not cross the end of CQ. */
		mask = _mm_set_epi64x(0, (pkts_n - pos) * sizeof(uint16_t) * 8);
		mask = _mm_sll_epi64(ones, mask);
		p = _mm_andnot_si128(mask, p);
		/* A.1 load cqes. */
		p3 = _mm_extract_epi16(p, 3);
		cqes[3] = _mm_loadl_epi64((__m128i *)
					   &cq[pos + p3].sop_drop_qpn);
		rte_compiler_barrier();
		p2 = _mm_extract_epi16(p, 2);
		cqes[2] = _mm_loadl_epi64((__m128i *)
					   &cq[pos + p2].sop_drop_qpn);
		rte_compiler_barrier();
		/* B.1 load mbuf pointers. */
		mbp1 = _mm_loadu_si128((__m128i *)&elts[pos]);
		mbp2 = _mm_loadu_si128((__m128i *)&elts[pos + 2]);
		/* A.1 load a block having op_own. */
		p1 = _mm_extract_epi16(p, 1);
		cqes[1] = _mm_loadl_epi64((__m128i *)
					   &cq[pos + p1].sop_drop_qpn);
		rte_compiler_barrier();
		cqes[0] = _mm_loadl_epi64((__m128i *)
					   &cq[pos].sop_drop_qpn);
		/* B.2 copy mbuf pointers. */
		_mm_storeu_si128((__m128i *)&pkts[pos], mbp1);
		_mm_storeu_si128((__m128i *)&pkts[pos + 2], mbp2);
		rte_compiler_barrier();
		/* C.1 load remaining CQE data and extract necessary fields. */
		cqe_tmp2 = _mm_load_si128((__m128i *)&cq[pos + p3]);
		cqe_tmp1 = _mm_load_si128((__m128i *)&cq[pos + p2]);
		cqes[3] = _mm_blendv_epi8(cqes[3], cqe_tmp2, blend_mask);
		cqes[2] = _mm_blendv_epi8(cqes[2], cqe_tmp1, blend_mask);
		cqe_tmp2 = _mm_loadu_si128((__m128i *)&cq[pos + p3].rsvd1[3]);
		cqe_tmp1 = _mm_loadu_si128((__m128i *)&cq[pos + p2].rsvd1[3]);
		cqes[3] = _mm_blend_epi16(cqes[3], cqe_tmp2, 0x30);
		cqes[2] = _mm_blend_epi16(cqes[2], cqe_tmp1, 0x30);
		cqe_tmp2 = _mm_loadl_epi64((__m128i *)&cq[pos + p3].rsvd2[10]);
		cqe_tmp1 = _mm_loadl_epi64((__m128i *)&cq[pos + p2].rsvd2[10]);
		cqes[3] = _mm_blend_epi16(cqes[3], cqe_tmp2, 0x04);
		cqes[2] = _mm_blend_epi16(cqes[2], cqe_tmp1, 0x04);
		/* C.2 generate final structure for mbuf with swapping bytes. */
		pkt_mb3 = _mm_shuffle_epi8(cqes[3], shuf_mask);
		pkt_mb2 = _mm_shuffle_epi8(cqes[2], shuf_mask);
		/* C.3 adjust CRC length. */
		pkt_mb3 = _mm_sub_epi16(pkt_mb3, crc_adj);
		pkt_mb2 = _mm_sub_epi16(pkt_mb2, crc_adj);
		/* C.4 adjust flow mark. */
		pkt_mb3 = _mm_add_epi32(pkt_mb3, flow_mark_adj);
		pkt_mb2 = _mm_add_epi32(pkt_mb2, flow_mark_adj);
		/* D.1 fill in mbuf - rx_descriptor_fields1. */
		_mm_storeu_si128((void *)&pkts[pos + 3]->pkt_len, pkt_mb3);
		_mm_storeu_si128((void *)&pkts[pos + 2]->pkt_len, pkt_mb2);
		/* E.1 extract op_own field. */
		op_own_tmp2 = _mm_unpacklo_epi32(cqes[2], cqes[3]);
		/* C.1 load remaining CQE data and extract necessary fields. */
		cqe_tmp2 = _mm_load_si128((__m128i *)&cq[pos + p1]);
		cqe_tmp1 = _mm_load_si128((__m128i *)&cq[pos]);
		cqes[1] = _mm_blendv_epi8(cqes[1], cqe_tmp2, blend_mask);
		cqes[0] = _mm_blendv_epi8(cqes[0], cqe_tmp1, blend_mask);
		cqe_tmp2 = _mm_loadu_si128((__m128i *)&cq[pos + p1].rsvd1[3]);
		cqe_tmp1 = _mm_loadu_si128((__m128i *)&cq[pos].rsvd1[3]);
		cqes[1] = _mm_blend_epi16(cqes[1], cqe_tmp2, 0x30);
		cqes[0] = _mm_blend_epi16(cqes[0], cqe_tmp1, 0x30);
		cqe_tmp2 = _mm_loadl_epi64((__m128i *)&cq[pos + p1].rsvd2[10]);
		cqe_tmp1 = _mm_loadl_epi64((__m128i *)&cq[pos].rsvd2[10]);
		cqes[1] = _mm_blend_epi16(cqes[1], cqe_tmp2, 0x04);
		cqes[0] = _mm_blend_epi16(cqes[0], cqe_tmp1, 0x04);
		/* C.2 generate final structure for mbuf with swapping bytes. */
		pkt_mb1 = _mm_shuffle_epi8(cqes[1], shuf_mask);
		pkt_mb0 = _mm_shuffle_epi8(cqes[0], shuf_mask);
		/* C.3 adjust CRC length. */
		pkt_mb1 = _mm_sub_epi16(pkt_mb1, crc_adj);
		pkt_mb0 = _mm_sub_epi16(pkt_mb0, crc_adj);
		/* C.4 adjust flow mark. */
		pkt_mb1 = _mm_add_epi32(pkt_mb1, flow_mark_adj);
		pkt_mb0 = _mm_add_epi32(pkt_mb0, flow_mark_adj);
		/* E.1 extract op_own byte. */
		op_own_tmp1 = _mm_unpacklo_epi32(cqes[0], cqes[1]);
		op_own = _mm_unpackhi_epi64(op_own_tmp1, op_own_tmp2);
		/* D.1 fill in mbuf - rx_descriptor_fields1. */
		_mm_storeu_si128((void *)&pkts[pos + 1]->pkt_len, pkt_mb1);
		_mm_storeu_si128((void *)&pkts[pos]->pkt_len, pkt_mb0);
		/* E.2 flip owner bit to mark CQEs from last round. */
		owner_mask = _mm_and_si128(op_own, owner_check);
		if (ownership)
			owner_mask = _mm_xor_si128(owner_mask, owner_check);
		owner_mask = _mm_cmpeq_epi32(owner_mask, owner_check);
		owner_mask = _mm_packs_epi32(owner_mask, zero);
		/* E.3 get mask for invalidated CQEs. */
		opcode = _mm_and_si128(op_own, opcode_check);
		invalid_mask = _mm_cmpeq_epi32(opcode_check, opcode);
		invalid_mask = _mm_packs_epi32(invalid_mask, zero);
		/* E.4 mask out beyond boundary. */
		invalid_mask = _mm_or_si128(invalid_mask, mask);
		/* E.5 merge invalid_mask with invalid owner. */
		invalid_mask = _mm_or_si128(invalid_mask, owner_mask);
		/* F.1 find compressed CQE format. */
		comp_mask = _mm_and_si128(op_own, format_check);
		comp_mask = _mm_cmpeq_epi32(comp_mask, format_check);
		comp_mask = _mm_packs_epi32(comp_mask, zero);
		/* F.2 mask out invalid entries. */
		comp_mask = _mm_andnot_si128(invalid_mask, comp_mask);
		comp_idx = _mm_cvtsi128_si64(comp_mask);
		/* F.3 get the first compressed CQE. */
		comp_idx = comp_idx ?
			   __builtin_ctzll(comp_idx) /
			   (sizeof(uint16_t) * 8) :
			   MLX5_VPMD_DESCS_PER_LOOP;
		/* E.6 mask out entries after the compressed CQE. */
		mask = _mm_set_epi64x(0, comp_idx * sizeof(uint16_t) * 8);
		mask = _mm_sll_epi64(ones, mask);
		invalid_mask = _mm_or_si128(invalid_mask, mask);
		/* E.7 count non-compressed valid CQEs. */
		n = _mm_cvtsi128_si64(invalid_mask);
		n = n ? __builtin_ctzll(n) / (sizeof(uint16_t) * 8) :
			MLX5_VPMD_DESCS_PER_LOOP;
		nocmp_n += n;
		/* D.2 get the final invalid mask. */
		mask = _mm_set_epi64x(0, n * sizeof(uint16_t) * 8);
		mask = _mm_sll_epi64(ones, mask);
		invalid_mask = _mm_or_si128(invalid_mask, mask);
		/* D.3 check error in opcode. */
		opcode = _mm_cmpeq_epi32(resp_err_check, opcode);
		opcode = _mm_packs_epi32(opcode, zero);
		opcode = _mm_andnot_si128(invalid_mask, opcode);
		/* D.4 mark if any error is set. */
		rxq->pending_err |= !!_mm_cvtsi128_si64(opcode);
		/* D.5 fill in mbuf - rearm_data and packet_type. */
		rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]);
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Add up received bytes count. */
		byte_cnt = _mm_shuffle_epi8(op_own, len_shuf_mask);
		byte_cnt = _mm_andnot_si128(invalid_mask, byte_cnt);
		byte_cnt = _mm_hadd_epi16(byte_cnt, zero);
		rcvd_byte += _mm_cvtsi128_si64(_mm_hadd_epi16(byte_cnt, zero));
#endif
		/*
		 * Break the loop unless more valid CQE is expected, or if
		 * there's a compressed CQE.
		 */
		if (n != MLX5_VPMD_DESCS_PER_LOOP)
			break;
	}
	/* If no new CQE seen, return without updating cq_db. */
	if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP))
		return rcvd_pkt;
	/* Update the consumer indexes for non-compressed CQEs. */
	assert(nocmp_n <= pkts_n);
	rxq->cq_ci += nocmp_n;
	rxq->rq_pi += nocmp_n;
	rcvd_pkt += nocmp_n;
#ifdef MLX5_PMD_SOFT_COUNTERS
	rxq->stats.ipackets += nocmp_n;
	rxq->stats.ibytes += rcvd_byte;
#endif
	/* Decompress the last CQE if compressed. */
	if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n) {
		assert(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
		rxq_cq_decompress_v(rxq, &cq[nocmp_n], &elts[nocmp_n]);
		/* Return more packets if needed. */
		if (nocmp_n < pkts_n) {
			uint16_t n = rxq->cq_ci - rxq->rq_pi;

			n = RTE_MIN(n, pkts_n - nocmp_n);
			rxq_copy_mbuf_v(rxq, &pkts[nocmp_n], n);
			rxq->rq_pi += n;
			rcvd_pkt += n;
		}
	}
	rte_compiler_barrier();
	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
	return rcvd_pkt;
}
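
/*
 * Illustrative caller-side error sweep (hypothetical helper, not part of
 * this file's API): rxq_burst_v() also returns mbufs from errored
 * completions, marking them with RTE_PTYPE_ALL_MASK, so a wrapper (such as
 * the one in mlx5_rxtx_vec.c) is expected to drop them before handing the
 * burst to the application. A minimal sketch:
 */
static inline uint16_t
rxq_drop_errored_sketch(struct rte_mbuf **pkts, uint16_t n)
{
	uint16_t i;
	uint16_t m = 0;

	for (i = 0; i < n; ++i) {
		if (pkts[i]->packet_type == RTE_PTYPE_ALL_MASK)
			rte_pktmbuf_free_seg(pkts[i]);
		else
			pkts[m++] = pkts[i];
	}
	return m;
}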

#endif /* RTE_PMD_MLX5_RXTX_VEC_SSE_H_ */