net/ice: support flex Rx descriptor RxDID22
[dpdk.git] drivers/net/ice/ice_rxtx_vec_sse.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include "ice_rxtx_vec_common.h"

#include <tmmintrin.h>

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif

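/* Refill the Rx ring from rxq->rxrearm_start with ICE_RXQ_REARM_THRESH fresh
 * mbufs from the mempool, writing buffer DMA addresses (plus headroom) into
 * the descriptors two at a time. If the mempool runs dry while the ring is
 * nearly unarmed, the touched descriptors are pointed at rxq->fake_mbuf and
 * zeroed so the hardware never sees a stale address.
 */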
static inline void
ice_rxq_rearm(struct ice_rx_queue *rxq)
{
        int i;
        uint16_t rx_id;
        volatile union ice_rx_flex_desc *rxdp;
        struct ice_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
        struct rte_mbuf *mb0, *mb1;
        __m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
                                          RTE_PKTMBUF_HEADROOM);
        __m128i dma_addr0, dma_addr1;

        rxdp = rxq->rx_ring + rxq->rxrearm_start;

        /* Pull 'n' more MBUFs into the software ring */
        if (rte_mempool_get_bulk(rxq->mp,
                                 (void *)rxep,
                                 ICE_RXQ_REARM_THRESH) < 0) {
                if (rxq->rxrearm_nb + ICE_RXQ_REARM_THRESH >=
                    rxq->nb_rx_desc) {
                        dma_addr0 = _mm_setzero_si128();
                        for (i = 0; i < ICE_DESCS_PER_LOOP; i++) {
                                rxep[i].mbuf = &rxq->fake_mbuf;
                                _mm_store_si128((__m128i *)&rxdp[i].read,
                                                dma_addr0);
                        }
                }
                rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
                        ICE_RXQ_REARM_THRESH;
                return;
        }

        /* Initialize the mbufs in vector, process 2 mbufs in one loop */
        for (i = 0; i < ICE_RXQ_REARM_THRESH; i += 2, rxep += 2) {
                __m128i vaddr0, vaddr1;

                mb0 = rxep[0].mbuf;
                mb1 = rxep[1].mbuf;

                /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
                RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
                                 offsetof(struct rte_mbuf, buf_addr) + 8);
                vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
                vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);

                /* convert pa to dma_addr hdr/data */
                dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
                dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);

                /* add headroom to pa values */
                dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
                dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);

                /* flush desc with pa dma_addr */
                _mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
                _mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
        }

        rxq->rxrearm_start += ICE_RXQ_REARM_THRESH;
        if (rxq->rxrearm_start >= rxq->nb_rx_desc)
                rxq->rxrearm_start = 0;

        rxq->rxrearm_nb -= ICE_RXQ_REARM_THRESH;

        rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
                           (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));

        /* Update the tail pointer on the NIC */
        ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
}

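/* Translate the status_error0 words of four flex descriptors into mbuf
 * ol_flags, then write them together with the cached rearm data as a single
 * 16-byte store per mbuf.
 */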
static inline void
ice_rx_desc_to_olflags_v(struct ice_rx_queue *rxq, __m128i descs[4],
                         struct rte_mbuf **rx_pkts)
{
        const __m128i mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer);
        __m128i rearm0, rearm1, rearm2, rearm3;

        __m128i tmp_desc, flags, rss_vlan;

        /* mask everything except checksum, RSS and VLAN flags.
         * bit6:4 for checksum.
         * bit12 for RSS indication.
         * bit13 for VLAN indication.
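         * That is (0x7 << 4) | (1 << 12) | (1 << 13) = 0x3070 per 32-bit lane.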
         */
        const __m128i desc_mask = _mm_set_epi32(0x3070, 0x3070,
                                                0x3070, 0x3070);

        const __m128i cksum_mask = _mm_set_epi32(PKT_RX_IP_CKSUM_MASK |
                                                 PKT_RX_L4_CKSUM_MASK |
                                                 PKT_RX_EIP_CKSUM_BAD,
                                                 PKT_RX_IP_CKSUM_MASK |
                                                 PKT_RX_L4_CKSUM_MASK |
                                                 PKT_RX_EIP_CKSUM_BAD,
                                                 PKT_RX_IP_CKSUM_MASK |
                                                 PKT_RX_L4_CKSUM_MASK |
                                                 PKT_RX_EIP_CKSUM_BAD,
                                                 PKT_RX_IP_CKSUM_MASK |
                                                 PKT_RX_L4_CKSUM_MASK |
                                                 PKT_RX_EIP_CKSUM_BAD);

        /* map the checksum, rss and vlan fields to the checksum, rss
         * and vlan flag
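         * using _mm_shuffle_epi8() as a 16-entry byte lookup table indexed
         * by the extracted 4-bit field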
         */
        const __m128i cksum_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
                        /* shift right 1 bit to make sure it does not exceed 255 */
                        (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
                         PKT_RX_IP_CKSUM_BAD) >> 1,
                        (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
                         PKT_RX_IP_CKSUM_GOOD) >> 1,
                        (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
                         PKT_RX_IP_CKSUM_BAD) >> 1,
                        (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
                         PKT_RX_IP_CKSUM_GOOD) >> 1,
                        (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
                        (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
                        (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
                        (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1);

        const __m128i rss_vlan_flags = _mm_set_epi8(0, 0, 0, 0,
                        0, 0, 0, 0,
                        0, 0, 0, 0,
                        PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
                        PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
                        PKT_RX_RSS_HASH, 0);

        /* merge 4 descriptors */
        flags = _mm_unpackhi_epi32(descs[0], descs[1]);
        tmp_desc = _mm_unpackhi_epi32(descs[2], descs[3]);
        tmp_desc = _mm_unpacklo_epi64(flags, tmp_desc);
        tmp_desc = _mm_and_si128(tmp_desc, desc_mask);

        /* checksum flags */
        tmp_desc = _mm_srli_epi32(tmp_desc, 4);
        flags = _mm_shuffle_epi8(cksum_flags, tmp_desc);
        /* then we shift left 1 bit */
        flags = _mm_slli_epi32(flags, 1);
        /* we need to mask out the redundant bits introduced by RSS or
         * VLAN fields.
         */
        flags = _mm_and_si128(flags, cksum_mask);

        /* RSS, VLAN flag */
        tmp_desc = _mm_srli_epi32(tmp_desc, 8);
        rss_vlan = _mm_shuffle_epi8(rss_vlan_flags, tmp_desc);

        /* merge the flags */
        flags = _mm_or_si128(flags, rss_vlan);

        /**
         * At this point, we have the 4 sets of flags in the low 16-bits
         * of each 32-bit value in flags.
         * We want to extract these, and merge them with the mbuf init data
         * so we can do a single 16-byte write to the mbuf to set the flags
         * and all the other initialization fields. Extracting the
         * appropriate flags means that we have to do a shift and blend for
         * each mbuf before we do the write.
         */
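        /* Note: blend mask 0x10 selects only 16-bit word 4 of the register,
         * i.e. the low 16 bits of ol_flags at rearm_data + 8; the RX flag
         * bits set above all fit within those 16 bits.
         */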
        rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(flags, 8), 0x10);
        rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(flags, 4), 0x10);
        rearm2 = _mm_blend_epi16(mbuf_init, flags, 0x10);
        rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(flags, 4), 0x10);

        /* write the rearm data and the olflags in one write */
        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
                         offsetof(struct rte_mbuf, rearm_data) + 8);
        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
                         RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
        _mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0);
        _mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1);
        _mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2);
        _mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3);
}

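/* Extract the 10-bit packet type index from the flex_ptype field of four
 * descriptors and translate it through the adapter's ptype table.
 */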
static inline void
ice_rx_desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts,
                       uint32_t *ptype_tbl)
{
        const __m128i ptype_mask = _mm_set_epi16(0, ICE_RX_FLEX_DESC_PTYPE_M,
                                                 0, ICE_RX_FLEX_DESC_PTYPE_M,
                                                 0, ICE_RX_FLEX_DESC_PTYPE_M,
                                                 0, ICE_RX_FLEX_DESC_PTYPE_M);
        __m128i ptype_01 = _mm_unpacklo_epi32(descs[0], descs[1]);
        __m128i ptype_23 = _mm_unpacklo_epi32(descs[2], descs[3]);
        __m128i ptype_all = _mm_unpacklo_epi64(ptype_01, ptype_23);

        ptype_all = _mm_and_si128(ptype_all, ptype_mask);

        rx_pkts[0]->packet_type = ptype_tbl[_mm_extract_epi16(ptype_all, 1)];
        rx_pkts[1]->packet_type = ptype_tbl[_mm_extract_epi16(ptype_all, 3)];
        rx_pkts[2]->packet_type = ptype_tbl[_mm_extract_epi16(ptype_all, 5)];
        rx_pkts[3]->packet_type = ptype_tbl[_mm_extract_epi16(ptype_all, 7)];
}

/**
 * Notice:
 * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
 * - nb_pkts > ICE_VPMD_RX_BURST, only scan ICE_VPMD_RX_BURST
 *   numbers of DD bits
 */
static inline uint16_t
_ice_recv_raw_pkts_vec(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
                       uint16_t nb_pkts, uint8_t *split_packet)
{
        volatile union ice_rx_flex_desc *rxdp;
        struct ice_rx_entry *sw_ring;
        uint16_t nb_pkts_recd;
        int pos;
        uint64_t var;
        uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
        __m128i crc_adjust = _mm_set_epi16
                                (0, 0, 0,       /* ignore non-length fields */
                                 -rxq->crc_len, /* sub crc on data_len */
                                 0,          /* ignore high-16bits of pkt_len */
                                 -rxq->crc_len, /* sub crc on pkt_len */
                                 0, 0           /* ignore pkt_type field */
                                );
        const __m128i zero = _mm_setzero_si128();
        /* mask to shuffle from desc. to mbuf */
        const __m128i shuf_msk = _mm_set_epi8
                        (0xFF, 0xFF,
                         0xFF, 0xFF,  /* rss hash parsed separately */
                         11, 10,      /* octet 10~11, 16 bits vlan_macip */
                         5, 4,        /* octet 4~5, 16 bits data_len */
                         0xFF, 0xFF,  /* skip high 16 bits pkt_len, zero out */
                         5, 4,        /* octet 4~5, low 16 bits pkt_len */
                         0xFF, 0xFF,  /* pkt_type set as unknown */
                         0xFF, 0xFF   /* pkt_type set as unknown */
                        );
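        /* mask to gather the EOP status bytes of the four packets from
         * staterr (whose 32-bit lane order after the unpacks below is
         * pkt 1, 3, 0, 2) into the low 32 bits, in packet order
         */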
        const __m128i eop_shuf_mask = _mm_set_epi8(0xFF, 0xFF,
                                                   0xFF, 0xFF,
                                                   0xFF, 0xFF,
                                                   0xFF, 0xFF,
                                                   0xFF, 0xFF,
                                                   0xFF, 0xFF,
                                                   0x04, 0x0C,
                                                   0x00, 0x08);

        /**
         * compile-time check the above crc_adjust layout is correct.
         * NOTE: the first field (lowest address) is given last in set_epi16
         * call above.
         */
        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
                         offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
                         offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);

        /* 4 packets DD mask */
        const __m128i dd_check = _mm_set_epi64x(0x0000000100000001LL,
                                                0x0000000100000001LL);
        /* 4 packets EOP mask */
        const __m128i eop_check = _mm_set_epi64x(0x0000000200000002LL,
                                                 0x0000000200000002LL);
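        /* DD is bit 0 and EOP is bit 1 of status_error0, so the two masks
         * above select those bits in each 32-bit staterr lane
         */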

        /* nb_pkts shall not exceed ICE_MAX_RX_BURST */
        nb_pkts = RTE_MIN(nb_pkts, ICE_MAX_RX_BURST);

        /* nb_pkts has to be floor-aligned to ICE_DESCS_PER_LOOP */
        nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, ICE_DESCS_PER_LOOP);

        /* Just the act of getting into the function from the application is
         * going to cost about 7 cycles
         */
        rxdp = rxq->rx_ring + rxq->rx_tail;

        rte_prefetch0(rxdp);

        /* See if we need to rearm the RX queue - gives the prefetch a bit
         * of time to act
         */
        if (rxq->rxrearm_nb > ICE_RXQ_REARM_THRESH)
                ice_rxq_rearm(rxq);

        /* Before we start moving massive data around, check to see if
         * there is actually a packet available
         */
        if (!(rxdp->wb.status_error0 &
              rte_cpu_to_le_32(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
                return 0;

        /**
         * Compile-time verify the shuffle mask
         * NOTE: some field positions already verified above, but duplicated
         * here for completeness in case of future modifications.
         */
        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
                         offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
                         offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
                         offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
                         offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);

        /* Cache is empty -> need to scan the buffer rings, but first move
         * the next 'n' mbufs into the cache
         */
        sw_ring = &rxq->sw_ring[rxq->rx_tail];

        /* A. load 4 packet in one loop
         * [A*. mask out 4 unused dirty field in desc]
         * B. copy 4 mbuf point from swring to rx_pkts
         * C. calc the number of DD bits among the 4 packets
         * [C*. extract the end-of-packet bit, if requested]
         * D. fill info. from desc to mbuf
         */

        for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
             pos += ICE_DESCS_PER_LOOP,
             rxdp += ICE_DESCS_PER_LOOP) {
                __m128i descs[ICE_DESCS_PER_LOOP];
                __m128i pkt_mb0, pkt_mb1, pkt_mb2, pkt_mb3;
                __m128i staterr, sterr_tmp1, sterr_tmp2;
                /* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */
                __m128i mbp1;
#if defined(RTE_ARCH_X86_64)
                __m128i mbp2;
#endif

                /* B.1 load 2 (64 bit) or 4 (32 bit) mbuf points */
                mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
                /* Read desc statuses backwards to avoid race condition */
                /* A.1 load 4 pkts desc */
                descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
                rte_compiler_barrier();

                /* B.2 copy 2 64 bit or 4 32 bit mbuf point into rx_pkts */
                _mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);

#if defined(RTE_ARCH_X86_64)
                /* B.1 load 2 64 bit mbuf points */
                mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos + 2]);
#endif

                descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
                rte_compiler_barrier();
                /* A.1 load the remaining 2 pkts desc */
                descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
                rte_compiler_barrier();
                descs[0] = _mm_loadu_si128((__m128i *)(rxdp));

#if defined(RTE_ARCH_X86_64)
                /* B.2 copy 2 mbuf point into rx_pkts  */
                _mm_storeu_si128((__m128i *)&rx_pkts[pos + 2], mbp2);
#endif

                if (split_packet) {
                        rte_mbuf_prefetch_part2(rx_pkts[pos]);
                        rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
                        rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
                        rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
                }

                /* avoid compiler reorder optimization */
                rte_compiler_barrier();

                /* D.1 pkt 3,4 convert format from desc to pktmbuf */
                pkt_mb3 = _mm_shuffle_epi8(descs[3], shuf_msk);
                pkt_mb2 = _mm_shuffle_epi8(descs[2], shuf_msk);

                /* D.1 pkt 1,2 convert format from desc to pktmbuf */
                pkt_mb1 = _mm_shuffle_epi8(descs[1], shuf_msk);
                pkt_mb0 = _mm_shuffle_epi8(descs[0], shuf_msk);

                /* C.1 4=>2 filter staterr info only */
                sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
                /* C.1 4=>2 filter staterr info only */
                sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);

                ice_rx_desc_to_olflags_v(rxq, descs, &rx_pkts[pos]);

                /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
                pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);
                pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust);

                /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
                pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust);
                pkt_mb0 = _mm_add_epi16(pkt_mb0, crc_adjust);

#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
                /**
                 * to parse the RSS hash we need to load the 2nd 16B of each
                 * desc, which causes a performance drop when this path is
                 * taken.
                 */
                if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
                                DEV_RX_OFFLOAD_RSS_HASH) {
                        /* load bottom half of every 32B desc */
                        const __m128i raw_desc_bh3 =
                                _mm_load_si128
                                        ((void *)(&rxdp[3].wb.status_error1));
                        rte_compiler_barrier();
                        const __m128i raw_desc_bh2 =
                                _mm_load_si128
                                        ((void *)(&rxdp[2].wb.status_error1));
                        rte_compiler_barrier();
                        const __m128i raw_desc_bh1 =
                                _mm_load_si128
                                        ((void *)(&rxdp[1].wb.status_error1));
                        rte_compiler_barrier();
                        const __m128i raw_desc_bh0 =
                                _mm_load_si128
                                        ((void *)(&rxdp[0].wb.status_error1));

                        /**
                         * to shift the 32b RSS hash value to the
                         * highest 32b of each 128b before mask
                         */
                        __m128i rss_hash3 =
                                _mm_slli_epi64(raw_desc_bh3, 32);
                        __m128i rss_hash2 =
                                _mm_slli_epi64(raw_desc_bh2, 32);
                        __m128i rss_hash1 =
                                _mm_slli_epi64(raw_desc_bh1, 32);
                        __m128i rss_hash0 =
                                _mm_slli_epi64(raw_desc_bh0, 32);

                        __m128i rss_hash_msk =
                                _mm_set_epi32(0xFFFFFFFF, 0, 0, 0);

                        rss_hash3 = _mm_and_si128
                                        (rss_hash3, rss_hash_msk);
                        rss_hash2 = _mm_and_si128
                                        (rss_hash2, rss_hash_msk);
                        rss_hash1 = _mm_and_si128
                                        (rss_hash1, rss_hash_msk);
                        rss_hash0 = _mm_and_si128
                                        (rss_hash0, rss_hash_msk);

                        pkt_mb3 = _mm_or_si128(pkt_mb3, rss_hash3);
                        pkt_mb2 = _mm_or_si128(pkt_mb2, rss_hash2);
                        pkt_mb1 = _mm_or_si128(pkt_mb1, rss_hash1);
                        pkt_mb0 = _mm_or_si128(pkt_mb0, rss_hash0);
                } /* if() on RSS hash parsing */
#endif

                /* C.2 get 4 pkts staterr value  */
                staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);

                /* D.3 copy final 3,4 data to rx_pkts */
                _mm_storeu_si128
                        ((void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
                         pkt_mb3);
                _mm_storeu_si128
                        ((void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,
                         pkt_mb2);

                /* C* extract and record EOP bit */
                if (split_packet) {
                        /* and with mask to extract bits, flipping 1-0 */
                        __m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
                        /* the staterr values are not in order, as the count
                         * of dd bits doesn't care. However, for end of
                         * packet tracking, we do care, so shuffle. This also
                         * compresses the 32-bit values to 8-bit
                         */
                        eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
                        /* store the resulting 32-bit value */
                        *(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
                        split_packet += ICE_DESCS_PER_LOOP;
                }

                /* C.3 calc available number of desc */
                staterr = _mm_and_si128(staterr, dd_check);
                staterr = _mm_packs_epi32(staterr, zero);

                /* D.3 copy final 1,2 data to rx_pkts */
                _mm_storeu_si128
                        ((void *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
                         pkt_mb1);
                _mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
                                 pkt_mb0);
                ice_rx_desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
                /* C.4 count DD bits to get the number of received packets */
                var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
                nb_pkts_recd += var;
                if (likely(var != ICE_DESCS_PER_LOOP))
                        break;
        }

        /* Update our internal tail pointer */
        rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
        rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
        rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);

        return nb_pkts_recd;
}

/**
 * Notice:
 * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
 * - nb_pkts > ICE_VPMD_RX_BURST, only scan ICE_VPMD_RX_BURST
 *   numbers of DD bits
 */
uint16_t
ice_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
                  uint16_t nb_pkts)
{
        return _ice_recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
}

/* vPMD receive routine that reassembles scattered packets
 * Notice:
 * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
 * - nb_pkts > ICE_VPMD_RX_BURST, only scan ICE_VPMD_RX_BURST
 *   numbers of DD bits
 */
uint16_t
ice_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
                            uint16_t nb_pkts)
{
        struct ice_rx_queue *rxq = rx_queue;
        uint8_t split_flags[ICE_VPMD_RX_BURST] = {0};

        /* get some new buffers */
        uint16_t nb_bufs = _ice_recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
                                                  split_flags);
        if (nb_bufs == 0)
                return 0;

        /* happy day case, full burst + no packets to be joined */
        const uint64_t *split_fl64 = (uint64_t *)split_flags;

        if (!rxq->pkt_first_seg &&
            split_fl64[0] == 0 && split_fl64[1] == 0 &&
            split_fl64[2] == 0 && split_fl64[3] == 0)
                return nb_bufs;
        /* reassemble any packets that need reassembly */
        unsigned int i = 0;

        if (!rxq->pkt_first_seg) {
                /* find the first split flag, and only reassemble from there */
                while (i < nb_bufs && !split_flags[i])
                        i++;
                if (i == nb_bufs)
                        return nb_bufs;
                rxq->pkt_first_seg = rx_pkts[i];
        }
        return i + ice_rx_reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
                                             &split_flags[i]);
}

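/* Build one 16B data descriptor: the low quadword carries the buffer DMA
 * address, the high quadword packs the descriptor type, command flags and
 * buffer size.
 */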
static inline void
ice_vtx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf *pkt,
         uint64_t flags)
{
        uint64_t high_qw =
                (ICE_TX_DESC_DTYPE_DATA |
                 ((uint64_t)flags << ICE_TXD_QW1_CMD_S) |
                 ((uint64_t)pkt->data_len << ICE_TXD_QW1_TX_BUF_SZ_S));

        __m128i descriptor = _mm_set_epi64x(high_qw,
                                            pkt->buf_iova + pkt->data_off);
        _mm_store_si128((__m128i *)txdp, descriptor);
}

static inline void
ice_vtx(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkt,
        uint16_t nb_pkts, uint64_t flags)
{
        int i;

        for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
                ice_vtx1(txdp, *pkt, flags);
}

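/* Fixed-size burst Tx: frees completed descriptors once nb_tx_free drops
 * below tx_free_thresh, writes the burst into the ring (splitting it in two
 * when it would wrap past the ring end), then arms the RS bit and bumps the
 * tail register.
 */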
static uint16_t
ice_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                         uint16_t nb_pkts)
{
        struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
        volatile struct ice_tx_desc *txdp;
        struct ice_tx_entry *txep;
        uint16_t n, nb_commit, tx_id;
        uint64_t flags = ICE_TD_CMD;
        uint64_t rs = ICE_TX_DESC_CMD_RS | ICE_TD_CMD;
        int i;

        /* crossing the tx_rs_thresh boundary is not allowed */
        nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);

        if (txq->nb_tx_free < txq->tx_free_thresh)
                ice_tx_free_bufs(txq);

        nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
        nb_commit = nb_pkts;
        if (unlikely(nb_pkts == 0))
                return 0;

        tx_id = txq->tx_tail;
        txdp = &txq->tx_ring[tx_id];
        txep = &txq->sw_ring[tx_id];

        txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);

        n = (uint16_t)(txq->nb_tx_desc - tx_id);
        if (nb_commit >= n) {
                ice_tx_backlog_entry(txep, tx_pkts, n);

                for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
                        ice_vtx1(txdp, *tx_pkts, flags);

                ice_vtx1(txdp, *tx_pkts++, rs);

                nb_commit = (uint16_t)(nb_commit - n);

                tx_id = 0;
                txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

                /* wrap around to the start of the ring */
                txdp = &txq->tx_ring[tx_id];
                txep = &txq->sw_ring[tx_id];
        }

        ice_tx_backlog_entry(txep, tx_pkts, nb_commit);

        ice_vtx(txdp, tx_pkts, nb_commit, flags);

        tx_id = (uint16_t)(tx_id + nb_commit);
        if (tx_id > txq->tx_next_rs) {
                txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
                        rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
                                         ICE_TXD_QW1_CMD_S);
                txq->tx_next_rs =
                        (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
        }

        txq->tx_tail = tx_id;

        ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);

        return nb_pkts;
}

uint16_t
ice_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                  uint16_t nb_pkts)
{
        uint16_t nb_tx = 0;
        struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;

        while (nb_pkts) {
                uint16_t ret, num;

                num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
                ret = ice_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx], num);
                nb_tx += ret;
                nb_pkts -= ret;
                if (ret < num)
                        break;
        }

        return nb_tx;
}

int __rte_cold
ice_rxq_vec_setup(struct ice_rx_queue *rxq)
{
        if (!rxq)
                return -1;

        rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs_vec;
        return ice_rxq_vec_setup_default(rxq);
}

int __rte_cold
ice_txq_vec_setup(struct ice_tx_queue __rte_unused *txq)
{
        if (!txq)
                return -1;

        txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs_vec;
        return 0;
}

int __rte_cold
ice_rx_vec_dev_check(struct rte_eth_dev *dev)
{
        return ice_rx_vec_dev_check_default(dev);
}

int __rte_cold
ice_tx_vec_dev_check(struct rte_eth_dev *dev)
{
        return ice_tx_vec_dev_check_default(dev);
}