/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include "ice_rxtx_vec_common.h"
#include "ice_rxtx_common_avx.h"

#include <rte_vect.h>

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif

static __rte_always_inline void
ice_rxq_rearm(struct ice_rx_queue *rxq)
{
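        /* delegate to the common rearm helper; the boolean argument selects
         * the AVX-512 variant, which this AVX2 path does not use
         */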
        return ice_rxq_rearm_common(rxq, false);
}

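/**
 * Compute PKT_RX_FDIR / PKT_RX_FDIR_ID ol_flags for 8 descriptors at once:
 * a flow_id of 0xFFFFFFFF marks a flow-director mismatch, so the flags are
 * set only in the lanes whose descriptors carry a real flow ID.
 */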
static __rte_always_inline __m256i
ice_flex_rxd_to_fdir_flags_vec_avx2(const __m256i fdir_id0_7)
{
#define FDID_MIS_MAGIC 0xFFFFFFFF
        RTE_BUILD_BUG_ON(PKT_RX_FDIR != (1 << 2));
        RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
        const __m256i pkt_fdir_bit = _mm256_set1_epi32(PKT_RX_FDIR |
                        PKT_RX_FDIR_ID);
        /* desc->flow_id field == 0xFFFFFFFF means fdir mismatch */
        const __m256i fdir_mis_mask = _mm256_set1_epi32(FDID_MIS_MAGIC);
        __m256i fdir_mask = _mm256_cmpeq_epi32(fdir_id0_7,
                        fdir_mis_mask);
        /* XOR with all-ones inverts fdir_mask, so matched lanes become all-ones */
        fdir_mask = _mm256_xor_si256(fdir_mask, fdir_mis_mask);
        const __m256i fdir_flags = _mm256_and_si256(fdir_mask, pkt_fdir_bit);

        return fdir_flags;
}

static __rte_always_inline uint16_t
_ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
                            uint16_t nb_pkts, uint8_t *split_packet,
                            bool offload)
{
#define ICE_DESCS_PER_LOOP_AVX 8

        const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
        const __m256i mbuf_init = _mm256_set_epi64x(0, 0,
                        0, rxq->mbuf_initializer);
        struct ice_rx_entry *sw_ring = &rxq->sw_ring[rxq->rx_tail];
        volatile union ice_rx_flex_desc *rxdp = rxq->rx_ring + rxq->rx_tail;
        const int avx_aligned = ((rxq->rx_tail & 1) == 0);

        rte_prefetch0(rxdp);

        /* nb_pkts has to be floor-aligned to ICE_DESCS_PER_LOOP_AVX */
        nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, ICE_DESCS_PER_LOOP_AVX);

        /* See if we need to rearm the RX queue - gives the prefetch a bit
         * of time to act
         */
        if (rxq->rxrearm_nb > ICE_RXQ_REARM_THRESH)
                ice_rxq_rearm(rxq);

        /* Before we start moving massive data around, check to see if
         * there is actually a packet available
         */
        if (!(rxdp->wb.status_error0 &
                        rte_cpu_to_le_32(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
                return 0;

        /* constants used in processing loop */
        const __m256i crc_adjust =
                _mm256_set_epi16
                        (/* first descriptor */
                         0, 0, 0,       /* ignore non-length fields */
                         -rxq->crc_len, /* sub crc on data_len */
                         0,             /* ignore high-16bits of pkt_len */
                         -rxq->crc_len, /* sub crc on pkt_len */
                         0, 0,          /* ignore pkt_type field */
                         /* second descriptor */
                         0, 0, 0,       /* ignore non-length fields */
                         -rxq->crc_len, /* sub crc on data_len */
                         0,             /* ignore high-16bits of pkt_len */
                         -rxq->crc_len, /* sub crc on pkt_len */
                         0, 0           /* ignore pkt_type field */
                        );

        /* 8 packets DD mask, LSB in each 32-bit value */
        const __m256i dd_check = _mm256_set1_epi32(1);

        /* 8 packets EOP mask, second-LSB in each 32-bit value */
        const __m256i eop_check = _mm256_slli_epi32(dd_check,
                        ICE_RX_DESC_STATUS_EOF_S);

        /* mask to shuffle from desc. to mbuf (2 descriptors) */
        const __m256i shuf_msk =
                _mm256_set_epi8
                        (/* first descriptor */
                         0xFF, 0xFF,
                         0xFF, 0xFF,    /* rss hash parsed separately */
                         11, 10,        /* octet 10~11, 16 bits vlan_macip */
                         5, 4,          /* octet 4~5, 16 bits data_len */
                         0xFF, 0xFF,    /* skip hi 16 bits pkt_len, zero out */
                         5, 4,          /* octet 4~5, 16 bits pkt_len */
                         0xFF, 0xFF,    /* pkt_type set as unknown */
                         0xFF, 0xFF,    /* pkt_type set as unknown */
                         /* second descriptor */
                         0xFF, 0xFF,
                         0xFF, 0xFF,    /* rss hash parsed separately */
                         11, 10,        /* octet 10~11, 16 bits vlan_macip */
                         5, 4,          /* octet 4~5, 16 bits data_len */
                         0xFF, 0xFF,    /* skip hi 16 bits pkt_len, zero out */
                         5, 4,          /* octet 4~5, 16 bits pkt_len */
                         0xFF, 0xFF,    /* pkt_type set as unknown */
                         0xFF, 0xFF     /* pkt_type set as unknown */
                        );
        /**
         * compile-time check the above crc and shuffle layout is correct.
         * NOTE: the first field (lowest address) is given last in set_epi
         * calls above.
         */
        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
                        offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
                        offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
                        offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
                        offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);

        /* Status/Error flag masks */
        /**
         * mask everything except Checksum Reports, RSS indication
         * and VLAN indication.
         * bits 7:4 are the IP/L4/outer checksum error bits.
         * bit12 is for RSS indication.
         * bit13 is for VLAN indication.
         */
        const __m256i flags_mask =
                 _mm256_set1_epi32((0xF << 4) | (1 << 12) | (1 << 13));
        /**
         * data to be shuffled by the result of the flags mask shifted by 4
         * bits.  This gives us the l3_l4 flags.
         */
        const __m256i l3_l4_flags_shuf =
                _mm256_set_epi8((PKT_RX_OUTER_L4_CKSUM_BAD >> 20 |
                 PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
                  PKT_RX_IP_CKSUM_BAD) >> 1,
                (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
                 PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
                (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
                 PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
                (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
                 PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
                (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD  |
                 PKT_RX_IP_CKSUM_BAD) >> 1,
                (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD  |
                 PKT_RX_IP_CKSUM_GOOD) >> 1,
                (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
                 PKT_RX_IP_CKSUM_BAD) >> 1,
                (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
                 PKT_RX_IP_CKSUM_GOOD) >> 1,
                (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
                 PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
                (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
                 PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
                (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
                 PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
                (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
                 PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
                (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
                 PKT_RX_IP_CKSUM_BAD) >> 1,
                (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
                 PKT_RX_IP_CKSUM_GOOD) >> 1,
                (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
                 PKT_RX_IP_CKSUM_BAD) >> 1,
                (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
                 PKT_RX_IP_CKSUM_GOOD) >> 1,
                /**
                 * second 128-bits
                 * shift right 20 bits to use the low two bits to indicate
                 * outer checksum status
                 * shift right 1 bit to make sure it does not exceed 255
                 */
                (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
                 PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
                (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
                 PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
                (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
                 PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
                (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
                 PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
                (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD  |
                 PKT_RX_IP_CKSUM_BAD) >> 1,
                (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD  |
                 PKT_RX_IP_CKSUM_GOOD) >> 1,
                (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
                 PKT_RX_IP_CKSUM_BAD) >> 1,
                (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
                 PKT_RX_IP_CKSUM_GOOD) >> 1,
                (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
                 PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
                (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
                 PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
                (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
                 PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
                (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
                 PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
                (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
                 PKT_RX_IP_CKSUM_BAD) >> 1,
                (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
                 PKT_RX_IP_CKSUM_GOOD) >> 1,
                (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
                 PKT_RX_IP_CKSUM_BAD) >> 1,
                (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
                 PKT_RX_IP_CKSUM_GOOD) >> 1);
        const __m256i cksum_mask =
                 _mm256_set1_epi32(PKT_RX_IP_CKSUM_MASK |
                                   PKT_RX_L4_CKSUM_MASK |
                                   PKT_RX_OUTER_IP_CKSUM_BAD |
                                   PKT_RX_OUTER_L4_CKSUM_MASK);
        /**
         * data to be shuffled by result of flag mask, shifted down 12.
         * If RSS(bit12)/VLAN(bit13) are set,
         * shuffle moves appropriate flags in place.
         */
        const __m256i rss_vlan_flags_shuf = _mm256_set_epi8(0, 0, 0, 0,
                        0, 0, 0, 0,
                        0, 0, 0, 0,
                        PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
                        PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
                        PKT_RX_RSS_HASH, 0,
                        /* end of the upper 128-bits; the low half repeats */
                        0, 0, 0, 0,
                        0, 0, 0, 0,
                        0, 0, 0, 0,
                        PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
                        PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
                        PKT_RX_RSS_HASH, 0);

        RTE_SET_USED(avx_aligned); /* for 32B descriptors we don't use this */

        uint16_t i, received;

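        /* Main loop: each iteration loads 8 descriptors, converts them to
         * mbuf metadata with shuffles, gathers the status/flag bits, and
         * stops early if fewer than 8 descriptors were done (DD bit clear).
         */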
        for (i = 0, received = 0; i < nb_pkts;
             i += ICE_DESCS_PER_LOOP_AVX,
             rxdp += ICE_DESCS_PER_LOOP_AVX) {
                /* step 1, copy over 8 mbuf pointers to rx_pkts array */
                _mm256_storeu_si256((void *)&rx_pkts[i],
                                    _mm256_loadu_si256((void *)&sw_ring[i]));
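                /* on 64-bit, 8 mbuf pointers occupy 64B, so a second 32B
                 * copy is needed; on 32-bit they all fit in the copy above
                 */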
#ifdef RTE_ARCH_X86_64
                _mm256_storeu_si256
                        ((void *)&rx_pkts[i + 4],
                         _mm256_loadu_si256((void *)&sw_ring[i + 4]));
#endif

                __m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7;
#ifdef RTE_LIBRTE_ICE_16BYTE_RX_DESC
                /* for AVX we need alignment otherwise loads are not atomic */
                if (avx_aligned) {
                        /* load in descriptors, 2 at a time, in reverse order */
                        raw_desc6_7 = _mm256_load_si256((void *)(rxdp + 6));
                        rte_compiler_barrier();
                        raw_desc4_5 = _mm256_load_si256((void *)(rxdp + 4));
                        rte_compiler_barrier();
                        raw_desc2_3 = _mm256_load_si256((void *)(rxdp + 2));
                        rte_compiler_barrier();
                        raw_desc0_1 = _mm256_load_si256((void *)(rxdp + 0));
                } else
#endif
                {
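                        /* load descriptors back-to-front, with compiler
                         * barriers between the loads, so that a descriptor is
                         * never observed as done before the ones before it
                         */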
                        const __m128i raw_desc7 =
                                _mm_load_si128((void *)(rxdp + 7));
                        rte_compiler_barrier();
                        const __m128i raw_desc6 =
                                _mm_load_si128((void *)(rxdp + 6));
                        rte_compiler_barrier();
                        const __m128i raw_desc5 =
                                _mm_load_si128((void *)(rxdp + 5));
                        rte_compiler_barrier();
                        const __m128i raw_desc4 =
                                _mm_load_si128((void *)(rxdp + 4));
                        rte_compiler_barrier();
                        const __m128i raw_desc3 =
                                _mm_load_si128((void *)(rxdp + 3));
                        rte_compiler_barrier();
                        const __m128i raw_desc2 =
                                _mm_load_si128((void *)(rxdp + 2));
                        rte_compiler_barrier();
                        const __m128i raw_desc1 =
                                _mm_load_si128((void *)(rxdp + 1));
                        rte_compiler_barrier();
                        const __m128i raw_desc0 =
                                _mm_load_si128((void *)(rxdp + 0));

                        raw_desc6_7 =
                                _mm256_inserti128_si256
                                        (_mm256_castsi128_si256(raw_desc6),
                                         raw_desc7, 1);
                        raw_desc4_5 =
                                _mm256_inserti128_si256
                                        (_mm256_castsi128_si256(raw_desc4),
                                         raw_desc5, 1);
                        raw_desc2_3 =
                                _mm256_inserti128_si256
                                        (_mm256_castsi128_si256(raw_desc2),
                                         raw_desc3, 1);
                        raw_desc0_1 =
                                _mm256_inserti128_si256
                                        (_mm256_castsi128_si256(raw_desc0),
                                         raw_desc1, 1);
                }

                if (split_packet) {
                        int j;

                        for (j = 0; j < ICE_DESCS_PER_LOOP_AVX; j++)
                                rte_mbuf_prefetch_part2(rx_pkts[i + j]);
                }

                /**
                 * convert descriptors 4-7 into mbufs, re-arrange fields.
                 * Then write into the mbuf.
                 */
                __m256i mb6_7 = _mm256_shuffle_epi8(raw_desc6_7, shuf_msk);
                __m256i mb4_5 = _mm256_shuffle_epi8(raw_desc4_5, shuf_msk);

                mb6_7 = _mm256_add_epi16(mb6_7, crc_adjust);
                mb4_5 = _mm256_add_epi16(mb4_5, crc_adjust);
                /**
                 * to get packet types: the ptype field sits in bits 16-25
                 * of each 128-bit descriptor lane
                 */
                const __m256i ptype_mask =
                        _mm256_set1_epi16(ICE_RX_FLEX_DESC_PTYPE_M);
                const __m256i ptypes6_7 =
                        _mm256_and_si256(raw_desc6_7, ptype_mask);
                const __m256i ptypes4_5 =
                        _mm256_and_si256(raw_desc4_5, ptype_mask);
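                /* the ptype is 16-bit word 1 of each descriptor, i.e. word 1
                 * (even descriptor) and word 9 (odd descriptor) of the pair
                 */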
                const uint16_t ptype7 = _mm256_extract_epi16(ptypes6_7, 9);
                const uint16_t ptype6 = _mm256_extract_epi16(ptypes6_7, 1);
                const uint16_t ptype5 = _mm256_extract_epi16(ptypes4_5, 9);
                const uint16_t ptype4 = _mm256_extract_epi16(ptypes4_5, 1);

                mb6_7 = _mm256_insert_epi32(mb6_7, ptype_tbl[ptype7], 4);
                mb6_7 = _mm256_insert_epi32(mb6_7, ptype_tbl[ptype6], 0);
                mb4_5 = _mm256_insert_epi32(mb4_5, ptype_tbl[ptype5], 4);
                mb4_5 = _mm256_insert_epi32(mb4_5, ptype_tbl[ptype4], 0);
                /* merge the status bits into one register */
                const __m256i status4_7 = _mm256_unpackhi_epi32(raw_desc6_7,
                                raw_desc4_5);

                /**
                 * convert descriptors 0-3 into mbufs, re-arrange fields.
                 * Then write into the mbuf.
                 */
                __m256i mb2_3 = _mm256_shuffle_epi8(raw_desc2_3, shuf_msk);
                __m256i mb0_1 = _mm256_shuffle_epi8(raw_desc0_1, shuf_msk);

                mb2_3 = _mm256_add_epi16(mb2_3, crc_adjust);
                mb0_1 = _mm256_add_epi16(mb0_1, crc_adjust);
                /**
                 * to get packet types: the ptype field sits in bits 16-25
                 * of each 128-bit descriptor lane
                 */
                const __m256i ptypes2_3 =
                        _mm256_and_si256(raw_desc2_3, ptype_mask);
                const __m256i ptypes0_1 =
                        _mm256_and_si256(raw_desc0_1, ptype_mask);
                const uint16_t ptype3 = _mm256_extract_epi16(ptypes2_3, 9);
                const uint16_t ptype2 = _mm256_extract_epi16(ptypes2_3, 1);
                const uint16_t ptype1 = _mm256_extract_epi16(ptypes0_1, 9);
                const uint16_t ptype0 = _mm256_extract_epi16(ptypes0_1, 1);

                mb2_3 = _mm256_insert_epi32(mb2_3, ptype_tbl[ptype3], 4);
                mb2_3 = _mm256_insert_epi32(mb2_3, ptype_tbl[ptype2], 0);
                mb0_1 = _mm256_insert_epi32(mb0_1, ptype_tbl[ptype1], 4);
                mb0_1 = _mm256_insert_epi32(mb0_1, ptype_tbl[ptype0], 0);
                /* merge the status bits into one register */
                const __m256i status0_3 = _mm256_unpackhi_epi32(raw_desc2_3,
                                                                raw_desc0_1);

                /**
                 * take the two sets of status bits and merge to one
                 * After merge, the packets status flags are in the
                 * order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6]
                 */
                __m256i status0_7 = _mm256_unpacklo_epi64(status4_7,
                                                          status0_3);
                __m256i mbuf_flags = _mm256_set1_epi32(0);

                if (offload) {
                        /* now do flag manipulation */

                        /* get only flag/error bits we want */
                        const __m256i flag_bits =
                                _mm256_and_si256(status0_7, flags_mask);
                        /**
                         * shuffle the l3/l4 error bits into flag values,
                         * undo the shift pre-applied in l3_l4_flags_shuf,
                         * and finally mask out extra bits
                         */
                        __m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
                                        _mm256_srli_epi32(flag_bits, 4));
                        l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);

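                        /* the two outer-L4 bits were stored shifted down by
                         * 20 to fit the shuffle byte; move them back up to
                         * the PKT_RX_OUTER_L4_CKSUM_* bit positions (21/22)
                         */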
                        __m256i l4_outer_mask = _mm256_set1_epi32(0x6);
                        __m256i l4_outer_flags =
                                        _mm256_and_si256(l3_l4_flags, l4_outer_mask);
                        l4_outer_flags = _mm256_slli_epi32(l4_outer_flags, 20);

                        __m256i l3_l4_mask = _mm256_set1_epi32(~0x6);

                        l3_l4_flags = _mm256_and_si256(l3_l4_flags, l3_l4_mask);
                        l3_l4_flags = _mm256_or_si256(l3_l4_flags, l4_outer_flags);
                        l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);
                        /* set rss and vlan flags */
                        const __m256i rss_vlan_flag_bits =
                                _mm256_srli_epi32(flag_bits, 12);
                        const __m256i rss_vlan_flags =
                                _mm256_shuffle_epi8(rss_vlan_flags_shuf,
                                                    rss_vlan_flag_bits);

                        /* merge flags */
                        mbuf_flags = _mm256_or_si256(l3_l4_flags,
                                                     rss_vlan_flags);
                }

                if (rxq->fdir_enabled) {
                        const __m256i fdir_id4_7 =
                                _mm256_unpackhi_epi32(raw_desc6_7, raw_desc4_5);

                        const __m256i fdir_id0_3 =
                                _mm256_unpackhi_epi32(raw_desc2_3, raw_desc0_1);

                        const __m256i fdir_id0_7 =
                                _mm256_unpackhi_epi64(fdir_id4_7, fdir_id0_3);

                        const __m256i fdir_flags =
                                ice_flex_rxd_to_fdir_flags_vec_avx2(fdir_id0_7);

                        /* merge with fdir_flags */
                        mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_flags);

                        /* write to mbuf: have to use scalar store here */
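                        /* the unpack steps leave the 8 flow IDs in lane
                         * order 3,7,2,6,1,5,0,4 (hi->lo), hence the
                         * extract indices below
                         */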
                        rx_pkts[i + 0]->hash.fdir.hi =
                                _mm256_extract_epi32(fdir_id0_7, 3);

                        rx_pkts[i + 1]->hash.fdir.hi =
                                _mm256_extract_epi32(fdir_id0_7, 7);

                        rx_pkts[i + 2]->hash.fdir.hi =
                                _mm256_extract_epi32(fdir_id0_7, 2);

                        rx_pkts[i + 3]->hash.fdir.hi =
                                _mm256_extract_epi32(fdir_id0_7, 6);

                        rx_pkts[i + 4]->hash.fdir.hi =
                                _mm256_extract_epi32(fdir_id0_7, 1);

                        rx_pkts[i + 5]->hash.fdir.hi =
                                _mm256_extract_epi32(fdir_id0_7, 5);

                        rx_pkts[i + 6]->hash.fdir.hi =
                                _mm256_extract_epi32(fdir_id0_7, 0);

                        rx_pkts[i + 7]->hash.fdir.hi =
                                _mm256_extract_epi32(fdir_id0_7, 4);
                } /* if() on fdir_enabled */

                if (offload) {
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
                        /**
                         * the second 16B of each 32B descriptor must be
                         * loaded for RSS hash parsing; entering this path
                         * costs extra loads and hurts performance
                         */
                        if (rxq->vsi->adapter->pf.dev_data->dev_conf.rxmode.offloads &
                                        RTE_ETH_RX_OFFLOAD_RSS_HASH) {
                                /* load bottom half of every 32B desc */
                                const __m128i raw_desc_bh7 =
                                        _mm_load_si128
                                                ((void *)(&rxdp[7].wb.status_error1));
                                rte_compiler_barrier();
                                const __m128i raw_desc_bh6 =
                                        _mm_load_si128
                                                ((void *)(&rxdp[6].wb.status_error1));
                                rte_compiler_barrier();
                                const __m128i raw_desc_bh5 =
                                        _mm_load_si128
                                                ((void *)(&rxdp[5].wb.status_error1));
                                rte_compiler_barrier();
                                const __m128i raw_desc_bh4 =
                                        _mm_load_si128
                                                ((void *)(&rxdp[4].wb.status_error1));
                                rte_compiler_barrier();
                                const __m128i raw_desc_bh3 =
                                        _mm_load_si128
                                                ((void *)(&rxdp[3].wb.status_error1));
                                rte_compiler_barrier();
                                const __m128i raw_desc_bh2 =
                                        _mm_load_si128
                                                ((void *)(&rxdp[2].wb.status_error1));
                                rte_compiler_barrier();
                                const __m128i raw_desc_bh1 =
                                        _mm_load_si128
                                                ((void *)(&rxdp[1].wb.status_error1));
                                rte_compiler_barrier();
                                const __m128i raw_desc_bh0 =
                                        _mm_load_si128
                                                ((void *)(&rxdp[0].wb.status_error1));

                                __m256i raw_desc_bh6_7 =
                                        _mm256_inserti128_si256
                                                (_mm256_castsi128_si256(raw_desc_bh6),
                                                raw_desc_bh7, 1);
                                __m256i raw_desc_bh4_5 =
                                        _mm256_inserti128_si256
                                                (_mm256_castsi128_si256(raw_desc_bh4),
                                                raw_desc_bh5, 1);
                                __m256i raw_desc_bh2_3 =
                                        _mm256_inserti128_si256
                                                (_mm256_castsi128_si256(raw_desc_bh2),
                                                raw_desc_bh3, 1);
                                __m256i raw_desc_bh0_1 =
                                        _mm256_inserti128_si256
                                                (_mm256_castsi128_si256(raw_desc_bh0),
                                                raw_desc_bh1, 1);

                                /**
                                 * to shift the 32b RSS hash value to the
                                 * highest 32b of each 128b before mask
                                 */
                                __m256i rss_hash6_7 =
                                        _mm256_slli_epi64(raw_desc_bh6_7, 32);
                                __m256i rss_hash4_5 =
                                        _mm256_slli_epi64(raw_desc_bh4_5, 32);
                                __m256i rss_hash2_3 =
                                        _mm256_slli_epi64(raw_desc_bh2_3, 32);
                                __m256i rss_hash0_1 =
                                        _mm256_slli_epi64(raw_desc_bh0_1, 32);

                                __m256i rss_hash_msk =
                                        _mm256_set_epi32(0xFFFFFFFF, 0, 0, 0,
                                                         0xFFFFFFFF, 0, 0, 0);

                                rss_hash6_7 = _mm256_and_si256
                                                (rss_hash6_7, rss_hash_msk);
                                rss_hash4_5 = _mm256_and_si256
                                                (rss_hash4_5, rss_hash_msk);
                                rss_hash2_3 = _mm256_and_si256
                                                (rss_hash2_3, rss_hash_msk);
                                rss_hash0_1 = _mm256_and_si256
                                                (rss_hash0_1, rss_hash_msk);

                                mb6_7 = _mm256_or_si256(mb6_7, rss_hash6_7);
                                mb4_5 = _mm256_or_si256(mb4_5, rss_hash4_5);
                                mb2_3 = _mm256_or_si256(mb2_3, rss_hash2_3);
                                mb0_1 = _mm256_or_si256(mb0_1, rss_hash0_1);
                        } /* if() on RSS hash parsing */
#endif
                }

                /**
                 * At this point, we have the 8 sets of flags in the low 16-bits
                 * of each 32-bit value in mbuf_flags.
                 * We want to extract these, and merge them with the mbuf init
                 * data so we can do a single write to the mbuf to set the flags
                 * and all the other initialization fields. Extracting the
                 * appropriate flags means that we have to do a shift and blend
                 * for each mbuf before we do the write. However, we can also
                 * add in the previously computed rx_descriptor fields to
                 * make a single 256-bit write per mbuf
                 */
                /* check the structure matches expectations */
                RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
                                 offsetof(struct rte_mbuf, rearm_data) + 8);
                RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
                                 RTE_ALIGN(offsetof(struct rte_mbuf,
                                                    rearm_data),
                                           16));
                /* build up data and do writes */
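                /* blend mask 0x04 selects 32-bit lane 2 of mbuf_init, which
                 * is ol_flags (rearm_data + 8); the shifts below move each
                 * packet's flags into that lane first
                 */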
                __m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5,
                        rearm6, rearm7;
                rearm6 = _mm256_blend_epi32(mbuf_init,
                                            _mm256_slli_si256(mbuf_flags, 8),
                                            0x04);
                rearm4 = _mm256_blend_epi32(mbuf_init,
                                            _mm256_slli_si256(mbuf_flags, 4),
                                            0x04);
                rearm2 = _mm256_blend_epi32(mbuf_init, mbuf_flags, 0x04);
                rearm0 = _mm256_blend_epi32(mbuf_init,
                                            _mm256_srli_si256(mbuf_flags, 4),
                                            0x04);
                /* permute to add in the rx_descriptor e.g. rss fields */
                rearm6 = _mm256_permute2f128_si256(rearm6, mb6_7, 0x20);
                rearm4 = _mm256_permute2f128_si256(rearm4, mb4_5, 0x20);
                rearm2 = _mm256_permute2f128_si256(rearm2, mb2_3, 0x20);
                rearm0 = _mm256_permute2f128_si256(rearm0, mb0_1, 0x20);
                /* write to mbuf */
                _mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->rearm_data,
                                    rearm6);
                _mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->rearm_data,
                                    rearm4);
                _mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->rearm_data,
                                    rearm2);
                _mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->rearm_data,
                                    rearm0);

                /* repeat for the odd mbufs */
                const __m256i odd_flags =
                        _mm256_castsi128_si256
                                (_mm256_extracti128_si256(mbuf_flags, 1));
                rearm7 = _mm256_blend_epi32(mbuf_init,
                                            _mm256_slli_si256(odd_flags, 8),
                                            0x04);
                rearm5 = _mm256_blend_epi32(mbuf_init,
                                            _mm256_slli_si256(odd_flags, 4),
                                            0x04);
                rearm3 = _mm256_blend_epi32(mbuf_init, odd_flags, 0x04);
                rearm1 = _mm256_blend_epi32(mbuf_init,
                                            _mm256_srli_si256(odd_flags, 4),
                                            0x04);
                /* since odd mbufs are already in hi 128-bits use blend */
                rearm7 = _mm256_blend_epi32(rearm7, mb6_7, 0xF0);
                rearm5 = _mm256_blend_epi32(rearm5, mb4_5, 0xF0);
                rearm3 = _mm256_blend_epi32(rearm3, mb2_3, 0xF0);
                rearm1 = _mm256_blend_epi32(rearm1, mb0_1, 0xF0);
                /* again write to mbufs */
                _mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->rearm_data,
                                    rearm7);
                _mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->rearm_data,
                                    rearm5);
                _mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->rearm_data,
                                    rearm3);
                _mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->rearm_data,
                                    rearm1);

                /* extract and record EOP bit */
                if (split_packet) {
                        const __m128i eop_mask =
                                _mm_set1_epi16(1 << ICE_RX_DESC_STATUS_EOF_S);
                        const __m256i eop_bits256 = _mm256_and_si256(status0_7,
                                                                     eop_check);
                        /* pack status bits into a single 128-bit register */
                        const __m128i eop_bits =
                                _mm_packus_epi32
                                        (_mm256_castsi256_si128(eop_bits256),
                                         _mm256_extractf128_si256(eop_bits256,
                                                                  1));
                        /**
                         * flip the bits and mask: the EOP bit becomes a
                         * split-packet bit, i.e. !EOP rather than EOP
                         */
                        __m128i split_bits = _mm_andnot_si128(eop_bits,
                                        eop_mask);
                        /**
                         * eop bits are out of order, so we need to shuffle them
                         * back into order again. In doing so, only use low 8
                         * bits, which acts like another pack instruction
                         * The original order is (hi->lo): 1,3,5,7,0,2,4,6
                         * [Since we use epi8, the 16-bit positions are
                         * multiplied by 2 in the eop_shuffle value.]
                         */
                        __m128i eop_shuffle =
                                _mm_set_epi8(/* zero hi 64b */
                                             0xFF, 0xFF, 0xFF, 0xFF,
                                             0xFF, 0xFF, 0xFF, 0xFF,
                                             /* move values to lo 64b */
                                             8, 0, 10, 2,
                                             12, 4, 14, 6);
                        split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle);
                        *(uint64_t *)split_packet =
                                _mm_cvtsi128_si64(split_bits);
                        split_packet += ICE_DESCS_PER_LOOP_AVX;
                }

                /* perform dd_check */
                status0_7 = _mm256_and_si256(status0_7, dd_check);
                status0_7 = _mm256_packs_epi32(status0_7,
                                               _mm256_setzero_si256());

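                /* after packing, each completed descriptor contributes one
                 * set bit per 64-bit half, so two popcounts give the number
                 * of packets received in this iteration
                 */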
                uint64_t burst = __builtin_popcountll
                                        (_mm_cvtsi128_si64
                                                (_mm256_extracti128_si256
                                                        (status0_7, 1)));
                burst += __builtin_popcountll
                                (_mm_cvtsi128_si64
                                        (_mm256_castsi256_si128(status0_7)));
                received += burst;
                if (burst != ICE_DESCS_PER_LOOP_AVX)
                        break;
        }

        /* update tail pointers */
        rxq->rx_tail += received;
        rxq->rx_tail &= (rxq->nb_rx_desc - 1);
        if ((rxq->rx_tail & 1) == 1 && received > 1) { /* keep rx_tail even for aligned AVX2 loads */
                rxq->rx_tail--;
                received--;
        }
        rxq->rxrearm_nb += received;
        return received;
}

/**
 * Notice:
 * - if nb_pkts < ICE_DESCS_PER_LOOP, no packets are received
 */
uint16_t
ice_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
                       uint16_t nb_pkts)
{
        return _ice_recv_raw_pkts_vec_avx2(rx_queue, rx_pkts,
                                           nb_pkts, NULL, false);
}

uint16_t
ice_recv_pkts_vec_avx2_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
                               uint16_t nb_pkts)
{
        return _ice_recv_raw_pkts_vec_avx2(rx_queue, rx_pkts,
                                           nb_pkts, NULL, true);
}

/**
 * vPMD receive routine that reassembles a single burst of 32 scattered packets
 * Notice:
 * - if nb_pkts < ICE_DESCS_PER_LOOP, no packets are received
 */
static __rte_always_inline uint16_t
ice_recv_scattered_burst_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
                                  uint16_t nb_pkts, bool offload)
{
        struct ice_rx_queue *rxq = rx_queue;
        uint8_t split_flags[ICE_VPMD_RX_BURST] = {0};
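        /* one flag byte per received buffer; a non-zero byte marks a buffer
         * that does not end its packet and so needs reassembly
         */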

        /* get some new buffers */
        uint16_t nb_bufs = _ice_recv_raw_pkts_vec_avx2(rxq, rx_pkts, nb_pkts,
                                                       split_flags, offload);
        if (nb_bufs == 0)
                return 0;

        /* happy day case, full burst + no packets to be joined */
        const uint64_t *split_fl64 = (uint64_t *)split_flags;

        if (!rxq->pkt_first_seg &&
            split_fl64[0] == 0 && split_fl64[1] == 0 &&
            split_fl64[2] == 0 && split_fl64[3] == 0)
                return nb_bufs;

        /* reassemble any packets that need reassembly */
        unsigned int i = 0;

        if (!rxq->pkt_first_seg) {
                /* find the first split flag and reassemble only from there */
                while (i < nb_bufs && !split_flags[i])
                        i++;
                if (i == nb_bufs)
                        return nb_bufs;
                rxq->pkt_first_seg = rx_pkts[i];
        }
        return i + ice_rx_reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
                                             &split_flags[i]);
}

/**
 * vPMD receive routine that reassembles scattered packets.
 * Main receive routine that can handle arbitrary burst sizes.
 * Notice:
 * - if nb_pkts < ICE_DESCS_PER_LOOP, no packets are received
 */
static __rte_always_inline uint16_t
ice_recv_scattered_pkts_vec_avx2_common(void *rx_queue,
                                        struct rte_mbuf **rx_pkts,
                                        uint16_t nb_pkts,
                                        bool offload)
{
        uint16_t retval = 0;

        while (nb_pkts > ICE_VPMD_RX_BURST) {
                uint16_t burst = ice_recv_scattered_burst_vec_avx2(rx_queue,
                                rx_pkts + retval, ICE_VPMD_RX_BURST, offload);
                retval += burst;
                nb_pkts -= burst;
                if (burst < ICE_VPMD_RX_BURST)
                        return retval;
        }
        return retval + ice_recv_scattered_burst_vec_avx2(rx_queue,
                                rx_pkts + retval, nb_pkts, offload);
}

uint16_t
ice_recv_scattered_pkts_vec_avx2(void *rx_queue,
                                 struct rte_mbuf **rx_pkts,
                                 uint16_t nb_pkts)
{
        return ice_recv_scattered_pkts_vec_avx2_common(rx_queue,
                                                       rx_pkts,
                                                       nb_pkts,
                                                       false);
}

uint16_t
ice_recv_scattered_pkts_vec_avx2_offload(void *rx_queue,
                                         struct rte_mbuf **rx_pkts,
                                         uint16_t nb_pkts)
{
        return ice_recv_scattered_pkts_vec_avx2_common(rx_queue,
                                                       rx_pkts,
                                                       nb_pkts,
                                                       true);
}

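/**
 * Write a single 16B Tx data descriptor: the low quadword carries the DMA
 * address of the packet data, the high quadword packs the descriptor type,
 * command flags and buffer size (plus any offload fields).
 */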
static __rte_always_inline void
ice_vtx1(volatile struct ice_tx_desc *txdp,
         struct rte_mbuf *pkt, uint64_t flags, bool offload)
{
        uint64_t high_qw =
                (ICE_TX_DESC_DTYPE_DATA |
                 ((uint64_t)flags  << ICE_TXD_QW1_CMD_S) |
                 ((uint64_t)pkt->data_len << ICE_TXD_QW1_TX_BUF_SZ_S));
        if (offload)
                ice_txd_enable_offload(pkt, &high_qw);

        __m128i descriptor = _mm_set_epi64x(high_qw,
                                pkt->buf_iova + pkt->data_off);
        _mm_store_si128((__m128i *)txdp, descriptor);
}

static __rte_always_inline void
ice_vtx(volatile struct ice_tx_desc *txdp,
        struct rte_mbuf **pkt, uint16_t nb_pkts,  uint64_t flags, bool offload)
{
        const uint64_t hi_qw_tmpl = (ICE_TX_DESC_DTYPE_DATA |
                        ((uint64_t)flags  << ICE_TXD_QW1_CMD_S));

        /* if txdp is not 32-byte aligned, do one descriptor to align */
        if (((uintptr_t)txdp & 0x1F) != 0 && nb_pkts != 0) {
                ice_vtx1(txdp, *pkt, flags, offload);
                nb_pkts--, txdp++, pkt++;
        }

        /* do four at a time while possible, two descriptors per 256-bit store */
        for (; nb_pkts > 3; txdp += 4, pkt += 4, nb_pkts -= 4) {
                uint64_t hi_qw3 =
                        hi_qw_tmpl |
                        ((uint64_t)pkt[3]->data_len <<
                         ICE_TXD_QW1_TX_BUF_SZ_S);
                if (offload)
                        ice_txd_enable_offload(pkt[3], &hi_qw3);
                uint64_t hi_qw2 =
                        hi_qw_tmpl |
                        ((uint64_t)pkt[2]->data_len <<
                         ICE_TXD_QW1_TX_BUF_SZ_S);
                if (offload)
                        ice_txd_enable_offload(pkt[2], &hi_qw2);
                uint64_t hi_qw1 =
                        hi_qw_tmpl |
                        ((uint64_t)pkt[1]->data_len <<
                         ICE_TXD_QW1_TX_BUF_SZ_S);
                if (offload)
                        ice_txd_enable_offload(pkt[1], &hi_qw1);
                uint64_t hi_qw0 =
                        hi_qw_tmpl |
                        ((uint64_t)pkt[0]->data_len <<
                         ICE_TXD_QW1_TX_BUF_SZ_S);
                if (offload)
                        ice_txd_enable_offload(pkt[0], &hi_qw0);

                __m256i desc2_3 =
                        _mm256_set_epi64x
                                (hi_qw3,
                                 pkt[3]->buf_iova + pkt[3]->data_off,
                                 hi_qw2,
                                 pkt[2]->buf_iova + pkt[2]->data_off);
                __m256i desc0_1 =
                        _mm256_set_epi64x
                                (hi_qw1,
                                 pkt[1]->buf_iova + pkt[1]->data_off,
                                 hi_qw0,
                                 pkt[0]->buf_iova + pkt[0]->data_off);
                _mm256_store_si256((void *)(txdp + 2), desc2_3);
                _mm256_store_si256((void *)txdp, desc0_1);
        }

        /* do any last ones */
        while (nb_pkts) {
                ice_vtx1(txdp, *pkt, flags, offload);
                txdp++, pkt++, nb_pkts--;
        }
}

static __rte_always_inline uint16_t
ice_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
                              uint16_t nb_pkts, bool offload)
{
        struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
        volatile struct ice_tx_desc *txdp;
        struct ice_tx_entry *txep;
        uint16_t n, nb_commit, tx_id;
        uint64_t flags = ICE_TD_CMD;
        uint64_t rs = ICE_TX_DESC_CMD_RS | ICE_TD_CMD;

        /* crossing the tx_rs_thresh boundary is not allowed */
        nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);

        if (txq->nb_tx_free < txq->tx_free_thresh)
                ice_tx_free_bufs_vec(txq);

        nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
        if (unlikely(nb_pkts == 0))
                return 0;

        tx_id = txq->tx_tail;
        txdp = &txq->tx_ring[tx_id];
        txep = &txq->sw_ring[tx_id];

        txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);

        n = (uint16_t)(txq->nb_tx_desc - tx_id);
        if (nb_commit >= n) {
                ice_tx_backlog_entry(txep, tx_pkts, n);

                ice_vtx(txdp, tx_pkts, n - 1, flags, offload);
                tx_pkts += (n - 1);
                txdp += (n - 1);

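                /* the descriptor that fills the ring gets the RS bit so the
                 * HW reports completion at the wrap point
                 */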
                ice_vtx1(txdp, *tx_pkts++, rs, offload);

                nb_commit = (uint16_t)(nb_commit - n);

                tx_id = 0;
                txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

                /* avoid running past the end of the ring */
                txdp = &txq->tx_ring[tx_id];
                txep = &txq->sw_ring[tx_id];
        }

        ice_tx_backlog_entry(txep, tx_pkts, nb_commit);

        ice_vtx(txdp, tx_pkts, nb_commit, flags, offload);

        tx_id = (uint16_t)(tx_id + nb_commit);
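        /* if the burst advanced past tx_next_rs, set the RS bit on that
         * descriptor so completions keep arriving every tx_rs_thresh packets
         */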
        if (tx_id > txq->tx_next_rs) {
                txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
                        rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
                                         ICE_TXD_QW1_CMD_S);
                txq->tx_next_rs =
                        (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
        }

        txq->tx_tail = tx_id;

        ICE_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);

        return nb_pkts;
}

static __rte_always_inline uint16_t
ice_xmit_pkts_vec_avx2_common(void *tx_queue, struct rte_mbuf **tx_pkts,
                              uint16_t nb_pkts, bool offload)
{
        uint16_t nb_tx = 0;
        struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;

        while (nb_pkts) {
                uint16_t ret, num;

                num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
                ret = ice_xmit_fixed_burst_vec_avx2(tx_queue, &tx_pkts[nb_tx],
                                                    num, offload);
                nb_tx += ret;
                nb_pkts -= ret;
                if (ret < num)
                        break;
        }

        return nb_tx;
}

uint16_t
ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
                       uint16_t nb_pkts)
{
        return ice_xmit_pkts_vec_avx2_common(tx_queue, tx_pkts, nb_pkts, false);
}

uint16_t
ice_xmit_pkts_vec_avx2_offload(void *tx_queue, struct rte_mbuf **tx_pkts,
                               uint16_t nb_pkts)
{
        return ice_xmit_pkts_vec_avx2_common(tx_queue, tx_pkts, nb_pkts, true);
}