/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 * Copyright(c) 2017 IBM Corporation.
 */

#include <stdint.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>

#include "base/i40e_prototype.h"
#include "base/i40e_type.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_rxtx_vec_common.h"

#include <rte_altivec.h>

#pragma GCC diagnostic ignored "-Wcast-qual"

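/* Refill the RX descriptor ring with fresh mbufs, RTE_I40E_RXQ_REARM_THRESH
 * at a time, writing each buffer's DMA address into the descriptors the
 * hardware will use next. On mempool exhaustion the descriptors are pointed
 * at the queue's fake_mbuf so the hardware never sees a stale address.
 */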
static inline void
i40e_rxq_rearm(struct i40e_rx_queue *rxq)
{
        int i;
        uint16_t rx_id;
        volatile union i40e_rx_desc *rxdp;

        struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
        struct rte_mbuf *mb0, *mb1;

        vector unsigned long hdr_room = (vector unsigned long){
                                                RTE_PKTMBUF_HEADROOM,
                                                RTE_PKTMBUF_HEADROOM};
        vector unsigned long dma_addr0, dma_addr1;

        rxdp = rxq->rx_ring + rxq->rxrearm_start;

        /* Pull 'n' more MBUFs into the software ring */
        if (rte_mempool_get_bulk(rxq->mp,
                                 (void *)rxep,
                                 RTE_I40E_RXQ_REARM_THRESH) < 0) {
                if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >=
                    rxq->nb_rx_desc) {
                        dma_addr0 = (vector unsigned long){};
                        for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
                                rxep[i].mbuf = &rxq->fake_mbuf;
                                vec_st(dma_addr0, 0,
                                       (vector unsigned long *)&rxdp[i].read);
                        }
                }
                rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
                        RTE_I40E_RXQ_REARM_THRESH;
                return;
        }

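        /* Worked example (a sketch, assuming the default 128-byte
         * RTE_PKTMBUF_HEADROOM): for an mbuf whose buf_iova is 0x10000,
         * vec_ld() below pulls {buf_addr, buf_iova} into one vector,
         * vec_mergel() duplicates the IOVA into both 64-bit lanes, and
         * the headroom add yields {0x10080, 0x10080}, which fills the
         * packet and header address fields of the descriptor in a
         * single store.
         */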
        /* Initialize the mbufs in a vector, processing 2 mbufs per loop */
        for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
                vector unsigned long vaddr0, vaddr1;
                uintptr_t p0, p1;

                mb0 = rxep[0].mbuf;
                mb1 = rxep[1].mbuf;

                /* Flush mbuf with pkt template.
                 * Data to be rearmed is 6 bytes long.
                 * RX will overwrite the ol_flags that come next anyway,
                 * so overwrite the whole 8 bytes with one store:
                 * 6 bytes of rearm_data plus the first 2 bytes of ol_flags.
                 */
                p0 = (uintptr_t)&mb0->rearm_data;
                *(uint64_t *)p0 = rxq->mbuf_initializer;
                p1 = (uintptr_t)&mb1->rearm_data;
                *(uint64_t *)p1 = rxq->mbuf_initializer;

                /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
                vaddr0 = vec_ld(0, (vector unsigned long *)&mb0->buf_addr);
                vaddr1 = vec_ld(0, (vector unsigned long *)&mb1->buf_addr);

                /* convert pa to dma_addr for both the hdr and data fields */
                dma_addr0 = vec_mergel(vaddr0, vaddr0);
                dma_addr1 = vec_mergel(vaddr1, vaddr1);

                /* add headroom to pa values */
                dma_addr0 = vec_add(dma_addr0, hdr_room);
                dma_addr1 = vec_add(dma_addr1, hdr_room);

                /* flush desc with pa dma_addr */
                vec_st(dma_addr0, 0, (vector unsigned long *)&rxdp++->read);
                vec_st(dma_addr1, 0, (vector unsigned long *)&rxdp++->read);
        }

        rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
        if (rxq->rxrearm_start >= rxq->nb_rx_desc)
                rxq->rxrearm_start = 0;

        rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH;

        rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
                             (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));

        /* Update the tail pointer on the NIC */
        I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
}

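/* Translate the status bits of four descriptors into mbuf ol_flags.
 * The technique: mask the interesting status bits, shift them down into
 * small indices, then use vec_perm() as a 16-entry byte lookup table
 * (the *_flags constants below) mapping each index to its flag byte.
 */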
static inline void
desc_to_olflags_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts)
{
        vector unsigned int vlan0, vlan1, rss, l3_l4e;

        /* mask everything except RSS, flow director and VLAN flags
         * bit2 is for VLAN tag, bit11 for flow director indication
         * bit13:12 for RSS indication.
         */
        const vector unsigned int rss_vlan_msk = (vector unsigned int){
                        (int32_t)0x1c03804, (int32_t)0x1c03804,
                        (int32_t)0x1c03804, (int32_t)0x1c03804};

        /* map rss and vlan type to rss hash and vlan flag */
        const vector unsigned char vlan_flags = (vector unsigned char){
                        0, 0, 0, 0,
                        PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0, 0, 0,
                        0, 0, 0, 0,
                        0, 0, 0, 0};

        const vector unsigned char rss_flags = (vector unsigned char){
                        0, PKT_RX_FDIR, 0, 0,
                        0, 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH | PKT_RX_FDIR,
                        0, 0, 0, 0,
                        0, 0, 0, 0};

        const vector unsigned char l3_l4e_flags = (vector unsigned char){
                        0,
                        PKT_RX_IP_CKSUM_BAD,
                        PKT_RX_L4_CKSUM_BAD,
                        PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
                        PKT_RX_EIP_CKSUM_BAD,
                        PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
                        PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
                        PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
                                             | PKT_RX_IP_CKSUM_BAD,
                        0, 0, 0, 0, 0, 0, 0, 0};

        vlan0 = (vector unsigned int)vec_mergel(descs[0], descs[1]);
        vlan1 = (vector unsigned int)vec_mergel(descs[2], descs[3]);
        vlan0 = (vector unsigned int)vec_mergeh(vlan0, vlan1);

        vlan1 = vec_and(vlan0, rss_vlan_msk);
        vlan0 = (vector unsigned int)vec_perm(vlan_flags,
                                        (vector unsigned char){},
                                        *(vector unsigned char *)&vlan1);

        rss = vec_sr(vlan1, (vector unsigned int){11, 11, 11, 11});
        rss = (vector unsigned int)vec_perm(rss_flags, (vector unsigned char){},
                                        *(vector unsigned char *)&rss);

        l3_l4e = vec_sr(vlan1, (vector unsigned int){22, 22, 22, 22});
        l3_l4e = (vector unsigned int)vec_perm(l3_l4e_flags,
                                        (vector unsigned char){},
                                        *(vector unsigned char *)&l3_l4e);

        vlan0 = vec_or(vlan0, rss);
        vlan0 = vec_or(vlan0, l3_l4e);

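        /* the lane order below undoes the mergeh/mergel shuffles above so
         * each packet receives its own flags word
         */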
        rx_pkts[0]->ol_flags = (uint64_t)vlan0[2];
        rx_pkts[1]->ol_flags = (uint64_t)vlan0[3];
        rx_pkts[2]->ol_flags = (uint64_t)vlan0[0];
        rx_pkts[3]->ol_flags = (uint64_t)vlan0[1];
}

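/* The descriptor keeps the packet length in the upper bits of qword1;
 * shifting left by PKTLEN_SHIFT aligns the field on a 16-bit boundary so
 * the RX shuffle mask can pick it out as two whole bytes.
 */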
#define PKTLEN_SHIFT     10

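/* Pull the 8-bit packet type field out of qword1 of each descriptor (the
 * 30-bit shift drops the status/error bits below it) and translate it
 * through the adapter's ptype lookup table into an RTE_PTYPE_* value.
 */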
static inline void
desc_to_ptype_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts,
                uint32_t *ptype_tbl)
{
        vector unsigned long ptype0 = vec_mergel(descs[0], descs[1]);
        vector unsigned long ptype1 = vec_mergel(descs[2], descs[3]);

        ptype0 = vec_sr(ptype0, (vector unsigned long){30, 30});
        ptype1 = vec_sr(ptype1, (vector unsigned long){30, 30});

        rx_pkts[0]->packet_type =
                ptype_tbl[(*(vector unsigned char *)&ptype0)[0]];
        rx_pkts[1]->packet_type =
                ptype_tbl[(*(vector unsigned char *)&ptype0)[8]];
        rx_pkts[2]->packet_type =
                ptype_tbl[(*(vector unsigned char *)&ptype1)[0]];
        rx_pkts[3]->packet_type =
                ptype_tbl[(*(vector unsigned char *)&ptype1)[8]];
}

 /* Notice:
  * - if nb_pkts < RTE_I40E_DESCS_PER_LOOP, no packets are received
  * - if nb_pkts > RTE_I40E_VPMD_RX_BURST, only RTE_I40E_VPMD_RX_BURST
  *   descriptors are scanned for DD bits
  */
static inline uint16_t
_recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
                   uint16_t nb_pkts, uint8_t *split_packet)
{
        volatile union i40e_rx_desc *rxdp;
        struct i40e_rx_entry *sw_ring;
        uint16_t nb_pkts_recd;
        int pos;
        uint64_t var;
        vector unsigned char shuf_msk;
        uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;

        vector unsigned short crc_adjust = (vector unsigned short){
                0, 0,         /* ignore pkt_type field */
                rxq->crc_len, /* sub crc on pkt_len */
                0,            /* ignore high-16bits of pkt_len */
                rxq->crc_len, /* sub crc on data_len */
                0, 0, 0       /* ignore non-length fields */
                };
        vector unsigned long dd_check, eop_check;

        /* nb_pkts must not exceed RTE_I40E_MAX_RX_BURST */
        nb_pkts = RTE_MIN(nb_pkts, RTE_I40E_MAX_RX_BURST);

        /* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP */
        nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP);

        /* Just the act of getting into the function from the application is
         * going to cost about 7 cycles
         */
        rxdp = rxq->rx_ring + rxq->rx_tail;

        rte_prefetch0(rxdp);

        /* See if we need to rearm the RX queue - gives the prefetch a bit
         * of time to act
         */
        if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
                i40e_rxq_rearm(rxq);

        /* Before we start moving massive data around, check to see if
         * there is actually a packet available
         */
        if (!(rxdp->wb.qword1.status_error_len &
                        rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
                return 0;

        /* 4 packets DD mask */
        dd_check = (vector unsigned long){0x0000000100000001ULL,
                                          0x0000000100000001ULL};

        /* 4 packets EOP mask */
        eop_check = (vector unsigned long){0x0000000200000002ULL,
                                           0x0000000200000002ULL};

        /* mask to shuffle from desc. to mbuf */
        shuf_msk = (vector unsigned char){
                0xFF, 0xFF,   /* pkt_type set as unknown */
                0xFF, 0xFF,   /* pkt_type set as unknown */
                14, 15,       /* octet 15~14, low 16 bits pkt_len */
                0xFF, 0xFF,   /* skip high 16 bits pkt_len, zero out */
                14, 15,       /* octet 15~14, 16 bits data_len */
                2, 3,         /* octet 2~3, low 16 bits vlan_macip */
                4, 5, 6, 7    /* octet 4~7, 32bits rss */
                };
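        /* After the vec_perm() with shuf_msk, the 16 bytes written to
         * rx_descriptor_fields1 match the mbuf layout: packet_type (zeroed
         * here, filled in later by desc_to_ptype_v), pkt_len, data_len,
         * vlan_tci and the 32-bit RSS hash.
         */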

        /* Cache is empty -> need to scan the buffer rings, but first move
         * the next 'n' mbufs into the cache
         */
        sw_ring = &rxq->sw_ring[rxq->rx_tail];

        /* A. load 4 packets' descriptors in each loop
         * [A*. mask out 4 unused dirty fields in desc]
         * B. copy 4 mbuf pointers from sw_ring to rx_pkts
         * C. count the number of DD bits among the 4 packets
         * [C*. extract the end-of-packet bit, if requested]
         * D. fill info. from desc to mbuf
         */

        for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
                        pos += RTE_I40E_DESCS_PER_LOOP,
                        rxdp += RTE_I40E_DESCS_PER_LOOP) {
                vector unsigned long descs[RTE_I40E_DESCS_PER_LOOP];
                vector unsigned char pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
                vector unsigned short staterr, sterr_tmp1, sterr_tmp2;
                vector unsigned long mbp1, mbp2; /* two mbuf pointers
                                                  * in one vector register
                                                  */

                /* B.1 load 2 mbuf pointers */
                mbp1 = *(vector unsigned long *)&sw_ring[pos];
                /* Read desc statuses backwards to avoid race condition */
                /* A.1 load 4 pkts desc */
                descs[3] = *(vector unsigned long *)(rxdp + 3);
                rte_compiler_barrier();

                /* B.2 copy 2 mbuf pointers into rx_pkts */
                *(vector unsigned long *)&rx_pkts[pos] = mbp1;

                /* B.1 load 2 more mbuf pointers */
                mbp2 = *(vector unsigned long *)&sw_ring[pos + 2];

                descs[2] = *(vector unsigned long *)(rxdp + 2);
                rte_compiler_barrier();
                /* A.1 load the remaining 2 pkts desc */
                descs[1] = *(vector unsigned long *)(rxdp + 1);
                rte_compiler_barrier();
                descs[0] = *(vector unsigned long *)(rxdp);

                /* B.2 copy 2 mbuf pointers into rx_pkts */
                *(vector unsigned long *)&rx_pkts[pos + 2] = mbp2;

                if (split_packet) {
                        rte_mbuf_prefetch_part2(rx_pkts[pos]);
                        rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
                        rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
                        rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
                }

                /* avoid compiler reorder optimization */
                rte_compiler_barrier();

                /* pkt 3,4 shift the pktlen field to be 16-bit aligned */
                const vector unsigned int len3 = vec_sl(
                        vec_ld(0, (vector unsigned int *)&descs[3]),
                        (vector unsigned int){0, 0, 0, PKTLEN_SHIFT});

                const vector unsigned int len2 = vec_sl(
                        vec_ld(0, (vector unsigned int *)&descs[2]),
                        (vector unsigned int){0, 0, 0, PKTLEN_SHIFT});

                /* merge the now-aligned packet length fields back in */
                descs[3] = (vector unsigned long)len3;
                descs[2] = (vector unsigned long)len2;

                /* D.1 pkt 3,4 convert format from desc to pktmbuf */
                pkt_mb4 = vec_perm((vector unsigned char)descs[3],
                                   (vector unsigned char){}, shuf_msk);
                pkt_mb3 = vec_perm((vector unsigned char)descs[2],
                                   (vector unsigned char){}, shuf_msk);

                /* C.1 4=>2 filter staterr info only */
                sterr_tmp2 = vec_mergel((vector unsigned short)descs[3],
                                        (vector unsigned short)descs[2]);
                /* C.1 4=>2 filter staterr info only */
                sterr_tmp1 = vec_mergel((vector unsigned short)descs[1],
                                        (vector unsigned short)descs[0]);
                /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
                pkt_mb4 = (vector unsigned char)vec_sub(
                                (vector unsigned short)pkt_mb4, crc_adjust);
                pkt_mb3 = (vector unsigned char)vec_sub(
                                (vector unsigned short)pkt_mb3, crc_adjust);

                /* pkt 1,2 shift the pktlen field to be 16-bit aligned */
                const vector unsigned int len1 = vec_sl(
                        vec_ld(0, (vector unsigned int *)&descs[1]),
                        (vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
                const vector unsigned int len0 = vec_sl(
                        vec_ld(0, (vector unsigned int *)&descs[0]),
                        (vector unsigned int){0, 0, 0, PKTLEN_SHIFT});

                /* merge the now-aligned packet length fields back in */
                descs[1] = (vector unsigned long)len1;
                descs[0] = (vector unsigned long)len0;

                /* D.1 pkt 1,2 convert format from desc to pktmbuf */
                pkt_mb2 = vec_perm((vector unsigned char)descs[1],
                                   (vector unsigned char){}, shuf_msk);
                pkt_mb1 = vec_perm((vector unsigned char)descs[0],
                                   (vector unsigned char){}, shuf_msk);

                /* C.2 get 4 pkts staterr value */
                staterr = (vector unsigned short)vec_mergeh(
                                sterr_tmp1, sterr_tmp2);

                /* D.3 copy final 3,4 data to rx_pkts */
                vec_st(pkt_mb4, 0,
                 (vector unsigned char *)&rx_pkts[pos + 3]
                        ->rx_descriptor_fields1
                );
                vec_st(pkt_mb3, 0,
                 (vector unsigned char *)&rx_pkts[pos + 2]
                        ->rx_descriptor_fields1
                );

                /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
                pkt_mb2 = (vector unsigned char)vec_sub(
                                (vector unsigned short)pkt_mb2, crc_adjust);
                pkt_mb1 = (vector unsigned char)vec_sub(
                                (vector unsigned short)pkt_mb1, crc_adjust);

                /* C* extract and record EOP bit */
                if (split_packet) {
                        vector unsigned char eop_shuf_mask =
                                (vector unsigned char){
                                        0xFF, 0xFF, 0xFF, 0xFF,
                                        0xFF, 0xFF, 0xFF, 0xFF,
                                        0xFF, 0xFF, 0xFF, 0xFF,
                                        0x04, 0x0C, 0x00, 0x08
                                };

                        /* and with mask to extract bits, flipping 1-0 */
                        vector unsigned char eop_bits = vec_and(
                                (vector unsigned char)vec_nor(staterr, staterr),
                                (vector unsigned char)eop_check);
                        /* the staterr values are not in order, which the
                         * count of DD bits doesn't care about. However, for
                         * end-of-packet tracking we do care, so shuffle.
                         * This also compresses the 32-bit values to 8-bit.
                         */
                        eop_bits = vec_perm(eop_bits, (vector unsigned char){},
                                            eop_shuf_mask);
                        /* store the resulting 32-bit value */
                        *split_packet = (vec_ld(0,
                                         (vector unsigned int *)&eop_bits))[0];
                        split_packet += RTE_I40E_DESCS_PER_LOOP;

                        /* zero-out next pointers */
                        rx_pkts[pos]->next = NULL;
                        rx_pkts[pos + 1]->next = NULL;
                        rx_pkts[pos + 2]->next = NULL;
                        rx_pkts[pos + 3]->next = NULL;
                }

                /* C.3 mask staterr down to the DD bits */
                staterr = vec_and(staterr, (vector unsigned short)dd_check);

                /* D.3 copy final 1,2 data to rx_pkts */
                vec_st(pkt_mb2, 0,
                 (vector unsigned char *)&rx_pkts[pos + 1]
                        ->rx_descriptor_fields1
                );
                vec_st(pkt_mb1, 0,
                 (vector unsigned char *)&rx_pkts[pos]->rx_descriptor_fields1
                );
                desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
                desc_to_olflags_v(descs, &rx_pkts[pos]);

                /* C.4 count DD bits to get the number of received packets */
                var = __builtin_popcountll((vec_ld(0,
                        (vector unsigned long *)&staterr)[0]));
                nb_pkts_recd += var;
                if (likely(var != RTE_I40E_DESCS_PER_LOOP))
                        break;
        }

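        /* Note: the tail wrap-around below relies on nb_rx_desc being a
         * power of two, which the vector RX queue setup is expected to
         * guarantee.
         */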
        /* Update our internal tail pointer */
        rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
        rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
        rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);

        return nb_pkts_recd;
}

 /* Notice:
  * - if nb_pkts < RTE_I40E_DESCS_PER_LOOP, no packets are received
  * - if nb_pkts > RTE_I40E_VPMD_RX_BURST, only RTE_I40E_VPMD_RX_BURST
  *   descriptors are scanned for DD bits
  */
uint16_t
i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
                   uint16_t nb_pkts)
{
        return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
}

 /* vPMD receive routine that reassembles scattered packets
  * Notice:
  * - if nb_pkts < RTE_I40E_DESCS_PER_LOOP, no packets are received
  * - if nb_pkts > RTE_I40E_VPMD_RX_BURST, only RTE_I40E_VPMD_RX_BURST
  *   descriptors are scanned for DD bits
  */
uint16_t
i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
                             uint16_t nb_pkts)
{
        struct i40e_rx_queue *rxq = rx_queue;
        uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};

        /* get some new buffers */
        uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
                        split_flags);
        if (nb_bufs == 0)
                return 0;

        /* happy day case, full burst + no packets to be joined */
        const uint64_t *split_fl64 = (uint64_t *)split_flags;

        if (rxq->pkt_first_seg == NULL &&
            split_fl64[0] == 0 && split_fl64[1] == 0 &&
            split_fl64[2] == 0 && split_fl64[3] == 0)
                return nb_bufs;

        /* reassemble any packets that need reassembly */
        unsigned int i = 0;

        if (!rxq->pkt_first_seg) {
                /* find the first split flag, and only reassemble
                 * the packets from that point on
                 */
                while (i < nb_bufs && !split_flags[i])
                        i++;
                if (i == nb_bufs)
                        return nb_bufs;
        }
        return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
                &split_flags[i]);
}

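/* Build one 16-byte TX descriptor: the low qword carries the buffer DMA
 * address, the high qword packs the descriptor type, command flags and
 * data length, and the pair is written with a single vector store.
 */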
static inline void
vtx1(volatile struct i40e_tx_desc *txdp,
        struct rte_mbuf *pkt, uint64_t flags)
{
        uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA |
                ((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT) |
                ((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));

        vector unsigned long descriptor = (vector unsigned long){
                pkt->buf_iova + pkt->data_off, high_qw};
        *(vector unsigned long *)txdp = descriptor;
}

static inline void
vtx(volatile struct i40e_tx_desc *txdp,
        struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
{
        int i;

        for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
                vtx1(txdp, *pkt, flags);
}

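/* Vector TX burst: free completed descriptors when nb_tx_free drops below
 * tx_free_thresh, then write the new descriptors (in two runs when the
 * burst wraps past the end of the ring), setting the RS bit once per
 * tx_rs_thresh window so the hardware reports completions back.
 */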
uint16_t
i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                          uint16_t nb_pkts)
{
        struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
        volatile struct i40e_tx_desc *txdp;
        struct i40e_tx_entry *txep;
        uint16_t n, nb_commit, tx_id;
        uint64_t flags = I40E_TD_CMD;
        uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;
        int i;

        /* crossing the tx_rs_thresh boundary is not allowed */
        nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);

        if (txq->nb_tx_free < txq->tx_free_thresh)
                i40e_tx_free_bufs(txq);

        nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
        nb_commit = nb_pkts;
        if (unlikely(nb_pkts == 0))
                return 0;

        tx_id = txq->tx_tail;
        txdp = &txq->tx_ring[tx_id];
        txep = &txq->sw_ring[tx_id];

        txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);

        n = (uint16_t)(txq->nb_tx_desc - tx_id);
        if (nb_commit >= n) {
                tx_backlog_entry(txep, tx_pkts, n);

                for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
                        vtx1(txdp, *tx_pkts, flags);

                vtx1(txdp, *tx_pkts++, rs);

                nb_commit = (uint16_t)(nb_commit - n);

                tx_id = 0;
                txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

                /* avoid reaching the end of the ring */
                txdp = &txq->tx_ring[tx_id];
                txep = &txq->sw_ring[tx_id];
        }

        tx_backlog_entry(txep, tx_pkts, nb_commit);

        vtx(txdp, tx_pkts, nb_commit, flags);

        tx_id = (uint16_t)(tx_id + nb_commit);
        if (tx_id > txq->tx_next_rs) {
                txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
                        rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
                                                I40E_TXD_QW1_CMD_SHIFT);
                txq->tx_next_rs =
                        (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
        }

        txq->tx_tail = tx_id;

        I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);

        return nb_pkts;
}

void __rte_cold
i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
{
        _i40e_rx_queue_release_mbufs_vec(rxq);
}

int __rte_cold
i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
{
        return i40e_rxq_vec_setup_default(rxq);
}

int __rte_cold
i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
{
        return 0;
}

int __rte_cold
i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
{
        return i40e_rx_vec_dev_conf_condition_check_default(dev);
}