/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   Copyright(c) 2017 IBM Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>

#include "base/i40e_prototype.h"
#include "base/i40e_type.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_rxtx_vec_common.h"

#include <altivec.h>

#pragma GCC diagnostic ignored "-Wcast-qual"
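
/* Bulk-allocate fresh mbufs for the descriptors consumed so far, write
 * their DMA addresses into the RX ring (two descriptors per iteration),
 * and hand the updated tail over to the NIC.
 */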
static inline void
i40e_rxq_rearm(struct i40e_rx_queue *rxq)
{
	int i;
	uint16_t rx_id;
	volatile union i40e_rx_desc *rxdp;

	struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
	struct rte_mbuf *mb0, *mb1;

	vector unsigned long hdr_room = (vector unsigned long){
			RTE_PKTMBUF_HEADROOM,
			RTE_PKTMBUF_HEADROOM};
	vector unsigned long dma_addr0, dma_addr1;

	rxdp = rxq->rx_ring + rxq->rxrearm_start;

	/* Pull 'n' more MBUFs into the software ring */
	if (rte_mempool_get_bulk(rxq->mp,
				 (void *)rxep,
				 RTE_I40E_RXQ_REARM_THRESH) < 0) {
		if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >=
		    rxq->nb_rx_desc) {
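			/* Allocation failed and the ring is nearly starved:
			 * park the next few descriptors on the permanent
			 * fake_mbuf so the NIC never reads a stale buffer
			 * address, then retry on a later call.
			 */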
			dma_addr0 = (vector unsigned long){};
			for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
				rxep[i].mbuf = &rxq->fake_mbuf;
				vec_st(dma_addr0, 0,
				       (vector unsigned long *)&rxdp[i].read);
			}
		}
		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
			RTE_I40E_RXQ_REARM_THRESH;
		return;
	}

	/* Initialize the mbufs in vector, processing two mbufs per iteration */
	for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
		vector unsigned long vaddr0, vaddr1;
		uintptr_t p0, p1;

		mb0 = rxep[0].mbuf;
		mb1 = rxep[1].mbuf;

		/* Flush mbuf with pkt template.
		 * Data to be rearmed is 6 bytes long.
		 * Though, RX will overwrite ol_flags that are coming next
		 * anyway. So overwrite whole 8 bytes with one load:
		 * 6 bytes of rearm_data plus first 2 bytes of ol_flags.
		 */
		p0 = (uintptr_t)&mb0->rearm_data;
		*(uint64_t *)p0 = rxq->mbuf_initializer;
		p1 = (uintptr_t)&mb1->rearm_data;
		*(uint64_t *)p1 = rxq->mbuf_initializer;

		/* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
		vaddr0 = vec_ld(0, (vector unsigned long *)&mb0->buf_addr);
		vaddr1 = vec_ld(0, (vector unsigned long *)&mb1->buf_addr);

		/* convert pa to dma_addr hdr/data */
		dma_addr0 = vec_mergel(vaddr0, vaddr0);
		dma_addr1 = vec_mergel(vaddr1, vaddr1);

		/* add headroom to pa values */
		dma_addr0 = vec_add(dma_addr0, hdr_room);
		dma_addr1 = vec_add(dma_addr1, hdr_room);

		/* flush desc with pa dma_addr */
		vec_st(dma_addr0, 0, (vector unsigned long *)&rxdp++->read);
		vec_st(dma_addr1, 0, (vector unsigned long *)&rxdp++->read);
	}

	rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
		rxq->rxrearm_start = 0;

	rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH;

	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
			   (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));

	/* Update the tail pointer on the NIC */
	I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
}

/* Handling the offload flags (olflags) field takes computation
 * time when receiving packets. Therefore we provide a flag to disable
 * the processing of the olflags field when they are not needed. This
 * gives improved performance, at the cost of losing the offload info
 * in the received packet.
 */
#ifdef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE
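
/* Build the ol_flags word for four packets at once: mask the descriptors'
 * status/error bits and use vec_perm as a byte-wise table lookup into the
 * VLAN, RSS/FDIR and L3/L4 checksum flag tables below.
 */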
static inline void
desc_to_olflags_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts)
{
	vector unsigned int vlan0, vlan1, rss, l3_l4e;

	/* mask everything except RSS, flow director and VLAN flags
	 * bit2 is for VLAN tag, bit11 for flow director indication
	 * bit13:12 for RSS indication.
	 */
	const vector unsigned int rss_vlan_msk = (vector unsigned int){
			(int32_t)0x1c03804, (int32_t)0x1c03804,
			(int32_t)0x1c03804, (int32_t)0x1c03804};

	/* map rss and vlan type to rss hash and vlan flag */
	const vector unsigned char vlan_flags = (vector unsigned char){
			0, 0, 0, 0,
			PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED, 0, 0, 0,
			0, 0, 0, 0,
			0, 0, 0, 0};

	const vector unsigned char rss_flags = (vector unsigned char){
			0, PKT_RX_FDIR, 0, 0,
			0, 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH | PKT_RX_FDIR,
			0, 0, 0, 0,
			0, 0, 0, 0};

	const vector unsigned char l3_l4e_flags = (vector unsigned char){
			0,
			PKT_RX_IP_CKSUM_BAD,
			PKT_RX_L4_CKSUM_BAD,
			PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
			PKT_RX_EIP_CKSUM_BAD,
			PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
			PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
			PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
				| PKT_RX_IP_CKSUM_BAD,
			0, 0, 0, 0, 0, 0, 0, 0};
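
	/* vec_perm with a zero-filled second operand acts as a byte-wise
	 * table lookup: the masked/shifted status bits computed below index
	 * into the flag tables above, one flag byte per packet.
	 */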
	vlan0 = (vector unsigned int)vec_mergel(descs[0], descs[1]);
	vlan1 = (vector unsigned int)vec_mergel(descs[2], descs[3]);
	vlan0 = (vector unsigned int)vec_mergeh(vlan0, vlan1);

	vlan1 = vec_and(vlan0, rss_vlan_msk);
	vlan0 = (vector unsigned int)vec_perm(vlan_flags,
					      (vector unsigned char){},
					      *(vector unsigned char *)&vlan1);

	rss = vec_sr(vlan1, (vector unsigned int){11, 11, 11, 11});
	rss = (vector unsigned int)vec_perm(rss_flags,
					    (vector unsigned char){},
					    *(vector unsigned char *)&rss);

	l3_l4e = vec_sr(vlan1, (vector unsigned int){22, 22, 22, 22});
	l3_l4e = (vector unsigned int)vec_perm(l3_l4e_flags,
					       (vector unsigned char){},
					       *(vector unsigned char *)&l3_l4e);

	vlan0 = vec_or(vlan0, rss);
	vlan0 = vec_or(vlan0, l3_l4e);

	rx_pkts[0]->ol_flags = (uint64_t)vlan0[2];
	rx_pkts[1]->ol_flags = (uint64_t)vlan0[3];
	rx_pkts[2]->ol_flags = (uint64_t)vlan0[0];
	rx_pkts[3]->ol_flags = (uint64_t)vlan0[1];
}
#else
#define desc_to_olflags_v(desc, rx_pkts) do {} while (0)
#endif
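
/* The packet length occupies bits 38-51 of the descriptor's qword1;
 * shifting the high 32-bit word left by PKTLEN_SHIFT aligns the length on
 * a 16-bit boundary so the shuffle mask can pick it out as two bytes.
 */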
#define PKTLEN_SHIFT 10
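
/* Extract the 8-bit packet type index from qword1 of each descriptor and
 * translate it to an RTE packet type via i40e_rxd_pkt_type_mapping().
 */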
static inline void
desc_to_ptype_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts)
{
	vector unsigned long ptype0 = vec_mergel(descs[0], descs[1]);
	vector unsigned long ptype1 = vec_mergel(descs[2], descs[3]);

	ptype0 = vec_sr(ptype0, (vector unsigned long){30, 30});
	ptype1 = vec_sr(ptype1, (vector unsigned long){30, 30});

	rx_pkts[0]->packet_type = i40e_rxd_pkt_type_mapping(
					(*(vector unsigned char *)&ptype0)[0]);
	rx_pkts[1]->packet_type = i40e_rxd_pkt_type_mapping(
					(*(vector unsigned char *)&ptype0)[8]);
	rx_pkts[2]->packet_type = i40e_rxd_pkt_type_mapping(
					(*(vector unsigned char *)&ptype1)[0]);
	rx_pkts[3]->packet_type = i40e_rxd_pkt_type_mapping(
					(*(vector unsigned char *)&ptype1)[8]);
}

/* Notice:
 * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
 * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
 *   numbers of DD bits
 */
static inline uint16_t
_recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
		   uint16_t nb_pkts, uint8_t *split_packet)
{
	volatile union i40e_rx_desc *rxdp;
	struct i40e_rx_entry *sw_ring;
	uint16_t nb_pkts_recd;
	int pos;
	uint64_t var;
	vector unsigned char shuf_msk;

	vector unsigned short crc_adjust = (vector unsigned short){
		0, 0,         /* ignore pkt_type field */
		rxq->crc_len, /* sub crc on pkt_len */
		0,            /* ignore high-16bits of pkt_len */
		rxq->crc_len, /* sub crc on data_len */
		0, 0, 0       /* ignore non-length fields */
		};
	vector unsigned long dd_check, eop_check;

	/* nb_pkts must be no greater than RTE_I40E_MAX_RX_BURST */
	nb_pkts = RTE_MIN(nb_pkts, RTE_I40E_MAX_RX_BURST);

	/* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP */
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP);

	/* Just the act of getting into the function from the application is
	 * going to cost about 7 cycles
	 */
	rxdp = rxq->rx_ring + rxq->rx_tail;

	rte_prefetch0(rxdp);

	/* See if we need to rearm the RX queue - gives the prefetch a bit
	 * of time to act
	 */
	if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
		i40e_rxq_rearm(rxq);

	/* Before we start moving massive data around, check to see if
	 * there is actually a packet available
	 */
	if (!(rxdp->wb.qword1.status_error_len &
			rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
		return 0;

	/* 4 packets DD mask */
	dd_check = (vector unsigned long){0x0000000100000001ULL,
					  0x0000000100000001ULL};

	/* 4 packets EOP mask */
	eop_check = (vector unsigned long){0x0000000200000002ULL,
					   0x0000000200000002ULL};

	/* mask to shuffle from desc. to mbuf */
	shuf_msk = (vector unsigned char){
		0xFF, 0xFF,   /* pkt_type set as unknown */
		0xFF, 0xFF,   /* pkt_type set as unknown */
		14, 15,       /* octet 15~14, low 16 bits pkt_len */
		0xFF, 0xFF,   /* skip high 16 bits pkt_len, zero out */
		14, 15,       /* octet 15~14, 16 bits data_len */
		2, 3,         /* octet 2~3, low 16 bits vlan_macip */
		4, 5, 6, 7    /* octet 4~7, 32bits rss */
		};
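
	/* The shuffled output matches the mbuf rx_descriptor_fields1 layout
	 * (packet_type, pkt_len, data_len, vlan_tci, rss hash), so one
	 * 16-byte store per packet fills all of these fields at once.
	 */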

	/* Cache is empty -> need to scan the buffer rings, but first move
	 * the next 'n' mbufs into the cache
	 */
	sw_ring = &rxq->sw_ring[rxq->rx_tail];

	/* A. load 4 packet in one loop
	 * [A*. mask out 4 unused dirty field in desc]
	 * B. copy 4 mbuf point from swring to rx_pkts
	 * C. calc the number of DD bits among the 4 packets
	 * [C*. extract the end-of-packet bit, if requested]
	 * D. fill info. from desc to mbuf
	 */

	for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
	     pos += RTE_I40E_DESCS_PER_LOOP,
	     rxdp += RTE_I40E_DESCS_PER_LOOP) {
		vector unsigned long descs[RTE_I40E_DESCS_PER_LOOP];
		vector unsigned char pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
		vector unsigned short staterr, sterr_tmp1, sterr_tmp2;
		vector unsigned long mbp1, mbp2; /* two mbuf pointers
						  * in one vector register
						  */

		/* B.1 load 1 mbuf point */
		mbp1 = *(vector unsigned long *)&sw_ring[pos];
		/* Read desc statuses backwards to avoid race condition */
		/* A.1 load 4 pkts desc */
		descs[3] = *(vector unsigned long *)(rxdp + 3);
		rte_compiler_barrier();

		/* B.2 copy 2 mbuf point into rx_pkts */
		*(vector unsigned long *)&rx_pkts[pos] = mbp1;

		/* B.1 load 1 mbuf point */
		mbp2 = *(vector unsigned long *)&sw_ring[pos + 2];

		descs[2] = *(vector unsigned long *)(rxdp + 2);
		rte_compiler_barrier();
		/* A.1 load remaining 2 pkts desc */
		descs[1] = *(vector unsigned long *)(rxdp + 1);
		rte_compiler_barrier();
		descs[0] = *(vector unsigned long *)(rxdp);

		/* B.2 copy 2 mbuf point into rx_pkts */
		*(vector unsigned long *)&rx_pkts[pos + 2] = mbp2;
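
		/* The split-packet path dirties each mbuf's second cache
		 * line (the 'next' pointer below), so prefetch it while the
		 * descriptors are being parsed.
		 */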
		if (split_packet) {
			rte_mbuf_prefetch_part2(rx_pkts[pos]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
		}

		/* avoid compiler reorder optimization */
		rte_compiler_barrier();

		/* pkt 3,4 shift the pktlen field to be 16-bit aligned */
		const vector unsigned int len3 = vec_sl(
			vec_ld(0, (vector unsigned int *)&descs[3]),
			(vector unsigned int){0, 0, 0, PKTLEN_SHIFT});

		const vector unsigned int len2 = vec_sl(
			vec_ld(0, (vector unsigned int *)&descs[2]),
			(vector unsigned int){0, 0, 0, PKTLEN_SHIFT});

		/* merge the now-aligned packet length fields back in */
		descs[3] = (vector unsigned long)len3;
		descs[2] = (vector unsigned long)len2;

		/* D.1 pkt 3,4 convert format from desc to pktmbuf */
		pkt_mb4 = vec_perm((vector unsigned char)descs[3],
				   (vector unsigned char){}, shuf_msk);
		pkt_mb3 = vec_perm((vector unsigned char)descs[2],
				   (vector unsigned char){}, shuf_msk);

		/* C.1 4=>2 filter staterr info only */
		sterr_tmp2 = vec_mergel((vector unsigned short)descs[3],
					(vector unsigned short)descs[2]);
		/* C.1 4=>2 filter staterr info only */
		sterr_tmp1 = vec_mergel((vector unsigned short)descs[1],
					(vector unsigned short)descs[0]);

		/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
		pkt_mb4 = (vector unsigned char)vec_sub(
				(vector unsigned short)pkt_mb4, crc_adjust);
		pkt_mb3 = (vector unsigned char)vec_sub(
				(vector unsigned short)pkt_mb3, crc_adjust);

		/* pkt 1,2 shift the pktlen field to be 16-bit aligned */
		const vector unsigned int len1 = vec_sl(
			vec_ld(0, (vector unsigned int *)&descs[1]),
			(vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
		const vector unsigned int len0 = vec_sl(
			vec_ld(0, (vector unsigned int *)&descs[0]),
			(vector unsigned int){0, 0, 0, PKTLEN_SHIFT});

		/* merge the now-aligned packet length fields back in */
		descs[1] = (vector unsigned long)len1;
		descs[0] = (vector unsigned long)len0;

		/* D.1 pkt 1,2 convert format from desc to pktmbuf */
		pkt_mb2 = vec_perm((vector unsigned char)descs[1],
				   (vector unsigned char){}, shuf_msk);
		pkt_mb1 = vec_perm((vector unsigned char)descs[0],
				   (vector unsigned char){}, shuf_msk);

		/* C.2 get 4 pkts staterr value */
		staterr = (vector unsigned short)vec_mergeh(
				sterr_tmp1, sterr_tmp2);

		/* D.3 copy final 3,4 data to rx_pkts */
		vec_st(pkt_mb4, 0,
		       (vector unsigned char *)&rx_pkts[pos + 3]
				->rx_descriptor_fields1);
		vec_st(pkt_mb3, 0,
		       (vector unsigned char *)&rx_pkts[pos + 2]
				->rx_descriptor_fields1);

		/* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
		pkt_mb2 = (vector unsigned char)vec_sub(
				(vector unsigned short)pkt_mb2, crc_adjust);
		pkt_mb1 = (vector unsigned char)vec_sub(
				(vector unsigned short)pkt_mb1, crc_adjust);
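
		/* A nonzero split flag marks a buffer whose EOP bit is
		 * clear, i.e. the packet continues in the next descriptor;
		 * the scattered RX path uses these flags to reassemble.
		 */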
		/* C* extract and record EOP bit */
		if (split_packet) {
			vector unsigned char eop_shuf_mask =
				(vector unsigned char){
					0xFF, 0xFF, 0xFF, 0xFF,
					0xFF, 0xFF, 0xFF, 0xFF,
					0xFF, 0xFF, 0xFF, 0xFF,
					0x04, 0x0C, 0x00, 0x08};

			/* and with mask to extract bits, flipping 1-0 */
			vector unsigned char eop_bits = vec_and(
				(vector unsigned char)vec_nor(staterr, staterr),
				(vector unsigned char)eop_check);
			/* the staterr values are not in order, as the count
			 * of DD bits doesn't care. However, for end of
			 * packet tracking, we do care, so shuffle. This also
			 * compresses the 32-bit values to 8-bit
			 */
			eop_bits = vec_perm(eop_bits, (vector unsigned char){},
					    eop_shuf_mask);
			/* store the resulting 32-bit value */
			*split_packet = (vec_ld(0,
				(vector unsigned int *)&eop_bits))[0];
			split_packet += RTE_I40E_DESCS_PER_LOOP;

			/* zero-out next pointers */
			rx_pkts[pos]->next = NULL;
			rx_pkts[pos + 1]->next = NULL;
			rx_pkts[pos + 2]->next = NULL;
			rx_pkts[pos + 3]->next = NULL;
		}

		/* C.3 calc available number of desc */
		staterr = vec_and(staterr, (vector unsigned short)dd_check);

		/* D.3 copy final 1,2 data to rx_pkts */
		vec_st(pkt_mb2, 0,
		       (vector unsigned char *)&rx_pkts[pos + 1]
				->rx_descriptor_fields1);
		vec_st(pkt_mb1, 0,
		       (vector unsigned char *)&rx_pkts[pos]
				->rx_descriptor_fields1);

		desc_to_ptype_v(descs, &rx_pkts[pos]);
		desc_to_olflags_v(descs, &rx_pkts[pos]);

		/* C.4 calc available number of desc */
		var = __builtin_popcountll((vec_ld(0,
			(vector unsigned long *)&staterr)[0]));
		nb_pkts_recd += var;
		if (likely(var != RTE_I40E_DESCS_PER_LOOP))
			break;
	}

	/* Update our internal tail pointer */
	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
	rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
	rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);

	return nb_pkts_recd;
}

/* Notice:
 * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
 * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
 *   numbers of DD bits
 */
uint16_t
i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
		   uint16_t nb_pkts)
{
	return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
}

/* vPMD receive routine that reassembles scattered packets
 * Notice:
 * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
 * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
 *   numbers of DD bits
 */
uint16_t
i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
			     uint16_t nb_pkts)
{
	struct i40e_rx_queue *rxq = rx_queue;
	uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};

	/* get some new buffers */
	uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
			split_flags);
	if (nb_bufs == 0)
		return 0;

	/* happy day case, full burst + no packets to be joined */
	const uint64_t *split_fl64 = (uint64_t *)split_flags;

	if (rxq->pkt_first_seg == NULL &&
	    split_fl64[0] == 0 && split_fl64[1] == 0 &&
	    split_fl64[2] == 0 && split_fl64[3] == 0)
		return nb_bufs;

	/* reassemble any packets that need reassembly */
	unsigned int i = 0;

	if (!rxq->pkt_first_seg) {
		/* find the first split flag, and only reassemble from there */
		while (i < nb_bufs && !split_flags[i])
			i++;
		if (i == nb_bufs)
			return nb_bufs;
	}
	return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
				      &split_flags[i]);
}
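
/* Write one 16-byte TX descriptor: buffer DMA address in the low quadword,
 * descriptor type, command flags and data length in the high quadword.
 */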
static inline void
vtx1(volatile struct i40e_tx_desc *txdp,
     struct rte_mbuf *pkt, uint64_t flags)
{
	uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA |
		((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT) |
		((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));

	vector unsigned long descriptor = (vector unsigned long){
		pkt->buf_physaddr + pkt->data_off, high_qw};
	*(vector unsigned long *)txdp = descriptor;
}

static inline void
vtx(volatile struct i40e_tx_desc *txdp,
    struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
{
	int i;

	for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
		vtx1(txdp, *pkt, flags);
}
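
/* Vector TX burst: fill descriptors with vtx()/vtx1(), request an RS
 * (report status) bit once every tx_rs_thresh descriptors, and bump the
 * hardware tail register once per burst.
 */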
uint16_t
i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
			  uint16_t nb_pkts)
{
	struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
	volatile struct i40e_tx_desc *txdp;
	struct i40e_tx_entry *txep;
	uint16_t n, nb_commit, tx_id;
	uint64_t flags = I40E_TD_CMD;
	uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;
	int i;

	/* crossing the tx_rs_thresh boundary is not allowed */
	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);

	if (txq->nb_tx_free < txq->tx_free_thresh)
		i40e_tx_free_bufs(txq);

	nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
	nb_commit = nb_pkts;
	if (unlikely(nb_pkts == 0))
		return 0;

	tx_id = txq->tx_tail;
	txdp = &txq->tx_ring[tx_id];
	txep = &txq->sw_ring[tx_id];

	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);

	n = (uint16_t)(txq->nb_tx_desc - tx_id);
	if (nb_commit >= n) {
		tx_backlog_entry(txep, tx_pkts, n);

		for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
			vtx1(txdp, *tx_pkts, flags);

		vtx1(txdp, *tx_pkts++, rs);

		nb_commit = (uint16_t)(nb_commit - n);

		tx_id = 0;
		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

		/* avoid reaching the end of the ring */
		txdp = &txq->tx_ring[tx_id];
		txep = &txq->sw_ring[tx_id];
	}

	tx_backlog_entry(txep, tx_pkts, nb_commit);

	vtx(txdp, tx_pkts, nb_commit, flags);

	tx_id = (uint16_t)(tx_id + nb_commit);
	if (tx_id > txq->tx_next_rs) {
		txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
			rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
					 I40E_TXD_QW1_CMD_SHIFT);
		txq->tx_next_rs =
			(uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
	}

	txq->tx_tail = tx_id;

	I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);

	return nb_pkts;
}

void __attribute__((cold))
i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
{
	_i40e_rx_queue_release_mbufs_vec(rxq);
}

int __attribute__((cold))
i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
{
	return i40e_rxq_vec_setup_default(rxq);
}

int __attribute__((cold))
i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
{
	return 0;
}

int __attribute__((cold))
i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
{
	return i40e_rx_vec_dev_conf_condition_check_default(dev);
}