/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   Copyright(c) 2016, Linaro Limited
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>

#include "base/i40e_prototype.h"
#include "base/i40e_type.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_rxtx_vec_common.h"

#include <arm_neon.h>

#pragma GCC diagnostic ignored "-Wcast-qual"
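
/* The pragma above silences -Wcast-qual: the vector helpers below cast the
 * volatile descriptor pointers to plain uint64_t pointers for vld1q/vst1q.
 */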
static inline void
i40e_rxq_rearm(struct i40e_rx_queue *rxq)
{
	int i;
	uint16_t rx_id;
	volatile union i40e_rx_desc *rxdp;
	struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
	struct rte_mbuf *mb0, *mb1;
	uint64x2_t dma_addr0, dma_addr1;
	uint64x2_t zero = vdupq_n_u64(0);
	uint64_t paddr;

	rxdp = rxq->rx_ring + rxq->rxrearm_start;
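
	/* If the mempool is out of mbufs and the ring is nearly exhausted,
	 * the block below parks the next descriptors on the queue's
	 * fake_mbuf and zeroes their DMA addresses, so a stale buffer is
	 * never handed to the NIC; the allocation failure is counted
	 * either way.
	 */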
	/* Pull 'n' more MBUFs into the software ring */
	if (unlikely(rte_mempool_get_bulk(rxq->mp,
					  (void *)rxep,
					  RTE_I40E_RXQ_REARM_THRESH) < 0)) {
		if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >=
		    rxq->nb_rx_desc) {
			for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
				rxep[i].mbuf = &rxq->fake_mbuf;
				vst1q_u64((uint64_t *)&rxdp[i].read, zero);
			}
		}
		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
			RTE_I40E_RXQ_REARM_THRESH;
		return;
	}
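
	/* Note: vdupq_n_u64() replicates the buffer address into both the
	 * pkt_addr and hdr_addr words of each 16B descriptor; writing the
	 * same address to hdr_addr is harmless as the vector path assumes
	 * header split is disabled.
	 */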
	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
	for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
		mb0 = rxep[0].mbuf;
		mb1 = rxep[1].mbuf;

		paddr = mb0->buf_physaddr + RTE_PKTMBUF_HEADROOM;
		dma_addr0 = vdupq_n_u64(paddr);

		/* flush desc with pa dma_addr */
		vst1q_u64((uint64_t *)&rxdp++->read, dma_addr0);

		paddr = mb1->buf_physaddr + RTE_PKTMBUF_HEADROOM;
		dma_addr1 = vdupq_n_u64(paddr);
		vst1q_u64((uint64_t *)&rxdp++->read, dma_addr1);
	}
	rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
		rxq->rxrearm_start = 0;

	rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH;

	/* tail points at the last rearmed descriptor, one behind start */
	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
			   (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));

	/* Update the tail pointer on the NIC */
	I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
}

static inline void
desc_to_olflags_v(struct i40e_rx_queue *rxq, uint64x2_t descs[4],
		  struct rte_mbuf **rx_pkts)
{
	uint32x4_t vlan0, vlan1, rss, l3_l4e;
	const uint64x2_t mbuf_init = {rxq->mbuf_initializer, 0};
	uint64x2_t rearm0, rearm1, rearm2, rearm3;
	/* mask everything except RSS, flow director and VLAN flags
	 * bit2 is for VLAN tag, bit11 for flow director indication
	 * bit13:12 for RSS indication.
	 */
	const uint32x4_t rss_vlan_msk = {
			0x1c03804, 0x1c03804, 0x1c03804, 0x1c03804};
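
	/* 0x1c03804 keeps status bit 2 (VLAN), bit 11 (FLM/flow director),
	 * bits 13:12 (FLTSTAT/RSS) and, additionally, bits 24:22, which per
	 * the i40e descriptor layout carry the IPE, L4E and EIPE checksum
	 * error flags consumed by the l3_l4e lookup below.
	 */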
	const uint32x4_t cksum_mask = {
			PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
			PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
			PKT_RX_EIP_CKSUM_BAD,
			PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
			PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
			PKT_RX_EIP_CKSUM_BAD,
			PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
			PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
			PKT_RX_EIP_CKSUM_BAD,
			PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
			PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
			PKT_RX_EIP_CKSUM_BAD};
	/* map rss and vlan type to rss hash and vlan flag */
	const uint8x16_t vlan_flags = {
			0, 0, 0, 0,
			PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED, 0, 0, 0,
			0, 0, 0, 0,
			0, 0, 0, 0};
	const uint8x16_t rss_flags = {
			0, PKT_RX_FDIR, 0, 0,
			0, 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH | PKT_RX_FDIR,
			0, 0, 0, 0,
			0, 0, 0, 0};
	const uint8x16_t l3_l4e_flags = {
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1,
			PKT_RX_IP_CKSUM_BAD >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
			(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1,
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD |
			 PKT_RX_L4_CKSUM_BAD) >> 1,
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
			 PKT_RX_IP_CKSUM_BAD) >> 1,
			0, 0, 0, 0, 0, 0, 0, 0};
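
	/* The l3_l4e table above stores each flag combination pre-shifted
	 * right by one so the value fits in a byte for vqtbl1q_u8(); the
	 * lookup result is shifted back left by one bit further down.
	 */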
	vlan0 = vzipq_u32(vreinterpretq_u32_u64(descs[0]),
			  vreinterpretq_u32_u64(descs[2])).val[1];
	vlan1 = vzipq_u32(vreinterpretq_u32_u64(descs[1]),
			  vreinterpretq_u32_u64(descs[3])).val[1];
	vlan0 = vzipq_u32(vlan0, vlan1).val[0];

	vlan1 = vandq_u32(vlan0, rss_vlan_msk);
	vlan0 = vreinterpretq_u32_u8(vqtbl1q_u8(vlan_flags,
						vreinterpretq_u8_u32(vlan1)));

	rss = vshrq_n_u32(vlan1, 11);
	rss = vreinterpretq_u32_u8(vqtbl1q_u8(rss_flags,
					      vreinterpretq_u8_u32(rss)));

	l3_l4e = vshrq_n_u32(vlan1, 22);
	l3_l4e = vreinterpretq_u32_u8(vqtbl1q_u8(l3_l4e_flags,
						 vreinterpretq_u8_u32(l3_l4e)));
	/* then we shift left 1 bit */
	l3_l4e = vshlq_n_u32(l3_l4e, 1);
	/* we need to mask out the redundant bits */
	l3_l4e = vandq_u32(l3_l4e, cksum_mask);

	vlan0 = vorrq_u32(vlan0, rss);
	vlan0 = vorrq_u32(vlan0, l3_l4e);
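
	/* Merge mbuf_init (lane 0) with the computed ol_flags (lane 1) so
	 * that a single 16B store per mbuf below refreshes the rearm_data
	 * area through ol_flags in one go.
	 */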
	rearm0 = vsetq_lane_u64(vgetq_lane_u32(vlan0, 0), mbuf_init, 1);
	rearm1 = vsetq_lane_u64(vgetq_lane_u32(vlan0, 1), mbuf_init, 1);
	rearm2 = vsetq_lane_u64(vgetq_lane_u32(vlan0, 2), mbuf_init, 1);
	rearm3 = vsetq_lane_u64(vgetq_lane_u32(vlan0, 3), mbuf_init, 1);

	vst1q_u64((uint64_t *)&rx_pkts[0]->rearm_data, rearm0);
	vst1q_u64((uint64_t *)&rx_pkts[1]->rearm_data, rearm1);
	vst1q_u64((uint64_t *)&rx_pkts[2]->rearm_data, rearm2);
	vst1q_u64((uint64_t *)&rx_pkts[3]->rearm_data, rearm3);
}

#define PKTLEN_SHIFT     10

#define I40E_VPMD_DESC_DD_MASK	0x0001000100010001ULL
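
/* PKTLEN_SHIFT re-aligns the packet length field of qword1 onto a 16-bit
 * boundary so shuf_msk can extract it directly; the DD mask selects the DD
 * bit from each of the four 16-bit STATERR words packed in step C.2 below.
 */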
static inline void
desc_to_ptype_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts)
{
	int i;
	uint8_t ptype;
	uint8x16_t tmp;

	/* the ptype field sits at bits 37:30 of qword1; after a 30-bit
	 * right shift of each 64-bit lane it lands in byte 8 of the vector
	 */
	for (i = 0; i < 4; i++) {
		tmp = vreinterpretq_u8_u64(vshrq_n_u64(descs[i], 30));
		ptype = vgetq_lane_u8(tmp, 8);
		rx_pkts[i]->packet_type = i40e_rxd_pkt_type_mapping(ptype);
	}
}
/*
 * Notice:
 * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
 * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
 *   numbers of DD bits
 */
static inline uint16_t
_recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
		   uint16_t nb_pkts, uint8_t *split_packet)
{
	volatile union i40e_rx_desc *rxdp;
	struct i40e_rx_entry *sw_ring;
	uint16_t nb_pkts_recd;
	int pos;
	uint64_t var;
	/* mask to shuffle from desc. to mbuf */
	uint8x16_t shuf_msk = {
		0xFF, 0xFF,   /* pkt_type set as unknown */
		0xFF, 0xFF,   /* pkt_type set as unknown */
		14, 15,       /* octet 15~14, low 16 bits pkt_len */
		0xFF, 0xFF,   /* skip high 16 bits pkt_len, zero out */
		14, 15,       /* octet 15~14, 16 bits data_len */
		2, 3,         /* octet 2~3, low 16 bits vlan_macip */
		4, 5, 6, 7    /* octet 4~7, 32bits rss */
		};
	uint8x16_t eop_check = {
		0x02, 0x00, 0x02, 0x00,
		0x02, 0x00, 0x02, 0x00,
		0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00
		};
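
	/* 0x02 matches the EOP flag (status bit 1) in the low byte of each
	 * 16-bit staterr word; only used when split_packet is requested
	 */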
	uint16x8_t crc_adjust = {
		0, 0,         /* ignore pkt_type field */
		rxq->crc_len, /* sub crc on pkt_len */
		0,            /* ignore high-16bits of pkt_len */
		rxq->crc_len, /* sub crc on data_len */
		0, 0, 0       /* ignore non-length fields */
		};
	/* nb_pkts shall be less than or equal to RTE_I40E_MAX_RX_BURST */
	nb_pkts = RTE_MIN(nb_pkts, RTE_I40E_MAX_RX_BURST);

	/* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP */
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP);

	/* Just the act of getting into the function from the application is
	 * going to cost about 7 cycles
	 */
	rxdp = rxq->rx_ring + rxq->rx_tail;

	rte_prefetch_non_temporal(rxdp);
	/* See if we need to rearm the RX queue - gives the prefetch a bit
	 * of time to act
	 */
	if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
		i40e_rxq_rearm(rxq);

	/* Before we start moving massive data around, check to see if
	 * there is actually a packet available
	 */
	if (!(rxdp->wb.qword1.status_error_len &
			rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
		return 0;

	/* Cache is empty -> need to scan the buffer rings, but first move
	 * the next 'n' mbufs into the cache
	 */
	sw_ring = &rxq->sw_ring[rxq->rx_tail];
	/* A. load 4 packets' descriptors in one loop
	 * [A*. mask out 4 unused dirty fields in desc]
	 * B. copy 4 mbuf pointers from sw_ring to rx_pkts
	 * C. calc the number of DD bits among the 4 packets
	 * [C*. extract the end-of-packet bit, if requested]
	 * D. fill info. from desc to mbuf
	 */

	for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
			pos += RTE_I40E_DESCS_PER_LOOP,
			rxdp += RTE_I40E_DESCS_PER_LOOP) {
		uint64x2_t descs[RTE_I40E_DESCS_PER_LOOP];
		uint8x16_t pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
		uint16x8x2_t sterr_tmp1, sterr_tmp2;
		uint64x2_t mbp1, mbp2;
		uint16x8_t staterr;
		uint16x8_t tmp;
		uint64_t stat;

		int32x4_t len_shl = {0, 0, 0, PKTLEN_SHIFT};
		/* B.1 load 2 mbuf pointers */
		mbp1 = vld1q_u64((uint64_t *)&sw_ring[pos]);
		/* Read desc statuses backwards to avoid race condition */
		/* A.1 load 4 pkts desc */
		descs[3] = vld1q_u64((uint64_t *)(rxdp + 3));
		rte_rmb();

		/* B.2 copy 2 mbuf pointers into rx_pkts */
		vst1q_u64((uint64_t *)&rx_pkts[pos], mbp1);

		/* B.1 load 2 mbuf pointers */
		mbp2 = vld1q_u64((uint64_t *)&sw_ring[pos + 2]);

		descs[2] = vld1q_u64((uint64_t *)(rxdp + 2));
		descs[1] = vld1q_u64((uint64_t *)(rxdp + 1));
		descs[0] = vld1q_u64((uint64_t *)(rxdp));

		/* B.2 copy 2 mbuf pointers into rx_pkts */
		vst1q_u64((uint64_t *)&rx_pkts[pos + 2], mbp2);

		if (split_packet) {
			rte_mbuf_prefetch_part2(rx_pkts[pos]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
		}
		/* avoid compiler reorder optimization */
		rte_compiler_barrier();

		/* pkt 3,4 shift the pktlen field to be 16-bit aligned */
		uint32x4_t len3 = vshlq_u32(vreinterpretq_u32_u64(descs[3]),
					    len_shl);
		descs[3] = vreinterpretq_u64_u32(len3);
		uint32x4_t len2 = vshlq_u32(vreinterpretq_u32_u64(descs[2]),
					    len_shl);
		descs[2] = vreinterpretq_u64_u32(len2);

		/* D.1 pkt 3,4 convert format from desc to pktmbuf */
		pkt_mb4 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[3]), shuf_msk);
		pkt_mb3 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[2]), shuf_msk);

		/* C.1 4=>2 filter staterr info only */
		sterr_tmp2 = vzipq_u16(vreinterpretq_u16_u64(descs[1]),
				       vreinterpretq_u16_u64(descs[3]));
		/* C.1 4=>2 filter staterr info only */
		sterr_tmp1 = vzipq_u16(vreinterpretq_u16_u64(descs[0]),
				       vreinterpretq_u16_u64(descs[2]));

		/* C.2 get 4 pkts staterr value */
		staterr = vzipq_u16(sterr_tmp1.val[1],
				    sterr_tmp2.val[1]).val[0];
		stat = vgetq_lane_u64(vreinterpretq_u64_u16(staterr), 0);
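
		/* the low 64 bits of staterr now hold the four 16-bit
		 * status words of descs 0..3; stat is popcounted against
		 * the DD mask in C.4 to learn how many descriptors are done
		 */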
		desc_to_olflags_v(rxq, descs, &rx_pkts[pos]);

		/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
		tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb4), crc_adjust);
		pkt_mb4 = vreinterpretq_u8_u16(tmp);
		tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb3), crc_adjust);
		pkt_mb3 = vreinterpretq_u8_u16(tmp);

		/* pkt 1,2 shift the pktlen field to be 16-bit aligned */
		uint32x4_t len1 = vshlq_u32(vreinterpretq_u32_u64(descs[1]),
					    len_shl);
		descs[1] = vreinterpretq_u64_u32(len1);
		uint32x4_t len0 = vshlq_u32(vreinterpretq_u32_u64(descs[0]),
					    len_shl);
		descs[0] = vreinterpretq_u64_u32(len0);

		/* D.1 pkt 1,2 convert format from desc to pktmbuf */
		pkt_mb2 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[1]), shuf_msk);
		pkt_mb1 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[0]), shuf_msk);
		/* D.3 copy final 3,4 data to rx_pkts */
		vst1q_u8((void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
			 pkt_mb4);
		vst1q_u8((void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,
			 pkt_mb3);

		/* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
		tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb2), crc_adjust);
		pkt_mb2 = vreinterpretq_u8_u16(tmp);
		tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb1), crc_adjust);
		pkt_mb1 = vreinterpretq_u8_u16(tmp);
		/* C* extract and record EOP bit */
		if (split_packet) {
			uint8x16_t eop_shuf_mask = {
					0x00, 0x02, 0x04, 0x06,
					0xFF, 0xFF, 0xFF, 0xFF,
					0xFF, 0xFF, 0xFF, 0xFF,
					0xFF, 0xFF, 0xFF, 0xFF};
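
			/* the mask above gathers the low byte of each 16-bit
			 * staterr word into four contiguous bytes, ready for
			 * a single 32-bit store
			 */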
			uint8x16_t eop_bits;

			/* and with mask to extract bits, flipping 1-0 */
			eop_bits = vmvnq_u8(vreinterpretq_u8_u16(staterr));
			eop_bits = vandq_u8(eop_bits, eop_check);
			/* the staterr values are not in order, as the count
			 * of dd bits doesn't care. However, for end of
			 * packet tracking, we do care, so shuffle. This also
			 * compresses the 32-bit values to 8-bit
			 */
			eop_bits = vqtbl1q_u8(eop_bits, eop_shuf_mask);

			/* store the resulting 32-bit value */
			vst1q_lane_u32((uint32_t *)split_packet,
				       vreinterpretq_u32_u8(eop_bits), 0);
			split_packet += RTE_I40E_DESCS_PER_LOOP;

			/* zero-out next pointers */
			rx_pkts[pos]->next = NULL;
			rx_pkts[pos + 1]->next = NULL;
			rx_pkts[pos + 2]->next = NULL;
			rx_pkts[pos + 3]->next = NULL;
		}
		rte_prefetch_non_temporal(rxdp + RTE_I40E_DESCS_PER_LOOP);

		/* D.3 copy final 1,2 data to rx_pkts */
		vst1q_u8((void *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
			 pkt_mb2);
		vst1q_u8((void *)&rx_pkts[pos]->rx_descriptor_fields1,
			 pkt_mb1);
		desc_to_ptype_v(descs, &rx_pkts[pos]);
		/* C.4 calc available number of desc */
		var = __builtin_popcountll(stat & I40E_VPMD_DESC_DD_MASK);
		nb_pkts_recd += var;
		if (likely(var != RTE_I40E_DESCS_PER_LOOP))
			break;
	}
	/* Update our internal tail pointer */
	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
	rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
	rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);

	return nb_pkts_recd;
}
/*
 * Notice:
 * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
 * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
 *   numbers of DD bits
 */
uint16_t
i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
		   uint16_t nb_pkts)
{
	return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
}
/* vPMD receive routine that reassembles scattered packets
 *
 * Notice:
 * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
 * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
 *   numbers of DD bits
 */
uint16_t
i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
			     uint16_t nb_pkts)
{
	struct i40e_rx_queue *rxq = rx_queue;
	uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};
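
	/* each split_flags byte is set non-zero for a descriptor whose EOP
	 * bit was clear, i.e. a segment continued in the next descriptor
	 */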
	/* get some new buffers */
	uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
			split_flags);
	if (nb_bufs == 0)
		return 0;

	/* happy day case, full burst + no packets to be joined */
	const uint64_t *split_fl64 = (uint64_t *)split_flags;

	if (rxq->pkt_first_seg == NULL &&
			split_fl64[0] == 0 && split_fl64[1] == 0 &&
			split_fl64[2] == 0 && split_fl64[3] == 0)
		return nb_bufs;
	/* reassemble any packets that need reassembly */
	unsigned int i = 0;

	if (rxq->pkt_first_seg == NULL) {
		/* find the first split flag, and only reassemble from then */
		while (i < nb_bufs && !split_flags[i])
			i++;
		if (i == nb_bufs)
			return nb_bufs;
	}
	return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
				      &split_flags[i]);
}
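
/* Each TX descriptor is one 128-bit word: the low qword carries the buffer
 * DMA address and the high qword packs DTYPE, the command flags and the
 * data length, as assembled in vtx1() below.
 */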
static inline void
vtx1(volatile struct i40e_tx_desc *txdp,
		struct rte_mbuf *pkt, uint64_t flags)
{
	uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA |
			((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT) |
			((uint64_t)pkt->data_len <<
				I40E_TXD_QW1_TX_BUF_SZ_SHIFT));

	uint64x2_t descriptor = {pkt->buf_physaddr + pkt->data_off, high_qw};
	vst1q_u64((uint64_t *)txdp, descriptor);
}
static inline void
vtx(volatile struct i40e_tx_desc *txdp,
		struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
{
	int i;

	for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
		vtx1(txdp, *pkt, flags);
}
uint16_t
i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
			  uint16_t nb_pkts)
{
	struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
	volatile struct i40e_tx_desc *txdp;
	struct i40e_tx_entry *txep;
	uint16_t n, nb_commit, tx_id;
	uint64_t flags = I40E_TD_CMD;
	uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;
	int i;

	/* crossing the tx_rs_thresh boundary is not allowed */
	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);

	if (txq->nb_tx_free < txq->tx_free_thresh)
		i40e_tx_free_bufs(txq);

	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
	if (unlikely(nb_pkts == 0))
		return 0;
	tx_id = txq->tx_tail;
	txdp = &txq->tx_ring[tx_id];
	txep = &txq->sw_ring[tx_id];

	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);

	n = (uint16_t)(txq->nb_tx_desc - tx_id);
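
	/* n is the number of slots up to the end of the ring; a burst that
	 * crosses the ring end is written in two pieces, with the RS bit
	 * requested on the last descriptor before the wrap
	 */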
	if (nb_commit >= n) {
		tx_backlog_entry(txep, tx_pkts, n);

		for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
			vtx1(txdp, *tx_pkts, flags);

		vtx1(txdp, *tx_pkts++, rs);

		nb_commit = (uint16_t)(nb_commit - n);

		tx_id = 0;
		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

		/* avoid reaching the end of the ring */
		txdp = &txq->tx_ring[tx_id];
		txep = &txq->sw_ring[tx_id];
	}

	tx_backlog_entry(txep, tx_pkts, nb_commit);
	vtx(txdp, tx_pkts, nb_commit, flags);

	tx_id = (uint16_t)(tx_id + nb_commit);
	if (tx_id > txq->tx_next_rs) {
		/* request a completion report (RS) on the descriptor at the
		 * current RS threshold, then advance the threshold
		 */
		txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
			rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
					 I40E_TXD_QW1_CMD_SHIFT);
		txq->tx_next_rs =
			(uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
	}

	txq->tx_tail = tx_id;

	I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);

	return nb_pkts;
}
void __attribute__((cold))
i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
{
	_i40e_rx_queue_release_mbufs_vec(rxq);
}

int __attribute__((cold))
i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
{
	return i40e_rxq_vec_setup_default(rxq);
}

int __attribute__((cold))
i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
{
	return 0;
}

int __attribute__((cold))
i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
{
	return i40e_rx_vec_dev_conf_condition_check_default(dev);
}