/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#include <stdint.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_vect.h>

#include "ixgbe_ethdev.h"
#include "ixgbe_rxtx.h"
#include "ixgbe_rxtx_vec_common.h"

#pragma GCC diagnostic ignored "-Wcast-qual"
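
/* Refill the RX descriptor ring: take RTE_IXGBE_RXQ_REARM_THRESH mbufs from
 * the mempool, reset their rearm_data from the queue's mbuf_initializer
 * template and write the fresh buffer DMA addresses back into the hardware
 * descriptor ring before advancing the tail register.
 */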
static inline void
ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
{
        int i;
        uint16_t rx_id;
        volatile union ixgbe_adv_rx_desc *rxdp;
        struct ixgbe_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
        struct rte_mbuf *mb0, *mb1;
        uint64x2_t dma_addr0, dma_addr1;
        uint64x2_t zero = vdupq_n_u64(0);
        uint64_t paddr;
        uint8x8_t p;

        rxdp = rxq->rx_ring + rxq->rxrearm_start;

        /* Pull 'n' more MBUFs into the software ring */
        if (unlikely(rte_mempool_get_bulk(rxq->mb_pool,
                                          (void *)rxep,
                                          RTE_IXGBE_RXQ_REARM_THRESH) < 0)) {
                if (rxq->rxrearm_nb + RTE_IXGBE_RXQ_REARM_THRESH >=
                    rxq->nb_rx_desc) {
                        for (i = 0; i < RTE_IXGBE_DESCS_PER_LOOP; i++) {
                                rxep[i].mbuf = &rxq->fake_mbuf;
                                vst1q_u64((uint64_t *)&rxdp[i].read, zero);
                        }
                }
                rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
                        RTE_IXGBE_RXQ_REARM_THRESH;
                return;
        }

        p = vld1_u8((uint8_t *)&rxq->mbuf_initializer);

        /* Initialize the mbufs in vector, process 2 mbufs in one loop */
        for (i = 0; i < RTE_IXGBE_RXQ_REARM_THRESH; i += 2, rxep += 2) {
                mb0 = rxep[0].mbuf;
                mb1 = rxep[1].mbuf;

                /*
                 * Flush mbuf with pkt template.
                 * Data to be rearmed is 6 bytes long.
                 */
                vst1_u8((uint8_t *)&mb0->rearm_data, p);
                paddr = mb0->buf_iova + RTE_PKTMBUF_HEADROOM;
                dma_addr0 = vsetq_lane_u64(paddr, zero, 0);
                /* flush desc with pa dma_addr */
                vst1q_u64((uint64_t *)&rxdp++->read, dma_addr0);

                vst1_u8((uint8_t *)&mb1->rearm_data, p);
                paddr = mb1->buf_iova + RTE_PKTMBUF_HEADROOM;
                dma_addr1 = vsetq_lane_u64(paddr, zero, 0);
                vst1q_u64((uint64_t *)&rxdp++->read, dma_addr1);
        }

        rxq->rxrearm_start += RTE_IXGBE_RXQ_REARM_THRESH;
        if (rxq->rxrearm_start >= rxq->nb_rx_desc)
                rxq->rxrearm_start = 0;

        rxq->rxrearm_nb -= RTE_IXGBE_RXQ_REARM_THRESH;

        rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
                           (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));

        /* Update the tail pointer on the NIC */
        IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
}
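
/* Translate the status/error fields of four descriptors into mbuf ol_flags.
 * The RSS/FDIR type nibble and the VLAN-present plus IP/L4 checksum error
 * bits are each run through a 16-entry vqtbl1q_u8 lookup table, so all four
 * packets are resolved in parallel.
 */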
static inline void
desc_to_olflags_v(uint8x16x2_t sterr_tmp1, uint8x16x2_t sterr_tmp2,
                  uint8x16_t staterr, uint8_t vlan_flags,
                  struct rte_mbuf **rx_pkts)
{
        uint8x16_t ptype;
        uint8x16_t vtag_lo, vtag_hi, vtag;
        uint8x16_t temp_csum;
        uint32x4_t csum = {0, 0, 0, 0};

        union {
                uint16_t e[4];
                uint64_t word;
        } vol;

        /* mask everything except rss type */
        const uint8x16_t rsstype_msk = {
                        0x0F, 0x0F, 0x0F, 0x0F,
                        0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00};

        /* map rss type to rss hash flag */
        const uint8x16_t rss_flags = {
                        0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
                        0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
                        PKT_RX_RSS_HASH, 0, 0, 0,
                        0, 0, 0, PKT_RX_FDIR};

        /* mask everything except vlan present and l4/ip csum error */
        const uint8x16_t vlan_csum_msk = {
                        IXGBE_RXD_STAT_VP, IXGBE_RXD_STAT_VP,
                        IXGBE_RXD_STAT_VP, IXGBE_RXD_STAT_VP,
                        0, 0, 0, 0,
                        0, 0, 0, 0,
                        (IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE) >> 24,
                        (IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE) >> 24,
                        (IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE) >> 24,
                        (IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE) >> 24};

        /* map vlan present (0x8), IPE (0x2), L4E (0x1) to ol_flags */
        const uint8x16_t vlan_csum_map_lo = {
                        PKT_RX_IP_CKSUM_GOOD,
                        PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
                        PKT_RX_IP_CKSUM_BAD,
                        PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
                        0, 0, 0, 0,
                        vlan_flags | PKT_RX_IP_CKSUM_GOOD,
                        vlan_flags | PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
                        vlan_flags | PKT_RX_IP_CKSUM_BAD,
                        vlan_flags | PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
                        0, 0, 0, 0};

        const uint8x16_t vlan_csum_map_hi = {
                        PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
                        PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
                        0, 0, 0, 0,
                        PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
                        PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
                        0, 0, 0, 0};

        ptype = vzipq_u8(sterr_tmp1.val[0], sterr_tmp2.val[0]).val[0];
        ptype = vandq_u8(ptype, rsstype_msk);
        ptype = vqtbl1q_u8(rss_flags, ptype);

        /* extract vlan_flags and csum_error from staterr */
        vtag = vandq_u8(staterr, vlan_csum_msk);

        /* csum bits are in the most significant, to use shuffle we need to
         * shift them. Change mask from 0xc0 to 0x03.
         */
        temp_csum = vshrq_n_u8(vtag, 6);

        /* 'OR' the most significant 32 bits containing the checksum
         * flags with the vlan present flags.
         * Then bits layout of each lane(8bits) will be 'xxxx,VP,x,IPE,L4E'
         */
        csum = vsetq_lane_u32(vgetq_lane_u32(vreinterpretq_u32_u8(temp_csum), 3), csum, 0);
        vtag = vorrq_u8(vreinterpretq_u8_u32(csum), vtag);

        /* convert L4 checksum correct type to vtag_hi */
        vtag_hi = vqtbl1q_u8(vlan_csum_map_hi, vtag);
        vtag_hi = vshrq_n_u8(vtag_hi, 7);

        /* convert VP, IPE, L4E to vtag_lo */
        vtag_lo = vqtbl1q_u8(vlan_csum_map_lo, vtag);
        vtag_lo = vorrq_u8(ptype, vtag_lo);

        vtag = vzipq_u8(vtag_lo, vtag_hi).val[0];
        vol.word = vgetq_lane_u64(vreinterpretq_u64_u8(vtag), 0);

        rx_pkts[0]->ol_flags = vol.e[0];
        rx_pkts[1]->ol_flags = vol.e[1];
        rx_pkts[2]->ol_flags = vol.e[2];
        rx_pkts[3]->ol_flags = vol.e[3];
}
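
/* IXGBE_VPMD_DESC_EOP_MASK selects the EOP bit (0x02) in each of the four
 * per-descriptor status bytes that the receive loop gathers into one 32-bit
 * word; IXGBE_UINT8_BIT is just the number of bits in a byte, used when
 * converting bit positions back into descriptor counts.
 */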
#define IXGBE_VPMD_DESC_EOP_MASK        0x02020202
#define IXGBE_UINT8_BIT                 (CHAR_BIT * sizeof(uint8_t))

static inline uint32_t
get_packet_type(uint32_t pkt_info,
                uint32_t etqf_check,
                uint32_t tunnel_check)
{
        if (etqf_check)
                return RTE_PTYPE_UNKNOWN;

        if (tunnel_check) {
                pkt_info &= IXGBE_PACKET_TYPE_MASK_TUNNEL;
                return ptype_table_tn[pkt_info];
        }

        pkt_info &= IXGBE_PACKET_TYPE_MASK_82599;
        return ptype_table[pkt_info];
}
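
/* Resolve packet_type for four packets from the pkt_info word of each
 * descriptor: packets that matched an ETQF filter are reported as
 * RTE_PTYPE_UNKNOWN, tunnelled packets are looked up in ptype_table_tn and
 * everything else in ptype_table.
 */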
static inline void
desc_to_ptype_v(uint64x2_t descs[4], uint16_t pkt_type_mask,
                struct rte_mbuf **rx_pkts)
{
        uint32x4_t etqf_check, tunnel_check;
        uint32x4_t etqf_mask = vdupq_n_u32(0x8000);
        uint32x4_t tunnel_mask = vdupq_n_u32(0x10000);
        uint32x4_t ptype_mask = vdupq_n_u32((uint32_t)pkt_type_mask);
        uint32x4_t ptype0 = vzipq_u32(vreinterpretq_u32_u64(descs[0]),
                                vreinterpretq_u32_u64(descs[2])).val[0];
        uint32x4_t ptype1 = vzipq_u32(vreinterpretq_u32_u64(descs[1]),
                                vreinterpretq_u32_u64(descs[3])).val[0];

        /* interleave low 32 bits,
         * now we have 4 ptypes in a NEON register
         */
        ptype0 = vzipq_u32(ptype0, ptype1).val[0];

        /* mask etqf bits */
        etqf_check = vandq_u32(ptype0, etqf_mask);
        /* mask tunnel bits */
        tunnel_check = vandq_u32(ptype0, tunnel_mask);

        /* shift right by IXGBE_PACKET_TYPE_SHIFT, and apply ptype mask */
        ptype0 = vandq_u32(vshrq_n_u32(ptype0, IXGBE_PACKET_TYPE_SHIFT),
                        ptype_mask);

        rx_pkts[0]->packet_type =
                get_packet_type(vgetq_lane_u32(ptype0, 0),
                                vgetq_lane_u32(etqf_check, 0),
                                vgetq_lane_u32(tunnel_check, 0));
        rx_pkts[1]->packet_type =
                get_packet_type(vgetq_lane_u32(ptype0, 1),
                                vgetq_lane_u32(etqf_check, 1),
                                vgetq_lane_u32(tunnel_check, 1));
        rx_pkts[2]->packet_type =
                get_packet_type(vgetq_lane_u32(ptype0, 2),
                                vgetq_lane_u32(etqf_check, 2),
                                vgetq_lane_u32(tunnel_check, 2));
        rx_pkts[3]->packet_type =
                get_packet_type(vgetq_lane_u32(ptype0, 3),
                                vgetq_lane_u32(etqf_check, 3),
                                vgetq_lane_u32(tunnel_check, 3));
}

/*
 * vPMD raw receive routine, only accept (nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP)
 *
 * Notice:
 * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
 * - floor align nb_pkts to a RTE_IXGBE_DESCS_PER_LOOP power-of-two
 */
static inline uint16_t
_recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
                   uint16_t nb_pkts, uint8_t *split_packet)
{
        volatile union ixgbe_adv_rx_desc *rxdp;
        struct ixgbe_rx_entry *sw_ring;
        uint16_t nb_pkts_recd;
        int pos;
        uint8x16_t shuf_msk = {
                0xFF, 0xFF,
                0xFF, 0xFF,  /* skip 32 bits pkt_type */
                12, 13,      /* octet 12~13, low 16 bits pkt_len */
                0xFF, 0xFF,  /* skip high 16 bits pkt_len, zero out */
                12, 13,      /* octet 12~13, 16 bits data_len */
                14, 15,      /* octet 14~15, low 16 bits vlan_macip */
                4, 5, 6, 7   /* octet 4~7, 32bits rss */
                };
        uint16x8_t crc_adjust = {0, 0, rxq->crc_len, 0,
                                 rxq->crc_len, 0, 0, 0};
        uint8_t vlan_flags;

        /* nb_pkts has to be floor-aligned to RTE_IXGBE_DESCS_PER_LOOP */
        nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_IXGBE_DESCS_PER_LOOP);

        /* Just the act of getting into the function from the application is
         * going to cost about 7 cycles
         */
        rxdp = rxq->rx_ring + rxq->rx_tail;

        rte_prefetch_non_temporal(rxdp);

        /* See if we need to rearm the RX queue - gives the prefetch a bit
         * of time to act
         */
        if (rxq->rxrearm_nb > RTE_IXGBE_RXQ_REARM_THRESH)
                ixgbe_rxq_rearm(rxq);

        /* Before we start moving massive data around, check to see if
         * there is actually a packet available
         */
        if (!(rxdp->wb.upper.status_error &
                        rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
                return 0;

        /* Cache is empty -> need to scan the buffer rings, but first move
         * the next 'n' mbufs into the cache
         */
        sw_ring = &rxq->sw_ring[rxq->rx_tail];

        /* ensure these 2 flags are in the lower 8 bits */
        RTE_BUILD_BUG_ON((PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED) > UINT8_MAX);
        vlan_flags = rxq->vlan_flags & UINT8_MAX;

        /* A. load 4 packets in one loop
         * B. copy 4 mbuf pointers from sw_ring to rx_pkts
         * C. calc the number of DD bits among the 4 packets
         * [C*. extract the end-of-packet bit, if requested]
         * D. fill info. from desc to mbuf
         */
        for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
                        pos += RTE_IXGBE_DESCS_PER_LOOP,
                        rxdp += RTE_IXGBE_DESCS_PER_LOOP) {
                uint64x2_t descs[RTE_IXGBE_DESCS_PER_LOOP];
                uint8x16_t pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
                uint8x16x2_t sterr_tmp1, sterr_tmp2;
                uint64x2_t mbp1, mbp2;
                uint8x16_t staterr;
                uint16x8_t tmp;
                uint32_t stat;

                /* B.1 load 2 mbuf pointers */
                mbp1 = vld1q_u64((uint64_t *)&sw_ring[pos]);

                /* B.2 copy 2 mbuf pointers into rx_pkts */
                vst1q_u64((uint64_t *)&rx_pkts[pos], mbp1);

                /* B.1 load 2 more mbuf pointers */
                mbp2 = vld1q_u64((uint64_t *)&sw_ring[pos + 2]);

                /* A. load 4 pkts descs */
                descs[0] = vld1q_u64((uint64_t *)(rxdp));
                descs[1] = vld1q_u64((uint64_t *)(rxdp + 1));
                descs[2] = vld1q_u64((uint64_t *)(rxdp + 2));
                descs[3] = vld1q_u64((uint64_t *)(rxdp + 3));

                /* B.2 copy 2 mbuf pointers into rx_pkts */
                vst1q_u64((uint64_t *)&rx_pkts[pos + 2], mbp2);

                if (split_packet) {
                        rte_mbuf_prefetch_part2(rx_pkts[pos]);
                        rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
                        rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
                        rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
                }

                /* D.1 pkt 3,4 convert format from desc to pktmbuf */
                pkt_mb4 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[3]), shuf_msk);
                pkt_mb3 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[2]), shuf_msk);

                /* D.1 pkt 1,2 convert format from desc to pktmbuf */
                pkt_mb2 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[1]), shuf_msk);
                pkt_mb1 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[0]), shuf_msk);

                /* C.1 4=>2 filter staterr info only */
                sterr_tmp2 = vzipq_u8(vreinterpretq_u8_u64(descs[1]),
                                      vreinterpretq_u8_u64(descs[3]));
                /* C.1 4=>2 filter staterr info only */
                sterr_tmp1 = vzipq_u8(vreinterpretq_u8_u64(descs[0]),
                                      vreinterpretq_u8_u64(descs[2]));

                /* C.2 get 4 pkts staterr value */
                staterr = vzipq_u8(sterr_tmp1.val[1], sterr_tmp2.val[1]).val[0];

                /* set ol_flags with vlan packet type */
                desc_to_olflags_v(sterr_tmp1, sterr_tmp2, staterr, vlan_flags,
                                  &rx_pkts[pos]);

                /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
                tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb4), crc_adjust);
                pkt_mb4 = vreinterpretq_u8_u16(tmp);
                tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb3), crc_adjust);
                pkt_mb3 = vreinterpretq_u8_u16(tmp);

                /* D.3 copy final 3,4 data to rx_pkts */
                vst1q_u8((void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
                         pkt_mb4);
                vst1q_u8((void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,
                         pkt_mb3);

                /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
                tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb2), crc_adjust);
                pkt_mb2 = vreinterpretq_u8_u16(tmp);
                tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb1), crc_adjust);
                pkt_mb1 = vreinterpretq_u8_u16(tmp);

                /* C* extract and record EOP bit */
                if (split_packet) {
                        stat = vgetq_lane_u32(vreinterpretq_u32_u8(staterr), 0);
                        /* and with mask to extract bits, flipping 1-0 */
                        *(int *)split_packet = ~stat & IXGBE_VPMD_DESC_EOP_MASK;

                        split_packet += RTE_IXGBE_DESCS_PER_LOOP;
                }
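
                /* The C.4/C.5 steps below widen each descriptor's DD bit to a
                 * full 0x00/0xFF byte (shift it up to bit 7, then arithmetic
                 * shift back down), so counting the trailing zero bits of the
                 * inverted 32-bit word yields the number of contiguous
                 * completed descriptors in this group of four.
                 */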
                /* C.4 expand DD bit to saturate UINT8 */
                staterr = vshlq_n_u8(staterr, IXGBE_UINT8_BIT - 1);
                staterr = vreinterpretq_u8_s8
                                (vshrq_n_s8(vreinterpretq_s8_u8(staterr),
                                        IXGBE_UINT8_BIT - 1));
                stat = ~vgetq_lane_u32(vreinterpretq_u32_u8(staterr), 0);

                rte_prefetch_non_temporal(rxdp + RTE_IXGBE_DESCS_PER_LOOP);

                /* D.3 copy final 1,2 data to rx_pkts */
                vst1q_u8((uint8_t *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
                         pkt_mb2);
                vst1q_u8((uint8_t *)&rx_pkts[pos]->rx_descriptor_fields1,
                         pkt_mb1);

                desc_to_ptype_v(descs, rxq->pkt_type_mask, &rx_pkts[pos]);

                /* C.5 calc available number of desc */
                if (unlikely(stat == 0)) {
                        nb_pkts_recd += RTE_IXGBE_DESCS_PER_LOOP;
                } else {
                        nb_pkts_recd += __builtin_ctz(stat) / IXGBE_UINT8_BIT;
                        break;
                }
        }

        /* Update our internal tail pointer */
        rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
        rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
        rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);

        return nb_pkts_recd;
}

/*
 * vPMD receive routine, only accept (nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP)
 *
 * Notice:
 * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
 * - floor align nb_pkts to a RTE_IXGBE_DESCS_PER_LOOP power-of-two
 */
uint16_t
ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts)
{
        return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
}

/*
 * vPMD receive routine that reassembles scattered packets
 *
 * Notice:
 * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
 * - floor align nb_pkts to a RTE_IXGBE_DESCS_PER_LOOP power-of-two
 */
static uint16_t
ixgbe_recv_scattered_burst_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
                               uint16_t nb_pkts)
{
        struct ixgbe_rx_queue *rxq = rx_queue;
        uint8_t split_flags[RTE_IXGBE_MAX_RX_BURST] = {0};

        /* get some new buffers */
        uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
                        split_flags);
        if (nb_bufs == 0)
                return 0;

        /* happy day case, full burst + no packets to be joined */
        const uint64_t *split_fl64 = (uint64_t *)split_flags;
        if (rxq->pkt_first_seg == NULL &&
                        split_fl64[0] == 0 && split_fl64[1] == 0 &&
                        split_fl64[2] == 0 && split_fl64[3] == 0)
                return nb_bufs;

        /* reassemble any packets that need reassembly */
        unsigned int i = 0;
        if (rxq->pkt_first_seg == NULL) {
                /* find the first split flag, and only reassemble from there */
                while (i < nb_bufs && !split_flags[i])
                        i++;
                if (i == nb_bufs)
                        return nb_bufs;
                rxq->pkt_first_seg = rx_pkts[i];
        }
        return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
                &split_flags[i]);
}

/**
 * vPMD receive routine that reassembles scattered packets.
 */
uint16_t
ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
                              uint16_t nb_pkts)
{
        uint16_t retval = 0;

        while (nb_pkts > RTE_IXGBE_MAX_RX_BURST) {
                uint16_t burst;

                burst = ixgbe_recv_scattered_burst_vec(rx_queue,
                                                       rx_pkts + retval,
                                                       RTE_IXGBE_MAX_RX_BURST);
                retval += burst;
                nb_pkts -= burst;
                if (burst < RTE_IXGBE_MAX_RX_BURST)
                        return retval;
        }

        return retval + ixgbe_recv_scattered_burst_vec(rx_queue,
                                                       rx_pkts + retval,
                                                       nb_pkts);
}
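
/* Each TX descriptor is written with a single 16-byte vector store: the low
 * 64 bits carry the buffer DMA address, the high 64 bits pack cmd_type_len
 * and olinfo_status, with data_len in the low bits, the command flags in the
 * middle and pkt_len shifted up by 46 into the PAYLEN field.
 */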
static inline void
vtx1(volatile union ixgbe_adv_tx_desc *txdp,
                struct rte_mbuf *pkt, uint64_t flags)
{
        uint64x2_t descriptor = {
                        pkt->buf_iova + pkt->data_off,
                        (uint64_t)pkt->pkt_len << 46 | flags | pkt->data_len};

        vst1q_u64((uint64_t *)&txdp->read, descriptor);
}

static inline void
vtx(volatile union ixgbe_adv_tx_desc *txdp,
                struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
{
        int i;

        for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
                vtx1(txdp, *pkt, flags);
}

uint16_t
ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                           uint16_t nb_pkts)
{
        struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
        volatile union ixgbe_adv_tx_desc *txdp;
        struct ixgbe_tx_entry_v *txep;
        uint16_t n, nb_commit, tx_id;
        uint64_t flags = DCMD_DTYP_FLAGS;
        uint64_t rs = IXGBE_ADVTXD_DCMD_RS | DCMD_DTYP_FLAGS;
        int i;

        /* crossing the tx_rs_thresh boundary is not allowed */
        nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);

        if (txq->nb_tx_free < txq->tx_free_thresh)
                ixgbe_tx_free_bufs(txq);

        nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
        if (unlikely(nb_pkts == 0))
                return 0;

        tx_id = txq->tx_tail;
        txdp = &txq->tx_ring[tx_id];
        txep = &txq->sw_ring_v[tx_id];

        txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);

        n = (uint16_t)(txq->nb_tx_desc - tx_id);
        if (nb_commit >= n) {
                tx_backlog_entry(txep, tx_pkts, n);

                for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
                        vtx1(txdp, *tx_pkts, flags);

                vtx1(txdp, *tx_pkts++, rs);

                nb_commit = (uint16_t)(nb_commit - n);

                tx_id = 0;
                txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

                /* avoid reaching the end of the ring */
                txdp = &txq->tx_ring[tx_id];
                txep = &txq->sw_ring_v[tx_id];
        }

        tx_backlog_entry(txep, tx_pkts, nb_commit);

        vtx(txdp, tx_pkts, nb_commit, flags);

        tx_id = (uint16_t)(tx_id + nb_commit);
        if (tx_id > txq->tx_next_rs) {
                txq->tx_ring[txq->tx_next_rs].read.cmd_type_len |=
                        rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
                txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
                        txq->tx_rs_thresh);
        }

        txq->tx_tail = tx_id;

        IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);

        return nb_pkts;
}

static void __rte_cold
ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
{
        _ixgbe_tx_queue_release_mbufs_vec(txq);
}

static void __rte_cold
ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
{
        _ixgbe_rx_queue_release_mbufs_vec(rxq);
}

static void __rte_cold
ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
{
        _ixgbe_tx_free_swring_vec(txq);
}

static void __rte_cold
ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
{
        _ixgbe_reset_tx_queue_vec(txq);
}

static const struct ixgbe_txq_ops vec_txq_ops = {
        .release_mbufs = ixgbe_tx_queue_release_mbufs_vec,
        .free_swring = ixgbe_tx_free_swring,
        .reset = ixgbe_reset_tx_queue,
};

int __rte_cold
ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq)
{
        return ixgbe_rxq_vec_setup_default(rxq);
}

int __rte_cold
ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq)
{
        return ixgbe_txq_vec_setup_default(txq, &vec_txq_ops);
}

int __rte_cold
ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
{
        return ixgbe_rx_vec_dev_conf_condition_check_default(dev);
}