/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>

#include "ixgbe_ethdev.h"
#include "ixgbe_rxtx.h"
#include "ixgbe_rxtx_vec_common.h"

#include <tmmintrin.h>

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif

static inline void
ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
{
        int i;
        uint16_t rx_id;
        volatile union ixgbe_adv_rx_desc *rxdp;
        struct ixgbe_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
        struct rte_mbuf *mb0, *mb1;
        __m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
                        RTE_PKTMBUF_HEADROOM);
        __m128i dma_addr0, dma_addr1;

        const __m128i hba_msk = _mm_set_epi64x(0, UINT64_MAX);
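
        /*
         * hba_msk keeps only the low 64 bits of each rewritten descriptor:
         * in the advanced RX descriptor read format the low quadword is the
         * packet buffer address and the high quadword is the header buffer
         * address, which this PMD leaves at zero.
         */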

        rxdp = rxq->rx_ring + rxq->rxrearm_start;

        /* Pull 'n' more MBUFs into the software ring */
        if (rte_mempool_get_bulk(rxq->mb_pool,
                                 (void *)rxep,
                                 RTE_IXGBE_RXQ_REARM_THRESH) < 0) {
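                /*
                 * Allocation failed: if the ring is nearly out of rearmable
                 * descriptors, point the next few software-ring entries at
                 * the queue's fake_mbuf and clear their descriptor addresses
                 * so the receive path never dereferences a stale mbuf, then
                 * record the failure and retry on a later rearm call.
                 */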
                if (rxq->rxrearm_nb + RTE_IXGBE_RXQ_REARM_THRESH >=
                    rxq->nb_rx_desc) {
                        dma_addr0 = _mm_setzero_si128();
                        for (i = 0; i < RTE_IXGBE_DESCS_PER_LOOP; i++) {
                                rxep[i].mbuf = &rxq->fake_mbuf;
                                _mm_store_si128((__m128i *)&rxdp[i].read,
                                                dma_addr0);
                        }
                }
                rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
                        RTE_IXGBE_RXQ_REARM_THRESH;
                return;
        }

        /* Initialize the mbufs in vector, process 2 mbufs in one loop */
        for (i = 0; i < RTE_IXGBE_RXQ_REARM_THRESH; i += 2, rxep += 2) {
                __m128i vaddr0, vaddr1;
                uintptr_t p0, p1;

                mb0 = rxep[0].mbuf;
                mb1 = rxep[1].mbuf;

#ifndef RTE_IXGBE_RX_OLFLAGS_ENABLE
                /* Flush mbuf with pkt template.
                 * Data to be rearmed is 6 bytes long.
                 */
                p0 = (uintptr_t)&mb0->rearm_data;
                *(uint64_t *)p0 = rxq->mbuf_initializer;
                p1 = (uintptr_t)&mb1->rearm_data;
                *(uint64_t *)p1 = rxq->mbuf_initializer;
#endif

                /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
                vaddr0 = _mm_loadu_si128((__m128i *)&(mb0->buf_addr));
                vaddr1 = _mm_loadu_si128((__m128i *)&(mb1->buf_addr));

                /* convert pa to dma_addr hdr/data */
                dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
                dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);

                /* add headroom to pa values */
                dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
                dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);

                /* set Header Buffer Address to zero */
                dma_addr0 = _mm_and_si128(dma_addr0, hba_msk);
                dma_addr1 = _mm_and_si128(dma_addr1, hba_msk);

                /* flush desc with pa dma_addr */
                _mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
                _mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
        }

        rxq->rxrearm_start += RTE_IXGBE_RXQ_REARM_THRESH;
        if (rxq->rxrearm_start >= rxq->nb_rx_desc)
                rxq->rxrearm_start = 0;

        rxq->rxrearm_nb -= RTE_IXGBE_RXQ_REARM_THRESH;

        rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
                        (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));

        /* Update the tail pointer on the NIC */
        IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
}

/* Handling the offload flags (olflags) field takes computation
 * time when receiving packets. Therefore we provide a flag to disable
 * the processing of the olflags field when they are not needed. This
 * gives improved performance, at the cost of losing the offload info
 * in the received packet.
 */
#ifdef RTE_IXGBE_RX_OLFLAGS_ENABLE

static inline void
desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags,
                struct rte_mbuf **rx_pkts)
{
        __m128i ptype0, ptype1, vtag0, vtag1, csum;
        __m128i rearm0, rearm1, rearm2, rearm3;

        /* mask everything except rss type */
        const __m128i rsstype_msk = _mm_set_epi16(
                        0x0000, 0x0000, 0x0000, 0x0000,
                        0x000F, 0x000F, 0x000F, 0x000F);

        /* mask the lower byte of ol_flags */
        const __m128i ol_flags_msk = _mm_set_epi16(
                        0x0000, 0x0000, 0x0000, 0x0000,
                        0x00FF, 0x00FF, 0x00FF, 0x00FF);

        /* map rss type to rss hash flag */
        const __m128i rss_flags = _mm_set_epi8(PKT_RX_FDIR, 0, 0, 0,
                        0, 0, 0, PKT_RX_RSS_HASH,
                        PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH, 0,
                        PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, 0);
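
        /*
         * rss_flags acts as a 16-entry byte lookup table below:
         * _mm_shuffle_epi8() indexes it with the 4-bit RSS type taken from
         * each descriptor and returns the matching PKT_RX_RSS_HASH or
         * PKT_RX_FDIR flag.
         */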

        /* mask everything except vlan present and l4/ip csum error */
        const __m128i vlan_csum_msk = _mm_set_epi16(
                        (IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE) >> 16,
                        (IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE) >> 16,
                        (IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE) >> 16,
                        (IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE) >> 16,
                        IXGBE_RXD_STAT_VP, IXGBE_RXD_STAT_VP,
                        IXGBE_RXD_STAT_VP, IXGBE_RXD_STAT_VP);
        /* map vlan present (0x8), IPE (0x2), L4E (0x1) to ol_flags */
        const __m128i vlan_csum_map_lo = _mm_set_epi8(
                        0, 0, 0, 0,
                        vlan_flags | PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
                        vlan_flags | PKT_RX_IP_CKSUM_BAD,
                        vlan_flags | PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
                        vlan_flags | PKT_RX_IP_CKSUM_GOOD,
                        0, 0, 0, 0,
                        PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
                        PKT_RX_IP_CKSUM_BAD,
                        PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
                        PKT_RX_IP_CKSUM_GOOD);

        const __m128i vlan_csum_map_hi = _mm_set_epi8(
                        0, 0, 0, 0,
                        0, PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
                        PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t),
                        0, 0, 0, 0,
                        0, PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
                        PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t));
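
        /*
         * vlan_csum_map_lo supplies the low byte of ol_flags and
         * vlan_csum_map_hi the high byte.  PKT_RX_L4_CKSUM_GOOD is wider
         * than one byte, so the table stores it shifted right by one bit and
         * the shuffled result is shifted back left by one bit further down.
         */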

        ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]);
        ptype1 = _mm_unpacklo_epi16(descs[2], descs[3]);
        vtag0 = _mm_unpackhi_epi16(descs[0], descs[1]);
        vtag1 = _mm_unpackhi_epi16(descs[2], descs[3]);

        ptype0 = _mm_unpacklo_epi32(ptype0, ptype1);
        ptype0 = _mm_and_si128(ptype0, rsstype_msk);
        ptype0 = _mm_shuffle_epi8(rss_flags, ptype0);

        vtag1 = _mm_unpacklo_epi32(vtag0, vtag1);
        vtag1 = _mm_and_si128(vtag1, vlan_csum_msk);

        /* csum bits are in the most significant position; to use shuffle we
         * need to shift them. Change the mask from 0xc000 to 0x0003.
         */
        csum = _mm_srli_epi16(vtag1, 14);

        /* now OR the most significant 64 bits containing the checksum
         * flags with the vlan present flags.
         */
        csum = _mm_srli_si128(csum, 8);
        vtag1 = _mm_or_si128(csum, vtag1);

        /* convert VP, IPE, L4E to ol_flags */
        vtag0 = _mm_shuffle_epi8(vlan_csum_map_hi, vtag1);
        vtag0 = _mm_slli_epi16(vtag0, sizeof(uint8_t));

        vtag1 = _mm_shuffle_epi8(vlan_csum_map_lo, vtag1);
        vtag1 = _mm_and_si128(vtag1, ol_flags_msk);
        vtag1 = _mm_or_si128(vtag0, vtag1);

        vtag1 = _mm_or_si128(ptype0, vtag1);

        /*
         * At this point, we have the 4 sets of flags in the low 64 bits
         * of vtag1 (4x16).
         * We want to extract these, and merge them with the mbuf init data
         * so we can do a single 16-byte write to the mbuf to set the flags
         * and all the other initialization fields. Extracting the
         * appropriate flags means that we have to do a shift and blend for
         * each mbuf before we do the write.
         */
#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
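        /*
         * Each packet's 16-bit flag word sits in one lane of vtag1; the byte
         * shifts below move it to lane 4 (byte offset 8) and the 0x10 blend
         * mask drops it into the low 16 bits of ol_flags, which immediately
         * follows the 8-byte rearm_data template held in mbuf_init.
         */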
        rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 8), 0x10);
        rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 6), 0x10);
        rearm2 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 4), 0x10);
        rearm3 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 2), 0x10);
#else
        rearm0 = _mm_slli_si128(vtag1, 14);
        rearm1 = _mm_slli_si128(vtag1, 12);
        rearm2 = _mm_slli_si128(vtag1, 10);
        rearm3 = _mm_slli_si128(vtag1, 8);

        rearm0 = _mm_or_si128(mbuf_init, _mm_srli_epi64(rearm0, 48));
        rearm1 = _mm_or_si128(mbuf_init, _mm_srli_epi64(rearm1, 48));
        rearm2 = _mm_or_si128(mbuf_init, _mm_srli_epi64(rearm2, 48));
        rearm3 = _mm_or_si128(mbuf_init, _mm_srli_epi64(rearm3, 48));
#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */

        _mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0);
        _mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1);
        _mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2);
        _mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3);
}
#else
#define desc_to_olflags_v(desc, mbuf_init, vlan_flags, rx_pkts) do { \
        RTE_SET_USED(mbuf_init); \
        RTE_SET_USED(vlan_flags); \
} while (0)
#endif

/*
 * vPMD raw receive routine, only accept(nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP)
 *
 * Notice:
 * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
 * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
 *   numbers of DD bits
 * - floor align nb_pkts to a RTE_IXGBE_DESCS_PER_LOOP power-of-two
 */
static inline uint16_t
_recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts, uint8_t *split_packet)
{
        volatile union ixgbe_adv_rx_desc *rxdp;
        struct ixgbe_rx_entry *sw_ring;
        uint16_t nb_pkts_recd;
        int pos;
        uint64_t var;
        __m128i shuf_msk;
        __m128i crc_adjust = _mm_set_epi16(
                        0, 0, 0,       /* ignore non-length fields */
                        -rxq->crc_len, /* sub crc on data_len */
                        0,             /* ignore high-16bits of pkt_len */
                        -rxq->crc_len, /* sub crc on pkt_len */
                        0, 0           /* ignore pkt_type field */
                        );
        __m128i dd_check, eop_check;
        __m128i mbuf_init;
        uint8_t vlan_flags;

        /* nb_pkts has to be less than or equal to RTE_IXGBE_MAX_RX_BURST */
        nb_pkts = RTE_MIN(nb_pkts, RTE_IXGBE_MAX_RX_BURST);

        /* nb_pkts has to be floor-aligned to RTE_IXGBE_DESCS_PER_LOOP */
        nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_IXGBE_DESCS_PER_LOOP);

        /* Just the act of getting into the function from the application is
         * going to cost about 7 cycles
         */
        rxdp = rxq->rx_ring + rxq->rx_tail;

        rte_prefetch0(rxdp);

        /* See if we need to rearm the RX queue - gives the prefetch a bit
         * of time to act
         */
        if (rxq->rxrearm_nb > RTE_IXGBE_RXQ_REARM_THRESH)
                ixgbe_rxq_rearm(rxq);

        /* Before we start moving massive data around, check to see if
         * there is actually a packet available
         */
        if (!(rxdp->wb.upper.status_error &
                        rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
                return 0;

        /* 4 packets DD mask */
        dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL);

        /* 4 packets EOP mask */
        eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL);

        /* mask to shuffle from desc. to mbuf */
        shuf_msk = _mm_set_epi8(
                        7, 6, 5, 4,  /* octet 4~7, 32bits rss */
                        15, 14,      /* octet 14~15, low 16 bits vlan_macip */
                        13, 12,      /* octet 12~13, 16 bits data_len */
                        0xFF, 0xFF,  /* skip high 16 bits pkt_len, zero out */
                        13, 12,      /* octet 12~13, low 16 bits pkt_len */
                        0xFF, 0xFF,  /* skip 32 bit pkt_type */
                        0xFF, 0xFF
                        );
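
        /*
         * The shuffle mask above rearranges each write-back descriptor into
         * the mbuf rx_descriptor_fields1 layout: pkt_type (zeroed here),
         * pkt_len, data_len, vlan_tci and the 32-bit RSS hash, so a single
         * 16-byte store per packet fills all of those fields.
         */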
        mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer);

        /* Cache is empty -> need to scan the buffer rings, but first move
         * the next 'n' mbufs into the cache
         */
        sw_ring = &rxq->sw_ring[rxq->rx_tail];

        /* ensure these 2 flags are in the lower 8 bits */
        RTE_BUILD_BUG_ON((PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED) > UINT8_MAX);
        vlan_flags = rxq->vlan_flags & UINT8_MAX;

        /* A. load 4 packets in one loop
         * [A*. mask out 4 unused dirty fields in desc]
         * B. copy 4 mbuf pointers from swring to rx_pkts
         * C. calc the number of DD bits among the 4 packets
         * [C*. extract the end-of-packet bit, if requested]
         * D. fill info. from desc to mbuf
         */
        for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
                        pos += RTE_IXGBE_DESCS_PER_LOOP,
                        rxdp += RTE_IXGBE_DESCS_PER_LOOP) {
                __m128i descs[RTE_IXGBE_DESCS_PER_LOOP];
                __m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
                __m128i zero, staterr, sterr_tmp1, sterr_tmp2;
                __m128i mbp1, mbp2; /* two mbuf pointers in one XMM reg. */

                /* B.1 load 1 mbuf point */
                mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);

                /* Read desc statuses backwards to avoid race condition */
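                /* Descriptors are read highest index first, with compiler
                 * barriers in between: hardware completes them in ascending
                 * order, so if entry N shows DD here, the lower entries are
                 * read afterwards and will show DD too, keeping the DD bits
                 * contiguous from index 0 as the popcount logic below assumes.
                 */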
                /* A.1 load 4 pkts desc */
                descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
                rte_compiler_barrier();

                /* B.2 copy 2 mbuf point into rx_pkts */
                _mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);

                /* B.1 load 1 mbuf point */
                mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos + 2]);

                descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
                rte_compiler_barrier();
                /* A.1 load 2 pkts desc */
                descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
                rte_compiler_barrier();
                descs[0] = _mm_loadu_si128((__m128i *)(rxdp));

                /* B.2 copy 2 mbuf point into rx_pkts */
                _mm_storeu_si128((__m128i *)&rx_pkts[pos + 2], mbp2);

                if (split_packet) {
                        rte_mbuf_prefetch_part2(rx_pkts[pos]);
                        rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
                        rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
                        rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
                }

                /* avoid compiler reorder optimization */
                rte_compiler_barrier();

                /* D.1 pkt 3,4 convert format from desc to pktmbuf */
                pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
                pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);

                /* D.1 pkt 1,2 convert format from desc to pktmbuf */
                pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
                pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);

                /* C.1 4=>2 filter staterr info only */
                sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
                /* C.1 4=>2 filter staterr info only */
                sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);

                /* set ol_flags with vlan packet type */
                desc_to_olflags_v(descs, mbuf_init, vlan_flags, &rx_pkts[pos]);

                /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
                pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
                pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);

                /* C.2 get 4 pkts staterr value */
                zero = _mm_xor_si128(dd_check, dd_check);
                staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);

                /* D.3 copy final 3,4 data to rx_pkts */
                _mm_storeu_si128((void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
                                pkt_mb4);
                _mm_storeu_si128((void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,
                                pkt_mb3);

                /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
                pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust);
                pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust);

                /* C* extract and record EOP bit */
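                /* The andnot below flips the sense of the EOP bit: after the
                 * shuffle, each byte written to split_packet[] is zero when
                 * the corresponding descriptor carried end-of-packet and
                 * non-zero when the packet continues into the next buffer
                 * and still needs reassembly.
                 */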
                if (split_packet) {
                        __m128i eop_shuf_mask = _mm_set_epi8(
                                        0xFF, 0xFF, 0xFF, 0xFF,
                                        0xFF, 0xFF, 0xFF, 0xFF,
                                        0xFF, 0xFF, 0xFF, 0xFF,
                                        0x04, 0x0C, 0x00, 0x08
                                        );

                        /* and with mask to extract bits, flipping 1-0 */
                        __m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
                        /* the staterr values are not in order, as the count
                         * of dd bits doesn't care. However, for end of
                         * packet tracking, we do care, so shuffle. This also
                         * compresses the 32-bit values to 8-bit
                         */
                        eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
                        /* store the resulting 32-bit value */
                        *(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
                        split_packet += RTE_IXGBE_DESCS_PER_LOOP;
                }

                /* C.3 calc available number of desc */
                staterr = _mm_and_si128(staterr, dd_check);
                staterr = _mm_packs_epi32(staterr, zero);

                /* D.3 copy final 1,2 data to rx_pkts */
                _mm_storeu_si128((void *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
                                pkt_mb2);
                _mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
                                pkt_mb1);

                /* C.4 calc available number of desc */
                var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
                nb_pkts_recd += var;
                if (likely(var != RTE_IXGBE_DESCS_PER_LOOP))
                        break;
        }

        /* Update our internal tail pointer */
        rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
        rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
        rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);

        return nb_pkts_recd;
}

/*
 * vPMD receive routine, only accept(nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP)
 *
 * Notice:
 * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
 * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
 *   numbers of DD bits
 * - floor align nb_pkts to a RTE_IXGBE_DESCS_PER_LOOP power-of-two
 */
uint16_t
ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts)
{
        return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
}

/*
 * vPMD receive routine that reassembles scattered packets
 *
 * Notice:
 * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
 * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
 *   numbers of DD bits
 * - floor align nb_pkts to a RTE_IXGBE_DESCS_PER_LOOP power-of-two
 */
uint16_t
ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts)
{
        struct ixgbe_rx_queue *rxq = rx_queue;
        uint8_t split_flags[RTE_IXGBE_MAX_RX_BURST] = {0};

        /* get some new buffers */
        uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
                        split_flags);
        if (nb_bufs == 0)
                return 0;

        /* happy day case, full burst + no packets to be joined */
        const uint64_t *split_fl64 = (uint64_t *)split_flags;

        if (rxq->pkt_first_seg == NULL &&
                        split_fl64[0] == 0 && split_fl64[1] == 0 &&
                        split_fl64[2] == 0 && split_fl64[3] == 0)
                return nb_bufs;

        /* reassemble any packets that need reassembly */
        unsigned int i = 0;

        if (rxq->pkt_first_seg == NULL) {
                /* find the first split flag, and only reassemble from there */
                while (i < nb_bufs && !split_flags[i])
                        i++;
                if (i == nb_bufs)
                        return nb_bufs;
        }
        return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
                        &split_flags[i]);
}

static inline void
vtx1(volatile union ixgbe_adv_tx_desc *txdp,
                struct rte_mbuf *pkt, uint64_t flags)
{
        __m128i descriptor = _mm_set_epi64x((uint64_t)pkt->pkt_len << 46 |
                        flags | pkt->data_len,
                        pkt->buf_physaddr + pkt->data_off);
        _mm_store_si128((__m128i *)&txdp->read, descriptor);
}
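
/*
 * In the descriptor built above, the low 64 bits hold the buffer DMA address
 * and the high 64 bits pack cmd_type_len (flags | data_len) in the lower 32
 * bits with olinfo_status in the upper 32 bits; the << 46 lands pkt_len in
 * the PAYLEN field of olinfo_status.
 */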

static inline void
vtx(volatile union ixgbe_adv_tx_desc *txdp,
                struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
{
        int i;

        for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
                vtx1(txdp, *pkt, flags);
}

uint16_t
ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                uint16_t nb_pkts)
{
        struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
        volatile union ixgbe_adv_tx_desc *txdp;
        struct ixgbe_tx_entry_v *txep;
        uint16_t n, nb_commit, tx_id;
        uint64_t flags = DCMD_DTYP_FLAGS;
        uint64_t rs = IXGBE_ADVTXD_DCMD_RS | DCMD_DTYP_FLAGS;
        int i;

        /* crossing the tx_rs_thresh boundary is not allowed */
        nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);

        if (txq->nb_tx_free < txq->tx_free_thresh)
                ixgbe_tx_free_bufs(txq);

        nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
        if (unlikely(nb_pkts == 0))
                return 0;

        tx_id = txq->tx_tail;
        txdp = &txq->tx_ring[tx_id];
        txep = &txq->sw_ring_v[tx_id];

        txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);

        n = (uint16_t)(txq->nb_tx_desc - tx_id);
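
        /*
         * If the burst would run past the end of the ring, fill the n
         * descriptors up to the wrap point first (the last of them with RS
         * set so completed mbufs can later be reclaimed), then wrap to
         * index 0 and write the remainder below.
         */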
        if (nb_commit >= n) {
                tx_backlog_entry(txep, tx_pkts, n);

                for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
                        vtx1(txdp, *tx_pkts, flags);

                vtx1(txdp, *tx_pkts++, rs);

                nb_commit = (uint16_t)(nb_commit - n);

                tx_id = 0;
                txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

                /* avoid reaching the end of the ring */
                txdp = &txq->tx_ring[tx_id];
                txep = &txq->sw_ring_v[tx_id];
        }

        tx_backlog_entry(txep, tx_pkts, nb_commit);

        vtx(txdp, tx_pkts, nb_commit, flags);

        tx_id = (uint16_t)(tx_id + nb_commit);
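
        /*
         * tx_next_rs marks the next descriptor that must carry the Report
         * Status bit; once the new tail passes it, OR RS into that entry so
         * ixgbe_tx_free_bufs() can later recycle mbufs in tx_rs_thresh-sized
         * batches, and advance the marker by another tx_rs_thresh.
         */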
        if (tx_id > txq->tx_next_rs) {
                txq->tx_ring[txq->tx_next_rs].read.cmd_type_len |=
                        rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
                txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
                        txq->tx_rs_thresh);
        }

        txq->tx_tail = tx_id;

        IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);

        return nb_pkts;
}

static void __attribute__((cold))
ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
{
        _ixgbe_tx_queue_release_mbufs_vec(txq);
}

void __attribute__((cold))
ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
{
        _ixgbe_rx_queue_release_mbufs_vec(rxq);
}

static void __attribute__((cold))
ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
{
        _ixgbe_tx_free_swring_vec(txq);
}

static void __attribute__((cold))
ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
{
        _ixgbe_reset_tx_queue_vec(txq);
}

static const struct ixgbe_txq_ops vec_txq_ops = {
        .release_mbufs = ixgbe_tx_queue_release_mbufs_vec,
        .free_swring = ixgbe_tx_free_swring,
        .reset = ixgbe_reset_tx_queue,
};

int __attribute__((cold))
ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq)
{
        return ixgbe_rxq_vec_setup_default(rxq);
}

int __attribute__((cold))
ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq)
{
        return ixgbe_txq_vec_setup_default(txq, &vec_txq_ops);
}

int __attribute__((cold))
ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
{
        return ixgbe_rx_vec_dev_conf_condition_check_default(dev);
}