4 * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 #include <rte_ethdev.h>
36 #include <rte_malloc.h>
38 #include "ixgbe_ethdev.h"
39 #include "ixgbe_rxtx.h"
41 #include <tmmintrin.h>
43 #ifndef __INTEL_COMPILER
44 #pragma GCC diagnostic ignored "-Wcast-qual"
48 ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
52 volatile union ixgbe_adv_rx_desc *rxdp;
53 struct ixgbe_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
54 struct rte_mbuf *mb0, *mb1;
55 __m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
56 RTE_PKTMBUF_HEADROOM);
57 __m128i dma_addr0, dma_addr1;
59 const __m128i hba_msk = _mm_set_epi64x(0, UINT64_MAX);
61 rxdp = rxq->rx_ring + rxq->rxrearm_start;
63 /* Pull 'n' more MBUFs into the software ring */
64 if (rte_mempool_get_bulk(rxq->mb_pool,
66 RTE_IXGBE_RXQ_REARM_THRESH) < 0) {
67 if (rxq->rxrearm_nb + RTE_IXGBE_RXQ_REARM_THRESH >=
69 dma_addr0 = _mm_setzero_si128();
70 for (i = 0; i < RTE_IXGBE_DESCS_PER_LOOP; i++) {
71 rxep[i].mbuf = &rxq->fake_mbuf;
72 _mm_store_si128((__m128i *)&rxdp[i].read,
76 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
77 RTE_IXGBE_RXQ_REARM_THRESH;
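/* nothing was rearmed this time; the rearm is simply retried on a later Rx burst */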
81 /* Initialize the mbufs in the vector, processing 2 mbufs per loop iteration */
82 for (i = 0; i < RTE_IXGBE_RXQ_REARM_THRESH; i += 2, rxep += 2) {
83 __m128i vaddr0, vaddr1;
90 * Flush mbuf with the packet template.
91 * The data to be rearmed is only 6 bytes long.
92 * However, Rx will overwrite the ol_flags field (which comes next)
93 * anyway, so overwrite the whole 8 bytes with a single store:
94 * 6 bytes of rearm_data plus the first 2 bytes of ol_flags.
96 p0 = (uintptr_t)&mb0->rearm_data;
97 *(uint64_t *)p0 = rxq->mbuf_initializer;
98 p1 = (uintptr_t)&mb1->rearm_data;
99 *(uint64_t *)p1 = rxq->mbuf_initializer;
101 /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
102 vaddr0 = _mm_loadu_si128((__m128i *)&(mb0->buf_addr));
103 vaddr1 = _mm_loadu_si128((__m128i *)&(mb1->buf_addr));
105 /* convert pa to dma_addr hdr/data */
106 dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
107 dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);
109 /* add headroom to pa values */
110 dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
111 dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);
113 /* set Header Buffer Address to zero */
114 dma_addr0 = _mm_and_si128(dma_addr0, hba_msk);
115 dma_addr1 = _mm_and_si128(dma_addr1, hba_msk);
117 /* flush desc with pa dma_addr */
118 _mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
119 _mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
122 rxq->rxrearm_start += RTE_IXGBE_RXQ_REARM_THRESH;
123 if (rxq->rxrearm_start >= rxq->nb_rx_desc)
124 rxq->rxrearm_start = 0;
126 rxq->rxrearm_nb -= RTE_IXGBE_RXQ_REARM_THRESH;
128 rx_id = (uint16_t) ((rxq->rxrearm_start == 0) ?
129 (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
131 /* Update the tail pointer on the NIC */
132 IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
135 /* Handling the offload flags (ol_flags) field takes computation
136 * time when receiving packets. Therefore we provide a flag to disable
137 * the processing of the ol_flags field when it is not needed. This
138 * gives improved performance, at the cost of losing the offload info
139 * in the received packet.
141 #ifdef RTE_IXGBE_RX_OLFLAGS_ENABLE
144 desc_to_olflags_v(__m128i descs[4], uint8_t vlan_flags,
145 struct rte_mbuf **rx_pkts)
147 __m128i ptype0, ptype1, vtag0, vtag1;
153 /* mask everything except rss type */
154 const __m128i rsstype_msk = _mm_set_epi16(
155 0x0000, 0x0000, 0x0000, 0x0000,
156 0x000F, 0x000F, 0x000F, 0x000F);
158 /* map rss type to rss hash flag */
159 const __m128i rss_flags = _mm_set_epi8(PKT_RX_FDIR, 0, 0, 0,
160 0, 0, 0, PKT_RX_RSS_HASH,
161 PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH, 0,
162 PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, 0);
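/* each packet's 4-bit RSS-type value is used below as a byte index into this 16-entry table (via PSHUFB) */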
164 /* mask everything except vlan present bit */
165 const __m128i vlan_msk = _mm_set_epi16(
168 IXGBE_RXD_STAT_VP, IXGBE_RXD_STAT_VP,
169 IXGBE_RXD_STAT_VP, IXGBE_RXD_STAT_VP);
170 /* map vlan present (0x8) to ol_flags */
171 const __m128i vlan_map = _mm_set_epi8(
177 ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]);
178 ptype1 = _mm_unpacklo_epi16(descs[2], descs[3]);
179 vtag0 = _mm_unpackhi_epi16(descs[0], descs[1]);
180 vtag1 = _mm_unpackhi_epi16(descs[2], descs[3]);
182 ptype0 = _mm_unpacklo_epi32(ptype0, ptype1);
183 ptype0 = _mm_and_si128(ptype0, rsstype_msk);
184 ptype0 = _mm_shuffle_epi8(rss_flags, ptype0);
186 vtag1 = _mm_unpacklo_epi32(vtag0, vtag1);
187 vtag1 = _mm_and_si128(vtag1, vlan_msk);
188 vtag1 = _mm_shuffle_epi8(vlan_map, vtag1);
190 vtag1 = _mm_or_si128(ptype0, vtag1);
191 vol.dword = _mm_cvtsi128_si64(vtag1);
193 rx_pkts[0]->ol_flags = vol.e[0];
194 rx_pkts[1]->ol_flags = vol.e[1];
195 rx_pkts[2]->ol_flags = vol.e[2];
196 rx_pkts[3]->ol_flags = vol.e[3];
199 #define desc_to_olflags_v(descs, vlan_flags, rx_pkts) do {} while (0)
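/* stub for builds without RTE_IXGBE_RX_OLFLAGS_ENABLE: offload flags are not extracted from the descriptors */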
203 * vPMD raw receive routine; only accepts nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP
206 * - if nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
207 * - if nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
209 * - nb_pkts is floor-aligned to a multiple of RTE_IXGBE_DESCS_PER_LOOP (a power of two)
210 * - ol_flags for RSS and checksum errors are not supported
212 static inline uint16_t
213 _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
214 uint16_t nb_pkts, uint8_t *split_packet)
216 volatile union ixgbe_adv_rx_desc *rxdp;
217 struct ixgbe_rx_entry *sw_ring;
218 uint16_t nb_pkts_recd;
222 __m128i crc_adjust = _mm_set_epi16(
223 0, 0, 0, /* ignore non-length fields */
224 -rxq->crc_len, /* sub crc on data_len */
225 0, /* ignore high-16bits of pkt_len */
226 -rxq->crc_len, /* sub crc on pkt_len */
227 0, 0 /* ignore pkt_type field */
229 __m128i dd_check, eop_check;
232 /* nb_pkts must be less than or equal to RTE_IXGBE_MAX_RX_BURST */
233 nb_pkts = RTE_MIN(nb_pkts, RTE_IXGBE_MAX_RX_BURST);
235 /* nb_pkts has to be floor-aligned to RTE_IXGBE_DESCS_PER_LOOP */
236 nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_IXGBE_DESCS_PER_LOOP);
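/* e.g. with RTE_IXGBE_DESCS_PER_LOOP == 4, a request for 37 packets is trimmed to 36 */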
238 /* Just the act of getting into the function from the application is
239 * going to cost about 7 cycles
241 rxdp = rxq->rx_ring + rxq->rx_tail;
243 _mm_prefetch((const void *)rxdp, _MM_HINT_T0);
245 /* See if we need to rearm the RX queue - gives the prefetch a bit
248 if (rxq->rxrearm_nb > RTE_IXGBE_RXQ_REARM_THRESH)
249 ixgbe_rxq_rearm(rxq);
251 /* Before we start moving massive data around, check to see if
252 * there is actually a packet available
254 if (!(rxdp->wb.upper.status_error &
255 rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
258 /* 4 packets DD mask */
259 dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL);
261 /* 4 packets EOP mask */
262 eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL);
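/* DD is bit 0 (0x01) and EOP is bit 1 (0x02) of each 32-bit status word, replicated here for all 4 descriptors */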
264 /* mask to shuffle from desc. to mbuf */
265 shuf_msk = _mm_set_epi8(
266 7, 6, 5, 4, /* octet 4~7, 32bits rss */
267 15, 14, /* octet 14~15, low 16 bits vlan_macip */
268 13, 12, /* octet 12~13, 16 bits data_len */
269 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
270 13, 12, /* octet 12~13, low 16 bits pkt_len */
271 0xFF, 0xFF, /* skip 32 bit pkt_type */
275 /* Cache is empty -> need to scan the buffer rings, but first move
276 * the next 'n' mbufs into the cache
278 sw_ring = &rxq->sw_ring[rxq->rx_tail];
280 /* ensure these 2 flags are in the lower 8 bits */
281 RTE_BUILD_BUG_ON((PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED) > UINT8_MAX);
282 vlan_flags = rxq->vlan_flags & UINT8_MAX;
284 /* A. load 4 packets' descriptors in one loop
285 * [A*. mask out 4 unused dirty fields in desc]
286 * B. copy 4 mbuf pointers from sw_ring to rx_pkts
287 * C. calc the number of DD bits among the 4 packets
288 * [C*. extract the end-of-packet bit, if requested]
289 * D. fill info from desc to mbuf
291 for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
292 pos += RTE_IXGBE_DESCS_PER_LOOP,
293 rxdp += RTE_IXGBE_DESCS_PER_LOOP) {
294 __m128i descs[RTE_IXGBE_DESCS_PER_LOOP];
295 __m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
296 __m128i zero, staterr, sterr_tmp1, sterr_tmp2;
297 __m128i mbp1, mbp2; /* two mbuf pointer in one XMM reg. */
299 /* B.1 load 2 mbuf pointers */
300 mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
302 /* Read desc statuses backwards to avoid race condition */
303 /* A.1 load 4 pkts desc */
304 descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
306 /* B.2 copy 2 mbuf pointers into rx_pkts */
307 _mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
309 /* B.1 load 2 more mbuf pointers */
310 mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);
312 descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
313 /* A.1 load 2 more descriptors */
314 descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
315 descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
317 /* B.2 copy 2 mbuf pointers into rx_pkts */
318 _mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
321 rte_mbuf_prefetch_part2(rx_pkts[pos]);
322 rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
323 rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
324 rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
327 /* avoid compiler reorder optimization */
328 rte_compiler_barrier();
330 /* D.1 pkt 3,4 convert format from desc to pktmbuf */
331 pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
332 pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);
334 /* D.1 pkt 1,2 convert format from desc to pktmbuf */
335 pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
336 pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);
338 /* C.1 4=>2 filter staterr info only */
339 sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
340 /* C.1 4=>2 filter staterr info only */
341 sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);
343 /* set ol_flags with vlan packet type */
344 desc_to_olflags_v(descs, vlan_flags, &rx_pkts[pos]);
346 /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
347 pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
348 pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);
350 /* C.2 get 4 pkts staterr value */
351 zero = _mm_xor_si128(dd_check, dd_check);
352 staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);
354 /* D.3 copy final 3,4 data to rx_pkts */
355 _mm_storeu_si128((void *)&rx_pkts[pos+3]->rx_descriptor_fields1,
357 _mm_storeu_si128((void *)&rx_pkts[pos+2]->rx_descriptor_fields1,
360 /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
361 pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust);
362 pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust);
364 /* C* extract and record EOP bit */
366 __m128i eop_shuf_mask = _mm_set_epi8(
367 0xFF, 0xFF, 0xFF, 0xFF,
368 0xFF, 0xFF, 0xFF, 0xFF,
369 0xFF, 0xFF, 0xFF, 0xFF,
370 0x04, 0x0C, 0x00, 0x08
373 /* AND-NOT with the mask to extract the EOP bits, inverting 1 and 0 */
374 __m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
375 /* the staterr values are not in order, as the
376 * count of DD bits does not depend on order. However, for end-of-
377 * packet tracking we do care, so shuffle. This also
378 * compresses the 32-bit values to 8-bit
380 eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
381 /* store the resulting 32-bit value */
382 *(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
383 split_packet += RTE_IXGBE_DESCS_PER_LOOP;
385 /* zero-out next pointers */
386 rx_pkts[pos]->next = NULL;
387 rx_pkts[pos + 1]->next = NULL;
388 rx_pkts[pos + 2]->next = NULL;
389 rx_pkts[pos + 3]->next = NULL;
392 /* C.3 calc available number of desc */
393 staterr = _mm_and_si128(staterr, dd_check);
394 staterr = _mm_packs_epi32(staterr, zero);
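/* packing 32->16 bits keeps all four DD bits within the low 64 bits, ready for the popcount below */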
396 /* D.3 copy final 1,2 data to rx_pkts */
397 _mm_storeu_si128((void *)&rx_pkts[pos+1]->rx_descriptor_fields1,
399 _mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
402 /* C.4 calc available number of desc */
403 var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
405 if (likely(var != RTE_IXGBE_DESCS_PER_LOOP))
409 /* Update our internal tail pointer */
410 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
411 rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
412 rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
418 * vPMD receive routine; only accepts nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP
421 * - if nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
422 * - if nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
424 * - nb_pkts is floor-aligned to a multiple of RTE_IXGBE_DESCS_PER_LOOP (a power of two)
425 * - ol_flags for RSS and checksum errors are not supported
428 ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
431 return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
434 static inline uint16_t
435 reassemble_packets(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_bufs,
436 uint16_t nb_bufs, uint8_t *split_flags)
438 struct rte_mbuf *pkts[nb_bufs]; /* finished pkts */
439 struct rte_mbuf *start = rxq->pkt_first_seg;
440 struct rte_mbuf *end = rxq->pkt_last_seg;
441 unsigned int pkt_idx, buf_idx;
443 for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
445 /* processing a split packet */
446 end->next = rx_bufs[buf_idx];
447 rx_bufs[buf_idx]->data_len += rxq->crc_len;
450 start->pkt_len += rx_bufs[buf_idx]->data_len;
453 if (!split_flags[buf_idx]) {
454 /* it's the last packet of the set */
455 start->hash = end->hash;
456 start->ol_flags = end->ol_flags;
457 /* we need to strip crc for the whole packet */
458 start->pkt_len -= rxq->crc_len;
459 if (end->data_len > rxq->crc_len)
460 end->data_len -= rxq->crc_len;
462 /* free up last mbuf */
463 struct rte_mbuf *secondlast = start;
466 while (secondlast->next != end)
467 secondlast = secondlast->next;
468 secondlast->data_len -= (rxq->crc_len -
470 secondlast->next = NULL;
471 rte_pktmbuf_free_seg(end);
474 pkts[pkt_idx++] = start;
478 /* not processing a split packet */
479 if (!split_flags[buf_idx]) {
480 /* not a split packet, save and skip */
481 pkts[pkt_idx++] = rx_bufs[buf_idx];
484 end = start = rx_bufs[buf_idx];
485 rx_bufs[buf_idx]->data_len += rxq->crc_len;
486 rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
490 /* save the partial packet for next time */
491 rxq->pkt_first_seg = start;
492 rxq->pkt_last_seg = end;
493 memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
498 * vPMD receive routine that reassembles scattered packets
501 * - ol_flags for RSS and checksum errors are not supported
502 * - if nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
503 * - if nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
505 * - nb_pkts is floor-aligned to a multiple of RTE_IXGBE_DESCS_PER_LOOP (a power of two)
508 ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
511 struct ixgbe_rx_queue *rxq = rx_queue;
512 uint8_t split_flags[RTE_IXGBE_MAX_RX_BURST] = {0};
514 /* get some new buffers */
515 uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
520 /* happy day case, full burst + no packets to be joined */
521 const uint64_t *split_fl64 = (uint64_t *)split_flags;
522 if (rxq->pkt_first_seg == NULL &&
523 split_fl64[0] == 0 && split_fl64[1] == 0 &&
524 split_fl64[2] == 0 && split_fl64[3] == 0)
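/* nothing to reassemble: the whole burst can be returned as-is */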
527 /* reassemble any packets that need reassembly */
529 if (rxq->pkt_first_seg == NULL) {
530 /* find the first split flag, and only reassemble from there */
531 while (i < nb_bufs && !split_flags[i])
536 return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
541 vtx1(volatile union ixgbe_adv_tx_desc *txdp,
542 struct rte_mbuf *pkt, uint64_t flags)
544 __m128i descriptor = _mm_set_epi64x((uint64_t)pkt->pkt_len << 46 |
545 flags | pkt->data_len,
546 pkt->buf_physaddr + pkt->data_off);
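/* low qword: buffer DMA address; high qword: cmd_type_len
 * (flags | data_len) in its low 32 bits and olinfo_status
 * (PAYLEN = pkt_len << 14) in its high 32 bits
 */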
547 _mm_store_si128((__m128i *)&txdp->read, descriptor);
551 vtx(volatile union ixgbe_adv_tx_desc *txdp,
552 struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
556 for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
557 vtx1(txdp, *pkt, flags);
560 static inline int __attribute__((always_inline))
561 ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
563 struct ixgbe_tx_entry_v *txep;
568 struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];
570 /* check DD bit on threshold descriptor */
571 status = txq->tx_ring[txq->tx_next_dd].wb.status;
572 if (!(status & IXGBE_ADVTXD_STAT_DD))
575 n = txq->tx_rs_thresh;
578 * first buffer to free from S/W ring is at index
579 * tx_next_dd - (tx_rs_thresh-1)
581 txep = &txq->sw_ring_v[txq->tx_next_dd - (n - 1)];
582 m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
583 if (likely(m != NULL)) {
586 for (i = 1; i < n; i++) {
587 m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
588 if (likely(m != NULL)) {
589 if (likely(m->pool == free[0]->pool))
592 rte_mempool_put_bulk(free[0]->pool,
593 (void *)free, nb_free);
599 rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
601 for (i = 1; i < n; i++) {
602 m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
604 rte_mempool_put(m->pool, m);
608 /* buffers were freed, update counters */
609 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
610 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
611 if (txq->tx_next_dd >= txq->nb_tx_desc)
612 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
614 return txq->tx_rs_thresh;
617 static inline void __attribute__((always_inline))
618 tx_backlog_entry(struct ixgbe_tx_entry_v *txep,
619 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
623 for (i = 0; i < (int)nb_pkts; ++i)
624 txep[i].mbuf = tx_pkts[i];
628 ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
631 struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
632 volatile union ixgbe_adv_tx_desc *txdp;
633 struct ixgbe_tx_entry_v *txep;
634 uint16_t n, nb_commit, tx_id;
635 uint64_t flags = DCMD_DTYP_FLAGS;
636 uint64_t rs = IXGBE_ADVTXD_DCMD_RS|DCMD_DTYP_FLAGS;
639 /* crossing the tx_rs_thresh boundary is not allowed */
640 nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
642 if (txq->nb_tx_free < txq->tx_free_thresh)
643 ixgbe_tx_free_bufs(txq);
645 nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
646 if (unlikely(nb_pkts == 0))
649 tx_id = txq->tx_tail;
650 txdp = &txq->tx_ring[tx_id];
651 txep = &txq->sw_ring_v[tx_id];
653 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
655 n = (uint16_t)(txq->nb_tx_desc - tx_id);
656 if (nb_commit >= n) {
658 tx_backlog_entry(txep, tx_pkts, n);
660 for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
661 vtx1(txdp, *tx_pkts, flags);
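/* the last descriptor written before the ring wraps carries the RS bit so the NIC reports completion for this chunk */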
663 vtx1(txdp, *tx_pkts++, rs);
665 nb_commit = (uint16_t)(nb_commit - n);
668 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
670 /* avoid reaching the end of the ring */
671 txdp = &(txq->tx_ring[tx_id]);
672 txep = &txq->sw_ring_v[tx_id];
675 tx_backlog_entry(txep, tx_pkts, nb_commit);
677 vtx(txdp, tx_pkts, nb_commit, flags);
679 tx_id = (uint16_t)(tx_id + nb_commit);
680 if (tx_id > txq->tx_next_rs) {
681 txq->tx_ring[txq->tx_next_rs].read.cmd_type_len |=
682 rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
683 txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
687 txq->tx_tail = tx_id;
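/* notify the NIC by writing the new tail to the TDT register */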
689 IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);
694 static void __attribute__((cold))
695 ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
698 struct ixgbe_tx_entry_v *txe;
699 const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);
701 if (txq->sw_ring == NULL || txq->nb_tx_free == max_desc)
704 /* release the used mbufs in sw_ring */
705 for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1);
707 i = (i + 1) & max_desc) {
708 txe = &txq->sw_ring_v[i];
709 rte_pktmbuf_free_seg(txe->mbuf);
711 txq->nb_tx_free = max_desc;
714 for (i = 0; i < txq->nb_tx_desc; i++) {
715 txe = &txq->sw_ring_v[i];
720 void __attribute__((cold))
721 ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
723 const unsigned int mask = rxq->nb_rx_desc - 1;
726 if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
729 /* free all mbufs that are valid in the ring */
730 for (i = rxq->rx_tail; i != rxq->rxrearm_start; i = (i + 1) & mask)
731 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
732 rxq->rxrearm_nb = rxq->nb_rx_desc;
734 /* set all entries to NULL */
735 memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
738 static void __attribute__((cold))
739 ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
744 if (txq->sw_ring != NULL) {
745 rte_free(txq->sw_ring_v - 1);
746 txq->sw_ring_v = NULL;
750 static void __attribute__((cold))
751 ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
753 static const union ixgbe_adv_tx_desc zeroed_desc = { { 0 } };
754 struct ixgbe_tx_entry_v *txe = txq->sw_ring_v;
757 /* Zero out HW ring memory */
758 for (i = 0; i < txq->nb_tx_desc; i++)
759 txq->tx_ring[i] = zeroed_desc;
761 /* Initialize SW ring entries */
762 for (i = 0; i < txq->nb_tx_desc; i++) {
763 volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
765 txd->wb.status = IXGBE_TXD_STAT_DD;
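/* every descriptor starts out marked as done (DD) so the first calls to ixgbe_tx_free_bufs() are not blocked */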
769 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
770 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
775 * Always allow 1 descriptor to be un-allocated to avoid
776 * a H/W race condition
778 txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
779 txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
781 memset((void *)&txq->ctx_cache, 0,
782 IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
785 static const struct ixgbe_txq_ops vec_txq_ops = {
786 .release_mbufs = ixgbe_tx_queue_release_mbufs_vec,
787 .free_swring = ixgbe_tx_free_swring,
788 .reset = ixgbe_reset_tx_queue,
791 int __attribute__((cold))
792 ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq)
795 struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
798 mb_def.data_off = RTE_PKTMBUF_HEADROOM;
799 mb_def.port = rxq->port_id;
800 rte_mbuf_refcnt_set(&mb_def, 1);
802 /* prevent compiler reordering: rearm_data covers previous fields */
803 rte_compiler_barrier();
804 p = (uintptr_t)&mb_def.rearm_data;
805 rxq->mbuf_initializer = *(uint64_t *)p;
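/* ixgbe_rxq_rearm() copies this 8-byte template into each newly allocated mbuf's rearm_data */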
809 int __attribute__((cold))
810 ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq)
812 if (txq->sw_ring_v == NULL)
815 /* leave the first one for overflow */
816 txq->sw_ring_v = txq->sw_ring_v + 1;
817 txq->ops = &vec_txq_ops;
822 int __attribute__((cold))
823 ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
825 #ifndef RTE_LIBRTE_IEEE1588
826 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
827 struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
829 #ifndef RTE_IXGBE_RX_OLFLAGS_ENABLE
830 /* without Rx ol_flags, the VLAN Present (VP) flag cannot be reported */
831 if (rxmode->hw_vlan_strip != 0 ||
832 rxmode->hw_vlan_extend != 0)
836 /* no fdir support */
837 if (fconf->mode != RTE_FDIR_MODE_NONE)
841 * - no csum error report support
842 * - no header split support
844 if (rxmode->hw_ip_checksum == 1 ||
845 rxmode->header_split == 1)