 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_ethdev.h>
#include <rte_malloc.h>

#include "ixgbe_ethdev.h"
#include "ixgbe_rxtx.h"

#include <tmmintrin.h>

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
ixgbe_rxq_rearm(struct igb_rx_queue *rxq)
	static const struct rte_mbuf mb_def = {
	volatile union ixgbe_adv_rx_desc *rxdp;
	struct igb_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
	struct rte_mbuf *mb0, *mb1;
	__m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
			RTE_PKTMBUF_HEADROOM);
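	/* Both 64-bit lanes hold RTE_PKTMBUF_HEADROOM, so a single vector add
	 * below advances buf_addr and buf_physaddr to the start of the packet
	 * data in one operation. */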
	/* Pull 'n' more MBUFs into the software ring */
	if (rte_mempool_get_bulk(rxq->mb_pool,
				(void *)rxep, RTE_IXGBE_RXQ_REARM_THRESH) < 0)

	rxdp = rxq->rx_ring + rxq->rxrearm_start;

	def_low = _mm_load_si128((__m128i *)&(mb_def.next));
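	/* def_low is a 16-byte template taken from the static mb_def above;
	 * it is re-used below ("flush mbuf with pkt template") so every mbuf
	 * header can be re-initialized with a single store. */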
	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
	for (i = 0; i < RTE_IXGBE_RXQ_REARM_THRESH; i += 2, rxep += 2) {
		__m128i dma_addr0, dma_addr1;
		__m128i vaddr0, vaddr1;

		/* load buf_addr (lo 64 bits) and buf_physaddr (hi 64 bits) */
		vaddr0 = _mm_loadu_si128((__m128i *)&(mb0->buf_addr));
		vaddr1 = _mm_loadu_si128((__m128i *)&(mb1->buf_addr));

		/* calc va/pa of the packet data pointer */
		vaddr0 = _mm_add_epi64(vaddr0, hdr_room);
		vaddr1 = _mm_add_epi64(vaddr1, hdr_room);

		/* convert pa to dma_addr hdr/data */
		dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
		dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);
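		/* _mm_unpackhi_epi64(x, x) replicates the upper 64-bit lane
		 * (buf_physaddr, already advanced by the headroom) into both
		 * halves, matching the descriptor's paired address fields. */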
		/* fill va into t0 def pkt template */
		vaddr0 = _mm_unpacklo_epi64(def_low, vaddr0);
		vaddr1 = _mm_unpacklo_epi64(def_low, vaddr1);

		/* flush desc with pa dma_addr */
		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);

		/* flush mbuf with pkt template */
		_mm_store_si128((__m128i *)&mb0->next, vaddr0);
		_mm_store_si128((__m128i *)&mb1->next, vaddr1);

		/* update refcnt per pkt */
		rte_mbuf_refcnt_set(mb0, 1);
		rte_mbuf_refcnt_set(mb1, 1);
	rxq->rxrearm_start += RTE_IXGBE_RXQ_REARM_THRESH;
	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
		rxq->rxrearm_start = 0;

	rxq->rxrearm_nb -= RTE_IXGBE_RXQ_REARM_THRESH;

	rx_id = (uint16_t) ((rxq->rxrearm_start == 0) ?
			(rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
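	/* The tail register must point at the last freshly armed descriptor,
	 * i.e. one position before rxrearm_start, wrapping at the ring end. */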
	/* Update the tail pointer on the NIC */
	IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
/* Handling the offload flags (olflags) field takes computation
 * time when receiving packets. Therefore we provide a flag to disable
 * the processing of the olflags field when they are not needed. This
 * gives improved performance, at the cost of losing the offload info
 * in the received packet.
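 * (When RTE_IXGBE_RX_OLFLAGS_ENABLE is not defined, desc_to_olflags_v
 * below compiles to a no-op and ol_flags is simply left untouched.)
 */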
#ifdef RTE_IXGBE_RX_OLFLAGS_ENABLE

#define OLFLAGS_MASK     ((uint16_t)(PKT_RX_VLAN_PKT | PKT_RX_IPV4_HDR |\
				PKT_RX_IPV4_HDR_EXT | PKT_RX_IPV6_HDR |\
				PKT_RX_IPV6_HDR_EXT))
#define OLFLAGS_MASK_V   (((uint64_t)OLFLAGS_MASK << 48) | \
			((uint64_t)OLFLAGS_MASK << 32) | \
			((uint64_t)OLFLAGS_MASK << 16) | \
			((uint64_t)OLFLAGS_MASK))
#define PTYPE_SHIFT (1)
#define VTAG_SHIFT (3)
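/* OLFLAGS_MASK_V replicates the 16-bit flag mask into all four 16-bit lanes
 * of a 64-bit value; PTYPE_SHIFT/VTAG_SHIFT shift the packet-type and VLAN
 * status bits of each descriptor so they line up with those flag positions
 * before masking. */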
desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
	__m128i ptype0, ptype1, vtag0, vtag1;

	ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]);
	ptype1 = _mm_unpacklo_epi16(descs[2], descs[3]);
	vtag0 = _mm_unpackhi_epi16(descs[0], descs[1]);
	vtag1 = _mm_unpackhi_epi16(descs[2], descs[3]);

	ptype1 = _mm_unpacklo_epi32(ptype0, ptype1);
	vtag1 = _mm_unpacklo_epi32(vtag0, vtag1);

	ptype1 = _mm_slli_epi16(ptype1, PTYPE_SHIFT);
	vtag1 = _mm_srli_epi16(vtag1, VTAG_SHIFT);

	ptype1 = _mm_or_si128(ptype1, vtag1);
	vol.dword = _mm_cvtsi128_si64(ptype1) & OLFLAGS_MASK_V;

	rx_pkts[0]->ol_flags = vol.e[0];
	rx_pkts[1]->ol_flags = vol.e[1];
	rx_pkts[2]->ol_flags = vol.e[2];
	rx_pkts[3]->ol_flags = vol.e[3];
#define desc_to_olflags_v(desc, rx_pkts) do {} while (0)
 * vPMD receive routine: only accepts nb_pkts == RTE_IXGBE_VPMD_RX_BURST
 * per call
 * - if nb_pkts < RTE_IXGBE_VPMD_RX_BURST, just return without receiving anything
 * - if nb_pkts > RTE_IXGBE_VPMD_RX_BURST, only RTE_IXGBE_VPMD_RX_BURST
 *   descriptors are scanned
 * - RSS and checksum error flags are not reported in ol_flags
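 *
 * A minimal caller sketch (hypothetical, for illustration only):
 *	struct rte_mbuf *pkts[RTE_IXGBE_VPMD_RX_BURST];
 *	uint16_t nb_rx = ixgbe_recv_pkts_vec(rxq, pkts, RTE_IXGBE_VPMD_RX_BURST);
 */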
ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
	volatile union ixgbe_adv_rx_desc *rxdp;
	struct igb_rx_queue *rxq = rx_queue;
	struct igb_rx_entry *sw_ring;
	uint16_t nb_pkts_recd;

	if (unlikely(nb_pkts < RTE_IXGBE_VPMD_RX_BURST))

	/* Just the act of getting into the function from the application is
	 * going to cost about 7 cycles */
	rxdp = rxq->rx_ring + rxq->rx_tail;

	_mm_prefetch((const void *)rxdp, _MM_HINT_T0);

	/* See if we need to rearm the RX queue - gives the prefetch a bit
	 * of time to act */
	if (rxq->rxrearm_nb > RTE_IXGBE_RXQ_REARM_THRESH)
		ixgbe_rxq_rearm(rxq);

	/* Before we start moving massive data around, check to see if
	 * there is actually a packet available */
	if (!(rxdp->wb.upper.status_error &
			rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
	/* 4 packets DD mask */
	dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL);

	/* mask to shuffle from desc. to mbuf */
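	/* Note: _mm_set_epi8() lists bytes from most to least significant.
	 * Lanes set to 0xFF have their high bit set, so _mm_shuffle_epi8()
	 * writes zero there; the remaining indices copy descriptor bytes
	 * into the mbuf layout of data_len, pkt_len, vlan and rss hash. */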
	shuf_msk = _mm_set_epi8(
		7, 6, 5, 4,	/* octet 4~7, 32 bits rss */
		0xFF, 0xFF,	/* skip high 16 bits vlan_macip, zero out */
		15, 14,		/* octet 14~15, low 16 bits vlan_macip */
		0xFF, 0xFF,	/* skip high 16 bits pkt_len, zero out */
		13, 12,		/* octet 12~13, low 16 bits pkt_len */
		0xFF, 0xFF,	/* skip nb_segs and in_port, zero out */
		13, 12		/* octet 12~13, 16 bits data_len */
	/* Cache is empty -> need to scan the buffer rings, but first move
	 * the next 'n' mbufs into the cache */
	sw_ring = &rxq->sw_ring[rxq->rx_tail];

	/* in_port, nb_seg = 1, crc_len */
	in_port = rxq->misc_info;

	/*
	 * A. load 4 packets of descriptors in one loop
	 * B. copy 4 mbuf pointers from the sw_ring to rx_pkts
	 * C. calculate the number of DD bits among the 4 packets
	 * D. fill packet info from the descriptors into the mbufs
	 */
	for (pos = 0, nb_pkts_recd = 0; pos < RTE_IXGBE_VPMD_RX_BURST;
			pos += RTE_IXGBE_DESCS_PER_LOOP,
			rxdp += RTE_IXGBE_DESCS_PER_LOOP) {
		__m128i descs[RTE_IXGBE_DESCS_PER_LOOP];
		__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
		__m128i zero, staterr, sterr_tmp1, sterr_tmp2;
		__m128i mbp1, mbp2; /* two mbuf pointers in one XMM reg. */

		/* B.1 load 1 mbuf pointer */
		mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);

		/* Read desc statuses backwards to avoid race condition */
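		/* The NIC writes descriptors back in ascending order, so the
		 * statuses are loaded in descending order: if descs[3] shows
		 * DD, the earlier descriptors read afterwards are complete too. */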
		/* A.1 load 4 pkts desc */
		descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));

		/* B.2 copy 2 mbuf pointers into rx_pkts */
		_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);

		/* B.1 load 1 mbuf pointer */
		mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);

		descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
		/* A.1 load the remaining 2 pkts desc */
		descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
		descs[0] = _mm_loadu_si128((__m128i *)(rxdp));

		/* B.2 copy 2 mbuf pointers into rx_pkts */
		_mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);

		/* avoid compiler reorder optimization */
		rte_compiler_barrier();
		/* D.1 pkt 3,4 convert format from desc to pktmbuf */
		pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
		pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);

		/* C.1 4=>2 filter staterr info only */
		sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
		/* C.1 4=>2 filter staterr info only */
		sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);

		/* set ol_flags with packet type and vlan tag */
		desc_to_olflags_v(descs, &rx_pkts[pos]);

		/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
		pkt_mb4 = _mm_add_epi16(pkt_mb4, in_port);
		pkt_mb3 = _mm_add_epi16(pkt_mb3, in_port);

		/* D.1 pkt 1,2 convert format from desc to pktmbuf */
		pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
		pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);

		/* C.2 get 4 pkts staterr value */
		zero = _mm_xor_si128(dd_check, dd_check);
		staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);

		/* D.3 copy final 3,4 data to rx_pkts */
		_mm_storeu_si128((__m128i *)&(rx_pkts[pos+3]->data_len),
				pkt_mb4);
		_mm_storeu_si128((__m128i *)&(rx_pkts[pos+2]->data_len),
				pkt_mb3);

		/* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
		pkt_mb2 = _mm_add_epi16(pkt_mb2, in_port);
		pkt_mb1 = _mm_add_epi16(pkt_mb1, in_port);

		/* C.3 calc available number of desc */
		staterr = _mm_and_si128(staterr, dd_check);
		staterr = _mm_packs_epi32(staterr, zero);

		/* D.3 copy final 1,2 data to rx_pkts */
		_mm_storeu_si128((__m128i *)&(rx_pkts[pos+1]->data_len),
				pkt_mb2);
		_mm_storeu_si128((__m128i *)&(rx_pkts[pos]->data_len),
				pkt_mb1);

		/* C.4 calc available number of desc */
		var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
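		/* staterr now holds one DD bit per descriptor, packed into the
		 * four low 16-bit lanes, so the popcount of the low 64 bits is
		 * the number of completed descriptors in this group of four;
		 * the burst stops as soon as a group is not fully done. */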
		if (likely(var != RTE_IXGBE_DESCS_PER_LOOP))

	/* Update our internal tail pointer */
	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
	rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
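	/* nb_rx_desc is assumed to be a power of two here, so the AND above
	 * wraps the tail index back into the ring. */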
	rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
vtx1(volatile union ixgbe_adv_tx_desc *txdp,
		struct rte_mbuf *pkt, __m128i flags)
	__m128i t0, t1, offset, ols, ba, ctl;

	/* load buf_addr/buf_physaddr in t0 */
	t0 = _mm_loadu_si128((__m128i *)&(pkt->buf_addr));
	/* load data, ... pkt_len in t1 */
	t1 = _mm_loadu_si128((__m128i *)&(pkt->data));

	/* calc offset = (data - buf_addr) */
	offset = _mm_sub_epi64(t1, t0);

	/* cmd_type_len: pkt_len |= DCMD_DTYP_FLAGS */
	ctl = _mm_or_si128(t1, flags);

	/* reorder as buf_physaddr/buf_addr */
	offset = _mm_shuffle_epi32(offset, 0x4E);
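	/* 0x4E selects the dwords in order 2,3,0,1, i.e. it swaps the two
	 * 64-bit halves so the lanes line up with buf_physaddr/buf_addr. */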
	/* olinfo_status: pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT */
	ols = _mm_slli_epi32(t1, IXGBE_ADVTXD_PAYLEN_SHIFT);

	/* buffer_addr = buf_physaddr + offset */
	ba = _mm_add_epi64(t0, offset);

	/* format cmd_type_len/olinfo_status */
	ctl = _mm_unpackhi_epi32(ctl, ols);

	/* format buf_physaddr/cmd_type_len */
	ba = _mm_unpackhi_epi64(ba, ctl);
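	/* The assembled descriptor now carries the DMA address of the packet
	 * data in its low 64 bits and cmd_type_len/olinfo_status in its high
	 * 64 bits; a single store below writes it to the ring. */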
	_mm_store_si128((__m128i *)&txdp->read, ba);
vtx(volatile union ixgbe_adv_tx_desc *txdp,
		struct rte_mbuf **pkt, uint16_t nb_pkts, __m128i flags)
	for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
		vtx1(txdp, *pkt, flags);
static inline int __attribute__((always_inline))
ixgbe_tx_free_bufs(struct igb_tx_queue *txq)
	struct igb_tx_entry_v *txep;
	struct igb_tx_entry_seq *txsp;
#ifdef RTE_MBUF_REFCNT
	struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];

	/* check DD bit on threshold descriptor */
	status = txq->tx_ring[txq->tx_next_dd].wb.status;
	if (!(status & IXGBE_ADVTXD_STAT_DD))

	n = txq->tx_rs_thresh;

	/*
	 * first buffer to free from S/W ring is at index
	 * tx_next_dd - (tx_rs_thresh - 1)
	 */
	txep = &((struct igb_tx_entry_v *)txq->sw_ring)[txq->tx_next_dd -
			(n - 1)];
	txsp = &txq->sw_ring_seq[txq->tx_next_dd - (n - 1)];

	k = RTE_MIN(n, txsp[n-1].same_pool);
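	/* same_pool counts how many consecutive entries, ending at this one,
	 * came from the same mempool (see tx_backlog_entry below), so at most
	 * k buffers can be returned to that pool with a single bulk put. */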
#ifdef RTE_MBUF_REFCNT
	for (i = 0; i < k; i++) {
		m = __rte_pktmbuf_prefree_seg((txep+n-k+i)->mbuf);
	rte_mempool_put_bulk((void *)txsp[n-1].pool,
			(void **)free, nb_free);
	rte_mempool_put_bulk((void *)txsp[n-1].pool,
			(void **)(txep+n-k), k);

	/* buffers were freed, update counters */
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
	if (txq->tx_next_dd >= txq->nb_tx_desc)
		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);

	return txq->tx_rs_thresh;
static inline void __attribute__((always_inline))
tx_backlog_entry(struct igb_tx_entry_v *txep,
		struct igb_tx_entry_seq *txsp,
		struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
	for (i = 0; i < (int)nb_pkts; ++i) {
		txep[i].mbuf = tx_pkts[i];
		/* check and update sequence number */
		txsp[i].pool = tx_pkts[i]->pool;
		if (txsp[i-1].pool == tx_pkts[i]->pool)
			txsp[i].same_pool = txsp[i-1].same_pool + 1;
		else
			txsp[i].same_pool = 1;
ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
	struct igb_tx_queue *txq = (struct igb_tx_queue *)tx_queue;
	volatile union ixgbe_adv_tx_desc *txdp;
	struct igb_tx_entry_v *txep;
	struct igb_tx_entry_seq *txsp;
	uint16_t n, nb_commit, tx_id;
	__m128i flags = _mm_set_epi32(DCMD_DTYP_FLAGS, 0, 0, 0);
	__m128i rs = _mm_set_epi32(IXGBE_ADVTXD_DCMD_RS|DCMD_DTYP_FLAGS,

	if (unlikely(nb_pkts > RTE_IXGBE_VPMD_TX_BURST))
		nb_pkts = RTE_IXGBE_VPMD_TX_BURST;

	if (txq->nb_tx_free < txq->tx_free_thresh)
		ixgbe_tx_free_bufs(txq);

	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
	if (unlikely(nb_pkts == 0))

	tx_id = txq->tx_tail;
	txdp = &txq->tx_ring[tx_id];
	txep = &((struct igb_tx_entry_v *)txq->sw_ring)[tx_id];
	txsp = &txq->sw_ring_seq[tx_id];

	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);

	n = (uint16_t)(txq->nb_tx_desc - tx_id);
	if (nb_commit >= n) {
		tx_backlog_entry(txep, txsp, tx_pkts, n);

		for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
			vtx1(txdp, *tx_pkts, flags);

		vtx1(txdp, *tx_pkts++, rs);

		nb_commit = (uint16_t)(nb_commit - n);
		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

		/* avoid reaching the end of the ring */
		txdp = &(txq->tx_ring[tx_id]);
		txep = &(((struct igb_tx_entry_v *)txq->sw_ring)[tx_id]);
		txsp = &(txq->sw_ring_seq[tx_id]);
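		/* The descriptor and S/W ring pointers are re-loaded from
		 * tx_id here so the remaining nb_commit packets continue at
		 * the front of the ring after the wrap. */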
	tx_backlog_entry(txep, txsp, tx_pkts, nb_commit);

	vtx(txdp, tx_pkts, nb_commit, flags);

	tx_id = (uint16_t)(tx_id + nb_commit);
	if (tx_id > txq->tx_next_rs) {
		txq->tx_ring[txq->tx_next_rs].read.cmd_type_len |=
			rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
		txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
				txq->tx_rs_thresh);

	txq->tx_tail = tx_id;
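	/* Let hardware know about the newly queued descriptors by advancing
	 * the TDT (tail) register. */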
	IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);
ixgbe_tx_queue_release_mbufs(struct igb_tx_queue *txq)
	struct igb_tx_entry_v *txe;
	struct igb_tx_entry_seq *txs;
	uint16_t nb_free, max_desc;

	if (txq->sw_ring != NULL) {
		/* release the used mbufs in sw_ring */
		nb_free = txq->nb_tx_free;
		max_desc = (uint16_t)(txq->nb_tx_desc - 1);
		for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1);
				nb_free < max_desc && i != txq->tx_tail;
				i = (i + 1) & max_desc) {
			txe = (struct igb_tx_entry_v *)&txq->sw_ring[i];
			if (txe->mbuf != NULL)
				rte_pktmbuf_free_seg(txe->mbuf);

		for (i = 0; i < txq->nb_tx_desc; i++) {
			txe = (struct igb_tx_entry_v *)&txq->sw_ring[i];

			txs = &txq->sw_ring_seq[i];
ixgbe_tx_free_swring(struct igb_tx_queue *txq)
	if (txq->sw_ring != NULL) {
		rte_free((struct igb_tx_entry_v *)txq->sw_ring - 1);

	if (txq->sw_ring_seq != NULL) {
		rte_free(txq->sw_ring_seq - 1);
		txq->sw_ring_seq = NULL;
ixgbe_reset_tx_queue(struct igb_tx_queue *txq)
	static const union ixgbe_adv_tx_desc zeroed_desc = { .read = {
	struct igb_tx_entry_v *txe = (struct igb_tx_entry_v *)txq->sw_ring;
	struct igb_tx_entry_seq *txs = txq->sw_ring_seq;

	/* Zero out HW ring memory */
	for (i = 0; i < txq->nb_tx_desc; i++)
		txq->tx_ring[i] = zeroed_desc;

	/* Initialize SW ring entries */
	for (i = 0; i < txq->nb_tx_desc; i++) {
		volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
		txd->wb.status = IXGBE_TXD_STAT_DD;
		txs[i].same_pool = 0;

	txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
	txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

	/*
	 * Always allow 1 descriptor to be un-allocated to avoid
	 * a H/W race condition
	 */
	txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);

	memset((void *)&txq->ctx_cache, 0,
		IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
static struct ixgbe_txq_ops vec_txq_ops = {
	.release_mbufs = ixgbe_tx_queue_release_mbufs,
	.free_swring = ixgbe_tx_free_swring,
	.reset = ixgbe_reset_tx_queue,
int ixgbe_txq_vec_setup(struct igb_tx_queue *txq,
		unsigned int socket_id)
	if (txq->sw_ring == NULL)

	/* request one additional entry for the continuous sequence check */
	nb_desc = (uint16_t)(txq->nb_tx_desc + 1);

	txq->sw_ring_seq = rte_zmalloc_socket("txq->sw_ring_seq",
			sizeof(struct igb_tx_entry_seq) * nb_desc,
			CACHE_LINE_SIZE, socket_id);
	if (txq->sw_ring_seq == NULL)

	/* leave the first one for overflow */
	txq->sw_ring = (struct igb_tx_entry *)
		((struct igb_tx_entry_v *)txq->sw_ring + 1);
	txq->sw_ring_seq += 1;
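	/* Both rings now start one entry past their allocations; the spare
	 * leading entry keeps the txsp[i-1] access in tx_backlog_entry (and
	 * the "- 1" frees in ixgbe_tx_free_swring) inside allocated memory. */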
	txq->ops = &vec_txq_ops;
int ixgbe_rxq_vec_setup(struct igb_rx_queue *rxq,
		__rte_unused unsigned int socket_id)
		(uint16_t)-rxq->crc_len,	/* sub crc on pkt_len */
		(uint16_t)(rxq->port_id << 8 | 1),
					/* 8 bits port_id and 8 bits nb_seg */
		(uint16_t)-rxq->crc_len		/* sub crc on data_len */
int ixgbe_rx_vec_condition_check(struct rte_eth_dev *dev)
#ifndef RTE_LIBRTE_IEEE1588
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;

#ifndef RTE_IXGBE_RX_OLFLAGS_ENABLE
	/* without Rx ol_flags, no VLAN packet flag can be reported */
	if (rxmode->hw_vlan_strip != 0 ||
			rxmode->hw_vlan_extend != 0)

	/* no fdir support */
	if (fconf->mode != RTE_FDIR_MODE_NONE)

	/*
	 * - no csum error report support
	 * - no header split support
	 */
	if (rxmode->hw_ip_checksum == 1 ||
			rxmode->header_split == 1)