/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_ethdev.h>
#include <rte_malloc.h>

#include "ixgbe_ethdev.h"
#include "ixgbe_rxtx.h"

#include <tmmintrin.h>

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif

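/*
 * Refill the RX descriptor ring: bulk-allocate RTE_IXGBE_RXQ_REARM_THRESH
 * mbufs from the queue's mempool and write their buffer DMA addresses into
 * the descriptor ring, two descriptors per iteration, using 128-bit stores.
 */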
static inline void
ixgbe_rxq_rearm(struct igb_rx_queue *rxq)
{
        static const struct rte_mbuf mb_def = {
                .pkt = {
                        .next = NULL,
                },
        };
        int i;
        uint16_t rx_id;
        volatile union ixgbe_adv_rx_desc *rxdp;
        struct igb_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
        struct rte_mbuf *mb0, *mb1;
        __m128i def_low;
        __m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
                        RTE_PKTMBUF_HEADROOM);

        /* Pull 'n' more MBUFs into the software ring */
        if (rte_mempool_get_bulk(rxq->mb_pool,
                        (void *)rxep, RTE_IXGBE_RXQ_REARM_THRESH) < 0)
                return;

        rxdp = rxq->rx_ring + rxq->rxrearm_start;

        def_low = _mm_load_si128((__m128i *)&(mb_def.pkt));

        /* Initialize the mbufs in vector, process 2 mbufs in one loop */
        for (i = 0; i < RTE_IXGBE_RXQ_REARM_THRESH; i += 2, rxep += 2) {
                __m128i dma_addr0, dma_addr1;
                __m128i vaddr0, vaddr1;

                mb0 = rxep[0].mbuf;
                mb1 = rxep[1].mbuf;

                /* load buf_addr (low 64 bit) and buf_physaddr (high 64 bit) */
                vaddr0 = _mm_loadu_si128((__m128i *)&(mb0->buf_addr));
                vaddr1 = _mm_loadu_si128((__m128i *)&(mb1->buf_addr));

                /* calculate the VA/PA of the packet data pointer */
                vaddr0 = _mm_add_epi64(vaddr0, hdr_room);
                vaddr1 = _mm_add_epi64(vaddr1, hdr_room);

                /* convert pa to dma_addr hdr/data */
                dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
                dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);
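
                /*
                 * buf_addr (VA) and buf_physaddr (PA) are adjacent 64-bit mbuf
                 * fields, so the 128-bit loads above fetched both at once;
                 * unpackhi duplicates the adjusted PA into both 64-bit lanes,
                 * letting one store below fill both pkt_addr and hdr_addr of
                 * the receive descriptor.
                 */
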
                /* fill the data VA into the default pkt template */
                vaddr0 = _mm_unpacklo_epi64(def_low, vaddr0);
                vaddr1 = _mm_unpacklo_epi64(def_low, vaddr1);

                /* flush desc with pa dma_addr */
                _mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
                _mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);

                /* flush mbuf with pkt template */
                _mm_store_si128((__m128i *)&mb0->pkt, vaddr0);
                _mm_store_si128((__m128i *)&mb1->pkt, vaddr1);

                /* update refcnt per pkt */
                rte_mbuf_refcnt_set(mb0, 1);
                rte_mbuf_refcnt_set(mb1, 1);
        }

        rxq->rxrearm_start += RTE_IXGBE_RXQ_REARM_THRESH;
        if (rxq->rxrearm_start >= rxq->nb_rx_desc)
                rxq->rxrearm_start = 0;

        rxq->rxrearm_nb -= RTE_IXGBE_RXQ_REARM_THRESH;

        rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
                        (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));

        /* Update the tail pointer on the NIC */
        IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
}

/* Handling the offload flags (olflags) field takes computation
 * time when receiving packets. Therefore we provide a flag to disable
 * the processing of the olflags field when they are not needed. This
 * gives improved performance, at the cost of losing the offload info
 * in the received packet.
 */
#ifdef RTE_IXGBE_RX_OLFLAGS_ENABLE

#define OLFLAGS_MASK   ((uint16_t)(PKT_RX_VLAN_PKT | PKT_RX_IPV4_HDR |\
                       PKT_RX_IPV4_HDR_EXT | PKT_RX_IPV6_HDR |\
                       PKT_RX_IPV6_HDR_EXT))
#define OLFLAGS_MASK_V (((uint64_t)OLFLAGS_MASK << 48) | \
                       ((uint64_t)OLFLAGS_MASK << 32) | \
                       ((uint64_t)OLFLAGS_MASK << 16) | \
                       ((uint64_t)OLFLAGS_MASK))
#define PTYPE_SHIFT    (1)
#define VTAG_SHIFT     (3)

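/*
 * Derive ol_flags for four packets at once from their descriptors: the
 * packet-type bits are shifted left by PTYPE_SHIFT and the VLAN bit right by
 * VTAG_SHIFT so that, once the two vectors are OR-ed and each 16-bit lane is
 * masked with OLFLAGS_MASK, every lane holds the PKT_RX_* flags of one packet.
 */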
static inline void
desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
{
        __m128i ptype0, ptype1, vtag0, vtag1;
        union {
                uint16_t e[4];
                uint64_t dword;
        } vol;

        ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]);
        ptype1 = _mm_unpacklo_epi16(descs[2], descs[3]);
        vtag0 = _mm_unpackhi_epi16(descs[0], descs[1]);
        vtag1 = _mm_unpackhi_epi16(descs[2], descs[3]);

        ptype1 = _mm_unpacklo_epi32(ptype0, ptype1);
        vtag1 = _mm_unpacklo_epi32(vtag0, vtag1);

        ptype1 = _mm_slli_epi16(ptype1, PTYPE_SHIFT);
        vtag1 = _mm_srli_epi16(vtag1, VTAG_SHIFT);

        ptype1 = _mm_or_si128(ptype1, vtag1);
        vol.dword = _mm_cvtsi128_si64(ptype1) & OLFLAGS_MASK_V;

        rx_pkts[0]->ol_flags = vol.e[0];
        rx_pkts[1]->ol_flags = vol.e[1];
        rx_pkts[2]->ol_flags = vol.e[2];
        rx_pkts[3]->ol_flags = vol.e[3];
}

#else
#define desc_to_olflags_v(desc, rx_pkts) do {} while (0)
#endif

/*
 * vPMD receive routine, only accepts nb_pkts == RTE_IXGBE_VPMD_RX_BURST
 * in one loop.
 *
 * Notice:
 * - nb_pkts < RTE_IXGBE_VPMD_RX_BURST: just return no packet
 * - nb_pkts > RTE_IXGBE_VPMD_RX_BURST: only scan RTE_IXGBE_VPMD_RX_BURST
 *   descriptors for the DD bit
 * - don't support ol_flags for rss and csum err
 */
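
/*
 * Illustrative caller-side sketch (not part of this driver; port_id and
 * queue_id are placeholders): at least RTE_IXGBE_VPMD_RX_BURST mbuf slots
 * must be requested per call, otherwise no packets are returned, e.g.:
 *
 *      struct rte_mbuf *pkts[RTE_IXGBE_VPMD_RX_BURST];
 *      uint16_t nb = rte_eth_rx_burst(port_id, queue_id, pkts,
 *                      RTE_IXGBE_VPMD_RX_BURST);
 */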
uint16_t
ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts)
{
        volatile union ixgbe_adv_rx_desc *rxdp;
        struct igb_rx_queue *rxq = rx_queue;
        struct igb_rx_entry *sw_ring;
        uint16_t nb_pkts_recd;
        int pos;
        uint64_t var;
        __m128i shuf_msk, in_port, dd_check;

        if (unlikely(nb_pkts < RTE_IXGBE_VPMD_RX_BURST))
                return 0;

        /* Just the act of getting into the function from the application is
         * going to cost about 7 cycles */
        rxdp = rxq->rx_ring + rxq->rx_tail;

        _mm_prefetch((const void *)rxdp, _MM_HINT_T0);

        /* See if we need to rearm the RX queue - gives the prefetch a bit
         * of time to act */
        if (rxq->rxrearm_nb > RTE_IXGBE_RXQ_REARM_THRESH)
                ixgbe_rxq_rearm(rxq);

        /* Before we start moving massive data around, check to see if
         * there is actually a packet available */
        if (!(rxdp->wb.upper.status_error &
                        rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
                return 0;

        /* 4 packets DD mask */
        dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL);

        /* mask to shuffle from desc. to mbuf */
        shuf_msk = _mm_set_epi8(
                7, 6, 5, 4,    /* octet 4~7, 32 bits rss */
                0xFF, 0xFF,    /* skip high 16 bits vlan_macip, zero out */
                15, 14,        /* octet 14~15, low 16 bits vlan_macip */
                0xFF, 0xFF,    /* skip high 16 bits pkt_len, zero out */
                13, 12,        /* octet 12~13, low 16 bits pkt_len */
                0xFF, 0xFF,    /* skip nb_segs and in_port, zero out */
                13, 12         /* octet 12~13, 16 bits data_len */
                );
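
        /*
         * Each shuffled result matches the mbuf packet header starting at
         * pkt.data_len: data_len, nb_segs/in_port (zeroed here and filled in
         * from rxq->misc_info below), pkt_len, vlan_macip and the rss hash,
         * so a single 16-byte store per packet (step D.3) fills all of them.
         */
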
        /* Cache is empty -> need to scan the buffer rings, but first move
         * the next 'n' mbufs into the cache */
        sw_ring = &rxq->sw_ring[rxq->rx_tail];

        /* in_port, nb_seg = 1, crc_len */
        in_port = rxq->misc_info;

        /*
         * A. load 4 packets in one loop
         * B. copy 4 mbuf pointers from sw_ring to rx_pkts
         * C. calc the number of DD bits among the 4 packets
         * D. fill info. from desc to mbuf
         */
        for (pos = 0, nb_pkts_recd = 0; pos < RTE_IXGBE_VPMD_RX_BURST;
                        pos += RTE_IXGBE_DESCS_PER_LOOP,
                        rxdp += RTE_IXGBE_DESCS_PER_LOOP) {
                __m128i descs[RTE_IXGBE_DESCS_PER_LOOP];
                __m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
                __m128i zero, staterr, sterr_tmp1, sterr_tmp2;
                __m128i mbp1, mbp2; /* two mbuf pointers in one XMM reg. */

                /* B.1 load 2 mbuf pointers */
                mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);

                /* Read desc statuses backwards to avoid race condition */
                /* A.1 load 4 pkts desc */
                descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));

                /* B.2 copy 2 mbuf pointers into rx_pkts */
                _mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);

                /* B.1 load 2 more mbuf pointers */
                mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);

                /* A.1 load the remaining 3 pkts desc */
                descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
                descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
                descs[0] = _mm_loadu_si128((__m128i *)(rxdp));

                /* B.2 copy 2 mbuf pointers into rx_pkts */
                _mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);

                /* avoid compiler reorder optimization */
                rte_compiler_barrier();

                /* D.1 pkt 3,4 convert format from desc to pktmbuf */
                pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
                pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);

                /* C.1 4=>2 filter staterr info only */
                sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
                /* C.1 4=>2 filter staterr info only */
                sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);

                /* set ol_flags with packet type and vlan tag */
                desc_to_olflags_v(descs, &rx_pkts[pos]);

                /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
                pkt_mb4 = _mm_add_epi16(pkt_mb4, in_port);
                pkt_mb3 = _mm_add_epi16(pkt_mb3, in_port);

                /* D.1 pkt 1,2 convert format from desc to pktmbuf */
                pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
                pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);

                /* C.2 get 4 pkts staterr value */
                zero = _mm_xor_si128(dd_check, dd_check);
                staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);

                /* D.3 copy final 3,4 data to rx_pkts */
                _mm_storeu_si128((__m128i *)&(rx_pkts[pos+3]->pkt.data_len),
                                pkt_mb4);
                _mm_storeu_si128((__m128i *)&(rx_pkts[pos+2]->pkt.data_len),
                                pkt_mb3);

                /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
                pkt_mb2 = _mm_add_epi16(pkt_mb2, in_port);
                pkt_mb1 = _mm_add_epi16(pkt_mb1, in_port);

                /* C.3 calc available number of desc */
                staterr = _mm_and_si128(staterr, dd_check);
                staterr = _mm_packs_epi32(staterr, zero);

                /* D.3 copy final 1,2 data to rx_pkts */
                _mm_storeu_si128((__m128i *)&(rx_pkts[pos+1]->pkt.data_len),
                                pkt_mb2);
                _mm_storeu_si128((__m128i *)&(rx_pkts[pos]->pkt.data_len),
                                pkt_mb1);
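
                /*
                 * After masking with dd_check and packing to 16-bit lanes,
                 * the low 64 bits of staterr hold one DD bit per packet, so
                 * the popcount below (C.4) yields how many of these four
                 * descriptors are complete; fewer than four ends the burst.
                 */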

                /* C.4 calc available number of desc */
                var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
                nb_pkts_recd += var;
                if (likely(var != RTE_IXGBE_DESCS_PER_LOOP))
                        break;
        }

        /* Update our internal tail pointer */
        rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
        rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
        rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);

        return nb_pkts_recd;
}

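/*
 * Build one advanced TX descriptor from an mbuf: compute the DMA address of
 * the packet data (buf_physaddr plus the data offset inside the buffer) and
 * pair it with cmd_type_len/olinfo_status derived from pkt_len and the
 * supplied flags, then write the descriptor with a single 128-bit store.
 */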
static inline void
vtx1(volatile union ixgbe_adv_tx_desc *txdp,
                struct rte_mbuf *pkt, __m128i flags)
{
        __m128i t0, t1, offset, ols, ba, ctl;

        /* load buf_addr/buf_physaddr in t0 */
        t0 = _mm_loadu_si128((__m128i *)&(pkt->buf_addr));
        /* load data, ... pkt_len in t1 */
        t1 = _mm_loadu_si128((__m128i *)&(pkt->pkt.data));

        /* calc offset = (data - buf_addr) */
        offset = _mm_sub_epi64(t1, t0);

        /* cmd_type_len: pkt_len |= DCMD_DTYP_FLAGS */
        ctl = _mm_or_si128(t1, flags);

        /* reorder as buf_physaddr/buf_addr */
        offset = _mm_shuffle_epi32(offset, 0x4E);

        /* olinfo_status: pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT */
        ols = _mm_slli_epi32(t1, IXGBE_ADVTXD_PAYLEN_SHIFT);

        /* buffer_addr = buf_physaddr + offset */
        ba = _mm_add_epi64(t0, offset);

        /* format cmd_type_len/olinfo_status */
        ctl = _mm_unpackhi_epi32(ctl, ols);

        /* format buf_physaddr/cmd_type_len */
        ba = _mm_unpackhi_epi64(ba, ctl);

        _mm_store_si128((__m128i *)&txdp->read, ba);
}

static inline void
vtx(volatile union ixgbe_adv_tx_desc *txdp,
                struct rte_mbuf **pkt, uint16_t nb_pkts, __m128i flags)
{
        int i;

        for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
                vtx1(txdp, *pkt, flags);
}

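/*
 * Recycle TX buffers whose descriptors the hardware has completed (DD bit
 * set on the threshold descriptor). Up to tx_rs_thresh entries are freed per
 * call; consecutive entries that came from the same mempool are returned
 * with a single bulk put.
 */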
static inline int __attribute__((always_inline))
ixgbe_tx_free_bufs(struct igb_tx_queue *txq)
{
        struct igb_tx_entry_v *txep;
        struct igb_tx_entry_seq *txsp;
        uint32_t status;
        uint32_t n, k;
#ifdef RTE_MBUF_REFCNT
        uint32_t i;
        int nb_free = 0;
        struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];
#endif

        /* check DD bit on threshold descriptor */
        status = txq->tx_ring[txq->tx_next_dd].wb.status;
        if (!(status & IXGBE_ADVTXD_STAT_DD))
                return 0;

        n = txq->tx_rs_thresh;

        /*
         * first buffer to free from S/W ring is at index
         * tx_next_dd - (tx_rs_thresh-1)
         */
        txep = &((struct igb_tx_entry_v *)txq->sw_ring)[txq->tx_next_dd -
                        (n - 1)];
        txsp = &txq->sw_ring_seq[txq->tx_next_dd - (n - 1)];

        k = RTE_MIN(n, txsp[n-1].same_pool);
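
        /*
         * txsp[n-1].same_pool records how many consecutive entries ending at
         * index n-1 came from the same mempool, so up to k buffers can be
         * handed back to that pool with one rte_mempool_put_bulk() call.
         */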

#ifdef RTE_MBUF_REFCNT
        for (i = 0; i < k; i++) {
                m = __rte_pktmbuf_prefree_seg((txep+n-k+i)->mbuf);
                if (m != NULL)
                        free[nb_free++] = m;
        }
        rte_mempool_put_bulk((void *)txsp[n-1].pool,
                        (void **)free, nb_free);
#else
        rte_mempool_put_bulk((void *)txsp[n-1].pool,
                        (void **)(txep+n-k), k);
#endif

        /* buffers were freed, update counters */
        txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
        txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
        if (txq->tx_next_dd >= txq->nb_tx_desc)
                txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);

        return txq->tx_rs_thresh;
}

static inline void __attribute__((always_inline))
tx_backlog_entry(struct igb_tx_entry_v *txep,
                struct igb_tx_entry_seq *txsp,
                struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
        int i;

        for (i = 0; i < (int)nb_pkts; ++i) {
                txep[i].mbuf = tx_pkts[i];
                /* check and update sequence number */
                txsp[i].pool = tx_pkts[i]->pool;
                if (txsp[i-1].pool == tx_pkts[i]->pool)
                        txsp[i].same_pool = txsp[i-1].same_pool + 1;
                else
                        txsp[i].same_pool = 1;
        }
}

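/*
 * Note: for i == 0 the loop above reads txsp[-1]; this is safe because
 * ixgbe_txq_vec_setup() leaves one unused leading entry in sw_ring_seq (and
 * in sw_ring) and advances the queue pointers past it.
 */
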
uint16_t
ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                uint16_t nb_pkts)
{
        struct igb_tx_queue *txq = (struct igb_tx_queue *)tx_queue;
        volatile union ixgbe_adv_tx_desc *txdp;
        struct igb_tx_entry_v *txep;
        struct igb_tx_entry_seq *txsp;
        uint16_t n, nb_commit, tx_id;
        int i;
        __m128i flags = _mm_set_epi32(DCMD_DTYP_FLAGS, 0, 0, 0);
        __m128i rs = _mm_set_epi32(IXGBE_ADVTXD_DCMD_RS|DCMD_DTYP_FLAGS,
                        0, 0, 0);

        if (unlikely(nb_pkts > RTE_IXGBE_VPMD_TX_BURST))
                nb_pkts = RTE_IXGBE_VPMD_TX_BURST;

        if (txq->nb_tx_free < txq->tx_free_thresh)
                ixgbe_tx_free_bufs(txq);

        nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
        if (unlikely(nb_pkts == 0))
                return 0;

        tx_id = txq->tx_tail;
        txdp = &txq->tx_ring[tx_id];
        txep = &((struct igb_tx_entry_v *)txq->sw_ring)[tx_id];
        txsp = &txq->sw_ring_seq[tx_id];

        txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);

        n = (uint16_t)(txq->nb_tx_desc - tx_id);
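
        /*
         * If the burst would run past the end of the ring, fill the n
         * descriptors up to the wrap point first (the last one carrying the
         * RS bit so completion is reported), then wrap tx_id back to 0 and
         * continue with the remaining packets.
         */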

        if (nb_commit >= n) {
                tx_backlog_entry(txep, txsp, tx_pkts, n);

                for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
                        vtx1(txdp, *tx_pkts, flags);

                vtx1(txdp, *tx_pkts++, rs);

                nb_commit = (uint16_t)(nb_commit - n);

                tx_id = 0;
                txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

                /* avoid reaching the end of the ring */
                txdp = &(txq->tx_ring[tx_id]);
                txep = &(((struct igb_tx_entry_v *)txq->sw_ring)[tx_id]);
                txsp = &(txq->sw_ring_seq[tx_id]);
        }

        tx_backlog_entry(txep, txsp, tx_pkts, nb_commit);

        vtx(txdp, tx_pkts, nb_commit, flags);

        tx_id = (uint16_t)(tx_id + nb_commit);
        if (tx_id > txq->tx_next_rs) {
                txq->tx_ring[txq->tx_next_rs].read.cmd_type_len |=
                        rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
                txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
                        txq->tx_rs_thresh);
        }

        txq->tx_tail = tx_id;

        IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);

        return nb_pkts;
}

static void
ixgbe_tx_queue_release_mbufs(struct igb_tx_queue *txq)
{
        unsigned i;
        struct igb_tx_entry_v *txe;
        struct igb_tx_entry_seq *txs;
        uint16_t nb_free, max_desc;

        if (txq->sw_ring != NULL) {
                /* release the used mbufs in sw_ring */
                nb_free = txq->nb_tx_free;
                max_desc = (uint16_t)(txq->nb_tx_desc - 1);
                for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1);
                                nb_free < max_desc && i != txq->tx_tail;
                                i = (i + 1) & max_desc) {
                        txe = (struct igb_tx_entry_v *)&txq->sw_ring[i];
                        if (txe->mbuf != NULL)
                                rte_pktmbuf_free_seg(txe->mbuf);
                }

                /* reset the sw_ring and sequence entries */
                for (i = 0; i < txq->nb_tx_desc; i++) {
                        txe = (struct igb_tx_entry_v *)&txq->sw_ring[i];
                        txe->mbuf = NULL;

                        txs = &txq->sw_ring_seq[i];
                        txs->same_pool = 0;
                }
        }
}

static void
ixgbe_tx_free_swring(struct igb_tx_queue *txq)
{
        if (txq->sw_ring != NULL) {
                rte_free((struct igb_tx_entry_v *)txq->sw_ring - 1);
                txq->sw_ring = NULL;
        }

        if (txq->sw_ring_seq != NULL) {
                rte_free(txq->sw_ring_seq - 1);
                txq->sw_ring_seq = NULL;
        }
}

static void
ixgbe_reset_tx_queue(struct igb_tx_queue *txq)
{
        static const union ixgbe_adv_tx_desc zeroed_desc = { .read = {
                        .buffer_addr = 0} };
        struct igb_tx_entry_v *txe = (struct igb_tx_entry_v *)txq->sw_ring;
        struct igb_tx_entry_seq *txs = txq->sw_ring_seq;
        uint16_t i;

        /* Zero out HW ring memory */
        for (i = 0; i < txq->nb_tx_desc; i++)
                txq->tx_ring[i] = zeroed_desc;

        /* Initialize SW ring entries */
        for (i = 0; i < txq->nb_tx_desc; i++) {
                volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];

                txd->wb.status = IXGBE_TXD_STAT_DD;
                txe[i].mbuf = NULL;
                txs[i].same_pool = 0;
        }

        txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
        txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

        /*
         * Always allow 1 descriptor to be un-allocated to avoid
         * a H/W race condition
         */
        txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
        txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);

        memset((void *)&txq->ctx_cache, 0,
                        IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
}

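/*
 * Queue operations installed by ixgbe_txq_vec_setup() so that queue release
 * and reset go through these vector-specific handlers, which understand the
 * sw_ring/sw_ring_seq layout used above.
 */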
static struct ixgbe_txq_ops vec_txq_ops = {
        .release_mbufs = ixgbe_tx_queue_release_mbufs,
        .free_swring = ixgbe_tx_free_swring,
        .reset = ixgbe_reset_tx_queue,
};

int ixgbe_txq_vec_setup(struct igb_tx_queue *txq,
                unsigned int socket_id)
{
        uint16_t nb_desc;

        if (txq->sw_ring == NULL)
                return -1;

        /* request one additional entry for the continuous sequence check */
        nb_desc = (uint16_t)(txq->nb_tx_desc + 1);

        txq->sw_ring_seq = rte_zmalloc_socket("txq->sw_ring_seq",
                        sizeof(struct igb_tx_entry_seq) * nb_desc,
                        CACHE_LINE_SIZE, socket_id);
        if (txq->sw_ring_seq == NULL)
                return -1;

        /* leave the first entry unused; index -1 is read in tx_backlog_entry() */
        txq->sw_ring = (struct igb_tx_entry *)
                        ((struct igb_tx_entry_v *)txq->sw_ring + 1);
        txq->sw_ring_seq += 1;
        txq->ops = &vec_txq_ops;

        return 0;
}

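/*
 * RX queue setup for the vector path: pre-compute the per-queue constants
 * that the receive loop adds to every packet header (the CRC length to
 * subtract from pkt_len/data_len and the packed port_id/nb_segs word used
 * as the in_port vector in ixgbe_recv_pkts_vec()).
 */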
int ixgbe_rxq_vec_setup(struct igb_rx_queue *rxq,
                __rte_unused unsigned int socket_id)
{
        rxq->misc_info = _mm_set_epi16(
                        0, 0, 0, 0, 0,
                        (uint16_t)-rxq->crc_len, /* sub crc on pkt_len */
                        (uint16_t)(rxq->port_id << 8 | 1),
                        /* 8 bits port_id and 8 bits nb_segs */
                        (uint16_t)-rxq->crc_len /* sub crc on data_len */
                        );

        return 0;
}

int ixgbe_rx_vec_condition_check(struct rte_eth_dev *dev)
{
#ifndef RTE_LIBRTE_IEEE1588
        struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
        struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;

#ifndef RTE_IXGBE_RX_OLFLAGS_ENABLE
        /* without rx ol_flags, no VLAN flag can be reported */
        if (rxmode->hw_vlan_strip != 0 ||
                        rxmode->hw_vlan_extend != 0)
                return -1;
#endif

        /* no fdir support */
        if (fconf->mode != RTE_FDIR_MODE_NONE)
                return -1;

        /*
         * - no csum error report support
         * - no header split support
         */
        if (rxmode->hw_ip_checksum == 1 ||
                        rxmode->header_split == 1)
                return -1;

        return 0;
#else
        RTE_SET_USED(dev);
        return -1;
#endif
}