/*-
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdint.h>

#include <rte_ethdev.h>
#include <rte_malloc.h>

#include "ixgbe_ethdev.h"
#include "ixgbe_rxtx.h"

#include <nmmintrin.h>

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
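/*
 * Default mbuf template: ixgbe_rxq_rearm() loads its pkt header once into an
 * XMM register and reuses the low 64 bits, paired with the computed data
 * pointer, for every newly allocated mbuf instead of writing each field
 * individually.
 */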
static struct rte_mbuf mb_def = {
static inline void
ixgbe_rxq_rearm(struct igb_rx_queue *rxq)
{
	int i;
	uint16_t rx_id;
	volatile union ixgbe_adv_rx_desc *rxdp;
	struct igb_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
	struct rte_mbuf *mb0, *mb1;
	__m128i def_low;
	__m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
			RTE_PKTMBUF_HEADROOM);

	/* Pull 'n' more MBUFs into the software ring */
	if (rte_mempool_get_bulk(rxq->mb_pool,
				 (void *)rxep, RTE_IXGBE_RXQ_REARM_THRESH) < 0)
		return;

	rxdp = rxq->rx_ring + rxq->rxrearm_start;

	/* load the mbuf template once; it is reused for every mbuf below */
	def_low = _mm_load_si128((__m128i *)&(mb_def.pkt));

	/* Initialize the mbufs in the vector, processing 2 mbufs per loop */
	for (i = 0; i < RTE_IXGBE_RXQ_REARM_THRESH; i += 2, rxep += 2) {
		__m128i dma_addr0, dma_addr1;
		__m128i vaddr0, vaddr1;

		mb0 = rxep[0].mbuf;
		mb1 = rxep[1].mbuf;

		/* load buf_addr (low 64 bits) and buf_physaddr (high 64 bits) */
		vaddr0 = _mm_loadu_si128((__m128i *)&(mb0->buf_addr));
		vaddr1 = _mm_loadu_si128((__m128i *)&(mb1->buf_addr));

		/* calculate the VA/PA of the packet data start */
		vaddr0 = _mm_add_epi64(vaddr0, hdr_room);
		vaddr1 = _mm_add_epi64(vaddr1, hdr_room);

		/* convert the PA to the descriptor DMA address (hdr/data) */
		dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
		dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);
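		/*
		 * The unpackhi above duplicates the physical address into both
		 * 64-bit lanes, so the same value fills the descriptor's
		 * packet-buffer and header-buffer address fields.
		 */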
		/* fill the template plus the data VA into the mbuf image */
		vaddr0 = _mm_unpacklo_epi64(def_low, vaddr0);
		vaddr1 = _mm_unpacklo_epi64(def_low, vaddr1);

		/* flush desc with pa dma_addr */
		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);

		/* flush mbuf with pkt template */
		_mm_store_si128((__m128i *)&mb0->pkt, vaddr0);
		_mm_store_si128((__m128i *)&mb1->pkt, vaddr1);

		/* update the reference count per packet */
		rte_mbuf_refcnt_set(mb0, 1);
		rte_mbuf_refcnt_set(mb1, 1);
	}

	rxq->rxrearm_start += RTE_IXGBE_RXQ_REARM_THRESH;
	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
		rxq->rxrearm_start = 0;

	rxq->rxrearm_nb -= RTE_IXGBE_RXQ_REARM_THRESH;

	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
			(rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));

	/* Update the tail pointer on the NIC */
	IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
}
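/*
 * Rearm bookkeeping: rxrearm_start is the index of the next descriptor to
 * refill, rxrearm_nb counts descriptors still waiting for a new mbuf, and
 * the RDT tail register is written to the last refilled slot (one behind
 * rxrearm_start).
 */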
/*
 * Handling the offload flags (olflags) field takes computation time when
 * receiving packets. Therefore we provide a flag to disable the processing
 * of the olflags field when it is not needed. This gives improved
 * performance, at the cost of losing the offload info in the received
 * packet.
 */
#ifdef RTE_IXGBE_RX_OLFLAGS_ENABLE

#define OLFLAGS_MASK ((uint16_t)(PKT_RX_VLAN_PKT | PKT_RX_IPV4_HDR |\
			PKT_RX_IPV4_HDR_EXT | PKT_RX_IPV6_HDR |\
			PKT_RX_IPV6_HDR_EXT))
#define OLFLAGS_MASK_V (((uint64_t)OLFLAGS_MASK << 48) | \
			((uint64_t)OLFLAGS_MASK << 32) | \
			((uint64_t)OLFLAGS_MASK << 16) | \
			((uint64_t)OLFLAGS_MASK))
#define PTYPE_SHIFT (1)
#define VTAG_SHIFT (3)
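/*
 * OLFLAGS_MASK_V replicates the 16-bit flag mask into the four 16-bit lanes
 * of the low 64 bits, so the flags of four packets can be masked with one
 * AND.  PTYPE_SHIFT and VTAG_SHIFT move the hardware packet-type and VLAN
 * bits onto the corresponding PKT_RX_* flag positions.
 */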
static inline void
desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
{
	__m128i ptype0, ptype1, vtag0, vtag1;
	union {
		uint16_t e[4];
		uint64_t dword;
	} vol;

	ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]);
	ptype1 = _mm_unpacklo_epi16(descs[2], descs[3]);
	vtag0 = _mm_unpackhi_epi16(descs[0], descs[1]);
	vtag1 = _mm_unpackhi_epi16(descs[2], descs[3]);

	ptype1 = _mm_unpacklo_epi32(ptype0, ptype1);
	vtag1 = _mm_unpacklo_epi32(vtag0, vtag1);

	ptype1 = _mm_slli_epi16(ptype1, PTYPE_SHIFT);
	vtag1 = _mm_srli_epi16(vtag1, VTAG_SHIFT);

	ptype1 = _mm_or_si128(ptype1, vtag1);
	vol.dword = _mm_cvtsi128_si64(ptype1) & OLFLAGS_MASK_V;

	rx_pkts[0]->ol_flags = vol.e[0];
	rx_pkts[1]->ol_flags = vol.e[1];
	rx_pkts[2]->ol_flags = vol.e[2];
	rx_pkts[3]->ol_flags = vol.e[3];
}
#else
#define desc_to_olflags_v(desc, rx_pkts) do {} while (0)
#endif
/*
 * vPMD receive routine; only accepts nb_pkts == RTE_IXGBE_VPMD_RX_BURST.
 *
 * Notice:
 * - if nb_pkts < RTE_IXGBE_VPMD_RX_BURST, no packets are returned
 * - if nb_pkts > RTE_IXGBE_VPMD_RX_BURST, only that many descriptors
 *   are scanned for the DD bit
 * - ol_flags are not reported for RSS and checksum errors
 */
uint16_t
ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	volatile union ixgbe_adv_rx_desc *rxdp;
	struct igb_rx_queue *rxq = rx_queue;
	struct igb_rx_entry *sw_ring;
	uint16_t nb_pkts_recd;
	int pos;
	uint64_t var;
	__m128i shuf_msk, in_port, dd_check;

	if (unlikely(nb_pkts < RTE_IXGBE_VPMD_RX_BURST))
		return 0;

	/* Just the act of getting into the function from the application is
	 * going to cost about 7 cycles */
	rxdp = rxq->rx_ring + rxq->rx_tail;

	_mm_prefetch((const void *)rxdp, _MM_HINT_T0);

	/* See if we need to rearm the RX queue - gives the prefetch a bit
	 * of time to act */
	if (rxq->rxrearm_nb > RTE_IXGBE_RXQ_REARM_THRESH)
		ixgbe_rxq_rearm(rxq);

	/* Before we start moving massive data around, check to see if
	 * there is actually a packet available */
	if (!(rxdp->wb.upper.status_error &
			rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
		return 0;

	/* mask of the DD bit for 4 packets */
	dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL);

	/* mask to shuffle from desc. to mbuf */
	shuf_msk = _mm_set_epi8(
		7, 6, 5, 4,   /* octet 4~7, 32 bits rss */
		0xFF, 0xFF,   /* skip high 16 bits of vlan_macip, zero out */
		15, 14,       /* octet 14~15, low 16 bits of vlan_macip */
		0xFF, 0xFF,   /* skip high 16 bits of pkt_len, zero out */
		13, 12,       /* octet 12~13, low 16 bits of pkt_len */
		0xFF, 0xFF,   /* skip nb_segs and in_port, zero out */
		13, 12        /* octet 12~13, 16 bits of data_len */
		);
	/* Cache is empty -> need to scan the buffer rings, but first move
	 * the next 'n' mbufs into the cache */
	sw_ring = &rxq->sw_ring[rxq->rx_tail];

	/* in_port, nb_segs = 1 and -crc_len, pre-built per queue */
	in_port = rxq->misc_info;
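	/*
	 * rxq->misc_info (pre-built in ixgbe_rxq_vec_setup()) holds -crc_len
	 * in the pkt_len/data_len lanes plus the port_id/nb_segs pair, so a
	 * single 16-bit vector add per packet below both strips the CRC and
	 * fills in those static mbuf fields.
	 */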
	/*
	 * A. load 4 packet descriptors in one loop iteration
	 * B. copy 4 mbuf pointers from sw_ring to rx_pkts
	 * C. calculate the number of DD bits among the 4 packets
	 * D. fill packet info from the descriptors into the mbufs
	 */
	for (pos = 0, nb_pkts_recd = 0; pos < RTE_IXGBE_VPMD_RX_BURST;
			pos += RTE_IXGBE_DESCS_PER_LOOP,
			rxdp += RTE_IXGBE_DESCS_PER_LOOP) {
		__m128i descs[RTE_IXGBE_DESCS_PER_LOOP];
		__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
		__m128i zero, staterr, sterr_tmp1, sterr_tmp2;
		__m128i mbp1, mbp2; /* two mbuf pointers in one XMM reg. */

		/* B.1 load 2 consecutive mbuf pointers */
		mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);

		/* Read desc statuses backwards to avoid race condition */
		/* A.1 load 4 packet descriptors */
		descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));

		/* B.2 copy 2 mbuf pointers into rx_pkts */
		_mm_store_si128((__m128i *)&rx_pkts[pos], mbp1);

		/* B.1 load the next 2 mbuf pointers */
		mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos + 2]);

		descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
		/* A.1 load the remaining descriptors */
		descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
		descs[0] = _mm_loadu_si128((__m128i *)(rxdp));

		/* B.2 copy 2 mbuf pointers into rx_pkts */
		_mm_store_si128((__m128i *)&rx_pkts[pos + 2], mbp2);

		/* avoid compiler reorder optimization */
		rte_compiler_barrier();
		/* D.1 pkt 3,4 convert format from desc to pktmbuf */
		pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
		pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);

		/* C.1 4=>2 filter staterr info only */
		sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
		/* C.1 4=>2 filter staterr info only */
		sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);

		/* set ol_flags with packet type and VLAN tag */
		desc_to_olflags_v(descs, &rx_pkts[pos]);

		/* D.2 pkt 3,4: set in_port/nb_segs and remove CRC */
		pkt_mb4 = _mm_add_epi16(pkt_mb4, in_port);
		pkt_mb3 = _mm_add_epi16(pkt_mb3, in_port);

		/* D.1 pkt 1,2 convert format from desc to pktmbuf */
		pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
		pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);

		/* C.2 get 4 pkts staterr value */
		zero = _mm_xor_si128(dd_check, dd_check);
		staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);

		/* D.3 copy final 3,4 data to rx_pkts */
		_mm_storeu_si128((__m128i *)&(rx_pkts[pos + 3]->pkt.data_len),
				pkt_mb4);
		_mm_storeu_si128((__m128i *)&(rx_pkts[pos + 2]->pkt.data_len),
				pkt_mb3);

		/* D.2 pkt 1,2: set in_port/nb_segs and remove CRC */
		pkt_mb2 = _mm_add_epi16(pkt_mb2, in_port);
		pkt_mb1 = _mm_add_epi16(pkt_mb1, in_port);

		/* C.3 calc available number of desc */
		staterr = _mm_and_si128(staterr, dd_check);
		staterr = _mm_packs_epi32(staterr, zero);
		/* D.3 copy final 1,2 data to rx_pkts */
		_mm_storeu_si128((__m128i *)&(rx_pkts[pos + 1]->pkt.data_len),
				pkt_mb2);
		_mm_storeu_si128((__m128i *)&(rx_pkts[pos]->pkt.data_len),
				pkt_mb1);

		/* C.4 calc available number of desc */
		var = _mm_popcnt_u64(_mm_cvtsi128_si64(staterr));
		nb_pkts_recd += var;
		if (likely(var != RTE_IXGBE_DESCS_PER_LOOP))
			break;
	}
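	/*
	 * nb_pkts_recd now holds the number of contiguous completed packets:
	 * each iteration added the popcount of the DD bits in its group of
	 * four descriptors and the scan stopped at the first incomplete group.
	 */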
	/* Update our internal tail pointer */
	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
	rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
	rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);

	return nb_pkts_recd;
}
static inline void
vtx1(volatile union ixgbe_adv_tx_desc *txdp,
		struct rte_mbuf *pkt, __m128i flags)
{
	__m128i t0, t1, offset, ols, ba, ctl;

	/* load buf_addr/buf_physaddr in t0 */
	t0 = _mm_loadu_si128((__m128i *)&(pkt->buf_addr));
	/* load data, ..., pkt_len in t1 */
	t1 = _mm_loadu_si128((__m128i *)&(pkt->pkt.data));

	/* calc offset = (data - buf_addr) */
	offset = _mm_sub_epi64(t1, t0);

	/* cmd_type_len: pkt_len |= DCMD_DTYP_FLAGS */
	ctl = _mm_or_si128(t1, flags);

	/* reorder as buf_physaddr/buf_addr */
	offset = _mm_shuffle_epi32(offset, 0x4E);

	/* olinfo_status: pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT */
	ols = _mm_slli_epi32(t1, IXGBE_ADVTXD_PAYLEN_SHIFT);

	/* buffer_addr = buf_physaddr + offset */
	ba = _mm_add_epi64(t0, offset);

	/* format cmd_type_len/olinfo_status */
	ctl = _mm_unpackhi_epi32(ctl, ols);

	/* format buf_physaddr/cmd_type_len */
	ba = _mm_unpackhi_epi64(ba, ctl);
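	/*
	 * 'ba' now holds the full 16-byte descriptor image: the packet buffer
	 * physical address in the low 64 bits and cmd_type_len/olinfo_status
	 * in the high 64 bits, so a single store fills the descriptor.
	 */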
	_mm_store_si128((__m128i *)&txdp->read, ba);
}
static inline void
vtx(volatile union ixgbe_adv_tx_desc *txdp,
		struct rte_mbuf **pkt, uint16_t nb_pkts, __m128i flags)
{
	int i;

	for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
		vtx1(txdp, *pkt, flags);
}

static inline int __attribute__((always_inline))
ixgbe_tx_free_bufs(struct igb_tx_queue *txq)
{
	struct igb_tx_entry_v *txep;
	struct igb_tx_entry_seq *txsp;
	uint32_t status;
	uint32_t n, k;
#ifdef RTE_MBUF_SCATTER_GATHER
	uint32_t i;
	int nb_free = 0;
	struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];
#endif

	/* check DD bit on threshold descriptor */
	status = txq->tx_ring[txq->tx_next_dd].wb.status;
	if (!(status & IXGBE_ADVTXD_STAT_DD))
		return 0;

	n = txq->tx_rs_thresh;

	/*
	 * first buffer to free from S/W ring is at index
	 * tx_next_dd - (tx_rs_thresh - 1)
	 */
	txep = &((struct igb_tx_entry_v *)txq->sw_ring)[txq->tx_next_dd -
			(n - 1)];
	txsp = &txq->sw_ring_seq[txq->tx_next_dd - (n - 1)];
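	/*
	 * txsp[i].same_pool records how many consecutive entries ending at i
	 * came from the same mempool (see tx_backlog_entry()), so buffers can
	 * be returned in runs of 'k' with a single rte_mempool_put_bulk().
	 */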
	k = RTE_MIN(n, txsp[n-1].same_pool);
#ifdef RTE_MBUF_SCATTER_GATHER
	for (i = 0; i < k; i++) {
		m = __rte_pktmbuf_prefree_seg((txep+n-k+i)->mbuf);
		if (m != NULL)
			free[nb_free++] = m;
	}
	rte_mempool_put_bulk((void *)txsp[n-1].pool,
			(void **)free, nb_free);
#else
	rte_mempool_put_bulk((void *)txsp[n-1].pool,
			(void **)(txep+n-k), k);
#endif

	/* buffers were freed, update counters */
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
	if (txq->tx_next_dd >= txq->nb_tx_desc)
		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);

	return txq->tx_rs_thresh;
}
static inline void __attribute__((always_inline))
tx_backlog_entry(struct igb_tx_entry_v *txep,
		struct igb_tx_entry_seq *txsp,
		struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	int i;

	for (i = 0; i < (int)nb_pkts; ++i) {
		txep[i].mbuf = tx_pkts[i];

		/* check and update the sequence number */
		txsp[i].pool = tx_pkts[i]->pool;
		if (txsp[i-1].pool == tx_pkts[i]->pool)
			txsp[i].same_pool = txsp[i-1].same_pool + 1;
		else
			txsp[i].same_pool = 1;
	}
}
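/*
 * Note: for i == 0 the loop above reads txsp[-1]; this is safe because
 * ixgbe_txq_vec_setup() allocates one extra leading entry in sw_ring_seq
 * (and sw_ring) and advances the queue pointers past it.
 */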
uint16_t
ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct igb_tx_queue *txq = (struct igb_tx_queue *)tx_queue;
	volatile union ixgbe_adv_tx_desc *txdp;
	struct igb_tx_entry_v *txep;
	struct igb_tx_entry_seq *txsp;
	uint16_t n, nb_commit, tx_id;
	int i;
	__m128i flags = _mm_set_epi32(DCMD_DTYP_FLAGS, 0, 0, 0);
	__m128i rs = _mm_set_epi32(IXGBE_ADVTXD_DCMD_RS|DCMD_DTYP_FLAGS,
			0, 0, 0);

	if (unlikely(nb_pkts > RTE_IXGBE_VPMD_TX_BURST))
		nb_pkts = RTE_IXGBE_VPMD_TX_BURST;

	if (txq->nb_tx_free < txq->tx_free_thresh)
		ixgbe_tx_free_bufs(txq);

	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
	if (unlikely(nb_pkts == 0))
		return 0;

	tx_id = txq->tx_tail;
	txdp = &txq->tx_ring[tx_id];
	txep = &((struct igb_tx_entry_v *)txq->sw_ring)[tx_id];
	txsp = &txq->sw_ring_seq[tx_id];

	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);

	n = (uint16_t)(txq->nb_tx_desc - tx_id);
	if (nb_commit >= n) {
		tx_backlog_entry(txep, txsp, tx_pkts, n);

		for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
			vtx1(txdp, *tx_pkts, flags);

		vtx1(txdp, *tx_pkts++, rs);

		nb_commit = (uint16_t)(nb_commit - n);

		tx_id = 0;
		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

		/* wrap back to the start of the ring */
		txdp = &(txq->tx_ring[tx_id]);
		txep = &(((struct igb_tx_entry_v *)txq->sw_ring)[tx_id]);
		txsp = &(txq->sw_ring_seq[tx_id]);
	}

	tx_backlog_entry(txep, txsp, tx_pkts, nb_commit);

	vtx(txdp, tx_pkts, nb_commit, flags);

	tx_id = (uint16_t)(tx_id + nb_commit);
	if (tx_id > txq->tx_next_rs) {
		txq->tx_ring[txq->tx_next_rs].read.cmd_type_len |=
			rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
		txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
			txq->tx_rs_thresh);
	}

	txq->tx_tail = tx_id;

	IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);

	return nb_pkts;
}
static void
ixgbe_tx_queue_release_mbufs(struct igb_tx_queue *txq)
{
	unsigned i;
	struct igb_tx_entry_v *txe;
	struct igb_tx_entry_seq *txs;
	uint16_t nb_free, max_desc;

	if (txq->sw_ring != NULL) {
		/* release the used mbufs in sw_ring */
		nb_free = txq->nb_tx_free;
		max_desc = (uint16_t)(txq->nb_tx_desc - 1);
		for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1);
		     nb_free < max_desc && i != txq->tx_tail;
		     i = (i + 1) & max_desc) {
			txe = (struct igb_tx_entry_v *)&txq->sw_ring[i];
			if (txe->mbuf != NULL)
				rte_pktmbuf_free_seg(txe->mbuf);
		}

		/* reset the tx entries */
		for (i = 0; i < txq->nb_tx_desc; i++) {
			txe = (struct igb_tx_entry_v *)&txq->sw_ring[i];
			txe->mbuf = NULL;

			txs = &txq->sw_ring_seq[i];
			txs->pool = NULL;
			txs->same_pool = 0;
		}
	}
}
static void
ixgbe_tx_free_swring(struct igb_tx_queue *txq)
{
	if (txq->sw_ring != NULL) {
		rte_free((struct igb_tx_entry_v *)txq->sw_ring - 1);
		txq->sw_ring = NULL;
	}
	if (txq->sw_ring_seq != NULL) {
		rte_free(txq->sw_ring_seq - 1);
		txq->sw_ring_seq = NULL;
	}
}
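/*
 * The "- 1" above undoes the "+ 1" applied in ixgbe_txq_vec_setup(), so the
 * pointer handed to rte_free() is the one originally returned by the
 * allocator.
 */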
static void
ixgbe_reset_tx_queue(struct igb_tx_queue *txq)
{
	static const union ixgbe_adv_tx_desc zeroed_desc = { .read = {
			.buffer_addr = 0} };
	struct igb_tx_entry_v *txe = (struct igb_tx_entry_v *)txq->sw_ring;
	struct igb_tx_entry_seq *txs = txq->sw_ring_seq;
	uint16_t i;

	/* Zero out HW ring memory */
	for (i = 0; i < txq->nb_tx_desc; i++)
		txq->tx_ring[i] = zeroed_desc;

	/* Initialize SW ring entries */
	for (i = 0; i < txq->nb_tx_desc; i++) {
		volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];

		txd->wb.status = IXGBE_TXD_STAT_DD;
		txe[i].mbuf = NULL;
		txs[i].pool = NULL;
		txs[i].same_pool = 0;
	}

	txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
	txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

	/*
	 * Always allow 1 descriptor to be un-allocated to avoid
	 * a H/W race condition
	 */
	txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);

	memset((void *)&txq->ctx_cache, 0,
		IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
}

static struct ixgbe_txq_ops vec_txq_ops = {
	.release_mbufs = ixgbe_tx_queue_release_mbufs,
	.free_swring = ixgbe_tx_free_swring,
	.reset = ixgbe_reset_tx_queue,
};
int ixgbe_txq_vec_setup(struct igb_tx_queue *txq,
		unsigned int socket_id)
{
	uint16_t nb_desc;

	if (txq->sw_ring == NULL)
		return -1;

	/* request one additional entry for the continuous-sequence look-back */
	nb_desc = (uint16_t)(txq->nb_tx_desc + 1);

	txq->sw_ring_seq = rte_zmalloc_socket("txq->sw_ring_seq",
			sizeof(struct igb_tx_entry_seq) * nb_desc,
			CACHE_LINE_SIZE, socket_id);
	if (txq->sw_ring_seq == NULL)
		return -1;

	/* leave the first entry of both rings for the overflow look-back */
	txq->sw_ring = (struct igb_tx_entry *)
		((struct igb_tx_entry_v *)txq->sw_ring + 1);
	txq->sw_ring_seq += 1;
	txq->ops = &vec_txq_ops;

	return 0;
}
int ixgbe_rxq_vec_setup(struct igb_rx_queue *rxq,
		__rte_unused unsigned int socket_id)
{
	rxq->misc_info = _mm_set_epi16(
			0, 0, 0, 0, 0,
			(uint16_t)-rxq->crc_len, /* sub crc on pkt_len */
			(uint16_t)(rxq->port_id << 8 | 1),
			/* 8b port_id and 8b nb_seg */
			(uint16_t)-rxq->crc_len /* sub crc on data_len */
			);
	return 0;
}
int ixgbe_rx_vec_condition_check(struct rte_eth_dev *dev)
{
#ifndef RTE_LIBRTE_IEEE1588
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;

#ifndef RTE_IXGBE_RX_OLFLAGS_ENABLE
	/* without rx ol_flags, no VP flag report */
	if (rxmode->hw_vlan_strip != 0 ||
	    rxmode->hw_vlan_extend != 0)
		return -1;
#endif

	/* no fdir support */
	if (fconf->mode != RTE_FDIR_MODE_NONE)
		return -1;

	/*
	 * - no csum error report support
	 * - no header split support
	 */
	if (rxmode->hw_ip_checksum == 1 ||
	    rxmode->header_split == 1)
		return -1;

	return 0;
#else
	return -1;
#endif
}