 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   Copyright 2014 6WIND S.A.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>
#include <rte_errno.h>

#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "ixgbe_ethdev.h"
#include "base/ixgbe_dcb.h"
#include "base/ixgbe_common.h"
#include "ixgbe_rxtx.h"
#ifdef RTE_LIBRTE_IEEE1588
#define IXGBE_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
#else
#define IXGBE_TX_IEEE1588_TMST 0
#endif

/* Bit Mask to indicate what bits are required for building TX context */
#define IXGBE_TX_OFFLOAD_MASK (			 \
		PKT_TX_VLAN_PKT |		 \
		PKT_TX_IP_CKSUM |		 \
		PKT_TX_L4_MASK |		 \
		PKT_TX_TCP_SEG |		 \
		PKT_TX_MACSEC |			 \
		PKT_TX_OUTER_IP_CKSUM |		 \
		IXGBE_TX_IEEE1588_TMST)

#define IXGBE_TX_OFFLOAD_NOTSUP_MASK \
		(PKT_TX_OFFLOAD_MASK ^ IXGBE_TX_OFFLOAD_MASK)
#define RTE_PMD_USE_PREFETCH

#ifdef RTE_PMD_USE_PREFETCH
/* Prefetch a cache line into all cache levels. */
#define rte_ixgbe_prefetch(p)   rte_prefetch0(p)
#else
#define rte_ixgbe_prefetch(p)   do {} while (0)
#endif

#ifdef RTE_IXGBE_INC_VECTOR
uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
				    uint16_t nb_pkts);
#endif

/*********************************************************************
 *  TX functions
 **********************************************************************/
/*
 * Check for descriptors with their DD bit set and free mbufs.
 * Return the total number of buffers freed.
 */
static __rte_always_inline int
ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
{
	struct ixgbe_tx_entry *txep;
	uint32_t status;
	int i, nb_free = 0;
	struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];

	/* check DD bit on threshold descriptor */
	status = txq->tx_ring[txq->tx_next_dd].wb.status;
	if (!(status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD)))
		return 0;

	/*
	 * first buffer to free from S/W ring is at index
	 * tx_next_dd - (tx_rs_thresh-1)
	 */
	txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]);

	for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
		/* free buffers one at a time */
		m = rte_pktmbuf_prefree_seg(txep->mbuf);
		txep->mbuf = NULL;

		if (unlikely(m == NULL))
			continue;

		if (nb_free >= RTE_IXGBE_TX_MAX_FREE_BUF_SZ ||
		    (nb_free > 0 && m->pool != free[0]->pool)) {
			rte_mempool_put_bulk(free[0]->pool,
					     (void **)free, nb_free);
			nb_free = 0;
		}

		free[nb_free++] = m;
	}

	if (nb_free > 0)
		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);

	/* buffers were freed, update counters */
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
	if (txq->tx_next_dd >= txq->nb_tx_desc)
		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);

	return txq->tx_rs_thresh;
}
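/*
 * Illustrative note (not part of the original code): the fast free path
 * above relies on tx_rs_thresh. Only one descriptor per tx_rs_thresh
 * packets carries the RS bit, so checking the DD bit of that single
 * "threshold" descriptor is enough to know that the whole preceding group
 * of buffers can be returned to the mempool. For example, assuming a
 * 512-entry ring and tx_rs_thresh = 32: if tx_next_dd = 63 and its DD bit
 * is set, S/W ring entries 32..63 are freed in one pass and tx_next_dd
 * advances to 95.
 */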
/* Populate 4 descriptors with data from 4 mbufs */
static inline void
tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
{
	uint64_t buf_dma_addr;
	uint32_t pkt_len;
	int i;

	for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
		buf_dma_addr = rte_mbuf_data_dma_addr(*pkts);
		pkt_len = (*pkts)->data_len;

		/* write data to descriptor */
		txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
		txdp->read.cmd_type_len =
			rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
		txdp->read.olinfo_status =
			rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
		rte_prefetch0(&(*pkts)->pool);
	}
}
/* Populate 1 descriptor with data from 1 mbuf */
static inline void
tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
{
	uint64_t buf_dma_addr;
	uint32_t pkt_len;

	buf_dma_addr = rte_mbuf_data_dma_addr(*pkts);
	pkt_len = (*pkts)->data_len;

	/* write data to descriptor */
	txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
	txdp->read.cmd_type_len =
			rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
	txdp->read.olinfo_status =
			rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
	rte_prefetch0(&(*pkts)->pool);
}
223 * Fill H/W descriptor ring with mbuf data.
224 * Copy mbuf pointers to the S/W ring.
227 ixgbe_tx_fill_hw_ring(struct ixgbe_tx_queue *txq, struct rte_mbuf **pkts,
230 volatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
231 struct ixgbe_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
232 const int N_PER_LOOP = 4;
233 const int N_PER_LOOP_MASK = N_PER_LOOP-1;
234 int mainpart, leftover;
238 * Process most of the packets in chunks of N pkts. Any
239 * leftover packets will get processed one at a time.
241 mainpart = (nb_pkts & ((uint32_t) ~N_PER_LOOP_MASK));
242 leftover = (nb_pkts & ((uint32_t) N_PER_LOOP_MASK));
243 for (i = 0; i < mainpart; i += N_PER_LOOP) {
244 /* Copy N mbuf pointers to the S/W ring */
245 for (j = 0; j < N_PER_LOOP; ++j) {
246 (txep + i + j)->mbuf = *(pkts + i + j);
248 tx4(txdp + i, pkts + i);
251 if (unlikely(leftover > 0)) {
252 for (i = 0; i < leftover; ++i) {
253 (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
254 tx1(txdp + mainpart + i, pkts + mainpart + i);
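/*
 * Illustrative note (not part of the original code): the mainpart/leftover
 * split above is a simple unrolled-by-4 loop. Assuming nb_pkts = 23 and
 * N_PER_LOOP = 4:
 *   mainpart = 23 & ~3 = 20  ->  five tx4() calls fill 20 descriptors
 *   leftover = 23 &  3 = 3   ->  three tx1() calls fill the remainder
 */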
259 static inline uint16_t
260 tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
263 struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
264 volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring;
268 * Begin scanning the H/W ring for done descriptors when the
269 * number of available descriptors drops below tx_free_thresh. For
270 * each done descriptor, free the associated buffer.
272 if (txq->nb_tx_free < txq->tx_free_thresh)
273 ixgbe_tx_free_bufs(txq);
275 /* Only use descriptors that are available */
276 nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
277 if (unlikely(nb_pkts == 0))
280 /* Use exactly nb_pkts descriptors */
281 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
284 * At this point, we know there are enough descriptors in the
285 * ring to transmit all the packets. This assumes that each
286 * mbuf contains a single segment, and that no new offloads
287 * are expected, which would require a new context descriptor.
291 * See if we're going to wrap-around. If so, handle the top
292 * of the descriptor ring first, then do the bottom. If not,
293 * the processing looks just like the "bottom" part anyway...
295 if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
296 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
297 ixgbe_tx_fill_hw_ring(txq, tx_pkts, n);
300 * We know that the last descriptor in the ring will need to
301 * have its RS bit set because tx_rs_thresh has to be
302 * a divisor of the ring size
304 tx_r[txq->tx_next_rs].read.cmd_type_len |=
305 rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
306 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
311 /* Fill H/W descriptor ring with mbuf data */
312 ixgbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
313 txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
316 * Determine if RS bit should be set
317 * This is what we actually want:
318 * if ((txq->tx_tail - 1) >= txq->tx_next_rs)
319 * but instead of subtracting 1 and doing >=, we can just do
320 * greater than without subtracting.
322 if (txq->tx_tail > txq->tx_next_rs) {
323 tx_r[txq->tx_next_rs].read.cmd_type_len |=
324 rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
325 txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
327 if (txq->tx_next_rs >= txq->nb_tx_desc)
328 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
332 * Check for wrap-around. This would only happen if we used
333 * up to the last descriptor in the ring, no more, no less.
335 if (txq->tx_tail >= txq->nb_tx_desc)
338 /* update tail pointer */
340 IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, txq->tx_tail);
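/*
 * Illustrative note (not part of the original code): because tx_rs_thresh
 * must evenly divide the ring size on this path, RS bits always land on
 * fixed positions. Assuming a 128-entry ring with tx_rs_thresh = 32, the
 * RS bit is requested on descriptors 31, 63, 95 and 127; tx_next_rs simply
 * steps through that sequence and wraps back to 31 after the ring wraps.
 */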
346 ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
351 /* Try to transmit at least chunks of TX_MAX_BURST pkts */
352 if (likely(nb_pkts <= RTE_PMD_IXGBE_TX_MAX_BURST))
353 return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
355 /* transmit more than the max burst, in chunks of TX_MAX_BURST */
360 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_TX_MAX_BURST);
361 ret = tx_xmit_pkts(tx_queue, &(tx_pkts[nb_tx]), n);
362 nb_tx = (uint16_t)(nb_tx + ret);
363 nb_pkts = (uint16_t)(nb_pkts - ret);
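/*
 * Illustrative note (not part of the original code): assuming
 * RTE_PMD_IXGBE_TX_MAX_BURST is 32, a request for 100 packets on this path
 * is served as chunks of 32 + 32 + 32 + 4. If one chunk is only partially
 * transmitted (ret < n), the loop stops so the caller sees an accurate
 * count of packets actually queued.
 */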
371 #ifdef RTE_IXGBE_INC_VECTOR
373 ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
377 struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
382 num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
383 ret = ixgbe_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
396 ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
397 volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
398 uint64_t ol_flags, union ixgbe_tx_offload tx_offload)
400 uint32_t type_tucmd_mlhl;
401 uint32_t mss_l4len_idx = 0;
403 uint32_t vlan_macip_lens;
404 union ixgbe_tx_offload tx_offload_mask;
405 uint32_t seqnum_seed = 0;
407 ctx_idx = txq->ctx_curr;
408 tx_offload_mask.data[0] = 0;
409 tx_offload_mask.data[1] = 0;
412 /* Specify which HW CTX to upload. */
413 mss_l4len_idx |= (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT);
415 if (ol_flags & PKT_TX_VLAN_PKT) {
416 tx_offload_mask.vlan_tci |= ~0;
419 /* check if TCP segmentation required for this packet */
420 if (ol_flags & PKT_TX_TCP_SEG) {
421 /* implies IP cksum in IPv4 */
422 if (ol_flags & PKT_TX_IP_CKSUM)
423 type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4 |
424 IXGBE_ADVTXD_TUCMD_L4T_TCP |
425 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
427 type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV6 |
428 IXGBE_ADVTXD_TUCMD_L4T_TCP |
429 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
431 tx_offload_mask.l2_len |= ~0;
432 tx_offload_mask.l3_len |= ~0;
433 tx_offload_mask.l4_len |= ~0;
434 tx_offload_mask.tso_segsz |= ~0;
435 mss_l4len_idx |= tx_offload.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT;
436 mss_l4len_idx |= tx_offload.l4_len << IXGBE_ADVTXD_L4LEN_SHIFT;
437 } else { /* no TSO, check if hardware checksum is needed */
438 if (ol_flags & PKT_TX_IP_CKSUM) {
439 type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4;
440 tx_offload_mask.l2_len |= ~0;
441 tx_offload_mask.l3_len |= ~0;
444 switch (ol_flags & PKT_TX_L4_MASK) {
445 case PKT_TX_UDP_CKSUM:
446 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP |
447 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
448 mss_l4len_idx |= sizeof(struct udp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
449 tx_offload_mask.l2_len |= ~0;
450 tx_offload_mask.l3_len |= ~0;
452 case PKT_TX_TCP_CKSUM:
453 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP |
454 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
455 mss_l4len_idx |= sizeof(struct tcp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
456 tx_offload_mask.l2_len |= ~0;
457 tx_offload_mask.l3_len |= ~0;
459 case PKT_TX_SCTP_CKSUM:
460 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP |
461 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
462 mss_l4len_idx |= sizeof(struct sctp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
463 tx_offload_mask.l2_len |= ~0;
464 tx_offload_mask.l3_len |= ~0;
467 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_RSV |
468 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
473 if (ol_flags & PKT_TX_OUTER_IP_CKSUM) {
474 tx_offload_mask.outer_l2_len |= ~0;
475 tx_offload_mask.outer_l3_len |= ~0;
476 tx_offload_mask.l2_len |= ~0;
477 seqnum_seed |= tx_offload.outer_l3_len
478 << IXGBE_ADVTXD_OUTER_IPLEN;
479 seqnum_seed |= tx_offload.l2_len
480 << IXGBE_ADVTXD_TUNNEL_LEN;
483 txq->ctx_cache[ctx_idx].flags = ol_flags;
484 txq->ctx_cache[ctx_idx].tx_offload.data[0] =
485 tx_offload_mask.data[0] & tx_offload.data[0];
486 txq->ctx_cache[ctx_idx].tx_offload.data[1] =
487 tx_offload_mask.data[1] & tx_offload.data[1];
488 txq->ctx_cache[ctx_idx].tx_offload_mask = tx_offload_mask;
490 ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
491 vlan_macip_lens = tx_offload.l3_len;
492 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
493 vlan_macip_lens |= (tx_offload.outer_l2_len <<
494 IXGBE_ADVTXD_MACLEN_SHIFT);
496 vlan_macip_lens |= (tx_offload.l2_len <<
497 IXGBE_ADVTXD_MACLEN_SHIFT);
498 vlan_macip_lens |= ((uint32_t)tx_offload.vlan_tci << IXGBE_ADVTXD_VLAN_SHIFT);
499 ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
500 ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
501 ctx_txd->seqnum_seed = seqnum_seed;
505 * Check which hardware context can be used. Use the existing match
506 * or create a new context descriptor.
508 static inline uint32_t
509 what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
510 union ixgbe_tx_offload tx_offload)
512 /* If match with the current used context */
513 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
514 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
515 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
516 & tx_offload.data[0])) &&
517 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
518 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
519 & tx_offload.data[1]))))
520 return txq->ctx_curr;
	/* What if match with the next context  */
	txq->ctx_curr ^= 1;
524 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
525 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
526 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
527 & tx_offload.data[0])) &&
528 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
529 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
530 & tx_offload.data[1]))))
531 return txq->ctx_curr;
533 /* Mismatch, use the previous context */
534 return IXGBE_CTX_NUM;
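/*
 * Illustrative note (not part of the original code): each TX queue keeps a
 * small cache of IXGBE_CTX_NUM (two) context descriptors. A typical hit
 * pattern, assuming traffic alternates between two flows with different
 * offload settings (e.g. VLAN + TCP checksum versus plain TSO): flow A
 * keeps matching slot 0 and flow B slot 1, so no new context descriptor is
 * written. Only when neither slot matches does the function return
 * IXGBE_CTX_NUM, which makes the caller build a fresh context descriptor.
 */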
537 static inline uint32_t
538 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
542 if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM)
543 tmp |= IXGBE_ADVTXD_POPTS_TXSM;
544 if (ol_flags & PKT_TX_IP_CKSUM)
545 tmp |= IXGBE_ADVTXD_POPTS_IXSM;
546 if (ol_flags & PKT_TX_TCP_SEG)
547 tmp |= IXGBE_ADVTXD_POPTS_TXSM;
551 static inline uint32_t
552 tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
554 uint32_t cmdtype = 0;
556 if (ol_flags & PKT_TX_VLAN_PKT)
557 cmdtype |= IXGBE_ADVTXD_DCMD_VLE;
558 if (ol_flags & PKT_TX_TCP_SEG)
559 cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
560 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
561 cmdtype |= (1 << IXGBE_ADVTXD_OUTERIPCS_SHIFT);
562 if (ol_flags & PKT_TX_MACSEC)
563 cmdtype |= IXGBE_ADVTXD_MAC_LINKSEC;
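/*
 * Illustrative note (not part of the original code): the two helpers above
 * translate mbuf ol_flags into the per-descriptor POPTS and DCMD fields.
 * For example, a TSO packet carrying PKT_TX_TCP_SEG | PKT_TX_IP_CKSUM
 * yields POPTS = IXSM | TXSM and DCMD |= TSE, i.e. the hardware inserts
 * both checksums and performs the segmentation.
 */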
567 /* Default RS bit threshold values */
568 #ifndef DEFAULT_TX_RS_THRESH
569 #define DEFAULT_TX_RS_THRESH 32
571 #ifndef DEFAULT_TX_FREE_THRESH
572 #define DEFAULT_TX_FREE_THRESH 32
575 /* Reset transmit descriptors after they have been used */
577 ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
579 struct ixgbe_tx_entry *sw_ring = txq->sw_ring;
580 volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring;
581 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
582 uint16_t nb_tx_desc = txq->nb_tx_desc;
583 uint16_t desc_to_clean_to;
584 uint16_t nb_tx_to_clean;
587 /* Determine the last descriptor needing to be cleaned */
588 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
589 if (desc_to_clean_to >= nb_tx_desc)
590 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
592 /* Check to make sure the last descriptor to clean is done */
593 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
594 status = txr[desc_to_clean_to].wb.status;
595 if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD))) {
596 PMD_TX_FREE_LOG(DEBUG,
597 "TX descriptor %4u is not done"
598 "(port=%d queue=%d)",
600 txq->port_id, txq->queue_id);
601 /* Failed to clean any descriptors, better luck next time */
605 /* Figure out how many descriptors will be cleaned */
606 if (last_desc_cleaned > desc_to_clean_to)
607 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
610 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
613 PMD_TX_FREE_LOG(DEBUG,
614 "Cleaning %4u TX descriptors: %4u to %4u "
615 "(port=%d queue=%d)",
616 nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
617 txq->port_id, txq->queue_id);
620 * The last descriptor to clean is done, so that means all the
621 * descriptors from the last descriptor that was cleaned
622 * up to the last descriptor with the RS bit set
623 * are done. Only reset the threshold descriptor.
625 txr[desc_to_clean_to].wb.status = 0;
627 /* Update the txq to reflect the last descriptor that was cleaned */
628 txq->last_desc_cleaned = desc_to_clean_to;
629 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
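/*
 * Illustrative note (not part of the original code): the wrap-around math
 * above can be checked with a small example (ignoring the last_id
 * indirection used for multi-segment packets). Assuming a 512-entry ring,
 * tx_rs_thresh = 32 and last_desc_cleaned = 500: desc_to_clean_to becomes
 * 500 + 32 - 512 = 20, and since last_desc_cleaned > desc_to_clean_to the
 * count is (512 - 500) + 20 = 32 descriptors reclaimed.
 */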
636 ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
639 struct ixgbe_tx_queue *txq;
640 struct ixgbe_tx_entry *sw_ring;
641 struct ixgbe_tx_entry *txe, *txn;
642 volatile union ixgbe_adv_tx_desc *txr;
643 volatile union ixgbe_adv_tx_desc *txd, *txp;
644 struct rte_mbuf *tx_pkt;
645 struct rte_mbuf *m_seg;
646 uint64_t buf_dma_addr;
647 uint32_t olinfo_status;
648 uint32_t cmd_type_len;
659 union ixgbe_tx_offload tx_offload;
661 tx_offload.data[0] = 0;
662 tx_offload.data[1] = 0;
664 sw_ring = txq->sw_ring;
666 tx_id = txq->tx_tail;
667 txe = &sw_ring[tx_id];
670 /* Determine if the descriptor ring needs to be cleaned. */
671 if (txq->nb_tx_free < txq->tx_free_thresh)
672 ixgbe_xmit_cleanup(txq);
674 rte_prefetch0(&txe->mbuf->pool);
677 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
680 pkt_len = tx_pkt->pkt_len;
683 * Determine how many (if any) context descriptors
684 * are needed for offload functionality.
686 ol_flags = tx_pkt->ol_flags;
688 /* If hardware offload required */
689 tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK;
691 tx_offload.l2_len = tx_pkt->l2_len;
692 tx_offload.l3_len = tx_pkt->l3_len;
693 tx_offload.l4_len = tx_pkt->l4_len;
694 tx_offload.vlan_tci = tx_pkt->vlan_tci;
695 tx_offload.tso_segsz = tx_pkt->tso_segsz;
696 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
697 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
699 /* If new context need be built or reuse the exist ctx. */
700 ctx = what_advctx_update(txq, tx_ol_req,
702 /* Only allocate context descriptor if required*/
703 new_ctx = (ctx == IXGBE_CTX_NUM);
708 * Keep track of how many descriptors are used this loop
709 * This will always be the number of segments + the number of
710 * Context descriptors required to transmit the packet
712 nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
715 nb_used + txq->nb_tx_used >= txq->tx_rs_thresh)
716 /* set RS on the previous packet in the burst */
717 txp->read.cmd_type_len |=
718 rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
721 * The number of descriptors that must be allocated for a
722 * packet is the number of segments of that packet, plus 1
723 * Context Descriptor for the hardware offload, if any.
724 * Determine the last TX descriptor to allocate in the TX ring
725 * for the packet, starting from the current position (tx_id)
728 tx_last = (uint16_t) (tx_id + nb_used - 1);
731 if (tx_last >= txq->nb_tx_desc)
732 tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
734 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
735 " tx_first=%u tx_last=%u",
736 (unsigned) txq->port_id,
737 (unsigned) txq->queue_id,
743 * Make sure there are enough TX descriptors available to
744 * transmit the entire packet.
745 * nb_used better be less than or equal to txq->tx_rs_thresh
747 if (nb_used > txq->nb_tx_free) {
748 PMD_TX_FREE_LOG(DEBUG,
749 "Not enough free TX descriptors "
750 "nb_used=%4u nb_free=%4u "
751 "(port=%d queue=%d)",
752 nb_used, txq->nb_tx_free,
753 txq->port_id, txq->queue_id);
755 if (ixgbe_xmit_cleanup(txq) != 0) {
756 /* Could not clean any descriptors */
762 /* nb_used better be <= txq->tx_rs_thresh */
763 if (unlikely(nb_used > txq->tx_rs_thresh)) {
764 PMD_TX_FREE_LOG(DEBUG,
765 "The number of descriptors needed to "
766 "transmit the packet exceeds the "
767 "RS bit threshold. This will impact "
769 "nb_used=%4u nb_free=%4u "
771 "(port=%d queue=%d)",
772 nb_used, txq->nb_tx_free,
774 txq->port_id, txq->queue_id);
776 * Loop here until there are enough TX
777 * descriptors or until the ring cannot be
780 while (nb_used > txq->nb_tx_free) {
781 if (ixgbe_xmit_cleanup(txq) != 0) {
783 * Could not clean any
795 * By now there are enough free TX descriptors to transmit
800 * Set common flags of all TX Data Descriptors.
802 * The following bits must be set in all Data Descriptors:
803 * - IXGBE_ADVTXD_DTYP_DATA
804 * - IXGBE_ADVTXD_DCMD_DEXT
806 * The following bits must be set in the first Data Descriptor
807 * and are ignored in the other ones:
808 * - IXGBE_ADVTXD_DCMD_IFCS
809 * - IXGBE_ADVTXD_MAC_1588
810 * - IXGBE_ADVTXD_DCMD_VLE
812 * The following bits must only be set in the last Data
814 * - IXGBE_TXD_CMD_EOP
816 * The following bits can be set in any Data Descriptor, but
817 * are only set in the last Data Descriptor:
820 cmd_type_len = IXGBE_ADVTXD_DTYP_DATA |
821 IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
823 #ifdef RTE_LIBRTE_IEEE1588
824 if (ol_flags & PKT_TX_IEEE1588_TMST)
825 cmd_type_len |= IXGBE_ADVTXD_MAC_1588;
831 if (ol_flags & PKT_TX_TCP_SEG) {
			/* when TSO is on, paylen in descriptor is not
			 * the packet len but the TCP payload len
			 */
834 pkt_len -= (tx_offload.l2_len +
835 tx_offload.l3_len + tx_offload.l4_len);
839 * Setup the TX Advanced Context Descriptor if required
842 volatile struct ixgbe_adv_tx_context_desc *
845 ctx_txd = (volatile struct
846 ixgbe_adv_tx_context_desc *)
849 txn = &sw_ring[txe->next_id];
850 rte_prefetch0(&txn->mbuf->pool);
852 if (txe->mbuf != NULL) {
853 rte_pktmbuf_free_seg(txe->mbuf);
857 ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
860 txe->last_id = tx_last;
861 tx_id = txe->next_id;
866 * Setup the TX Advanced Data Descriptor,
867 * This path will go through
868 * whatever new/reuse the context descriptor
870 cmd_type_len |= tx_desc_ol_flags_to_cmdtype(ol_flags);
871 olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
872 olinfo_status |= ctx << IXGBE_ADVTXD_IDX_SHIFT;
875 olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
880 txn = &sw_ring[txe->next_id];
881 rte_prefetch0(&txn->mbuf->pool);
883 if (txe->mbuf != NULL)
884 rte_pktmbuf_free_seg(txe->mbuf);
888 * Set up Transmit Data Descriptor.
890 slen = m_seg->data_len;
891 buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
892 txd->read.buffer_addr =
893 rte_cpu_to_le_64(buf_dma_addr);
894 txd->read.cmd_type_len =
895 rte_cpu_to_le_32(cmd_type_len | slen);
896 txd->read.olinfo_status =
897 rte_cpu_to_le_32(olinfo_status);
898 txe->last_id = tx_last;
899 tx_id = txe->next_id;
902 } while (m_seg != NULL);
905 * The last packet data descriptor needs End Of Packet (EOP)
907 cmd_type_len |= IXGBE_TXD_CMD_EOP;
908 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
909 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
911 /* Set RS bit only on threshold packets' last descriptor */
912 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
913 PMD_TX_FREE_LOG(DEBUG,
914 "Setting RS bit on TXD id="
915 "%4u (port=%d queue=%d)",
916 tx_last, txq->port_id, txq->queue_id);
918 cmd_type_len |= IXGBE_TXD_CMD_RS;
920 /* Update txq RS bit counters */
926 txd->read.cmd_type_len |= rte_cpu_to_le_32(cmd_type_len);
930 /* set RS on last packet in the burst */
932 txp->read.cmd_type_len |= rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
937 * Set the Transmit Descriptor Tail (TDT)
939 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
940 (unsigned) txq->port_id, (unsigned) txq->queue_id,
941 (unsigned) tx_id, (unsigned) nb_tx);
942 IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
943 txq->tx_tail = tx_id;
/*********************************************************************
 *
 *  TX prep functions
 *
 **********************************************************************/
uint16_t
ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
959 struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
961 for (i = 0; i < nb_pkts; i++) {
963 ol_flags = m->ol_flags;
		/* Check if packet meets requirements for number of segments.
		 * NOTE: for ixgbe it's always (40 - WTHRESH) for both TSO and
		 *       non-TSO.
		 */
972 if (m->nb_segs > IXGBE_TX_MAX_SEG - txq->wthresh) {
977 if (ol_flags & IXGBE_TX_OFFLOAD_NOTSUP_MASK) {
978 rte_errno = -ENOTSUP;
982 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
983 ret = rte_validate_tx_offload(m);
989 ret = rte_net_intel_cksum_prepare(m);
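/*
 * Illustrative usage sketch (not part of the original code): ixgbe_prep_pkts
 * is installed as the device's tx_pkt_prepare callback, so an application
 * normally reaches it through the generic ethdev API, e.g.:
 *
 *	nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
 *	if (nb_prep < nb_pkts)
 *		handle_bad_pkt(pkts[nb_prep], rte_errno);
 *	nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
 *
 * port_id, queue_id and handle_bad_pkt are placeholders for this example
 * only.
 */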
/*********************************************************************
 *
 *  RX functions
 *
 **********************************************************************/
1005 #define IXGBE_PACKET_TYPE_ETHER 0X00
1006 #define IXGBE_PACKET_TYPE_IPV4 0X01
1007 #define IXGBE_PACKET_TYPE_IPV4_TCP 0X11
1008 #define IXGBE_PACKET_TYPE_IPV4_UDP 0X21
1009 #define IXGBE_PACKET_TYPE_IPV4_SCTP 0X41
1010 #define IXGBE_PACKET_TYPE_IPV4_EXT 0X03
1011 #define IXGBE_PACKET_TYPE_IPV4_EXT_TCP 0X13
1012 #define IXGBE_PACKET_TYPE_IPV4_EXT_UDP 0X23
1013 #define IXGBE_PACKET_TYPE_IPV4_EXT_SCTP 0X43
1014 #define IXGBE_PACKET_TYPE_IPV6 0X04
1015 #define IXGBE_PACKET_TYPE_IPV6_TCP 0X14
1016 #define IXGBE_PACKET_TYPE_IPV6_UDP 0X24
1017 #define IXGBE_PACKET_TYPE_IPV6_SCTP 0X44
1018 #define IXGBE_PACKET_TYPE_IPV6_EXT 0X0C
1019 #define IXGBE_PACKET_TYPE_IPV6_EXT_TCP 0X1C
1020 #define IXGBE_PACKET_TYPE_IPV6_EXT_UDP 0X2C
1021 #define IXGBE_PACKET_TYPE_IPV6_EXT_SCTP 0X4C
1022 #define IXGBE_PACKET_TYPE_IPV4_IPV6 0X05
1023 #define IXGBE_PACKET_TYPE_IPV4_IPV6_TCP 0X15
1024 #define IXGBE_PACKET_TYPE_IPV4_IPV6_UDP 0X25
1025 #define IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP 0X45
1026 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6 0X07
1027 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP 0X17
1028 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP 0X27
1029 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP 0X47
1030 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT 0X0D
1031 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
1032 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
1033 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP 0X4D
1034 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT 0X0F
1035 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP 0X1F
1036 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP 0X2F
1037 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP 0X4F
1039 #define IXGBE_PACKET_TYPE_NVGRE 0X00
1040 #define IXGBE_PACKET_TYPE_NVGRE_IPV4 0X01
1041 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_TCP 0X11
1042 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP 0X21
1043 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP 0X41
1044 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT 0X03
1045 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP 0X13
1046 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP 0X23
1047 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP 0X43
1048 #define IXGBE_PACKET_TYPE_NVGRE_IPV6 0X04
1049 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP 0X14
1050 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP 0X24
1051 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP 0X44
1052 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT 0X0C
1053 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP 0X1C
1054 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP 0X2C
1055 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP 0X4C
1056 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6 0X05
1057 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP 0X15
1058 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP 0X25
1059 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT 0X0D
1060 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP 0X1D
1061 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP 0X2D
1063 #define IXGBE_PACKET_TYPE_VXLAN 0X80
1064 #define IXGBE_PACKET_TYPE_VXLAN_IPV4 0X81
1065 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_TCP 0x91
1066 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP 0xA1
1067 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP 0xC1
1068 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT 0x83
1069 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP 0X93
1070 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP 0XA3
1071 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP 0XC3
1072 #define IXGBE_PACKET_TYPE_VXLAN_IPV6 0X84
1073 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP 0X94
1074 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP 0XA4
1075 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP 0XC4
1076 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT 0X8C
1077 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP 0X9C
1078 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP 0XAC
1079 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP 0XCC
1080 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6 0X85
1081 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP 0X95
1082 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP 0XA5
1083 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT 0X8D
1084 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP 0X9D
1085 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP 0XAD
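/*
 * Illustrative note (not part of the original code): the values above are
 * bit-coded so that they can be used directly as indexes into the lookup
 * tables below. Reading IXGBE_PACKET_TYPE_IPV4_TCP = 0x11 as bits: bit 0
 * marks IPv4 and bit 4 marks TCP; 0x21 swaps TCP for UDP, 0x03 adds the
 * IPv4 extension-header bit, and 0x80 in the tunnel table distinguishes
 * VXLAN from NVGRE.
 */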
/*
 * Use two different tables for normal and tunnel packets to save space.
 */
const uint32_t
	ptype_table[IXGBE_PACKET_TYPE_MAX] __rte_cache_aligned = {
1093 [IXGBE_PACKET_TYPE_ETHER] = RTE_PTYPE_L2_ETHER,
1094 [IXGBE_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
1096 [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
1097 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
1098 [IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
1099 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
1100 [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
1101 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
1102 [IXGBE_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
1103 RTE_PTYPE_L3_IPV4_EXT,
1104 [IXGBE_PACKET_TYPE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1105 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
1106 [IXGBE_PACKET_TYPE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1107 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
1108 [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1109 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
1110 [IXGBE_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
1112 [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1113 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
1114 [IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1115 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
1116 [IXGBE_PACKET_TYPE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1117 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP,
1118 [IXGBE_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1119 RTE_PTYPE_L3_IPV6_EXT,
1120 [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1121 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
1122 [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1123 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
1124 [IXGBE_PACKET_TYPE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1125 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_SCTP,
1126 [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
1127 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1128 RTE_PTYPE_INNER_L3_IPV6,
1129 [IXGBE_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1130 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1131 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
1132 [IXGBE_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1133 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1134 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
1135 [IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1136 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1137 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
1138 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6] = RTE_PTYPE_L2_ETHER |
1139 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1140 RTE_PTYPE_INNER_L3_IPV6,
1141 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1142 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1143 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
1144 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1145 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1146 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
1147 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1148 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1149 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
1150 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1151 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1152 RTE_PTYPE_INNER_L3_IPV6_EXT,
1153 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1154 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1155 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
1156 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1157 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1158 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
1159 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1160 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1161 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
1162 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1163 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1164 RTE_PTYPE_INNER_L3_IPV6_EXT,
1165 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1166 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1167 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
1168 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1169 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1170 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
1171 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP] =
1172 RTE_PTYPE_L2_ETHER |
1173 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1174 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
};

const uint32_t
	ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX] __rte_cache_aligned = {
1179 [IXGBE_PACKET_TYPE_NVGRE] = RTE_PTYPE_L2_ETHER |
1180 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1181 RTE_PTYPE_INNER_L2_ETHER,
1182 [IXGBE_PACKET_TYPE_NVGRE_IPV4] = RTE_PTYPE_L2_ETHER |
1183 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1184 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1185 [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
1186 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1187 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT,
1188 [IXGBE_PACKET_TYPE_NVGRE_IPV6] = RTE_PTYPE_L2_ETHER |
1189 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1190 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6,
1191 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
1192 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1193 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1194 [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1195 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1196 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT,
1197 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1198 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1199 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1200 [IXGBE_PACKET_TYPE_NVGRE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
1201 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1202 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
1203 RTE_PTYPE_INNER_L4_TCP,
1204 [IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1205 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1206 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
1207 RTE_PTYPE_INNER_L4_TCP,
1208 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1209 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1210 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1211 [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1212 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1213 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
1214 RTE_PTYPE_INNER_L4_TCP,
1215 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP] =
1216 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1217 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
1218 RTE_PTYPE_INNER_L3_IPV4,
1219 [IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
1220 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1221 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
1222 RTE_PTYPE_INNER_L4_UDP,
1223 [IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1224 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1225 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
1226 RTE_PTYPE_INNER_L4_UDP,
1227 [IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1228 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1229 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
1230 RTE_PTYPE_INNER_L4_SCTP,
1231 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1232 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1233 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1234 [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1235 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1236 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
1237 RTE_PTYPE_INNER_L4_UDP,
1238 [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1239 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1240 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
1241 RTE_PTYPE_INNER_L4_SCTP,
1242 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP] =
1243 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1244 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
1245 RTE_PTYPE_INNER_L3_IPV4,
1246 [IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
1247 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1248 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
1249 RTE_PTYPE_INNER_L4_SCTP,
1250 [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1251 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1252 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
1253 RTE_PTYPE_INNER_L4_SCTP,
1254 [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1255 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1256 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
1257 RTE_PTYPE_INNER_L4_TCP,
1258 [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1259 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1260 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
1261 RTE_PTYPE_INNER_L4_UDP,
1263 [IXGBE_PACKET_TYPE_VXLAN] = RTE_PTYPE_L2_ETHER |
1264 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1265 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER,
1266 [IXGBE_PACKET_TYPE_VXLAN_IPV4] = RTE_PTYPE_L2_ETHER |
1267 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1268 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1269 RTE_PTYPE_INNER_L3_IPV4,
1270 [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
1271 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1272 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1273 RTE_PTYPE_INNER_L3_IPV4_EXT,
1274 [IXGBE_PACKET_TYPE_VXLAN_IPV6] = RTE_PTYPE_L2_ETHER |
1275 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1276 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1277 RTE_PTYPE_INNER_L3_IPV6,
1278 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
1279 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1280 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1281 RTE_PTYPE_INNER_L3_IPV4,
1282 [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1283 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1284 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1285 RTE_PTYPE_INNER_L3_IPV6_EXT,
1286 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1287 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1288 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1289 RTE_PTYPE_INNER_L3_IPV4,
1290 [IXGBE_PACKET_TYPE_VXLAN_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
1291 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1292 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1293 RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_TCP,
1294 [IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1295 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1296 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1297 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
1298 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1299 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1300 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1301 RTE_PTYPE_INNER_L3_IPV4,
1302 [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1303 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1304 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1305 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
1306 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP] =
1307 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1308 RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
1309 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1310 [IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
1311 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1312 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1313 RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_UDP,
1314 [IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1315 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1316 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1317 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
1318 [IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1319 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1320 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1321 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
1322 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1323 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1324 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1325 RTE_PTYPE_INNER_L3_IPV4,
1326 [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1327 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1328 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1329 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
1330 [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1331 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1332 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1333 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
1334 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP] =
1335 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1336 RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
1337 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1338 [IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
1339 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1340 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1341 RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_SCTP,
1342 [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1343 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1344 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1345 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_SCTP,
1346 [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1347 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1348 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1349 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP,
1350 [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1351 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1352 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1353 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP,
};

/* @note: fix ixgbe_dev_supported_ptypes_get() if any change here. */
1357 static inline uint32_t
1358 ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask)
1361 if (unlikely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
1362 return RTE_PTYPE_UNKNOWN;
1364 pkt_info = (pkt_info >> IXGBE_PACKET_TYPE_SHIFT) & ptype_mask;
1366 /* For tunnel packet */
1367 if (pkt_info & IXGBE_PACKET_TYPE_TUNNEL_BIT) {
1368 /* Remove the tunnel bit to save the space. */
1369 pkt_info &= IXGBE_PACKET_TYPE_MASK_TUNNEL;
1370 return ptype_table_tn[pkt_info];
1374 * For x550, if it's not tunnel,
1375 * tunnel type bit should be set to 0.
1376 * Reuse 82599's mask.
1378 pkt_info &= IXGBE_PACKET_TYPE_MASK_82599;
1380 return ptype_table[pkt_info];
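/*
 * Illustrative note (not part of the original code): a worked example of the
 * lookup above, assuming a plain IPv4/TCP frame on an 82599. The descriptor
 * pkt_info field, shifted by IXGBE_PACKET_TYPE_SHIFT and masked, yields
 * 0x11 (IXGBE_PACKET_TYPE_IPV4_TCP); ptype_table[0x11] then returns
 * RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP, which is what
 * ends up in the mbuf packet_type field.
 */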
static inline uint64_t
ixgbe_rxd_pkt_info_to_pkt_flags(uint16_t pkt_info)
{
	static uint64_t ip_rss_types_map[16] __rte_cache_aligned = {
		0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
		0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
		PKT_RX_RSS_HASH, 0, 0, 0,
		0, 0, 0, PKT_RX_FDIR,
	};
#ifdef RTE_LIBRTE_IEEE1588
	static uint64_t ip_pkt_etqf_map[8] = {
		0, 0, 0, PKT_RX_IEEE1588_PTP,
		0, 0, 0, 0,
	};

	if (likely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
		return ip_pkt_etqf_map[(pkt_info >> 4) & 0X07] |
				ip_rss_types_map[pkt_info & 0XF];
	else
		return ip_rss_types_map[pkt_info & 0XF];
#else
	return ip_rss_types_map[pkt_info & 0XF];
#endif
}
static inline uint64_t
rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags)
{
	uint64_t pkt_flags;

	/*
	 * Check if VLAN is present only.
	 * Do not check whether the L3/L4 RX checksum was done by the NIC;
	 * that can be found from the rte_eth_rxmode.hw_ip_checksum flag.
	 */
	pkt_flags = (rx_status & IXGBE_RXD_STAT_VP) ? vlan_flags : 0;

#ifdef RTE_LIBRTE_IEEE1588
	if (rx_status & IXGBE_RXD_STAT_TMST)
		pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
#endif
	return pkt_flags;
}
static inline uint64_t
rx_desc_error_to_pkt_flags(uint32_t rx_status)
{
	uint64_t pkt_flags;

	/*
	 * Bit 31: IPE, IPv4 checksum error
	 * Bit 30: L4I, L4I integrity error
	 */
	static uint64_t error_to_pkt_flags_map[4] = {
		PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
		PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
		PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD,
		PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
	};

	pkt_flags = error_to_pkt_flags_map[(rx_status >>
		IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK];

	if ((rx_status & IXGBE_RXD_STAT_OUTERIPCS) &&
	    (rx_status & IXGBE_RXDADV_ERR_OUTERIPER)) {
		pkt_flags |= PKT_RX_EIP_CKSUM_BAD;
	}

	return pkt_flags;
}
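/*
 * Illustrative note (not part of the original code): the two error bits
 * (IPE, L4E) extracted above form a 2-bit index, so exactly one of the four
 * GOOD/BAD combinations in error_to_pkt_flags_map is reported. For example,
 * a packet with a correct IPv4 header but a corrupted TCP checksum indexes
 * entry 1 and is flagged PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD.
 */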
1454 * LOOK_AHEAD defines how many desc statuses to check beyond the
1455 * current descriptor.
1456 * It must be a pound define for optimal performance.
1457 * Do not change the value of LOOK_AHEAD, as the ixgbe_rx_scan_hw_ring
1458 * function only works with LOOK_AHEAD=8.
1460 #define LOOK_AHEAD 8
1461 #if (LOOK_AHEAD != 8)
#error "PMD IXGBE: LOOK_AHEAD must be 8\n"
#endif
1465 ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
1467 volatile union ixgbe_adv_rx_desc *rxdp;
1468 struct ixgbe_rx_entry *rxep;
1469 struct rte_mbuf *mb;
1473 uint32_t s[LOOK_AHEAD];
1474 uint32_t pkt_info[LOOK_AHEAD];
1475 int i, j, nb_rx = 0;
1477 uint64_t vlan_flags = rxq->vlan_flags;
1479 /* get references to current descriptor and S/W ring entry */
1480 rxdp = &rxq->rx_ring[rxq->rx_tail];
1481 rxep = &rxq->sw_ring[rxq->rx_tail];
1483 status = rxdp->wb.upper.status_error;
1484 /* check to make sure there is at least 1 packet to receive */
1485 if (!(status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
1489 * Scan LOOK_AHEAD descriptors at a time to determine which descriptors
1490 * reference packets that are ready to be received.
1492 for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST;
1493 i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD) {
1494 /* Read desc statuses backwards to avoid race condition */
1495 for (j = 0; j < LOOK_AHEAD; j++)
1496 s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error);
1500 /* Compute how many status bits were set */
1501 for (nb_dd = 0; nb_dd < LOOK_AHEAD &&
1502 (s[nb_dd] & IXGBE_RXDADV_STAT_DD); nb_dd++)
1505 for (j = 0; j < nb_dd; j++)
1506 pkt_info[j] = rte_le_to_cpu_32(rxdp[j].wb.lower.
1511 /* Translate descriptor info to mbuf format */
1512 for (j = 0; j < nb_dd; ++j) {
1514 pkt_len = rte_le_to_cpu_16(rxdp[j].wb.upper.length) -
1516 mb->data_len = pkt_len;
1517 mb->pkt_len = pkt_len;
1518 mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
1520 /* convert descriptor fields to rte mbuf flags */
1521 pkt_flags = rx_desc_status_to_pkt_flags(s[j],
1523 pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
1524 pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags
1525 ((uint16_t)pkt_info[j]);
1526 mb->ol_flags = pkt_flags;
1528 ixgbe_rxd_pkt_info_to_pkt_type
1529 (pkt_info[j], rxq->pkt_type_mask);
1531 if (likely(pkt_flags & PKT_RX_RSS_HASH))
1532 mb->hash.rss = rte_le_to_cpu_32(
1533 rxdp[j].wb.lower.hi_dword.rss);
1534 else if (pkt_flags & PKT_RX_FDIR) {
1535 mb->hash.fdir.hash = rte_le_to_cpu_16(
1536 rxdp[j].wb.lower.hi_dword.csum_ip.csum) &
1537 IXGBE_ATR_HASH_MASK;
1538 mb->hash.fdir.id = rte_le_to_cpu_16(
1539 rxdp[j].wb.lower.hi_dword.csum_ip.ip_id);
1543 /* Move mbuf pointers from the S/W ring to the stage */
1544 for (j = 0; j < LOOK_AHEAD; ++j) {
1545 rxq->rx_stage[i + j] = rxep[j].mbuf;
1548 /* stop if all requested packets could not be received */
1549 if (nb_dd != LOOK_AHEAD)
1553 /* clear software ring entries so we can cleanup correctly */
1554 for (i = 0; i < nb_rx; ++i) {
1555 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
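/*
 * Illustrative note (not part of the original code): the scan above works on
 * groups of LOOK_AHEAD (8) descriptors. Assuming 21 packets are ready, the
 * first two groups contribute 8 + 8, the third group finds only 5 DD bits
 * set (nb_dd = 5 != LOOK_AHEAD) and the loop stops, so 21 mbufs are staged
 * for the application.
 */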
1563 ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf)
1565 volatile union ixgbe_adv_rx_desc *rxdp;
1566 struct ixgbe_rx_entry *rxep;
1567 struct rte_mbuf *mb;
1572 /* allocate buffers in bulk directly into the S/W ring */
1573 alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
1574 rxep = &rxq->sw_ring[alloc_idx];
1575 diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
1576 rxq->rx_free_thresh);
1577 if (unlikely(diag != 0))
1580 rxdp = &rxq->rx_ring[alloc_idx];
1581 for (i = 0; i < rxq->rx_free_thresh; ++i) {
1582 /* populate the static rte mbuf fields */
1585 mb->port = rxq->port_id;
1588 rte_mbuf_refcnt_set(mb, 1);
1589 mb->data_off = RTE_PKTMBUF_HEADROOM;
1591 /* populate the descriptors */
1592 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mb));
1593 rxdp[i].read.hdr_addr = 0;
1594 rxdp[i].read.pkt_addr = dma_addr;
1597 /* update state of internal queue structure */
1598 rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh;
1599 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1600 rxq->rx_free_trigger = rxq->rx_free_thresh - 1;
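/*
 * Illustrative note (not part of the original code): rx_free_trigger always
 * points at the last descriptor of the next block to refill. Assuming
 * rx_free_thresh = 32 and a 512-entry ring, it moves 31 -> 63 -> ... -> 511
 * and then wraps back to 31, so buffers are always allocated in bulks of 32
 * starting at a multiple of the threshold.
 */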
1606 static inline uint16_t
1607 ixgbe_rx_fill_from_stage(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
1610 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1613 /* how many packets are ready to return? */
1614 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1616 /* copy mbuf pointers to the application's packet list */
1617 for (i = 0; i < nb_pkts; ++i)
1618 rx_pkts[i] = stage[i];
1620 /* update internal queue state */
1621 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1622 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1627 static inline uint16_t
1628 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1631 struct ixgbe_rx_queue *rxq = (struct ixgbe_rx_queue *)rx_queue;
1634 /* Any previously recv'd pkts will be returned from the Rx stage */
1635 if (rxq->rx_nb_avail)
1636 return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1638 /* Scan the H/W ring for packets to receive */
1639 nb_rx = (uint16_t)ixgbe_rx_scan_hw_ring(rxq);
1641 /* update internal queue state */
1642 rxq->rx_next_avail = 0;
1643 rxq->rx_nb_avail = nb_rx;
1644 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1646 /* if required, allocate new buffers to replenish descriptors */
1647 if (rxq->rx_tail > rxq->rx_free_trigger) {
1648 uint16_t cur_free_trigger = rxq->rx_free_trigger;
1650 if (ixgbe_rx_alloc_bufs(rxq, true) != 0) {
1653 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1654 "queue_id=%u", (unsigned) rxq->port_id,
1655 (unsigned) rxq->queue_id);
1657 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
1658 rxq->rx_free_thresh;
1661 * Need to rewind any previous receives if we cannot
1662 * allocate new buffers to replenish the old ones.
1664 rxq->rx_nb_avail = 0;
1665 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1666 for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j)
1667 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1672 /* update tail pointer */
1674 IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr,
1678 if (rxq->rx_tail >= rxq->nb_rx_desc)
1681 /* received any packets this loop? */
1682 if (rxq->rx_nb_avail)
1683 return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1688 /* split requests into chunks of size RTE_PMD_IXGBE_RX_MAX_BURST */
1690 ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1695 if (unlikely(nb_pkts == 0))
1698 if (likely(nb_pkts <= RTE_PMD_IXGBE_RX_MAX_BURST))
1699 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1701 /* request is relatively large, chunk it up */
1706 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_RX_MAX_BURST);
1707 ret = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1708 nb_rx = (uint16_t)(nb_rx + ret);
1709 nb_pkts = (uint16_t)(nb_pkts - ret);
1718 ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1721 struct ixgbe_rx_queue *rxq;
1722 volatile union ixgbe_adv_rx_desc *rx_ring;
1723 volatile union ixgbe_adv_rx_desc *rxdp;
1724 struct ixgbe_rx_entry *sw_ring;
1725 struct ixgbe_rx_entry *rxe;
1726 struct rte_mbuf *rxm;
1727 struct rte_mbuf *nmb;
1728 union ixgbe_adv_rx_desc rxd;
1737 uint64_t vlan_flags;
1742 rx_id = rxq->rx_tail;
1743 rx_ring = rxq->rx_ring;
1744 sw_ring = rxq->sw_ring;
1745 vlan_flags = rxq->vlan_flags;
1746 while (nb_rx < nb_pkts) {
1748 * The order of operations here is important as the DD status
1749 * bit must not be read after any other descriptor fields.
1750 * rx_ring and rxdp are pointing to volatile data so the order
1751 * of accesses cannot be reordered by the compiler. If they were
1752 * not volatile, they could be reordered which could lead to
1753 * using invalid descriptor fields when read from rxd.
1755 rxdp = &rx_ring[rx_id];
1756 staterr = rxdp->wb.upper.status_error;
1757 if (!(staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
1764 * If the IXGBE_RXDADV_STAT_EOP flag is not set, the RX packet
1765 * is likely to be invalid and to be dropped by the various
1766 * validation checks performed by the network stack.
1768 * Allocate a new mbuf to replenish the RX ring descriptor.
1769 * If the allocation fails:
1770 * - arrange for that RX descriptor to be the first one
1771 * being parsed the next time the receive function is
1772 * invoked [on the same queue].
1774 * - Stop parsing the RX ring and return immediately.
		 * This policy does not drop the packet received in the RX
		 * descriptor for which the allocation of a new mbuf failed.
		 * Thus, it allows that packet to be later retrieved if
		 * mbufs have been freed in the meantime.
1780 * As a side effect, holding RX descriptors instead of
1781 * systematically giving them back to the NIC may lead to
1782 * RX ring exhaustion situations.
1783 * However, the NIC can gracefully prevent such situations
1784 * to happen by sending specific "back-pressure" flow control
1785 * frames to its peer(s).
1787 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1788 "ext_err_stat=0x%08x pkt_len=%u",
1789 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1790 (unsigned) rx_id, (unsigned) staterr,
1791 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
1793 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1795 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1796 "queue_id=%u", (unsigned) rxq->port_id,
1797 (unsigned) rxq->queue_id);
1798 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1803 rxe = &sw_ring[rx_id];
1805 if (rx_id == rxq->nb_rx_desc)
1808 /* Prefetch next mbuf while processing current one. */
1809 rte_ixgbe_prefetch(sw_ring[rx_id].mbuf);
1812 * When next RX descriptor is on a cache-line boundary,
1813 * prefetch the next 4 RX descriptors and the next 8 pointers
1816 if ((rx_id & 0x3) == 0) {
1817 rte_ixgbe_prefetch(&rx_ring[rx_id]);
1818 rte_ixgbe_prefetch(&sw_ring[rx_id]);
1824 rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
1825 rxdp->read.hdr_addr = 0;
1826 rxdp->read.pkt_addr = dma_addr;
1829 * Initialize the returned mbuf.
1830 * 1) setup generic mbuf fields:
1831 * - number of segments,
1834 * - RX port identifier.
1835 * 2) integrate hardware offload data, if any:
1836 * - RSS flag & hash,
1837 * - IP checksum flag,
1838 * - VLAN TCI, if any,
1841 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
1843 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1844 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
1847 rxm->pkt_len = pkt_len;
1848 rxm->data_len = pkt_len;
1849 rxm->port = rxq->port_id;
1851 pkt_info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1852 /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
1853 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1855 pkt_flags = rx_desc_status_to_pkt_flags(staterr, vlan_flags);
1856 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1857 pkt_flags = pkt_flags |
1858 ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info);
1859 rxm->ol_flags = pkt_flags;
1861 ixgbe_rxd_pkt_info_to_pkt_type(pkt_info,
1862 rxq->pkt_type_mask);
1864 if (likely(pkt_flags & PKT_RX_RSS_HASH))
1865 rxm->hash.rss = rte_le_to_cpu_32(
1866 rxd.wb.lower.hi_dword.rss);
1867 else if (pkt_flags & PKT_RX_FDIR) {
1868 rxm->hash.fdir.hash = rte_le_to_cpu_16(
1869 rxd.wb.lower.hi_dword.csum_ip.csum) &
1870 IXGBE_ATR_HASH_MASK;
1871 rxm->hash.fdir.id = rte_le_to_cpu_16(
1872 rxd.wb.lower.hi_dword.csum_ip.ip_id);
1875 * Store the mbuf address into the next entry of the array
1876 * of returned packets.
1878 rx_pkts[nb_rx++] = rxm;
1880 rxq->rx_tail = rx_id;
1883 * If the number of free RX descriptors is greater than the RX free
1884 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1886 * Update the RDT with the value of the last processed RX descriptor
1887 * minus 1, to guarantee that the RDT register is never equal to the
1888 * RDH register, which creates a "full" ring situation from the
1889 * hardware point of view...
1891 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1892 if (nb_hold > rxq->rx_free_thresh) {
1893 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1894 "nb_hold=%u nb_rx=%u",
1895 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1896 (unsigned) rx_id, (unsigned) nb_hold,
1898 rx_id = (uint16_t) ((rx_id == 0) ?
1899 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1900 IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1903 rxq->nb_rx_hold = nb_hold;
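/*
 * Illustrative sketch (not part of the driver): how an application typically
 * consumes this scalar receive path through the generic ethdev API. Port 0,
 * queue 0 and a burst size of 32 are arbitrary example values.
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t i, nb;
 *
 *	// rte_eth_rx_burst() dispatches to the Rx handler selected for the
 *	// port, e.g. ixgbe_recv_pkts() on this code path.
 *	nb = rte_eth_rx_burst(0, 0, pkts, 32);
 *	for (i = 0; i < nb; i++)
 *		rte_pktmbuf_free(pkts[i]);
 */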
1908 * Detect an RSC descriptor.
1910 static inline uint32_t
1911 ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
1913 return (rte_le_to_cpu_32(rx->wb.lower.lo_dword.data) &
1914 IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
1918 * ixgbe_fill_cluster_head_buf - fill the first mbuf of the returned packet
1920 * Fill the following info in the HEAD buffer of the Rx cluster:
1921 * - RX port identifier
1922 * - hardware offload data, if any:
1924 * - IP checksum flag
1925 * - VLAN TCI, if any
1927 * @head HEAD of the packet cluster
1928 * @desc HW descriptor to get data from
1929 * @rxq Pointer to the Rx queue
1932 ixgbe_fill_cluster_head_buf(
1933 struct rte_mbuf *head,
1934 union ixgbe_adv_rx_desc *desc,
1935 struct ixgbe_rx_queue *rxq,
1941 head->port = rxq->port_id;
1943 /* The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
1944 * set in the pkt_flags field.
1946 head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
1947 pkt_info = rte_le_to_cpu_32(desc->wb.lower.lo_dword.data);
1948 pkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags);
1949 pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
1950 pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info);
1951 head->ol_flags = pkt_flags;
1953 ixgbe_rxd_pkt_info_to_pkt_type(pkt_info, rxq->pkt_type_mask);
1955 if (likely(pkt_flags & PKT_RX_RSS_HASH))
1956 head->hash.rss = rte_le_to_cpu_32(desc->wb.lower.hi_dword.rss);
1957 else if (pkt_flags & PKT_RX_FDIR) {
1958 head->hash.fdir.hash =
1959 rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.csum)
1960 & IXGBE_ATR_HASH_MASK;
1961 head->hash.fdir.id =
1962 rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.ip_id);
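/*
 * Illustrative sketch (not part of the driver): an application should test
 * ol_flags of a returned mbuf "m" before trusting the offload fields filled
 * in above; the hash and VLAN fields are only meaningful when the matching
 * flag is set. use_rss_hash()/use_fdir()/use_vlan() are hypothetical
 * consumers shown only for the example.
 *
 *	if (m->ol_flags & PKT_RX_RSS_HASH)
 *		use_rss_hash(m->hash.rss);
 *	else if (m->ol_flags & PKT_RX_FDIR)
 *		use_fdir(m->hash.fdir.hash, m->hash.fdir.id);
 *	if (m->ol_flags & PKT_RX_VLAN_PKT)
 *		use_vlan(m->vlan_tci);
 */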
1967 * ixgbe_recv_pkts_lro - receive handler for the LRO case.
1969 * @rx_queue Rx queue handle
1970 * @rx_pkts table of received packets
1971 * @nb_pkts size of rx_pkts table
1972 * @bulk_alloc if TRUE, bulk allocation is used for HW ring refilling
1974 * Handles the Rx HW ring completions when RSC feature is configured. Uses an
1975 * additional ring of ixgbe_scattered_rx_entry's (sw_sc_ring) that holds the relevant RSC info.
1977 * We use the same logic as in Linux and in FreeBSD ixgbe drivers:
1978 * 1) When non-EOP RSC completion arrives:
1979 * a) Update the HEAD of the current RSC aggregation cluster with the new
1980 * segment's data length.
1981 * b) Set the "next" pointer of the current segment to point to the segment
1982 * at the NEXTP index.
1983 * c) Pass the HEAD of RSC aggregation cluster on to the next NEXTP entry
1984 * in the sw_sc_ring.
1985 * 2) When EOP arrives we just update the cluster's total length and offload
1986 * flags and deliver the cluster up to the upper layers. In our case - put it
1987 * in the rx_pkts table.
1989 * Returns the number of received packets/clusters (according to the "bulk
1990 * receive" interface).
1992 static inline uint16_t
1993 ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
1996 struct ixgbe_rx_queue *rxq = rx_queue;
1997 volatile union ixgbe_adv_rx_desc *rx_ring = rxq->rx_ring;
1998 struct ixgbe_rx_entry *sw_ring = rxq->sw_ring;
1999 struct ixgbe_scattered_rx_entry *sw_sc_ring = rxq->sw_sc_ring;
2000 uint16_t rx_id = rxq->rx_tail;
2002 uint16_t nb_hold = rxq->nb_rx_hold;
2003 uint16_t prev_id = rxq->rx_tail;
2005 while (nb_rx < nb_pkts) {
2007 struct ixgbe_rx_entry *rxe;
2008 struct ixgbe_scattered_rx_entry *sc_entry;
2009 struct ixgbe_scattered_rx_entry *next_sc_entry;
2010 struct ixgbe_rx_entry *next_rxe = NULL;
2011 struct rte_mbuf *first_seg;
2012 struct rte_mbuf *rxm;
2013 struct rte_mbuf *nmb;
2014 union ixgbe_adv_rx_desc rxd;
2017 volatile union ixgbe_adv_rx_desc *rxdp;
2022 * The code in this file relies on volatile pointers to ensure
2023 * that the DD status bit is read before the rest of the
2024 * descriptor fields. This only constrains the compiler, and a
2025 * compiler barrier (DPDK even provides rte_compiler_barrier())
2026 * would express that intent more cleanly.
2028 * More importantly, a compiler-level guarantee is not enough in
2029 * the general case: on weakly ordered CPUs such as Power, which
2030 * DPDK is supposed to support, a compiler barrier alone does
2031 * not ensure the required memory ordering.
2033 * An attempt was made to fix only this function as a starting
2034 * point (as part of the LRO/RSC series), but casting the
2035 * "volatile" qualifier away from rx_ring does not compile
2036 * cleanly, so the existing pattern is kept for now.
2039 * The descriptor parsing below will also not work on a big
2040 * endian CPU, so these lines will have to be revisited together
2041 * with the rest of the byte-ordering handling in this file:
2045 * - Get rid of the "volatile" qualifier and let the compiler do
2046 *   its job.
2047 * - Use the proper memory barrier (rte_rmb()) to ensure the
2048 * memory ordering below.
2050 rxdp = &rx_ring[rx_id];
2051 staterr = rte_le_to_cpu_32(rxdp->wb.upper.status_error);
2053 if (!(staterr & IXGBE_RXDADV_STAT_DD))
2058 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
2059 "staterr=0x%x data_len=%u",
2060 rxq->port_id, rxq->queue_id, rx_id, staterr,
2061 rte_le_to_cpu_16(rxd.wb.upper.length));
2064 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
2066 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed "
2067 "port_id=%u queue_id=%u",
2068 rxq->port_id, rxq->queue_id);
2070 rte_eth_devices[rxq->port_id].data->
2071 rx_mbuf_alloc_failed++;
2074 } else if (nb_hold > rxq->rx_free_thresh) {
2075 uint16_t next_rdt = rxq->rx_free_trigger;
2077 if (!ixgbe_rx_alloc_bufs(rxq, false)) {
2079 IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr,
2081 nb_hold -= rxq->rx_free_thresh;
2083 PMD_RX_LOG(DEBUG, "RX bulk alloc failed "
2084 "port_id=%u queue_id=%u",
2085 rxq->port_id, rxq->queue_id);
2087 rte_eth_devices[rxq->port_id].data->
2088 rx_mbuf_alloc_failed++;
2094 rxe = &sw_ring[rx_id];
2095 eop = staterr & IXGBE_RXDADV_STAT_EOP;
2097 next_id = rx_id + 1;
2098 if (next_id == rxq->nb_rx_desc)
2101 /* Prefetch next mbuf while processing current one. */
2102 rte_ixgbe_prefetch(sw_ring[next_id].mbuf);
2105 * When next RX descriptor is on a cache-line boundary,
2106 * prefetch the next 4 RX descriptors and the next 4 pointers
2109 if ((next_id & 0x3) == 0) {
2110 rte_ixgbe_prefetch(&rx_ring[next_id]);
2111 rte_ixgbe_prefetch(&sw_ring[next_id]);
2118 rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
2120 * Update RX descriptor with the physical address of the
2121 * new data buffer of the newly allocated mbuf.
2125 rxm->data_off = RTE_PKTMBUF_HEADROOM;
2126 rxdp->read.hdr_addr = 0;
2127 rxdp->read.pkt_addr = dma;
2132 * Set data length & data buffer address of mbuf.
2134 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
2135 rxm->data_len = data_len;
2140 * Get next descriptor index:
2141 * - For RSC it's in the NEXTP field.
2142 * - For a scattered packet - it's just a following
2145 if (ixgbe_rsc_count(&rxd))
2147 (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
2148 IXGBE_RXDADV_NEXTP_SHIFT;
2152 next_sc_entry = &sw_sc_ring[nextp_id];
2153 next_rxe = &sw_ring[nextp_id];
2154 rte_ixgbe_prefetch(next_rxe);
2157 sc_entry = &sw_sc_ring[rx_id];
2158 first_seg = sc_entry->fbuf;
2159 sc_entry->fbuf = NULL;
2162 * If this is the first buffer of the received packet,
2163 * set the pointer to the first mbuf of the packet and
2164 * initialize its context.
2165 * Otherwise, update the total length and the number of segments
2166 * of the current scattered packet, and update the pointer to
2167 * the last mbuf of the current packet.
2169 if (first_seg == NULL) {
2171 first_seg->pkt_len = data_len;
2172 first_seg->nb_segs = 1;
2174 first_seg->pkt_len += data_len;
2175 first_seg->nb_segs++;
2182 * If this is not the last buffer of the received packet, update
2183 * the pointer to the first mbuf at the NEXTP entry in the
2184 * sw_sc_ring and continue to parse the RX ring.
2186 if (!eop && next_rxe) {
2187 rxm->next = next_rxe->mbuf;
2188 next_sc_entry->fbuf = first_seg;
2192 /* Initialize the first mbuf of the returned packet */
2193 ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq, staterr);
2196 * Deal with the case when HW CRC stripping is disabled.
2197 * That can't happen when LRO is enabled, but still could
2198 * happen for scattered RX mode.
2200 first_seg->pkt_len -= rxq->crc_len;
2201 if (unlikely(rxm->data_len <= rxq->crc_len)) {
2202 struct rte_mbuf *lp;
2204 for (lp = first_seg; lp->next != rxm; lp = lp->next)
2207 first_seg->nb_segs--;
2208 lp->data_len -= rxq->crc_len - rxm->data_len;
2210 rte_pktmbuf_free_seg(rxm);
2212 rxm->data_len -= rxq->crc_len;
2214 /* Prefetch data of first segment, if configured to do so. */
2215 rte_packet_prefetch((char *)first_seg->buf_addr +
2216 first_seg->data_off);
2219 * Store the mbuf address into the next entry of the array
2220 * of returned packets.
2222 rx_pkts[nb_rx++] = first_seg;
2226 * Record index of the next RX descriptor to probe.
2228 rxq->rx_tail = rx_id;
2231 * If the number of free RX descriptors is greater than the RX free
2232 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
2234 * Update the RDT with the value of the last processed RX descriptor
2235 * minus 1, to guarantee that the RDT register is never equal to the
2236 * RDH register, which creates a "full" ring situation from the
2237 * hardware point of view...
2239 if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {
2240 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
2241 "nb_hold=%u nb_rx=%u",
2242 rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
2245 IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr, prev_id);
2249 rxq->nb_rx_hold = nb_hold;
2254 ixgbe_recv_pkts_lro_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
2257 return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, false);
2261 ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
2264 return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, true);
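/*
 * Illustrative sketch (not part of the driver): the LRO handlers above are
 * selected when the application enables LRO in the port configuration. The
 * field names follow the rte_eth_conf/rte_eth_rxmode API of this DPDK
 * version; port 0 and one Rx/Tx queue are example values only.
 *
 *	struct rte_eth_conf port_conf = { 0 };
 *
 *	port_conf.rxmode.enable_lro = 1;	// request RSC/LRO
 *	rte_eth_dev_configure(0, 1, 1, &port_conf);
 *
 * Whether the single-allocation or the bulk-allocation variant is used is
 * decided by the driver when it picks the Rx burst function.
 */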
2267 /*********************************************************************
2269 * Queue management functions
2271 **********************************************************************/
2273 static void __attribute__((cold))
2274 ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
2278 if (txq->sw_ring != NULL) {
2279 for (i = 0; i < txq->nb_tx_desc; i++) {
2280 if (txq->sw_ring[i].mbuf != NULL) {
2281 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
2282 txq->sw_ring[i].mbuf = NULL;
2288 static void __attribute__((cold))
2289 ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
2292 txq->sw_ring != NULL)
2293 rte_free(txq->sw_ring);
2296 static void __attribute__((cold))
2297 ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
2299 if (txq != NULL && txq->ops != NULL) {
2300 txq->ops->release_mbufs(txq);
2301 txq->ops->free_swring(txq);
2306 void __attribute__((cold))
2307 ixgbe_dev_tx_queue_release(void *txq)
2309 ixgbe_tx_queue_release(txq);
2312 /* (Re)set dynamic ixgbe_tx_queue fields to defaults */
2313 static void __attribute__((cold))
2314 ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
2316 static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
2317 struct ixgbe_tx_entry *txe = txq->sw_ring;
2320 /* Zero out HW ring memory */
2321 for (i = 0; i < txq->nb_tx_desc; i++) {
2322 txq->tx_ring[i] = zeroed_desc;
2325 /* Initialize SW ring entries */
2326 prev = (uint16_t) (txq->nb_tx_desc - 1);
2327 for (i = 0; i < txq->nb_tx_desc; i++) {
2328 volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
2330 txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD);
2333 txe[prev].next_id = i;
2337 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
2338 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2341 txq->nb_tx_used = 0;
2343 * Always allow 1 descriptor to be un-allocated to avoid
2344 * a H/W race condition
2346 txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
2347 txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
2349 memset((void *)&txq->ctx_cache, 0,
2350 IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
2353 static const struct ixgbe_txq_ops def_txq_ops = {
2354 .release_mbufs = ixgbe_tx_queue_release_mbufs,
2355 .free_swring = ixgbe_tx_free_swring,
2356 .reset = ixgbe_reset_tx_queue,
2359 /* Takes an ethdev and a queue and sets up the tx function to be used based on
2360 * the queue parameters. Used in tx_queue_setup by primary process and then
2361 * in dev_init by secondary process when attaching to an existing ethdev.
2363 void __attribute__((cold))
2364 ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
2366 /* Use a simple Tx queue (no offloads, no multi segs) if possible */
2367 if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS)
2368 && (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
2369 PMD_INIT_LOG(DEBUG, "Using simple tx code path");
2370 dev->tx_pkt_prepare = NULL;
2371 #ifdef RTE_IXGBE_INC_VECTOR
2372 if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
2373 (rte_eal_process_type() != RTE_PROC_PRIMARY ||
2374 ixgbe_txq_vec_setup(txq) == 0)) {
2375 PMD_INIT_LOG(DEBUG, "Vector tx enabled.");
2376 dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
2379 dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
2381 PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
2383 " - txq_flags = %lx " "[IXGBE_SIMPLE_FLAGS=%lx]",
2384 (unsigned long)txq->txq_flags,
2385 (unsigned long)IXGBE_SIMPLE_FLAGS);
2387 " - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
2388 (unsigned long)txq->tx_rs_thresh,
2389 (unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
2390 dev->tx_pkt_burst = ixgbe_xmit_pkts;
2391 dev->tx_pkt_prepare = ixgbe_prep_pkts;
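/*
 * Summary of the selection above (descriptive note, not new behaviour):
 * - txq_flags covering IXGBE_SIMPLE_FLAGS (no multi-segment, no offloads)
 *   and tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST select the simple path;
 *   if, in addition, tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ, the vector
 *   driver is compiled in and its setup succeeds, the vector path is used.
 * - Any other combination falls back to the full-featured ixgbe_xmit_pkts()
 *   with ixgbe_prep_pkts() as the prepare callback.
 */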
2395 int __attribute__((cold))
2396 ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
2399 unsigned int socket_id,
2400 const struct rte_eth_txconf *tx_conf)
2402 const struct rte_memzone *tz;
2403 struct ixgbe_tx_queue *txq;
2404 struct ixgbe_hw *hw;
2405 uint16_t tx_rs_thresh, tx_free_thresh;
2407 PMD_INIT_FUNC_TRACE();
2408 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2411 * Validate number of transmit descriptors.
2412 * It must not exceed hardware maximum, and must be multiple
2415 if (nb_desc % IXGBE_TXD_ALIGN != 0 ||
2416 (nb_desc > IXGBE_MAX_RING_DESC) ||
2417 (nb_desc < IXGBE_MIN_RING_DESC)) {
2422 * The following two parameters control the setting of the RS bit on
2423 * transmit descriptors.
2424 * TX descriptors will have their RS bit set after txq->tx_rs_thresh
2425 * descriptors have been used.
2426 * The TX descriptor ring will be cleaned after txq->tx_free_thresh
2427 * descriptors are used or if the number of descriptors required
2428 * to transmit a packet is greater than the number of free TX
2430 * The following constraints must be satisfied:
2431 * tx_rs_thresh must be greater than 0.
2432 * tx_rs_thresh must be less than the size of the ring minus 2.
2433 * tx_rs_thresh must be less than or equal to tx_free_thresh.
2434 * tx_rs_thresh must be a divisor of the ring size.
2435 * tx_free_thresh must be greater than 0.
2436 * tx_free_thresh must be less than the size of the ring minus 3.
2437 * One descriptor in the TX ring is used as a sentinel to avoid a
2438 * H/W race condition, hence the maximum threshold constraints.
2439 * When set to zero use default values.
2441 tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
2442 tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
2443 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
2444 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
2445 if (tx_rs_thresh >= (nb_desc - 2)) {
2446 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number "
2447 "of TX descriptors minus 2. (tx_rs_thresh=%u "
2448 "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2449 (int)dev->data->port_id, (int)queue_idx);
2452 if (tx_rs_thresh > DEFAULT_TX_RS_THRESH) {
2453 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to %u. "
2454 "(tx_rs_thresh=%u port=%d queue=%d)",
2455 DEFAULT_TX_RS_THRESH, (unsigned int)tx_rs_thresh,
2456 (int)dev->data->port_id, (int)queue_idx);
2459 if (tx_free_thresh >= (nb_desc - 3)) {
2460 PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
2461 "number of TX descriptors minus 3. "
2462 "(tx_free_thresh=%u "
2463 "port=%d queue=%d)",
2464 (unsigned int)tx_free_thresh,
2465 (int)dev->data->port_id, (int)queue_idx);
2468 if (tx_rs_thresh > tx_free_thresh) {
2469 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to "
2470 "tx_free_thresh. (tx_free_thresh=%u "
2471 "tx_rs_thresh=%u port=%d queue=%d)",
2472 (unsigned int)tx_free_thresh,
2473 (unsigned int)tx_rs_thresh,
2474 (int)dev->data->port_id,
2478 if ((nb_desc % tx_rs_thresh) != 0) {
2479 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
2480 "number of TX descriptors. (tx_rs_thresh=%u "
2481 "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2482 (int)dev->data->port_id, (int)queue_idx);
2487 * If tx_rs_thresh is greater than 1, then TX WTHRESH should be
2488 * set to 0. If WTHRESH is greater than zero, the RS bit is ignored
2489 * by the NIC and all descriptors are written back after the NIC
2490 * accumulates WTHRESH descriptors.
2492 if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
2493 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
2494 "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
2495 "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2496 (int)dev->data->port_id, (int)queue_idx);
2500 /* Free memory prior to re-allocation if needed... */
2501 if (dev->data->tx_queues[queue_idx] != NULL) {
2502 ixgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
2503 dev->data->tx_queues[queue_idx] = NULL;
2506 /* First allocate the tx queue data structure */
2507 txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue),
2508 RTE_CACHE_LINE_SIZE, socket_id);
2513 * Allocate TX ring hardware descriptors. A memzone large enough to
2514 * handle the maximum ring size is allocated in order to allow for
2515 * resizing in later calls to the queue setup function.
2517 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
2518 sizeof(union ixgbe_adv_tx_desc) * IXGBE_MAX_RING_DESC,
2519 IXGBE_ALIGN, socket_id);
2521 ixgbe_tx_queue_release(txq);
2525 txq->nb_tx_desc = nb_desc;
2526 txq->tx_rs_thresh = tx_rs_thresh;
2527 txq->tx_free_thresh = tx_free_thresh;
2528 txq->pthresh = tx_conf->tx_thresh.pthresh;
2529 txq->hthresh = tx_conf->tx_thresh.hthresh;
2530 txq->wthresh = tx_conf->tx_thresh.wthresh;
2531 txq->queue_id = queue_idx;
2532 txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2533 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2534 txq->port_id = dev->data->port_id;
2535 txq->txq_flags = tx_conf->txq_flags;
2536 txq->ops = &def_txq_ops;
2537 txq->tx_deferred_start = tx_conf->tx_deferred_start;
2540 * Set the VFTDT register instead of TDT when a virtual function (VF) is detected.
2542 if (hw->mac.type == ixgbe_mac_82599_vf ||
2543 hw->mac.type == ixgbe_mac_X540_vf ||
2544 hw->mac.type == ixgbe_mac_X550_vf ||
2545 hw->mac.type == ixgbe_mac_X550EM_x_vf ||
2546 hw->mac.type == ixgbe_mac_X550EM_a_vf)
2547 txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx));
2549 txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
2551 txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
2552 txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;
2554 /* Allocate software ring */
2555 txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
2556 sizeof(struct ixgbe_tx_entry) * nb_desc,
2557 RTE_CACHE_LINE_SIZE, socket_id);
2558 if (txq->sw_ring == NULL) {
2559 ixgbe_tx_queue_release(txq);
2562 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
2563 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
2565 /* set up vector or scalar TX function as appropriate */
2566 ixgbe_set_tx_function(dev, txq);
2568 txq->ops->reset(txq);
2570 dev->data->tx_queues[queue_idx] = txq;
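/*
 * Illustrative sketch (not part of the driver): a tx_conf that satisfies the
 * threshold constraints checked above, passed through the generic ethdev API.
 * Port 0, queue 0, 512 descriptors and thresholds of 32 are example values.
 *
 *	struct rte_eth_txconf tx_conf = { 0 };
 *
 *	tx_conf.tx_rs_thresh = 32;	// divides 512, <= tx_free_thresh
 *	tx_conf.tx_free_thresh = 32;	// < 512 - 3
 *	tx_conf.tx_thresh.wthresh = 0;	// required when tx_rs_thresh > 1
 *	rte_eth_tx_queue_setup(0, 0, 512, rte_socket_id(), &tx_conf);
 */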
2577 * ixgbe_free_sc_cluster - free the not-yet-completed scattered cluster
2579 * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
2580 * in the sw_sc_ring is not set to NULL but rather points to the next
2581 * mbuf of this RSC aggregation (that has not been completed yet and still
2582 * resides on the HW ring). So, instead of calling rte_pktmbuf_free(), we
2583 * just free the first "nb_segs" segments of the cluster explicitly by calling
2584 * rte_pktmbuf_free_seg().
2586 * @m scattered cluster head
2588 static void __attribute__((cold))
2589 ixgbe_free_sc_cluster(struct rte_mbuf *m)
2591 uint8_t i, nb_segs = m->nb_segs;
2592 struct rte_mbuf *next_seg;
2594 for (i = 0; i < nb_segs; i++) {
2596 rte_pktmbuf_free_seg(m);
2601 static void __attribute__((cold))
2602 ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
2606 #ifdef RTE_IXGBE_INC_VECTOR
2607 /* SSE Vector driver has a different way of releasing mbufs. */
2608 if (rxq->rx_using_sse) {
2609 ixgbe_rx_queue_release_mbufs_vec(rxq);
2614 if (rxq->sw_ring != NULL) {
2615 for (i = 0; i < rxq->nb_rx_desc; i++) {
2616 if (rxq->sw_ring[i].mbuf != NULL) {
2617 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
2618 rxq->sw_ring[i].mbuf = NULL;
2621 if (rxq->rx_nb_avail) {
2622 for (i = 0; i < rxq->rx_nb_avail; ++i) {
2623 struct rte_mbuf *mb;
2625 mb = rxq->rx_stage[rxq->rx_next_avail + i];
2626 rte_pktmbuf_free_seg(mb);
2628 rxq->rx_nb_avail = 0;
2632 if (rxq->sw_sc_ring)
2633 for (i = 0; i < rxq->nb_rx_desc; i++)
2634 if (rxq->sw_sc_ring[i].fbuf) {
2635 ixgbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
2636 rxq->sw_sc_ring[i].fbuf = NULL;
2640 static void __attribute__((cold))
2641 ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)
2644 ixgbe_rx_queue_release_mbufs(rxq);
2645 rte_free(rxq->sw_ring);
2646 rte_free(rxq->sw_sc_ring);
2651 void __attribute__((cold))
2652 ixgbe_dev_rx_queue_release(void *rxq)
2654 ixgbe_rx_queue_release(rxq);
2658 * Check if Rx Burst Bulk Alloc function can be used.
2660 * 0: the preconditions are satisfied and the bulk allocation function
2662 * -EINVAL: the preconditions are NOT satisfied and the default Rx burst
2663 * function must be used.
2665 static inline int __attribute__((cold))
2666 check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
2671 * Make sure the following pre-conditions are satisfied:
2672 * rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST
2673 * rxq->rx_free_thresh < rxq->nb_rx_desc
2674 * (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
2675 * Scattered packets are not supported. This should be checked
2676 * outside of this function.
2678 if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) {
2679 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2680 "rxq->rx_free_thresh=%d, "
2681 "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
2682 rxq->rx_free_thresh, RTE_PMD_IXGBE_RX_MAX_BURST);
2684 } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
2685 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2686 "rxq->rx_free_thresh=%d, "
2687 "rxq->nb_rx_desc=%d",
2688 rxq->rx_free_thresh, rxq->nb_rx_desc);
2690 } else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) {
2691 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2692 "rxq->nb_rx_desc=%d, "
2693 "rxq->rx_free_thresh=%d",
2694 rxq->nb_rx_desc, rxq->rx_free_thresh);
2701 /* Reset dynamic ixgbe_rx_queue fields back to defaults */
2702 static void __attribute__((cold))
2703 ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
2705 static const union ixgbe_adv_rx_desc zeroed_desc = {{0}};
2707 uint16_t len = rxq->nb_rx_desc;
2710 * By default, the Rx queue setup function allocates enough memory for
2711 * IXGBE_MAX_RING_DESC. The Rx Burst bulk allocation function requires
2712 * extra memory at the end of the descriptor ring to be zero'd out.
2714 if (adapter->rx_bulk_alloc_allowed)
2715 /* zero out extra memory */
2716 len += RTE_PMD_IXGBE_RX_MAX_BURST;
2719 * Zero out HW ring memory. Zero out extra memory at the end of
2720 * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
2721 * reads extra memory as zeros.
2723 for (i = 0; i < len; i++) {
2724 rxq->rx_ring[i] = zeroed_desc;
2728 * initialize extra software ring entries. Space for these extra
2729 * entries is always allocated
2731 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
2732 for (i = rxq->nb_rx_desc; i < len; ++i) {
2733 rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
2736 rxq->rx_nb_avail = 0;
2737 rxq->rx_next_avail = 0;
2738 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
2740 rxq->nb_rx_hold = 0;
2741 rxq->pkt_first_seg = NULL;
2742 rxq->pkt_last_seg = NULL;
2744 #ifdef RTE_IXGBE_INC_VECTOR
2745 rxq->rxrearm_start = 0;
2746 rxq->rxrearm_nb = 0;
2750 int __attribute__((cold))
2751 ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
2754 unsigned int socket_id,
2755 const struct rte_eth_rxconf *rx_conf,
2756 struct rte_mempool *mp)
2758 const struct rte_memzone *rz;
2759 struct ixgbe_rx_queue *rxq;
2760 struct ixgbe_hw *hw;
2762 struct ixgbe_adapter *adapter =
2763 (struct ixgbe_adapter *)dev->data->dev_private;
2765 PMD_INIT_FUNC_TRACE();
2766 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2769 * Validate number of receive descriptors.
2770 * It must not exceed hardware maximum, and must be multiple
2773 if (nb_desc % IXGBE_RXD_ALIGN != 0 ||
2774 (nb_desc > IXGBE_MAX_RING_DESC) ||
2775 (nb_desc < IXGBE_MIN_RING_DESC)) {
2779 /* Free memory prior to re-allocation if needed... */
2780 if (dev->data->rx_queues[queue_idx] != NULL) {
2781 ixgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
2782 dev->data->rx_queues[queue_idx] = NULL;
2785 /* First allocate the rx queue data structure */
2786 rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct ixgbe_rx_queue),
2787 RTE_CACHE_LINE_SIZE, socket_id);
2791 rxq->nb_rx_desc = nb_desc;
2792 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
2793 rxq->queue_id = queue_idx;
2794 rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2795 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2796 rxq->port_id = dev->data->port_id;
2797 rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
2799 rxq->drop_en = rx_conf->rx_drop_en;
2800 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
2803 * The packet type in the RX descriptor is different for different NICs.
2804 * Some bits are used for x550 but reserved for other NICs.
2805 * So set different masks for different NICs.
2807 if (hw->mac.type == ixgbe_mac_X550 ||
2808 hw->mac.type == ixgbe_mac_X550EM_x ||
2809 hw->mac.type == ixgbe_mac_X550EM_a ||
2810 hw->mac.type == ixgbe_mac_X550_vf ||
2811 hw->mac.type == ixgbe_mac_X550EM_x_vf ||
2812 hw->mac.type == ixgbe_mac_X550EM_a_vf)
2813 rxq->pkt_type_mask = IXGBE_PACKET_TYPE_MASK_X550;
2815 rxq->pkt_type_mask = IXGBE_PACKET_TYPE_MASK_82599;
2818 * Allocate RX ring hardware descriptors. A memzone large enough to
2819 * handle the maximum ring size is allocated in order to allow for
2820 * resizing in later calls to the queue setup function.
2822 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
2823 RX_RING_SZ, IXGBE_ALIGN, socket_id);
2825 ixgbe_rx_queue_release(rxq);
2830 * Zero init all the descriptors in the ring.
2832 memset(rz->addr, 0, RX_RING_SZ);
2835 * Set up the VFRDT/VFRDH registers instead of RDT/RDH for a Virtual Function.
2837 if (hw->mac.type == ixgbe_mac_82599_vf ||
2838 hw->mac.type == ixgbe_mac_X540_vf ||
2839 hw->mac.type == ixgbe_mac_X550_vf ||
2840 hw->mac.type == ixgbe_mac_X550EM_x_vf ||
2841 hw->mac.type == ixgbe_mac_X550EM_a_vf) {
2843 IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx));
2845 IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDH(queue_idx));
2848 IXGBE_PCI_REG_ADDR(hw, IXGBE_RDT(rxq->reg_idx));
2850 IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx));
2853 rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
2854 rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
2857 * Certain constraints must be met in order to use the bulk buffer
2858 * allocation Rx burst function. If any of the Rx queues doesn't meet them,
2859 * the feature should be disabled for the whole port.
2861 if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
2862 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc "
2863 "preconditions - canceling the feature for "
2864 "the whole port[%d]",
2865 rxq->queue_id, rxq->port_id);
2866 adapter->rx_bulk_alloc_allowed = false;
2870 * Allocate software ring. Allow for space at the end of the
2871 * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
2872 * function does not access an invalid memory region.
2875 if (adapter->rx_bulk_alloc_allowed)
2876 len += RTE_PMD_IXGBE_RX_MAX_BURST;
2878 rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
2879 sizeof(struct ixgbe_rx_entry) * len,
2880 RTE_CACHE_LINE_SIZE, socket_id);
2881 if (!rxq->sw_ring) {
2882 ixgbe_rx_queue_release(rxq);
2887 * Always allocate even if it's not going to be needed in order to
2888 * simplify the code.
2890 * This ring is used in LRO and Scattered Rx cases and Scattered Rx may
2891 * be requested in ixgbe_dev_rx_init(), which is called later from
2895 rte_zmalloc_socket("rxq->sw_sc_ring",
2896 sizeof(struct ixgbe_scattered_rx_entry) * len,
2897 RTE_CACHE_LINE_SIZE, socket_id);
2898 if (!rxq->sw_sc_ring) {
2899 ixgbe_rx_queue_release(rxq);
2903 PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p "
2904 "dma_addr=0x%"PRIx64,
2905 rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
2906 rxq->rx_ring_phys_addr);
2908 if (!rte_is_power_of_2(nb_desc)) {
2909 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
2910 "preconditions - canceling the feature for "
2911 "the whole port[%d]",
2912 rxq->queue_id, rxq->port_id);
2913 adapter->rx_vec_allowed = false;
2915 ixgbe_rxq_vec_setup(rxq);
2917 dev->data->rx_queues[queue_idx] = rxq;
2919 ixgbe_reset_rx_queue(adapter, rxq);
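/*
 * Illustrative sketch (not part of the driver): an Rx queue configuration
 * that keeps both the bulk-allocation and the vector preconditions satisfied
 * (a free threshold of at least RTE_PMD_IXGBE_RX_MAX_BURST that divides a
 * power-of-two ring size). Pool name, sizes and port/queue ids are example
 * values only.
 *
 *	struct rte_eth_rxconf rx_conf = { 0 };
 *	struct rte_mempool *mp;
 *
 *	mp = rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
 *				     RTE_MBUF_DEFAULT_BUF_SIZE,
 *				     rte_socket_id());
 *	rx_conf.rx_free_thresh = 32;	// >= RTE_PMD_IXGBE_RX_MAX_BURST, divides 512
 *	rte_eth_rx_queue_setup(0, 0, 512, rte_socket_id(), &rx_conf, mp);
 */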
2925 ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
2927 #define IXGBE_RXQ_SCAN_INTERVAL 4
2928 volatile union ixgbe_adv_rx_desc *rxdp;
2929 struct ixgbe_rx_queue *rxq;
2932 rxq = dev->data->rx_queues[rx_queue_id];
2933 rxdp = &(rxq->rx_ring[rxq->rx_tail]);
2935 while ((desc < rxq->nb_rx_desc) &&
2936 (rxdp->wb.upper.status_error &
2937 rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) {
2938 desc += IXGBE_RXQ_SCAN_INTERVAL;
2939 rxdp += IXGBE_RXQ_SCAN_INTERVAL;
2940 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
2941 rxdp = &(rxq->rx_ring[rxq->rx_tail +
2942 desc - rxq->nb_rx_desc]);
2949 ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
2951 volatile union ixgbe_adv_rx_desc *rxdp;
2952 struct ixgbe_rx_queue *rxq = rx_queue;
2955 if (unlikely(offset >= rxq->nb_rx_desc))
2957 desc = rxq->rx_tail + offset;
2958 if (desc >= rxq->nb_rx_desc)
2959 desc -= rxq->nb_rx_desc;
2961 rxdp = &rxq->rx_ring[desc];
2962 return !!(rxdp->wb.upper.status_error &
2963 rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD));
2967 ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
2969 struct ixgbe_rx_queue *rxq = rx_queue;
2970 volatile uint32_t *status;
2971 uint32_t nb_hold, desc;
2973 if (unlikely(offset >= rxq->nb_rx_desc))
2976 #ifdef RTE_IXGBE_INC_VECTOR
2977 if (rxq->rx_using_sse)
2978 nb_hold = rxq->rxrearm_nb;
2981 nb_hold = rxq->nb_rx_hold;
2982 if (offset >= rxq->nb_rx_desc - nb_hold)
2983 return RTE_ETH_RX_DESC_UNAVAIL;
2985 desc = rxq->rx_tail + offset;
2986 if (desc >= rxq->nb_rx_desc)
2987 desc -= rxq->nb_rx_desc;
2989 status = &rxq->rx_ring[desc].wb.upper.status_error;
2990 if (*status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))
2991 return RTE_ETH_RX_DESC_DONE;
2993 return RTE_ETH_RX_DESC_AVAIL;
2997 ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
2999 struct ixgbe_tx_queue *txq = tx_queue;
3000 volatile uint32_t *status;
3003 if (unlikely(offset >= txq->nb_tx_desc))
3006 desc = txq->tx_tail + offset;
3007 /* go to next desc that has the RS bit */
3008 desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
3010 if (desc >= txq->nb_tx_desc) {
3011 desc -= txq->nb_tx_desc;
3012 if (desc >= txq->nb_tx_desc)
3013 desc -= txq->nb_tx_desc;
3016 status = &txq->tx_ring[desc].wb.status;
3017 if (*status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD))
3018 return RTE_ETH_TX_DESC_DONE;
3020 return RTE_ETH_TX_DESC_FULL;
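/*
 * Illustrative sketch (not part of the driver): the status callbacks above
 * back the generic descriptor-status helpers, which applications can use to
 * estimate ring occupancy without touching the rings directly. Port 0,
 * queue 0 and an offset of 64 are example values only.
 *
 *	if (rte_eth_rx_descriptor_status(0, 0, 64) == RTE_ETH_RX_DESC_DONE)
 *		;	// the descriptor 64 entries past the tail already
 *			// holds a received packet
 *	if (rte_eth_tx_descriptor_status(0, 0, 64) == RTE_ETH_TX_DESC_FULL)
 *		;	// that Tx descriptor has not been written back yet
 */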
3023 void __attribute__((cold))
3024 ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
3027 struct ixgbe_adapter *adapter =
3028 (struct ixgbe_adapter *)dev->data->dev_private;
3030 PMD_INIT_FUNC_TRACE();
3032 for (i = 0; i < dev->data->nb_tx_queues; i++) {
3033 struct ixgbe_tx_queue *txq = dev->data->tx_queues[i];
3036 txq->ops->release_mbufs(txq);
3037 txq->ops->reset(txq);
3041 for (i = 0; i < dev->data->nb_rx_queues; i++) {
3042 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
3045 ixgbe_rx_queue_release_mbufs(rxq);
3046 ixgbe_reset_rx_queue(adapter, rxq);
3052 ixgbe_dev_free_queues(struct rte_eth_dev *dev)
3056 PMD_INIT_FUNC_TRACE();
3058 for (i = 0; i < dev->data->nb_rx_queues; i++) {
3059 ixgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
3060 dev->data->rx_queues[i] = NULL;
3062 dev->data->nb_rx_queues = 0;
3064 for (i = 0; i < dev->data->nb_tx_queues; i++) {
3065 ixgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
3066 dev->data->tx_queues[i] = NULL;
3068 dev->data->nb_tx_queues = 0;
3071 /*********************************************************************
3073 * Device RX/TX init functions
3075 **********************************************************************/
3078 * Receive Side Scaling (RSS)
3079 * See section 7.1.2.8 in the following document:
3080 * "Intel 82599 10 GbE Controller Datasheet" - Revision 2.1 October 2009
3083 * The source and destination IP addresses of the IP header and the source
3084 * and destination ports of TCP/UDP headers, if any, of received packets are
3085 * hashed against a configurable random key to compute a 32-bit RSS hash result.
3086 * The seven (7) LSBs of the 32-bit hash result are used as an index into a
3087 * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
3088 * RSS output index which is used as the RX queue index where to store the
3090 * The following output is supplied in the RX write-back descriptor:
3091 * - 32-bit result of the Microsoft RSS hash function,
3092 * - 4-bit RSS type field.
3096 * RSS random key supplied in section 7.1.2.8.3 of the Intel 82599 datasheet.
3097 * Used as the default key.
3099 static uint8_t rss_intel_key[40] = {
3100 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
3101 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
3102 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
3103 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
3104 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
3108 ixgbe_rss_disable(struct rte_eth_dev *dev)
3110 struct ixgbe_hw *hw;
3114 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3115 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3116 mrqc = IXGBE_READ_REG(hw, mrqc_reg);
3117 mrqc &= ~IXGBE_MRQC_RSSEN;
3118 IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
3122 ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
3132 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3133 rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
3135 hash_key = rss_conf->rss_key;
3136 if (hash_key != NULL) {
3137 /* Fill in RSS hash key */
3138 for (i = 0; i < 10; i++) {
3139 rss_key = hash_key[(i * 4)];
3140 rss_key |= hash_key[(i * 4) + 1] << 8;
3141 rss_key |= hash_key[(i * 4) + 2] << 16;
3142 rss_key |= hash_key[(i * 4) + 3] << 24;
3143 IXGBE_WRITE_REG_ARRAY(hw, rssrk_reg, i, rss_key);
3147 /* Set configured hashing protocols in MRQC register */
3148 rss_hf = rss_conf->rss_hf;
3149 mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */
3150 if (rss_hf & ETH_RSS_IPV4)
3151 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
3152 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
3153 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
3154 if (rss_hf & ETH_RSS_IPV6)
3155 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
3156 if (rss_hf & ETH_RSS_IPV6_EX)
3157 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
3158 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
3159 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3160 if (rss_hf & ETH_RSS_IPV6_TCP_EX)
3161 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
3162 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
3163 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3164 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
3165 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3166 if (rss_hf & ETH_RSS_IPV6_UDP_EX)
3167 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
3168 IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
3172 ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
3173 struct rte_eth_rss_conf *rss_conf)
3175 struct ixgbe_hw *hw;
3180 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3182 if (!ixgbe_rss_update_sp(hw->mac.type)) {
3183 PMD_DRV_LOG(ERR, "RSS hash update is not supported on this "
3187 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3190 * Excerpt from section 7.1.2.8 Receive-Side Scaling (RSS):
3191 * "RSS enabling cannot be done dynamically while it must be
3192 * preceded by a software reset"
3193 * Before changing anything, first check that the update RSS operation
3194 * does not attempt to disable RSS, if RSS was enabled at
3195 * initialization time, or does not attempt to enable RSS, if RSS was
3196 * disabled at initialization time.
3198 rss_hf = rss_conf->rss_hf & IXGBE_RSS_OFFLOAD_ALL;
3199 mrqc = IXGBE_READ_REG(hw, mrqc_reg);
3200 if (!(mrqc & IXGBE_MRQC_RSSEN)) { /* RSS disabled */
3201 if (rss_hf != 0) /* Enable RSS */
3203 return 0; /* Nothing to do */
3206 if (rss_hf == 0) /* Disable RSS */
3208 ixgbe_hw_rss_hash_set(hw, rss_conf);
3213 ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
3214 struct rte_eth_rss_conf *rss_conf)
3216 struct ixgbe_hw *hw;
3225 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3226 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3227 rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
3228 hash_key = rss_conf->rss_key;
3229 if (hash_key != NULL) {
3230 /* Return RSS hash key */
3231 for (i = 0; i < 10; i++) {
3232 rss_key = IXGBE_READ_REG_ARRAY(hw, rssrk_reg, i);
3233 hash_key[(i * 4)] = rss_key & 0x000000FF;
3234 hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
3235 hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
3236 hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
3240 /* Get RSS functions configured in MRQC register */
3241 mrqc = IXGBE_READ_REG(hw, mrqc_reg);
3242 if ((mrqc & IXGBE_MRQC_RSSEN) == 0) { /* RSS is disabled */
3243 rss_conf->rss_hf = 0;
3247 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4)
3248 rss_hf |= ETH_RSS_IPV4;
3249 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP)
3250 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
3251 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6)
3252 rss_hf |= ETH_RSS_IPV6;
3253 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX)
3254 rss_hf |= ETH_RSS_IPV6_EX;
3255 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP)
3256 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
3257 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP)
3258 rss_hf |= ETH_RSS_IPV6_TCP_EX;
3259 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP)
3260 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
3261 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP)
3262 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
3263 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP)
3264 rss_hf |= ETH_RSS_IPV6_UDP_EX;
3265 rss_conf->rss_hf = rss_hf;
3270 ixgbe_rss_configure(struct rte_eth_dev *dev)
3272 struct rte_eth_rss_conf rss_conf;
3273 struct ixgbe_hw *hw;
3277 uint16_t sp_reta_size;
3280 PMD_INIT_FUNC_TRACE();
3281 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3283 sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
3286 * Fill in redirection table
3287 * The byte-swap is needed because NIC registers are in
3288 * little-endian order.
3291 for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
3292 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
3294 if (j == dev->data->nb_rx_queues)
3296 reta = (reta << 8) | j;
3298 IXGBE_WRITE_REG(hw, reta_reg,
3303 * Configure the RSS key and the RSS protocols used to compute
3304 * the RSS hash of input packets.
3306 rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
3307 if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
3308 ixgbe_rss_disable(dev);
3311 if (rss_conf.rss_key == NULL)
3312 rss_conf.rss_key = rss_intel_key; /* Default hash key */
3313 ixgbe_hw_rss_hash_set(hw, &rss_conf);
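/*
 * Illustrative sketch (not part of the driver): how an application requests
 * the RSS setup performed above. With rss_key left NULL the default
 * rss_intel_key is programmed. The hash-function selection and the port /
 * queue counts are example values only.
 *
 *	struct rte_eth_conf port_conf = { 0 };
 *
 *	port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
 *	port_conf.rx_adv_conf.rss_conf.rss_key = NULL;
 *	port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP | ETH_RSS_TCP |
 *						ETH_RSS_UDP;
 *	rte_eth_dev_configure(0, 1, 1, &port_conf);
 *
 * The destination queue of a packet is then reta[hash & 0x7F]: the 7 LSBs of
 * the 32-bit hash index the redirection table filled in above (128 entries
 * on 82599; larger on later parts).
 */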
3316 #define NUM_VFTA_REGISTERS 128
3317 #define NIC_RX_BUFFER_SIZE 0x200
3318 #define X550_RX_BUFFER_SIZE 0x180
3321 ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
3323 struct rte_eth_vmdq_dcb_conf *cfg;
3324 struct ixgbe_hw *hw;
3325 enum rte_eth_nb_pools num_pools;
3326 uint32_t mrqc, vt_ctl, queue_mapping, vlanctrl;
3328 uint8_t nb_tcs; /* number of traffic classes */
3331 PMD_INIT_FUNC_TRACE();
3332 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3333 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
3334 num_pools = cfg->nb_queue_pools;
3335 /* Check we have a valid number of pools */
3336 if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
3337 ixgbe_rss_disable(dev);
3340 /* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
3341 nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
3345 * split rx buffer up into sections, each for 1 traffic class
3347 switch (hw->mac.type) {
3348 case ixgbe_mac_X550:
3349 case ixgbe_mac_X550EM_x:
3350 case ixgbe_mac_X550EM_a:
3351 pbsize = (uint16_t)(X550_RX_BUFFER_SIZE / nb_tcs);
3354 pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
3357 for (i = 0; i < nb_tcs; i++) {
3358 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
3360 rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
3361 /* clear 10 bits. */
3362 rxpbsize |= (pbsize << IXGBE_RXPBSIZE_SHIFT); /* set value */
3363 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
3365 /* zero alloc all unused TCs */
3366 for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3367 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
3369 rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
3370 /* clear 10 bits. */
3371 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
3374 /* MRQC: enable vmdq and dcb */
3375 mrqc = (num_pools == ETH_16_POOLS) ?
3376 IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN;
3377 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3379 /* PFVTCTL: turn on virtualisation and set the default pool */
3380 vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
3381 if (cfg->enable_default_pool) {
3382 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
3384 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
3387 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
3389 /* RTRUP2TC: mapping user priorities to traffic classes (TCs) */
3391 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
3393 * mapping is done with 3 bits per priority,
3394 * so shift by i*3 each time
3396 queue_mapping |= ((cfg->dcb_tc[i] & 0x07) << (i * 3));
3398 IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, queue_mapping);
3400 /* RTRPCS: DCB related */
3401 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, IXGBE_RMCS_RRM);
3403 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
3404 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3405 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
3406 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
3408 /* VFTA - enable all vlan filters */
3409 for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
3410 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
3413 /* VFRE: pool enabling for receive - 16 or 32 */
3414 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0),
3415 num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
3418 * MPSAR - allow pools to read specific mac addresses
3419 * In this case, all pools should be able to read from mac addr 0
3421 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), 0xFFFFFFFF);
3422 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), 0xFFFFFFFF);
3424 /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
3425 for (i = 0; i < cfg->nb_pool_maps; i++) {
3426 /* set vlan id in VF register and set the valid bit */
3427 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN |
3428 (cfg->pool_map[i].vlan_id & 0xFFF)));
3430 * Put the allowed pools in VFB reg. As we only have 16 or 32
3431 * pools, we only need to use the first half of the register
3434 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), cfg->pool_map[i].pools);
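/*
 * Illustrative sketch (not part of the driver): the rte_eth_vmdq_dcb_conf
 * consumed above, as an application might fill it in for 16 pools x 8 TCs.
 * The VLAN id, pool mask and priority mapping are example values only.
 *
 *	struct rte_eth_conf port_conf = { 0 };
 *	struct rte_eth_vmdq_dcb_conf *cfg =
 *		&port_conf.rx_adv_conf.vmdq_dcb_conf;
 *	unsigned int i;
 *
 *	cfg->nb_queue_pools = ETH_16_POOLS;	// 16 pools -> 8 TCs each
 *	cfg->enable_default_pool = 0;
 *	cfg->nb_pool_maps = 1;
 *	cfg->pool_map[0].vlan_id = 100;		// packets with VLAN 100 ...
 *	cfg->pool_map[0].pools = 1ULL << 0;	// ... land in pool 0
 *	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
 *		cfg->dcb_tc[i] = i;		// priority i -> TC i
 *	port_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
 */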
3439 * ixgbe_dcb_tx_hw_config - Configure general DCB TX parameters
3440 * @dev: pointer to eth_dev structure
3441 * @dcb_config: pointer to ixgbe_dcb_config structure
3444 ixgbe_dcb_tx_hw_config(struct rte_eth_dev *dev,
3445 struct ixgbe_dcb_config *dcb_config)
3448 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3450 PMD_INIT_FUNC_TRACE();
3451 if (hw->mac.type != ixgbe_mac_82598EB) {
3452 /* Disable the Tx desc arbiter so that MTQC can be changed */
3453 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3454 reg |= IXGBE_RTTDCS_ARBDIS;
3455 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3457 /* Enable DCB for Tx with 8 TCs */
3458 if (dcb_config->num_tcs.pg_tcs == 8) {
3459 reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3461 reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3463 if (dcb_config->vt_mode)
3464 reg |= IXGBE_MTQC_VT_ENA;
3465 IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
3467 /* Enable the Tx desc arbiter */
3468 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3469 reg &= ~IXGBE_RTTDCS_ARBDIS;
3470 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3472 /* Enable Security TX Buffer IFG for DCB */
3473 reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
3474 reg |= IXGBE_SECTX_DCB;
3475 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
3480 * ixgbe_vmdq_dcb_hw_tx_config - Configure general VMDQ+DCB TX parameters
3481 * @dev: pointer to rte_eth_dev structure
3482 * @dcb_config: pointer to ixgbe_dcb_config structure
3485 ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
3486 struct ixgbe_dcb_config *dcb_config)
3488 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3489 &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
3490 struct ixgbe_hw *hw =
3491 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3493 PMD_INIT_FUNC_TRACE();
3494 if (hw->mac.type != ixgbe_mac_82598EB)
3495 /*PF VF Transmit Enable*/
3496 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0),
3497 vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
3499 /*Configure general DCB TX parameters*/
3500 ixgbe_dcb_tx_hw_config(dev, dcb_config);
3504 ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
3505 struct ixgbe_dcb_config *dcb_config)
3507 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3508 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
3509 struct ixgbe_dcb_tc_config *tc;
3512 /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
3513 if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) {
3514 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
3515 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
3517 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
3518 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
3521 /* Initialize User Priority to Traffic Class mapping */
3522 for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3523 tc = &dcb_config->tc_config[j];
3524 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
3527 /* User Priority to Traffic Class mapping */
3528 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3529 j = vmdq_rx_conf->dcb_tc[i];
3530 tc = &dcb_config->tc_config[j];
3531 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
3537 ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
3538 struct ixgbe_dcb_config *dcb_config)
3540 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3541 &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
3542 struct ixgbe_dcb_tc_config *tc;
3545 /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
3546 if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) {
3547 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
3548 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
3550 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
3551 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
3554 /* Initialize User Priority to Traffic Class mapping */
3555 for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3556 tc = &dcb_config->tc_config[j];
3557 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
3560 /* User Priority to Traffic Class mapping */
3561 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3562 j = vmdq_tx_conf->dcb_tc[i];
3563 tc = &dcb_config->tc_config[j];
3564 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
3570 ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
3571 struct ixgbe_dcb_config *dcb_config)
3573 struct rte_eth_dcb_rx_conf *rx_conf =
3574 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
3575 struct ixgbe_dcb_tc_config *tc;
3578 dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
3579 dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
3581 /* Initialize User Priority to Traffic Class mapping */
3582 for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3583 tc = &dcb_config->tc_config[j];
3584 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
3587 /* User Priority to Traffic Class mapping */
3588 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3589 j = rx_conf->dcb_tc[i];
3590 tc = &dcb_config->tc_config[j];
3591 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
3597 ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
3598 struct ixgbe_dcb_config *dcb_config)
3600 struct rte_eth_dcb_tx_conf *tx_conf =
3601 &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
3602 struct ixgbe_dcb_tc_config *tc;
3605 dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
3606 dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
3608 /* Initialize User Priority to Traffic Class mapping */
3609 for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3610 tc = &dcb_config->tc_config[j];
3611 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
3614 /* User Priority to Traffic Class mapping */
3615 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3616 j = tx_conf->dcb_tc[i];
3617 tc = &dcb_config->tc_config[j];
3618 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
3624 * ixgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
3625 * @dev: pointer to eth_dev structure
3626 * @dcb_config: pointer to ixgbe_dcb_config structure
3629 ixgbe_dcb_rx_hw_config(struct rte_eth_dev *dev,
3630 struct ixgbe_dcb_config *dcb_config)
3636 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3638 PMD_INIT_FUNC_TRACE();
3640 * Disable the arbiter before changing parameters
3641 * (always enable recycle mode; WSP)
3643 reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
3644 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
3646 if (hw->mac.type != ixgbe_mac_82598EB) {
3647 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
3648 if (dcb_config->num_tcs.pg_tcs == 4) {
3649 if (dcb_config->vt_mode)
3650 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3651 IXGBE_MRQC_VMDQRT4TCEN;
3653 /* No matter whether the mode is DCB or DCB_RSS, just
3654 * set the MRQE to RSSXTCEN. RSS is controlled
3657 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
3658 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3659 IXGBE_MRQC_RTRSS4TCEN;
3662 if (dcb_config->num_tcs.pg_tcs == 8) {
3663 if (dcb_config->vt_mode)
3664 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3665 IXGBE_MRQC_VMDQRT8TCEN;
3667 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
3668 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3669 IXGBE_MRQC_RTRSS8TCEN;
3673 IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
3675 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3676 /* Disable drop for all queues in VMDQ mode*/
3677 for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
3678 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3680 (q << IXGBE_QDE_IDX_SHIFT)));
3682 /* Enable drop for all queues in SRIOV mode */
3683 for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
3684 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3686 (q << IXGBE_QDE_IDX_SHIFT) |
3691 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
3692 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3693 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
3694 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
3696 /* VFTA - enable all vlan filters */
3697 for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
3698 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
3702 * Configure Rx packet plane (recycle mode; WSP) and
3705 reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
3706 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
3710 ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill,
3711 uint16_t *max, uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
3713 switch (hw->mac.type) {
3714 case ixgbe_mac_82598EB:
3715 ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
3717 case ixgbe_mac_82599EB:
3718 case ixgbe_mac_X540:
3719 case ixgbe_mac_X550:
3720 case ixgbe_mac_X550EM_x:
3721 case ixgbe_mac_X550EM_a:
3722 ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
3731 ixgbe_dcb_hw_arbite_tx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *max,
3732 uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
3734 switch (hw->mac.type) {
3735 case ixgbe_mac_82598EB:
3736 ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, tsa);
3737 ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, tsa);
3739 case ixgbe_mac_82599EB:
3740 case ixgbe_mac_X540:
3741 case ixgbe_mac_X550:
3742 case ixgbe_mac_X550EM_x:
3743 case ixgbe_mac_X550EM_a:
3744 ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, tsa);
3745 ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, tsa, map);
3752 #define DCB_RX_CONFIG 1
3753 #define DCB_TX_CONFIG 1
3754 #define DCB_TX_PB 1024
3756 * ixgbe_dcb_hw_configure - Enable DCB and configure
3757 * general DCB in VT mode and non-VT mode parameters
3758 * @dev: pointer to rte_eth_dev structure
3759 * @dcb_config: pointer to ixgbe_dcb_config structure
3762 ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
3763 struct ixgbe_dcb_config *dcb_config)
3766 uint8_t i, pfc_en, nb_tcs;
3767 uint16_t pbsize, rx_buffer_size;
3768 uint8_t config_dcb_rx = 0;
3769 uint8_t config_dcb_tx = 0;
3770 uint8_t tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3771 uint8_t bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3772 uint16_t refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3773 uint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3774 uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3775 struct ixgbe_dcb_tc_config *tc;
3776 uint32_t max_frame = dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
3777 struct ixgbe_hw *hw =
3778 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3779 struct ixgbe_bw_conf *bw_conf =
3780 IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
3782 switch (dev->data->dev_conf.rxmode.mq_mode) {
3783 case ETH_MQ_RX_VMDQ_DCB:
3784 dcb_config->vt_mode = true;
3785 if (hw->mac.type != ixgbe_mac_82598EB) {
3786 config_dcb_rx = DCB_RX_CONFIG;
3788 * get DCB and VT RX configuration parameters
3791 ixgbe_vmdq_dcb_rx_config(dev, dcb_config);
3792 /*Configure general VMDQ and DCB RX parameters*/
3793 ixgbe_vmdq_dcb_configure(dev);
3797 case ETH_MQ_RX_DCB_RSS:
3798 dcb_config->vt_mode = false;
3799 config_dcb_rx = DCB_RX_CONFIG;
3800 /* Get DCB RX configuration parameters from rte_eth_conf */
3801 ixgbe_dcb_rx_config(dev, dcb_config);
3802 /*Configure general DCB RX parameters*/
3803 ixgbe_dcb_rx_hw_config(dev, dcb_config);
3806 PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
3809 switch (dev->data->dev_conf.txmode.mq_mode) {
3810 case ETH_MQ_TX_VMDQ_DCB:
3811 dcb_config->vt_mode = true;
3812 config_dcb_tx = DCB_TX_CONFIG;
3813 /* get DCB and VT TX configuration parameters
3816 ixgbe_dcb_vt_tx_config(dev, dcb_config);
3817 /*Configure general VMDQ and DCB TX parameters*/
3818 ixgbe_vmdq_dcb_hw_tx_config(dev, dcb_config);
3822 dcb_config->vt_mode = false;
3823 config_dcb_tx = DCB_TX_CONFIG;
3824 /* Get DCB TX configuration parameters from rte_eth_conf */
3825 ixgbe_dcb_tx_config(dev, dcb_config);
3826 /* Configure general DCB TX parameters */
3827 ixgbe_dcb_tx_hw_config(dev, dcb_config);
3830 PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration");
3834 nb_tcs = dcb_config->num_tcs.pfc_tcs;
3836 ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
3837 if (nb_tcs == ETH_4_TCS) {
3838 /* Avoid un-configured priority mapping to TC0 */
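/*
 * First build a mask of the TCs that are not referenced by user
 * priorities 0-3, then map the remaining priorities (4-7) onto those
 * unused TCs so that un-configured priorities do not all land on TC0.
 */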
3840 uint8_t mask = 0xFF;
3842 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
3843 mask = (uint8_t)(mask & (~(1 << map[i])));
3844 for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
3845 if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
3849 /* Re-configure 4 TCs BW */
3850 for (i = 0; i < nb_tcs; i++) {
3851 tc = &dcb_config->tc_config[i];
3852 if (bw_conf->tc_num != nb_tcs)
3853 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
3854 (uint8_t)(100 / nb_tcs);
3855 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
3856 (uint8_t)(100 / nb_tcs);
3858 for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
3859 tc = &dcb_config->tc_config[i];
3860 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
3861 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 0;
3864 /* Re-configure 8 TCs BW */
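/*
 * 100 / 8 leaves a remainder with integer division, so odd-indexed TCs
 * get one extra percent (the "+ (i & 1)" below): 4 * 12 + 4 * 13 = 100.
 */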
3865 for (i = 0; i < nb_tcs; i++) {
3866 tc = &dcb_config->tc_config[i];
3867 if (bw_conf->tc_num != nb_tcs)
3868 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
3869 (uint8_t)(100 / nb_tcs + (i & 1));
3870 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
3871 (uint8_t)(100 / nb_tcs + (i & 1));
3875 switch (hw->mac.type) {
3876 case ixgbe_mac_X550:
3877 case ixgbe_mac_X550EM_x:
3878 case ixgbe_mac_X550EM_a:
3879 rx_buffer_size = X550_RX_BUFFER_SIZE;
3882 rx_buffer_size = NIC_RX_BUFFER_SIZE;
3886 if (config_dcb_rx) {
3887 /* Set RX buffer size */
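/*
 * The Rx packet buffer is split evenly across the enabled TCs; the
 * per-TC size is shifted into position for the RXPBSIZE register and
 * any remaining TCs are given a zero-sized buffer below.
 */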
3888 pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
3889 uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT;
3891 for (i = 0; i < nb_tcs; i++) {
3892 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
3894 /* Allocate zero buffer space to all unused TCs */
3895 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3896 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
3899 if (config_dcb_tx) {
3900 /* Only an equally distributed Tx packet
3901 * buffer strategy is supported.
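 * Illustrative numbers (assumed, not taken from this file): with a
 * 640 KB total Tx packet buffer and 4 TCs, each TC would get a 160 KB
 * txpktsize and a txpbthresh of that size in 1 KB units minus the
 * maximum packet size constant.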
3903 uint32_t txpktsize = IXGBE_TXPBSIZE_MAX / nb_tcs;
3904 uint32_t txpbthresh = (txpktsize / DCB_TX_PB) - IXGBE_TXPKT_SIZE_MAX;
3906 for (i = 0; i < nb_tcs; i++) {
3907 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
3908 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
3910 /* Clear unused TCs, if any, to zero buffer size */
3911 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3912 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
3913 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
3917 /* Calculate traffic class credits */
3918 ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
3919 IXGBE_DCB_TX_CONFIG);
3920 ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
3921 IXGBE_DCB_RX_CONFIG);
3923 if (config_dcb_rx) {
3924 /* Unpack CEE standard containers */
3925 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_RX_CONFIG, refill);
3926 ixgbe_dcb_unpack_max_cee(dcb_config, max);
3927 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_RX_CONFIG, bwgid);
3928 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_RX_CONFIG, tsa);
3929 /* Configure PG(ETS) RX */
3930 ixgbe_dcb_hw_arbite_rx_config(hw, refill, max, bwgid, tsa, map);
3933 if (config_dcb_tx) {
3934 /* Unpack CEE standard containers */
3935 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
3936 ixgbe_dcb_unpack_max_cee(dcb_config, max);
3937 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
3938 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
3939 /* Configure PG(ETS) TX */
3940 ixgbe_dcb_hw_arbite_tx_config(hw, refill, max, bwgid, tsa, map);
3943 /* Configure queue statistics registers */
3944 ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
3946 /* Check if the PFC is supported */
3947 if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
3948 pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
3949 for (i = 0; i < nb_tcs; i++) {
3951 * For example, if the TC count is 8, the default high_water is 48
3952 * and the default low_water is 16.
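 * That example assumes a 512 KB Rx packet buffer (value assumed here):
 * 512 KB / 8 TCs = 64 KB per TC, of which 3/4 is 48 (high_water) and
 * 1/4 is 16 (low_water).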
3954 hw->fc.high_water[i] = (pbsize * 3) / 4;
3955 hw->fc.low_water[i] = pbsize / 4;
3956 /* Enable pfc for this TC */
3957 tc = &dcb_config->tc_config[i];
3958 tc->pfc = ixgbe_dcb_pfc_enabled;
3960 ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
3961 if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
3963 ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
3970 * ixgbe_configure_dcb - Configure DCB Hardware
3971 * @dev: pointer to rte_eth_dev
3973 void ixgbe_configure_dcb(struct rte_eth_dev *dev)
3975 struct ixgbe_dcb_config *dcb_cfg =
3976 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
3977 struct rte_eth_conf *dev_conf = &(dev->data->dev_conf);
3979 PMD_INIT_FUNC_TRACE();
3981 /* Check whether the mq_mode is supported for DCB */
3982 if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
3983 (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) &&
3984 (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS))
3987 if (dev->data->nb_rx_queues > ETH_DCB_NUM_QUEUES)
3990 /** Configure DCB hardware **/
3991 ixgbe_dcb_hw_configure(dev, dcb_cfg);
3995 * VMDq is only supported on 10 GbE NICs.
3998 ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
4000 struct rte_eth_vmdq_rx_conf *cfg;
4001 struct ixgbe_hw *hw;
4002 enum rte_eth_nb_pools num_pools;
4003 uint32_t mrqc, vt_ctl, vlanctrl;
4007 PMD_INIT_FUNC_TRACE();
4008 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4009 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
4010 num_pools = cfg->nb_queue_pools;
4012 ixgbe_rss_disable(dev);
4014 /* MRQC: enable vmdq */
4015 mrqc = IXGBE_MRQC_VMDQEN;
4016 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
4018 /* PFVTCTL: turn on virtualisation and set the default pool */
4019 vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
4020 if (cfg->enable_default_pool)
4021 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
4023 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
4025 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
4027 for (i = 0; i < (int)num_pools; i++) {
4028 vmolr = ixgbe_convert_vm_rx_mask_to_val(cfg->rx_mode, vmolr);
4029 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
4032 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
4033 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4034 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
4035 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
4037 /* VFTA - enable all vlan filters */
4038 for (i = 0; i < NUM_VFTA_REGISTERS; i++)
4039 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), UINT32_MAX);
4041 /* VFRE: pool enabling for receive - 64 */
4042 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), UINT32_MAX);
4043 if (num_pools == ETH_64_POOLS)
4044 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), UINT32_MAX);
4047 * MPSAR - allow pools to read specific mac addresses
4048 * In this case, all pools should be able to read from mac addr 0
4050 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), UINT32_MAX);
4051 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), UINT32_MAX);
4053 /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
4054 for (i = 0; i < cfg->nb_pool_maps; i++) {
4055 /* set vlan id in VF register and set the valid bit */
4056 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN |
4057 (cfg->pool_map[i].vlan_id & IXGBE_RXD_VLAN_ID_MASK)));
4059 * Put the allowed pools in VFB reg. As we only have 16 or 64
4060 * pools, we only need to use the first half of the register
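 * (The 64-bit pool bitmap spans two 32-bit VLVFB registers: the even
 * register holds pools 0-31 and the odd one pools 32-63, so only the
 * half that is actually in use needs to be written.)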
4063 if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0)
4064 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i * 2),
4065 (cfg->pool_map[i].pools & UINT32_MAX));
4067 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((i * 2 + 1)),
4068 ((cfg->pool_map[i].pools >> 32) & UINT32_MAX));
4072 /* PFDMA Tx General Switch Control: enable VMDQ loopback */
4073 if (cfg->enable_loop_back) {
4074 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
4075 for (i = 0; i < RTE_IXGBE_VMTXSW_REGISTER_COUNT; i++)
4076 IXGBE_WRITE_REG(hw, IXGBE_VMTXSW(i), UINT32_MAX);
4079 IXGBE_WRITE_FLUSH(hw);
4083 * ixgbe_vmdq_tx_hw_configure - Configure general VMDq TX parameters
4084 * @hw: pointer to hardware structure
4087 ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
4092 PMD_INIT_FUNC_TRACE();
4093 /* PF/VF Transmit Enable */
4094 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), UINT32_MAX);
4095 IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), UINT32_MAX);
4097 /* Disable the Tx desc arbiter so that MTQC can be changed */
4098 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
4099 reg |= IXGBE_RTTDCS_ARBDIS;
4100 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
4102 reg = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
4103 IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
4105 /* Disable drop for all queues */
4106 for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
4107 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4108 (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
4110 /* Enable the Tx desc arbiter */
4111 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
4112 reg &= ~IXGBE_RTTDCS_ARBDIS;
4113 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
4115 IXGBE_WRITE_FLUSH(hw);
4118 static int __attribute__((cold))
4119 ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
4121 struct ixgbe_rx_entry *rxe = rxq->sw_ring;
4125 /* Initialize software ring entries */
4126 for (i = 0; i < rxq->nb_rx_desc; i++) {
4127 volatile union ixgbe_adv_rx_desc *rxd;
4128 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
4131 PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
4132 (unsigned) rxq->queue_id);
4136 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
4137 mbuf->port = rxq->port_id;
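/* Point the descriptor at the mbuf data buffer; the header address is
 * cleared below since header split is not used here.
 */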
4140 rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
4141 rxd = &rxq->rx_ring[i];
4142 rxd->read.hdr_addr = 0;
4143 rxd->read.pkt_addr = dma_addr;
4151 ixgbe_config_vf_rss(struct rte_eth_dev *dev)
4153 struct ixgbe_hw *hw;
4156 ixgbe_rss_configure(dev);
4158 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4160 /* MRQC: enable VF RSS */
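/*
 * The MRQE field must match the active SR-IOV pool layout: with 64
 * pools each VF gets 2 RSS queues (VMDQRSS64EN), with 32 pools each VF
 * gets 4 (VMDQRSS32EN).
 */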
4161 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
4162 mrqc &= ~IXGBE_MRQC_MRQE_MASK;
4163 switch (RTE_ETH_DEV_SRIOV(dev).active) {
4165 mrqc |= IXGBE_MRQC_VMDQRSS64EN;
4169 mrqc |= IXGBE_MRQC_VMDQRSS32EN;
4173 PMD_INIT_LOG(ERR, "Invalid pool number in IOV mode with VMDQ RSS");
4177 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
4183 ixgbe_config_vf_default(struct rte_eth_dev *dev)
4185 struct ixgbe_hw *hw =
4186 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4188 switch (RTE_ETH_DEV_SRIOV(dev).active) {
4190 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
4195 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
4196 IXGBE_MRQC_VMDQRT4TCEN);
4200 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
4201 IXGBE_MRQC_VMDQRT8TCEN);
4205 "invalid pool number in IOV mode");
4212 ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
4214 struct ixgbe_hw *hw =
4215 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4217 if (hw->mac.type == ixgbe_mac_82598EB)
4220 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
4222 * SRIOV inactive scheme
4223 * any DCB/RSS w/o VMDq multi-queue setting
4225 switch (dev->data->dev_conf.rxmode.mq_mode) {
4227 case ETH_MQ_RX_DCB_RSS:
4228 case ETH_MQ_RX_VMDQ_RSS:
4229 ixgbe_rss_configure(dev);
4232 case ETH_MQ_RX_VMDQ_DCB:
4233 ixgbe_vmdq_dcb_configure(dev);
4236 case ETH_MQ_RX_VMDQ_ONLY:
4237 ixgbe_vmdq_rx_hw_configure(dev);
4240 case ETH_MQ_RX_NONE:
4242 /* If mq_mode is none, disable RSS. */
4243 ixgbe_rss_disable(dev);
4247 /* SRIOV active scheme
4248 * Support RSS together with SRIOV.
4250 switch (dev->data->dev_conf.rxmode.mq_mode) {
4252 case ETH_MQ_RX_VMDQ_RSS:
4253 ixgbe_config_vf_rss(dev);
4255 case ETH_MQ_RX_VMDQ_DCB:
4257 /* In SRIOV, the configuration is the same as VMDq case */
4258 ixgbe_vmdq_dcb_configure(dev);
4260 /* DCB/RSS together with SRIOV is not supported */
4261 case ETH_MQ_RX_VMDQ_DCB_RSS:
4262 case ETH_MQ_RX_DCB_RSS:
4264 "Could not support DCB/RSS with VMDq & SRIOV");
4267 ixgbe_config_vf_default(dev);
4276 ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
4278 struct ixgbe_hw *hw =
4279 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4283 if (hw->mac.type == ixgbe_mac_82598EB)
4286 /* disable arbiter before setting MTQC */
4287 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
4288 rttdcs |= IXGBE_RTTDCS_ARBDIS;
4289 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
4291 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
4293 * SRIOV inactive scheme
4294 * any DCB w/o VMDq multi-queue setting
4296 if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
4297 ixgbe_vmdq_tx_hw_configure(hw);
4299 mtqc = IXGBE_MTQC_64Q_1PB;
4300 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
4303 switch (RTE_ETH_DEV_SRIOV(dev).active) {
4306 * SRIOV active scheme
4307 * FIXME if support DCB together with VMDq & SRIOV
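 * MTQC must match the Rx pool layout chosen above: e.g. 64 active
 * pools leave 2 Tx queues per VF and 32 pools leave 4 per VF.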
4310 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
4313 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_32VF;
4316 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_RT_ENA |
4320 mtqc = IXGBE_MTQC_64Q_1PB;
4321 PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
4323 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
4326 /* re-enable arbiter */
4327 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
4328 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
4334 * ixgbe_get_rscctl_maxdesc - Calculate the RSCCTL[n].MAXDESC for PF
4336 * Return the RSCCTL[n].MAXDESC for 82599 and x540 PF devices according to the
4337 * spec rev. 3.0 chapter 8.2.3.8.13.
4339 * @pool Memory pool of the Rx queue
4341 static inline uint32_t
4342 ixgbe_get_rscctl_maxdesc(struct rte_mempool *pool)
4344 struct rte_pktmbuf_pool_private *mp_priv = rte_mempool_get_priv(pool);
4346 /* MAXDESC * SRRCTL.BSIZEPKT must not exceed 64 KB minus one */
4349 (mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);
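/*
 * Example (assuming the common 2 KB of usable data room per mbuf):
 * 65535 / 2048 = 31, which selects MAXDESC_16 below.
 */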
4352 return IXGBE_RSCCTL_MAXDESC_16;
4353 else if (maxdesc >= 8)
4354 return IXGBE_RSCCTL_MAXDESC_8;
4355 else if (maxdesc >= 4)
4356 return IXGBE_RSCCTL_MAXDESC_4;
4358 return IXGBE_RSCCTL_MAXDESC_1;
4362 * ixgbe_set_ivar - Setup the correct IVAR register for a particular MSIX
4365 * (Taken from FreeBSD tree)
4366 * (yes this is all very magic and confusing :)
4369 * @entry the register array entry
4370 * @vector the MSIX vector for this queue
4374 ixgbe_set_ivar(struct rte_eth_dev *dev, u8 entry, u8 vector, s8 type)
4376 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4379 vector |= IXGBE_IVAR_ALLOC_VAL;
4381 switch (hw->mac.type) {
4383 case ixgbe_mac_82598EB:
4385 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4387 entry += (type * 64);
4388 index = (entry >> 2) & 0x1F;
4389 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4390 ivar &= ~(0xFF << (8 * (entry & 0x3)));
4391 ivar |= (vector << (8 * (entry & 0x3)));
4392 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4395 case ixgbe_mac_82599EB:
4396 case ixgbe_mac_X540:
4397 if (type == -1) { /* MISC IVAR */
4398 index = (entry & 1) * 8;
4399 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4400 ivar &= ~(0xFF << index);
4401 ivar |= (vector << index);
4402 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4403 } else { /* RX/TX IVARS */
4404 index = (16 * (entry & 1)) + (8 * type);
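/*
 * Each IVAR register on 82599/X540 covers two queues: bits 7:0/15:8
 * hold the Rx/Tx vectors of the even queue and bits 23:16/31:24 those
 * of the odd queue, hence the byte index computed above.
 */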
4405 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4406 ivar &= ~(0xFF << index);
4407 ivar |= (vector << index);
4408 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
4418 void __attribute__((cold))
4419 ixgbe_set_rx_function(struct rte_eth_dev *dev)
4421 uint16_t i, rx_using_sse;
4422 struct ixgbe_adapter *adapter =
4423 (struct ixgbe_adapter *)dev->data->dev_private;
4426 * In order to allow Vector Rx, a few configuration
4427 * conditions must be met and Rx Bulk Allocation must be allowed.
4429 if (ixgbe_rx_vec_dev_conf_condition_check(dev) ||
4430 !adapter->rx_bulk_alloc_allowed) {
4431 PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx "
4432 "preconditions or RTE_IXGBE_INC_VECTOR is "
4434 dev->data->port_id);
4436 adapter->rx_vec_allowed = false;
4440 * Initialize the appropriate LRO callback.
4442 * If all queues satisfy the bulk allocation preconditions
4443 * (adapter->rx_bulk_alloc_allowed is TRUE) then we may use bulk allocation.
4444 * Otherwise use a single allocation version.
4446 if (dev->data->lro) {
4447 if (adapter->rx_bulk_alloc_allowed) {
4448 PMD_INIT_LOG(DEBUG, "LRO is requested. Using a bulk "
4449 "allocation version");
4450 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
4452 PMD_INIT_LOG(DEBUG, "LRO is requested. Using a single "
4453 "allocation version");
4454 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
4456 } else if (dev->data->scattered_rx) {
4458 * Set the non-LRO scattered callback: there are Vector and
4459 * single allocation versions.
4461 if (adapter->rx_vec_allowed) {
4462 PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
4463 "callback (port=%d).",
4464 dev->data->port_id);
4466 dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
4467 } else if (adapter->rx_bulk_alloc_allowed) {
4468 PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
4469 "allocation callback (port=%d).",
4470 dev->data->port_id);
4471 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
4473 PMD_INIT_LOG(DEBUG, "Using Regualr (non-vector, "
4474 "single allocation) "
4475 "Scattered Rx callback "
4477 dev->data->port_id);
4479 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
4482 * Below we set "simple" callbacks according to port/queues parameters.
4483 * If parameters allow we are going to choose between the following
4487 * - Single buffer allocation (the simplest one)
4489 } else if (adapter->rx_vec_allowed) {
4490 PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
4491 "burst size no less than %d (port=%d).",
4492 RTE_IXGBE_DESCS_PER_LOOP,
4493 dev->data->port_id);
4495 dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
4496 } else if (adapter->rx_bulk_alloc_allowed) {
4497 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
4498 "satisfied. Rx Burst Bulk Alloc function "
4499 "will be used on port=%d.",
4500 dev->data->port_id);
4502 dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
4504 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
4505 "satisfied, or Scattered Rx is requested "
4507 dev->data->port_id);
4509 dev->rx_pkt_burst = ixgbe_recv_pkts;
4512 /* Propagate information about RX function choice through all queues. */
4515 (dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec ||
4516 dev->rx_pkt_burst == ixgbe_recv_pkts_vec);
4518 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4519 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
4521 rxq->rx_using_sse = rx_using_sse;
4526 * ixgbe_set_rsc - configure RSC related port HW registers
4528 * Configures the port's RSC related registers according to chapter 4.6.7.2
4529 * of the 82599 Spec (x540 configuration is virtually the same).
4533 * Returns 0 in case of success or a non-zero error code
4536 ixgbe_set_rsc(struct rte_eth_dev *dev)
4538 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
4539 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4540 struct rte_eth_dev_info dev_info = { 0 };
4541 bool rsc_capable = false;
4547 dev->dev_ops->dev_infos_get(dev, &dev_info);
4548 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
4551 if (!rsc_capable && rx_conf->enable_lro) {
4552 PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
4557 /* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
4559 if (!rx_conf->hw_strip_crc && rx_conf->enable_lro) {
4561 * According to chapter 4.6.7.2.1 of the Spec Rev. 3.0,
4562 * RSC configuration requires HW CRC stripping to be
4563 * enabled. If the user requested both HW CRC stripping off
4564 * and RSC on, return an error.
4566 PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC "
4571 /* RFCTL configuration */
4572 rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
4573 if ((rsc_capable) && (rx_conf->enable_lro))
4575 * Since NFS packet coalescing is not supported, clear
4576 * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
4579 rfctl &= ~(IXGBE_RFCTL_RSC_DIS | IXGBE_RFCTL_NFSW_DIS |
4580 IXGBE_RFCTL_NFSR_DIS);
4582 rfctl |= IXGBE_RFCTL_RSC_DIS;
4583 IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
4585 /* If LRO hasn't been requested - we are done here. */
4586 if (!rx_conf->enable_lro)
4589 /* Set RDRXCTL.RSCACKC bit */
4590 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
4591 rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
4592 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
4594 /* Per-queue RSC configuration (chapter 4.6.7.2.2 of 82599 Spec) */
4595 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4596 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
4598 IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxq->reg_idx));
4600 IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxq->reg_idx));
4602 IXGBE_READ_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx));
4604 IXGBE_READ_REG(hw, IXGBE_EITR(rxq->reg_idx));
4607 * ixgbe PMD doesn't support header-split at the moment.
4609 * Following chapter 4.6.7.2.1 of the 82599/x540
4610 * Spec, if RSC is enabled the SRRCTL[n].BSIZEHEADER
4611 * should be configured even if header split is not
4612 * enabled. We will configure it to 128 bytes, following the
4613 * recommendation in the spec.
4615 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
4616 srrctl |= (128 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
4617 IXGBE_SRRCTL_BSIZEHDR_MASK;
4620 * TODO: Consider setting the Receive Descriptor Minimum
4621 * Threshold Size for the RSC case. This is not an obviously
4622 * beneficial option, but one worth considering...
4625 rscctl |= IXGBE_RSCCTL_RSCEN;
4626 rscctl |= ixgbe_get_rscctl_maxdesc(rxq->mb_pool);
4627 psrtype |= IXGBE_PSRTYPE_TCPHDR;
4630 * RSC: Set ITR interval corresponding to 2K ints/s.
4632 * Full-sized RSC aggregations for a 10Gb/s link will
4633 * arrive at about 20K aggregation/s rate.
4635 * A 2K ints/s rate will cause only about 10% of the
4636 * aggregations to be closed due to interrupt timer
4637 * expiration when streaming at wire speed.
4639 * For a sparse streaming case this setting will yield
4640 * at most 500us latency for a single RSC aggregation.
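 * (A 500 us EITR interval corresponds to at most 2000 interrupts/s
 * per queue, i.e. the 2K ints/s mentioned above.)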
4642 eitr &= ~IXGBE_EITR_ITR_INT_MASK;
4643 eitr |= IXGBE_EITR_INTERVAL_US(500) | IXGBE_EITR_CNT_WDIS;
4645 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
4646 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxq->reg_idx), rscctl);
4647 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
4648 IXGBE_WRITE_REG(hw, IXGBE_EITR(rxq->reg_idx), eitr);
4651 * RSC requires the mapping of the queue to the
4654 ixgbe_set_ivar(dev, rxq->reg_idx, i, 0);
4659 PMD_INIT_LOG(DEBUG, "enabling LRO mode");
4665 * Initializes Receive Unit.
4667 int __attribute__((cold))
4668 ixgbe_dev_rx_init(struct rte_eth_dev *dev)
4670 struct ixgbe_hw *hw;
4671 struct ixgbe_rx_queue *rxq;
4682 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
4685 PMD_INIT_FUNC_TRACE();
4686 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4689 * Make sure receives are disabled while setting
4690 * up the RX context (registers, descriptor rings, etc.).
4692 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4693 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
4695 /* Enable receipt of broadcast frames */
4696 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4697 fctrl |= IXGBE_FCTRL_BAM;
4698 fctrl |= IXGBE_FCTRL_DPF;
4699 fctrl |= IXGBE_FCTRL_PMCF;
4700 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4703 * Configure CRC stripping, if any.
4705 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4706 if (rx_conf->hw_strip_crc)
4707 hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
4709 hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
4712 * Configure jumbo frame support, if any.
4714 if (rx_conf->jumbo_frame == 1) {
4715 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
4716 maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
4717 maxfrs &= 0x0000FFFF;
4718 maxfrs |= (rx_conf->max_rx_pkt_len << 16);
4719 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
4721 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
4724 * If loopback mode is configured for 82599, set LPBK bit.
4726 if (hw->mac.type == ixgbe_mac_82599EB &&
4727 dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
4728 hlreg0 |= IXGBE_HLREG0_LPBK;
4730 hlreg0 &= ~IXGBE_HLREG0_LPBK;
4732 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4734 /* Setup RX queues */
4735 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4736 rxq = dev->data->rx_queues[i];
4739 * Reset crc_len in case it was changed after queue setup by a
4740 * call to configure.
4742 rxq->crc_len = rx_conf->hw_strip_crc ? 0 : ETHER_CRC_LEN;
4744 /* Setup the Base and Length of the Rx Descriptor Rings */
4745 bus_addr = rxq->rx_ring_phys_addr;
4746 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rxq->reg_idx),
4747 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4748 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rxq->reg_idx),
4749 (uint32_t)(bus_addr >> 32));
4750 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rxq->reg_idx),
4751 rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
4752 IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
4753 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), 0);
4755 /* Configure the SRRCTL register */
4756 #ifdef RTE_HEADER_SPLIT_ENABLE
4758 * Configure Header Split
4760 if (rx_conf->header_split) {
4761 if (hw->mac.type == ixgbe_mac_82599EB) {
4762 /* Must setup the PSRTYPE register */
4765 psrtype = IXGBE_PSRTYPE_TCPHDR |
4766 IXGBE_PSRTYPE_UDPHDR |
4767 IXGBE_PSRTYPE_IPV4HDR |
4768 IXGBE_PSRTYPE_IPV6HDR;
4769 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
4771 srrctl = ((rx_conf->split_hdr_size <<
4772 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
4773 IXGBE_SRRCTL_BSIZEHDR_MASK);
4774 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
4777 srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
4779 /* Set DROP_EN if packets are to be dropped when no descriptors are available */
4781 srrctl |= IXGBE_SRRCTL_DROP_EN;
4784 * Configure the RX buffer size in the BSIZEPACKET field of
4785 * the SRRCTL register of the queue.
4786 * The value is in 1 KB resolution. Valid values can be from
4789 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
4790 RTE_PKTMBUF_HEADROOM);
4791 srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
4792 IXGBE_SRRCTL_BSIZEPKT_MASK);
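/*
 * Illustrative example (values assumed): a 2 KB usable mbuf data room
 * gives buf_size = 2048, which becomes 2 in the 1 KB-granular BSIZEPKT
 * field after the shift.
 */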
4794 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
4796 buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
4797 IXGBE_SRRCTL_BSIZEPKT_SHIFT);
4799 /* Add dual VLAN tag length (2 * 4 bytes) to support dual VLAN (QinQ) frames */
4800 if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
4801 2 * IXGBE_VLAN_TAG_SIZE > buf_size)
4802 dev->data->scattered_rx = 1;
4805 if (rx_conf->enable_scatter)
4806 dev->data->scattered_rx = 1;
4809 * Device configured with multiple RX queues.
4811 ixgbe_dev_mq_rx_configure(dev);
4814 * Setup the Checksum Register.
4815 * Disable Full-Packet Checksum which is mutually exclusive with RSS.
4816 * Enable IP/L4 checksum computation by hardware if requested to do so.
4818 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
4819 rxcsum |= IXGBE_RXCSUM_PCSD;
4820 if (rx_conf->hw_ip_checksum)
4821 rxcsum |= IXGBE_RXCSUM_IPPCSE;
4823 rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
4825 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
4827 if (hw->mac.type == ixgbe_mac_82599EB ||
4828 hw->mac.type == ixgbe_mac_X540) {
4829 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
4830 if (rx_conf->hw_strip_crc)
4831 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
4833 rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
4834 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
4835 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
4838 rc = ixgbe_set_rsc(dev);
4842 ixgbe_set_rx_function(dev);
4848 * Initializes Transmit Unit.
4850 void __attribute__((cold))
4851 ixgbe_dev_tx_init(struct rte_eth_dev *dev)
4853 struct ixgbe_hw *hw;
4854 struct ixgbe_tx_queue *txq;
4860 PMD_INIT_FUNC_TRACE();
4861 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4863 /* Enable TX CRC (checksum offload requirement) and hw padding
4866 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4867 hlreg0 |= (IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN);
4868 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4870 /* Setup the Base and Length of the Tx Descriptor Rings */
4871 for (i = 0; i < dev->data->nb_tx_queues; i++) {
4872 txq = dev->data->tx_queues[i];
4874 bus_addr = txq->tx_ring_phys_addr;
4875 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(txq->reg_idx),
4876 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4877 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(txq->reg_idx),
4878 (uint32_t)(bus_addr >> 32));
4879 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(txq->reg_idx),
4880 txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
4881 /* Setup the HW Tx Head and TX Tail descriptor pointers */
4882 IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
4883 IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
4886 * Disable Tx Head Writeback RO bit, since this hoses
4887 * bookkeeping if things aren't delivered in order.
4889 switch (hw->mac.type) {
4890 case ixgbe_mac_82598EB:
4891 txctrl = IXGBE_READ_REG(hw,
4892 IXGBE_DCA_TXCTRL(txq->reg_idx));
4893 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4894 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx),
4898 case ixgbe_mac_82599EB:
4899 case ixgbe_mac_X540:
4900 case ixgbe_mac_X550:
4901 case ixgbe_mac_X550EM_x:
4902 case ixgbe_mac_X550EM_a:
4904 txctrl = IXGBE_READ_REG(hw,
4905 IXGBE_DCA_TXCTRL_82599(txq->reg_idx));
4906 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4907 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx),
4913 /* Device configured with multiple TX queues. */
4914 ixgbe_dev_mq_tx_configure(dev);
4918 * Set up link for 82599 loopback mode Tx->Rx.
4920 static inline void __attribute__((cold))
4921 ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
4923 PMD_INIT_FUNC_TRACE();
4925 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
4926 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM) !=
4928 PMD_INIT_LOG(ERR, "Could not enable loopback mode");
4937 IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU);
4938 ixgbe_reset_pipeline_82599(hw);
4940 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
4946 * Start Transmit and Receive Units.
4948 int __attribute__((cold))
4949 ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
4951 struct ixgbe_hw *hw;
4952 struct ixgbe_tx_queue *txq;
4953 struct ixgbe_rx_queue *rxq;
4960 PMD_INIT_FUNC_TRACE();
4961 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4963 for (i = 0; i < dev->data->nb_tx_queues; i++) {
4964 txq = dev->data->tx_queues[i];
4965 /* Setup Transmit Threshold Registers */
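/*
 * TXDCTL packs the prefetch, host and write-back thresholds into
 * bits 6:0, 14:8 and 22:16 respectively, as programmed below.
 */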
4966 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
4967 txdctl |= txq->pthresh & 0x7F;
4968 txdctl |= ((txq->hthresh & 0x7F) << 8);
4969 txdctl |= ((txq->wthresh & 0x7F) << 16);
4970 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
4973 if (hw->mac.type != ixgbe_mac_82598EB) {
4974 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
4975 dmatxctl |= IXGBE_DMATXCTL_TE;
4976 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
4979 for (i = 0; i < dev->data->nb_tx_queues; i++) {
4980 txq = dev->data->tx_queues[i];
4981 if (!txq->tx_deferred_start) {
4982 ret = ixgbe_dev_tx_queue_start(dev, i);
4988 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4989 rxq = dev->data->rx_queues[i];
4990 if (!rxq->rx_deferred_start) {
4991 ret = ixgbe_dev_rx_queue_start(dev, i);
4997 /* Enable Receive engine */
4998 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4999 if (hw->mac.type == ixgbe_mac_82598EB)
5000 rxctrl |= IXGBE_RXCTRL_DMBYPS;
5001 rxctrl |= IXGBE_RXCTRL_RXEN;
5002 hw->mac.ops.enable_rx_dma(hw, rxctrl);
5004 /* If loopback mode is enabled for 82599, set up the link accordingly */
5005 if (hw->mac.type == ixgbe_mac_82599EB &&
5006 dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
5007 ixgbe_setup_loopback_link_82599(hw);
5013 * Start Receive Units for specified queue.
5015 int __attribute__((cold))
5016 ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
5018 struct ixgbe_hw *hw;
5019 struct ixgbe_rx_queue *rxq;
5023 PMD_INIT_FUNC_TRACE();
5024 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5026 if (rx_queue_id < dev->data->nb_rx_queues) {
5027 rxq = dev->data->rx_queues[rx_queue_id];
5029 /* Allocate buffers for descriptor rings */
5030 if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
5031 PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
5035 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5036 rxdctl |= IXGBE_RXDCTL_ENABLE;
5037 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
5039 /* Wait until RX Enable ready */
5040 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5043 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5044 } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
5046 PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d",
5049 IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
5050 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
5051 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
5059 * Stop Receive Units for specified queue.
5061 int __attribute__((cold))
5062 ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
5064 struct ixgbe_hw *hw;
5065 struct ixgbe_adapter *adapter =
5066 (struct ixgbe_adapter *)dev->data->dev_private;
5067 struct ixgbe_rx_queue *rxq;
5071 PMD_INIT_FUNC_TRACE();
5072 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5074 if (rx_queue_id < dev->data->nb_rx_queues) {
5075 rxq = dev->data->rx_queues[rx_queue_id];
5077 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5078 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
5079 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
5081 /* Wait until RX Enable bit clear */
5082 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5085 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5086 } while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE));
5088 PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d",
5091 rte_delay_us(RTE_IXGBE_WAIT_100_US);
5093 ixgbe_rx_queue_release_mbufs(rxq);
5094 ixgbe_reset_rx_queue(adapter, rxq);
5095 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
5104 * Start Transmit Units for specified queue.
5106 int __attribute__((cold))
5107 ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
5109 struct ixgbe_hw *hw;
5110 struct ixgbe_tx_queue *txq;
5114 PMD_INIT_FUNC_TRACE();
5115 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5117 if (tx_queue_id < dev->data->nb_tx_queues) {
5118 txq = dev->data->tx_queues[tx_queue_id];
5119 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
5120 txdctl |= IXGBE_TXDCTL_ENABLE;
5121 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
5123 /* Wait until TX Enable ready */
5124 if (hw->mac.type == ixgbe_mac_82599EB) {
5125 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5128 txdctl = IXGBE_READ_REG(hw,
5129 IXGBE_TXDCTL(txq->reg_idx));
5130 } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
5132 PMD_INIT_LOG(ERR, "Could not enable "
5133 "Tx Queue %d", tx_queue_id);
5136 IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
5137 IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
5138 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
5146 * Stop Transmit Units for specified queue.
5148 int __attribute__((cold))
5149 ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
5151 struct ixgbe_hw *hw;
5152 struct ixgbe_tx_queue *txq;
5154 uint32_t txtdh, txtdt;
5157 PMD_INIT_FUNC_TRACE();
5158 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5160 if (tx_queue_id >= dev->data->nb_tx_queues)
5163 txq = dev->data->tx_queues[tx_queue_id];
5165 /* Wait until TX queue is empty */
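/* The queue is drained once the head pointer (TDH) catches up with the
 * tail pointer (TDT).
 */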
5166 if (hw->mac.type == ixgbe_mac_82599EB) {
5167 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5169 rte_delay_us(RTE_IXGBE_WAIT_100_US);
5170 txtdh = IXGBE_READ_REG(hw,
5171 IXGBE_TDH(txq->reg_idx));
5172 txtdt = IXGBE_READ_REG(hw,
5173 IXGBE_TDT(txq->reg_idx));
5174 } while (--poll_ms && (txtdh != txtdt));
5176 PMD_INIT_LOG(ERR, "Tx Queue %d is not empty "
5177 "when stopping.", tx_queue_id);
5180 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
5181 txdctl &= ~IXGBE_TXDCTL_ENABLE;
5182 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
5184 /* Wait until TX Enable bit clear */
5185 if (hw->mac.type == ixgbe_mac_82599EB) {
5186 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5189 txdctl = IXGBE_READ_REG(hw,
5190 IXGBE_TXDCTL(txq->reg_idx));
5191 } while (--poll_ms && (txdctl & IXGBE_TXDCTL_ENABLE));
5193 PMD_INIT_LOG(ERR, "Could not disable "
5194 "Tx Queue %d", tx_queue_id);
5197 if (txq->ops != NULL) {
5198 txq->ops->release_mbufs(txq);
5199 txq->ops->reset(txq);
5201 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
5207 ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
5208 struct rte_eth_rxq_info *qinfo)
5210 struct ixgbe_rx_queue *rxq;
5212 rxq = dev->data->rx_queues[queue_id];
5214 qinfo->mp = rxq->mb_pool;
5215 qinfo->scattered_rx = dev->data->scattered_rx;
5216 qinfo->nb_desc = rxq->nb_rx_desc;
5218 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
5219 qinfo->conf.rx_drop_en = rxq->drop_en;
5220 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
5224 ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
5225 struct rte_eth_txq_info *qinfo)
5227 struct ixgbe_tx_queue *txq;
5229 txq = dev->data->tx_queues[queue_id];
5231 qinfo->nb_desc = txq->nb_tx_desc;
5233 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
5234 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
5235 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
5237 qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
5238 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
5239 qinfo->conf.txq_flags = txq->txq_flags;
5240 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
5244 * [VF] Initializes Receive Unit.
5246 int __attribute__((cold))
5247 ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
5249 struct ixgbe_hw *hw;
5250 struct ixgbe_rx_queue *rxq;
5252 uint32_t srrctl, psrtype = 0;
5257 PMD_INIT_FUNC_TRACE();
5258 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5260 if (rte_is_power_of_2(dev->data->nb_rx_queues) == 0) {
5261 PMD_INIT_LOG(ERR, "The number of Rx queue invalid, "
5262 "it should be power of 2");
5266 if (dev->data->nb_rx_queues > hw->mac.max_rx_queues) {
5267 PMD_INIT_LOG(ERR, "The number of Rx queue invalid, "
5268 "it should be equal to or less than %d",
5269 hw->mac.max_rx_queues);
5274 * When the VF driver issues an IXGBE_VF_RESET request, the PF driver
5275 * disables the VF receipt of packets if the PF MTU is > 1500.
5276 * This is done to deal with an 82599 limitation that requires
5277 * the PF and all VFs to share the same MTU.
5278 * Then, the PF driver enables the VF receipt of packets again when
5279 * the VF driver issues an IXGBE_VF_SET_LPE request.
5280 * In the meantime, the VF device cannot be used, even if the VF driver
5281 * and the Guest VM network stack are ready to accept packets with a
5282 * size up to the PF MTU.
5283 * As a work-around to this PF behaviour, force the call to
5284 * ixgbevf_rlpml_set_vf even if jumbo frames are not used. This way,
5285 * VF packet reception works in all cases.
5287 ixgbevf_rlpml_set_vf(hw,
5288 (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
5290 /* Setup RX queues */
5291 for (i = 0; i < dev->data->nb_rx_queues; i++) {
5292 rxq = dev->data->rx_queues[i];
5294 /* Allocate buffers for descriptor rings */
5295 ret = ixgbe_alloc_rx_queue_mbufs(rxq);
5299 /* Setup the Base and Length of the Rx Descriptor Rings */
5300 bus_addr = rxq->rx_ring_phys_addr;
5302 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
5303 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
5304 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
5305 (uint32_t)(bus_addr >> 32));
5306 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
5307 rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
5308 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0);
5309 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0);
5312 /* Configure the SRRCTL register */
5313 #ifdef RTE_HEADER_SPLIT_ENABLE
5315 * Configure Header Split
5317 if (dev->data->dev_conf.rxmode.header_split) {
5318 srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
5319 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
5320 IXGBE_SRRCTL_BSIZEHDR_MASK);
5321 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
5324 srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
5326 /* Set DROP_EN if packets are to be dropped when no descriptors are available */
5328 srrctl |= IXGBE_SRRCTL_DROP_EN;
5331 * Configure the RX buffer size in the BSIZEPACKET field of
5332 * the SRRCTL register of the queue.
5333 * The value is in 1 KB resolution. Valid values can be from
5336 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
5337 RTE_PKTMBUF_HEADROOM);
5338 srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
5339 IXGBE_SRRCTL_BSIZEPKT_MASK);
5342 * VF modification to write virtual function SRRCTL register
5344 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), srrctl);
5346 buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
5347 IXGBE_SRRCTL_BSIZEPKT_SHIFT);
5349 if (dev->data->dev_conf.rxmode.enable_scatter ||
5350 /* Add dual VLAN tag length (2 * 4 bytes) to support dual VLAN (QinQ) frames */
5351 (dev->data->dev_conf.rxmode.max_rx_pkt_len +
5352 2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
5353 if (!dev->data->scattered_rx)
5354 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
5355 dev->data->scattered_rx = 1;
5359 #ifdef RTE_HEADER_SPLIT_ENABLE
5360 if (dev->data->dev_conf.rxmode.header_split)
5361 /* Must setup the PSRTYPE register */
5362 psrtype = IXGBE_PSRTYPE_TCPHDR |
5363 IXGBE_PSRTYPE_UDPHDR |
5364 IXGBE_PSRTYPE_IPV4HDR |
5365 IXGBE_PSRTYPE_IPV6HDR;
5368 /* Set RQPL for VF RSS according to max Rx queue */
5369 psrtype |= (dev->data->nb_rx_queues >> 1) <<
5370 IXGBE_PSRTYPE_RQPL_SHIFT;
5371 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
5373 ixgbe_set_rx_function(dev);
5379 * [VF] Initializes Transmit Unit.
5381 void __attribute__((cold))
5382 ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
5384 struct ixgbe_hw *hw;
5385 struct ixgbe_tx_queue *txq;
5390 PMD_INIT_FUNC_TRACE();
5391 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5393 /* Setup the Base and Length of the Tx Descriptor Rings */
5394 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5395 txq = dev->data->tx_queues[i];
5396 bus_addr = txq->tx_ring_phys_addr;
5397 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
5398 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
5399 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i),
5400 (uint32_t)(bus_addr >> 32));
5401 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
5402 txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
5403 /* Setup the HW Tx Head and TX Tail descriptor pointers */
5404 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
5405 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
5408 * Disable Tx Head Writeback RO bit, since this hoses
5409 * bookkeeping if things aren't delivered in order.
5411 txctrl = IXGBE_READ_REG(hw,
5412 IXGBE_VFDCA_TXCTRL(i));
5413 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
5414 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i),
5420 * [VF] Start Transmit and Receive Units.
5422 void __attribute__((cold))
5423 ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
5425 struct ixgbe_hw *hw;
5426 struct ixgbe_tx_queue *txq;
5427 struct ixgbe_rx_queue *rxq;
5433 PMD_INIT_FUNC_TRACE();
5434 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5436 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5437 txq = dev->data->tx_queues[i];
5438 /* Setup Transmit Threshold Registers */
5439 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
5440 txdctl |= txq->pthresh & 0x7F;
5441 txdctl |= ((txq->hthresh & 0x7F) << 8);
5442 txdctl |= ((txq->wthresh & 0x7F) << 16);
5443 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
5446 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5448 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
5449 txdctl |= IXGBE_TXDCTL_ENABLE;
5450 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
5453 /* Wait until TX Enable ready */
5456 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
5457 } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
5459 PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i);
5461 for (i = 0; i < dev->data->nb_rx_queues; i++) {
5463 rxq = dev->data->rx_queues[i];
5465 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
5466 rxdctl |= IXGBE_RXDCTL_ENABLE;
5467 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
5469 /* Wait until RX Enable ready */
5473 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
5474 } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
5476 PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i);
5478 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1);
5483 /* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */
5484 int __attribute__((weak))
5485 ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
5490 uint16_t __attribute__((weak))
5491 ixgbe_recv_pkts_vec(
5492 void __rte_unused *rx_queue,
5493 struct rte_mbuf __rte_unused **rx_pkts,
5494 uint16_t __rte_unused nb_pkts)
5499 uint16_t __attribute__((weak))
5500 ixgbe_recv_scattered_pkts_vec(
5501 void __rte_unused *rx_queue,
5502 struct rte_mbuf __rte_unused **rx_pkts,
5503 uint16_t __rte_unused nb_pkts)
5508 int __attribute__((weak))
5509 ixgbe_rxq_vec_setup(struct ixgbe_rx_queue __rte_unused *rxq)