1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation.
 * Copyright 2014 6WIND S.A.
 */
17 #include <rte_byteorder.h>
18 #include <rte_common.h>
19 #include <rte_cycles.h>
21 #include <rte_debug.h>
22 #include <rte_interrupts.h>
24 #include <rte_memory.h>
25 #include <rte_memzone.h>
26 #include <rte_launch.h>
28 #include <rte_per_lcore.h>
29 #include <rte_lcore.h>
30 #include <rte_atomic.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_mempool.h>
33 #include <rte_malloc.h>
35 #include <rte_ether.h>
36 #include <ethdev_driver.h>
37 #include <rte_security_driver.h>
38 #include <rte_prefetch.h>
42 #include <rte_string_fns.h>
43 #include <rte_errno.h>
48 #include "ixgbe_logs.h"
49 #include "base/ixgbe_api.h"
50 #include "base/ixgbe_vf.h"
51 #include "ixgbe_ethdev.h"
52 #include "base/ixgbe_dcb.h"
53 #include "base/ixgbe_common.h"
54 #include "ixgbe_rxtx.h"
56 #ifdef RTE_LIBRTE_IEEE1588
#define IXGBE_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
#else
#define IXGBE_TX_IEEE1588_TMST 0
#endif
/* Bit mask indicating which bits are required for building the TX context */
62 #define IXGBE_TX_OFFLOAD_MASK ( \
72 PKT_TX_OUTER_IP_CKSUM | \
73 PKT_TX_SEC_OFFLOAD | \
74 IXGBE_TX_IEEE1588_TMST)
76 #define IXGBE_TX_OFFLOAD_NOTSUP_MASK \
77 (PKT_TX_OFFLOAD_MASK ^ IXGBE_TX_OFFLOAD_MASK)
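/*
 * A minimal sketch of how these masks are typically consumed (the actual
 * check lives in ixgbe_prep_pkts() further below): any requested offload
 * flag outside IXGBE_TX_OFFLOAD_MASK is unsupported and the packet is
 * rejected, e.g.
 *
 *	if (m->ol_flags & IXGBE_TX_OFFLOAD_NOTSUP_MASK) {
 *		rte_errno = ENOTSUP;
 *		return i;
 *	}
 */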
80 #define RTE_PMD_USE_PREFETCH
83 #ifdef RTE_PMD_USE_PREFETCH
/*
 * Prefetch a cache line into all cache levels.
 */
#define rte_ixgbe_prefetch(p)   rte_prefetch0(p)
#else
#define rte_ixgbe_prefetch(p)   do {} while (0)
#endif
/*********************************************************************
 *
 *  TX functions
 *
 **********************************************************************/
/*
 * Check for descriptors with their DD bit set and free mbufs.
 * Return the total number of buffers freed.
 */
102 static __rte_always_inline int
103 ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
105 struct ixgbe_tx_entry *txep;
108 struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];
110 /* check DD bit on threshold descriptor */
111 status = txq->tx_ring[txq->tx_next_dd].wb.status;
	if (!(status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD)))
		return 0;
	/*
	 * first buffer to free from S/W ring is at index
	 * tx_next_dd - (tx_rs_thresh-1)
	 */
119 txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]);
121 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
122 /* free buffers one at a time */
123 m = rte_pktmbuf_prefree_seg(txep->mbuf);
126 if (unlikely(m == NULL))
129 if (nb_free >= RTE_IXGBE_TX_MAX_FREE_BUF_SZ ||
130 (nb_free > 0 && m->pool != free[0]->pool)) {
131 rte_mempool_put_bulk(free[0]->pool,
132 (void **)free, nb_free);
140 rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
142 /* buffers were freed, update counters */
143 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
144 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
145 if (txq->tx_next_dd >= txq->nb_tx_desc)
146 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
148 return txq->tx_rs_thresh;
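/*
 * Note on the free path above: freed mbufs are batched in the local
 * 'free' array and returned with a single rte_mempool_put_bulk() call,
 * which amortizes the mempool cost over up to
 * RTE_IXGBE_TX_MAX_FREE_BUF_SZ buffers. The batch is flushed early when
 * an mbuf belongs to a different mempool, since one bulk put can only
 * target a single mempool.
 */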
151 /* Populate 4 descriptors with data from 4 mbufs */
153 tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
155 uint64_t buf_dma_addr;
159 for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
160 buf_dma_addr = rte_mbuf_data_iova(*pkts);
161 pkt_len = (*pkts)->data_len;
163 /* write data to descriptor */
164 txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
166 txdp->read.cmd_type_len =
167 rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
169 txdp->read.olinfo_status =
170 rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
172 rte_prefetch0(&(*pkts)->pool);
176 /* Populate 1 descriptor with data from 1 mbuf */
178 tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
180 uint64_t buf_dma_addr;
183 buf_dma_addr = rte_mbuf_data_iova(*pkts);
184 pkt_len = (*pkts)->data_len;
186 /* write data to descriptor */
187 txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
188 txdp->read.cmd_type_len =
189 rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
190 txdp->read.olinfo_status =
191 rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
192 rte_prefetch0(&(*pkts)->pool);
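/*
 * tx4() and tx1() are only used on the simple TX path, which assumes
 * single-segment mbufs and no offloads (see tx_xmit_pkts() below); that is
 * why cmd_type_len is the constant DCMD_DTYP_FLAGS plus the data length,
 * and olinfo_status only carries the payload length.
 */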
/*
 * Fill H/W descriptor ring with mbuf data.
 * Copy mbuf pointers to the S/W ring.
 */
200 ixgbe_tx_fill_hw_ring(struct ixgbe_tx_queue *txq, struct rte_mbuf **pkts,
203 volatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
204 struct ixgbe_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
205 const int N_PER_LOOP = 4;
206 const int N_PER_LOOP_MASK = N_PER_LOOP-1;
207 int mainpart, leftover;
	/*
	 * Process most of the packets in chunks of N pkts.  Any
	 * leftover packets will get processed one at a time.
	 */
214 mainpart = (nb_pkts & ((uint32_t) ~N_PER_LOOP_MASK));
215 leftover = (nb_pkts & ((uint32_t) N_PER_LOOP_MASK));
216 for (i = 0; i < mainpart; i += N_PER_LOOP) {
217 /* Copy N mbuf pointers to the S/W ring */
218 for (j = 0; j < N_PER_LOOP; ++j) {
219 (txep + i + j)->mbuf = *(pkts + i + j);
221 tx4(txdp + i, pkts + i);
224 if (unlikely(leftover > 0)) {
225 for (i = 0; i < leftover; ++i) {
226 (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
227 tx1(txdp + mainpart + i, pkts + mainpart + i);
232 static inline uint16_t
233 tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
236 struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
237 volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring;
	/*
	 * Begin scanning the H/W ring for done descriptors when the
	 * number of available descriptors drops below tx_free_thresh. For
	 * each done descriptor, free the associated buffer.
	 */
245 if (txq->nb_tx_free < txq->tx_free_thresh)
246 ixgbe_tx_free_bufs(txq);
248 /* Only use descriptors that are available */
249 nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
250 if (unlikely(nb_pkts == 0))
253 /* Use exactly nb_pkts descriptors */
254 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
	/*
	 * At this point, we know there are enough descriptors in the
	 * ring to transmit all the packets. This assumes that each
	 * mbuf contains a single segment, and that no new offloads
	 * are expected, which would require a new context descriptor.
	 */
	/*
	 * See if we're going to wrap-around. If so, handle the top
	 * of the descriptor ring first, then do the bottom. If not,
	 * the processing looks just like the "bottom" part anyway...
	 */
268 if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
269 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
270 ixgbe_tx_fill_hw_ring(txq, tx_pkts, n);
		/*
		 * We know that the last descriptor in the ring will need to
		 * have its RS bit set because tx_rs_thresh has to be
		 * a divisor of the ring size.
		 */
277 tx_r[txq->tx_next_rs].read.cmd_type_len |=
278 rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
279 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
284 /* Fill H/W descriptor ring with mbuf data */
285 ixgbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
286 txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
	/*
	 * Determine if RS bit should be set.
	 * This is what we actually want:
	 *     if ((txq->tx_tail - 1) >= txq->tx_next_rs)
	 * but instead of subtracting 1 and doing >=, we can just do
	 * greater than without subtracting.
	 */
295 if (txq->tx_tail > txq->tx_next_rs) {
296 tx_r[txq->tx_next_rs].read.cmd_type_len |=
297 rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
298 txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
300 if (txq->tx_next_rs >= txq->nb_tx_desc)
301 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
	/*
	 * Check for wrap-around. This would only happen if we used
	 * up to the last descriptor in the ring, no more, no less.
	 */
	if (txq->tx_tail >= txq->nb_tx_desc)
		txq->tx_tail = 0;
	/* update tail pointer */
	rte_wmb();
	IXGBE_PCI_REG_WC_WRITE_RELAXED(txq->tdt_reg_addr, txq->tx_tail);

	return nb_pkts;
}
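/*
 * Hedged note: the rte_wmb() above is what makes the relaxed,
 * write-combining doorbell safe; the device must not observe the new
 * tail value before the descriptor stores issued by
 * ixgbe_tx_fill_hw_ring() have become visible.
 */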
319 ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
324 /* Try to transmit at least chunks of TX_MAX_BURST pkts */
325 if (likely(nb_pkts <= RTE_PMD_IXGBE_TX_MAX_BURST))
326 return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
328 /* transmit more than the max burst, in chunks of TX_MAX_BURST */
333 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_TX_MAX_BURST);
334 ret = tx_xmit_pkts(tx_queue, &(tx_pkts[nb_tx]), n);
335 nb_tx = (uint16_t)(nb_tx + ret);
336 nb_pkts = (uint16_t)(nb_pkts - ret);
345 ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
349 struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
354 num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
355 ret = ixgbe_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
367 ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
368 volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
369 uint64_t ol_flags, union ixgbe_tx_offload tx_offload,
370 __rte_unused uint64_t *mdata)
372 uint32_t type_tucmd_mlhl;
373 uint32_t mss_l4len_idx = 0;
375 uint32_t vlan_macip_lens;
376 union ixgbe_tx_offload tx_offload_mask;
377 uint32_t seqnum_seed = 0;
379 ctx_idx = txq->ctx_curr;
380 tx_offload_mask.data[0] = 0;
381 tx_offload_mask.data[1] = 0;
384 /* Specify which HW CTX to upload. */
385 mss_l4len_idx |= (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT);
387 if (ol_flags & PKT_TX_VLAN_PKT) {
388 tx_offload_mask.vlan_tci |= ~0;
391 /* check if TCP segmentation required for this packet */
392 if (ol_flags & PKT_TX_TCP_SEG) {
393 /* implies IP cksum in IPv4 */
394 if (ol_flags & PKT_TX_IP_CKSUM)
395 type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4 |
396 IXGBE_ADVTXD_TUCMD_L4T_TCP |
397 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
399 type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV6 |
400 IXGBE_ADVTXD_TUCMD_L4T_TCP |
401 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
403 tx_offload_mask.l2_len |= ~0;
404 tx_offload_mask.l3_len |= ~0;
405 tx_offload_mask.l4_len |= ~0;
406 tx_offload_mask.tso_segsz |= ~0;
407 mss_l4len_idx |= tx_offload.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT;
408 mss_l4len_idx |= tx_offload.l4_len << IXGBE_ADVTXD_L4LEN_SHIFT;
409 } else { /* no TSO, check if hardware checksum is needed */
410 if (ol_flags & PKT_TX_IP_CKSUM) {
411 type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4;
412 tx_offload_mask.l2_len |= ~0;
413 tx_offload_mask.l3_len |= ~0;
416 switch (ol_flags & PKT_TX_L4_MASK) {
417 case PKT_TX_UDP_CKSUM:
418 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP |
419 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
420 mss_l4len_idx |= sizeof(struct rte_udp_hdr)
421 << IXGBE_ADVTXD_L4LEN_SHIFT;
422 tx_offload_mask.l2_len |= ~0;
423 tx_offload_mask.l3_len |= ~0;
425 case PKT_TX_TCP_CKSUM:
426 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP |
427 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
428 mss_l4len_idx |= sizeof(struct rte_tcp_hdr)
429 << IXGBE_ADVTXD_L4LEN_SHIFT;
430 tx_offload_mask.l2_len |= ~0;
431 tx_offload_mask.l3_len |= ~0;
433 case PKT_TX_SCTP_CKSUM:
434 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP |
435 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
436 mss_l4len_idx |= sizeof(struct rte_sctp_hdr)
437 << IXGBE_ADVTXD_L4LEN_SHIFT;
438 tx_offload_mask.l2_len |= ~0;
439 tx_offload_mask.l3_len |= ~0;
442 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_RSV |
443 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
448 if (ol_flags & PKT_TX_OUTER_IP_CKSUM) {
449 tx_offload_mask.outer_l2_len |= ~0;
450 tx_offload_mask.outer_l3_len |= ~0;
451 tx_offload_mask.l2_len |= ~0;
452 seqnum_seed |= tx_offload.outer_l3_len
453 << IXGBE_ADVTXD_OUTER_IPLEN;
454 seqnum_seed |= tx_offload.l2_len
455 << IXGBE_ADVTXD_TUNNEL_LEN;
457 #ifdef RTE_LIB_SECURITY
458 if (ol_flags & PKT_TX_SEC_OFFLOAD) {
459 union ixgbe_crypto_tx_desc_md *md =
460 (union ixgbe_crypto_tx_desc_md *)mdata;
462 (IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK & md->sa_idx);
463 type_tucmd_mlhl |= md->enc ?
464 (IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
465 IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN) : 0;
467 (md->pad_len & IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK);
468 tx_offload_mask.sa_idx |= ~0;
469 tx_offload_mask.sec_pad_len |= ~0;
473 txq->ctx_cache[ctx_idx].flags = ol_flags;
474 txq->ctx_cache[ctx_idx].tx_offload.data[0] =
475 tx_offload_mask.data[0] & tx_offload.data[0];
476 txq->ctx_cache[ctx_idx].tx_offload.data[1] =
477 tx_offload_mask.data[1] & tx_offload.data[1];
478 txq->ctx_cache[ctx_idx].tx_offload_mask = tx_offload_mask;
480 ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
481 vlan_macip_lens = tx_offload.l3_len;
482 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
483 vlan_macip_lens |= (tx_offload.outer_l2_len <<
484 IXGBE_ADVTXD_MACLEN_SHIFT);
486 vlan_macip_lens |= (tx_offload.l2_len <<
487 IXGBE_ADVTXD_MACLEN_SHIFT);
488 vlan_macip_lens |= ((uint32_t)tx_offload.vlan_tci << IXGBE_ADVTXD_VLAN_SHIFT);
489 ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
490 ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
491 ctx_txd->seqnum_seed = seqnum_seed;
/*
 * Check which hardware context can be used. Use the existing match
 * or create a new context descriptor.
 */
498 static inline uint32_t
499 what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
500 union ixgbe_tx_offload tx_offload)
	/* Check whether the flags/offloads match the currently used context */
503 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
504 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
505 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
506 & tx_offload.data[0])) &&
507 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
508 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
509 & tx_offload.data[1]))))
510 return txq->ctx_curr;
	/* Otherwise, check whether the other cached context matches */
	txq->ctx_curr ^= 1;
514 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
515 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
516 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
517 & tx_offload.data[0])) &&
518 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
519 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
520 & tx_offload.data[1]))))
521 return txq->ctx_curr;
	/* Mismatch: no cached context matches, a new one must be built */
524 return IXGBE_CTX_NUM;
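/*
 * Caller-side sketch (this is how ixgbe_xmit_pkts() below uses it): the
 * two-slot context cache lets traffic that alternates between two offload
 * layouts run without emitting a context descriptor per packet.
 *
 *	ctx = what_advctx_update(txq, tx_ol_req, tx_offload);
 *	new_ctx = (ctx == IXGBE_CTX_NUM);  // no match: build a new one
 */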
527 static inline uint32_t
528 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
532 if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM)
533 tmp |= IXGBE_ADVTXD_POPTS_TXSM;
534 if (ol_flags & PKT_TX_IP_CKSUM)
535 tmp |= IXGBE_ADVTXD_POPTS_IXSM;
536 if (ol_flags & PKT_TX_TCP_SEG)
537 tmp |= IXGBE_ADVTXD_POPTS_TXSM;
541 static inline uint32_t
542 tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
544 uint32_t cmdtype = 0;
546 if (ol_flags & PKT_TX_VLAN_PKT)
547 cmdtype |= IXGBE_ADVTXD_DCMD_VLE;
548 if (ol_flags & PKT_TX_TCP_SEG)
549 cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
550 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
551 cmdtype |= (1 << IXGBE_ADVTXD_OUTERIPCS_SHIFT);
552 if (ol_flags & PKT_TX_MACSEC)
553 cmdtype |= IXGBE_ADVTXD_MAC_LINKSEC;
557 /* Default RS bit threshold values */
558 #ifndef DEFAULT_TX_RS_THRESH
#define DEFAULT_TX_RS_THRESH   32
#endif
561 #ifndef DEFAULT_TX_FREE_THRESH
#define DEFAULT_TX_FREE_THRESH 32
#endif
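/*
 * Note: tx_rs_thresh must evenly divide the ring size; tx_xmit_pkts()
 * above relies on that so the last descriptor of the ring always falls on
 * an RS-threshold boundary and can unconditionally carry the RS bit.
 */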
565 /* Reset transmit descriptors after they have been used */
567 ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
569 struct ixgbe_tx_entry *sw_ring = txq->sw_ring;
570 volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring;
571 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
572 uint16_t nb_tx_desc = txq->nb_tx_desc;
573 uint16_t desc_to_clean_to;
574 uint16_t nb_tx_to_clean;
577 /* Determine the last descriptor needing to be cleaned */
578 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
579 if (desc_to_clean_to >= nb_tx_desc)
580 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
582 /* Check to make sure the last descriptor to clean is done */
583 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
584 status = txr[desc_to_clean_to].wb.status;
585 if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD))) {
587 "TX descriptor %4u is not done"
588 "(port=%d queue=%d)",
590 txq->port_id, txq->queue_id);
591 /* Failed to clean any descriptors, better luck next time */
595 /* Figure out how many descriptors will be cleaned */
596 if (last_desc_cleaned > desc_to_clean_to)
597 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
600 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
604 "Cleaning %4u TX descriptors: %4u to %4u "
605 "(port=%d queue=%d)",
606 nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
607 txq->port_id, txq->queue_id);
	/*
	 * The last descriptor to clean is done, so that means all the
	 * descriptors from the last descriptor that was cleaned
	 * up to the last descriptor with the RS bit set
	 * are done. Only reset the threshold descriptor.
	 */
615 txr[desc_to_clean_to].wb.status = 0;
617 /* Update the txq to reflect the last descriptor that was cleaned */
618 txq->last_desc_cleaned = desc_to_clean_to;
619 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
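/*
 * Worked example of the arithmetic above (assuming single-segment packets,
 * so that last_id == desc_to_clean_to): with nb_tx_desc = 512,
 * last_desc_cleaned = 500 and tx_rs_thresh = 32, desc_to_clean_to wraps to
 * 532 - 512 = 20, and nb_tx_to_clean = (512 - 500) + 20 = 32, i.e. exactly
 * one RS-threshold batch is reclaimed.
 */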
626 ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
629 struct ixgbe_tx_queue *txq;
630 struct ixgbe_tx_entry *sw_ring;
631 struct ixgbe_tx_entry *txe, *txn;
632 volatile union ixgbe_adv_tx_desc *txr;
633 volatile union ixgbe_adv_tx_desc *txd, *txp;
634 struct rte_mbuf *tx_pkt;
635 struct rte_mbuf *m_seg;
636 uint64_t buf_dma_addr;
637 uint32_t olinfo_status;
638 uint32_t cmd_type_len;
649 union ixgbe_tx_offload tx_offload;
650 #ifdef RTE_LIB_SECURITY
654 tx_offload.data[0] = 0;
655 tx_offload.data[1] = 0;
657 sw_ring = txq->sw_ring;
659 tx_id = txq->tx_tail;
660 txe = &sw_ring[tx_id];
663 /* Determine if the descriptor ring needs to be cleaned. */
664 if (txq->nb_tx_free < txq->tx_free_thresh)
665 ixgbe_xmit_cleanup(txq);
667 rte_prefetch0(&txe->mbuf->pool);
670 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
673 pkt_len = tx_pkt->pkt_len;
		/*
		 * Determine how many (if any) context descriptors
		 * are needed for offload functionality.
		 */
679 ol_flags = tx_pkt->ol_flags;
680 #ifdef RTE_LIB_SECURITY
		use_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD);
#endif
684 /* If hardware offload required */
685 tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK;
687 tx_offload.l2_len = tx_pkt->l2_len;
688 tx_offload.l3_len = tx_pkt->l3_len;
689 tx_offload.l4_len = tx_pkt->l4_len;
690 tx_offload.vlan_tci = tx_pkt->vlan_tci;
691 tx_offload.tso_segsz = tx_pkt->tso_segsz;
692 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
693 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
694 #ifdef RTE_LIB_SECURITY
696 union ixgbe_crypto_tx_desc_md *ipsec_mdata =
697 (union ixgbe_crypto_tx_desc_md *)
698 rte_security_dynfield(tx_pkt);
699 tx_offload.sa_idx = ipsec_mdata->sa_idx;
700 tx_offload.sec_pad_len = ipsec_mdata->pad_len;
		/* Check whether a new context descriptor is required,
		 * or whether an existing one can be reused.
		 */
		ctx = what_advctx_update(txq, tx_ol_req,
			tx_offload);
		/* Only allocate a context descriptor if required */
708 new_ctx = (ctx == IXGBE_CTX_NUM);
		/*
		 * Keep track of how many descriptors are used this loop.
		 * This will always be the number of segments + the number of
		 * Context descriptors required to transmit the packet
		 */
		if (txp != NULL &&
		    nb_used + txq->nb_tx_used >= txq->tx_rs_thresh)
721 /* set RS on the previous packet in the burst */
722 txp->read.cmd_type_len |=
723 rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
		/*
		 * The number of descriptors that must be allocated for a
		 * packet is the number of segments of that packet, plus 1
		 * Context Descriptor for the hardware offload, if any.
		 * Determine the last TX descriptor to allocate in the TX ring
		 * for the packet, starting from the current position (tx_id)
		 * in the ring.
		 */
733 tx_last = (uint16_t) (tx_id + nb_used - 1);
736 if (tx_last >= txq->nb_tx_desc)
737 tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
739 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
740 " tx_first=%u tx_last=%u",
741 (unsigned) txq->port_id,
742 (unsigned) txq->queue_id,
		/*
		 * Make sure there are enough TX descriptors available to
		 * transmit the entire packet.
		 * nb_used better be less than or equal to txq->tx_rs_thresh
		 */
752 if (nb_used > txq->nb_tx_free) {
754 "Not enough free TX descriptors "
755 "nb_used=%4u nb_free=%4u "
756 "(port=%d queue=%d)",
757 nb_used, txq->nb_tx_free,
758 txq->port_id, txq->queue_id);
760 if (ixgbe_xmit_cleanup(txq) != 0) {
761 /* Could not clean any descriptors */
767 /* nb_used better be <= txq->tx_rs_thresh */
768 if (unlikely(nb_used > txq->tx_rs_thresh)) {
770 "The number of descriptors needed to "
771 "transmit the packet exceeds the "
772 "RS bit threshold. This will impact "
774 "nb_used=%4u nb_free=%4u "
776 "(port=%d queue=%d)",
777 nb_used, txq->nb_tx_free,
779 txq->port_id, txq->queue_id);
				/*
				 * Loop here until there are enough TX
				 * descriptors or until the ring cannot be
				 * cleaned any further.
				 */
785 while (nb_used > txq->nb_tx_free) {
786 if (ixgbe_xmit_cleanup(txq) != 0) {
					/*
					 * Could not clean any
					 * descriptors
					 */
		/*
		 * By now there are enough free TX descriptors to transmit
		 * the packet.
		 */

		/*
		 * Set common flags of all TX Data Descriptors.
		 *
		 * The following bits must be set in all Data Descriptors:
		 *   - IXGBE_ADVTXD_DTYP_DATA
		 *   - IXGBE_ADVTXD_DCMD_DEXT
		 *
		 * The following bits must be set in the first Data Descriptor
		 * and are ignored in the other ones:
		 *   - IXGBE_ADVTXD_DCMD_IFCS
		 *   - IXGBE_ADVTXD_MAC_1588
		 *   - IXGBE_ADVTXD_DCMD_VLE
		 *
		 * The following bits must only be set in the last Data
		 * Descriptor:
		 *   - IXGBE_TXD_CMD_EOP
		 *
		 * The following bits can be set in any Data Descriptor, but
		 * are only set in the last Data Descriptor:
		 *   - IXGBE_TXD_CMD_RS
		 */
825 cmd_type_len = IXGBE_ADVTXD_DTYP_DATA |
826 IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
828 #ifdef RTE_LIBRTE_IEEE1588
829 if (ol_flags & PKT_TX_IEEE1588_TMST)
830 cmd_type_len |= IXGBE_ADVTXD_MAC_1588;
836 if (ol_flags & PKT_TX_TCP_SEG) {
			/* when TSO is on, the paylen in the descriptor is
			 * not the packet length but the TCP payload length
			 */
839 pkt_len -= (tx_offload.l2_len +
840 tx_offload.l3_len + tx_offload.l4_len);
		/*
		 * Setup the TX Advanced Context Descriptor if required
		 */
847 volatile struct ixgbe_adv_tx_context_desc *
850 ctx_txd = (volatile struct
851 ixgbe_adv_tx_context_desc *)
854 txn = &sw_ring[txe->next_id];
855 rte_prefetch0(&txn->mbuf->pool);
857 if (txe->mbuf != NULL) {
858 rte_pktmbuf_free_seg(txe->mbuf);
862 ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
864 rte_security_dynfield(tx_pkt));
866 txe->last_id = tx_last;
867 tx_id = txe->next_id;
		/*
		 * Setup the TX Advanced Data Descriptor; this path is taken
		 * whether a new context descriptor was built or an existing
		 * one is being reused.
		 */
876 cmd_type_len |= tx_desc_ol_flags_to_cmdtype(ol_flags);
877 olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
878 olinfo_status |= ctx << IXGBE_ADVTXD_IDX_SHIFT;
881 olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
882 #ifdef RTE_LIB_SECURITY
884 olinfo_status |= IXGBE_ADVTXD_POPTS_IPSEC;
890 txn = &sw_ring[txe->next_id];
891 rte_prefetch0(&txn->mbuf->pool);
893 if (txe->mbuf != NULL)
894 rte_pktmbuf_free_seg(txe->mbuf);
898 * Set up Transmit Data Descriptor.
900 slen = m_seg->data_len;
901 buf_dma_addr = rte_mbuf_data_iova(m_seg);
902 txd->read.buffer_addr =
903 rte_cpu_to_le_64(buf_dma_addr);
904 txd->read.cmd_type_len =
905 rte_cpu_to_le_32(cmd_type_len | slen);
906 txd->read.olinfo_status =
907 rte_cpu_to_le_32(olinfo_status);
908 txe->last_id = tx_last;
909 tx_id = txe->next_id;
912 } while (m_seg != NULL);
		/*
		 * The last packet data descriptor needs End Of Packet (EOP)
		 */
917 cmd_type_len |= IXGBE_TXD_CMD_EOP;
918 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
919 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
921 /* Set RS bit only on threshold packets' last descriptor */
922 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
924 "Setting RS bit on TXD id="
925 "%4u (port=%d queue=%d)",
926 tx_last, txq->port_id, txq->queue_id);
928 cmd_type_len |= IXGBE_TXD_CMD_RS;
930 /* Update txq RS bit counters */
936 txd->read.cmd_type_len |= rte_cpu_to_le_32(cmd_type_len);
940 /* set RS on last packet in the burst */
942 txp->read.cmd_type_len |= rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
	/*
	 * Set the Transmit Descriptor Tail (TDT)
	 */
949 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
950 (unsigned) txq->port_id, (unsigned) txq->queue_id,
951 (unsigned) tx_id, (unsigned) nb_tx);
952 IXGBE_PCI_REG_WC_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
953 txq->tx_tail = tx_id;
/*********************************************************************
 *
 *  TX prep functions
 *
 **********************************************************************/
964 ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
969 struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
971 for (i = 0; i < nb_pkts; i++) {
973 ol_flags = m->ol_flags;
		/*
		 * Check if packet meets requirements for number of segments.
		 *
		 * NOTE: for ixgbe it's always (40 - WTHRESH) for both TSO and
		 *       non-TSO.
		 */
982 if (m->nb_segs > IXGBE_TX_MAX_SEG - txq->wthresh) {
987 if (ol_flags & IXGBE_TX_OFFLOAD_NOTSUP_MASK) {
		/* check the size of the packet */
993 if (m->pkt_len < IXGBE_TX_MIN_PKT_LEN) {
998 #ifdef RTE_ETHDEV_DEBUG_TX
		ret = rte_validate_tx_offload(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
#endif
		ret = rte_net_intel_cksum_prepare(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
	}

	return i;
}
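/*
 * Typical caller pattern (illustrative only; it assumes this function is
 * installed as the device's tx_pkt_prepare callback):
 *
 *	uint16_t n = rte_eth_tx_prepare(port_id, queue_id, pkts, nb);
 *	// if n != nb, pkts[n] was rejected and rte_errno says why
 *	n = rte_eth_tx_burst(port_id, queue_id, pkts, n);
 */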
/*********************************************************************
 *
 *  RX functions
 *
 **********************************************************************/
1021 #define IXGBE_PACKET_TYPE_ETHER 0X00
1022 #define IXGBE_PACKET_TYPE_IPV4 0X01
1023 #define IXGBE_PACKET_TYPE_IPV4_TCP 0X11
1024 #define IXGBE_PACKET_TYPE_IPV4_UDP 0X21
1025 #define IXGBE_PACKET_TYPE_IPV4_SCTP 0X41
1026 #define IXGBE_PACKET_TYPE_IPV4_EXT 0X03
1027 #define IXGBE_PACKET_TYPE_IPV4_EXT_TCP 0X13
1028 #define IXGBE_PACKET_TYPE_IPV4_EXT_UDP 0X23
1029 #define IXGBE_PACKET_TYPE_IPV4_EXT_SCTP 0X43
1030 #define IXGBE_PACKET_TYPE_IPV6 0X04
1031 #define IXGBE_PACKET_TYPE_IPV6_TCP 0X14
1032 #define IXGBE_PACKET_TYPE_IPV6_UDP 0X24
1033 #define IXGBE_PACKET_TYPE_IPV6_SCTP 0X44
1034 #define IXGBE_PACKET_TYPE_IPV6_EXT 0X0C
1035 #define IXGBE_PACKET_TYPE_IPV6_EXT_TCP 0X1C
1036 #define IXGBE_PACKET_TYPE_IPV6_EXT_UDP 0X2C
1037 #define IXGBE_PACKET_TYPE_IPV6_EXT_SCTP 0X4C
1038 #define IXGBE_PACKET_TYPE_IPV4_IPV6 0X05
1039 #define IXGBE_PACKET_TYPE_IPV4_IPV6_TCP 0X15
1040 #define IXGBE_PACKET_TYPE_IPV4_IPV6_UDP 0X25
1041 #define IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP 0X45
1042 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6 0X07
1043 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP 0X17
1044 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP 0X27
1045 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP 0X47
1046 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT 0X0D
1047 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
1048 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
1049 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP 0X4D
1050 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT 0X0F
1051 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP 0X1F
1052 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP 0X2F
1053 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP 0X4F
1055 #define IXGBE_PACKET_TYPE_NVGRE 0X00
1056 #define IXGBE_PACKET_TYPE_NVGRE_IPV4 0X01
1057 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_TCP 0X11
1058 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP 0X21
1059 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP 0X41
1060 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT 0X03
1061 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP 0X13
1062 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP 0X23
1063 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP 0X43
1064 #define IXGBE_PACKET_TYPE_NVGRE_IPV6 0X04
1065 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP 0X14
1066 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP 0X24
1067 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP 0X44
1068 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT 0X0C
1069 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP 0X1C
1070 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP 0X2C
1071 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP 0X4C
1072 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6 0X05
1073 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP 0X15
1074 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP 0X25
1075 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT 0X0D
1076 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP 0X1D
1077 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP 0X2D
1079 #define IXGBE_PACKET_TYPE_VXLAN 0X80
1080 #define IXGBE_PACKET_TYPE_VXLAN_IPV4 0X81
1081 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_TCP 0x91
1082 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP 0xA1
1083 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP 0xC1
1084 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT 0x83
1085 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP 0X93
1086 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP 0XA3
1087 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP 0XC3
1088 #define IXGBE_PACKET_TYPE_VXLAN_IPV6 0X84
1089 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP 0X94
1090 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP 0XA4
1091 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP 0XC4
1092 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT 0X8C
1093 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP 0X9C
1094 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP 0XAC
1095 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP 0XCC
1096 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6 0X85
1097 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP 0X95
1098 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP 0XA5
1099 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT 0X8D
1100 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP 0X9D
1101 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP 0XAD
/*
 * Use two different tables for normal packets and tunnel packets
 * to save space.
 */
1108 ptype_table[IXGBE_PACKET_TYPE_MAX] __rte_cache_aligned = {
1109 [IXGBE_PACKET_TYPE_ETHER] = RTE_PTYPE_L2_ETHER,
1110 [IXGBE_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
1112 [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
1113 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
1114 [IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
1115 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
1116 [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
1117 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
1118 [IXGBE_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
1119 RTE_PTYPE_L3_IPV4_EXT,
1120 [IXGBE_PACKET_TYPE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1121 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
1122 [IXGBE_PACKET_TYPE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1123 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
1124 [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1125 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
1126 [IXGBE_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
1128 [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1129 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
1130 [IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1131 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
1132 [IXGBE_PACKET_TYPE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1133 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP,
1134 [IXGBE_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1135 RTE_PTYPE_L3_IPV6_EXT,
1136 [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1137 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
1138 [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1139 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
1140 [IXGBE_PACKET_TYPE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1141 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_SCTP,
1142 [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
1143 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1144 RTE_PTYPE_INNER_L3_IPV6,
1145 [IXGBE_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1146 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1147 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
1148 [IXGBE_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1149 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1150 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
1151 [IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1152 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1153 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
1154 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6] = RTE_PTYPE_L2_ETHER |
1155 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1156 RTE_PTYPE_INNER_L3_IPV6,
1157 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1158 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1159 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
1160 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1161 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1162 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
1163 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1164 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1165 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
1166 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1167 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1168 RTE_PTYPE_INNER_L3_IPV6_EXT,
1169 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1170 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1171 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
1172 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1173 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1174 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
1175 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1176 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1177 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
1178 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1179 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1180 RTE_PTYPE_INNER_L3_IPV6_EXT,
1181 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1182 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1183 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
1184 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1185 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1186 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
1187 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP] =
1188 RTE_PTYPE_L2_ETHER |
1189 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1190 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
1194 ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX] __rte_cache_aligned = {
1195 [IXGBE_PACKET_TYPE_NVGRE] = RTE_PTYPE_L2_ETHER |
1196 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1197 RTE_PTYPE_INNER_L2_ETHER,
1198 [IXGBE_PACKET_TYPE_NVGRE_IPV4] = RTE_PTYPE_L2_ETHER |
1199 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1200 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1201 [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
1202 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1203 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT,
1204 [IXGBE_PACKET_TYPE_NVGRE_IPV6] = RTE_PTYPE_L2_ETHER |
1205 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1206 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6,
1207 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
1208 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1209 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1210 [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1211 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1212 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT,
1213 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1214 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1215 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1216 [IXGBE_PACKET_TYPE_NVGRE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
1217 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1218 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
1219 RTE_PTYPE_INNER_L4_TCP,
1220 [IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1221 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1222 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
1223 RTE_PTYPE_INNER_L4_TCP,
1224 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1225 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1226 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1227 [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1228 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1229 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
1230 RTE_PTYPE_INNER_L4_TCP,
1231 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP] =
1232 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1233 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
1234 RTE_PTYPE_INNER_L3_IPV4,
1235 [IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
1236 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1237 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
1238 RTE_PTYPE_INNER_L4_UDP,
1239 [IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1240 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1241 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
1242 RTE_PTYPE_INNER_L4_UDP,
1243 [IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1244 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1245 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
1246 RTE_PTYPE_INNER_L4_SCTP,
1247 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1248 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1249 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1250 [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1251 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1252 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
1253 RTE_PTYPE_INNER_L4_UDP,
1254 [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1255 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1256 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
1257 RTE_PTYPE_INNER_L4_SCTP,
1258 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP] =
1259 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1260 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
1261 RTE_PTYPE_INNER_L3_IPV4,
1262 [IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
1263 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1264 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
1265 RTE_PTYPE_INNER_L4_SCTP,
1266 [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1267 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1268 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
1269 RTE_PTYPE_INNER_L4_SCTP,
1270 [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1271 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1272 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
1273 RTE_PTYPE_INNER_L4_TCP,
1274 [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1275 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1276 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
1277 RTE_PTYPE_INNER_L4_UDP,
1279 [IXGBE_PACKET_TYPE_VXLAN] = RTE_PTYPE_L2_ETHER |
1280 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1281 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER,
1282 [IXGBE_PACKET_TYPE_VXLAN_IPV4] = RTE_PTYPE_L2_ETHER |
1283 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1284 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1285 RTE_PTYPE_INNER_L3_IPV4,
1286 [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
1287 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1288 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1289 RTE_PTYPE_INNER_L3_IPV4_EXT,
1290 [IXGBE_PACKET_TYPE_VXLAN_IPV6] = RTE_PTYPE_L2_ETHER |
1291 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1292 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1293 RTE_PTYPE_INNER_L3_IPV6,
1294 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
1295 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1296 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1297 RTE_PTYPE_INNER_L3_IPV4,
1298 [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1299 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1300 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1301 RTE_PTYPE_INNER_L3_IPV6_EXT,
1302 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1303 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1304 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1305 RTE_PTYPE_INNER_L3_IPV4,
1306 [IXGBE_PACKET_TYPE_VXLAN_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
1307 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1308 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1309 RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_TCP,
1310 [IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1311 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1312 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1313 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
1314 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1315 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1316 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1317 RTE_PTYPE_INNER_L3_IPV4,
1318 [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1319 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1320 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1321 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
1322 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP] =
1323 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1324 RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
1325 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1326 [IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
1327 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1328 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1329 RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_UDP,
1330 [IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1331 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1332 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1333 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
1334 [IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1335 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1336 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1337 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
1338 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1339 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1340 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1341 RTE_PTYPE_INNER_L3_IPV4,
1342 [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1343 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1344 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1345 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
1346 [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1347 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1348 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1349 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
1350 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP] =
1351 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1352 RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
1353 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1354 [IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
1355 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1356 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1357 RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_SCTP,
1358 [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1359 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1360 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1361 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_SCTP,
1362 [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1363 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1364 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1365 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP,
1366 [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1367 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1368 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1369 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP,
static int
ixgbe_monitor_callback(const uint64_t value,
		const uint64_t arg[RTE_POWER_MONITOR_OPAQUE_SZ] __rte_unused)
{
1376 const uint64_t m = rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD);
	/*
	 * We expect the DD bit to be set to 1 if this descriptor was
	 * already written back by the hardware.
	 */
	return (value & m) == m ? -1 : 0;
}
int
ixgbe_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
{
1387 volatile union ixgbe_adv_rx_desc *rxdp;
1388 struct ixgbe_rx_queue *rxq = rx_queue;
1391 desc = rxq->rx_tail;
1392 rxdp = &rxq->rx_ring[desc];
1393 /* watch for changes in status bit */
1394 pmc->addr = &rxdp->wb.upper.status_error;
1396 /* comparison callback */
1397 pmc->fn = ixgbe_monitor_callback;
	/* the registers are 32-bit */
	pmc->size = sizeof(uint32_t);

	return 0;
}
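/*
 * Hedged note: with addr, fn and size filled in, this condition is
 * typically consumed by the power-management library (e.g. via
 * rte_power_monitor() on CPUs with monitor/wait support), which sleeps
 * until the watched status word changes; the callback above reports -1
 * once DD is set, so a core never goes to sleep on a descriptor that has
 * already completed.
 */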
/* @note: fix ixgbe_dev_supported_ptypes_get() if anything changes here. */
1406 static inline uint32_t
ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask)
{
1410 if (unlikely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
1411 return RTE_PTYPE_UNKNOWN;
1413 pkt_info = (pkt_info >> IXGBE_PACKET_TYPE_SHIFT) & ptype_mask;
	/* For tunnel packets */
	if (pkt_info & IXGBE_PACKET_TYPE_TUNNEL_BIT) {
		/* Remove the tunnel bit to save space. */
1418 pkt_info &= IXGBE_PACKET_TYPE_MASK_TUNNEL;
1419 return ptype_table_tn[pkt_info];
	/*
	 * For x550, if it's not a tunnel packet, the tunnel-type bits
	 * should be set to 0. Reuse the 82599 mask.
	 */
1427 pkt_info &= IXGBE_PACKET_TYPE_MASK_82599;
	return ptype_table[pkt_info];
}
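/*
 * Example: a plain (non-tunneled) IPv4/TCP packet yields pkt_info 0x11
 * after the shift and mask, i.e. IXGBE_PACKET_TYPE_IPV4_TCP, which the
 * table above maps to
 * RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP.
 */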
1432 static inline uint64_t
ixgbe_rxd_pkt_info_to_pkt_flags(uint16_t pkt_info)
{
1435 static uint64_t ip_rss_types_map[16] __rte_cache_aligned = {
1436 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
1437 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
1438 PKT_RX_RSS_HASH, 0, 0, 0,
		0, 0, 0, PKT_RX_FDIR,
	};
1441 #ifdef RTE_LIBRTE_IEEE1588
1442 static uint64_t ip_pkt_etqf_map[8] = {
		0, 0, 0, PKT_RX_IEEE1588_PTP,
		0, 0, 0, 0,
	};
1447 if (likely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
1448 return ip_pkt_etqf_map[(pkt_info >> 4) & 0X07] |
1449 ip_rss_types_map[pkt_info & 0XF];
	else
		return ip_rss_types_map[pkt_info & 0XF];
#else
	return ip_rss_types_map[pkt_info & 0XF];
#endif
}
1457 static inline uint64_t
rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags)
{
	uint64_t pkt_flags;
	/*
	 * Check only whether a VLAN is present.
	 * Do not check whether the L3/L4 rx checksum was done by the NIC,
	 * as that can be found from the rte_eth_rxmode.offloads flag.
	 */
1467 pkt_flags = (rx_status & IXGBE_RXD_STAT_VP) ? vlan_flags : 0;
1469 #ifdef RTE_LIBRTE_IEEE1588
	if (rx_status & IXGBE_RXD_STAT_TMST)
		pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
#endif

	return pkt_flags;
}
1476 static inline uint64_t
1477 rx_desc_error_to_pkt_flags(uint32_t rx_status, uint16_t pkt_info,
		uint8_t rx_udp_csum_zero_err)
{
	uint64_t pkt_flags;

	/*
	 * Bit 31: IPE, IPv4 checksum error
	 * Bit 30: L4I, L4 integrity error
	 */
1486 static uint64_t error_to_pkt_flags_map[4] = {
1487 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
1488 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
1489 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD,
1490 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
1492 pkt_flags = error_to_pkt_flags_map[(rx_status >>
1493 IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK];
1495 /* Mask out the bad UDP checksum error if the hardware has UDP zero
1496 * checksum error issue, so that the software application will then
	 * have to recompute the checksum itself if needed.
	 */
1499 if ((rx_status & IXGBE_RXDADV_ERR_TCPE) &&
1500 (pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) &&
1501 rx_udp_csum_zero_err)
1502 pkt_flags &= ~PKT_RX_L4_CKSUM_BAD;
1504 if ((rx_status & IXGBE_RXD_STAT_OUTERIPCS) &&
1505 (rx_status & IXGBE_RXDADV_ERR_OUTERIPER)) {
1506 pkt_flags |= PKT_RX_OUTER_IP_CKSUM_BAD;
1509 #ifdef RTE_LIB_SECURITY
1510 if (rx_status & IXGBE_RXD_STAT_SECP) {
1511 pkt_flags |= PKT_RX_SEC_OFFLOAD;
1512 if (rx_status & IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG)
			pkt_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
	}
#endif

	return pkt_flags;
}
/*
 * LOOK_AHEAD defines how many desc statuses to check beyond the
 * current descriptor.
 * It must be a pound define for optimal performance.
 * Do not change the value of LOOK_AHEAD, as the ixgbe_rx_scan_hw_ring
 * function only works with LOOK_AHEAD=8.
 */
1527 #define LOOK_AHEAD 8
1528 #if (LOOK_AHEAD != 8)
#error "PMD IXGBE: LOOK_AHEAD must be 8\n"
#endif

static inline uint16_t
ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
{
1534 volatile union ixgbe_adv_rx_desc *rxdp;
1535 struct ixgbe_rx_entry *rxep;
1536 struct rte_mbuf *mb;
1540 uint32_t s[LOOK_AHEAD];
1541 uint32_t pkt_info[LOOK_AHEAD];
1542 int i, j, nb_rx = 0;
1544 uint64_t vlan_flags = rxq->vlan_flags;
1546 /* get references to current descriptor and S/W ring entry */
1547 rxdp = &rxq->rx_ring[rxq->rx_tail];
1548 rxep = &rxq->sw_ring[rxq->rx_tail];
1550 status = rxdp->wb.upper.status_error;
1551 /* check to make sure there is at least 1 packet to receive */
	if (!(status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
		return 0;
	/*
	 * Scan LOOK_AHEAD descriptors at a time to determine which descriptors
	 * reference packets that are ready to be received.
	 */
1559 for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST;
1560 i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD) {
1561 /* Read desc statuses backwards to avoid race condition */
1562 for (j = 0; j < LOOK_AHEAD; j++)
1563 s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error);
1567 /* Compute how many status bits were set */
1568 for (nb_dd = 0; nb_dd < LOOK_AHEAD &&
1569 (s[nb_dd] & IXGBE_RXDADV_STAT_DD); nb_dd++)
1572 for (j = 0; j < nb_dd; j++)
1573 pkt_info[j] = rte_le_to_cpu_32(rxdp[j].wb.lower.
1578 /* Translate descriptor info to mbuf format */
1579 for (j = 0; j < nb_dd; ++j) {
1581 pkt_len = rte_le_to_cpu_16(rxdp[j].wb.upper.length) -
1583 mb->data_len = pkt_len;
1584 mb->pkt_len = pkt_len;
1585 mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
1587 /* convert descriptor fields to rte mbuf flags */
1588 pkt_flags = rx_desc_status_to_pkt_flags(s[j],
1590 pkt_flags |= rx_desc_error_to_pkt_flags(s[j],
1591 (uint16_t)pkt_info[j],
1592 rxq->rx_udp_csum_zero_err);
1593 pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags
1594 ((uint16_t)pkt_info[j]);
1595 mb->ol_flags = pkt_flags;
1597 ixgbe_rxd_pkt_info_to_pkt_type
1598 (pkt_info[j], rxq->pkt_type_mask);
1600 if (likely(pkt_flags & PKT_RX_RSS_HASH))
1601 mb->hash.rss = rte_le_to_cpu_32(
1602 rxdp[j].wb.lower.hi_dword.rss);
1603 else if (pkt_flags & PKT_RX_FDIR) {
1604 mb->hash.fdir.hash = rte_le_to_cpu_16(
1605 rxdp[j].wb.lower.hi_dword.csum_ip.csum) &
1606 IXGBE_ATR_HASH_MASK;
1607 mb->hash.fdir.id = rte_le_to_cpu_16(
1608 rxdp[j].wb.lower.hi_dword.csum_ip.ip_id);
1612 /* Move mbuf pointers from the S/W ring to the stage */
1613 for (j = 0; j < LOOK_AHEAD; ++j) {
1614 rxq->rx_stage[i + j] = rxep[j].mbuf;
		/* stop if not all of the scanned descriptors were done */
		if (nb_dd != LOOK_AHEAD)
			break;
1622 /* clear software ring entries so we can cleanup correctly */
1623 for (i = 0; i < nb_rx; ++i) {
1624 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
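/*
 * Summary of the scan above: statuses are read in LOOK_AHEAD-sized
 * batches and only a leading run of descriptors with DD set is accepted,
 * so packets are returned strictly in ring order; the consumed S/W ring
 * slots are NULLed so a later queue release cannot double-free the mbufs
 * now parked in rx_stage[].
 */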
1632 ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf)
1634 volatile union ixgbe_adv_rx_desc *rxdp;
1635 struct ixgbe_rx_entry *rxep;
1636 struct rte_mbuf *mb;
1641 /* allocate buffers in bulk directly into the S/W ring */
1642 alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
1643 rxep = &rxq->sw_ring[alloc_idx];
1644 diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
1645 rxq->rx_free_thresh);
1646 if (unlikely(diag != 0))
1649 rxdp = &rxq->rx_ring[alloc_idx];
1650 for (i = 0; i < rxq->rx_free_thresh; ++i) {
1651 /* populate the static rte mbuf fields */
1654 mb->port = rxq->port_id;
1657 rte_mbuf_refcnt_set(mb, 1);
1658 mb->data_off = RTE_PKTMBUF_HEADROOM;
1660 /* populate the descriptors */
1661 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1662 rxdp[i].read.hdr_addr = 0;
1663 rxdp[i].read.pkt_addr = dma_addr;
1666 /* update state of internal queue structure */
1667 rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh;
1668 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1669 rxq->rx_free_trigger = rxq->rx_free_thresh - 1;
1675 static inline uint16_t
1676 ixgbe_rx_fill_from_stage(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
1679 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1682 /* how many packets are ready to return? */
1683 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1685 /* copy mbuf pointers to the application's packet list */
1686 for (i = 0; i < nb_pkts; ++i)
1687 rx_pkts[i] = stage[i];
1689 /* update internal queue state */
1690 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1691 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
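/*
 * The bulk-alloc receive path is thus two-stage: ixgbe_rx_scan_hw_ring()
 * parks ready mbufs in rx_stage[], and this helper hands them out in
 * nb_pkts-sized slices, letting one H/W ring scan satisfy several small
 * application bursts.
 */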
1696 static inline uint16_t
1697 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1700 struct ixgbe_rx_queue *rxq = (struct ixgbe_rx_queue *)rx_queue;
1703 /* Any previously recv'd pkts will be returned from the Rx stage */
1704 if (rxq->rx_nb_avail)
1705 return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1707 /* Scan the H/W ring for packets to receive */
1708 nb_rx = (uint16_t)ixgbe_rx_scan_hw_ring(rxq);
1710 /* update internal queue state */
1711 rxq->rx_next_avail = 0;
1712 rxq->rx_nb_avail = nb_rx;
1713 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1715 /* if required, allocate new buffers to replenish descriptors */
1716 if (rxq->rx_tail > rxq->rx_free_trigger) {
1717 uint16_t cur_free_trigger = rxq->rx_free_trigger;
1719 if (ixgbe_rx_alloc_bufs(rxq, true) != 0) {
1722 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1723 "queue_id=%u", (unsigned) rxq->port_id,
1724 (unsigned) rxq->queue_id);
1726 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
1727 rxq->rx_free_thresh;
			/*
			 * Need to rewind any previous receives if we cannot
			 * allocate new buffers to replenish the old ones.
			 */
1733 rxq->rx_nb_avail = 0;
1734 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1735 for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j)
1736 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1741 /* update tail pointer */
		rte_wmb();
		IXGBE_PCI_REG_WC_WRITE_RELAXED(rxq->rdt_reg_addr,
					       cur_free_trigger);
	}
	if (rxq->rx_tail >= rxq->nb_rx_desc)
		rxq->rx_tail = 0;
1750 /* received any packets this loop? */
1751 if (rxq->rx_nb_avail)
1752 return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1757 /* split requests into chunks of size RTE_PMD_IXGBE_RX_MAX_BURST */
1759 ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1764 if (unlikely(nb_pkts == 0))
1767 if (likely(nb_pkts <= RTE_PMD_IXGBE_RX_MAX_BURST))
1768 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1770 /* request is relatively large, chunk it up */
1775 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_RX_MAX_BURST);
1776 ret = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1777 nb_rx = (uint16_t)(nb_rx + ret);
1778 nb_pkts = (uint16_t)(nb_pkts - ret);
1787 ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1790 struct ixgbe_rx_queue *rxq;
1791 volatile union ixgbe_adv_rx_desc *rx_ring;
1792 volatile union ixgbe_adv_rx_desc *rxdp;
1793 struct ixgbe_rx_entry *sw_ring;
1794 struct ixgbe_rx_entry *rxe;
1795 struct rte_mbuf *rxm;
1796 struct rte_mbuf *nmb;
1797 union ixgbe_adv_rx_desc rxd;
1806 uint64_t vlan_flags;
1811 rx_id = rxq->rx_tail;
1812 rx_ring = rxq->rx_ring;
1813 sw_ring = rxq->sw_ring;
1814 vlan_flags = rxq->vlan_flags;
1815 while (nb_rx < nb_pkts) {
		/*
		 * The order of operations here is important as the DD status
		 * bit must not be read after any other descriptor fields.
		 * rx_ring and rxdp are pointing to volatile data so the order
		 * of accesses cannot be reordered by the compiler. If they were
		 * not volatile, they could be reordered which could lead to
		 * using invalid descriptor fields when read from rxd.
		 */
1824 rxdp = &rx_ring[rx_id];
1825 staterr = rxdp->wb.upper.status_error;
1826 if (!(staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
		/*
		 * If the IXGBE_RXDADV_STAT_EOP flag is not set, the RX packet
1834 * is likely to be invalid and to be dropped by the various
1835 * validation checks performed by the network stack.
1837 * Allocate a new mbuf to replenish the RX ring descriptor.
1838 * If the allocation fails:
1839 * - arrange for that RX descriptor to be the first one
1840 * being parsed the next time the receive function is
1841 * invoked [on the same queue].
1843 * - Stop parsing the RX ring and return immediately.
		 * This policy does not drop the packet received in the RX
		 * descriptor for which the allocation of a new mbuf failed.
		 * Thus, it allows that packet to be later retrieved if
		 * mbufs have been freed in the meantime.
		 * As a side effect, holding RX descriptors instead of
		 * systematically giving them back to the NIC may lead to
		 * RX ring exhaustion situations.
		 * However, the NIC can gracefully prevent such situations
		 * from happening by sending specific "back-pressure" flow
		 * control frames to its peer(s).
		 */
1856 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1857 "ext_err_stat=0x%08x pkt_len=%u",
1858 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1859 (unsigned) rx_id, (unsigned) staterr,
1860 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
		nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
		if (nmb == NULL) {
1864 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1865 "queue_id=%u", (unsigned) rxq->port_id,
1866 (unsigned) rxq->queue_id);
1867 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
		rxe = &sw_ring[rx_id];
		rx_id++;
		if (rx_id == rxq->nb_rx_desc)
			rx_id = 0;
1877 /* Prefetch next mbuf while processing current one. */
1878 rte_ixgbe_prefetch(sw_ring[rx_id].mbuf);
		/*
		 * When the next RX descriptor is on a cache-line boundary,
		 * prefetch the next 4 RX descriptors and the next 8 pointers
		 * to mbufs.
		 */
1886 rte_ixgbe_prefetch(&rx_ring[rx_id]);
1887 rte_ixgbe_prefetch(&sw_ring[rx_id]);
1893 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1894 rxdp->read.hdr_addr = 0;
1895 rxdp->read.pkt_addr = dma_addr;
1898 * Initialize the returned mbuf.
1899 * 1) setup generic mbuf fields:
1900 * - number of segments,
1903 * - RX port identifier.
1904 * 2) integrate hardware offload data, if any:
1905 * - RSS flag & hash,
1906 * - IP checksum flag,
1907 * - VLAN TCI, if any,
1910 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
1912 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1913 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
1916 rxm->pkt_len = pkt_len;
1917 rxm->data_len = pkt_len;
1918 rxm->port = rxq->port_id;
1920 pkt_info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1921 /* Only valid if PKT_RX_VLAN set in pkt_flags */
1922 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1924 pkt_flags = rx_desc_status_to_pkt_flags(staterr, vlan_flags);
1925 pkt_flags = pkt_flags |
1926 rx_desc_error_to_pkt_flags(staterr, (uint16_t)pkt_info,
1927 rxq->rx_udp_csum_zero_err);
1928 pkt_flags = pkt_flags |
1929 ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info);
1930 rxm->ol_flags = pkt_flags;
1932 ixgbe_rxd_pkt_info_to_pkt_type(pkt_info,
1933 rxq->pkt_type_mask);
1935 if (likely(pkt_flags & PKT_RX_RSS_HASH))
1936 rxm->hash.rss = rte_le_to_cpu_32(
1937 rxd.wb.lower.hi_dword.rss);
1938 else if (pkt_flags & PKT_RX_FDIR) {
1939 rxm->hash.fdir.hash = rte_le_to_cpu_16(
1940 rxd.wb.lower.hi_dword.csum_ip.csum) &
1941 IXGBE_ATR_HASH_MASK;
1942 rxm->hash.fdir.id = rte_le_to_cpu_16(
1943 rxd.wb.lower.hi_dword.csum_ip.ip_id);
1946 * Store the mbuf address into the next entry of the array
1947 * of returned packets.
1949 rx_pkts[nb_rx++] = rxm;
1951 rxq->rx_tail = rx_id;
1954 * If the number of free RX descriptors is greater than the RX free
1955 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1957 * Update the RDT with the value of the last processed RX descriptor
1958 * minus 1, to guarantee that the RDT register is never equal to the
1959 * RDH register, which creates a "full" ring situation from the
1960 * hardware point of view...
1962 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1963 if (nb_hold > rxq->rx_free_thresh) {
1964 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1965 "nb_hold=%u nb_rx=%u",
1966 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1967 (unsigned) rx_id, (unsigned) nb_hold,
1969 rx_id = (uint16_t) ((rx_id == 0) ?
1970 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1971 IXGBE_PCI_REG_WC_WRITE(rxq->rdt_reg_addr, rx_id);
1974 rxq->nb_rx_hold = nb_hold;
1979 * Detect an RSC descriptor.
1981 static inline uint32_t
1982 ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
1984 return (rte_le_to_cpu_32(rx->wb.lower.lo_dword.data) &
1985 IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
1989 * ixgbe_fill_cluster_head_buf - fill the first mbuf of the returned packet
1991 * Fill the following info in the HEAD buffer of the Rx cluster:
1992 * - RX port identifier
1993 * - hardware offload data, if any:
1995 * - IP checksum flag
1996 * - VLAN TCI, if any
1998 * @head HEAD of the packet cluster
1999 * @desc HW descriptor to get data from
2000 * @rxq Pointer to the Rx queue
2003 ixgbe_fill_cluster_head_buf(
2004 struct rte_mbuf *head,
2005 union ixgbe_adv_rx_desc *desc,
2006 struct ixgbe_rx_queue *rxq,
2012 head->port = rxq->port_id;
2014 /* The vlan_tci field is only valid when PKT_RX_VLAN is
2015 * set in the pkt_flags field.
2017 head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
2018 pkt_info = rte_le_to_cpu_32(desc->wb.lower.lo_dword.data);
2019 pkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags);
2020 pkt_flags |= rx_desc_error_to_pkt_flags(staterr, (uint16_t)pkt_info,
2021 rxq->rx_udp_csum_zero_err);
2022 pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info);
2023 head->ol_flags = pkt_flags;
2025 ixgbe_rxd_pkt_info_to_pkt_type(pkt_info, rxq->pkt_type_mask);
2027 if (likely(pkt_flags & PKT_RX_RSS_HASH))
2028 head->hash.rss = rte_le_to_cpu_32(desc->wb.lower.hi_dword.rss);
2029 else if (pkt_flags & PKT_RX_FDIR) {
2030 head->hash.fdir.hash =
2031 rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.csum)
2032 & IXGBE_ATR_HASH_MASK;
2033 head->hash.fdir.id =
2034 rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.ip_id);
2039 * ixgbe_recv_pkts_lro - receive handler for the LRO case.
2041 * @rx_queue Rx queue handle
2042 * @rx_pkts table of received packets
2043 * @nb_pkts size of rx_pkts table
2044 * @bulk_alloc if TRUE bulk allocation is used for HW ring refilling
2046 * Handles the Rx HW ring completions when the RSC feature is configured. Uses an
2047 * additional ring of ixgbe_rsc_entry's that will hold the relevant RSC info.
2049 * We use the same logic as in Linux and in FreeBSD ixgbe drivers:
2050 * 1) When non-EOP RSC completion arrives:
2051 * a) Update the HEAD of the current RSC aggregation cluster with the new
2052 * segment's data length.
2053 * b) Set the "next" pointer of the current segment to point to the segment
2054 * at the NEXTP index.
2055 * c) Pass the HEAD of RSC aggregation cluster on to the next NEXTP entry
2056 * in the sw_rsc_ring.
2057 * 2) When EOP arrives we just update the cluster's total length and offload
2058 * flags and deliver the cluster up to the upper layers. In our case - put it
2059 * in the rx_pkts table.
2061 * Returns the number of received packets/clusters (according to the "bulk
2062 * receive" interface).
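/*
 * Illustration (descriptor indices assumed): if descriptors 5 -> 9 -> 12
 * form one RSC aggregation, then after step 1 has run twice the segments
 * are chained as
 *
 *   sw_ring[5].mbuf -> sw_ring[9].mbuf -> sw_ring[12].mbuf
 *
 * and sw_sc_ring[12].fbuf still points at sw_ring[5].mbuf (the HEAD), so
 * the cluster can be completed when the EOP completion lands on index 12.
 */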
2064 static inline uint16_t
2065 ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
2068 struct ixgbe_rx_queue *rxq = rx_queue;
2069 volatile union ixgbe_adv_rx_desc *rx_ring = rxq->rx_ring;
2070 struct ixgbe_rx_entry *sw_ring = rxq->sw_ring;
2071 struct ixgbe_scattered_rx_entry *sw_sc_ring = rxq->sw_sc_ring;
2072 uint16_t rx_id = rxq->rx_tail;
2074 uint16_t nb_hold = rxq->nb_rx_hold;
2075 uint16_t prev_id = rxq->rx_tail;
2077 while (nb_rx < nb_pkts) {
2079 struct ixgbe_rx_entry *rxe;
2080 struct ixgbe_scattered_rx_entry *sc_entry;
2081 struct ixgbe_scattered_rx_entry *next_sc_entry = NULL;
2082 struct ixgbe_rx_entry *next_rxe = NULL;
2083 struct rte_mbuf *first_seg;
2084 struct rte_mbuf *rxm;
2085 struct rte_mbuf *nmb = NULL;
2086 union ixgbe_adv_rx_desc rxd;
2089 volatile union ixgbe_adv_rx_desc *rxdp;
2094 * The code in this whole file uses the volatile pointer to
2095 * ensure the read ordering of the status and the rest of the
2096 * descriptor fields (on the compiler level only!!!). This is so
2097 * UGLY - why not just use a compiler barrier instead? DPDK
2098 * even has the rte_compiler_barrier() for that.
2100 * But most importantly this is just wrong because it doesn't
2101 * ensure memory ordering in the general case at all. For
2102 * instance, DPDK is supposed to work on Power CPUs, where
2103 * a compiler barrier may simply not be enough!
2105 * I tried to write only this function properly to have a
2106 * starting point (as a part of an LRO/RSC series) but the
2107 * compiler cursed at me when I tried to cast away the
2108 * "volatile" from rx_ring (yes, it's volatile too!!!). So, I'm
2109 * keeping it the way it is for now.
2111 * The code in this file is broken in so many other places and
2112 * will just not work on a big-endian CPU anyway; therefore the
2113 * lines below will have to be revisited together with the rest
2117 * - Get rid of "volatile" and let the compiler do its job.
2118 * - Use the proper memory barrier (rte_rmb()) to ensure the
2119 * memory ordering below.
2121 rxdp = &rx_ring[rx_id];
2122 staterr = rte_le_to_cpu_32(rxdp->wb.upper.status_error);
2124 if (!(staterr & IXGBE_RXDADV_STAT_DD))
2129 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
2130 "staterr=0x%x data_len=%u",
2131 rxq->port_id, rxq->queue_id, rx_id, staterr,
2132 rte_le_to_cpu_16(rxd.wb.upper.length));
2135 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
2137 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed "
2138 "port_id=%u queue_id=%u",
2139 rxq->port_id, rxq->queue_id);
2141 rte_eth_devices[rxq->port_id].data->
2142 rx_mbuf_alloc_failed++;
2145 } else if (nb_hold > rxq->rx_free_thresh) {
2146 uint16_t next_rdt = rxq->rx_free_trigger;
2148 if (!ixgbe_rx_alloc_bufs(rxq, false)) {
2150 IXGBE_PCI_REG_WC_WRITE_RELAXED(
2153 nb_hold -= rxq->rx_free_thresh;
2155 PMD_RX_LOG(DEBUG, "RX bulk alloc failed "
2156 "port_id=%u queue_id=%u",
2157 rxq->port_id, rxq->queue_id);
2159 rte_eth_devices[rxq->port_id].data->
2160 rx_mbuf_alloc_failed++;
2166 rxe = &sw_ring[rx_id];
2167 eop = staterr & IXGBE_RXDADV_STAT_EOP;
2169 next_id = rx_id + 1;
2170 if (next_id == rxq->nb_rx_desc)
2173 /* Prefetch next mbuf while processing current one. */
2174 rte_ixgbe_prefetch(sw_ring[next_id].mbuf);
2177 * When next RX descriptor is on a cache-line boundary,
2178 * prefetch the next 4 RX descriptors and the next 4 pointers
2181 if ((next_id & 0x3) == 0) {
2182 rte_ixgbe_prefetch(&rx_ring[next_id]);
2183 rte_ixgbe_prefetch(&sw_ring[next_id]);
2190 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
2192 * Update RX descriptor with the physical address of the
2193 * new data buffer of the newly allocated mbuf.
2197 rxm->data_off = RTE_PKTMBUF_HEADROOM;
2198 rxdp->read.hdr_addr = 0;
2199 rxdp->read.pkt_addr = dma;
2204 * Set data length & data buffer address of mbuf.
2206 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
2207 rxm->data_len = data_len;
2212 * Get next descriptor index:
2213 * - For RSC it's in the NEXTP field.
2214 * - For a scattered packet - it's just a following
2217 if (ixgbe_rsc_count(&rxd))
2219 (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
2220 IXGBE_RXDADV_NEXTP_SHIFT;
2224 next_sc_entry = &sw_sc_ring[nextp_id];
2225 next_rxe = &sw_ring[nextp_id];
2226 rte_ixgbe_prefetch(next_rxe);
2229 sc_entry = &sw_sc_ring[rx_id];
2230 first_seg = sc_entry->fbuf;
2231 sc_entry->fbuf = NULL;
2234 * If this is the first buffer of the received packet,
2235 * set the pointer to the first mbuf of the packet and
2236 * initialize its context.
2237 * Otherwise, update the total length and the number of segments
2238 * of the current scattered packet, and update the pointer to
2239 * the last mbuf of the current packet.
2241 if (first_seg == NULL) {
2243 first_seg->pkt_len = data_len;
2244 first_seg->nb_segs = 1;
2246 first_seg->pkt_len += data_len;
2247 first_seg->nb_segs++;
2254 * If this is not the last buffer of the received packet, update
2255 * the pointer to the first mbuf at the NEXTP entry in the
2256 * sw_sc_ring and continue to parse the RX ring.
2258 if (!eop && next_rxe) {
2259 rxm->next = next_rxe->mbuf;
2260 next_sc_entry->fbuf = first_seg;
2264 /* Initialize the first mbuf of the returned packet */
2265 ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq, staterr);
2268 * Deal with the case when HW CRC strip is disabled.
2269 * That can't happen when LRO is enabled, but could still
2270 * happen in scattered RX mode.
2272 first_seg->pkt_len -= rxq->crc_len;
2273 if (unlikely(rxm->data_len <= rxq->crc_len)) {
2274 struct rte_mbuf *lp;
2276 for (lp = first_seg; lp->next != rxm; lp = lp->next)
2279 first_seg->nb_segs--;
2280 lp->data_len -= rxq->crc_len - rxm->data_len;
2282 rte_pktmbuf_free_seg(rxm);
2284 rxm->data_len -= rxq->crc_len;
2286 /* Prefetch data of first segment, if configured to do so. */
2287 rte_packet_prefetch((char *)first_seg->buf_addr +
2288 first_seg->data_off);
2291 * Store the mbuf address into the next entry of the array
2292 * of returned packets.
2294 rx_pkts[nb_rx++] = first_seg;
2298 * Record index of the next RX descriptor to probe.
2300 rxq->rx_tail = rx_id;
2303 * If the number of free RX descriptors is greater than the RX free
2304 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
2306 * Update the RDT with the value of the last processed RX descriptor
2307 * minus 1, to guarantee that the RDT register is never equal to the
2308 * RDH register, which creates a "full" ring situation from the
2309 * hardware point of view...
2311 if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {
2312 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
2313 "nb_hold=%u nb_rx=%u",
2314 rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
2317 IXGBE_PCI_REG_WC_WRITE_RELAXED(rxq->rdt_reg_addr, prev_id);
2321 rxq->nb_rx_hold = nb_hold;
2326 ixgbe_recv_pkts_lro_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
2329 return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, false);
2333 ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
2336 return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, true);
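/*
 * Usage sketch (application side, not part of the driver): whichever of
 * the burst handlers above is bound to the device is invoked through the
 * generic rte_eth_rx_burst() API. "port_id", "queue_id" and the burst
 * size of 32 are assumptions for illustration.
 */
#if 0	/* illustrative only */
static void
example_rx_loop(uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[32];
	uint16_t i, nb;

	for (;;) {
		/* dispatches to e.g. ixgbe_recv_pkts_lro_bulk_alloc() */
		nb = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
		for (i = 0; i < nb; i++)
			rte_pktmbuf_free(pkts[i]);	/* consume */
	}
}
#endif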
2339 /*********************************************************************
2341 * Queue management functions
2343 **********************************************************************/
2345 static void __rte_cold
2346 ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
2350 if (txq->sw_ring != NULL) {
2351 for (i = 0; i < txq->nb_tx_desc; i++) {
2352 if (txq->sw_ring[i].mbuf != NULL) {
2353 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
2354 txq->sw_ring[i].mbuf = NULL;
2361 ixgbe_tx_done_cleanup_full(struct ixgbe_tx_queue *txq, uint32_t free_cnt)
2363 struct ixgbe_tx_entry *swr_ring = txq->sw_ring;
2364 uint16_t i, tx_last, tx_id;
2365 uint16_t nb_tx_free_last;
2366 uint16_t nb_tx_to_clean;
2369 /* Start freeing mbufs from the descriptor following tx_tail */
2370 tx_last = txq->tx_tail;
2371 tx_id = swr_ring[tx_last].next_id;
2373 if (txq->nb_tx_free == 0 && ixgbe_xmit_cleanup(txq))
2376 nb_tx_to_clean = txq->nb_tx_free;
2377 nb_tx_free_last = txq->nb_tx_free;
2379 free_cnt = txq->nb_tx_desc;
2381 /* Loop through swr_ring to count the number of
2382 * freeable mbufs and packets.
2384 for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
2385 for (i = 0; i < nb_tx_to_clean &&
2386 pkt_cnt < free_cnt &&
2387 tx_id != tx_last; i++) {
2388 if (swr_ring[tx_id].mbuf != NULL) {
2389 rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
2390 swr_ring[tx_id].mbuf = NULL;
2393 * last segment in the packet,
2394 * increment packet count
2396 pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
2399 tx_id = swr_ring[tx_id].next_id;
2402 if (txq->tx_rs_thresh > txq->nb_tx_desc -
2403 txq->nb_tx_free || tx_id == tx_last)
2406 if (pkt_cnt < free_cnt) {
2407 if (ixgbe_xmit_cleanup(txq))
2410 nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
2411 nb_tx_free_last = txq->nb_tx_free;
2415 return (int)pkt_cnt;
2419 ixgbe_tx_done_cleanup_simple(struct ixgbe_tx_queue *txq,
2424 if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
2425 free_cnt = txq->nb_tx_desc;
2427 cnt = free_cnt - free_cnt % txq->tx_rs_thresh;
2429 for (i = 0; i < cnt; i += n) {
2430 if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_rs_thresh)
2433 n = ixgbe_tx_free_bufs(txq);
2443 ixgbe_tx_done_cleanup_vec(struct ixgbe_tx_queue *txq __rte_unused,
2444 uint32_t free_cnt __rte_unused)
2450 ixgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt)
2452 struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
2453 if (txq->offloads == 0 &&
2454 #ifdef RTE_LIB_SECURITY
2455 !(txq->using_ipsec) &&
2457 txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST) {
2458 if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
2459 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128 &&
2460 (rte_eal_process_type() != RTE_PROC_PRIMARY ||
2461 txq->sw_ring_v != NULL)) {
2462 return ixgbe_tx_done_cleanup_vec(txq, free_cnt);
2464 return ixgbe_tx_done_cleanup_simple(txq, free_cnt);
2468 return ixgbe_tx_done_cleanup_full(txq, free_cnt);
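/*
 * Usage sketch (assumed application code): the cleanup dispatcher above
 * is reached through the generic API, e.g. reclaiming up to 64 packets'
 * mbufs from Tx queue 0 of port 0:
 *
 *   int n = rte_eth_tx_done_cleanup(0, 0, 64);
 *
 * where n < 0 reports an error and n >= 0 the number of packets freed.
 */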
2471 static void __rte_cold
2472 ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
2475 txq->sw_ring != NULL)
2476 rte_free(txq->sw_ring);
2479 static void __rte_cold
2480 ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
2482 if (txq != NULL && txq->ops != NULL) {
2483 txq->ops->release_mbufs(txq);
2484 txq->ops->free_swring(txq);
2490 ixgbe_dev_tx_queue_release(void *txq)
2492 ixgbe_tx_queue_release(txq);
2495 /* (Re)set dynamic ixgbe_tx_queue fields to defaults */
2496 static void __rte_cold
2497 ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
2499 static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
2500 struct ixgbe_tx_entry *txe = txq->sw_ring;
2503 /* Zero out HW ring memory */
2504 for (i = 0; i < txq->nb_tx_desc; i++) {
2505 txq->tx_ring[i] = zeroed_desc;
2508 /* Initialize SW ring entries */
2509 prev = (uint16_t) (txq->nb_tx_desc - 1);
2510 for (i = 0; i < txq->nb_tx_desc; i++) {
2511 volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
2513 txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD);
2516 txe[prev].next_id = i;
2520 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
2521 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2524 txq->nb_tx_used = 0;
2526 * Always allow 1 descriptor to be unallocated to avoid
2527 * a H/W race condition
2529 txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
2530 txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
2532 memset((void *)&txq->ctx_cache, 0,
2533 IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
2536 static const struct ixgbe_txq_ops def_txq_ops = {
2537 .release_mbufs = ixgbe_tx_queue_release_mbufs,
2538 .free_swring = ixgbe_tx_free_swring,
2539 .reset = ixgbe_reset_tx_queue,
2542 /* Takes an ethdev and a queue and sets up the tx function to be used based on
2543 * the queue parameters. Used in tx_queue_setup by primary process and then
2544 * in dev_init by secondary process when attaching to an existing ethdev.
2547 ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
2549 /* Use a simple Tx queue (no offloads, no multi segs) if possible */
2550 if ((txq->offloads == 0) &&
2551 #ifdef RTE_LIB_SECURITY
2552 !(txq->using_ipsec) &&
2554 (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
2555 PMD_INIT_LOG(DEBUG, "Using simple tx code path");
2556 dev->tx_pkt_prepare = NULL;
2557 if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
2558 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128 &&
2559 (rte_eal_process_type() != RTE_PROC_PRIMARY ||
2560 ixgbe_txq_vec_setup(txq) == 0)) {
2561 PMD_INIT_LOG(DEBUG, "Vector tx enabled.");
2562 dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
2564 dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
2566 PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
2568 " - offloads = 0x%" PRIx64,
2571 " - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
2572 (unsigned long)txq->tx_rs_thresh,
2573 (unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
2574 dev->tx_pkt_burst = ixgbe_xmit_pkts;
2575 dev->tx_pkt_prepare = ixgbe_prep_pkts;
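/*
 * Summary of the selection above (restated; thresholds as named in this
 * file): the simple path requires offloads == 0, no IPsec, and
 * tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST; within it, the vector
 * variant additionally requires tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ
 * plus SIMD support and successful vector queue setup. Any other
 * combination falls back to the full-featured path with ixgbe_prep_pkts()
 * bound as tx_pkt_prepare.
 */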
2580 ixgbe_get_tx_queue_offloads(struct rte_eth_dev *dev)
2588 ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
2590 uint64_t tx_offload_capa;
2591 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2594 DEV_TX_OFFLOAD_VLAN_INSERT |
2595 DEV_TX_OFFLOAD_IPV4_CKSUM |
2596 DEV_TX_OFFLOAD_UDP_CKSUM |
2597 DEV_TX_OFFLOAD_TCP_CKSUM |
2598 DEV_TX_OFFLOAD_SCTP_CKSUM |
2599 DEV_TX_OFFLOAD_TCP_TSO |
2600 DEV_TX_OFFLOAD_MULTI_SEGS;
2602 if (hw->mac.type == ixgbe_mac_82599EB ||
2603 hw->mac.type == ixgbe_mac_X540)
2604 tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
2606 if (hw->mac.type == ixgbe_mac_X550 ||
2607 hw->mac.type == ixgbe_mac_X550EM_x ||
2608 hw->mac.type == ixgbe_mac_X550EM_a)
2609 tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
2611 #ifdef RTE_LIB_SECURITY
2612 if (dev->security_ctx)
2613 tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
2615 return tx_offload_capa;
2619 ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
2622 unsigned int socket_id,
2623 const struct rte_eth_txconf *tx_conf)
2625 const struct rte_memzone *tz;
2626 struct ixgbe_tx_queue *txq;
2627 struct ixgbe_hw *hw;
2628 uint16_t tx_rs_thresh, tx_free_thresh;
2631 PMD_INIT_FUNC_TRACE();
2632 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2634 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
2637 * Validate number of transmit descriptors.
2638 * It must not exceed the hardware maximum and must be a multiple of IXGBE_TXD_ALIGN.
2641 if (nb_desc % IXGBE_TXD_ALIGN != 0 ||
2642 (nb_desc > IXGBE_MAX_RING_DESC) ||
2643 (nb_desc < IXGBE_MIN_RING_DESC)) {
2648 * The following two parameters control the setting of the RS bit on
2649 * transmit descriptors.
2650 * TX descriptors will have their RS bit set after txq->tx_rs_thresh
2651 * descriptors have been used.
2652 * The TX descriptor ring will be cleaned after txq->tx_free_thresh
2653 * descriptors are used or if the number of descriptors required
2654 * to transmit a packet is greater than the number of free TX
2656 * The following constraints must be satisfied:
2657 * tx_rs_thresh must be greater than 0.
2658 * tx_rs_thresh must be less than the size of the ring minus 2.
2659 * tx_rs_thresh must be less than or equal to tx_free_thresh.
2660 * tx_rs_thresh must be a divisor of the ring size.
2661 * tx_free_thresh must be greater than 0.
2662 * tx_free_thresh must be less than the size of the ring minus 3.
2663 * tx_free_thresh + tx_rs_thresh must not exceed nb_desc.
2664 * One descriptor in the TX ring is used as a sentinel to avoid a
2665 * H/W race condition, hence the maximum threshold constraints.
2666 * When set to zero use default values.
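*
* Example (values assumed; just one legal combination): on a ring of
* 512 descriptors,
*   tx_rs_thresh = 32   (> 0, <= tx_free_thresh, divides 512)
*   tx_free_thresh = 64 (> 0, < 509, and 32 + 64 <= 512)
* satisfies every constraint listed above.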
2668 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
2669 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
2670 /* force tx_rs_thresh to adapt to an aggressive tx_free_thresh */
2671 tx_rs_thresh = (DEFAULT_TX_RS_THRESH + tx_free_thresh > nb_desc) ?
2672 nb_desc - tx_free_thresh : DEFAULT_TX_RS_THRESH;
2673 if (tx_conf->tx_rs_thresh > 0)
2674 tx_rs_thresh = tx_conf->tx_rs_thresh;
2675 if (tx_rs_thresh + tx_free_thresh > nb_desc) {
2676 PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not "
2677 "exceed nb_desc. (tx_rs_thresh=%u "
2678 "tx_free_thresh=%u nb_desc=%u port = %d queue=%d)",
2679 (unsigned int)tx_rs_thresh,
2680 (unsigned int)tx_free_thresh,
2681 (unsigned int)nb_desc,
2682 (int)dev->data->port_id,
2686 if (tx_rs_thresh >= (nb_desc - 2)) {
2687 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number "
2688 "of TX descriptors minus 2. (tx_rs_thresh=%u "
2689 "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2690 (int)dev->data->port_id, (int)queue_idx);
2693 if (tx_rs_thresh > DEFAULT_TX_RS_THRESH) {
2694 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to %u. "
2695 "(tx_rs_thresh=%u port=%d queue=%d)",
2696 DEFAULT_TX_RS_THRESH, (unsigned int)tx_rs_thresh,
2697 (int)dev->data->port_id, (int)queue_idx);
2700 if (tx_free_thresh >= (nb_desc - 3)) {
2701 PMD_INIT_LOG(ERR,
2702 "tx_free_thresh must be less than the number of "
2703 "TX descriptors minus 3. (tx_free_thresh=%u "
2704 "port=%d queue=%d)",
2705 (unsigned int)tx_free_thresh,
2706 (int)dev->data->port_id, (int)queue_idx);
2709 if (tx_rs_thresh > tx_free_thresh) {
2710 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to "
2711 "tx_free_thresh. (tx_free_thresh=%u "
2712 "tx_rs_thresh=%u port=%d queue=%d)",
2713 (unsigned int)tx_free_thresh,
2714 (unsigned int)tx_rs_thresh,
2715 (int)dev->data->port_id,
2719 if ((nb_desc % tx_rs_thresh) != 0) {
2720 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
2721 "number of TX descriptors. (tx_rs_thresh=%u "
2722 "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2723 (int)dev->data->port_id, (int)queue_idx);
2728 * If tx_rs_thresh is greater than 1, then TX WTHRESH should be
2729 * set to 0. If WTHRESH is greater than zero, the RS bit is ignored
2730 * by the NIC and all descriptors are written back after the NIC
2731 * accumulates WTHRESH descriptors.
2733 if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
2734 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
2735 "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
2736 "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2737 (int)dev->data->port_id, (int)queue_idx);
2741 /* Free memory prior to re-allocation if needed... */
2742 if (dev->data->tx_queues[queue_idx] != NULL) {
2743 ixgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
2744 dev->data->tx_queues[queue_idx] = NULL;
2747 /* First allocate the tx queue data structure */
2748 txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue),
2749 RTE_CACHE_LINE_SIZE, socket_id);
2754 * Allocate TX ring hardware descriptors. A memzone large enough to
2755 * handle the maximum ring size is allocated in order to allow for
2756 * resizing in later calls to the queue setup function.
2758 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
2759 sizeof(union ixgbe_adv_tx_desc) * IXGBE_MAX_RING_DESC,
2760 IXGBE_ALIGN, socket_id);
2762 ixgbe_tx_queue_release(txq);
2766 txq->nb_tx_desc = nb_desc;
2767 txq->tx_rs_thresh = tx_rs_thresh;
2768 txq->tx_free_thresh = tx_free_thresh;
2769 txq->pthresh = tx_conf->tx_thresh.pthresh;
2770 txq->hthresh = tx_conf->tx_thresh.hthresh;
2771 txq->wthresh = tx_conf->tx_thresh.wthresh;
2772 txq->queue_id = queue_idx;
2773 txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2774 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2775 txq->port_id = dev->data->port_id;
2776 txq->offloads = offloads;
2777 txq->ops = &def_txq_ops;
2778 txq->tx_deferred_start = tx_conf->tx_deferred_start;
2779 #ifdef RTE_LIB_SECURITY
2780 txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
2781 DEV_TX_OFFLOAD_SECURITY);
2785 * Use VFTDT for the tail register when a virtual function is detected
2787 if (hw->mac.type == ixgbe_mac_82599_vf ||
2788 hw->mac.type == ixgbe_mac_X540_vf ||
2789 hw->mac.type == ixgbe_mac_X550_vf ||
2790 hw->mac.type == ixgbe_mac_X550EM_x_vf ||
2791 hw->mac.type == ixgbe_mac_X550EM_a_vf)
2792 txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx));
2794 txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
2796 txq->tx_ring_phys_addr = tz->iova;
2797 txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;
2799 /* Allocate software ring */
2800 txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
2801 sizeof(struct ixgbe_tx_entry) * nb_desc,
2802 RTE_CACHE_LINE_SIZE, socket_id);
2803 if (txq->sw_ring == NULL) {
2804 ixgbe_tx_queue_release(txq);
2807 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
2808 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
2810 /* set up vector or scalar TX function as appropriate */
2811 ixgbe_set_tx_function(dev, txq);
2813 txq->ops->reset(txq);
2815 dev->data->tx_queues[queue_idx] = txq;
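/*
 * Usage sketch (assumed application code, not part of the driver): this
 * setup routine is reached through the generic ethdev API, e.g. one
 * 512-descriptor Tx queue on NUMA socket 0 with default thresholds:
 *
 *   struct rte_eth_dev_info info;
 *   rte_eth_dev_info_get(port_id, &info);
 *   ret = rte_eth_tx_queue_setup(port_id, 0, 512, 0,
 *                                &info.default_txconf);
 */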
2822 * ixgbe_free_sc_cluster - free the not-yet-completed scattered cluster
2824 * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
2825 * in the sw_rsc_ring is not set to NULL but rather points to the next
2826 * mbuf of this RSC aggregation (that has not been completed yet and still
2827 * resides on the HW ring). So, instead of calling rte_pktmbuf_free() we
2828 * will just free the first "nb_segs" segments of the cluster explicitly by
2829 * calling rte_pktmbuf_free_seg().
2831 * @m scattered cluster head
2833 static void __rte_cold
2834 ixgbe_free_sc_cluster(struct rte_mbuf *m)
2836 uint16_t i, nb_segs = m->nb_segs;
2837 struct rte_mbuf *next_seg;
2839 for (i = 0; i < nb_segs; i++) {
2841 rte_pktmbuf_free_seg(m);
2846 static void __rte_cold
2847 ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
2851 /* SSE Vector driver has a different way of releasing mbufs. */
2852 if (rxq->rx_using_sse) {
2853 ixgbe_rx_queue_release_mbufs_vec(rxq);
2857 if (rxq->sw_ring != NULL) {
2858 for (i = 0; i < rxq->nb_rx_desc; i++) {
2859 if (rxq->sw_ring[i].mbuf != NULL) {
2860 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
2861 rxq->sw_ring[i].mbuf = NULL;
2864 if (rxq->rx_nb_avail) {
2865 for (i = 0; i < rxq->rx_nb_avail; ++i) {
2866 struct rte_mbuf *mb;
2868 mb = rxq->rx_stage[rxq->rx_next_avail + i];
2869 rte_pktmbuf_free_seg(mb);
2871 rxq->rx_nb_avail = 0;
2875 if (rxq->sw_sc_ring)
2876 for (i = 0; i < rxq->nb_rx_desc; i++)
2877 if (rxq->sw_sc_ring[i].fbuf) {
2878 ixgbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
2879 rxq->sw_sc_ring[i].fbuf = NULL;
2883 static void __rte_cold
2884 ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)
2887 ixgbe_rx_queue_release_mbufs(rxq);
2888 rte_free(rxq->sw_ring);
2889 rte_free(rxq->sw_sc_ring);
2895 ixgbe_dev_rx_queue_release(void *rxq)
2897 ixgbe_rx_queue_release(rxq);
2901 * Check if Rx Burst Bulk Alloc function can be used.
2903 * 0: the preconditions are satisfied and the bulk allocation function
2905 * -EINVAL: the preconditions are NOT satisfied and the default Rx burst
2906 * function must be used.
2908 static inline int __rte_cold
2909 check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
2914 * Make sure the following pre-conditions are satisfied:
2915 * rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST
2916 * rxq->rx_free_thresh < rxq->nb_rx_desc
2917 * (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
2918 * Scattered packets are not supported. This should be checked
2919 * outside of this function.
2921 if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) {
2922 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2923 "rxq->rx_free_thresh=%d, "
2924 "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
2925 rxq->rx_free_thresh, RTE_PMD_IXGBE_RX_MAX_BURST);
2927 } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
2928 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2929 "rxq->rx_free_thresh=%d, "
2930 "rxq->nb_rx_desc=%d",
2931 rxq->rx_free_thresh, rxq->nb_rx_desc);
2933 } else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) {
2934 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2935 "rxq->nb_rx_desc=%d, "
2936 "rxq->rx_free_thresh=%d",
2937 rxq->nb_rx_desc, rxq->rx_free_thresh);
2944 /* Reset dynamic ixgbe_rx_queue fields back to defaults */
2945 static void __rte_cold
2946 ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
2948 static const union ixgbe_adv_rx_desc zeroed_desc = {{0}};
2950 uint16_t len = rxq->nb_rx_desc;
2953 * By default, the Rx queue setup function allocates enough memory for
2954 * IXGBE_MAX_RING_DESC. The Rx Burst bulk allocation function requires
2955 * extra memory at the end of the descriptor ring to be zero'd out.
2957 if (adapter->rx_bulk_alloc_allowed)
2958 /* zero out extra memory */
2959 len += RTE_PMD_IXGBE_RX_MAX_BURST;
2962 * Zero out HW ring memory. Zero out extra memory at the end of
2963 * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
2964 * reads extra memory as zeros.
2966 for (i = 0; i < len; i++) {
2967 rxq->rx_ring[i] = zeroed_desc;
2971 * Initialize extra software ring entries. Space for these extra
2972 * entries is always allocated.
2974 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
2975 for (i = rxq->nb_rx_desc; i < len; ++i) {
2976 rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
2979 rxq->rx_nb_avail = 0;
2980 rxq->rx_next_avail = 0;
2981 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
2983 rxq->nb_rx_hold = 0;
2985 if (rxq->pkt_first_seg != NULL)
2986 rte_pktmbuf_free(rxq->pkt_first_seg);
2988 rxq->pkt_first_seg = NULL;
2989 rxq->pkt_last_seg = NULL;
2991 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
2992 rxq->rxrearm_start = 0;
2993 rxq->rxrearm_nb = 0;
2998 ixgbe_is_vf(struct rte_eth_dev *dev)
3000 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3002 switch (hw->mac.type) {
3003 case ixgbe_mac_82599_vf:
3004 case ixgbe_mac_X540_vf:
3005 case ixgbe_mac_X550_vf:
3006 case ixgbe_mac_X550EM_x_vf:
3007 case ixgbe_mac_X550EM_a_vf:
3015 ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev)
3017 uint64_t offloads = 0;
3018 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3020 if (hw->mac.type != ixgbe_mac_82598EB)
3021 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
3027 ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
3030 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3032 offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
3033 DEV_RX_OFFLOAD_UDP_CKSUM |
3034 DEV_RX_OFFLOAD_TCP_CKSUM |
3035 DEV_RX_OFFLOAD_KEEP_CRC |
3036 DEV_RX_OFFLOAD_JUMBO_FRAME |
3037 DEV_RX_OFFLOAD_VLAN_FILTER |
3038 DEV_RX_OFFLOAD_SCATTER |
3039 DEV_RX_OFFLOAD_RSS_HASH;
3041 if (hw->mac.type == ixgbe_mac_82598EB)
3042 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
3044 if (ixgbe_is_vf(dev) == 0)
3045 offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
3048 * RSC is only supported by 82599, x540 and x550 PF devices in non-SR-IOV mode.
3051 if ((hw->mac.type == ixgbe_mac_82599EB ||
3052 hw->mac.type == ixgbe_mac_X540 ||
3053 hw->mac.type == ixgbe_mac_X550) &&
3054 !RTE_ETH_DEV_SRIOV(dev).active)
3055 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
3057 if (hw->mac.type == ixgbe_mac_82599EB ||
3058 hw->mac.type == ixgbe_mac_X540)
3059 offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
3061 if (hw->mac.type == ixgbe_mac_X550 ||
3062 hw->mac.type == ixgbe_mac_X550EM_x ||
3063 hw->mac.type == ixgbe_mac_X550EM_a)
3064 offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
3066 #ifdef RTE_LIB_SECURITY
3067 if (dev->security_ctx)
3068 offloads |= DEV_RX_OFFLOAD_SECURITY;
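/*
 * Sketch (application side; "port_id" is an assumption): the capability
 * mask assembled above is surfaced through rte_eth_dev_info_get(), e.g.:
 *
 *   struct rte_eth_dev_info info;
 *   rte_eth_dev_info_get(port_id, &info);
 *   if (info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
 *           LRO may be requested via rxmode.offloads
 */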
3075 ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
3078 unsigned int socket_id,
3079 const struct rte_eth_rxconf *rx_conf,
3080 struct rte_mempool *mp)
3082 const struct rte_memzone *rz;
3083 struct ixgbe_rx_queue *rxq;
3084 struct ixgbe_hw *hw;
3086 struct ixgbe_adapter *adapter = dev->data->dev_private;
3089 PMD_INIT_FUNC_TRACE();
3090 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3092 offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
3095 * Validate number of receive descriptors.
3096 * It must not exceed the hardware maximum and must be a multiple of IXGBE_RXD_ALIGN.
3099 if (nb_desc % IXGBE_RXD_ALIGN != 0 ||
3100 (nb_desc > IXGBE_MAX_RING_DESC) ||
3101 (nb_desc < IXGBE_MIN_RING_DESC)) {
3105 /* Free memory prior to re-allocation if needed... */
3106 if (dev->data->rx_queues[queue_idx] != NULL) {
3107 ixgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
3108 dev->data->rx_queues[queue_idx] = NULL;
3111 /* First allocate the rx queue data structure */
3112 rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct ixgbe_rx_queue),
3113 RTE_CACHE_LINE_SIZE, socket_id);
3117 rxq->nb_rx_desc = nb_desc;
3118 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
3119 rxq->queue_id = queue_idx;
3120 rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
3121 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
3122 rxq->port_id = dev->data->port_id;
3123 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
3124 rxq->crc_len = RTE_ETHER_CRC_LEN;
3127 rxq->drop_en = rx_conf->rx_drop_en;
3128 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
3129 rxq->offloads = offloads;
3132 * The packet type in the RX descriptor differs between NICs.
3133 * Some bits are used for x550 but reserved for other NICs,
3134 * so set different masks for different NICs.
3136 if (hw->mac.type == ixgbe_mac_X550 ||
3137 hw->mac.type == ixgbe_mac_X550EM_x ||
3138 hw->mac.type == ixgbe_mac_X550EM_a ||
3139 hw->mac.type == ixgbe_mac_X550_vf ||
3140 hw->mac.type == ixgbe_mac_X550EM_x_vf ||
3141 hw->mac.type == ixgbe_mac_X550EM_a_vf)
3142 rxq->pkt_type_mask = IXGBE_PACKET_TYPE_MASK_X550;
3144 rxq->pkt_type_mask = IXGBE_PACKET_TYPE_MASK_82599;
3147 * 82599 errata: UDP frames with a 0 checksum can be marked as checksum errors.
3150 if (hw->mac.type == ixgbe_mac_82599EB)
3151 rxq->rx_udp_csum_zero_err = 1;
3154 * Allocate RX ring hardware descriptors. A memzone large enough to
3155 * handle the maximum ring size is allocated in order to allow for
3156 * resizing in later calls to the queue setup function.
3158 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
3159 RX_RING_SZ, IXGBE_ALIGN, socket_id);
3161 ixgbe_rx_queue_release(rxq);
3166 * Zero init all the descriptors in the ring.
3168 memset(rz->addr, 0, RX_RING_SZ);
3171 * Use VFRDT/VFRDH for the ring registers when a virtual function is detected
3173 if (hw->mac.type == ixgbe_mac_82599_vf ||
3174 hw->mac.type == ixgbe_mac_X540_vf ||
3175 hw->mac.type == ixgbe_mac_X550_vf ||
3176 hw->mac.type == ixgbe_mac_X550EM_x_vf ||
3177 hw->mac.type == ixgbe_mac_X550EM_a_vf) {
3179 IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx));
3181 IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDH(queue_idx));
3184 IXGBE_PCI_REG_ADDR(hw, IXGBE_RDT(rxq->reg_idx));
3186 IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx));
3189 rxq->rx_ring_phys_addr = rz->iova;
3190 rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
3193 * Certain constraints must be met in order to use the bulk buffer
3194 * allocation Rx burst function. If any of the Rx queues doesn't meet
3195 * them, the feature is disabled for the whole port.
3197 if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
3198 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc "
3199 "preconditions - canceling the feature for "
3200 "the whole port[%d]",
3201 rxq->queue_id, rxq->port_id);
3202 adapter->rx_bulk_alloc_allowed = false;
3206 * Allocate software ring. Allow for space at the end of the
3207 * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
3208 * function does not access an invalid memory region.
3211 if (adapter->rx_bulk_alloc_allowed)
3212 len += RTE_PMD_IXGBE_RX_MAX_BURST;
3214 rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
3215 sizeof(struct ixgbe_rx_entry) * len,
3216 RTE_CACHE_LINE_SIZE, socket_id);
3217 if (!rxq->sw_ring) {
3218 ixgbe_rx_queue_release(rxq);
3223 * Always allocate even if it's not going to be needed in order to
3224 * simplify the code.
3226 * This ring is used in LRO and Scattered Rx cases and Scattered Rx may
3227 * be requested in ixgbe_dev_rx_init(), which is called later from
3231 rte_zmalloc_socket("rxq->sw_sc_ring",
3232 sizeof(struct ixgbe_scattered_rx_entry) * len,
3233 RTE_CACHE_LINE_SIZE, socket_id);
3234 if (!rxq->sw_sc_ring) {
3235 ixgbe_rx_queue_release(rxq);
3239 PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p "
3240 "dma_addr=0x%"PRIx64,
3241 rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
3242 rxq->rx_ring_phys_addr);
3244 if (!rte_is_power_of_2(nb_desc)) {
3245 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
3246 "preconditions - canceling the feature for "
3247 "the whole port[%d]",
3248 rxq->queue_id, rxq->port_id);
3249 adapter->rx_vec_allowed = false;
3251 ixgbe_rxq_vec_setup(rxq);
3253 dev->data->rx_queues[queue_idx] = rxq;
3255 ixgbe_reset_rx_queue(adapter, rxq);
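/*
 * Usage sketch (assumed application code): the Rx-side mirror of the Tx
 * setup above, e.g. one 512-descriptor Rx queue fed from an existing
 * mempool "mb_pool" (name assumed):
 *
 *   ret = rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *                                NULL, mb_pool);
 *
 * where a NULL rx_conf selects the driver defaults.
 */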
3261 ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
3263 #define IXGBE_RXQ_SCAN_INTERVAL 4
3264 volatile union ixgbe_adv_rx_desc *rxdp;
3265 struct ixgbe_rx_queue *rxq;
3268 rxq = dev->data->rx_queues[rx_queue_id];
3269 rxdp = &(rxq->rx_ring[rxq->rx_tail]);
3271 while ((desc < rxq->nb_rx_desc) &&
3272 (rxdp->wb.upper.status_error &
3273 rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) {
3274 desc += IXGBE_RXQ_SCAN_INTERVAL;
3275 rxdp += IXGBE_RXQ_SCAN_INTERVAL;
3276 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
3277 rxdp = &(rxq->rx_ring[rxq->rx_tail +
3278 desc - rxq->nb_rx_desc]);
3285 ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
3287 volatile union ixgbe_adv_rx_desc *rxdp;
3288 struct ixgbe_rx_queue *rxq = rx_queue;
3291 if (unlikely(offset >= rxq->nb_rx_desc))
3293 desc = rxq->rx_tail + offset;
3294 if (desc >= rxq->nb_rx_desc)
3295 desc -= rxq->nb_rx_desc;
3297 rxdp = &rxq->rx_ring[desc];
3298 return !!(rxdp->wb.upper.status_error &
3299 rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD));
3303 ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
3305 struct ixgbe_rx_queue *rxq = rx_queue;
3306 volatile uint32_t *status;
3307 uint32_t nb_hold, desc;
3309 if (unlikely(offset >= rxq->nb_rx_desc))
3312 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
3313 if (rxq->rx_using_sse)
3314 nb_hold = rxq->rxrearm_nb;
3317 nb_hold = rxq->nb_rx_hold;
3318 if (offset >= rxq->nb_rx_desc - nb_hold)
3319 return RTE_ETH_RX_DESC_UNAVAIL;
3321 desc = rxq->rx_tail + offset;
3322 if (desc >= rxq->nb_rx_desc)
3323 desc -= rxq->nb_rx_desc;
3325 status = &rxq->rx_ring[desc].wb.upper.status_error;
3326 if (*status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))
3327 return RTE_ETH_RX_DESC_DONE;
3329 return RTE_ETH_RX_DESC_AVAIL;
3333 ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
3335 struct ixgbe_tx_queue *txq = tx_queue;
3336 volatile uint32_t *status;
3339 if (unlikely(offset >= txq->nb_tx_desc))
3342 desc = txq->tx_tail + offset;
3343 /* go to next desc that has the RS bit */
3344 desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
3346 if (desc >= txq->nb_tx_desc) {
3347 desc -= txq->nb_tx_desc;
3348 if (desc >= txq->nb_tx_desc)
3349 desc -= txq->nb_tx_desc;
3352 status = &txq->tx_ring[desc].wb.status;
3353 if (*status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD))
3354 return RTE_ETH_TX_DESC_DONE;
3356 return RTE_ETH_TX_DESC_FULL;
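/*
 * Usage sketch (assumed application code): both status helpers above are
 * reached through the generic descriptor-status API, e.g.:
 *
 *   if (rte_eth_rx_descriptor_status(port_id, queue_id, 0) ==
 *       RTE_ETH_RX_DESC_DONE)
 *           at least one packet is ready at the current Rx tail
 */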
3360 * Set up link loopback for X540/X550 mode Tx->Rx.
3362 static inline void __rte_cold
3363 ixgbe_setup_loopback_link_x540_x550(struct ixgbe_hw *hw, bool enable)
3366 PMD_INIT_FUNC_TRACE();
3368 u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
3370 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
3371 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
3372 macc = IXGBE_READ_REG(hw, IXGBE_MACC);
3375 /* datasheet 15.2.1: disable AUTONEG (PHY Bit 7.0.C) */
3376 autoneg_reg |= IXGBE_MII_AUTONEG_ENABLE;
3377 /* datasheet 15.2.1: MACC.FLU = 1 (force link up) */
3378 macc |= IXGBE_MACC_FLU;
3380 autoneg_reg &= ~IXGBE_MII_AUTONEG_ENABLE;
3381 macc &= ~IXGBE_MACC_FLU;
3384 hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
3385 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
3387 IXGBE_WRITE_REG(hw, IXGBE_MACC, macc);
3391 ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
3394 struct ixgbe_adapter *adapter = dev->data->dev_private;
3395 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3397 PMD_INIT_FUNC_TRACE();
3399 for (i = 0; i < dev->data->nb_tx_queues; i++) {
3400 struct ixgbe_tx_queue *txq = dev->data->tx_queues[i];
3403 txq->ops->release_mbufs(txq);
3404 txq->ops->reset(txq);
3408 for (i = 0; i < dev->data->nb_rx_queues; i++) {
3409 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
3412 ixgbe_rx_queue_release_mbufs(rxq);
3413 ixgbe_reset_rx_queue(adapter, rxq);
3416 /* If loopback mode was enabled, reconfigure the link accordingly */
3417 if (dev->data->dev_conf.lpbk_mode != 0) {
3418 if (hw->mac.type == ixgbe_mac_X540 ||
3419 hw->mac.type == ixgbe_mac_X550 ||
3420 hw->mac.type == ixgbe_mac_X550EM_x ||
3421 hw->mac.type == ixgbe_mac_X550EM_a)
3422 ixgbe_setup_loopback_link_x540_x550(hw, false);
3427 ixgbe_dev_free_queues(struct rte_eth_dev *dev)
3431 PMD_INIT_FUNC_TRACE();
3433 for (i = 0; i < dev->data->nb_rx_queues; i++) {
3434 ixgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
3435 dev->data->rx_queues[i] = NULL;
3436 rte_eth_dma_zone_free(dev, "rx_ring", i);
3438 dev->data->nb_rx_queues = 0;
3440 for (i = 0; i < dev->data->nb_tx_queues; i++) {
3441 ixgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
3442 dev->data->tx_queues[i] = NULL;
3443 rte_eth_dma_zone_free(dev, "tx_ring", i);
3445 dev->data->nb_tx_queues = 0;
3448 /*********************************************************************
3450 * Device RX/TX init functions
3452 **********************************************************************/
3455 * Receive Side Scaling (RSS)
3456 * See section 7.1.2.8 in the following document:
3457 * "Intel 82599 10 GbE Controller Datasheet" - Revision 2.1 October 2009
3460 * The source and destination IP addresses of the IP header and the source
3461 * and destination ports of TCP/UDP headers, if any, of received packets are
3462 * hashed against a configurable random key to compute a 32-bit RSS hash result.
3463 * The seven (7) LSBs of the 32-bit hash result are used as an index into a
3464 * 128-entry redirection table (RETA). Each entry of the RETA provides a 4-bit
3465 * RSS output index which is used as the RX queue index in which to store the
3467 * The following output is supplied in the RX write-back descriptor:
3468 * - 32-bit result of the Microsoft RSS hash function,
3469 * - 4-bit RSS type field.
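/*
 * Worked example (illustrative): with the 128-entry RETA, the Rx queue
 * for a packet whose 32-bit RSS hash is h is
 *
 *   rx_queue = RETA[h & 0x7F];
 *
 * e.g. h = 0x1234ABCD selects RETA index 0x4D (77).
 */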
3473 * RSS random key supplied in section 7.1.2.8.3 of the Intel 82599 datasheet.
3474 * Used as the default key.
3476 static uint8_t rss_intel_key[40] = {
3477 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
3478 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
3479 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
3480 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
3481 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
3485 ixgbe_rss_disable(struct rte_eth_dev *dev)
3487 struct ixgbe_hw *hw;
3491 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3492 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3493 mrqc = IXGBE_READ_REG(hw, mrqc_reg);
3494 mrqc &= ~IXGBE_MRQC_RSSEN;
3495 IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
3499 ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
3509 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3510 rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
3512 hash_key = rss_conf->rss_key;
3513 if (hash_key != NULL) {
3514 /* Fill in RSS hash key */
3515 for (i = 0; i < 10; i++) {
3516 rss_key = hash_key[(i * 4)];
3517 rss_key |= hash_key[(i * 4) + 1] << 8;
3518 rss_key |= hash_key[(i * 4) + 2] << 16;
3519 rss_key |= hash_key[(i * 4) + 3] << 24;
3520 IXGBE_WRITE_REG_ARRAY(hw, rssrk_reg, i, rss_key);
3524 /* Set configured hashing protocols in MRQC register */
3525 rss_hf = rss_conf->rss_hf;
3526 mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */
3527 if (rss_hf & ETH_RSS_IPV4)
3528 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
3529 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
3530 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
3531 if (rss_hf & ETH_RSS_IPV6)
3532 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
3533 if (rss_hf & ETH_RSS_IPV6_EX)
3534 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
3535 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
3536 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3537 if (rss_hf & ETH_RSS_IPV6_TCP_EX)
3538 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
3539 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
3540 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3541 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
3542 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3543 if (rss_hf & ETH_RSS_IPV6_UDP_EX)
3544 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
3545 IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
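/*
 * Usage sketch (assumed application code): the register programming above
 * is driven through the generic API, e.g. hashing IPv4 and IPv4/TCP with
 * the default key:
 *
 *   struct rte_eth_rss_conf conf = {
 *           .rss_key = NULL,
 *           .rss_hf = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP,
 *   };
 *   ret = rte_eth_dev_rss_hash_update(port_id, &conf);
 */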
3549 ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
3550 struct rte_eth_rss_conf *rss_conf)
3552 struct ixgbe_hw *hw;
3557 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3559 if (!ixgbe_rss_update_sp(hw->mac.type)) {
3560 PMD_DRV_LOG(ERR, "RSS hash update is not supported on this "
3564 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3567 * Excerpt from section 7.1.2.8 Receive-Side Scaling (RSS):
3568 * "RSS enabling cannot be done dynamically while it must be
3569 * preceded by a software reset"
3570 * Before changing anything, first check that the update RSS operation
3571 * does not attempt to disable RSS, if RSS was enabled at
3572 * initialization time, or does not attempt to enable RSS, if RSS was
3573 * disabled at initialization time.
3575 rss_hf = rss_conf->rss_hf & IXGBE_RSS_OFFLOAD_ALL;
3576 mrqc = IXGBE_READ_REG(hw, mrqc_reg);
3577 if (!(mrqc & IXGBE_MRQC_RSSEN)) { /* RSS disabled */
3578 if (rss_hf != 0) /* Enable RSS */
3580 return 0; /* Nothing to do */
3583 if (rss_hf == 0) /* Disable RSS */
3585 ixgbe_hw_rss_hash_set(hw, rss_conf);
3590 ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
3591 struct rte_eth_rss_conf *rss_conf)
3593 struct ixgbe_hw *hw;
3602 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3603 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3604 rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
3605 hash_key = rss_conf->rss_key;
3606 if (hash_key != NULL) {
3607 /* Return RSS hash key */
3608 for (i = 0; i < 10; i++) {
3609 rss_key = IXGBE_READ_REG_ARRAY(hw, rssrk_reg, i);
3610 hash_key[(i * 4)] = rss_key & 0x000000FF;
3611 hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
3612 hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
3613 hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
3617 /* Get RSS functions configured in MRQC register */
3618 mrqc = IXGBE_READ_REG(hw, mrqc_reg);
3619 if ((mrqc & IXGBE_MRQC_RSSEN) == 0) { /* RSS is disabled */
3620 rss_conf->rss_hf = 0;
3624 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4)
3625 rss_hf |= ETH_RSS_IPV4;
3626 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP)
3627 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
3628 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6)
3629 rss_hf |= ETH_RSS_IPV6;
3630 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX)
3631 rss_hf |= ETH_RSS_IPV6_EX;
3632 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP)
3633 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
3634 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP)
3635 rss_hf |= ETH_RSS_IPV6_TCP_EX;
3636 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP)
3637 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
3638 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP)
3639 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
3640 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP)
3641 rss_hf |= ETH_RSS_IPV6_UDP_EX;
3642 rss_conf->rss_hf = rss_hf;
3647 ixgbe_rss_configure(struct rte_eth_dev *dev)
3649 struct rte_eth_rss_conf rss_conf;
3650 struct ixgbe_adapter *adapter;
3651 struct ixgbe_hw *hw;
3655 uint16_t sp_reta_size;
3658 PMD_INIT_FUNC_TRACE();
3659 adapter = dev->data->dev_private;
3660 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3662 sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
3665 * Fill in redirection table
3666 * The byte-swap is needed because NIC registers are in
3667 * little-endian order.
3669 if (adapter->rss_reta_updated == 0) {
3671 for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
3672 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
3674 if (j == dev->data->nb_rx_queues)
3676 reta = (reta << 8) | j;
3678 IXGBE_WRITE_REG(hw, reta_reg,
3684 * Configure the RSS key and the RSS protocols used to compute
3685 * the RSS hash of input packets.
3687 rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
3688 if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
3689 ixgbe_rss_disable(dev);
3692 if (rss_conf.rss_key == NULL)
3693 rss_conf.rss_key = rss_intel_key; /* Default hash key */
3694 ixgbe_hw_rss_hash_set(hw, &rss_conf);
3697 #define NUM_VFTA_REGISTERS 128
3698 #define NIC_RX_BUFFER_SIZE 0x200
3699 #define X550_RX_BUFFER_SIZE 0x180
3702 ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
3704 struct rte_eth_vmdq_dcb_conf *cfg;
3705 struct ixgbe_hw *hw;
3706 enum rte_eth_nb_pools num_pools;
3707 uint32_t mrqc, vt_ctl, queue_mapping, vlanctrl;
3709 uint8_t nb_tcs; /* number of traffic classes */
3712 PMD_INIT_FUNC_TRACE();
3713 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3714 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
3715 num_pools = cfg->nb_queue_pools;
3716 /* Check we have a valid number of pools */
3717 if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
3718 ixgbe_rss_disable(dev);
3721 /* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
3722 nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
3726 * split rx buffer up into sections, each for 1 traffic class
3728 switch (hw->mac.type) {
3729 case ixgbe_mac_X550:
3730 case ixgbe_mac_X550EM_x:
3731 case ixgbe_mac_X550EM_a:
3732 pbsize = (uint16_t)(X550_RX_BUFFER_SIZE / nb_tcs);
3735 pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
3738 for (i = 0; i < nb_tcs; i++) {
3739 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
3741 rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
3742 /* clear 10 bits. */
3743 rxpbsize |= (pbsize << IXGBE_RXPBSIZE_SHIFT); /* set value */
3744 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
3746 /* zero alloc all unused TCs */
3747 for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3748 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
3750 rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
3751 /* clear 10 bits. */
3752 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
3755 /* MRQC: enable vmdq and dcb */
3756 mrqc = (num_pools == ETH_16_POOLS) ?
3757 IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN;
3758 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3760 /* PFVTCTL: turn on virtualisation and set the default pool */
3761 vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
3762 if (cfg->enable_default_pool) {
3763 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
3765 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
3768 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
3770 /* RTRUP2TC: mapping user priorities to traffic classes (TCs) */
3772 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
3774 * mapping is done with 3 bits per priority,
3775 * so shift by i*3 each time
3777 queue_mapping |= ((cfg->dcb_tc[i] & 0x07) << (i * 3));
3779 IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, queue_mapping);
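/*
 * Worked example (mapping assumed): with dcb_tc = {0,0,1,1,2,2,3,3}
 * the 3-bit fields pack as
 *   (1 << 6) | (1 << 9) | (2 << 12) | (2 << 15) |
 *   (3 << 18) | (3 << 21) = 0x6D2240,
 * i.e. priorities 0-1 -> TC0, 2-3 -> TC1, 4-5 -> TC2, 6-7 -> TC3.
 */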
3781 /* RTRPCS: DCB related */
3782 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, IXGBE_RMCS_RRM);
3784 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
3785 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3786 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
3787 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
3789 /* VFTA - enable all vlan filters */
3790 for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
3791 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
3794 /* VFRE: pool enabling for receive - 16 or 32 */
3795 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0),
3796 num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
3799 * MPSAR - allow pools to read specific mac addresses
3800 * In this case, all pools should be able to read from mac addr 0
3802 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), 0xFFFFFFFF);
3803 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), 0xFFFFFFFF);
3805 /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
3806 for (i = 0; i < cfg->nb_pool_maps; i++) {
3807 /* set vlan id in VF register and set the valid bit */
3808 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN |
3809 (cfg->pool_map[i].vlan_id & 0xFFF)));
3811 * Put the allowed pools in VFB reg. As we only have 16 or 32
3812 * pools, we only need to use the first half of the register
3815 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), cfg->pool_map[i].pools);
3820 * ixgbe_dcb_tx_hw_config - Configure general DCB TX parameters
3821 * @dev: pointer to eth_dev structure
3822 * @dcb_config: pointer to ixgbe_dcb_config structure
3825 ixgbe_dcb_tx_hw_config(struct rte_eth_dev *dev,
3826 struct ixgbe_dcb_config *dcb_config)
3829 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3831 PMD_INIT_FUNC_TRACE();
3832 if (hw->mac.type != ixgbe_mac_82598EB) {
3833 /* Disable the Tx desc arbiter so that MTQC can be changed */
3834 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3835 reg |= IXGBE_RTTDCS_ARBDIS;
3836 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3838 /* Enable DCB for Tx with 8 TCs */
3839 if (dcb_config->num_tcs.pg_tcs == 8) {
3840 reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3842 reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3844 if (dcb_config->vt_mode)
3845 reg |= IXGBE_MTQC_VT_ENA;
3846 IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
3848 /* Enable the Tx desc arbiter */
3849 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3850 reg &= ~IXGBE_RTTDCS_ARBDIS;
3851 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3853 /* Enable Security TX Buffer IFG for DCB */
3854 reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
3855 reg |= IXGBE_SECTX_DCB;
3856 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
3861 * ixgbe_vmdq_dcb_hw_tx_config - Configure general VMDQ+DCB TX parameters
3862 * @dev: pointer to rte_eth_dev structure
3863 * @dcb_config: pointer to ixgbe_dcb_config structure
3866 ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
3867 struct ixgbe_dcb_config *dcb_config)
3869 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3870 &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
3871 struct ixgbe_hw *hw =
3872 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3874 PMD_INIT_FUNC_TRACE();
3875 if (hw->mac.type != ixgbe_mac_82598EB)
3876 /* PF VF Transmit Enable */
3877 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0),
3878 vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
3880 /* Configure general DCB TX parameters */
3881 ixgbe_dcb_tx_hw_config(dev, dcb_config);
3885 ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
3886 struct ixgbe_dcb_config *dcb_config)
3888 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3889 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
3890 struct ixgbe_dcb_tc_config *tc;
3893 /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
3894 if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) {
3895 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
3896 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
3898 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
3899 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
3902 /* Initialize User Priority to Traffic Class mapping */
3903 for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3904 tc = &dcb_config->tc_config[j];
3905 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
3908 /* User Priority to Traffic Class mapping */
3909 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3910 j = vmdq_rx_conf->dcb_tc[i];
3911 tc = &dcb_config->tc_config[j];
3912 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
3918 ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
3919 struct ixgbe_dcb_config *dcb_config)
3921 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3922 &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
3923 struct ixgbe_dcb_tc_config *tc;
3926 /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
3927 if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) {
3928 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
3929 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
3930 } else {
3931 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
3932 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
3935 /* Initialize User Priority to Traffic Class mapping */
3936 for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3937 tc = &dcb_config->tc_config[j];
3938 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
3941 /* User Priority to Traffic Class mapping */
3942 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3943 j = vmdq_tx_conf->dcb_tc[i];
3944 tc = &dcb_config->tc_config[j];
3945 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |= (uint8_t)(1 << i);
3951 ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
3952 struct ixgbe_dcb_config *dcb_config)
3954 struct rte_eth_dcb_rx_conf *rx_conf =
3955 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
3956 struct ixgbe_dcb_tc_config *tc;
3959 dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
3960 dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
3962 /* Initialize User Priority to Traffic Class mapping */
3963 for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3964 tc = &dcb_config->tc_config[j];
3965 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
3968 /* User Priority to Traffic Class mapping */
3969 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3970 j = rx_conf->dcb_tc[i];
3971 tc = &dcb_config->tc_config[j];
3972 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |= (uint8_t)(1 << i);
3978 ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
3979 struct ixgbe_dcb_config *dcb_config)
3981 struct rte_eth_dcb_tx_conf *tx_conf =
3982 &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
3983 struct ixgbe_dcb_tc_config *tc;
3986 dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
3987 dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
3989 /* Initialize User Priority to Traffic Class mapping */
3990 for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3991 tc = &dcb_config->tc_config[j];
3992 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
3995 /* User Priority to Traffic Class mapping */
3996 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3997 j = tx_conf->dcb_tc[i];
3998 tc = &dcb_config->tc_config[j];
3999 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |= (uint8_t)(1 << i);
4005 * ixgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
4006 * @dev: pointer to eth_dev structure
4007 * @dcb_config: pointer to ixgbe_dcb_config structure
4010 ixgbe_dcb_rx_hw_config(struct rte_eth_dev *dev,
4011 struct ixgbe_dcb_config *dcb_config)
4017 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4019 PMD_INIT_FUNC_TRACE();
4021 * Disable the arbiter before changing parameters
4022 * (always enable recycle mode; WSP)
4024 reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
4025 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
4027 if (hw->mac.type != ixgbe_mac_82598EB) {
4028 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
4029 if (dcb_config->num_tcs.pg_tcs == 4) {
4030 if (dcb_config->vt_mode)
4031 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
4032 IXGBE_MRQC_VMDQRT4TCEN;
4033 else {
4034 /* No matter whether the mode is DCB or DCB_RSS, just set the MRQE
4035 * to RSSXTCEN; RSS itself is controlled by the RSS_FIELD bits of MRQC.
4036 */
4038 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
4039 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
4040 IXGBE_MRQC_RTRSS4TCEN;
4043 if (dcb_config->num_tcs.pg_tcs == 8) {
4044 if (dcb_config->vt_mode)
4045 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
4046 IXGBE_MRQC_VMDQRT8TCEN;
4047 else {
4048 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
4049 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
4050 IXGBE_MRQC_RTRSS8TCEN;
4054 IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
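/* Summary of the MRQE selection above:
 *   4 TCs + VT:   IXGBE_MRQC_VMDQRT4TCEN (VMDq + DCB, 4 TCs)
 *   4 TCs, no VT: IXGBE_MRQC_RTRSS4TCEN  (DCB/DCB_RSS, 4 TCs)
 *   8 TCs + VT:   IXGBE_MRQC_VMDQRT8TCEN (VMDq + DCB, 8 TCs)
 *   8 TCs, no VT: IXGBE_MRQC_RTRSS8TCEN  (DCB/DCB_RSS, 8 TCs)
 */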
4056 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
4057 /* Disable drop for all queues in VMDQ mode*/
4058 for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
4059 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4060 (IXGBE_QDE_WRITE |
4061 (q << IXGBE_QDE_IDX_SHIFT)));
4063 /* Enable drop for all queues in SRIOV mode */
4064 for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
4065 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4066 (IXGBE_QDE_WRITE |
4067 (q << IXGBE_QDE_IDX_SHIFT) |
4068 IXGBE_QDE_ENABLE));
4072 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
4073 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4074 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
4075 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
4077 /* VFTA - enable all vlan filters */
4078 for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
4079 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
4083 * Configure Rx packet plane (recycle mode; WSP) and
4084 * re-enable the arbiter.
4085 */
4086 reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
4087 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
4091 ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill,
4092 uint16_t *max, uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
4094 switch (hw->mac.type) {
4095 case ixgbe_mac_82598EB:
4096 ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
4098 case ixgbe_mac_82599EB:
4099 case ixgbe_mac_X540:
4100 case ixgbe_mac_X550:
4101 case ixgbe_mac_X550EM_x:
4102 case ixgbe_mac_X550EM_a:
4103 ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
4104 tsa, map);
4112 ixgbe_dcb_hw_arbite_tx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *max,
4113 uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
4115 switch (hw->mac.type) {
4116 case ixgbe_mac_82598EB:
4117 ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, tsa);
4118 ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, tsa);
4120 case ixgbe_mac_82599EB:
4121 case ixgbe_mac_X540:
4122 case ixgbe_mac_X550:
4123 case ixgbe_mac_X550EM_x:
4124 case ixgbe_mac_X550EM_a:
4125 ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, tsa);
4126 ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, tsa, map);
4133 #define DCB_RX_CONFIG 1
4134 #define DCB_TX_CONFIG 1
4135 #define DCB_TX_PB 1024
4137 * ixgbe_dcb_hw_configure - Enable DCB and configure
4138 * general DCB in VT mode and non-VT mode parameters
4139 * @dev: pointer to rte_eth_dev structure
4140 * @dcb_config: pointer to ixgbe_dcb_config structure
4143 ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
4144 struct ixgbe_dcb_config *dcb_config)
4147 uint8_t i, pfc_en, nb_tcs;
4148 uint16_t pbsize, rx_buffer_size;
4149 uint8_t config_dcb_rx = 0;
4150 uint8_t config_dcb_tx = 0;
4151 uint8_t tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
4152 uint8_t bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
4153 uint16_t refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
4154 uint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
4155 uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
4156 struct ixgbe_dcb_tc_config *tc;
4157 uint32_t max_frame = dev->data->mtu + RTE_ETHER_HDR_LEN +
4158 RTE_ETHER_CRC_LEN;
4159 struct ixgbe_hw *hw =
4160 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4161 struct ixgbe_bw_conf *bw_conf =
4162 IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
4164 switch (dev->data->dev_conf.rxmode.mq_mode) {
4165 case ETH_MQ_RX_VMDQ_DCB:
4166 dcb_config->vt_mode = true;
4167 if (hw->mac.type != ixgbe_mac_82598EB) {
4168 config_dcb_rx = DCB_RX_CONFIG;
4170 /* Get DCB and VT RX configuration parameters from rte_eth_conf */
4173 ixgbe_vmdq_dcb_rx_config(dev, dcb_config);
4174 /*Configure general VMDQ and DCB RX parameters*/
4175 ixgbe_vmdq_dcb_configure(dev);
4179 case ETH_MQ_RX_DCB_RSS:
4180 dcb_config->vt_mode = false;
4181 config_dcb_rx = DCB_RX_CONFIG;
4182 /* Get DCB RX configuration parameters from rte_eth_conf */
4183 ixgbe_dcb_rx_config(dev, dcb_config);
4184 /*Configure general DCB RX parameters*/
4185 ixgbe_dcb_rx_hw_config(dev, dcb_config);
4188 PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
4191 switch (dev->data->dev_conf.txmode.mq_mode) {
4192 case ETH_MQ_TX_VMDQ_DCB:
4193 dcb_config->vt_mode = true;
4194 config_dcb_tx = DCB_TX_CONFIG;
4195 /* Get DCB and VT TX configuration parameters from rte_eth_conf */
4198 ixgbe_dcb_vt_tx_config(dev, dcb_config);
4199 /*Configure general VMDQ and DCB TX parameters*/
4200 ixgbe_vmdq_dcb_hw_tx_config(dev, dcb_config);
4204 dcb_config->vt_mode = false;
4205 config_dcb_tx = DCB_TX_CONFIG;
4206 /*get DCB TX configuration parameters from rte_eth_conf*/
4207 ixgbe_dcb_tx_config(dev, dcb_config);
4208 /*Configure general DCB TX parameters*/
4209 ixgbe_dcb_tx_hw_config(dev, dcb_config);
4212 PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration");
4216 nb_tcs = dcb_config->num_tcs.pfc_tcs;
4218 ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
4219 if (nb_tcs == ETH_4_TCS) {
4220 /* Avoid un-configured priority mapping to TC0 */
4221 uint8_t j = 4;
4222 uint8_t mask = 0xFF;
4224 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
4225 mask = (uint8_t)(mask & (~(1 << map[i])));
4226 for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
4227 if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
4228 map[j++] = i;
4229 mask >>= 1;
4231 /* Re-configure 4 TCs BW */
4232 for (i = 0; i < nb_tcs; i++) {
4233 tc = &dcb_config->tc_config[i];
4234 if (bw_conf->tc_num != nb_tcs)
4235 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
4236 (uint8_t)(100 / nb_tcs);
4237 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
4238 (uint8_t)(100 / nb_tcs);
4240 for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
4241 tc = &dcb_config->tc_config[i];
4242 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
4243 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 0;
4246 /* Re-configure 8 TCs BW */
4247 for (i = 0; i < nb_tcs; i++) {
4248 tc = &dcb_config->tc_config[i];
4249 if (bw_conf->tc_num != nb_tcs)
4250 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
4251 (uint8_t)(100 / nb_tcs + (i & 1));
4252 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
4253 (uint8_t)(100 / nb_tcs + (i & 1));
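/* With nb_tcs == 8 the expression 100 / nb_tcs + (i & 1) alternates
 * between 12 and 13 percent (12 for even i, 13 for odd i), so the
 * eight traffic classes sum to exactly 100.
 */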
4257 switch (hw->mac.type) {
4258 case ixgbe_mac_X550:
4259 case ixgbe_mac_X550EM_x:
4260 case ixgbe_mac_X550EM_a:
4261 rx_buffer_size = X550_RX_BUFFER_SIZE;
4264 rx_buffer_size = NIC_RX_BUFFER_SIZE;
4268 if (config_dcb_rx) {
4269 /* Set RX buffer size */
4270 pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
4271 uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT;
4273 for (i = 0; i < nb_tcs; i++) {
4274 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
4276 /* zero alloc all unused TCs */
4277 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
4278 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
4281 if (config_dcb_tx) {
4282 /* Only support an equally distributed
4283 * Tx packet buffer strategy.
4285 uint32_t txpktsize = IXGBE_TXPBSIZE_MAX / nb_tcs;
4286 uint32_t txpbthresh = (txpktsize / DCB_TX_PB) - IXGBE_TXPKT_SIZE_MAX;
4288 for (i = 0; i < nb_tcs; i++) {
4289 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
4290 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
4292 /* Clear unused TCs, if any, to zero buffer size*/
4293 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
4294 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
4295 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
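/* Rough illustration, assuming IXGBE_TXPBSIZE_MAX is the usual 160 KB
 * aggregate Tx packet buffer and IXGBE_TXPKT_SIZE_MAX is 10 (values
 * taken from the base driver headers): with nb_tcs == 4, txpktsize is
 * 40 KB per TC and txpbthresh = 40960 / 1024 - 10 = 30.
 */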
4299 /*Calculates traffic class credits*/
4300 ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
4301 IXGBE_DCB_TX_CONFIG);
4302 ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
4303 IXGBE_DCB_RX_CONFIG);
4305 if (config_dcb_rx) {
4306 /* Unpack CEE standard containers */
4307 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_RX_CONFIG, refill);
4308 ixgbe_dcb_unpack_max_cee(dcb_config, max);
4309 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_RX_CONFIG, bwgid);
4310 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_RX_CONFIG, tsa);
4311 /* Configure PG(ETS) RX */
4312 ixgbe_dcb_hw_arbite_rx_config(hw, refill, max, bwgid, tsa, map);
4315 if (config_dcb_tx) {
4316 /* Unpack CEE standard containers */
4317 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
4318 ixgbe_dcb_unpack_max_cee(dcb_config, max);
4319 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
4320 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
4321 /* Configure PG(ETS) TX */
4322 ixgbe_dcb_hw_arbite_tx_config(hw, refill, max, bwgid, tsa, map);
4325 /*Configure queue statistics registers*/
4326 ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
4328 /* Check if the PFC is supported */
4329 if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
4330 pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
4331 for (i = 0; i < nb_tcs; i++) {
4333 * If the TC count is 8, and the default high_water is 48,
4334 * the low_water defaults to 16.
4336 hw->fc.high_water[i] = (pbsize * 3) / 4;
4337 hw->fc.low_water[i] = pbsize / 4;
4338 /* Enable pfc for this TC */
4339 tc = &dcb_config->tc_config[i];
4340 tc->pfc = ixgbe_dcb_pfc_enabled;
4342 ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
4343 if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
4344 pfc_en &= 0x0F;
4345 ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
4352 * ixgbe_configure_dcb - Configure DCB Hardware
4353 * @dev: pointer to rte_eth_dev
4355 void ixgbe_configure_dcb(struct rte_eth_dev *dev)
4357 struct ixgbe_dcb_config *dcb_cfg =
4358 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
4359 struct rte_eth_conf *dev_conf = &(dev->data->dev_conf);
4361 PMD_INIT_FUNC_TRACE();
4363 /* check support mq_mode for DCB */
4364 if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
4365 (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) &&
4366 (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS))
4367 return;
4369 if (dev->data->nb_rx_queues > ETH_DCB_NUM_QUEUES)
4370 return;
4372 /** Configure DCB hardware **/
4373 ixgbe_dcb_hw_configure(dev, dcb_cfg);
4377 * VMDq is only supported on 10 GbE NICs.
4380 ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
4382 struct rte_eth_vmdq_rx_conf *cfg;
4383 struct ixgbe_hw *hw;
4384 enum rte_eth_nb_pools num_pools;
4385 uint32_t mrqc, vt_ctl, vlanctrl;
4389 PMD_INIT_FUNC_TRACE();
4390 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4391 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
4392 num_pools = cfg->nb_queue_pools;
4394 ixgbe_rss_disable(dev);
4396 /* MRQC: enable vmdq */
4397 mrqc = IXGBE_MRQC_VMDQEN;
4398 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
4400 /* PFVTCTL: turn on virtualisation and set the default pool */
4401 vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
4402 if (cfg->enable_default_pool)
4403 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
4404 else
4405 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
4407 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
4409 for (i = 0; i < (int)num_pools; i++) {
4410 vmolr = ixgbe_convert_vm_rx_mask_to_val(cfg->rx_mode, vmolr);
4411 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
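/* VMOLR holds the per-pool Rx filtering/offload behaviour; the helper
 * above translates the generic ETH_VMDQ_ACCEPT_* bits from cfg->rx_mode
 * into the corresponding hardware bits for each pool.
 */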
4414 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
4415 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4416 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
4417 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
4419 /* VFTA - enable all vlan filters */
4420 for (i = 0; i < NUM_VFTA_REGISTERS; i++)
4421 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), UINT32_MAX);
4423 /* VFRE: pool enabling for receive - 64 */
4424 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), UINT32_MAX);
4425 if (num_pools == ETH_64_POOLS)
4426 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), UINT32_MAX);
4429 * MPSAR - allow pools to read specific mac addresses
4430 * In this case, all pools should be able to read from mac addr 0
4432 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), UINT32_MAX);
4433 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), UINT32_MAX);
4435 /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
4436 for (i = 0; i < cfg->nb_pool_maps; i++) {
4437 /* set vlan id in VF register and set the valid bit */
4438 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN |
4439 (cfg->pool_map[i].vlan_id & IXGBE_RXD_VLAN_ID_MASK)));
4441 * Put the allowed pools in VFB reg. As we only have 16 or 64
4442 * pools, we only need to use the first half of the register
4445 if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0)
4446 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i * 2),
4447 (cfg->pool_map[i].pools & UINT32_MAX));
4448 else
4449 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((i * 2 + 1)),
4450 ((cfg->pool_map[i].pools >> 32) & UINT32_MAX));
4454 /* PFDMA Tx General Switch Control Enables VMDQ loopback */
4455 if (cfg->enable_loop_back) {
4456 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
4457 for (i = 0; i < RTE_IXGBE_VMTXSW_REGISTER_COUNT; i++)
4458 IXGBE_WRITE_REG(hw, IXGBE_VMTXSW(i), UINT32_MAX);
4461 IXGBE_WRITE_FLUSH(hw);
4465 * ixgbe_vmdq_tx_hw_configure - Configure general VMDq TX parameters
4466 * @hw: pointer to hardware structure
4469 ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
4474 PMD_INIT_FUNC_TRACE();
4475 /*PF VF Transmit Enable*/
4476 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), UINT32_MAX);
4477 IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), UINT32_MAX);
4479 /* Disable the Tx desc arbiter so that MTQC can be changed */
4480 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
4481 reg |= IXGBE_RTTDCS_ARBDIS;
4482 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
4484 reg = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
4485 IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
4487 /* Disable drop for all queues */
4488 for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
4489 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4490 (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
4492 /* Enable the Tx desc arbiter */
4493 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
4494 reg &= ~IXGBE_RTTDCS_ARBDIS;
4495 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
4497 IXGBE_WRITE_FLUSH(hw);
4500 static int __rte_cold
4501 ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
4503 struct ixgbe_rx_entry *rxe = rxq->sw_ring;
4507 /* Initialize software ring entries */
4508 for (i = 0; i < rxq->nb_rx_desc; i++) {
4509 volatile union ixgbe_adv_rx_desc *rxd;
4510 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
4512 if (mbuf == NULL) {
4513 PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
4514 (unsigned) rxq->queue_id);
4515 return -ENOMEM;
4516 }
4518 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
4519 mbuf->port = rxq->port_id;
4521 dma_addr =
4522 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
4523 rxd = &rxq->rx_ring[i];
4524 rxd->read.hdr_addr = 0;
4525 rxd->read.pkt_addr = dma_addr;
4533 ixgbe_config_vf_rss(struct rte_eth_dev *dev)
4535 struct ixgbe_hw *hw;
4538 ixgbe_rss_configure(dev);
4540 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4542 /* MRQC: enable VF RSS */
4543 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
4544 mrqc &= ~IXGBE_MRQC_MRQE_MASK;
4545 switch (RTE_ETH_DEV_SRIOV(dev).active) {
4546 case ETH_64_POOLS:
4547 mrqc |= IXGBE_MRQC_VMDQRSS64EN;
4548 break;
4550 case ETH_32_POOLS:
4551 mrqc |= IXGBE_MRQC_VMDQRSS32EN;
4552 break;
4554 default:
4555 PMD_INIT_LOG(ERR, "Invalid pool number in IOV mode with VMDQ RSS");
4556 return -EINVAL;
4559 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
4565 ixgbe_config_vf_default(struct rte_eth_dev *dev)
4567 struct ixgbe_hw *hw =
4568 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4570 switch (RTE_ETH_DEV_SRIOV(dev).active) {
4571 case ETH_64_POOLS:
4572 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
4573 IXGBE_MRQC_VMDQEN);
4574 break;
4576 case ETH_32_POOLS:
4577 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
4578 IXGBE_MRQC_VMDQRT4TCEN);
4579 break;
4581 case ETH_16_POOLS:
4582 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
4583 IXGBE_MRQC_VMDQRT8TCEN);
4584 break;
4585 default:
4586 PMD_INIT_LOG(ERR,
4587 "invalid pool number in IOV mode");
4594 ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
4596 struct ixgbe_hw *hw =
4597 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4599 if (hw->mac.type == ixgbe_mac_82598EB)
4602 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
4604 * SRIOV inactive scheme
4605 * any DCB/RSS w/o VMDq multi-queue setting
4607 switch (dev->data->dev_conf.rxmode.mq_mode) {
4609 case ETH_MQ_RX_DCB_RSS:
4610 case ETH_MQ_RX_VMDQ_RSS:
4611 ixgbe_rss_configure(dev);
4614 case ETH_MQ_RX_VMDQ_DCB:
4615 ixgbe_vmdq_dcb_configure(dev);
4618 case ETH_MQ_RX_VMDQ_ONLY:
4619 ixgbe_vmdq_rx_hw_configure(dev);
4622 case ETH_MQ_RX_NONE:
4624 /* if mq_mode is none, disable rss mode.*/
4625 ixgbe_rss_disable(dev);
4629 /* SRIOV active scheme
4630 * Support RSS together with SRIOV.
4632 switch (dev->data->dev_conf.rxmode.mq_mode) {
4634 case ETH_MQ_RX_VMDQ_RSS:
4635 ixgbe_config_vf_rss(dev);
4637 case ETH_MQ_RX_VMDQ_DCB:
4639 /* In SRIOV, the configuration is the same as VMDq case */
4640 ixgbe_vmdq_dcb_configure(dev);
4642 /* DCB/RSS together with SRIOV is not supported */
4643 case ETH_MQ_RX_VMDQ_DCB_RSS:
4644 case ETH_MQ_RX_DCB_RSS:
4646 "Could not support DCB/RSS with VMDq & SRIOV");
4649 ixgbe_config_vf_default(dev);
4658 ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
4660 struct ixgbe_hw *hw =
4661 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4665 if (hw->mac.type == ixgbe_mac_82598EB)
4668 /* disable arbiter before setting MTQC */
4669 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
4670 rttdcs |= IXGBE_RTTDCS_ARBDIS;
4671 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
4673 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
4675 * SRIOV inactive scheme
4676 * any DCB w/o VMDq multi-queue setting
4678 if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
4679 ixgbe_vmdq_tx_hw_configure(hw);
4681 mtqc = IXGBE_MTQC_64Q_1PB;
4682 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
4685 switch (RTE_ETH_DEV_SRIOV(dev).active) {
4688 * SRIOV active scheme
4689 * FIXME if support DCB together with VMDq & SRIOV
4691 case ETH_64_POOLS:
4692 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
4693 break;
4694 case ETH_32_POOLS:
4695 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_32VF;
4696 break;
4697 case ETH_16_POOLS:
4698 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_RT_ENA |
4699 IXGBE_MTQC_8TC_8TQ;
4700 break;
4701 default:
4702 mtqc = IXGBE_MTQC_64Q_1PB;
4703 PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
4705 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
4708 /* re-enable arbiter */
4709 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
4710 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
4716 * ixgbe_get_rscctl_maxdesc - Calculate the RSCCTL[n].MAXDESC for PF
4718 * Return the RSCCTL[n].MAXDESC for 82599 and x540 PF devices according to the
4719 * spec rev. 3.0 chapter 8.2.3.8.13.
4721 * @pool Memory pool of the Rx queue
4723 static inline uint32_t
4724 ixgbe_get_rscctl_maxdesc(struct rte_mempool *pool)
4726 struct rte_pktmbuf_pool_private *mp_priv = rte_mempool_get_priv(pool);
4728 /* MAXDESC * SRRCTL.BSIZEPKT must not exceed 64 KB minus one */
4729 uint16_t maxdesc =
4730 RTE_IPV4_MAX_PKT_LEN /
4731 (mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);
4733 if (maxdesc >= 16)
4734 return IXGBE_RSCCTL_MAXDESC_16;
4735 else if (maxdesc >= 8)
4736 return IXGBE_RSCCTL_MAXDESC_8;
4737 else if (maxdesc >= 4)
4738 return IXGBE_RSCCTL_MAXDESC_4;
4740 return IXGBE_RSCCTL_MAXDESC_1;
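/* For instance, with a typical 2 KB data room after headroom
 * (e.g. RTE_MBUF_DEFAULT_BUF_SIZE), maxdesc = 65535 / 2048 = 31,
 * which clamps to IXGBE_RSCCTL_MAXDESC_16: 16 descriptors of 2 KB
 * keep one aggregation safely below the 64 KB minus one limit.
 */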
4744 * ixgbe_set_ivar - Setup the correct IVAR register for a particular MSIX
4745 * interrupt.
4747 * (Taken from FreeBSD tree)
4748 * (yes this is all very magic and confusing :)
4751 * @entry the register array entry
4752 * @vector the MSIX vector for this queue
4756 ixgbe_set_ivar(struct rte_eth_dev *dev, u8 entry, u8 vector, s8 type)
4758 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4761 vector |= IXGBE_IVAR_ALLOC_VAL;
4763 switch (hw->mac.type) {
4765 case ixgbe_mac_82598EB:
4766 if (type == -1)
4767 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4768 else
4769 entry += (type * 64);
4770 index = (entry >> 2) & 0x1F;
4771 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4772 ivar &= ~(0xFF << (8 * (entry & 0x3)));
4773 ivar |= (vector << (8 * (entry & 0x3)));
4774 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4777 case ixgbe_mac_82599EB:
4778 case ixgbe_mac_X540:
4779 if (type == -1) { /* MISC IVAR */
4780 index = (entry & 1) * 8;
4781 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4782 ivar &= ~(0xFF << index);
4783 ivar |= (vector << index);
4784 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4785 } else { /* RX/TX IVARS */
4786 index = (16 * (entry & 1)) + (8 * type);
4787 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4788 ivar &= ~(0xFF << index);
4789 ivar |= (vector << index);
4790 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
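/* Worked example for 82599: mapping Rx queue 5 (type 0) gives
 * index = 16 * (5 & 1) + 8 * 0 = 16, so the vector number lands in
 * bits 23:16 of IVAR(5 >> 1) = IVAR(2). Each IVAR register thus
 * packs four 8-bit entries (Rx and Tx for two queues).
 */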
4801 ixgbe_set_rx_function(struct rte_eth_dev *dev)
4803 uint16_t i, rx_using_sse;
4804 struct ixgbe_adapter *adapter = dev->data->dev_private;
4807 * In order to allow Vector Rx there are a few configuration
4808 * conditions to be met and Rx Bulk Allocation should be allowed.
4810 if (ixgbe_rx_vec_dev_conf_condition_check(dev) ||
4811 !adapter->rx_bulk_alloc_allowed ||
4812 rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128) {
4813 PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx "
4814 "preconditions",
4815 dev->data->port_id);
4817 adapter->rx_vec_allowed = false;
4821 * Initialize the appropriate LRO callback.
4823 * If all queues satisfy the bulk allocation preconditions
4824 * (hw->rx_bulk_alloc_allowed is TRUE) then we may use bulk allocation.
4825 * Otherwise use a single allocation version.
4827 if (dev->data->lro) {
4828 if (adapter->rx_bulk_alloc_allowed) {
4829 PMD_INIT_LOG(DEBUG, "LRO is requested. Using a bulk "
4830 "allocation version");
4831 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
4833 PMD_INIT_LOG(DEBUG, "LRO is requested. Using a single "
4834 "allocation version");
4835 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
4837 } else if (dev->data->scattered_rx) {
4839 * Set the non-LRO scattered callback: there are Vector and
4840 * single allocation versions.
4842 if (adapter->rx_vec_allowed) {
4843 PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
4844 "callback (port=%d).",
4845 dev->data->port_id);
4847 dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
4848 } else if (adapter->rx_bulk_alloc_allowed) {
4849 PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
4850 "allocation callback (port=%d).",
4851 dev->data->port_id);
4852 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
4854 PMD_INIT_LOG(DEBUG, "Using Regualr (non-vector, "
4855 "single allocation) "
4856 "Scattered Rx callback "
4858 dev->data->port_id);
4860 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
4863 * Below we set "simple" callbacks according to port/queues parameters.
4864 * If parameters allow we are going to choose between the following
4865 * options:
4866 * - Vector
4867 * - Bulk Allocation
4868 * - Single buffer allocation (the simplest one)
4870 } else if (adapter->rx_vec_allowed) {
4871 PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
4872 "burst size no less than %d (port=%d).",
4873 RTE_IXGBE_DESCS_PER_LOOP,
4874 dev->data->port_id);
4876 dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
4877 } else if (adapter->rx_bulk_alloc_allowed) {
4878 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
4879 "satisfied. Rx Burst Bulk Alloc function "
4880 "will be used on port=%d.",
4881 dev->data->port_id);
4883 dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
4885 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
4886 "satisfied, or Scattered Rx is requested "
4887 "(port=%d).",
4888 dev->data->port_id);
4890 dev->rx_pkt_burst = ixgbe_recv_pkts;
4893 /* Propagate information about RX function choice through all queues. */
4895 rx_using_sse =
4896 (dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec ||
4897 dev->rx_pkt_burst == ixgbe_recv_pkts_vec);
4899 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4900 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
4902 rxq->rx_using_sse = rx_using_sse;
4903 #ifdef RTE_LIB_SECURITY
4904 rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
4905 DEV_RX_OFFLOAD_SECURITY);
4911 * ixgbe_set_rsc - configure RSC related port HW registers
4913 * Configures the port's RSC related registers according to chapter 4.6.7.2
4914 * of the 82599 spec (x540 configuration is virtually the same).
4918 * Returns 0 in case of success or a non-zero error code
4921 ixgbe_set_rsc(struct rte_eth_dev *dev)
4923 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
4924 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4925 struct rte_eth_dev_info dev_info = { 0 };
4926 bool rsc_capable = false;
4932 dev->dev_ops->dev_infos_get(dev, &dev_info);
4933 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
4936 if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
4937 PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
4938 "support it");
4939 return -EINVAL;
4942 /* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
4944 if ((rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC) &&
4945 (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
4947 * According to chapter 4.6.7.2.1 of the spec rev. 3.0,
4948 * RSC configuration requires HW CRC stripping to be
4949 * enabled. If the user requested both HW CRC stripping off
4950 * and RSC on, return an error.
4952 PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC "
4953 "is disabled");
4954 return -EINVAL;
4957 /* RFCTL configuration */
4958 rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
4959 if ((rsc_capable) && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
4960 rfctl &= ~IXGBE_RFCTL_RSC_DIS;
4962 rfctl |= IXGBE_RFCTL_RSC_DIS;
4963 /* disable NFS filtering */
4964 rfctl |= IXGBE_RFCTL_NFSW_DIS | IXGBE_RFCTL_NFSR_DIS;
4965 IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
4967 /* If LRO hasn't been requested - we are done here. */
4968 if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
4969 return 0;
4971 /* Set RDRXCTL.RSCACKC bit */
4972 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
4973 rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
4974 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
4976 /* Per-queue RSC configuration (chapter 4.6.7.2.2 of 82599 Spec) */
4977 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4978 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
4979 uint32_t srrctl =
4980 IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxq->reg_idx));
4981 uint32_t rscctl =
4982 IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxq->reg_idx));
4983 uint32_t psrtype =
4984 IXGBE_READ_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx));
4985 uint32_t eitr =
4986 IXGBE_READ_REG(hw, IXGBE_EITR(rxq->reg_idx));
4989 * ixgbe PMD doesn't support header-split at the moment.
4991 * Following the 4.6.7.2.1 chapter of the 82599/x540
4992 * Spec if RSC is enabled the SRRCTL[n].BSIZEHEADER
4993 * should be configured even if header split is not
4994 * enabled. We will configure it to 128 bytes, following the
4995 * recommendation in the spec.
4997 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
4998 srrctl |= (128 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
4999 IXGBE_SRRCTL_BSIZEHDR_MASK;
5002 * TODO: Consider setting the Receive Descriptor Minimum
5003 * Threshold Size for an RSC case. This is not an obviously
5004 * beneficial option, but it may be worth considering...
5007 rscctl |= IXGBE_RSCCTL_RSCEN;
5008 rscctl |= ixgbe_get_rscctl_maxdesc(rxq->mb_pool);
5009 psrtype |= IXGBE_PSRTYPE_TCPHDR;
5012 * RSC: Set ITR interval corresponding to 2K ints/s.
5014 * Full-sized RSC aggregations for a 10Gb/s link will
5015 * arrive at about 20K aggregation/s rate.
5017 * A 2K ints/s rate will cause only 10% of the
5018 * aggregations to be closed due to the interrupt timer
5019 * expiration for a streaming at wire-speed case.
5021 * For a sparse streaming case this setting will yield
5022 * at most 500us latency for a single RSC aggregation.
5024 eitr &= ~IXGBE_EITR_ITR_INT_MASK;
5025 eitr |= IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT);
5026 eitr |= IXGBE_EITR_CNT_WDIS;
5028 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
5029 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxq->reg_idx), rscctl);
5030 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
5031 IXGBE_WRITE_REG(hw, IXGBE_EITR(rxq->reg_idx), eitr);
5034 * RSC requires the mapping of the queue to the
5035 * interrupt vector.
5036 */
5037 ixgbe_set_ivar(dev, rxq->reg_idx, i, 0);
5042 PMD_INIT_LOG(DEBUG, "enabling LRO mode");
5048 * Initializes Receive Unit.
5051 ixgbe_dev_rx_init(struct rte_eth_dev *dev)
5053 struct ixgbe_hw *hw;
5054 struct ixgbe_rx_queue *rxq;
5065 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
5068 PMD_INIT_FUNC_TRACE();
5069 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5072 * Make sure receives are disabled while setting
5073 * up the RX context (registers, descriptor rings, etc.).
5075 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
5076 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
5078 /* Enable receipt of broadcasted frames */
5079 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
5080 fctrl |= IXGBE_FCTRL_BAM;
5081 fctrl |= IXGBE_FCTRL_DPF;
5082 fctrl |= IXGBE_FCTRL_PMCF;
5083 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
5086 * Configure CRC stripping, if any.
5088 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
5089 if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
5090 hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
5092 hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
5095 * Configure jumbo frame support, if any.
5097 if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
5098 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
5099 maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
5100 maxfrs &= 0x0000FFFF;
5101 maxfrs |= (rx_conf->max_rx_pkt_len << 16);
5102 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
5104 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
5107 * If loopback mode is configured, set LPBK bit.
5109 if (dev->data->dev_conf.lpbk_mode != 0) {
5110 rc = ixgbe_check_supported_loopback_mode(dev);
5112 PMD_INIT_LOG(ERR, "Unsupported loopback mode");
5115 hlreg0 |= IXGBE_HLREG0_LPBK;
5117 hlreg0 &= ~IXGBE_HLREG0_LPBK;
5120 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
5123 * Assume no header split and no VLAN strip support
5124 * on any Rx queue first.
5126 rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
5127 /* Setup RX queues */
5128 for (i = 0; i < dev->data->nb_rx_queues; i++) {
5129 rxq = dev->data->rx_queues[i];
5132 * Reset crc_len in case it was changed after queue setup by a
5133 * call to configure.
5135 if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
5136 rxq->crc_len = RTE_ETHER_CRC_LEN;
5140 /* Setup the Base and Length of the Rx Descriptor Rings */
5141 bus_addr = rxq->rx_ring_phys_addr;
5142 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rxq->reg_idx),
5143 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
5144 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rxq->reg_idx),
5145 (uint32_t)(bus_addr >> 32));
5146 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rxq->reg_idx),
5147 rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
5148 IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
5149 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), 0);
5151 /* Configure the SRRCTL register */
5152 srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
5154 /* Set if packets are dropped when no descriptors available */
5155 if (rxq->drop_en)
5156 srrctl |= IXGBE_SRRCTL_DROP_EN;
5159 * Configure the RX buffer size in the BSIZEPACKET field of
5160 * the SRRCTL register of the queue.
5161 * The value is in 1 KB resolution. Valid values can be from
5162 * 1 KB to 16 KB.
5163 */
5164 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
5165 RTE_PKTMBUF_HEADROOM);
5166 srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
5167 IXGBE_SRRCTL_BSIZEPKT_MASK);
5169 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
5171 buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
5172 IXGBE_SRRCTL_BSIZEPKT_SHIFT);
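/* E.g. a 2 KB mbuf data room yields BSIZEPACKET = 2048 >> 10 = 2,
 * and reading the field back gives buf_size = 2 << 10 = 2048 bytes,
 * the granularity the scattered-Rx decision below is based on.
 */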
5174 /* Account for two VLAN tags (QinQ) when checking the buffer size */
5175 if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
5176 2 * IXGBE_VLAN_TAG_SIZE > buf_size)
5177 dev->data->scattered_rx = 1;
5178 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
5179 rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
5182 if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
5183 dev->data->scattered_rx = 1;
5186 * Device configured with multiple RX queues.
5188 ixgbe_dev_mq_rx_configure(dev);
5191 * Setup the Checksum Register.
5192 * Disable Full-Packet Checksum which is mutually exclusive with RSS.
5193 * Enable IP/L4 checksum computation by hardware if requested to do so.
5195 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
5196 rxcsum |= IXGBE_RXCSUM_PCSD;
5197 if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
5198 rxcsum |= IXGBE_RXCSUM_IPPCSE;
5200 rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
5202 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
5204 if (hw->mac.type == ixgbe_mac_82599EB ||
5205 hw->mac.type == ixgbe_mac_X540) {
5206 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
5207 if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
5208 rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
5210 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
5211 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
5212 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
5215 rc = ixgbe_set_rsc(dev);
5219 ixgbe_set_rx_function(dev);
5225 * Initializes Transmit Unit.
5228 ixgbe_dev_tx_init(struct rte_eth_dev *dev)
5230 struct ixgbe_hw *hw;
5231 struct ixgbe_tx_queue *txq;
5237 PMD_INIT_FUNC_TRACE();
5238 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5240 /* Enable TX CRC (checksum offload requirement) and hw padding
5241 * (TSO requirement)
5242 */
5243 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
5244 hlreg0 |= (IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN);
5245 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
5247 /* Setup the Base and Length of the Tx Descriptor Rings */
5248 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5249 txq = dev->data->tx_queues[i];
5251 bus_addr = txq->tx_ring_phys_addr;
5252 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(txq->reg_idx),
5253 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
5254 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(txq->reg_idx),
5255 (uint32_t)(bus_addr >> 32));
5256 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(txq->reg_idx),
5257 txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
5258 /* Setup the HW Tx Head and TX Tail descriptor pointers */
5259 IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
5260 IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
5263 * Disable Tx Head Writeback RO bit, since this hoses
5264 * bookkeeping if things aren't delivered in order.
5266 switch (hw->mac.type) {
5267 case ixgbe_mac_82598EB:
5268 txctrl = IXGBE_READ_REG(hw,
5269 IXGBE_DCA_TXCTRL(txq->reg_idx));
5270 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
5271 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx),
5275 case ixgbe_mac_82599EB:
5276 case ixgbe_mac_X540:
5277 case ixgbe_mac_X550:
5278 case ixgbe_mac_X550EM_x:
5279 case ixgbe_mac_X550EM_a:
5281 txctrl = IXGBE_READ_REG(hw,
5282 IXGBE_DCA_TXCTRL_82599(txq->reg_idx));
5283 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
5284 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx),
5290 /* Device configured with multiple TX queues. */
5291 ixgbe_dev_mq_tx_configure(dev);
5295 * Check if requested loopback mode is supported
5298 ixgbe_check_supported_loopback_mode(struct rte_eth_dev *dev)
5300 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5302 if (dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_TX_RX)
5303 if (hw->mac.type == ixgbe_mac_82599EB ||
5304 hw->mac.type == ixgbe_mac_X540 ||
5305 hw->mac.type == ixgbe_mac_X550 ||
5306 hw->mac.type == ixgbe_mac_X550EM_x ||
5307 hw->mac.type == ixgbe_mac_X550EM_a)
5314 * Set up link for 82599 loopback mode Tx->Rx.
5316 static inline void __rte_cold
5317 ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
5319 PMD_INIT_FUNC_TRACE();
5321 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
5322 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM) !=
5323 IXGBE_SUCCESS) {
5324 PMD_INIT_LOG(ERR, "Could not enable loopback mode");
5325 return;
5326 }
5327 }
5331 IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
5333 IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU);
5334 ixgbe_reset_pipeline_82599(hw);
5336 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
5342 * Start Transmit and Receive Units.
5345 ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
5347 struct ixgbe_hw *hw;
5348 struct ixgbe_tx_queue *txq;
5349 struct ixgbe_rx_queue *rxq;
5356 PMD_INIT_FUNC_TRACE();
5357 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5359 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5360 txq = dev->data->tx_queues[i];
5361 /* Setup Transmit Threshold Registers */
5362 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
5363 txdctl |= txq->pthresh & 0x7F;
5364 txdctl |= ((txq->hthresh & 0x7F) << 8);
5365 txdctl |= ((txq->wthresh & 0x7F) << 16);
5366 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
5369 if (hw->mac.type != ixgbe_mac_82598EB) {
5370 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
5371 dmatxctl |= IXGBE_DMATXCTL_TE;
5372 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
5375 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5376 txq = dev->data->tx_queues[i];
5377 if (!txq->tx_deferred_start) {
5378 ret = ixgbe_dev_tx_queue_start(dev, i);
5384 for (i = 0; i < dev->data->nb_rx_queues; i++) {
5385 rxq = dev->data->rx_queues[i];
5386 if (!rxq->rx_deferred_start) {
5387 ret = ixgbe_dev_rx_queue_start(dev, i);
5393 /* Enable Receive engine */
5394 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
5395 if (hw->mac.type == ixgbe_mac_82598EB)
5396 rxctrl |= IXGBE_RXCTRL_DMBYPS;
5397 rxctrl |= IXGBE_RXCTRL_RXEN;
5398 hw->mac.ops.enable_rx_dma(hw, rxctrl);
5400 /* If loopback mode is enabled, set up the link accordingly */
5401 if (dev->data->dev_conf.lpbk_mode != 0) {
5402 if (hw->mac.type == ixgbe_mac_82599EB)
5403 ixgbe_setup_loopback_link_82599(hw);
5404 else if (hw->mac.type == ixgbe_mac_X540 ||
5405 hw->mac.type == ixgbe_mac_X550 ||
5406 hw->mac.type == ixgbe_mac_X550EM_x ||
5407 hw->mac.type == ixgbe_mac_X550EM_a)
5408 ixgbe_setup_loopback_link_x540_x550(hw, true);
5411 #ifdef RTE_LIB_SECURITY
5412 if ((dev->data->dev_conf.rxmode.offloads &
5413 DEV_RX_OFFLOAD_SECURITY) ||
5414 (dev->data->dev_conf.txmode.offloads &
5415 DEV_TX_OFFLOAD_SECURITY)) {
5416 ret = ixgbe_crypto_enable_ipsec(dev);
5419 "ixgbe_crypto_enable_ipsec fails with %d.",
5430 * Start Receive Units for specified queue.
5433 ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
5435 struct ixgbe_hw *hw;
5436 struct ixgbe_rx_queue *rxq;
5440 PMD_INIT_FUNC_TRACE();
5441 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5443 rxq = dev->data->rx_queues[rx_queue_id];
5445 /* Allocate buffers for descriptor rings */
5446 if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
5447 PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
5448 rx_queue_id);
5449 return -1;
5450 }
5451 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5452 rxdctl |= IXGBE_RXDCTL_ENABLE;
5453 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
5455 /* Wait until RX Enable ready */
5456 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5457 do {
5458 rte_delay_ms(1);
5459 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5460 } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
5461 if (!poll_ms)
5462 PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", rx_queue_id);
5464 IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
5465 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
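/* Head = 0 and tail = nb_rx_desc - 1 hands all but one descriptor to
 * the hardware; keeping one descriptor unsubmitted means head == tail
 * can unambiguously denote an empty ring rather than a full one.
 */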
5466 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
5472 * Stop Receive Units for specified queue.
5475 ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
5477 struct ixgbe_hw *hw;
5478 struct ixgbe_adapter *adapter = dev->data->dev_private;
5479 struct ixgbe_rx_queue *rxq;
5483 PMD_INIT_FUNC_TRACE();
5484 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5486 rxq = dev->data->rx_queues[rx_queue_id];
5488 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5489 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
5490 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
5492 /* Wait until RX Enable bit clear */
5493 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5494 do {
5495 rte_delay_ms(1);
5496 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5497 } while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE));
5498 if (!poll_ms)
5499 PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id);
5501 rte_delay_us(RTE_IXGBE_WAIT_100_US);
5503 ixgbe_rx_queue_release_mbufs(rxq);
5504 ixgbe_reset_rx_queue(adapter, rxq);
5505 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
5512 * Start Transmit Units for specified queue.
5515 ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
5517 struct ixgbe_hw *hw;
5518 struct ixgbe_tx_queue *txq;
5522 PMD_INIT_FUNC_TRACE();
5523 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5525 txq = dev->data->tx_queues[tx_queue_id];
5526 IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
5527 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
5528 txdctl |= IXGBE_TXDCTL_ENABLE;
5529 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
5531 /* Wait until TX Enable ready */
5532 if (hw->mac.type == ixgbe_mac_82599EB) {
5533 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5534 do {
5535 rte_delay_ms(1);
5536 txdctl = IXGBE_READ_REG(hw,
5537 IXGBE_TXDCTL(txq->reg_idx));
5538 } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
5539 if (!poll_ms)
5540 PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d",
5541 tx_queue_id);
5544 IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
5545 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
5551 * Stop Transmit Units for specified queue.
5554 ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
5556 struct ixgbe_hw *hw;
5557 struct ixgbe_tx_queue *txq;
5559 uint32_t txtdh, txtdt;
5562 PMD_INIT_FUNC_TRACE();
5563 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5565 txq = dev->data->tx_queues[tx_queue_id];
5567 /* Wait until TX queue is empty */
5568 if (hw->mac.type == ixgbe_mac_82599EB) {
5569 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5570 do {
5571 rte_delay_us(RTE_IXGBE_WAIT_100_US);
5572 txtdh = IXGBE_READ_REG(hw,
5573 IXGBE_TDH(txq->reg_idx));
5574 txtdt = IXGBE_READ_REG(hw,
5575 IXGBE_TDT(txq->reg_idx));
5576 } while (--poll_ms && (txtdh != txtdt));
5579 "Tx Queue %d is not empty when stopping.",
5583 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
5584 txdctl &= ~IXGBE_TXDCTL_ENABLE;
5585 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
5587 /* Wait until TX Enable bit clear */
5588 if (hw->mac.type == ixgbe_mac_82599EB) {
5589 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5590 do {
5591 rte_delay_ms(1);
5592 txdctl = IXGBE_READ_REG(hw,
5593 IXGBE_TXDCTL(txq->reg_idx));
5594 } while (--poll_ms && (txdctl & IXGBE_TXDCTL_ENABLE));
5595 if (!poll_ms)
5596 PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d",
5597 tx_queue_id);
5600 if (txq->ops != NULL) {
5601 txq->ops->release_mbufs(txq);
5602 txq->ops->reset(txq);
5604 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
5610 ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
5611 struct rte_eth_rxq_info *qinfo)
5613 struct ixgbe_rx_queue *rxq;
5615 rxq = dev->data->rx_queues[queue_id];
5617 qinfo->mp = rxq->mb_pool;
5618 qinfo->scattered_rx = dev->data->scattered_rx;
5619 qinfo->nb_desc = rxq->nb_rx_desc;
5621 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
5622 qinfo->conf.rx_drop_en = rxq->drop_en;
5623 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
5624 qinfo->conf.offloads = rxq->offloads;
5628 ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
5629 struct rte_eth_txq_info *qinfo)
5631 struct ixgbe_tx_queue *txq;
5633 txq = dev->data->tx_queues[queue_id];
5635 qinfo->nb_desc = txq->nb_tx_desc;
5637 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
5638 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
5639 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
5641 qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
5642 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
5643 qinfo->conf.offloads = txq->offloads;
5644 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
5648 * [VF] Initializes Receive Unit.
5651 ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
5653 struct ixgbe_hw *hw;
5654 struct ixgbe_rx_queue *rxq;
5655 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
5657 uint32_t srrctl, psrtype = 0;
5662 PMD_INIT_FUNC_TRACE();
5663 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5665 if (rte_is_power_of_2(dev->data->nb_rx_queues) == 0) {
5666 PMD_INIT_LOG(ERR, "The number of Rx queues is invalid; "
5667 "it should be a power of 2");
5671 if (dev->data->nb_rx_queues > hw->mac.max_rx_queues) {
5672 PMD_INIT_LOG(ERR, "The number of Rx queues is invalid; "
5673 "it should be equal to or less than %d",
5674 hw->mac.max_rx_queues);
5679 * When the VF driver issues a IXGBE_VF_RESET request, the PF driver
5680 * disables the VF receipt of packets if the PF MTU is > 1500.
5681 * This is done to deal with 82599 limitations that impose
5682 * the same MTU on the PF and all VFs.
5683 * Then, the PF driver re-enables VF packet receipt when
5684 * the VF driver issues an IXGBE_VF_SET_LPE request.
5685 * In the meantime, the VF device cannot be used, even if the VF driver
5686 * and the Guest VM network stack are ready to accept packets with a
5687 * size up to the PF MTU.
5688 * As a work-around to this PF behaviour, force the call to
5689 * ixgbevf_rlpml_set_vf even if jumbo frames are not used. This way,
5690 * VF packet reception can work in all cases.
5692 if (ixgbevf_rlpml_set_vf(hw,
5693 (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len)) {
5694 PMD_INIT_LOG(ERR, "Set max packet length to %d failed.",
5695 dev->data->dev_conf.rxmode.max_rx_pkt_len);
5700 * Assume no header split and no VLAN strip support
5701 * on any Rx queue first.
5703 rxmode->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
5704 /* Setup RX queues */
5705 for (i = 0; i < dev->data->nb_rx_queues; i++) {
5706 rxq = dev->data->rx_queues[i];
5708 /* Allocate buffers for descriptor rings */
5709 ret = ixgbe_alloc_rx_queue_mbufs(rxq);
5713 /* Setup the Base and Length of the Rx Descriptor Rings */
5714 bus_addr = rxq->rx_ring_phys_addr;
5716 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
5717 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
5718 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
5719 (uint32_t)(bus_addr >> 32));
5720 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
5721 rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
5722 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0);
5723 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0);
5726 /* Configure the SRRCTL register */
5727 srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
5729 /* Set if packets are dropped when no descriptors available */
5730 if (rxq->drop_en)
5731 srrctl |= IXGBE_SRRCTL_DROP_EN;
5734 * Configure the RX buffer size in the BSIZEPACKET field of
5735 * the SRRCTL register of the queue.
5736 * The value is in 1 KB resolution. Valid values can be from
5737 * 1 KB to 16 KB.
5738 */
5739 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
5740 RTE_PKTMBUF_HEADROOM);
5741 srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
5742 IXGBE_SRRCTL_BSIZEPKT_MASK);
5745 * VF modification to write virtual function SRRCTL register
5747 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), srrctl);
5749 buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
5750 IXGBE_SRRCTL_BSIZEPKT_SHIFT);
5752 if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
5753 /* It adds dual VLAN length for supporting dual VLAN */
5754 (rxmode->max_rx_pkt_len +
5755 2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
5756 if (!dev->data->scattered_rx)
5757 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
5758 dev->data->scattered_rx = 1;
5761 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
5762 rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
5765 /* Set RQPL for VF RSS according to max Rx queue */
5766 psrtype |= (dev->data->nb_rx_queues >> 1) <<
5767 IXGBE_PSRTYPE_RQPL_SHIFT;
5768 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
5770 ixgbe_set_rx_function(dev);
5776 * [VF] Initializes Transmit Unit.
5779 ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
5781 struct ixgbe_hw *hw;
5782 struct ixgbe_tx_queue *txq;
5787 PMD_INIT_FUNC_TRACE();
5788 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5790 /* Setup the Base and Length of the Tx Descriptor Rings */
5791 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5792 txq = dev->data->tx_queues[i];
5793 bus_addr = txq->tx_ring_phys_addr;
5794 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
5795 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
5796 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i),
5797 (uint32_t)(bus_addr >> 32));
5798 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
5799 txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
5800 /* Setup the HW Tx Head and TX Tail descriptor pointers */
5801 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
5802 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
5805 * Disable Tx Head Writeback RO bit, since this hoses
5806 * bookkeeping if things aren't delivered in order.
5808 txctrl = IXGBE_READ_REG(hw,
5809 IXGBE_VFDCA_TXCTRL(i));
5810 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
5811 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i),
5817 * [VF] Start Transmit and Receive Units.
5820 ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
5822 struct ixgbe_hw *hw;
5823 struct ixgbe_tx_queue *txq;
5824 struct ixgbe_rx_queue *rxq;
5830 PMD_INIT_FUNC_TRACE();
5831 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5833 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5834 txq = dev->data->tx_queues[i];
5835 /* Setup Transmit Threshold Registers */
5836 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
5837 txdctl |= txq->pthresh & 0x7F;
5838 txdctl |= ((txq->hthresh & 0x7F) << 8);
5839 txdctl |= ((txq->wthresh & 0x7F) << 16);
5840 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
5843 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5845 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
5846 txdctl |= IXGBE_TXDCTL_ENABLE;
5847 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
5850 /* Wait until TX Enable ready */
5851 poll_ms = 10;
5852 do {
5853 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
5854 } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
5855 if (!poll_ms)
5856 PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i);
5858 for (i = 0; i < dev->data->nb_rx_queues; i++) {
5860 rxq = dev->data->rx_queues[i];
5862 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
5863 rxdctl |= IXGBE_RXDCTL_ENABLE;
5864 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
5866 /* Wait until RX Enable ready */
5867 poll_ms = 10;
5868 do {
5869 rte_delay_ms(1);
5870 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
5871 } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
5872 if (!poll_ms)
5873 PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i);
5875 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1);
5881 ixgbe_rss_conf_init(struct ixgbe_rte_flow_rss_conf *out,
5882 const struct rte_flow_action_rss *in)
5884 if (in->key_len > RTE_DIM(out->key) ||
5885 in->queue_num > RTE_DIM(out->queue))
5886 return -EINVAL;
5887 out->conf = (struct rte_flow_action_rss){
5888 .func = in->func,
5889 .level = in->level,
5890 .types = in->types,
5891 .key_len = in->key_len,
5892 .queue_num = in->queue_num,
5893 .key = memcpy(out->key, in->key, in->key_len),
5894 .queue = memcpy(out->queue, in->queue,
5895 sizeof(*in->queue) * in->queue_num),
5901 ixgbe_action_rss_same(const struct rte_flow_action_rss *comp,
5902 const struct rte_flow_action_rss *with)
5904 return (comp->func == with->func &&
5905 comp->level == with->level &&
5906 comp->types == with->types &&
5907 comp->key_len == with->key_len &&
5908 comp->queue_num == with->queue_num &&
5909 !memcmp(comp->key, with->key, with->key_len) &&
5910 !memcmp(comp->queue, with->queue,
5911 sizeof(*with->queue) * with->queue_num));
5915 ixgbe_config_rss_filter(struct rte_eth_dev *dev,
5916 struct ixgbe_rte_flow_rss_conf *conf, bool add)
5918 struct ixgbe_hw *hw;
5922 uint16_t sp_reta_size;
5924 struct rte_eth_rss_conf rss_conf = {
5925 .rss_key = conf->conf.key_len ?
5926 (void *)(uintptr_t)conf->conf.key : NULL,
5927 .rss_key_len = conf->conf.key_len,
5928 .rss_hf = conf->conf.types,
5930 struct ixgbe_filter_info *filter_info =
5931 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5933 PMD_INIT_FUNC_TRACE();
5934 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5936 sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
5939 if (ixgbe_action_rss_same(&filter_info->rss_info.conf,
5940 &conf->conf)) {
5941 ixgbe_rss_disable(dev);
5942 memset(&filter_info->rss_info, 0,
5943 sizeof(struct ixgbe_rte_flow_rss_conf));
5949 if (filter_info->rss_info.conf.queue_num)
5950 return -EINVAL;
5951 /* Fill in redirection table
5952 * The byte-swap is needed because NIC registers are in
5953 * little-endian order.
5956 for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
5957 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
5959 if (j == conf->conf.queue_num)
5960 j = 0;
5961 reta = (reta << 8) | conf->conf.queue[j];
5962 if ((i & 3) == 3)
5963 IXGBE_WRITE_REG(hw, reta_reg,
5964 rte_bswap32(reta));
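/* For example, with queue_num == 4 and queue = {0, 1, 2, 3}, the RETA
 * (typically 128 entries) is filled with the repeating pattern
 * 0,1,2,3; four 8-bit entries are packed per 32-bit register write.
 */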
5967 /* Configure the RSS key and the RSS protocols used to compute
5968 * the RSS hash of input packets.
5970 if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
5971 ixgbe_rss_disable(dev);
5974 if (rss_conf.rss_key == NULL)
5975 rss_conf.rss_key = rss_intel_key; /* Default hash key */
5976 ixgbe_hw_rss_hash_set(hw, &rss_conf);
5978 if (ixgbe_rss_conf_init(&filter_info->rss_info, &conf->conf))
5979 return -EINVAL;
5984 /* Stubs needed for linkage when RTE_ARCH_PPC_64 is set */
5985 #if defined(RTE_ARCH_PPC_64)
5987 ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
5993 ixgbe_recv_pkts_vec(
5994 void __rte_unused *rx_queue,
5995 struct rte_mbuf __rte_unused **rx_pkts,
5996 uint16_t __rte_unused nb_pkts)
6002 ixgbe_recv_scattered_pkts_vec(
6003 void __rte_unused *rx_queue,
6004 struct rte_mbuf __rte_unused **rx_pkts,
6005 uint16_t __rte_unused nb_pkts)
6011 ixgbe_rxq_vec_setup(struct ixgbe_rx_queue __rte_unused *rxq)
6017 ixgbe_xmit_fixed_burst_vec(void __rte_unused *tx_queue,
6018 struct rte_mbuf __rte_unused **tx_pkts,
6019 uint16_t __rte_unused nb_pkts)
6025 ixgbe_txq_vec_setup(struct ixgbe_tx_queue __rte_unused *txq)
6031 ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue __rte_unused *rxq)