1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation.
3 * Copyright 2014 6WIND S.A.
17 #include <rte_byteorder.h>
18 #include <rte_common.h>
19 #include <rte_cycles.h>
21 #include <rte_debug.h>
22 #include <rte_interrupts.h>
24 #include <rte_memory.h>
25 #include <rte_memzone.h>
26 #include <rte_launch.h>
28 #include <rte_per_lcore.h>
29 #include <rte_lcore.h>
30 #include <rte_atomic.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_mempool.h>
33 #include <rte_malloc.h>
35 #include <rte_ether.h>
36 #include <rte_ethdev_driver.h>
37 #include <rte_prefetch.h>
41 #include <rte_string_fns.h>
42 #include <rte_errno.h>
46 #include "ixgbe_logs.h"
47 #include "base/ixgbe_api.h"
48 #include "base/ixgbe_vf.h"
49 #include "ixgbe_ethdev.h"
50 #include "base/ixgbe_dcb.h"
51 #include "base/ixgbe_common.h"
52 #include "ixgbe_rxtx.h"
54 #ifdef RTE_LIBRTE_IEEE1588
55 #define IXGBE_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
57 #define IXGBE_TX_IEEE1588_TMST 0
59 /* Bit mask to indicate which bits are required for building the TX context */
60 #define IXGBE_TX_OFFLOAD_MASK ( \
66 PKT_TX_OUTER_IP_CKSUM | \
67 PKT_TX_SEC_OFFLOAD | \
68 IXGBE_TX_IEEE1588_TMST)
70 #define IXGBE_TX_OFFLOAD_NOTSUP_MASK \
71 (PKT_TX_OFFLOAD_MASK ^ IXGBE_TX_OFFLOAD_MASK)
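/*
 * Any flag present in PKT_TX_OFFLOAD_MASK but absent from
 * IXGBE_TX_OFFLOAD_MASK therefore lands in the NOTSUP mask, so a single
 * AND is enough to detect an offload request this PMD cannot honour
 * (this is the check ixgbe_prep_pkts() performs below). A minimal
 * sketch, with a hypothetical mbuf pointer "m":
 *
 *     if (m->ol_flags & IXGBE_TX_OFFLOAD_NOTSUP_MASK)
 *         reject the packet;
 */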
74 #define RTE_PMD_USE_PREFETCH
77 #ifdef RTE_PMD_USE_PREFETCH
79 * Prefetch a cache line into all cache levels.
81 #define rte_ixgbe_prefetch(p) rte_prefetch0(p)
83 #define rte_ixgbe_prefetch(p) do {} while (0)
86 #ifdef RTE_IXGBE_INC_VECTOR
87 uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
91 /*********************************************************************
95 **********************************************************************/
98 * Check for descriptors with their DD bit set and free mbufs.
99 * Return the total number of buffers freed.
101 static __rte_always_inline int
102 ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
104 struct ixgbe_tx_entry *txep;
107 struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];
109 /* check DD bit on threshold descriptor */
110 status = txq->tx_ring[txq->tx_next_dd].wb.status;
111 if (!(status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD)))
115 * first buffer to free from S/W ring is at index
116 * tx_next_dd - (tx_rs_thresh-1)
118 txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]);
120 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
121 /* free buffers one at a time */
122 m = rte_pktmbuf_prefree_seg(txep->mbuf);
125 if (unlikely(m == NULL))
128 if (nb_free >= RTE_IXGBE_TX_MAX_FREE_BUF_SZ ||
129 (nb_free > 0 && m->pool != free[0]->pool)) {
130 rte_mempool_put_bulk(free[0]->pool,
131 (void **)free, nb_free);
139 rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
141 /* buffers were freed, update counters */
142 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
143 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
144 if (txq->tx_next_dd >= txq->nb_tx_desc)
145 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
147 return txq->tx_rs_thresh;
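/*
 * For example, with tx_rs_thresh = 32 (the default) and a hypothetical
 * 512-entry ring: once the descriptor at tx_next_dd reports DD, the 32
 * mbufs at sw_ring[tx_next_dd - 31] .. sw_ring[tx_next_dd] are released,
 * returned to their mempool in one bulk operation when they all share a
 * pool, and tx_next_dd advances by 32 (wrapping back to 31 past the end
 * of the ring).
 */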
150 /* Populate 4 descriptors with data from 4 mbufs */
152 tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
154 uint64_t buf_dma_addr;
158 for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
159 buf_dma_addr = rte_mbuf_data_iova(*pkts);
160 pkt_len = (*pkts)->data_len;
162 /* write data to descriptor */
163 txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
165 txdp->read.cmd_type_len =
166 rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
168 txdp->read.olinfo_status =
169 rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
171 rte_prefetch0(&(*pkts)->pool);
175 /* Populate 1 descriptor with data from 1 mbuf */
177 tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
179 uint64_t buf_dma_addr;
182 buf_dma_addr = rte_mbuf_data_iova(*pkts);
183 pkt_len = (*pkts)->data_len;
185 /* write data to descriptor */
186 txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
187 txdp->read.cmd_type_len =
188 rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
189 txdp->read.olinfo_status =
190 rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
191 rte_prefetch0(&(*pkts)->pool);
195 * Fill H/W descriptor ring with mbuf data.
196 * Copy mbuf pointers to the S/W ring.
199 ixgbe_tx_fill_hw_ring(struct ixgbe_tx_queue *txq, struct rte_mbuf **pkts,
202 volatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
203 struct ixgbe_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
204 const int N_PER_LOOP = 4;
205 const int N_PER_LOOP_MASK = N_PER_LOOP-1;
206 int mainpart, leftover;
210 * Process most of the packets in chunks of N pkts. Any
211 * leftover packets will get processed one at a time.
213 mainpart = (nb_pkts & ((uint32_t) ~N_PER_LOOP_MASK));
214 leftover = (nb_pkts & ((uint32_t) N_PER_LOOP_MASK));
215 for (i = 0; i < mainpart; i += N_PER_LOOP) {
216 /* Copy N mbuf pointers to the S/W ring */
217 for (j = 0; j < N_PER_LOOP; ++j) {
218 (txep + i + j)->mbuf = *(pkts + i + j);
220 tx4(txdp + i, pkts + i);
223 if (unlikely(leftover > 0)) {
224 for (i = 0; i < leftover; ++i) {
225 (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
226 tx1(txdp + mainpart + i, pkts + mainpart + i);
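/*
 * The split above is plain power-of-two masking. For example,
 * nb_pkts = 13 gives mainpart = 13 & ~3 = 12 (three tx4() calls) and
 * leftover = 13 & 3 = 1 (one trailing tx1() call).
 */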
231 static inline uint16_t
232 tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
235 struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
236 volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring;
240 * Begin scanning the H/W ring for done descriptors when the
241 * number of available descriptors drops below tx_free_thresh. For
242 * each done descriptor, free the associated buffer.
244 if (txq->nb_tx_free < txq->tx_free_thresh)
245 ixgbe_tx_free_bufs(txq);
247 /* Only use descriptors that are available */
248 nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
249 if (unlikely(nb_pkts == 0))
252 /* Use exactly nb_pkts descriptors */
253 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
256 * At this point, we know there are enough descriptors in the
257 * ring to transmit all the packets. This assumes that each
258 * mbuf contains a single segment, and that no new offloads
259 * are expected, which would require a new context descriptor.
263 * See if we're going to wrap-around. If so, handle the top
264 * of the descriptor ring first, then do the bottom. If not,
265 * the processing looks just like the "bottom" part anyway...
267 if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
268 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
269 ixgbe_tx_fill_hw_ring(txq, tx_pkts, n);
272 * We know that the last descriptor in the ring will need to
273 * have its RS bit set because tx_rs_thresh has to be
274 * a divisor of the ring size
276 tx_r[txq->tx_next_rs].read.cmd_type_len |=
277 rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
278 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
283 /* Fill H/W descriptor ring with mbuf data */
284 ixgbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
285 txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
288 * Determine if RS bit should be set
289 * This is what we actually want:
290 * if ((txq->tx_tail - 1) >= txq->tx_next_rs)
291 * but instead of subtracting 1 and doing >=, we can just do
292 * greater than without subtracting.
294 if (txq->tx_tail > txq->tx_next_rs) {
295 tx_r[txq->tx_next_rs].read.cmd_type_len |=
296 rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
297 txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
299 if (txq->tx_next_rs >= txq->nb_tx_desc)
300 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
304 * Check for wrap-around. This would only happen if we used
305 * up to the last descriptor in the ring, no more, no less.
307 if (txq->tx_tail >= txq->nb_tx_desc)
310 /* update tail pointer */
312 IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, txq->tx_tail);
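/*
 * Example of the RS bookkeeping above, assuming a hypothetical 512-entry
 * ring with tx_rs_thresh = 32: tx_next_rs starts at 31; as soon as
 * tx_tail moves past 31, the RS bit is set on descriptor 31 and
 * tx_next_rs becomes 63, and so on. A write-back is thus requested every
 * 32 descriptors, and ixgbe_tx_free_bufs() can later reclaim them in
 * tx_rs_thresh-sized batches.
 */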
318 ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
323 /* If the burst fits within TX_MAX_BURST pkts, transmit it directly */
324 if (likely(nb_pkts <= RTE_PMD_IXGBE_TX_MAX_BURST))
325 return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
327 /* transmit more than the max burst, in chunks of TX_MAX_BURST */
332 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_TX_MAX_BURST);
333 ret = tx_xmit_pkts(tx_queue, &(tx_pkts[nb_tx]), n);
334 nb_tx = (uint16_t)(nb_tx + ret);
335 nb_pkts = (uint16_t)(nb_pkts - ret);
343 #ifdef RTE_IXGBE_INC_VECTOR
345 ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
349 struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
354 num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
355 ret = ixgbe_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
368 ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
369 volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
370 uint64_t ol_flags, union ixgbe_tx_offload tx_offload,
371 __rte_unused uint64_t *mdata)
373 uint32_t type_tucmd_mlhl;
374 uint32_t mss_l4len_idx = 0;
376 uint32_t vlan_macip_lens;
377 union ixgbe_tx_offload tx_offload_mask;
378 uint32_t seqnum_seed = 0;
380 ctx_idx = txq->ctx_curr;
381 tx_offload_mask.data[0] = 0;
382 tx_offload_mask.data[1] = 0;
385 /* Specify which HW CTX to upload. */
386 mss_l4len_idx |= (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT);
388 if (ol_flags & PKT_TX_VLAN_PKT) {
389 tx_offload_mask.vlan_tci |= ~0;
392 /* check if TCP segmentation is required for this packet */
393 if (ol_flags & PKT_TX_TCP_SEG) {
394 /* implies IP cksum in IPv4 */
395 if (ol_flags & PKT_TX_IP_CKSUM)
396 type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4 |
397 IXGBE_ADVTXD_TUCMD_L4T_TCP |
398 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
400 type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV6 |
401 IXGBE_ADVTXD_TUCMD_L4T_TCP |
402 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
404 tx_offload_mask.l2_len |= ~0;
405 tx_offload_mask.l3_len |= ~0;
406 tx_offload_mask.l4_len |= ~0;
407 tx_offload_mask.tso_segsz |= ~0;
408 mss_l4len_idx |= tx_offload.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT;
409 mss_l4len_idx |= tx_offload.l4_len << IXGBE_ADVTXD_L4LEN_SHIFT;
410 } else { /* no TSO, check if hardware checksum is needed */
411 if (ol_flags & PKT_TX_IP_CKSUM) {
412 type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4;
413 tx_offload_mask.l2_len |= ~0;
414 tx_offload_mask.l3_len |= ~0;
417 switch (ol_flags & PKT_TX_L4_MASK) {
418 case PKT_TX_UDP_CKSUM:
419 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP |
420 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
421 mss_l4len_idx |= sizeof(struct udp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
422 tx_offload_mask.l2_len |= ~0;
423 tx_offload_mask.l3_len |= ~0;
425 case PKT_TX_TCP_CKSUM:
426 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP |
427 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
428 mss_l4len_idx |= sizeof(struct tcp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
429 tx_offload_mask.l2_len |= ~0;
430 tx_offload_mask.l3_len |= ~0;
432 case PKT_TX_SCTP_CKSUM:
433 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP |
434 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
435 mss_l4len_idx |= sizeof(struct sctp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
436 tx_offload_mask.l2_len |= ~0;
437 tx_offload_mask.l3_len |= ~0;
440 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_RSV |
441 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
446 if (ol_flags & PKT_TX_OUTER_IP_CKSUM) {
447 tx_offload_mask.outer_l2_len |= ~0;
448 tx_offload_mask.outer_l3_len |= ~0;
449 tx_offload_mask.l2_len |= ~0;
450 seqnum_seed |= tx_offload.outer_l3_len
451 << IXGBE_ADVTXD_OUTER_IPLEN;
452 seqnum_seed |= tx_offload.l2_len
453 << IXGBE_ADVTXD_TUNNEL_LEN;
455 #ifdef RTE_LIBRTE_SECURITY
456 if (ol_flags & PKT_TX_SEC_OFFLOAD) {
457 union ixgbe_crypto_tx_desc_md *md =
458 (union ixgbe_crypto_tx_desc_md *)mdata;
460 (IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK & md->sa_idx);
461 type_tucmd_mlhl |= md->enc ?
462 (IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
463 IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN) : 0;
465 (md->pad_len & IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK);
466 tx_offload_mask.sa_idx |= ~0;
467 tx_offload_mask.sec_pad_len |= ~0;
471 txq->ctx_cache[ctx_idx].flags = ol_flags;
472 txq->ctx_cache[ctx_idx].tx_offload.data[0] =
473 tx_offload_mask.data[0] & tx_offload.data[0];
474 txq->ctx_cache[ctx_idx].tx_offload.data[1] =
475 tx_offload_mask.data[1] & tx_offload.data[1];
476 txq->ctx_cache[ctx_idx].tx_offload_mask = tx_offload_mask;
478 ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
479 vlan_macip_lens = tx_offload.l3_len;
480 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
481 vlan_macip_lens |= (tx_offload.outer_l2_len <<
482 IXGBE_ADVTXD_MACLEN_SHIFT);
484 vlan_macip_lens |= (tx_offload.l2_len <<
485 IXGBE_ADVTXD_MACLEN_SHIFT);
486 vlan_macip_lens |= ((uint32_t)tx_offload.vlan_tci << IXGBE_ADVTXD_VLAN_SHIFT);
487 ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
488 ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
489 ctx_txd->seqnum_seed = seqnum_seed;
493 * Check which hardware context can be used. Use the existing match
494 * or create a new context descriptor.
496 static inline uint32_t
497 what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
498 union ixgbe_tx_offload tx_offload)
500 /* Check whether it matches the currently used context */
501 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
502 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
503 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
504 & tx_offload.data[0])) &&
505 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
506 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
507 & tx_offload.data[1]))))
508 return txq->ctx_curr;
510 /* Check whether it matches the other cached context */
512 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
513 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
514 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
515 & tx_offload.data[0])) &&
516 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
517 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
518 & tx_offload.data[1]))))
519 return txq->ctx_curr;
521 /* Mismatch: a new context descriptor needs to be built */
522 return IXGBE_CTX_NUM;
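/*
 * The queue keeps IXGBE_CTX_NUM cached context descriptors, so two
 * distinct offload layouts can alternate without re-emitting a context
 * descriptor per packet. For example, interleaving one flow that needs
 * VLAN insertion with another that needs TCP checksum offload keeps
 * hitting the two cached entries; only a third layout forces
 * ixgbe_set_xmit_ctx() to build a new one.
 */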
525 static inline uint32_t
526 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
530 if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM)
531 tmp |= IXGBE_ADVTXD_POPTS_TXSM;
532 if (ol_flags & PKT_TX_IP_CKSUM)
533 tmp |= IXGBE_ADVTXD_POPTS_IXSM;
534 if (ol_flags & PKT_TX_TCP_SEG)
535 tmp |= IXGBE_ADVTXD_POPTS_TXSM;
539 static inline uint32_t
540 tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
542 uint32_t cmdtype = 0;
544 if (ol_flags & PKT_TX_VLAN_PKT)
545 cmdtype |= IXGBE_ADVTXD_DCMD_VLE;
546 if (ol_flags & PKT_TX_TCP_SEG)
547 cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
548 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
549 cmdtype |= (1 << IXGBE_ADVTXD_OUTERIPCS_SHIFT);
550 if (ol_flags & PKT_TX_MACSEC)
551 cmdtype |= IXGBE_ADVTXD_MAC_LINKSEC;
555 /* Default RS bit threshold values */
556 #ifndef DEFAULT_TX_RS_THRESH
557 #define DEFAULT_TX_RS_THRESH 32
559 #ifndef DEFAULT_TX_FREE_THRESH
560 #define DEFAULT_TX_FREE_THRESH 32
563 /* Reset transmit descriptors after they have been used */
565 ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
567 struct ixgbe_tx_entry *sw_ring = txq->sw_ring;
568 volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring;
569 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
570 uint16_t nb_tx_desc = txq->nb_tx_desc;
571 uint16_t desc_to_clean_to;
572 uint16_t nb_tx_to_clean;
575 /* Determine the last descriptor needing to be cleaned */
576 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
577 if (desc_to_clean_to >= nb_tx_desc)
578 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
580 /* Check to make sure the last descriptor to clean is done */
581 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
582 status = txr[desc_to_clean_to].wb.status;
583 if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD))) {
584 PMD_TX_FREE_LOG(DEBUG,
585 "TX descriptor %4u is not done "
586 "(port=%d queue=%d)",
588 txq->port_id, txq->queue_id);
589 /* Failed to clean any descriptors, better luck next time */
593 /* Figure out how many descriptors will be cleaned */
594 if (last_desc_cleaned > desc_to_clean_to)
595 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
598 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
601 PMD_TX_FREE_LOG(DEBUG,
602 "Cleaning %4u TX descriptors: %4u to %4u "
603 "(port=%d queue=%d)",
604 nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
605 txq->port_id, txq->queue_id);
608 * The last descriptor to clean is done, so that means all the
609 * descriptors from the last descriptor that was cleaned
610 * up to the last descriptor with the RS bit set
611 * are done. Only reset the threshold descriptor.
613 txr[desc_to_clean_to].wb.status = 0;
615 /* Update the txq to reflect the last descriptor that was cleaned */
616 txq->last_desc_cleaned = desc_to_clean_to;
617 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
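/*
 * Worked example of the arithmetic above, assuming single-segment
 * packets (so last_id equals the descriptor index), a hypothetical
 * 512-entry ring and tx_rs_thresh = 32: with last_desc_cleaned = 500,
 * desc_to_clean_to = 500 + 32 - 512 = 20; since 500 > 20, the wrap case
 * applies and nb_tx_to_clean = (512 - 500) + 20 = 32 descriptors are
 * handed back to nb_tx_free.
 */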
624 ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
627 struct ixgbe_tx_queue *txq;
628 struct ixgbe_tx_entry *sw_ring;
629 struct ixgbe_tx_entry *txe, *txn;
630 volatile union ixgbe_adv_tx_desc *txr;
631 volatile union ixgbe_adv_tx_desc *txd, *txp;
632 struct rte_mbuf *tx_pkt;
633 struct rte_mbuf *m_seg;
634 uint64_t buf_dma_addr;
635 uint32_t olinfo_status;
636 uint32_t cmd_type_len;
647 union ixgbe_tx_offload tx_offload;
648 #ifdef RTE_LIBRTE_SECURITY
652 tx_offload.data[0] = 0;
653 tx_offload.data[1] = 0;
655 sw_ring = txq->sw_ring;
657 tx_id = txq->tx_tail;
658 txe = &sw_ring[tx_id];
661 /* Determine if the descriptor ring needs to be cleaned. */
662 if (txq->nb_tx_free < txq->tx_free_thresh)
663 ixgbe_xmit_cleanup(txq);
665 rte_prefetch0(&txe->mbuf->pool);
668 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
671 pkt_len = tx_pkt->pkt_len;
674 * Determine how many (if any) context descriptors
675 * are needed for offload functionality.
677 ol_flags = tx_pkt->ol_flags;
678 #ifdef RTE_LIBRTE_SECURITY
679 use_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD);
682 /* If hardware offload required */
683 tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK;
685 tx_offload.l2_len = tx_pkt->l2_len;
686 tx_offload.l3_len = tx_pkt->l3_len;
687 tx_offload.l4_len = tx_pkt->l4_len;
688 tx_offload.vlan_tci = tx_pkt->vlan_tci;
689 tx_offload.tso_segsz = tx_pkt->tso_segsz;
690 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
691 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
692 #ifdef RTE_LIBRTE_SECURITY
694 union ixgbe_crypto_tx_desc_md *ipsec_mdata =
695 (union ixgbe_crypto_tx_desc_md *)
697 tx_offload.sa_idx = ipsec_mdata->sa_idx;
698 tx_offload.sec_pad_len = ipsec_mdata->pad_len;
702 /* Decide whether a new context descriptor must be built or an existing one reused. */
703 ctx = what_advctx_update(txq, tx_ol_req,
705 /* Only allocate a context descriptor if required */
706 new_ctx = (ctx == IXGBE_CTX_NUM);
711 * Keep track of how many descriptors are used in this loop.
712 * This will always be the number of segments plus the number of
713 * context descriptors (0 or 1) required to transmit the packet.
715 nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
718 nb_used + txq->nb_tx_used >= txq->tx_rs_thresh)
719 /* set RS on the previous packet in the burst */
720 txp->read.cmd_type_len |=
721 rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
724 * The number of descriptors that must be allocated for a
725 * packet is the number of segments of that packet, plus 1
726 * Context Descriptor for the hardware offload, if any.
727 * Determine the last TX descriptor to allocate in the TX ring
728 * for the packet, starting from the current position (tx_id)
731 tx_last = (uint16_t) (tx_id + nb_used - 1);
734 if (tx_last >= txq->nb_tx_desc)
735 tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
737 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
738 " tx_first=%u tx_last=%u",
739 (unsigned) txq->port_id,
740 (unsigned) txq->queue_id,
746 * Make sure there are enough TX descriptors available to
747 * transmit the entire packet.
748 * nb_used better be less than or equal to txq->tx_rs_thresh
750 if (nb_used > txq->nb_tx_free) {
751 PMD_TX_FREE_LOG(DEBUG,
752 "Not enough free TX descriptors "
753 "nb_used=%4u nb_free=%4u "
754 "(port=%d queue=%d)",
755 nb_used, txq->nb_tx_free,
756 txq->port_id, txq->queue_id);
758 if (ixgbe_xmit_cleanup(txq) != 0) {
759 /* Could not clean any descriptors */
765 /* nb_used better be <= txq->tx_rs_thresh */
766 if (unlikely(nb_used > txq->tx_rs_thresh)) {
767 PMD_TX_FREE_LOG(DEBUG,
768 "The number of descriptors needed to "
769 "transmit the packet exceeds the "
770 "RS bit threshold. This will impact "
772 "nb_used=%4u nb_free=%4u "
774 "(port=%d queue=%d)",
775 nb_used, txq->nb_tx_free,
777 txq->port_id, txq->queue_id);
779 * Loop here until there are enough TX
780 * descriptors or until the ring cannot be
783 while (nb_used > txq->nb_tx_free) {
784 if (ixgbe_xmit_cleanup(txq) != 0) {
786 * Could not clean any
798 * By now there are enough free TX descriptors to transmit
803 * Set common flags of all TX Data Descriptors.
805 * The following bits must be set in all Data Descriptors:
806 * - IXGBE_ADVTXD_DTYP_DATA
807 * - IXGBE_ADVTXD_DCMD_DEXT
809 * The following bits must be set in the first Data Descriptor
810 * and are ignored in the other ones:
811 * - IXGBE_ADVTXD_DCMD_IFCS
812 * - IXGBE_ADVTXD_MAC_1588
813 * - IXGBE_ADVTXD_DCMD_VLE
815 * The following bits must only be set in the last Data
817 * - IXGBE_TXD_CMD_EOP
819 * The following bits can be set in any Data Descriptor, but
820 * are only set in the last Data Descriptor:
823 cmd_type_len = IXGBE_ADVTXD_DTYP_DATA |
824 IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
826 #ifdef RTE_LIBRTE_IEEE1588
827 if (ol_flags & PKT_TX_IEEE1588_TMST)
828 cmd_type_len |= IXGBE_ADVTXD_MAC_1588;
834 if (ol_flags & PKT_TX_TCP_SEG) {
835 /* When TSO is on, the paylen in the descriptor is
836  * not the packet length but the TCP payload length. */
837 pkt_len -= (tx_offload.l2_len +
838 tx_offload.l3_len + tx_offload.l4_len);
842 * Setup the TX Advanced Context Descriptor if required
845 volatile struct ixgbe_adv_tx_context_desc *
848 ctx_txd = (volatile struct
849 ixgbe_adv_tx_context_desc *)
852 txn = &sw_ring[txe->next_id];
853 rte_prefetch0(&txn->mbuf->pool);
855 if (txe->mbuf != NULL) {
856 rte_pktmbuf_free_seg(txe->mbuf);
860 ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
861 tx_offload, &tx_pkt->udata64);
863 txe->last_id = tx_last;
864 tx_id = txe->next_id;
869 * Set up the TX Advanced Data Descriptor.
870 * This path is taken whether a new context descriptor
871 * was built or an existing one is reused.
873 cmd_type_len |= tx_desc_ol_flags_to_cmdtype(ol_flags);
874 olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
875 olinfo_status |= ctx << IXGBE_ADVTXD_IDX_SHIFT;
878 olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
879 #ifdef RTE_LIBRTE_SECURITY
881 olinfo_status |= IXGBE_ADVTXD_POPTS_IPSEC;
887 txn = &sw_ring[txe->next_id];
888 rte_prefetch0(&txn->mbuf->pool);
890 if (txe->mbuf != NULL)
891 rte_pktmbuf_free_seg(txe->mbuf);
895 * Set up Transmit Data Descriptor.
897 slen = m_seg->data_len;
898 buf_dma_addr = rte_mbuf_data_iova(m_seg);
899 txd->read.buffer_addr =
900 rte_cpu_to_le_64(buf_dma_addr);
901 txd->read.cmd_type_len =
902 rte_cpu_to_le_32(cmd_type_len | slen);
903 txd->read.olinfo_status =
904 rte_cpu_to_le_32(olinfo_status);
905 txe->last_id = tx_last;
906 tx_id = txe->next_id;
909 } while (m_seg != NULL);
912 * The last packet data descriptor needs End Of Packet (EOP)
914 cmd_type_len |= IXGBE_TXD_CMD_EOP;
915 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
916 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
918 /* Set RS bit only on threshold packets' last descriptor */
919 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
920 PMD_TX_FREE_LOG(DEBUG,
921 "Setting RS bit on TXD id="
922 "%4u (port=%d queue=%d)",
923 tx_last, txq->port_id, txq->queue_id);
925 cmd_type_len |= IXGBE_TXD_CMD_RS;
927 /* Update txq RS bit counters */
933 txd->read.cmd_type_len |= rte_cpu_to_le_32(cmd_type_len);
937 /* set RS on last packet in the burst */
939 txp->read.cmd_type_len |= rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
944 * Set the Transmit Descriptor Tail (TDT)
946 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
947 (unsigned) txq->port_id, (unsigned) txq->queue_id,
948 (unsigned) tx_id, (unsigned) nb_tx);
949 IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
950 txq->tx_tail = tx_id;
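/*
 * In this full-featured path the RS bit is driven by nb_tx_used rather
 * than by a fixed ring position: nb_tx_used accumulates the descriptors
 * consumed since the last RS, and once it reaches tx_rs_thresh the last
 * descriptor of the current packet requests a write-back. For example,
 * with tx_rs_thresh = 32 and two-segment packets, RS is set roughly
 * every 16 packets.
 */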
955 /*********************************************************************
959 **********************************************************************/
961 ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
966 struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
968 for (i = 0; i < nb_pkts; i++) {
970 ol_flags = m->ol_flags;
973 * Check if the packet meets the requirements on the number of segments
975 * NOTE: for ixgbe it's always (40 - WTHRESH) for both TSO and
979 if (m->nb_segs > IXGBE_TX_MAX_SEG - txq->wthresh) {
984 if (ol_flags & IXGBE_TX_OFFLOAD_NOTSUP_MASK) {
985 rte_errno = -ENOTSUP;
989 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
990 ret = rte_validate_tx_offload(m);
996 ret = rte_net_intel_cksum_prepare(m);
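/*
 * ixgbe_prep_pkts() is the tx_prepare callback for this PMD, so an
 * application typically runs it before the actual burst when it uses
 * TSO or checksum offloads. A minimal usage sketch, with hypothetical
 * port_id, queue_id, pkts and nb variables:
 *
 *     uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb);
 *     uint16_t nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
 *
 * If nb_prep < nb, pkts[nb_prep] failed the checks above and rte_errno
 * holds the reason.
 */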
1006 /*********************************************************************
1010 **********************************************************************/
1012 #define IXGBE_PACKET_TYPE_ETHER 0X00
1013 #define IXGBE_PACKET_TYPE_IPV4 0X01
1014 #define IXGBE_PACKET_TYPE_IPV4_TCP 0X11
1015 #define IXGBE_PACKET_TYPE_IPV4_UDP 0X21
1016 #define IXGBE_PACKET_TYPE_IPV4_SCTP 0X41
1017 #define IXGBE_PACKET_TYPE_IPV4_EXT 0X03
1018 #define IXGBE_PACKET_TYPE_IPV4_EXT_TCP 0X13
1019 #define IXGBE_PACKET_TYPE_IPV4_EXT_UDP 0X23
1020 #define IXGBE_PACKET_TYPE_IPV4_EXT_SCTP 0X43
1021 #define IXGBE_PACKET_TYPE_IPV6 0X04
1022 #define IXGBE_PACKET_TYPE_IPV6_TCP 0X14
1023 #define IXGBE_PACKET_TYPE_IPV6_UDP 0X24
1024 #define IXGBE_PACKET_TYPE_IPV6_SCTP 0X44
1025 #define IXGBE_PACKET_TYPE_IPV6_EXT 0X0C
1026 #define IXGBE_PACKET_TYPE_IPV6_EXT_TCP 0X1C
1027 #define IXGBE_PACKET_TYPE_IPV6_EXT_UDP 0X2C
1028 #define IXGBE_PACKET_TYPE_IPV6_EXT_SCTP 0X4C
1029 #define IXGBE_PACKET_TYPE_IPV4_IPV6 0X05
1030 #define IXGBE_PACKET_TYPE_IPV4_IPV6_TCP 0X15
1031 #define IXGBE_PACKET_TYPE_IPV4_IPV6_UDP 0X25
1032 #define IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP 0X45
1033 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6 0X07
1034 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP 0X17
1035 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP 0X27
1036 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP 0X47
1037 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT 0X0D
1038 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
1039 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
1040 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP 0X4D
1041 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT 0X0F
1042 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP 0X1F
1043 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP 0X2F
1044 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP 0X4F
1046 #define IXGBE_PACKET_TYPE_NVGRE 0X00
1047 #define IXGBE_PACKET_TYPE_NVGRE_IPV4 0X01
1048 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_TCP 0X11
1049 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP 0X21
1050 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP 0X41
1051 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT 0X03
1052 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP 0X13
1053 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP 0X23
1054 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP 0X43
1055 #define IXGBE_PACKET_TYPE_NVGRE_IPV6 0X04
1056 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP 0X14
1057 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP 0X24
1058 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP 0X44
1059 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT 0X0C
1060 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP 0X1C
1061 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP 0X2C
1062 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP 0X4C
1063 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6 0X05
1064 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP 0X15
1065 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP 0X25
1066 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT 0X0D
1067 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP 0X1D
1068 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP 0X2D
1070 #define IXGBE_PACKET_TYPE_VXLAN 0X80
1071 #define IXGBE_PACKET_TYPE_VXLAN_IPV4 0X81
1072 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_TCP 0x91
1073 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP 0xA1
1074 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP 0xC1
1075 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT 0x83
1076 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP 0X93
1077 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP 0XA3
1078 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP 0XC3
1079 #define IXGBE_PACKET_TYPE_VXLAN_IPV6 0X84
1080 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP 0X94
1081 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP 0XA4
1082 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP 0XC4
1083 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT 0X8C
1084 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP 0X9C
1085 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP 0XAC
1086 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP 0XCC
1087 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6 0X85
1088 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP 0X95
1089 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP 0XA5
1090 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT 0X8D
1091 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP 0X9D
1092 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP 0XAD
1095 * Use two different tables for normal packets and tunnel packets
1096 * to save space.
1099 ptype_table[IXGBE_PACKET_TYPE_MAX] __rte_cache_aligned = {
1100 [IXGBE_PACKET_TYPE_ETHER] = RTE_PTYPE_L2_ETHER,
1101 [IXGBE_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
1103 [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
1104 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
1105 [IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
1106 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
1107 [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
1108 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
1109 [IXGBE_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
1110 RTE_PTYPE_L3_IPV4_EXT,
1111 [IXGBE_PACKET_TYPE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1112 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
1113 [IXGBE_PACKET_TYPE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1114 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
1115 [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1116 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
1117 [IXGBE_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
1119 [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1120 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
1121 [IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1122 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
1123 [IXGBE_PACKET_TYPE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1124 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP,
1125 [IXGBE_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1126 RTE_PTYPE_L3_IPV6_EXT,
1127 [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1128 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
1129 [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1130 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
1131 [IXGBE_PACKET_TYPE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1132 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_SCTP,
1133 [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
1134 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1135 RTE_PTYPE_INNER_L3_IPV6,
1136 [IXGBE_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1137 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1138 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
1139 [IXGBE_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1140 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1141 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
1142 [IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1143 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1144 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
1145 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6] = RTE_PTYPE_L2_ETHER |
1146 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1147 RTE_PTYPE_INNER_L3_IPV6,
1148 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1149 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1150 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
1151 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1152 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1153 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
1154 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1155 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1156 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
1157 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1158 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1159 RTE_PTYPE_INNER_L3_IPV6_EXT,
1160 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1161 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1162 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
1163 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1164 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1165 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
1166 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1167 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1168 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
1169 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1170 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1171 RTE_PTYPE_INNER_L3_IPV6_EXT,
1172 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1173 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1174 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
1175 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1176 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1177 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
1178 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP] =
1179 RTE_PTYPE_L2_ETHER |
1180 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1181 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
1185 ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX] __rte_cache_aligned = {
1186 [IXGBE_PACKET_TYPE_NVGRE] = RTE_PTYPE_L2_ETHER |
1187 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1188 RTE_PTYPE_INNER_L2_ETHER,
1189 [IXGBE_PACKET_TYPE_NVGRE_IPV4] = RTE_PTYPE_L2_ETHER |
1190 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1191 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1192 [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
1193 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1194 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT,
1195 [IXGBE_PACKET_TYPE_NVGRE_IPV6] = RTE_PTYPE_L2_ETHER |
1196 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1197 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6,
1198 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
1199 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1200 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1201 [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1202 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1203 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT,
1204 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1205 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1206 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1207 [IXGBE_PACKET_TYPE_NVGRE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
1208 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1209 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
1210 RTE_PTYPE_INNER_L4_TCP,
1211 [IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1212 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1213 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
1214 RTE_PTYPE_INNER_L4_TCP,
1215 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1216 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1217 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1218 [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1219 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1220 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
1221 RTE_PTYPE_INNER_L4_TCP,
1222 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP] =
1223 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1224 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
1225 RTE_PTYPE_INNER_L3_IPV4,
1226 [IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
1227 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1228 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
1229 RTE_PTYPE_INNER_L4_UDP,
1230 [IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1231 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1232 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
1233 RTE_PTYPE_INNER_L4_UDP,
1234 [IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1235 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1236 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
1237 RTE_PTYPE_INNER_L4_SCTP,
1238 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1239 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1240 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1241 [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1242 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1243 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
1244 RTE_PTYPE_INNER_L4_UDP,
1245 [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1246 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1247 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
1248 RTE_PTYPE_INNER_L4_SCTP,
1249 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP] =
1250 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1251 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
1252 RTE_PTYPE_INNER_L3_IPV4,
1253 [IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
1254 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1255 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
1256 RTE_PTYPE_INNER_L4_SCTP,
1257 [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1258 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1259 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
1260 RTE_PTYPE_INNER_L4_SCTP,
1261 [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1262 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1263 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
1264 RTE_PTYPE_INNER_L4_TCP,
1265 [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1266 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1267 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
1268 RTE_PTYPE_INNER_L4_UDP,
1270 [IXGBE_PACKET_TYPE_VXLAN] = RTE_PTYPE_L2_ETHER |
1271 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1272 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER,
1273 [IXGBE_PACKET_TYPE_VXLAN_IPV4] = RTE_PTYPE_L2_ETHER |
1274 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1275 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1276 RTE_PTYPE_INNER_L3_IPV4,
1277 [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
1278 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1279 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1280 RTE_PTYPE_INNER_L3_IPV4_EXT,
1281 [IXGBE_PACKET_TYPE_VXLAN_IPV6] = RTE_PTYPE_L2_ETHER |
1282 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1283 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1284 RTE_PTYPE_INNER_L3_IPV6,
1285 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
1286 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1287 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1288 RTE_PTYPE_INNER_L3_IPV4,
1289 [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1290 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1291 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1292 RTE_PTYPE_INNER_L3_IPV6_EXT,
1293 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1294 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1295 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1296 RTE_PTYPE_INNER_L3_IPV4,
1297 [IXGBE_PACKET_TYPE_VXLAN_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
1298 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1299 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1300 RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_TCP,
1301 [IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1302 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1303 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1304 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
1305 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1306 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1307 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1308 RTE_PTYPE_INNER_L3_IPV4,
1309 [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1310 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1311 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1312 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
1313 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP] =
1314 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1315 RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
1316 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1317 [IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
1318 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1319 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1320 RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_UDP,
1321 [IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1322 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1323 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1324 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
1325 [IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1326 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1327 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1328 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
1329 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1330 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1331 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1332 RTE_PTYPE_INNER_L3_IPV4,
1333 [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1334 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1335 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1336 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
1337 [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1338 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1339 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1340 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
1341 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP] =
1342 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1343 RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
1344 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1345 [IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
1346 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1347 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1348 RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_SCTP,
1349 [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1350 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1351 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1352 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_SCTP,
1353 [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1354 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1355 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1356 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP,
1357 [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1358 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1359 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1360 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP,
1363 /* @note: fix ixgbe_dev_supported_ptypes_get() if any change is made here. */
1364 static inline uint32_t
1365 ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask)
1368 if (unlikely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
1369 return RTE_PTYPE_UNKNOWN;
1371 pkt_info = (pkt_info >> IXGBE_PACKET_TYPE_SHIFT) & ptype_mask;
1373 /* For tunnel packet */
1374 if (pkt_info & IXGBE_PACKET_TYPE_TUNNEL_BIT) {
1375 /* Remove the tunnel bit to save space. */
1376 pkt_info &= IXGBE_PACKET_TYPE_MASK_TUNNEL;
1377 return ptype_table_tn[pkt_info];
1381 * For x550, if the packet is not a tunnel packet,
1382 * the tunnel type bits should be 0.
1383 * Reuse the 82599 mask.
1385 pkt_info &= IXGBE_PACKET_TYPE_MASK_82599;
1387 return ptype_table[pkt_info];
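/*
 * Example of the lookup above: a plain IPv4/TCP packet yields a masked
 * pkt_info of IXGBE_PACKET_TYPE_IPV4_TCP (0x11), which the table
 * translates to RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
 * descriptors with the tunnel bit set are instead looked up in
 * ptype_table_tn after the bit is masked off.
 */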
1390 static inline uint64_t
1391 ixgbe_rxd_pkt_info_to_pkt_flags(uint16_t pkt_info)
1393 static uint64_t ip_rss_types_map[16] __rte_cache_aligned = {
1394 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
1395 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
1396 PKT_RX_RSS_HASH, 0, 0, 0,
1397 0, 0, 0, PKT_RX_FDIR,
1399 #ifdef RTE_LIBRTE_IEEE1588
1400 static uint64_t ip_pkt_etqf_map[8] = {
1401 0, 0, 0, PKT_RX_IEEE1588_PTP,
1405 if (likely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
1406 return ip_pkt_etqf_map[(pkt_info >> 4) & 0X07] |
1407 ip_rss_types_map[pkt_info & 0XF];
1409 return ip_rss_types_map[pkt_info & 0XF];
1411 return ip_rss_types_map[pkt_info & 0XF];
1415 static inline uint64_t
1416 rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags)
1421 * Check only whether a VLAN is present.
1422 * Do not check whether the L3/L4 RX checksum was verified by the NIC;
1423 * that can be determined from the rte_eth_rxmode.hw_ip_checksum flag.
1425 pkt_flags = (rx_status & IXGBE_RXD_STAT_VP) ? vlan_flags : 0;
1427 #ifdef RTE_LIBRTE_IEEE1588
1428 if (rx_status & IXGBE_RXD_STAT_TMST)
1429 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
1434 static inline uint64_t
1435 rx_desc_error_to_pkt_flags(uint32_t rx_status)
1440 * Bit 31: IPE, IPv4 checksum error
1441 * Bit 30: L4I, L4 integrity error
1443 static uint64_t error_to_pkt_flags_map[4] = {
1444 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
1445 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
1446 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD,
1447 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
1449 pkt_flags = error_to_pkt_flags_map[(rx_status >>
1450 IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK];
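/*
 * The two error bits form a direct index into the map above: for
 * example, a descriptor with IPE set and the L4 error bit clear yields
 * index 2, i.e. PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD.
 */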
1452 if ((rx_status & IXGBE_RXD_STAT_OUTERIPCS) &&
1453 (rx_status & IXGBE_RXDADV_ERR_OUTERIPER)) {
1454 pkt_flags |= PKT_RX_EIP_CKSUM_BAD;
1457 #ifdef RTE_LIBRTE_SECURITY
1458 if (rx_status & IXGBE_RXD_STAT_SECP) {
1459 pkt_flags |= PKT_RX_SEC_OFFLOAD;
1460 if (rx_status & IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG)
1461 pkt_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
1469 * LOOK_AHEAD defines how many desc statuses to check beyond the
1470 * current descriptor.
1471 * It must be a pound define for optimal performance.
1472 * Do not change the value of LOOK_AHEAD, as the ixgbe_rx_scan_hw_ring
1473 * function only works with LOOK_AHEAD=8.
1475 #define LOOK_AHEAD 8
1476 #if (LOOK_AHEAD != 8)
1477 #error "PMD IXGBE: LOOK_AHEAD must be 8\n"
1480 ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
1482 volatile union ixgbe_adv_rx_desc *rxdp;
1483 struct ixgbe_rx_entry *rxep;
1484 struct rte_mbuf *mb;
1488 uint32_t s[LOOK_AHEAD];
1489 uint32_t pkt_info[LOOK_AHEAD];
1490 int i, j, nb_rx = 0;
1492 uint64_t vlan_flags = rxq->vlan_flags;
1494 /* get references to current descriptor and S/W ring entry */
1495 rxdp = &rxq->rx_ring[rxq->rx_tail];
1496 rxep = &rxq->sw_ring[rxq->rx_tail];
1498 status = rxdp->wb.upper.status_error;
1499 /* check to make sure there is at least 1 packet to receive */
1500 if (!(status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
1504 * Scan LOOK_AHEAD descriptors at a time to determine which descriptors
1505 * reference packets that are ready to be received.
1507 for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST;
1508 i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD) {
1509 /* Read desc statuses backwards to avoid race condition */
1510 for (j = 0; j < LOOK_AHEAD; j++)
1511 s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error);
1515 /* Count how many contiguous descriptors have their DD status bit set */
1516 for (nb_dd = 0; nb_dd < LOOK_AHEAD &&
1517 (s[nb_dd] & IXGBE_RXDADV_STAT_DD); nb_dd++)
1520 for (j = 0; j < nb_dd; j++)
1521 pkt_info[j] = rte_le_to_cpu_32(rxdp[j].wb.lower.
1526 /* Translate descriptor info to mbuf format */
1527 for (j = 0; j < nb_dd; ++j) {
1529 pkt_len = rte_le_to_cpu_16(rxdp[j].wb.upper.length) -
1531 mb->data_len = pkt_len;
1532 mb->pkt_len = pkt_len;
1533 mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
1535 /* convert descriptor fields to rte mbuf flags */
1536 pkt_flags = rx_desc_status_to_pkt_flags(s[j],
1538 pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
1539 pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags
1540 ((uint16_t)pkt_info[j]);
1541 mb->ol_flags = pkt_flags;
1543 ixgbe_rxd_pkt_info_to_pkt_type
1544 (pkt_info[j], rxq->pkt_type_mask);
1546 if (likely(pkt_flags & PKT_RX_RSS_HASH))
1547 mb->hash.rss = rte_le_to_cpu_32(
1548 rxdp[j].wb.lower.hi_dword.rss);
1549 else if (pkt_flags & PKT_RX_FDIR) {
1550 mb->hash.fdir.hash = rte_le_to_cpu_16(
1551 rxdp[j].wb.lower.hi_dword.csum_ip.csum) &
1552 IXGBE_ATR_HASH_MASK;
1553 mb->hash.fdir.id = rte_le_to_cpu_16(
1554 rxdp[j].wb.lower.hi_dword.csum_ip.ip_id);
1558 /* Move mbuf pointers from the S/W ring to the stage */
1559 for (j = 0; j < LOOK_AHEAD; ++j) {
1560 rxq->rx_stage[i + j] = rxep[j].mbuf;
1563 /* stop if all requested packets could not be received */
1564 if (nb_dd != LOOK_AHEAD)
1568 /* clear software ring entries so we can cleanup correctly */
1569 for (i = 0; i < nb_rx; ++i) {
1570 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
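/*
 * In other words, statuses are scanned in groups of LOOK_AHEAD (8);
 * nb_dd counts how many descriptors in a group are contiguously done,
 * and only those are turned into mbufs and staged. For example, if the
 * first group has all 8 DD bits set and the second group only 3, the
 * function reports 11 received packets and stops scanning.
 */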
1578 ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf)
1580 volatile union ixgbe_adv_rx_desc *rxdp;
1581 struct ixgbe_rx_entry *rxep;
1582 struct rte_mbuf *mb;
1587 /* allocate buffers in bulk directly into the S/W ring */
1588 alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
1589 rxep = &rxq->sw_ring[alloc_idx];
1590 diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
1591 rxq->rx_free_thresh);
1592 if (unlikely(diag != 0))
1595 rxdp = &rxq->rx_ring[alloc_idx];
1596 for (i = 0; i < rxq->rx_free_thresh; ++i) {
1597 /* populate the static rte mbuf fields */
1600 mb->port = rxq->port_id;
1603 rte_mbuf_refcnt_set(mb, 1);
1604 mb->data_off = RTE_PKTMBUF_HEADROOM;
1606 /* populate the descriptors */
1607 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1608 rxdp[i].read.hdr_addr = 0;
1609 rxdp[i].read.pkt_addr = dma_addr;
1612 /* update state of internal queue structure */
1613 rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh;
1614 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1615 rxq->rx_free_trigger = rxq->rx_free_thresh - 1;
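/*
 * Example of the refill bookkeeping above, assuming rx_free_thresh = 32:
 * rx_free_trigger starts at 31, so the first bulk allocation fills
 * sw_ring[0..31] and re-arms their descriptors, after which the trigger
 * moves to 63; past the end of the ring it wraps back to 31.
 */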
1621 static inline uint16_t
1622 ixgbe_rx_fill_from_stage(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
1625 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1628 /* how many packets are ready to return? */
1629 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1631 /* copy mbuf pointers to the application's packet list */
1632 for (i = 0; i < nb_pkts; ++i)
1633 rx_pkts[i] = stage[i];
1635 /* update internal queue state */
1636 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1637 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1642 static inline uint16_t
1643 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1646 struct ixgbe_rx_queue *rxq = (struct ixgbe_rx_queue *)rx_queue;
1649 /* Any previously recv'd pkts will be returned from the Rx stage */
1650 if (rxq->rx_nb_avail)
1651 return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1653 /* Scan the H/W ring for packets to receive */
1654 nb_rx = (uint16_t)ixgbe_rx_scan_hw_ring(rxq);
1656 /* update internal queue state */
1657 rxq->rx_next_avail = 0;
1658 rxq->rx_nb_avail = nb_rx;
1659 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1661 /* if required, allocate new buffers to replenish descriptors */
1662 if (rxq->rx_tail > rxq->rx_free_trigger) {
1663 uint16_t cur_free_trigger = rxq->rx_free_trigger;
1665 if (ixgbe_rx_alloc_bufs(rxq, true) != 0) {
1668 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1669 "queue_id=%u", (unsigned) rxq->port_id,
1670 (unsigned) rxq->queue_id);
1672 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
1673 rxq->rx_free_thresh;
1676 * Need to rewind any previous receives if we cannot
1677 * allocate new buffers to replenish the old ones.
1679 rxq->rx_nb_avail = 0;
1680 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1681 for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j)
1682 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1687 /* update tail pointer */
1689 IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr,
1693 if (rxq->rx_tail >= rxq->nb_rx_desc)
1696 /* received any packets this loop? */
1697 if (rxq->rx_nb_avail)
1698 return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1703 /* split requests into chunks of size RTE_PMD_IXGBE_RX_MAX_BURST */
1705 ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1710 if (unlikely(nb_pkts == 0))
1713 if (likely(nb_pkts <= RTE_PMD_IXGBE_RX_MAX_BURST))
1714 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1716 /* request is relatively large, chunk it up */
1721 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_RX_MAX_BURST);
1722 ret = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1723 nb_rx = (uint16_t)(nb_rx + ret);
1724 nb_pkts = (uint16_t)(nb_pkts - ret);
1733 ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1736 struct ixgbe_rx_queue *rxq;
1737 volatile union ixgbe_adv_rx_desc *rx_ring;
1738 volatile union ixgbe_adv_rx_desc *rxdp;
1739 struct ixgbe_rx_entry *sw_ring;
1740 struct ixgbe_rx_entry *rxe;
1741 struct rte_mbuf *rxm;
1742 struct rte_mbuf *nmb;
1743 union ixgbe_adv_rx_desc rxd;
1752 uint64_t vlan_flags;
1757 rx_id = rxq->rx_tail;
1758 rx_ring = rxq->rx_ring;
1759 sw_ring = rxq->sw_ring;
1760 vlan_flags = rxq->vlan_flags;
1761 while (nb_rx < nb_pkts) {
1763 * The order of operations here is important as the DD status
1764 * bit must not be read after any other descriptor fields.
1765 * rx_ring and rxdp are pointing to volatile data so the order
1766 * of accesses cannot be reordered by the compiler. If they were
1767 * not volatile, they could be reordered which could lead to
1768 * using invalid descriptor fields when read from rxd.
1770 rxdp = &rx_ring[rx_id];
1771 staterr = rxdp->wb.upper.status_error;
1772 if (!(staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
1779 * If the IXGBE_RXDADV_STAT_EOP flag is not set, the RX packet
1780 * is likely to be invalid and to be dropped by the various
1781 * validation checks performed by the network stack.
1783 * Allocate a new mbuf to replenish the RX ring descriptor.
1784 * If the allocation fails:
1785 * - arrange for that RX descriptor to be the first one
1786 * being parsed the next time the receive function is
1787 * invoked [on the same queue].
1789 * - Stop parsing the RX ring and return immediately.
1791 * This policy does not drop the packet received in the RX
1792 * descriptor for which the allocation of a new mbuf failed.
1793 * Thus, it allows that packet to be retrieved later once
1794 * mbufs have been freed in the meantime.
1795 * As a side effect, holding RX descriptors instead of
1796 * systematically giving them back to the NIC may lead to
1797 * RX ring exhaustion situations.
1798 * However, the NIC can gracefully prevent such situations
1799 * from happening by sending specific "back-pressure" flow control
1800 * frames to its peer(s).
1802 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1803 "ext_err_stat=0x%08x pkt_len=%u",
1804 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1805 (unsigned) rx_id, (unsigned) staterr,
1806 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
1808 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1810 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1811 "queue_id=%u", (unsigned) rxq->port_id,
1812 (unsigned) rxq->queue_id);
1813 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1818 rxe = &sw_ring[rx_id];
1820 if (rx_id == rxq->nb_rx_desc)
1823 /* Prefetch next mbuf while processing current one. */
1824 rte_ixgbe_prefetch(sw_ring[rx_id].mbuf);
1827 * When next RX descriptor is on a cache-line boundary,
1828 * prefetch the next 4 RX descriptors and the next 8 pointers
1831 if ((rx_id & 0x3) == 0) {
1832 rte_ixgbe_prefetch(&rx_ring[rx_id]);
1833 rte_ixgbe_prefetch(&sw_ring[rx_id]);
1839 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1840 rxdp->read.hdr_addr = 0;
1841 rxdp->read.pkt_addr = dma_addr;
1844 * Initialize the returned mbuf.
1845 * 1) setup generic mbuf fields:
1846 * - number of segments,
1849 * - RX port identifier.
1850 * 2) integrate hardware offload data, if any:
1851 * - RSS flag & hash,
1852 * - IP checksum flag,
1853 * - VLAN TCI, if any,
1856 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
1858 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1859 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
1862 rxm->pkt_len = pkt_len;
1863 rxm->data_len = pkt_len;
1864 rxm->port = rxq->port_id;
1866 pkt_info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1867 /* Only valid if PKT_RX_VLAN set in pkt_flags */
1868 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1870 pkt_flags = rx_desc_status_to_pkt_flags(staterr, vlan_flags);
1871 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1872 pkt_flags = pkt_flags |
1873 ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info);
1874 rxm->ol_flags = pkt_flags;
1876 ixgbe_rxd_pkt_info_to_pkt_type(pkt_info,
1877 rxq->pkt_type_mask);
1879 if (likely(pkt_flags & PKT_RX_RSS_HASH))
1880 rxm->hash.rss = rte_le_to_cpu_32(
1881 rxd.wb.lower.hi_dword.rss);
1882 else if (pkt_flags & PKT_RX_FDIR) {
1883 rxm->hash.fdir.hash = rte_le_to_cpu_16(
1884 rxd.wb.lower.hi_dword.csum_ip.csum) &
1885 IXGBE_ATR_HASH_MASK;
1886 rxm->hash.fdir.id = rte_le_to_cpu_16(
1887 rxd.wb.lower.hi_dword.csum_ip.ip_id);
1890 * Store the mbuf address into the next entry of the array
1891 * of returned packets.
1893 rx_pkts[nb_rx++] = rxm;
1895 rxq->rx_tail = rx_id;
1898 * If the number of free RX descriptors is greater than the RX free
1899 * threshold of the queue, advance the Receive Descriptor Tail (RDT) register.
1901 * Update the RDT with the value of the last processed RX descriptor
1902 * minus 1, to guarantee that the RDT register is never equal to the
1903 * RDH register, which creates a "full" ring situation from the
1904 * hardware point of view...
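/*
 * For example, with a 128-descriptor ring, once the descriptor at
 * index 0 has just been processed (rx_id == 0), the wrap-around below
 * writes RDT = 127 rather than 0, so RDT never catches up with RDH.
 */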
1906 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1907 if (nb_hold > rxq->rx_free_thresh) {
1908 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1909 "nb_hold=%u nb_rx=%u",
1910 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1911 (unsigned) rx_id, (unsigned) nb_hold,
1913 rx_id = (uint16_t) ((rx_id == 0) ?
1914 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1915 IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1918 rxq->nb_rx_hold = nb_hold;
1923 * Detect an RSC descriptor.
1925 static inline uint32_t
1926 ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
1928 return (rte_le_to_cpu_32(rx->wb.lower.lo_dword.data) &
1929 IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
1933 * ixgbe_fill_cluster_head_buf - fill the first mbuf of the returned packet
1935 * Fill the following info in the HEAD buffer of the Rx cluster:
1936 * - RX port identifier
1937 * - hardware offload data, if any:
1939 * - IP checksum flag
1940 * - VLAN TCI, if any
1942 * @head HEAD of the packet cluster
1943 * @desc HW descriptor to get data from
1944 * @rxq Pointer to the Rx queue
1947 ixgbe_fill_cluster_head_buf(
1948 struct rte_mbuf *head,
1949 union ixgbe_adv_rx_desc *desc,
1950 struct ixgbe_rx_queue *rxq,
1956 head->port = rxq->port_id;
1958 /* The vlan_tci field is only valid when PKT_RX_VLAN is
1959 * set in the pkt_flags field.
1961 head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
1962 pkt_info = rte_le_to_cpu_32(desc->wb.lower.lo_dword.data);
1963 pkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags);
1964 pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
1965 pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info);
1966 head->ol_flags = pkt_flags;
1968 ixgbe_rxd_pkt_info_to_pkt_type(pkt_info, rxq->pkt_type_mask);
1970 if (likely(pkt_flags & PKT_RX_RSS_HASH))
1971 head->hash.rss = rte_le_to_cpu_32(desc->wb.lower.hi_dword.rss);
1972 else if (pkt_flags & PKT_RX_FDIR) {
1973 head->hash.fdir.hash =
1974 rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.csum)
1975 & IXGBE_ATR_HASH_MASK;
1976 head->hash.fdir.id =
1977 rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.ip_id);
1982 * ixgbe_recv_pkts_lro - receive handler for the LRO case.
1984 * @rx_queue Rx queue handle
1985 * @rx_pkts table of received packets
1986 * @nb_pkts size of rx_pkts table
1987 * @bulk_alloc if TRUE bulk allocation is used for HW ring refilling
1989 * Handles the Rx HW ring completions when RSC feature is configured. Uses an
1990 * additional ring of ixgbe_rsc_entry's that will hold the relevant RSC info.
1992 * We use the same logic as in Linux and in FreeBSD ixgbe drivers:
1993 * 1) When non-EOP RSC completion arrives:
1994 * a) Update the HEAD of the current RSC aggregation cluster with the new
1995 * segment's data length.
1996 * b) Set the "next" pointer of the current segment to point to the segment
1997 * at the NEXTP index.
1998 * c) Pass the HEAD of RSC aggregation cluster on to the next NEXTP entry
1999 * in the sw_rsc_ring.
2000 * 2) When EOP arrives we just update the cluster's total length and offload
2001 * flags and deliver the cluster up to the upper layers. In our case - put it
2002 * in the rx_pkts table.
2004 * Returns the number of received packets/clusters (according to the "bulk
2005 * receive" interface).
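/*
 * Worked example of the flow above: an RSC aggregation made of three
 * segments arrives as two non-EOP completions followed by one EOP
 * completion. The first two iterations link the segments together and
 * pass the HEAD pointer forward through sw_sc_ring; the final (EOP)
 * iteration leaves first_seg with nb_segs == 3 and pkt_len equal to
 * the sum of the three data_len values before handing it to rx_pkts[].
 */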
2007 static inline uint16_t
2008 ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
2011 struct ixgbe_rx_queue *rxq = rx_queue;
2012 volatile union ixgbe_adv_rx_desc *rx_ring = rxq->rx_ring;
2013 struct ixgbe_rx_entry *sw_ring = rxq->sw_ring;
2014 struct ixgbe_scattered_rx_entry *sw_sc_ring = rxq->sw_sc_ring;
2015 uint16_t rx_id = rxq->rx_tail;
2017 uint16_t nb_hold = rxq->nb_rx_hold;
2018 uint16_t prev_id = rxq->rx_tail;
2020 while (nb_rx < nb_pkts) {
2022 struct ixgbe_rx_entry *rxe;
2023 struct ixgbe_scattered_rx_entry *sc_entry;
2024 struct ixgbe_scattered_rx_entry *next_sc_entry;
2025 struct ixgbe_rx_entry *next_rxe = NULL;
2026 struct rte_mbuf *first_seg;
2027 struct rte_mbuf *rxm;
2028 struct rte_mbuf *nmb;
2029 union ixgbe_adv_rx_desc rxd;
2032 volatile union ixgbe_adv_rx_desc *rxdp;
2037 * The code in this whole file uses the volatile pointer to
2038 * ensure the read ordering of the status and the rest of the
2039 * descriptor fields (on the compiler level only!!!). This is so
2040 * UGLY - why not just use a compiler barrier instead? DPDK
2041 * even has the rte_compiler_barrier() for that.
2043 * But most importantly this is just wrong because this doesn't
2044 * ensure memory ordering in a general case at all. For
2045 * instance, DPDK is supposed to work on Power CPUs where
2046 * a compiler barrier may just not be enough!
2048 * I tried to write only this function properly to have a
2049 * starting point (as a part of an LRO/RSC series) but the
2050 * compiler cursed at me when I tried to cast away the
2051 * "volatile" from rx_ring (yes, it's volatile too!!!). So, I'm
2052 * keeping it the way it is for now.
2054 * The code in this file is broken in so many other places and
2055 * will just not work on a big endian CPU anyway; therefore the
2056 * lines below will have to be revisited together with the rest
2060 * - Get rid of the "volatile" crap and let the compiler do its job.
2062 * - Use the proper memory barrier (rte_rmb()) to ensure the
2063 * memory ordering below.
2065 rxdp = &rx_ring[rx_id];
2066 staterr = rte_le_to_cpu_32(rxdp->wb.upper.status_error);
2068 if (!(staterr & IXGBE_RXDADV_STAT_DD))
2073 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
2074 "staterr=0x%x data_len=%u",
2075 rxq->port_id, rxq->queue_id, rx_id, staterr,
2076 rte_le_to_cpu_16(rxd.wb.upper.length));
2079 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
2081 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed "
2082 "port_id=%u queue_id=%u",
2083 rxq->port_id, rxq->queue_id);
2085 rte_eth_devices[rxq->port_id].data->
2086 rx_mbuf_alloc_failed++;
2089 } else if (nb_hold > rxq->rx_free_thresh) {
2090 uint16_t next_rdt = rxq->rx_free_trigger;
2092 if (!ixgbe_rx_alloc_bufs(rxq, false)) {
2094 IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr,
2096 nb_hold -= rxq->rx_free_thresh;
2098 PMD_RX_LOG(DEBUG, "RX bulk alloc failed "
2099 "port_id=%u queue_id=%u",
2100 rxq->port_id, rxq->queue_id);
2102 rte_eth_devices[rxq->port_id].data->
2103 rx_mbuf_alloc_failed++;
2109 rxe = &sw_ring[rx_id];
2110 eop = staterr & IXGBE_RXDADV_STAT_EOP;
2112 next_id = rx_id + 1;
2113 if (next_id == rxq->nb_rx_desc)
2116 /* Prefetch next mbuf while processing current one. */
2117 rte_ixgbe_prefetch(sw_ring[next_id].mbuf);
2120 * When the next RX descriptor is on a cache-line boundary,
2121 * prefetch the next 4 RX descriptors and the next 4 pointers
2124 if ((next_id & 0x3) == 0) {
2125 rte_ixgbe_prefetch(&rx_ring[next_id]);
2126 rte_ixgbe_prefetch(&sw_ring[next_id]);
2133 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
2135 * Update RX descriptor with the physical address of the
2136 * new data buffer of the new allocated mbuf.
2140 rxm->data_off = RTE_PKTMBUF_HEADROOM;
2141 rxdp->read.hdr_addr = 0;
2142 rxdp->read.pkt_addr = dma;
2147 * Set data length & data buffer address of mbuf.
2149 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
2150 rxm->data_len = data_len;
2155 * Get next descriptor index:
2156 * - For RSC it's in the NEXTP field.
2157 * - For a scattered packet - it's just the following descriptor.
2160 if (ixgbe_rsc_count(&rxd))
2162 (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
2163 IXGBE_RXDADV_NEXTP_SHIFT;
2167 next_sc_entry = &sw_sc_ring[nextp_id];
2168 next_rxe = &sw_ring[nextp_id];
2169 rte_ixgbe_prefetch(next_rxe);
2172 sc_entry = &sw_sc_ring[rx_id];
2173 first_seg = sc_entry->fbuf;
2174 sc_entry->fbuf = NULL;
2177 * If this is the first buffer of the received packet,
2178 * set the pointer to the first mbuf of the packet and
2179 * initialize its context.
2180 * Otherwise, update the total length and the number of segments
2181 * of the current scattered packet, and update the pointer to
2182 * the last mbuf of the current packet.
2184 if (first_seg == NULL) {
2186 first_seg->pkt_len = data_len;
2187 first_seg->nb_segs = 1;
2189 first_seg->pkt_len += data_len;
2190 first_seg->nb_segs++;
2197 * If this is not the last buffer of the received packet, update
2198 * the pointer to the first mbuf at the NEXTP entry in the
2199 * sw_sc_ring and continue to parse the RX ring.
2201 if (!eop && next_rxe) {
2202 rxm->next = next_rxe->mbuf;
2203 next_sc_entry->fbuf = first_seg;
2207 /* Initialize the first mbuf of the returned packet */
2208 ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq, staterr);
2211 * Deal with the case when HW CRC strip is disabled.
2212 * That can't happen when LRO is enabled, but still could
2213 * happen for scattered RX mode.
2215 first_seg->pkt_len -= rxq->crc_len;
2216 if (unlikely(rxm->data_len <= rxq->crc_len)) {
2217 struct rte_mbuf *lp;
2219 for (lp = first_seg; lp->next != rxm; lp = lp->next)
2222 first_seg->nb_segs--;
2223 lp->data_len -= rxq->crc_len - rxm->data_len;
2225 rte_pktmbuf_free_seg(rxm);
2227 rxm->data_len -= rxq->crc_len;
2229 /* Prefetch data of first segment, if configured to do so. */
2230 rte_packet_prefetch((char *)first_seg->buf_addr +
2231 first_seg->data_off);
2234 * Store the mbuf address into the next entry of the array
2235 * of returned packets.
2237 rx_pkts[nb_rx++] = first_seg;
2241 * Record index of the next RX descriptor to probe.
2243 rxq->rx_tail = rx_id;
2246 * If the number of free RX descriptors is greater than the RX free
2247 * threshold of the queue, advance the Receive Descriptor Tail (RDT) register.
2249 * Update the RDT with the value of the last processed RX descriptor
2250 * minus 1, to guarantee that the RDT register is never equal to the
2251 * RDH register, which creates a "full" ring situation from the
2252 * hardware point of view...
2254 if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {
2255 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
2256 "nb_hold=%u nb_rx=%u",
2257 rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
2260 IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr, prev_id);
2264 rxq->nb_rx_hold = nb_hold;
2269 ixgbe_recv_pkts_lro_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
2272 return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, false);
2276 ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
2279 return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, true);
2282 /*********************************************************************
2284 * Queue management functions
2286 **********************************************************************/
2288 static void __attribute__((cold))
2289 ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
2293 if (txq->sw_ring != NULL) {
2294 for (i = 0; i < txq->nb_tx_desc; i++) {
2295 if (txq->sw_ring[i].mbuf != NULL) {
2296 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
2297 txq->sw_ring[i].mbuf = NULL;
2303 static void __attribute__((cold))
2304 ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
2307 txq->sw_ring != NULL)
2308 rte_free(txq->sw_ring);
2311 static void __attribute__((cold))
2312 ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
2314 if (txq != NULL && txq->ops != NULL) {
2315 txq->ops->release_mbufs(txq);
2316 txq->ops->free_swring(txq);
2321 void __attribute__((cold))
2322 ixgbe_dev_tx_queue_release(void *txq)
2324 ixgbe_tx_queue_release(txq);
2327 /* (Re)set dynamic ixgbe_tx_queue fields to defaults */
2328 static void __attribute__((cold))
2329 ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
2331 static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
2332 struct ixgbe_tx_entry *txe = txq->sw_ring;
2335 /* Zero out HW ring memory */
2336 for (i = 0; i < txq->nb_tx_desc; i++) {
2337 txq->tx_ring[i] = zeroed_desc;
2340 /* Initialize SW ring entries */
2341 prev = (uint16_t) (txq->nb_tx_desc - 1);
2342 for (i = 0; i < txq->nb_tx_desc; i++) {
2343 volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
2345 txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD);
2348 txe[prev].next_id = i;
2352 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
2353 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2356 txq->nb_tx_used = 0;
2358 * Always allow 1 descriptor to be un-allocated to avoid
2359 * a H/W race condition
2361 txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
2362 txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
2364 memset((void *)&txq->ctx_cache, 0,
2365 IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
2368 static const struct ixgbe_txq_ops def_txq_ops = {
2369 .release_mbufs = ixgbe_tx_queue_release_mbufs,
2370 .free_swring = ixgbe_tx_free_swring,
2371 .reset = ixgbe_reset_tx_queue,
2374 /* Takes an ethdev and a queue and sets up the tx function to be used based on
2375 * the queue parameters. Used in tx_queue_setup by primary process and then
2376 * in dev_init by secondary process when attaching to an existing ethdev.
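/*
 * For instance, a queue configured with txq->offloads == 0 and
 * tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST ends up on the simple
 * (and possibly vector) path below, while requesting any Tx offload,
 * e.g. DEV_TX_OFFLOAD_TCP_TSO, selects the full-featured
 * ixgbe_xmit_pkts()/ixgbe_prep_pkts() pair instead.
 */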
2378 void __attribute__((cold))
2379 ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
2381 /* Use a simple Tx queue (no offloads, no multi segs) if possible */
2382 if ((txq->offloads == 0) &&
2383 #ifdef RTE_LIBRTE_SECURITY
2384 !(txq->using_ipsec) &&
2386 (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
2387 PMD_INIT_LOG(DEBUG, "Using simple tx code path");
2388 dev->tx_pkt_prepare = NULL;
2389 #ifdef RTE_IXGBE_INC_VECTOR
2390 if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
2391 (rte_eal_process_type() != RTE_PROC_PRIMARY ||
2392 ixgbe_txq_vec_setup(txq) == 0)) {
2393 PMD_INIT_LOG(DEBUG, "Vector tx enabled.");
2394 dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
2397 dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
2399 PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
2401 " - offloads = 0x%" PRIx64,
2404 " - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
2405 (unsigned long)txq->tx_rs_thresh,
2406 (unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
2407 dev->tx_pkt_burst = ixgbe_xmit_pkts;
2408 dev->tx_pkt_prepare = ixgbe_prep_pkts;
2413 ixgbe_get_tx_queue_offloads(struct rte_eth_dev *dev)
2421 ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
2423 uint64_t tx_offload_capa;
2424 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2427 DEV_TX_OFFLOAD_VLAN_INSERT |
2428 DEV_TX_OFFLOAD_IPV4_CKSUM |
2429 DEV_TX_OFFLOAD_UDP_CKSUM |
2430 DEV_TX_OFFLOAD_TCP_CKSUM |
2431 DEV_TX_OFFLOAD_SCTP_CKSUM |
2432 DEV_TX_OFFLOAD_TCP_TSO |
2433 DEV_TX_OFFLOAD_MULTI_SEGS;
2435 if (hw->mac.type == ixgbe_mac_82599EB ||
2436 hw->mac.type == ixgbe_mac_X540)
2437 tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
2439 if (hw->mac.type == ixgbe_mac_X550 ||
2440 hw->mac.type == ixgbe_mac_X550EM_x ||
2441 hw->mac.type == ixgbe_mac_X550EM_a)
2442 tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
2444 #ifdef RTE_LIBRTE_SECURITY
2445 if (dev->security_ctx)
2446 tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
2448 return tx_offload_capa;
2451 int __attribute__((cold))
2452 ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
2455 unsigned int socket_id,
2456 const struct rte_eth_txconf *tx_conf)
2458 const struct rte_memzone *tz;
2459 struct ixgbe_tx_queue *txq;
2460 struct ixgbe_hw *hw;
2461 uint16_t tx_rs_thresh, tx_free_thresh;
2464 PMD_INIT_FUNC_TRACE();
2465 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2467 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
2470 * Validate number of transmit descriptors.
2471 * It must not exceed the hardware maximum, and must be a multiple of IXGBE_TXD_ALIGN.
2474 if (nb_desc % IXGBE_TXD_ALIGN != 0 ||
2475 (nb_desc > IXGBE_MAX_RING_DESC) ||
2476 (nb_desc < IXGBE_MIN_RING_DESC)) {
2481 * The following two parameters control the setting of the RS bit on
2482 * transmit descriptors.
2483 * TX descriptors will have their RS bit set after txq->tx_rs_thresh
2484 * descriptors have been used.
2485 * The TX descriptor ring will be cleaned after txq->tx_free_thresh
2486 * descriptors are used or if the number of descriptors required
2487 * to transmit a packet is greater than the number of free TX descriptors.
2489 * The following constraints must be satisfied:
2490 * tx_rs_thresh must be greater than 0.
2491 * tx_rs_thresh must be less than the size of the ring minus 2.
2492 * tx_rs_thresh must be less than or equal to tx_free_thresh.
2493 * tx_rs_thresh must be a divisor of the ring size.
2494 * tx_free_thresh must be greater than 0.
2495 * tx_free_thresh must be less than the size of the ring minus 3.
2496 * One descriptor in the TX ring is used as a sentinel to avoid a
2497 * H/W race condition, hence the maximum threshold constraints.
2498 * When set to zero use default values.
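/*
 * Worked example: with nb_desc = 512, tx_rs_thresh = 32 and
 * tx_free_thresh = 32 every constraint above holds:
 * 32 > 0, 32 < 510, 32 <= 32, 512 % 32 == 0 and 32 < 509.
 */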
2500 tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
2501 tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
2502 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
2503 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
2504 if (tx_rs_thresh >= (nb_desc - 2)) {
2505 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number "
2506 "of TX descriptors minus 2. (tx_rs_thresh=%u "
2507 "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2508 (int)dev->data->port_id, (int)queue_idx);
2511 if (tx_rs_thresh > DEFAULT_TX_RS_THRESH) {
2512 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to %u. "
2513 "(tx_rs_thresh=%u port=%d queue=%d)",
2514 DEFAULT_TX_RS_THRESH, (unsigned int)tx_rs_thresh,
2515 (int)dev->data->port_id, (int)queue_idx);
2518 if (tx_free_thresh >= (nb_desc - 3)) {
2519 PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
2520 "number of TX descriptors minus 3. "
2521 "(tx_free_thresh=%u "
2522 "port=%d queue=%d)",
2523 (unsigned int)tx_free_thresh,
2524 (int)dev->data->port_id, (int)queue_idx);
2527 if (tx_rs_thresh > tx_free_thresh) {
2528 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to "
2529 "tx_free_thresh. (tx_free_thresh=%u "
2530 "tx_rs_thresh=%u port=%d queue=%d)",
2531 (unsigned int)tx_free_thresh,
2532 (unsigned int)tx_rs_thresh,
2533 (int)dev->data->port_id,
2537 if ((nb_desc % tx_rs_thresh) != 0) {
2538 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
2539 "number of TX descriptors. (tx_rs_thresh=%u "
2540 "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2541 (int)dev->data->port_id, (int)queue_idx);
2546 * If tx_rs_thresh is greater than 1, then TX WTHRESH should be
2547 * set to 0. If WTHRESH is greater than zero, the RS bit is ignored
2548 * by the NIC and all descriptors are written back after the NIC
2549 * accumulates WTHRESH descriptors.
2551 if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
2552 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
2553 "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
2554 "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2555 (int)dev->data->port_id, (int)queue_idx);
2559 /* Free memory prior to re-allocation if needed... */
2560 if (dev->data->tx_queues[queue_idx] != NULL) {
2561 ixgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
2562 dev->data->tx_queues[queue_idx] = NULL;
2565 /* First allocate the tx queue data structure */
2566 txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue),
2567 RTE_CACHE_LINE_SIZE, socket_id);
2572 * Allocate TX ring hardware descriptors. A memzone large enough to
2573 * handle the maximum ring size is allocated in order to allow for
2574 * resizing in later calls to the queue setup function.
2576 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
2577 sizeof(union ixgbe_adv_tx_desc) * IXGBE_MAX_RING_DESC,
2578 IXGBE_ALIGN, socket_id);
2580 ixgbe_tx_queue_release(txq);
2584 txq->nb_tx_desc = nb_desc;
2585 txq->tx_rs_thresh = tx_rs_thresh;
2586 txq->tx_free_thresh = tx_free_thresh;
2587 txq->pthresh = tx_conf->tx_thresh.pthresh;
2588 txq->hthresh = tx_conf->tx_thresh.hthresh;
2589 txq->wthresh = tx_conf->tx_thresh.wthresh;
2590 txq->queue_id = queue_idx;
2591 txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2592 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2593 txq->port_id = dev->data->port_id;
2594 txq->offloads = offloads;
2595 txq->ops = &def_txq_ops;
2596 txq->tx_deferred_start = tx_conf->tx_deferred_start;
2597 #ifdef RTE_LIBRTE_SECURITY
2598 txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
2599 DEV_TX_OFFLOAD_SECURITY);
2603 * Use the VFTDT/VFTDH registers instead of TDT/TDH when the device is a virtual function.
2605 if (hw->mac.type == ixgbe_mac_82599_vf ||
2606 hw->mac.type == ixgbe_mac_X540_vf ||
2607 hw->mac.type == ixgbe_mac_X550_vf ||
2608 hw->mac.type == ixgbe_mac_X550EM_x_vf ||
2609 hw->mac.type == ixgbe_mac_X550EM_a_vf) {
2610 txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx));
2611 txq->tdh_reg_addr = IXGBE_PCI_REG_ADDR(hw,
2612 IXGBE_VFTDH(queue_idx));
2614 txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
2615 txq->tdh_reg_addr = IXGBE_PCI_REG_ADDR(hw,
2616 IXGBE_TDH(txq->reg_idx));
2619 txq->tx_ring_phys_addr = tz->iova;
2620 txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;
2622 /* Allocate software ring */
2623 txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
2624 sizeof(struct ixgbe_tx_entry) * nb_desc,
2625 RTE_CACHE_LINE_SIZE, socket_id);
2626 if (txq->sw_ring == NULL) {
2627 ixgbe_tx_queue_release(txq);
2630 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
2631 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
2633 /* set up vector or scalar TX function as appropriate */
2634 ixgbe_set_tx_function(dev, txq);
2636 txq->ops->reset(txq);
2638 dev->data->tx_queues[queue_idx] = txq;
2645 * ixgbe_free_sc_cluster - free the not-yet-completed scattered cluster
2647 * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
2648 * in the sw_rsc_ring is not set to NULL but rather points to the next
2649 * mbuf of this RSC aggregation (that has not been completed yet and still
2650 * resides on the HW ring). So, instead of calling rte_pktmbuf_free(), we
2651 * just free the first "nb_segs" segments of the cluster explicitly by calling
2652 * rte_pktmbuf_free_seg().
2654 * @m scattered cluster head
2656 static void __attribute__((cold))
2657 ixgbe_free_sc_cluster(struct rte_mbuf *m)
2659 uint16_t i, nb_segs = m->nb_segs;
2660 struct rte_mbuf *next_seg;
2662 for (i = 0; i < nb_segs; i++) {
2664 rte_pktmbuf_free_seg(m);
2669 static void __attribute__((cold))
2670 ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
2674 #ifdef RTE_IXGBE_INC_VECTOR
2675 /* SSE Vector driver has a different way of releasing mbufs. */
2676 if (rxq->rx_using_sse) {
2677 ixgbe_rx_queue_release_mbufs_vec(rxq);
2682 if (rxq->sw_ring != NULL) {
2683 for (i = 0; i < rxq->nb_rx_desc; i++) {
2684 if (rxq->sw_ring[i].mbuf != NULL) {
2685 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
2686 rxq->sw_ring[i].mbuf = NULL;
2689 if (rxq->rx_nb_avail) {
2690 for (i = 0; i < rxq->rx_nb_avail; ++i) {
2691 struct rte_mbuf *mb;
2693 mb = rxq->rx_stage[rxq->rx_next_avail + i];
2694 rte_pktmbuf_free_seg(mb);
2696 rxq->rx_nb_avail = 0;
2700 if (rxq->sw_sc_ring)
2701 for (i = 0; i < rxq->nb_rx_desc; i++)
2702 if (rxq->sw_sc_ring[i].fbuf) {
2703 ixgbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
2704 rxq->sw_sc_ring[i].fbuf = NULL;
2708 static void __attribute__((cold))
2709 ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)
2712 ixgbe_rx_queue_release_mbufs(rxq);
2713 rte_free(rxq->sw_ring);
2714 rte_free(rxq->sw_sc_ring);
2719 void __attribute__((cold))
2720 ixgbe_dev_rx_queue_release(void *rxq)
2722 ixgbe_rx_queue_release(rxq);
2726 * Check if Rx Burst Bulk Alloc function can be used.
2728 * 0: the preconditions are satisfied and the bulk allocation function can be used.
2730 * -EINVAL: the preconditions are NOT satisfied and the default Rx burst
2731 * function must be used.
2733 static inline int __attribute__((cold))
2734 check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
2739 * Make sure the following pre-conditions are satisfied:
2740 * rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST
2741 * rxq->rx_free_thresh < rxq->nb_rx_desc
2742 * (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
2743 * Scattered packets are not supported. This should be checked
2744 * outside of this function.
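/*
 * For example, nb_rx_desc = 512 with rx_free_thresh = 64 satisfies all
 * three conditions (assuming the usual RTE_PMD_IXGBE_RX_MAX_BURST of
 * 32): 64 >= 32, 64 < 512 and 512 % 64 == 0.
 */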
2746 if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) {
2747 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2748 "rxq->rx_free_thresh=%d, "
2749 "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
2750 rxq->rx_free_thresh, RTE_PMD_IXGBE_RX_MAX_BURST);
2752 } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
2753 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2754 "rxq->rx_free_thresh=%d, "
2755 "rxq->nb_rx_desc=%d",
2756 rxq->rx_free_thresh, rxq->nb_rx_desc);
2758 } else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) {
2759 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2760 "rxq->nb_rx_desc=%d, "
2761 "rxq->rx_free_thresh=%d",
2762 rxq->nb_rx_desc, rxq->rx_free_thresh);
2769 /* Reset dynamic ixgbe_rx_queue fields back to defaults */
2770 static void __attribute__((cold))
2771 ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
2773 static const union ixgbe_adv_rx_desc zeroed_desc = {{0}};
2775 uint16_t len = rxq->nb_rx_desc;
2778 * By default, the Rx queue setup function allocates enough memory for
2779 * IXGBE_MAX_RING_DESC. The Rx Burst bulk allocation function requires
2780 * extra memory at the end of the descriptor ring to be zero'd out.
2782 if (adapter->rx_bulk_alloc_allowed)
2783 /* zero out extra memory */
2784 len += RTE_PMD_IXGBE_RX_MAX_BURST;
2787 * Zero out HW ring memory. Zero out extra memory at the end of
2788 * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
2789 * reads extra memory as zeros.
2791 for (i = 0; i < len; i++) {
2792 rxq->rx_ring[i] = zeroed_desc;
2796 * Initialize the extra software ring entries. Space for these extra
2797 * entries is always allocated.
2799 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
2800 for (i = rxq->nb_rx_desc; i < len; ++i) {
2801 rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
2804 rxq->rx_nb_avail = 0;
2805 rxq->rx_next_avail = 0;
2806 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
2808 rxq->nb_rx_hold = 0;
2809 rxq->pkt_first_seg = NULL;
2810 rxq->pkt_last_seg = NULL;
2812 #ifdef RTE_IXGBE_INC_VECTOR
2813 rxq->rxrearm_start = 0;
2814 rxq->rxrearm_nb = 0;
2819 ixgbe_is_vf(struct rte_eth_dev *dev)
2821 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2823 switch (hw->mac.type) {
2824 case ixgbe_mac_82599_vf:
2825 case ixgbe_mac_X540_vf:
2826 case ixgbe_mac_X550_vf:
2827 case ixgbe_mac_X550EM_x_vf:
2828 case ixgbe_mac_X550EM_a_vf:
2836 ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev)
2838 uint64_t offloads = 0;
2839 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2841 if (hw->mac.type != ixgbe_mac_82598EB)
2842 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
2848 ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
2851 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2853 offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
2854 DEV_RX_OFFLOAD_UDP_CKSUM |
2855 DEV_RX_OFFLOAD_TCP_CKSUM |
2856 DEV_RX_OFFLOAD_CRC_STRIP |
2857 DEV_RX_OFFLOAD_JUMBO_FRAME |
2858 DEV_RX_OFFLOAD_SCATTER;
2860 if (hw->mac.type == ixgbe_mac_82598EB)
2861 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
2863 if (ixgbe_is_vf(dev) == 0)
2864 offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER |
2865 DEV_RX_OFFLOAD_VLAN_EXTEND);
2868 * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
2871 if ((hw->mac.type == ixgbe_mac_82599EB ||
2872 hw->mac.type == ixgbe_mac_X540) &&
2873 !RTE_ETH_DEV_SRIOV(dev).active)
2874 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
2876 if (hw->mac.type == ixgbe_mac_82599EB ||
2877 hw->mac.type == ixgbe_mac_X540)
2878 offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
2880 if (hw->mac.type == ixgbe_mac_X550 ||
2881 hw->mac.type == ixgbe_mac_X550EM_x ||
2882 hw->mac.type == ixgbe_mac_X550EM_a)
2883 offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
2885 #ifdef RTE_LIBRTE_SECURITY
2886 if (dev->security_ctx)
2887 offloads |= DEV_RX_OFFLOAD_SECURITY;
2893 int __attribute__((cold))
2894 ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
2897 unsigned int socket_id,
2898 const struct rte_eth_rxconf *rx_conf,
2899 struct rte_mempool *mp)
2901 const struct rte_memzone *rz;
2902 struct ixgbe_rx_queue *rxq;
2903 struct ixgbe_hw *hw;
2905 struct ixgbe_adapter *adapter =
2906 (struct ixgbe_adapter *)dev->data->dev_private;
2909 PMD_INIT_FUNC_TRACE();
2910 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2912 offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
2915 * Validate number of receive descriptors.
2916 * It must not exceed the hardware maximum, and must be a multiple of IXGBE_RXD_ALIGN.
2919 if (nb_desc % IXGBE_RXD_ALIGN != 0 ||
2920 (nb_desc > IXGBE_MAX_RING_DESC) ||
2921 (nb_desc < IXGBE_MIN_RING_DESC)) {
2925 /* Free memory prior to re-allocation if needed... */
2926 if (dev->data->rx_queues[queue_idx] != NULL) {
2927 ixgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
2928 dev->data->rx_queues[queue_idx] = NULL;
2931 /* First allocate the rx queue data structure */
2932 rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct ixgbe_rx_queue),
2933 RTE_CACHE_LINE_SIZE, socket_id);
2937 rxq->nb_rx_desc = nb_desc;
2938 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
2939 rxq->queue_id = queue_idx;
2940 rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2941 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2942 rxq->port_id = dev->data->port_id;
2943 rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
2944 DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
2945 rxq->drop_en = rx_conf->rx_drop_en;
2946 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
2947 rxq->offloads = offloads;
2950 * The packet type in RX descriptor is different for different NICs.
2951 * Some bits are used for x550 but reserved for other NICs.
2952 * So set different masks for different NICs.
2954 if (hw->mac.type == ixgbe_mac_X550 ||
2955 hw->mac.type == ixgbe_mac_X550EM_x ||
2956 hw->mac.type == ixgbe_mac_X550EM_a ||
2957 hw->mac.type == ixgbe_mac_X550_vf ||
2958 hw->mac.type == ixgbe_mac_X550EM_x_vf ||
2959 hw->mac.type == ixgbe_mac_X550EM_a_vf)
2960 rxq->pkt_type_mask = IXGBE_PACKET_TYPE_MASK_X550;
2962 rxq->pkt_type_mask = IXGBE_PACKET_TYPE_MASK_82599;
2965 * Allocate RX ring hardware descriptors. A memzone large enough to
2966 * handle the maximum ring size is allocated in order to allow for
2967 * resizing in later calls to the queue setup function.
2969 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
2970 RX_RING_SZ, IXGBE_ALIGN, socket_id);
2972 ixgbe_rx_queue_release(rxq);
2977 * Zero init all the descriptors in the ring.
2979 memset(rz->addr, 0, RX_RING_SZ);
2982 * Use the VFRDT/VFRDH registers instead of RDT/RDH when the device is a virtual function.
2984 if (hw->mac.type == ixgbe_mac_82599_vf ||
2985 hw->mac.type == ixgbe_mac_X540_vf ||
2986 hw->mac.type == ixgbe_mac_X550_vf ||
2987 hw->mac.type == ixgbe_mac_X550EM_x_vf ||
2988 hw->mac.type == ixgbe_mac_X550EM_a_vf) {
2990 IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx));
2992 IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDH(queue_idx));
2995 IXGBE_PCI_REG_ADDR(hw, IXGBE_RDT(rxq->reg_idx));
2997 IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx));
3000 rxq->rx_ring_phys_addr = rz->iova;
3001 rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
3004 * Certain constraints must be met in order to use the bulk buffer
3005 * allocation Rx burst function. If any of the Rx queues doesn't meet them,
3006 * the feature should be disabled for the whole port.
3008 if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
3009 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc "
3010 "preconditions - canceling the feature for "
3011 "the whole port[%d]",
3012 rxq->queue_id, rxq->port_id);
3013 adapter->rx_bulk_alloc_allowed = false;
3017 * Allocate software ring. Allow for space at the end of the
3018 * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
3019 * function does not access an invalid memory region.
3022 if (adapter->rx_bulk_alloc_allowed)
3023 len += RTE_PMD_IXGBE_RX_MAX_BURST;
3025 rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
3026 sizeof(struct ixgbe_rx_entry) * len,
3027 RTE_CACHE_LINE_SIZE, socket_id);
3028 if (!rxq->sw_ring) {
3029 ixgbe_rx_queue_release(rxq);
3034 * Always allocate even if it's not going to be needed in order to
3035 * simplify the code.
3037 * This ring is used in LRO and Scattered Rx cases and Scattered Rx may
3038 * be requested in ixgbe_dev_rx_init(), which is called later from
3042 rte_zmalloc_socket("rxq->sw_sc_ring",
3043 sizeof(struct ixgbe_scattered_rx_entry) * len,
3044 RTE_CACHE_LINE_SIZE, socket_id);
3045 if (!rxq->sw_sc_ring) {
3046 ixgbe_rx_queue_release(rxq);
3050 PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p "
3051 "dma_addr=0x%"PRIx64,
3052 rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
3053 rxq->rx_ring_phys_addr);
3055 if (!rte_is_power_of_2(nb_desc)) {
3056 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
3057 "preconditions - canceling the feature for "
3058 "the whole port[%d]",
3059 rxq->queue_id, rxq->port_id);
3060 adapter->rx_vec_allowed = false;
3062 ixgbe_rxq_vec_setup(rxq);
3064 dev->data->rx_queues[queue_idx] = rxq;
3066 ixgbe_reset_rx_queue(adapter, rxq);
3072 ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
3074 #define IXGBE_RXQ_SCAN_INTERVAL 4
3075 volatile union ixgbe_adv_rx_desc *rxdp;
3076 struct ixgbe_rx_queue *rxq;
3079 rxq = dev->data->rx_queues[rx_queue_id];
3080 rxdp = &(rxq->rx_ring[rxq->rx_tail]);
3082 while ((desc < rxq->nb_rx_desc) &&
3083 (rxdp->wb.upper.status_error &
3084 rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) {
3085 desc += IXGBE_RXQ_SCAN_INTERVAL;
3086 rxdp += IXGBE_RXQ_SCAN_INTERVAL;
3087 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
3088 rxdp = &(rxq->rx_ring[rxq->rx_tail +
3089 desc - rxq->nb_rx_desc]);
3096 ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
3098 volatile union ixgbe_adv_rx_desc *rxdp;
3099 struct ixgbe_rx_queue *rxq = rx_queue;
3102 if (unlikely(offset >= rxq->nb_rx_desc))
3104 desc = rxq->rx_tail + offset;
3105 if (desc >= rxq->nb_rx_desc)
3106 desc -= rxq->nb_rx_desc;
3108 rxdp = &rxq->rx_ring[desc];
3109 return !!(rxdp->wb.upper.status_error &
3110 rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD));
3114 ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
3116 struct ixgbe_rx_queue *rxq = rx_queue;
3117 volatile uint32_t *status;
3118 uint32_t nb_hold, desc;
3120 if (unlikely(offset >= rxq->nb_rx_desc))
3123 #ifdef RTE_IXGBE_INC_VECTOR
3124 if (rxq->rx_using_sse)
3125 nb_hold = rxq->rxrearm_nb;
3128 nb_hold = rxq->nb_rx_hold;
3129 if (offset >= rxq->nb_rx_desc - nb_hold)
3130 return RTE_ETH_RX_DESC_UNAVAIL;
3132 desc = rxq->rx_tail + offset;
3133 if (desc >= rxq->nb_rx_desc)
3134 desc -= rxq->nb_rx_desc;
3136 status = &rxq->rx_ring[desc].wb.upper.status_error;
3137 if (*status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))
3138 return RTE_ETH_RX_DESC_DONE;
3140 return RTE_ETH_RX_DESC_AVAIL;
3144 ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
3146 struct ixgbe_tx_queue *txq = tx_queue;
3147 volatile uint32_t *status;
3150 if (unlikely(offset >= txq->nb_tx_desc))
3152 if (offset >= txq->nb_tx_desc - txq->nb_tx_free)
3153 return RTE_ETH_TX_DESC_DONE;
3155 desc = txq->tx_tail - offset - 1;
3157 desc += txq->nb_tx_desc;
3159 /* offset is too small, no other way than reading PCI reg */
3160 if (unlikely(offset < txq->tx_rs_thresh)) {
3161 int16_t tx_head, queue_size;
3162 tx_head = ixgbe_read_addr(txq->tdh_reg_addr);
3163 queue_size = txq->tx_tail - tx_head;
3165 queue_size += txq->nb_tx_desc;
3166 return queue_size > offset ? RTE_ETH_TX_DESC_FULL :
3167 RTE_ETH_TX_DESC_DONE;
3170 /* index of the dd bit to look at */
3171 dd = (desc / txq->tx_rs_thresh + 1) * txq->tx_rs_thresh - 1;
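/* E.g. with tx_rs_thresh = 32 and desc = 40, dd = (40 / 32 + 1) * 32 - 1 = 63,
 * i.e. the last descriptor of the 32-descriptor group containing desc.
 */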
3173 /* In full-featured mode, the RS bit is only set in the last descriptor */
3174 /* of a multi-segment packet */
3175 if (!((txq->offloads == 0) &&
3176 (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)))
3177 dd = txq->sw_ring[dd].last_id;
3179 status = &txq->tx_ring[dd].wb.status;
3180 if (*status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD))
3181 return RTE_ETH_TX_DESC_DONE;
3183 return RTE_ETH_TX_DESC_FULL;
3186 void __attribute__((cold))
3187 ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
3190 struct ixgbe_adapter *adapter =
3191 (struct ixgbe_adapter *)dev->data->dev_private;
3193 PMD_INIT_FUNC_TRACE();
3195 for (i = 0; i < dev->data->nb_tx_queues; i++) {
3196 struct ixgbe_tx_queue *txq = dev->data->tx_queues[i];
3199 txq->ops->release_mbufs(txq);
3200 txq->ops->reset(txq);
3204 for (i = 0; i < dev->data->nb_rx_queues; i++) {
3205 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
3208 ixgbe_rx_queue_release_mbufs(rxq);
3209 ixgbe_reset_rx_queue(adapter, rxq);
3215 ixgbe_dev_free_queues(struct rte_eth_dev *dev)
3219 PMD_INIT_FUNC_TRACE();
3221 for (i = 0; i < dev->data->nb_rx_queues; i++) {
3222 ixgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
3223 dev->data->rx_queues[i] = NULL;
3225 dev->data->nb_rx_queues = 0;
3227 for (i = 0; i < dev->data->nb_tx_queues; i++) {
3228 ixgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
3229 dev->data->tx_queues[i] = NULL;
3231 dev->data->nb_tx_queues = 0;
3234 /*********************************************************************
3236 * Device RX/TX init functions
3238 **********************************************************************/
3241 * Receive Side Scaling (RSS)
3242 * See section 7.1.2.8 in the following document:
3243 * "Intel 82599 10 GbE Controller Datasheet" - Revision 2.1 October 2009
3246 * The source and destination IP addresses of the IP header and the source
3247 * and destination ports of TCP/UDP headers, if any, of received packets are
3248 * hashed against a configurable random key to compute a 32-bit RSS hash result.
3249 * The seven (7) LSBs of the 32-bit hash result are used as an index into a
3250 * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
3251 * RSS output index which is used as the index of the RX queue in which to store the received packet.
3253 * The following output is supplied in the RX write-back descriptor:
3254 * - 32-bit result of the Microsoft RSS hash function,
3255 * - 4-bit RSS type field.
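/*
 * Example: a hash result of 0x1234ABCD has 0x4D (77) as its seven LSBs,
 * so RETA entry 77 supplies the RX queue for that packet.
 */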
3259 * RSS random key supplied in section 7.1.2.8.3 of the Intel 82599 datasheet.
3260 * Used as the default key.
3262 static uint8_t rss_intel_key[40] = {
3263 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
3264 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
3265 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
3266 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
3267 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
3271 ixgbe_rss_disable(struct rte_eth_dev *dev)
3273 struct ixgbe_hw *hw;
3277 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3278 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3279 mrqc = IXGBE_READ_REG(hw, mrqc_reg);
3280 mrqc &= ~IXGBE_MRQC_RSSEN;
3281 IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
3285 ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
3295 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3296 rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
3298 hash_key = rss_conf->rss_key;
3299 if (hash_key != NULL) {
3300 /* Fill in RSS hash key */
3301 for (i = 0; i < 10; i++) {
3302 rss_key = hash_key[(i * 4)];
3303 rss_key |= hash_key[(i * 4) + 1] << 8;
3304 rss_key |= hash_key[(i * 4) + 2] << 16;
3305 rss_key |= hash_key[(i * 4) + 3] << 24;
3306 IXGBE_WRITE_REG_ARRAY(hw, rssrk_reg, i, rss_key);
3310 /* Set configured hashing protocols in MRQC register */
3311 rss_hf = rss_conf->rss_hf;
3312 mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */
3313 if (rss_hf & ETH_RSS_IPV4)
3314 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
3315 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
3316 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
3317 if (rss_hf & ETH_RSS_IPV6)
3318 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
3319 if (rss_hf & ETH_RSS_IPV6_EX)
3320 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
3321 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
3322 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3323 if (rss_hf & ETH_RSS_IPV6_TCP_EX)
3324 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
3325 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
3326 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3327 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
3328 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3329 if (rss_hf & ETH_RSS_IPV6_UDP_EX)
3330 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
3331 IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
3335 ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
3336 struct rte_eth_rss_conf *rss_conf)
3338 struct ixgbe_hw *hw;
3343 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3345 if (!ixgbe_rss_update_sp(hw->mac.type)) {
3346 PMD_DRV_LOG(ERR, "RSS hash update is not supported on this "
3350 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3353 * Excerpt from section 7.1.2.8 Receive-Side Scaling (RSS):
3354 * "RSS enabling cannot be done dynamically while it must be
3355 * preceded by a software reset"
3356 * Before changing anything, first check that the update RSS operation
3357 * does not attempt to disable RSS, if RSS was enabled at
3358 * initialization time, or does not attempt to enable RSS, if RSS was
3359 * disabled at initialization time.
3361 rss_hf = rss_conf->rss_hf & IXGBE_RSS_OFFLOAD_ALL;
3362 mrqc = IXGBE_READ_REG(hw, mrqc_reg);
3363 if (!(mrqc & IXGBE_MRQC_RSSEN)) { /* RSS disabled */
3364 if (rss_hf != 0) /* Enable RSS */
3366 return 0; /* Nothing to do */
3369 if (rss_hf == 0) /* Disable RSS */
3371 ixgbe_hw_rss_hash_set(hw, rss_conf);
3376 ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
3377 struct rte_eth_rss_conf *rss_conf)
3379 struct ixgbe_hw *hw;
3388 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3389 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3390 rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
3391 hash_key = rss_conf->rss_key;
3392 if (hash_key != NULL) {
3393 /* Return RSS hash key */
3394 for (i = 0; i < 10; i++) {
3395 rss_key = IXGBE_READ_REG_ARRAY(hw, rssrk_reg, i);
3396 hash_key[(i * 4)] = rss_key & 0x000000FF;
3397 hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
3398 hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
3399 hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
3403 /* Get RSS functions configured in MRQC register */
3404 mrqc = IXGBE_READ_REG(hw, mrqc_reg);
3405 if ((mrqc & IXGBE_MRQC_RSSEN) == 0) { /* RSS is disabled */
3406 rss_conf->rss_hf = 0;
3410 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4)
3411 rss_hf |= ETH_RSS_IPV4;
3412 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP)
3413 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
3414 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6)
3415 rss_hf |= ETH_RSS_IPV6;
3416 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX)
3417 rss_hf |= ETH_RSS_IPV6_EX;
3418 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP)
3419 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
3420 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP)
3421 rss_hf |= ETH_RSS_IPV6_TCP_EX;
3422 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP)
3423 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
3424 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP)
3425 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
3426 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP)
3427 rss_hf |= ETH_RSS_IPV6_UDP_EX;
3428 rss_conf->rss_hf = rss_hf;
3433 ixgbe_rss_configure(struct rte_eth_dev *dev)
3435 struct rte_eth_rss_conf rss_conf;
3436 struct ixgbe_hw *hw;
3440 uint16_t sp_reta_size;
3443 PMD_INIT_FUNC_TRACE();
3444 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3446 sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
3449 * Fill in redirection table
3450 * The byte-swap is needed because NIC registers are in
3451 * little-endian order.
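/*
 * The loop below packs four 8-bit RETA entries into each 32-bit
 * register and assigns RX queues round-robin: j wraps back to 0 each
 * time it reaches nb_rx_queues, over all sp_reta_size entries.
 */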
3454 for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
3455 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
3457 if (j == dev->data->nb_rx_queues)
3459 reta = (reta << 8) | j;
3461 IXGBE_WRITE_REG(hw, reta_reg,
3466 * Configure the RSS key and the RSS protocols used to compute
3467 * the RSS hash of input packets.
3469 rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
3470 if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
3471 ixgbe_rss_disable(dev);
3474 if (rss_conf.rss_key == NULL)
3475 rss_conf.rss_key = rss_intel_key; /* Default hash key */
3476 ixgbe_hw_rss_hash_set(hw, &rss_conf);
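/*
 * Application-side sketch (illustrative, not part of this driver): the
 * path above is typically reached by requesting RSS in the port
 * configuration passed to rte_eth_dev_configure(), e.g.
 *
 * struct rte_eth_conf conf = {
 *     .rxmode = { .mq_mode = ETH_MQ_RX_RSS },
 *     .rx_adv_conf.rss_conf = {
 *         .rss_key = NULL,
 *         .rss_hf = ETH_RSS_IP | ETH_RSS_TCP,
 *     },
 * };
 *
 * With rss_key == NULL the default rss_intel_key above is programmed.
 */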
3479 #define NUM_VFTA_REGISTERS 128
3480 #define NIC_RX_BUFFER_SIZE 0x200
3481 #define X550_RX_BUFFER_SIZE 0x180
3484 ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
3486 struct rte_eth_vmdq_dcb_conf *cfg;
3487 struct ixgbe_hw *hw;
3488 enum rte_eth_nb_pools num_pools;
3489 uint32_t mrqc, vt_ctl, queue_mapping, vlanctrl;
3491 uint8_t nb_tcs; /* number of traffic classes */
3494 PMD_INIT_FUNC_TRACE();
3495 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3496 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
3497 num_pools = cfg->nb_queue_pools;
3498 /* Check we have a valid number of pools */
3499 if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
3500 ixgbe_rss_disable(dev);
3503 /* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
3504 nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
3508 * split rx buffer up into sections, each for 1 traffic class
3510 switch (hw->mac.type) {
3511 case ixgbe_mac_X550:
3512 case ixgbe_mac_X550EM_x:
3513 case ixgbe_mac_X550EM_a:
3514 pbsize = (uint16_t)(X550_RX_BUFFER_SIZE / nb_tcs);
3517 pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
3520 for (i = 0; i < nb_tcs; i++) {
3521 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
3523 rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
3524 /* clear 10 bits. */
3525 rxpbsize |= (pbsize << IXGBE_RXPBSIZE_SHIFT); /* set value */
3526 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
3528 /* zero alloc all unused TCs */
3529 for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3530 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
3532 rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
3533 /* clear 10 bits. */
3534 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
3537 /* MRQC: enable vmdq and dcb */
3538 mrqc = (num_pools == ETH_16_POOLS) ?
3539 IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN;
3540 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3542 /* PFVTCTL: turn on virtualisation and set the default pool */
3543 vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
3544 if (cfg->enable_default_pool) {
3545 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
3547 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
3550 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
3552 /* RTRUP2TC: mapping user priorities to traffic classes (TCs) */
3554 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
3556 * mapping is done with 3 bits per priority,
3557 * so shift by i*3 each time
3559 queue_mapping |= ((cfg->dcb_tc[i] & 0x07) << (i * 3));
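/*
 * E.g. cfg->dcb_tc[] = {0, 0, 1, 1, 2, 2, 3, 3} packs into
 * queue_mapping = 0x006D2240 (priority 0 in bits 2:0, priority 7 in
 * bits 23:21).
 */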
3561 IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, queue_mapping);
3563 /* RTRPCS: DCB related */
3564 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, IXGBE_RMCS_RRM);
3566 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
3567 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3568 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
3569 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
3571 /* VFTA - enable all vlan filters */
3572 for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
3573 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
3576 /* VFRE: pool enabling for receive - 16 or 32 */
3577 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0),
3578 num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
3581 * MPSAR - allow pools to read specific mac addresses
3582 * In this case, all pools should be able to read from mac addr 0
3584 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), 0xFFFFFFFF);
3585 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), 0xFFFFFFFF);
3587 /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
3588 for (i = 0; i < cfg->nb_pool_maps; i++) {
3589 /* set vlan id in VF register and set the valid bit */
3590 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN |
3591 (cfg->pool_map[i].vlan_id & 0xFFF)));
3593 * Put the allowed pools in VFB reg. As we only have 16 or 32
3594 * pools, we only need to use the first half of the register
3597 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), cfg->pool_map[i].pools);
3602 * ixgbe_dcb_tx_hw_config - Configure general DCB TX parameters
3603 * @dev: pointer to eth_dev structure
3604 * @dcb_config: pointer to ixgbe_dcb_config structure
3607 ixgbe_dcb_tx_hw_config(struct rte_eth_dev *dev,
3608 struct ixgbe_dcb_config *dcb_config)
3611 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3613 PMD_INIT_FUNC_TRACE();
3614 if (hw->mac.type != ixgbe_mac_82598EB) {
3615 /* Disable the Tx desc arbiter so that MTQC can be changed */
3616 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3617 reg |= IXGBE_RTTDCS_ARBDIS;
3618 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3620 /* Enable DCB for Tx with 8 TCs */
3621 if (dcb_config->num_tcs.pg_tcs == 8) {
3622 reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3624 reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3626 if (dcb_config->vt_mode)
3627 reg |= IXGBE_MTQC_VT_ENA;
3628 IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
3630 /* Enable the Tx desc arbiter */
3631 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3632 reg &= ~IXGBE_RTTDCS_ARBDIS;
3633 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3635 /* Enable Security TX Buffer IFG for DCB */
3636 reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
3637 reg |= IXGBE_SECTX_DCB;
3638 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
3643 * ixgbe_vmdq_dcb_hw_tx_config - Configure general VMDQ+DCB TX parameters
3644 * @dev: pointer to rte_eth_dev structure
3645 * @dcb_config: pointer to ixgbe_dcb_config structure
3648 ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
3649 struct ixgbe_dcb_config *dcb_config)
3651 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3652 &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
3653 struct ixgbe_hw *hw =
3654 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3656 PMD_INIT_FUNC_TRACE();
3657 if (hw->mac.type != ixgbe_mac_82598EB)
3658 /*PF VF Transmit Enable*/
3659 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0),
3660 vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
3662 /*Configure general DCB TX parameters*/
3663 ixgbe_dcb_tx_hw_config(dev, dcb_config);
3667 ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
3668 struct ixgbe_dcb_config *dcb_config)
3670 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3671 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
3672 struct ixgbe_dcb_tc_config *tc;
3675 /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
3676 if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) {
3677 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
3678 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
3680 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
3681 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
3684 /* Initialize User Priority to Traffic Class mapping */
3685 for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3686 tc = &dcb_config->tc_config[j];
3687 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
3690 /* User Priority to Traffic Class mapping */
3691 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3692 j = vmdq_rx_conf->dcb_tc[i];
3693 tc = &dcb_config->tc_config[j];
3694 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
3700 ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
3701 struct ixgbe_dcb_config *dcb_config)
3703 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3704 &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
3705 struct ixgbe_dcb_tc_config *tc;
3708 /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
3709 if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) {
3710 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
3711 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
3713 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
3714 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
3717 /* Initialize User Priority to Traffic Class mapping */
3718 for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3719 tc = &dcb_config->tc_config[j];
3720 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
3723 /* User Priority to Traffic Class mapping */
3724 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3725 j = vmdq_tx_conf->dcb_tc[i];
3726 tc = &dcb_config->tc_config[j];
3727 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
3733 ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
3734 struct ixgbe_dcb_config *dcb_config)
3736 struct rte_eth_dcb_rx_conf *rx_conf =
3737 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
3738 struct ixgbe_dcb_tc_config *tc;
3741 dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
3742 dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
3744 /* Initialize User Priority to Traffic Class mapping */
3745 for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3746 tc = &dcb_config->tc_config[j];
3747 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
3750 /* User Priority to Traffic Class mapping */
3751 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3752 j = rx_conf->dcb_tc[i];
3753 tc = &dcb_config->tc_config[j];
3754 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
3760 ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
3761 struct ixgbe_dcb_config *dcb_config)
3763 struct rte_eth_dcb_tx_conf *tx_conf =
3764 &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
3765 struct ixgbe_dcb_tc_config *tc;
3768 dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
3769 dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
3771 /* Initialize User Priority to Traffic Class mapping */
3772 for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3773 tc = &dcb_config->tc_config[j];
3774 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
3777 /* User Priority to Traffic Class mapping */
3778 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3779 j = tx_conf->dcb_tc[i];
3780 tc = &dcb_config->tc_config[j];
3781 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
3787 * ixgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
3788 * @dev: pointer to eth_dev structure
3789 * @dcb_config: pointer to ixgbe_dcb_config structure
3792 ixgbe_dcb_rx_hw_config(struct rte_eth_dev *dev,
3793 struct ixgbe_dcb_config *dcb_config)
3799 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3801 PMD_INIT_FUNC_TRACE();
3803 * Disable the arbiter before changing parameters
3804 * (always enable recycle mode; WSP)
3806 reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
3807 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
3809 if (hw->mac.type != ixgbe_mac_82598EB) {
3810 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
3811 if (dcb_config->num_tcs.pg_tcs == 4) {
3812 if (dcb_config->vt_mode)
3813 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3814 IXGBE_MRQC_VMDQRT4TCEN;
3816 /* no matter whether the mode is DCB or DCB_RSS, just
3817 * set the MRQE to RSSXTCEN. RSS is controlled
3820 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
3821 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3822 IXGBE_MRQC_RTRSS4TCEN;
3825 if (dcb_config->num_tcs.pg_tcs == 8) {
3826 if (dcb_config->vt_mode)
3827 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3828 IXGBE_MRQC_VMDQRT8TCEN;
3830 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
3831 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3832 IXGBE_MRQC_RTRSS8TCEN;
3836 IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
3838 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3839 /* Disable drop for all queues in VMDQ mode*/
3840 for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
3841 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3843 (q << IXGBE_QDE_IDX_SHIFT)));
3845 /* Enable drop for all queues in SRIOV mode */
3846 for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
3847 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3849 (q << IXGBE_QDE_IDX_SHIFT) |
3854 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
3855 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3856 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
3857 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
3859 /* VFTA - enable all vlan filters */
3860 for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
3861 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
3865 * Configure Rx packet plane (recycle mode; WSP) and
3868 reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
3869 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
3873 ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill,
3874 uint16_t *max, uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
3876 switch (hw->mac.type) {
3877 case ixgbe_mac_82598EB:
3878 ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
3880 case ixgbe_mac_82599EB:
3881 case ixgbe_mac_X540:
3882 case ixgbe_mac_X550:
3883 case ixgbe_mac_X550EM_x:
3884 case ixgbe_mac_X550EM_a:
3885 ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
3894 ixgbe_dcb_hw_arbite_tx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *max,
3895 uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
3897 switch (hw->mac.type) {
3898 case ixgbe_mac_82598EB:
3899 ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, tsa);
3900 ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, tsa);
3902 case ixgbe_mac_82599EB:
3903 case ixgbe_mac_X540:
3904 case ixgbe_mac_X550:
3905 case ixgbe_mac_X550EM_x:
3906 case ixgbe_mac_X550EM_a:
3907 ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, tsa);
3908 ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, tsa, map);
3915 #define DCB_RX_CONFIG 1
3916 #define DCB_TX_CONFIG 1
3917 #define DCB_TX_PB 1024
3919 * ixgbe_dcb_hw_configure - Enable DCB and configure
3920 * general DCB in VT mode and non-VT mode parameters
3921 * @dev: pointer to rte_eth_dev structure
3922 * @dcb_config: pointer to ixgbe_dcb_config structure
3925 ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
3926 struct ixgbe_dcb_config *dcb_config)
3929 uint8_t i, pfc_en, nb_tcs;
3930 uint16_t pbsize, rx_buffer_size;
3931 uint8_t config_dcb_rx = 0;
3932 uint8_t config_dcb_tx = 0;
3933 uint8_t tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3934 uint8_t bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3935 uint16_t refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3936 uint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3937 uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3938 struct ixgbe_dcb_tc_config *tc;
3939 uint32_t max_frame = dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
3940 struct ixgbe_hw *hw =
3941 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3942 struct ixgbe_bw_conf *bw_conf =
3943 IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
3945 switch (dev->data->dev_conf.rxmode.mq_mode) {
3946 case ETH_MQ_RX_VMDQ_DCB:
3947 dcb_config->vt_mode = true;
3948 if (hw->mac.type != ixgbe_mac_82598EB) {
3949 config_dcb_rx = DCB_RX_CONFIG;
3951		 * get DCB and VT Rx configuration parameters
3954 ixgbe_vmdq_dcb_rx_config(dev, dcb_config);
3955 /*Configure general VMDQ and DCB RX parameters*/
3956 ixgbe_vmdq_dcb_configure(dev);
3960 case ETH_MQ_RX_DCB_RSS:
3961 dcb_config->vt_mode = false;
3962 config_dcb_rx = DCB_RX_CONFIG;
3963		/* Get DCB Rx configuration parameters from rte_eth_conf */
3964 ixgbe_dcb_rx_config(dev, dcb_config);
3965 /*Configure general DCB RX parameters*/
3966 ixgbe_dcb_rx_hw_config(dev, dcb_config);
3969 PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
3972 switch (dev->data->dev_conf.txmode.mq_mode) {
3973 case ETH_MQ_TX_VMDQ_DCB:
3974 dcb_config->vt_mode = true;
3975 config_dcb_tx = DCB_TX_CONFIG;
3976 /* get DCB and VT TX configuration parameters
3979 ixgbe_dcb_vt_tx_config(dev, dcb_config);
3980 /*Configure general VMDQ and DCB TX parameters*/
3981 ixgbe_vmdq_dcb_hw_tx_config(dev, dcb_config);
3985 dcb_config->vt_mode = false;
3986 config_dcb_tx = DCB_TX_CONFIG;
3987 /*get DCB TX configuration parameters from rte_eth_conf*/
3988 ixgbe_dcb_tx_config(dev, dcb_config);
3989 /*Configure general DCB TX parameters*/
3990 ixgbe_dcb_tx_hw_config(dev, dcb_config);
3993 PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration");
3997 nb_tcs = dcb_config->num_tcs.pfc_tcs;
3999 ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
4000 if (nb_tcs == ETH_4_TCS) {
4001 /* Avoid un-configured priority mapping to TC0 */
4003 uint8_t mask = 0xFF;
4005 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
4006 mask = (uint8_t)(mask & (~(1 << map[i])));
4007 for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
4008 if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
4012 /* Re-configure 4 TCs BW */
4013 for (i = 0; i < nb_tcs; i++) {
4014 tc = &dcb_config->tc_config[i];
4015 if (bw_conf->tc_num != nb_tcs)
4016 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
4017 (uint8_t)(100 / nb_tcs);
4018 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
4019 (uint8_t)(100 / nb_tcs);
4021 for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
4022 tc = &dcb_config->tc_config[i];
4023 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
4024 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 0;
4027 /* Re-configure 8 TCs BW */
4028 for (i = 0; i < nb_tcs; i++) {
4029 tc = &dcb_config->tc_config[i];
4030 if (bw_conf->tc_num != nb_tcs)
4031 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
4032 (uint8_t)(100 / nb_tcs + (i & 1));
4033 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
4034 (uint8_t)(100 / nb_tcs + (i & 1));
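/*
 * Worked example of the default bandwidth split above (sketch only): with
 * 8 TCs, 100 / 8 = 12 and the (i & 1) term adds 1% to every odd TC so the
 * percentages sum to 100; with 4 TCs the plain 100 / 4 = 25 split already
 * sums to 100.
 */
#if 0
static void dcb_bw_split_example(void)
{
	uint8_t bw[8];
	int i, total = 0;

	for (i = 0; i < 8; i++) {
		bw[i] = (uint8_t)(100 / 8 + (i & 1));	/* 12, 13, 12, 13, ... */
		total += bw[i];
	}
	/* total == 4 * 12 + 4 * 13 == 100 */
}
#endif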
4038 switch (hw->mac.type) {
4039 case ixgbe_mac_X550:
4040 case ixgbe_mac_X550EM_x:
4041 case ixgbe_mac_X550EM_a:
4042 rx_buffer_size = X550_RX_BUFFER_SIZE;
4045 rx_buffer_size = NIC_RX_BUFFER_SIZE;
4049 if (config_dcb_rx) {
4050 /* Set RX buffer size */
4051 pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
4052 uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT;
4054 for (i = 0; i < nb_tcs; i++) {
4055 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
4057 /* zero alloc all unused TCs */
4058 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
4059 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
4062 if (config_dcb_tx) {
4063 /* Only support an equally distributed
4064 * Tx packet buffer strategy.
4066 uint32_t txpktsize = IXGBE_TXPBSIZE_MAX / nb_tcs;
4067 uint32_t txpbthresh = (txpktsize / DCB_TX_PB) - IXGBE_TXPKT_SIZE_MAX;
4069 for (i = 0; i < nb_tcs; i++) {
4070 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
4071 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
4073 /* Clear unused TCs, if any, to zero buffer size*/
4074 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
4075 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
4076 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
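/*
 * Worked example of the Tx packet buffer split above (sketch only; the
 * 160 KB total and the 0xA value for IXGBE_TXPKT_SIZE_MAX are assumptions,
 * not taken from this file):
 */
#if 0
static void dcb_txpb_example(void)
{
	uint32_t txpb_total = 160 * 1024;		/* assumed Tx packet buffer total */
	uint32_t txpktsize = txpb_total / 4;		/* 4 TCs -> 40 KB per TC          */
	uint32_t txpbthresh = (txpktsize / 1024) - 0xA;	/* DCB_TX_PB = 1024 -> 30 (KB)    */

	(void)txpbthresh;
}
#endif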
4080 /*Calculates traffic class credits*/
4081 ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
4082 IXGBE_DCB_TX_CONFIG);
4083 ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
4084 IXGBE_DCB_RX_CONFIG);
4086 if (config_dcb_rx) {
4087 /* Unpack CEE standard containers */
4088 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_RX_CONFIG, refill);
4089 ixgbe_dcb_unpack_max_cee(dcb_config, max);
4090 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_RX_CONFIG, bwgid);
4091 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_RX_CONFIG, tsa);
4092 /* Configure PG(ETS) RX */
4093 ixgbe_dcb_hw_arbite_rx_config(hw, refill, max, bwgid, tsa, map);
4096 if (config_dcb_tx) {
4097 /* Unpack CEE standard containers */
4098 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
4099 ixgbe_dcb_unpack_max_cee(dcb_config, max);
4100 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
4101 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
4102 /* Configure PG(ETS) TX */
4103 ixgbe_dcb_hw_arbite_tx_config(hw, refill, max, bwgid, tsa, map);
4106 /*Configure queue statistics registers*/
4107 ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
4109 /* Check if the PFC is supported */
4110 if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
4111 pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
4112 for (i = 0; i < nb_tcs; i++) {
4114			 * If the TC count is 8 and the default high_water is 48,
4115			 * then the low_water defaults to 16.
4117 hw->fc.high_water[i] = (pbsize * 3) / 4;
4118 hw->fc.low_water[i] = pbsize / 4;
4119 /* Enable pfc for this TC */
4120 tc = &dcb_config->tc_config[i];
4121 tc->pfc = ixgbe_dcb_pfc_enabled;
4123 ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
4124 if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
4126 ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
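/*
 * Worked example of the per-TC PFC watermarks above (sketch only; the
 * 512 KB Rx packet buffer is an assumption consistent with the 48/16
 * defaults mentioned in the comment):
 */
#if 0
static void dcb_pfc_watermark_example(void)
{
	uint16_t pbsize = 512 / 8;			/* 64 KB per TC with 8 TCs */
	uint16_t high_water = (pbsize * 3) / 4;		/* 48 KB                   */
	uint16_t low_water = pbsize / 4;		/* 16 KB                   */

	(void)high_water; (void)low_water;
}
#endif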
4133 * ixgbe_configure_dcb - Configure DCB Hardware
4134 * @dev: pointer to rte_eth_dev
4136 void ixgbe_configure_dcb(struct rte_eth_dev *dev)
4138 struct ixgbe_dcb_config *dcb_cfg =
4139 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
4140 struct rte_eth_conf *dev_conf = &(dev->data->dev_conf);
4142 PMD_INIT_FUNC_TRACE();
4144 /* check support mq_mode for DCB */
4145 if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
4146 (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) &&
4147 (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS))
4150 if (dev->data->nb_rx_queues > ETH_DCB_NUM_QUEUES)
4153 /** Configure DCB hardware **/
4154 ixgbe_dcb_hw_configure(dev, dcb_cfg);
4158  * VMDq is only supported on 10 GbE NICs.
4161 ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
4163 struct rte_eth_vmdq_rx_conf *cfg;
4164 struct ixgbe_hw *hw;
4165 enum rte_eth_nb_pools num_pools;
4166 uint32_t mrqc, vt_ctl, vlanctrl;
4170 PMD_INIT_FUNC_TRACE();
4171 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4172 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
4173 num_pools = cfg->nb_queue_pools;
4175 ixgbe_rss_disable(dev);
4177 /* MRQC: enable vmdq */
4178 mrqc = IXGBE_MRQC_VMDQEN;
4179 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
4181 /* PFVTCTL: turn on virtualisation and set the default pool */
4182 vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
4183 if (cfg->enable_default_pool)
4184 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
4186 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
4188 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
4190 for (i = 0; i < (int)num_pools; i++) {
4191 vmolr = ixgbe_convert_vm_rx_mask_to_val(cfg->rx_mode, vmolr);
4192 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
4195 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
4196 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4197 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
4198 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
4200 /* VFTA - enable all vlan filters */
4201 for (i = 0; i < NUM_VFTA_REGISTERS; i++)
4202 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), UINT32_MAX);
4204 /* VFRE: pool enabling for receive - 64 */
4205 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), UINT32_MAX);
4206 if (num_pools == ETH_64_POOLS)
4207 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), UINT32_MAX);
4210 * MPSAR - allow pools to read specific mac addresses
4211 * In this case, all pools should be able to read from mac addr 0
4213 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), UINT32_MAX);
4214 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), UINT32_MAX);
4216 /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
4217 for (i = 0; i < cfg->nb_pool_maps; i++) {
4218 /* set vlan id in VF register and set the valid bit */
4219 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN |
4220 (cfg->pool_map[i].vlan_id & IXGBE_RXD_VLAN_ID_MASK)));
4222 * Put the allowed pools in VFB reg. As we only have 16 or 64
4223 * pools, we only need to use the first half of the register
4226 if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0)
4227 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i * 2),
4228 (cfg->pool_map[i].pools & UINT32_MAX));
4230 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((i * 2 + 1)),
4231 ((cfg->pool_map[i].pools >> 32) & UINT32_MAX));
4235 /* PFDMA Tx General Switch Control Enables VMDQ loopback */
4236 if (cfg->enable_loop_back) {
4237 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
4238 for (i = 0; i < RTE_IXGBE_VMTXSW_REGISTER_COUNT; i++)
4239 IXGBE_WRITE_REG(hw, IXGBE_VMTXSW(i), UINT32_MAX);
4242 IXGBE_WRITE_FLUSH(hw);
4246  * ixgbe_vmdq_tx_hw_configure - Configure general VMDq TX parameters
4247 * @hw: pointer to hardware structure
4250 ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
4255 PMD_INIT_FUNC_TRACE();
4256 /*PF VF Transmit Enable*/
4257 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), UINT32_MAX);
4258 IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), UINT32_MAX);
4260 /* Disable the Tx desc arbiter so that MTQC can be changed */
4261 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
4262 reg |= IXGBE_RTTDCS_ARBDIS;
4263 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
4265 reg = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
4266 IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
4268 /* Disable drop for all queues */
4269 for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
4270 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4271 (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
4273 /* Enable the Tx desc arbiter */
4274 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
4275 reg &= ~IXGBE_RTTDCS_ARBDIS;
4276 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
4278 IXGBE_WRITE_FLUSH(hw);
4281 static int __attribute__((cold))
4282 ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
4284 struct ixgbe_rx_entry *rxe = rxq->sw_ring;
4288 /* Initialize software ring entries */
4289 for (i = 0; i < rxq->nb_rx_desc; i++) {
4290 volatile union ixgbe_adv_rx_desc *rxd;
4291 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
4294 PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
4295 (unsigned) rxq->queue_id);
4299 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
4300 mbuf->port = rxq->port_id;
4303 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
4304 rxd = &rxq->rx_ring[i];
4305 rxd->read.hdr_addr = 0;
4306 rxd->read.pkt_addr = dma_addr;
4314 ixgbe_config_vf_rss(struct rte_eth_dev *dev)
4316 struct ixgbe_hw *hw;
4319 ixgbe_rss_configure(dev);
4321 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4323 /* MRQC: enable VF RSS */
4324 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
4325 mrqc &= ~IXGBE_MRQC_MRQE_MASK;
4326 switch (RTE_ETH_DEV_SRIOV(dev).active) {
4328 mrqc |= IXGBE_MRQC_VMDQRSS64EN;
4332 mrqc |= IXGBE_MRQC_VMDQRSS32EN;
4336 PMD_INIT_LOG(ERR, "Invalid pool number in IOV mode with VMDQ RSS");
4340 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
4346 ixgbe_config_vf_default(struct rte_eth_dev *dev)
4348 struct ixgbe_hw *hw =
4349 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4351 switch (RTE_ETH_DEV_SRIOV(dev).active) {
4353 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
4358 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
4359 IXGBE_MRQC_VMDQRT4TCEN);
4363 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
4364 IXGBE_MRQC_VMDQRT8TCEN);
4368 "invalid pool number in IOV mode");
4375 ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
4377 struct ixgbe_hw *hw =
4378 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4380 if (hw->mac.type == ixgbe_mac_82598EB)
4383 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
4385 * SRIOV inactive scheme
4386 * any DCB/RSS w/o VMDq multi-queue setting
4388 switch (dev->data->dev_conf.rxmode.mq_mode) {
4390 case ETH_MQ_RX_DCB_RSS:
4391 case ETH_MQ_RX_VMDQ_RSS:
4392 ixgbe_rss_configure(dev);
4395 case ETH_MQ_RX_VMDQ_DCB:
4396 ixgbe_vmdq_dcb_configure(dev);
4399 case ETH_MQ_RX_VMDQ_ONLY:
4400 ixgbe_vmdq_rx_hw_configure(dev);
4403 case ETH_MQ_RX_NONE:
4405 /* if mq_mode is none, disable rss mode.*/
4406 ixgbe_rss_disable(dev);
4410 /* SRIOV active scheme
4411 * Support RSS together with SRIOV.
4413 switch (dev->data->dev_conf.rxmode.mq_mode) {
4415 case ETH_MQ_RX_VMDQ_RSS:
4416 ixgbe_config_vf_rss(dev);
4418 case ETH_MQ_RX_VMDQ_DCB:
4420 /* In SRIOV, the configuration is the same as VMDq case */
4421 ixgbe_vmdq_dcb_configure(dev);
4423 /* DCB/RSS together with SRIOV is not supported */
4424 case ETH_MQ_RX_VMDQ_DCB_RSS:
4425 case ETH_MQ_RX_DCB_RSS:
4427 "Could not support DCB/RSS with VMDq & SRIOV");
4430 ixgbe_config_vf_default(dev);
4439 ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
4441 struct ixgbe_hw *hw =
4442 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4446 if (hw->mac.type == ixgbe_mac_82598EB)
4449 /* disable arbiter before setting MTQC */
4450 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
4451 rttdcs |= IXGBE_RTTDCS_ARBDIS;
4452 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
4454 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
4456 * SRIOV inactive scheme
4457 * any DCB w/o VMDq multi-queue setting
4459 if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
4460 ixgbe_vmdq_tx_hw_configure(hw);
4462 mtqc = IXGBE_MTQC_64Q_1PB;
4463 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
4466 switch (RTE_ETH_DEV_SRIOV(dev).active) {
4469 * SRIOV active scheme
4470 * FIXME if support DCB together with VMDq & SRIOV
4473 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
4476 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_32VF;
4479 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_RT_ENA |
4483 mtqc = IXGBE_MTQC_64Q_1PB;
4484 PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
4486 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
4489 /* re-enable arbiter */
4490 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
4491 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
4497 * ixgbe_get_rscctl_maxdesc - Calculate the RSCCTL[n].MAXDESC for PF
4499 * Return the RSCCTL[n].MAXDESC for 82599 and x540 PF devices according to the
4500 * spec rev. 3.0 chapter 8.2.3.8.13.
4502 * @pool Memory pool of the Rx queue
4504 static inline uint32_t
4505 ixgbe_get_rscctl_maxdesc(struct rte_mempool *pool)
4507 struct rte_pktmbuf_pool_private *mp_priv = rte_mempool_get_priv(pool);
4509 /* MAXDESC * SRRCTL.BSIZEPKT must not exceed 64 KB minus one */
4512 (mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);
4515 return IXGBE_RSCCTL_MAXDESC_16;
4516 else if (maxdesc >= 8)
4517 return IXGBE_RSCCTL_MAXDESC_8;
4518 else if (maxdesc >= 4)
4519 return IXGBE_RSCCTL_MAXDESC_4;
4521 return IXGBE_RSCCTL_MAXDESC_1;
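/*
 * Worked example of the MAXDESC selection above (sketch only): the product
 * of MAXDESC and the per-descriptor buffer size must stay below 64 KB, so
 * the buffer size bounds how many descriptors one RSC aggregation may use.
 * The sketch returns plain counts rather than the RSCCTL register encodings.
 */
#if 0
static unsigned int rsc_maxdesc_example(uint16_t buf_size)
{
	unsigned int maxdesc = 65535 / buf_size;

	/* 2048-byte buffers: 65535 / 2048 = 31 -> capped at 16 descriptors;
	 * 16384-byte buffers: 65535 / 16384 = 3 -> a single descriptor.
	 */
	return maxdesc >= 16 ? 16 : maxdesc >= 8 ? 8 : maxdesc >= 4 ? 4 : 1;
}
#endif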
4525 * ixgbe_set_ivar - Setup the correct IVAR register for a particular MSIX
4528 * (Taken from FreeBSD tree)
4529 * (yes this is all very magic and confusing :)
4532 * @entry the register array entry
4533 * @vector the MSIX vector for this queue
4537 ixgbe_set_ivar(struct rte_eth_dev *dev, u8 entry, u8 vector, s8 type)
4539 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4542 vector |= IXGBE_IVAR_ALLOC_VAL;
4544 switch (hw->mac.type) {
4546 case ixgbe_mac_82598EB:
4548 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4550 entry += (type * 64);
4551 index = (entry >> 2) & 0x1F;
4552 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4553 ivar &= ~(0xFF << (8 * (entry & 0x3)));
4554 ivar |= (vector << (8 * (entry & 0x3)));
4555 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4558 case ixgbe_mac_82599EB:
4559 case ixgbe_mac_X540:
4560 if (type == -1) { /* MISC IVAR */
4561 index = (entry & 1) * 8;
4562 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4563 ivar &= ~(0xFF << index);
4564 ivar |= (vector << index);
4565 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4566 } else { /* RX/TX IVARS */
4567 index = (16 * (entry & 1)) + (8 * type);
4568 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4569 ivar &= ~(0xFF << index);
4570 ivar |= (vector << index);
4571 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
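/*
 * Worked example of the 82599/X540 IVAR indexing above (sketch only): each
 * IVAR register holds the vectors for two queues, one byte per Rx/Tx cause.
 */
#if 0
static void ivar_index_example(void)
{
	uint8_t entry = 5, type = 0;				/* Rx queue 5           */
	uint8_t reg = entry >> 1;				/* lands in IVAR(2)     */
	uint8_t shift = (16 * (entry & 1)) + (8 * type);	/* vector in bits 23:16 */

	/* With type = 1 (Tx) the shift becomes 24, i.e. bits 31:24. */
	(void)reg; (void)shift;
}
#endif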
4581 void __attribute__((cold))
4582 ixgbe_set_rx_function(struct rte_eth_dev *dev)
4584 uint16_t i, rx_using_sse;
4585 struct ixgbe_adapter *adapter =
4586 (struct ixgbe_adapter *)dev->data->dev_private;
4589 * In order to allow Vector Rx there are a few configuration
4590 * conditions to be met and Rx Bulk Allocation should be allowed.
4592 if (ixgbe_rx_vec_dev_conf_condition_check(dev) ||
4593 !adapter->rx_bulk_alloc_allowed) {
4594 PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx "
4595 "preconditions or RTE_IXGBE_INC_VECTOR is "
4597 dev->data->port_id);
4599 adapter->rx_vec_allowed = false;
4603 * Initialize the appropriate LRO callback.
4605 * If all queues satisfy the bulk allocation preconditions
4606 * (hw->rx_bulk_alloc_allowed is TRUE) then we may use bulk allocation.
4607 * Otherwise use a single allocation version.
4609 if (dev->data->lro) {
4610 if (adapter->rx_bulk_alloc_allowed) {
4611 PMD_INIT_LOG(DEBUG, "LRO is requested. Using a bulk "
4612 "allocation version");
4613 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
4615 PMD_INIT_LOG(DEBUG, "LRO is requested. Using a single "
4616 "allocation version");
4617 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
4619 } else if (dev->data->scattered_rx) {
4621 * Set the non-LRO scattered callback: there are Vector and
4622 * single allocation versions.
4624 if (adapter->rx_vec_allowed) {
4625 PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
4626 "callback (port=%d).",
4627 dev->data->port_id);
4629 dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
4630 } else if (adapter->rx_bulk_alloc_allowed) {
4631 PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
4632 "allocation callback (port=%d).",
4633 dev->data->port_id);
4634 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
4636 PMD_INIT_LOG(DEBUG, "Using Regualr (non-vector, "
4637 "single allocation) "
4638 "Scattered Rx callback "
4640 dev->data->port_id);
4642 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
4645 * Below we set "simple" callbacks according to port/queues parameters.
4646 * If parameters allow we are going to choose between the following
4650 * - Single buffer allocation (the simplest one)
4652 } else if (adapter->rx_vec_allowed) {
4653 PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
4654 "burst size no less than %d (port=%d).",
4655 RTE_IXGBE_DESCS_PER_LOOP,
4656 dev->data->port_id);
4658 dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
4659 } else if (adapter->rx_bulk_alloc_allowed) {
4660 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
4661 "satisfied. Rx Burst Bulk Alloc function "
4662 "will be used on port=%d.",
4663 dev->data->port_id);
4665 dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
4667 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
4668 "satisfied, or Scattered Rx is requested "
4670 dev->data->port_id);
4672 dev->rx_pkt_burst = ixgbe_recv_pkts;
4675 /* Propagate information about RX function choice through all queues. */
4678 (dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec ||
4679 dev->rx_pkt_burst == ixgbe_recv_pkts_vec);
4681 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4682 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
4684 rxq->rx_using_sse = rx_using_sse;
4685 #ifdef RTE_LIBRTE_SECURITY
4686 rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
4687 DEV_RX_OFFLOAD_SECURITY);
4693 * ixgbe_set_rsc - configure RSC related port HW registers
4695  * Configures the port's RSC related registers according to chapter 4.6.7.2
4696  * of the 82599 Spec (x540 configuration is virtually the same).
4700 * Returns 0 in case of success or a non-zero error code
4703 ixgbe_set_rsc(struct rte_eth_dev *dev)
4705 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
4706 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4707 struct rte_eth_dev_info dev_info = { 0 };
4708 bool rsc_capable = false;
4714 dev->dev_ops->dev_infos_get(dev, &dev_info);
4715 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
4718 if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
4719 PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
4724 /* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
4726 if (!(rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) &&
4727 (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
4729		 * According to chapter 4.6.7.2.1 of the Spec Rev.
4730		 * 3.0, RSC configuration requires HW CRC stripping to be
4731		 * enabled. If the user requested both HW CRC stripping off
4732		 * and RSC on, return an error.
4734 PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC "
4739 /* RFCTL configuration */
4740 rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
4741 if ((rsc_capable) && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
4743		 * Since NFS packet coalescing is not supported, clear
4744 * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
4747 rfctl &= ~(IXGBE_RFCTL_RSC_DIS | IXGBE_RFCTL_NFSW_DIS |
4748 IXGBE_RFCTL_NFSR_DIS);
4750 rfctl |= IXGBE_RFCTL_RSC_DIS;
4751 IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
4753 /* If LRO hasn't been requested - we are done here. */
4754 if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
4757 /* Set RDRXCTL.RSCACKC bit */
4758 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
4759 rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
4760 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
4762 /* Per-queue RSC configuration (chapter 4.6.7.2.2 of 82599 Spec) */
4763 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4764 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
4766 IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxq->reg_idx));
4768 IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxq->reg_idx));
4770 IXGBE_READ_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx));
4772 IXGBE_READ_REG(hw, IXGBE_EITR(rxq->reg_idx));
4775 * ixgbe PMD doesn't support header-split at the moment.
4777 * Following the 4.6.7.2.1 chapter of the 82599/x540
4778 * Spec if RSC is enabled the SRRCTL[n].BSIZEHEADER
4779 * should be configured even if header split is not
4780		 * enabled. We will configure it to 128 bytes, following the
4781 * recommendation in the spec.
4783 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
4784 srrctl |= (128 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
4785 IXGBE_SRRCTL_BSIZEHDR_MASK;
4788 * TODO: Consider setting the Receive Descriptor Minimum
4789 * Threshold Size for an RSC case. This is not an obviously
4790		 * beneficial option but one worth considering...
4793 rscctl |= IXGBE_RSCCTL_RSCEN;
4794 rscctl |= ixgbe_get_rscctl_maxdesc(rxq->mb_pool);
4795 psrtype |= IXGBE_PSRTYPE_TCPHDR;
4798 * RSC: Set ITR interval corresponding to 2K ints/s.
4800 * Full-sized RSC aggregations for a 10Gb/s link will
4801 * arrive at about 20K aggregation/s rate.
4803		 * A 2K ints/s rate will cause only about 10% of the
4804		 * aggregations to be closed due to the interrupt timer
4805 * expiration for a streaming at wire-speed case.
4807 * For a sparse streaming case this setting will yield
4808 * at most 500us latency for a single RSC aggregation.
4810 eitr &= ~IXGBE_EITR_ITR_INT_MASK;
4811 eitr |= IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT);
4812 eitr |= IXGBE_EITR_CNT_WDIS;
4814 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
4815 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxq->reg_idx), rscctl);
4816 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
4817 IXGBE_WRITE_REG(hw, IXGBE_EITR(rxq->reg_idx), eitr);
4820 * RSC requires the mapping of the queue to the
4823 ixgbe_set_ivar(dev, rxq->reg_idx, i, 0);
4828 PMD_INIT_LOG(DEBUG, "enabling LRO mode");
4834 * Initializes Receive Unit.
4836 int __attribute__((cold))
4837 ixgbe_dev_rx_init(struct rte_eth_dev *dev)
4839 struct ixgbe_hw *hw;
4840 struct ixgbe_rx_queue *rxq;
4851 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
4854 PMD_INIT_FUNC_TRACE();
4855 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4858 * Make sure receives are disabled while setting
4859 * up the RX context (registers, descriptor rings, etc.).
4861 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4862 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
4864	/* Enable receipt of broadcast frames */
4865 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4866 fctrl |= IXGBE_FCTRL_BAM;
4867 fctrl |= IXGBE_FCTRL_DPF;
4868 fctrl |= IXGBE_FCTRL_PMCF;
4869 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4872 * Configure CRC stripping, if any.
4874 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4875 if (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP)
4876 hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
4878 hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
4881 * Configure jumbo frame support, if any.
4883 if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
4884 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
4885 maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
4886 maxfrs &= 0x0000FFFF;
4887 maxfrs |= (rx_conf->max_rx_pkt_len << 16);
4888 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
4890 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
4893 * If loopback mode is configured for 82599, set LPBK bit.
4895 if (hw->mac.type == ixgbe_mac_82599EB &&
4896 dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
4897 hlreg0 |= IXGBE_HLREG0_LPBK;
4899 hlreg0 &= ~IXGBE_HLREG0_LPBK;
4901 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4904 * Assume no header split and no VLAN strip support
4905	 * on any Rx queue first.
4907 rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
4908 /* Setup RX queues */
4909 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4910 rxq = dev->data->rx_queues[i];
4913 * Reset crc_len in case it was changed after queue setup by a
4914 * call to configure.
4916 rxq->crc_len = (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) ?
4919 /* Setup the Base and Length of the Rx Descriptor Rings */
4920 bus_addr = rxq->rx_ring_phys_addr;
4921 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rxq->reg_idx),
4922 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4923 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rxq->reg_idx),
4924 (uint32_t)(bus_addr >> 32));
4925 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rxq->reg_idx),
4926 rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
4927 IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
4928 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), 0);
4930 /* Configure the SRRCTL register */
4931 srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
4933 /* Set if packets are dropped when no descriptors available */
4935 srrctl |= IXGBE_SRRCTL_DROP_EN;
4938 * Configure the RX buffer size in the BSIZEPACKET field of
4939 * the SRRCTL register of the queue.
4940 * The value is in 1 KB resolution. Valid values can be from
4943 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
4944 RTE_PKTMBUF_HEADROOM);
4945 srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
4946 IXGBE_SRRCTL_BSIZEPKT_MASK);
4948 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
4950 buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
4951 IXGBE_SRRCTL_BSIZEPKT_SHIFT);
4953		/* Add dual VLAN tag length to support dual VLAN frames */
4954 if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
4955 2 * IXGBE_VLAN_TAG_SIZE > buf_size)
4956 dev->data->scattered_rx = 1;
4957 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
4958 rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
4961 if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
4962 dev->data->scattered_rx = 1;
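/*
 * Worked example of the SRRCTL BSIZEPKT setting and the scatter decision
 * above (sketch only; the 2176-byte data room is the usual mbuf default and
 * is an assumption here):
 */
#if 0
static void srrctl_bsizepkt_example(void)
{
	uint16_t buf_size = 2176 - 128;		/* data room minus headroom = 2048  */
	uint16_t field = buf_size >> 10;	/* BSIZEPKT is in 1 KB units -> 2   */
	uint16_t effective = field << 10;	/* 2048 bytes usable per descriptor */

	/* A 9000-byte max_rx_pkt_len plus 2 * 4 VLAN bytes exceeds 2048,
	 * so scattered Rx would be enabled for such a configuration.
	 */
	(void)effective;
}
#endif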
4965 * Device configured with multiple RX queues.
4967 ixgbe_dev_mq_rx_configure(dev);
4970 * Setup the Checksum Register.
4971 * Disable Full-Packet Checksum which is mutually exclusive with RSS.
4972	 * Enable IP/L4 checksum computation by hardware if requested to do so.
4974 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
4975 rxcsum |= IXGBE_RXCSUM_PCSD;
4976 if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
4977 rxcsum |= IXGBE_RXCSUM_IPPCSE;
4979 rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
4981 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
4983 if (hw->mac.type == ixgbe_mac_82599EB ||
4984 hw->mac.type == ixgbe_mac_X540) {
4985 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
4986 if (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP)
4987 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
4989 rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
4990 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
4991 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
4994 rc = ixgbe_set_rsc(dev);
4998 ixgbe_set_rx_function(dev);
5004 * Initializes Transmit Unit.
5006 void __attribute__((cold))
5007 ixgbe_dev_tx_init(struct rte_eth_dev *dev)
5009 struct ixgbe_hw *hw;
5010 struct ixgbe_tx_queue *txq;
5016 PMD_INIT_FUNC_TRACE();
5017 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5019 /* Enable TX CRC (checksum offload requirement) and hw padding
5022 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
5023 hlreg0 |= (IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN);
5024 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
5026 /* Setup the Base and Length of the Tx Descriptor Rings */
5027 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5028 txq = dev->data->tx_queues[i];
5030 bus_addr = txq->tx_ring_phys_addr;
5031 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(txq->reg_idx),
5032 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
5033 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(txq->reg_idx),
5034 (uint32_t)(bus_addr >> 32));
5035 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(txq->reg_idx),
5036 txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
5037 /* Setup the HW Tx Head and TX Tail descriptor pointers */
5038 IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
5039 IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
5042 * Disable Tx Head Writeback RO bit, since this hoses
5043 * bookkeeping if things aren't delivered in order.
5045 switch (hw->mac.type) {
5046 case ixgbe_mac_82598EB:
5047 txctrl = IXGBE_READ_REG(hw,
5048 IXGBE_DCA_TXCTRL(txq->reg_idx));
5049 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
5050 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx),
5054 case ixgbe_mac_82599EB:
5055 case ixgbe_mac_X540:
5056 case ixgbe_mac_X550:
5057 case ixgbe_mac_X550EM_x:
5058 case ixgbe_mac_X550EM_a:
5060 txctrl = IXGBE_READ_REG(hw,
5061 IXGBE_DCA_TXCTRL_82599(txq->reg_idx));
5062 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
5063 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx),
5069 /* Device configured with multiple TX queues. */
5070 ixgbe_dev_mq_tx_configure(dev);
5074 * Set up link for 82599 loopback mode Tx->Rx.
5076 static inline void __attribute__((cold))
5077 ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
5079 PMD_INIT_FUNC_TRACE();
5081 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
5082 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM) !=
5084 PMD_INIT_LOG(ERR, "Could not enable loopback mode");
5093 IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU);
5094 ixgbe_reset_pipeline_82599(hw);
5096 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
5102 * Start Transmit and Receive Units.
5104 int __attribute__((cold))
5105 ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
5107 struct ixgbe_hw *hw;
5108 struct ixgbe_tx_queue *txq;
5109 struct ixgbe_rx_queue *rxq;
5116 PMD_INIT_FUNC_TRACE();
5117 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5119 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5120 txq = dev->data->tx_queues[i];
5121 /* Setup Transmit Threshold Registers */
5122 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
5123 txdctl |= txq->pthresh & 0x7F;
5124 txdctl |= ((txq->hthresh & 0x7F) << 8);
5125 txdctl |= ((txq->wthresh & 0x7F) << 16);
5126 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
5129 if (hw->mac.type != ixgbe_mac_82598EB) {
5130 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
5131 dmatxctl |= IXGBE_DMATXCTL_TE;
5132 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
5135 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5136 txq = dev->data->tx_queues[i];
5137 if (!txq->tx_deferred_start) {
5138 ret = ixgbe_dev_tx_queue_start(dev, i);
5144 for (i = 0; i < dev->data->nb_rx_queues; i++) {
5145 rxq = dev->data->rx_queues[i];
5146 if (!rxq->rx_deferred_start) {
5147 ret = ixgbe_dev_rx_queue_start(dev, i);
5153 /* Enable Receive engine */
5154 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
5155 if (hw->mac.type == ixgbe_mac_82598EB)
5156 rxctrl |= IXGBE_RXCTRL_DMBYPS;
5157 rxctrl |= IXGBE_RXCTRL_RXEN;
5158 hw->mac.ops.enable_rx_dma(hw, rxctrl);
5160 /* If loopback mode is enabled for 82599, set up the link accordingly */
5161 if (hw->mac.type == ixgbe_mac_82599EB &&
5162 dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
5163 ixgbe_setup_loopback_link_82599(hw);
5165 #ifdef RTE_LIBRTE_SECURITY
5166 if ((dev->data->dev_conf.rxmode.offloads &
5167 DEV_RX_OFFLOAD_SECURITY) ||
5168 (dev->data->dev_conf.txmode.offloads &
5169 DEV_TX_OFFLOAD_SECURITY)) {
5170 ret = ixgbe_crypto_enable_ipsec(dev);
5173 "ixgbe_crypto_enable_ipsec fails with %d.",
5184 * Start Receive Units for specified queue.
5186 int __attribute__((cold))
5187 ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
5189 struct ixgbe_hw *hw;
5190 struct ixgbe_rx_queue *rxq;
5194 PMD_INIT_FUNC_TRACE();
5195 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5197 if (rx_queue_id < dev->data->nb_rx_queues) {
5198 rxq = dev->data->rx_queues[rx_queue_id];
5200 /* Allocate buffers for descriptor rings */
5201 if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
5202 PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
5206 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5207 rxdctl |= IXGBE_RXDCTL_ENABLE;
5208 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
5210 /* Wait until RX Enable ready */
5211 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5214 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5215 } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
5217 PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d",
5220 IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
5221 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
5222 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
5230 * Stop Receive Units for specified queue.
5232 int __attribute__((cold))
5233 ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
5235 struct ixgbe_hw *hw;
5236 struct ixgbe_adapter *adapter =
5237 (struct ixgbe_adapter *)dev->data->dev_private;
5238 struct ixgbe_rx_queue *rxq;
5242 PMD_INIT_FUNC_TRACE();
5243 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5245 if (rx_queue_id < dev->data->nb_rx_queues) {
5246 rxq = dev->data->rx_queues[rx_queue_id];
5248 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5249 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
5250 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
5252 /* Wait until RX Enable bit clear */
5253 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5256 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5257 } while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE));
5259 PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d",
5262 rte_delay_us(RTE_IXGBE_WAIT_100_US);
5264 ixgbe_rx_queue_release_mbufs(rxq);
5265 ixgbe_reset_rx_queue(adapter, rxq);
5266 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
5275 * Start Transmit Units for specified queue.
5277 int __attribute__((cold))
5278 ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
5280 struct ixgbe_hw *hw;
5281 struct ixgbe_tx_queue *txq;
5285 PMD_INIT_FUNC_TRACE();
5286 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5288 if (tx_queue_id < dev->data->nb_tx_queues) {
5289 txq = dev->data->tx_queues[tx_queue_id];
5290 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
5291 txdctl |= IXGBE_TXDCTL_ENABLE;
5292 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
5294 /* Wait until TX Enable ready */
5295 if (hw->mac.type == ixgbe_mac_82599EB) {
5296 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5299 txdctl = IXGBE_READ_REG(hw,
5300 IXGBE_TXDCTL(txq->reg_idx));
5301 } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
5303 PMD_INIT_LOG(ERR, "Could not enable "
5304 "Tx Queue %d", tx_queue_id);
5307 IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
5308 IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
5309 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
5317 * Stop Transmit Units for specified queue.
5319 int __attribute__((cold))
5320 ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
5322 struct ixgbe_hw *hw;
5323 struct ixgbe_tx_queue *txq;
5325 uint32_t txtdh, txtdt;
5328 PMD_INIT_FUNC_TRACE();
5329 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5331 if (tx_queue_id >= dev->data->nb_tx_queues)
5334 txq = dev->data->tx_queues[tx_queue_id];
5336 /* Wait until TX queue is empty */
5337 if (hw->mac.type == ixgbe_mac_82599EB) {
5338 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5340 rte_delay_us(RTE_IXGBE_WAIT_100_US);
5341 txtdh = IXGBE_READ_REG(hw,
5342 IXGBE_TDH(txq->reg_idx));
5343 txtdt = IXGBE_READ_REG(hw,
5344 IXGBE_TDT(txq->reg_idx));
5345 } while (--poll_ms && (txtdh != txtdt));
5347 PMD_INIT_LOG(ERR, "Tx Queue %d is not empty "
5348 "when stopping.", tx_queue_id);
5351 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
5352 txdctl &= ~IXGBE_TXDCTL_ENABLE;
5353 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
5355 /* Wait until TX Enable bit clear */
5356 if (hw->mac.type == ixgbe_mac_82599EB) {
5357 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5360 txdctl = IXGBE_READ_REG(hw,
5361 IXGBE_TXDCTL(txq->reg_idx));
5362 } while (--poll_ms && (txdctl & IXGBE_TXDCTL_ENABLE));
5364 PMD_INIT_LOG(ERR, "Could not disable "
5365 "Tx Queue %d", tx_queue_id);
5368 if (txq->ops != NULL) {
5369 txq->ops->release_mbufs(txq);
5370 txq->ops->reset(txq);
5372 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
5378 ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
5379 struct rte_eth_rxq_info *qinfo)
5381 struct ixgbe_rx_queue *rxq;
5383 rxq = dev->data->rx_queues[queue_id];
5385 qinfo->mp = rxq->mb_pool;
5386 qinfo->scattered_rx = dev->data->scattered_rx;
5387 qinfo->nb_desc = rxq->nb_rx_desc;
5389 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
5390 qinfo->conf.rx_drop_en = rxq->drop_en;
5391 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
5392 qinfo->conf.offloads = rxq->offloads;
5396 ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
5397 struct rte_eth_txq_info *qinfo)
5399 struct ixgbe_tx_queue *txq;
5401 txq = dev->data->tx_queues[queue_id];
5403 qinfo->nb_desc = txq->nb_tx_desc;
5405 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
5406 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
5407 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
5409 qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
5410 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
5411 qinfo->conf.offloads = txq->offloads;
5412 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
5416 * [VF] Initializes Receive Unit.
5418 int __attribute__((cold))
5419 ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
5421 struct ixgbe_hw *hw;
5422 struct ixgbe_rx_queue *rxq;
5423 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
5425 uint32_t srrctl, psrtype = 0;
5430 PMD_INIT_FUNC_TRACE();
5431 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5433 if (rte_is_power_of_2(dev->data->nb_rx_queues) == 0) {
5434 PMD_INIT_LOG(ERR, "The number of Rx queue invalid, "
5435 "it should be power of 2");
5439 if (dev->data->nb_rx_queues > hw->mac.max_rx_queues) {
5440 PMD_INIT_LOG(ERR, "The number of Rx queue invalid, "
5441 "it should be equal to or less than %d",
5442 hw->mac.max_rx_queues);
5447	 * When the VF driver issues an IXGBE_VF_RESET request, the PF driver
5448	 * disables the VF receipt of packets if the PF MTU is > 1500.
5449	 * This is done to deal with an 82599 limitation that forces
5450	 * the PF and all VFs to share the same MTU.
5451	 * The PF driver then re-enables VF receipt of packets when
5452	 * the VF driver issues an IXGBE_VF_SET_LPE request.
5453	 * In the meantime, the VF device cannot be used, even if the VF driver
5454	 * and the Guest VM network stack are ready to accept packets with a
5455	 * size up to the PF MTU.
5456	 * As a work-around to this PF behaviour, force the call to
5457	 * ixgbevf_rlpml_set_vf even if jumbo frames are not used. This way,
5458	 * VF packet reception works in all cases.
5460 ixgbevf_rlpml_set_vf(hw,
5461 (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
5464 * Assume no header split and no VLAN strip support
5465	 * on any Rx queue first.
5467 rxmode->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
5468 /* Setup RX queues */
5469 for (i = 0; i < dev->data->nb_rx_queues; i++) {
5470 rxq = dev->data->rx_queues[i];
5472 /* Allocate buffers for descriptor rings */
5473 ret = ixgbe_alloc_rx_queue_mbufs(rxq);
5477 /* Setup the Base and Length of the Rx Descriptor Rings */
5478 bus_addr = rxq->rx_ring_phys_addr;
5480 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
5481 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
5482 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
5483 (uint32_t)(bus_addr >> 32));
5484 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
5485 rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
5486 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0);
5487 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0);
5490 /* Configure the SRRCTL register */
5491 srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
5493 /* Set if packets are dropped when no descriptors available */
5495 srrctl |= IXGBE_SRRCTL_DROP_EN;
5498 * Configure the RX buffer size in the BSIZEPACKET field of
5499 * the SRRCTL register of the queue.
5500 * The value is in 1 KB resolution. Valid values can be from
5503 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
5504 RTE_PKTMBUF_HEADROOM);
5505 srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
5506 IXGBE_SRRCTL_BSIZEPKT_MASK);
5509 * VF modification to write virtual function SRRCTL register
5511 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), srrctl);
5513 buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
5514 IXGBE_SRRCTL_BSIZEPKT_SHIFT);
5516 if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
5517		    /* Add dual VLAN tag length to support dual VLAN frames */
5518 (rxmode->max_rx_pkt_len +
5519 2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
5520 if (!dev->data->scattered_rx)
5521 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
5522 dev->data->scattered_rx = 1;
5525 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
5526 rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
5529 /* Set RQPL for VF RSS according to max Rx queue */
5530 psrtype |= (dev->data->nb_rx_queues >> 1) <<
5531 IXGBE_PSRTYPE_RQPL_SHIFT;
5532 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
5534 ixgbe_set_rx_function(dev);
5540 * [VF] Initializes Transmit Unit.
5542 void __attribute__((cold))
5543 ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
5545 struct ixgbe_hw *hw;
5546 struct ixgbe_tx_queue *txq;
5551 PMD_INIT_FUNC_TRACE();
5552 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5554 /* Setup the Base and Length of the Tx Descriptor Rings */
5555 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5556 txq = dev->data->tx_queues[i];
5557 bus_addr = txq->tx_ring_phys_addr;
5558 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
5559 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
5560 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i),
5561 (uint32_t)(bus_addr >> 32));
5562 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
5563 txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
5564 /* Setup the HW Tx Head and TX Tail descriptor pointers */
5565 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
5566 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
5569 * Disable Tx Head Writeback RO bit, since this hoses
5570 * bookkeeping if things aren't delivered in order.
5572 txctrl = IXGBE_READ_REG(hw,
5573 IXGBE_VFDCA_TXCTRL(i));
5574 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
5575 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i),
5581 * [VF] Start Transmit and Receive Units.
5583 void __attribute__((cold))
5584 ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
5586 struct ixgbe_hw *hw;
5587 struct ixgbe_tx_queue *txq;
5588 struct ixgbe_rx_queue *rxq;
5594 PMD_INIT_FUNC_TRACE();
5595 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5597 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5598 txq = dev->data->tx_queues[i];
5599 /* Setup Transmit Threshold Registers */
5600 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
5601 txdctl |= txq->pthresh & 0x7F;
5602 txdctl |= ((txq->hthresh & 0x7F) << 8);
5603 txdctl |= ((txq->wthresh & 0x7F) << 16);
5604 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
5607 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5609 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
5610 txdctl |= IXGBE_TXDCTL_ENABLE;
5611 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
5614 /* Wait until TX Enable ready */
5617 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
5618 } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
5620 PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i);
5622 for (i = 0; i < dev->data->nb_rx_queues; i++) {
5624 rxq = dev->data->rx_queues[i];
5626 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
5627 rxdctl |= IXGBE_RXDCTL_ENABLE;
5628 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
5630 /* Wait until RX Enable ready */
5634 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
5635 } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
5637 PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i);
5639 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1);
5645 ixgbe_rss_conf_init(struct ixgbe_rte_flow_rss_conf *out,
5646 const struct rte_flow_action_rss *in)
5648 if (in->key_len > RTE_DIM(out->key) ||
5649 in->queue_num > RTE_DIM(out->queue))
5651 out->conf = (struct rte_flow_action_rss){
5655 .key_len = in->key_len,
5656 .queue_num = in->queue_num,
5657 .key = memcpy(out->key, in->key, in->key_len),
5658 .queue = memcpy(out->queue, in->queue,
5659 sizeof(*in->queue) * in->queue_num),
5665 ixgbe_action_rss_same(const struct rte_flow_action_rss *comp,
5666 const struct rte_flow_action_rss *with)
5668 return (comp->func == with->func &&
5669 comp->level == with->level &&
5670 comp->types == with->types &&
5671 comp->key_len == with->key_len &&
5672 comp->queue_num == with->queue_num &&
5673 !memcmp(comp->key, with->key, with->key_len) &&
5674 !memcmp(comp->queue, with->queue,
5675 sizeof(*with->queue) * with->queue_num));
5679 ixgbe_config_rss_filter(struct rte_eth_dev *dev,
5680 struct ixgbe_rte_flow_rss_conf *conf, bool add)
5682 struct ixgbe_hw *hw;
5686 uint16_t sp_reta_size;
5688 struct rte_eth_rss_conf rss_conf = {
5689 .rss_key = conf->conf.key_len ?
5690 (void *)(uintptr_t)conf->conf.key : NULL,
5691 .rss_key_len = conf->conf.key_len,
5692 .rss_hf = conf->conf.types,
5694 struct ixgbe_filter_info *filter_info =
5695 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5697 PMD_INIT_FUNC_TRACE();
5698 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5700 sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
5703 if (ixgbe_action_rss_same(&filter_info->rss_info.conf,
5705 ixgbe_rss_disable(dev);
5706 memset(&filter_info->rss_info, 0,
5707 sizeof(struct ixgbe_rte_flow_rss_conf));
5713 if (filter_info->rss_info.conf.queue_num)
5715 /* Fill in redirection table
5716 * The byte-swap is needed because NIC registers are in
5717 * little-endian order.
5720 for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
5721 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
5723 if (j == conf->conf.queue_num)
5725 reta = (reta << 8) | conf->conf.queue[j];
5727 IXGBE_WRITE_REG(hw, reta_reg,
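/*
 * Sketch of how four RETA entries pack into one 32-bit register (the elided
 * register write presumably byte-swaps the accumulated value, per the
 * little-endian note above, so RSS index i ends up in byte i):
 */
#if 0
static void reta_pack_example(void)
{
	const uint8_t queue[4] = {0, 1, 2, 3};
	uint32_t reta = 0;
	int i;

	for (i = 0; i < 4; i++)
		reta = (reta << 8) | queue[i];	/* 0x00010203 after four entries */
}
#endif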
5731 /* Configure the RSS key and the RSS protocols used to compute
5732 * the RSS hash of input packets.
5734 if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
5735 ixgbe_rss_disable(dev);
5738 if (rss_conf.rss_key == NULL)
5739 rss_conf.rss_key = rss_intel_key; /* Default hash key */
5740 ixgbe_hw_rss_hash_set(hw, &rss_conf);
5742 if (ixgbe_rss_conf_init(&filter_info->rss_info, &conf->conf))
5748 /* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */
5749 int __attribute__((weak))
5750 ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
5755 uint16_t __attribute__((weak))
5756 ixgbe_recv_pkts_vec(
5757 void __rte_unused *rx_queue,
5758 struct rte_mbuf __rte_unused **rx_pkts,
5759 uint16_t __rte_unused nb_pkts)
5764 uint16_t __attribute__((weak))
5765 ixgbe_recv_scattered_pkts_vec(
5766 void __rte_unused *rx_queue,
5767 struct rte_mbuf __rte_unused **rx_pkts,
5768 uint16_t __rte_unused nb_pkts)
5773 int __attribute__((weak))
5774 ixgbe_rxq_vec_setup(struct ixgbe_rx_queue __rte_unused *rxq)