1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation.
3 * Copyright 2014 6WIND S.A.
17 #include <rte_byteorder.h>
18 #include <rte_common.h>
19 #include <rte_cycles.h>
21 #include <rte_debug.h>
22 #include <rte_interrupts.h>
24 #include <rte_memory.h>
25 #include <rte_memzone.h>
26 #include <rte_launch.h>
28 #include <rte_per_lcore.h>
29 #include <rte_lcore.h>
30 #include <rte_atomic.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_mempool.h>
33 #include <rte_malloc.h>
35 #include <rte_ether.h>
36 #include <rte_ethdev_driver.h>
37 #include <rte_prefetch.h>
41 #include <rte_string_fns.h>
42 #include <rte_errno.h>
46 #include "ixgbe_logs.h"
47 #include "base/ixgbe_api.h"
48 #include "base/ixgbe_vf.h"
49 #include "ixgbe_ethdev.h"
50 #include "base/ixgbe_dcb.h"
51 #include "base/ixgbe_common.h"
52 #include "ixgbe_rxtx.h"
54 #ifdef RTE_LIBRTE_IEEE1588
55 #define IXGBE_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
56 #else
57 #define IXGBE_TX_IEEE1588_TMST 0
58 #endif
59 /* Bit mask indicating which bits are required to build a TX context descriptor */
60 #define IXGBE_TX_OFFLOAD_MASK ( \
70 PKT_TX_OUTER_IP_CKSUM | \
71 PKT_TX_SEC_OFFLOAD | \
72 IXGBE_TX_IEEE1588_TMST)
74 #define IXGBE_TX_OFFLOAD_NOTSUP_MASK \
75 (PKT_TX_OFFLOAD_MASK ^ IXGBE_TX_OFFLOAD_MASK)
78 #define RTE_PMD_USE_PREFETCH
81 #ifdef RTE_PMD_USE_PREFETCH
82 /*
83 * Prefetch a cache line into all cache levels.
84 */
85 #define rte_ixgbe_prefetch(p) rte_prefetch0(p)
86 #else
87 #define rte_ixgbe_prefetch(p) do {} while (0)
88 #endif
90 #ifdef RTE_IXGBE_INC_VECTOR
91 uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
92 uint16_t nb_pkts);
93 #endif
95 /*********************************************************************
96  *
97  *  TX functions
98  *
99  **********************************************************************/
102 * Check for descriptors with their DD bit set and free mbufs.
103 * Return the total number of buffers freed.
105 static __rte_always_inline int
106 ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
108 struct ixgbe_tx_entry *txep;
111 struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];
113 /* check DD bit on threshold descriptor */
114 status = txq->tx_ring[txq->tx_next_dd].wb.status;
115 if (!(status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD)))
119 * first buffer to free from S/W ring is at index
120 * tx_next_dd - (tx_rs_thresh-1)
122 txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]);
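/*
 * e.g. with the default tx_rs_thresh of 32 and tx_next_dd = 31 this
 * points at sw_ring[0], so entries 0..31 are the candidates freed below.
 */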
124 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
125 /* free buffers one at a time */
126 m = rte_pktmbuf_prefree_seg(txep->mbuf);
129 if (unlikely(m == NULL))
132 if (nb_free >= RTE_IXGBE_TX_MAX_FREE_BUF_SZ ||
133 (nb_free > 0 && m->pool != free[0]->pool)) {
134 rte_mempool_put_bulk(free[0]->pool,
135 (void **)free, nb_free);
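/*
 * Note: mbufs from different mempools cannot share one bulk put, so the
 * batch is flushed whenever the pool changes or the local array fills;
 * any remainder is returned in a final bulk put after the loop.
 */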
143 rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
145 /* buffers were freed, update counters */
146 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
147 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
148 if (txq->tx_next_dd >= txq->nb_tx_desc)
149 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
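/*
 * e.g. on a 512-descriptor ring with tx_rs_thresh = 32, the threshold
 * descriptor advances 31, 63, ..., 511 and then wraps back to 31.
 */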
151 return txq->tx_rs_thresh;
154 /* Populate 4 descriptors with data from 4 mbufs */
156 tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
158 uint64_t buf_dma_addr;
162 for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
163 buf_dma_addr = rte_mbuf_data_iova(*pkts);
164 pkt_len = (*pkts)->data_len;
166 /* write data to descriptor */
167 txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
169 txdp->read.cmd_type_len =
170 rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
172 txdp->read.olinfo_status =
173 rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
175 rte_prefetch0(&(*pkts)->pool);
179 /* Populate 1 descriptor with data from 1 mbuf */
181 tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
183 uint64_t buf_dma_addr;
186 buf_dma_addr = rte_mbuf_data_iova(*pkts);
187 pkt_len = (*pkts)->data_len;
189 /* write data to descriptor */
190 txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
191 txdp->read.cmd_type_len =
192 rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
193 txdp->read.olinfo_status =
194 rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
195 rte_prefetch0(&(*pkts)->pool);
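/*
 * Note: tx1() and tx4() are used only on the simple TX path, which
 * assumes single-segment mbufs, so data_len describes the whole packet
 * and DCMD_DTYP_FLAGS supplies the common command bits of every
 * descriptor.
 */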
199 * Fill H/W descriptor ring with mbuf data.
200 * Copy mbuf pointers to the S/W ring.
203 ixgbe_tx_fill_hw_ring(struct ixgbe_tx_queue *txq, struct rte_mbuf **pkts,
206 volatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
207 struct ixgbe_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
208 const int N_PER_LOOP = 4;
209 const int N_PER_LOOP_MASK = N_PER_LOOP-1;
210 int mainpart, leftover;
214 * Process most of the packets in chunks of N pkts. Any
215 * leftover packets will get processed one at a time.
217 mainpart = (nb_pkts & ((uint32_t) ~N_PER_LOOP_MASK));
218 leftover = (nb_pkts & ((uint32_t) N_PER_LOOP_MASK));
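/*
 * e.g. nb_pkts = 23 gives mainpart = 20 (handled four at a time by
 * tx4()) and leftover = 3 (handled one at a time by tx1()).
 */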
219 for (i = 0; i < mainpart; i += N_PER_LOOP) {
220 /* Copy N mbuf pointers to the S/W ring */
221 for (j = 0; j < N_PER_LOOP; ++j) {
222 (txep + i + j)->mbuf = *(pkts + i + j);
224 tx4(txdp + i, pkts + i);
227 if (unlikely(leftover > 0)) {
228 for (i = 0; i < leftover; ++i) {
229 (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
230 tx1(txdp + mainpart + i, pkts + mainpart + i);
235 static inline uint16_t
236 tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
239 struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
240 volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring;
244 * Begin scanning the H/W ring for done descriptors when the
245 * number of available descriptors drops below tx_free_thresh. For
246 * each done descriptor, free the associated buffer.
248 if (txq->nb_tx_free < txq->tx_free_thresh)
249 ixgbe_tx_free_bufs(txq);
251 /* Only use descriptors that are available */
252 nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
253 if (unlikely(nb_pkts == 0))
256 /* Use exactly nb_pkts descriptors */
257 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
260 * At this point, we know there are enough descriptors in the
261 * ring to transmit all the packets. This assumes that each
262 * mbuf contains a single segment, and that no new offloads
263 * are expected, which would require a new context descriptor.
267 * See if we're going to wrap-around. If so, handle the top
268 * of the descriptor ring first, then do the bottom. If not,
269 * the processing looks just like the "bottom" part anyway...
271 if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
272 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
273 ixgbe_tx_fill_hw_ring(txq, tx_pkts, n);
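/*
 * e.g. on a 512-descriptor ring with tx_tail = 500 and nb_pkts = 20:
 * n = 12 packets fill descriptors 500..511 here, the tail wraps to 0,
 * and the remaining 8 packets are filled at the bottom of the ring.
 */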
276 * We know that the last descriptor in the ring will need to
277 * have its RS bit set because tx_rs_thresh has to be
278 * a divisor of the ring size
280 tx_r[txq->tx_next_rs].read.cmd_type_len |=
281 rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
282 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
287 /* Fill H/W descriptor ring with mbuf data */
288 ixgbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
289 txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
292 * Determine if RS bit should be set
293 * This is what we actually want:
294 * if ((txq->tx_tail - 1) >= txq->tx_next_rs)
295 * but instead of subtracting 1 and doing >=, we can just do
296 * greater than without subtracting.
298 if (txq->tx_tail > txq->tx_next_rs) {
299 tx_r[txq->tx_next_rs].read.cmd_type_len |=
300 rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
301 txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
303 if (txq->tx_next_rs >= txq->nb_tx_desc)
304 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
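/*
 * e.g. with the default tx_rs_thresh of 32 the RS bit lands on
 * descriptors 31, 63, 95, ..., so a status write-back is requested at
 * most once every tx_rs_thresh descriptors.
 */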
308 * Check for wrap-around. This would only happen if we used
309 * up to the last descriptor in the ring, no more, no less.
311 if (txq->tx_tail >= txq->nb_tx_desc)
314 /* update tail pointer */
316 IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, txq->tx_tail);
322 ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
327 /* If the whole burst fits within TX_MAX_BURST, transmit it in one call */
328 if (likely(nb_pkts <= RTE_PMD_IXGBE_TX_MAX_BURST))
329 return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
331 /* transmit more than the max burst, in chunks of TX_MAX_BURST */
336 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_TX_MAX_BURST);
337 ret = tx_xmit_pkts(tx_queue, &(tx_pkts[nb_tx]), n);
338 nb_tx = (uint16_t)(nb_tx + ret);
339 nb_pkts = (uint16_t)(nb_pkts - ret);
347 #ifdef RTE_IXGBE_INC_VECTOR
349 ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
353 struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
358 num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
359 ret = ixgbe_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
372 ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
373 volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
374 uint64_t ol_flags, union ixgbe_tx_offload tx_offload,
375 __rte_unused uint64_t *mdata)
377 uint32_t type_tucmd_mlhl;
378 uint32_t mss_l4len_idx = 0;
380 uint32_t vlan_macip_lens;
381 union ixgbe_tx_offload tx_offload_mask;
382 uint32_t seqnum_seed = 0;
384 ctx_idx = txq->ctx_curr;
385 tx_offload_mask.data[0] = 0;
386 tx_offload_mask.data[1] = 0;
389 /* Specify which HW CTX to upload. */
390 mss_l4len_idx |= (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT);
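/*
 * Note: each TX queue caches IXGBE_CTX_NUM hardware context slots;
 * ctx_idx selects the slot this descriptor programs, and the same index
 * is later echoed in the olinfo_status field of the data descriptors.
 */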
392 if (ol_flags & PKT_TX_VLAN_PKT) {
393 tx_offload_mask.vlan_tci |= ~0;
396 /* check if TCP segmentation is required for this packet */
397 if (ol_flags & PKT_TX_TCP_SEG) {
398 /* implies IP cksum in IPv4 */
399 if (ol_flags & PKT_TX_IP_CKSUM)
400 type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4 |
401 IXGBE_ADVTXD_TUCMD_L4T_TCP |
402 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
404 type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV6 |
405 IXGBE_ADVTXD_TUCMD_L4T_TCP |
406 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
408 tx_offload_mask.l2_len |= ~0;
409 tx_offload_mask.l3_len |= ~0;
410 tx_offload_mask.l4_len |= ~0;
411 tx_offload_mask.tso_segsz |= ~0;
412 mss_l4len_idx |= tx_offload.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT;
413 mss_l4len_idx |= tx_offload.l4_len << IXGBE_ADVTXD_L4LEN_SHIFT;
414 } else { /* no TSO, check if hardware checksum is needed */
415 if (ol_flags & PKT_TX_IP_CKSUM) {
416 type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4;
417 tx_offload_mask.l2_len |= ~0;
418 tx_offload_mask.l3_len |= ~0;
421 switch (ol_flags & PKT_TX_L4_MASK) {
422 case PKT_TX_UDP_CKSUM:
423 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP |
424 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
425 mss_l4len_idx |= sizeof(struct udp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
426 tx_offload_mask.l2_len |= ~0;
427 tx_offload_mask.l3_len |= ~0;
429 case PKT_TX_TCP_CKSUM:
430 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP |
431 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
432 mss_l4len_idx |= sizeof(struct tcp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
433 tx_offload_mask.l2_len |= ~0;
434 tx_offload_mask.l3_len |= ~0;
436 case PKT_TX_SCTP_CKSUM:
437 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP |
438 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
439 mss_l4len_idx |= sizeof(struct sctp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
440 tx_offload_mask.l2_len |= ~0;
441 tx_offload_mask.l3_len |= ~0;
444 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_RSV |
445 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
450 if (ol_flags & PKT_TX_OUTER_IP_CKSUM) {
451 tx_offload_mask.outer_l2_len |= ~0;
452 tx_offload_mask.outer_l3_len |= ~0;
453 tx_offload_mask.l2_len |= ~0;
454 seqnum_seed |= tx_offload.outer_l3_len
455 << IXGBE_ADVTXD_OUTER_IPLEN;
456 seqnum_seed |= tx_offload.l2_len
457 << IXGBE_ADVTXD_TUNNEL_LEN;
459 #ifdef RTE_LIBRTE_SECURITY
460 if (ol_flags & PKT_TX_SEC_OFFLOAD) {
461 union ixgbe_crypto_tx_desc_md *md =
462 (union ixgbe_crypto_tx_desc_md *)mdata;
464 (IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK & md->sa_idx);
465 type_tucmd_mlhl |= md->enc ?
466 (IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
467 IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN) : 0;
469 (md->pad_len & IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK);
470 tx_offload_mask.sa_idx |= ~0;
471 tx_offload_mask.sec_pad_len |= ~0;
475 txq->ctx_cache[ctx_idx].flags = ol_flags;
476 txq->ctx_cache[ctx_idx].tx_offload.data[0] =
477 tx_offload_mask.data[0] & tx_offload.data[0];
478 txq->ctx_cache[ctx_idx].tx_offload.data[1] =
479 tx_offload_mask.data[1] & tx_offload.data[1];
480 txq->ctx_cache[ctx_idx].tx_offload_mask = tx_offload_mask;
482 ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
483 vlan_macip_lens = tx_offload.l3_len;
484 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
485 vlan_macip_lens |= (tx_offload.outer_l2_len <<
486 IXGBE_ADVTXD_MACLEN_SHIFT);
488 vlan_macip_lens |= (tx_offload.l2_len <<
489 IXGBE_ADVTXD_MACLEN_SHIFT);
490 vlan_macip_lens |= ((uint32_t)tx_offload.vlan_tci << IXGBE_ADVTXD_VLAN_SHIFT);
491 ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
492 ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
493 ctx_txd->seqnum_seed = seqnum_seed;
497 * Check which hardware context can be used. Use the existing match
498 * or create a new context descriptor.
500 static inline uint32_t
501 what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
502 union ixgbe_tx_offload tx_offload)
504 /* Check for a match with the currently used context */
505 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
506 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
507 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
508 & tx_offload.data[0])) &&
509 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
510 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
511 & tx_offload.data[1]))))
512 return txq->ctx_curr;
514 /* Otherwise, check whether the other (next) context matches */
515 txq->ctx_curr ^= 1;
516 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
517 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
518 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
519 & tx_offload.data[0])) &&
520 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
521 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
522 & tx_offload.data[1]))))
523 return txq->ctx_curr;
525 /* No match: the caller must build a new context descriptor */
526 return IXGBE_CTX_NUM;
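/*
 * Note: returning IXGBE_CTX_NUM (an out-of-range slot index) tells the
 * caller that no cached context matched and a new context descriptor
 * has to be built ahead of the data descriptors.
 */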
529 static inline uint32_t
530 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
534 if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM)
535 tmp |= IXGBE_ADVTXD_POPTS_TXSM;
536 if (ol_flags & PKT_TX_IP_CKSUM)
537 tmp |= IXGBE_ADVTXD_POPTS_IXSM;
538 if (ol_flags & PKT_TX_TCP_SEG)
539 tmp |= IXGBE_ADVTXD_POPTS_TXSM;
543 static inline uint32_t
544 tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
546 uint32_t cmdtype = 0;
548 if (ol_flags & PKT_TX_VLAN_PKT)
549 cmdtype |= IXGBE_ADVTXD_DCMD_VLE;
550 if (ol_flags & PKT_TX_TCP_SEG)
551 cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
552 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
553 cmdtype |= (1 << IXGBE_ADVTXD_OUTERIPCS_SHIFT);
554 if (ol_flags & PKT_TX_MACSEC)
555 cmdtype |= IXGBE_ADVTXD_MAC_LINKSEC;
559 /* Default RS bit threshold values */
560 #ifndef DEFAULT_TX_RS_THRESH
561 #define DEFAULT_TX_RS_THRESH 32
563 #ifndef DEFAULT_TX_FREE_THRESH
564 #define DEFAULT_TX_FREE_THRESH 32
567 /* Reset transmit descriptors after they have been used */
569 ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
571 struct ixgbe_tx_entry *sw_ring = txq->sw_ring;
572 volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring;
573 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
574 uint16_t nb_tx_desc = txq->nb_tx_desc;
575 uint16_t desc_to_clean_to;
576 uint16_t nb_tx_to_clean;
579 /* Determine the last descriptor needing to be cleaned */
580 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
581 if (desc_to_clean_to >= nb_tx_desc)
582 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
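/*
 * e.g. with last_desc_cleaned = 500, tx_rs_thresh = 32 and a
 * 512-descriptor ring, desc_to_clean_to wraps from 532 back to 20.
 */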
584 /* Check to make sure the last descriptor to clean is done */
585 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
586 status = txr[desc_to_clean_to].wb.status;
587 if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD))) {
588 PMD_TX_FREE_LOG(DEBUG,
589 "TX descriptor %4u is not done"
590 "(port=%d queue=%d)",
592 txq->port_id, txq->queue_id);
593 /* Failed to clean any descriptors, better luck next time */
597 /* Figure out how many descriptors will be cleaned */
598 if (last_desc_cleaned > desc_to_clean_to)
599 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
602 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
605 PMD_TX_FREE_LOG(DEBUG,
606 "Cleaning %4u TX descriptors: %4u to %4u "
607 "(port=%d queue=%d)",
608 nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
609 txq->port_id, txq->queue_id);
612 * The last descriptor to clean is done, so that means all the
613 * descriptors from the last descriptor that was cleaned
614 * up to the last descriptor with the RS bit set
615 * are done. Only reset the threshold descriptor.
617 txr[desc_to_clean_to].wb.status = 0;
619 /* Update the txq to reflect the last descriptor that was cleaned */
620 txq->last_desc_cleaned = desc_to_clean_to;
621 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
628 ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
631 struct ixgbe_tx_queue *txq;
632 struct ixgbe_tx_entry *sw_ring;
633 struct ixgbe_tx_entry *txe, *txn;
634 volatile union ixgbe_adv_tx_desc *txr;
635 volatile union ixgbe_adv_tx_desc *txd, *txp;
636 struct rte_mbuf *tx_pkt;
637 struct rte_mbuf *m_seg;
638 uint64_t buf_dma_addr;
639 uint32_t olinfo_status;
640 uint32_t cmd_type_len;
651 union ixgbe_tx_offload tx_offload;
652 #ifdef RTE_LIBRTE_SECURITY
656 tx_offload.data[0] = 0;
657 tx_offload.data[1] = 0;
659 sw_ring = txq->sw_ring;
661 tx_id = txq->tx_tail;
662 txe = &sw_ring[tx_id];
665 /* Determine if the descriptor ring needs to be cleaned. */
666 if (txq->nb_tx_free < txq->tx_free_thresh)
667 ixgbe_xmit_cleanup(txq);
669 rte_prefetch0(&txe->mbuf->pool);
672 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
675 pkt_len = tx_pkt->pkt_len;
678 * Determine how many (if any) context descriptors
679 * are needed for offload functionality.
681 ol_flags = tx_pkt->ol_flags;
682 #ifdef RTE_LIBRTE_SECURITY
683 use_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD);
686 /* If hardware offload required */
687 tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK;
689 tx_offload.l2_len = tx_pkt->l2_len;
690 tx_offload.l3_len = tx_pkt->l3_len;
691 tx_offload.l4_len = tx_pkt->l4_len;
692 tx_offload.vlan_tci = tx_pkt->vlan_tci;
693 tx_offload.tso_segsz = tx_pkt->tso_segsz;
694 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
695 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
696 #ifdef RTE_LIBRTE_SECURITY
698 union ixgbe_crypto_tx_desc_md *ipsec_mdata =
699 (union ixgbe_crypto_tx_desc_md *)
701 tx_offload.sa_idx = ipsec_mdata->sa_idx;
702 tx_offload.sec_pad_len = ipsec_mdata->pad_len;
706 /* Decide whether a new context descriptor must be built or an existing one can be reused. */
707 ctx = what_advctx_update(txq, tx_ol_req,
709 /* Only allocate context descriptor if required */
710 new_ctx = (ctx == IXGBE_CTX_NUM);
715 * Keep track of how many descriptors are used this loop
716 * This will always be the number of segments + the number of
717 * Context descriptors required to transmit the packet
719 nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
722 nb_used + txq->nb_tx_used >= txq->tx_rs_thresh)
723 /* set RS on the previous packet in the burst */
724 txp->read.cmd_type_len |=
725 rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
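/*
 * Note: txp tracks the last data descriptor written for the previous
 * packet of this burst; forcing RS on it here guarantees a write-back
 * before nb_tx_used grows past the RS threshold.
 */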
728 * The number of descriptors that must be allocated for a
729 * packet is the number of segments of that packet, plus 1
730 * Context Descriptor for the hardware offload, if any.
731 * Determine the last TX descriptor to allocate in the TX ring
732 * for the packet, starting from the current position (tx_id)
735 tx_last = (uint16_t) (tx_id + nb_used - 1);
738 if (tx_last >= txq->nb_tx_desc)
739 tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
741 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
742 " tx_first=%u tx_last=%u",
743 (unsigned) txq->port_id,
744 (unsigned) txq->queue_id,
750 * Make sure there are enough TX descriptors available to
751 * transmit the entire packet.
752 * nb_used better be less than or equal to txq->tx_rs_thresh
754 if (nb_used > txq->nb_tx_free) {
755 PMD_TX_FREE_LOG(DEBUG,
756 "Not enough free TX descriptors "
757 "nb_used=%4u nb_free=%4u "
758 "(port=%d queue=%d)",
759 nb_used, txq->nb_tx_free,
760 txq->port_id, txq->queue_id);
762 if (ixgbe_xmit_cleanup(txq) != 0) {
763 /* Could not clean any descriptors */
769 /* nb_used better be <= txq->tx_rs_thresh */
770 if (unlikely(nb_used > txq->tx_rs_thresh)) {
771 PMD_TX_FREE_LOG(DEBUG,
772 "The number of descriptors needed to "
773 "transmit the packet exceeds the "
774 "RS bit threshold. This will impact "
776 "nb_used=%4u nb_free=%4u "
778 "(port=%d queue=%d)",
779 nb_used, txq->nb_tx_free,
781 txq->port_id, txq->queue_id);
783 * Loop here until there are enough TX
784 * descriptors or until the ring cannot be cleaned any further.
787 while (nb_used > txq->nb_tx_free) {
788 if (ixgbe_xmit_cleanup(txq) != 0) {
790 * Could not clean any
802 * By now there are enough free TX descriptors to transmit
807 * Set common flags of all TX Data Descriptors.
809 * The following bits must be set in all Data Descriptors:
810 * - IXGBE_ADVTXD_DTYP_DATA
811 * - IXGBE_ADVTXD_DCMD_DEXT
813 * The following bits must be set in the first Data Descriptor
814 * and are ignored in the other ones:
815 * - IXGBE_ADVTXD_DCMD_IFCS
816 * - IXGBE_ADVTXD_MAC_1588
817 * - IXGBE_ADVTXD_DCMD_VLE
819 * The following bits must only be set in the last Data
821 * - IXGBE_TXD_CMD_EOP
823 * The following bits can be set in any Data Descriptor, but
824 * are only set in the last Data Descriptor:
827 cmd_type_len = IXGBE_ADVTXD_DTYP_DATA |
828 IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
830 #ifdef RTE_LIBRTE_IEEE1588
831 if (ol_flags & PKT_TX_IEEE1588_TMST)
832 cmd_type_len |= IXGBE_ADVTXD_MAC_1588;
838 if (ol_flags & PKT_TX_TCP_SEG) {
839 /* when TSO is on, the paylen in the descriptor is
840 * not the total packet length but the tcp payload length */
841 pkt_len -= (tx_offload.l2_len +
842 tx_offload.l3_len + tx_offload.l4_len);
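/*
 * e.g. a 32066-byte TSO packet with 14 + 20 + 32 bytes of L2/L3/L4
 * headers is reported to the hardware with a paylen of 32000.
 */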
846 * Setup the TX Advanced Context Descriptor if required
849 volatile struct ixgbe_adv_tx_context_desc *
852 ctx_txd = (volatile struct
853 ixgbe_adv_tx_context_desc *)
856 txn = &sw_ring[txe->next_id];
857 rte_prefetch0(&txn->mbuf->pool);
859 if (txe->mbuf != NULL) {
860 rte_pktmbuf_free_seg(txe->mbuf);
864 ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
865 tx_offload, &tx_pkt->udata64);
867 txe->last_id = tx_last;
868 tx_id = txe->next_id;
873 * Set up the TX Advanced Data Descriptor.
874 * This path is taken regardless of whether a context
875 * descriptor was newly built or an existing one is reused.
877 cmd_type_len |= tx_desc_ol_flags_to_cmdtype(ol_flags);
878 olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
879 olinfo_status |= ctx << IXGBE_ADVTXD_IDX_SHIFT;
882 olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
883 #ifdef RTE_LIBRTE_SECURITY
885 olinfo_status |= IXGBE_ADVTXD_POPTS_IPSEC;
891 txn = &sw_ring[txe->next_id];
892 rte_prefetch0(&txn->mbuf->pool);
894 if (txe->mbuf != NULL)
895 rte_pktmbuf_free_seg(txe->mbuf);
899 * Set up Transmit Data Descriptor.
901 slen = m_seg->data_len;
902 buf_dma_addr = rte_mbuf_data_iova(m_seg);
903 txd->read.buffer_addr =
904 rte_cpu_to_le_64(buf_dma_addr);
905 txd->read.cmd_type_len =
906 rte_cpu_to_le_32(cmd_type_len | slen);
907 txd->read.olinfo_status =
908 rte_cpu_to_le_32(olinfo_status);
909 txe->last_id = tx_last;
910 tx_id = txe->next_id;
913 } while (m_seg != NULL);
916 * The last packet data descriptor needs End Of Packet (EOP)
918 cmd_type_len |= IXGBE_TXD_CMD_EOP;
919 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
920 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
922 /* Set RS bit only on threshold packets' last descriptor */
923 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
924 PMD_TX_FREE_LOG(DEBUG,
925 "Setting RS bit on TXD id="
926 "%4u (port=%d queue=%d)",
927 tx_last, txq->port_id, txq->queue_id);
929 cmd_type_len |= IXGBE_TXD_CMD_RS;
931 /* Update txq RS bit counters */
937 txd->read.cmd_type_len |= rte_cpu_to_le_32(cmd_type_len);
941 /* set RS on last packet in the burst */
943 txp->read.cmd_type_len |= rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
948 * Set the Transmit Descriptor Tail (TDT)
950 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
951 (unsigned) txq->port_id, (unsigned) txq->queue_id,
952 (unsigned) tx_id, (unsigned) nb_tx);
953 IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
954 txq->tx_tail = tx_id;
959 /*********************************************************************
960  *
961  *  TX prep functions
962  *
963  **********************************************************************/
965 ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
970 struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
972 for (i = 0; i < nb_pkts; i++) {
974 ol_flags = m->ol_flags;
977 * Check that the packet meets the requirement on the number of segments.
979 * NOTE: for ixgbe the limit is always (40 - WTHRESH), for both TSO and non-TSO.
983 if (m->nb_segs > IXGBE_TX_MAX_SEG - txq->wthresh) {
988 if (ol_flags & IXGBE_TX_OFFLOAD_NOTSUP_MASK) {
989 rte_errno = -ENOTSUP;
993 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
994 ret = rte_validate_tx_offload(m);
1000 ret = rte_net_intel_cksum_prepare(m);
1010 /*********************************************************************
1011  *
1012  *  RX functions
1013  *
1014  **********************************************************************/
1016 #define IXGBE_PACKET_TYPE_ETHER 0X00
1017 #define IXGBE_PACKET_TYPE_IPV4 0X01
1018 #define IXGBE_PACKET_TYPE_IPV4_TCP 0X11
1019 #define IXGBE_PACKET_TYPE_IPV4_UDP 0X21
1020 #define IXGBE_PACKET_TYPE_IPV4_SCTP 0X41
1021 #define IXGBE_PACKET_TYPE_IPV4_EXT 0X03
1022 #define IXGBE_PACKET_TYPE_IPV4_EXT_TCP 0X13
1023 #define IXGBE_PACKET_TYPE_IPV4_EXT_UDP 0X23
1024 #define IXGBE_PACKET_TYPE_IPV4_EXT_SCTP 0X43
1025 #define IXGBE_PACKET_TYPE_IPV6 0X04
1026 #define IXGBE_PACKET_TYPE_IPV6_TCP 0X14
1027 #define IXGBE_PACKET_TYPE_IPV6_UDP 0X24
1028 #define IXGBE_PACKET_TYPE_IPV6_SCTP 0X44
1029 #define IXGBE_PACKET_TYPE_IPV6_EXT 0X0C
1030 #define IXGBE_PACKET_TYPE_IPV6_EXT_TCP 0X1C
1031 #define IXGBE_PACKET_TYPE_IPV6_EXT_UDP 0X2C
1032 #define IXGBE_PACKET_TYPE_IPV6_EXT_SCTP 0X4C
1033 #define IXGBE_PACKET_TYPE_IPV4_IPV6 0X05
1034 #define IXGBE_PACKET_TYPE_IPV4_IPV6_TCP 0X15
1035 #define IXGBE_PACKET_TYPE_IPV4_IPV6_UDP 0X25
1036 #define IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP 0X45
1037 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6 0X07
1038 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP 0X17
1039 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP 0X27
1040 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP 0X47
1041 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT 0X0D
1042 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
1043 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
1044 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP 0X4D
1045 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT 0X0F
1046 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP 0X1F
1047 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP 0X2F
1048 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP 0X4F
1050 #define IXGBE_PACKET_TYPE_NVGRE 0X00
1051 #define IXGBE_PACKET_TYPE_NVGRE_IPV4 0X01
1052 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_TCP 0X11
1053 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP 0X21
1054 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP 0X41
1055 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT 0X03
1056 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP 0X13
1057 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP 0X23
1058 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP 0X43
1059 #define IXGBE_PACKET_TYPE_NVGRE_IPV6 0X04
1060 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP 0X14
1061 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP 0X24
1062 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP 0X44
1063 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT 0X0C
1064 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP 0X1C
1065 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP 0X2C
1066 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP 0X4C
1067 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6 0X05
1068 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP 0X15
1069 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP 0X25
1070 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT 0X0D
1071 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP 0X1D
1072 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP 0X2D
1074 #define IXGBE_PACKET_TYPE_VXLAN 0X80
1075 #define IXGBE_PACKET_TYPE_VXLAN_IPV4 0X81
1076 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_TCP 0x91
1077 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP 0xA1
1078 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP 0xC1
1079 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT 0x83
1080 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP 0X93
1081 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP 0XA3
1082 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP 0XC3
1083 #define IXGBE_PACKET_TYPE_VXLAN_IPV6 0X84
1084 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP 0X94
1085 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP 0XA4
1086 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP 0XC4
1087 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT 0X8C
1088 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP 0X9C
1089 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP 0XAC
1090 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP 0XCC
1091 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6 0X85
1092 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP 0X95
1093 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP 0XA5
1094 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT 0X8D
1095 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP 0X9D
1096 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP 0XAD
1099 * Use two different tables, one for normal packets and one for
1100 * tunnel packets, to save space.
1103 ptype_table[IXGBE_PACKET_TYPE_MAX] __rte_cache_aligned = {
1104 [IXGBE_PACKET_TYPE_ETHER] = RTE_PTYPE_L2_ETHER,
1105 [IXGBE_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
1107 [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
1108 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
1109 [IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
1110 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
1111 [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
1112 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
1113 [IXGBE_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
1114 RTE_PTYPE_L3_IPV4_EXT,
1115 [IXGBE_PACKET_TYPE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1116 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
1117 [IXGBE_PACKET_TYPE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1118 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
1119 [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1120 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
1121 [IXGBE_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
1123 [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1124 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
1125 [IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1126 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
1127 [IXGBE_PACKET_TYPE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1128 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP,
1129 [IXGBE_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1130 RTE_PTYPE_L3_IPV6_EXT,
1131 [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1132 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
1133 [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1134 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
1135 [IXGBE_PACKET_TYPE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1136 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_SCTP,
1137 [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
1138 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1139 RTE_PTYPE_INNER_L3_IPV6,
1140 [IXGBE_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1141 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1142 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
1143 [IXGBE_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1144 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1145 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
1146 [IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1147 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1148 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
1149 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6] = RTE_PTYPE_L2_ETHER |
1150 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1151 RTE_PTYPE_INNER_L3_IPV6,
1152 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1153 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1154 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
1155 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1156 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1157 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
1158 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1159 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1160 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
1161 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1162 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1163 RTE_PTYPE_INNER_L3_IPV6_EXT,
1164 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1165 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1166 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
1167 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1168 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1169 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
1170 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1171 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1172 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
1173 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1174 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1175 RTE_PTYPE_INNER_L3_IPV6_EXT,
1176 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1177 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1178 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
1179 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1180 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1181 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
1182 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP] =
1183 RTE_PTYPE_L2_ETHER |
1184 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1185 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
1189 ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX] __rte_cache_aligned = {
1190 [IXGBE_PACKET_TYPE_NVGRE] = RTE_PTYPE_L2_ETHER |
1191 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1192 RTE_PTYPE_INNER_L2_ETHER,
1193 [IXGBE_PACKET_TYPE_NVGRE_IPV4] = RTE_PTYPE_L2_ETHER |
1194 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1195 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1196 [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
1197 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1198 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT,
1199 [IXGBE_PACKET_TYPE_NVGRE_IPV6] = RTE_PTYPE_L2_ETHER |
1200 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1201 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6,
1202 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
1203 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1204 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1205 [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1206 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1207 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT,
1208 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1209 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1210 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1211 [IXGBE_PACKET_TYPE_NVGRE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
1212 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1213 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
1214 RTE_PTYPE_INNER_L4_TCP,
1215 [IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1216 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1217 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
1218 RTE_PTYPE_INNER_L4_TCP,
1219 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1220 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1221 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1222 [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1223 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1224 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
1225 RTE_PTYPE_INNER_L4_TCP,
1226 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP] =
1227 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1228 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
1229 RTE_PTYPE_INNER_L3_IPV4,
1230 [IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
1231 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1232 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
1233 RTE_PTYPE_INNER_L4_UDP,
1234 [IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1235 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1236 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
1237 RTE_PTYPE_INNER_L4_UDP,
1238 [IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1239 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1240 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
1241 RTE_PTYPE_INNER_L4_SCTP,
1242 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1243 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1244 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1245 [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1246 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1247 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
1248 RTE_PTYPE_INNER_L4_UDP,
1249 [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1250 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1251 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
1252 RTE_PTYPE_INNER_L4_SCTP,
1253 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP] =
1254 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1255 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
1256 RTE_PTYPE_INNER_L3_IPV4,
1257 [IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
1258 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1259 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
1260 RTE_PTYPE_INNER_L4_SCTP,
1261 [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1262 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1263 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
1264 RTE_PTYPE_INNER_L4_SCTP,
1265 [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1266 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1267 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
1268 RTE_PTYPE_INNER_L4_TCP,
1269 [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1270 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1271 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
1272 RTE_PTYPE_INNER_L4_UDP,
1274 [IXGBE_PACKET_TYPE_VXLAN] = RTE_PTYPE_L2_ETHER |
1275 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1276 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER,
1277 [IXGBE_PACKET_TYPE_VXLAN_IPV4] = RTE_PTYPE_L2_ETHER |
1278 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1279 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1280 RTE_PTYPE_INNER_L3_IPV4,
1281 [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
1282 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1283 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1284 RTE_PTYPE_INNER_L3_IPV4_EXT,
1285 [IXGBE_PACKET_TYPE_VXLAN_IPV6] = RTE_PTYPE_L2_ETHER |
1286 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1287 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1288 RTE_PTYPE_INNER_L3_IPV6,
1289 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
1290 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1291 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1292 RTE_PTYPE_INNER_L3_IPV4,
1293 [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1294 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1295 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1296 RTE_PTYPE_INNER_L3_IPV6_EXT,
1297 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1298 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1299 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1300 RTE_PTYPE_INNER_L3_IPV4,
1301 [IXGBE_PACKET_TYPE_VXLAN_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
1302 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1303 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1304 RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_TCP,
1305 [IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1306 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1307 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1308 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
1309 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1310 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1311 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1312 RTE_PTYPE_INNER_L3_IPV4,
1313 [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1314 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1315 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1316 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
1317 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP] =
1318 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1319 RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
1320 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1321 [IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
1322 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1323 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1324 RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_UDP,
1325 [IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1326 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1327 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1328 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
1329 [IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1330 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1331 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1332 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
1333 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1334 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1335 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1336 RTE_PTYPE_INNER_L3_IPV4,
1337 [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1338 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1339 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1340 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
1341 [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1342 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1343 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1344 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
1345 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP] =
1346 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1347 RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
1348 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1349 [IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
1350 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1351 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1352 RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_SCTP,
1353 [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1354 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1355 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1356 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_SCTP,
1357 [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1358 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1359 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1360 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP,
1361 [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1362 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1363 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1364 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP,
1367 /* @note: update ixgbe_dev_supported_ptypes_get() if anything changes here. */
1368 static inline uint32_t
1369 ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask)
1372 if (unlikely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
1373 return RTE_PTYPE_UNKNOWN;
1375 pkt_info = (pkt_info >> IXGBE_PACKET_TYPE_SHIFT) & ptype_mask;
1377 /* For tunnel packet */
1378 if (pkt_info & IXGBE_PACKET_TYPE_TUNNEL_BIT) {
1380 /* Clear the tunnel bit so the value indexes the (smaller) tunnel table. */
1380 pkt_info &= IXGBE_PACKET_TYPE_MASK_TUNNEL;
1381 return ptype_table_tn[pkt_info];
1385 * For x550, if it's not a tunnel packet, the tunnel
1386 * type bit should be masked off (set to 0).
1387 * Reuse the 82599 mask for this.
1389 pkt_info &= IXGBE_PACKET_TYPE_MASK_82599;
1391 return ptype_table[pkt_info];
1394 static inline uint64_t
1395 ixgbe_rxd_pkt_info_to_pkt_flags(uint16_t pkt_info)
1397 static uint64_t ip_rss_types_map[16] __rte_cache_aligned = {
1398 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
1399 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
1400 PKT_RX_RSS_HASH, 0, 0, 0,
1401 0, 0, 0, PKT_RX_FDIR,
1403 #ifdef RTE_LIBRTE_IEEE1588
1404 static uint64_t ip_pkt_etqf_map[8] = {
1405 0, 0, 0, PKT_RX_IEEE1588_PTP,
1409 if (likely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
1410 return ip_pkt_etqf_map[(pkt_info >> 4) & 0X07] |
1411 ip_rss_types_map[pkt_info & 0XF];
1413 return ip_rss_types_map[pkt_info & 0XF];
1415 return ip_rss_types_map[pkt_info & 0XF];
1419 static inline uint64_t
1420 rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags)
1425 * Only check whether a VLAN tag is present.
1426 * Do not check here whether the NIC computed the L3/L4 Rx checksum;
1427 * that is indicated by the rte_eth_rxmode.offloads flags.
1429 pkt_flags = (rx_status & IXGBE_RXD_STAT_VP) ? vlan_flags : 0;
1431 #ifdef RTE_LIBRTE_IEEE1588
1432 if (rx_status & IXGBE_RXD_STAT_TMST)
1433 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
1438 static inline uint64_t
1439 rx_desc_error_to_pkt_flags(uint32_t rx_status)
1444 * Bit 31: IPE, IPv4 checksum error
1445 * Bit 30: L4I, L4 integrity error
1447 static uint64_t error_to_pkt_flags_map[4] = {
1448 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
1449 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
1450 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD,
1451 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
1453 pkt_flags = error_to_pkt_flags_map[(rx_status >>
1454 IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK];
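/*
 * e.g. a status word with IPE set and L4I clear indexes entry 2 of the
 * map above, i.e. PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD.
 */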
1456 if ((rx_status & IXGBE_RXD_STAT_OUTERIPCS) &&
1457 (rx_status & IXGBE_RXDADV_ERR_OUTERIPER)) {
1458 pkt_flags |= PKT_RX_EIP_CKSUM_BAD;
1461 #ifdef RTE_LIBRTE_SECURITY
1462 if (rx_status & IXGBE_RXD_STAT_SECP) {
1463 pkt_flags |= PKT_RX_SEC_OFFLOAD;
1464 if (rx_status & IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG)
1465 pkt_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
1473 * LOOK_AHEAD defines how many desc statuses to check beyond the
1474 * current descriptor.
1475 * It must be a compile-time #define for optimal performance.
1476 * Do not change the value of LOOK_AHEAD, as the ixgbe_rx_scan_hw_ring
1477 * function only works with LOOK_AHEAD=8.
1479 #define LOOK_AHEAD 8
1480 #if (LOOK_AHEAD != 8)
1481 #error "PMD IXGBE: LOOK_AHEAD must be 8\n"
1484 ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
1486 volatile union ixgbe_adv_rx_desc *rxdp;
1487 struct ixgbe_rx_entry *rxep;
1488 struct rte_mbuf *mb;
1492 uint32_t s[LOOK_AHEAD];
1493 uint32_t pkt_info[LOOK_AHEAD];
1494 int i, j, nb_rx = 0;
1496 uint64_t vlan_flags = rxq->vlan_flags;
1498 /* get references to current descriptor and S/W ring entry */
1499 rxdp = &rxq->rx_ring[rxq->rx_tail];
1500 rxep = &rxq->sw_ring[rxq->rx_tail];
1502 status = rxdp->wb.upper.status_error;
1503 /* check to make sure there is at least 1 packet to receive */
1504 if (!(status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
1508 * Scan LOOK_AHEAD descriptors at a time to determine which descriptors
1509 * reference packets that are ready to be received.
1511 for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST;
1512 i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD) {
1513 /* Read the status words of the next LOOK_AHEAD descriptors */
1514 for (j = 0; j < LOOK_AHEAD; j++)
1515 s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error);
1519 /* Count how many consecutive descriptors have their DD bit set */
1520 for (nb_dd = 0; nb_dd < LOOK_AHEAD &&
1521 (s[nb_dd] & IXGBE_RXDADV_STAT_DD); nb_dd++)
1524 for (j = 0; j < nb_dd; j++)
1525 pkt_info[j] = rte_le_to_cpu_32(rxdp[j].wb.lower.
1530 /* Translate descriptor info to mbuf format */
1531 for (j = 0; j < nb_dd; ++j) {
1533 pkt_len = rte_le_to_cpu_16(rxdp[j].wb.upper.length) -
1535 mb->data_len = pkt_len;
1536 mb->pkt_len = pkt_len;
1537 mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
1539 /* convert descriptor fields to rte mbuf flags */
1540 pkt_flags = rx_desc_status_to_pkt_flags(s[j],
1542 pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
1543 pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags
1544 ((uint16_t)pkt_info[j]);
1545 mb->ol_flags = pkt_flags;
1547 ixgbe_rxd_pkt_info_to_pkt_type
1548 (pkt_info[j], rxq->pkt_type_mask);
1550 if (likely(pkt_flags & PKT_RX_RSS_HASH))
1551 mb->hash.rss = rte_le_to_cpu_32(
1552 rxdp[j].wb.lower.hi_dword.rss);
1553 else if (pkt_flags & PKT_RX_FDIR) {
1554 mb->hash.fdir.hash = rte_le_to_cpu_16(
1555 rxdp[j].wb.lower.hi_dword.csum_ip.csum) &
1556 IXGBE_ATR_HASH_MASK;
1557 mb->hash.fdir.id = rte_le_to_cpu_16(
1558 rxdp[j].wb.lower.hi_dword.csum_ip.ip_id);
1562 /* Move mbuf pointers from the S/W ring to the stage */
1563 for (j = 0; j < LOOK_AHEAD; ++j) {
1564 rxq->rx_stage[i + j] = rxep[j].mbuf;
1567 /* stop scanning if this group of descriptors was not completely done */
1568 if (nb_dd != LOOK_AHEAD)
1572 /* clear software ring entries so we can cleanup correctly */
1573 for (i = 0; i < nb_rx; ++i) {
1574 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1582 ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf)
1584 volatile union ixgbe_adv_rx_desc *rxdp;
1585 struct ixgbe_rx_entry *rxep;
1586 struct rte_mbuf *mb;
1591 /* allocate buffers in bulk directly into the S/W ring */
1592 alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
1593 rxep = &rxq->sw_ring[alloc_idx];
1594 diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
1595 rxq->rx_free_thresh);
1596 if (unlikely(diag != 0))
1599 rxdp = &rxq->rx_ring[alloc_idx];
1600 for (i = 0; i < rxq->rx_free_thresh; ++i) {
1601 /* populate the static rte mbuf fields */
1604 mb->port = rxq->port_id;
1607 rte_mbuf_refcnt_set(mb, 1);
1608 mb->data_off = RTE_PKTMBUF_HEADROOM;
1610 /* populate the descriptors */
1611 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1612 rxdp[i].read.hdr_addr = 0;
1613 rxdp[i].read.pkt_addr = dma_addr;
1616 /* update state of internal queue structure */
1617 rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh;
1618 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1619 rxq->rx_free_trigger = rxq->rx_free_thresh - 1;
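/*
 * e.g. on a 512-descriptor ring with rx_free_thresh = 32 the trigger
 * advances 31, 63, ..., 511 and then wraps back to 31.
 */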
1625 static inline uint16_t
1626 ixgbe_rx_fill_from_stage(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
1629 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1632 /* how many packets are ready to return? */
1633 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1635 /* copy mbuf pointers to the application's packet list */
1636 for (i = 0; i < nb_pkts; ++i)
1637 rx_pkts[i] = stage[i];
1639 /* update internal queue state */
1640 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1641 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1646 static inline uint16_t
1647 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1650 struct ixgbe_rx_queue *rxq = (struct ixgbe_rx_queue *)rx_queue;
1653 /* Any previously recv'd pkts will be returned from the Rx stage */
1654 if (rxq->rx_nb_avail)
1655 return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1657 /* Scan the H/W ring for packets to receive */
1658 nb_rx = (uint16_t)ixgbe_rx_scan_hw_ring(rxq);
1660 /* update internal queue state */
1661 rxq->rx_next_avail = 0;
1662 rxq->rx_nb_avail = nb_rx;
1663 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1665 /* if required, allocate new buffers to replenish descriptors */
1666 if (rxq->rx_tail > rxq->rx_free_trigger) {
1667 uint16_t cur_free_trigger = rxq->rx_free_trigger;
1669 if (ixgbe_rx_alloc_bufs(rxq, true) != 0) {
1672 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1673 "queue_id=%u", (unsigned) rxq->port_id,
1674 (unsigned) rxq->queue_id);
1676 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
1677 rxq->rx_free_thresh;
1680 * Need to rewind any previous receives if we cannot
1681 * allocate new buffers to replenish the old ones.
1683 rxq->rx_nb_avail = 0;
1684 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1685 for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j)
1686 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1691 /* update tail pointer */
1693 IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr,
1697 if (rxq->rx_tail >= rxq->nb_rx_desc)
1700 /* received any packets this loop? */
1701 if (rxq->rx_nb_avail)
1702 return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1707 /* split requests into chunks of size RTE_PMD_IXGBE_RX_MAX_BURST */
1709 ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1714 if (unlikely(nb_pkts == 0))
1717 if (likely(nb_pkts <= RTE_PMD_IXGBE_RX_MAX_BURST))
1718 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1720 /* request is relatively large, chunk it up */
1725 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_RX_MAX_BURST);
1726 ret = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1727 nb_rx = (uint16_t)(nb_rx + ret);
1728 nb_pkts = (uint16_t)(nb_pkts - ret);
1737 ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1740 struct ixgbe_rx_queue *rxq;
1741 volatile union ixgbe_adv_rx_desc *rx_ring;
1742 volatile union ixgbe_adv_rx_desc *rxdp;
1743 struct ixgbe_rx_entry *sw_ring;
1744 struct ixgbe_rx_entry *rxe;
1745 struct rte_mbuf *rxm;
1746 struct rte_mbuf *nmb;
1747 union ixgbe_adv_rx_desc rxd;
1756 uint64_t vlan_flags;
1761 rx_id = rxq->rx_tail;
1762 rx_ring = rxq->rx_ring;
1763 sw_ring = rxq->sw_ring;
1764 vlan_flags = rxq->vlan_flags;
1765 while (nb_rx < nb_pkts) {
1767 * The order of operations here is important as the DD status
1768 * bit must not be read after any other descriptor fields.
1769 * rx_ring and rxdp point to volatile data, so the compiler cannot
1770 * reorder the accesses. If they were not volatile, the reads could
1771 * be reordered and invalid descriptor fields could end up being
1772 * used when copied into rxd.
1774 rxdp = &rx_ring[rx_id];
1775 staterr = rxdp->wb.upper.status_error;
1776 if (!(staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
1783 * If the IXGBE_RXDADV_STAT_EOP flag is not set, the RX packet
1784 * is likely to be invalid and to be dropped by the various
1785 * validation checks performed by the network stack.
1787 * Allocate a new mbuf to replenish the RX ring descriptor.
1788 * If the allocation fails:
1789 * - arrange for that RX descriptor to be the first one
1790 * being parsed the next time the receive function is
1791 * invoked [on the same queue].
1793 * - Stop parsing the RX ring and return immediately.
1795 * This policy does not drop the packet received in the RX
1796 * descriptor for which the allocation of a new mbuf failed.
1797 * Thus, it allows that packet to be later retrieved if
1798 * mbufs have been freed in the meantime.
1799 * As a side effect, holding RX descriptors instead of
1800 * systematically giving them back to the NIC may lead to
1801 * RX ring exhaustion situations.
1802 * However, the NIC can gracefully prevent such situations
1803 * from happening by sending specific "back-pressure" flow
1804 * control frames to its peer(s).
1806 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1807 "ext_err_stat=0x%08x pkt_len=%u",
1808 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1809 (unsigned) rx_id, (unsigned) staterr,
1810 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
1812 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1814 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1815 "queue_id=%u", (unsigned) rxq->port_id,
1816 (unsigned) rxq->queue_id);
1817 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1822 rxe = &sw_ring[rx_id];
1824 if (rx_id == rxq->nb_rx_desc)
1827 /* Prefetch next mbuf while processing current one. */
1828 rte_ixgbe_prefetch(sw_ring[rx_id].mbuf);
1831 * When next RX descriptor is on a cache-line boundary,
1832 * prefetch the next 4 RX descriptors and the next 8 pointers
1835 if ((rx_id & 0x3) == 0) {
1836 rte_ixgbe_prefetch(&rx_ring[rx_id]);
1837 rte_ixgbe_prefetch(&sw_ring[rx_id]);
1843 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1844 rxdp->read.hdr_addr = 0;
1845 rxdp->read.pkt_addr = dma_addr;
1848 * Initialize the returned mbuf.
1849 * 1) setup generic mbuf fields:
1850 * - number of segments,
1853 * - RX port identifier.
1854 * 2) integrate hardware offload data, if any:
1855 * - RSS flag & hash,
1856 * - IP checksum flag,
1857 * - VLAN TCI, if any,
1860 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
1862 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1863 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
1866 rxm->pkt_len = pkt_len;
1867 rxm->data_len = pkt_len;
1868 rxm->port = rxq->port_id;
1870 pkt_info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1871 /* Only valid if PKT_RX_VLAN set in pkt_flags */
1872 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1874 pkt_flags = rx_desc_status_to_pkt_flags(staterr, vlan_flags);
1875 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1876 pkt_flags = pkt_flags |
1877 ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info);
1878 rxm->ol_flags = pkt_flags;
1880 ixgbe_rxd_pkt_info_to_pkt_type(pkt_info,
1881 rxq->pkt_type_mask);
1883 if (likely(pkt_flags & PKT_RX_RSS_HASH))
1884 rxm->hash.rss = rte_le_to_cpu_32(
1885 rxd.wb.lower.hi_dword.rss);
1886 else if (pkt_flags & PKT_RX_FDIR) {
1887 rxm->hash.fdir.hash = rte_le_to_cpu_16(
1888 rxd.wb.lower.hi_dword.csum_ip.csum) &
1889 IXGBE_ATR_HASH_MASK;
1890 rxm->hash.fdir.id = rte_le_to_cpu_16(
1891 rxd.wb.lower.hi_dword.csum_ip.ip_id);
1894 * Store the mbuf address into the next entry of the array
1895 * of returned packets.
1897 rx_pkts[nb_rx++] = rxm;
1899 rxq->rx_tail = rx_id;
1902 * If the number of free RX descriptors is greater than the RX free
1903 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1905 * Update the RDT with the value of the last processed RX descriptor
1906 * minus 1, to guarantee that the RDT register is never equal to the
1907 * RDH register, which creates a "full" ring situation from the
1908 * hardware point of view...
1910 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1911 if (nb_hold > rxq->rx_free_thresh) {
1912 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1913 "nb_hold=%u nb_rx=%u",
1914 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1915 (unsigned) rx_id, (unsigned) nb_hold,
1917 rx_id = (uint16_t) ((rx_id == 0) ?
1918 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1919 IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1922 rxq->nb_rx_hold = nb_hold;
1927 * Detect an RSC descriptor.
1929 static inline uint32_t
1930 ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
1932 return (rte_le_to_cpu_32(rx->wb.lower.lo_dword.data) &
1933 IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
1937 * ixgbe_fill_cluster_head_buf - fill the first mbuf of the returned packet
1939 * Fill the following info in the HEAD buffer of the Rx cluster:
1940 * - RX port identifier
1941 * - hardware offload data, if any:
1943 * - IP checksum flag
1944 * - VLAN TCI, if any
1946 * @head HEAD of the packet cluster
1947 * @desc HW descriptor to get data from
1948 * @rxq Pointer to the Rx queue
1951 ixgbe_fill_cluster_head_buf(
1952 struct rte_mbuf *head,
1953 union ixgbe_adv_rx_desc *desc,
1954 struct ixgbe_rx_queue *rxq,
1960 head->port = rxq->port_id;
1962 /* The vlan_tci field is only valid when PKT_RX_VLAN is
1963 * set in the pkt_flags field.
1965 head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
1966 pkt_info = rte_le_to_cpu_32(desc->wb.lower.lo_dword.data);
1967 pkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags);
1968 pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
1969 pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info);
1970 head->ol_flags = pkt_flags;
1972 ixgbe_rxd_pkt_info_to_pkt_type(pkt_info, rxq->pkt_type_mask);
1974 if (likely(pkt_flags & PKT_RX_RSS_HASH))
1975 head->hash.rss = rte_le_to_cpu_32(desc->wb.lower.hi_dword.rss);
1976 else if (pkt_flags & PKT_RX_FDIR) {
1977 head->hash.fdir.hash =
1978 rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.csum)
1979 & IXGBE_ATR_HASH_MASK;
1980 head->hash.fdir.id =
1981 rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.ip_id);
1986 * ixgbe_recv_pkts_lro - receive handler for the LRO case.
1988 * @rx_queue Rx queue handle
1989 * @rx_pkts table of received packets
1990 * @nb_pkts size of rx_pkts table
1991 * @bulk_alloc if TRUE, bulk allocation is used for HW ring refilling
1993 * Handles the Rx HW ring completions when RSC feature is configured. Uses an
1994 * additional ring of ixgbe_rsc_entry's that will hold the relevant RSC info.
1996 * We use the same logic as in Linux and in FreeBSD ixgbe drivers:
1997 * 1) When non-EOP RSC completion arrives:
1998 * a) Update the HEAD of the current RSC aggregation cluster with the new
1999 * segment's data length.
2000 * b) Set the "next" pointer of the current segment to point to the segment
2001 * at the NEXTP index.
2002 * c) Pass the HEAD of RSC aggregation cluster on to the next NEXTP entry
2003 * in the sw_rsc_ring.
2004 * 2) When EOP arrives we just update the cluster's total length and offload
2005 * flags and deliver the cluster up to the upper layers. In our case - put it
2006 * in the rx_pkts table.
2008 * Returns the number of received packets/clusters (according to the "bulk
2009 * receive" interface).
2011 static inline uint16_t
2012 ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
2015 struct ixgbe_rx_queue *rxq = rx_queue;
2016 volatile union ixgbe_adv_rx_desc *rx_ring = rxq->rx_ring;
2017 struct ixgbe_rx_entry *sw_ring = rxq->sw_ring;
2018 struct ixgbe_scattered_rx_entry *sw_sc_ring = rxq->sw_sc_ring;
2019 uint16_t rx_id = rxq->rx_tail;
2021 uint16_t nb_hold = rxq->nb_rx_hold;
2022 uint16_t prev_id = rxq->rx_tail;
2024 while (nb_rx < nb_pkts) {
2026 struct ixgbe_rx_entry *rxe;
2027 struct ixgbe_scattered_rx_entry *sc_entry;
2028 struct ixgbe_scattered_rx_entry *next_sc_entry;
2029 struct ixgbe_rx_entry *next_rxe = NULL;
2030 struct rte_mbuf *first_seg;
2031 struct rte_mbuf *rxm;
2032 struct rte_mbuf *nmb = NULL;
2033 union ixgbe_adv_rx_desc rxd;
2036 volatile union ixgbe_adv_rx_desc *rxdp;
2041 * The code in this whole file uses the volatile pointer to
2042 * ensure the read ordering of the status and the rest of the
2043 * descriptor fields (on the compiler level only!!!). This is so
2044 * UGLY - why not just use a compiler barrier instead? DPDK
2045 * even has rte_compiler_barrier() for that.
2047 * But most importantly this is just wrong because it doesn't
2048 * ensure memory ordering in the general case at all. For
2049 * instance, DPDK is supposed to work on Power CPUs, where a
2050 * compiler barrier alone may simply not be enough!
2052 * I tried to write only this function properly to have a
2053 * starting point (as a part of an LRO/RSC series) but the
2054 * compiler cursed at me when I tried to cast away the
2055 * "volatile" from rx_ring (yes, it's volatile too!!!). So, I'm
2056 * keeping it the way it is for now.
2058 * The code in this file is broken in so many other places and
2059 * will simply not work on a big-endian CPU anyway, therefore the
2060 * lines below will have to be revisited together with the rest
2064 * - Get rid of "volatile" and let the compiler do its job.
2065 * - Use the proper memory barrier (rte_rmb()) to ensure the
2066 * memory ordering below.
2068 rxdp = &rx_ring[rx_id];
2069 staterr = rte_le_to_cpu_32(rxdp->wb.upper.status_error);
2071 if (!(staterr & IXGBE_RXDADV_STAT_DD))
2076 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
2077 "staterr=0x%x data_len=%u",
2078 rxq->port_id, rxq->queue_id, rx_id, staterr,
2079 rte_le_to_cpu_16(rxd.wb.upper.length));
2082 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
2084 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed "
2085 "port_id=%u queue_id=%u",
2086 rxq->port_id, rxq->queue_id);
2088 rte_eth_devices[rxq->port_id].data->
2089 rx_mbuf_alloc_failed++;
2092 } else if (nb_hold > rxq->rx_free_thresh) {
2093 uint16_t next_rdt = rxq->rx_free_trigger;
2095 if (!ixgbe_rx_alloc_bufs(rxq, false)) {
2097 IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr,
2099 nb_hold -= rxq->rx_free_thresh;
2101 PMD_RX_LOG(DEBUG, "RX bulk alloc failed "
2102 "port_id=%u queue_id=%u",
2103 rxq->port_id, rxq->queue_id);
2105 rte_eth_devices[rxq->port_id].data->
2106 rx_mbuf_alloc_failed++;
2112 rxe = &sw_ring[rx_id];
2113 eop = staterr & IXGBE_RXDADV_STAT_EOP;
2115 next_id = rx_id + 1;
2116 if (next_id == rxq->nb_rx_desc)
2119 /* Prefetch next mbuf while processing current one. */
2120 rte_ixgbe_prefetch(sw_ring[next_id].mbuf);
2123 * When next RX descriptor is on a cache-line boundary,
2124 * prefetch the next 4 RX descriptors and the next 4 pointers
2127 if ((next_id & 0x3) == 0) {
2128 rte_ixgbe_prefetch(&rx_ring[next_id]);
2129 rte_ixgbe_prefetch(&sw_ring[next_id]);
2136 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
2138 * Update RX descriptor with the physical address of the
2139 * new data buffer of the new allocated mbuf.
2143 rxm->data_off = RTE_PKTMBUF_HEADROOM;
2144 rxdp->read.hdr_addr = 0;
2145 rxdp->read.pkt_addr = dma;
2150 * Set data length & data buffer address of mbuf.
2152 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
2153 rxm->data_len = data_len;
2158 * Get next descriptor index:
2159 * - For RSC it's in the NEXTP field.
2160 * - For a scattered packet - it's just a following
2163 if (ixgbe_rsc_count(&rxd))
2165 (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
2166 IXGBE_RXDADV_NEXTP_SHIFT;
2170 next_sc_entry = &sw_sc_ring[nextp_id];
2171 next_rxe = &sw_ring[nextp_id];
2172 rte_ixgbe_prefetch(next_rxe);
2175 sc_entry = &sw_sc_ring[rx_id];
2176 first_seg = sc_entry->fbuf;
2177 sc_entry->fbuf = NULL;
2180 * If this is the first buffer of the received packet,
2181 * set the pointer to the first mbuf of the packet and
2182 * initialize its context.
2183 * Otherwise, update the total length and the number of segments
2184 * of the current scattered packet, and update the pointer to
2185 * the last mbuf of the current packet.
2187 if (first_seg == NULL) {
2189 first_seg->pkt_len = data_len;
2190 first_seg->nb_segs = 1;
2192 first_seg->pkt_len += data_len;
2193 first_seg->nb_segs++;
2200 * If this is not the last buffer of the received packet, update
2201 * the pointer to the first mbuf at the NEXTP entry in the
2202 * sw_sc_ring and continue to parse the RX ring.
2204 if (!eop && next_rxe) {
2205 rxm->next = next_rxe->mbuf;
2206 next_sc_entry->fbuf = first_seg;
2210 /* Initialize the first mbuf of the returned packet */
2211 ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq, staterr);
2214 * Deal with the case when HW CRC strip is disabled.
2215 * That can't happen when LRO is enabled, but still could
2216 * happen for scattered RX mode.
2218 first_seg->pkt_len -= rxq->crc_len;
2219 if (unlikely(rxm->data_len <= rxq->crc_len)) {
2220 struct rte_mbuf *lp;
2222 for (lp = first_seg; lp->next != rxm; lp = lp->next)
2225 first_seg->nb_segs--;
2226 lp->data_len -= rxq->crc_len - rxm->data_len;
2228 rte_pktmbuf_free_seg(rxm);
2230 rxm->data_len -= rxq->crc_len;
2232 /* Prefetch data of first segment, if configured to do so. */
2233 rte_packet_prefetch((char *)first_seg->buf_addr +
2234 first_seg->data_off);
2237 * Store the mbuf address into the next entry of the array
2238 * of returned packets.
2240 rx_pkts[nb_rx++] = first_seg;
2244 * Record index of the next RX descriptor to probe.
2246 rxq->rx_tail = rx_id;
2249 * If the number of free RX descriptors is greater than the RX free
2250 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
2252 * Update the RDT with the value of the last processed RX descriptor
2253 * minus 1, to guarantee that the RDT register is never equal to the
2254 * RDH register, which creates a "full" ring situation from the
2255 * hardware point of view...
2257 if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {
2258 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
2259 "nb_hold=%u nb_rx=%u",
2260 rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
2263 IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr, prev_id);
2267 rxq->nb_rx_hold = nb_hold;
2272 ixgbe_recv_pkts_lro_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
2275 return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, false);
2279 ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
2282 return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, true);
2285 /*********************************************************************
2287 * Queue management functions
2289 **********************************************************************/
2291 static void __attribute__((cold))
2292 ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
2296 if (txq->sw_ring != NULL) {
2297 for (i = 0; i < txq->nb_tx_desc; i++) {
2298 if (txq->sw_ring[i].mbuf != NULL) {
2299 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
2300 txq->sw_ring[i].mbuf = NULL;
2306 static void __attribute__((cold))
2307 ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
2310 txq->sw_ring != NULL)
2311 rte_free(txq->sw_ring);
2314 static void __attribute__((cold))
2315 ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
2317 if (txq != NULL && txq->ops != NULL) {
2318 txq->ops->release_mbufs(txq);
2319 txq->ops->free_swring(txq);
2324 void __attribute__((cold))
2325 ixgbe_dev_tx_queue_release(void *txq)
2327 ixgbe_tx_queue_release(txq);
2330 /* (Re)set dynamic ixgbe_tx_queue fields to defaults */
2331 static void __attribute__((cold))
2332 ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
2334 static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
2335 struct ixgbe_tx_entry *txe = txq->sw_ring;
2338 /* Zero out HW ring memory */
2339 for (i = 0; i < txq->nb_tx_desc; i++) {
2340 txq->tx_ring[i] = zeroed_desc;
2343 /* Initialize SW ring entries */
2344 prev = (uint16_t) (txq->nb_tx_desc - 1);
2345 for (i = 0; i < txq->nb_tx_desc; i++) {
2346 volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
2348 txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD);
2351 txe[prev].next_id = i;
2355 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
2356 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2359 txq->nb_tx_used = 0;
2361 * Always allow 1 descriptor to be un-allocated to avoid
2362 * a H/W race condition
2364 txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
2365 txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
2367 memset((void *)&txq->ctx_cache, 0,
2368 IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
2371 static const struct ixgbe_txq_ops def_txq_ops = {
2372 .release_mbufs = ixgbe_tx_queue_release_mbufs,
2373 .free_swring = ixgbe_tx_free_swring,
2374 .reset = ixgbe_reset_tx_queue,
2377 /* Takes an ethdev and a queue and sets up the tx function to be used based on
2378 * the queue parameters. Used in tx_queue_setup by primary process and then
2379 * in dev_init by secondary process when attaching to an existing ethdev.
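/*
 * Selection summary (this simply mirrors the checks below):
 *  - no Tx offloads, no IPsec and tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST
 *    -> simple path (ixgbe_xmit_pkts_simple), upgraded to the vector path
 *       (ixgbe_xmit_pkts_vec) when tx_rs_thresh <=
 *       RTE_IXGBE_TX_MAX_FREE_BUF_SZ and the vector setup succeeds;
 *  - anything else -> full-featured path (ixgbe_xmit_pkts, with
 *    ixgbe_prep_pkts as the prepare callback).
 */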
2381 void __attribute__((cold))
2382 ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
2384 /* Use a simple Tx queue (no offloads, no multi segs) if possible */
2385 if ((txq->offloads == 0) &&
2386 #ifdef RTE_LIBRTE_SECURITY
2387 !(txq->using_ipsec) &&
2389 (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
2390 PMD_INIT_LOG(DEBUG, "Using simple tx code path");
2391 dev->tx_pkt_prepare = NULL;
2392 #ifdef RTE_IXGBE_INC_VECTOR
2393 if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
2394 (rte_eal_process_type() != RTE_PROC_PRIMARY ||
2395 ixgbe_txq_vec_setup(txq) == 0)) {
2396 PMD_INIT_LOG(DEBUG, "Vector tx enabled.");
2397 dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
2400 dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
2402 PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
2404 " - offloads = 0x%" PRIx64,
2407 " - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
2408 (unsigned long)txq->tx_rs_thresh,
2409 (unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
2410 dev->tx_pkt_burst = ixgbe_xmit_pkts;
2411 dev->tx_pkt_prepare = ixgbe_prep_pkts;
2416 ixgbe_get_tx_queue_offloads(struct rte_eth_dev *dev)
2424 ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
2426 uint64_t tx_offload_capa;
2427 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2430 DEV_TX_OFFLOAD_VLAN_INSERT |
2431 DEV_TX_OFFLOAD_IPV4_CKSUM |
2432 DEV_TX_OFFLOAD_UDP_CKSUM |
2433 DEV_TX_OFFLOAD_TCP_CKSUM |
2434 DEV_TX_OFFLOAD_SCTP_CKSUM |
2435 DEV_TX_OFFLOAD_TCP_TSO |
2436 DEV_TX_OFFLOAD_MULTI_SEGS;
2438 if (hw->mac.type == ixgbe_mac_82599EB ||
2439 hw->mac.type == ixgbe_mac_X540)
2440 tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
2442 if (hw->mac.type == ixgbe_mac_X550 ||
2443 hw->mac.type == ixgbe_mac_X550EM_x ||
2444 hw->mac.type == ixgbe_mac_X550EM_a)
2445 tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
2447 #ifdef RTE_LIBRTE_SECURITY
2448 if (dev->security_ctx)
2449 tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
2451 return tx_offload_capa;
2454 int __attribute__((cold))
2455 ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
2458 unsigned int socket_id,
2459 const struct rte_eth_txconf *tx_conf)
2461 const struct rte_memzone *tz;
2462 struct ixgbe_tx_queue *txq;
2463 struct ixgbe_hw *hw;
2464 uint16_t tx_rs_thresh, tx_free_thresh;
2467 PMD_INIT_FUNC_TRACE();
2468 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2470 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
2473 * Validate number of transmit descriptors.
2474 * It must not exceed hardware maximum, and must be multiple
2477 if (nb_desc % IXGBE_TXD_ALIGN != 0 ||
2478 (nb_desc > IXGBE_MAX_RING_DESC) ||
2479 (nb_desc < IXGBE_MIN_RING_DESC)) {
2484 * The following two parameters control the setting of the RS bit on
2485 * transmit descriptors.
2486 * TX descriptors will have their RS bit set after txq->tx_rs_thresh
2487 * descriptors have been used.
2488 * The TX descriptor ring will be cleaned after txq->tx_free_thresh
2489 * descriptors are used or if the number of descriptors required
2490 * to transmit a packet is greater than the number of free TX
2492 * The following constraints must be satisfied:
2493 * tx_rs_thresh must be greater than 0.
2494 * tx_rs_thresh must be less than the size of the ring minus 2.
2495 * tx_rs_thresh must be less than or equal to tx_free_thresh.
2496 * tx_rs_thresh must be a divisor of the ring size.
2497 * tx_free_thresh must be greater than 0.
2498 * tx_free_thresh must be less than the size of the ring minus 3.
2499 * tx_free_thresh + tx_rs_thresh must not exceed nb_desc.
2500 * One descriptor in the TX ring is used as a sentinel to avoid a
2501 * H/W race condition, hence the maximum threshold constraints.
2502 * When set to zero use default values.
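/*
 * Worked example (values chosen for illustration, not necessarily the
 * driver defaults): with nb_desc = 512, tx_rs_thresh = 32 and
 * tx_free_thresh = 32 every constraint above holds:
 *     32 > 0,  32 < 512 - 2,  32 <= 32,  512 % 32 == 0,
 *     32 < 512 - 3  and  32 + 32 <= 512.
 * In that configuration the RS bit is requested on descriptors 31, 63,
 * 95, ... of the ring, matching the tx_next_rs initialization done in
 * ixgbe_reset_tx_queue().
 */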
2504 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
2505 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
2506 /* force tx_rs_thresh to adapt to an aggressive tx_free_thresh */
2507 tx_rs_thresh = (DEFAULT_TX_RS_THRESH + tx_free_thresh > nb_desc) ?
2508 nb_desc - tx_free_thresh : DEFAULT_TX_RS_THRESH;
2509 if (tx_conf->tx_rs_thresh > 0)
2510 tx_rs_thresh = tx_conf->tx_rs_thresh;
2511 if (tx_rs_thresh + tx_free_thresh > nb_desc) {
2512 PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not "
2513 "exceed nb_desc. (tx_rs_thresh=%u "
2514 "tx_free_thresh=%u nb_desc=%u port = %d queue=%d)",
2515 (unsigned int)tx_rs_thresh,
2516 (unsigned int)tx_free_thresh,
2517 (unsigned int)nb_desc,
2518 (int)dev->data->port_id,
2522 if (tx_rs_thresh >= (nb_desc - 2)) {
2523 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number "
2524 "of TX descriptors minus 2. (tx_rs_thresh=%u "
2525 "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2526 (int)dev->data->port_id, (int)queue_idx);
2529 if (tx_rs_thresh > DEFAULT_TX_RS_THRESH) {
2530 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less or equal than %u. "
2531 "(tx_rs_thresh=%u port=%d queue=%d)",
2532 DEFAULT_TX_RS_THRESH, (unsigned int)tx_rs_thresh,
2533 (int)dev->data->port_id, (int)queue_idx);
2536 if (tx_free_thresh >= (nb_desc - 3)) {
2537 PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the number of "
2539 "TX descriptors minus 3. (tx_free_thresh=%u "
2540 "port=%d queue=%d)",
2541 (unsigned int)tx_free_thresh,
2542 (int)dev->data->port_id, (int)queue_idx);
2545 if (tx_rs_thresh > tx_free_thresh) {
2546 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to "
2547 "tx_free_thresh. (tx_free_thresh=%u "
2548 "tx_rs_thresh=%u port=%d queue=%d)",
2549 (unsigned int)tx_free_thresh,
2550 (unsigned int)tx_rs_thresh,
2551 (int)dev->data->port_id,
2555 if ((nb_desc % tx_rs_thresh) != 0) {
2556 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
2557 "number of TX descriptors. (tx_rs_thresh=%u "
2558 "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2559 (int)dev->data->port_id, (int)queue_idx);
2564 * If tx_rs_thresh is greater than 1, then TX WTHRESH should be
2565 * set to 0. If WTHRESH is greater than zero, the RS bit is ignored
2566 * by the NIC and all descriptors are written back after the NIC
2567 * accumulates WTHRESH descriptors.
2569 if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
2570 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
2571 "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
2572 "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2573 (int)dev->data->port_id, (int)queue_idx);
2577 /* Free memory prior to re-allocation if needed... */
2578 if (dev->data->tx_queues[queue_idx] != NULL) {
2579 ixgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
2580 dev->data->tx_queues[queue_idx] = NULL;
2583 /* First allocate the tx queue data structure */
2584 txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue),
2585 RTE_CACHE_LINE_SIZE, socket_id);
2590 * Allocate TX ring hardware descriptors. A memzone large enough to
2591 * handle the maximum ring size is allocated in order to allow for
2592 * resizing in later calls to the queue setup function.
2594 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
2595 sizeof(union ixgbe_adv_tx_desc) * IXGBE_MAX_RING_DESC,
2596 IXGBE_ALIGN, socket_id);
2598 ixgbe_tx_queue_release(txq);
2602 txq->nb_tx_desc = nb_desc;
2603 txq->tx_rs_thresh = tx_rs_thresh;
2604 txq->tx_free_thresh = tx_free_thresh;
2605 txq->pthresh = tx_conf->tx_thresh.pthresh;
2606 txq->hthresh = tx_conf->tx_thresh.hthresh;
2607 txq->wthresh = tx_conf->tx_thresh.wthresh;
2608 txq->queue_id = queue_idx;
2609 txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2610 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2611 txq->port_id = dev->data->port_id;
2612 txq->offloads = offloads;
2613 txq->ops = &def_txq_ops;
2614 txq->tx_deferred_start = tx_conf->tx_deferred_start;
2615 #ifdef RTE_LIBRTE_SECURITY
2616 txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
2617 DEV_TX_OFFLOAD_SECURITY);
2621 * Use VFTDT instead of TDT if a virtual function is detected
2623 if (hw->mac.type == ixgbe_mac_82599_vf ||
2624 hw->mac.type == ixgbe_mac_X540_vf ||
2625 hw->mac.type == ixgbe_mac_X550_vf ||
2626 hw->mac.type == ixgbe_mac_X550EM_x_vf ||
2627 hw->mac.type == ixgbe_mac_X550EM_a_vf)
2628 txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx));
2630 txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
2632 txq->tx_ring_phys_addr = tz->iova;
2633 txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;
2635 /* Allocate software ring */
2636 txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
2637 sizeof(struct ixgbe_tx_entry) * nb_desc,
2638 RTE_CACHE_LINE_SIZE, socket_id);
2639 if (txq->sw_ring == NULL) {
2640 ixgbe_tx_queue_release(txq);
2643 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
2644 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
2646 /* set up vector or scalar TX function as appropriate */
2647 ixgbe_set_tx_function(dev, txq);
2649 txq->ops->reset(txq);
2651 dev->data->tx_queues[queue_idx] = txq;
2658 * ixgbe_free_sc_cluster - free the not-yet-completed scattered cluster
2660 * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
2661 * in the sw_rsc_ring is not set to NULL but rather points to the next
2662 * mbuf of this RSC aggregation (that has not been completed yet and still
2663 * resides on the HW ring). So, instead of calling rte_pktmbuf_free(), we
2664 * just free the first "nb_segs" segments of the cluster explicitly by calling
2665 * rte_pktmbuf_free_seg() on each of them.
2667 * @m scattered cluster head
2669 static void __attribute__((cold))
2670 ixgbe_free_sc_cluster(struct rte_mbuf *m)
2672 uint16_t i, nb_segs = m->nb_segs;
2673 struct rte_mbuf *next_seg;
2675 for (i = 0; i < nb_segs; i++) {
2677 rte_pktmbuf_free_seg(m);
2682 static void __attribute__((cold))
2683 ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
2687 #ifdef RTE_IXGBE_INC_VECTOR
2688 /* SSE Vector driver has a different way of releasing mbufs. */
2689 if (rxq->rx_using_sse) {
2690 ixgbe_rx_queue_release_mbufs_vec(rxq);
2695 if (rxq->sw_ring != NULL) {
2696 for (i = 0; i < rxq->nb_rx_desc; i++) {
2697 if (rxq->sw_ring[i].mbuf != NULL) {
2698 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
2699 rxq->sw_ring[i].mbuf = NULL;
2702 if (rxq->rx_nb_avail) {
2703 for (i = 0; i < rxq->rx_nb_avail; ++i) {
2704 struct rte_mbuf *mb;
2706 mb = rxq->rx_stage[rxq->rx_next_avail + i];
2707 rte_pktmbuf_free_seg(mb);
2709 rxq->rx_nb_avail = 0;
2713 if (rxq->sw_sc_ring)
2714 for (i = 0; i < rxq->nb_rx_desc; i++)
2715 if (rxq->sw_sc_ring[i].fbuf) {
2716 ixgbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
2717 rxq->sw_sc_ring[i].fbuf = NULL;
2721 static void __attribute__((cold))
2722 ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)
2725 ixgbe_rx_queue_release_mbufs(rxq);
2726 rte_free(rxq->sw_ring);
2727 rte_free(rxq->sw_sc_ring);
2732 void __attribute__((cold))
2733 ixgbe_dev_rx_queue_release(void *rxq)
2735 ixgbe_rx_queue_release(rxq);
2739 * Check if Rx Burst Bulk Alloc function can be used.
2741 * 0: the preconditions are satisfied and the bulk allocation function
2743 * -EINVAL: the preconditions are NOT satisfied and the default Rx burst
2744 * function must be used.
2746 static inline int __attribute__((cold))
2747 check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
2752 * Make sure the following pre-conditions are satisfied:
2753 * rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST
2754 * rxq->rx_free_thresh < rxq->nb_rx_desc
2755 * (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
2756 * Scattered packets are not supported. This should be checked
2757 * outside of this function.
2759 if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) {
2760 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2761 "rxq->rx_free_thresh=%d, "
2762 "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
2763 rxq->rx_free_thresh, RTE_PMD_IXGBE_RX_MAX_BURST);
2765 } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
2766 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2767 "rxq->rx_free_thresh=%d, "
2768 "rxq->nb_rx_desc=%d",
2769 rxq->rx_free_thresh, rxq->nb_rx_desc);
2771 } else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) {
2772 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2773 "rxq->nb_rx_desc=%d, "
2774 "rxq->rx_free_thresh=%d",
2775 rxq->nb_rx_desc, rxq->rx_free_thresh);
2782 /* Reset dynamic ixgbe_rx_queue fields back to defaults */
2783 static void __attribute__((cold))
2784 ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
2786 static const union ixgbe_adv_rx_desc zeroed_desc = {{0}};
2788 uint16_t len = rxq->nb_rx_desc;
2791 * By default, the Rx queue setup function allocates enough memory for
2792 * IXGBE_MAX_RING_DESC. The Rx Burst bulk allocation function requires
2793 * extra memory at the end of the descriptor ring to be zero'd out.
2795 if (adapter->rx_bulk_alloc_allowed)
2796 /* zero out extra memory */
2797 len += RTE_PMD_IXGBE_RX_MAX_BURST;
2800 * Zero out HW ring memory. Zero out extra memory at the end of
2801 * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
2802 * reads extra memory as zeros.
2804 for (i = 0; i < len; i++) {
2805 rxq->rx_ring[i] = zeroed_desc;
2809 * initialize extra software ring entries. Space for these extra
2810 * entries is always allocated
2812 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
2813 for (i = rxq->nb_rx_desc; i < len; ++i) {
2814 rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
2817 rxq->rx_nb_avail = 0;
2818 rxq->rx_next_avail = 0;
2819 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
2821 rxq->nb_rx_hold = 0;
2822 rxq->pkt_first_seg = NULL;
2823 rxq->pkt_last_seg = NULL;
2825 #ifdef RTE_IXGBE_INC_VECTOR
2826 rxq->rxrearm_start = 0;
2827 rxq->rxrearm_nb = 0;
2832 ixgbe_is_vf(struct rte_eth_dev *dev)
2834 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2836 switch (hw->mac.type) {
2837 case ixgbe_mac_82599_vf:
2838 case ixgbe_mac_X540_vf:
2839 case ixgbe_mac_X550_vf:
2840 case ixgbe_mac_X550EM_x_vf:
2841 case ixgbe_mac_X550EM_a_vf:
2849 ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev)
2851 uint64_t offloads = 0;
2852 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2854 if (hw->mac.type != ixgbe_mac_82598EB)
2855 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
2861 ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
2864 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2866 offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
2867 DEV_RX_OFFLOAD_UDP_CKSUM |
2868 DEV_RX_OFFLOAD_TCP_CKSUM |
2869 DEV_RX_OFFLOAD_KEEP_CRC |
2870 DEV_RX_OFFLOAD_JUMBO_FRAME |
2871 DEV_RX_OFFLOAD_VLAN_FILTER |
2872 DEV_RX_OFFLOAD_SCATTER;
2874 if (hw->mac.type == ixgbe_mac_82598EB)
2875 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
2877 if (ixgbe_is_vf(dev) == 0)
2878 offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
2881 * RSC is only supported by 82599, x540 and x550 PF devices in a non-SR-IOV
2884 if ((hw->mac.type == ixgbe_mac_82599EB ||
2885 hw->mac.type == ixgbe_mac_X540 ||
2886 hw->mac.type == ixgbe_mac_X550) &&
2887 !RTE_ETH_DEV_SRIOV(dev).active)
2888 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
2890 if (hw->mac.type == ixgbe_mac_82599EB ||
2891 hw->mac.type == ixgbe_mac_X540)
2892 offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
2894 if (hw->mac.type == ixgbe_mac_X550 ||
2895 hw->mac.type == ixgbe_mac_X550EM_x ||
2896 hw->mac.type == ixgbe_mac_X550EM_a)
2897 offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
2899 #ifdef RTE_LIBRTE_SECURITY
2900 if (dev->security_ctx)
2901 offloads |= DEV_RX_OFFLOAD_SECURITY;
2907 int __attribute__((cold))
2908 ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
2911 unsigned int socket_id,
2912 const struct rte_eth_rxconf *rx_conf,
2913 struct rte_mempool *mp)
2915 const struct rte_memzone *rz;
2916 struct ixgbe_rx_queue *rxq;
2917 struct ixgbe_hw *hw;
2919 struct ixgbe_adapter *adapter =
2920 (struct ixgbe_adapter *)dev->data->dev_private;
2923 PMD_INIT_FUNC_TRACE();
2924 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2926 offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
2929 * Validate number of receive descriptors.
2930 * It must not exceed hardware maximum, and must be multiple
2933 if (nb_desc % IXGBE_RXD_ALIGN != 0 ||
2934 (nb_desc > IXGBE_MAX_RING_DESC) ||
2935 (nb_desc < IXGBE_MIN_RING_DESC)) {
2939 /* Free memory prior to re-allocation if needed... */
2940 if (dev->data->rx_queues[queue_idx] != NULL) {
2941 ixgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
2942 dev->data->rx_queues[queue_idx] = NULL;
2945 /* First allocate the rx queue data structure */
2946 rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct ixgbe_rx_queue),
2947 RTE_CACHE_LINE_SIZE, socket_id);
2951 rxq->nb_rx_desc = nb_desc;
2952 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
2953 rxq->queue_id = queue_idx;
2954 rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2955 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2956 rxq->port_id = dev->data->port_id;
2957 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
2958 rxq->crc_len = ETHER_CRC_LEN;
2961 rxq->drop_en = rx_conf->rx_drop_en;
2962 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
2963 rxq->offloads = offloads;
2966 * The packet type in RX descriptor is different for different NICs.
2967 * Some bits are used for x550 but reserved for other NICs.
2968 * So set different masks for different NICs.
2970 if (hw->mac.type == ixgbe_mac_X550 ||
2971 hw->mac.type == ixgbe_mac_X550EM_x ||
2972 hw->mac.type == ixgbe_mac_X550EM_a ||
2973 hw->mac.type == ixgbe_mac_X550_vf ||
2974 hw->mac.type == ixgbe_mac_X550EM_x_vf ||
2975 hw->mac.type == ixgbe_mac_X550EM_a_vf)
2976 rxq->pkt_type_mask = IXGBE_PACKET_TYPE_MASK_X550;
2978 rxq->pkt_type_mask = IXGBE_PACKET_TYPE_MASK_82599;
2981 * Allocate RX ring hardware descriptors. A memzone large enough to
2982 * handle the maximum ring size is allocated in order to allow for
2983 * resizing in later calls to the queue setup function.
2985 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
2986 RX_RING_SZ, IXGBE_ALIGN, socket_id);
2988 ixgbe_rx_queue_release(rxq);
2993 * Zero init all the descriptors in the ring.
2995 memset(rz->addr, 0, RX_RING_SZ);
2998 * Set up VFRDT/VFRDH instead of RDT/RDH for a Virtual Function
3000 if (hw->mac.type == ixgbe_mac_82599_vf ||
3001 hw->mac.type == ixgbe_mac_X540_vf ||
3002 hw->mac.type == ixgbe_mac_X550_vf ||
3003 hw->mac.type == ixgbe_mac_X550EM_x_vf ||
3004 hw->mac.type == ixgbe_mac_X550EM_a_vf) {
3006 IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx));
3008 IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDH(queue_idx));
3011 IXGBE_PCI_REG_ADDR(hw, IXGBE_RDT(rxq->reg_idx));
3013 IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx));
3016 rxq->rx_ring_phys_addr = rz->iova;
3017 rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
3020 * Certain constraints must be met in order to use the bulk buffer
3021 * allocation Rx burst function. If any of Rx queues doesn't meet them
3022 * the feature should be disabled for the whole port.
3024 if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
3025 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc "
3026 "preconditions - canceling the feature for "
3027 "the whole port[%d]",
3028 rxq->queue_id, rxq->port_id);
3029 adapter->rx_bulk_alloc_allowed = false;
3033 * Allocate software ring. Allow for space at the end of the
3034 * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
3035 * function does not access an invalid memory region.
3038 if (adapter->rx_bulk_alloc_allowed)
3039 len += RTE_PMD_IXGBE_RX_MAX_BURST;
3041 rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
3042 sizeof(struct ixgbe_rx_entry) * len,
3043 RTE_CACHE_LINE_SIZE, socket_id);
3044 if (!rxq->sw_ring) {
3045 ixgbe_rx_queue_release(rxq);
3050 * Always allocate even if it's not going to be needed in order to
3051 * simplify the code.
3053 * This ring is used in LRO and Scattered Rx cases and Scattered Rx may
3054 * be requested in ixgbe_dev_rx_init(), which is called later from
3058 rte_zmalloc_socket("rxq->sw_sc_ring",
3059 sizeof(struct ixgbe_scattered_rx_entry) * len,
3060 RTE_CACHE_LINE_SIZE, socket_id);
3061 if (!rxq->sw_sc_ring) {
3062 ixgbe_rx_queue_release(rxq);
3066 PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p "
3067 "dma_addr=0x%"PRIx64,
3068 rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
3069 rxq->rx_ring_phys_addr);
3071 if (!rte_is_power_of_2(nb_desc)) {
3072 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
3073 "preconditions - canceling the feature for "
3074 "the whole port[%d]",
3075 rxq->queue_id, rxq->port_id);
3076 adapter->rx_vec_allowed = false;
3078 ixgbe_rxq_vec_setup(rxq);
3080 dev->data->rx_queues[queue_idx] = rxq;
3082 ixgbe_reset_rx_queue(adapter, rxq);
3088 ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
3090 #define IXGBE_RXQ_SCAN_INTERVAL 4
3091 volatile union ixgbe_adv_rx_desc *rxdp;
3092 struct ixgbe_rx_queue *rxq;
3095 rxq = dev->data->rx_queues[rx_queue_id];
3096 rxdp = &(rxq->rx_ring[rxq->rx_tail]);
3098 while ((desc < rxq->nb_rx_desc) &&
3099 (rxdp->wb.upper.status_error &
3100 rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) {
3101 desc += IXGBE_RXQ_SCAN_INTERVAL;
3102 rxdp += IXGBE_RXQ_SCAN_INTERVAL;
3103 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
3104 rxdp = &(rxq->rx_ring[rxq->rx_tail +
3105 desc - rxq->nb_rx_desc]);
3112 ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
3114 volatile union ixgbe_adv_rx_desc *rxdp;
3115 struct ixgbe_rx_queue *rxq = rx_queue;
3118 if (unlikely(offset >= rxq->nb_rx_desc))
3120 desc = rxq->rx_tail + offset;
3121 if (desc >= rxq->nb_rx_desc)
3122 desc -= rxq->nb_rx_desc;
3124 rxdp = &rxq->rx_ring[desc];
3125 return !!(rxdp->wb.upper.status_error &
3126 rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD));
3130 ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
3132 struct ixgbe_rx_queue *rxq = rx_queue;
3133 volatile uint32_t *status;
3134 uint32_t nb_hold, desc;
3136 if (unlikely(offset >= rxq->nb_rx_desc))
3139 #ifdef RTE_IXGBE_INC_VECTOR
3140 if (rxq->rx_using_sse)
3141 nb_hold = rxq->rxrearm_nb;
3144 nb_hold = rxq->nb_rx_hold;
3145 if (offset >= rxq->nb_rx_desc - nb_hold)
3146 return RTE_ETH_RX_DESC_UNAVAIL;
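/*
 * Example with made-up numbers: nb_rx_desc = 512 and nb_hold = 37 means
 * that 37 processed descriptors have not been handed back to the NIC yet,
 * so offsets 475..511 cannot hold a new packet and report
 * RTE_ETH_RX_DESC_UNAVAIL above.
 */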
3148 desc = rxq->rx_tail + offset;
3149 if (desc >= rxq->nb_rx_desc)
3150 desc -= rxq->nb_rx_desc;
3152 status = &rxq->rx_ring[desc].wb.upper.status_error;
3153 if (*status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))
3154 return RTE_ETH_RX_DESC_DONE;
3156 return RTE_ETH_RX_DESC_AVAIL;
3160 ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
3162 struct ixgbe_tx_queue *txq = tx_queue;
3163 volatile uint32_t *status;
3166 if (unlikely(offset >= txq->nb_tx_desc))
3169 desc = txq->tx_tail + offset;
3170 /* go to next desc that has the RS bit */
3171 desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) * txq->tx_rs_thresh;
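/*
 * Arithmetic sketch (illustrative numbers): with tx_rs_thresh = 32 and
 * tx_tail + offset = 40, the expression above evaluates to
 * ((40 + 31) / 32) * 32 = 64, i.e. the index is rounded up to the next
 * multiple of tx_rs_thresh before the DD bit is sampled.
 */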
3173 if (desc >= txq->nb_tx_desc) {
3174 desc -= txq->nb_tx_desc;
3175 if (desc >= txq->nb_tx_desc)
3176 desc -= txq->nb_tx_desc;
3179 status = &txq->tx_ring[desc].wb.status;
3180 if (*status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD))
3181 return RTE_ETH_TX_DESC_DONE;
3183 return RTE_ETH_TX_DESC_FULL;
3187 * Set up link loopback for X540/X550 mode Tx->Rx.
3189 static inline void __attribute__((cold))
3190 ixgbe_setup_loopback_link_x540_x550(struct ixgbe_hw *hw, bool enable)
3193 PMD_INIT_FUNC_TRACE();
3195 u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
3197 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
3198 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
3199 macc = IXGBE_READ_REG(hw, IXGBE_MACC);
3202 /* datasheet 15.2.1: disable AUTONEG (PHY Bit 7.0.C) */
3203 autoneg_reg |= IXGBE_MII_AUTONEG_ENABLE;
3204 /* datasheet 15.2.1: MACC.FLU = 1 (force link up) */
3205 macc |= IXGBE_MACC_FLU;
3207 autoneg_reg &= ~IXGBE_MII_AUTONEG_ENABLE;
3208 macc &= ~IXGBE_MACC_FLU;
3211 hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
3212 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
3214 IXGBE_WRITE_REG(hw, IXGBE_MACC, macc);
3217 void __attribute__((cold))
3218 ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
3221 struct ixgbe_adapter *adapter =
3222 (struct ixgbe_adapter *)dev->data->dev_private;
3223 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3225 PMD_INIT_FUNC_TRACE();
3227 for (i = 0; i < dev->data->nb_tx_queues; i++) {
3228 struct ixgbe_tx_queue *txq = dev->data->tx_queues[i];
3231 txq->ops->release_mbufs(txq);
3232 txq->ops->reset(txq);
3236 for (i = 0; i < dev->data->nb_rx_queues; i++) {
3237 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
3240 ixgbe_rx_queue_release_mbufs(rxq);
3241 ixgbe_reset_rx_queue(adapter, rxq);
3244 /* If loopback mode was enabled, reconfigure the link accordingly */
3245 if (dev->data->dev_conf.lpbk_mode != 0) {
3246 if (hw->mac.type == ixgbe_mac_X540 ||
3247 hw->mac.type == ixgbe_mac_X550 ||
3248 hw->mac.type == ixgbe_mac_X550EM_x ||
3249 hw->mac.type == ixgbe_mac_X550EM_a)
3250 ixgbe_setup_loopback_link_x540_x550(hw, false);
3255 ixgbe_dev_free_queues(struct rte_eth_dev *dev)
3259 PMD_INIT_FUNC_TRACE();
3261 for (i = 0; i < dev->data->nb_rx_queues; i++) {
3262 ixgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
3263 dev->data->rx_queues[i] = NULL;
3265 dev->data->nb_rx_queues = 0;
3267 for (i = 0; i < dev->data->nb_tx_queues; i++) {
3268 ixgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
3269 dev->data->tx_queues[i] = NULL;
3271 dev->data->nb_tx_queues = 0;
3274 /*********************************************************************
3276 * Device RX/TX init functions
3278 **********************************************************************/
3281 * Receive Side Scaling (RSS)
3282 * See section 7.1.2.8 in the following document:
3283 * "Intel 82599 10 GbE Controller Datasheet" - Revision 2.1 October 2009
3286 * The source and destination IP addresses of the IP header and the source
3287 * and destination ports of TCP/UDP headers, if any, of received packets are
3288 * hashed against a configurable random key to compute a 32-bit RSS hash result.
3289 * The seven (7) LSBs of the 32-bit hash result are used as an index into a
3290 * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
3291 * RSS output index which is used as the RX queue index where to store the
3293 * The following output is supplied in the RX write-back descriptor:
3294 * - 32-bit result of the Microsoft RSS hash function,
3295 * - 4-bit RSS type field.
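/*
 * Sketch of the resulting queue selection (reta[] stands for the 128-entry
 * redirection table programmed in ixgbe_rss_configure(); the variable names
 * are illustrative only):
 *
 *     uint32_t hash  = rte_le_to_cpu_32(rxd.wb.lower.hi_dword.rss);
 *     uint8_t  queue = reta[hash & 0x7F];    (the 7 LSBs index the RETA)
 *
 * The hash itself is also handed to the application in mbuf->hash.rss when
 * PKT_RX_RSS_HASH is set in ol_flags.
 */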
3299 * RSS random key supplied in section 7.1.2.8.3 of the Intel 82599 datasheet.
3300 * Used as the default key.
3302 static uint8_t rss_intel_key[40] = {
3303 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
3304 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
3305 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
3306 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
3307 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
3311 ixgbe_rss_disable(struct rte_eth_dev *dev)
3313 struct ixgbe_hw *hw;
3317 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3318 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3319 mrqc = IXGBE_READ_REG(hw, mrqc_reg);
3320 mrqc &= ~IXGBE_MRQC_RSSEN;
3321 IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
3325 ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
3335 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3336 rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
3338 hash_key = rss_conf->rss_key;
3339 if (hash_key != NULL) {
3340 /* Fill in RSS hash key */
3341 for (i = 0; i < 10; i++) {
3342 rss_key = hash_key[(i * 4)];
3343 rss_key |= hash_key[(i * 4) + 1] << 8;
3344 rss_key |= hash_key[(i * 4) + 2] << 16;
3345 rss_key |= hash_key[(i * 4) + 3] << 24;
3346 IXGBE_WRITE_REG_ARRAY(hw, rssrk_reg, i, rss_key);
3350 /* Set configured hashing protocols in MRQC register */
3351 rss_hf = rss_conf->rss_hf;
3352 mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */
3353 if (rss_hf & ETH_RSS_IPV4)
3354 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
3355 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
3356 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
3357 if (rss_hf & ETH_RSS_IPV6)
3358 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
3359 if (rss_hf & ETH_RSS_IPV6_EX)
3360 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
3361 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
3362 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3363 if (rss_hf & ETH_RSS_IPV6_TCP_EX)
3364 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
3365 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
3366 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3367 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
3368 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3369 if (rss_hf & ETH_RSS_IPV6_UDP_EX)
3370 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
3371 IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
3375 ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
3376 struct rte_eth_rss_conf *rss_conf)
3378 struct ixgbe_hw *hw;
3383 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3385 if (!ixgbe_rss_update_sp(hw->mac.type)) {
3386 PMD_DRV_LOG(ERR, "RSS hash update is not supported on this "
3390 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3393 * Excerpt from section 7.1.2.8 Receive-Side Scaling (RSS):
3394 * "RSS enabling cannot be done dynamically while it must be
3395 * preceded by a software reset"
3396 * Before changing anything, first check that the update RSS operation
3397 * does not attempt to disable RSS, if RSS was enabled at
3398 * initialization time, or does not attempt to enable RSS, if RSS was
3399 * disabled at initialization time.
3401 rss_hf = rss_conf->rss_hf & IXGBE_RSS_OFFLOAD_ALL;
3402 mrqc = IXGBE_READ_REG(hw, mrqc_reg);
3403 if (!(mrqc & IXGBE_MRQC_RSSEN)) { /* RSS disabled */
3404 if (rss_hf != 0) /* Enable RSS */
3406 return 0; /* Nothing to do */
3409 if (rss_hf == 0) /* Disable RSS */
3411 ixgbe_hw_rss_hash_set(hw, rss_conf);
3416 ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
3417 struct rte_eth_rss_conf *rss_conf)
3419 struct ixgbe_hw *hw;
3428 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3429 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3430 rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
3431 hash_key = rss_conf->rss_key;
3432 if (hash_key != NULL) {
3433 /* Return RSS hash key */
3434 for (i = 0; i < 10; i++) {
3435 rss_key = IXGBE_READ_REG_ARRAY(hw, rssrk_reg, i);
3436 hash_key[(i * 4)] = rss_key & 0x000000FF;
3437 hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
3438 hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
3439 hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
3443 /* Get RSS functions configured in MRQC register */
3444 mrqc = IXGBE_READ_REG(hw, mrqc_reg);
3445 if ((mrqc & IXGBE_MRQC_RSSEN) == 0) { /* RSS is disabled */
3446 rss_conf->rss_hf = 0;
3450 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4)
3451 rss_hf |= ETH_RSS_IPV4;
3452 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP)
3453 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
3454 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6)
3455 rss_hf |= ETH_RSS_IPV6;
3456 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX)
3457 rss_hf |= ETH_RSS_IPV6_EX;
3458 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP)
3459 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
3460 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP)
3461 rss_hf |= ETH_RSS_IPV6_TCP_EX;
3462 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP)
3463 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
3464 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP)
3465 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
3466 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP)
3467 rss_hf |= ETH_RSS_IPV6_UDP_EX;
3468 rss_conf->rss_hf = rss_hf;
3473 ixgbe_rss_configure(struct rte_eth_dev *dev)
3475 struct rte_eth_rss_conf rss_conf;
3476 struct ixgbe_adapter *adapter;
3477 struct ixgbe_hw *hw;
3481 uint16_t sp_reta_size;
3484 PMD_INIT_FUNC_TRACE();
3485 adapter = (struct ixgbe_adapter *)dev->data->dev_private;
3486 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3488 sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
3491 * Fill in redirection table
3492 * The byte-swap is needed because NIC registers are in
3493 * little-endian order.
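/*
 * Packing example: four consecutive RETA entries share one 32-bit register.
 * After four of the shift-or steps below with queue indexes q0, q1, q2, q3
 * the accumulator holds (q0 << 24) | (q1 << 16) | (q2 << 8) | q3, and the
 * byte-swap mentioned above, applied at the register write, moves q0 into
 * the least significant byte, which is the order the NIC expects.
 */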
3495 if (adapter->rss_reta_updated == 0) {
3497 for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
3498 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
3500 if (j == dev->data->nb_rx_queues)
3502 reta = (reta << 8) | j;
3504 IXGBE_WRITE_REG(hw, reta_reg,
3510 * Configure the RSS key and the RSS protocols used to compute
3511 * the RSS hash of input packets.
3513 rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
3514 if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
3515 ixgbe_rss_disable(dev);
3518 if (rss_conf.rss_key == NULL)
3519 rss_conf.rss_key = rss_intel_key; /* Default hash key */
3520 ixgbe_hw_rss_hash_set(hw, &rss_conf);
3523 #define NUM_VFTA_REGISTERS 128
3524 #define NIC_RX_BUFFER_SIZE 0x200
3525 #define X550_RX_BUFFER_SIZE 0x180
3528 ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
3530 struct rte_eth_vmdq_dcb_conf *cfg;
3531 struct ixgbe_hw *hw;
3532 enum rte_eth_nb_pools num_pools;
3533 uint32_t mrqc, vt_ctl, queue_mapping, vlanctrl;
3535 uint8_t nb_tcs; /* number of traffic classes */
3538 PMD_INIT_FUNC_TRACE();
3539 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3540 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
3541 num_pools = cfg->nb_queue_pools;
3542 /* Check we have a valid number of pools */
3543 if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
3544 ixgbe_rss_disable(dev);
3547 /* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
3548 nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
3552 * split rx buffer up into sections, each for 1 traffic class
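/*
 * Illustrative split: with 8 traffic classes NIC_RX_BUFFER_SIZE (0x200)
 * yields pbsize = 0x40 per TC on 82599/X540, and X550_RX_BUFFER_SIZE
 * (0x180) yields pbsize = 0x30 per TC on the X550 family.
 */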
3554 switch (hw->mac.type) {
3555 case ixgbe_mac_X550:
3556 case ixgbe_mac_X550EM_x:
3557 case ixgbe_mac_X550EM_a:
3558 pbsize = (uint16_t)(X550_RX_BUFFER_SIZE / nb_tcs);
3561 pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
3564 for (i = 0; i < nb_tcs; i++) {
3565 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
3567 rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
3568 /* clear 10 bits. */
3569 rxpbsize |= (pbsize << IXGBE_RXPBSIZE_SHIFT); /* set value */
3570 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
3572 /* zero alloc all unused TCs */
3573 for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3574 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
3576 rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
3577 /* clear 10 bits. */
3578 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
3581 /* MRQC: enable vmdq and dcb */
3582 mrqc = (num_pools == ETH_16_POOLS) ?
3583 IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN;
3584 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3586 /* PFVTCTL: turn on virtualisation and set the default pool */
3587 vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
3588 if (cfg->enable_default_pool) {
3589 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
3591 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
3594 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
3596 /* RTRUP2TC: mapping user priorities to traffic classes (TCs) */
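/*
 * Layout sketch: RTRUP2TC carries one 3-bit TC number per user priority,
 * UP0 in bits 2:0, UP1 in bits 5:3, ..., UP7 in bits 23:21.  With the
 * illustrative mapping UP0-UP3 -> TC0 and UP4-UP7 -> TC1 the loop below
 * produces queue_mapping = 0x00249000.
 */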
3598 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
3600 * mapping is done with 3 bits per priority,
3601 * so shift by i*3 each time
3603 queue_mapping |= ((cfg->dcb_tc[i] & 0x07) << (i * 3));
3605 IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, queue_mapping);
3607 /* RTRPCS: DCB related */
3608 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, IXGBE_RMCS_RRM);
3610 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
3611 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3612 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
3613 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
3615 /* VFTA - enable all vlan filters */
3616 for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
3617 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
3620 /* VFRE: pool enabling for receive - 16 or 32 */
3621 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0),
3622 num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
3625 * MPSAR - allow pools to read specific mac addresses
3626 * In this case, all pools should be able to read from mac addr 0
3628 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), 0xFFFFFFFF);
3629 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), 0xFFFFFFFF);
3631 /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
3632 for (i = 0; i < cfg->nb_pool_maps; i++) {
3633 /* set vlan id in VF register and set the valid bit */
3634 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN |
3635 (cfg->pool_map[i].vlan_id & 0xFFF)));
3637 * Put the allowed pools in VFB reg. As we only have 16 or 32
3638 * pools, we only need to use the first half of the register
3641 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), cfg->pool_map[i].pools);
3646 * ixgbe_dcb_tx_hw_config - Configure general DCB TX parameters
3647 * @dev: pointer to eth_dev structure
3648 * @dcb_config: pointer to ixgbe_dcb_config structure
3651 ixgbe_dcb_tx_hw_config(struct rte_eth_dev *dev,
3652 struct ixgbe_dcb_config *dcb_config)
3655 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3657 PMD_INIT_FUNC_TRACE();
3658 if (hw->mac.type != ixgbe_mac_82598EB) {
3659 /* Disable the Tx desc arbiter so that MTQC can be changed */
3660 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3661 reg |= IXGBE_RTTDCS_ARBDIS;
3662 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3664 /* Enable DCB for Tx with 8 TCs */
3665 if (dcb_config->num_tcs.pg_tcs == 8) {
3666 reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3668 reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3670 if (dcb_config->vt_mode)
3671 reg |= IXGBE_MTQC_VT_ENA;
3672 IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
3674 /* Enable the Tx desc arbiter */
3675 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3676 reg &= ~IXGBE_RTTDCS_ARBDIS;
3677 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3679 /* Enable Security TX Buffer IFG for DCB */
3680 reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
3681 reg |= IXGBE_SECTX_DCB;
3682 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
3687 * ixgbe_vmdq_dcb_hw_tx_config - Configure general VMDQ+DCB TX parameters
3688 * @dev: pointer to rte_eth_dev structure
3689 * @dcb_config: pointer to ixgbe_dcb_config structure
3692 ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
3693 struct ixgbe_dcb_config *dcb_config)
3695 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3696 &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
3697 struct ixgbe_hw *hw =
3698 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3700 PMD_INIT_FUNC_TRACE();
3701 if (hw->mac.type != ixgbe_mac_82598EB)
3702 /* PF VF Transmit Enable */
3703 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0),
3704 vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
3706 /* Configure general DCB TX parameters */
3707 ixgbe_dcb_tx_hw_config(dev, dcb_config);
3711 ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
3712 struct ixgbe_dcb_config *dcb_config)
3714 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3715 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
3716 struct ixgbe_dcb_tc_config *tc;
3719 /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
3720 if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) {
3721 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
3722 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
3724 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
3725 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
3728 /* Initialize User Priority to Traffic Class mapping */
3729 for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3730 tc = &dcb_config->tc_config[j];
3731 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
3734 /* User Priority to Traffic Class mapping */
3735 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3736 j = vmdq_rx_conf->dcb_tc[i];
3737 tc = &dcb_config->tc_config[j];
3738 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
3744 ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
3745 struct ixgbe_dcb_config *dcb_config)
3747 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3748 &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
3749 struct ixgbe_dcb_tc_config *tc;
3752 /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
3753 if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) {
3754 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
3755 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
3757 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
3758 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
3761 /* Initialize User Priority to Traffic Class mapping */
3762 for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3763 tc = &dcb_config->tc_config[j];
3764 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
3767 /* User Priority to Traffic Class mapping */
3768 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3769 j = vmdq_tx_conf->dcb_tc[i];
3770 tc = &dcb_config->tc_config[j];
3771 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
3777 ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
3778 struct ixgbe_dcb_config *dcb_config)
3780 struct rte_eth_dcb_rx_conf *rx_conf =
3781 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
3782 struct ixgbe_dcb_tc_config *tc;
3785 dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
3786 dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
3788 /* Initialize User Priority to Traffic Class mapping */
3789 for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3790 tc = &dcb_config->tc_config[j];
3791 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
3794 /* User Priority to Traffic Class mapping */
3795 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3796 j = rx_conf->dcb_tc[i];
3797 tc = &dcb_config->tc_config[j];
3798 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
3804 ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
3805 struct ixgbe_dcb_config *dcb_config)
3807 struct rte_eth_dcb_tx_conf *tx_conf =
3808 &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
3809 struct ixgbe_dcb_tc_config *tc;
3812 dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
3813 dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
3815 /* Initialize User Priority to Traffic Class mapping */
3816 for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3817 tc = &dcb_config->tc_config[j];
3818 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
3821 /* User Priority to Traffic Class mapping */
3822 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3823 j = tx_conf->dcb_tc[i];
3824 tc = &dcb_config->tc_config[j];
3825 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
3831 * ixgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
3832 * @dev: pointer to eth_dev structure
3833 * @dcb_config: pointer to ixgbe_dcb_config structure
3836 ixgbe_dcb_rx_hw_config(struct rte_eth_dev *dev,
3837 struct ixgbe_dcb_config *dcb_config)
3843 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3845 PMD_INIT_FUNC_TRACE();
3847 * Disable the arbiter before changing parameters
3848 * (always enable recycle mode; WSP)
3850 reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
3851 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
3853 if (hw->mac.type != ixgbe_mac_82598EB) {
3854 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
3855 if (dcb_config->num_tcs.pg_tcs == 4) {
3856 if (dcb_config->vt_mode)
3857 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3858 IXGBE_MRQC_VMDQRT4TCEN;
3860 /* Whether the mode is DCB or DCB_RSS, just
3861 * set the MRQE to RSSXTCEN. RSS is controlled
3864 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
3865 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3866 IXGBE_MRQC_RTRSS4TCEN;
3869 if (dcb_config->num_tcs.pg_tcs == 8) {
3870 if (dcb_config->vt_mode)
3871 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3872 IXGBE_MRQC_VMDQRT8TCEN;
3874 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
3875 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3876 IXGBE_MRQC_RTRSS8TCEN;
3880 IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
3882 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3884 /* Disable drop for all queues in VMDQ mode */
3884 for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
3885 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3887 (q << IXGBE_QDE_IDX_SHIFT)));
3889 /* Enable drop for all queues in SRIOV mode */
3890 for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
3891 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3893 (q << IXGBE_QDE_IDX_SHIFT) |
3898 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
3899 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3900 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
3901 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
3903 /* VFTA - enable all vlan filters */
3904 for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
3905 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
3909 * Configure Rx packet plane (recycle mode; WSP) and
3912 reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
3913 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
3917 ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill,
3918 uint16_t *max, uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
3920 switch (hw->mac.type) {
3921 case ixgbe_mac_82598EB:
3922 ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
3924 case ixgbe_mac_82599EB:
3925 case ixgbe_mac_X540:
3926 case ixgbe_mac_X550:
3927 case ixgbe_mac_X550EM_x:
3928 case ixgbe_mac_X550EM_a:
3929 ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
3938 ixgbe_dcb_hw_arbite_tx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *max,
3939 uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
3941 switch (hw->mac.type) {
3942 case ixgbe_mac_82598EB:
3943 ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, tsa);
3944 ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, tsa);
3946 case ixgbe_mac_82599EB:
3947 case ixgbe_mac_X540:
3948 case ixgbe_mac_X550:
3949 case ixgbe_mac_X550EM_x:
3950 case ixgbe_mac_X550EM_a:
3951 ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, tsa);
3952 ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, tsa, map);
3959 #define DCB_RX_CONFIG 1
3960 #define DCB_TX_CONFIG 1
3961 #define DCB_TX_PB 1024
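/*
 * Note: DCB_TX_PB is the 1 KB granularity used below when deriving the
 * per-TC TXPBTHRESH threshold from the per-TC Tx packet buffer size.
 */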
3963 * ixgbe_dcb_hw_configure - Enable DCB and configure
3964 * general DCB parameters in both VT and non-VT mode
3965 * @dev: pointer to rte_eth_dev structure
3966 * @dcb_config: pointer to ixgbe_dcb_config structure
3969 ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
3970 struct ixgbe_dcb_config *dcb_config)
3973 uint8_t i, pfc_en, nb_tcs;
3974 uint16_t pbsize, rx_buffer_size;
3975 uint8_t config_dcb_rx = 0;
3976 uint8_t config_dcb_tx = 0;
3977 uint8_t tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3978 uint8_t bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3979 uint16_t refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3980 uint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3981 uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3982 struct ixgbe_dcb_tc_config *tc;
3983 uint32_t max_frame = dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
3984 struct ixgbe_hw *hw =
3985 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3986 struct ixgbe_bw_conf *bw_conf =
3987 IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
3989 switch (dev->data->dev_conf.rxmode.mq_mode) {
3990 case ETH_MQ_RX_VMDQ_DCB:
3991 dcb_config->vt_mode = true;
3992 if (hw->mac.type != ixgbe_mac_82598EB) {
3993 config_dcb_rx = DCB_RX_CONFIG;
3995 * Get DCB and VT Rx configuration parameters
3998 ixgbe_vmdq_dcb_rx_config(dev, dcb_config);
3999 /* Configure general VMDQ and DCB RX parameters */
4000 ixgbe_vmdq_dcb_configure(dev);
4004 case ETH_MQ_RX_DCB_RSS:
4005 dcb_config->vt_mode = false;
4006 config_dcb_rx = DCB_RX_CONFIG;
4007 /* Get DCB Rx configuration parameters from rte_eth_conf */
4008 ixgbe_dcb_rx_config(dev, dcb_config);
4009 /* Configure general DCB RX parameters */
4010 ixgbe_dcb_rx_hw_config(dev, dcb_config);
4013 PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
4016 switch (dev->data->dev_conf.txmode.mq_mode) {
4017 case ETH_MQ_TX_VMDQ_DCB:
4018 dcb_config->vt_mode = true;
4019 config_dcb_tx = DCB_TX_CONFIG;
4020 /* get DCB and VT TX configuration parameters
4023 ixgbe_dcb_vt_tx_config(dev, dcb_config);
4024 /* Configure general VMDQ and DCB TX parameters */
4025 ixgbe_vmdq_dcb_hw_tx_config(dev, dcb_config);
4029 dcb_config->vt_mode = false;
4030 config_dcb_tx = DCB_TX_CONFIG;
4031 /* Get DCB TX configuration parameters from rte_eth_conf */
4032 ixgbe_dcb_tx_config(dev, dcb_config);
4034 /* Configure general DCB TX parameters */
4034 ixgbe_dcb_tx_hw_config(dev, dcb_config);
4037 PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration");
4041 nb_tcs = dcb_config->num_tcs.pfc_tcs;
4043 ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
4044 if (nb_tcs == ETH_4_TCS) {
4045 /* Avoid un-configured priority mapping to TC0 */
4047 uint8_t mask = 0xFF;
4049 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
4050 mask = (uint8_t)(mask & (~(1 << map[i])));
4051 for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
4052 if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
4056 /* Re-configure 4 TCs BW */
4057 for (i = 0; i < nb_tcs; i++) {
4058 tc = &dcb_config->tc_config[i];
4059 if (bw_conf->tc_num != nb_tcs)
4060 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
4061 (uint8_t)(100 / nb_tcs);
4062 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
4063 (uint8_t)(100 / nb_tcs);
4065 for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
4066 tc = &dcb_config->tc_config[i];
4067 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
4068 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 0;
4071 /* Re-configure 8 TCs BW */
4072 for (i = 0; i < nb_tcs; i++) {
4073 tc = &dcb_config->tc_config[i];
4074 if (bw_conf->tc_num != nb_tcs)
4075 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
4076 (uint8_t)(100 / nb_tcs + (i & 1));
4077 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
4078 (uint8_t)(100 / nb_tcs + (i & 1));
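/*
 * Illustration: with 8 TCs, 100 / 8 = 12 and the (i & 1) term alternates
 * 12% and 13% across the TCs, so the bandwidth shares still sum to 100%.
 */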
4082 switch (hw->mac.type) {
4083 case ixgbe_mac_X550:
4084 case ixgbe_mac_X550EM_x:
4085 case ixgbe_mac_X550EM_a:
4086 rx_buffer_size = X550_RX_BUFFER_SIZE;
4089 rx_buffer_size = NIC_RX_BUFFER_SIZE;
4093 if (config_dcb_rx) {
4094 /* Set RX buffer size */
4095 pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
4096 uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT;
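/*
 * E.g. assuming the default 512 KB Rx packet buffer and 4 TCs, each
 * enabled TC gets a 128 KB slice of RXPBSIZE; unused TCs are zeroed below.
 */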
4098 for (i = 0; i < nb_tcs; i++) {
4099 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
4101 /* zero alloc all unused TCs */
4102 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
4103 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
4106 if (config_dcb_tx) {
4107 /* Only an equally distributed
4108 * Tx packet buffer strategy is supported.
4110 uint32_t txpktsize = IXGBE_TXPBSIZE_MAX / nb_tcs;
4111 uint32_t txpbthresh = (txpktsize / DCB_TX_PB) - IXGBE_TXPKT_SIZE_MAX;
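/*
 * E.g. assuming a 160 KB total Tx packet buffer and 4 TCs, each TC gets
 * a 40 KB TXPBSIZE slice; TXPBTHRESH is that slice in KB units minus
 * IXGBE_TXPKT_SIZE_MAX.
 */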
4113 for (i = 0; i < nb_tcs; i++) {
4114 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
4115 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
4117 /* Clear unused TCs, if any, to zero buffer size */
4118 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
4119 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
4120 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
4124 /* Calculates traffic class credits */
4125 ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
4126 IXGBE_DCB_TX_CONFIG);
4127 ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
4128 IXGBE_DCB_RX_CONFIG);
4130 if (config_dcb_rx) {
4131 /* Unpack CEE standard containers */
4132 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_RX_CONFIG, refill);
4133 ixgbe_dcb_unpack_max_cee(dcb_config, max);
4134 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_RX_CONFIG, bwgid);
4135 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_RX_CONFIG, tsa);
4136 /* Configure PG(ETS) RX */
4137 ixgbe_dcb_hw_arbite_rx_config(hw, refill, max, bwgid, tsa, map);
4140 if (config_dcb_tx) {
4141 /* Unpack CEE standard containers */
4142 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
4143 ixgbe_dcb_unpack_max_cee(dcb_config, max);
4144 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
4145 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
4146 /* Configure PG(ETS) TX */
4147 ixgbe_dcb_hw_arbite_tx_config(hw, refill, max, bwgid, tsa, map);
4150 /* Configure queue statistics registers */
4151 ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
4153 /* Check if the PFC is supported */
4154 if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
4155 pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
4156 for (i = 0; i < nb_tcs; i++) {
4158 * If the TC count is 8, the default high_water is 48
4159 * and the default low_water is 16.
4161 hw->fc.high_water[i] = (pbsize * 3) / 4;
4162 hw->fc.low_water[i] = pbsize / 4;
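/*
 * E.g. assuming the default 512 KB Rx packet buffer and 8 TCs, pbsize is
 * 64 per TC, giving high_water = 48 and low_water = 16, which matches the
 * defaults noted above.
 */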
4163 /* Enable pfc for this TC */
4164 tc = &dcb_config->tc_config[i];
4165 tc->pfc = ixgbe_dcb_pfc_enabled;
4167 ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
4168 if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
4170 ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
4177 * ixgbe_configure_dcb - Configure DCB Hardware
4178 * @dev: pointer to rte_eth_dev
4180 void ixgbe_configure_dcb(struct rte_eth_dev *dev)
4182 struct ixgbe_dcb_config *dcb_cfg =
4183 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
4184 struct rte_eth_conf *dev_conf = &(dev->data->dev_conf);
4186 PMD_INIT_FUNC_TRACE();
4188 /* Check that the requested mq_mode is supported for DCB */
4189 if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
4190 (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) &&
4191 (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS))
4194 if (dev->data->nb_rx_queues > ETH_DCB_NUM_QUEUES)
4197 /** Configure DCB hardware **/
4198 ixgbe_dcb_hw_configure(dev, dcb_cfg);
4202 * VMDq is only supported on 10 GbE NICs.
4205 ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
4207 struct rte_eth_vmdq_rx_conf *cfg;
4208 struct ixgbe_hw *hw;
4209 enum rte_eth_nb_pools num_pools;
4210 uint32_t mrqc, vt_ctl, vlanctrl;
4214 PMD_INIT_FUNC_TRACE();
4215 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4216 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
4217 num_pools = cfg->nb_queue_pools;
4219 ixgbe_rss_disable(dev);
4221 /* MRQC: enable vmdq */
4222 mrqc = IXGBE_MRQC_VMDQEN;
4223 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
4225 /* PFVTCTL: turn on virtualisation and set the default pool */
4226 vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
4227 if (cfg->enable_default_pool)
4228 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
4230 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
4232 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
4234 for (i = 0; i < (int)num_pools; i++) {
4235 vmolr = ixgbe_convert_vm_rx_mask_to_val(cfg->rx_mode, vmolr);
4236 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
4239 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
4240 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4241 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
4242 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
4244 /* VFTA - enable all vlan filters */
4245 for (i = 0; i < NUM_VFTA_REGISTERS; i++)
4246 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), UINT32_MAX);
4248 /* VFRE: pool enabling for receive - 64 */
4249 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), UINT32_MAX);
4250 if (num_pools == ETH_64_POOLS)
4251 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), UINT32_MAX);
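/* VFRE(0) covers pools 0-31 and VFRE(1) covers pools 32-63, so the second
 * register only needs to be written when 64 pools are configured.
 */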
4254 * MPSAR - allow pools to read specific mac addresses
4255 * In this case, all pools should be able to read from mac addr 0
4257 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), UINT32_MAX);
4258 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), UINT32_MAX);
4260 /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
4261 for (i = 0; i < cfg->nb_pool_maps; i++) {
4262 /* set vlan id in VF register and set the valid bit */
4263 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN |
4264 (cfg->pool_map[i].vlan_id & IXGBE_RXD_VLAN_ID_MASK)));
4266 * Put the allowed pools in VFB reg. As we only have 16 or 64
4267 * pools, we only need to use the first half of the register
4270 if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0)
4271 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i * 2),
4272 (cfg->pool_map[i].pools & UINT32_MAX));
4274 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((i * 2 + 1)),
4275 ((cfg->pool_map[i].pools >> 32) & UINT32_MAX));
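/* Each VLVF entry has a pair of VLVFB registers: the even one holds the
 * pool-enable bits for pools 0-31 and the odd one for pools 32-63.
 */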
4279 /* PFDMA Tx General Switch Control Enables VMDQ loopback */
4280 if (cfg->enable_loop_back) {
4281 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
4282 for (i = 0; i < RTE_IXGBE_VMTXSW_REGISTER_COUNT; i++)
4283 IXGBE_WRITE_REG(hw, IXGBE_VMTXSW(i), UINT32_MAX);
4286 IXGBE_WRITE_FLUSH(hw);
4290 * ixgbe_vmdq_tx_hw_configure - Configure general VMDq TX parameters
4291 * @hw: pointer to hardware structure
4294 ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
4299 PMD_INIT_FUNC_TRACE();
4300 /* PF VF Transmit Enable */
4301 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), UINT32_MAX);
4302 IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), UINT32_MAX);
4304 /* Disable the Tx desc arbiter so that MTQC can be changed */
4305 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
4306 reg |= IXGBE_RTTDCS_ARBDIS;
4307 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
4309 reg = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
4310 IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
4312 /* Disable drop for all queues */
4313 for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
4314 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4315 (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
4317 /* Enable the Tx desc arbiter */
4318 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
4319 reg &= ~IXGBE_RTTDCS_ARBDIS;
4320 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
4322 IXGBE_WRITE_FLUSH(hw);
4325 static int __attribute__((cold))
4326 ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
4328 struct ixgbe_rx_entry *rxe = rxq->sw_ring;
4332 /* Initialize software ring entries */
4333 for (i = 0; i < rxq->nb_rx_desc; i++) {
4334 volatile union ixgbe_adv_rx_desc *rxd;
4335 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
4338 PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
4339 (unsigned) rxq->queue_id);
4343 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
4344 mbuf->port = rxq->port_id;
4347 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
4348 rxd = &rxq->rx_ring[i];
4349 rxd->read.hdr_addr = 0;
4350 rxd->read.pkt_addr = dma_addr;
4358 ixgbe_config_vf_rss(struct rte_eth_dev *dev)
4360 struct ixgbe_hw *hw;
4363 ixgbe_rss_configure(dev);
4365 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4367 /* MRQC: enable VF RSS */
4368 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
4369 mrqc &= ~IXGBE_MRQC_MRQE_MASK;
4370 switch (RTE_ETH_DEV_SRIOV(dev).active) {
4372 mrqc |= IXGBE_MRQC_VMDQRSS64EN;
4376 mrqc |= IXGBE_MRQC_VMDQRSS32EN;
4380 PMD_INIT_LOG(ERR, "Invalid pool number in IOV mode with VMDQ RSS");
4384 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
4390 ixgbe_config_vf_default(struct rte_eth_dev *dev)
4392 struct ixgbe_hw *hw =
4393 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4395 switch (RTE_ETH_DEV_SRIOV(dev).active) {
4397 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
4402 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
4403 IXGBE_MRQC_VMDQRT4TCEN);
4407 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
4408 IXGBE_MRQC_VMDQRT8TCEN);
4412 "invalid pool number in IOV mode");
4419 ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
4421 struct ixgbe_hw *hw =
4422 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4424 if (hw->mac.type == ixgbe_mac_82598EB)
4427 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
4429 * SRIOV inactive scheme
4430 * any DCB/RSS w/o VMDq multi-queue setting
4432 switch (dev->data->dev_conf.rxmode.mq_mode) {
4434 case ETH_MQ_RX_DCB_RSS:
4435 case ETH_MQ_RX_VMDQ_RSS:
4436 ixgbe_rss_configure(dev);
4439 case ETH_MQ_RX_VMDQ_DCB:
4440 ixgbe_vmdq_dcb_configure(dev);
4443 case ETH_MQ_RX_VMDQ_ONLY:
4444 ixgbe_vmdq_rx_hw_configure(dev);
4447 case ETH_MQ_RX_NONE:
4449 /* if mq_mode is none, disable rss mode.*/
4450 ixgbe_rss_disable(dev);
4454 /* SRIOV active scheme
4455 * Support RSS together with SRIOV.
4457 switch (dev->data->dev_conf.rxmode.mq_mode) {
4459 case ETH_MQ_RX_VMDQ_RSS:
4460 ixgbe_config_vf_rss(dev);
4462 case ETH_MQ_RX_VMDQ_DCB:
4464 /* In SRIOV, the configuration is the same as VMDq case */
4465 ixgbe_vmdq_dcb_configure(dev);
4467 /* DCB/RSS together with SRIOV is not supported */
4468 case ETH_MQ_RX_VMDQ_DCB_RSS:
4469 case ETH_MQ_RX_DCB_RSS:
4471 "Could not support DCB/RSS with VMDq & SRIOV");
4474 ixgbe_config_vf_default(dev);
4483 ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
4485 struct ixgbe_hw *hw =
4486 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4490 if (hw->mac.type == ixgbe_mac_82598EB)
4493 /* disable arbiter before setting MTQC */
4494 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
4495 rttdcs |= IXGBE_RTTDCS_ARBDIS;
4496 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
4498 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
4500 * SRIOV inactive scheme
4501 * any DCB w/o VMDq multi-queue setting
4503 if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
4504 ixgbe_vmdq_tx_hw_configure(hw);
4506 mtqc = IXGBE_MTQC_64Q_1PB;
4507 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
4510 switch (RTE_ETH_DEV_SRIOV(dev).active) {
4513 * SRIOV active scheme
4514 * FIXME if support DCB together with VMDq & SRIOV
4517 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
4520 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_32VF;
4523 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_RT_ENA |
4527 mtqc = IXGBE_MTQC_64Q_1PB;
4528 PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
4530 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
4533 /* re-enable arbiter */
4534 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
4535 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
4541 * ixgbe_get_rscctl_maxdesc - Calculate the RSCCTL[n].MAXDESC for PF
4543 * Return the RSCCTL[n].MAXDESC for 82599 and x540 PF devices according to the
4544 * spec rev. 3.0 chapter 8.2.3.8.13.
4546 * @pool Memory pool of the Rx queue
4548 static inline uint32_t
4549 ixgbe_get_rscctl_maxdesc(struct rte_mempool *pool)
4551 struct rte_pktmbuf_pool_private *mp_priv = rte_mempool_get_priv(pool);
4553 /* MAXDESC * SRRCTL.BSIZEPKT must not exceed 64 KB minus one */
4556 (mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);
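/*
 * E.g. assuming a 2 KB mbuf data room with the default 128 byte headroom,
 * each buffer holds 1920 bytes; 16 such buffers stay well below the
 * 64 KB - 1 limit, so MAXDESC_16 is returned for such pools.
 */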
4559 return IXGBE_RSCCTL_MAXDESC_16;
4560 else if (maxdesc >= 8)
4561 return IXGBE_RSCCTL_MAXDESC_8;
4562 else if (maxdesc >= 4)
4563 return IXGBE_RSCCTL_MAXDESC_4;
4565 return IXGBE_RSCCTL_MAXDESC_1;
4569 * ixgbe_set_ivar - Setup the correct IVAR register for a particular MSIX
4572 * (Taken from FreeBSD tree)
4573 * (yes this is all very magic and confusing :)
4576 * @entry the register array entry
4577 * @vector the MSIX vector for this queue
4581 ixgbe_set_ivar(struct rte_eth_dev *dev, u8 entry, u8 vector, s8 type)
4583 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4586 vector |= IXGBE_IVAR_ALLOC_VAL;
4588 switch (hw->mac.type) {
4590 case ixgbe_mac_82598EB:
4592 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4594 entry += (type * 64);
4595 index = (entry >> 2) & 0x1F;
4596 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4597 ivar &= ~(0xFF << (8 * (entry & 0x3)));
4598 ivar |= (vector << (8 * (entry & 0x3)));
4599 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4602 case ixgbe_mac_82599EB:
4603 case ixgbe_mac_X540:
4604 if (type == -1) { /* MISC IVAR */
4605 index = (entry & 1) * 8;
4606 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4607 ivar &= ~(0xFF << index);
4608 ivar |= (vector << index);
4609 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4610 } else { /* RX/TX IVARS */
4611 index = (16 * (entry & 1)) + (8 * type);
4612 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4613 ivar &= ~(0xFF << index);
4614 ivar |= (vector << index);
4615 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
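/*
 * E.g. for Rx queue 5 (type 0): entry >> 1 selects IVAR(2) and the
 * bit offset is 16 * (5 & 1) + 8 * 0 = 16, i.e. the vector lands in
 * the third byte of that register.
 */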
4625 void __attribute__((cold))
4626 ixgbe_set_rx_function(struct rte_eth_dev *dev)
4628 uint16_t i, rx_using_sse;
4629 struct ixgbe_adapter *adapter =
4630 (struct ixgbe_adapter *)dev->data->dev_private;
4633 * In order to allow Vector Rx there are a few configuration
4634 * conditions to be met and Rx Bulk Allocation should be allowed.
4636 if (ixgbe_rx_vec_dev_conf_condition_check(dev) ||
4637 !adapter->rx_bulk_alloc_allowed) {
4638 PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx "
4639 "preconditions or RTE_IXGBE_INC_VECTOR is "
4641 dev->data->port_id);
4643 adapter->rx_vec_allowed = false;
4647 * Initialize the appropriate LRO callback.
4649 * If all queues satisfy the bulk allocation preconditions
4650 * (adapter->rx_bulk_alloc_allowed is TRUE) then we may use bulk allocation.
4651 * Otherwise use a single allocation version.
4653 if (dev->data->lro) {
4654 if (adapter->rx_bulk_alloc_allowed) {
4655 PMD_INIT_LOG(DEBUG, "LRO is requested. Using a bulk "
4656 "allocation version");
4657 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
4659 PMD_INIT_LOG(DEBUG, "LRO is requested. Using a single "
4660 "allocation version");
4661 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
4663 } else if (dev->data->scattered_rx) {
4665 * Set the non-LRO scattered callback: there are Vector and
4666 * single allocation versions.
4668 if (adapter->rx_vec_allowed) {
4669 PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
4670 "callback (port=%d).",
4671 dev->data->port_id);
4673 dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
4674 } else if (adapter->rx_bulk_alloc_allowed) {
4675 PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
4676 "allocation callback (port=%d).",
4677 dev->data->port_id);
4678 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
4680 PMD_INIT_LOG(DEBUG, "Using Regualr (non-vector, "
4681 "single allocation) "
4682 "Scattered Rx callback "
4684 dev->data->port_id);
4686 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
4689 * Below we set "simple" callbacks according to port/queue parameters.
4690 * If the parameters allow it, we will choose between the following
4694 * - Single buffer allocation (the simplest one)
4696 } else if (adapter->rx_vec_allowed) {
4697 PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
4698 "burst size no less than %d (port=%d).",
4699 RTE_IXGBE_DESCS_PER_LOOP,
4700 dev->data->port_id);
4702 dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
4703 } else if (adapter->rx_bulk_alloc_allowed) {
4704 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
4705 "satisfied. Rx Burst Bulk Alloc function "
4706 "will be used on port=%d.",
4707 dev->data->port_id);
4709 dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
4711 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
4712 "satisfied, or Scattered Rx is requested "
4714 dev->data->port_id);
4716 dev->rx_pkt_burst = ixgbe_recv_pkts;
4719 /* Propagate information about RX function choice through all queues. */
4722 (dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec ||
4723 dev->rx_pkt_burst == ixgbe_recv_pkts_vec);
4725 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4726 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
4728 rxq->rx_using_sse = rx_using_sse;
4729 #ifdef RTE_LIBRTE_SECURITY
4730 rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
4731 DEV_RX_OFFLOAD_SECURITY);
4737 * ixgbe_set_rsc - configure RSC related port HW registers
4739 * Configures the port's RSC related registers, following chapter 4.6.7.2
4740 * of the 82599 Spec (x540 configuration is virtually the same).
4744 * Returns 0 in case of success or a non-zero error code
4747 ixgbe_set_rsc(struct rte_eth_dev *dev)
4749 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
4750 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4751 struct rte_eth_dev_info dev_info = { 0 };
4752 bool rsc_capable = false;
4758 dev->dev_ops->dev_infos_get(dev, &dev_info);
4759 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
4762 if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
4763 PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
4768 /* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
4770 if ((rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC) &&
4771 (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
4773 * According to chapter 4.6.7.2.1 of the Spec Rev.
4774 * 3.0, RSC configuration requires HW CRC stripping to be
4775 * enabled. If the user requested both HW CRC stripping off
4776 * and RSC on, return an error.
4778 PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC "
4783 /* RFCTL configuration */
4784 rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
4785 if ((rsc_capable) && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
4787 * Since NFS packet coalescing is not supported, clear
4788 * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
4791 rfctl &= ~(IXGBE_RFCTL_RSC_DIS | IXGBE_RFCTL_NFSW_DIS |
4792 IXGBE_RFCTL_NFSR_DIS);
4794 rfctl |= IXGBE_RFCTL_RSC_DIS;
4795 IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
4797 /* If LRO hasn't been requested - we are done here. */
4798 if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
4801 /* Set RDRXCTL.RSCACKC bit */
4802 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
4803 rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
4804 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
4806 /* Per-queue RSC configuration (chapter 4.6.7.2.2 of 82599 Spec) */
4807 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4808 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
4810 IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxq->reg_idx));
4812 IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxq->reg_idx));
4814 IXGBE_READ_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx));
4816 IXGBE_READ_REG(hw, IXGBE_EITR(rxq->reg_idx));
4819 * ixgbe PMD doesn't support header-split at the moment.
4821 * Following the 4.6.7.2.1 chapter of the 82599/x540
4822 * Spec if RSC is enabled the SRRCTL[n].BSIZEHEADER
4823 * should be configured even if header split is not
4824 * enabled. We configure it to 128 bytes, following the
4825 * recommendation in the spec.
4827 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
4828 srrctl |= (128 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
4829 IXGBE_SRRCTL_BSIZEHDR_MASK;
4832 * TODO: Consider setting the Receive Descriptor Minimum
4833 * Threshold Size for an RSC case. This is not an obviously
4834 * beneficial option, but one worth considering...
4837 rscctl |= IXGBE_RSCCTL_RSCEN;
4838 rscctl |= ixgbe_get_rscctl_maxdesc(rxq->mb_pool);
4839 psrtype |= IXGBE_PSRTYPE_TCPHDR;
4842 * RSC: Set ITR interval corresponding to 2K ints/s.
4844 * Full-sized RSC aggregations for a 10Gb/s link will
4845 * arrive at about a 20K aggregations/s rate.
4847 * A 2K ints/s rate will cause only 10% of the
4848 * aggregations to be closed due to the interrupt timer
4849 * expiration in a wire-speed streaming case.
4851 * For a sparse streaming case this setting will yield
4852 * at most 500us latency for a single RSC aggregation.
4854 eitr &= ~IXGBE_EITR_ITR_INT_MASK;
4855 eitr |= IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT);
4856 eitr |= IXGBE_EITR_CNT_WDIS;
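/*
 * E.g. a 500 us interval corresponds to roughly 2000 interrupts/s,
 * which is the 2K ints/s target described above.
 */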
4858 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
4859 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxq->reg_idx), rscctl);
4860 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
4861 IXGBE_WRITE_REG(hw, IXGBE_EITR(rxq->reg_idx), eitr);
4864 * RSC requires the mapping of the queue to the
4867 ixgbe_set_ivar(dev, rxq->reg_idx, i, 0);
4872 PMD_INIT_LOG(DEBUG, "enabling LRO mode");
4878 * Initializes Receive Unit.
4880 int __attribute__((cold))
4881 ixgbe_dev_rx_init(struct rte_eth_dev *dev)
4883 struct ixgbe_hw *hw;
4884 struct ixgbe_rx_queue *rxq;
4895 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
4898 PMD_INIT_FUNC_TRACE();
4899 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4902 * Make sure receives are disabled while setting
4903 * up the RX context (registers, descriptor rings, etc.).
4905 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4906 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
4908 /* Enable receipt of broadcast frames */
4909 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4910 fctrl |= IXGBE_FCTRL_BAM;
4911 fctrl |= IXGBE_FCTRL_DPF;
4912 fctrl |= IXGBE_FCTRL_PMCF;
4913 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4916 * Configure CRC stripping, if any.
4918 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4919 if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
4920 hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
4922 hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
4925 * Configure jumbo frame support, if any.
4927 if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
4928 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
4929 maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
4930 maxfrs &= 0x0000FFFF;
4931 maxfrs |= (rx_conf->max_rx_pkt_len << 16);
4932 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
4934 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
4937 * If loopback mode is configured, set LPBK bit.
4939 if (dev->data->dev_conf.lpbk_mode != 0) {
4940 rc = ixgbe_check_supported_loopback_mode(dev);
4942 PMD_INIT_LOG(ERR, "Unsupported loopback mode");
4945 hlreg0 |= IXGBE_HLREG0_LPBK;
4947 hlreg0 &= ~IXGBE_HLREG0_LPBK;
4950 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4953 * Assume no header split and no VLAN strip support
4954 * on any Rx queue first.
4956 rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
4957 /* Setup RX queues */
4958 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4959 rxq = dev->data->rx_queues[i];
4962 * Reset crc_len in case it was changed after queue setup by a
4963 * call to configure.
4965 if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
4966 rxq->crc_len = ETHER_CRC_LEN;
4970 /* Setup the Base and Length of the Rx Descriptor Rings */
4971 bus_addr = rxq->rx_ring_phys_addr;
4972 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rxq->reg_idx),
4973 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4974 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rxq->reg_idx),
4975 (uint32_t)(bus_addr >> 32));
4976 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rxq->reg_idx),
4977 rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
4978 IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
4979 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), 0);
4981 /* Configure the SRRCTL register */
4982 srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
4984 /* Set if packets are dropped when no descriptors available */
4986 srrctl |= IXGBE_SRRCTL_DROP_EN;
4989 * Configure the RX buffer size in the BSIZEPACKET field of
4990 * the SRRCTL register of the queue.
4991 * The value is in 1 KB resolution. Valid values can be from
4994 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
4995 RTE_PKTMBUF_HEADROOM);
4996 srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
4997 IXGBE_SRRCTL_BSIZEPKT_MASK);
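/*
 * E.g. assuming a 2 KB mbuf data room with 128 bytes of headroom,
 * buf_size is 1920; 1920 >> 10 programs BSIZEPKT to 1, i.e. a 1 KB
 * buffer, and the read-back below yields an effective size of 1024.
 */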
4999 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
5001 buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
5002 IXGBE_SRRCTL_BSIZEPKT_SHIFT);
5004 /* Add dual VLAN tag length to support dual VLAN */
5005 if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
5006 2 * IXGBE_VLAN_TAG_SIZE > buf_size)
5007 dev->data->scattered_rx = 1;
5008 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
5009 rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
5012 if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
5013 dev->data->scattered_rx = 1;
5016 * Device configured with multiple RX queues.
5018 ixgbe_dev_mq_rx_configure(dev);
5021 * Setup the Checksum Register.
5022 * Disable Full-Packet Checksum which is mutually exclusive with RSS.
5023 * Enable IP/L4 checksum computation by hardware if requested to do so.
5025 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
5026 rxcsum |= IXGBE_RXCSUM_PCSD;
5027 if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
5028 rxcsum |= IXGBE_RXCSUM_IPPCSE;
5030 rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
5032 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
5034 if (hw->mac.type == ixgbe_mac_82599EB ||
5035 hw->mac.type == ixgbe_mac_X540) {
5036 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
5037 if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
5038 rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
5040 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
5041 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
5042 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
5045 rc = ixgbe_set_rsc(dev);
5049 ixgbe_set_rx_function(dev);
5055 * Initializes Transmit Unit.
5057 void __attribute__((cold))
5058 ixgbe_dev_tx_init(struct rte_eth_dev *dev)
5060 struct ixgbe_hw *hw;
5061 struct ixgbe_tx_queue *txq;
5067 PMD_INIT_FUNC_TRACE();
5068 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5070 /* Enable TX CRC (checksum offload requirement) and hw padding
5073 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
5074 hlreg0 |= (IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN);
5075 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
5077 /* Setup the Base and Length of the Tx Descriptor Rings */
5078 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5079 txq = dev->data->tx_queues[i];
5081 bus_addr = txq->tx_ring_phys_addr;
5082 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(txq->reg_idx),
5083 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
5084 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(txq->reg_idx),
5085 (uint32_t)(bus_addr >> 32));
5086 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(txq->reg_idx),
5087 txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
5088 /* Setup the HW Tx Head and TX Tail descriptor pointers */
5089 IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
5090 IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
5093 * Disable Tx Head Writeback RO bit, since this hoses
5094 * bookkeeping if things aren't delivered in order.
5096 switch (hw->mac.type) {
5097 case ixgbe_mac_82598EB:
5098 txctrl = IXGBE_READ_REG(hw,
5099 IXGBE_DCA_TXCTRL(txq->reg_idx));
5100 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
5101 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx),
5105 case ixgbe_mac_82599EB:
5106 case ixgbe_mac_X540:
5107 case ixgbe_mac_X550:
5108 case ixgbe_mac_X550EM_x:
5109 case ixgbe_mac_X550EM_a:
5111 txctrl = IXGBE_READ_REG(hw,
5112 IXGBE_DCA_TXCTRL_82599(txq->reg_idx));
5113 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
5114 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx),
5120 /* Device configured with multiple TX queues. */
5121 ixgbe_dev_mq_tx_configure(dev);
5125 * Check if requested loopback mode is supported
5128 ixgbe_check_supported_loopback_mode(struct rte_eth_dev *dev)
5130 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5132 if (dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_TX_RX)
5133 if (hw->mac.type == ixgbe_mac_82599EB ||
5134 hw->mac.type == ixgbe_mac_X540 ||
5135 hw->mac.type == ixgbe_mac_X550 ||
5136 hw->mac.type == ixgbe_mac_X550EM_x ||
5137 hw->mac.type == ixgbe_mac_X550EM_a)
5144 * Set up link for 82599 loopback mode Tx->Rx.
5146 static inline void __attribute__((cold))
5147 ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
5149 PMD_INIT_FUNC_TRACE();
5151 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
5152 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM) !=
5154 PMD_INIT_LOG(ERR, "Could not enable loopback mode");
5163 IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU);
5164 ixgbe_reset_pipeline_82599(hw);
5166 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
5172 * Start Transmit and Receive Units.
5174 int __attribute__((cold))
5175 ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
5177 struct ixgbe_hw *hw;
5178 struct ixgbe_tx_queue *txq;
5179 struct ixgbe_rx_queue *rxq;
5186 PMD_INIT_FUNC_TRACE();
5187 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5189 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5190 txq = dev->data->tx_queues[i];
5191 /* Setup Transmit Threshold Registers */
5192 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
5193 txdctl |= txq->pthresh & 0x7F;
5194 txdctl |= ((txq->hthresh & 0x7F) << 8);
5195 txdctl |= ((txq->wthresh & 0x7F) << 16);
5196 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
5199 if (hw->mac.type != ixgbe_mac_82598EB) {
5200 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
5201 dmatxctl |= IXGBE_DMATXCTL_TE;
5202 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
5205 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5206 txq = dev->data->tx_queues[i];
5207 if (!txq->tx_deferred_start) {
5208 ret = ixgbe_dev_tx_queue_start(dev, i);
5214 for (i = 0; i < dev->data->nb_rx_queues; i++) {
5215 rxq = dev->data->rx_queues[i];
5216 if (!rxq->rx_deferred_start) {
5217 ret = ixgbe_dev_rx_queue_start(dev, i);
5223 /* Enable Receive engine */
5224 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
5225 if (hw->mac.type == ixgbe_mac_82598EB)
5226 rxctrl |= IXGBE_RXCTRL_DMBYPS;
5227 rxctrl |= IXGBE_RXCTRL_RXEN;
5228 hw->mac.ops.enable_rx_dma(hw, rxctrl);
5230 /* If loopback mode is enabled, set up the link accordingly */
5231 if (dev->data->dev_conf.lpbk_mode != 0) {
5232 if (hw->mac.type == ixgbe_mac_82599EB)
5233 ixgbe_setup_loopback_link_82599(hw);
5234 else if (hw->mac.type == ixgbe_mac_X540 ||
5235 hw->mac.type == ixgbe_mac_X550 ||
5236 hw->mac.type == ixgbe_mac_X550EM_x ||
5237 hw->mac.type == ixgbe_mac_X550EM_a)
5238 ixgbe_setup_loopback_link_x540_x550(hw, true);
5241 #ifdef RTE_LIBRTE_SECURITY
5242 if ((dev->data->dev_conf.rxmode.offloads &
5243 DEV_RX_OFFLOAD_SECURITY) ||
5244 (dev->data->dev_conf.txmode.offloads &
5245 DEV_TX_OFFLOAD_SECURITY)) {
5246 ret = ixgbe_crypto_enable_ipsec(dev);
5249 "ixgbe_crypto_enable_ipsec fails with %d.",
5260 * Start Receive Units for specified queue.
5262 int __attribute__((cold))
5263 ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
5265 struct ixgbe_hw *hw;
5266 struct ixgbe_rx_queue *rxq;
5270 PMD_INIT_FUNC_TRACE();
5271 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5273 rxq = dev->data->rx_queues[rx_queue_id];
5275 /* Allocate buffers for descriptor rings */
5276 if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
5277 PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
5281 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5282 rxdctl |= IXGBE_RXDCTL_ENABLE;
5283 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
5285 /* Wait until RX Enable ready */
5286 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5289 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5290 } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
5292 PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", rx_queue_id);
5294 IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
5295 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
5296 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
5302 * Stop Receive Units for specified queue.
5304 int __attribute__((cold))
5305 ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
5307 struct ixgbe_hw *hw;
5308 struct ixgbe_adapter *adapter =
5309 (struct ixgbe_adapter *)dev->data->dev_private;
5310 struct ixgbe_rx_queue *rxq;
5314 PMD_INIT_FUNC_TRACE();
5315 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5317 rxq = dev->data->rx_queues[rx_queue_id];
5319 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5320 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
5321 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
5323 /* Wait until RX Enable bit clear */
5324 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5327 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5328 } while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE));
5330 PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id);
5332 rte_delay_us(RTE_IXGBE_WAIT_100_US);
5334 ixgbe_rx_queue_release_mbufs(rxq);
5335 ixgbe_reset_rx_queue(adapter, rxq);
5336 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
5343 * Start Transmit Units for specified queue.
5345 int __attribute__((cold))
5346 ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
5348 struct ixgbe_hw *hw;
5349 struct ixgbe_tx_queue *txq;
5353 PMD_INIT_FUNC_TRACE();
5354 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5356 txq = dev->data->tx_queues[tx_queue_id];
5357 IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
5358 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
5359 txdctl |= IXGBE_TXDCTL_ENABLE;
5360 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
5362 /* Wait until TX Enable ready */
5363 if (hw->mac.type == ixgbe_mac_82599EB) {
5364 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5367 txdctl = IXGBE_READ_REG(hw,
5368 IXGBE_TXDCTL(txq->reg_idx));
5369 } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
5371 PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d",
5375 IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
5376 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
5382 * Stop Transmit Units for specified queue.
5384 int __attribute__((cold))
5385 ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
5387 struct ixgbe_hw *hw;
5388 struct ixgbe_tx_queue *txq;
5390 uint32_t txtdh, txtdt;
5393 PMD_INIT_FUNC_TRACE();
5394 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5396 txq = dev->data->tx_queues[tx_queue_id];
5398 /* Wait until TX queue is empty */
5399 if (hw->mac.type == ixgbe_mac_82599EB) {
5400 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5402 rte_delay_us(RTE_IXGBE_WAIT_100_US);
5403 txtdh = IXGBE_READ_REG(hw,
5404 IXGBE_TDH(txq->reg_idx));
5405 txtdt = IXGBE_READ_REG(hw,
5406 IXGBE_TDT(txq->reg_idx));
5407 } while (--poll_ms && (txtdh != txtdt));
5410 "Tx Queue %d is not empty when stopping.",
5414 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
5415 txdctl &= ~IXGBE_TXDCTL_ENABLE;
5416 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
5418 /* Wait until TX Enable bit clear */
5419 if (hw->mac.type == ixgbe_mac_82599EB) {
5420 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5423 txdctl = IXGBE_READ_REG(hw,
5424 IXGBE_TXDCTL(txq->reg_idx));
5425 } while (--poll_ms && (txdctl & IXGBE_TXDCTL_ENABLE));
5427 PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d",
5431 if (txq->ops != NULL) {
5432 txq->ops->release_mbufs(txq);
5433 txq->ops->reset(txq);
5435 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
5441 ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
5442 struct rte_eth_rxq_info *qinfo)
5444 struct ixgbe_rx_queue *rxq;
5446 rxq = dev->data->rx_queues[queue_id];
5448 qinfo->mp = rxq->mb_pool;
5449 qinfo->scattered_rx = dev->data->scattered_rx;
5450 qinfo->nb_desc = rxq->nb_rx_desc;
5452 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
5453 qinfo->conf.rx_drop_en = rxq->drop_en;
5454 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
5455 qinfo->conf.offloads = rxq->offloads;
5459 ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
5460 struct rte_eth_txq_info *qinfo)
5462 struct ixgbe_tx_queue *txq;
5464 txq = dev->data->tx_queues[queue_id];
5466 qinfo->nb_desc = txq->nb_tx_desc;
5468 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
5469 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
5470 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
5472 qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
5473 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
5474 qinfo->conf.offloads = txq->offloads;
5475 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
5479 * [VF] Initializes Receive Unit.
5481 int __attribute__((cold))
5482 ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
5484 struct ixgbe_hw *hw;
5485 struct ixgbe_rx_queue *rxq;
5486 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
5488 uint32_t srrctl, psrtype = 0;
5493 PMD_INIT_FUNC_TRACE();
5494 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5496 if (rte_is_power_of_2(dev->data->nb_rx_queues) == 0) {
5497 PMD_INIT_LOG(ERR, "The number of Rx queue invalid, "
5498 "it should be power of 2");
5502 if (dev->data->nb_rx_queues > hw->mac.max_rx_queues) {
5503 PMD_INIT_LOG(ERR, "The number of Rx queue invalid, "
5504 "it should be equal to or less than %d",
5505 hw->mac.max_rx_queues);
5510 * When the VF driver issues an IXGBE_VF_RESET request, the PF driver
5511 * disables VF receipt of packets if the PF MTU is > 1500.
5512 * This is done to deal with an 82599 limitation that forces
5513 * the PF and all VFs to share the same MTU.
5514 * Then, the PF driver re-enables VF packet receipt when
5515 * the VF driver issues a IXGBE_VF_SET_LPE request.
5516 * In the meantime, the VF device cannot be used, even if the VF driver
5517 * and the Guest VM network stack are ready to accept packets with a
5518 * size up to the PF MTU.
5519 * As a work-around to this PF behaviour, force the call to
5520 * ixgbevf_rlpml_set_vf even if jumbo frames are not used. This way,
5521 * VF packet reception works in all cases.
5523 ixgbevf_rlpml_set_vf(hw,
5524 (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
5527 * Assume no header split and no VLAN strip support
5528 * on any Rx queue first.
5530 rxmode->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
5531 /* Setup RX queues */
5532 for (i = 0; i < dev->data->nb_rx_queues; i++) {
5533 rxq = dev->data->rx_queues[i];
5535 /* Allocate buffers for descriptor rings */
5536 ret = ixgbe_alloc_rx_queue_mbufs(rxq);
5540 /* Setup the Base and Length of the Rx Descriptor Rings */
5541 bus_addr = rxq->rx_ring_phys_addr;
5543 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
5544 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
5545 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
5546 (uint32_t)(bus_addr >> 32));
5547 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
5548 rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
5549 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0);
5550 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0);
5553 /* Configure the SRRCTL register */
5554 srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
5556 /* Set if packets are dropped when no descriptors available */
5558 srrctl |= IXGBE_SRRCTL_DROP_EN;
5561 * Configure the RX buffer size in the BSIZEPACKET field of
5562 * the SRRCTL register of the queue.
5563 * The value is in 1 KB resolution. Valid values can be from
5566 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
5567 RTE_PKTMBUF_HEADROOM);
5568 srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
5569 IXGBE_SRRCTL_BSIZEPKT_MASK);
5572 * VF modification to write virtual function SRRCTL register
5574 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), srrctl);
5576 buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
5577 IXGBE_SRRCTL_BSIZEPKT_SHIFT);
5579 if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
5580 /* Add dual VLAN tag length to support dual VLAN */
5581 (rxmode->max_rx_pkt_len +
5582 2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
5583 if (!dev->data->scattered_rx)
5584 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
5585 dev->data->scattered_rx = 1;
5588 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
5589 rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
5592 /* Set RQPL for VF RSS according to max Rx queue */
5593 psrtype |= (dev->data->nb_rx_queues >> 1) <<
5594 IXGBE_PSRTYPE_RQPL_SHIFT;
5595 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
5597 ixgbe_set_rx_function(dev);
5603 * [VF] Initializes Transmit Unit.
5605 void __attribute__((cold))
5606 ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
5608 struct ixgbe_hw *hw;
5609 struct ixgbe_tx_queue *txq;
5614 PMD_INIT_FUNC_TRACE();
5615 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5617 /* Setup the Base and Length of the Tx Descriptor Rings */
5618 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5619 txq = dev->data->tx_queues[i];
5620 bus_addr = txq->tx_ring_phys_addr;
5621 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
5622 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
5623 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i),
5624 (uint32_t)(bus_addr >> 32));
5625 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
5626 txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
5627 /* Setup the HW Tx Head and TX Tail descriptor pointers */
5628 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
5629 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
5632 * Disable Tx Head Writeback RO bit, since this hoses
5633 * bookkeeping if things aren't delivered in order.
5635 txctrl = IXGBE_READ_REG(hw,
5636 IXGBE_VFDCA_TXCTRL(i));
5637 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
5638 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i),
5644 * [VF] Start Transmit and Receive Units.
5646 void __attribute__((cold))
5647 ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
5649 struct ixgbe_hw *hw;
5650 struct ixgbe_tx_queue *txq;
5651 struct ixgbe_rx_queue *rxq;
5657 PMD_INIT_FUNC_TRACE();
5658 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5660 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5661 txq = dev->data->tx_queues[i];
5662 /* Setup Transmit Threshold Registers */
5663 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
5664 txdctl |= txq->pthresh & 0x7F;
5665 txdctl |= ((txq->hthresh & 0x7F) << 8);
5666 txdctl |= ((txq->wthresh & 0x7F) << 16);
5667 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
5670 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5672 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
5673 txdctl |= IXGBE_TXDCTL_ENABLE;
5674 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
5677 /* Wait until TX Enable ready */
5680 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
5681 } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
5683 PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i);
5685 for (i = 0; i < dev->data->nb_rx_queues; i++) {
5687 rxq = dev->data->rx_queues[i];
5689 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
5690 rxdctl |= IXGBE_RXDCTL_ENABLE;
5691 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
5693 /* Wait until RX Enable ready */
5697 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
5698 } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
5700 PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i);
5702 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1);
5708 ixgbe_rss_conf_init(struct ixgbe_rte_flow_rss_conf *out,
5709 const struct rte_flow_action_rss *in)
5711 if (in->key_len > RTE_DIM(out->key) ||
5712 in->queue_num > RTE_DIM(out->queue))
5714 out->conf = (struct rte_flow_action_rss){
5718 .key_len = in->key_len,
5719 .queue_num = in->queue_num,
5720 .key = memcpy(out->key, in->key, in->key_len),
5721 .queue = memcpy(out->queue, in->queue,
5722 sizeof(*in->queue) * in->queue_num),
5728 ixgbe_action_rss_same(const struct rte_flow_action_rss *comp,
5729 const struct rte_flow_action_rss *with)
5731 return (comp->func == with->func &&
5732 comp->level == with->level &&
5733 comp->types == with->types &&
5734 comp->key_len == with->key_len &&
5735 comp->queue_num == with->queue_num &&
5736 !memcmp(comp->key, with->key, with->key_len) &&
5737 !memcmp(comp->queue, with->queue,
5738 sizeof(*with->queue) * with->queue_num));
5742 ixgbe_config_rss_filter(struct rte_eth_dev *dev,
5743 struct ixgbe_rte_flow_rss_conf *conf, bool add)
5745 struct ixgbe_hw *hw;
5749 uint16_t sp_reta_size;
5751 struct rte_eth_rss_conf rss_conf = {
5752 .rss_key = conf->conf.key_len ?
5753 (void *)(uintptr_t)conf->conf.key : NULL,
5754 .rss_key_len = conf->conf.key_len,
5755 .rss_hf = conf->conf.types,
5757 struct ixgbe_filter_info *filter_info =
5758 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5760 PMD_INIT_FUNC_TRACE();
5761 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5763 sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
5766 if (ixgbe_action_rss_same(&filter_info->rss_info.conf,
5768 ixgbe_rss_disable(dev);
5769 memset(&filter_info->rss_info, 0,
5770 sizeof(struct ixgbe_rte_flow_rss_conf));
5776 if (filter_info->rss_info.conf.queue_num)
5778 /* Fill in redirection table
5779 * The byte-swap is needed because NIC registers are in
5780 * little-endian order.
5783 for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
5784 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
5786 if (j == conf->conf.queue_num)
5788 reta = (reta << 8) | conf->conf.queue[j];
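/* Four 8-bit queue indices are packed into each 32-bit RETA register;
 * the table is filled round-robin from conf->conf.queue[].
 */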
5790 IXGBE_WRITE_REG(hw, reta_reg,
5794 /* Configure the RSS key and the RSS protocols used to compute
5795 * the RSS hash of input packets.
5797 if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
5798 ixgbe_rss_disable(dev);
5801 if (rss_conf.rss_key == NULL)
5802 rss_conf.rss_key = rss_intel_key; /* Default hash key */
5803 ixgbe_hw_rss_hash_set(hw, &rss_conf);
5805 if (ixgbe_rss_conf_init(&filter_info->rss_info, &conf->conf))
5811 /* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */
5813 ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
5819 ixgbe_recv_pkts_vec(
5820 void __rte_unused *rx_queue,
5821 struct rte_mbuf __rte_unused **rx_pkts,
5822 uint16_t __rte_unused nb_pkts)
5828 ixgbe_recv_scattered_pkts_vec(
5829 void __rte_unused *rx_queue,
5830 struct rte_mbuf __rte_unused **rx_pkts,
5831 uint16_t __rte_unused nb_pkts)
5837 ixgbe_rxq_vec_setup(struct ixgbe_rx_queue __rte_unused *rxq)