1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation.
3 * Copyright 2014 6WIND S.A.
17 #include <rte_byteorder.h>
18 #include <rte_common.h>
19 #include <rte_cycles.h>
21 #include <rte_debug.h>
22 #include <rte_interrupts.h>
24 #include <rte_memory.h>
25 #include <rte_memzone.h>
26 #include <rte_launch.h>
28 #include <rte_per_lcore.h>
29 #include <rte_lcore.h>
30 #include <rte_atomic.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_mempool.h>
33 #include <rte_malloc.h>
35 #include <rte_ether.h>
36 #include <rte_ethdev_driver.h>
37 #include <rte_prefetch.h>
41 #include <rte_string_fns.h>
42 #include <rte_errno.h>
46 #include "ixgbe_logs.h"
47 #include "base/ixgbe_api.h"
48 #include "base/ixgbe_vf.h"
49 #include "ixgbe_ethdev.h"
50 #include "base/ixgbe_dcb.h"
51 #include "base/ixgbe_common.h"
52 #include "ixgbe_rxtx.h"
54 #ifdef RTE_LIBRTE_IEEE1588
55 #define IXGBE_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
57 #define IXGBE_TX_IEEE1588_TMST 0
59 /* Bit mask to indicate which bits are required for building the TX context */
60 #define IXGBE_TX_OFFLOAD_MASK ( \
70 PKT_TX_OUTER_IP_CKSUM | \
71 PKT_TX_SEC_OFFLOAD | \
72 IXGBE_TX_IEEE1588_TMST)
74 #define IXGBE_TX_OFFLOAD_NOTSUP_MASK \
75 (PKT_TX_OFFLOAD_MASK ^ IXGBE_TX_OFFLOAD_MASK)
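/*
 * Illustrative usage sketch (not additional driver logic): because
 * IXGBE_TX_OFFLOAD_MASK lists every PKT_TX_* flag this PMD supports,
 * XOR-ing it with PKT_TX_OFFLOAD_MASK leaves exactly the unsupported
 * flags, so a validity check reduces to a single AND, as done later in
 * ixgbe_prep_pkts():
 *
 *	if (mbuf->ol_flags & IXGBE_TX_OFFLOAD_NOTSUP_MASK)
 *		reject the mbuf and set rte_errno;
 */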
78 #define RTE_PMD_USE_PREFETCH
81 #ifdef RTE_PMD_USE_PREFETCH
83 * Prefetch a cache line into all cache levels.
85 #define rte_ixgbe_prefetch(p) rte_prefetch0(p)
87 #define rte_ixgbe_prefetch(p) do {} while (0)
90 #ifdef RTE_IXGBE_INC_VECTOR
91 uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
95 /*********************************************************************
99 **********************************************************************/
102 * Check for descriptors with their DD bit set and free mbufs.
103 * Return the total number of buffers freed.
105 static __rte_always_inline int
106 ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
108 struct ixgbe_tx_entry *txep;
111 struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];
113 /* check DD bit on threshold descriptor */
114 status = txq->tx_ring[txq->tx_next_dd].wb.status;
115 if (!(status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD)))
119 * first buffer to free from S/W ring is at index
120 * tx_next_dd - (tx_rs_thresh-1)
122 txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]);
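/*
 * Worked example (hypothetical values): with tx_rs_thresh = 32 and
 * tx_next_dd = 31, the first entry to free is
 * sw_ring[31 - (32 - 1)] = sw_ring[0], and the loop below releases
 * sw_ring[0] .. sw_ring[31], i.e. one full RS-threshold worth of mbufs.
 */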
124 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
125 /* free buffers one at a time */
126 m = rte_pktmbuf_prefree_seg(txep->mbuf);
129 if (unlikely(m == NULL))
132 if (nb_free >= RTE_IXGBE_TX_MAX_FREE_BUF_SZ ||
133 (nb_free > 0 && m->pool != free[0]->pool)) {
134 rte_mempool_put_bulk(free[0]->pool,
135 (void **)free, nb_free);
143 rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
145 /* buffers were freed, update counters */
146 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
147 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
148 if (txq->tx_next_dd >= txq->nb_tx_desc)
149 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
151 return txq->tx_rs_thresh;
154 /* Populate 4 descriptors with data from 4 mbufs */
156 tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
158 uint64_t buf_dma_addr;
162 for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
163 buf_dma_addr = rte_mbuf_data_iova(*pkts);
164 pkt_len = (*pkts)->data_len;
166 /* write data to descriptor */
167 txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
169 txdp->read.cmd_type_len =
170 rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
172 txdp->read.olinfo_status =
173 rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
175 rte_prefetch0(&(*pkts)->pool);
179 /* Populate 1 descriptor with data from 1 mbuf */
181 tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
183 uint64_t buf_dma_addr;
186 buf_dma_addr = rte_mbuf_data_iova(*pkts);
187 pkt_len = (*pkts)->data_len;
189 /* write data to descriptor */
190 txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
191 txdp->read.cmd_type_len =
192 rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
193 txdp->read.olinfo_status =
194 rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
195 rte_prefetch0(&(*pkts)->pool);
199 * Fill H/W descriptor ring with mbuf data.
200 * Copy mbuf pointers to the S/W ring.
203 ixgbe_tx_fill_hw_ring(struct ixgbe_tx_queue *txq, struct rte_mbuf **pkts,
206 volatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
207 struct ixgbe_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
208 const int N_PER_LOOP = 4;
209 const int N_PER_LOOP_MASK = N_PER_LOOP-1;
210 int mainpart, leftover;
214 * Process most of the packets in chunks of N pkts. Any
215 * leftover packets will get processed one at a time.
217 mainpart = (nb_pkts & ((uint32_t) ~N_PER_LOOP_MASK));
218 leftover = (nb_pkts & ((uint32_t) N_PER_LOOP_MASK));
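/*
 * Worked example (hypothetical count): with nb_pkts = 13 and
 * N_PER_LOOP = 4, mainpart = 13 & ~3 = 12 and leftover = 13 & 3 = 1,
 * so tx4() is invoked three times and tx1() once.
 */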
219 for (i = 0; i < mainpart; i += N_PER_LOOP) {
220 /* Copy N mbuf pointers to the S/W ring */
221 for (j = 0; j < N_PER_LOOP; ++j) {
222 (txep + i + j)->mbuf = *(pkts + i + j);
224 tx4(txdp + i, pkts + i);
227 if (unlikely(leftover > 0)) {
228 for (i = 0; i < leftover; ++i) {
229 (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
230 tx1(txdp + mainpart + i, pkts + mainpart + i);
235 static inline uint16_t
236 tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
239 struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
240 volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring;
244 * Begin scanning the H/W ring for done descriptors when the
245 * number of available descriptors drops below tx_free_thresh. For
246 * each done descriptor, free the associated buffer.
248 if (txq->nb_tx_free < txq->tx_free_thresh)
249 ixgbe_tx_free_bufs(txq);
251 /* Only use descriptors that are available */
252 nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
253 if (unlikely(nb_pkts == 0))
256 /* Use exactly nb_pkts descriptors */
257 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
260 * At this point, we know there are enough descriptors in the
261 * ring to transmit all the packets. This assumes that each
262 * mbuf contains a single segment, and that no new offloads
263 * are expected, which would require a new context descriptor.
267 * See if we're going to wrap-around. If so, handle the top
268 * of the descriptor ring first, then do the bottom. If not,
269 * the processing looks just like the "bottom" part anyway...
271 if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
272 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
273 ixgbe_tx_fill_hw_ring(txq, tx_pkts, n);
276 * We know that the last descriptor in the ring will need to
277 * have its RS bit set because tx_rs_thresh has to be
278 * a divisor of the ring size
280 tx_r[txq->tx_next_rs].read.cmd_type_len |=
281 rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
282 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
287 /* Fill H/W descriptor ring with mbuf data */
288 ixgbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
289 txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
292 * Determine if RS bit should be set
293 * This is what we actually want:
294 * if ((txq->tx_tail - 1) >= txq->tx_next_rs)
295 * but instead of subtracting 1 and doing >=, we can just do
296 * greater than without subtracting.
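 * For example (hypothetical values): with tx_rs_thresh = 32 the RS
 * thresholds sit at indices 31, 63, 95, ...; if tx_tail has just
 * advanced from 30 to 35, then tx_tail (35) > tx_next_rs (31), so the
 * RS bit is set on descriptor 31 and tx_next_rs moves on to 63.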
298 if (txq->tx_tail > txq->tx_next_rs) {
299 tx_r[txq->tx_next_rs].read.cmd_type_len |=
300 rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
301 txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
303 if (txq->tx_next_rs >= txq->nb_tx_desc)
304 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
308 * Check for wrap-around. This would only happen if we used
309 * up to the last descriptor in the ring, no more, no less.
311 if (txq->tx_tail >= txq->nb_tx_desc)
314 /* update tail pointer */
316 IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, txq->tx_tail);
322 ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
327 /* Try to transmit at least chunks of TX_MAX_BURST pkts */
328 if (likely(nb_pkts <= RTE_PMD_IXGBE_TX_MAX_BURST))
329 return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
331 /* transmit more than the max burst, in chunks of TX_MAX_BURST */
336 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_TX_MAX_BURST);
337 ret = tx_xmit_pkts(tx_queue, &(tx_pkts[nb_tx]), n);
338 nb_tx = (uint16_t)(nb_tx + ret);
339 nb_pkts = (uint16_t)(nb_pkts - ret);
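/*
 * Application-side sketch (hypothetical port/queue ids and packet array):
 * a caller of the ethdev burst API handles partial transmission the same
 * way this wrapper does, by advancing past however many packets were
 * actually accepted:
 *
 *	uint16_t sent = 0;
 *	while (sent < n_total) {
 *		uint16_t done = rte_eth_tx_burst(port_id, queue_id,
 *						 &pkts[sent], n_total - sent);
 *		if (done == 0)
 *			break;	// TX ring full; retry later
 *		sent += done;
 *	}
 */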
347 #ifdef RTE_IXGBE_INC_VECTOR
349 ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
353 struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
358 num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
359 ret = ixgbe_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
372 ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
373 volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
374 uint64_t ol_flags, union ixgbe_tx_offload tx_offload,
375 __rte_unused uint64_t *mdata)
377 uint32_t type_tucmd_mlhl;
378 uint32_t mss_l4len_idx = 0;
380 uint32_t vlan_macip_lens;
381 union ixgbe_tx_offload tx_offload_mask;
382 uint32_t seqnum_seed = 0;
384 ctx_idx = txq->ctx_curr;
385 tx_offload_mask.data[0] = 0;
386 tx_offload_mask.data[1] = 0;
389 /* Specify which HW CTX to upload. */
390 mss_l4len_idx |= (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT);
392 if (ol_flags & PKT_TX_VLAN_PKT) {
393 tx_offload_mask.vlan_tci |= ~0;
396 /* check if TCP segmentation is required for this packet */
397 if (ol_flags & PKT_TX_TCP_SEG) {
398 /* implies IP cksum in IPv4 */
399 if (ol_flags & PKT_TX_IP_CKSUM)
400 type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4 |
401 IXGBE_ADVTXD_TUCMD_L4T_TCP |
402 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
404 type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV6 |
405 IXGBE_ADVTXD_TUCMD_L4T_TCP |
406 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
408 tx_offload_mask.l2_len |= ~0;
409 tx_offload_mask.l3_len |= ~0;
410 tx_offload_mask.l4_len |= ~0;
411 tx_offload_mask.tso_segsz |= ~0;
412 mss_l4len_idx |= tx_offload.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT;
413 mss_l4len_idx |= tx_offload.l4_len << IXGBE_ADVTXD_L4LEN_SHIFT;
414 } else { /* no TSO, check if hardware checksum is needed */
415 if (ol_flags & PKT_TX_IP_CKSUM) {
416 type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4;
417 tx_offload_mask.l2_len |= ~0;
418 tx_offload_mask.l3_len |= ~0;
421 switch (ol_flags & PKT_TX_L4_MASK) {
422 case PKT_TX_UDP_CKSUM:
423 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP |
424 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
425 mss_l4len_idx |= sizeof(struct rte_udp_hdr)
426 << IXGBE_ADVTXD_L4LEN_SHIFT;
427 tx_offload_mask.l2_len |= ~0;
428 tx_offload_mask.l3_len |= ~0;
430 case PKT_TX_TCP_CKSUM:
431 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP |
432 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
433 mss_l4len_idx |= sizeof(struct rte_tcp_hdr)
434 << IXGBE_ADVTXD_L4LEN_SHIFT;
435 tx_offload_mask.l2_len |= ~0;
436 tx_offload_mask.l3_len |= ~0;
438 case PKT_TX_SCTP_CKSUM:
439 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP |
440 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
441 mss_l4len_idx |= sizeof(struct rte_sctp_hdr)
442 << IXGBE_ADVTXD_L4LEN_SHIFT;
443 tx_offload_mask.l2_len |= ~0;
444 tx_offload_mask.l3_len |= ~0;
447 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_RSV |
448 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
453 if (ol_flags & PKT_TX_OUTER_IP_CKSUM) {
454 tx_offload_mask.outer_l2_len |= ~0;
455 tx_offload_mask.outer_l3_len |= ~0;
456 tx_offload_mask.l2_len |= ~0;
457 seqnum_seed |= tx_offload.outer_l3_len
458 << IXGBE_ADVTXD_OUTER_IPLEN;
459 seqnum_seed |= tx_offload.l2_len
460 << IXGBE_ADVTXD_TUNNEL_LEN;
462 #ifdef RTE_LIBRTE_SECURITY
463 if (ol_flags & PKT_TX_SEC_OFFLOAD) {
464 union ixgbe_crypto_tx_desc_md *md =
465 (union ixgbe_crypto_tx_desc_md *)mdata;
467 (IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK & md->sa_idx);
468 type_tucmd_mlhl |= md->enc ?
469 (IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
470 IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN) : 0;
472 (md->pad_len & IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK);
473 tx_offload_mask.sa_idx |= ~0;
474 tx_offload_mask.sec_pad_len |= ~0;
478 txq->ctx_cache[ctx_idx].flags = ol_flags;
479 txq->ctx_cache[ctx_idx].tx_offload.data[0] =
480 tx_offload_mask.data[0] & tx_offload.data[0];
481 txq->ctx_cache[ctx_idx].tx_offload.data[1] =
482 tx_offload_mask.data[1] & tx_offload.data[1];
483 txq->ctx_cache[ctx_idx].tx_offload_mask = tx_offload_mask;
485 ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
486 vlan_macip_lens = tx_offload.l3_len;
487 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
488 vlan_macip_lens |= (tx_offload.outer_l2_len <<
489 IXGBE_ADVTXD_MACLEN_SHIFT);
491 vlan_macip_lens |= (tx_offload.l2_len <<
492 IXGBE_ADVTXD_MACLEN_SHIFT);
493 vlan_macip_lens |= ((uint32_t)tx_offload.vlan_tci << IXGBE_ADVTXD_VLAN_SHIFT);
494 ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
495 ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
496 ctx_txd->seqnum_seed = seqnum_seed;
500 * Check which hardware context can be used. Use the existing match
501 * or create a new context descriptor.
503 static inline uint32_t
504 what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
505 union ixgbe_tx_offload tx_offload)
507 /* If it matches the currently used context */
508 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
509 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
510 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
511 & tx_offload.data[0])) &&
512 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
513 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
514 & tx_offload.data[1]))))
515 return txq->ctx_curr;
517 /* Otherwise, check whether the next context matches */
519 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
520 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
521 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
522 & tx_offload.data[0])) &&
523 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
524 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
525 & tx_offload.data[1]))))
526 return txq->ctx_curr;
528 /* No match: signal the caller that a new context descriptor is needed */
529 return IXGBE_CTX_NUM;
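/*
 * Caller-side pattern (see ixgbe_xmit_pkts() below): a return value of
 * IXGBE_CTX_NUM means neither cached context matched, so the caller must
 * spend one extra descriptor on a fresh context:
 *
 *	ctx = what_advctx_update(txq, tx_ol_req, tx_offload);
 *	new_ctx = (ctx == IXGBE_CTX_NUM);
 *	nb_used = tx_pkt->nb_segs + new_ctx;
 */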
532 static inline uint32_t
533 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
537 if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM)
538 tmp |= IXGBE_ADVTXD_POPTS_TXSM;
539 if (ol_flags & PKT_TX_IP_CKSUM)
540 tmp |= IXGBE_ADVTXD_POPTS_IXSM;
541 if (ol_flags & PKT_TX_TCP_SEG)
542 tmp |= IXGBE_ADVTXD_POPTS_TXSM;
546 static inline uint32_t
547 tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
549 uint32_t cmdtype = 0;
551 if (ol_flags & PKT_TX_VLAN_PKT)
552 cmdtype |= IXGBE_ADVTXD_DCMD_VLE;
553 if (ol_flags & PKT_TX_TCP_SEG)
554 cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
555 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
556 cmdtype |= (1 << IXGBE_ADVTXD_OUTERIPCS_SHIFT);
557 if (ol_flags & PKT_TX_MACSEC)
558 cmdtype |= IXGBE_ADVTXD_MAC_LINKSEC;
562 /* Default RS bit threshold values */
563 #ifndef DEFAULT_TX_RS_THRESH
564 #define DEFAULT_TX_RS_THRESH 32
566 #ifndef DEFAULT_TX_FREE_THRESH
567 #define DEFAULT_TX_FREE_THRESH 32
570 /* Reset transmit descriptors after they have been used */
572 ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
574 struct ixgbe_tx_entry *sw_ring = txq->sw_ring;
575 volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring;
576 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
577 uint16_t nb_tx_desc = txq->nb_tx_desc;
578 uint16_t desc_to_clean_to;
579 uint16_t nb_tx_to_clean;
582 /* Determine the last descriptor needing to be cleaned */
583 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
584 if (desc_to_clean_to >= nb_tx_desc)
585 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
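/*
 * Worked example (hypothetical sizes): with nb_tx_desc = 512,
 * last_desc_cleaned = 500 and tx_rs_thresh = 32, desc_to_clean_to wraps
 * to 532 - 512 = 20, and the cleaned count computed below is
 * (512 - 500) + 20 = 32 descriptors, i.e. one RS-threshold batch.
 */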
587 /* Check to make sure the last descriptor to clean is done */
588 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
589 status = txr[desc_to_clean_to].wb.status;
590 if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD))) {
591 PMD_TX_FREE_LOG(DEBUG,
592 "TX descriptor %4u is not done"
593 "(port=%d queue=%d)",
595 txq->port_id, txq->queue_id);
596 /* Failed to clean any descriptors, better luck next time */
600 /* Figure out how many descriptors will be cleaned */
601 if (last_desc_cleaned > desc_to_clean_to)
602 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
605 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
608 PMD_TX_FREE_LOG(DEBUG,
609 "Cleaning %4u TX descriptors: %4u to %4u "
610 "(port=%d queue=%d)",
611 nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
612 txq->port_id, txq->queue_id);
615 * The last descriptor to clean is done, so that means all the
616 * descriptors from the last descriptor that was cleaned
617 * up to the last descriptor with the RS bit set
618 * are done. Only reset the threshold descriptor.
620 txr[desc_to_clean_to].wb.status = 0;
622 /* Update the txq to reflect the last descriptor that was cleaned */
623 txq->last_desc_cleaned = desc_to_clean_to;
624 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
631 ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
634 struct ixgbe_tx_queue *txq;
635 struct ixgbe_tx_entry *sw_ring;
636 struct ixgbe_tx_entry *txe, *txn;
637 volatile union ixgbe_adv_tx_desc *txr;
638 volatile union ixgbe_adv_tx_desc *txd, *txp;
639 struct rte_mbuf *tx_pkt;
640 struct rte_mbuf *m_seg;
641 uint64_t buf_dma_addr;
642 uint32_t olinfo_status;
643 uint32_t cmd_type_len;
654 union ixgbe_tx_offload tx_offload;
655 #ifdef RTE_LIBRTE_SECURITY
659 tx_offload.data[0] = 0;
660 tx_offload.data[1] = 0;
662 sw_ring = txq->sw_ring;
664 tx_id = txq->tx_tail;
665 txe = &sw_ring[tx_id];
668 /* Determine if the descriptor ring needs to be cleaned. */
669 if (txq->nb_tx_free < txq->tx_free_thresh)
670 ixgbe_xmit_cleanup(txq);
672 rte_prefetch0(&txe->mbuf->pool);
675 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
678 pkt_len = tx_pkt->pkt_len;
681 * Determine how many (if any) context descriptors
682 * are needed for offload functionality.
684 ol_flags = tx_pkt->ol_flags;
685 #ifdef RTE_LIBRTE_SECURITY
686 use_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD);
689 /* If hardware offload required */
690 tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK;
692 tx_offload.l2_len = tx_pkt->l2_len;
693 tx_offload.l3_len = tx_pkt->l3_len;
694 tx_offload.l4_len = tx_pkt->l4_len;
695 tx_offload.vlan_tci = tx_pkt->vlan_tci;
696 tx_offload.tso_segsz = tx_pkt->tso_segsz;
697 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
698 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
699 #ifdef RTE_LIBRTE_SECURITY
701 union ixgbe_crypto_tx_desc_md *ipsec_mdata =
702 (union ixgbe_crypto_tx_desc_md *)
704 tx_offload.sa_idx = ipsec_mdata->sa_idx;
705 tx_offload.sec_pad_len = ipsec_mdata->pad_len;
709 /* Decide whether a new context descriptor must be built or an existing one can be reused. */
710 ctx = what_advctx_update(txq, tx_ol_req,
712 /* Only allocate a context descriptor if required */
713 new_ctx = (ctx == IXGBE_CTX_NUM);
718 * Keep track of how many descriptors are used in this loop.
719 * This will always be the number of segments plus the number of
720 * context descriptors required to transmit the packet.
722 nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
725 nb_used + txq->nb_tx_used >= txq->tx_rs_thresh)
726 /* set RS on the previous packet in the burst */
727 txp->read.cmd_type_len |=
728 rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
731 * The number of descriptors that must be allocated for a
732 * packet is the number of segments of that packet, plus 1
733 * Context Descriptor for the hardware offload, if any.
734 * Determine the last TX descriptor to allocate in the TX ring
735 * for the packet, starting from the current position (tx_id)
738 tx_last = (uint16_t) (tx_id + nb_used - 1);
741 if (tx_last >= txq->nb_tx_desc)
742 tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
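/*
 * Example (hypothetical values): with nb_tx_desc = 512, tx_id = 510 and
 * nb_used = 4, tx_last is 513 before the check and wraps to index 1.
 */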
744 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
745 " tx_first=%u tx_last=%u",
746 (unsigned) txq->port_id,
747 (unsigned) txq->queue_id,
753 * Make sure there are enough TX descriptors available to
754 * transmit the entire packet.
755 * nb_used better be less than or equal to txq->tx_rs_thresh
757 if (nb_used > txq->nb_tx_free) {
758 PMD_TX_FREE_LOG(DEBUG,
759 "Not enough free TX descriptors "
760 "nb_used=%4u nb_free=%4u "
761 "(port=%d queue=%d)",
762 nb_used, txq->nb_tx_free,
763 txq->port_id, txq->queue_id);
765 if (ixgbe_xmit_cleanup(txq) != 0) {
766 /* Could not clean any descriptors */
772 /* nb_used better be <= txq->tx_rs_thresh */
773 if (unlikely(nb_used > txq->tx_rs_thresh)) {
774 PMD_TX_FREE_LOG(DEBUG,
775 "The number of descriptors needed to "
776 "transmit the packet exceeds the "
777 "RS bit threshold. This will impact "
779 "nb_used=%4u nb_free=%4u "
781 "(port=%d queue=%d)",
782 nb_used, txq->nb_tx_free,
784 txq->port_id, txq->queue_id);
786 * Loop here until there are enough TX
787 * descriptors or until the ring cannot be
790 while (nb_used > txq->nb_tx_free) {
791 if (ixgbe_xmit_cleanup(txq) != 0) {
793 * Could not clean any
805 * By now there are enough free TX descriptors to transmit
810 * Set common flags of all TX Data Descriptors.
812 * The following bits must be set in all Data Descriptors:
813 * - IXGBE_ADVTXD_DTYP_DATA
814 * - IXGBE_ADVTXD_DCMD_DEXT
816 * The following bits must be set in the first Data Descriptor
817 * and are ignored in the other ones:
818 * - IXGBE_ADVTXD_DCMD_IFCS
819 * - IXGBE_ADVTXD_MAC_1588
820 * - IXGBE_ADVTXD_DCMD_VLE
822 * The following bits must only be set in the last Data
824 * - IXGBE_TXD_CMD_EOP
826 * The following bits can be set in any Data Descriptor, but
827 * are only set in the last Data Descriptor:
830 cmd_type_len = IXGBE_ADVTXD_DTYP_DATA |
831 IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
833 #ifdef RTE_LIBRTE_IEEE1588
834 if (ol_flags & PKT_TX_IEEE1588_TMST)
835 cmd_type_len |= IXGBE_ADVTXD_MAC_1588;
841 if (ol_flags & PKT_TX_TCP_SEG) {
842 /* when TSO is on, the paylen in the descriptor is
843 * not the packet length but the TCP payload length */
844 pkt_len -= (tx_offload.l2_len +
845 tx_offload.l3_len + tx_offload.l4_len);
849 * Setup the TX Advanced Context Descriptor if required
852 volatile struct ixgbe_adv_tx_context_desc *
855 ctx_txd = (volatile struct
856 ixgbe_adv_tx_context_desc *)
859 txn = &sw_ring[txe->next_id];
860 rte_prefetch0(&txn->mbuf->pool);
862 if (txe->mbuf != NULL) {
863 rte_pktmbuf_free_seg(txe->mbuf);
867 ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
868 tx_offload, &tx_pkt->udata64);
870 txe->last_id = tx_last;
871 tx_id = txe->next_id;
876 * Set up the TX Advanced Data Descriptor.
877 * This path is taken whether a new context
878 * descriptor was built or an existing one is reused.
880 cmd_type_len |= tx_desc_ol_flags_to_cmdtype(ol_flags);
881 olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
882 olinfo_status |= ctx << IXGBE_ADVTXD_IDX_SHIFT;
885 olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
886 #ifdef RTE_LIBRTE_SECURITY
888 olinfo_status |= IXGBE_ADVTXD_POPTS_IPSEC;
894 txn = &sw_ring[txe->next_id];
895 rte_prefetch0(&txn->mbuf->pool);
897 if (txe->mbuf != NULL)
898 rte_pktmbuf_free_seg(txe->mbuf);
902 * Set up Transmit Data Descriptor.
904 slen = m_seg->data_len;
905 buf_dma_addr = rte_mbuf_data_iova(m_seg);
906 txd->read.buffer_addr =
907 rte_cpu_to_le_64(buf_dma_addr);
908 txd->read.cmd_type_len =
909 rte_cpu_to_le_32(cmd_type_len | slen);
910 txd->read.olinfo_status =
911 rte_cpu_to_le_32(olinfo_status);
912 txe->last_id = tx_last;
913 tx_id = txe->next_id;
916 } while (m_seg != NULL);
919 * The last packet data descriptor needs End Of Packet (EOP)
921 cmd_type_len |= IXGBE_TXD_CMD_EOP;
922 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
923 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
925 /* Set RS bit only on threshold packets' last descriptor */
926 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
927 PMD_TX_FREE_LOG(DEBUG,
928 "Setting RS bit on TXD id="
929 "%4u (port=%d queue=%d)",
930 tx_last, txq->port_id, txq->queue_id);
932 cmd_type_len |= IXGBE_TXD_CMD_RS;
934 /* Update txq RS bit counters */
940 txd->read.cmd_type_len |= rte_cpu_to_le_32(cmd_type_len);
944 /* set RS on last packet in the burst */
946 txp->read.cmd_type_len |= rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
951 * Set the Transmit Descriptor Tail (TDT)
953 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
954 (unsigned) txq->port_id, (unsigned) txq->queue_id,
955 (unsigned) tx_id, (unsigned) nb_tx);
956 IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
957 txq->tx_tail = tx_id;
962 /*********************************************************************
966 **********************************************************************/
968 ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
973 struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
975 for (i = 0; i < nb_pkts; i++) {
977 ol_flags = m->ol_flags;
980 * Check if packet meets requirements for number of segments
982 * NOTE: for ixgbe it's always (40 - WTHRESH) for both TSO and
986 if (m->nb_segs > IXGBE_TX_MAX_SEG - txq->wthresh) {
991 if (ol_flags & IXGBE_TX_OFFLOAD_NOTSUP_MASK) {
992 rte_errno = -ENOTSUP;
996 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
997 ret = rte_validate_tx_offload(m);
1003 ret = rte_net_intel_cksum_prepare(m);
1013 /*********************************************************************
1017 **********************************************************************/
1019 #define IXGBE_PACKET_TYPE_ETHER 0X00
1020 #define IXGBE_PACKET_TYPE_IPV4 0X01
1021 #define IXGBE_PACKET_TYPE_IPV4_TCP 0X11
1022 #define IXGBE_PACKET_TYPE_IPV4_UDP 0X21
1023 #define IXGBE_PACKET_TYPE_IPV4_SCTP 0X41
1024 #define IXGBE_PACKET_TYPE_IPV4_EXT 0X03
1025 #define IXGBE_PACKET_TYPE_IPV4_EXT_TCP 0X13
1026 #define IXGBE_PACKET_TYPE_IPV4_EXT_UDP 0X23
1027 #define IXGBE_PACKET_TYPE_IPV4_EXT_SCTP 0X43
1028 #define IXGBE_PACKET_TYPE_IPV6 0X04
1029 #define IXGBE_PACKET_TYPE_IPV6_TCP 0X14
1030 #define IXGBE_PACKET_TYPE_IPV6_UDP 0X24
1031 #define IXGBE_PACKET_TYPE_IPV6_SCTP 0X44
1032 #define IXGBE_PACKET_TYPE_IPV6_EXT 0X0C
1033 #define IXGBE_PACKET_TYPE_IPV6_EXT_TCP 0X1C
1034 #define IXGBE_PACKET_TYPE_IPV6_EXT_UDP 0X2C
1035 #define IXGBE_PACKET_TYPE_IPV6_EXT_SCTP 0X4C
1036 #define IXGBE_PACKET_TYPE_IPV4_IPV6 0X05
1037 #define IXGBE_PACKET_TYPE_IPV4_IPV6_TCP 0X15
1038 #define IXGBE_PACKET_TYPE_IPV4_IPV6_UDP 0X25
1039 #define IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP 0X45
1040 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6 0X07
1041 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP 0X17
1042 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP 0X27
1043 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP 0X47
1044 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT 0X0D
1045 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
1046 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
1047 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP 0X4D
1048 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT 0X0F
1049 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP 0X1F
1050 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP 0X2F
1051 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP 0X4F
1053 #define IXGBE_PACKET_TYPE_NVGRE 0X00
1054 #define IXGBE_PACKET_TYPE_NVGRE_IPV4 0X01
1055 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_TCP 0X11
1056 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP 0X21
1057 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP 0X41
1058 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT 0X03
1059 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP 0X13
1060 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP 0X23
1061 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP 0X43
1062 #define IXGBE_PACKET_TYPE_NVGRE_IPV6 0X04
1063 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP 0X14
1064 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP 0X24
1065 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP 0X44
1066 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT 0X0C
1067 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP 0X1C
1068 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP 0X2C
1069 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP 0X4C
1070 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6 0X05
1071 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP 0X15
1072 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP 0X25
1073 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT 0X0D
1074 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP 0X1D
1075 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP 0X2D
1077 #define IXGBE_PACKET_TYPE_VXLAN 0X80
1078 #define IXGBE_PACKET_TYPE_VXLAN_IPV4 0X81
1079 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_TCP 0x91
1080 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP 0xA1
1081 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP 0xC1
1082 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT 0x83
1083 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP 0X93
1084 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP 0XA3
1085 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP 0XC3
1086 #define IXGBE_PACKET_TYPE_VXLAN_IPV6 0X84
1087 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP 0X94
1088 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP 0XA4
1089 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP 0XC4
1090 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT 0X8C
1091 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP 0X9C
1092 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP 0XAC
1093 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP 0XCC
1094 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6 0X85
1095 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP 0X95
1096 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP 0XA5
1097 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT 0X8D
1098 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP 0X9D
1099 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP 0XAD
1102 * Use 2 different tables for normal packets and tunnel packets
1103 * to save space.
1106 ptype_table[IXGBE_PACKET_TYPE_MAX] __rte_cache_aligned = {
1107 [IXGBE_PACKET_TYPE_ETHER] = RTE_PTYPE_L2_ETHER,
1108 [IXGBE_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
1110 [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
1111 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
1112 [IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
1113 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
1114 [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
1115 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
1116 [IXGBE_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
1117 RTE_PTYPE_L3_IPV4_EXT,
1118 [IXGBE_PACKET_TYPE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1119 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
1120 [IXGBE_PACKET_TYPE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1121 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
1122 [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1123 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
1124 [IXGBE_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
1126 [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1127 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
1128 [IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1129 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
1130 [IXGBE_PACKET_TYPE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1131 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP,
1132 [IXGBE_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1133 RTE_PTYPE_L3_IPV6_EXT,
1134 [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1135 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
1136 [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1137 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
1138 [IXGBE_PACKET_TYPE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1139 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_SCTP,
1140 [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
1141 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1142 RTE_PTYPE_INNER_L3_IPV6,
1143 [IXGBE_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1144 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1145 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
1146 [IXGBE_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1147 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1148 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
1149 [IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1150 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1151 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
1152 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6] = RTE_PTYPE_L2_ETHER |
1153 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1154 RTE_PTYPE_INNER_L3_IPV6,
1155 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1156 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1157 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
1158 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1159 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1160 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
1161 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1162 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1163 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
1164 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1165 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1166 RTE_PTYPE_INNER_L3_IPV6_EXT,
1167 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1168 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1169 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
1170 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1171 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1172 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
1173 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1174 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1175 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
1176 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1177 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1178 RTE_PTYPE_INNER_L3_IPV6_EXT,
1179 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1180 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1181 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
1182 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1183 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1184 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
1185 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP] =
1186 RTE_PTYPE_L2_ETHER |
1187 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1188 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
1192 ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX] __rte_cache_aligned = {
1193 [IXGBE_PACKET_TYPE_NVGRE] = RTE_PTYPE_L2_ETHER |
1194 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1195 RTE_PTYPE_INNER_L2_ETHER,
1196 [IXGBE_PACKET_TYPE_NVGRE_IPV4] = RTE_PTYPE_L2_ETHER |
1197 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1198 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1199 [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
1200 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1201 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT,
1202 [IXGBE_PACKET_TYPE_NVGRE_IPV6] = RTE_PTYPE_L2_ETHER |
1203 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1204 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6,
1205 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
1206 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1207 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1208 [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1209 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1210 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT,
1211 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1212 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1213 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1214 [IXGBE_PACKET_TYPE_NVGRE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
1215 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1216 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
1217 RTE_PTYPE_INNER_L4_TCP,
1218 [IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1219 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1220 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
1221 RTE_PTYPE_INNER_L4_TCP,
1222 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1223 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1224 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1225 [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1226 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1227 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
1228 RTE_PTYPE_INNER_L4_TCP,
1229 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP] =
1230 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1231 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
1232 RTE_PTYPE_INNER_L3_IPV4,
1233 [IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
1234 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1235 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
1236 RTE_PTYPE_INNER_L4_UDP,
1237 [IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1238 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1239 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
1240 RTE_PTYPE_INNER_L4_UDP,
1241 [IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1242 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1243 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
1244 RTE_PTYPE_INNER_L4_SCTP,
1245 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1246 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1247 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1248 [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1249 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1250 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
1251 RTE_PTYPE_INNER_L4_UDP,
1252 [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1253 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1254 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
1255 RTE_PTYPE_INNER_L4_SCTP,
1256 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP] =
1257 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1258 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
1259 RTE_PTYPE_INNER_L3_IPV4,
1260 [IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
1261 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1262 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
1263 RTE_PTYPE_INNER_L4_SCTP,
1264 [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1265 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1266 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
1267 RTE_PTYPE_INNER_L4_SCTP,
1268 [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1269 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1270 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
1271 RTE_PTYPE_INNER_L4_TCP,
1272 [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1273 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1274 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
1275 RTE_PTYPE_INNER_L4_UDP,
1277 [IXGBE_PACKET_TYPE_VXLAN] = RTE_PTYPE_L2_ETHER |
1278 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1279 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER,
1280 [IXGBE_PACKET_TYPE_VXLAN_IPV4] = RTE_PTYPE_L2_ETHER |
1281 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1282 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1283 RTE_PTYPE_INNER_L3_IPV4,
1284 [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
1285 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1286 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1287 RTE_PTYPE_INNER_L3_IPV4_EXT,
1288 [IXGBE_PACKET_TYPE_VXLAN_IPV6] = RTE_PTYPE_L2_ETHER |
1289 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1290 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1291 RTE_PTYPE_INNER_L3_IPV6,
1292 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
1293 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1294 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1295 RTE_PTYPE_INNER_L3_IPV4,
1296 [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1297 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1298 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1299 RTE_PTYPE_INNER_L3_IPV6_EXT,
1300 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1301 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1302 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1303 RTE_PTYPE_INNER_L3_IPV4,
1304 [IXGBE_PACKET_TYPE_VXLAN_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
1305 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1306 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1307 RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_TCP,
1308 [IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1309 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1310 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1311 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
1312 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1313 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1314 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1315 RTE_PTYPE_INNER_L3_IPV4,
1316 [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1317 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1318 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1319 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
1320 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP] =
1321 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1322 RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
1323 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1324 [IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
1325 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1326 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1327 RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_UDP,
1328 [IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1329 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1330 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1331 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
1332 [IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1333 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1334 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1335 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
1336 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1337 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1338 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1339 RTE_PTYPE_INNER_L3_IPV4,
1340 [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1341 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1342 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1343 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
1344 [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1345 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1346 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1347 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
1348 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP] =
1349 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1350 RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
1351 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1352 [IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
1353 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1354 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1355 RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_SCTP,
1356 [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1357 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1358 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1359 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_SCTP,
1360 [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1361 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1362 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1363 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP,
1364 [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1365 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1366 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1367 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP,
1370 /* @note: fix ixgbe_dev_supported_ptypes_get() if anything changes here. */
1371 static inline uint32_t
1372 ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask)
1375 if (unlikely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
1376 return RTE_PTYPE_UNKNOWN;
1378 pkt_info = (pkt_info >> IXGBE_PACKET_TYPE_SHIFT) & ptype_mask;
1380 /* For tunnel packet */
1381 if (pkt_info & IXGBE_PACKET_TYPE_TUNNEL_BIT) {
1382 /* Remove the tunnel bit to save space. */
1383 pkt_info &= IXGBE_PACKET_TYPE_MASK_TUNNEL;
1384 return ptype_table_tn[pkt_info];
1388 * For x550, if it's not a tunnel packet,
1389 * the tunnel type bit should be set to 0.
1390 * Reuse the 82599 mask.
1392 pkt_info &= IXGBE_PACKET_TYPE_MASK_82599;
1394 return ptype_table[pkt_info];
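/*
 * Lookup example (values taken from the tables above): a non-tunnel
 * descriptor reporting IXGBE_PACKET_TYPE_IPV4_TCP (0x11) resolves to
 * RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP, while a
 * descriptor with the tunnel bit set is looked up in ptype_table_tn[]
 * instead, after that bit has been masked off.
 */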
1397 static inline uint64_t
1398 ixgbe_rxd_pkt_info_to_pkt_flags(uint16_t pkt_info)
1400 static uint64_t ip_rss_types_map[16] __rte_cache_aligned = {
1401 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
1402 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
1403 PKT_RX_RSS_HASH, 0, 0, 0,
1404 0, 0, 0, PKT_RX_FDIR,
1406 #ifdef RTE_LIBRTE_IEEE1588
1407 static uint64_t ip_pkt_etqf_map[8] = {
1408 0, 0, 0, PKT_RX_IEEE1588_PTP,
1412 if (likely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
1413 return ip_pkt_etqf_map[(pkt_info >> 4) & 0X07] |
1414 ip_rss_types_map[pkt_info & 0XF];
1416 return ip_rss_types_map[pkt_info & 0XF];
1418 return ip_rss_types_map[pkt_info & 0XF];
1422 static inline uint64_t
1423 rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags)
1428 * Check only whether a VLAN is present.
1429 * Do not check whether the L3/L4 Rx checksum was done by the NIC;
1430 * that can be found from the rte_eth_rxmode.offloads flag.
1432 pkt_flags = (rx_status & IXGBE_RXD_STAT_VP) ? vlan_flags : 0;
1434 #ifdef RTE_LIBRTE_IEEE1588
1435 if (rx_status & IXGBE_RXD_STAT_TMST)
1436 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
1441 static inline uint64_t
1442 rx_desc_error_to_pkt_flags(uint32_t rx_status)
1447 * Bit 31: IPE, IPv4 checksum error
1448 * Bit 30: L4I, L4 integrity error
1450 static uint64_t error_to_pkt_flags_map[4] = {
1451 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
1452 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
1453 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD,
1454 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
1456 pkt_flags = error_to_pkt_flags_map[(rx_status >>
1457 IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK];
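/*
 * Worked example: the two error bits described above form a 2-bit index,
 * so a status word with neither bit set selects entry 0 (both checksum
 * GOOD flags) and one with both bits set selects entry 3 (both BAD).
 */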
1459 if ((rx_status & IXGBE_RXD_STAT_OUTERIPCS) &&
1460 (rx_status & IXGBE_RXDADV_ERR_OUTERIPER)) {
1461 pkt_flags |= PKT_RX_EIP_CKSUM_BAD;
1464 #ifdef RTE_LIBRTE_SECURITY
1465 if (rx_status & IXGBE_RXD_STAT_SECP) {
1466 pkt_flags |= PKT_RX_SEC_OFFLOAD;
1467 if (rx_status & IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG)
1468 pkt_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
1476 * LOOK_AHEAD defines how many desc statuses to check beyond the
1477 * current descriptor.
1478 * It must be a pound define for optimal performance.
1479 * Do not change the value of LOOK_AHEAD, as the ixgbe_rx_scan_hw_ring
1480 * function only works with LOOK_AHEAD=8.
1482 #define LOOK_AHEAD 8
1483 #if (LOOK_AHEAD != 8)
1484 #error "PMD IXGBE: LOOK_AHEAD must be 8\n"
1487 ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
1489 volatile union ixgbe_adv_rx_desc *rxdp;
1490 struct ixgbe_rx_entry *rxep;
1491 struct rte_mbuf *mb;
1495 uint32_t s[LOOK_AHEAD];
1496 uint32_t pkt_info[LOOK_AHEAD];
1497 int i, j, nb_rx = 0;
1499 uint64_t vlan_flags = rxq->vlan_flags;
1501 /* get references to current descriptor and S/W ring entry */
1502 rxdp = &rxq->rx_ring[rxq->rx_tail];
1503 rxep = &rxq->sw_ring[rxq->rx_tail];
1505 status = rxdp->wb.upper.status_error;
1506 /* check to make sure there is at least 1 packet to receive */
1507 if (!(status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
1511 * Scan LOOK_AHEAD descriptors at a time to determine which descriptors
1512 * reference packets that are ready to be received.
1514 for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST;
1515 i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD) {
1516 /* Read desc statuses backwards to avoid race condition */
1517 for (j = 0; j < LOOK_AHEAD; j++)
1518 s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error);
1522 /* Compute how many contiguous descriptors have their DD status bit set */
1523 for (nb_dd = 0; nb_dd < LOOK_AHEAD &&
1524 (s[nb_dd] & IXGBE_RXDADV_STAT_DD); nb_dd++)
1527 for (j = 0; j < nb_dd; j++)
1528 pkt_info[j] = rte_le_to_cpu_32(rxdp[j].wb.lower.
1533 /* Translate descriptor info to mbuf format */
1534 for (j = 0; j < nb_dd; ++j) {
1536 pkt_len = rte_le_to_cpu_16(rxdp[j].wb.upper.length) -
1538 mb->data_len = pkt_len;
1539 mb->pkt_len = pkt_len;
1540 mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
1542 /* convert descriptor fields to rte mbuf flags */
1543 pkt_flags = rx_desc_status_to_pkt_flags(s[j],
1545 pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
1546 pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags
1547 ((uint16_t)pkt_info[j]);
1548 mb->ol_flags = pkt_flags;
1550 ixgbe_rxd_pkt_info_to_pkt_type
1551 (pkt_info[j], rxq->pkt_type_mask);
1553 if (likely(pkt_flags & PKT_RX_RSS_HASH))
1554 mb->hash.rss = rte_le_to_cpu_32(
1555 rxdp[j].wb.lower.hi_dword.rss);
1556 else if (pkt_flags & PKT_RX_FDIR) {
1557 mb->hash.fdir.hash = rte_le_to_cpu_16(
1558 rxdp[j].wb.lower.hi_dword.csum_ip.csum) &
1559 IXGBE_ATR_HASH_MASK;
1560 mb->hash.fdir.id = rte_le_to_cpu_16(
1561 rxdp[j].wb.lower.hi_dword.csum_ip.ip_id);
1565 /* Move mbuf pointers from the S/W ring to the stage */
1566 for (j = 0; j < LOOK_AHEAD; ++j) {
1567 rxq->rx_stage[i + j] = rxep[j].mbuf;
1570 /* stop scanning if fewer than LOOK_AHEAD descriptors were done */
1571 if (nb_dd != LOOK_AHEAD)
1575 /* clear software ring entries so we can cleanup correctly */
1576 for (i = 0; i < nb_rx; ++i) {
1577 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1585 ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf)
1587 volatile union ixgbe_adv_rx_desc *rxdp;
1588 struct ixgbe_rx_entry *rxep;
1589 struct rte_mbuf *mb;
1594 /* allocate buffers in bulk directly into the S/W ring */
1595 alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
1596 rxep = &rxq->sw_ring[alloc_idx];
1597 diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
1598 rxq->rx_free_thresh);
1599 if (unlikely(diag != 0))
1602 rxdp = &rxq->rx_ring[alloc_idx];
1603 for (i = 0; i < rxq->rx_free_thresh; ++i) {
1604 /* populate the static rte mbuf fields */
1607 mb->port = rxq->port_id;
1610 rte_mbuf_refcnt_set(mb, 1);
1611 mb->data_off = RTE_PKTMBUF_HEADROOM;
1613 /* populate the descriptors */
1614 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1615 rxdp[i].read.hdr_addr = 0;
1616 rxdp[i].read.pkt_addr = dma_addr;
1619 /* update state of internal queue structure */
1620 rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh;
1621 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1622 rxq->rx_free_trigger = rxq->rx_free_thresh - 1;
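/*
 * Refill example (hypothetical sizes): with rx_free_thresh = 32 the
 * trigger values are 31, 63, 95, ...; each time the trigger is crossed,
 * rte_mempool_get_bulk() refills the 32 software-ring entries starting at
 * rx_free_trigger - 31 and the trigger advances by another 32, wrapping
 * back to rx_free_thresh - 1 at the end of the ring.
 */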
1628 static inline uint16_t
1629 ixgbe_rx_fill_from_stage(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
1632 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1635 /* how many packets are ready to return? */
1636 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1638 /* copy mbuf pointers to the application's packet list */
1639 for (i = 0; i < nb_pkts; ++i)
1640 rx_pkts[i] = stage[i];
1642 /* update internal queue state */
1643 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1644 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1649 static inline uint16_t
1650 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1653 struct ixgbe_rx_queue *rxq = (struct ixgbe_rx_queue *)rx_queue;
1656 /* Any previously recv'd pkts will be returned from the Rx stage */
1657 if (rxq->rx_nb_avail)
1658 return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1660 /* Scan the H/W ring for packets to receive */
1661 nb_rx = (uint16_t)ixgbe_rx_scan_hw_ring(rxq);
1663 /* update internal queue state */
1664 rxq->rx_next_avail = 0;
1665 rxq->rx_nb_avail = nb_rx;
1666 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1668 /* if required, allocate new buffers to replenish descriptors */
1669 if (rxq->rx_tail > rxq->rx_free_trigger) {
1670 uint16_t cur_free_trigger = rxq->rx_free_trigger;
1672 if (ixgbe_rx_alloc_bufs(rxq, true) != 0) {
1675 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1676 "queue_id=%u", (unsigned) rxq->port_id,
1677 (unsigned) rxq->queue_id);
1679 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
1680 rxq->rx_free_thresh;
1683 * Need to rewind any previous receives if we cannot
1684 * allocate new buffers to replenish the old ones.
1686 rxq->rx_nb_avail = 0;
1687 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1688 for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j)
1689 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1694 /* update tail pointer */
1696 IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr,
1700 if (rxq->rx_tail >= rxq->nb_rx_desc)
1703 /* received any packets this loop? */
1704 if (rxq->rx_nb_avail)
1705 return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1710 /* split requests into chunks of size RTE_PMD_IXGBE_RX_MAX_BURST */
1712 ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1717 if (unlikely(nb_pkts == 0))
1720 if (likely(nb_pkts <= RTE_PMD_IXGBE_RX_MAX_BURST))
1721 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1723 /* request is relatively large, chunk it up */
1728 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_RX_MAX_BURST);
1729 ret = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1730 nb_rx = (uint16_t)(nb_rx + ret);
1731 nb_pkts = (uint16_t)(nb_pkts - ret);
1740 ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1743 struct ixgbe_rx_queue *rxq;
1744 volatile union ixgbe_adv_rx_desc *rx_ring;
1745 volatile union ixgbe_adv_rx_desc *rxdp;
1746 struct ixgbe_rx_entry *sw_ring;
1747 struct ixgbe_rx_entry *rxe;
1748 struct rte_mbuf *rxm;
1749 struct rte_mbuf *nmb;
1750 union ixgbe_adv_rx_desc rxd;
1759 uint64_t vlan_flags;
1764 rx_id = rxq->rx_tail;
1765 rx_ring = rxq->rx_ring;
1766 sw_ring = rxq->sw_ring;
1767 vlan_flags = rxq->vlan_flags;
1768 while (nb_rx < nb_pkts) {
1770 * The order of operations here is important as the DD status
1771 * bit must not be read after any other descriptor fields.
1772 * rx_ring and rxdp are pointing to volatile data so the order
1773 * of accesses cannot be reordered by the compiler. If they were
1774 * not volatile, they could be reordered which could lead to
1775 * using invalid descriptor fields when read from rxd.
1777 rxdp = &rx_ring[rx_id];
1778 staterr = rxdp->wb.upper.status_error;
1779 if (!(staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
1786 * If the IXGBE_RXDADV_STAT_EOP flag is not set, the RX packet
1787 * is likely to be invalid and to be dropped by the various
1788 * validation checks performed by the network stack.
1790 * Allocate a new mbuf to replenish the RX ring descriptor.
1791 * If the allocation fails:
1792 * - arrange for that RX descriptor to be the first one
1793 * being parsed the next time the receive function is
1794 * invoked [on the same queue].
1796 * - Stop parsing the RX ring and return immediately.
1798 * This policy does not drop the packet received in the RX
1799 * descriptor for which the allocation of a new mbuf failed.
1800 * Thus, it allows that packet to be retrieved later if
1801 * mbufs have been freed in the meantime.
1802 * As a side effect, holding RX descriptors instead of
1803 * systematically giving them back to the NIC may lead to
1804 * RX ring exhaustion situations.
1805 * However, the NIC can gracefully prevent such situations
1806 * from happening by sending specific "back-pressure" flow control
1807 * frames to its peer(s).
1809 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1810 "ext_err_stat=0x%08x pkt_len=%u",
1811 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1812 (unsigned) rx_id, (unsigned) staterr,
1813 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
1815 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1817 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1818 "queue_id=%u", (unsigned) rxq->port_id,
1819 (unsigned) rxq->queue_id);
1820 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1825 rxe = &sw_ring[rx_id];
1827 if (rx_id == rxq->nb_rx_desc)
1830 /* Prefetch next mbuf while processing current one. */
1831 rte_ixgbe_prefetch(sw_ring[rx_id].mbuf);
1834 * When next RX descriptor is on a cache-line boundary,
1835 * prefetch the next 4 RX descriptors and the next 8 pointers
1838 if ((rx_id & 0x3) == 0) {
1839 rte_ixgbe_prefetch(&rx_ring[rx_id]);
1840 rte_ixgbe_prefetch(&sw_ring[rx_id]);
1846 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1847 rxdp->read.hdr_addr = 0;
1848 rxdp->read.pkt_addr = dma_addr;
1851 * Initialize the returned mbuf.
1852 * 1) setup generic mbuf fields:
1853 * - number of segments,
1856 * - RX port identifier.
1857 * 2) integrate hardware offload data, if any:
1858 * - RSS flag & hash,
1859 * - IP checksum flag,
1860 * - VLAN TCI, if any,
1863 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
1865 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1866 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
1869 rxm->pkt_len = pkt_len;
1870 rxm->data_len = pkt_len;
1871 rxm->port = rxq->port_id;
1873 pkt_info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1874 /* Only valid if PKT_RX_VLAN set in pkt_flags */
1875 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1877 pkt_flags = rx_desc_status_to_pkt_flags(staterr, vlan_flags);
1878 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1879 pkt_flags = pkt_flags |
1880 ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info);
1881 rxm->ol_flags = pkt_flags;
1883 ixgbe_rxd_pkt_info_to_pkt_type(pkt_info,
1884 rxq->pkt_type_mask);
1886 if (likely(pkt_flags & PKT_RX_RSS_HASH))
1887 rxm->hash.rss = rte_le_to_cpu_32(
1888 rxd.wb.lower.hi_dword.rss);
1889 else if (pkt_flags & PKT_RX_FDIR) {
1890 rxm->hash.fdir.hash = rte_le_to_cpu_16(
1891 rxd.wb.lower.hi_dword.csum_ip.csum) &
1892 IXGBE_ATR_HASH_MASK;
1893 rxm->hash.fdir.id = rte_le_to_cpu_16(
1894 rxd.wb.lower.hi_dword.csum_ip.ip_id);
1897 * Store the mbuf address into the next entry of the array
1898 * of returned packets.
1900 rx_pkts[nb_rx++] = rxm;
1902 rxq->rx_tail = rx_id;
1905 * If the number of free RX descriptors is greater than the RX free
1906 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1908 * Update the RDT with the value of the last processed RX descriptor
1909 * minus 1, to guarantee that the RDT register is never equal to the
1910 * RDH register, which creates a "full" ring situation from the
1911 * hardware point of view...
1913 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1914 if (nb_hold > rxq->rx_free_thresh) {
1915 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1916 "nb_hold=%u nb_rx=%u",
1917 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1918 (unsigned) rx_id, (unsigned) nb_hold,
1920 rx_id = (uint16_t) ((rx_id == 0) ?
1921 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1922 IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1925 rxq->nb_rx_hold = nb_hold;
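/*
 * Illustrative example (hypothetical values): with nb_rx_desc = 512 and
 * rx_free_thresh = 32, the tail register is only written once more than
 * 32 descriptors are being held.  If rx_id has just wrapped to 0, the
 * value written is 511 (i.e. rx_id - 1 modulo the ring size), which
 * keeps RDT one descriptor behind the next position software will
 * process and therefore never equal to RDH.
 */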
1930 * Detect an RSC descriptor.
1932 static inline uint32_t
1933 ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
1935 return (rte_le_to_cpu_32(rx->wb.lower.lo_dword.data) &
1936 IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
1940 * ixgbe_fill_cluster_head_buf - fill the first mbuf of the returned packet
1942 * Fill the following info in the HEAD buffer of the Rx cluster:
1943 * - RX port identifier
1944 * - hardware offload data, if any:
1946 * - IP checksum flag
1947 * - VLAN TCI, if any
1949 * @head HEAD of the packet cluster
1950 * @desc HW descriptor to get data from
1951 * @rxq Pointer to the Rx queue
1954 ixgbe_fill_cluster_head_buf(
1955 struct rte_mbuf *head,
1956 union ixgbe_adv_rx_desc *desc,
1957 struct ixgbe_rx_queue *rxq,
1963 head->port = rxq->port_id;
1965 /* The vlan_tci field is only valid when PKT_RX_VLAN is
1966 * set in the pkt_flags field.
1968 head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
1969 pkt_info = rte_le_to_cpu_32(desc->wb.lower.lo_dword.data);
1970 pkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags);
1971 pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
1972 pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info);
1973 head->ol_flags = pkt_flags;
1975 ixgbe_rxd_pkt_info_to_pkt_type(pkt_info, rxq->pkt_type_mask);
1977 if (likely(pkt_flags & PKT_RX_RSS_HASH))
1978 head->hash.rss = rte_le_to_cpu_32(desc->wb.lower.hi_dword.rss);
1979 else if (pkt_flags & PKT_RX_FDIR) {
1980 head->hash.fdir.hash =
1981 rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.csum)
1982 & IXGBE_ATR_HASH_MASK;
1983 head->hash.fdir.id =
1984 rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.ip_id);
1989 * ixgbe_recv_pkts_lro - receive handler for the LRO case.
1991 * @rx_queue Rx queue handle
1992 * @rx_pkts table of received packets
1993 * @nb_pkts size of rx_pkts table
1994 * @bulk_alloc if TRUE, bulk allocation is used for HW ring refilling
1996 * Handles the Rx HW ring completions when RSC feature is configured. Uses an
1997 * additional ring of ixgbe_rsc_entry's that will hold the relevant RSC info.
1999 * We use the same logic as in Linux and in FreeBSD ixgbe drivers:
2000 * 1) When non-EOP RSC completion arrives:
2001 * a) Update the HEAD of the current RSC aggregation cluster with the new
2002 * segment's data length.
2003 * b) Set the "next" pointer of the current segment to point to the segment
2004 * at the NEXTP index.
2005 * c) Pass the HEAD of RSC aggregation cluster on to the next NEXTP entry
2006 * in the sw_rsc_ring.
2007 * 2) When EOP arrives we just update the cluster's total length and offload
2008 * flags and deliver the cluster up to the upper layers. In our case - put it
2009 * in the rx_pkts table.
2011 * Returns the number of received packets/clusters (according to the "bulk
2012 * receive" interface).
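 * As an illustration (not a normative example): an RSC aggregation
 * completed over three descriptors is returned as a single rx_pkts[]
 * entry whose nb_segs is 3 and whose pkt_len is the sum of the three
 * segments' data_len values (minus the CRC length when HW CRC stripping
 * is disabled).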
2014 static inline uint16_t
2015 ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
2018 struct ixgbe_rx_queue *rxq = rx_queue;
2019 volatile union ixgbe_adv_rx_desc *rx_ring = rxq->rx_ring;
2020 struct ixgbe_rx_entry *sw_ring = rxq->sw_ring;
2021 struct ixgbe_scattered_rx_entry *sw_sc_ring = rxq->sw_sc_ring;
2022 uint16_t rx_id = rxq->rx_tail;
2024 uint16_t nb_hold = rxq->nb_rx_hold;
2025 uint16_t prev_id = rxq->rx_tail;
2027 while (nb_rx < nb_pkts) {
2029 struct ixgbe_rx_entry *rxe;
2030 struct ixgbe_scattered_rx_entry *sc_entry;
2031 struct ixgbe_scattered_rx_entry *next_sc_entry;
2032 struct ixgbe_rx_entry *next_rxe = NULL;
2033 struct rte_mbuf *first_seg;
2034 struct rte_mbuf *rxm;
2035 struct rte_mbuf *nmb = NULL;
2036 union ixgbe_adv_rx_desc rxd;
2039 volatile union ixgbe_adv_rx_desc *rxdp;
2044 * The code in this whole file uses the volatile pointer to
2045 * ensure the read ordering of the status and the rest of the
2046 * descriptor fields (on the compiler level only!!!). This is so
2047 * UGLY - why not just use the compiler barrier instead? DPDK
2048 * even has the rte_compiler_barrier() for that.
2050 * But most importantly this is just wrong because this doesn't
2051 * ensure memory ordering in a general case at all. For
2052 * instance, DPDK is supposed to work on Power CPUs where
2053 * a compiler barrier may just not be enough!
2055 * I tried to write only this function properly to have a
2056 * starting point (as a part of an LRO/RSC series) but the
2057 * compiler cursed at me when I tried to cast away the
2058 * "volatile" from rx_ring (yes, it's volatile too!!!). So, I'm
2059 * keeping it the way it is for now.
2061 * The code in this file is broken in so many other places and
2062 * will just not work on a big endian CPU anyway therefore the
2063 * lines below will have to be revisited together with the rest
2067 * - Get rid of "volatile" and let the compiler do its job.
2068 * - Use the proper memory barrier (rte_rmb()) to ensure the
2069 * memory ordering below.
2071 rxdp = &rx_ring[rx_id];
2072 staterr = rte_le_to_cpu_32(rxdp->wb.upper.status_error);
2074 if (!(staterr & IXGBE_RXDADV_STAT_DD))
2079 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
2080 "staterr=0x%x data_len=%u",
2081 rxq->port_id, rxq->queue_id, rx_id, staterr,
2082 rte_le_to_cpu_16(rxd.wb.upper.length));
2085 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
2087 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed "
2088 "port_id=%u queue_id=%u",
2089 rxq->port_id, rxq->queue_id);
2091 rte_eth_devices[rxq->port_id].data->
2092 rx_mbuf_alloc_failed++;
2095 } else if (nb_hold > rxq->rx_free_thresh) {
2096 uint16_t next_rdt = rxq->rx_free_trigger;
2098 if (!ixgbe_rx_alloc_bufs(rxq, false)) {
2100 IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr,
2102 nb_hold -= rxq->rx_free_thresh;
2104 PMD_RX_LOG(DEBUG, "RX bulk alloc failed "
2105 "port_id=%u queue_id=%u",
2106 rxq->port_id, rxq->queue_id);
2108 rte_eth_devices[rxq->port_id].data->
2109 rx_mbuf_alloc_failed++;
2115 rxe = &sw_ring[rx_id];
2116 eop = staterr & IXGBE_RXDADV_STAT_EOP;
2118 next_id = rx_id + 1;
2119 if (next_id == rxq->nb_rx_desc)
2122 /* Prefetch next mbuf while processing current one. */
2123 rte_ixgbe_prefetch(sw_ring[next_id].mbuf);
2126 * When the next RX descriptor is on a cache-line boundary,
2127 * prefetch the next 4 RX descriptors and the next 4 pointers
2130 if ((next_id & 0x3) == 0) {
2131 rte_ixgbe_prefetch(&rx_ring[next_id]);
2132 rte_ixgbe_prefetch(&sw_ring[next_id]);
2139 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
2141 * Update RX descriptor with the physical address of the
2142 * new data buffer of the newly allocated mbuf.
2146 rxm->data_off = RTE_PKTMBUF_HEADROOM;
2147 rxdp->read.hdr_addr = 0;
2148 rxdp->read.pkt_addr = dma;
2153 * Set data length & data buffer address of mbuf.
2155 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
2156 rxm->data_len = data_len;
2161 * Get next descriptor index:
2162 * - For RSC it's in the NEXTP field.
2163 * - For a scattered packet - it's just a following
2166 if (ixgbe_rsc_count(&rxd))
2168 (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
2169 IXGBE_RXDADV_NEXTP_SHIFT;
2173 next_sc_entry = &sw_sc_ring[nextp_id];
2174 next_rxe = &sw_ring[nextp_id];
2175 rte_ixgbe_prefetch(next_rxe);
2178 sc_entry = &sw_sc_ring[rx_id];
2179 first_seg = sc_entry->fbuf;
2180 sc_entry->fbuf = NULL;
2183 * If this is the first buffer of the received packet,
2184 * set the pointer to the first mbuf of the packet and
2185 * initialize its context.
2186 * Otherwise, update the total length and the number of segments
2187 * of the current scattered packet, and update the pointer to
2188 * the last mbuf of the current packet.
2190 if (first_seg == NULL) {
2192 first_seg->pkt_len = data_len;
2193 first_seg->nb_segs = 1;
2195 first_seg->pkt_len += data_len;
2196 first_seg->nb_segs++;
2203 * If this is not the last buffer of the received packet, update
2204 * the pointer to the first mbuf at the NEXTP entry in the
2205 * sw_sc_ring and continue to parse the RX ring.
2207 if (!eop && next_rxe) {
2208 rxm->next = next_rxe->mbuf;
2209 next_sc_entry->fbuf = first_seg;
2213 /* Initialize the first mbuf of the returned packet */
2214 ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq, staterr);
2217 * Deal with the case when HW CRC strip is disabled.
2218 * That cannot happen when LRO is enabled, but could still
2219 * happen in scattered RX mode.
2221 first_seg->pkt_len -= rxq->crc_len;
2222 if (unlikely(rxm->data_len <= rxq->crc_len)) {
2223 struct rte_mbuf *lp;
2225 for (lp = first_seg; lp->next != rxm; lp = lp->next)
2228 first_seg->nb_segs--;
2229 lp->data_len -= rxq->crc_len - rxm->data_len;
2231 rte_pktmbuf_free_seg(rxm);
2233 rxm->data_len -= rxq->crc_len;
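/*
 * Illustrative example (hypothetical values): with crc_len = 4, a last
 * segment carrying more than 4 bytes is simply trimmed by 4 above.  If
 * it carries only 2 bytes, 2 CRC bytes spill into the previous segment,
 * so the last segment is freed, the previous one is trimmed by the
 * remaining 2 bytes and nb_segs is decremented; pkt_len has already
 * been reduced by the full CRC length.
 */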
2235 /* Prefetch data of first segment, if configured to do so. */
2236 rte_packet_prefetch((char *)first_seg->buf_addr +
2237 first_seg->data_off);
2240 * Store the mbuf address into the next entry of the array
2241 * of returned packets.
2243 rx_pkts[nb_rx++] = first_seg;
2247 * Record index of the next RX descriptor to probe.
2249 rxq->rx_tail = rx_id;
2252 * If the number of free RX descriptors is greater than the RX free
2253 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
2255 * Update the RDT with the value of the last processed RX descriptor
2256 * minus 1, to guarantee that the RDT register is never equal to the
2257 * RDH register, which creates a "full" ring situation from the
2258 * hardware point of view...
2260 if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {
2261 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
2262 "nb_hold=%u nb_rx=%u",
2263 rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
2266 IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr, prev_id);
2270 rxq->nb_rx_hold = nb_hold;
2275 ixgbe_recv_pkts_lro_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
2278 return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, false);
2282 ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
2285 return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, true);
2288 /*********************************************************************
2290 * Queue management functions
2292 **********************************************************************/
2294 static void __attribute__((cold))
2295 ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
2299 if (txq->sw_ring != NULL) {
2300 for (i = 0; i < txq->nb_tx_desc; i++) {
2301 if (txq->sw_ring[i].mbuf != NULL) {
2302 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
2303 txq->sw_ring[i].mbuf = NULL;
2309 static void __attribute__((cold))
2310 ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
2313 txq->sw_ring != NULL)
2314 rte_free(txq->sw_ring);
2317 static void __attribute__((cold))
2318 ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
2320 if (txq != NULL && txq->ops != NULL) {
2321 txq->ops->release_mbufs(txq);
2322 txq->ops->free_swring(txq);
2327 void __attribute__((cold))
2328 ixgbe_dev_tx_queue_release(void *txq)
2330 ixgbe_tx_queue_release(txq);
2333 /* (Re)set dynamic ixgbe_tx_queue fields to defaults */
2334 static void __attribute__((cold))
2335 ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
2337 static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
2338 struct ixgbe_tx_entry *txe = txq->sw_ring;
2341 /* Zero out HW ring memory */
2342 for (i = 0; i < txq->nb_tx_desc; i++) {
2343 txq->tx_ring[i] = zeroed_desc;
2346 /* Initialize SW ring entries */
2347 prev = (uint16_t) (txq->nb_tx_desc - 1);
2348 for (i = 0; i < txq->nb_tx_desc; i++) {
2349 volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
2351 txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD);
2354 txe[prev].next_id = i;
2358 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
2359 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2362 txq->nb_tx_used = 0;
2364 * Always leave one descriptor unallocated to avoid
2365 * a H/W race condition
2367 txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
2368 txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
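/*
 * For instance, a ring of 512 descriptors therefore never reports more
 * than 511 free TX descriptors.
 */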
2370 memset((void *)&txq->ctx_cache, 0,
2371 IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
2374 static const struct ixgbe_txq_ops def_txq_ops = {
2375 .release_mbufs = ixgbe_tx_queue_release_mbufs,
2376 .free_swring = ixgbe_tx_free_swring,
2377 .reset = ixgbe_reset_tx_queue,
2380 /* Takes an ethdev and a queue and sets up the tx function to be used based on
2381 * the queue parameters. Used in tx_queue_setup by the primary process and
2382 * then in dev_init by secondary processes when attaching to an existing ethdev.
2384 void __attribute__((cold))
2385 ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
2387 /* Use a simple Tx queue (no offloads, no multi segs) if possible */
2388 if ((txq->offloads == 0) &&
2389 #ifdef RTE_LIBRTE_SECURITY
2390 !(txq->using_ipsec) &&
2392 (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
2393 PMD_INIT_LOG(DEBUG, "Using simple tx code path");
2394 dev->tx_pkt_prepare = NULL;
2395 #ifdef RTE_IXGBE_INC_VECTOR
2396 if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
2397 (rte_eal_process_type() != RTE_PROC_PRIMARY ||
2398 ixgbe_txq_vec_setup(txq) == 0)) {
2399 PMD_INIT_LOG(DEBUG, "Vector tx enabled.");
2400 dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
2403 dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
2405 PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
2407 " - offloads = 0x%" PRIx64,
2410 " - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
2411 (unsigned long)txq->tx_rs_thresh,
2412 (unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
2413 dev->tx_pkt_burst = ixgbe_xmit_pkts;
2414 dev->tx_pkt_prepare = ixgbe_prep_pkts;
2419 ixgbe_get_tx_queue_offloads(struct rte_eth_dev *dev)
2427 ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
2429 uint64_t tx_offload_capa;
2430 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2433 DEV_TX_OFFLOAD_VLAN_INSERT |
2434 DEV_TX_OFFLOAD_IPV4_CKSUM |
2435 DEV_TX_OFFLOAD_UDP_CKSUM |
2436 DEV_TX_OFFLOAD_TCP_CKSUM |
2437 DEV_TX_OFFLOAD_SCTP_CKSUM |
2438 DEV_TX_OFFLOAD_TCP_TSO |
2439 DEV_TX_OFFLOAD_MULTI_SEGS;
2441 if (hw->mac.type == ixgbe_mac_82599EB ||
2442 hw->mac.type == ixgbe_mac_X540)
2443 tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
2445 if (hw->mac.type == ixgbe_mac_X550 ||
2446 hw->mac.type == ixgbe_mac_X550EM_x ||
2447 hw->mac.type == ixgbe_mac_X550EM_a)
2448 tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
2450 #ifdef RTE_LIBRTE_SECURITY
2451 if (dev->security_ctx)
2452 tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
2454 return tx_offload_capa;
2457 int __attribute__((cold))
2458 ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
2461 unsigned int socket_id,
2462 const struct rte_eth_txconf *tx_conf)
2464 const struct rte_memzone *tz;
2465 struct ixgbe_tx_queue *txq;
2466 struct ixgbe_hw *hw;
2467 uint16_t tx_rs_thresh, tx_free_thresh;
2470 PMD_INIT_FUNC_TRACE();
2471 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2473 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
2476 * Validate number of transmit descriptors.
2477 * It must not exceed hardware maximum, and must be multiple
2480 if (nb_desc % IXGBE_TXD_ALIGN != 0 ||
2481 (nb_desc > IXGBE_MAX_RING_DESC) ||
2482 (nb_desc < IXGBE_MIN_RING_DESC)) {
2487 * The following two parameters control the setting of the RS bit on
2488 * transmit descriptors.
2489 * TX descriptors will have their RS bit set after txq->tx_rs_thresh
2490 * descriptors have been used.
2491 * The TX descriptor ring will be cleaned after txq->tx_free_thresh
2492 * descriptors are used or if the number of descriptors required
2493 * to transmit a packet is greater than the number of free TX
2495 * The following constraints must be satisfied:
2496 * tx_rs_thresh must be greater than 0.
2497 * tx_rs_thresh must be less than the size of the ring minus 2.
2498 * tx_rs_thresh must be less than or equal to tx_free_thresh.
2499 * tx_rs_thresh must be a divisor of the ring size.
2500 * tx_free_thresh must be greater than 0.
2501 * tx_free_thresh must be less than the size of the ring minus 3.
2502 * tx_free_thresh + tx_rs_thresh must not exceed nb_desc.
2503 * One descriptor in the TX ring is used as a sentinel to avoid a
2504 * H/W race condition, hence the maximum threshold constraints.
2505 * When set to zero use default values.
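 * For example (hypothetical, not required values): nb_desc = 512 with
 * tx_rs_thresh = 32 and tx_free_thresh = 32 satisfies all of the above:
 * 32 < 510, 32 <= 32, 512 % 32 == 0 and 32 + 32 <= 512.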
2507 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
2508 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
2509 /* Force tx_rs_thresh to adapt to an aggressive tx_free_thresh */
2510 tx_rs_thresh = (DEFAULT_TX_RS_THRESH + tx_free_thresh > nb_desc) ?
2511 nb_desc - tx_free_thresh : DEFAULT_TX_RS_THRESH;
2512 if (tx_conf->tx_rs_thresh > 0)
2513 tx_rs_thresh = tx_conf->tx_rs_thresh;
2514 if (tx_rs_thresh + tx_free_thresh > nb_desc) {
2515 PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not "
2516 "exceed nb_desc. (tx_rs_thresh=%u "
2517 "tx_free_thresh=%u nb_desc=%u port = %d queue=%d)",
2518 (unsigned int)tx_rs_thresh,
2519 (unsigned int)tx_free_thresh,
2520 (unsigned int)nb_desc,
2521 (int)dev->data->port_id,
2525 if (tx_rs_thresh >= (nb_desc - 2)) {
2526 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number "
2527 "of TX descriptors minus 2. (tx_rs_thresh=%u "
2528 "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2529 (int)dev->data->port_id, (int)queue_idx);
2532 if (tx_rs_thresh > DEFAULT_TX_RS_THRESH) {
2533 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to %u. "
2534 "(tx_rs_thresh=%u port=%d queue=%d)",
2535 DEFAULT_TX_RS_THRESH, (unsigned int)tx_rs_thresh,
2536 (int)dev->data->port_id, (int)queue_idx);
2539 if (tx_free_thresh >= (nb_desc - 3)) {
2540 PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the number of "
2542 "TX descriptors minus 3. (tx_free_thresh=%u "
2543 "port=%d queue=%d)",
2544 (unsigned int)tx_free_thresh,
2545 (int)dev->data->port_id, (int)queue_idx);
2548 if (tx_rs_thresh > tx_free_thresh) {
2549 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to "
2550 "tx_free_thresh. (tx_free_thresh=%u "
2551 "tx_rs_thresh=%u port=%d queue=%d)",
2552 (unsigned int)tx_free_thresh,
2553 (unsigned int)tx_rs_thresh,
2554 (int)dev->data->port_id,
2558 if ((nb_desc % tx_rs_thresh) != 0) {
2559 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
2560 "number of TX descriptors. (tx_rs_thresh=%u "
2561 "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2562 (int)dev->data->port_id, (int)queue_idx);
2567 * If tx_rs_thresh is greater than 1, then TX WTHRESH should be
2568 * set to 0. If WTHRESH is greater than zero, the RS bit is ignored
2569 * by the NIC and all descriptors are written back after the NIC
2570 * accumulates WTHRESH descriptors.
2572 if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
2573 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
2574 "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
2575 "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2576 (int)dev->data->port_id, (int)queue_idx);
2580 /* Free memory prior to re-allocation if needed... */
2581 if (dev->data->tx_queues[queue_idx] != NULL) {
2582 ixgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
2583 dev->data->tx_queues[queue_idx] = NULL;
2586 /* First allocate the tx queue data structure */
2587 txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue),
2588 RTE_CACHE_LINE_SIZE, socket_id);
2593 * Allocate TX ring hardware descriptors. A memzone large enough to
2594 * handle the maximum ring size is allocated in order to allow for
2595 * resizing in later calls to the queue setup function.
2597 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
2598 sizeof(union ixgbe_adv_tx_desc) * IXGBE_MAX_RING_DESC,
2599 IXGBE_ALIGN, socket_id);
2601 ixgbe_tx_queue_release(txq);
2605 txq->nb_tx_desc = nb_desc;
2606 txq->tx_rs_thresh = tx_rs_thresh;
2607 txq->tx_free_thresh = tx_free_thresh;
2608 txq->pthresh = tx_conf->tx_thresh.pthresh;
2609 txq->hthresh = tx_conf->tx_thresh.hthresh;
2610 txq->wthresh = tx_conf->tx_thresh.wthresh;
2611 txq->queue_id = queue_idx;
2612 txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2613 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2614 txq->port_id = dev->data->port_id;
2615 txq->offloads = offloads;
2616 txq->ops = &def_txq_ops;
2617 txq->tx_deferred_start = tx_conf->tx_deferred_start;
2618 #ifdef RTE_LIBRTE_SECURITY
2619 txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
2620 DEV_TX_OFFLOAD_SECURITY);
2624 * Use VFTDT as the transmit tail register when a virtual function is detected
2626 if (hw->mac.type == ixgbe_mac_82599_vf ||
2627 hw->mac.type == ixgbe_mac_X540_vf ||
2628 hw->mac.type == ixgbe_mac_X550_vf ||
2629 hw->mac.type == ixgbe_mac_X550EM_x_vf ||
2630 hw->mac.type == ixgbe_mac_X550EM_a_vf)
2631 txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx));
2633 txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
2635 txq->tx_ring_phys_addr = tz->iova;
2636 txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;
2638 /* Allocate software ring */
2639 txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
2640 sizeof(struct ixgbe_tx_entry) * nb_desc,
2641 RTE_CACHE_LINE_SIZE, socket_id);
2642 if (txq->sw_ring == NULL) {
2643 ixgbe_tx_queue_release(txq);
2646 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
2647 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
2649 /* set up vector or scalar TX function as appropriate */
2650 ixgbe_set_tx_function(dev, txq);
2652 txq->ops->reset(txq);
2654 dev->data->tx_queues[queue_idx] = txq;
2661 * ixgbe_free_sc_cluster - free the not-yet-completed scattered cluster
2663 * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
2664 * in the sw_rsc_ring is not set to NULL but rather points to the next
2665 * mbuf of this RSC aggregation (that has not been completed yet and still
2666 * resides on the HW ring). So, instead of calling rte_pktmbuf_free(), we
2667 * just free the first "nb_segs" segments of the cluster explicitly by calling
2668 * rte_pktmbuf_free_seg() on each of them.
2670 * @m scattered cluster head
2672 static void __attribute__((cold))
2673 ixgbe_free_sc_cluster(struct rte_mbuf *m)
2675 uint16_t i, nb_segs = m->nb_segs;
2676 struct rte_mbuf *next_seg;
2678 for (i = 0; i < nb_segs; i++) {
2680 rte_pktmbuf_free_seg(m);
2685 static void __attribute__((cold))
2686 ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
2690 #ifdef RTE_IXGBE_INC_VECTOR
2691 /* SSE Vector driver has a different way of releasing mbufs. */
2692 if (rxq->rx_using_sse) {
2693 ixgbe_rx_queue_release_mbufs_vec(rxq);
2698 if (rxq->sw_ring != NULL) {
2699 for (i = 0; i < rxq->nb_rx_desc; i++) {
2700 if (rxq->sw_ring[i].mbuf != NULL) {
2701 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
2702 rxq->sw_ring[i].mbuf = NULL;
2705 if (rxq->rx_nb_avail) {
2706 for (i = 0; i < rxq->rx_nb_avail; ++i) {
2707 struct rte_mbuf *mb;
2709 mb = rxq->rx_stage[rxq->rx_next_avail + i];
2710 rte_pktmbuf_free_seg(mb);
2712 rxq->rx_nb_avail = 0;
2716 if (rxq->sw_sc_ring)
2717 for (i = 0; i < rxq->nb_rx_desc; i++)
2718 if (rxq->sw_sc_ring[i].fbuf) {
2719 ixgbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
2720 rxq->sw_sc_ring[i].fbuf = NULL;
2724 static void __attribute__((cold))
2725 ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)
2728 ixgbe_rx_queue_release_mbufs(rxq);
2729 rte_free(rxq->sw_ring);
2730 rte_free(rxq->sw_sc_ring);
2735 void __attribute__((cold))
2736 ixgbe_dev_rx_queue_release(void *rxq)
2738 ixgbe_rx_queue_release(rxq);
2742 * Check if Rx Burst Bulk Alloc function can be used.
2744 * 0: the preconditions are satisfied and the bulk allocation function can be used.
2746 * -EINVAL: the preconditions are NOT satisfied and the default Rx burst
2747 * function must be used.
2749 static inline int __attribute__((cold))
2750 check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
2755 * Make sure the following pre-conditions are satisfied:
2756 * rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST
2757 * rxq->rx_free_thresh < rxq->nb_rx_desc
2758 * (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
2759 * Scattered packets are not supported. This should be checked
2760 * outside of this function.
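 * Example (hypothetical values): nb_rx_desc = 128 with rx_free_thresh = 32
 * meets all three conditions, assuming RTE_PMD_IXGBE_RX_MAX_BURST is 32:
 * 32 >= 32, 32 < 128 and 128 % 32 == 0.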
2762 if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) {
2763 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2764 "rxq->rx_free_thresh=%d, "
2765 "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
2766 rxq->rx_free_thresh, RTE_PMD_IXGBE_RX_MAX_BURST);
2768 } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
2769 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2770 "rxq->rx_free_thresh=%d, "
2771 "rxq->nb_rx_desc=%d",
2772 rxq->rx_free_thresh, rxq->nb_rx_desc);
2774 } else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) {
2775 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2776 "rxq->nb_rx_desc=%d, "
2777 "rxq->rx_free_thresh=%d",
2778 rxq->nb_rx_desc, rxq->rx_free_thresh);
2785 /* Reset dynamic ixgbe_rx_queue fields back to defaults */
2786 static void __attribute__((cold))
2787 ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
2789 static const union ixgbe_adv_rx_desc zeroed_desc = {{0}};
2791 uint16_t len = rxq->nb_rx_desc;
2794 * By default, the Rx queue setup function allocates enough memory for
2795 * IXGBE_MAX_RING_DESC. The Rx Burst bulk allocation function requires
2796 * extra memory at the end of the descriptor ring to be zeroed out.
2798 if (adapter->rx_bulk_alloc_allowed)
2799 /* zero out extra memory */
2800 len += RTE_PMD_IXGBE_RX_MAX_BURST;
2803 * Zero out HW ring memory. Zero out extra memory at the end of
2804 * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
2805 * reads extra memory as zeros.
2807 for (i = 0; i < len; i++) {
2808 rxq->rx_ring[i] = zeroed_desc;
2812 * Initialize the extra software ring entries. Space for these extra
2813 * entries is always allocated.
2815 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
2816 for (i = rxq->nb_rx_desc; i < len; ++i) {
2817 rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
2820 rxq->rx_nb_avail = 0;
2821 rxq->rx_next_avail = 0;
2822 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
2824 rxq->nb_rx_hold = 0;
2825 rxq->pkt_first_seg = NULL;
2826 rxq->pkt_last_seg = NULL;
2828 #ifdef RTE_IXGBE_INC_VECTOR
2829 rxq->rxrearm_start = 0;
2830 rxq->rxrearm_nb = 0;
2835 ixgbe_is_vf(struct rte_eth_dev *dev)
2837 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2839 switch (hw->mac.type) {
2840 case ixgbe_mac_82599_vf:
2841 case ixgbe_mac_X540_vf:
2842 case ixgbe_mac_X550_vf:
2843 case ixgbe_mac_X550EM_x_vf:
2844 case ixgbe_mac_X550EM_a_vf:
2852 ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev)
2854 uint64_t offloads = 0;
2855 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2857 if (hw->mac.type != ixgbe_mac_82598EB)
2858 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
2864 ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
2867 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2869 offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
2870 DEV_RX_OFFLOAD_UDP_CKSUM |
2871 DEV_RX_OFFLOAD_TCP_CKSUM |
2872 DEV_RX_OFFLOAD_KEEP_CRC |
2873 DEV_RX_OFFLOAD_JUMBO_FRAME |
2874 DEV_RX_OFFLOAD_VLAN_FILTER |
2875 DEV_RX_OFFLOAD_SCATTER;
2877 if (hw->mac.type == ixgbe_mac_82598EB)
2878 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
2880 if (ixgbe_is_vf(dev) == 0)
2881 offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
2884 * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
2887 if ((hw->mac.type == ixgbe_mac_82599EB ||
2888 hw->mac.type == ixgbe_mac_X540 ||
2889 hw->mac.type == ixgbe_mac_X550) &&
2890 !RTE_ETH_DEV_SRIOV(dev).active)
2891 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
2893 if (hw->mac.type == ixgbe_mac_82599EB ||
2894 hw->mac.type == ixgbe_mac_X540)
2895 offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
2897 if (hw->mac.type == ixgbe_mac_X550 ||
2898 hw->mac.type == ixgbe_mac_X550EM_x ||
2899 hw->mac.type == ixgbe_mac_X550EM_a)
2900 offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
2902 #ifdef RTE_LIBRTE_SECURITY
2903 if (dev->security_ctx)
2904 offloads |= DEV_RX_OFFLOAD_SECURITY;
2910 int __attribute__((cold))
2911 ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
2914 unsigned int socket_id,
2915 const struct rte_eth_rxconf *rx_conf,
2916 struct rte_mempool *mp)
2918 const struct rte_memzone *rz;
2919 struct ixgbe_rx_queue *rxq;
2920 struct ixgbe_hw *hw;
2922 struct ixgbe_adapter *adapter =
2923 (struct ixgbe_adapter *)dev->data->dev_private;
2926 PMD_INIT_FUNC_TRACE();
2927 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2929 offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
2932 * Validate number of receive descriptors.
2933 * It must not exceed hardware maximum, and must be multiple
2936 if (nb_desc % IXGBE_RXD_ALIGN != 0 ||
2937 (nb_desc > IXGBE_MAX_RING_DESC) ||
2938 (nb_desc < IXGBE_MIN_RING_DESC)) {
2942 /* Free memory prior to re-allocation if needed... */
2943 if (dev->data->rx_queues[queue_idx] != NULL) {
2944 ixgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
2945 dev->data->rx_queues[queue_idx] = NULL;
2948 /* First allocate the rx queue data structure */
2949 rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct ixgbe_rx_queue),
2950 RTE_CACHE_LINE_SIZE, socket_id);
2954 rxq->nb_rx_desc = nb_desc;
2955 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
2956 rxq->queue_id = queue_idx;
2957 rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2958 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2959 rxq->port_id = dev->data->port_id;
2960 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
2961 rxq->crc_len = RTE_ETHER_CRC_LEN;
2964 rxq->drop_en = rx_conf->rx_drop_en;
2965 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
2966 rxq->offloads = offloads;
2969 * The packet type in the RX descriptor is different for different NICs.
2970 * Some bits are used for x550 but reserved for other NICs.
2971 * So set different masks for different NICs.
2973 if (hw->mac.type == ixgbe_mac_X550 ||
2974 hw->mac.type == ixgbe_mac_X550EM_x ||
2975 hw->mac.type == ixgbe_mac_X550EM_a ||
2976 hw->mac.type == ixgbe_mac_X550_vf ||
2977 hw->mac.type == ixgbe_mac_X550EM_x_vf ||
2978 hw->mac.type == ixgbe_mac_X550EM_a_vf)
2979 rxq->pkt_type_mask = IXGBE_PACKET_TYPE_MASK_X550;
2981 rxq->pkt_type_mask = IXGBE_PACKET_TYPE_MASK_82599;
2984 * Allocate RX ring hardware descriptors. A memzone large enough to
2985 * handle the maximum ring size is allocated in order to allow for
2986 * resizing in later calls to the queue setup function.
2988 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
2989 RX_RING_SZ, IXGBE_ALIGN, socket_id);
2991 ixgbe_rx_queue_release(rxq);
2996 * Zero init all the descriptors in the ring.
2998 memset(rz->addr, 0, RX_RING_SZ);
3001 * Use VFRDT/VFRDH as the tail/head registers when running as a virtual function
3003 if (hw->mac.type == ixgbe_mac_82599_vf ||
3004 hw->mac.type == ixgbe_mac_X540_vf ||
3005 hw->mac.type == ixgbe_mac_X550_vf ||
3006 hw->mac.type == ixgbe_mac_X550EM_x_vf ||
3007 hw->mac.type == ixgbe_mac_X550EM_a_vf) {
3009 IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx));
3011 IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDH(queue_idx));
3014 IXGBE_PCI_REG_ADDR(hw, IXGBE_RDT(rxq->reg_idx));
3016 IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx));
3019 rxq->rx_ring_phys_addr = rz->iova;
3020 rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
3023 * Certain constraints must be met in order to use the bulk buffer
3024 * allocation Rx burst function. If any of the Rx queues does not meet them,
3025 * the feature is disabled for the whole port.
3027 if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
3028 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc "
3029 "preconditions - canceling the feature for "
3030 "the whole port[%d]",
3031 rxq->queue_id, rxq->port_id);
3032 adapter->rx_bulk_alloc_allowed = false;
3036 * Allocate software ring. Allow for space at the end of the
3037 * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
3038 * function does not access an invalid memory region.
3041 if (adapter->rx_bulk_alloc_allowed)
3042 len += RTE_PMD_IXGBE_RX_MAX_BURST;
3044 rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
3045 sizeof(struct ixgbe_rx_entry) * len,
3046 RTE_CACHE_LINE_SIZE, socket_id);
3047 if (!rxq->sw_ring) {
3048 ixgbe_rx_queue_release(rxq);
3053 * Always allocate even if it's not going to be needed in order to
3054 * simplify the code.
3056 * This ring is used in LRO and Scattered Rx cases and Scattered Rx may
3057 * be requested in ixgbe_dev_rx_init(), which is called later from
3061 rte_zmalloc_socket("rxq->sw_sc_ring",
3062 sizeof(struct ixgbe_scattered_rx_entry) * len,
3063 RTE_CACHE_LINE_SIZE, socket_id);
3064 if (!rxq->sw_sc_ring) {
3065 ixgbe_rx_queue_release(rxq);
3069 PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p "
3070 "dma_addr=0x%"PRIx64,
3071 rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
3072 rxq->rx_ring_phys_addr);
3074 if (!rte_is_power_of_2(nb_desc)) {
3075 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
3076 "preconditions - canceling the feature for "
3077 "the whole port[%d]",
3078 rxq->queue_id, rxq->port_id);
3079 adapter->rx_vec_allowed = false;
3081 ixgbe_rxq_vec_setup(rxq);
3083 dev->data->rx_queues[queue_idx] = rxq;
3085 ixgbe_reset_rx_queue(adapter, rxq);
3091 ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
3093 #define IXGBE_RXQ_SCAN_INTERVAL 4
3094 volatile union ixgbe_adv_rx_desc *rxdp;
3095 struct ixgbe_rx_queue *rxq;
3098 rxq = dev->data->rx_queues[rx_queue_id];
3099 rxdp = &(rxq->rx_ring[rxq->rx_tail]);
3101 while ((desc < rxq->nb_rx_desc) &&
3102 (rxdp->wb.upper.status_error &
3103 rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) {
3104 desc += IXGBE_RXQ_SCAN_INTERVAL;
3105 rxdp += IXGBE_RXQ_SCAN_INTERVAL;
3106 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
3107 rxdp = &(rxq->rx_ring[rxq->rx_tail +
3108 desc - rxq->nb_rx_desc]);
3115 ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
3117 volatile union ixgbe_adv_rx_desc *rxdp;
3118 struct ixgbe_rx_queue *rxq = rx_queue;
3121 if (unlikely(offset >= rxq->nb_rx_desc))
3123 desc = rxq->rx_tail + offset;
3124 if (desc >= rxq->nb_rx_desc)
3125 desc -= rxq->nb_rx_desc;
3127 rxdp = &rxq->rx_ring[desc];
3128 return !!(rxdp->wb.upper.status_error &
3129 rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD));
3133 ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
3135 struct ixgbe_rx_queue *rxq = rx_queue;
3136 volatile uint32_t *status;
3137 uint32_t nb_hold, desc;
3139 if (unlikely(offset >= rxq->nb_rx_desc))
3142 #ifdef RTE_IXGBE_INC_VECTOR
3143 if (rxq->rx_using_sse)
3144 nb_hold = rxq->rxrearm_nb;
3147 nb_hold = rxq->nb_rx_hold;
3148 if (offset >= rxq->nb_rx_desc - nb_hold)
3149 return RTE_ETH_RX_DESC_UNAVAIL;
3151 desc = rxq->rx_tail + offset;
3152 if (desc >= rxq->nb_rx_desc)
3153 desc -= rxq->nb_rx_desc;
3155 status = &rxq->rx_ring[desc].wb.upper.status_error;
3156 if (*status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))
3157 return RTE_ETH_RX_DESC_DONE;
3159 return RTE_ETH_RX_DESC_AVAIL;
3163 ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
3165 struct ixgbe_tx_queue *txq = tx_queue;
3166 volatile uint32_t *status;
3169 if (unlikely(offset >= txq->nb_tx_desc))
3172 desc = txq->tx_tail + offset;
3173 /* go to next desc that has the RS bit */
3174 desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
3176 if (desc >= txq->nb_tx_desc) {
3177 desc -= txq->nb_tx_desc;
3178 if (desc >= txq->nb_tx_desc)
3179 desc -= txq->nb_tx_desc;
3182 status = &txq->tx_ring[desc].wb.status;
3183 if (*status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD))
3184 return RTE_ETH_TX_DESC_DONE;
3186 return RTE_ETH_TX_DESC_FULL;
3190 * Set up link loopback for X540/X550 mode Tx->Rx.
3192 static inline void __attribute__((cold))
3193 ixgbe_setup_loopback_link_x540_x550(struct ixgbe_hw *hw, bool enable)
3196 PMD_INIT_FUNC_TRACE();
3198 u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
3200 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
3201 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
3202 macc = IXGBE_READ_REG(hw, IXGBE_MACC);
3205 /* datasheet 15.2.1: disable AUTONEG (PHY Bit 7.0.C) */
3206 autoneg_reg |= IXGBE_MII_AUTONEG_ENABLE;
3207 /* datasheet 15.2.1: MACC.FLU = 1 (force link up) */
3208 macc |= IXGBE_MACC_FLU;
3210 autoneg_reg &= ~IXGBE_MII_AUTONEG_ENABLE;
3211 macc &= ~IXGBE_MACC_FLU;
3214 hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
3215 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
3217 IXGBE_WRITE_REG(hw, IXGBE_MACC, macc);
3220 void __attribute__((cold))
3221 ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
3224 struct ixgbe_adapter *adapter =
3225 (struct ixgbe_adapter *)dev->data->dev_private;
3226 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3228 PMD_INIT_FUNC_TRACE();
3230 for (i = 0; i < dev->data->nb_tx_queues; i++) {
3231 struct ixgbe_tx_queue *txq = dev->data->tx_queues[i];
3234 txq->ops->release_mbufs(txq);
3235 txq->ops->reset(txq);
3239 for (i = 0; i < dev->data->nb_rx_queues; i++) {
3240 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
3243 ixgbe_rx_queue_release_mbufs(rxq);
3244 ixgbe_reset_rx_queue(adapter, rxq);
3247 /* If loopback mode was enabled, reconfigure the link accordingly */
3248 if (dev->data->dev_conf.lpbk_mode != 0) {
3249 if (hw->mac.type == ixgbe_mac_X540 ||
3250 hw->mac.type == ixgbe_mac_X550 ||
3251 hw->mac.type == ixgbe_mac_X550EM_x ||
3252 hw->mac.type == ixgbe_mac_X550EM_a)
3253 ixgbe_setup_loopback_link_x540_x550(hw, false);
3258 ixgbe_dev_free_queues(struct rte_eth_dev *dev)
3262 PMD_INIT_FUNC_TRACE();
3264 for (i = 0; i < dev->data->nb_rx_queues; i++) {
3265 ixgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
3266 dev->data->rx_queues[i] = NULL;
3268 dev->data->nb_rx_queues = 0;
3270 for (i = 0; i < dev->data->nb_tx_queues; i++) {
3271 ixgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
3272 dev->data->tx_queues[i] = NULL;
3274 dev->data->nb_tx_queues = 0;
3277 /*********************************************************************
3279 * Device RX/TX init functions
3281 **********************************************************************/
3284 * Receive Side Scaling (RSS)
3285 * See section 7.1.2.8 in the following document:
3286 * "Intel 82599 10 GbE Controller Datasheet" - Revision 2.1 October 2009
3289 * The source and destination IP addresses of the IP header and the source
3290 * and destination ports of TCP/UDP headers, if any, of received packets are
3291 * hashed against a configurable random key to compute a 32-bit RSS hash result.
3292 * The seven (7) LSBs of the 32-bit hash result are used as an index into a
3293 * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
3294 * RSS output index, which is used as the RX queue index where the received packet is stored.
3296 * The following output is supplied in the RX write-back descriptor:
3297 * - 32-bit result of the Microsoft RSS hash function,
3298 * - 4-bit RSS type field.
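 * As an illustration, a computed hash of 0x1a3f5c77 has 0x77 (119) as
 * its seven LSBs, so RETA entry 119 selects the RX queue for that
 * packet.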
3302 * RSS random key supplied in section 7.1.2.8.3 of the Intel 82599 datasheet.
3303 * Used as the default key.
3305 static uint8_t rss_intel_key[40] = {
3306 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
3307 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
3308 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
3309 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
3310 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
3314 ixgbe_rss_disable(struct rte_eth_dev *dev)
3316 struct ixgbe_hw *hw;
3320 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3321 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3322 mrqc = IXGBE_READ_REG(hw, mrqc_reg);
3323 mrqc &= ~IXGBE_MRQC_RSSEN;
3324 IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
3328 ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
3338 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3339 rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
3341 hash_key = rss_conf->rss_key;
3342 if (hash_key != NULL) {
3343 /* Fill in RSS hash key */
3344 for (i = 0; i < 10; i++) {
3345 rss_key = hash_key[(i * 4)];
3346 rss_key |= hash_key[(i * 4) + 1] << 8;
3347 rss_key |= hash_key[(i * 4) + 2] << 16;
3348 rss_key |= hash_key[(i * 4) + 3] << 24;
3349 IXGBE_WRITE_REG_ARRAY(hw, rssrk_reg, i, rss_key);
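/*
 * Illustrative example: with the default rss_intel_key defined above,
 * the first iteration packs the bytes 0x6D, 0x5A, 0x56, 0xDA into
 * rss_key as 0xDA565A6D (byte 0 in the least significant position)
 * before it is written to RSSRK[0].
 */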
3353 /* Set configured hashing protocols in MRQC register */
3354 rss_hf = rss_conf->rss_hf;
3355 mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */
3356 if (rss_hf & ETH_RSS_IPV4)
3357 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
3358 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
3359 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
3360 if (rss_hf & ETH_RSS_IPV6)
3361 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
3362 if (rss_hf & ETH_RSS_IPV6_EX)
3363 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
3364 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
3365 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3366 if (rss_hf & ETH_RSS_IPV6_TCP_EX)
3367 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
3368 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
3369 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3370 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
3371 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3372 if (rss_hf & ETH_RSS_IPV6_UDP_EX)
3373 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
3374 IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
3378 ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
3379 struct rte_eth_rss_conf *rss_conf)
3381 struct ixgbe_hw *hw;
3386 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3388 if (!ixgbe_rss_update_sp(hw->mac.type)) {
3389 PMD_DRV_LOG(ERR, "RSS hash update is not supported on this "
3393 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3396 * Excerpt from section 7.1.2.8 Receive-Side Scaling (RSS):
3397 * "RSS enabling cannot be done dynamically while it must be
3398 * preceded by a software reset"
3399 * Before changing anything, first check that the update RSS operation
3400 * does not attempt to disable RSS, if RSS was enabled at
3401 * initialization time, or does not attempt to enable RSS, if RSS was
3402 * disabled at initialization time.
3404 rss_hf = rss_conf->rss_hf & IXGBE_RSS_OFFLOAD_ALL;
3405 mrqc = IXGBE_READ_REG(hw, mrqc_reg);
3406 if (!(mrqc & IXGBE_MRQC_RSSEN)) { /* RSS disabled */
3407 if (rss_hf != 0) /* Enable RSS */
3409 return 0; /* Nothing to do */
3412 if (rss_hf == 0) /* Disable RSS */
3414 ixgbe_hw_rss_hash_set(hw, rss_conf);
3419 ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
3420 struct rte_eth_rss_conf *rss_conf)
3422 struct ixgbe_hw *hw;
3431 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3432 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3433 rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
3434 hash_key = rss_conf->rss_key;
3435 if (hash_key != NULL) {
3436 /* Return RSS hash key */
3437 for (i = 0; i < 10; i++) {
3438 rss_key = IXGBE_READ_REG_ARRAY(hw, rssrk_reg, i);
3439 hash_key[(i * 4)] = rss_key & 0x000000FF;
3440 hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
3441 hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
3442 hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
3446 /* Get RSS functions configured in MRQC register */
3447 mrqc = IXGBE_READ_REG(hw, mrqc_reg);
3448 if ((mrqc & IXGBE_MRQC_RSSEN) == 0) { /* RSS is disabled */
3449 rss_conf->rss_hf = 0;
3453 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4)
3454 rss_hf |= ETH_RSS_IPV4;
3455 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP)
3456 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
3457 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6)
3458 rss_hf |= ETH_RSS_IPV6;
3459 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX)
3460 rss_hf |= ETH_RSS_IPV6_EX;
3461 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP)
3462 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
3463 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP)
3464 rss_hf |= ETH_RSS_IPV6_TCP_EX;
3465 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP)
3466 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
3467 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP)
3468 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
3469 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP)
3470 rss_hf |= ETH_RSS_IPV6_UDP_EX;
3471 rss_conf->rss_hf = rss_hf;
3476 ixgbe_rss_configure(struct rte_eth_dev *dev)
3478 struct rte_eth_rss_conf rss_conf;
3479 struct ixgbe_adapter *adapter;
3480 struct ixgbe_hw *hw;
3484 uint16_t sp_reta_size;
3487 PMD_INIT_FUNC_TRACE();
3488 adapter = (struct ixgbe_adapter *)dev->data->dev_private;
3489 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3491 sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
3494 * Fill in redirection table
3495 * The byte-swap is needed because NIC registers are in
3496 * little-endian order.
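 * As an illustration, with 4 RX queues the table repeats the pattern
 * 0, 1, 2, 3, ... and four consecutive 8-bit entries are accumulated
 * (reta << 8 | j) into one 32-bit value per RETA register write.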
3498 if (adapter->rss_reta_updated == 0) {
3500 for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
3501 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
3503 if (j == dev->data->nb_rx_queues)
3505 reta = (reta << 8) | j;
3507 IXGBE_WRITE_REG(hw, reta_reg,
3513 * Configure the RSS key and the RSS protocols used to compute
3514 * the RSS hash of input packets.
3516 rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
3517 if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
3518 ixgbe_rss_disable(dev);
3521 if (rss_conf.rss_key == NULL)
3522 rss_conf.rss_key = rss_intel_key; /* Default hash key */
3523 ixgbe_hw_rss_hash_set(hw, &rss_conf);
3526 #define NUM_VFTA_REGISTERS 128
3527 #define NIC_RX_BUFFER_SIZE 0x200
3528 #define X550_RX_BUFFER_SIZE 0x180
3531 ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
3533 struct rte_eth_vmdq_dcb_conf *cfg;
3534 struct ixgbe_hw *hw;
3535 enum rte_eth_nb_pools num_pools;
3536 uint32_t mrqc, vt_ctl, queue_mapping, vlanctrl;
3538 uint8_t nb_tcs; /* number of traffic classes */
3541 PMD_INIT_FUNC_TRACE();
3542 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3543 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
3544 num_pools = cfg->nb_queue_pools;
3545 /* Check we have a valid number of pools */
3546 if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
3547 ixgbe_rss_disable(dev);
3550 /* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
3551 nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
3555 * split rx buffer up into sections, each for 1 traffic class
3557 switch (hw->mac.type) {
3558 case ixgbe_mac_X550:
3559 case ixgbe_mac_X550EM_x:
3560 case ixgbe_mac_X550EM_a:
3561 pbsize = (uint16_t)(X550_RX_BUFFER_SIZE / nb_tcs);
3564 pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
3567 for (i = 0; i < nb_tcs; i++) {
3568 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
3570 rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
3571 /* clear 10 bits. */
3572 rxpbsize |= (pbsize << IXGBE_RXPBSIZE_SHIFT); /* set value */
3573 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
3575 /* zero alloc all unused TCs */
3576 for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3577 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
3579 rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
3580 /* clear 10 bits. */
3581 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
3584 /* MRQC: enable vmdq and dcb */
3585 mrqc = (num_pools == ETH_16_POOLS) ?
3586 IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN;
3587 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3589 /* PFVTCTL: turn on virtualisation and set the default pool */
3590 vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
3591 if (cfg->enable_default_pool) {
3592 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
3594 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
3597 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
3599 /* RTRUP2TC: mapping user priorities to traffic classes (TCs) */
3601 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
3603 * mapping is done with 3 bits per priority,
3604 * so shift by i*3 each time
3606 queue_mapping |= ((cfg->dcb_tc[i] & 0x07) << (i * 3));
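/*
 * Illustrative mapping (hypothetical values): dcb_tc[] =
 * {0, 0, 0, 0, 1, 1, 1, 1} packs to queue_mapping = 0x249000,
 * i.e. user priorities 4-7 each carry TC 1 in their 3-bit field.
 */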
3608 IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, queue_mapping);
3610 /* RTRPCS: DCB related */
3611 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, IXGBE_RMCS_RRM);
3613 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
3614 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3615 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
3616 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
3618 /* VFTA - enable all vlan filters */
3619 for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
3620 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
3623 /* VFRE: pool enabling for receive - 16 or 32 */
3624 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0),
3625 num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
3628 * MPSAR - allow pools to read specific mac addresses
3629 * In this case, all pools should be able to read from mac addr 0
3631 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), 0xFFFFFFFF);
3632 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), 0xFFFFFFFF);
3634 /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
3635 for (i = 0; i < cfg->nb_pool_maps; i++) {
3636 /* set vlan id in VF register and set the valid bit */
3637 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN |
3638 (cfg->pool_map[i].vlan_id & 0xFFF)));
3640 * Put the allowed pools in VFB reg. As we only have 16 or 32
3641 * pools, we only need to use the first half of the register
3644 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), cfg->pool_map[i].pools);
3649 * ixgbe_dcb_tx_hw_config - Configure general DCB TX parameters
3650 * @dev: pointer to eth_dev structure
3651 * @dcb_config: pointer to ixgbe_dcb_config structure
3654 ixgbe_dcb_tx_hw_config(struct rte_eth_dev *dev,
3655 struct ixgbe_dcb_config *dcb_config)
3658 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3660 PMD_INIT_FUNC_TRACE();
3661 if (hw->mac.type != ixgbe_mac_82598EB) {
3662 /* Disable the Tx desc arbiter so that MTQC can be changed */
3663 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3664 reg |= IXGBE_RTTDCS_ARBDIS;
3665 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3667 /* Enable DCB for Tx with 8 TCs */
3668 if (dcb_config->num_tcs.pg_tcs == 8) {
3669 reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3671 reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3673 if (dcb_config->vt_mode)
3674 reg |= IXGBE_MTQC_VT_ENA;
3675 IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
3677 /* Enable the Tx desc arbiter */
3678 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3679 reg &= ~IXGBE_RTTDCS_ARBDIS;
3680 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3682 /* Enable Security TX Buffer IFG for DCB */
3683 reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
3684 reg |= IXGBE_SECTX_DCB;
3685 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
3690 * ixgbe_vmdq_dcb_hw_tx_config - Configure general VMDQ+DCB TX parameters
3691 * @dev: pointer to rte_eth_dev structure
3692 * @dcb_config: pointer to ixgbe_dcb_config structure
3695 ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
3696 struct ixgbe_dcb_config *dcb_config)
3698 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3699 &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
3700 struct ixgbe_hw *hw =
3701 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3703 PMD_INIT_FUNC_TRACE();
3704 if (hw->mac.type != ixgbe_mac_82598EB)
3705 /* PF VF Transmit Enable */
3706 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0),
3707 vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
3709 /* Configure general DCB TX parameters */
3710 ixgbe_dcb_tx_hw_config(dev, dcb_config);
3714 ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
3715 struct ixgbe_dcb_config *dcb_config)
3717 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3718 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
3719 struct ixgbe_dcb_tc_config *tc;
3722 /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
3723 if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) {
3724 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
3725 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
3727 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
3728 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
3731 /* Initialize User Priority to Traffic Class mapping */
3732 for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3733 tc = &dcb_config->tc_config[j];
3734 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
3737 /* User Priority to Traffic Class mapping */
3738 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3739 j = vmdq_rx_conf->dcb_tc[i];
3740 tc = &dcb_config->tc_config[j];
3741 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
3747 ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
3748 struct ixgbe_dcb_config *dcb_config)
3750 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3751 &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
3752 struct ixgbe_dcb_tc_config *tc;
3755 /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
3756 if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) {
3757 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
3758 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
3760 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
3761 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
3764 /* Initialize User Priority to Traffic Class mapping */
3765 for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3766 tc = &dcb_config->tc_config[j];
3767 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
3770 /* User Priority to Traffic Class mapping */
3771 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3772 j = vmdq_tx_conf->dcb_tc[i];
3773 tc = &dcb_config->tc_config[j];
3774 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
3780 ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
3781 struct ixgbe_dcb_config *dcb_config)
3783 struct rte_eth_dcb_rx_conf *rx_conf =
3784 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
3785 struct ixgbe_dcb_tc_config *tc;
3788 dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
3789 dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
3791 /* Initialize User Priority to Traffic Class mapping */
3792 for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3793 tc = &dcb_config->tc_config[j];
3794 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
3797 /* User Priority to Traffic Class mapping */
3798 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3799 j = rx_conf->dcb_tc[i];
3800 tc = &dcb_config->tc_config[j];
3801 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
3807 ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
3808 struct ixgbe_dcb_config *dcb_config)
3810 struct rte_eth_dcb_tx_conf *tx_conf =
3811 &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
3812 struct ixgbe_dcb_tc_config *tc;
3815 dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
3816 dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
3818 /* Initialize User Priority to Traffic Class mapping */
3819 for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3820 tc = &dcb_config->tc_config[j];
3821 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
3824 /* User Priority to Traffic Class mapping */
3825 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3826 j = tx_conf->dcb_tc[i];
3827 tc = &dcb_config->tc_config[j];
3828 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
3834 * ixgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
3835 * @dev: pointer to eth_dev structure
3836 * @dcb_config: pointer to ixgbe_dcb_config structure
3839 ixgbe_dcb_rx_hw_config(struct rte_eth_dev *dev,
3840 struct ixgbe_dcb_config *dcb_config)
3846 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3848 PMD_INIT_FUNC_TRACE();
3850 * Disable the arbiter before changing parameters
3851 * (always enable recycle mode; WSP)
3853 reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
3854 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
3856 if (hw->mac.type != ixgbe_mac_82598EB) {
3857 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
3858 if (dcb_config->num_tcs.pg_tcs == 4) {
3859 if (dcb_config->vt_mode)
3860 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3861 IXGBE_MRQC_VMDQRT4TCEN;
3863 /* No matter whether the mode is DCB or DCB_RSS, just
3864 * set MRQE to RSSXTCEN. RSS is controlled
3867 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
3868 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3869 IXGBE_MRQC_RTRSS4TCEN;
3872 if (dcb_config->num_tcs.pg_tcs == 8) {
3873 if (dcb_config->vt_mode)
3874 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3875 IXGBE_MRQC_VMDQRT8TCEN;
3877 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
3878 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3879 IXGBE_MRQC_RTRSS8TCEN;
3883 IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
3885 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3886 /* Disable drop for all queues in VMDQ mode*/
3887 for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
3888 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3890 (q << IXGBE_QDE_IDX_SHIFT)));
3892 /* Enable drop for all queues in SRIOV mode */
3893 for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
3894 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3896 (q << IXGBE_QDE_IDX_SHIFT) |
3901 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
3902 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3903 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
3904 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
3906 /* VFTA - enable all vlan filters */
3907 for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
3908 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
3912 * Configure Rx packet plane (recycle mode; WSP) and
3915 reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
3916 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
3920 ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill,
3921 uint16_t *max, uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
3923 switch (hw->mac.type) {
3924 case ixgbe_mac_82598EB:
3925 ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
3927 case ixgbe_mac_82599EB:
3928 case ixgbe_mac_X540:
3929 case ixgbe_mac_X550:
3930 case ixgbe_mac_X550EM_x:
3931 case ixgbe_mac_X550EM_a:
3932 ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
3941 ixgbe_dcb_hw_arbite_tx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *max,
3942 uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
3944 switch (hw->mac.type) {
3945 case ixgbe_mac_82598EB:
3946 ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, tsa);
3947 ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, tsa);
3949 case ixgbe_mac_82599EB:
3950 case ixgbe_mac_X540:
3951 case ixgbe_mac_X550:
3952 case ixgbe_mac_X550EM_x:
3953 case ixgbe_mac_X550EM_a:
3954 ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, tsa);
3955 ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, tsa, map);
3962 #define DCB_RX_CONFIG 1
3963 #define DCB_TX_CONFIG 1
3964 #define DCB_TX_PB 1024
3966 * ixgbe_dcb_hw_configure - Enable DCB and configure
3967 * general DCB in VT mode and non-VT mode parameters
3968 * @dev: pointer to rte_eth_dev structure
3969 * @dcb_config: pointer to ixgbe_dcb_config structure
3972 ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
3973 struct ixgbe_dcb_config *dcb_config)
3976 uint8_t i, pfc_en, nb_tcs;
3977 uint16_t pbsize, rx_buffer_size;
3978 uint8_t config_dcb_rx = 0;
3979 uint8_t config_dcb_tx = 0;
3980 uint8_t tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3981 uint8_t bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3982 uint16_t refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3983 uint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3984 uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3985 struct ixgbe_dcb_tc_config *tc;
3986 uint32_t max_frame = dev->data->mtu + RTE_ETHER_HDR_LEN +
3988 struct ixgbe_hw *hw =
3989 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3990 struct ixgbe_bw_conf *bw_conf =
3991 IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
3993 switch (dev->data->dev_conf.rxmode.mq_mode) {
3994 case ETH_MQ_RX_VMDQ_DCB:
3995 dcb_config->vt_mode = true;
3996 if (hw->mac.type != ixgbe_mac_82598EB) {
3997 config_dcb_rx = DCB_RX_CONFIG;
3999 * get DCB and VT Rx configuration parameters
4002 ixgbe_vmdq_dcb_rx_config(dev, dcb_config);
4003 /*Configure general VMDQ and DCB RX parameters*/
4004 ixgbe_vmdq_dcb_configure(dev);
4008 case ETH_MQ_RX_DCB_RSS:
4009 dcb_config->vt_mode = false;
4010 config_dcb_rx = DCB_RX_CONFIG;
4011 /* Get DCB RX configuration parameters from rte_eth_conf */
4012 ixgbe_dcb_rx_config(dev, dcb_config);
4013 /*Configure general DCB RX parameters*/
4014 ixgbe_dcb_rx_hw_config(dev, dcb_config);
4017 PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
4020 switch (dev->data->dev_conf.txmode.mq_mode) {
4021 case ETH_MQ_TX_VMDQ_DCB:
4022 dcb_config->vt_mode = true;
4023 config_dcb_tx = DCB_TX_CONFIG;
4024 /* get DCB and VT TX configuration parameters
4027 ixgbe_dcb_vt_tx_config(dev, dcb_config);
4028 /*Configure general VMDQ and DCB TX parameters*/
4029 ixgbe_vmdq_dcb_hw_tx_config(dev, dcb_config);
4033 dcb_config->vt_mode = false;
4034 config_dcb_tx = DCB_TX_CONFIG;
4035 /*get DCB TX configuration parameters from rte_eth_conf*/
4036 ixgbe_dcb_tx_config(dev, dcb_config);
4037 /*Configure general DCB TX parameters*/
4038 ixgbe_dcb_tx_hw_config(dev, dcb_config);
4041 PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration");
4045 nb_tcs = dcb_config->num_tcs.pfc_tcs;
4047 ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
4048 if (nb_tcs == ETH_4_TCS) {
4049 /* Avoid un-configured priority mapping to TC0 */
4051 uint8_t mask = 0xFF;
4053 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
4054 mask = (uint8_t)(mask & (~(1 << map[i])));
4055 for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
4056 if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
4060 /* Re-configure 4 TCs BW */
4061 for (i = 0; i < nb_tcs; i++) {
4062 tc = &dcb_config->tc_config[i];
4063 if (bw_conf->tc_num != nb_tcs)
4064 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
4065 (uint8_t)(100 / nb_tcs);
4066 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
4067 (uint8_t)(100 / nb_tcs);
4069 for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
4070 tc = &dcb_config->tc_config[i];
4071 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
4072 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 0;
4075 /* Re-configure 8 TCs BW */
4076 for (i = 0; i < nb_tcs; i++) {
4077 tc = &dcb_config->tc_config[i];
4078 if (bw_conf->tc_num != nb_tcs)
4079 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
4080 (uint8_t)(100 / nb_tcs + (i & 1));
4081 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
4082 (uint8_t)(100 / nb_tcs + (i & 1));
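/*
 * Worked example of the default split above (illustrative arithmetic only):
 * with nb_tcs == 8, 100 / 8 == 12 and the (i & 1) term adds 1 on the odd
 * TC indexes, giving per-TC percentages of 12, 13, 12, 13, ..., which sum
 * to exactly 100. With nb_tcs == 4 the earlier branch yields an even 25 each.
 */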
4086 switch (hw->mac.type) {
4087 case ixgbe_mac_X550:
4088 case ixgbe_mac_X550EM_x:
4089 case ixgbe_mac_X550EM_a:
4090 rx_buffer_size = X550_RX_BUFFER_SIZE;
4093 rx_buffer_size = NIC_RX_BUFFER_SIZE;
4097 if (config_dcb_rx) {
4098 /* Set RX buffer size */
4099 pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
4100 uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT;
4102 for (i = 0; i < nb_tcs; i++) {
4103 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
4105 /* zero alloc all unused TCs */
4106 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
4107 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
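/*
 * Example (sizes are illustrative): with a 512 KB Rx packet buffer and
 * 4 TCs, pbsize == 128, so each configured TC gets a 128 KB share
 * (written as 128 << IXGBE_RXPBSIZE_SHIFT) while the remaining RXPBSIZE
 * registers are zeroed above.
 */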
4110 if (config_dcb_tx) {
4111 /* Only an equally distributed Tx packet buffer
4112 * strategy is supported.
4114 uint32_t txpktsize = IXGBE_TXPBSIZE_MAX / nb_tcs;
4115 uint32_t txpbthresh = (txpktsize / DCB_TX_PB) - IXGBE_TXPKT_SIZE_MAX;
4117 for (i = 0; i < nb_tcs; i++) {
4118 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
4119 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
4121 /* Clear unused TCs, if any, to zero buffer size*/
4122 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
4123 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
4124 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
4128 /*Calculates traffic class credits*/
4129 ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
4130 IXGBE_DCB_TX_CONFIG);
4131 ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
4132 IXGBE_DCB_RX_CONFIG);
4134 if (config_dcb_rx) {
4135 /* Unpack CEE standard containers */
4136 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_RX_CONFIG, refill);
4137 ixgbe_dcb_unpack_max_cee(dcb_config, max);
4138 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_RX_CONFIG, bwgid);
4139 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_RX_CONFIG, tsa);
4140 /* Configure PG(ETS) RX */
4141 ixgbe_dcb_hw_arbite_rx_config(hw, refill, max, bwgid, tsa, map);
4144 if (config_dcb_tx) {
4145 /* Unpack CEE standard containers */
4146 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
4147 ixgbe_dcb_unpack_max_cee(dcb_config, max);
4148 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
4149 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
4150 /* Configure PG(ETS) TX */
4151 ixgbe_dcb_hw_arbite_tx_config(hw, refill, max, bwgid, tsa, map);
4154 /*Configure queue statistics registers*/
4155 ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
4157 /* Check if the PFC is supported */
4158 if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
4159 pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
4160 for (i = 0; i < nb_tcs; i++) {
4162 * If the TC count is 8 and the default high_water is 48,
4163 * the low_water defaults to 16.
4165 hw->fc.high_water[i] = (pbsize * 3) / 4;
4166 hw->fc.low_water[i] = pbsize / 4;
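/*
 * Illustrative numbers tying this to the comment above: with a 512 KB
 * Rx packet buffer and 8 TCs, pbsize == 64, so high_water == 48 and
 * low_water == 16, i.e. the 3/4 and 1/4 watermarks of each TC's share.
 */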
4167 /* Enable pfc for this TC */
4168 tc = &dcb_config->tc_config[i];
4169 tc->pfc = ixgbe_dcb_pfc_enabled;
4171 ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
4172 if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
4174 ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
4181 * ixgbe_configure_dcb - Configure DCB Hardware
4182 * @dev: pointer to rte_eth_dev
4184 void ixgbe_configure_dcb(struct rte_eth_dev *dev)
4186 struct ixgbe_dcb_config *dcb_cfg =
4187 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
4188 struct rte_eth_conf *dev_conf = &(dev->data->dev_conf);
4190 PMD_INIT_FUNC_TRACE();
4192 /* check support mq_mode for DCB */
4193 if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
4194 (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) &&
4195 (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS))
4198 if (dev->data->nb_rx_queues > ETH_DCB_NUM_QUEUES)
4201 /** Configure DCB hardware **/
4202 ixgbe_dcb_hw_configure(dev, dcb_cfg);
4206 * VMDq is only supported on 10 GbE NICs.
4209 ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
4211 struct rte_eth_vmdq_rx_conf *cfg;
4212 struct ixgbe_hw *hw;
4213 enum rte_eth_nb_pools num_pools;
4214 uint32_t mrqc, vt_ctl, vlanctrl;
4218 PMD_INIT_FUNC_TRACE();
4219 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4220 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
4221 num_pools = cfg->nb_queue_pools;
4223 ixgbe_rss_disable(dev);
4225 /* MRQC: enable vmdq */
4226 mrqc = IXGBE_MRQC_VMDQEN;
4227 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
4229 /* PFVTCTL: turn on virtualisation and set the default pool */
4230 vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
4231 if (cfg->enable_default_pool)
4232 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
4234 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
4236 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
4238 for (i = 0; i < (int)num_pools; i++) {
4239 vmolr = ixgbe_convert_vm_rx_mask_to_val(cfg->rx_mode, vmolr);
4240 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
4243 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
4244 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4245 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
4246 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
4248 /* VFTA - enable all vlan filters */
4249 for (i = 0; i < NUM_VFTA_REGISTERS; i++)
4250 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), UINT32_MAX);
4252 /* VFRE: pool enabling for receive - 64 */
4253 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), UINT32_MAX);
4254 if (num_pools == ETH_64_POOLS)
4255 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), UINT32_MAX);
4258 * MPSAR - allow pools to read specific mac addresses
4259 * In this case, all pools should be able to read from mac addr 0
4261 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), UINT32_MAX);
4262 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), UINT32_MAX);
4264 /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
4265 for (i = 0; i < cfg->nb_pool_maps; i++) {
4266 /* set vlan id in VF register and set the valid bit */
4267 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN |
4268 (cfg->pool_map[i].vlan_id & IXGBE_RXD_VLAN_ID_MASK)));
4270 * Put the allowed pools in VFB reg. As we only have 16 or 64
4271 * pools, we only need to use the first half of the register
4274 if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0)
4275 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i * 2),
4276 (cfg->pool_map[i].pools & UINT32_MAX));
4278 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((i * 2 + 1)),
4279 ((cfg->pool_map[i].pools >> 32) & UINT32_MAX));
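/*
 * Example (hypothetical pool map): an entry with vlan_id == 100 and a
 * pools bitmap of 0x3 programs VLVF(i) with the VIEN bit plus VLAN 100
 * and enables pools 0 and 1 through the low (even) VLVFB register; a
 * bitmap using bits 32..63 would instead be written to the high (odd)
 * VLVFB register by the else branch above.
 */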
4283 /* PFDMA Tx General Switch Control Enables VMDQ loopback */
4284 if (cfg->enable_loop_back) {
4285 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
4286 for (i = 0; i < RTE_IXGBE_VMTXSW_REGISTER_COUNT; i++)
4287 IXGBE_WRITE_REG(hw, IXGBE_VMTXSW(i), UINT32_MAX);
4290 IXGBE_WRITE_FLUSH(hw);
4294 * ixgbe_vmdq_tx_hw_configure - Configure general VMDq TX parameters
4295 * @hw: pointer to hardware structure
4298 ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
4303 PMD_INIT_FUNC_TRACE();
4304 /*PF VF Transmit Enable*/
4305 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), UINT32_MAX);
4306 IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), UINT32_MAX);
4308 /* Disable the Tx desc arbiter so that MTQC can be changed */
4309 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
4310 reg |= IXGBE_RTTDCS_ARBDIS;
4311 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
4313 reg = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
4314 IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
4316 /* Disable drop for all queues */
4317 for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
4318 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4319 (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
4321 /* Enable the Tx desc arbiter */
4322 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
4323 reg &= ~IXGBE_RTTDCS_ARBDIS;
4324 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
4326 IXGBE_WRITE_FLUSH(hw);
4329 static int __attribute__((cold))
4330 ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
4332 struct ixgbe_rx_entry *rxe = rxq->sw_ring;
4336 /* Initialize software ring entries */
4337 for (i = 0; i < rxq->nb_rx_desc; i++) {
4338 volatile union ixgbe_adv_rx_desc *rxd;
4339 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
4342 PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
4343 (unsigned) rxq->queue_id);
4347 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
4348 mbuf->port = rxq->port_id;
4351 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
4352 rxd = &rxq->rx_ring[i];
4353 rxd->read.hdr_addr = 0;
4354 rxd->read.pkt_addr = dma_addr;
4362 ixgbe_config_vf_rss(struct rte_eth_dev *dev)
4364 struct ixgbe_hw *hw;
4367 ixgbe_rss_configure(dev);
4369 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4371 /* MRQC: enable VF RSS */
4372 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
4373 mrqc &= ~IXGBE_MRQC_MRQE_MASK;
4374 switch (RTE_ETH_DEV_SRIOV(dev).active) {
4376 mrqc |= IXGBE_MRQC_VMDQRSS64EN;
4380 mrqc |= IXGBE_MRQC_VMDQRSS32EN;
4384 PMD_INIT_LOG(ERR, "Invalid pool number in IOV mode with VMDQ RSS");
4388 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
4394 ixgbe_config_vf_default(struct rte_eth_dev *dev)
4396 struct ixgbe_hw *hw =
4397 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4399 switch (RTE_ETH_DEV_SRIOV(dev).active) {
4401 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
4406 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
4407 IXGBE_MRQC_VMDQRT4TCEN);
4411 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
4412 IXGBE_MRQC_VMDQRT8TCEN);
4416 "invalid pool number in IOV mode");
4423 ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
4425 struct ixgbe_hw *hw =
4426 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4428 if (hw->mac.type == ixgbe_mac_82598EB)
4431 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
4433 * SRIOV inactive scheme
4434 * any DCB/RSS w/o VMDq multi-queue setting
4436 switch (dev->data->dev_conf.rxmode.mq_mode) {
4438 case ETH_MQ_RX_DCB_RSS:
4439 case ETH_MQ_RX_VMDQ_RSS:
4440 ixgbe_rss_configure(dev);
4443 case ETH_MQ_RX_VMDQ_DCB:
4444 ixgbe_vmdq_dcb_configure(dev);
4447 case ETH_MQ_RX_VMDQ_ONLY:
4448 ixgbe_vmdq_rx_hw_configure(dev);
4451 case ETH_MQ_RX_NONE:
4453 /* if mq_mode is none, disable rss mode.*/
4454 ixgbe_rss_disable(dev);
4458 /* SRIOV active scheme
4459 * Support RSS together with SRIOV.
4461 switch (dev->data->dev_conf.rxmode.mq_mode) {
4463 case ETH_MQ_RX_VMDQ_RSS:
4464 ixgbe_config_vf_rss(dev);
4466 case ETH_MQ_RX_VMDQ_DCB:
4468 /* In SRIOV, the configuration is the same as VMDq case */
4469 ixgbe_vmdq_dcb_configure(dev);
4471 /* DCB/RSS together with SRIOV is not supported */
4472 case ETH_MQ_RX_VMDQ_DCB_RSS:
4473 case ETH_MQ_RX_DCB_RSS:
4475 "Could not support DCB/RSS with VMDq & SRIOV");
4478 ixgbe_config_vf_default(dev);
4487 ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
4489 struct ixgbe_hw *hw =
4490 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4494 if (hw->mac.type == ixgbe_mac_82598EB)
4497 /* disable arbiter before setting MTQC */
4498 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
4499 rttdcs |= IXGBE_RTTDCS_ARBDIS;
4500 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
4502 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
4504 * SRIOV inactive scheme
4505 * any DCB w/o VMDq multi-queue setting
4507 if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
4508 ixgbe_vmdq_tx_hw_configure(hw);
4510 mtqc = IXGBE_MTQC_64Q_1PB;
4511 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
4514 switch (RTE_ETH_DEV_SRIOV(dev).active) {
4517 * SRIOV active scheme
4518 * FIXME if support DCB together with VMDq & SRIOV
4521 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
4524 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_32VF;
4527 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_RT_ENA |
4531 mtqc = IXGBE_MTQC_64Q_1PB;
4532 PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
4534 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
4537 /* re-enable arbiter */
4538 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
4539 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
4545 * ixgbe_get_rscctl_maxdesc - Calculate the RSCCTL[n].MAXDESC for PF
4547 * Return the RSCCTL[n].MAXDESC for 82599 and x540 PF devices according to the
4548 * spec rev. 3.0 chapter 8.2.3.8.13.
4550 * @pool Memory pool of the Rx queue
4552 static inline uint32_t
4553 ixgbe_get_rscctl_maxdesc(struct rte_mempool *pool)
4555 struct rte_pktmbuf_pool_private *mp_priv = rte_mempool_get_priv(pool);
4557 /* MAXDESC * SRRCTL.BSIZEPKT must not exceed 64 KB minus one */
4559 RTE_IPV4_MAX_PKT_LEN /
4560 (mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);
4563 return IXGBE_RSCCTL_MAXDESC_16;
4564 else if (maxdesc >= 8)
4565 return IXGBE_RSCCTL_MAXDESC_8;
4566 else if (maxdesc >= 4)
4567 return IXGBE_RSCCTL_MAXDESC_4;
4569 return IXGBE_RSCCTL_MAXDESC_1;
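/*
 * Worked example (assuming a typical 2 KB data room per mbuf after
 * RTE_PKTMBUF_HEADROOM): maxdesc == 65535 / 2048 == 31, so the function
 * returns IXGBE_RSCCTL_MAXDESC_16, and 16 descriptors * 2 KB == 32 KB
 * stays safely below the 64 KB limit noted above.
 */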
4573 * ixgbe_set_ivar - Setup the correct IVAR register for a particular MSIX
4576 * (Taken from FreeBSD tree)
4577 * (yes this is all very magic and confusing :)
4580 * @entry the register array entry
4581 * @vector the MSIX vector for this queue
4585 ixgbe_set_ivar(struct rte_eth_dev *dev, u8 entry, u8 vector, s8 type)
4587 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4590 vector |= IXGBE_IVAR_ALLOC_VAL;
4592 switch (hw->mac.type) {
4594 case ixgbe_mac_82598EB:
4596 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4598 entry += (type * 64);
4599 index = (entry >> 2) & 0x1F;
4600 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4601 ivar &= ~(0xFF << (8 * (entry & 0x3)));
4602 ivar |= (vector << (8 * (entry & 0x3)));
4603 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4606 case ixgbe_mac_82599EB:
4607 case ixgbe_mac_X540:
4608 if (type == -1) { /* MISC IVAR */
4609 index = (entry & 1) * 8;
4610 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4611 ivar &= ~(0xFF << index);
4612 ivar |= (vector << index);
4613 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4614 } else { /* RX/TX IVARS */
4615 index = (16 * (entry & 1)) + (8 * type);
4616 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4617 ivar &= ~(0xFF << index);
4618 ivar |= (vector << index);
4619 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
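/*
 * On 82599/X540 each IVAR register holds four 8-bit entries: bits 7:0 and
 * 15:8 carry the Rx and Tx vectors of the even queue, bits 23:16 and 31:24
 * those of the odd queue. Example (illustrative): entry == 5 with type == 0
 * (Rx) gives index == 16, so the vector lands in bits 23:16 of IVAR(2).
 */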
4629 void __attribute__((cold))
4630 ixgbe_set_rx_function(struct rte_eth_dev *dev)
4632 uint16_t i, rx_using_sse;
4633 struct ixgbe_adapter *adapter =
4634 (struct ixgbe_adapter *)dev->data->dev_private;
4637 * In order to allow Vector Rx there are a few configuration
4638 * conditions to be met and Rx Bulk Allocation should be allowed.
4640 if (ixgbe_rx_vec_dev_conf_condition_check(dev) ||
4641 !adapter->rx_bulk_alloc_allowed) {
4642 PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx "
4643 "preconditions or RTE_IXGBE_INC_VECTOR is "
4645 dev->data->port_id);
4647 adapter->rx_vec_allowed = false;
4651 * Initialize the appropriate LRO callback.
4653 * If all queues satisfy the bulk allocation preconditions
4654 * (hw->rx_bulk_alloc_allowed is TRUE) then we may use bulk allocation.
4655 * Otherwise use a single allocation version.
4657 if (dev->data->lro) {
4658 if (adapter->rx_bulk_alloc_allowed) {
4659 PMD_INIT_LOG(DEBUG, "LRO is requested. Using a bulk "
4660 "allocation version");
4661 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
4663 PMD_INIT_LOG(DEBUG, "LRO is requested. Using a single "
4664 "allocation version");
4665 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
4667 } else if (dev->data->scattered_rx) {
4669 * Set the non-LRO scattered callback: there are Vector and
4670 * single allocation versions.
4672 if (adapter->rx_vec_allowed) {
4673 PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
4674 "callback (port=%d).",
4675 dev->data->port_id);
4677 dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
4678 } else if (adapter->rx_bulk_alloc_allowed) {
4679 PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
4680 "allocation callback (port=%d).",
4681 dev->data->port_id);
4682 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
4684 PMD_INIT_LOG(DEBUG, "Using Regular (non-vector, "
4685 "single allocation) "
4686 "Scattered Rx callback "
4688 dev->data->port_id);
4690 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
4693 * Below we set "simple" callbacks according to port/queues parameters.
4694 * If parameters allow we are going to choose between the following
4698 * - Single buffer allocation (the simplest one)
4700 } else if (adapter->rx_vec_allowed) {
4701 PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
4702 "burst size no less than %d (port=%d).",
4703 RTE_IXGBE_DESCS_PER_LOOP,
4704 dev->data->port_id);
4706 dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
4707 } else if (adapter->rx_bulk_alloc_allowed) {
4708 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
4709 "satisfied. Rx Burst Bulk Alloc function "
4710 "will be used on port=%d.",
4711 dev->data->port_id);
4713 dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
4715 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
4716 "satisfied, or Scattered Rx is requested "
4718 dev->data->port_id);
4720 dev->rx_pkt_burst = ixgbe_recv_pkts;
4723 /* Propagate information about RX function choice through all queues. */
4726 (dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec ||
4727 dev->rx_pkt_burst == ixgbe_recv_pkts_vec);
4729 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4730 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
4732 rxq->rx_using_sse = rx_using_sse;
4733 #ifdef RTE_LIBRTE_SECURITY
4734 rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
4735 DEV_RX_OFFLOAD_SECURITY);
4741 * ixgbe_set_rsc - configure RSC related port HW registers
4743 * Configures the port's RSC related registers according to chapter 4.6.7.2
4744 * of the 82599 Spec (x540 configuration is virtually the same).
4748 * Returns 0 in case of success or a non-zero error code
4751 ixgbe_set_rsc(struct rte_eth_dev *dev)
4753 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
4754 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4755 struct rte_eth_dev_info dev_info = { 0 };
4756 bool rsc_capable = false;
4762 dev->dev_ops->dev_infos_get(dev, &dev_info);
4763 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
4766 if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
4767 PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
4772 /* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
4774 if ((rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC) &&
4775 (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
4777 * According to chapter 4.6.7.2.1 of the Spec Rev.
4778 * 3.0 RSC configuration requires HW CRC stripping being
4779 * enabled. If user requested both HW CRC stripping off
4780 * and RSC on - return an error.
4782 PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC "
4787 /* RFCTL configuration */
4788 rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
4789 if ((rsc_capable) && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
4791 * Since NFS packet coalescing is not supported - clear
4792 * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
4795 rfctl &= ~(IXGBE_RFCTL_RSC_DIS | IXGBE_RFCTL_NFSW_DIS |
4796 IXGBE_RFCTL_NFSR_DIS);
4798 rfctl |= IXGBE_RFCTL_RSC_DIS;
4799 IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
4801 /* If LRO hasn't been requested - we are done here. */
4802 if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
4805 /* Set RDRXCTL.RSCACKC bit */
4806 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
4807 rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
4808 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
4810 /* Per-queue RSC configuration (chapter 4.6.7.2.2 of 82599 Spec) */
4811 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4812 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
4814 IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxq->reg_idx));
4816 IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxq->reg_idx));
4818 IXGBE_READ_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx));
4820 IXGBE_READ_REG(hw, IXGBE_EITR(rxq->reg_idx));
4823 * ixgbe PMD doesn't support header-split at the moment.
4825 * Following chapter 4.6.7.2.1 of the 82599/x540
4826 * Spec, if RSC is enabled the SRRCTL[n].BSIZEHEADER
4827 * should be configured even if header split is not
4828 * enabled. We will configure it to 128 bytes following the
4829 * recommendation in the spec.
4831 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
4832 srrctl |= (128 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
4833 IXGBE_SRRCTL_BSIZEHDR_MASK;
4836 * TODO: Consider setting the Receive Descriptor Minimum
4837 * Threshold Size for an RSC case. This is not an obviously
4838 * beneficial option but one worth considering...
4841 rscctl |= IXGBE_RSCCTL_RSCEN;
4842 rscctl |= ixgbe_get_rscctl_maxdesc(rxq->mb_pool);
4843 psrtype |= IXGBE_PSRTYPE_TCPHDR;
4846 * RSC: Set ITR interval corresponding to 2K ints/s.
4848 * Full-sized RSC aggregations for a 10Gb/s link will
4849 * arrive at about 20K aggregation/s rate.
4851 * A 2K ints/s rate will cause only 10% of the
4852 * aggregations to be closed due to the interrupt timer
4853 * expiration for a streaming at wire-speed case.
4855 * For a sparse streaming case this setting will yield
4856 * at most 500us latency for a single RSC aggregation.
4858 eitr &= ~IXGBE_EITR_ITR_INT_MASK;
4859 eitr |= IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT);
4860 eitr |= IXGBE_EITR_CNT_WDIS;
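/*
 * Back-of-the-envelope check of the comment above: a 500 us ITR interval
 * (the assumed IXGBE_QUEUE_ITR_INTERVAL_DEFAULT) caps the interrupt rate
 * at 1 / 500 us == 2000 ints/s, so against ~20K aggregations/s the timer
 * can expire on at most roughly 10% of them.
 */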
4862 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
4863 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxq->reg_idx), rscctl);
4864 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
4865 IXGBE_WRITE_REG(hw, IXGBE_EITR(rxq->reg_idx), eitr);
4868 * RSC requires the mapping of the queue to the
4871 ixgbe_set_ivar(dev, rxq->reg_idx, i, 0);
4876 PMD_INIT_LOG(DEBUG, "enabling LRO mode");
4882 * Initializes Receive Unit.
4884 int __attribute__((cold))
4885 ixgbe_dev_rx_init(struct rte_eth_dev *dev)
4887 struct ixgbe_hw *hw;
4888 struct ixgbe_rx_queue *rxq;
4899 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
4902 PMD_INIT_FUNC_TRACE();
4903 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4906 * Make sure receives are disabled while setting
4907 * up the RX context (registers, descriptor rings, etc.).
4909 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4910 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
4912 /* Enable receipt of broadcasted frames */
4913 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4914 fctrl |= IXGBE_FCTRL_BAM;
4915 fctrl |= IXGBE_FCTRL_DPF;
4916 fctrl |= IXGBE_FCTRL_PMCF;
4917 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4920 * Configure CRC stripping, if any.
4922 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4923 if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
4924 hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
4926 hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
4929 * Configure jumbo frame support, if any.
4931 if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
4932 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
4933 maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
4934 maxfrs &= 0x0000FFFF;
4935 maxfrs |= (rx_conf->max_rx_pkt_len << 16);
4936 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
4938 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
4941 * If loopback mode is configured, set LPBK bit.
4943 if (dev->data->dev_conf.lpbk_mode != 0) {
4944 rc = ixgbe_check_supported_loopback_mode(dev);
4946 PMD_INIT_LOG(ERR, "Unsupported loopback mode");
4949 hlreg0 |= IXGBE_HLREG0_LPBK;
4951 hlreg0 &= ~IXGBE_HLREG0_LPBK;
4954 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4957 * Assume no header split and no VLAN strip support
4958 * on any Rx queue first.
4960 rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
4961 /* Setup RX queues */
4962 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4963 rxq = dev->data->rx_queues[i];
4966 * Reset crc_len in case it was changed after queue setup by a
4967 * call to configure.
4969 if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
4970 rxq->crc_len = RTE_ETHER_CRC_LEN;
4974 /* Setup the Base and Length of the Rx Descriptor Rings */
4975 bus_addr = rxq->rx_ring_phys_addr;
4976 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rxq->reg_idx),
4977 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4978 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rxq->reg_idx),
4979 (uint32_t)(bus_addr >> 32));
4980 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rxq->reg_idx),
4981 rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
4982 IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
4983 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), 0);
4985 /* Configure the SRRCTL register */
4986 srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
4988 /* Set if packets are dropped when no descriptors available */
4990 srrctl |= IXGBE_SRRCTL_DROP_EN;
4993 * Configure the RX buffer size in the BSIZEPACKET field of
4994 * the SRRCTL register of the queue.
4995 * The value is in 1 KB resolution. Valid values can be from
4998 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
4999 RTE_PKTMBUF_HEADROOM);
5000 srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
5001 IXGBE_SRRCTL_BSIZEPKT_MASK);
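/*
 * Example (illustrative pool): mbufs with 2048 bytes of data room after
 * RTE_PKTMBUF_HEADROOM give buf_size == 2048, so BSIZEPKT == 2048 >> 10
 * == 2 and the hardware uses 2 KB receive buffers; the read-back below
 * recovers the same 2048-byte value.
 */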
5003 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
5005 buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
5006 IXGBE_SRRCTL_BSIZEPKT_SHIFT);
5008 /* Add dual VLAN tag length to support double VLAN tagging */
5009 if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
5010 2 * IXGBE_VLAN_TAG_SIZE > buf_size)
5011 dev->data->scattered_rx = 1;
5012 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
5013 rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
5016 if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
5017 dev->data->scattered_rx = 1;
5020 * Device configured with multiple RX queues.
5022 ixgbe_dev_mq_rx_configure(dev);
5025 * Setup the Checksum Register.
5026 * Disable Full-Packet Checksum which is mutually exclusive with RSS.
5027 * Enable IP/L4 checksum computation by hardware if requested to do so.
5029 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
5030 rxcsum |= IXGBE_RXCSUM_PCSD;
5031 if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
5032 rxcsum |= IXGBE_RXCSUM_IPPCSE;
5034 rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
5036 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
5038 if (hw->mac.type == ixgbe_mac_82599EB ||
5039 hw->mac.type == ixgbe_mac_X540) {
5040 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
5041 if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
5042 rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
5044 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
5045 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
5046 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
5049 rc = ixgbe_set_rsc(dev);
5053 ixgbe_set_rx_function(dev);
5059 * Initializes Transmit Unit.
5061 void __attribute__((cold))
5062 ixgbe_dev_tx_init(struct rte_eth_dev *dev)
5064 struct ixgbe_hw *hw;
5065 struct ixgbe_tx_queue *txq;
5071 PMD_INIT_FUNC_TRACE();
5072 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5074 /* Enable TX CRC (checksum offload requirement) and hw padding
5077 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
5078 hlreg0 |= (IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN);
5079 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
5081 /* Setup the Base and Length of the Tx Descriptor Rings */
5082 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5083 txq = dev->data->tx_queues[i];
5085 bus_addr = txq->tx_ring_phys_addr;
5086 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(txq->reg_idx),
5087 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
5088 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(txq->reg_idx),
5089 (uint32_t)(bus_addr >> 32));
5090 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(txq->reg_idx),
5091 txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
5092 /* Setup the HW Tx Head and TX Tail descriptor pointers */
5093 IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
5094 IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
5097 * Disable Tx Head Writeback RO bit, since this hoses
5098 * bookkeeping if things aren't delivered in order.
5100 switch (hw->mac.type) {
5101 case ixgbe_mac_82598EB:
5102 txctrl = IXGBE_READ_REG(hw,
5103 IXGBE_DCA_TXCTRL(txq->reg_idx));
5104 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
5105 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx),
5109 case ixgbe_mac_82599EB:
5110 case ixgbe_mac_X540:
5111 case ixgbe_mac_X550:
5112 case ixgbe_mac_X550EM_x:
5113 case ixgbe_mac_X550EM_a:
5115 txctrl = IXGBE_READ_REG(hw,
5116 IXGBE_DCA_TXCTRL_82599(txq->reg_idx));
5117 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
5118 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx),
5124 /* Device configured with multiple TX queues. */
5125 ixgbe_dev_mq_tx_configure(dev);
5129 * Check if requested loopback mode is supported
5132 ixgbe_check_supported_loopback_mode(struct rte_eth_dev *dev)
5134 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5136 if (dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_TX_RX)
5137 if (hw->mac.type == ixgbe_mac_82599EB ||
5138 hw->mac.type == ixgbe_mac_X540 ||
5139 hw->mac.type == ixgbe_mac_X550 ||
5140 hw->mac.type == ixgbe_mac_X550EM_x ||
5141 hw->mac.type == ixgbe_mac_X550EM_a)
5148 * Set up link for 82599 loopback mode Tx->Rx.
5150 static inline void __attribute__((cold))
5151 ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
5153 PMD_INIT_FUNC_TRACE();
5155 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
5156 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM) !=
5158 PMD_INIT_LOG(ERR, "Could not enable loopback mode");
5167 IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU);
5168 ixgbe_reset_pipeline_82599(hw);
5170 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
5176 * Start Transmit and Receive Units.
5178 int __attribute__((cold))
5179 ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
5181 struct ixgbe_hw *hw;
5182 struct ixgbe_tx_queue *txq;
5183 struct ixgbe_rx_queue *rxq;
5190 PMD_INIT_FUNC_TRACE();
5191 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5193 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5194 txq = dev->data->tx_queues[i];
5195 /* Setup Transmit Threshold Registers */
5196 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
5197 txdctl |= txq->pthresh & 0x7F;
5198 txdctl |= ((txq->hthresh & 0x7F) << 8);
5199 txdctl |= ((txq->wthresh & 0x7F) << 16);
5200 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
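/*
 * TXDCTL packs the prefetch, host and write-back thresholds into bits 6:0,
 * 14:8 and 22:16 respectively. Example (illustrative values): pthresh == 32
 * with hthresh == wthresh == 0 sets only 0x20 in the threshold fields; the
 * enable bit is set separately when the queue is started.
 */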
5203 if (hw->mac.type != ixgbe_mac_82598EB) {
5204 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
5205 dmatxctl |= IXGBE_DMATXCTL_TE;
5206 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
5209 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5210 txq = dev->data->tx_queues[i];
5211 if (!txq->tx_deferred_start) {
5212 ret = ixgbe_dev_tx_queue_start(dev, i);
5218 for (i = 0; i < dev->data->nb_rx_queues; i++) {
5219 rxq = dev->data->rx_queues[i];
5220 if (!rxq->rx_deferred_start) {
5221 ret = ixgbe_dev_rx_queue_start(dev, i);
5227 /* Enable Receive engine */
5228 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
5229 if (hw->mac.type == ixgbe_mac_82598EB)
5230 rxctrl |= IXGBE_RXCTRL_DMBYPS;
5231 rxctrl |= IXGBE_RXCTRL_RXEN;
5232 hw->mac.ops.enable_rx_dma(hw, rxctrl);
5234 /* If loopback mode is enabled, set up the link accordingly */
5235 if (dev->data->dev_conf.lpbk_mode != 0) {
5236 if (hw->mac.type == ixgbe_mac_82599EB)
5237 ixgbe_setup_loopback_link_82599(hw);
5238 else if (hw->mac.type == ixgbe_mac_X540 ||
5239 hw->mac.type == ixgbe_mac_X550 ||
5240 hw->mac.type == ixgbe_mac_X550EM_x ||
5241 hw->mac.type == ixgbe_mac_X550EM_a)
5242 ixgbe_setup_loopback_link_x540_x550(hw, true);
5245 #ifdef RTE_LIBRTE_SECURITY
5246 if ((dev->data->dev_conf.rxmode.offloads &
5247 DEV_RX_OFFLOAD_SECURITY) ||
5248 (dev->data->dev_conf.txmode.offloads &
5249 DEV_TX_OFFLOAD_SECURITY)) {
5250 ret = ixgbe_crypto_enable_ipsec(dev);
5253 "ixgbe_crypto_enable_ipsec fails with %d.",
5264 * Start Receive Units for specified queue.
5266 int __attribute__((cold))
5267 ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
5269 struct ixgbe_hw *hw;
5270 struct ixgbe_rx_queue *rxq;
5274 PMD_INIT_FUNC_TRACE();
5275 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5277 rxq = dev->data->rx_queues[rx_queue_id];
5279 /* Allocate buffers for descriptor rings */
5280 if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
5281 PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
5285 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5286 rxdctl |= IXGBE_RXDCTL_ENABLE;
5287 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
5289 /* Wait until RX Enable ready */
5290 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5293 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5294 } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
5296 PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", rx_queue_id);
5298 IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
5299 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
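/*
 * The tail is set to nb_rx_desc - 1 rather than nb_rx_desc so one
 * descriptor always stays software-owned (the usual ring convention:
 * head == tail would otherwise be ambiguous between a full ring and an
 * empty one).
 */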
5300 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
5306 * Stop Receive Units for specified queue.
5308 int __attribute__((cold))
5309 ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
5311 struct ixgbe_hw *hw;
5312 struct ixgbe_adapter *adapter =
5313 (struct ixgbe_adapter *)dev->data->dev_private;
5314 struct ixgbe_rx_queue *rxq;
5318 PMD_INIT_FUNC_TRACE();
5319 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5321 rxq = dev->data->rx_queues[rx_queue_id];
5323 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5324 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
5325 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
5327 /* Wait until RX Enable bit clear */
5328 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5331 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5332 } while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE));
5334 PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id);
5336 rte_delay_us(RTE_IXGBE_WAIT_100_US);
5338 ixgbe_rx_queue_release_mbufs(rxq);
5339 ixgbe_reset_rx_queue(adapter, rxq);
5340 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
5347 * Start Transmit Units for specified queue.
5349 int __attribute__((cold))
5350 ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
5352 struct ixgbe_hw *hw;
5353 struct ixgbe_tx_queue *txq;
5357 PMD_INIT_FUNC_TRACE();
5358 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5360 txq = dev->data->tx_queues[tx_queue_id];
5361 IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
5362 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
5363 txdctl |= IXGBE_TXDCTL_ENABLE;
5364 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
5366 /* Wait until TX Enable ready */
5367 if (hw->mac.type == ixgbe_mac_82599EB) {
5368 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5371 txdctl = IXGBE_READ_REG(hw,
5372 IXGBE_TXDCTL(txq->reg_idx));
5373 } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
5375 PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d",
5379 IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
5380 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
5386 * Stop Transmit Units for specified queue.
5388 int __attribute__((cold))
5389 ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
5391 struct ixgbe_hw *hw;
5392 struct ixgbe_tx_queue *txq;
5394 uint32_t txtdh, txtdt;
5397 PMD_INIT_FUNC_TRACE();
5398 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5400 txq = dev->data->tx_queues[tx_queue_id];
5402 /* Wait until TX queue is empty */
5403 if (hw->mac.type == ixgbe_mac_82599EB) {
5404 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5406 rte_delay_us(RTE_IXGBE_WAIT_100_US);
5407 txtdh = IXGBE_READ_REG(hw,
5408 IXGBE_TDH(txq->reg_idx));
5409 txtdt = IXGBE_READ_REG(hw,
5410 IXGBE_TDT(txq->reg_idx));
5411 } while (--poll_ms && (txtdh != txtdt));
5414 "Tx Queue %d is not empty when stopping.",
5418 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
5419 txdctl &= ~IXGBE_TXDCTL_ENABLE;
5420 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
5422 /* Wait until TX Enable bit clear */
5423 if (hw->mac.type == ixgbe_mac_82599EB) {
5424 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5427 txdctl = IXGBE_READ_REG(hw,
5428 IXGBE_TXDCTL(txq->reg_idx));
5429 } while (--poll_ms && (txdctl & IXGBE_TXDCTL_ENABLE));
5431 PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d",
5435 if (txq->ops != NULL) {
5436 txq->ops->release_mbufs(txq);
5437 txq->ops->reset(txq);
5439 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
5445 ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
5446 struct rte_eth_rxq_info *qinfo)
5448 struct ixgbe_rx_queue *rxq;
5450 rxq = dev->data->rx_queues[queue_id];
5452 qinfo->mp = rxq->mb_pool;
5453 qinfo->scattered_rx = dev->data->scattered_rx;
5454 qinfo->nb_desc = rxq->nb_rx_desc;
5456 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
5457 qinfo->conf.rx_drop_en = rxq->drop_en;
5458 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
5459 qinfo->conf.offloads = rxq->offloads;
5463 ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
5464 struct rte_eth_txq_info *qinfo)
5466 struct ixgbe_tx_queue *txq;
5468 txq = dev->data->tx_queues[queue_id];
5470 qinfo->nb_desc = txq->nb_tx_desc;
5472 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
5473 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
5474 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
5476 qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
5477 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
5478 qinfo->conf.offloads = txq->offloads;
5479 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
5483 * [VF] Initializes Receive Unit.
5485 int __attribute__((cold))
5486 ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
5488 struct ixgbe_hw *hw;
5489 struct ixgbe_rx_queue *rxq;
5490 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
5492 uint32_t srrctl, psrtype = 0;
5497 PMD_INIT_FUNC_TRACE();
5498 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5500 if (rte_is_power_of_2(dev->data->nb_rx_queues) == 0) {
5501 PMD_INIT_LOG(ERR, "The number of Rx queues is invalid, "
5502 "it should be a power of 2");
5506 if (dev->data->nb_rx_queues > hw->mac.max_rx_queues) {
5507 PMD_INIT_LOG(ERR, "The number of Rx queues is invalid, "
5508 "it should be equal to or less than %d",
5509 hw->mac.max_rx_queues);
5514 * When the VF driver issues an IXGBE_VF_RESET request, the PF driver
5515 * disables the VF receipt of packets if the PF MTU is > 1500.
5516 * This is done to deal with an 82599 limitation that forces
5517 * the PF and all VFs to share the same MTU.
5518 * Then, the PF driver re-enables VF packet reception when
5519 * the VF driver issues an IXGBE_VF_SET_LPE request.
5520 * In the meantime, the VF device cannot be used, even if the VF driver
5521 * and the Guest VM network stack are ready to accept packets with a
5522 * size up to the PF MTU.
5523 * As a work-around to this PF behaviour, force the call to
5524 * ixgbevf_rlpml_set_vf even if jumbo frames are not used. This way,
5525 * VF packet reception works in all cases.
5527 ixgbevf_rlpml_set_vf(hw,
5528 (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
5531 * Assume no header split and no VLAN strip support
5532 * on any Rx queue first.
5534 rxmode->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
5535 /* Setup RX queues */
5536 for (i = 0; i < dev->data->nb_rx_queues; i++) {
5537 rxq = dev->data->rx_queues[i];
5539 /* Allocate buffers for descriptor rings */
5540 ret = ixgbe_alloc_rx_queue_mbufs(rxq);
5544 /* Setup the Base and Length of the Rx Descriptor Rings */
5545 bus_addr = rxq->rx_ring_phys_addr;
5547 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
5548 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
5549 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
5550 (uint32_t)(bus_addr >> 32));
5551 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
5552 rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
5553 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0);
5554 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0);
5557 /* Configure the SRRCTL register */
5558 srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
5560 /* Set if packets are dropped when no descriptors available */
5562 srrctl |= IXGBE_SRRCTL_DROP_EN;
5565 * Configure the RX buffer size in the BSIZEPACKET field of
5566 * the SRRCTL register of the queue.
5567 * The value is in 1 KB resolution. Valid values can be from
5570 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
5571 RTE_PKTMBUF_HEADROOM);
5572 srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
5573 IXGBE_SRRCTL_BSIZEPKT_MASK);
5576 * VF modification to write virtual function SRRCTL register
5578 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), srrctl);
5580 buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
5581 IXGBE_SRRCTL_BSIZEPKT_SHIFT);
5583 if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
5584 /* Add dual VLAN tag length to support double VLAN tagging */
5585 (rxmode->max_rx_pkt_len +
5586 2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
5587 if (!dev->data->scattered_rx)
5588 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
5589 dev->data->scattered_rx = 1;
5592 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
5593 rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
5596 /* Set RQPL for VF RSS according to max Rx queue */
5597 psrtype |= (dev->data->nb_rx_queues >> 1) <<
5598 IXGBE_PSRTYPE_RQPL_SHIFT;
5599 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
5601 ixgbe_set_rx_function(dev);
5607 * [VF] Initializes Transmit Unit.
5609 void __attribute__((cold))
5610 ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
5612 struct ixgbe_hw *hw;
5613 struct ixgbe_tx_queue *txq;
5618 PMD_INIT_FUNC_TRACE();
5619 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5621 /* Setup the Base and Length of the Tx Descriptor Rings */
5622 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5623 txq = dev->data->tx_queues[i];
5624 bus_addr = txq->tx_ring_phys_addr;
5625 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
5626 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
5627 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i),
5628 (uint32_t)(bus_addr >> 32));
5629 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
5630 txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
5631 /* Setup the HW Tx Head and TX Tail descriptor pointers */
5632 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
5633 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
5636 * Disable Tx Head Writeback RO bit, since this hoses
5637 * bookkeeping if things aren't delivered in order.
5639 txctrl = IXGBE_READ_REG(hw,
5640 IXGBE_VFDCA_TXCTRL(i));
5641 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
5642 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i),
5648 * [VF] Start Transmit and Receive Units.
5650 void __attribute__((cold))
5651 ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
5653 struct ixgbe_hw *hw;
5654 struct ixgbe_tx_queue *txq;
5655 struct ixgbe_rx_queue *rxq;
5661 PMD_INIT_FUNC_TRACE();
5662 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5664 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5665 txq = dev->data->tx_queues[i];
5666 /* Setup Transmit Threshold Registers */
5667 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
5668 txdctl |= txq->pthresh & 0x7F;
5669 txdctl |= ((txq->hthresh & 0x7F) << 8);
5670 txdctl |= ((txq->wthresh & 0x7F) << 16);
5671 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
5674 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5676 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
5677 txdctl |= IXGBE_TXDCTL_ENABLE;
5678 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
5681 /* Wait until TX Enable ready */
5684 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
5685 } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
5687 PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i);
5689 for (i = 0; i < dev->data->nb_rx_queues; i++) {
5691 rxq = dev->data->rx_queues[i];
5693 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
5694 rxdctl |= IXGBE_RXDCTL_ENABLE;
5695 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
5697 /* Wait until RX Enable ready */
5701 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
5702 } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
5704 PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i);
5706 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1);
5712 ixgbe_rss_conf_init(struct ixgbe_rte_flow_rss_conf *out,
5713 const struct rte_flow_action_rss *in)
5715 if (in->key_len > RTE_DIM(out->key) ||
5716 in->queue_num > RTE_DIM(out->queue))
5718 out->conf = (struct rte_flow_action_rss){
5722 .key_len = in->key_len,
5723 .queue_num = in->queue_num,
5724 .key = memcpy(out->key, in->key, in->key_len),
5725 .queue = memcpy(out->queue, in->queue,
5726 sizeof(*in->queue) * in->queue_num),
5732 ixgbe_action_rss_same(const struct rte_flow_action_rss *comp,
5733 const struct rte_flow_action_rss *with)
5735 return (comp->func == with->func &&
5736 comp->level == with->level &&
5737 comp->types == with->types &&
5738 comp->key_len == with->key_len &&
5739 comp->queue_num == with->queue_num &&
5740 !memcmp(comp->key, with->key, with->key_len) &&
5741 !memcmp(comp->queue, with->queue,
5742 sizeof(*with->queue) * with->queue_num));
5746 ixgbe_config_rss_filter(struct rte_eth_dev *dev,
5747 struct ixgbe_rte_flow_rss_conf *conf, bool add)
5749 struct ixgbe_hw *hw;
5753 uint16_t sp_reta_size;
5755 struct rte_eth_rss_conf rss_conf = {
5756 .rss_key = conf->conf.key_len ?
5757 (void *)(uintptr_t)conf->conf.key : NULL,
5758 .rss_key_len = conf->conf.key_len,
5759 .rss_hf = conf->conf.types,
5761 struct ixgbe_filter_info *filter_info =
5762 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5764 PMD_INIT_FUNC_TRACE();
5765 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5767 sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
5770 if (ixgbe_action_rss_same(&filter_info->rss_info.conf,
5772 ixgbe_rss_disable(dev);
5773 memset(&filter_info->rss_info, 0,
5774 sizeof(struct ixgbe_rte_flow_rss_conf));
5780 if (filter_info->rss_info.conf.queue_num)
5782 /* Fill in redirection table
5783 * The byte-swap is needed because NIC registers are in
5784 * little-endian order.
5787 for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
5788 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
5790 if (j == conf->conf.queue_num)
5792 reta = (reta << 8) | conf->conf.queue[j];
5794 IXGBE_WRITE_REG(hw, reta_reg,
5798 /* Configure the RSS key and the RSS protocols used to compute
5799 * the RSS hash of input packets.
5801 if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
5802 ixgbe_rss_disable(dev);
5805 if (rss_conf.rss_key == NULL)
5806 rss_conf.rss_key = rss_intel_key; /* Default hash key */
5807 ixgbe_hw_rss_hash_set(hw, &rss_conf);
5809 if (ixgbe_rss_conf_init(&filter_info->rss_info, &conf->conf))
5815 /* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */
5817 ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
5823 ixgbe_recv_pkts_vec(
5824 void __rte_unused *rx_queue,
5825 struct rte_mbuf __rte_unused **rx_pkts,
5826 uint16_t __rte_unused nb_pkts)
5832 ixgbe_recv_scattered_pkts_vec(
5833 void __rte_unused *rx_queue,
5834 struct rte_mbuf __rte_unused **rx_pkts,
5835 uint16_t __rte_unused nb_pkts)
5841 ixgbe_rxq_vec_setup(struct ixgbe_rx_queue __rte_unused *rxq)