1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation.
3 * Copyright 2014 6WIND S.A.
17 #include <rte_byteorder.h>
18 #include <rte_common.h>
19 #include <rte_cycles.h>
21 #include <rte_debug.h>
22 #include <rte_interrupts.h>
24 #include <rte_memory.h>
25 #include <rte_memzone.h>
26 #include <rte_launch.h>
28 #include <rte_per_lcore.h>
29 #include <rte_lcore.h>
30 #include <rte_atomic.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_mempool.h>
33 #include <rte_malloc.h>
35 #include <rte_ether.h>
36 #include <rte_ethdev_driver.h>
37 #include <rte_prefetch.h>
41 #include <rte_string_fns.h>
42 #include <rte_errno.h>
46 #include "ixgbe_logs.h"
47 #include "base/ixgbe_api.h"
48 #include "base/ixgbe_vf.h"
49 #include "ixgbe_ethdev.h"
50 #include "base/ixgbe_dcb.h"
51 #include "base/ixgbe_common.h"
52 #include "ixgbe_rxtx.h"
54 #ifdef RTE_LIBRTE_IEEE1588
55 #define IXGBE_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
57 #define IXGBE_TX_IEEE1588_TMST 0
59 /* Bit mask of the ol_flags bits that require building a TX context */
60 #define IXGBE_TX_OFFLOAD_MASK ( \
70 PKT_TX_OUTER_IP_CKSUM | \
71 PKT_TX_SEC_OFFLOAD | \
72 IXGBE_TX_IEEE1588_TMST)
74 #define IXGBE_TX_OFFLOAD_NOTSUP_MASK \
75 (PKT_TX_OFFLOAD_MASK ^ IXGBE_TX_OFFLOAD_MASK)
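/*
 * A short illustration of how these masks interact (the flag names below
 * already appear in this file; the scenario itself is just an example):
 * IXGBE_TX_OFFLOAD_NOTSUP_MASK is the XOR of the full PKT_TX_OFFLOAD_MASK
 * with the bits this PMD supports, so
 *
 *   if (m->ol_flags & IXGBE_TX_OFFLOAD_NOTSUP_MASK)
 *           reject the packet in ixgbe_prep_pkts()
 *
 * fires exactly when an application requests an offload the driver cannot
 * perform, e.g. a checksum flag outside IXGBE_TX_OFFLOAD_MASK.
 */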
78 #define RTE_PMD_USE_PREFETCH
81 #ifdef RTE_PMD_USE_PREFETCH
83 * Prefetch a cache line into all cache levels.
85 #define rte_ixgbe_prefetch(p) rte_prefetch0(p)
87 #define rte_ixgbe_prefetch(p) do {} while (0)
90 #ifdef RTE_IXGBE_INC_VECTOR
91 uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
95 /*********************************************************************
99 **********************************************************************/
102 * Check for descriptors with their DD bit set and free mbufs.
103 * Return the total number of buffers freed.
105 static __rte_always_inline int
106 ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
108 struct ixgbe_tx_entry *txep;
111 struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];
113 /* check DD bit on threshold descriptor */
114 status = txq->tx_ring[txq->tx_next_dd].wb.status;
115 if (!(status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD)))
119 * first buffer to free from S/W ring is at index
120 * tx_next_dd - (tx_rs_thresh-1)
122 txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]);
124 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
125 /* free buffers one at a time */
126 m = rte_pktmbuf_prefree_seg(txep->mbuf);
129 if (unlikely(m == NULL))
132 if (nb_free >= RTE_IXGBE_TX_MAX_FREE_BUF_SZ ||
133 (nb_free > 0 && m->pool != free[0]->pool)) {
134 rte_mempool_put_bulk(free[0]->pool,
135 (void **)free, nb_free);
143 rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
145 /* buffers were freed, update counters */
146 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
147 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
148 if (txq->tx_next_dd >= txq->nb_tx_desc)
149 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
151 return txq->tx_rs_thresh;
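/*
 * Worked example of the threshold-based freeing above (values are
 * illustrative, not mandated by hardware): with nb_tx_desc = 512 and
 * tx_rs_thresh = 32, tx_next_dd starts at 31. Once descriptor 31 reports
 * DD, sw_ring[0..31] are returned to their mempool(s), nb_tx_free grows
 * by 32 and tx_next_dd advances to 63; after descriptor 511 it wraps
 * back to 31 (tx_rs_thresh - 1).
 */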
154 /* Populate 4 descriptors with data from 4 mbufs */
156 tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
158 uint64_t buf_dma_addr;
162 for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
163 buf_dma_addr = rte_mbuf_data_iova(*pkts);
164 pkt_len = (*pkts)->data_len;
166 /* write data to descriptor */
167 txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
169 txdp->read.cmd_type_len =
170 rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
172 txdp->read.olinfo_status =
173 rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
175 rte_prefetch0(&(*pkts)->pool);
179 /* Populate 1 descriptor with data from 1 mbuf */
181 tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
183 uint64_t buf_dma_addr;
186 buf_dma_addr = rte_mbuf_data_iova(*pkts);
187 pkt_len = (*pkts)->data_len;
189 /* write data to descriptor */
190 txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
191 txdp->read.cmd_type_len =
192 rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
193 txdp->read.olinfo_status =
194 rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
195 rte_prefetch0(&(*pkts)->pool);
199 * Fill H/W descriptor ring with mbuf data.
200 * Copy mbuf pointers to the S/W ring.
203 ixgbe_tx_fill_hw_ring(struct ixgbe_tx_queue *txq, struct rte_mbuf **pkts,
206 volatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
207 struct ixgbe_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
208 const int N_PER_LOOP = 4;
209 const int N_PER_LOOP_MASK = N_PER_LOOP-1;
210 int mainpart, leftover;
214 * Process most of the packets in chunks of N pkts. Any
215 * leftover packets will get processed one at a time.
217 mainpart = (nb_pkts & ((uint32_t) ~N_PER_LOOP_MASK));
218 leftover = (nb_pkts & ((uint32_t) N_PER_LOOP_MASK));
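/*
 * Example: for nb_pkts = 7 and N_PER_LOOP = 4, mainpart = 4 and
 * leftover = 3, so one tx4() call fills descriptors for packets 0-3 and
 * three tx1() calls handle packets 4-6.
 */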
219 for (i = 0; i < mainpart; i += N_PER_LOOP) {
220 /* Copy N mbuf pointers to the S/W ring */
221 for (j = 0; j < N_PER_LOOP; ++j) {
222 (txep + i + j)->mbuf = *(pkts + i + j);
224 tx4(txdp + i, pkts + i);
227 if (unlikely(leftover > 0)) {
228 for (i = 0; i < leftover; ++i) {
229 (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
230 tx1(txdp + mainpart + i, pkts + mainpart + i);
235 static inline uint16_t
236 tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
239 struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
240 volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring;
244 * Begin scanning the H/W ring for done descriptors when the
245 * number of available descriptors drops below tx_free_thresh. For
246 * each done descriptor, free the associated buffer.
248 if (txq->nb_tx_free < txq->tx_free_thresh)
249 ixgbe_tx_free_bufs(txq);
251 /* Only use descriptors that are available */
252 nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
253 if (unlikely(nb_pkts == 0))
256 /* Use exactly nb_pkts descriptors */
257 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
260 * At this point, we know there are enough descriptors in the
261 * ring to transmit all the packets. This assumes that each
262 * mbuf contains a single segment, and that no new offloads
263 * are expected, which would require a new context descriptor.
267 * See if we're going to wrap-around. If so, handle the top
268 * of the descriptor ring first, then do the bottom. If not,
269 * the processing looks just like the "bottom" part anyway...
271 if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
272 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
273 ixgbe_tx_fill_hw_ring(txq, tx_pkts, n);
276 * We know that the last descriptor in the ring will need to
277 * have its RS bit set because tx_rs_thresh has to be
278 * a divisor of the ring size
280 tx_r[txq->tx_next_rs].read.cmd_type_len |=
281 rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
282 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
287 /* Fill H/W descriptor ring with mbuf data */
288 ixgbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
289 txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
292 * Determine if RS bit should be set
293 * This is what we actually want:
294 * if ((txq->tx_tail - 1) >= txq->tx_next_rs)
295 * but instead of subtracting 1 and doing >=, we can just do
296 * greater than without subtracting.
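 * Example (illustrative numbers): with tx_rs_thresh = 32 and
 * tx_next_rs = 31, a tail that has advanced to 40 satisfies 40 > 31,
 * so RS is set on descriptor 31 and tx_next_rs moves on to 63.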
298 if (txq->tx_tail > txq->tx_next_rs) {
299 tx_r[txq->tx_next_rs].read.cmd_type_len |=
300 rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
301 txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
303 if (txq->tx_next_rs >= txq->nb_tx_desc)
304 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
308 * Check for wrap-around. This would only happen if we used
309 * up to the last descriptor in the ring, no more, no less.
311 if (txq->tx_tail >= txq->nb_tx_desc)
314 /* update tail pointer */
316 IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, txq->tx_tail);
322 ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
327 /* Common case: the whole burst fits within TX_MAX_BURST packets */
328 if (likely(nb_pkts <= RTE_PMD_IXGBE_TX_MAX_BURST))
329 return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
331 /* transmit more than the max burst, in chunks of TX_MAX_BURST */
336 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_TX_MAX_BURST);
337 ret = tx_xmit_pkts(tx_queue, &(tx_pkts[nb_tx]), n);
338 nb_tx = (uint16_t)(nb_tx + ret);
339 nb_pkts = (uint16_t)(nb_pkts - ret);
347 #ifdef RTE_IXGBE_INC_VECTOR
349 ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
353 struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
358 num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
359 ret = ixgbe_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
372 ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
373 volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
374 uint64_t ol_flags, union ixgbe_tx_offload tx_offload,
375 __rte_unused uint64_t *mdata)
377 uint32_t type_tucmd_mlhl;
378 uint32_t mss_l4len_idx = 0;
380 uint32_t vlan_macip_lens;
381 union ixgbe_tx_offload tx_offload_mask;
382 uint32_t seqnum_seed = 0;
384 ctx_idx = txq->ctx_curr;
385 tx_offload_mask.data[0] = 0;
386 tx_offload_mask.data[1] = 0;
389 /* Specify which HW CTX to upload. */
390 mss_l4len_idx |= (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT);
392 if (ol_flags & PKT_TX_VLAN_PKT) {
393 tx_offload_mask.vlan_tci |= ~0;
396 /* check if TCP segmentation is required for this packet */
397 if (ol_flags & PKT_TX_TCP_SEG) {
398 /* implies IP cksum in IPv4 */
399 if (ol_flags & PKT_TX_IP_CKSUM)
400 type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4 |
401 IXGBE_ADVTXD_TUCMD_L4T_TCP |
402 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
404 type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV6 |
405 IXGBE_ADVTXD_TUCMD_L4T_TCP |
406 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
408 tx_offload_mask.l2_len |= ~0;
409 tx_offload_mask.l3_len |= ~0;
410 tx_offload_mask.l4_len |= ~0;
411 tx_offload_mask.tso_segsz |= ~0;
412 mss_l4len_idx |= tx_offload.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT;
413 mss_l4len_idx |= tx_offload.l4_len << IXGBE_ADVTXD_L4LEN_SHIFT;
414 } else { /* no TSO, check if hardware checksum is needed */
415 if (ol_flags & PKT_TX_IP_CKSUM) {
416 type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4;
417 tx_offload_mask.l2_len |= ~0;
418 tx_offload_mask.l3_len |= ~0;
421 switch (ol_flags & PKT_TX_L4_MASK) {
422 case PKT_TX_UDP_CKSUM:
423 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP |
424 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
425 mss_l4len_idx |= sizeof(struct rte_udp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
426 tx_offload_mask.l2_len |= ~0;
427 tx_offload_mask.l3_len |= ~0;
429 case PKT_TX_TCP_CKSUM:
430 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP |
431 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
432 mss_l4len_idx |= sizeof(struct rte_tcp_hdr)
433 << IXGBE_ADVTXD_L4LEN_SHIFT;
434 tx_offload_mask.l2_len |= ~0;
435 tx_offload_mask.l3_len |= ~0;
437 case PKT_TX_SCTP_CKSUM:
438 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP |
439 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
440 mss_l4len_idx |= sizeof(struct rte_sctp_hdr)
441 << IXGBE_ADVTXD_L4LEN_SHIFT;
442 tx_offload_mask.l2_len |= ~0;
443 tx_offload_mask.l3_len |= ~0;
446 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_RSV |
447 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
452 if (ol_flags & PKT_TX_OUTER_IP_CKSUM) {
453 tx_offload_mask.outer_l2_len |= ~0;
454 tx_offload_mask.outer_l3_len |= ~0;
455 tx_offload_mask.l2_len |= ~0;
456 seqnum_seed |= tx_offload.outer_l3_len
457 << IXGBE_ADVTXD_OUTER_IPLEN;
458 seqnum_seed |= tx_offload.l2_len
459 << IXGBE_ADVTXD_TUNNEL_LEN;
461 #ifdef RTE_LIBRTE_SECURITY
462 if (ol_flags & PKT_TX_SEC_OFFLOAD) {
463 union ixgbe_crypto_tx_desc_md *md =
464 (union ixgbe_crypto_tx_desc_md *)mdata;
466 (IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK & md->sa_idx);
467 type_tucmd_mlhl |= md->enc ?
468 (IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
469 IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN) : 0;
471 (md->pad_len & IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK);
472 tx_offload_mask.sa_idx |= ~0;
473 tx_offload_mask.sec_pad_len |= ~0;
477 txq->ctx_cache[ctx_idx].flags = ol_flags;
478 txq->ctx_cache[ctx_idx].tx_offload.data[0] =
479 tx_offload_mask.data[0] & tx_offload.data[0];
480 txq->ctx_cache[ctx_idx].tx_offload.data[1] =
481 tx_offload_mask.data[1] & tx_offload.data[1];
482 txq->ctx_cache[ctx_idx].tx_offload_mask = tx_offload_mask;
484 ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
485 vlan_macip_lens = tx_offload.l3_len;
486 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
487 vlan_macip_lens |= (tx_offload.outer_l2_len <<
488 IXGBE_ADVTXD_MACLEN_SHIFT);
490 vlan_macip_lens |= (tx_offload.l2_len <<
491 IXGBE_ADVTXD_MACLEN_SHIFT);
492 vlan_macip_lens |= ((uint32_t)tx_offload.vlan_tci << IXGBE_ADVTXD_VLAN_SHIFT);
493 ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
494 ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
495 ctx_txd->seqnum_seed = seqnum_seed;
499 * Check which hardware context can be used. Use the existing match
500 * or create a new context descriptor.
502 static inline uint32_t
503 what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
504 union ixgbe_tx_offload tx_offload)
506 /* Check whether the currently-used context matches */
507 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
508 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
509 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
510 & tx_offload.data[0])) &&
511 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
512 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
513 & tx_offload.data[1]))))
514 return txq->ctx_curr;
516 /* Otherwise, check whether the next context matches */
518 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
519 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
520 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
521 & tx_offload.data[0])) &&
522 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
523 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
524 & tx_offload.data[1]))))
525 return txq->ctx_curr;
527 /* No match: a new context descriptor must be built */
528 return IXGBE_CTX_NUM;
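/*
 * A sketch of the calling pattern (mirroring ixgbe_xmit_pkts() below):
 *
 *   ctx = what_advctx_update(txq, tx_ol_req, tx_offload);
 *   new_ctx = (ctx == IXGBE_CTX_NUM);      // no cached entry matched
 *   if (new_ctx)
 *           build a fresh context descriptor via ixgbe_set_xmit_ctx()
 *
 * The queue caches IXGBE_CTX_NUM contexts, so back-to-back packets with
 * the same offload layout reuse a context descriptor instead of consuming
 * an extra ring slot per packet.
 */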
531 static inline uint32_t
532 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
536 if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM)
537 tmp |= IXGBE_ADVTXD_POPTS_TXSM;
538 if (ol_flags & PKT_TX_IP_CKSUM)
539 tmp |= IXGBE_ADVTXD_POPTS_IXSM;
540 if (ol_flags & PKT_TX_TCP_SEG)
541 tmp |= IXGBE_ADVTXD_POPTS_TXSM;
545 static inline uint32_t
546 tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
548 uint32_t cmdtype = 0;
550 if (ol_flags & PKT_TX_VLAN_PKT)
551 cmdtype |= IXGBE_ADVTXD_DCMD_VLE;
552 if (ol_flags & PKT_TX_TCP_SEG)
553 cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
554 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
555 cmdtype |= (1 << IXGBE_ADVTXD_OUTERIPCS_SHIFT);
556 if (ol_flags & PKT_TX_MACSEC)
557 cmdtype |= IXGBE_ADVTXD_MAC_LINKSEC;
561 /* Default RS bit threshold values */
562 #ifndef DEFAULT_TX_RS_THRESH
563 #define DEFAULT_TX_RS_THRESH 32
565 #ifndef DEFAULT_TX_FREE_THRESH
566 #define DEFAULT_TX_FREE_THRESH 32
569 /* Reset transmit descriptors after they have been used */
571 ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
573 struct ixgbe_tx_entry *sw_ring = txq->sw_ring;
574 volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring;
575 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
576 uint16_t nb_tx_desc = txq->nb_tx_desc;
577 uint16_t desc_to_clean_to;
578 uint16_t nb_tx_to_clean;
581 /* Determine the last descriptor needing to be cleaned */
582 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
583 if (desc_to_clean_to >= nb_tx_desc)
584 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
586 /* Check to make sure the last descriptor to clean is done */
587 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
588 status = txr[desc_to_clean_to].wb.status;
589 if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD))) {
590 PMD_TX_FREE_LOG(DEBUG,
591 "TX descriptor %4u is not done"
592 "(port=%d queue=%d)",
594 txq->port_id, txq->queue_id);
595 /* Failed to clean any descriptors, better luck next time */
599 /* Figure out how many descriptors will be cleaned */
600 if (last_desc_cleaned > desc_to_clean_to)
601 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
604 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
607 PMD_TX_FREE_LOG(DEBUG,
608 "Cleaning %4u TX descriptors: %4u to %4u "
609 "(port=%d queue=%d)",
610 nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
611 txq->port_id, txq->queue_id);
614 * The last descriptor to clean is done, so that means all the
615 * descriptors from the last descriptor that was cleaned
616 * up to the last descriptor with the RS bit set
617 * are done. Only reset the threshold descriptor.
619 txr[desc_to_clean_to].wb.status = 0;
621 /* Update the txq to reflect the last descriptor that was cleaned */
622 txq->last_desc_cleaned = desc_to_clean_to;
623 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
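/*
 * Worked example (illustrative values, ignoring the last_id indirection
 * used for multi-descriptor packets): with nb_tx_desc = 512,
 * tx_rs_thresh = 32 and last_desc_cleaned = 500, desc_to_clean_to wraps
 * to 20; if that descriptor reports DD, nb_tx_to_clean =
 * (512 - 500) + 20 = 32 descriptors are credited back to nb_tx_free.
 */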
630 ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
633 struct ixgbe_tx_queue *txq;
634 struct ixgbe_tx_entry *sw_ring;
635 struct ixgbe_tx_entry *txe, *txn;
636 volatile union ixgbe_adv_tx_desc *txr;
637 volatile union ixgbe_adv_tx_desc *txd, *txp;
638 struct rte_mbuf *tx_pkt;
639 struct rte_mbuf *m_seg;
640 uint64_t buf_dma_addr;
641 uint32_t olinfo_status;
642 uint32_t cmd_type_len;
653 union ixgbe_tx_offload tx_offload;
654 #ifdef RTE_LIBRTE_SECURITY
658 tx_offload.data[0] = 0;
659 tx_offload.data[1] = 0;
661 sw_ring = txq->sw_ring;
663 tx_id = txq->tx_tail;
664 txe = &sw_ring[tx_id];
667 /* Determine if the descriptor ring needs to be cleaned. */
668 if (txq->nb_tx_free < txq->tx_free_thresh)
669 ixgbe_xmit_cleanup(txq);
671 rte_prefetch0(&txe->mbuf->pool);
674 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
677 pkt_len = tx_pkt->pkt_len;
680 * Determine how many (if any) context descriptors
681 * are needed for offload functionality.
683 ol_flags = tx_pkt->ol_flags;
684 #ifdef RTE_LIBRTE_SECURITY
685 use_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD);
688 /* If hardware offload required */
689 tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK;
691 tx_offload.l2_len = tx_pkt->l2_len;
692 tx_offload.l3_len = tx_pkt->l3_len;
693 tx_offload.l4_len = tx_pkt->l4_len;
694 tx_offload.vlan_tci = tx_pkt->vlan_tci;
695 tx_offload.tso_segsz = tx_pkt->tso_segsz;
696 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
697 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
698 #ifdef RTE_LIBRTE_SECURITY
700 union ixgbe_crypto_tx_desc_md *ipsec_mdata =
701 (union ixgbe_crypto_tx_desc_md *)
703 tx_offload.sa_idx = ipsec_mdata->sa_idx;
704 tx_offload.sec_pad_len = ipsec_mdata->pad_len;
708 /* Decide whether a new context descriptor must be built or an existing one can be reused. */
709 ctx = what_advctx_update(txq, tx_ol_req,
711 /* Only allocate a context descriptor if required */
712 new_ctx = (ctx == IXGBE_CTX_NUM);
717 * Keep track of how many descriptors are used in this iteration.
718 * This will always be the number of segments plus the number of
719 * context descriptors required to transmit the packet.
721 nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
724 nb_used + txq->nb_tx_used >= txq->tx_rs_thresh)
725 /* set RS on the previous packet in the burst */
726 txp->read.cmd_type_len |=
727 rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
730 * The number of descriptors that must be allocated for a
731 * packet is the number of segments of that packet, plus 1
732 * Context Descriptor for the hardware offload, if any.
733 * Determine the last TX descriptor to allocate in the TX ring
734 * for the packet, starting from the current position (tx_id)
737 tx_last = (uint16_t) (tx_id + nb_used - 1);
740 if (tx_last >= txq->nb_tx_desc)
741 tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
743 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
744 " tx_first=%u tx_last=%u",
745 (unsigned) txq->port_id,
746 (unsigned) txq->queue_id,
752 * Make sure there are enough TX descriptors available to
753 * transmit the entire packet.
754 * nb_used better be less than or equal to txq->tx_rs_thresh
756 if (nb_used > txq->nb_tx_free) {
757 PMD_TX_FREE_LOG(DEBUG,
758 "Not enough free TX descriptors "
759 "nb_used=%4u nb_free=%4u "
760 "(port=%d queue=%d)",
761 nb_used, txq->nb_tx_free,
762 txq->port_id, txq->queue_id);
764 if (ixgbe_xmit_cleanup(txq) != 0) {
765 /* Could not clean any descriptors */
771 /* nb_used better be <= txq->tx_rs_thresh */
772 if (unlikely(nb_used > txq->tx_rs_thresh)) {
773 PMD_TX_FREE_LOG(DEBUG,
774 "The number of descriptors needed to "
775 "transmit the packet exceeds the "
776 "RS bit threshold. This will impact "
778 "nb_used=%4u nb_free=%4u "
780 "(port=%d queue=%d)",
781 nb_used, txq->nb_tx_free,
783 txq->port_id, txq->queue_id);
785 * Loop here until there are enough TX
786 * descriptors or until the ring cannot be cleaned any further.
789 while (nb_used > txq->nb_tx_free) {
790 if (ixgbe_xmit_cleanup(txq) != 0) {
792 * Could not clean any descriptors
804 * By now there are enough free TX descriptors to transmit the packet.
809 * Set common flags of all TX Data Descriptors.
811 * The following bits must be set in all Data Descriptors:
812 * - IXGBE_ADVTXD_DTYP_DATA
813 * - IXGBE_ADVTXD_DCMD_DEXT
815 * The following bits must be set in the first Data Descriptor
816 * and are ignored in the other ones:
817 * - IXGBE_ADVTXD_DCMD_IFCS
818 * - IXGBE_ADVTXD_MAC_1588
819 * - IXGBE_ADVTXD_DCMD_VLE
821 * The following bits must only be set in the last Data Descriptor:
823 * - IXGBE_TXD_CMD_EOP
825 * The following bits can be set in any Data Descriptor, but
826 * are only set in the last Data Descriptor:
 *   - IXGBE_TXD_CMD_RS
829 cmd_type_len = IXGBE_ADVTXD_DTYP_DATA |
830 IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
832 #ifdef RTE_LIBRTE_IEEE1588
833 if (ol_flags & PKT_TX_IEEE1588_TMST)
834 cmd_type_len |= IXGBE_ADVTXD_MAC_1588;
840 if (ol_flags & PKT_TX_TCP_SEG) {
841 /* when TSO is on, the paylen in the descriptor is
842 * not the packet len but the TCP payload len */
843 pkt_len -= (tx_offload.l2_len +
844 tx_offload.l3_len + tx_offload.l4_len);
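/*
 * Example: a TSO packet with pkt_len = 7014, l2_len = 14, l3_len = 20
 * and l4_len = 20 reports a paylen of 7014 - 54 = 6960 bytes of TCP
 * payload in the descriptor (values are purely illustrative).
 */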
848 * Setup the TX Advanced Context Descriptor if required
851 volatile struct ixgbe_adv_tx_context_desc *
854 ctx_txd = (volatile struct
855 ixgbe_adv_tx_context_desc *)
858 txn = &sw_ring[txe->next_id];
859 rte_prefetch0(&txn->mbuf->pool);
861 if (txe->mbuf != NULL) {
862 rte_pktmbuf_free_seg(txe->mbuf);
866 ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
867 tx_offload, &tx_pkt->udata64);
869 txe->last_id = tx_last;
870 tx_id = txe->next_id;
875 * Set up the TX Advanced Data Descriptor.
876 * This path is taken whether a new context
877 * descriptor was built or an existing one is reused.
879 cmd_type_len |= tx_desc_ol_flags_to_cmdtype(ol_flags);
880 olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
881 olinfo_status |= ctx << IXGBE_ADVTXD_IDX_SHIFT;
884 olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
885 #ifdef RTE_LIBRTE_SECURITY
887 olinfo_status |= IXGBE_ADVTXD_POPTS_IPSEC;
893 txn = &sw_ring[txe->next_id];
894 rte_prefetch0(&txn->mbuf->pool);
896 if (txe->mbuf != NULL)
897 rte_pktmbuf_free_seg(txe->mbuf);
901 * Set up Transmit Data Descriptor.
903 slen = m_seg->data_len;
904 buf_dma_addr = rte_mbuf_data_iova(m_seg);
905 txd->read.buffer_addr =
906 rte_cpu_to_le_64(buf_dma_addr);
907 txd->read.cmd_type_len =
908 rte_cpu_to_le_32(cmd_type_len | slen);
909 txd->read.olinfo_status =
910 rte_cpu_to_le_32(olinfo_status);
911 txe->last_id = tx_last;
912 tx_id = txe->next_id;
915 } while (m_seg != NULL);
918 * The last packet data descriptor needs End Of Packet (EOP)
920 cmd_type_len |= IXGBE_TXD_CMD_EOP;
921 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
922 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
924 /* Set RS bit only on threshold packets' last descriptor */
925 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
926 PMD_TX_FREE_LOG(DEBUG,
927 "Setting RS bit on TXD id="
928 "%4u (port=%d queue=%d)",
929 tx_last, txq->port_id, txq->queue_id);
931 cmd_type_len |= IXGBE_TXD_CMD_RS;
933 /* Update txq RS bit counters */
939 txd->read.cmd_type_len |= rte_cpu_to_le_32(cmd_type_len);
943 /* set RS on last packet in the burst */
945 txp->read.cmd_type_len |= rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
950 * Set the Transmit Descriptor Tail (TDT)
952 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
953 (unsigned) txq->port_id, (unsigned) txq->queue_id,
954 (unsigned) tx_id, (unsigned) nb_tx);
955 IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
956 txq->tx_tail = tx_id;
961 /*********************************************************************
965 **********************************************************************/
967 ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
972 struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
974 for (i = 0; i < nb_pkts; i++) {
976 ol_flags = m->ol_flags;
979 * Check if packet meets requirements for number of segments
981 * NOTE: for ixgbe it's always (40 - WTHRESH) for both TSO and non-TSO
985 if (m->nb_segs > IXGBE_TX_MAX_SEG - txq->wthresh) {
990 if (ol_flags & IXGBE_TX_OFFLOAD_NOTSUP_MASK) {
991 rte_errno = ENOTSUP;
995 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
996 ret = rte_validate_tx_offload(m);
1002 ret = rte_net_intel_cksum_prepare(m);
1012 /*********************************************************************
1016 **********************************************************************/
1018 #define IXGBE_PACKET_TYPE_ETHER 0X00
1019 #define IXGBE_PACKET_TYPE_IPV4 0X01
1020 #define IXGBE_PACKET_TYPE_IPV4_TCP 0X11
1021 #define IXGBE_PACKET_TYPE_IPV4_UDP 0X21
1022 #define IXGBE_PACKET_TYPE_IPV4_SCTP 0X41
1023 #define IXGBE_PACKET_TYPE_IPV4_EXT 0X03
1024 #define IXGBE_PACKET_TYPE_IPV4_EXT_TCP 0X13
1025 #define IXGBE_PACKET_TYPE_IPV4_EXT_UDP 0X23
1026 #define IXGBE_PACKET_TYPE_IPV4_EXT_SCTP 0X43
1027 #define IXGBE_PACKET_TYPE_IPV6 0X04
1028 #define IXGBE_PACKET_TYPE_IPV6_TCP 0X14
1029 #define IXGBE_PACKET_TYPE_IPV6_UDP 0X24
1030 #define IXGBE_PACKET_TYPE_IPV6_SCTP 0X44
1031 #define IXGBE_PACKET_TYPE_IPV6_EXT 0X0C
1032 #define IXGBE_PACKET_TYPE_IPV6_EXT_TCP 0X1C
1033 #define IXGBE_PACKET_TYPE_IPV6_EXT_UDP 0X2C
1034 #define IXGBE_PACKET_TYPE_IPV6_EXT_SCTP 0X4C
1035 #define IXGBE_PACKET_TYPE_IPV4_IPV6 0X05
1036 #define IXGBE_PACKET_TYPE_IPV4_IPV6_TCP 0X15
1037 #define IXGBE_PACKET_TYPE_IPV4_IPV6_UDP 0X25
1038 #define IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP 0X45
1039 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6 0X07
1040 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP 0X17
1041 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP 0X27
1042 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP 0X47
1043 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT 0X0D
1044 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
1045 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
1046 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP 0X4D
1047 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT 0X0F
1048 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP 0X1F
1049 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP 0X2F
1050 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP 0X4F
1052 #define IXGBE_PACKET_TYPE_NVGRE 0X00
1053 #define IXGBE_PACKET_TYPE_NVGRE_IPV4 0X01
1054 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_TCP 0X11
1055 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP 0X21
1056 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP 0X41
1057 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT 0X03
1058 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP 0X13
1059 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP 0X23
1060 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP 0X43
1061 #define IXGBE_PACKET_TYPE_NVGRE_IPV6 0X04
1062 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP 0X14
1063 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP 0X24
1064 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP 0X44
1065 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT 0X0C
1066 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP 0X1C
1067 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP 0X2C
1068 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP 0X4C
1069 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6 0X05
1070 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP 0X15
1071 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP 0X25
1072 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT 0X0D
1073 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP 0X1D
1074 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP 0X2D
1076 #define IXGBE_PACKET_TYPE_VXLAN 0X80
1077 #define IXGBE_PACKET_TYPE_VXLAN_IPV4 0X81
1078 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_TCP 0x91
1079 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP 0xA1
1080 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP 0xC1
1081 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT 0x83
1082 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP 0X93
1083 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP 0XA3
1084 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP 0XC3
1085 #define IXGBE_PACKET_TYPE_VXLAN_IPV6 0X84
1086 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP 0X94
1087 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP 0XA4
1088 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP 0XC4
1089 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT 0X8C
1090 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP 0X9C
1091 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP 0XAC
1092 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP 0XCC
1093 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6 0X85
1094 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP 0X95
1095 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP 0XA5
1096 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT 0X8D
1097 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP 0X9D
1098 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP 0XAD
1101 * Use two different tables for normal packets and tunnel packets
1102 * to save space.
1105 ptype_table[IXGBE_PACKET_TYPE_MAX] __rte_cache_aligned = {
1106 [IXGBE_PACKET_TYPE_ETHER] = RTE_PTYPE_L2_ETHER,
1107 [IXGBE_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
1109 [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
1110 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
1111 [IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
1112 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
1113 [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
1114 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
1115 [IXGBE_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
1116 RTE_PTYPE_L3_IPV4_EXT,
1117 [IXGBE_PACKET_TYPE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1118 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
1119 [IXGBE_PACKET_TYPE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1120 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
1121 [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1122 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
1123 [IXGBE_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
1125 [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1126 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
1127 [IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1128 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
1129 [IXGBE_PACKET_TYPE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1130 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP,
1131 [IXGBE_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1132 RTE_PTYPE_L3_IPV6_EXT,
1133 [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1134 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
1135 [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1136 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
1137 [IXGBE_PACKET_TYPE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1138 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_SCTP,
1139 [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
1140 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1141 RTE_PTYPE_INNER_L3_IPV6,
1142 [IXGBE_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1143 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1144 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
1145 [IXGBE_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1146 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1147 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
1148 [IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1149 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1150 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
1151 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6] = RTE_PTYPE_L2_ETHER |
1152 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1153 RTE_PTYPE_INNER_L3_IPV6,
1154 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1155 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1156 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
1157 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1158 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1159 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
1160 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1161 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1162 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
1163 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1164 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1165 RTE_PTYPE_INNER_L3_IPV6_EXT,
1166 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1167 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1168 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
1169 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1170 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1171 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
1172 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1173 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1174 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
1175 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1176 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1177 RTE_PTYPE_INNER_L3_IPV6_EXT,
1178 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1179 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1180 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
1181 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1182 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1183 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
1184 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP] =
1185 RTE_PTYPE_L2_ETHER |
1186 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1187 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
1191 ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX] __rte_cache_aligned = {
1192 [IXGBE_PACKET_TYPE_NVGRE] = RTE_PTYPE_L2_ETHER |
1193 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1194 RTE_PTYPE_INNER_L2_ETHER,
1195 [IXGBE_PACKET_TYPE_NVGRE_IPV4] = RTE_PTYPE_L2_ETHER |
1196 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1197 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1198 [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
1199 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1200 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT,
1201 [IXGBE_PACKET_TYPE_NVGRE_IPV6] = RTE_PTYPE_L2_ETHER |
1202 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1203 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6,
1204 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
1205 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1206 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1207 [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1208 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1209 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT,
1210 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1211 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1212 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1213 [IXGBE_PACKET_TYPE_NVGRE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
1214 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1215 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
1216 RTE_PTYPE_INNER_L4_TCP,
1217 [IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1218 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1219 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
1220 RTE_PTYPE_INNER_L4_TCP,
1221 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1222 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1223 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1224 [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1225 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1226 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
1227 RTE_PTYPE_INNER_L4_TCP,
1228 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP] =
1229 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1230 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
1231 RTE_PTYPE_INNER_L3_IPV4,
1232 [IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
1233 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1234 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
1235 RTE_PTYPE_INNER_L4_UDP,
1236 [IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1237 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1238 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
1239 RTE_PTYPE_INNER_L4_UDP,
1240 [IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1241 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1242 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
1243 RTE_PTYPE_INNER_L4_SCTP,
1244 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1245 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1246 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1247 [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1248 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1249 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
1250 RTE_PTYPE_INNER_L4_UDP,
1251 [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1252 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1253 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
1254 RTE_PTYPE_INNER_L4_SCTP,
1255 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP] =
1256 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1257 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
1258 RTE_PTYPE_INNER_L3_IPV4,
1259 [IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
1260 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1261 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
1262 RTE_PTYPE_INNER_L4_SCTP,
1263 [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1264 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1265 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
1266 RTE_PTYPE_INNER_L4_SCTP,
1267 [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1268 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1269 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
1270 RTE_PTYPE_INNER_L4_TCP,
1271 [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1272 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1273 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
1274 RTE_PTYPE_INNER_L4_UDP,
1276 [IXGBE_PACKET_TYPE_VXLAN] = RTE_PTYPE_L2_ETHER |
1277 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1278 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER,
1279 [IXGBE_PACKET_TYPE_VXLAN_IPV4] = RTE_PTYPE_L2_ETHER |
1280 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1281 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1282 RTE_PTYPE_INNER_L3_IPV4,
1283 [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
1284 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1285 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1286 RTE_PTYPE_INNER_L3_IPV4_EXT,
1287 [IXGBE_PACKET_TYPE_VXLAN_IPV6] = RTE_PTYPE_L2_ETHER |
1288 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1289 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1290 RTE_PTYPE_INNER_L3_IPV6,
1291 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
1292 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1293 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1294 RTE_PTYPE_INNER_L3_IPV4,
1295 [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1296 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1297 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1298 RTE_PTYPE_INNER_L3_IPV6_EXT,
1299 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1300 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1301 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1302 RTE_PTYPE_INNER_L3_IPV4,
1303 [IXGBE_PACKET_TYPE_VXLAN_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
1304 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1305 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1306 RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_TCP,
1307 [IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1308 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1309 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1310 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
1311 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1312 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1313 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1314 RTE_PTYPE_INNER_L3_IPV4,
1315 [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1316 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1317 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1318 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
1319 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP] =
1320 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1321 RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
1322 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1323 [IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
1324 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1325 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1326 RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_UDP,
1327 [IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1328 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1329 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1330 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
1331 [IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1332 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1333 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1334 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
1335 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1336 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1337 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1338 RTE_PTYPE_INNER_L3_IPV4,
1339 [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1340 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1341 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1342 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
1343 [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1344 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1345 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1346 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
1347 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP] =
1348 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1349 RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
1350 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1351 [IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
1352 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1353 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1354 RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_SCTP,
1355 [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1356 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1357 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1358 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_SCTP,
1359 [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1360 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1361 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1362 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP,
1363 [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1364 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1365 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1366 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP,
1369 /* @note: fix ixgbe_dev_supported_ptypes_get() if any change here. */
1370 static inline uint32_t
1371 ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask)
1374 if (unlikely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
1375 return RTE_PTYPE_UNKNOWN;
1377 pkt_info = (pkt_info >> IXGBE_PACKET_TYPE_SHIFT) & ptype_mask;
1379 /* For tunnel packet */
1380 if (pkt_info & IXGBE_PACKET_TYPE_TUNNEL_BIT) {
1381 /* Remove the tunnel bit to save space. */
1382 pkt_info &= IXGBE_PACKET_TYPE_MASK_TUNNEL;
1383 return ptype_table_tn[pkt_info];
1387 * For x550, if it's not a tunnel packet,
1388 * the tunnel type bit should be set to 0.
1389 * Reuse the 82599 mask.
1391 pkt_info &= IXGBE_PACKET_TYPE_MASK_82599;
1393 return ptype_table[pkt_info];
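/*
 * Example lookup: a masked pkt_info of IXGBE_PACKET_TYPE_IPV4_TCP (0x11)
 * resolves to RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP
 * via ptype_table, while ptype_table_tn is consulted only when
 * IXGBE_PACKET_TYPE_TUNNEL_BIT is set in pkt_info.
 */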
1396 static inline uint64_t
1397 ixgbe_rxd_pkt_info_to_pkt_flags(uint16_t pkt_info)
1399 static uint64_t ip_rss_types_map[16] __rte_cache_aligned = {
1400 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
1401 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
1402 PKT_RX_RSS_HASH, 0, 0, 0,
1403 0, 0, 0, PKT_RX_FDIR,
1405 #ifdef RTE_LIBRTE_IEEE1588
1406 static uint64_t ip_pkt_etqf_map[8] = {
1407 0, 0, 0, PKT_RX_IEEE1588_PTP,
1411 if (likely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
1412 return ip_pkt_etqf_map[(pkt_info >> 4) & 0X07] |
1413 ip_rss_types_map[pkt_info & 0XF];
1415 return ip_rss_types_map[pkt_info & 0XF];
1417 return ip_rss_types_map[pkt_info & 0XF];
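/*
 * Example: a low nibble of 0x1 in pkt_info maps to PKT_RX_RSS_HASH,
 * 0xF maps to PKT_RX_FDIR (flow director match) and 0x0 adds no flag,
 * per ip_rss_types_map above.
 */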
1421 static inline uint64_t
1422 rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags)
1427 * Check only whether a VLAN is present.
1428 * Do not check whether the L3/L4 Rx checksum was done by the NIC;
1429 * that can be found from the rte_eth_rxmode.offloads flag.
1431 pkt_flags = (rx_status & IXGBE_RXD_STAT_VP) ? vlan_flags : 0;
1433 #ifdef RTE_LIBRTE_IEEE1588
1434 if (rx_status & IXGBE_RXD_STAT_TMST)
1435 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
1440 static inline uint64_t
1441 rx_desc_error_to_pkt_flags(uint32_t rx_status)
1446 * Bit 31: IPE, IPv4 checksum error
1447 * Bit 30: L4I, L4 integrity error
1449 static uint64_t error_to_pkt_flags_map[4] = {
1450 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
1451 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
1452 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD,
1453 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
1455 pkt_flags = error_to_pkt_flags_map[(rx_status >>
1456 IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK];
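/*
 * Example: a status word with only the L4 integrity error bit set selects
 * index 1 of the map above, i.e. PKT_RX_IP_CKSUM_GOOD |
 * PKT_RX_L4_CKSUM_BAD, while a clean status selects index 0 (both
 * checksums good).
 */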
1458 if ((rx_status & IXGBE_RXD_STAT_OUTERIPCS) &&
1459 (rx_status & IXGBE_RXDADV_ERR_OUTERIPER)) {
1460 pkt_flags |= PKT_RX_EIP_CKSUM_BAD;
1463 #ifdef RTE_LIBRTE_SECURITY
1464 if (rx_status & IXGBE_RXD_STAT_SECP) {
1465 pkt_flags |= PKT_RX_SEC_OFFLOAD;
1466 if (rx_status & IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG)
1467 pkt_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
1475 * LOOK_AHEAD defines how many desc statuses to check beyond the
1476 * current descriptor.
1477 * It must be a pound define for optimal performance.
1478 * Do not change the value of LOOK_AHEAD, as the ixgbe_rx_scan_hw_ring
1479 * function only works with LOOK_AHEAD=8.
1481 #define LOOK_AHEAD 8
1482 #if (LOOK_AHEAD != 8)
1483 #error "PMD IXGBE: LOOK_AHEAD must be 8\n"
1486 ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
1488 volatile union ixgbe_adv_rx_desc *rxdp;
1489 struct ixgbe_rx_entry *rxep;
1490 struct rte_mbuf *mb;
1494 uint32_t s[LOOK_AHEAD];
1495 uint32_t pkt_info[LOOK_AHEAD];
1496 int i, j, nb_rx = 0;
1498 uint64_t vlan_flags = rxq->vlan_flags;
1500 /* get references to current descriptor and S/W ring entry */
1501 rxdp = &rxq->rx_ring[rxq->rx_tail];
1502 rxep = &rxq->sw_ring[rxq->rx_tail];
1504 status = rxdp->wb.upper.status_error;
1505 /* check to make sure there is at least 1 packet to receive */
1506 if (!(status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
1510 * Scan LOOK_AHEAD descriptors at a time to determine which descriptors
1511 * reference packets that are ready to be received.
1513 for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST;
1514 i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD) {
1515 /* Read desc statuses backwards to avoid race condition */
1516 for (j = 0; j < LOOK_AHEAD; j++)
1517 s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error);
1521 /* Compute how many status bits were set */
1522 for (nb_dd = 0; nb_dd < LOOK_AHEAD &&
1523 (s[nb_dd] & IXGBE_RXDADV_STAT_DD); nb_dd++)
1526 for (j = 0; j < nb_dd; j++)
1527 pkt_info[j] = rte_le_to_cpu_32(rxdp[j].wb.lower.
1532 /* Translate descriptor info to mbuf format */
1533 for (j = 0; j < nb_dd; ++j) {
1535 pkt_len = rte_le_to_cpu_16(rxdp[j].wb.upper.length) -
1537 mb->data_len = pkt_len;
1538 mb->pkt_len = pkt_len;
1539 mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
1541 /* convert descriptor fields to rte mbuf flags */
1542 pkt_flags = rx_desc_status_to_pkt_flags(s[j],
1544 pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
1545 pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags
1546 ((uint16_t)pkt_info[j]);
1547 mb->ol_flags = pkt_flags;
1549 ixgbe_rxd_pkt_info_to_pkt_type
1550 (pkt_info[j], rxq->pkt_type_mask);
1552 if (likely(pkt_flags & PKT_RX_RSS_HASH))
1553 mb->hash.rss = rte_le_to_cpu_32(
1554 rxdp[j].wb.lower.hi_dword.rss);
1555 else if (pkt_flags & PKT_RX_FDIR) {
1556 mb->hash.fdir.hash = rte_le_to_cpu_16(
1557 rxdp[j].wb.lower.hi_dword.csum_ip.csum) &
1558 IXGBE_ATR_HASH_MASK;
1559 mb->hash.fdir.id = rte_le_to_cpu_16(
1560 rxdp[j].wb.lower.hi_dword.csum_ip.ip_id);
1564 /* Move mbuf pointers from the S/W ring to the stage */
1565 for (j = 0; j < LOOK_AHEAD; ++j) {
1566 rxq->rx_stage[i + j] = rxep[j].mbuf;
1569 /* stop if not all LOOK_AHEAD descriptors were done */
1570 if (nb_dd != LOOK_AHEAD)
1574 /* clear software ring entries so we can cleanup correctly */
1575 for (i = 0; i < nb_rx; ++i) {
1576 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1584 ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf)
1586 volatile union ixgbe_adv_rx_desc *rxdp;
1587 struct ixgbe_rx_entry *rxep;
1588 struct rte_mbuf *mb;
1593 /* allocate buffers in bulk directly into the S/W ring */
1594 alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
1595 rxep = &rxq->sw_ring[alloc_idx];
1596 diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
1597 rxq->rx_free_thresh);
1598 if (unlikely(diag != 0))
1601 rxdp = &rxq->rx_ring[alloc_idx];
1602 for (i = 0; i < rxq->rx_free_thresh; ++i) {
1603 /* populate the static rte mbuf fields */
1606 mb->port = rxq->port_id;
1609 rte_mbuf_refcnt_set(mb, 1);
1610 mb->data_off = RTE_PKTMBUF_HEADROOM;
1612 /* populate the descriptors */
1613 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1614 rxdp[i].read.hdr_addr = 0;
1615 rxdp[i].read.pkt_addr = dma_addr;
1618 /* update state of internal queue structure */
1619 rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh;
1620 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1621 rxq->rx_free_trigger = rxq->rx_free_thresh - 1;
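/*
 * Example of the trigger arithmetic (illustrative values): with
 * nb_rx_desc = 512 and rx_free_thresh = 32, rx_free_trigger steps
 * 31 -> 63 -> ... -> 511 and then wraps back to 31, so descriptors are
 * always replenished in blocks of rx_free_thresh.
 */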
1627 static inline uint16_t
1628 ixgbe_rx_fill_from_stage(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
1631 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1634 /* how many packets are ready to return? */
1635 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1637 /* copy mbuf pointers to the application's packet list */
1638 for (i = 0; i < nb_pkts; ++i)
1639 rx_pkts[i] = stage[i];
1641 /* update internal queue state */
1642 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1643 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1648 static inline uint16_t
1649 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1652 struct ixgbe_rx_queue *rxq = (struct ixgbe_rx_queue *)rx_queue;
1655 /* Any previously recv'd pkts will be returned from the Rx stage */
1656 if (rxq->rx_nb_avail)
1657 return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1659 /* Scan the H/W ring for packets to receive */
1660 nb_rx = (uint16_t)ixgbe_rx_scan_hw_ring(rxq);
1662 /* update internal queue state */
1663 rxq->rx_next_avail = 0;
1664 rxq->rx_nb_avail = nb_rx;
1665 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1667 /* if required, allocate new buffers to replenish descriptors */
1668 if (rxq->rx_tail > rxq->rx_free_trigger) {
1669 uint16_t cur_free_trigger = rxq->rx_free_trigger;
1671 if (ixgbe_rx_alloc_bufs(rxq, true) != 0) {
1674 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1675 "queue_id=%u", (unsigned) rxq->port_id,
1676 (unsigned) rxq->queue_id);
1678 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
1679 rxq->rx_free_thresh;
1682 * Need to rewind any previous receives if we cannot
1683 * allocate new buffers to replenish the old ones.
1685 rxq->rx_nb_avail = 0;
1686 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1687 for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j)
1688 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1693 /* update tail pointer */
1695 IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr,
1699 if (rxq->rx_tail >= rxq->nb_rx_desc)
1702 /* received any packets this loop? */
1703 if (rxq->rx_nb_avail)
1704 return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1709 /* split requests into chunks of size RTE_PMD_IXGBE_RX_MAX_BURST */
1711 ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1716 if (unlikely(nb_pkts == 0))
1719 if (likely(nb_pkts <= RTE_PMD_IXGBE_RX_MAX_BURST))
1720 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1722 /* request is relatively large, chunk it up */
1727 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_RX_MAX_BURST);
1728 ret = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1729 nb_rx = (uint16_t)(nb_rx + ret);
1730 nb_pkts = (uint16_t)(nb_pkts - ret);
1739 ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1742 struct ixgbe_rx_queue *rxq;
1743 volatile union ixgbe_adv_rx_desc *rx_ring;
1744 volatile union ixgbe_adv_rx_desc *rxdp;
1745 struct ixgbe_rx_entry *sw_ring;
1746 struct ixgbe_rx_entry *rxe;
1747 struct rte_mbuf *rxm;
1748 struct rte_mbuf *nmb;
1749 union ixgbe_adv_rx_desc rxd;
1758 uint64_t vlan_flags;
1763 rx_id = rxq->rx_tail;
1764 rx_ring = rxq->rx_ring;
1765 sw_ring = rxq->sw_ring;
1766 vlan_flags = rxq->vlan_flags;
1767 while (nb_rx < nb_pkts) {
1769 * The order of operations here is important as the DD status
1770 * bit must not be read after any other descriptor fields.
1771 * rx_ring and rxdp are pointing to volatile data so the order
1772 * of accesses cannot be reordered by the compiler. If they were
1773 * not volatile, they could be reordered which could lead to
1774 * using invalid descriptor fields when read from rxd.
1776 rxdp = &rx_ring[rx_id];
1777 staterr = rxdp->wb.upper.status_error;
1778 if (!(staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
1785 * If the IXGBE_RXDADV_STAT_EOP flag is not set, the RX packet
1786 * is likely to be invalid and to be dropped by the various
1787 * validation checks performed by the network stack.
1789 * Allocate a new mbuf to replenish the RX ring descriptor.
1790 * If the allocation fails:
1791 * - arrange for that RX descriptor to be the first one
1792 * being parsed the next time the receive function is
1793 * invoked [on the same queue].
1795 * - Stop parsing the RX ring and return immediately.
1797 * This policy does not drop the packet received in the RX
1798 * descriptor for which the allocation of a new mbuf failed.
1799 * Thus, it allows that packet to be later retrieved if
1800 * mbufs have been freed in the meantime.
1801 * As a side effect, holding RX descriptors instead of
1802 * systematically giving them back to the NIC may lead to
1803 * RX ring exhaustion situations.
1804 * However, the NIC can gracefully prevent such situations
1805 * from happening by sending specific "back-pressure" flow control
1806 * frames to its peer(s).
1808 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1809 "ext_err_stat=0x%08x pkt_len=%u",
1810 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1811 (unsigned) rx_id, (unsigned) staterr,
1812 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
1814 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1816 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1817 "queue_id=%u", (unsigned) rxq->port_id,
1818 (unsigned) rxq->queue_id);
1819 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1824 rxe = &sw_ring[rx_id];
1826 if (rx_id == rxq->nb_rx_desc)
1829 /* Prefetch next mbuf while processing current one. */
1830 rte_ixgbe_prefetch(sw_ring[rx_id].mbuf);
1833 * When the next RX descriptor is on a cache-line boundary,
1834 * prefetch the next 4 RX descriptors and the next 8 pointers to mbufs.
1837 if ((rx_id & 0x3) == 0) {
1838 rte_ixgbe_prefetch(&rx_ring[rx_id]);
1839 rte_ixgbe_prefetch(&sw_ring[rx_id]);
1845 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1846 rxdp->read.hdr_addr = 0;
1847 rxdp->read.pkt_addr = dma_addr;
1850 * Initialize the returned mbuf.
1851 * 1) setup generic mbuf fields:
1852 * - number of segments,
1855 * - RX port identifier.
1856 * 2) integrate hardware offload data, if any:
1857 * - RSS flag & hash,
1858 * - IP checksum flag,
1859 * - VLAN TCI, if any,
1862 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
1864 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1865 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
1868 rxm->pkt_len = pkt_len;
1869 rxm->data_len = pkt_len;
1870 rxm->port = rxq->port_id;
1872 pkt_info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1873 /* Only valid if PKT_RX_VLAN set in pkt_flags */
1874 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1876 pkt_flags = rx_desc_status_to_pkt_flags(staterr, vlan_flags);
1877 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1878 pkt_flags = pkt_flags |
1879 ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info);
1880 rxm->ol_flags = pkt_flags;
1882 ixgbe_rxd_pkt_info_to_pkt_type(pkt_info,
1883 rxq->pkt_type_mask);
1885 if (likely(pkt_flags & PKT_RX_RSS_HASH))
1886 rxm->hash.rss = rte_le_to_cpu_32(
1887 rxd.wb.lower.hi_dword.rss);
1888 else if (pkt_flags & PKT_RX_FDIR) {
1889 rxm->hash.fdir.hash = rte_le_to_cpu_16(
1890 rxd.wb.lower.hi_dword.csum_ip.csum) &
1891 IXGBE_ATR_HASH_MASK;
1892 rxm->hash.fdir.id = rte_le_to_cpu_16(
1893 rxd.wb.lower.hi_dword.csum_ip.ip_id);
1896 * Store the mbuf address into the next entry of the array
1897 * of returned packets.
1899 rx_pkts[nb_rx++] = rxm;
1901 rxq->rx_tail = rx_id;
1904 * If the number of free RX descriptors is greater than the RX free
1905 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1907 * Update the RDT with the value of the last processed RX descriptor
1908 * minus 1, to guarantee that the RDT register is never equal to the
1909 * RDH register, which creates a "full" ring situation from the
1910 * hardware point of view...
1912 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1913 if (nb_hold > rxq->rx_free_thresh) {
1914 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1915 "nb_hold=%u nb_rx=%u",
1916 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1917 (unsigned) rx_id, (unsigned) nb_hold,
1919 rx_id = (uint16_t) ((rx_id == 0) ?
1920 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1921 IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1924 rxq->nb_rx_hold = nb_hold;
1929 * Detect an RSC descriptor.
1931 static inline uint32_t
1932 ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
1934 return (rte_le_to_cpu_32(rx->wb.lower.lo_dword.data) &
1935 IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
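/*
 * Usage sketch (illustration only, mirroring the check performed later in
 * ixgbe_recv_pkts_lro()): a non-zero RSCCNT marks the descriptor as part
 * of an RSC aggregation, in which case the index of the next segment is
 * taken from the NEXTP field of the write-back descriptor instead of
 * simply being the following ring entry:
 *
 *	if (ixgbe_rsc_count(&rxd))
 *		nextp_id = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
 *			   IXGBE_RXDADV_NEXTP_SHIFT;
 */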
1939 * ixgbe_fill_cluster_head_buf - fill the first mbuf of the returned packet
1941 * Fill the following info in the HEAD buffer of the Rx cluster:
1942 * - RX port identifier
1943 * - hardware offload data, if any:
1945 * - IP checksum flag
1946 * - VLAN TCI, if any
1948 * @head HEAD of the packet cluster
1949 * @desc HW descriptor to get data from
1950 * @rxq Pointer to the Rx queue
1953 ixgbe_fill_cluster_head_buf(
1954 struct rte_mbuf *head,
1955 union ixgbe_adv_rx_desc *desc,
1956 struct ixgbe_rx_queue *rxq,
1962 head->port = rxq->port_id;
1964 /* The vlan_tci field is only valid when PKT_RX_VLAN is
1965 * set in the pkt_flags field.
1967 head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
1968 pkt_info = rte_le_to_cpu_32(desc->wb.lower.lo_dword.data);
1969 pkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags);
1970 pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
1971 pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info);
1972 head->ol_flags = pkt_flags;
1974 ixgbe_rxd_pkt_info_to_pkt_type(pkt_info, rxq->pkt_type_mask);
1976 if (likely(pkt_flags & PKT_RX_RSS_HASH))
1977 head->hash.rss = rte_le_to_cpu_32(desc->wb.lower.hi_dword.rss);
1978 else if (pkt_flags & PKT_RX_FDIR) {
1979 head->hash.fdir.hash =
1980 rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.csum)
1981 & IXGBE_ATR_HASH_MASK;
1982 head->hash.fdir.id =
1983 rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.ip_id);
1988 * ixgbe_recv_pkts_lro - receive handler for the LRO case.
1990 * @rx_queue Rx queue handle
1991 * @rx_pkts table of received packets
1992 * @nb_pkts size of rx_pkts table
1993 * @bulk_alloc if TRUE, bulk allocation is used for HW ring refilling
1995 * Handles the Rx HW ring completions when RSC feature is configured. Uses an
1996 * additional ring of ixgbe_scattered_rx_entry's that will hold the relevant RSC info.
1998 * We use the same logic as in Linux and in FreeBSD ixgbe drivers:
1999 * 1) When non-EOP RSC completion arrives:
2000 * a) Update the HEAD of the current RSC aggregation cluster with the new
2001 * segment's data length.
2002 * b) Set the "next" pointer of the current segment to point to the segment
2003 * at the NEXTP index.
2004 * c) Pass the HEAD of RSC aggregation cluster on to the next NEXTP entry
2005 * in the sw_sc_ring.
2006 * 2) When EOP arrives we just update the cluster's total length and offload
2007 * flags and deliver the cluster up to the upper layers. In our case - put it
2008 * in the rx_pkts table.
2010 * Returns the number of received packets/clusters (according to the "bulk
2011 * receive" interface).
2013 static inline uint16_t
2014 ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
2017 struct ixgbe_rx_queue *rxq = rx_queue;
2018 volatile union ixgbe_adv_rx_desc *rx_ring = rxq->rx_ring;
2019 struct ixgbe_rx_entry *sw_ring = rxq->sw_ring;
2020 struct ixgbe_scattered_rx_entry *sw_sc_ring = rxq->sw_sc_ring;
2021 uint16_t rx_id = rxq->rx_tail;
2023 uint16_t nb_hold = rxq->nb_rx_hold;
2024 uint16_t prev_id = rxq->rx_tail;
2026 while (nb_rx < nb_pkts) {
2028 struct ixgbe_rx_entry *rxe;
2029 struct ixgbe_scattered_rx_entry *sc_entry;
2030 struct ixgbe_scattered_rx_entry *next_sc_entry;
2031 struct ixgbe_rx_entry *next_rxe = NULL;
2032 struct rte_mbuf *first_seg;
2033 struct rte_mbuf *rxm;
2034 struct rte_mbuf *nmb = NULL;
2035 union ixgbe_adv_rx_desc rxd;
2038 volatile union ixgbe_adv_rx_desc *rxdp;
2043 * The code in this whole file uses the volatile pointer to
2044 * ensure the read ordering of the status and the rest of the
2045 * descriptor fields (on the compiler level only!!!). This is so
2046 * UGLY - why not just use a compiler barrier instead? DPDK
2047 * even has rte_compiler_barrier() for that.
2049 * But most importantly this is just wrong because this doesn't
2050 * ensure memory ordering in a general case at all. For
2051 * instance, DPDK is supposed to work on Power CPUs where a
2052 * compiler barrier may simply not be enough!
2054 * I tried to write only this function properly to have a
2055 * starting point (as a part of an LRO/RSC series) but the
2056 * compiler cursed at me when I tried to cast away the
2057 * "volatile" from rx_ring (yes, it's volatile too!!!). So, I'm
2058 * keeping it the way it is for now.
2060 * The code in this file is broken in so many other places and
2061 * will just not work on a big endian CPU anyway, therefore the
2062 * lines below will have to be revisited together with the rest
2066 * - Get rid of "volatile" and let the compiler do its job.
2067 * - Use the proper memory barrier (rte_rmb()) to ensure the
2068 * memory ordering below.
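 *
 * A minimal sketch of that barrier-based alternative (an illustration of
 * the suggestion above, not what this function currently does):
 *
 *	staterr = rte_le_to_cpu_32(rx_ring[rx_id].wb.upper.status_error);
 *	if (!(staterr & IXGBE_RXDADV_STAT_DD))
 *		break;
 *	rte_rmb();              <- order the DD check before the payload reads
 *	rxd = rx_ring[rx_id];   <- only now copy the rest of the descriptor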
2070 rxdp = &rx_ring[rx_id];
2071 staterr = rte_le_to_cpu_32(rxdp->wb.upper.status_error);
2073 if (!(staterr & IXGBE_RXDADV_STAT_DD))
2078 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
2079 "staterr=0x%x data_len=%u",
2080 rxq->port_id, rxq->queue_id, rx_id, staterr,
2081 rte_le_to_cpu_16(rxd.wb.upper.length));
2084 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
2086 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed "
2087 "port_id=%u queue_id=%u",
2088 rxq->port_id, rxq->queue_id);
2090 rte_eth_devices[rxq->port_id].data->
2091 rx_mbuf_alloc_failed++;
2094 } else if (nb_hold > rxq->rx_free_thresh) {
2095 uint16_t next_rdt = rxq->rx_free_trigger;
2097 if (!ixgbe_rx_alloc_bufs(rxq, false)) {
2099 IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr,
2101 nb_hold -= rxq->rx_free_thresh;
2103 PMD_RX_LOG(DEBUG, "RX bulk alloc failed "
2104 "port_id=%u queue_id=%u",
2105 rxq->port_id, rxq->queue_id);
2107 rte_eth_devices[rxq->port_id].data->
2108 rx_mbuf_alloc_failed++;
2114 rxe = &sw_ring[rx_id];
2115 eop = staterr & IXGBE_RXDADV_STAT_EOP;
2117 next_id = rx_id + 1;
2118 if (next_id == rxq->nb_rx_desc)
2121 /* Prefetch next mbuf while processing current one. */
2122 rte_ixgbe_prefetch(sw_ring[next_id].mbuf);
2125 * When next RX descriptor is on a cache-line boundary,
2126 * prefetch the next 4 RX descriptors and the next 4 pointers
2129 if ((next_id & 0x3) == 0) {
2130 rte_ixgbe_prefetch(&rx_ring[next_id]);
2131 rte_ixgbe_prefetch(&sw_ring[next_id]);
2138 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
2140 * Update RX descriptor with the physical address of the
2141 * new data buffer of the newly allocated mbuf.
2145 rxm->data_off = RTE_PKTMBUF_HEADROOM;
2146 rxdp->read.hdr_addr = 0;
2147 rxdp->read.pkt_addr = dma;
2152 * Set data length & data buffer address of mbuf.
2154 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
2155 rxm->data_len = data_len;
2160 * Get next descriptor index:
2161 * - For RSC it's in the NEXTP field.
2162 * - For a scattered packet - it's just a following
2165 if (ixgbe_rsc_count(&rxd))
2167 (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
2168 IXGBE_RXDADV_NEXTP_SHIFT;
2172 next_sc_entry = &sw_sc_ring[nextp_id];
2173 next_rxe = &sw_ring[nextp_id];
2174 rte_ixgbe_prefetch(next_rxe);
2177 sc_entry = &sw_sc_ring[rx_id];
2178 first_seg = sc_entry->fbuf;
2179 sc_entry->fbuf = NULL;
2182 * If this is the first buffer of the received packet,
2183 * set the pointer to the first mbuf of the packet and
2184 * initialize its context.
2185 * Otherwise, update the total length and the number of segments
2186 * of the current scattered packet, and update the pointer to
2187 * the last mbuf of the current packet.
2189 if (first_seg == NULL) {
2191 first_seg->pkt_len = data_len;
2192 first_seg->nb_segs = 1;
2194 first_seg->pkt_len += data_len;
2195 first_seg->nb_segs++;
2202 * If this is not the last buffer of the received packet, update
2203 * the pointer to the first mbuf at the NEXTP entry in the
2204 * sw_sc_ring and continue to parse the RX ring.
2206 if (!eop && next_rxe) {
2207 rxm->next = next_rxe->mbuf;
2208 next_sc_entry->fbuf = first_seg;
2212 /* Initialize the first mbuf of the returned packet */
2213 ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq, staterr);
2216 * Deal with the case when HW CRC strip is disabled.
2217 * That can't happen when LRO is enabled, but still could
2218 * happen for scattered RX mode.
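 *
 * For example (illustrative numbers only): if the last segment holds just
 * 2 bytes, all of them CRC, while crc_len is 4, that mbuf is freed, the
 * segment count is decremented and the remaining 2 CRC bytes are trimmed
 * from the previous segment's data_len instead.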
2220 first_seg->pkt_len -= rxq->crc_len;
2221 if (unlikely(rxm->data_len <= rxq->crc_len)) {
2222 struct rte_mbuf *lp;
2224 for (lp = first_seg; lp->next != rxm; lp = lp->next)
2227 first_seg->nb_segs--;
2228 lp->data_len -= rxq->crc_len - rxm->data_len;
2230 rte_pktmbuf_free_seg(rxm);
2232 rxm->data_len -= rxq->crc_len;
2234 /* Prefetch data of first segment, if configured to do so. */
2235 rte_packet_prefetch((char *)first_seg->buf_addr +
2236 first_seg->data_off);
2239 * Store the mbuf address into the next entry of the array
2240 * of returned packets.
2242 rx_pkts[nb_rx++] = first_seg;
2246 * Record index of the next RX descriptor to probe.
2248 rxq->rx_tail = rx_id;
2251 * If the number of free RX descriptors is greater than the RX free
2252 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
2254 * Update the RDT with the value of the last processed RX descriptor
2255 * minus 1, to guarantee that the RDT register is never equal to the
2256 * RDH register, which creates a "full" ring situation from the
2257 * hardware point of view...
2259 if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {
2260 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
2261 "nb_hold=%u nb_rx=%u",
2262 rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
2265 IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr, prev_id);
2269 rxq->nb_rx_hold = nb_hold;
2274 ixgbe_recv_pkts_lro_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
2277 return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, false);
2281 ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
2284 return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, true);
2287 /*********************************************************************
2289 * Queue management functions
2291 **********************************************************************/
2293 static void __attribute__((cold))
2294 ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
2298 if (txq->sw_ring != NULL) {
2299 for (i = 0; i < txq->nb_tx_desc; i++) {
2300 if (txq->sw_ring[i].mbuf != NULL) {
2301 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
2302 txq->sw_ring[i].mbuf = NULL;
2308 static void __attribute__((cold))
2309 ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
2312 txq->sw_ring != NULL)
2313 rte_free(txq->sw_ring);
2316 static void __attribute__((cold))
2317 ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
2319 if (txq != NULL && txq->ops != NULL) {
2320 txq->ops->release_mbufs(txq);
2321 txq->ops->free_swring(txq);
2326 void __attribute__((cold))
2327 ixgbe_dev_tx_queue_release(void *txq)
2329 ixgbe_tx_queue_release(txq);
2332 /* (Re)set dynamic ixgbe_tx_queue fields to defaults */
2333 static void __attribute__((cold))
2334 ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
2336 static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
2337 struct ixgbe_tx_entry *txe = txq->sw_ring;
2340 /* Zero out HW ring memory */
2341 for (i = 0; i < txq->nb_tx_desc; i++) {
2342 txq->tx_ring[i] = zeroed_desc;
2345 /* Initialize SW ring entries */
2346 prev = (uint16_t) (txq->nb_tx_desc - 1);
2347 for (i = 0; i < txq->nb_tx_desc; i++) {
2348 volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
2350 txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD);
2353 txe[prev].next_id = i;
2357 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
2358 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2361 txq->nb_tx_used = 0;
2363 * Always allow 1 descriptor to be un-allocated to avoid
2364 * a H/W race condition
2366 txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
2367 txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
2369 memset((void *)&txq->ctx_cache, 0,
2370 IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
2373 static const struct ixgbe_txq_ops def_txq_ops = {
2374 .release_mbufs = ixgbe_tx_queue_release_mbufs,
2375 .free_swring = ixgbe_tx_free_swring,
2376 .reset = ixgbe_reset_tx_queue,
2379 /* Takes an ethdev and a queue and sets up the tx function to be used based on
2380 * the queue parameters. Used in tx_queue_setup by primary process and then
2381 * in dev_init by secondary process when attaching to an existing ethdev.
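 *
 * Selection logic in short (a summary of the branches below, assuming the
 * relevant build-time options are enabled):
 *
 *	no offloads, no IPsec, tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST
 *		-> simple path (ixgbe_xmit_pkts_simple), upgraded to the
 *		   vector path (ixgbe_xmit_pkts_vec) when tx_rs_thresh also
 *		   fits RTE_IXGBE_TX_MAX_FREE_BUF_SZ and the vector queue
 *		   setup succeeds
 *	anything else
 *		-> full-featured path (ixgbe_xmit_pkts with ixgbe_prep_pkts)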
2383 void __attribute__((cold))
2384 ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
2386 /* Use a simple Tx queue (no offloads, no multi segs) if possible */
2387 if ((txq->offloads == 0) &&
2388 #ifdef RTE_LIBRTE_SECURITY
2389 !(txq->using_ipsec) &&
2391 (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
2392 PMD_INIT_LOG(DEBUG, "Using simple tx code path");
2393 dev->tx_pkt_prepare = NULL;
2394 #ifdef RTE_IXGBE_INC_VECTOR
2395 if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
2396 (rte_eal_process_type() != RTE_PROC_PRIMARY ||
2397 ixgbe_txq_vec_setup(txq) == 0)) {
2398 PMD_INIT_LOG(DEBUG, "Vector tx enabled.");
2399 dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
2402 dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
2404 PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
2406 " - offloads = 0x%" PRIx64,
2409 " - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
2410 (unsigned long)txq->tx_rs_thresh,
2411 (unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
2412 dev->tx_pkt_burst = ixgbe_xmit_pkts;
2413 dev->tx_pkt_prepare = ixgbe_prep_pkts;
2418 ixgbe_get_tx_queue_offloads(struct rte_eth_dev *dev)
2426 ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
2428 uint64_t tx_offload_capa;
2429 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2432 DEV_TX_OFFLOAD_VLAN_INSERT |
2433 DEV_TX_OFFLOAD_IPV4_CKSUM |
2434 DEV_TX_OFFLOAD_UDP_CKSUM |
2435 DEV_TX_OFFLOAD_TCP_CKSUM |
2436 DEV_TX_OFFLOAD_SCTP_CKSUM |
2437 DEV_TX_OFFLOAD_TCP_TSO |
2438 DEV_TX_OFFLOAD_MULTI_SEGS;
2440 if (hw->mac.type == ixgbe_mac_82599EB ||
2441 hw->mac.type == ixgbe_mac_X540)
2442 tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
2444 if (hw->mac.type == ixgbe_mac_X550 ||
2445 hw->mac.type == ixgbe_mac_X550EM_x ||
2446 hw->mac.type == ixgbe_mac_X550EM_a)
2447 tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
2449 #ifdef RTE_LIBRTE_SECURITY
2450 if (dev->security_ctx)
2451 tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
2453 return tx_offload_capa;
2456 int __attribute__((cold))
2457 ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
2460 unsigned int socket_id,
2461 const struct rte_eth_txconf *tx_conf)
2463 const struct rte_memzone *tz;
2464 struct ixgbe_tx_queue *txq;
2465 struct ixgbe_hw *hw;
2466 uint16_t tx_rs_thresh, tx_free_thresh;
2469 PMD_INIT_FUNC_TRACE();
2470 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2472 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
2475 * Validate number of transmit descriptors.
2476 * It must not exceed hardware maximum, and must be multiple
2479 if (nb_desc % IXGBE_TXD_ALIGN != 0 ||
2480 (nb_desc > IXGBE_MAX_RING_DESC) ||
2481 (nb_desc < IXGBE_MIN_RING_DESC)) {
2486 * The following two parameters control the setting of the RS bit on
2487 * transmit descriptors.
2488 * TX descriptors will have their RS bit set after txq->tx_rs_thresh
2489 * descriptors have been used.
2490 * The TX descriptor ring will be cleaned after txq->tx_free_thresh
2491 * descriptors are used or if the number of descriptors required
2492 * to transmit a packet is greater than the number of free TX
2494 * The following constraints must be satisfied:
2495 * tx_rs_thresh must be greater than 0.
2496 * tx_rs_thresh must be less than the size of the ring minus 2.
2497 * tx_rs_thresh must be less than or equal to tx_free_thresh.
2498 * tx_rs_thresh must be a divisor of the ring size.
2499 * tx_free_thresh must be greater than 0.
2500 * tx_free_thresh must be less than the size of the ring minus 3.
2501 * tx_free_thresh + tx_rs_thresh must not exceed nb_desc.
2502 * One descriptor in the TX ring is used as a sentinel to avoid a
2503 * H/W race condition, hence the maximum threshold constraints.
2504 * When set to zero use default values.
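 *
 * A worked example (illustrative values, not recommendations): with
 * nb_desc = 512, tx_free_thresh = 32 and tx_rs_thresh = 32 every
 * constraint above holds: 32 > 0, 32 < 510, 32 <= 32, 512 % 32 == 0,
 * 32 < 509 and 32 + 32 <= 512.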
2506 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
2507 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
2508 /* force tx_rs_thresh to adapt to an aggressive tx_free_thresh */
2509 tx_rs_thresh = (DEFAULT_TX_RS_THRESH + tx_free_thresh > nb_desc) ?
2510 nb_desc - tx_free_thresh : DEFAULT_TX_RS_THRESH;
2511 if (tx_conf->tx_rs_thresh > 0)
2512 tx_rs_thresh = tx_conf->tx_rs_thresh;
2513 if (tx_rs_thresh + tx_free_thresh > nb_desc) {
2514 PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not "
2515 "exceed nb_desc. (tx_rs_thresh=%u "
2516 "tx_free_thresh=%u nb_desc=%u port = %d queue=%d)",
2517 (unsigned int)tx_rs_thresh,
2518 (unsigned int)tx_free_thresh,
2519 (unsigned int)nb_desc,
2520 (int)dev->data->port_id,
2524 if (tx_rs_thresh >= (nb_desc - 2)) {
2525 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number "
2526 "of TX descriptors minus 2. (tx_rs_thresh=%u "
2527 "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2528 (int)dev->data->port_id, (int)queue_idx);
2531 if (tx_rs_thresh > DEFAULT_TX_RS_THRESH) {
2532 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less or equal than %u. "
2533 "(tx_rs_thresh=%u port=%d queue=%d)",
2534 DEFAULT_TX_RS_THRESH, (unsigned int)tx_rs_thresh,
2535 (int)dev->data->port_id, (int)queue_idx);
2538 if (tx_free_thresh >= (nb_desc - 3)) {
2539 PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the number of "
2541 "TX descriptors minus 3. (tx_free_thresh=%u "
2542 "port=%d queue=%d)",
2543 (unsigned int)tx_free_thresh,
2544 (int)dev->data->port_id, (int)queue_idx);
2547 if (tx_rs_thresh > tx_free_thresh) {
2548 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to "
2549 "tx_free_thresh. (tx_free_thresh=%u "
2550 "tx_rs_thresh=%u port=%d queue=%d)",
2551 (unsigned int)tx_free_thresh,
2552 (unsigned int)tx_rs_thresh,
2553 (int)dev->data->port_id,
2557 if ((nb_desc % tx_rs_thresh) != 0) {
2558 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
2559 "number of TX descriptors. (tx_rs_thresh=%u "
2560 "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2561 (int)dev->data->port_id, (int)queue_idx);
2566 * If tx_rs_thresh is greater than 1, then TX WTHRESH should be
2567 * set to 0. If WTHRESH is greater than zero, the RS bit is ignored
2568 * by the NIC and all descriptors are written back after the NIC
2569 * accumulates WTHRESH descriptors.
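 *
 * For example, a configuration with tx_rs_thresh = 32 must leave
 * tx_conf->tx_thresh.wthresh at 0, otherwise the check below refuses the
 * configuration; write-back is then driven by the RS bit rather than by
 * WTHRESH accumulation.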
2571 if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
2572 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
2573 "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
2574 "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2575 (int)dev->data->port_id, (int)queue_idx);
2579 /* Free memory prior to re-allocation if needed... */
2580 if (dev->data->tx_queues[queue_idx] != NULL) {
2581 ixgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
2582 dev->data->tx_queues[queue_idx] = NULL;
2585 /* First allocate the tx queue data structure */
2586 txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue),
2587 RTE_CACHE_LINE_SIZE, socket_id);
2592 * Allocate TX ring hardware descriptors. A memzone large enough to
2593 * handle the maximum ring size is allocated in order to allow for
2594 * resizing in later calls to the queue setup function.
2596 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
2597 sizeof(union ixgbe_adv_tx_desc) * IXGBE_MAX_RING_DESC,
2598 IXGBE_ALIGN, socket_id);
2600 ixgbe_tx_queue_release(txq);
2604 txq->nb_tx_desc = nb_desc;
2605 txq->tx_rs_thresh = tx_rs_thresh;
2606 txq->tx_free_thresh = tx_free_thresh;
2607 txq->pthresh = tx_conf->tx_thresh.pthresh;
2608 txq->hthresh = tx_conf->tx_thresh.hthresh;
2609 txq->wthresh = tx_conf->tx_thresh.wthresh;
2610 txq->queue_id = queue_idx;
2611 txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2612 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2613 txq->port_id = dev->data->port_id;
2614 txq->offloads = offloads;
2615 txq->ops = &def_txq_ops;
2616 txq->tx_deferred_start = tx_conf->tx_deferred_start;
2617 #ifdef RTE_LIBRTE_SECURITY
2618 txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
2619 DEV_TX_OFFLOAD_SECURITY);
2623 * Modification to set VFTDT for virtual function if vf is detected
2625 if (hw->mac.type == ixgbe_mac_82599_vf ||
2626 hw->mac.type == ixgbe_mac_X540_vf ||
2627 hw->mac.type == ixgbe_mac_X550_vf ||
2628 hw->mac.type == ixgbe_mac_X550EM_x_vf ||
2629 hw->mac.type == ixgbe_mac_X550EM_a_vf)
2630 txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx));
2632 txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
2634 txq->tx_ring_phys_addr = tz->iova;
2635 txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;
2637 /* Allocate software ring */
2638 txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
2639 sizeof(struct ixgbe_tx_entry) * nb_desc,
2640 RTE_CACHE_LINE_SIZE, socket_id);
2641 if (txq->sw_ring == NULL) {
2642 ixgbe_tx_queue_release(txq);
2645 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
2646 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
2648 /* set up vector or scalar TX function as appropriate */
2649 ixgbe_set_tx_function(dev, txq);
2651 txq->ops->reset(txq);
2653 dev->data->tx_queues[queue_idx] = txq;
2660 * ixgbe_free_sc_cluster - free the not-yet-completed scattered cluster
2662 * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
2663 * in the sw_sc_ring is not set to NULL but rather points to the next
2664 * mbuf of this RSC aggregation (that has not been completed yet and still
2665 * resides on the HW ring). So, instead of calling for rte_pktmbuf_free() we
2666 * will just free the first "nb_segs" segments of the cluster explicitly by calling
2667 * an rte_pktmbuf_free_seg().
2669 * @m scattered cluster head
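 *
 * In other words (a short illustration of the loop below): with
 * nb_segs == 3 only m, m->next and m->next->next are freed one by one;
 * the segment the tail still points to belongs to an in-flight descriptor
 * on the HW ring and must not be touched here.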
2671 static void __attribute__((cold))
2672 ixgbe_free_sc_cluster(struct rte_mbuf *m)
2674 uint16_t i, nb_segs = m->nb_segs;
2675 struct rte_mbuf *next_seg;
2677 for (i = 0; i < nb_segs; i++) {
2679 rte_pktmbuf_free_seg(m);
2684 static void __attribute__((cold))
2685 ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
2689 #ifdef RTE_IXGBE_INC_VECTOR
2690 /* SSE Vector driver has a different way of releasing mbufs. */
2691 if (rxq->rx_using_sse) {
2692 ixgbe_rx_queue_release_mbufs_vec(rxq);
2697 if (rxq->sw_ring != NULL) {
2698 for (i = 0; i < rxq->nb_rx_desc; i++) {
2699 if (rxq->sw_ring[i].mbuf != NULL) {
2700 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
2701 rxq->sw_ring[i].mbuf = NULL;
2704 if (rxq->rx_nb_avail) {
2705 for (i = 0; i < rxq->rx_nb_avail; ++i) {
2706 struct rte_mbuf *mb;
2708 mb = rxq->rx_stage[rxq->rx_next_avail + i];
2709 rte_pktmbuf_free_seg(mb);
2711 rxq->rx_nb_avail = 0;
2715 if (rxq->sw_sc_ring)
2716 for (i = 0; i < rxq->nb_rx_desc; i++)
2717 if (rxq->sw_sc_ring[i].fbuf) {
2718 ixgbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
2719 rxq->sw_sc_ring[i].fbuf = NULL;
2723 static void __attribute__((cold))
2724 ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)
2727 ixgbe_rx_queue_release_mbufs(rxq);
2728 rte_free(rxq->sw_ring);
2729 rte_free(rxq->sw_sc_ring);
2734 void __attribute__((cold))
2735 ixgbe_dev_rx_queue_release(void *rxq)
2737 ixgbe_rx_queue_release(rxq);
2741 * Check if Rx Burst Bulk Alloc function can be used.
2743 * 0: the preconditions are satisfied and the bulk allocation function
2745 * -EINVAL: the preconditions are NOT satisfied and the default Rx burst
2746 * function must be used.
2748 static inline int __attribute__((cold))
2749 check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
2754 * Make sure the following pre-conditions are satisfied:
2755 * rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST
2756 * rxq->rx_free_thresh < rxq->nb_rx_desc
2757 * (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
2758 * Scattered packets are not supported. This should be checked
2759 * outside of this function.
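 *
 * For example (illustrative values only): a queue with nb_rx_desc = 512
 * and rx_free_thresh = 64 satisfies all three conditions, assuming
 * RTE_PMD_IXGBE_RX_MAX_BURST is 32: 64 >= 32, 64 < 512 and 512 % 64 == 0.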
2761 if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) {
2762 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2763 "rxq->rx_free_thresh=%d, "
2764 "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
2765 rxq->rx_free_thresh, RTE_PMD_IXGBE_RX_MAX_BURST);
2767 } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
2768 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2769 "rxq->rx_free_thresh=%d, "
2770 "rxq->nb_rx_desc=%d",
2771 rxq->rx_free_thresh, rxq->nb_rx_desc);
2773 } else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) {
2774 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2775 "rxq->nb_rx_desc=%d, "
2776 "rxq->rx_free_thresh=%d",
2777 rxq->nb_rx_desc, rxq->rx_free_thresh);
2784 /* Reset dynamic ixgbe_rx_queue fields back to defaults */
2785 static void __attribute__((cold))
2786 ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
2788 static const union ixgbe_adv_rx_desc zeroed_desc = {{0}};
2790 uint16_t len = rxq->nb_rx_desc;
2793 * By default, the Rx queue setup function allocates enough memory for
2794 * IXGBE_MAX_RING_DESC. The Rx Burst bulk allocation function requires
2795 * extra memory at the end of the descriptor ring to be zero'd out.
2797 if (adapter->rx_bulk_alloc_allowed)
2798 /* zero out extra memory */
2799 len += RTE_PMD_IXGBE_RX_MAX_BURST;
2802 * Zero out HW ring memory. Zero out extra memory at the end of
2803 * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
2804 * reads extra memory as zeros.
2806 for (i = 0; i < len; i++) {
2807 rxq->rx_ring[i] = zeroed_desc;
2811 * initialize extra software ring entries. Space for these extra
2812 * entries is always allocated
2814 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
2815 for (i = rxq->nb_rx_desc; i < len; ++i) {
2816 rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
2819 rxq->rx_nb_avail = 0;
2820 rxq->rx_next_avail = 0;
2821 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
2823 rxq->nb_rx_hold = 0;
2824 rxq->pkt_first_seg = NULL;
2825 rxq->pkt_last_seg = NULL;
2827 #ifdef RTE_IXGBE_INC_VECTOR
2828 rxq->rxrearm_start = 0;
2829 rxq->rxrearm_nb = 0;
2834 ixgbe_is_vf(struct rte_eth_dev *dev)
2836 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2838 switch (hw->mac.type) {
2839 case ixgbe_mac_82599_vf:
2840 case ixgbe_mac_X540_vf:
2841 case ixgbe_mac_X550_vf:
2842 case ixgbe_mac_X550EM_x_vf:
2843 case ixgbe_mac_X550EM_a_vf:
2851 ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev)
2853 uint64_t offloads = 0;
2854 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2856 if (hw->mac.type != ixgbe_mac_82598EB)
2857 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
2863 ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
2866 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2868 offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
2869 DEV_RX_OFFLOAD_UDP_CKSUM |
2870 DEV_RX_OFFLOAD_TCP_CKSUM |
2871 DEV_RX_OFFLOAD_KEEP_CRC |
2872 DEV_RX_OFFLOAD_JUMBO_FRAME |
2873 DEV_RX_OFFLOAD_VLAN_FILTER |
2874 DEV_RX_OFFLOAD_SCATTER;
2876 if (hw->mac.type == ixgbe_mac_82598EB)
2877 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
2879 if (ixgbe_is_vf(dev) == 0)
2880 offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
2883 * RSC is only supported by 82599, x540 and x550 PF devices in a non-SR-IOV
2886 if ((hw->mac.type == ixgbe_mac_82599EB ||
2887 hw->mac.type == ixgbe_mac_X540 ||
2888 hw->mac.type == ixgbe_mac_X550) &&
2889 !RTE_ETH_DEV_SRIOV(dev).active)
2890 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
2892 if (hw->mac.type == ixgbe_mac_82599EB ||
2893 hw->mac.type == ixgbe_mac_X540)
2894 offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
2896 if (hw->mac.type == ixgbe_mac_X550 ||
2897 hw->mac.type == ixgbe_mac_X550EM_x ||
2898 hw->mac.type == ixgbe_mac_X550EM_a)
2899 offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
2901 #ifdef RTE_LIBRTE_SECURITY
2902 if (dev->security_ctx)
2903 offloads |= DEV_RX_OFFLOAD_SECURITY;
2909 int __attribute__((cold))
2910 ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
2913 unsigned int socket_id,
2914 const struct rte_eth_rxconf *rx_conf,
2915 struct rte_mempool *mp)
2917 const struct rte_memzone *rz;
2918 struct ixgbe_rx_queue *rxq;
2919 struct ixgbe_hw *hw;
2921 struct ixgbe_adapter *adapter =
2922 (struct ixgbe_adapter *)dev->data->dev_private;
2925 PMD_INIT_FUNC_TRACE();
2926 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2928 offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
2931 * Validate number of receive descriptors.
2932 * It must not exceed hardware maximum, and must be multiple
2935 if (nb_desc % IXGBE_RXD_ALIGN != 0 ||
2936 (nb_desc > IXGBE_MAX_RING_DESC) ||
2937 (nb_desc < IXGBE_MIN_RING_DESC)) {
2941 /* Free memory prior to re-allocation if needed... */
2942 if (dev->data->rx_queues[queue_idx] != NULL) {
2943 ixgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
2944 dev->data->rx_queues[queue_idx] = NULL;
2947 /* First allocate the rx queue data structure */
2948 rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct ixgbe_rx_queue),
2949 RTE_CACHE_LINE_SIZE, socket_id);
2953 rxq->nb_rx_desc = nb_desc;
2954 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
2955 rxq->queue_id = queue_idx;
2956 rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2957 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2958 rxq->port_id = dev->data->port_id;
2959 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
2960 rxq->crc_len = RTE_ETHER_CRC_LEN;
2963 rxq->drop_en = rx_conf->rx_drop_en;
2964 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
2965 rxq->offloads = offloads;
2968 * The packet type in RX descriptor is different for different NICs.
2969 * Some bits are used for x550 but reserved for other NICs.
2970 * So set different masks for different NICs.
2972 if (hw->mac.type == ixgbe_mac_X550 ||
2973 hw->mac.type == ixgbe_mac_X550EM_x ||
2974 hw->mac.type == ixgbe_mac_X550EM_a ||
2975 hw->mac.type == ixgbe_mac_X550_vf ||
2976 hw->mac.type == ixgbe_mac_X550EM_x_vf ||
2977 hw->mac.type == ixgbe_mac_X550EM_a_vf)
2978 rxq->pkt_type_mask = IXGBE_PACKET_TYPE_MASK_X550;
2980 rxq->pkt_type_mask = IXGBE_PACKET_TYPE_MASK_82599;
2983 * Allocate RX ring hardware descriptors. A memzone large enough to
2984 * handle the maximum ring size is allocated in order to allow for
2985 * resizing in later calls to the queue setup function.
2987 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
2988 RX_RING_SZ, IXGBE_ALIGN, socket_id);
2990 ixgbe_rx_queue_release(rxq);
2995 * Zero init all the descriptors in the ring.
2997 memset(rz->addr, 0, RX_RING_SZ);
3000 * Modified to setup VFRDT for Virtual Function
3002 if (hw->mac.type == ixgbe_mac_82599_vf ||
3003 hw->mac.type == ixgbe_mac_X540_vf ||
3004 hw->mac.type == ixgbe_mac_X550_vf ||
3005 hw->mac.type == ixgbe_mac_X550EM_x_vf ||
3006 hw->mac.type == ixgbe_mac_X550EM_a_vf) {
3008 IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx));
3010 IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDH(queue_idx));
3013 IXGBE_PCI_REG_ADDR(hw, IXGBE_RDT(rxq->reg_idx));
3015 IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx));
3018 rxq->rx_ring_phys_addr = rz->iova;
3019 rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
3022 * Certain constraints must be met in order to use the bulk buffer
3023 * allocation Rx burst function. If any of Rx queues doesn't meet them
3024 * the feature should be disabled for the whole port.
3026 if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
3027 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc "
3028 "preconditions - canceling the feature for "
3029 "the whole port[%d]",
3030 rxq->queue_id, rxq->port_id);
3031 adapter->rx_bulk_alloc_allowed = false;
3035 * Allocate software ring. Allow for space at the end of the
3036 * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
3037 * function does not access an invalid memory region.
3040 if (adapter->rx_bulk_alloc_allowed)
3041 len += RTE_PMD_IXGBE_RX_MAX_BURST;
3043 rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
3044 sizeof(struct ixgbe_rx_entry) * len,
3045 RTE_CACHE_LINE_SIZE, socket_id);
3046 if (!rxq->sw_ring) {
3047 ixgbe_rx_queue_release(rxq);
3052 * Always allocate even if it's not going to be needed in order to
3053 * simplify the code.
3055 * This ring is used in LRO and Scattered Rx cases and Scattered Rx may
3056 * be requested in ixgbe_dev_rx_init(), which is called later from
3060 rte_zmalloc_socket("rxq->sw_sc_ring",
3061 sizeof(struct ixgbe_scattered_rx_entry) * len,
3062 RTE_CACHE_LINE_SIZE, socket_id);
3063 if (!rxq->sw_sc_ring) {
3064 ixgbe_rx_queue_release(rxq);
3068 PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p "
3069 "dma_addr=0x%"PRIx64,
3070 rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
3071 rxq->rx_ring_phys_addr);
3073 if (!rte_is_power_of_2(nb_desc)) {
3074 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
3075 "preconditions - canceling the feature for "
3076 "the whole port[%d]",
3077 rxq->queue_id, rxq->port_id);
3078 adapter->rx_vec_allowed = false;
3080 ixgbe_rxq_vec_setup(rxq);
3082 dev->data->rx_queues[queue_idx] = rxq;
3084 ixgbe_reset_rx_queue(adapter, rxq);
3090 ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
3092 #define IXGBE_RXQ_SCAN_INTERVAL 4
3093 volatile union ixgbe_adv_rx_desc *rxdp;
3094 struct ixgbe_rx_queue *rxq;
3097 rxq = dev->data->rx_queues[rx_queue_id];
3098 rxdp = &(rxq->rx_ring[rxq->rx_tail]);
3100 while ((desc < rxq->nb_rx_desc) &&
3101 (rxdp->wb.upper.status_error &
3102 rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) {
3103 desc += IXGBE_RXQ_SCAN_INTERVAL;
3104 rxdp += IXGBE_RXQ_SCAN_INTERVAL;
3105 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
3106 rxdp = &(rxq->rx_ring[rxq->rx_tail +
3107 desc - rxq->nb_rx_desc]);
3114 ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
3116 volatile union ixgbe_adv_rx_desc *rxdp;
3117 struct ixgbe_rx_queue *rxq = rx_queue;
3120 if (unlikely(offset >= rxq->nb_rx_desc))
3122 desc = rxq->rx_tail + offset;
3123 if (desc >= rxq->nb_rx_desc)
3124 desc -= rxq->nb_rx_desc;
3126 rxdp = &rxq->rx_ring[desc];
3127 return !!(rxdp->wb.upper.status_error &
3128 rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD));
3132 ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
3134 struct ixgbe_rx_queue *rxq = rx_queue;
3135 volatile uint32_t *status;
3136 uint32_t nb_hold, desc;
3138 if (unlikely(offset >= rxq->nb_rx_desc))
3141 #ifdef RTE_IXGBE_INC_VECTOR
3142 if (rxq->rx_using_sse)
3143 nb_hold = rxq->rxrearm_nb;
3146 nb_hold = rxq->nb_rx_hold;
3147 if (offset >= rxq->nb_rx_desc - nb_hold)
3148 return RTE_ETH_RX_DESC_UNAVAIL;
3150 desc = rxq->rx_tail + offset;
3151 if (desc >= rxq->nb_rx_desc)
3152 desc -= rxq->nb_rx_desc;
3154 status = &rxq->rx_ring[desc].wb.upper.status_error;
3155 if (*status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))
3156 return RTE_ETH_RX_DESC_DONE;
3158 return RTE_ETH_RX_DESC_AVAIL;
3162 ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
3164 struct ixgbe_tx_queue *txq = tx_queue;
3165 volatile uint32_t *status;
3168 if (unlikely(offset >= txq->nb_tx_desc))
3171 desc = txq->tx_tail + offset;
3172 /* go to next desc that has the RS bit */
3173 desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
3175 if (desc >= txq->nb_tx_desc) {
3176 desc -= txq->nb_tx_desc;
3177 if (desc >= txq->nb_tx_desc)
3178 desc -= txq->nb_tx_desc;
3181 status = &txq->tx_ring[desc].wb.status;
3182 if (*status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD))
3183 return RTE_ETH_TX_DESC_DONE;
3185 return RTE_ETH_TX_DESC_FULL;
3189 * Set up link loopback for X540/X550 mode Tx->Rx.
3191 static inline void __attribute__((cold))
3192 ixgbe_setup_loopback_link_x540_x550(struct ixgbe_hw *hw, bool enable)
3195 PMD_INIT_FUNC_TRACE();
3197 u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
3199 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
3200 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
3201 macc = IXGBE_READ_REG(hw, IXGBE_MACC);
3204 /* datasheet 15.2.1: disable AUTONEG (PHY Bit 7.0.C) */
3205 autoneg_reg |= IXGBE_MII_AUTONEG_ENABLE;
3206 /* datasheet 15.2.1: MACC.FLU = 1 (force link up) */
3207 macc |= IXGBE_MACC_FLU;
3209 autoneg_reg &= ~IXGBE_MII_AUTONEG_ENABLE;
3210 macc &= ~IXGBE_MACC_FLU;
3213 hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
3214 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
3216 IXGBE_WRITE_REG(hw, IXGBE_MACC, macc);
3219 void __attribute__((cold))
3220 ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
3223 struct ixgbe_adapter *adapter =
3224 (struct ixgbe_adapter *)dev->data->dev_private;
3225 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3227 PMD_INIT_FUNC_TRACE();
3229 for (i = 0; i < dev->data->nb_tx_queues; i++) {
3230 struct ixgbe_tx_queue *txq = dev->data->tx_queues[i];
3233 txq->ops->release_mbufs(txq);
3234 txq->ops->reset(txq);
3238 for (i = 0; i < dev->data->nb_rx_queues; i++) {
3239 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
3242 ixgbe_rx_queue_release_mbufs(rxq);
3243 ixgbe_reset_rx_queue(adapter, rxq);
3246 /* If loopback mode was enabled, reconfigure the link accordingly */
3247 if (dev->data->dev_conf.lpbk_mode != 0) {
3248 if (hw->mac.type == ixgbe_mac_X540 ||
3249 hw->mac.type == ixgbe_mac_X550 ||
3250 hw->mac.type == ixgbe_mac_X550EM_x ||
3251 hw->mac.type == ixgbe_mac_X550EM_a)
3252 ixgbe_setup_loopback_link_x540_x550(hw, false);
3257 ixgbe_dev_free_queues(struct rte_eth_dev *dev)
3261 PMD_INIT_FUNC_TRACE();
3263 for (i = 0; i < dev->data->nb_rx_queues; i++) {
3264 ixgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
3265 dev->data->rx_queues[i] = NULL;
3267 dev->data->nb_rx_queues = 0;
3269 for (i = 0; i < dev->data->nb_tx_queues; i++) {
3270 ixgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
3271 dev->data->tx_queues[i] = NULL;
3273 dev->data->nb_tx_queues = 0;
3276 /*********************************************************************
3278 * Device RX/TX init functions
3280 **********************************************************************/
3283 * Receive Side Scaling (RSS)
3284 * See section 7.1.2.8 in the following document:
3285 * "Intel 82599 10 GbE Controller Datasheet" - Revision 2.1 October 2009
3288 * The source and destination IP addresses of the IP header and the source
3289 * and destination ports of TCP/UDP headers, if any, of received packets are
3290 * hashed against a configurable random key to compute a 32-bit RSS hash result.
3291 * The seven (7) LSBs of the 32-bit hash result are used as an index into a
3292 * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
3293 * RSS output index which is used as the RX queue index where to store the
3295 * The following output is supplied in the RX write-back descriptor:
3296 * - 32-bit result of the Microsoft RSS hash function,
3297 * - 4-bit RSS type field.
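 *
 * In pseudo-code, the queue selection therefore boils down to (an
 * illustration of the text above, not driver code):
 *
 *	hash  = rss_hash(rss_key, ip_addresses, l4_ports);   32-bit result
 *	queue = reta[hash & 0x7F];                            7 LSBs -> RETA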
3301 * RSS random key supplied in section 7.1.2.8.3 of the Intel 82599 datasheet.
3302 * Used as the default key.
3304 static uint8_t rss_intel_key[40] = {
3305 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
3306 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
3307 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
3308 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
3309 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
3313 ixgbe_rss_disable(struct rte_eth_dev *dev)
3315 struct ixgbe_hw *hw;
3319 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3320 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3321 mrqc = IXGBE_READ_REG(hw, mrqc_reg);
3322 mrqc &= ~IXGBE_MRQC_RSSEN;
3323 IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
3327 ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
3337 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3338 rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
3340 hash_key = rss_conf->rss_key;
3341 if (hash_key != NULL) {
3342 /* Fill in RSS hash key */
3343 for (i = 0; i < 10; i++) {
3344 rss_key = hash_key[(i * 4)];
3345 rss_key |= hash_key[(i * 4) + 1] << 8;
3346 rss_key |= hash_key[(i * 4) + 2] << 16;
3347 rss_key |= hash_key[(i * 4) + 3] << 24;
3348 IXGBE_WRITE_REG_ARRAY(hw, rssrk_reg, i, rss_key);
3352 /* Set configured hashing protocols in MRQC register */
3353 rss_hf = rss_conf->rss_hf;
3354 mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */
3355 if (rss_hf & ETH_RSS_IPV4)
3356 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
3357 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
3358 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
3359 if (rss_hf & ETH_RSS_IPV6)
3360 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
3361 if (rss_hf & ETH_RSS_IPV6_EX)
3362 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
3363 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
3364 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3365 if (rss_hf & ETH_RSS_IPV6_TCP_EX)
3366 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
3367 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
3368 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3369 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
3370 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3371 if (rss_hf & ETH_RSS_IPV6_UDP_EX)
3372 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
3373 IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
3377 ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
3378 struct rte_eth_rss_conf *rss_conf)
3380 struct ixgbe_hw *hw;
3385 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3387 if (!ixgbe_rss_update_sp(hw->mac.type)) {
3388 PMD_DRV_LOG(ERR, "RSS hash update is not supported on this "
3392 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3395 * Excerpt from section 7.1.2.8 Receive-Side Scaling (RSS):
3396 * "RSS enabling cannot be done dynamically while it must be
3397 * preceded by a software reset"
3398 * Before changing anything, first check that the update RSS operation
3399 * does not attempt to disable RSS, if RSS was enabled at
3400 * initialization time, or does not attempt to enable RSS, if RSS was
3401 * disabled at initialization time.
3403 rss_hf = rss_conf->rss_hf & IXGBE_RSS_OFFLOAD_ALL;
3404 mrqc = IXGBE_READ_REG(hw, mrqc_reg);
3405 if (!(mrqc & IXGBE_MRQC_RSSEN)) { /* RSS disabled */
3406 if (rss_hf != 0) /* Enable RSS */
3408 return 0; /* Nothing to do */
3411 if (rss_hf == 0) /* Disable RSS */
3413 ixgbe_hw_rss_hash_set(hw, rss_conf);
3418 ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
3419 struct rte_eth_rss_conf *rss_conf)
3421 struct ixgbe_hw *hw;
3430 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3431 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3432 rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
3433 hash_key = rss_conf->rss_key;
3434 if (hash_key != NULL) {
3435 /* Return RSS hash key */
3436 for (i = 0; i < 10; i++) {
3437 rss_key = IXGBE_READ_REG_ARRAY(hw, rssrk_reg, i);
3438 hash_key[(i * 4)] = rss_key & 0x000000FF;
3439 hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
3440 hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
3441 hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
3445 /* Get RSS functions configured in MRQC register */
3446 mrqc = IXGBE_READ_REG(hw, mrqc_reg);
3447 if ((mrqc & IXGBE_MRQC_RSSEN) == 0) { /* RSS is disabled */
3448 rss_conf->rss_hf = 0;
3452 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4)
3453 rss_hf |= ETH_RSS_IPV4;
3454 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP)
3455 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
3456 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6)
3457 rss_hf |= ETH_RSS_IPV6;
3458 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX)
3459 rss_hf |= ETH_RSS_IPV6_EX;
3460 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP)
3461 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
3462 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP)
3463 rss_hf |= ETH_RSS_IPV6_TCP_EX;
3464 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP)
3465 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
3466 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP)
3467 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
3468 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP)
3469 rss_hf |= ETH_RSS_IPV6_UDP_EX;
3470 rss_conf->rss_hf = rss_hf;
3475 ixgbe_rss_configure(struct rte_eth_dev *dev)
3477 struct rte_eth_rss_conf rss_conf;
3478 struct ixgbe_adapter *adapter;
3479 struct ixgbe_hw *hw;
3483 uint16_t sp_reta_size;
3486 PMD_INIT_FUNC_TRACE();
3487 adapter = (struct ixgbe_adapter *)dev->data->dev_private;
3488 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3490 sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
3493 * Fill in redirection table
3494 * The byte-swap is needed because NIC registers are in
3495 * little-endian order.
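 *
 * Each 32-bit RETA register packs four byte-wide entries, which is why
 * the loop below shifts the accumulated value by 8 for every queue index
 * and only writes the register once four entries have been collected,
 * roughly:
 *
 *	reta = (reta << 8) | j;
 *	if ((i & 3) == 3)
 *		IXGBE_WRITE_REG(hw, reta_reg, rte_bswap32(reta));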
3497 if (adapter->rss_reta_updated == 0) {
3499 for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
3500 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
3502 if (j == dev->data->nb_rx_queues)
3504 reta = (reta << 8) | j;
3506 IXGBE_WRITE_REG(hw, reta_reg,
3512 * Configure the RSS key and the RSS protocols used to compute
3513 * the RSS hash of input packets.
3515 rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
3516 if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
3517 ixgbe_rss_disable(dev);
3520 if (rss_conf.rss_key == NULL)
3521 rss_conf.rss_key = rss_intel_key; /* Default hash key */
3522 ixgbe_hw_rss_hash_set(hw, &rss_conf);
3525 #define NUM_VFTA_REGISTERS 128
3526 #define NIC_RX_BUFFER_SIZE 0x200
3527 #define X550_RX_BUFFER_SIZE 0x180
3530 ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
3532 struct rte_eth_vmdq_dcb_conf *cfg;
3533 struct ixgbe_hw *hw;
3534 enum rte_eth_nb_pools num_pools;
3535 uint32_t mrqc, vt_ctl, queue_mapping, vlanctrl;
3537 uint8_t nb_tcs; /* number of traffic classes */
3540 PMD_INIT_FUNC_TRACE();
3541 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3542 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
3543 num_pools = cfg->nb_queue_pools;
3544 /* Check we have a valid number of pools */
3545 if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
3546 ixgbe_rss_disable(dev);
3549 /* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
3550 nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
3554 * split rx buffer up into sections, each for 1 traffic class
3556 switch (hw->mac.type) {
3557 case ixgbe_mac_X550:
3558 case ixgbe_mac_X550EM_x:
3559 case ixgbe_mac_X550EM_a:
3560 pbsize = (uint16_t)(X550_RX_BUFFER_SIZE / nb_tcs);
3563 pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
3566 for (i = 0; i < nb_tcs; i++) {
3567 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
3569 rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
3570 /* clear 10 bits. */
3571 rxpbsize |= (pbsize << IXGBE_RXPBSIZE_SHIFT); /* set value */
3572 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
3574 /* zero alloc all unused TCs */
3575 for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3576 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
3578 rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
3579 /* clear 10 bits. */
3580 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
3583 /* MRQC: enable vmdq and dcb */
3584 mrqc = (num_pools == ETH_16_POOLS) ?
3585 IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN;
3586 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3588 /* PFVTCTL: turn on virtualisation and set the default pool */
3589 vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
3590 if (cfg->enable_default_pool) {
3591 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
3593 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
3596 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
3598 /* RTRUP2TC: mapping user priorities to traffic classes (TCs) */
3600 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
3602 * mapping is done with 3 bits per priority,
3603 * so shift by i*3 each time
3605 queue_mapping |= ((cfg->dcb_tc[i] & 0x07) << (i * 3));
3607 IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, queue_mapping);
3609 /* RTRPCS: DCB related */
3610 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, IXGBE_RMCS_RRM);
3612 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
3613 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3614 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
3615 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
3617 /* VFTA - enable all vlan filters */
3618 for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
3619 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
3622 /* VFRE: pool enabling for receive - 16 or 32 */
3623 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0),
3624 num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
3627 * MPSAR - allow pools to read specific mac addresses
3628 * In this case, all pools should be able to read from mac addr 0
3630 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), 0xFFFFFFFF);
3631 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), 0xFFFFFFFF);
3633 /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
3634 for (i = 0; i < cfg->nb_pool_maps; i++) {
3635 /* set vlan id in VF register and set the valid bit */
3636 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN |
3637 (cfg->pool_map[i].vlan_id & 0xFFF)));
3639 * Put the allowed pools in VFB reg. As we only have 16 or 32
3640 * pools, we only need to use the first half of the register
3643 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), cfg->pool_map[i].pools);
3648 * ixgbe_dcb_tx_hw_config - Configure general DCB TX parameters
3649 * @dev: pointer to eth_dev structure
3650 * @dcb_config: pointer to ixgbe_dcb_config structure
3653 ixgbe_dcb_tx_hw_config(struct rte_eth_dev *dev,
3654 struct ixgbe_dcb_config *dcb_config)
3657 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3659 PMD_INIT_FUNC_TRACE();
3660 if (hw->mac.type != ixgbe_mac_82598EB) {
3661 /* Disable the Tx desc arbiter so that MTQC can be changed */
3662 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3663 reg |= IXGBE_RTTDCS_ARBDIS;
3664 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3666 /* Enable DCB for Tx with 8 TCs */
3667 if (dcb_config->num_tcs.pg_tcs == 8) {
3668 reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3670 reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3672 if (dcb_config->vt_mode)
3673 reg |= IXGBE_MTQC_VT_ENA;
3674 IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
3676 /* Enable the Tx desc arbiter */
3677 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3678 reg &= ~IXGBE_RTTDCS_ARBDIS;
3679 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3681 /* Enable Security TX Buffer IFG for DCB */
3682 reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
3683 reg |= IXGBE_SECTX_DCB;
3684 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
3689 * ixgbe_vmdq_dcb_hw_tx_config - Configure general VMDQ+DCB TX parameters
3690 * @dev: pointer to rte_eth_dev structure
3691 * @dcb_config: pointer to ixgbe_dcb_config structure
3694 ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
3695 struct ixgbe_dcb_config *dcb_config)
3697 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3698 &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
3699 struct ixgbe_hw *hw =
3700 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3702 PMD_INIT_FUNC_TRACE();
3703 if (hw->mac.type != ixgbe_mac_82598EB)
3704 /* PF VF Transmit Enable */
3705 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0),
3706 vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
3708 /* Configure general DCB TX parameters */
3709 ixgbe_dcb_tx_hw_config(dev, dcb_config);
3713 ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
3714 struct ixgbe_dcb_config *dcb_config)
3716 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3717 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
3718 struct ixgbe_dcb_tc_config *tc;
3721 /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
3722 if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) {
3723 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
3724 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
3726 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
3727 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
3730 /* Initialize User Priority to Traffic Class mapping */
3731 for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3732 tc = &dcb_config->tc_config[j];
3733 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
3736 /* User Priority to Traffic Class mapping */
3737 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3738 j = vmdq_rx_conf->dcb_tc[i];
3739 tc = &dcb_config->tc_config[j];
3740 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
3746 ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
3747 struct ixgbe_dcb_config *dcb_config)
3749 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3750 &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
3751 struct ixgbe_dcb_tc_config *tc;
3754 /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
3755 if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) {
3756 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
3757 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
3759 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
3760 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
3763 /* Initialize User Priority to Traffic Class mapping */
3764 for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3765 tc = &dcb_config->tc_config[j];
3766 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
3769 /* User Priority to Traffic Class mapping */
3770 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3771 j = vmdq_tx_conf->dcb_tc[i];
3772 tc = &dcb_config->tc_config[j];
3773 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
3779 ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
3780 struct ixgbe_dcb_config *dcb_config)
3782 struct rte_eth_dcb_rx_conf *rx_conf =
3783 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
3784 struct ixgbe_dcb_tc_config *tc;
3787 dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
3788 dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
3790 /* Initialize User Priority to Traffic Class mapping */
3791 for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3792 tc = &dcb_config->tc_config[j];
3793 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
3796 /* User Priority to Traffic Class mapping */
3797 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3798 j = rx_conf->dcb_tc[i];
3799 tc = &dcb_config->tc_config[j];
3800 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
3806 ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
3807 struct ixgbe_dcb_config *dcb_config)
3809 struct rte_eth_dcb_tx_conf *tx_conf =
3810 &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
3811 struct ixgbe_dcb_tc_config *tc;
3814 dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
3815 dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
3817 /* Initialize User Priority to Traffic Class mapping */
3818 for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3819 tc = &dcb_config->tc_config[j];
3820 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
3823 /* User Priority to Traffic Class mapping */
3824 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3825 j = tx_conf->dcb_tc[i];
3826 tc = &dcb_config->tc_config[j];
3827 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
3833 * ixgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
3834 * @dev: pointer to eth_dev structure
3835 * @dcb_config: pointer to ixgbe_dcb_config structure
3838 ixgbe_dcb_rx_hw_config(struct rte_eth_dev *dev,
3839 struct ixgbe_dcb_config *dcb_config)
3845 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3847 PMD_INIT_FUNC_TRACE();
3849 * Disable the arbiter before changing parameters
3850 * (always enable recycle mode; WSP)
3852 reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
3853 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
3855 if (hw->mac.type != ixgbe_mac_82598EB) {
3856 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
3857 if (dcb_config->num_tcs.pg_tcs == 4) {
3858 if (dcb_config->vt_mode)
3859 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3860 IXGBE_MRQC_VMDQRT4TCEN;
3862 /* no matter whether the mode is DCB or DCB_RSS, just
3863 * set the MRQE to RSSXTCEN. RSS is controlled
3866 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
3867 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3868 IXGBE_MRQC_RTRSS4TCEN;
3871 if (dcb_config->num_tcs.pg_tcs == 8) {
3872 if (dcb_config->vt_mode)
3873 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3874 IXGBE_MRQC_VMDQRT8TCEN;
3876 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
3877 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3878 IXGBE_MRQC_RTRSS8TCEN;
3882 IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
3884 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3885 /* Disable drop for all queues in VMDQ mode*/
3886 for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
3887 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3889 (q << IXGBE_QDE_IDX_SHIFT)));
3891 /* Enable drop for all queues in SRIOV mode */
3892 for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
3893 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3895 (q << IXGBE_QDE_IDX_SHIFT) |
3900 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
3901 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3902 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
3903 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
3905 /* VFTA - enable all vlan filters */
3906 for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
3907 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
3911 * Configure Rx packet plane (recycle mode; WSP) and
3914 reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
3915 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
3919 ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill,
3920 uint16_t *max, uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
3922 switch (hw->mac.type) {
3923 case ixgbe_mac_82598EB:
3924 ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
3926 case ixgbe_mac_82599EB:
3927 case ixgbe_mac_X540:
3928 case ixgbe_mac_X550:
3929 case ixgbe_mac_X550EM_x:
3930 case ixgbe_mac_X550EM_a:
3931 ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
3940 ixgbe_dcb_hw_arbite_tx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *max,
3941 uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
3943 switch (hw->mac.type) {
3944 case ixgbe_mac_82598EB:
3945 ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, tsa);
3946 ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, tsa);
3948 case ixgbe_mac_82599EB:
3949 case ixgbe_mac_X540:
3950 case ixgbe_mac_X550:
3951 case ixgbe_mac_X550EM_x:
3952 case ixgbe_mac_X550EM_a:
3953 ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, tsa);
3954 ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, tsa, map);
3961 #define DCB_RX_CONFIG 1
3962 #define DCB_TX_CONFIG 1
3963 #define DCB_TX_PB 1024
* ixgbe_dcb_hw_configure - Enable DCB and configure
* general DCB parameters, in both VT and non-VT mode
3967 * @dev: pointer to rte_eth_dev structure
3968 * @dcb_config: pointer to ixgbe_dcb_config structure
3971 ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
3972 struct ixgbe_dcb_config *dcb_config)
3975 uint8_t i, pfc_en, nb_tcs;
3976 uint16_t pbsize, rx_buffer_size;
3977 uint8_t config_dcb_rx = 0;
3978 uint8_t config_dcb_tx = 0;
3979 uint8_t tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3980 uint8_t bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3981 uint16_t refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3982 uint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3983 uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3984 struct ixgbe_dcb_tc_config *tc;
3985 uint32_t max_frame = dev->data->mtu + RTE_ETHER_HDR_LEN +
3987 struct ixgbe_hw *hw =
3988 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3989 struct ixgbe_bw_conf *bw_conf =
3990 IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
3992 switch (dev->data->dev_conf.rxmode.mq_mode) {
3993 case ETH_MQ_RX_VMDQ_DCB:
3994 dcb_config->vt_mode = true;
3995 if (hw->mac.type != ixgbe_mac_82598EB) {
3996 config_dcb_rx = DCB_RX_CONFIG;
/* Get DCB and VT RX configuration parameters from rte_eth_conf */
4001 ixgbe_vmdq_dcb_rx_config(dev, dcb_config);
4002 /*Configure general VMDQ and DCB RX parameters*/
4003 ixgbe_vmdq_dcb_configure(dev);
4007 case ETH_MQ_RX_DCB_RSS:
4008 dcb_config->vt_mode = false;
4009 config_dcb_rx = DCB_RX_CONFIG;
/* Get DCB RX configuration parameters from rte_eth_conf */
4011 ixgbe_dcb_rx_config(dev, dcb_config);
4012 /*Configure general DCB RX parameters*/
4013 ixgbe_dcb_rx_hw_config(dev, dcb_config);
4016 PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
4019 switch (dev->data->dev_conf.txmode.mq_mode) {
4020 case ETH_MQ_TX_VMDQ_DCB:
4021 dcb_config->vt_mode = true;
4022 config_dcb_tx = DCB_TX_CONFIG;
/* Get DCB and VT TX configuration parameters from rte_eth_conf */
4026 ixgbe_dcb_vt_tx_config(dev, dcb_config);
4027 /*Configure general VMDQ and DCB TX parameters*/
4028 ixgbe_vmdq_dcb_hw_tx_config(dev, dcb_config);
4032 dcb_config->vt_mode = false;
4033 config_dcb_tx = DCB_TX_CONFIG;
4034 /*get DCB TX configuration parameters from rte_eth_conf*/
4035 ixgbe_dcb_tx_config(dev, dcb_config);
4036 /*Configure general DCB TX parameters*/
4037 ixgbe_dcb_tx_hw_config(dev, dcb_config);
4040 PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration");
4044 nb_tcs = dcb_config->num_tcs.pfc_tcs;
4046 ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
4047 if (nb_tcs == ETH_4_TCS) {
4048 /* Avoid un-configured priority mapping to TC0 */
4050 uint8_t mask = 0xFF;
4052 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
4053 mask = (uint8_t)(mask & (~(1 << map[i])));
4054 for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
4055 if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
4059 /* Re-configure 4 TCs BW */
4060 for (i = 0; i < nb_tcs; i++) {
4061 tc = &dcb_config->tc_config[i];
4062 if (bw_conf->tc_num != nb_tcs)
4063 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
4064 (uint8_t)(100 / nb_tcs);
4065 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
4066 (uint8_t)(100 / nb_tcs);
4068 for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
4069 tc = &dcb_config->tc_config[i];
4070 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
4071 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 0;
4074 /* Re-configure 8 TCs BW */
4075 for (i = 0; i < nb_tcs; i++) {
4076 tc = &dcb_config->tc_config[i];
4077 if (bw_conf->tc_num != nb_tcs)
4078 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
4079 (uint8_t)(100 / nb_tcs + (i & 1));
4080 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
4081 (uint8_t)(100 / nb_tcs + (i & 1));
4085 switch (hw->mac.type) {
4086 case ixgbe_mac_X550:
4087 case ixgbe_mac_X550EM_x:
4088 case ixgbe_mac_X550EM_a:
4089 rx_buffer_size = X550_RX_BUFFER_SIZE;
4092 rx_buffer_size = NIC_RX_BUFFER_SIZE;
4096 if (config_dcb_rx) {
4097 /* Set RX buffer size */
4098 pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
4099 uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT;
4101 for (i = 0; i < nb_tcs; i++) {
4102 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
/* Set the buffer size of all unused TCs to zero */
4105 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
4106 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
4109 if (config_dcb_tx) {
4110 /* Only support an equally distributed
4111 * Tx packet buffer strategy.
4113 uint32_t txpktsize = IXGBE_TXPBSIZE_MAX / nb_tcs;
4114 uint32_t txpbthresh = (txpktsize / DCB_TX_PB) - IXGBE_TXPKT_SIZE_MAX;
4116 for (i = 0; i < nb_tcs; i++) {
4117 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
4118 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
4120 /* Clear unused TCs, if any, to zero buffer size*/
4121 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
4122 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
4123 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
/* Calculate traffic class credits */
4128 ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
4129 IXGBE_DCB_TX_CONFIG);
4130 ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
4131 IXGBE_DCB_RX_CONFIG);
4133 if (config_dcb_rx) {
4134 /* Unpack CEE standard containers */
4135 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_RX_CONFIG, refill);
4136 ixgbe_dcb_unpack_max_cee(dcb_config, max);
4137 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_RX_CONFIG, bwgid);
4138 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_RX_CONFIG, tsa);
4139 /* Configure PG(ETS) RX */
4140 ixgbe_dcb_hw_arbite_rx_config(hw, refill, max, bwgid, tsa, map);
4143 if (config_dcb_tx) {
4144 /* Unpack CEE standard containers */
4145 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
4146 ixgbe_dcb_unpack_max_cee(dcb_config, max);
4147 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
4148 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
4149 /* Configure PG(ETS) TX */
4150 ixgbe_dcb_hw_arbite_tx_config(hw, refill, max, bwgid, tsa, map);
4153 /*Configure queue statistics registers*/
4154 ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
4156 /* Check if the PFC is supported */
4157 if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
4158 pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
4159 for (i = 0; i < nb_tcs; i++) {
* If the TC count is 8 and the default high_water is 48,
* the default low_water is 16.
4164 hw->fc.high_water[i] = (pbsize * 3) / 4;
4165 hw->fc.low_water[i] = pbsize / 4;
4166 /* Enable pfc for this TC */
4167 tc = &dcb_config->tc_config[i];
4168 tc->pfc = ixgbe_dcb_pfc_enabled;
4170 ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
4171 if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
4173 ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
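/*
 * Worked example of the flow-control thresholds above (illustrative; assumes
 * the 512 KB Rx packet buffer of the 82599): with 8 TCs, pbsize = 512/8 =
 * 64 KB per TC, so high_water = 64 * 3/4 = 48 KB and low_water = 64/4 =
 * 16 KB, matching the defaults mentioned in the comment above.
 */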
4180 * ixgbe_configure_dcb - Configure DCB Hardware
4181 * @dev: pointer to rte_eth_dev
4183 void ixgbe_configure_dcb(struct rte_eth_dev *dev)
4185 struct ixgbe_dcb_config *dcb_cfg =
4186 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
4187 struct rte_eth_conf *dev_conf = &(dev->data->dev_conf);
4189 PMD_INIT_FUNC_TRACE();
/* Check that the configured mq_mode supports DCB */
4192 if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
4193 (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) &&
4194 (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS))
4197 if (dev->data->nb_rx_queues > ETH_DCB_NUM_QUEUES)
4200 /** Configure DCB hardware **/
4201 ixgbe_dcb_hw_configure(dev, dcb_cfg);
* VMDq is only supported on 10 GbE NICs.
4208 ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
4210 struct rte_eth_vmdq_rx_conf *cfg;
4211 struct ixgbe_hw *hw;
4212 enum rte_eth_nb_pools num_pools;
4213 uint32_t mrqc, vt_ctl, vlanctrl;
4217 PMD_INIT_FUNC_TRACE();
4218 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4219 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
4220 num_pools = cfg->nb_queue_pools;
4222 ixgbe_rss_disable(dev);
4224 /* MRQC: enable vmdq */
4225 mrqc = IXGBE_MRQC_VMDQEN;
4226 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
4228 /* PFVTCTL: turn on virtualisation and set the default pool */
4229 vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
4230 if (cfg->enable_default_pool)
4231 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
4233 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
4235 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
4237 for (i = 0; i < (int)num_pools; i++) {
4238 vmolr = ixgbe_convert_vm_rx_mask_to_val(cfg->rx_mode, vmolr);
4239 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
4242 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
4243 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4244 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
4245 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
4247 /* VFTA - enable all vlan filters */
4248 for (i = 0; i < NUM_VFTA_REGISTERS; i++)
4249 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), UINT32_MAX);
4251 /* VFRE: pool enabling for receive - 64 */
4252 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), UINT32_MAX);
4253 if (num_pools == ETH_64_POOLS)
4254 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), UINT32_MAX);
4257 * MPSAR - allow pools to read specific mac addresses
4258 * In this case, all pools should be able to read from mac addr 0
4260 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), UINT32_MAX);
4261 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), UINT32_MAX);
4263 /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
4264 for (i = 0; i < cfg->nb_pool_maps; i++) {
4265 /* set vlan id in VF register and set the valid bit */
4266 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN |
4267 (cfg->pool_map[i].vlan_id & IXGBE_RXD_VLAN_ID_MASK)));
4269 * Put the allowed pools in VFB reg. As we only have 16 or 64
4270 * pools, we only need to use the first half of the register
4273 if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0)
4274 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i * 2),
4275 (cfg->pool_map[i].pools & UINT32_MAX));
4277 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((i * 2 + 1)),
4278 ((cfg->pool_map[i].pools >> 32) & UINT32_MAX));
4282 /* PFDMA Tx General Switch Control Enables VMDQ loopback */
4283 if (cfg->enable_loop_back) {
4284 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
4285 for (i = 0; i < RTE_IXGBE_VMTXSW_REGISTER_COUNT; i++)
4286 IXGBE_WRITE_REG(hw, IXGBE_VMTXSW(i), UINT32_MAX);
4289 IXGBE_WRITE_FLUSH(hw);
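/*
 * Illustrative sketch (not part of the driver): an example of the
 * rte_eth_conf VMDq fields consumed by ixgbe_vmdq_rx_hw_configure(). The
 * helper name, the guard macro and the VLAN/pool mapping are hypothetical.
 */
#ifdef IXGBE_RXTX_DOC_EXAMPLES
static void
example_fill_vmdq_conf(struct rte_eth_conf *conf)
{
    struct rte_eth_vmdq_rx_conf *vmdq = &conf->rx_adv_conf.vmdq_rx_conf;

    conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
    vmdq->nb_queue_pools = ETH_64_POOLS;
    vmdq->enable_default_pool = 0;
    vmdq->enable_loop_back = 0;
    vmdq->nb_pool_maps = 2;
    /* Steer VLAN 10 to pool 0 and VLAN 20 to pool 1 */
    vmdq->pool_map[0].vlan_id = 10;
    vmdq->pool_map[0].pools = 1ULL << 0;
    vmdq->pool_map[1].vlan_id = 20;
    vmdq->pool_map[1].pools = 1ULL << 1;
}
#endif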
* ixgbe_vmdq_tx_hw_configure - Configure general VMDq TX parameters
4294 * @hw: pointer to hardware structure
4297 ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
4302 PMD_INIT_FUNC_TRACE();
4303 /*PF VF Transmit Enable*/
4304 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), UINT32_MAX);
4305 IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), UINT32_MAX);
4307 /* Disable the Tx desc arbiter so that MTQC can be changed */
4308 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
4309 reg |= IXGBE_RTTDCS_ARBDIS;
4310 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
4312 reg = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
4313 IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
4315 /* Disable drop for all queues */
4316 for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
4317 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4318 (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
4320 /* Enable the Tx desc arbiter */
4321 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
4322 reg &= ~IXGBE_RTTDCS_ARBDIS;
4323 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
4325 IXGBE_WRITE_FLUSH(hw);
4328 static int __attribute__((cold))
4329 ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
4331 struct ixgbe_rx_entry *rxe = rxq->sw_ring;
4335 /* Initialize software ring entries */
4336 for (i = 0; i < rxq->nb_rx_desc; i++) {
4337 volatile union ixgbe_adv_rx_desc *rxd;
4338 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
4341 PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
4342 (unsigned) rxq->queue_id);
4346 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
4347 mbuf->port = rxq->port_id;
4350 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
4351 rxd = &rxq->rx_ring[i];
4352 rxd->read.hdr_addr = 0;
4353 rxd->read.pkt_addr = dma_addr;
4361 ixgbe_config_vf_rss(struct rte_eth_dev *dev)
4363 struct ixgbe_hw *hw;
4366 ixgbe_rss_configure(dev);
4368 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4370 /* MRQC: enable VF RSS */
4371 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
4372 mrqc &= ~IXGBE_MRQC_MRQE_MASK;
4373 switch (RTE_ETH_DEV_SRIOV(dev).active) {
4375 mrqc |= IXGBE_MRQC_VMDQRSS64EN;
4379 mrqc |= IXGBE_MRQC_VMDQRSS32EN;
4383 PMD_INIT_LOG(ERR, "Invalid pool number in IOV mode with VMDQ RSS");
4387 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
4393 ixgbe_config_vf_default(struct rte_eth_dev *dev)
4395 struct ixgbe_hw *hw =
4396 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4398 switch (RTE_ETH_DEV_SRIOV(dev).active) {
4400 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
4405 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
4406 IXGBE_MRQC_VMDQRT4TCEN);
4410 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
4411 IXGBE_MRQC_VMDQRT8TCEN);
4415 "invalid pool number in IOV mode");
4422 ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
4424 struct ixgbe_hw *hw =
4425 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4427 if (hw->mac.type == ixgbe_mac_82598EB)
4430 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
4432 * SRIOV inactive scheme
4433 * any DCB/RSS w/o VMDq multi-queue setting
4435 switch (dev->data->dev_conf.rxmode.mq_mode) {
4437 case ETH_MQ_RX_DCB_RSS:
4438 case ETH_MQ_RX_VMDQ_RSS:
4439 ixgbe_rss_configure(dev);
4442 case ETH_MQ_RX_VMDQ_DCB:
4443 ixgbe_vmdq_dcb_configure(dev);
4446 case ETH_MQ_RX_VMDQ_ONLY:
4447 ixgbe_vmdq_rx_hw_configure(dev);
4450 case ETH_MQ_RX_NONE:
4452 /* if mq_mode is none, disable rss mode.*/
4453 ixgbe_rss_disable(dev);
4457 /* SRIOV active scheme
4458 * Support RSS together with SRIOV.
4460 switch (dev->data->dev_conf.rxmode.mq_mode) {
4462 case ETH_MQ_RX_VMDQ_RSS:
4463 ixgbe_config_vf_rss(dev);
4465 case ETH_MQ_RX_VMDQ_DCB:
4467 /* In SRIOV, the configuration is the same as VMDq case */
4468 ixgbe_vmdq_dcb_configure(dev);
4470 /* DCB/RSS together with SRIOV is not supported */
4471 case ETH_MQ_RX_VMDQ_DCB_RSS:
4472 case ETH_MQ_RX_DCB_RSS:
4474 "Could not support DCB/RSS with VMDq & SRIOV");
4477 ixgbe_config_vf_default(dev);
4486 ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
4488 struct ixgbe_hw *hw =
4489 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4493 if (hw->mac.type == ixgbe_mac_82598EB)
4496 /* disable arbiter before setting MTQC */
4497 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
4498 rttdcs |= IXGBE_RTTDCS_ARBDIS;
4499 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
4501 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
4503 * SRIOV inactive scheme
4504 * any DCB w/o VMDq multi-queue setting
4506 if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
4507 ixgbe_vmdq_tx_hw_configure(hw);
4509 mtqc = IXGBE_MTQC_64Q_1PB;
4510 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
4513 switch (RTE_ETH_DEV_SRIOV(dev).active) {
4516 * SRIOV active scheme
4517 * FIXME if support DCB together with VMDq & SRIOV
4520 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
4523 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_32VF;
4526 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_RT_ENA |
4530 mtqc = IXGBE_MTQC_64Q_1PB;
4531 PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
4533 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
4536 /* re-enable arbiter */
4537 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
4538 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
4544 * ixgbe_get_rscctl_maxdesc - Calculate the RSCCTL[n].MAXDESC for PF
4546 * Return the RSCCTL[n].MAXDESC for 82599 and x540 PF devices according to the
4547 * spec rev. 3.0 chapter 8.2.3.8.13.
4549 * @pool Memory pool of the Rx queue
4551 static inline uint32_t
4552 ixgbe_get_rscctl_maxdesc(struct rte_mempool *pool)
4554 struct rte_pktmbuf_pool_private *mp_priv = rte_mempool_get_priv(pool);
4556 /* MAXDESC * SRRCTL.BSIZEPKT must not exceed 64 KB minus one */
4558 RTE_IPV4_MAX_PKT_LEN /
4559 (mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);
4562 return IXGBE_RSCCTL_MAXDESC_16;
4563 else if (maxdesc >= 8)
4564 return IXGBE_RSCCTL_MAXDESC_8;
4565 else if (maxdesc >= 4)
4566 return IXGBE_RSCCTL_MAXDESC_4;
4568 return IXGBE_RSCCTL_MAXDESC_1;
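/*
 * Illustrative example of the MAXDESC computation above: with the common
 * 2 KB mbuf data room the per-buffer payload is 2048 bytes, so
 * maxdesc = 65535 / 2048 = 31, which selects IXGBE_RSCCTL_MAXDESC_16
 * (16 * 2 KB = 32 KB, safely below the 64 KB minus one limit).
 */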
4572 * ixgbe_set_ivar - Setup the correct IVAR register for a particular MSIX
4575 * (Taken from FreeBSD tree)
4576 * (yes this is all very magic and confusing :)
4579 * @entry the register array entry
4580 * @vector the MSIX vector for this queue
4584 ixgbe_set_ivar(struct rte_eth_dev *dev, u8 entry, u8 vector, s8 type)
4586 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4589 vector |= IXGBE_IVAR_ALLOC_VAL;
4591 switch (hw->mac.type) {
4593 case ixgbe_mac_82598EB:
4595 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4597 entry += (type * 64);
4598 index = (entry >> 2) & 0x1F;
4599 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4600 ivar &= ~(0xFF << (8 * (entry & 0x3)));
4601 ivar |= (vector << (8 * (entry & 0x3)));
4602 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4605 case ixgbe_mac_82599EB:
4606 case ixgbe_mac_X540:
4607 if (type == -1) { /* MISC IVAR */
4608 index = (entry & 1) * 8;
4609 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4610 ivar &= ~(0xFF << index);
4611 ivar |= (vector << index);
4612 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4613 } else { /* RX/TX IVARS */
4614 index = (16 * (entry & 1)) + (8 * type);
4615 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4616 ivar &= ~(0xFF << index);
4617 ivar |= (vector << index);
4618 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
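/*
 * Illustrative example of the 82599/X540 IVAR indexing above: for an Rx
 * queue (type 0) with entry = 5, the register is IVAR(5 >> 1) = IVAR(2) and
 * the byte offset is 16 * (5 & 1) + 8 * 0 = 16, i.e. the vector lands in
 * bits 23:16 of IVAR(2).
 */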
4628 void __attribute__((cold))
4629 ixgbe_set_rx_function(struct rte_eth_dev *dev)
4631 uint16_t i, rx_using_sse;
4632 struct ixgbe_adapter *adapter =
4633 (struct ixgbe_adapter *)dev->data->dev_private;
4636 * In order to allow Vector Rx there are a few configuration
4637 * conditions to be met and Rx Bulk Allocation should be allowed.
4639 if (ixgbe_rx_vec_dev_conf_condition_check(dev) ||
4640 !adapter->rx_bulk_alloc_allowed) {
4641 PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx "
4642 "preconditions or RTE_IXGBE_INC_VECTOR is "
4644 dev->data->port_id);
4646 adapter->rx_vec_allowed = false;
4650 * Initialize the appropriate LRO callback.
4652 * If all queues satisfy the bulk allocation preconditions
4653 * (hw->rx_bulk_alloc_allowed is TRUE) then we may use bulk allocation.
4654 * Otherwise use a single allocation version.
4656 if (dev->data->lro) {
4657 if (adapter->rx_bulk_alloc_allowed) {
4658 PMD_INIT_LOG(DEBUG, "LRO is requested. Using a bulk "
4659 "allocation version");
4660 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
4662 PMD_INIT_LOG(DEBUG, "LRO is requested. Using a single "
4663 "allocation version");
4664 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
4666 } else if (dev->data->scattered_rx) {
4668 * Set the non-LRO scattered callback: there are Vector and
4669 * single allocation versions.
4671 if (adapter->rx_vec_allowed) {
4672 PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
4673 "callback (port=%d).",
4674 dev->data->port_id);
4676 dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
4677 } else if (adapter->rx_bulk_alloc_allowed) {
4678 PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
4679 "allocation callback (port=%d).",
4680 dev->data->port_id);
4681 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
PMD_INIT_LOG(DEBUG, "Using Regular (non-vector, "
4684 "single allocation) "
4685 "Scattered Rx callback "
4687 dev->data->port_id);
4689 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
/*
 * Below we set "simple" callbacks according to port/queue parameters.
 * If parameters allow we are going to choose between the following
 * callbacks:
 *    - Vector
 *    - Bulk Allocation
 *    - Single buffer allocation (the simplest one)
 */
4699 } else if (adapter->rx_vec_allowed) {
PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
    "burst size is no less than %d (port=%d).",
4702 RTE_IXGBE_DESCS_PER_LOOP,
4703 dev->data->port_id);
4705 dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
4706 } else if (adapter->rx_bulk_alloc_allowed) {
4707 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
4708 "satisfied. Rx Burst Bulk Alloc function "
4709 "will be used on port=%d.",
4710 dev->data->port_id);
4712 dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
4714 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
4715 "satisfied, or Scattered Rx is requested "
4717 dev->data->port_id);
4719 dev->rx_pkt_burst = ixgbe_recv_pkts;
4722 /* Propagate information about RX function choice through all queues. */
4725 (dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec ||
4726 dev->rx_pkt_burst == ixgbe_recv_pkts_vec);
4728 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4729 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
4731 rxq->rx_using_sse = rx_using_sse;
4732 #ifdef RTE_LIBRTE_SECURITY
4733 rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
4734 DEV_RX_OFFLOAD_SECURITY);
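/*
 * Illustrative sketch (not part of the driver): whichever burst callback is
 * selected above is reached through rte_eth_rx_burst(). When the vector path
 * is chosen, the application burst size should be at least
 * RTE_IXGBE_DESCS_PER_LOOP, as the log message above notes. The helper name
 * and guard macro are hypothetical.
 */
#ifdef IXGBE_RXTX_DOC_EXAMPLES
static void
example_rx_poll(uint16_t port_id, uint16_t queue_id)
{
    struct rte_mbuf *pkts[32];    /* burst >= RTE_IXGBE_DESCS_PER_LOOP */
    uint16_t nb_rx, i;

    nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
    for (i = 0; i < nb_rx; i++)
        rte_pktmbuf_free(pkts[i]);
}
#endif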
4740 * ixgbe_set_rsc - configure RSC related port HW registers
* Configures the port's RSC-related registers according to chapter 4.6.7.2
* of the 82599 Spec (the x540 configuration is virtually the same).
4747 * Returns 0 in case of success or a non-zero error code
4750 ixgbe_set_rsc(struct rte_eth_dev *dev)
4752 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
4753 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4754 struct rte_eth_dev_info dev_info = { 0 };
4755 bool rsc_capable = false;
4761 dev->dev_ops->dev_infos_get(dev, &dev_info);
4762 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
4765 if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
4766 PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
4771 /* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
4773 if ((rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC) &&
4774 (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
* According to chapter 4.6.7.2 of the Spec Rev.
4777 * 3.0 RSC configuration requires HW CRC stripping being
4778 * enabled. If user requested both HW CRC stripping off
4779 * and RSC on - return an error.
4781 PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC "
4786 /* RFCTL configuration */
4787 rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
4788 if ((rsc_capable) && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
* Since NFS packet coalescing is not supported - clear
4791 * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
4794 rfctl &= ~(IXGBE_RFCTL_RSC_DIS | IXGBE_RFCTL_NFSW_DIS |
4795 IXGBE_RFCTL_NFSR_DIS);
4797 rfctl |= IXGBE_RFCTL_RSC_DIS;
4798 IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
4800 /* If LRO hasn't been requested - we are done here. */
4801 if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
4804 /* Set RDRXCTL.RSCACKC bit */
4805 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
4806 rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
4807 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
4809 /* Per-queue RSC configuration (chapter 4.6.7.2.2 of 82599 Spec) */
4810 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4811 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
4813 IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxq->reg_idx));
4815 IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxq->reg_idx));
4817 IXGBE_READ_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx));
4819 IXGBE_READ_REG(hw, IXGBE_EITR(rxq->reg_idx));
4822 * ixgbe PMD doesn't support header-split at the moment.
4824 * Following the 4.6.7.2.1 chapter of the 82599/x540
4825 * Spec if RSC is enabled the SRRCTL[n].BSIZEHEADER
4826 * should be configured even if header split is not
* enabled. We will configure it to 128 bytes, following the
4828 * recommendation in the spec.
4830 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
4831 srrctl |= (128 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
4832 IXGBE_SRRCTL_BSIZEHDR_MASK;
* TODO: Consider setting the Receive Descriptor Minimum
* Threshold Size for the RSC case. This is not an obviously
* beneficial option, but one worth considering...
4840 rscctl |= IXGBE_RSCCTL_RSCEN;
4841 rscctl |= ixgbe_get_rscctl_maxdesc(rxq->mb_pool);
4842 psrtype |= IXGBE_PSRTYPE_TCPHDR;
4845 * RSC: Set ITR interval corresponding to 2K ints/s.
4847 * Full-sized RSC aggregations for a 10Gb/s link will
4848 * arrive at about 20K aggregation/s rate.
* A 2K ints/s rate will cause only 10% of the
4851 * aggregations to be closed due to the interrupt timer
4852 * expiration for a streaming at wire-speed case.
4854 * For a sparse streaming case this setting will yield
4855 * at most 500us latency for a single RSC aggregation.
4857 eitr &= ~IXGBE_EITR_ITR_INT_MASK;
4858 eitr |= IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT);
4859 eitr |= IXGBE_EITR_CNT_WDIS;
4861 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
4862 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxq->reg_idx), rscctl);
4863 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
4864 IXGBE_WRITE_REG(hw, IXGBE_EITR(rxq->reg_idx), eitr);
* RSC requires the mapping of the queue to the interrupt vector.
4870 ixgbe_set_ivar(dev, rxq->reg_idx, i, 0);
4875 PMD_INIT_LOG(DEBUG, "enabling LRO mode");
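/*
 * Illustrative sketch (not part of the driver): enabling the LRO (RSC) path
 * configured above from the application side. The port must keep CRC
 * stripping enabled, as checked in ixgbe_set_rsc(). The helper name and
 * guard macro are hypothetical.
 */
#ifdef IXGBE_RXTX_DOC_EXAMPLES
static void
example_enable_lro(struct rte_eth_conf *conf)
{
    /* Request LRO; do not combine it with DEV_RX_OFFLOAD_KEEP_CRC */
    conf->rxmode.offloads |= DEV_RX_OFFLOAD_TCP_LRO;
    conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
}
#endif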
4881 * Initializes Receive Unit.
4883 int __attribute__((cold))
4884 ixgbe_dev_rx_init(struct rte_eth_dev *dev)
4886 struct ixgbe_hw *hw;
4887 struct ixgbe_rx_queue *rxq;
4898 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
4901 PMD_INIT_FUNC_TRACE();
4902 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4905 * Make sure receives are disabled while setting
4906 * up the RX context (registers, descriptor rings, etc.).
4908 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4909 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
4911 /* Enable receipt of broadcasted frames */
4912 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4913 fctrl |= IXGBE_FCTRL_BAM;
4914 fctrl |= IXGBE_FCTRL_DPF;
4915 fctrl |= IXGBE_FCTRL_PMCF;
4916 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4919 * Configure CRC stripping, if any.
4921 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4922 if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
4923 hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
4925 hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
4928 * Configure jumbo frame support, if any.
4930 if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
4931 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
4932 maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
4933 maxfrs &= 0x0000FFFF;
4934 maxfrs |= (rx_conf->max_rx_pkt_len << 16);
4935 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
4937 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
4940 * If loopback mode is configured, set LPBK bit.
4942 if (dev->data->dev_conf.lpbk_mode != 0) {
4943 rc = ixgbe_check_supported_loopback_mode(dev);
4945 PMD_INIT_LOG(ERR, "Unsupported loopback mode");
4948 hlreg0 |= IXGBE_HLREG0_LPBK;
4950 hlreg0 &= ~IXGBE_HLREG0_LPBK;
4953 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4956 * Assume no header split and no VLAN strip support
* on any Rx queue first.
4959 rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
4960 /* Setup RX queues */
4961 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4962 rxq = dev->data->rx_queues[i];
4965 * Reset crc_len in case it was changed after queue setup by a
4966 * call to configure.
4968 if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
4969 rxq->crc_len = RTE_ETHER_CRC_LEN;
4973 /* Setup the Base and Length of the Rx Descriptor Rings */
4974 bus_addr = rxq->rx_ring_phys_addr;
4975 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rxq->reg_idx),
4976 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4977 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rxq->reg_idx),
4978 (uint32_t)(bus_addr >> 32));
4979 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rxq->reg_idx),
4980 rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
4981 IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
4982 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), 0);
4984 /* Configure the SRRCTL register */
4985 srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
4987 /* Set if packets are dropped when no descriptors available */
4989 srrctl |= IXGBE_SRRCTL_DROP_EN;
4992 * Configure the RX buffer size in the BSIZEPACKET field of
4993 * the SRRCTL register of the queue.
* The value is in 1 KB resolution. Valid values range from
4997 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
4998 RTE_PKTMBUF_HEADROOM);
4999 srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
5000 IXGBE_SRRCTL_BSIZEPKT_MASK);
5002 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
5004 buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
5005 IXGBE_SRRCTL_BSIZEPKT_SHIFT);
/* Add the double VLAN tag length to support dual VLAN (QinQ) frames */
5008 if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
5009 2 * IXGBE_VLAN_TAG_SIZE > buf_size)
5010 dev->data->scattered_rx = 1;
5011 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
5012 rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
5015 if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
5016 dev->data->scattered_rx = 1;
5019 * Device configured with multiple RX queues.
5021 ixgbe_dev_mq_rx_configure(dev);
5024 * Setup the Checksum Register.
5025 * Disable Full-Packet Checksum which is mutually exclusive with RSS.
* Enable IP/L4 checksum computation by hardware if requested to do so.
5028 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
5029 rxcsum |= IXGBE_RXCSUM_PCSD;
5030 if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
5031 rxcsum |= IXGBE_RXCSUM_IPPCSE;
5033 rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
5035 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
5037 if (hw->mac.type == ixgbe_mac_82599EB ||
5038 hw->mac.type == ixgbe_mac_X540) {
5039 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
5040 if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
5041 rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
5043 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
5044 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
5045 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
5048 rc = ixgbe_set_rsc(dev);
5052 ixgbe_set_rx_function(dev);
5058 * Initializes Transmit Unit.
5060 void __attribute__((cold))
5061 ixgbe_dev_tx_init(struct rte_eth_dev *dev)
5063 struct ixgbe_hw *hw;
5064 struct ixgbe_tx_queue *txq;
5070 PMD_INIT_FUNC_TRACE();
5071 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* Enable TX CRC (checksum offload requirement) and hw padding
 * (TSO requirement)
 */
5076 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
5077 hlreg0 |= (IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN);
5078 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
5080 /* Setup the Base and Length of the Tx Descriptor Rings */
5081 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5082 txq = dev->data->tx_queues[i];
5084 bus_addr = txq->tx_ring_phys_addr;
5085 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(txq->reg_idx),
5086 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
5087 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(txq->reg_idx),
5088 (uint32_t)(bus_addr >> 32));
5089 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(txq->reg_idx),
5090 txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
5091 /* Setup the HW Tx Head and TX Tail descriptor pointers */
5092 IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
5093 IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
5096 * Disable Tx Head Writeback RO bit, since this hoses
5097 * bookkeeping if things aren't delivered in order.
5099 switch (hw->mac.type) {
5100 case ixgbe_mac_82598EB:
5101 txctrl = IXGBE_READ_REG(hw,
5102 IXGBE_DCA_TXCTRL(txq->reg_idx));
5103 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
5104 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx),
5108 case ixgbe_mac_82599EB:
5109 case ixgbe_mac_X540:
5110 case ixgbe_mac_X550:
5111 case ixgbe_mac_X550EM_x:
5112 case ixgbe_mac_X550EM_a:
5114 txctrl = IXGBE_READ_REG(hw,
5115 IXGBE_DCA_TXCTRL_82599(txq->reg_idx));
5116 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
5117 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx),
5123 /* Device configured with multiple TX queues. */
5124 ixgbe_dev_mq_tx_configure(dev);
5128 * Check if requested loopback mode is supported
5131 ixgbe_check_supported_loopback_mode(struct rte_eth_dev *dev)
5133 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5135 if (dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_TX_RX)
5136 if (hw->mac.type == ixgbe_mac_82599EB ||
5137 hw->mac.type == ixgbe_mac_X540 ||
5138 hw->mac.type == ixgbe_mac_X550 ||
5139 hw->mac.type == ixgbe_mac_X550EM_x ||
5140 hw->mac.type == ixgbe_mac_X550EM_a)
5147 * Set up link for 82599 loopback mode Tx->Rx.
5149 static inline void __attribute__((cold))
5150 ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
5152 PMD_INIT_FUNC_TRACE();
5154 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
5155 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM) !=
5157 PMD_INIT_LOG(ERR, "Could not enable loopback mode");
5166 IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU);
5167 ixgbe_reset_pipeline_82599(hw);
5169 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
5175 * Start Transmit and Receive Units.
5177 int __attribute__((cold))
5178 ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
5180 struct ixgbe_hw *hw;
5181 struct ixgbe_tx_queue *txq;
5182 struct ixgbe_rx_queue *rxq;
5189 PMD_INIT_FUNC_TRACE();
5190 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5192 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5193 txq = dev->data->tx_queues[i];
5194 /* Setup Transmit Threshold Registers */
5195 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
5196 txdctl |= txq->pthresh & 0x7F;
5197 txdctl |= ((txq->hthresh & 0x7F) << 8);
5198 txdctl |= ((txq->wthresh & 0x7F) << 16);
5199 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
5202 if (hw->mac.type != ixgbe_mac_82598EB) {
5203 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
5204 dmatxctl |= IXGBE_DMATXCTL_TE;
5205 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
5208 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5209 txq = dev->data->tx_queues[i];
5210 if (!txq->tx_deferred_start) {
5211 ret = ixgbe_dev_tx_queue_start(dev, i);
5217 for (i = 0; i < dev->data->nb_rx_queues; i++) {
5218 rxq = dev->data->rx_queues[i];
5219 if (!rxq->rx_deferred_start) {
5220 ret = ixgbe_dev_rx_queue_start(dev, i);
5226 /* Enable Receive engine */
5227 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
5228 if (hw->mac.type == ixgbe_mac_82598EB)
5229 rxctrl |= IXGBE_RXCTRL_DMBYPS;
5230 rxctrl |= IXGBE_RXCTRL_RXEN;
5231 hw->mac.ops.enable_rx_dma(hw, rxctrl);
5233 /* If loopback mode is enabled, set up the link accordingly */
5234 if (dev->data->dev_conf.lpbk_mode != 0) {
5235 if (hw->mac.type == ixgbe_mac_82599EB)
5236 ixgbe_setup_loopback_link_82599(hw);
5237 else if (hw->mac.type == ixgbe_mac_X540 ||
5238 hw->mac.type == ixgbe_mac_X550 ||
5239 hw->mac.type == ixgbe_mac_X550EM_x ||
5240 hw->mac.type == ixgbe_mac_X550EM_a)
5241 ixgbe_setup_loopback_link_x540_x550(hw, true);
5244 #ifdef RTE_LIBRTE_SECURITY
5245 if ((dev->data->dev_conf.rxmode.offloads &
5246 DEV_RX_OFFLOAD_SECURITY) ||
5247 (dev->data->dev_conf.txmode.offloads &
5248 DEV_TX_OFFLOAD_SECURITY)) {
5249 ret = ixgbe_crypto_enable_ipsec(dev);
5252 "ixgbe_crypto_enable_ipsec fails with %d.",
5263 * Start Receive Units for specified queue.
5265 int __attribute__((cold))
5266 ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
5268 struct ixgbe_hw *hw;
5269 struct ixgbe_rx_queue *rxq;
5273 PMD_INIT_FUNC_TRACE();
5274 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5276 rxq = dev->data->rx_queues[rx_queue_id];
5278 /* Allocate buffers for descriptor rings */
5279 if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
5280 PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
5284 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5285 rxdctl |= IXGBE_RXDCTL_ENABLE;
5286 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
5288 /* Wait until RX Enable ready */
5289 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5292 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5293 } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
5295 PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", rx_queue_id);
5297 IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
5298 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
5299 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
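/*
 * Illustrative sketch (not part of the driver): how an application reaches
 * ixgbe_dev_rx_queue_start() for a deferred-start queue through the ethdev
 * API. The helper name, guard macro and port/queue ids are hypothetical.
 */
#ifdef IXGBE_RXTX_DOC_EXAMPLES
static int
example_deferred_rx_start(uint16_t port_id, uint16_t queue_id,
    uint16_t nb_desc, struct rte_mempool *mp)
{
    struct rte_eth_rxconf rxconf;

    /* Mark the queue as deferred: rte_eth_dev_start() will skip it */
    memset(&rxconf, 0, sizeof(rxconf));
    rxconf.rx_deferred_start = 1;
    if (rte_eth_rx_queue_setup(port_id, queue_id, nb_desc,
            rte_eth_dev_socket_id(port_id), &rxconf, mp) != 0)
        return -1;
    /* ... rte_eth_dev_start(port_id) happens elsewhere ... */
    /* Start the queue explicitly later */
    return rte_eth_rx_queue_start(port_id, queue_id);
}
#endif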
5305 * Stop Receive Units for specified queue.
5307 int __attribute__((cold))
5308 ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
5310 struct ixgbe_hw *hw;
5311 struct ixgbe_adapter *adapter =
5312 (struct ixgbe_adapter *)dev->data->dev_private;
5313 struct ixgbe_rx_queue *rxq;
5317 PMD_INIT_FUNC_TRACE();
5318 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5320 rxq = dev->data->rx_queues[rx_queue_id];
5322 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5323 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
5324 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
5326 /* Wait until RX Enable bit clear */
5327 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5330 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5331 } while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE));
5333 PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id);
5335 rte_delay_us(RTE_IXGBE_WAIT_100_US);
5337 ixgbe_rx_queue_release_mbufs(rxq);
5338 ixgbe_reset_rx_queue(adapter, rxq);
5339 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
5346 * Start Transmit Units for specified queue.
5348 int __attribute__((cold))
5349 ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
5351 struct ixgbe_hw *hw;
5352 struct ixgbe_tx_queue *txq;
5356 PMD_INIT_FUNC_TRACE();
5357 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5359 txq = dev->data->tx_queues[tx_queue_id];
5360 IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
5361 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
5362 txdctl |= IXGBE_TXDCTL_ENABLE;
5363 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
5365 /* Wait until TX Enable ready */
5366 if (hw->mac.type == ixgbe_mac_82599EB) {
5367 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5370 txdctl = IXGBE_READ_REG(hw,
5371 IXGBE_TXDCTL(txq->reg_idx));
5372 } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
5374 PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d",
5378 IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
5379 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
5385 * Stop Transmit Units for specified queue.
5387 int __attribute__((cold))
5388 ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
5390 struct ixgbe_hw *hw;
5391 struct ixgbe_tx_queue *txq;
5393 uint32_t txtdh, txtdt;
5396 PMD_INIT_FUNC_TRACE();
5397 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5399 txq = dev->data->tx_queues[tx_queue_id];
5401 /* Wait until TX queue is empty */
5402 if (hw->mac.type == ixgbe_mac_82599EB) {
5403 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5405 rte_delay_us(RTE_IXGBE_WAIT_100_US);
5406 txtdh = IXGBE_READ_REG(hw,
5407 IXGBE_TDH(txq->reg_idx));
5408 txtdt = IXGBE_READ_REG(hw,
5409 IXGBE_TDT(txq->reg_idx));
5410 } while (--poll_ms && (txtdh != txtdt));
5413 "Tx Queue %d is not empty when stopping.",
5417 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
5418 txdctl &= ~IXGBE_TXDCTL_ENABLE;
5419 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
5421 /* Wait until TX Enable bit clear */
5422 if (hw->mac.type == ixgbe_mac_82599EB) {
5423 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5426 txdctl = IXGBE_READ_REG(hw,
5427 IXGBE_TXDCTL(txq->reg_idx));
5428 } while (--poll_ms && (txdctl & IXGBE_TXDCTL_ENABLE));
5430 PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d",
5434 if (txq->ops != NULL) {
5435 txq->ops->release_mbufs(txq);
5436 txq->ops->reset(txq);
5438 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
5444 ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
5445 struct rte_eth_rxq_info *qinfo)
5447 struct ixgbe_rx_queue *rxq;
5449 rxq = dev->data->rx_queues[queue_id];
5451 qinfo->mp = rxq->mb_pool;
5452 qinfo->scattered_rx = dev->data->scattered_rx;
5453 qinfo->nb_desc = rxq->nb_rx_desc;
5455 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
5456 qinfo->conf.rx_drop_en = rxq->drop_en;
5457 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
5458 qinfo->conf.offloads = rxq->offloads;
5462 ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
5463 struct rte_eth_txq_info *qinfo)
5465 struct ixgbe_tx_queue *txq;
5467 txq = dev->data->tx_queues[queue_id];
5469 qinfo->nb_desc = txq->nb_tx_desc;
5471 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
5472 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
5473 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
5475 qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
5476 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
5477 qinfo->conf.offloads = txq->offloads;
5478 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
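/*
 * Illustrative sketch (not part of the driver): the info getters above are
 * reached through rte_eth_rx_queue_info_get()/rte_eth_tx_queue_info_get().
 * The helper name and guard macro are hypothetical.
 */
#ifdef IXGBE_RXTX_DOC_EXAMPLES
static void
example_dump_queue_info(uint16_t port_id, uint16_t queue_id)
{
    struct rte_eth_rxq_info rx_qinfo;
    struct rte_eth_txq_info tx_qinfo;

    if (rte_eth_rx_queue_info_get(port_id, queue_id, &rx_qinfo) == 0)
        printf("rxq %u: %u descriptors\n", queue_id, rx_qinfo.nb_desc);
    if (rte_eth_tx_queue_info_get(port_id, queue_id, &tx_qinfo) == 0)
        printf("txq %u: %u descriptors\n", queue_id, tx_qinfo.nb_desc);
}
#endif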
5482 * [VF] Initializes Receive Unit.
5484 int __attribute__((cold))
5485 ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
5487 struct ixgbe_hw *hw;
5488 struct ixgbe_rx_queue *rxq;
5489 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
5491 uint32_t srrctl, psrtype = 0;
5496 PMD_INIT_FUNC_TRACE();
5497 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5499 if (rte_is_power_of_2(dev->data->nb_rx_queues) == 0) {
PMD_INIT_LOG(ERR, "The number of Rx queues is invalid; "
    "it must be a power of 2");
5505 if (dev->data->nb_rx_queues > hw->mac.max_rx_queues) {
PMD_INIT_LOG(ERR, "The number of Rx queues is invalid; "
    "it must be less than or equal to %d",
5508 hw->mac.max_rx_queues);
* When the VF driver issues an IXGBE_VF_RESET request, the PF driver
* disables VF receipt of packets if the PF MTU is > 1500.
* This is done to work around an 82599 limitation that requires
* the PF and all VFs to share the same MTU.
* The PF driver only re-enables VF packet receipt when the VF driver
* issues an IXGBE_VF_SET_LPE request.
* In the meantime, the VF device cannot be used, even if the VF driver
* and the Guest VM network stack are ready to accept packets with a
* size up to the PF MTU.
* As a work-around to this PF behaviour, force the call to
* ixgbevf_rlpml_set_vf even if jumbo frames are not used. This way,
* VF packet reception works in all cases.
5526 ixgbevf_rlpml_set_vf(hw,
5527 (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
5530 * Assume no header split and no VLAN strip support
* on any Rx queue first.
5533 rxmode->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
5534 /* Setup RX queues */
5535 for (i = 0; i < dev->data->nb_rx_queues; i++) {
5536 rxq = dev->data->rx_queues[i];
5538 /* Allocate buffers for descriptor rings */
5539 ret = ixgbe_alloc_rx_queue_mbufs(rxq);
5543 /* Setup the Base and Length of the Rx Descriptor Rings */
5544 bus_addr = rxq->rx_ring_phys_addr;
5546 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
5547 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
5548 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
5549 (uint32_t)(bus_addr >> 32));
5550 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
5551 rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
5552 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0);
5553 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0);
5556 /* Configure the SRRCTL register */
5557 srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
5559 /* Set if packets are dropped when no descriptors available */
5561 srrctl |= IXGBE_SRRCTL_DROP_EN;
5564 * Configure the RX buffer size in the BSIZEPACKET field of
5565 * the SRRCTL register of the queue.
* The value is in 1 KB resolution. Valid values range from
5569 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
5570 RTE_PKTMBUF_HEADROOM);
5571 srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
5572 IXGBE_SRRCTL_BSIZEPKT_MASK);
5575 * VF modification to write virtual function SRRCTL register
5577 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), srrctl);
5579 buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
5580 IXGBE_SRRCTL_BSIZEPKT_SHIFT);
5582 if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
/* Add the double VLAN tag length to support dual VLAN (QinQ) frames */
5584 (rxmode->max_rx_pkt_len +
5585 2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
5586 if (!dev->data->scattered_rx)
5587 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
5588 dev->data->scattered_rx = 1;
5591 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
5592 rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
5595 /* Set RQPL for VF RSS according to max Rx queue */
5596 psrtype |= (dev->data->nb_rx_queues >> 1) <<
5597 IXGBE_PSRTYPE_RQPL_SHIFT;
5598 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
5600 ixgbe_set_rx_function(dev);
5606 * [VF] Initializes Transmit Unit.
5608 void __attribute__((cold))
5609 ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
5611 struct ixgbe_hw *hw;
5612 struct ixgbe_tx_queue *txq;
5617 PMD_INIT_FUNC_TRACE();
5618 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5620 /* Setup the Base and Length of the Tx Descriptor Rings */
5621 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5622 txq = dev->data->tx_queues[i];
5623 bus_addr = txq->tx_ring_phys_addr;
5624 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
5625 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
5626 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i),
5627 (uint32_t)(bus_addr >> 32));
5628 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
5629 txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
5630 /* Setup the HW Tx Head and TX Tail descriptor pointers */
5631 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
5632 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
5635 * Disable Tx Head Writeback RO bit, since this hoses
5636 * bookkeeping if things aren't delivered in order.
5638 txctrl = IXGBE_READ_REG(hw,
5639 IXGBE_VFDCA_TXCTRL(i));
5640 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
5641 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i),
5647 * [VF] Start Transmit and Receive Units.
5649 void __attribute__((cold))
5650 ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
5652 struct ixgbe_hw *hw;
5653 struct ixgbe_tx_queue *txq;
5654 struct ixgbe_rx_queue *rxq;
5660 PMD_INIT_FUNC_TRACE();
5661 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5663 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5664 txq = dev->data->tx_queues[i];
5665 /* Setup Transmit Threshold Registers */
5666 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
5667 txdctl |= txq->pthresh & 0x7F;
5668 txdctl |= ((txq->hthresh & 0x7F) << 8);
5669 txdctl |= ((txq->wthresh & 0x7F) << 16);
5670 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
5673 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5675 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
5676 txdctl |= IXGBE_TXDCTL_ENABLE;
5677 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
5680 /* Wait until TX Enable ready */
5683 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
5684 } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
5686 PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i);
5688 for (i = 0; i < dev->data->nb_rx_queues; i++) {
5690 rxq = dev->data->rx_queues[i];
5692 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
5693 rxdctl |= IXGBE_RXDCTL_ENABLE;
5694 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
5696 /* Wait until RX Enable ready */
5700 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
5701 } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
5703 PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i);
5705 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1);
5711 ixgbe_rss_conf_init(struct ixgbe_rte_flow_rss_conf *out,
5712 const struct rte_flow_action_rss *in)
5714 if (in->key_len > RTE_DIM(out->key) ||
5715 in->queue_num > RTE_DIM(out->queue))
5717 out->conf = (struct rte_flow_action_rss){
5721 .key_len = in->key_len,
5722 .queue_num = in->queue_num,
5723 .key = memcpy(out->key, in->key, in->key_len),
5724 .queue = memcpy(out->queue, in->queue,
5725 sizeof(*in->queue) * in->queue_num),
5731 ixgbe_action_rss_same(const struct rte_flow_action_rss *comp,
5732 const struct rte_flow_action_rss *with)
5734 return (comp->func == with->func &&
5735 comp->level == with->level &&
5736 comp->types == with->types &&
5737 comp->key_len == with->key_len &&
5738 comp->queue_num == with->queue_num &&
5739 !memcmp(comp->key, with->key, with->key_len) &&
5740 !memcmp(comp->queue, with->queue,
5741 sizeof(*with->queue) * with->queue_num));
5745 ixgbe_config_rss_filter(struct rte_eth_dev *dev,
5746 struct ixgbe_rte_flow_rss_conf *conf, bool add)
5748 struct ixgbe_hw *hw;
5752 uint16_t sp_reta_size;
5754 struct rte_eth_rss_conf rss_conf = {
5755 .rss_key = conf->conf.key_len ?
5756 (void *)(uintptr_t)conf->conf.key : NULL,
5757 .rss_key_len = conf->conf.key_len,
5758 .rss_hf = conf->conf.types,
5760 struct ixgbe_filter_info *filter_info =
5761 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5763 PMD_INIT_FUNC_TRACE();
5764 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5766 sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
5769 if (ixgbe_action_rss_same(&filter_info->rss_info.conf,
5771 ixgbe_rss_disable(dev);
5772 memset(&filter_info->rss_info, 0,
5773 sizeof(struct ixgbe_rte_flow_rss_conf));
5779 if (filter_info->rss_info.conf.queue_num)
5781 /* Fill in redirection table
5782 * The byte-swap is needed because NIC registers are in
5783 * little-endian order.
5786 for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
5787 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
5789 if (j == conf->conf.queue_num)
5791 reta = (reta << 8) | conf->conf.queue[j];
5793 IXGBE_WRITE_REG(hw, reta_reg,
5797 /* Configure the RSS key and the RSS protocols used to compute
5798 * the RSS hash of input packets.
5800 if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
5801 ixgbe_rss_disable(dev);
5804 if (rss_conf.rss_key == NULL)
5805 rss_conf.rss_key = rss_intel_key; /* Default hash key */
5806 ixgbe_hw_rss_hash_set(hw, &rss_conf);
5808 if (ixgbe_rss_conf_init(&filter_info->rss_info, &conf->conf))
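/*
 * Illustrative sketch (not part of the driver): building the rte_flow RSS
 * action that ixgbe_rss_conf_init() copies into driver storage. The helper
 * name, guard macro, queue list and key are hypothetical examples.
 */
#ifdef IXGBE_RXTX_DOC_EXAMPLES
static void
example_fill_rss_action(struct rte_flow_action_rss *rss,
    const uint16_t *queues, unsigned int nb_queues,
    const uint8_t *key, unsigned int key_len)
{
    memset(rss, 0, sizeof(*rss));
    rss->func = RTE_ETH_HASH_FUNCTION_DEFAULT;
    rss->level = 0;
    rss->types = ETH_RSS_IP | ETH_RSS_TCP;
    rss->key = key;
    rss->key_len = key_len;
    rss->queue = queues;
    rss->queue_num = nb_queues;
}
#endif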
5814 /* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */
5816 ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
5822 ixgbe_recv_pkts_vec(
5823 void __rte_unused *rx_queue,
5824 struct rte_mbuf __rte_unused **rx_pkts,
5825 uint16_t __rte_unused nb_pkts)
5831 ixgbe_recv_scattered_pkts_vec(
5832 void __rte_unused *rx_queue,
5833 struct rte_mbuf __rte_unused **rx_pkts,
5834 uint16_t __rte_unused nb_pkts)
5840 ixgbe_rxq_vec_setup(struct ixgbe_rx_queue __rte_unused *rxq)