1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation.
 * Copyright 2014 6WIND S.A.
 */
17 #include <rte_byteorder.h>
18 #include <rte_common.h>
19 #include <rte_cycles.h>
21 #include <rte_debug.h>
22 #include <rte_interrupts.h>
24 #include <rte_memory.h>
25 #include <rte_memzone.h>
26 #include <rte_launch.h>
28 #include <rte_per_lcore.h>
29 #include <rte_lcore.h>
30 #include <rte_atomic.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
35 #include <rte_ether.h>
36 #include <rte_ethdev_driver.h>
#include <rte_prefetch.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
41 #include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_ip.h>
#include <rte_net.h>
46 #include "ixgbe_logs.h"
47 #include "base/ixgbe_api.h"
48 #include "base/ixgbe_vf.h"
49 #include "ixgbe_ethdev.h"
50 #include "base/ixgbe_dcb.h"
51 #include "base/ixgbe_common.h"
52 #include "ixgbe_rxtx.h"
54 #ifdef RTE_LIBRTE_IEEE1588
#define IXGBE_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
#else
#define IXGBE_TX_IEEE1588_TMST 0
#endif
/* Bit Mask to indicate what bits are required for building TX context */
#define IXGBE_TX_OFFLOAD_MASK (			 \
		PKT_TX_OUTER_IPV6 |		 \
		PKT_TX_OUTER_IPV4 |		 \
		PKT_TX_IPV6 |			 \
		PKT_TX_IPV4 |			 \
		PKT_TX_VLAN_PKT |		 \
		PKT_TX_IP_CKSUM |		 \
		PKT_TX_L4_MASK |		 \
		PKT_TX_TCP_SEG |		 \
		PKT_TX_MACSEC |			 \
		PKT_TX_OUTER_IP_CKSUM |		 \
71 PKT_TX_SEC_OFFLOAD | \
72 IXGBE_TX_IEEE1588_TMST)
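/*
 * Any flag in PKT_TX_OFFLOAD_MASK that is absent from IXGBE_TX_OFFLOAD_MASK
 * is an offload this hardware cannot perform; the XOR below yields exactly
 * that set, which ixgbe_prep_pkts() uses to reject unsupported requests.
 */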
74 #define IXGBE_TX_OFFLOAD_NOTSUP_MASK \
75 (PKT_TX_OFFLOAD_MASK ^ IXGBE_TX_OFFLOAD_MASK)
78 #define RTE_PMD_USE_PREFETCH
81 #ifdef RTE_PMD_USE_PREFETCH
/*
 * Prefetch a cache line into all cache levels.
 */
#define rte_ixgbe_prefetch(p)   rte_prefetch0(p)
#else
#define rte_ixgbe_prefetch(p)   do {} while (0)
#endif
90 #ifdef RTE_IXGBE_INC_VECTOR
uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
				    uint16_t nb_pkts);
#endif
95 /*********************************************************************
99 **********************************************************************/
/*
 * Check for descriptors with their DD bit set and free mbufs.
 * Return the total number of buffers freed.
 */
105 static __rte_always_inline int
106 ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
{
	struct ixgbe_tx_entry *txep;
	uint32_t status;
	int i, nb_free = 0;
	struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];
113 /* check DD bit on threshold descriptor */
114 status = txq->tx_ring[txq->tx_next_dd].wb.status;
	if (!(status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD)))
		return 0;
	/*
	 * first buffer to free from S/W ring is at index
	 * tx_next_dd - (tx_rs_thresh-1)
	 */
122 txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]);
124 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
125 /* free buffers one at a time */
		m = rte_pktmbuf_prefree_seg(txep->mbuf);
		txep->mbuf = NULL;
		if (unlikely(m == NULL))
			continue;
132 if (nb_free >= RTE_IXGBE_TX_MAX_FREE_BUF_SZ ||
133 (nb_free > 0 && m->pool != free[0]->pool)) {
134 rte_mempool_put_bulk(free[0]->pool,
					     (void **)free, nb_free);
			nb_free = 0;
		}

		free[nb_free++] = m;
	}
	if (nb_free > 0)
		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
145 /* buffers were freed, update counters */
146 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
147 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
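	/*
	 * tx_next_dd always points at the last descriptor of a tx_rs_thresh
	 * sized block, so after wrapping it restarts at tx_rs_thresh - 1.
	 */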
148 if (txq->tx_next_dd >= txq->nb_tx_desc)
149 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
	return txq->tx_rs_thresh;
}
154 /* Populate 4 descriptors with data from 4 mbufs */
static inline void
tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
{
	uint64_t buf_dma_addr;
	uint32_t pkt_len;
	int i;
162 for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
163 buf_dma_addr = rte_mbuf_data_iova(*pkts);
164 pkt_len = (*pkts)->data_len;
166 /* write data to descriptor */
167 txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
169 txdp->read.cmd_type_len =
170 rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
172 txdp->read.olinfo_status =
173 rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
		rte_prefetch0(&(*pkts)->pool);
	}
}
179 /* Populate 1 descriptor with data from 1 mbuf */
static inline void
tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
{
	uint64_t buf_dma_addr;
	uint32_t pkt_len;
186 buf_dma_addr = rte_mbuf_data_iova(*pkts);
187 pkt_len = (*pkts)->data_len;
189 /* write data to descriptor */
190 txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
191 txdp->read.cmd_type_len =
192 rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
193 txdp->read.olinfo_status =
194 rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
	rte_prefetch0(&(*pkts)->pool);
}
/*
 * Fill H/W descriptor ring with mbuf data.
 * Copy mbuf pointers to the S/W ring.
 */
static inline void
ixgbe_tx_fill_hw_ring(struct ixgbe_tx_queue *txq, struct rte_mbuf **pkts,
		      uint16_t nb_pkts)
{
206 volatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
207 struct ixgbe_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
208 const int N_PER_LOOP = 4;
209 const int N_PER_LOOP_MASK = N_PER_LOOP-1;
	int mainpart, leftover;
	int i, j;
	/*
	 * Process most of the packets in chunks of N pkts. Any
	 * leftover packets will get processed one at a time.
	 */
217 mainpart = (nb_pkts & ((uint32_t) ~N_PER_LOOP_MASK));
218 leftover = (nb_pkts & ((uint32_t) N_PER_LOOP_MASK));
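	/*
	 * Example: nb_pkts = 7 yields mainpart = 4 (one tx4() call)
	 * and leftover = 3 (three tx1() calls).
	 */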
219 for (i = 0; i < mainpart; i += N_PER_LOOP) {
220 /* Copy N mbuf pointers to the S/W ring */
221 for (j = 0; j < N_PER_LOOP; ++j) {
			(txep + i + j)->mbuf = *(pkts + i + j);
		}
		tx4(txdp + i, pkts + i);
	}
227 if (unlikely(leftover > 0)) {
228 for (i = 0; i < leftover; ++i) {
229 (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
			tx1(txdp + mainpart + i, pkts + mainpart + i);
		}
	}
}
235 static inline uint16_t
tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
	     uint16_t nb_pkts)
{
239 struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
	volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring;
	uint16_t n = 0;
	/*
	 * Begin scanning the H/W ring for done descriptors when the
	 * number of available descriptors drops below tx_free_thresh. For
	 * each done descriptor, free the associated buffer.
	 */
248 if (txq->nb_tx_free < txq->tx_free_thresh)
249 ixgbe_tx_free_bufs(txq);
251 /* Only use descriptors that are available */
252 nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
	if (unlikely(nb_pkts == 0))
		return 0;
256 /* Use exactly nb_pkts descriptors */
257 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
	/*
	 * At this point, we know there are enough descriptors in the
	 * ring to transmit all the packets. This assumes that each
	 * mbuf contains a single segment, and that no new offloads
	 * are expected, which would require a new context descriptor.
	 */
	/*
	 * See if we're going to wrap-around. If so, handle the top
	 * of the descriptor ring first, then do the bottom. If not,
	 * the processing looks just like the "bottom" part anyway...
	 */
271 if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
272 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
273 ixgbe_tx_fill_hw_ring(txq, tx_pkts, n);
		/*
		 * We know that the last descriptor in the ring will need to
		 * have its RS bit set because tx_rs_thresh has to be
		 * a divisor of the ring size
		 */
280 tx_r[txq->tx_next_rs].read.cmd_type_len |=
281 rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
		txq->tx_tail = 0;
	}
287 /* Fill H/W descriptor ring with mbuf data */
288 ixgbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
289 txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
	/*
	 * Determine if RS bit should be set
	 * This is what we actually want:
	 *     if ((txq->tx_tail - 1) >= txq->tx_next_rs)
	 * but instead of subtracting 1 and doing >=, we can just do
	 * greater than without subtracting.
	 */
298 if (txq->tx_tail > txq->tx_next_rs) {
299 tx_r[txq->tx_next_rs].read.cmd_type_len |=
300 rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
		txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
						txq->tx_rs_thresh);
303 if (txq->tx_next_rs >= txq->nb_tx_desc)
			txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
	}
	/*
	 * Check for wrap-around. This would only happen if we used
	 * up to the last descriptor in the ring, no more, no less.
	 */
	if (txq->tx_tail >= txq->nb_tx_desc)
		txq->tx_tail = 0;
314 /* update tail pointer */
	rte_wmb();
	IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, txq->tx_tail);

	return nb_pkts;
}
uint16_t
ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
		       uint16_t nb_pkts)
{
	uint16_t nb_tx;
327 /* Try to transmit at least chunks of TX_MAX_BURST pkts */
328 if (likely(nb_pkts <= RTE_PMD_IXGBE_TX_MAX_BURST))
329 return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
	/* transmit more than the max burst, in chunks of TX_MAX_BURST */
	nb_tx = 0;
	while (nb_pkts) {
		uint16_t ret, n;

336 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_TX_MAX_BURST);
337 ret = tx_xmit_pkts(tx_queue, &(tx_pkts[nb_tx]), n);
338 nb_tx = (uint16_t)(nb_tx + ret);
		nb_pkts = (uint16_t)(nb_pkts - ret);
		if (ret < n)
			break;
	}

	return nb_tx;
}
347 #ifdef RTE_IXGBE_INC_VECTOR
static uint16_t
ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
		    uint16_t nb_pkts)
{
	uint16_t nb_tx = 0;
353 struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
	while (nb_pkts) {
		uint16_t ret, num;

		num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
		ret = ixgbe_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
						 num);
		nb_tx += ret;
		nb_pkts -= ret;
		if (ret < num)
			break;
	}

	return nb_tx;
}
#endif
static inline void
ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
373 volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
374 uint64_t ol_flags, union ixgbe_tx_offload tx_offload,
		__rte_unused uint64_t *mdata)
{
377 uint32_t type_tucmd_mlhl;
	uint32_t mss_l4len_idx = 0;
	uint32_t ctx_idx;
380 uint32_t vlan_macip_lens;
381 union ixgbe_tx_offload tx_offload_mask;
382 uint32_t seqnum_seed = 0;
384 ctx_idx = txq->ctx_curr;
385 tx_offload_mask.data[0] = 0;
	tx_offload_mask.data[1] = 0;
	type_tucmd_mlhl = 0;
389 /* Specify which HW CTX to upload. */
390 mss_l4len_idx |= (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT);
392 if (ol_flags & PKT_TX_VLAN_PKT) {
		tx_offload_mask.vlan_tci |= ~0;
	}
396 /* check if TCP segmentation required for this packet */
397 if (ol_flags & PKT_TX_TCP_SEG) {
398 /* implies IP cksum in IPv4 */
399 if (ol_flags & PKT_TX_IP_CKSUM)
400 type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4 |
401 IXGBE_ADVTXD_TUCMD_L4T_TCP |
402 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
		else
			type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV6 |
405 IXGBE_ADVTXD_TUCMD_L4T_TCP |
406 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
408 tx_offload_mask.l2_len |= ~0;
409 tx_offload_mask.l3_len |= ~0;
410 tx_offload_mask.l4_len |= ~0;
411 tx_offload_mask.tso_segsz |= ~0;
412 mss_l4len_idx |= tx_offload.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT;
413 mss_l4len_idx |= tx_offload.l4_len << IXGBE_ADVTXD_L4LEN_SHIFT;
414 } else { /* no TSO, check if hardware checksum is needed */
415 if (ol_flags & PKT_TX_IP_CKSUM) {
416 type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4;
417 tx_offload_mask.l2_len |= ~0;
			tx_offload_mask.l3_len |= ~0;
		}
421 switch (ol_flags & PKT_TX_L4_MASK) {
422 case PKT_TX_UDP_CKSUM:
423 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP |
424 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
425 mss_l4len_idx |= sizeof(struct rte_udp_hdr)
426 << IXGBE_ADVTXD_L4LEN_SHIFT;
427 tx_offload_mask.l2_len |= ~0;
			tx_offload_mask.l3_len |= ~0;
			break;
430 case PKT_TX_TCP_CKSUM:
431 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP |
432 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
433 mss_l4len_idx |= sizeof(struct rte_tcp_hdr)
434 << IXGBE_ADVTXD_L4LEN_SHIFT;
435 tx_offload_mask.l2_len |= ~0;
			tx_offload_mask.l3_len |= ~0;
			break;
438 case PKT_TX_SCTP_CKSUM:
439 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP |
440 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
441 mss_l4len_idx |= sizeof(struct rte_sctp_hdr)
442 << IXGBE_ADVTXD_L4LEN_SHIFT;
443 tx_offload_mask.l2_len |= ~0;
			tx_offload_mask.l3_len |= ~0;
			break;
		default:
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_RSV |
				IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
			break;
		}
	}
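	/*
	 * Note: for tunnel offloads the context descriptor's seqnum_seed
	 * field is reused below to carry the outer IP header length and
	 * the tunnel header length.
	 */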
453 if (ol_flags & PKT_TX_OUTER_IP_CKSUM) {
454 tx_offload_mask.outer_l2_len |= ~0;
455 tx_offload_mask.outer_l3_len |= ~0;
456 tx_offload_mask.l2_len |= ~0;
457 seqnum_seed |= tx_offload.outer_l3_len
458 << IXGBE_ADVTXD_OUTER_IPLEN;
459 seqnum_seed |= tx_offload.l2_len
				<< IXGBE_ADVTXD_TUNNEL_LEN;
	}
462 #ifdef RTE_LIBRTE_SECURITY
463 if (ol_flags & PKT_TX_SEC_OFFLOAD) {
464 union ixgbe_crypto_tx_desc_md *md =
465 (union ixgbe_crypto_tx_desc_md *)mdata;
		seqnum_seed |=
			(IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK & md->sa_idx);
468 type_tucmd_mlhl |= md->enc ?
469 (IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
470 IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN) : 0;
		type_tucmd_mlhl |=
			(md->pad_len & IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK);
473 tx_offload_mask.sa_idx |= ~0;
		tx_offload_mask.sec_pad_len |= ~0;
	}
#endif
478 txq->ctx_cache[ctx_idx].flags = ol_flags;
479 txq->ctx_cache[ctx_idx].tx_offload.data[0] =
480 tx_offload_mask.data[0] & tx_offload.data[0];
481 txq->ctx_cache[ctx_idx].tx_offload.data[1] =
482 tx_offload_mask.data[1] & tx_offload.data[1];
483 txq->ctx_cache[ctx_idx].tx_offload_mask = tx_offload_mask;
485 ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
486 vlan_macip_lens = tx_offload.l3_len;
487 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
488 vlan_macip_lens |= (tx_offload.outer_l2_len <<
489 IXGBE_ADVTXD_MACLEN_SHIFT);
491 vlan_macip_lens |= (tx_offload.l2_len <<
492 IXGBE_ADVTXD_MACLEN_SHIFT);
493 vlan_macip_lens |= ((uint32_t)tx_offload.vlan_tci << IXGBE_ADVTXD_VLAN_SHIFT);
494 ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
495 ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
	ctx_txd->seqnum_seed = seqnum_seed;
}
/*
 * Check which hardware context can be used. Use the existing match
 * or create a new context descriptor.
 */
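/*
 * The queue caches IXGBE_CTX_NUM context descriptors; a return value below
 * IXGBE_CTX_NUM is the index of the matching slot, while IXGBE_CTX_NUM
 * itself tells the caller to program a fresh context descriptor.
 */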
503 static inline uint32_t
504 what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
		   union ixgbe_tx_offload tx_offload)
{
507 /* If match with the current used context */
508 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
509 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
510 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
511 & tx_offload.data[0])) &&
512 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
513 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
514 & tx_offload.data[1]))))
515 return txq->ctx_curr;
	/* If not, check whether the other cached context matches */
	txq->ctx_curr ^= 1;
519 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
520 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
521 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
522 & tx_offload.data[0])) &&
523 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
524 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
525 & tx_offload.data[1]))))
526 return txq->ctx_curr;
	/* Mismatch: the caller must build a new context descriptor */
	return IXGBE_CTX_NUM;
}
532 static inline uint32_t
tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
{
	uint32_t tmp = 0;

537 if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM)
538 tmp |= IXGBE_ADVTXD_POPTS_TXSM;
539 if (ol_flags & PKT_TX_IP_CKSUM)
540 tmp |= IXGBE_ADVTXD_POPTS_IXSM;
541 if (ol_flags & PKT_TX_TCP_SEG)
		tmp |= IXGBE_ADVTXD_POPTS_TXSM;
	return tmp;
}
546 static inline uint32_t
tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
{
549 uint32_t cmdtype = 0;
551 if (ol_flags & PKT_TX_VLAN_PKT)
552 cmdtype |= IXGBE_ADVTXD_DCMD_VLE;
553 if (ol_flags & PKT_TX_TCP_SEG)
554 cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
555 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
556 cmdtype |= (1 << IXGBE_ADVTXD_OUTERIPCS_SHIFT);
557 if (ol_flags & PKT_TX_MACSEC)
		cmdtype |= IXGBE_ADVTXD_MAC_LINKSEC;
	return cmdtype;
}
562 /* Default RS bit threshold values */
563 #ifndef DEFAULT_TX_RS_THRESH
#define DEFAULT_TX_RS_THRESH   32
#endif
566 #ifndef DEFAULT_TX_FREE_THRESH
#define DEFAULT_TX_FREE_THRESH 32
#endif
570 /* Reset transmit descriptors after they have been used */
static int
ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
{
574 struct ixgbe_tx_entry *sw_ring = txq->sw_ring;
575 volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring;
576 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
577 uint16_t nb_tx_desc = txq->nb_tx_desc;
578 uint16_t desc_to_clean_to;
	uint16_t nb_tx_to_clean;
	uint32_t status;
582 /* Determine the last descriptor needing to be cleaned */
583 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
584 if (desc_to_clean_to >= nb_tx_desc)
585 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
587 /* Check to make sure the last descriptor to clean is done */
588 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
589 status = txr[desc_to_clean_to].wb.status;
590 if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD))) {
591 PMD_TX_FREE_LOG(DEBUG,
592 "TX descriptor %4u is not done"
593 "(port=%d queue=%d)",
595 txq->port_id, txq->queue_id);
		/* Failed to clean any descriptors, better luck next time */
		return -(1);
	}
600 /* Figure out how many descriptors will be cleaned */
601 if (last_desc_cleaned > desc_to_clean_to)
		nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
							desc_to_clean_to);
	else
		nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
						last_desc_cleaned);
608 PMD_TX_FREE_LOG(DEBUG,
609 "Cleaning %4u TX descriptors: %4u to %4u "
610 "(port=%d queue=%d)",
611 nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
612 txq->port_id, txq->queue_id);
	/*
	 * The last descriptor to clean is done, so that means all the
	 * descriptors from the last descriptor that was cleaned
	 * up to the last descriptor with the RS bit set
	 * are done. Only reset the threshold descriptor.
	 */
620 txr[desc_to_clean_to].wb.status = 0;
622 /* Update the txq to reflect the last descriptor that was cleaned */
623 txq->last_desc_cleaned = desc_to_clean_to;
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);

	/* No Error */
	return 0;
}
uint16_t
ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
634 struct ixgbe_tx_queue *txq;
635 struct ixgbe_tx_entry *sw_ring;
636 struct ixgbe_tx_entry *txe, *txn;
637 volatile union ixgbe_adv_tx_desc *txr;
638 volatile union ixgbe_adv_tx_desc *txd, *txp;
639 struct rte_mbuf *tx_pkt;
640 struct rte_mbuf *m_seg;
641 uint64_t buf_dma_addr;
642 uint32_t olinfo_status;
	uint32_t cmd_type_len;
	uint32_t pkt_len;
	uint16_t slen;
	uint64_t ol_flags;
	uint16_t tx_id;
	uint16_t tx_last;
	uint16_t nb_tx;
	uint16_t nb_used;
	uint64_t tx_ol_req;
	uint32_t ctx = 0;
	uint32_t new_ctx;
654 union ixgbe_tx_offload tx_offload;
#ifdef RTE_LIBRTE_SECURITY
	uint8_t use_ipsec;
#endif

659 tx_offload.data[0] = 0;
660 tx_offload.data[1] = 0;
	txq = tx_queue;
	sw_ring = txq->sw_ring;
	txr = txq->tx_ring;
664 tx_id = txq->tx_tail;
	txe = &sw_ring[tx_id];
	txp = NULL;
668 /* Determine if the descriptor ring needs to be cleaned. */
669 if (txq->nb_tx_free < txq->tx_free_thresh)
670 ixgbe_xmit_cleanup(txq);
672 rte_prefetch0(&txe->mbuf->pool);
675 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		new_ctx = 0;
		tx_pkt = *tx_pkts++;
		pkt_len = tx_pkt->pkt_len;
		/*
		 * Determine how many (if any) context descriptors
		 * are needed for offload functionality.
		 */
684 ol_flags = tx_pkt->ol_flags;
685 #ifdef RTE_LIBRTE_SECURITY
		use_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD);
#endif
689 /* If hardware offload required */
		tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK;
		if (tx_ol_req) {
692 tx_offload.l2_len = tx_pkt->l2_len;
693 tx_offload.l3_len = tx_pkt->l3_len;
694 tx_offload.l4_len = tx_pkt->l4_len;
695 tx_offload.vlan_tci = tx_pkt->vlan_tci;
696 tx_offload.tso_segsz = tx_pkt->tso_segsz;
697 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
698 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
#ifdef RTE_LIBRTE_SECURITY
			if (use_ipsec) {
701 union ixgbe_crypto_tx_desc_md *ipsec_mdata =
					(union ixgbe_crypto_tx_desc_md *)
							&tx_pkt->udata64;
704 tx_offload.sa_idx = ipsec_mdata->sa_idx;
				tx_offload.sec_pad_len = ipsec_mdata->pad_len;
			}
#endif
			/* Build a new context or reuse an existing one. */
			ctx = what_advctx_update(txq, tx_ol_req,
					tx_offload);
			/* Only allocate context descriptor if required */
			new_ctx = (ctx == IXGBE_CTX_NUM);
		}
		/*
		 * Keep track of how many descriptors are used this loop
		 * This will always be the number of segments + the number of
		 * Context descriptors required to transmit the packet
		 */
722 nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
		if (txp != NULL &&
				nb_used + txq->nb_tx_used >= txq->tx_rs_thresh)
726 /* set RS on the previous packet in the burst */
727 txp->read.cmd_type_len |=
728 rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
		/*
		 * The number of descriptors that must be allocated for a
		 * packet is the number of segments of that packet, plus 1
		 * Context Descriptor for the hardware offload, if any.
		 * Determine the last TX descriptor to allocate in the TX ring
		 * for the packet, starting from the current position (tx_id)
		 * in the ring.
		 */
738 tx_last = (uint16_t) (tx_id + nb_used - 1);
741 if (tx_last >= txq->nb_tx_desc)
742 tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
744 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
745 " tx_first=%u tx_last=%u",
746 (unsigned) txq->port_id,
			   (unsigned) txq->queue_id,
			   (unsigned) pkt_len,
			   (unsigned) tx_id,
			   (unsigned) tx_last);
		/*
		 * Make sure there are enough TX descriptors available to
		 * transmit the entire packet.
		 * nb_used better be less than or equal to txq->tx_rs_thresh
		 */
757 if (nb_used > txq->nb_tx_free) {
758 PMD_TX_FREE_LOG(DEBUG,
759 "Not enough free TX descriptors "
760 "nb_used=%4u nb_free=%4u "
761 "(port=%d queue=%d)",
762 nb_used, txq->nb_tx_free,
763 txq->port_id, txq->queue_id);
765 if (ixgbe_xmit_cleanup(txq) != 0) {
				/* Could not clean any descriptors */
				if (nb_tx == 0)
					return 0;
				goto end_of_tx;
			}
772 /* nb_used better be <= txq->tx_rs_thresh */
773 if (unlikely(nb_used > txq->tx_rs_thresh)) {
774 PMD_TX_FREE_LOG(DEBUG,
775 "The number of descriptors needed to "
776 "transmit the packet exceeds the "
777 "RS bit threshold. This will impact "
779 "nb_used=%4u nb_free=%4u "
781 "(port=%d queue=%d)",
782 nb_used, txq->nb_tx_free,
					txq->tx_rs_thresh,
					txq->port_id, txq->queue_id);
				/*
				 * Loop here until there are enough TX
				 * descriptors or until the ring cannot be
				 * cleaned.
				 */
790 while (nb_used > txq->nb_tx_free) {
791 if (ixgbe_xmit_cleanup(txq) != 0) {
						/*
						 * Could not clean any
						 * descriptors
						 */
						if (nb_tx == 0)
							return 0;
						goto end_of_tx;
					}
				}
			}
		}
		/*
		 * By now there are enough free TX descriptors to transmit
		 * the packet.
		 */
		/*
		 * Set common flags of all TX Data Descriptors.
		 *
		 * The following bits must be set in all Data Descriptors:
		 *   - IXGBE_ADVTXD_DTYP_DATA
		 *   - IXGBE_ADVTXD_DCMD_DEXT
		 *
		 * The following bits must be set in the first Data Descriptor
		 * and are ignored in the other ones:
		 *   - IXGBE_ADVTXD_DCMD_IFCS
		 *   - IXGBE_ADVTXD_MAC_1588
		 *   - IXGBE_ADVTXD_DCMD_VLE
		 *
		 * The following bits must only be set in the last Data
		 * Descriptor:
		 *   - IXGBE_TXD_CMD_EOP
		 *
		 * The following bits can be set in any Data Descriptor, but
		 * are only set in the last Data Descriptor:
		 *   - IXGBE_TXD_CMD_RS
		 */
830 cmd_type_len = IXGBE_ADVTXD_DTYP_DATA |
831 IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
833 #ifdef RTE_LIBRTE_IEEE1588
834 if (ol_flags & PKT_TX_IEEE1588_TMST)
			cmd_type_len |= IXGBE_ADVTXD_MAC_1588;
#endif
		olinfo_status = 0;
		if (tx_ol_req) {
			if (ol_flags & PKT_TX_TCP_SEG) {
				/* when TSO is on, the paylen in the descriptor
				 * is not the packet len but the TCP payload len */
844 pkt_len -= (tx_offload.l2_len +
					tx_offload.l3_len + tx_offload.l4_len);
			}
			/*
			 * Setup the TX Advanced Context Descriptor if required
			 */
			if (new_ctx) {
				volatile struct ixgbe_adv_tx_context_desc *
					ctx_txd;

855 ctx_txd = (volatile struct
					ixgbe_adv_tx_context_desc *)
					&txr[tx_id];
859 txn = &sw_ring[txe->next_id];
860 rte_prefetch0(&txn->mbuf->pool);
862 if (txe->mbuf != NULL) {
					rte_pktmbuf_free_seg(txe->mbuf);
					txe->mbuf = NULL;
				}
867 ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
868 tx_offload, &tx_pkt->udata64);
870 txe->last_id = tx_last;
				tx_id = txe->next_id;
				txe = txn;
			}
			/*
			 * Setup the TX Advanced Data Descriptor.
			 * This path is used whether the context descriptor
			 * is new or reused.
			 */
880 cmd_type_len |= tx_desc_ol_flags_to_cmdtype(ol_flags);
881 olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
			olinfo_status |= ctx << IXGBE_ADVTXD_IDX_SHIFT;
		}
885 olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
886 #ifdef RTE_LIBRTE_SECURITY
		if (use_ipsec)
			olinfo_status |= IXGBE_ADVTXD_POPTS_IPSEC;
#endif
		m_seg = tx_pkt;
		do {
			txd = &txr[tx_id];
			txn = &sw_ring[txe->next_id];
895 rte_prefetch0(&txn->mbuf->pool);
897 if (txe->mbuf != NULL)
				rte_pktmbuf_free_seg(txe->mbuf);
			txe->mbuf = m_seg;
			/*
			 * Set up Transmit Data Descriptor.
			 */
904 slen = m_seg->data_len;
905 buf_dma_addr = rte_mbuf_data_iova(m_seg);
906 txd->read.buffer_addr =
907 rte_cpu_to_le_64(buf_dma_addr);
908 txd->read.cmd_type_len =
909 rte_cpu_to_le_32(cmd_type_len | slen);
910 txd->read.olinfo_status =
911 rte_cpu_to_le_32(olinfo_status);
912 txe->last_id = tx_last;
913 tx_id = txe->next_id;
			txe = txn;
			m_seg = m_seg->next;
		} while (m_seg != NULL);
		/*
		 * The last packet data descriptor needs End Of Packet (EOP)
		 */
922 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
923 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
925 /* Set RS bit only on threshold packets' last descriptor */
926 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
927 PMD_TX_FREE_LOG(DEBUG,
928 "Setting RS bit on TXD id="
929 "%4u (port=%d queue=%d)",
930 tx_last, txq->port_id, txq->queue_id);
932 cmd_type_len |= IXGBE_TXD_CMD_RS;
			/* Update txq RS bit counters */
			txq->nb_tx_used = 0;
			txp = NULL;
		} else
			txp = txd;
		txd->read.cmd_type_len |= rte_cpu_to_le_32(cmd_type_len);
	}

end_of_tx:
944 /* set RS on last packet in the burst */
	if (txp != NULL)
		txp->read.cmd_type_len |= rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);

	rte_wmb();
	/*
	 * Set the Transmit Descriptor Tail (TDT)
	 */
953 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
954 (unsigned) txq->port_id, (unsigned) txq->queue_id,
955 (unsigned) tx_id, (unsigned) nb_tx);
956 IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
	txq->tx_tail = tx_id;

	return nb_tx;
}
962 /*********************************************************************
966 **********************************************************************/
uint16_t
ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	int i, ret;
	uint64_t ol_flags;
	struct rte_mbuf *m;
973 struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
975 for (i = 0; i < nb_pkts; i++) {
		m = tx_pkts[i];
		ol_flags = m->ol_flags;
		/**
		 * Check if packet meets requirements for number of segments
		 *
		 * NOTE: for ixgbe it's always (40 - WTHRESH) for both TSO and
		 *       non-TSO
		 */
		if (m->nb_segs > IXGBE_TX_MAX_SEG - txq->wthresh) {
			rte_errno = EINVAL;
			return i;
		}
		if (ol_flags & IXGBE_TX_OFFLOAD_NOTSUP_MASK) {
			rte_errno = ENOTSUP;
			return i;
		}
996 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
		ret = rte_validate_tx_offload(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
#endif
		ret = rte_net_intel_cksum_prepare(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
	}

	return i;
}
1013 /*********************************************************************
1017 **********************************************************************/
1019 #define IXGBE_PACKET_TYPE_ETHER 0X00
1020 #define IXGBE_PACKET_TYPE_IPV4 0X01
1021 #define IXGBE_PACKET_TYPE_IPV4_TCP 0X11
1022 #define IXGBE_PACKET_TYPE_IPV4_UDP 0X21
1023 #define IXGBE_PACKET_TYPE_IPV4_SCTP 0X41
1024 #define IXGBE_PACKET_TYPE_IPV4_EXT 0X03
1025 #define IXGBE_PACKET_TYPE_IPV4_EXT_TCP 0X13
1026 #define IXGBE_PACKET_TYPE_IPV4_EXT_UDP 0X23
1027 #define IXGBE_PACKET_TYPE_IPV4_EXT_SCTP 0X43
1028 #define IXGBE_PACKET_TYPE_IPV6 0X04
1029 #define IXGBE_PACKET_TYPE_IPV6_TCP 0X14
1030 #define IXGBE_PACKET_TYPE_IPV6_UDP 0X24
1031 #define IXGBE_PACKET_TYPE_IPV6_SCTP 0X44
1032 #define IXGBE_PACKET_TYPE_IPV6_EXT 0X0C
1033 #define IXGBE_PACKET_TYPE_IPV6_EXT_TCP 0X1C
1034 #define IXGBE_PACKET_TYPE_IPV6_EXT_UDP 0X2C
1035 #define IXGBE_PACKET_TYPE_IPV6_EXT_SCTP 0X4C
1036 #define IXGBE_PACKET_TYPE_IPV4_IPV6 0X05
1037 #define IXGBE_PACKET_TYPE_IPV4_IPV6_TCP 0X15
1038 #define IXGBE_PACKET_TYPE_IPV4_IPV6_UDP 0X25
1039 #define IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP 0X45
1040 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6 0X07
1041 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP 0X17
1042 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP 0X27
1043 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP 0X47
1044 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT 0X0D
1045 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
1046 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
1047 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP 0X4D
1048 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT 0X0F
1049 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP 0X1F
1050 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP 0X2F
1051 #define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP 0X4F
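/*
 * These lookup keys mirror the hardware packet-type field bit by bit:
 * bit 0 = IPv4, bit 1 = IPv4 with extensions, bit 2 = IPv6, bit 3 = IPv6
 * with extensions, bit 4 = TCP, bit 5 = UDP, bit 6 = SCTP. E.g.
 * IPV4_EXT_TCP = 0X13 = (0X01 | 0X02 | 0X10).
 */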
1053 #define IXGBE_PACKET_TYPE_NVGRE 0X00
1054 #define IXGBE_PACKET_TYPE_NVGRE_IPV4 0X01
1055 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_TCP 0X11
1056 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP 0X21
1057 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP 0X41
1058 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT 0X03
1059 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP 0X13
1060 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP 0X23
1061 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP 0X43
1062 #define IXGBE_PACKET_TYPE_NVGRE_IPV6 0X04
1063 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP 0X14
1064 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP 0X24
1065 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP 0X44
1066 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT 0X0C
1067 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP 0X1C
1068 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP 0X2C
1069 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP 0X4C
1070 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6 0X05
1071 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP 0X15
1072 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP 0X25
1073 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT 0X0D
1074 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP 0X1D
1075 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP 0X2D
1077 #define IXGBE_PACKET_TYPE_VXLAN 0X80
1078 #define IXGBE_PACKET_TYPE_VXLAN_IPV4 0X81
1079 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_TCP 0x91
1080 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP 0xA1
1081 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP 0xC1
1082 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT 0x83
1083 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP 0X93
1084 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP 0XA3
1085 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP 0XC3
1086 #define IXGBE_PACKET_TYPE_VXLAN_IPV6 0X84
1087 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP 0X94
1088 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP 0XA4
1089 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP 0XC4
1090 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT 0X8C
1091 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP 0X9C
1092 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP 0XAC
1093 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP 0XCC
1094 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6 0X85
1095 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP 0X95
1096 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP 0XA5
1097 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT 0X8D
1098 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP 0X9D
1099 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP 0XAD
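/*
 * The VXLAN keys are the corresponding NVGRE keys with bit 7 set
 * (e.g. NVGRE_IPV4 = 0X01, VXLAN_IPV4 = 0X81), so both tunnel types
 * share the single ptype_table_tn lookup table below.
 */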
/**
 * Use 2 different tables for normal packets and tunnel packets
 * to save space.
 */
const uint32_t
	ptype_table[IXGBE_PACKET_TYPE_MAX] __rte_cache_aligned = {
1107 [IXGBE_PACKET_TYPE_ETHER] = RTE_PTYPE_L2_ETHER,
	[IXGBE_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
		RTE_PTYPE_L3_IPV4,
1110 [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
1111 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
1112 [IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
1113 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
1114 [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
1115 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
1116 [IXGBE_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
1117 RTE_PTYPE_L3_IPV4_EXT,
1118 [IXGBE_PACKET_TYPE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1119 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
1120 [IXGBE_PACKET_TYPE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1121 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
1122 [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1123 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
	[IXGBE_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
		RTE_PTYPE_L3_IPV6,
1126 [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1127 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
1128 [IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1129 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
1130 [IXGBE_PACKET_TYPE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1131 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP,
1132 [IXGBE_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1133 RTE_PTYPE_L3_IPV6_EXT,
1134 [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1135 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
1136 [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1137 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
1138 [IXGBE_PACKET_TYPE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1139 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_SCTP,
1140 [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
1141 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1142 RTE_PTYPE_INNER_L3_IPV6,
1143 [IXGBE_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1144 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1145 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
1146 [IXGBE_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1147 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1148 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
1149 [IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1150 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1151 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
1152 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6] = RTE_PTYPE_L2_ETHER |
1153 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1154 RTE_PTYPE_INNER_L3_IPV6,
1155 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1156 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1157 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
1158 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1159 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1160 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
1161 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1162 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1163 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
1164 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1165 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1166 RTE_PTYPE_INNER_L3_IPV6_EXT,
1167 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1168 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1169 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
1170 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1171 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1172 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
1173 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1174 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1175 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
1176 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1177 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1178 RTE_PTYPE_INNER_L3_IPV6_EXT,
1179 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1180 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1181 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
1182 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1183 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1184 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
1185 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP] =
1186 RTE_PTYPE_L2_ETHER |
1187 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
		RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
};
const uint32_t
	ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX] __rte_cache_aligned = {
1193 [IXGBE_PACKET_TYPE_NVGRE] = RTE_PTYPE_L2_ETHER |
1194 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1195 RTE_PTYPE_INNER_L2_ETHER,
1196 [IXGBE_PACKET_TYPE_NVGRE_IPV4] = RTE_PTYPE_L2_ETHER |
1197 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1198 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1199 [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
1200 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1201 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT,
1202 [IXGBE_PACKET_TYPE_NVGRE_IPV6] = RTE_PTYPE_L2_ETHER |
1203 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1204 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6,
1205 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
1206 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1207 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1208 [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1209 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1210 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT,
1211 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1212 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1213 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1214 [IXGBE_PACKET_TYPE_NVGRE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
1215 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1216 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
1217 RTE_PTYPE_INNER_L4_TCP,
1218 [IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1219 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1220 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
1221 RTE_PTYPE_INNER_L4_TCP,
1222 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1223 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1224 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1225 [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1226 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1227 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
1228 RTE_PTYPE_INNER_L4_TCP,
1229 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP] =
1230 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1231 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
1232 RTE_PTYPE_INNER_L3_IPV4,
1233 [IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
1234 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1235 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
1236 RTE_PTYPE_INNER_L4_UDP,
1237 [IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1238 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1239 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
1240 RTE_PTYPE_INNER_L4_UDP,
1241 [IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1242 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1243 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
1244 RTE_PTYPE_INNER_L4_SCTP,
1245 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1246 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1247 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1248 [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1249 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1250 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
1251 RTE_PTYPE_INNER_L4_UDP,
1252 [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1253 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1254 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
1255 RTE_PTYPE_INNER_L4_SCTP,
1256 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP] =
1257 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1258 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
1259 RTE_PTYPE_INNER_L3_IPV4,
1260 [IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
1261 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1262 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
1263 RTE_PTYPE_INNER_L4_SCTP,
1264 [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1265 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1266 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
1267 RTE_PTYPE_INNER_L4_SCTP,
1268 [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1269 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1270 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
1271 RTE_PTYPE_INNER_L4_TCP,
1272 [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1273 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1274 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
1275 RTE_PTYPE_INNER_L4_UDP,
1277 [IXGBE_PACKET_TYPE_VXLAN] = RTE_PTYPE_L2_ETHER |
1278 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1279 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER,
1280 [IXGBE_PACKET_TYPE_VXLAN_IPV4] = RTE_PTYPE_L2_ETHER |
1281 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1282 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1283 RTE_PTYPE_INNER_L3_IPV4,
1284 [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
1285 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1286 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1287 RTE_PTYPE_INNER_L3_IPV4_EXT,
1288 [IXGBE_PACKET_TYPE_VXLAN_IPV6] = RTE_PTYPE_L2_ETHER |
1289 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1290 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1291 RTE_PTYPE_INNER_L3_IPV6,
1292 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
1293 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1294 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1295 RTE_PTYPE_INNER_L3_IPV4,
1296 [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1297 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1298 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1299 RTE_PTYPE_INNER_L3_IPV6_EXT,
1300 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1301 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1302 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1303 RTE_PTYPE_INNER_L3_IPV4,
1304 [IXGBE_PACKET_TYPE_VXLAN_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
1305 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1306 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1307 RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_TCP,
1308 [IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1309 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1310 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1311 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
1312 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1313 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1314 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1315 RTE_PTYPE_INNER_L3_IPV4,
1316 [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1317 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1318 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1319 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
1320 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP] =
1321 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1322 RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
1323 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1324 [IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
1325 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1326 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1327 RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_UDP,
1328 [IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1329 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1330 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1331 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
1332 [IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1333 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1334 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1335 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
1336 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1337 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1338 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1339 RTE_PTYPE_INNER_L3_IPV4,
1340 [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1341 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1342 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1343 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
1344 [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1345 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1346 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1347 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
1348 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP] =
1349 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1350 RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
1351 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1352 [IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
1353 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1354 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1355 RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_SCTP,
1356 [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1357 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1358 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1359 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_SCTP,
1360 [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1361 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1362 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1363 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP,
1364 [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1365 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1366 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
		RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP,
};
/* @note: fix ixgbe_dev_supported_ptypes_get() if anything changes here. */
1371 static inline uint32_t
ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask)
{
1375 if (unlikely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
1376 return RTE_PTYPE_UNKNOWN;
1378 pkt_info = (pkt_info >> IXGBE_PACKET_TYPE_SHIFT) & ptype_mask;
1380 /* For tunnel packet */
1381 if (pkt_info & IXGBE_PACKET_TYPE_TUNNEL_BIT) {
1382 /* Remove the tunnel bit to save the space. */
1383 pkt_info &= IXGBE_PACKET_TYPE_MASK_TUNNEL;
		return ptype_table_tn[pkt_info];
	}
	/*
	 * For x550, if it's not tunnel,
	 * tunnel type bit should be set to 0.
	 * Reuse 82599's mask.
	 */
1392 pkt_info &= IXGBE_PACKET_TYPE_MASK_82599;
	return ptype_table[pkt_info];
}
1397 static inline uint64_t
ixgbe_rxd_pkt_info_to_pkt_flags(uint16_t pkt_info)
{
1400 static uint64_t ip_rss_types_map[16] __rte_cache_aligned = {
1401 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
1402 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
1403 PKT_RX_RSS_HASH, 0, 0, 0,
		0, 0, 0, PKT_RX_FDIR,
	};
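	/*
	 * The low 4 bits of pkt_info hold the hardware RSS type; every
	 * hashed type maps to PKT_RX_RSS_HASH, and type 15 marks a flow
	 * director match reported as PKT_RX_FDIR.
	 */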
1406 #ifdef RTE_LIBRTE_IEEE1588
1407 static uint64_t ip_pkt_etqf_map[8] = {
		0, 0, 0, PKT_RX_IEEE1588_PTP,
		0, 0, 0, 0,
	};

1412 if (likely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
1413 return ip_pkt_etqf_map[(pkt_info >> 4) & 0X07] |
1414 ip_rss_types_map[pkt_info & 0XF];
	else
		return ip_rss_types_map[pkt_info & 0XF];
#else
	return ip_rss_types_map[pkt_info & 0XF];
#endif
}
1422 static inline uint64_t
rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags)
{
	uint64_t pkt_flags;

	/*
	 * Check if VLAN present only.
	 * Do not check whether L3/L4 rx checksum done by NIC or not,
	 * That can be found from rte_eth_rxmode.offloads flag
	 */
1432 pkt_flags = (rx_status & IXGBE_RXD_STAT_VP) ? vlan_flags : 0;
1434 #ifdef RTE_LIBRTE_IEEE1588
1435 if (rx_status & IXGBE_RXD_STAT_TMST)
		pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
#endif

	return pkt_flags;
}
1441 static inline uint64_t
rx_desc_error_to_pkt_flags(uint32_t rx_status)
{
	uint64_t pkt_flags;

	/*
	 * Bit 31: IPE, IPv4 checksum error
	 * Bit 30: L4I, L4I integrity error
	 */
1450 static uint64_t error_to_pkt_flags_map[4] = {
1451 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
1452 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
1453 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD,
		PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
	};
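	/*
	 * IPE and L4I form a 2-bit index into the map above: e.g. IPE=1,
	 * L4I=0 yields PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD.
	 */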
1456 pkt_flags = error_to_pkt_flags_map[(rx_status >>
1457 IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK];
1459 if ((rx_status & IXGBE_RXD_STAT_OUTERIPCS) &&
1460 (rx_status & IXGBE_RXDADV_ERR_OUTERIPER)) {
		pkt_flags |= PKT_RX_EIP_CKSUM_BAD;
	}
1464 #ifdef RTE_LIBRTE_SECURITY
1465 if (rx_status & IXGBE_RXD_STAT_SECP) {
1466 pkt_flags |= PKT_RX_SEC_OFFLOAD;
1467 if (rx_status & IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG)
			pkt_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
	}
#endif

	return pkt_flags;
}
/*
 * LOOK_AHEAD defines how many desc statuses to check beyond the
 * current descriptor.
 * It must be a pound define for optimal performance.
 * Do not change the value of LOOK_AHEAD, as the ixgbe_rx_scan_hw_ring
 * function only works with LOOK_AHEAD=8.
 */
1482 #define LOOK_AHEAD 8
1483 #if (LOOK_AHEAD != 8)
1484 #error "PMD IXGBE: LOOK_AHEAD must be 8\n"
static inline int
ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
{
1489 volatile union ixgbe_adv_rx_desc *rxdp;
1490 struct ixgbe_rx_entry *rxep;
	struct rte_mbuf *mb;
	uint16_t pkt_len;
	uint64_t pkt_flags;
	int nb_dd;
1495 uint32_t s[LOOK_AHEAD];
1496 uint32_t pkt_info[LOOK_AHEAD];
	int i, j, nb_rx = 0;
	uint32_t status;
1499 uint64_t vlan_flags = rxq->vlan_flags;
1501 /* get references to current descriptor and S/W ring entry */
1502 rxdp = &rxq->rx_ring[rxq->rx_tail];
1503 rxep = &rxq->sw_ring[rxq->rx_tail];
1505 status = rxdp->wb.upper.status_error;
1506 /* check to make sure there is at least 1 packet to receive */
	if (!(status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
		return 0;
	/*
	 * Scan LOOK_AHEAD descriptors at a time to determine which descriptors
	 * reference packets that are ready to be received.
	 */
1514 for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST;
1515 i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD) {
1516 /* Read desc statuses backwards to avoid race condition */
1517 for (j = 0; j < LOOK_AHEAD; j++)
			s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error);

		rte_smp_rmb();
1522 /* Compute how many status bits were set */
1523 for (nb_dd = 0; nb_dd < LOOK_AHEAD &&
				(s[nb_dd] & IXGBE_RXDADV_STAT_DD); nb_dd++)
			;
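		/*
		 * Only the leading run of DD bits is counted, so descriptors
		 * are consumed strictly in ring order.
		 */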
1527 for (j = 0; j < nb_dd; j++)
			pkt_info[j] = rte_le_to_cpu_32(rxdp[j].wb.lower.
						       lo_dword.data);

		nb_rx += nb_dd;
1533 /* Translate descriptor info to mbuf format */
1534 for (j = 0; j < nb_dd; ++j) {
			mb = rxep[j].mbuf;
			pkt_len = rte_le_to_cpu_16(rxdp[j].wb.upper.length) -
				  rxq->crc_len;
1538 mb->data_len = pkt_len;
1539 mb->pkt_len = pkt_len;
1540 mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
1542 /* convert descriptor fields to rte mbuf flags */
			pkt_flags = rx_desc_status_to_pkt_flags(s[j],
								vlan_flags);
1545 pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
1546 pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags
1547 ((uint16_t)pkt_info[j]);
1548 mb->ol_flags = pkt_flags;
			mb->packet_type =
				ixgbe_rxd_pkt_info_to_pkt_type
1551 (pkt_info[j], rxq->pkt_type_mask);
1553 if (likely(pkt_flags & PKT_RX_RSS_HASH))
1554 mb->hash.rss = rte_le_to_cpu_32(
1555 rxdp[j].wb.lower.hi_dword.rss);
1556 else if (pkt_flags & PKT_RX_FDIR) {
1557 mb->hash.fdir.hash = rte_le_to_cpu_16(
1558 rxdp[j].wb.lower.hi_dword.csum_ip.csum) &
1559 IXGBE_ATR_HASH_MASK;
1560 mb->hash.fdir.id = rte_le_to_cpu_16(
					rxdp[j].wb.lower.hi_dword.csum_ip.ip_id);
			}
		}
1565 /* Move mbuf pointers from the S/W ring to the stage */
1566 for (j = 0; j < LOOK_AHEAD; ++j) {
			rxq->rx_stage[i + j] = rxep[j].mbuf;
		}
1570 /* stop if all requested packets could not be received */
		if (nb_dd != LOOK_AHEAD)
			break;
	}
1575 /* clear software ring entries so we can cleanup correctly */
1576 for (i = 0; i < nb_rx; ++i) {
		rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
	}

	return nb_rx;
}
static inline int
ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf)
{
1587 volatile union ixgbe_adv_rx_desc *rxdp;
1588 struct ixgbe_rx_entry *rxep;
	struct rte_mbuf *mb;
	uint16_t alloc_idx;
	__le64 dma_addr;
	int diag, i;
1594 /* allocate buffers in bulk directly into the S/W ring */
1595 alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
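	/*
	 * rx_free_trigger points at the last descriptor of the block being
	 * refilled, so stepping back rx_free_thresh - 1 entries gives the
	 * first descriptor to fill.
	 */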
1596 rxep = &rxq->sw_ring[alloc_idx];
1597 diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
1598 rxq->rx_free_thresh);
	if (unlikely(diag != 0))
		return -ENOMEM;
1602 rxdp = &rxq->rx_ring[alloc_idx];
1603 for (i = 0; i < rxq->rx_free_thresh; ++i) {
1604 /* populate the static rte mbuf fields */
		mb = rxep[i].mbuf;
		if (reset_mbuf)
			mb->port = rxq->port_id;
1610 rte_mbuf_refcnt_set(mb, 1);
1611 mb->data_off = RTE_PKTMBUF_HEADROOM;
1613 /* populate the descriptors */
1614 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1615 rxdp[i].read.hdr_addr = 0;
		rxdp[i].read.pkt_addr = dma_addr;
	}
1619 /* update state of internal queue structure */
1620 rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh;
1621 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
		rxq->rx_free_trigger = rxq->rx_free_thresh - 1;

	return 0;
}
1628 static inline uint16_t
ixgbe_rx_fill_from_stage(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
			 uint16_t nb_pkts)
{
	struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
	int i;
1635 /* how many packets are ready to return? */
1636 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1638 /* copy mbuf pointers to the application's packet list */
1639 for (i = 0; i < nb_pkts; ++i)
1640 rx_pkts[i] = stage[i];
1642 /* update internal queue state */
1643 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
	rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);

	return nb_pkts;
}
1649 static inline uint16_t
rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
	     uint16_t nb_pkts)
{
	struct ixgbe_rx_queue *rxq = (struct ixgbe_rx_queue *)rx_queue;
	uint16_t nb_rx = 0;
1656 /* Any previously recv'd pkts will be returned from the Rx stage */
1657 if (rxq->rx_nb_avail)
1658 return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1660 /* Scan the H/W ring for packets to receive */
1661 nb_rx = (uint16_t)ixgbe_rx_scan_hw_ring(rxq);
1663 /* update internal queue state */
1664 rxq->rx_next_avail = 0;
1665 rxq->rx_nb_avail = nb_rx;
1666 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1668 /* if required, allocate new buffers to replenish descriptors */
1669 if (rxq->rx_tail > rxq->rx_free_trigger) {
1670 uint16_t cur_free_trigger = rxq->rx_free_trigger;
1672 if (ixgbe_rx_alloc_bufs(rxq, true) != 0) {
1675 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1676 "queue_id=%u", (unsigned) rxq->port_id,
1677 (unsigned) rxq->queue_id);
1679 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
1680 rxq->rx_free_thresh;
			/*
			 * Need to rewind any previous receives if we cannot
			 * allocate new buffers to replenish the old ones.
			 */
1686 rxq->rx_nb_avail = 0;
1687 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1688 for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j)
				rxq->sw_ring[j].mbuf = rxq->rx_stage[i];

			return 0;
		}
1694 /* update tail pointer */
		rte_wmb();
		IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr,
					    cur_free_trigger);
	}
	if (rxq->rx_tail >= rxq->nb_rx_desc)
		rxq->rx_tail = 0;
1703 /* received any packets this loop? */
1704 if (rxq->rx_nb_avail)
		return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);

	return 0;
}
1710 /* split requests into chunks of size RTE_PMD_IXGBE_RX_MAX_BURST */
uint16_t
ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
			   uint16_t nb_pkts)
{
	uint16_t nb_rx;
	if (unlikely(nb_pkts == 0))
		return 0;
1720 if (likely(nb_pkts <= RTE_PMD_IXGBE_RX_MAX_BURST))
1721 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
	/* request is relatively large, chunk it up */
	nb_rx = 0;
	while (nb_pkts) {
		uint16_t ret, n;

1728 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_RX_MAX_BURST);
1729 ret = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1730 nb_rx = (uint16_t)(nb_rx + ret);
		nb_pkts = (uint16_t)(nb_pkts - ret);
		if (ret < n)
			break;
	}

	return nb_rx;
}
uint16_t
ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
1743 struct ixgbe_rx_queue *rxq;
1744 volatile union ixgbe_adv_rx_desc *rx_ring;
1745 volatile union ixgbe_adv_rx_desc *rxdp;
1746 struct ixgbe_rx_entry *sw_ring;
1747 struct ixgbe_rx_entry *rxe;
1748 struct rte_mbuf *rxm;
1749 struct rte_mbuf *nmb;
	union ixgbe_adv_rx_desc rxd;
	uint64_t dma_addr;
	uint32_t staterr;
	uint32_t pkt_info;
	uint16_t pkt_len;
	uint16_t rx_id;
	uint16_t nb_rx;
	uint16_t nb_hold;
	uint64_t pkt_flags;
1759 uint64_t vlan_flags;
	nb_rx = 0;
	nb_hold = 0;
	rxq = rx_queue;
	rx_id = rxq->rx_tail;
1765 rx_ring = rxq->rx_ring;
1766 sw_ring = rxq->sw_ring;
1767 vlan_flags = rxq->vlan_flags;
1768 while (nb_rx < nb_pkts) {
		/*
		 * The order of operations here is important as the DD status
		 * bit must not be read after any other descriptor fields.
		 * rx_ring and rxdp are pointing to volatile data so the order
		 * of accesses cannot be reordered by the compiler. If they were
		 * not volatile, they could be reordered which could lead to
		 * using invalid descriptor fields when read from rxd.
		 */
1777 rxdp = &rx_ring[rx_id];
1778 staterr = rxdp->wb.upper.status_error;
		if (!(staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
			break;
		rxd = *rxdp;
		/*
		 * If the IXGBE_RXDADV_STAT_EOP flag is not set, the RX packet
		 * is likely to be invalid and to be dropped by the various
		 * validation checks performed by the network stack.
		 *
		 * Allocate a new mbuf to replenish the RX ring descriptor.
		 * If the allocation fails:
		 *    - arrange for that RX descriptor to be the first one
		 *      being parsed the next time the receive function is
		 *      invoked [on the same queue].
		 *
		 *    - Stop parsing the RX ring and return immediately.
		 *
		 * This policy does not drop the packet received in the RX
		 * descriptor for which the allocation of a new mbuf failed.
		 * Thus, it allows that packet to be later retrieved if
		 * mbufs have been freed in the mean time.
		 * As a side effect, holding RX descriptors instead of
		 * systematically giving them back to the NIC may lead to
		 * RX ring exhaustion situations.
		 * However, the NIC can gracefully prevent such situations
		 * from happening by sending specific "back-pressure" flow
		 * control frames to its peer(s).
		 */
1809 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1810 "ext_err_stat=0x%08x pkt_len=%u",
1811 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1812 (unsigned) rx_id, (unsigned) staterr,
1813 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
1815 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1817 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1818 "queue_id=%u", (unsigned) rxq->port_id,
1819 (unsigned) rxq->queue_id);
			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
			break;
		}
		nb_hold++;
		rxe = &sw_ring[rx_id];
		rx_id++;
		if (rx_id == rxq->nb_rx_desc)
			rx_id = 0;
1830 /* Prefetch next mbuf while processing current one. */
1831 rte_ixgbe_prefetch(sw_ring[rx_id].mbuf);
		/*
		 * When next RX descriptor is on a cache-line boundary,
		 * prefetch the next 4 RX descriptors and the next 8 pointers
		 * to mbufs.
		 */
1838 if ((rx_id & 0x3) == 0) {
1839 rte_ixgbe_prefetch(&rx_ring[rx_id]);
			rte_ixgbe_prefetch(&sw_ring[rx_id]);
		}

		rxm = rxe->mbuf;
		rxe->mbuf = nmb;
		dma_addr =
1846 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1847 rxdp->read.hdr_addr = 0;
1848 rxdp->read.pkt_addr = dma_addr;
		/*
		 * Initialize the returned mbuf.
		 * 1) setup generic mbuf fields:
		 *    - number of segments,
		 *    - next segment,
		 *    - packet length,
		 *    - RX port identifier.
		 * 2) integrate hardware offload data, if any:
		 *    - RSS flag & hash,
		 *    - IP checksum flag,
		 *    - VLAN TCI, if any,
		 *    - error flags.
		 */
		pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
				      rxq->crc_len);
1865 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1866 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
		rxm->nb_segs = 1;
		rxm->next = NULL;
		rxm->pkt_len = pkt_len;
1870 rxm->data_len = pkt_len;
1871 rxm->port = rxq->port_id;
1873 pkt_info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1874 /* Only valid if PKT_RX_VLAN set in pkt_flags */
1875 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1877 pkt_flags = rx_desc_status_to_pkt_flags(staterr, vlan_flags);
1878 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1879 pkt_flags = pkt_flags |
1880 ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info);
1881 rxm->ol_flags = pkt_flags;
1883 ixgbe_rxd_pkt_info_to_pkt_type(pkt_info,
1884 rxq->pkt_type_mask);
1886 if (likely(pkt_flags & PKT_RX_RSS_HASH))
1887 rxm->hash.rss = rte_le_to_cpu_32(
1888 rxd.wb.lower.hi_dword.rss);
1889 else if (pkt_flags & PKT_RX_FDIR) {
1890 rxm->hash.fdir.hash = rte_le_to_cpu_16(
1891 rxd.wb.lower.hi_dword.csum_ip.csum) &
1892 IXGBE_ATR_HASH_MASK;
1893 rxm->hash.fdir.id = rte_le_to_cpu_16(
1894 rxd.wb.lower.hi_dword.csum_ip.ip_id);
1897 * Store the mbuf address into the next entry of the array
1898 * of returned packets.
1900 rx_pkts[nb_rx++] = rxm;
1902 rxq->rx_tail = rx_id;
1905 * If the number of free RX descriptors is greater than the RX free
1906 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1908 * Update the RDT with the value of the last processed RX descriptor
1909 * minus 1, to guarantee that the RDT register is never equal to the
1910 * RDH register, which creates a "full" ring situation from the
1911 * hardware point of view...
1913 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1914 if (nb_hold > rxq->rx_free_thresh) {
1915 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1916 "nb_hold=%u nb_rx=%u",
1917 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1918 (unsigned) rx_id, (unsigned) nb_hold,
1920 rx_id = (uint16_t) ((rx_id == 0) ?
1921 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1922 IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1925 rxq->nb_rx_hold = nb_hold;
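/*
 * Worked example for the tail write-back above (illustrative, not
 * driver code): with nb_rx_desc = 512 and rx_id = 0 after a wrap,
 * the value written to RDT is
 *
 *   rdt = (rx_id == 0) ? (512 - 1) : (rx_id - 1);   --> 511
 *
 * so the NIC never sees RDT == RDH, which it would interpret as a
 * full ring.
 */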
1930 * Detect an RSC descriptor.
1932 static inline uint32_t
1933 ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
1935 return (rte_le_to_cpu_32(rx->wb.lower.lo_dword.data) &
1936 IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
1940 * ixgbe_fill_cluster_head_buf - fill the first mbuf of the returned packet
1942 * Fill the following info in the HEAD buffer of the Rx cluster:
1943 * - RX port identifier
1944 * - hardware offload data, if any:
1946 * - IP checksum flag
1947 * - VLAN TCI, if any
1949 * @head HEAD of the packet cluster
1950 * @desc HW descriptor to get data from
1951 * @rxq Pointer to the Rx queue
1954 ixgbe_fill_cluster_head_buf(
1955 struct rte_mbuf *head,
1956 union ixgbe_adv_rx_desc *desc,
1957 struct ixgbe_rx_queue *rxq,
1963 head->port = rxq->port_id;
1965 /* The vlan_tci field is only valid when PKT_RX_VLAN is
1966 * set in the pkt_flags field.
1968 head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
1969 pkt_info = rte_le_to_cpu_32(desc->wb.lower.lo_dword.data);
1970 pkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags);
1971 pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
1972 pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info);
1973 head->ol_flags = pkt_flags;
1975 ixgbe_rxd_pkt_info_to_pkt_type(pkt_info, rxq->pkt_type_mask);
1977 if (likely(pkt_flags & PKT_RX_RSS_HASH))
1978 head->hash.rss = rte_le_to_cpu_32(desc->wb.lower.hi_dword.rss);
1979 else if (pkt_flags & PKT_RX_FDIR) {
1980 head->hash.fdir.hash =
1981 rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.csum)
1982 & IXGBE_ATR_HASH_MASK;
1983 head->hash.fdir.id =
1984 rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.ip_id);
1989 * ixgbe_recv_pkts_lro - receive handler for the LRO case.
1991 * @rx_queue Rx queue handle
1992 * @rx_pkts table of received packets
1993 * @nb_pkts size of rx_pkts table
1994 * @bulk_alloc if TRUE, bulk allocation is used for HW ring refilling
1996 * Handles the Rx HW ring completions when RSC feature is configured. Uses an
1997 * additional ring of ixgbe_rsc_entry's that will hold the relevant RSC info.
1999 * We use the same logic as in Linux and in FreeBSD ixgbe drivers:
2000 * 1) When non-EOP RSC completion arrives:
2001 * a) Update the HEAD of the current RSC aggregation cluster with the new
2002 * segment's data length.
2003 * b) Set the "next" pointer of the current segment to point to the segment
2004 * at the NEXTP index.
2005 * c) Pass the HEAD of RSC aggregation cluster on to the next NEXTP entry
2006 * in the sw_rsc_ring.
2007 * 2) When EOP arrives we just update the cluster's total length and offload
2008 * flags and deliver the cluster up to the upper layers. In our case - put it
2009 * in the rx_pkts table.
2011 * Returns the number of received packets/clusters (according to the "bulk
2012 * receive" interface).
2014 static inline uint16_t
2015 ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
2018 struct ixgbe_rx_queue *rxq = rx_queue;
2019 volatile union ixgbe_adv_rx_desc *rx_ring = rxq->rx_ring;
2020 struct ixgbe_rx_entry *sw_ring = rxq->sw_ring;
2021 struct ixgbe_scattered_rx_entry *sw_sc_ring = rxq->sw_sc_ring;
2022 uint16_t rx_id = rxq->rx_tail;
2024 uint16_t nb_hold = rxq->nb_rx_hold;
2025 uint16_t prev_id = rxq->rx_tail;
2027 while (nb_rx < nb_pkts) {
2029 struct ixgbe_rx_entry *rxe;
2030 struct ixgbe_scattered_rx_entry *sc_entry;
2031 struct ixgbe_scattered_rx_entry *next_sc_entry;
2032 struct ixgbe_rx_entry *next_rxe = NULL;
2033 struct rte_mbuf *first_seg;
2034 struct rte_mbuf *rxm;
2035 struct rte_mbuf *nmb = NULL;
2036 union ixgbe_adv_rx_desc rxd;
2039 volatile union ixgbe_adv_rx_desc *rxdp;
2044 * The code in this whole file uses the volatile pointer to
2045 * ensure the read ordering of the status and the rest of the
2046 * descriptor fields (on the compiler level only!!!). This is so
2047 * UGLY - why not just use a compiler barrier instead? DPDK
2048 * even has the rte_compiler_barrier() for that.
2050 * But most importantly this is just wrong because this doesn't
2051 * ensure memory ordering in a general case at all. For
2052 * instance, DPDK is supposed to work on Power CPUs where
2053 * compiler barrier may just not be enough!
2055 * I tried to write only this function properly to have a
2056 * starting point (as a part of an LRO/RSC series) but the
2057 * compiler cursed at me when I tried to cast away the
2058 * "volatile" from rx_ring (yes, it's volatile too!!!). So, I'm
2059 * keeping it the way it is for now.
2061 * The code in this file is broken in so many other places and
2062 * will just not work on a big-endian CPU anyway; therefore, the
2063 * lines below will have to be revisited together with the rest
2067 * - Get rid of "volatile" and let the compiler do its job.
2068 * - Use the proper memory barrier (rte_rmb()) to ensure the
2069 * memory ordering below.
2071 rxdp = &rx_ring[rx_id];
2072 staterr = rte_le_to_cpu_32(rxdp->wb.upper.status_error);
2074 if (!(staterr & IXGBE_RXDADV_STAT_DD))
2079 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
2080 "staterr=0x%x data_len=%u",
2081 rxq->port_id, rxq->queue_id, rx_id, staterr,
2082 rte_le_to_cpu_16(rxd.wb.upper.length));
2085 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
2087 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed "
2088 "port_id=%u queue_id=%u",
2089 rxq->port_id, rxq->queue_id);
2091 rte_eth_devices[rxq->port_id].data->
2092 rx_mbuf_alloc_failed++;
2095 } else if (nb_hold > rxq->rx_free_thresh) {
2096 uint16_t next_rdt = rxq->rx_free_trigger;
2098 if (!ixgbe_rx_alloc_bufs(rxq, false)) {
2100 IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr,
2102 nb_hold -= rxq->rx_free_thresh;
2104 PMD_RX_LOG(DEBUG, "RX bulk alloc failed "
2105 "port_id=%u queue_id=%u",
2106 rxq->port_id, rxq->queue_id);
2108 rte_eth_devices[rxq->port_id].data->
2109 rx_mbuf_alloc_failed++;
2115 rxe = &sw_ring[rx_id];
2116 eop = staterr & IXGBE_RXDADV_STAT_EOP;
2118 next_id = rx_id + 1;
2119 if (next_id == rxq->nb_rx_desc)
2122 /* Prefetch next mbuf while processing current one. */
2123 rte_ixgbe_prefetch(sw_ring[next_id].mbuf);
2126 * When next RX descriptor is on a cache-line boundary,
2127 * prefetch the next 4 RX descriptors and the next 4 pointers to mbufs.
2130 if ((next_id & 0x3) == 0) {
2131 rte_ixgbe_prefetch(&rx_ring[next_id]);
2132 rte_ixgbe_prefetch(&sw_ring[next_id]);
2139 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
2141 * Update RX descriptor with the physical address of the
2142 * new data buffer of the new allocated mbuf.
2146 rxm->data_off = RTE_PKTMBUF_HEADROOM;
2147 rxdp->read.hdr_addr = 0;
2148 rxdp->read.pkt_addr = dma;
2153 * Set data length & data buffer address of mbuf.
2155 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
2156 rxm->data_len = data_len;
2161 * Get next descriptor index:
2162 * - For RSC it's in the NEXTP field.
2163 * - For a scattered packet it's just the following descriptor.
2166 if (ixgbe_rsc_count(&rxd))
2168 (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
2169 IXGBE_RXDADV_NEXTP_SHIFT;
2173 next_sc_entry = &sw_sc_ring[nextp_id];
2174 next_rxe = &sw_ring[nextp_id];
2175 rte_ixgbe_prefetch(next_rxe);
2178 sc_entry = &sw_sc_ring[rx_id];
2179 first_seg = sc_entry->fbuf;
2180 sc_entry->fbuf = NULL;
2183 * If this is the first buffer of the received packet,
2184 * set the pointer to the first mbuf of the packet and
2185 * initialize its context.
2186 * Otherwise, update the total length and the number of segments
2187 * of the current scattered packet, and update the pointer to
2188 * the last mbuf of the current packet.
2190 if (first_seg == NULL) {
2192 first_seg->pkt_len = data_len;
2193 first_seg->nb_segs = 1;
2195 first_seg->pkt_len += data_len;
2196 first_seg->nb_segs++;
2203 * If this is not the last buffer of the received packet, update
2204 * the pointer to the first mbuf at the NEXTP entry in the
2205 * sw_sc_ring and continue to parse the RX ring.
2207 if (!eop && next_rxe) {
2208 rxm->next = next_rxe->mbuf;
2209 next_sc_entry->fbuf = first_seg;
2213 /* Initialize the first mbuf of the returned packet */
2214 ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq, staterr);
2217 * Deal with the case when HW CRC strip is disabled.
2218 * That can't happen when LRO is enabled, but could still
2219 * happen in scattered RX mode.
2221 first_seg->pkt_len -= rxq->crc_len;
2222 if (unlikely(rxm->data_len <= rxq->crc_len)) {
2223 struct rte_mbuf *lp;
2225 for (lp = first_seg; lp->next != rxm; lp = lp->next)
2228 first_seg->nb_segs--;
2229 lp->data_len -= rxq->crc_len - rxm->data_len;
2231 rte_pktmbuf_free_seg(rxm);
2233 rxm->data_len -= rxq->crc_len;
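/*
 * Worked example for the trimming above (illustrative): with
 * crc_len = 4, if the last segment carries only data_len = 2 bytes,
 * those bytes are pure CRC, so that segment is unlinked and freed and
 * the previous segment is trimmed by the remaining 4 - 2 = 2 CRC
 * bytes. Otherwise the last segment simply loses its trailing 4 bytes.
 */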
2235 /* Prefetch data of first segment, if configured to do so. */
2236 rte_packet_prefetch((char *)first_seg->buf_addr +
2237 first_seg->data_off);
2240 * Store the mbuf address into the next entry of the array
2241 * of returned packets.
2243 rx_pkts[nb_rx++] = first_seg;
2247 * Record index of the next RX descriptor to probe.
2249 rxq->rx_tail = rx_id;
2252 * If the number of free RX descriptors is greater than the RX free
2253 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
2255 * Update the RDT with the value of the last processed RX descriptor
2256 * minus 1, to guarantee that the RDT register is never equal to the
2257 * RDH register, which creates a "full" ring situation from the
2258 * hardware point of view...
2260 if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {
2261 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
2262 "nb_hold=%u nb_rx=%u",
2263 rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
2266 IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr, prev_id);
2270 rxq->nb_rx_hold = nb_hold;
2275 ixgbe_recv_pkts_lro_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
2278 return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, false);
2282 ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
2285 return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, true);
2288 /*********************************************************************
2290 * Queue management functions
2292 **********************************************************************/
2294 static void __attribute__((cold))
2295 ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
2299 if (txq->sw_ring != NULL) {
2300 for (i = 0; i < txq->nb_tx_desc; i++) {
2301 if (txq->sw_ring[i].mbuf != NULL) {
2302 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
2303 txq->sw_ring[i].mbuf = NULL;
2309 static void __attribute__((cold))
2310 ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
2313 txq->sw_ring != NULL)
2314 rte_free(txq->sw_ring);
2317 static void __attribute__((cold))
2318 ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
2320 if (txq != NULL && txq->ops != NULL) {
2321 txq->ops->release_mbufs(txq);
2322 txq->ops->free_swring(txq);
2327 void __attribute__((cold))
2328 ixgbe_dev_tx_queue_release(void *txq)
2330 ixgbe_tx_queue_release(txq);
2333 /* (Re)set dynamic ixgbe_tx_queue fields to defaults */
2334 static void __attribute__((cold))
2335 ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
2337 static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
2338 struct ixgbe_tx_entry *txe = txq->sw_ring;
2341 /* Zero out HW ring memory */
2342 for (i = 0; i < txq->nb_tx_desc; i++) {
2343 txq->tx_ring[i] = zeroed_desc;
2346 /* Initialize SW ring entries */
2347 prev = (uint16_t) (txq->nb_tx_desc - 1);
2348 for (i = 0; i < txq->nb_tx_desc; i++) {
2349 volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
2351 txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD);
2354 txe[prev].next_id = i;
2358 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
2359 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2362 txq->nb_tx_used = 0;
2364 * Always allow 1 descriptor to be un-allocated to avoid
2365 * a H/W race condition
2367 txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
2368 txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
2370 memset((void *)&txq->ctx_cache, 0,
2371 IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
2374 static const struct ixgbe_txq_ops def_txq_ops = {
2375 .release_mbufs = ixgbe_tx_queue_release_mbufs,
2376 .free_swring = ixgbe_tx_free_swring,
2377 .reset = ixgbe_reset_tx_queue,
2380 /* Takes an ethdev and a queue and sets up the tx function to be used based on
2381 * the queue parameters. Used in tx_queue_setup by primary process and then
2382 * in dev_init by secondary process when attaching to an existing ethdev.
2384 void __attribute__((cold))
2385 ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
2387 /* Use a simple Tx queue (no offloads, no multi segs) if possible */
2388 if ((txq->offloads == 0) &&
2389 #ifdef RTE_LIBRTE_SECURITY
2390 !(txq->using_ipsec) &&
2392 (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
2393 PMD_INIT_LOG(DEBUG, "Using simple tx code path");
2394 dev->tx_pkt_prepare = NULL;
2395 #ifdef RTE_IXGBE_INC_VECTOR
2396 if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
2397 (rte_eal_process_type() != RTE_PROC_PRIMARY ||
2398 ixgbe_txq_vec_setup(txq) == 0)) {
2399 PMD_INIT_LOG(DEBUG, "Vector tx enabled.");
2400 dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
2403 dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
2405 PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
2407 " - offloads = 0x%" PRIx64,
2410 " - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
2411 (unsigned long)txq->tx_rs_thresh,
2412 (unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
2413 dev->tx_pkt_burst = ixgbe_xmit_pkts;
2414 dev->tx_pkt_prepare = ixgbe_prep_pkts;
2419 ixgbe_get_tx_queue_offloads(struct rte_eth_dev *dev)
2427 ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
2429 uint64_t tx_offload_capa;
2430 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2433 DEV_TX_OFFLOAD_VLAN_INSERT |
2434 DEV_TX_OFFLOAD_IPV4_CKSUM |
2435 DEV_TX_OFFLOAD_UDP_CKSUM |
2436 DEV_TX_OFFLOAD_TCP_CKSUM |
2437 DEV_TX_OFFLOAD_SCTP_CKSUM |
2438 DEV_TX_OFFLOAD_TCP_TSO |
2439 DEV_TX_OFFLOAD_MULTI_SEGS;
2441 if (hw->mac.type == ixgbe_mac_82599EB ||
2442 hw->mac.type == ixgbe_mac_X540)
2443 tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
2445 if (hw->mac.type == ixgbe_mac_X550 ||
2446 hw->mac.type == ixgbe_mac_X550EM_x ||
2447 hw->mac.type == ixgbe_mac_X550EM_a)
2448 tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
2450 #ifdef RTE_LIBRTE_SECURITY
2451 if (dev->security_ctx)
2452 tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
2454 return tx_offload_capa;
2457 int __attribute__((cold))
2458 ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
2461 unsigned int socket_id,
2462 const struct rte_eth_txconf *tx_conf)
2464 const struct rte_memzone *tz;
2465 struct ixgbe_tx_queue *txq;
2466 struct ixgbe_hw *hw;
2467 uint16_t tx_rs_thresh, tx_free_thresh;
2470 PMD_INIT_FUNC_TRACE();
2471 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2473 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
2476 * Validate number of transmit descriptors.
2477 * It must not exceed hardware maximum, and must be multiple
2480 if (nb_desc % IXGBE_TXD_ALIGN != 0 ||
2481 (nb_desc > IXGBE_MAX_RING_DESC) ||
2482 (nb_desc < IXGBE_MIN_RING_DESC)) {
2487 * The following two parameters control the setting of the RS bit on
2488 * transmit descriptors.
2489 * TX descriptors will have their RS bit set after txq->tx_rs_thresh
2490 * descriptors have been used.
2491 * The TX descriptor ring will be cleaned after txq->tx_free_thresh
2492 * descriptors are used or if the number of descriptors required
2493 * to transmit a packet is greater than the number of free TX
2495 * The following constraints must be satisfied:
2496 * tx_rs_thresh must be greater than 0.
2497 * tx_rs_thresh must be less than the size of the ring minus 2.
2498 * tx_rs_thresh must be less than or equal to tx_free_thresh.
2499 * tx_rs_thresh must be a divisor of the ring size.
2500 * tx_free_thresh must be greater than 0.
2501 * tx_free_thresh must be less than the size of the ring minus 3.
2502 * tx_free_thresh + tx_rs_thresh must not exceed nb_desc.
2503 * One descriptor in the TX ring is used as a sentinel to avoid a
2504 * H/W race condition, hence the maximum threshold constraints.
2505 * When set to zero use default values.
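 *
 * Worked example (illustrative): for nb_desc = 512 with both fields
 * left at zero, tx_free_thresh and tx_rs_thresh fall back to
 * DEFAULT_TX_FREE_THRESH and DEFAULT_TX_RS_THRESH (assumed to be 32
 * each, per ixgbe_rxtx.h); then 32 < 512 - 2, 32 <= 32 and
 * 512 % 32 == 0, so every constraint above holds.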
2507 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
2508 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
2509 /* force tx_rs_thresh to adapt to an aggressive tx_free_thresh */
2510 tx_rs_thresh = (DEFAULT_TX_RS_THRESH + tx_free_thresh > nb_desc) ?
2511 nb_desc - tx_free_thresh : DEFAULT_TX_RS_THRESH;
2512 if (tx_conf->tx_rs_thresh > 0)
2513 tx_rs_thresh = tx_conf->tx_rs_thresh;
2514 if (tx_rs_thresh + tx_free_thresh > nb_desc) {
2515 PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not "
2516 "exceed nb_desc. (tx_rs_thresh=%u "
2517 "tx_free_thresh=%u nb_desc=%u port = %d queue=%d)",
2518 (unsigned int)tx_rs_thresh,
2519 (unsigned int)tx_free_thresh,
2520 (unsigned int)nb_desc,
2521 (int)dev->data->port_id,
2525 if (tx_rs_thresh >= (nb_desc - 2)) {
2526 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number "
2527 "of TX descriptors minus 2. (tx_rs_thresh=%u "
2528 "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2529 (int)dev->data->port_id, (int)queue_idx);
2532 if (tx_rs_thresh > DEFAULT_TX_RS_THRESH) {
2533 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less or equal than %u. "
2534 "(tx_rs_thresh=%u port=%d queue=%d)",
2535 DEFAULT_TX_RS_THRESH, (unsigned int)tx_rs_thresh,
2536 (int)dev->data->port_id, (int)queue_idx);
2539 if (tx_free_thresh >= (nb_desc - 3)) {
2540 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
2541 "tx_free_thresh must be less than the number of "
2542 "TX descriptors minus 3. (tx_free_thresh=%u "
2543 "port=%d queue=%d)",
2544 (unsigned int)tx_free_thresh,
2545 (int)dev->data->port_id, (int)queue_idx);
2548 if (tx_rs_thresh > tx_free_thresh) {
2549 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to "
2550 "tx_free_thresh. (tx_free_thresh=%u "
2551 "tx_rs_thresh=%u port=%d queue=%d)",
2552 (unsigned int)tx_free_thresh,
2553 (unsigned int)tx_rs_thresh,
2554 (int)dev->data->port_id,
2558 if ((nb_desc % tx_rs_thresh) != 0) {
2559 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
2560 "number of TX descriptors. (tx_rs_thresh=%u "
2561 "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2562 (int)dev->data->port_id, (int)queue_idx);
2567 * If tx_rs_thresh is greater than 1, then TX WTHRESH should be
2568 * set to 0. If WTHRESH is greater than zero, the RS bit is ignored
2569 * by the NIC and all descriptors are written back after the NIC
2570 * accumulates WTHRESH descriptors.
2572 if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
2573 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
2574 "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
2575 "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2576 (int)dev->data->port_id, (int)queue_idx);
2580 /* Free memory prior to re-allocation if needed... */
2581 if (dev->data->tx_queues[queue_idx] != NULL) {
2582 ixgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
2583 dev->data->tx_queues[queue_idx] = NULL;
2586 /* First allocate the tx queue data structure */
2587 txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue),
2588 RTE_CACHE_LINE_SIZE, socket_id);
2593 * Allocate TX ring hardware descriptors. A memzone large enough to
2594 * handle the maximum ring size is allocated in order to allow for
2595 * resizing in later calls to the queue setup function.
2597 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
2598 sizeof(union ixgbe_adv_tx_desc) * IXGBE_MAX_RING_DESC,
2599 IXGBE_ALIGN, socket_id);
2601 ixgbe_tx_queue_release(txq);
2605 txq->nb_tx_desc = nb_desc;
2606 txq->tx_rs_thresh = tx_rs_thresh;
2607 txq->tx_free_thresh = tx_free_thresh;
2608 txq->pthresh = tx_conf->tx_thresh.pthresh;
2609 txq->hthresh = tx_conf->tx_thresh.hthresh;
2610 txq->wthresh = tx_conf->tx_thresh.wthresh;
2611 txq->queue_id = queue_idx;
2612 txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2613 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2614 txq->port_id = dev->data->port_id;
2615 txq->offloads = offloads;
2616 txq->ops = &def_txq_ops;
2617 txq->tx_deferred_start = tx_conf->tx_deferred_start;
2618 #ifdef RTE_LIBRTE_SECURITY
2619 txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
2620 DEV_TX_OFFLOAD_SECURITY);
2624 * Set VFTDT for virtual functions if a VF is detected.
2626 if (hw->mac.type == ixgbe_mac_82599_vf ||
2627 hw->mac.type == ixgbe_mac_X540_vf ||
2628 hw->mac.type == ixgbe_mac_X550_vf ||
2629 hw->mac.type == ixgbe_mac_X550EM_x_vf ||
2630 hw->mac.type == ixgbe_mac_X550EM_a_vf)
2631 txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx));
2633 txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
2635 txq->tx_ring_phys_addr = tz->iova;
2636 txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;
2638 /* Allocate software ring */
2639 txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
2640 sizeof(struct ixgbe_tx_entry) * nb_desc,
2641 RTE_CACHE_LINE_SIZE, socket_id);
2642 if (txq->sw_ring == NULL) {
2643 ixgbe_tx_queue_release(txq);
2646 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
2647 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
2649 /* set up vector or scalar TX function as appropriate */
2650 ixgbe_set_tx_function(dev, txq);
2652 txq->ops->reset(txq);
2654 dev->data->tx_queues[queue_idx] = txq;
2661 * ixgbe_free_sc_cluster - free the not-yet-completed scattered cluster
2663 * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
2664 * in the sw_rsc_ring is not set to NULL but rather points to the next
2665 * mbuf of this RSC aggregation (that has not been completed yet and still
2666 * resides on the HW ring). So, instead of calling rte_pktmbuf_free() we
2667 * just free the first "nb_segs" segments of the cluster explicitly by
2668 * calling rte_pktmbuf_free_seg() on each of them.
2670 * @m scattered cluster head
2672 static void __attribute__((cold))
2673 ixgbe_free_sc_cluster(struct rte_mbuf *m)
2675 uint16_t i, nb_segs = m->nb_segs;
2676 struct rte_mbuf *next_seg;
2678 for (i = 0; i < nb_segs; i++) {
2680 rte_pktmbuf_free_seg(m);
2685 static void __attribute__((cold))
2686 ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
2690 #ifdef RTE_IXGBE_INC_VECTOR
2691 /* SSE Vector driver has a different way of releasing mbufs. */
2692 if (rxq->rx_using_sse) {
2693 ixgbe_rx_queue_release_mbufs_vec(rxq);
2698 if (rxq->sw_ring != NULL) {
2699 for (i = 0; i < rxq->nb_rx_desc; i++) {
2700 if (rxq->sw_ring[i].mbuf != NULL) {
2701 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
2702 rxq->sw_ring[i].mbuf = NULL;
2705 if (rxq->rx_nb_avail) {
2706 for (i = 0; i < rxq->rx_nb_avail; ++i) {
2707 struct rte_mbuf *mb;
2709 mb = rxq->rx_stage[rxq->rx_next_avail + i];
2710 rte_pktmbuf_free_seg(mb);
2712 rxq->rx_nb_avail = 0;
2716 if (rxq->sw_sc_ring)
2717 for (i = 0; i < rxq->nb_rx_desc; i++)
2718 if (rxq->sw_sc_ring[i].fbuf) {
2719 ixgbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
2720 rxq->sw_sc_ring[i].fbuf = NULL;
2724 static void __attribute__((cold))
2725 ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)
2728 ixgbe_rx_queue_release_mbufs(rxq);
2729 rte_free(rxq->sw_ring);
2730 rte_free(rxq->sw_sc_ring);
2735 void __attribute__((cold))
2736 ixgbe_dev_rx_queue_release(void *rxq)
2738 ixgbe_rx_queue_release(rxq);
2742 * Check if Rx Burst Bulk Alloc function can be used.
2744 * 0: the preconditions are satisfied and the bulk allocation function can be used.
2746 * -EINVAL: the preconditions are NOT satisfied and the default Rx burst
2747 * function must be used.
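 *
 * Illustrative example: a queue with nb_rx_desc = 128 and
 * rx_free_thresh = 32 satisfies all three checks below
 * (32 >= RTE_PMD_IXGBE_RX_MAX_BURST, 32 < 128, 128 % 32 == 0),
 * assuming RTE_PMD_IXGBE_RX_MAX_BURST is 32 as defined in
 * ixgbe_rxtx.h.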
2749 static inline int __attribute__((cold))
2750 check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
2755 * Make sure the following pre-conditions are satisfied:
2756 * rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST
2757 * rxq->rx_free_thresh < rxq->nb_rx_desc
2758 * (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
2759 * Scattered packets are not supported. This should be checked
2760 * outside of this function.
2762 if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) {
2763 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2764 "rxq->rx_free_thresh=%d, "
2765 "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
2766 rxq->rx_free_thresh, RTE_PMD_IXGBE_RX_MAX_BURST);
2768 } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
2769 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2770 "rxq->rx_free_thresh=%d, "
2771 "rxq->nb_rx_desc=%d",
2772 rxq->rx_free_thresh, rxq->nb_rx_desc);
2774 } else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) {
2775 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2776 "rxq->nb_rx_desc=%d, "
2777 "rxq->rx_free_thresh=%d",
2778 rxq->nb_rx_desc, rxq->rx_free_thresh);
2785 /* Reset dynamic ixgbe_rx_queue fields back to defaults */
2786 static void __attribute__((cold))
2787 ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
2789 static const union ixgbe_adv_rx_desc zeroed_desc = {{0}};
2791 uint16_t len = rxq->nb_rx_desc;
2794 * By default, the Rx queue setup function allocates enough memory for
2795 * IXGBE_MAX_RING_DESC. The Rx Burst bulk allocation function requires
2796 * extra memory at the end of the descriptor ring to be zeroed out.
2798 if (adapter->rx_bulk_alloc_allowed)
2799 /* zero out extra memory */
2800 len += RTE_PMD_IXGBE_RX_MAX_BURST;
2803 * Zero out HW ring memory. Zero out extra memory at the end of
2804 * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
2805 * reads extra memory as zeros.
2807 for (i = 0; i < len; i++) {
2808 rxq->rx_ring[i] = zeroed_desc;
2812 * Initialize extra software ring entries. Space for these extra
2813 * entries is always allocated.
2815 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
2816 for (i = rxq->nb_rx_desc; i < len; ++i) {
2817 rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
2820 rxq->rx_nb_avail = 0;
2821 rxq->rx_next_avail = 0;
2822 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
2824 rxq->nb_rx_hold = 0;
2825 rxq->pkt_first_seg = NULL;
2826 rxq->pkt_last_seg = NULL;
2828 #ifdef RTE_IXGBE_INC_VECTOR
2829 rxq->rxrearm_start = 0;
2830 rxq->rxrearm_nb = 0;
2835 ixgbe_is_vf(struct rte_eth_dev *dev)
2837 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2839 switch (hw->mac.type) {
2840 case ixgbe_mac_82599_vf:
2841 case ixgbe_mac_X540_vf:
2842 case ixgbe_mac_X550_vf:
2843 case ixgbe_mac_X550EM_x_vf:
2844 case ixgbe_mac_X550EM_a_vf:
2852 ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev)
2854 uint64_t offloads = 0;
2855 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2857 if (hw->mac.type != ixgbe_mac_82598EB)
2858 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
2864 ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
2867 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2869 offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
2870 DEV_RX_OFFLOAD_UDP_CKSUM |
2871 DEV_RX_OFFLOAD_TCP_CKSUM |
2872 DEV_RX_OFFLOAD_KEEP_CRC |
2873 DEV_RX_OFFLOAD_JUMBO_FRAME |
2874 DEV_RX_OFFLOAD_VLAN_FILTER |
2875 DEV_RX_OFFLOAD_SCATTER |
2876 DEV_RX_OFFLOAD_RSS_HASH;
2878 if (hw->mac.type == ixgbe_mac_82598EB)
2879 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
2881 if (ixgbe_is_vf(dev) == 0)
2882 offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
2885 * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
2888 if ((hw->mac.type == ixgbe_mac_82599EB ||
2889 hw->mac.type == ixgbe_mac_X540 ||
2890 hw->mac.type == ixgbe_mac_X550) &&
2891 !RTE_ETH_DEV_SRIOV(dev).active)
2892 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
2894 if (hw->mac.type == ixgbe_mac_82599EB ||
2895 hw->mac.type == ixgbe_mac_X540)
2896 offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
2898 if (hw->mac.type == ixgbe_mac_X550 ||
2899 hw->mac.type == ixgbe_mac_X550EM_x ||
2900 hw->mac.type == ixgbe_mac_X550EM_a)
2901 offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
2903 #ifdef RTE_LIBRTE_SECURITY
2904 if (dev->security_ctx)
2905 offloads |= DEV_RX_OFFLOAD_SECURITY;
2911 int __attribute__((cold))
2912 ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
2915 unsigned int socket_id,
2916 const struct rte_eth_rxconf *rx_conf,
2917 struct rte_mempool *mp)
2919 const struct rte_memzone *rz;
2920 struct ixgbe_rx_queue *rxq;
2921 struct ixgbe_hw *hw;
2923 struct ixgbe_adapter *adapter = dev->data->dev_private;
2926 PMD_INIT_FUNC_TRACE();
2927 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2929 offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
2932 * Validate number of receive descriptors.
2933 * It must not exceed hardware maximum, and must be multiple
2936 if (nb_desc % IXGBE_RXD_ALIGN != 0 ||
2937 (nb_desc > IXGBE_MAX_RING_DESC) ||
2938 (nb_desc < IXGBE_MIN_RING_DESC)) {
2942 /* Free memory prior to re-allocation if needed... */
2943 if (dev->data->rx_queues[queue_idx] != NULL) {
2944 ixgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
2945 dev->data->rx_queues[queue_idx] = NULL;
2948 /* First allocate the rx queue data structure */
2949 rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct ixgbe_rx_queue),
2950 RTE_CACHE_LINE_SIZE, socket_id);
2954 rxq->nb_rx_desc = nb_desc;
2955 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
2956 rxq->queue_id = queue_idx;
2957 rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2958 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2959 rxq->port_id = dev->data->port_id;
2960 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
2961 rxq->crc_len = RTE_ETHER_CRC_LEN;
2964 rxq->drop_en = rx_conf->rx_drop_en;
2965 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
2966 rxq->offloads = offloads;
2969 * The packet type in RX descriptor is different for different NICs.
2970 * Some bits are used for x550 but reserved for other NICs.
2971 * So set different masks for different NICs.
2973 if (hw->mac.type == ixgbe_mac_X550 ||
2974 hw->mac.type == ixgbe_mac_X550EM_x ||
2975 hw->mac.type == ixgbe_mac_X550EM_a ||
2976 hw->mac.type == ixgbe_mac_X550_vf ||
2977 hw->mac.type == ixgbe_mac_X550EM_x_vf ||
2978 hw->mac.type == ixgbe_mac_X550EM_a_vf)
2979 rxq->pkt_type_mask = IXGBE_PACKET_TYPE_MASK_X550;
2981 rxq->pkt_type_mask = IXGBE_PACKET_TYPE_MASK_82599;
2984 * Allocate RX ring hardware descriptors. A memzone large enough to
2985 * handle the maximum ring size is allocated in order to allow for
2986 * resizing in later calls to the queue setup function.
2988 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
2989 RX_RING_SZ, IXGBE_ALIGN, socket_id);
2991 ixgbe_rx_queue_release(rxq);
2996 * Zero init all the descriptors in the ring.
2998 memset(rz->addr, 0, RX_RING_SZ);
3001 * Set up VFRDT for virtual functions.
3003 if (hw->mac.type == ixgbe_mac_82599_vf ||
3004 hw->mac.type == ixgbe_mac_X540_vf ||
3005 hw->mac.type == ixgbe_mac_X550_vf ||
3006 hw->mac.type == ixgbe_mac_X550EM_x_vf ||
3007 hw->mac.type == ixgbe_mac_X550EM_a_vf) {
3009 IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx));
3011 IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDH(queue_idx));
3014 IXGBE_PCI_REG_ADDR(hw, IXGBE_RDT(rxq->reg_idx));
3016 IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx));
3019 rxq->rx_ring_phys_addr = rz->iova;
3020 rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
3023 * Certain constraints must be met in order to use the bulk buffer
3024 * allocation Rx burst function. If any Rx queue doesn't meet them,
3025 * the feature should be disabled for the whole port.
3027 if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
3028 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc "
3029 "preconditions - canceling the feature for "
3030 "the whole port[%d]",
3031 rxq->queue_id, rxq->port_id);
3032 adapter->rx_bulk_alloc_allowed = false;
3036 * Allocate software ring. Allow for space at the end of the
3037 * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
3038 * function does not access an invalid memory region.
3041 if (adapter->rx_bulk_alloc_allowed)
3042 len += RTE_PMD_IXGBE_RX_MAX_BURST;
3044 rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
3045 sizeof(struct ixgbe_rx_entry) * len,
3046 RTE_CACHE_LINE_SIZE, socket_id);
3047 if (!rxq->sw_ring) {
3048 ixgbe_rx_queue_release(rxq);
3053 * Always allocate even if it's not going to be needed in order to
3054 * simplify the code.
3056 * This ring is used in LRO and Scattered Rx cases and Scattered Rx may
3057 * be requested in ixgbe_dev_rx_init(), which is called later from the dev_start() flow.
3061 rte_zmalloc_socket("rxq->sw_sc_ring",
3062 sizeof(struct ixgbe_scattered_rx_entry) * len,
3063 RTE_CACHE_LINE_SIZE, socket_id);
3064 if (!rxq->sw_sc_ring) {
3065 ixgbe_rx_queue_release(rxq);
3069 PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p "
3070 "dma_addr=0x%"PRIx64,
3071 rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
3072 rxq->rx_ring_phys_addr);
3074 if (!rte_is_power_of_2(nb_desc)) {
3075 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
3076 "preconditions - canceling the feature for "
3077 "the whole port[%d]",
3078 rxq->queue_id, rxq->port_id);
3079 adapter->rx_vec_allowed = false;
3081 ixgbe_rxq_vec_setup(rxq);
3083 dev->data->rx_queues[queue_idx] = rxq;
3085 ixgbe_reset_rx_queue(adapter, rxq);
3091 ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
3093 #define IXGBE_RXQ_SCAN_INTERVAL 4
3094 volatile union ixgbe_adv_rx_desc *rxdp;
3095 struct ixgbe_rx_queue *rxq;
3098 rxq = dev->data->rx_queues[rx_queue_id];
3099 rxdp = &(rxq->rx_ring[rxq->rx_tail]);
3101 while ((desc < rxq->nb_rx_desc) &&
3102 (rxdp->wb.upper.status_error &
3103 rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) {
3104 desc += IXGBE_RXQ_SCAN_INTERVAL;
3105 rxdp += IXGBE_RXQ_SCAN_INTERVAL;
3106 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
3107 rxdp = &(rxq->rx_ring[rxq->rx_tail +
3108 desc - rxq->nb_rx_desc]);
3115 ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
3117 volatile union ixgbe_adv_rx_desc *rxdp;
3118 struct ixgbe_rx_queue *rxq = rx_queue;
3121 if (unlikely(offset >= rxq->nb_rx_desc))
3123 desc = rxq->rx_tail + offset;
3124 if (desc >= rxq->nb_rx_desc)
3125 desc -= rxq->nb_rx_desc;
3127 rxdp = &rxq->rx_ring[desc];
3128 return !!(rxdp->wb.upper.status_error &
3129 rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD));
3133 ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
3135 struct ixgbe_rx_queue *rxq = rx_queue;
3136 volatile uint32_t *status;
3137 uint32_t nb_hold, desc;
3139 if (unlikely(offset >= rxq->nb_rx_desc))
3142 #ifdef RTE_IXGBE_INC_VECTOR
3143 if (rxq->rx_using_sse)
3144 nb_hold = rxq->rxrearm_nb;
3147 nb_hold = rxq->nb_rx_hold;
3148 if (offset >= rxq->nb_rx_desc - nb_hold)
3149 return RTE_ETH_RX_DESC_UNAVAIL;
3151 desc = rxq->rx_tail + offset;
3152 if (desc >= rxq->nb_rx_desc)
3153 desc -= rxq->nb_rx_desc;
3155 status = &rxq->rx_ring[desc].wb.upper.status_error;
3156 if (*status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))
3157 return RTE_ETH_RX_DESC_DONE;
3159 return RTE_ETH_RX_DESC_AVAIL;
3163 ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
3165 struct ixgbe_tx_queue *txq = tx_queue;
3166 volatile uint32_t *status;
3169 if (unlikely(offset >= txq->nb_tx_desc))
3172 desc = txq->tx_tail + offset;
3173 /* go to next desc that has the RS bit */
3174 desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
3176 if (desc >= txq->nb_tx_desc) {
3177 desc -= txq->nb_tx_desc;
3178 if (desc >= txq->nb_tx_desc)
3179 desc -= txq->nb_tx_desc;
3182 status = &txq->tx_ring[desc].wb.status;
3183 if (*status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD))
3184 return RTE_ETH_TX_DESC_DONE;
3186 return RTE_ETH_TX_DESC_FULL;
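/*
 * Worked example for the RS-boundary rounding above (illustrative):
 * with tx_rs_thresh = 32, tx_tail = 100 and offset = 5, desc = 105 is
 * rounded up to ((105 + 31) / 32) * 32 = 128, the next multiple of
 * tx_rs_thresh and thus the next descriptor whose DD bit is actually
 * written back by the hardware.
 */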
3190 * Set up link loopback for X540/X550 mode Tx->Rx.
3192 static inline void __attribute__((cold))
3193 ixgbe_setup_loopback_link_x540_x550(struct ixgbe_hw *hw, bool enable)
3196 PMD_INIT_FUNC_TRACE();
3198 u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
3200 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
3201 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
3202 macc = IXGBE_READ_REG(hw, IXGBE_MACC);
3205 /* datasheet 15.2.1: disable AUTONEG (PHY Bit 7.0.C) */
3206 autoneg_reg |= IXGBE_MII_AUTONEG_ENABLE;
3207 /* datasheet 15.2.1: MACC.FLU = 1 (force link up) */
3208 macc |= IXGBE_MACC_FLU;
3210 autoneg_reg &= ~IXGBE_MII_AUTONEG_ENABLE;
3211 macc &= ~IXGBE_MACC_FLU;
3214 hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
3215 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
3217 IXGBE_WRITE_REG(hw, IXGBE_MACC, macc);
3220 void __attribute__((cold))
3221 ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
3224 struct ixgbe_adapter *adapter = dev->data->dev_private;
3225 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3227 PMD_INIT_FUNC_TRACE();
3229 for (i = 0; i < dev->data->nb_tx_queues; i++) {
3230 struct ixgbe_tx_queue *txq = dev->data->tx_queues[i];
3233 txq->ops->release_mbufs(txq);
3234 txq->ops->reset(txq);
3238 for (i = 0; i < dev->data->nb_rx_queues; i++) {
3239 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
3242 ixgbe_rx_queue_release_mbufs(rxq);
3243 ixgbe_reset_rx_queue(adapter, rxq);
3246 /* If loopback mode was enabled, reconfigure the link accordingly */
3247 if (dev->data->dev_conf.lpbk_mode != 0) {
3248 if (hw->mac.type == ixgbe_mac_X540 ||
3249 hw->mac.type == ixgbe_mac_X550 ||
3250 hw->mac.type == ixgbe_mac_X550EM_x ||
3251 hw->mac.type == ixgbe_mac_X550EM_a)
3252 ixgbe_setup_loopback_link_x540_x550(hw, false);
3257 ixgbe_dev_free_queues(struct rte_eth_dev *dev)
3261 PMD_INIT_FUNC_TRACE();
3263 for (i = 0; i < dev->data->nb_rx_queues; i++) {
3264 ixgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
3265 dev->data->rx_queues[i] = NULL;
3267 dev->data->nb_rx_queues = 0;
3269 for (i = 0; i < dev->data->nb_tx_queues; i++) {
3270 ixgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
3271 dev->data->tx_queues[i] = NULL;
3273 dev->data->nb_tx_queues = 0;
3276 /*********************************************************************
3278 * Device RX/TX init functions
3280 **********************************************************************/
3283 * Receive Side Scaling (RSS)
3284 * See section 7.1.2.8 in the following document:
3285 * "Intel 82599 10 GbE Controller Datasheet" - Revision 2.1 October 2009
3288 * The source and destination IP addresses of the IP header and the source
3289 * and destination ports of TCP/UDP headers, if any, of received packets are
3290 * hashed against a configurable random key to compute a 32-bit RSS hash result.
3291 * The seven (7) LSBs of the 32-bit hash result are used as an index into a
3292 * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
3293 * RSS output index which is used as the RX queue index in which to store the received packets.
3295 * The following output is supplied in the RX write-back descriptor:
3296 * - 32-bit result of the Microsoft RSS hash function,
3297 * - 4-bit RSS type field.
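/*
 * Illustrative sketch of the resulting queue selection (assumed helper
 * name rss_hash_of(); a sketch, not driver code):
 *
 *   uint32_t hash = rss_hash_of(pkt);      32-bit Microsoft RSS hash
 *   uint8_t reta_idx = hash & 0x7F;        7 LSBs index the 128-entry RETA
 *   uint8_t rx_queue = reta[reta_idx];     RSS output index = RX queue
 */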
3301 * RSS random key supplied in section 7.1.2.8.3 of the Intel 82599 datasheet.
3302 * Used as the default key.
3304 static uint8_t rss_intel_key[40] = {
3305 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
3306 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
3307 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
3308 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
3309 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
3313 ixgbe_rss_disable(struct rte_eth_dev *dev)
3315 struct ixgbe_hw *hw;
3319 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3320 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3321 mrqc = IXGBE_READ_REG(hw, mrqc_reg);
3322 mrqc &= ~IXGBE_MRQC_RSSEN;
3323 IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
3327 ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
3337 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3338 rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
3340 hash_key = rss_conf->rss_key;
3341 if (hash_key != NULL) {
3342 /* Fill in RSS hash key */
3343 for (i = 0; i < 10; i++) {
3344 rss_key = hash_key[(i * 4)];
3345 rss_key |= hash_key[(i * 4) + 1] << 8;
3346 rss_key |= hash_key[(i * 4) + 2] << 16;
3347 rss_key |= hash_key[(i * 4) + 3] << 24;
3348 IXGBE_WRITE_REG_ARRAY(hw, rssrk_reg, i, rss_key);
3352 /* Set configured hashing protocols in MRQC register */
3353 rss_hf = rss_conf->rss_hf;
3354 mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */
3355 if (rss_hf & ETH_RSS_IPV4)
3356 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
3357 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
3358 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
3359 if (rss_hf & ETH_RSS_IPV6)
3360 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
3361 if (rss_hf & ETH_RSS_IPV6_EX)
3362 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
3363 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
3364 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3365 if (rss_hf & ETH_RSS_IPV6_TCP_EX)
3366 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
3367 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
3368 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3369 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
3370 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3371 if (rss_hf & ETH_RSS_IPV6_UDP_EX)
3372 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
3373 IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
3377 ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
3378 struct rte_eth_rss_conf *rss_conf)
3380 struct ixgbe_hw *hw;
3385 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3387 if (!ixgbe_rss_update_sp(hw->mac.type)) {
3388 PMD_DRV_LOG(ERR, "RSS hash update is not supported on this "
3392 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3395 * Excerpt from section 7.1.2.8 Receive-Side Scaling (RSS):
3396 * "RSS enabling cannot be done dynamically while it must be
3397 * preceded by a software reset"
3398 * Before changing anything, first check that the update RSS operation
3399 * does not attempt to disable RSS, if RSS was enabled at
3400 * initialization time, or does not attempt to enable RSS, if RSS was
3401 * disabled at initialization time.
3403 rss_hf = rss_conf->rss_hf & IXGBE_RSS_OFFLOAD_ALL;
3404 mrqc = IXGBE_READ_REG(hw, mrqc_reg);
3405 if (!(mrqc & IXGBE_MRQC_RSSEN)) { /* RSS disabled */
3406 if (rss_hf != 0) /* Enable RSS */
3408 return 0; /* Nothing to do */
3411 if (rss_hf == 0) /* Disable RSS */
3413 ixgbe_hw_rss_hash_set(hw, rss_conf);
3418 ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
3419 struct rte_eth_rss_conf *rss_conf)
3421 struct ixgbe_hw *hw;
3430 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3431 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3432 rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
3433 hash_key = rss_conf->rss_key;
3434 if (hash_key != NULL) {
3435 /* Return RSS hash key */
3436 for (i = 0; i < 10; i++) {
3437 rss_key = IXGBE_READ_REG_ARRAY(hw, rssrk_reg, i);
3438 hash_key[(i * 4)] = rss_key & 0x000000FF;
3439 hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
3440 hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
3441 hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
3445 /* Get RSS functions configured in MRQC register */
3446 mrqc = IXGBE_READ_REG(hw, mrqc_reg);
3447 if ((mrqc & IXGBE_MRQC_RSSEN) == 0) { /* RSS is disabled */
3448 rss_conf->rss_hf = 0;
3452 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4)
3453 rss_hf |= ETH_RSS_IPV4;
3454 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP)
3455 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
3456 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6)
3457 rss_hf |= ETH_RSS_IPV6;
3458 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX)
3459 rss_hf |= ETH_RSS_IPV6_EX;
3460 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP)
3461 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
3462 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP)
3463 rss_hf |= ETH_RSS_IPV6_TCP_EX;
3464 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP)
3465 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
3466 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP)
3467 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
3468 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP)
3469 rss_hf |= ETH_RSS_IPV6_UDP_EX;
3470 rss_conf->rss_hf = rss_hf;
3475 ixgbe_rss_configure(struct rte_eth_dev *dev)
3477 struct rte_eth_rss_conf rss_conf;
3478 struct ixgbe_adapter *adapter;
3479 struct ixgbe_hw *hw;
3483 uint16_t sp_reta_size;
3486 PMD_INIT_FUNC_TRACE();
3487 adapter = dev->data->dev_private;
3488 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3490 sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
3493 * Fill in redirection table
3494 * The byte-swap is needed because NIC registers are in
3495 * little-endian order.
3497 if (adapter->rss_reta_updated == 0) {
3499 for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
3500 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
3502 if (j == dev->data->nb_rx_queues)
3504 reta = (reta << 8) | j;
3506 IXGBE_WRITE_REG(hw, reta_reg,
3512 * Configure the RSS key and the RSS protocols used to compute
3513 * the RSS hash of input packets.
3515 rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
3516 if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
3517 ixgbe_rss_disable(dev);
3520 if (rss_conf.rss_key == NULL)
3521 rss_conf.rss_key = rss_intel_key; /* Default hash key */
3522 ixgbe_hw_rss_hash_set(hw, &rss_conf);
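/*
 * Illustrative example for the RETA fill loop above: with 4 RX queues,
 * j cycles through 0..3 and four consecutive 8-bit entries are packed
 * into each 32-bit RETA register, so the first register gets entries
 * {0, 1, 2, 3}, i.e. reta = 0x00010203, before the byte-swap noted
 * above is applied.
 */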
3525 #define NUM_VFTA_REGISTERS 128
3526 #define NIC_RX_BUFFER_SIZE 0x200
3527 #define X550_RX_BUFFER_SIZE 0x180
3530 ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
3532 struct rte_eth_vmdq_dcb_conf *cfg;
3533 struct ixgbe_hw *hw;
3534 enum rte_eth_nb_pools num_pools;
3535 uint32_t mrqc, vt_ctl, queue_mapping, vlanctrl;
3537 uint8_t nb_tcs; /* number of traffic classes */
3540 PMD_INIT_FUNC_TRACE();
3541 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3542 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
3543 num_pools = cfg->nb_queue_pools;
3544 /* Check we have a valid number of pools */
3545 if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
3546 ixgbe_rss_disable(dev);
3549 /* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
3550 nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
3554 * split rx buffer up into sections, each for 1 traffic class
3556 switch (hw->mac.type) {
3557 case ixgbe_mac_X550:
3558 case ixgbe_mac_X550EM_x:
3559 case ixgbe_mac_X550EM_a:
3560 pbsize = (uint16_t)(X550_RX_BUFFER_SIZE / nb_tcs);
3563 pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
3566 for (i = 0; i < nb_tcs; i++) {
3567 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
3569 rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
3570 /* clear 10 bits. */
3571 rxpbsize |= (pbsize << IXGBE_RXPBSIZE_SHIFT); /* set value */
3572 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
3574 /* zero alloc all unused TCs */
3575 for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3576 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
3578 rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
3579 /* clear 10 bits. */
3580 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
3583 /* MRQC: enable vmdq and dcb */
3584 mrqc = (num_pools == ETH_16_POOLS) ?
3585 IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN;
3586 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3588 /* PFVTCTL: turn on virtualisation and set the default pool */
3589 vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
3590 if (cfg->enable_default_pool) {
3591 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
3593 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
3596 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
3598 /* RTRUP2TC: mapping user priorities to traffic classes (TCs) */
3600 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
3602 * mapping is done with 3 bits per priority,
3603 * so shift by i*3 each time
3605 queue_mapping |= ((cfg->dcb_tc[i] & 0x07) << (i * 3));
3607 IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, queue_mapping);
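/*
 * Worked example (illustrative): with dcb_tc = {0, 0, 1, 1, 2, 2, 3, 3},
 * priority i contributes its TC at bits [3i+2:3i], so
 *
 *   queue_mapping = (1 << 6) | (1 << 9) | (2 << 12) | (2 << 15) |
 *                   (3 << 18) | (3 << 21) = 0x006D2240
 */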
3609 /* RTRPCS: DCB related */
3610 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, IXGBE_RMCS_RRM);
3612 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
3613 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3614 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
3615 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
3617 /* VFTA - enable all vlan filters */
3618 for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
3619 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
3622 /* VFRE: pool enabling for receive - 16 or 32 */
3623 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0),
3624 num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
3627 * MPSAR - allow pools to read specific mac addresses
3628 * In this case, all pools should be able to read from mac addr 0
3630 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), 0xFFFFFFFF);
3631 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), 0xFFFFFFFF);
3633 /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
3634 for (i = 0; i < cfg->nb_pool_maps; i++) {
3635 /* set vlan id in VF register and set the valid bit */
3636 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN |
3637 (cfg->pool_map[i].vlan_id & 0xFFF)));
3639 * Put the allowed pools in VFB reg. As we only have 16 or 32
3640 * pools, we only need to use the first half of the register
3643 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), cfg->pool_map[i].pools);
3648 * ixgbe_dcb_tx_hw_config - Configure general DCB TX parameters
3649 * @dev: pointer to eth_dev structure
3650 * @dcb_config: pointer to ixgbe_dcb_config structure
3653 ixgbe_dcb_tx_hw_config(struct rte_eth_dev *dev,
3654 struct ixgbe_dcb_config *dcb_config)
3657 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3659 PMD_INIT_FUNC_TRACE();
3660 if (hw->mac.type != ixgbe_mac_82598EB) {
3661 /* Disable the Tx desc arbiter so that MTQC can be changed */
3662 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3663 reg |= IXGBE_RTTDCS_ARBDIS;
3664 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3666 /* Enable DCB for Tx with 8 TCs */
3667 if (dcb_config->num_tcs.pg_tcs == 8) {
3668 reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3670 reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3672 if (dcb_config->vt_mode)
3673 reg |= IXGBE_MTQC_VT_ENA;
3674 IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
3676 /* Enable the Tx desc arbiter */
3677 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3678 reg &= ~IXGBE_RTTDCS_ARBDIS;
3679 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3681 /* Enable Security TX Buffer IFG for DCB */
3682 reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
3683 reg |= IXGBE_SECTX_DCB;
3684 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
3689 * ixgbe_vmdq_dcb_hw_tx_config - Configure general VMDQ+DCB TX parameters
3690 * @dev: pointer to rte_eth_dev structure
3691 * @dcb_config: pointer to ixgbe_dcb_config structure
3694 ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
3695 struct ixgbe_dcb_config *dcb_config)
3697 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3698 &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
3699 struct ixgbe_hw *hw =
3700 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3702 PMD_INIT_FUNC_TRACE();
3703 if (hw->mac.type != ixgbe_mac_82598EB)
3704 /* PF VF Transmit Enable */
3705 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0),
3706 vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
3708 /* Configure general DCB TX parameters */
3709 ixgbe_dcb_tx_hw_config(dev, dcb_config);
3713 ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
3714 struct ixgbe_dcb_config *dcb_config)
3716 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3717 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
3718 struct ixgbe_dcb_tc_config *tc;
3721 /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
3722 if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) {
3723 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
3724 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
3726 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
3727 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
3730 /* Initialize User Priority to Traffic Class mapping */
3731 for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3732 tc = &dcb_config->tc_config[j];
3733 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
3736 /* User Priority to Traffic Class mapping */
3737 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3738 j = vmdq_rx_conf->dcb_tc[i];
3739 tc = &dcb_config->tc_config[j];
3740 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
3746 ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
3747 struct ixgbe_dcb_config *dcb_config)
3749 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3750 &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
3751 struct ixgbe_dcb_tc_config *tc;
3754 /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
3755 if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) {
3756 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
3757 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
3759 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
3760 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
3763 /* Initialize User Priority to Traffic Class mapping */
3764 for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3765 tc = &dcb_config->tc_config[j];
3766 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
3769 /* User Priority to Traffic Class mapping */
3770 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3771 j = vmdq_tx_conf->dcb_tc[i];
3772 tc = &dcb_config->tc_config[j];
3773 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
3779 ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
3780 struct ixgbe_dcb_config *dcb_config)
3782 struct rte_eth_dcb_rx_conf *rx_conf =
3783 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
3784 struct ixgbe_dcb_tc_config *tc;
3787 dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
3788 dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
3790 /* Initialize User Priority to Traffic Class mapping */
3791 for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3792 tc = &dcb_config->tc_config[j];
3793 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
3796 /* User Priority to Traffic Class mapping */
3797 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3798 j = rx_conf->dcb_tc[i];
3799 tc = &dcb_config->tc_config[j];
3800 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
3806 ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
3807 struct ixgbe_dcb_config *dcb_config)
3809 struct rte_eth_dcb_tx_conf *tx_conf =
3810 &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
3811 struct ixgbe_dcb_tc_config *tc;
3814 dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
3815 dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
3817 /* Initialize User Priority to Traffic Class mapping */
3818 for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3819 tc = &dcb_config->tc_config[j];
3820 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
3823 /* User Priority to Traffic Class mapping */
3824 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3825 j = tx_conf->dcb_tc[i];
3826 tc = &dcb_config->tc_config[j];
3827 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
3833 * ixgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
3834 * @dev: pointer to eth_dev structure
3835 * @dcb_config: pointer to ixgbe_dcb_config structure
3838 ixgbe_dcb_rx_hw_config(struct rte_eth_dev *dev,
3839 struct ixgbe_dcb_config *dcb_config)
3845 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3847 PMD_INIT_FUNC_TRACE();
3849 * Disable the arbiter before changing parameters
3850 * (always enable recycle mode; WSP)
3852 reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
3853 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
3855 if (hw->mac.type != ixgbe_mac_82598EB) {
3856 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
3857 if (dcb_config->num_tcs.pg_tcs == 4) {
3858 if (dcb_config->vt_mode)
3859 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3860 IXGBE_MRQC_VMDQRT4TCEN;
3862 /* Whether the mode is DCB or DCB_RSS, just set
3863 * the MRQE to RSSXTCEN; RSS is controlled by the RSS_FIELD bits.
3866 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
3867 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3868 IXGBE_MRQC_RTRSS4TCEN;
3871 if (dcb_config->num_tcs.pg_tcs == 8) {
3872 if (dcb_config->vt_mode)
3873 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3874 IXGBE_MRQC_VMDQRT8TCEN;
3876 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
3877 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3878 IXGBE_MRQC_RTRSS8TCEN;
3882 IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
3884 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3886 /* Disable drop for all queues in VMDQ mode */
3886 for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
3887 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3889 (q << IXGBE_QDE_IDX_SHIFT)));
3891 /* Enable drop for all queues in SRIOV mode */
3892 for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
3893 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3895 (q << IXGBE_QDE_IDX_SHIFT) |
3900 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
3901 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3902 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
3903 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
3905 /* VFTA - enable all vlan filters */
3906 for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
3907 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
3911 * Configure Rx packet plane (recycle mode; WSP) and
3914 reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
3915 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
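/*
 * Illustrative usage sketch (not part of the driver build): how an
 * application might fill rte_eth_conf so that ixgbe_dcb_rx_config()
 * above sees a 4-TC setup with user priorities 0-7 mapped pairwise
 * onto TCs 0-3. The IXGBE_RXTX_USAGE_EXAMPLES guard is hypothetical
 * and never defined.
 */
#ifdef IXGBE_RXTX_USAGE_EXAMPLES
static void
example_dcb_rx_conf(struct rte_eth_conf *eth_conf)
{
	struct rte_eth_dcb_rx_conf *dcb = &eth_conf->rx_adv_conf.dcb_rx_conf;
	uint8_t up;

	eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
	dcb->nb_tcs = ETH_4_TCS;
	/* Two user priorities per traffic class: UPs 0,1 -> TC0, etc. */
	for (up = 0; up < ETH_DCB_NUM_USER_PRIORITIES; up++)
		dcb->dcb_tc[up] = up / 2;
}
#endif /* IXGBE_RXTX_USAGE_EXAMPLES */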
3919 ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill,
3920 uint16_t *max, uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
3922 switch (hw->mac.type) {
3923 case ixgbe_mac_82598EB:
3924 ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
3926 case ixgbe_mac_82599EB:
3927 case ixgbe_mac_X540:
3928 case ixgbe_mac_X550:
3929 case ixgbe_mac_X550EM_x:
3930 case ixgbe_mac_X550EM_a:
3931 ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
3940 ixgbe_dcb_hw_arbite_tx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *max,
3941 uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
3943 switch (hw->mac.type) {
3944 case ixgbe_mac_82598EB:
3945 ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, tsa);
3946 ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, tsa);
3948 case ixgbe_mac_82599EB:
3949 case ixgbe_mac_X540:
3950 case ixgbe_mac_X550:
3951 case ixgbe_mac_X550EM_x:
3952 case ixgbe_mac_X550EM_a:
3953 ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, tsa);
3954 ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, tsa, map);
3961 #define DCB_RX_CONFIG 1
3962 #define DCB_TX_CONFIG 1
3963 #define DCB_TX_PB 1024
3965 * ixgbe_dcb_hw_configure - Enable DCB and configure
3966 * general DCB parameters in both VT and non-VT mode
3967 * @dev: pointer to rte_eth_dev structure
3968 * @dcb_config: pointer to ixgbe_dcb_config structure
3971 ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
3972 struct ixgbe_dcb_config *dcb_config)
3975 uint8_t i, pfc_en, nb_tcs;
3976 uint16_t pbsize, rx_buffer_size;
3977 uint8_t config_dcb_rx = 0;
3978 uint8_t config_dcb_tx = 0;
3979 uint8_t tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3980 uint8_t bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3981 uint16_t refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3982 uint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3983 uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3984 struct ixgbe_dcb_tc_config *tc;
3985 uint32_t max_frame = dev->data->mtu + RTE_ETHER_HDR_LEN +
3987 struct ixgbe_hw *hw =
3988 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3989 struct ixgbe_bw_conf *bw_conf =
3990 IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
3992 switch (dev->data->dev_conf.rxmode.mq_mode) {
3993 case ETH_MQ_RX_VMDQ_DCB:
3994 dcb_config->vt_mode = true;
3995 if (hw->mac.type != ixgbe_mac_82598EB) {
3996 config_dcb_rx = DCB_RX_CONFIG;
3998 * get DCB and VT Rx configuration parameters
4001 ixgbe_vmdq_dcb_rx_config(dev, dcb_config);
4002 /* Configure general VMDQ and DCB RX parameters */
4003 ixgbe_vmdq_dcb_configure(dev);
4007 case ETH_MQ_RX_DCB_RSS:
4008 dcb_config->vt_mode = false;
4009 config_dcb_rx = DCB_RX_CONFIG;
4010 /* Get DCB Rx configuration parameters from rte_eth_conf */
4011 ixgbe_dcb_rx_config(dev, dcb_config);
4012 /* Configure general DCB RX parameters */
4013 ixgbe_dcb_rx_hw_config(dev, dcb_config);
4016 PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
4019 switch (dev->data->dev_conf.txmode.mq_mode) {
4020 case ETH_MQ_TX_VMDQ_DCB:
4021 dcb_config->vt_mode = true;
4022 config_dcb_tx = DCB_TX_CONFIG;
4023 /* get DCB and VT TX configuration parameters
4026 ixgbe_dcb_vt_tx_config(dev, dcb_config);
4027 /* Configure general VMDQ and DCB TX parameters */
4028 ixgbe_vmdq_dcb_hw_tx_config(dev, dcb_config);
4032 dcb_config->vt_mode = false;
4033 config_dcb_tx = DCB_TX_CONFIG;
4034 /* Get DCB TX configuration parameters from rte_eth_conf */
4035 ixgbe_dcb_tx_config(dev, dcb_config);
4036 /* Configure general DCB TX parameters */
4037 ixgbe_dcb_tx_hw_config(dev, dcb_config);
4040 PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration");
4044 nb_tcs = dcb_config->num_tcs.pfc_tcs;
4046 ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
4047 if (nb_tcs == ETH_4_TCS) {
4048 /* Avoid un-configured priority mapping to TC0 */
4050 uint8_t mask = 0xFF;
4052 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
4053 mask = (uint8_t)(mask & (~(1 << map[i])));
4054 for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
4055 if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
4059 /* Re-configure 4 TCs BW */
4060 for (i = 0; i < nb_tcs; i++) {
4061 tc = &dcb_config->tc_config[i];
4062 if (bw_conf->tc_num != nb_tcs)
4063 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
4064 (uint8_t)(100 / nb_tcs);
4065 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
4066 (uint8_t)(100 / nb_tcs);
4068 for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
4069 tc = &dcb_config->tc_config[i];
4070 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
4071 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 0;
4074 /* Re-configure 8 TCs BW */
4075 for (i = 0; i < nb_tcs; i++) {
4076 tc = &dcb_config->tc_config[i];
4077 if (bw_conf->tc_num != nb_tcs)
4078 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
4079 (uint8_t)(100 / nb_tcs + (i & 1));
4080 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
4081 (uint8_t)(100 / nb_tcs + (i & 1));
4085 switch (hw->mac.type) {
4086 case ixgbe_mac_X550:
4087 case ixgbe_mac_X550EM_x:
4088 case ixgbe_mac_X550EM_a:
4089 rx_buffer_size = X550_RX_BUFFER_SIZE;
4092 rx_buffer_size = NIC_RX_BUFFER_SIZE;
4096 if (config_dcb_rx) {
4097 /* Set RX buffer size */
4098 pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
4099 uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT;
4101 for (i = 0; i < nb_tcs; i++) {
4102 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
4104 /* zero alloc all unused TCs */
4105 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
4106 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
4109 if (config_dcb_tx) {
4110 /* Only support an equally distributed
4111 * Tx packet buffer strategy.
4113 uint32_t txpktsize = IXGBE_TXPBSIZE_MAX / nb_tcs;
4114 uint32_t txpbthresh = (txpktsize / DCB_TX_PB) - IXGBE_TXPKT_SIZE_MAX;
4116 for (i = 0; i < nb_tcs; i++) {
4117 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
4118 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
4120 /* Clear unused TCs, if any, to zero buffer size */
4121 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
4122 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
4123 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
4127 /* Calculates traffic class credits */
4128 ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
4129 IXGBE_DCB_TX_CONFIG);
4130 ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
4131 IXGBE_DCB_RX_CONFIG);
4133 if (config_dcb_rx) {
4134 /* Unpack CEE standard containers */
4135 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_RX_CONFIG, refill);
4136 ixgbe_dcb_unpack_max_cee(dcb_config, max);
4137 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_RX_CONFIG, bwgid);
4138 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_RX_CONFIG, tsa);
4139 /* Configure PG(ETS) RX */
4140 ixgbe_dcb_hw_arbite_rx_config(hw, refill, max, bwgid, tsa, map);
4143 if (config_dcb_tx) {
4144 /* Unpack CEE standard containers */
4145 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
4146 ixgbe_dcb_unpack_max_cee(dcb_config, max);
4147 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
4148 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
4149 /* Configure PG(ETS) TX */
4150 ixgbe_dcb_hw_arbite_tx_config(hw, refill, max, bwgid, tsa, map);
4153 /* Configure queue statistics registers */
4154 ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
4156 /* Check if the PFC is supported */
4157 if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
4158 pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
4159 for (i = 0; i < nb_tcs; i++) {
4161 * If the TC count is 8 and the default high_water is 48,
4162 * the low_water defaults to 16.
4164 hw->fc.high_water[i] = (pbsize * 3) / 4;
4165 hw->fc.low_water[i] = pbsize / 4;
4166 /* Enable pfc for this TC */
4167 tc = &dcb_config->tc_config[i];
4168 tc->pfc = ixgbe_dcb_pfc_enabled;
4170 ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
4171 if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
4173 ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
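/*
 * Worked example for the watermarks above, assuming a non-X550 MAC:
 * rx_buffer_size = NIC_RX_BUFFER_SIZE (512 KB) and nb_tcs = 8 give
 * pbsize = 512 / 8 = 64 KB per TC, hence high_water = 64 * 3/4 = 48 KB
 * and low_water = 64 / 4 = 16 KB, i.e. the 48/16 defaults mentioned in
 * the comment above.
 */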
4180 * ixgbe_configure_dcb - Configure DCB Hardware
4181 * @dev: pointer to rte_eth_dev
4183 void ixgbe_configure_dcb(struct rte_eth_dev *dev)
4185 struct ixgbe_dcb_config *dcb_cfg =
4186 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
4187 struct rte_eth_conf *dev_conf = &(dev->data->dev_conf);
4189 PMD_INIT_FUNC_TRACE();
4191 /* check support mq_mode for DCB */
4192 if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
4193 (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) &&
4194 (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS))
4197 if (dev->data->nb_rx_queues > ETH_DCB_NUM_QUEUES)
4200 /** Configure DCB hardware **/
4201 ixgbe_dcb_hw_configure(dev, dcb_cfg);
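/*
 * Hedged usage sketch (illustrative only, guarded out of the build):
 * a minimal port configuration that reaches ixgbe_configure_dcb()
 * through rte_eth_dev_configure(). The port_id and queue counts are
 * assumptions for illustration.
 */
#ifdef IXGBE_RXTX_USAGE_EXAMPLES
static int
example_configure_dcb_port(uint16_t port_id)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.rxmode.mq_mode = ETH_MQ_RX_DCB;
	conf.txmode.mq_mode = ETH_MQ_TX_DCB;
	conf.dcb_capability_en = ETH_DCB_PFC_SUPPORT;
	conf.rx_adv_conf.dcb_rx_conf.nb_tcs = ETH_4_TCS;
	conf.tx_adv_conf.dcb_tx_conf.nb_tcs = ETH_4_TCS;

	/* One queue per TC; must stay within ETH_DCB_NUM_QUEUES. */
	return rte_eth_dev_configure(port_id, 4, 4, &conf);
}
#endif /* IXGBE_RXTX_USAGE_EXAMPLES */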
4205 * VMDq is supported only on 10 GbE NICs.
4208 ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
4210 struct rte_eth_vmdq_rx_conf *cfg;
4211 struct ixgbe_hw *hw;
4212 enum rte_eth_nb_pools num_pools;
4213 uint32_t mrqc, vt_ctl, vlanctrl;
4217 PMD_INIT_FUNC_TRACE();
4218 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4219 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
4220 num_pools = cfg->nb_queue_pools;
4222 ixgbe_rss_disable(dev);
4224 /* MRQC: enable vmdq */
4225 mrqc = IXGBE_MRQC_VMDQEN;
4226 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
4228 /* PFVTCTL: turn on virtualisation and set the default pool */
4229 vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
4230 if (cfg->enable_default_pool)
4231 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
4233 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
4235 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
4237 for (i = 0; i < (int)num_pools; i++) {
4238 vmolr = ixgbe_convert_vm_rx_mask_to_val(cfg->rx_mode, vmolr);
4239 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
4242 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
4243 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4244 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
4245 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
4247 /* VFTA - enable all vlan filters */
4248 for (i = 0; i < NUM_VFTA_REGISTERS; i++)
4249 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), UINT32_MAX);
4251 /* VFRE: pool enabling for receive - 64 */
4252 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), UINT32_MAX);
4253 if (num_pools == ETH_64_POOLS)
4254 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), UINT32_MAX);
4257 * MPSAR - allow pools to read specific mac addresses
4258 * In this case, all pools should be able to read from mac addr 0
4260 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), UINT32_MAX);
4261 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), UINT32_MAX);
4263 /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
4264 for (i = 0; i < cfg->nb_pool_maps; i++) {
4265 /* set vlan id in VF register and set the valid bit */
4266 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN |
4267 (cfg->pool_map[i].vlan_id & IXGBE_RXD_VLAN_ID_MASK)));
4269 * Put the allowed pools in VFB reg. As we only have 16 or 64
4270 * pools, we only need to use the first half of the register
4273 if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0)
4274 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i * 2),
4275 (cfg->pool_map[i].pools & UINT32_MAX));
4277 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((i * 2 + 1)),
4278 ((cfg->pool_map[i].pools >> 32) & UINT32_MAX));
4282 /* PFDMA Tx General Switch Control Enables VMDQ loopback */
4283 if (cfg->enable_loop_back) {
4284 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
4285 for (i = 0; i < RTE_IXGBE_VMTXSW_REGISTER_COUNT; i++)
4286 IXGBE_WRITE_REG(hw, IXGBE_VMTXSW(i), UINT32_MAX);
4289 IXGBE_WRITE_FLUSH(hw);
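/*
 * Hedged sketch of the rte_eth_vmdq_rx_conf consumed above: 64 pools
 * with VLAN 100 steered to pools 0 and 1. All values are illustrative
 * and the guard macro is hypothetical.
 */
#ifdef IXGBE_RXTX_USAGE_EXAMPLES
static void
example_vmdq_rx_conf(struct rte_eth_conf *eth_conf)
{
	struct rte_eth_vmdq_rx_conf *cfg =
		&eth_conf->rx_adv_conf.vmdq_rx_conf;

	eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
	cfg->nb_queue_pools = ETH_64_POOLS;
	cfg->enable_default_pool = 0;
	cfg->nb_pool_maps = 1;
	cfg->pool_map[0].vlan_id = 100;
	cfg->pool_map[0].pools = (1ULL << 0) | (1ULL << 1);
}
#endif /* IXGBE_RXTX_USAGE_EXAMPLES */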
4293 * ixgbe_vmdq_tx_hw_configure - Configure general VMDq TX parameters
4294 * @hw: pointer to hardware structure
4297 ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
4302 PMD_INIT_FUNC_TRACE();
4303 /* PF VF Transmit Enable */
4304 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), UINT32_MAX);
4305 IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), UINT32_MAX);
4307 /* Disable the Tx desc arbiter so that MTQC can be changed */
4308 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
4309 reg |= IXGBE_RTTDCS_ARBDIS;
4310 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
4312 reg = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
4313 IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
4315 /* Disable drop for all queues */
4316 for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
4317 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4318 (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
4320 /* Enable the Tx desc arbiter */
4321 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
4322 reg &= ~IXGBE_RTTDCS_ARBDIS;
4323 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
4325 IXGBE_WRITE_FLUSH(hw);
4328 static int __attribute__((cold))
4329 ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
4331 struct ixgbe_rx_entry *rxe = rxq->sw_ring;
4335 /* Initialize software ring entries */
4336 for (i = 0; i < rxq->nb_rx_desc; i++) {
4337 volatile union ixgbe_adv_rx_desc *rxd;
4338 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
4341 PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
4342 (unsigned) rxq->queue_id);
4346 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
4347 mbuf->port = rxq->port_id;
4350 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
4351 rxd = &rxq->rx_ring[i];
4352 rxd->read.hdr_addr = 0;
4353 rxd->read.pkt_addr = dma_addr;
4361 ixgbe_config_vf_rss(struct rte_eth_dev *dev)
4363 struct ixgbe_hw *hw;
4366 ixgbe_rss_configure(dev);
4368 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4370 /* MRQC: enable VF RSS */
4371 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
4372 mrqc &= ~IXGBE_MRQC_MRQE_MASK;
4373 switch (RTE_ETH_DEV_SRIOV(dev).active) {
4375 mrqc |= IXGBE_MRQC_VMDQRSS64EN;
4379 mrqc |= IXGBE_MRQC_VMDQRSS32EN;
4383 PMD_INIT_LOG(ERR, "Invalid pool number in IOV mode with VMDQ RSS");
4387 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
4393 ixgbe_config_vf_default(struct rte_eth_dev *dev)
4395 struct ixgbe_hw *hw =
4396 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4398 switch (RTE_ETH_DEV_SRIOV(dev).active) {
4400 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
4405 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
4406 IXGBE_MRQC_VMDQRT4TCEN);
4410 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
4411 IXGBE_MRQC_VMDQRT8TCEN);
4415 "invalid pool number in IOV mode");
4422 ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
4424 struct ixgbe_hw *hw =
4425 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4427 if (hw->mac.type == ixgbe_mac_82598EB)
4430 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
4432 * SRIOV inactive scheme
4433 * any DCB/RSS w/o VMDq multi-queue setting
4435 switch (dev->data->dev_conf.rxmode.mq_mode) {
4437 case ETH_MQ_RX_DCB_RSS:
4438 case ETH_MQ_RX_VMDQ_RSS:
4439 ixgbe_rss_configure(dev);
4442 case ETH_MQ_RX_VMDQ_DCB:
4443 ixgbe_vmdq_dcb_configure(dev);
4446 case ETH_MQ_RX_VMDQ_ONLY:
4447 ixgbe_vmdq_rx_hw_configure(dev);
4450 case ETH_MQ_RX_NONE:
4452 /* if mq_mode is none, disable rss mode.*/
4453 ixgbe_rss_disable(dev);
4457 /* SRIOV active scheme
4458 * Support RSS together with SRIOV.
4460 switch (dev->data->dev_conf.rxmode.mq_mode) {
4462 case ETH_MQ_RX_VMDQ_RSS:
4463 ixgbe_config_vf_rss(dev);
4465 case ETH_MQ_RX_VMDQ_DCB:
4467 /* In SRIOV, the configuration is the same as VMDq case */
4468 ixgbe_vmdq_dcb_configure(dev);
4470 /* DCB/RSS together with SRIOV is not supported */
4471 case ETH_MQ_RX_VMDQ_DCB_RSS:
4472 case ETH_MQ_RX_DCB_RSS:
4474 "Could not support DCB/RSS with VMDq & SRIOV");
4477 ixgbe_config_vf_default(dev);
4486 ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
4488 struct ixgbe_hw *hw =
4489 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4493 if (hw->mac.type == ixgbe_mac_82598EB)
4496 /* disable arbiter before setting MTQC */
4497 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
4498 rttdcs |= IXGBE_RTTDCS_ARBDIS;
4499 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
4501 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
4503 * SRIOV inactive scheme
4504 * any DCB w/o VMDq multi-queue setting
4506 if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
4507 ixgbe_vmdq_tx_hw_configure(hw);
4509 mtqc = IXGBE_MTQC_64Q_1PB;
4510 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
4513 switch (RTE_ETH_DEV_SRIOV(dev).active) {
4516 * SRIOV active scheme
4517 * FIXME if support DCB together with VMDq & SRIOV
4520 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
4523 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_32VF;
4526 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_RT_ENA |
4530 mtqc = IXGBE_MTQC_64Q_1PB;
4531 PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
4533 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
4536 /* re-enable arbiter */
4537 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
4538 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
4544 * ixgbe_get_rscctl_maxdesc - Calculate the RSCCTL[n].MAXDESC for PF
4546 * Return the RSCCTL[n].MAXDESC for 82599 and x540 PF devices according to the
4547 * spec rev. 3.0 chapter 8.2.3.8.13.
4549 * @pool Memory pool of the Rx queue
4551 static inline uint32_t
4552 ixgbe_get_rscctl_maxdesc(struct rte_mempool *pool)
4554 struct rte_pktmbuf_pool_private *mp_priv = rte_mempool_get_priv(pool);
4556 /* MAXDESC * SRRCTL.BSIZEPKT must not exceed 64 KB minus one */
4558 RTE_IPV4_MAX_PKT_LEN /
4559 (mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);
4562 return IXGBE_RSCCTL_MAXDESC_16;
4563 else if (maxdesc >= 8)
4564 return IXGBE_RSCCTL_MAXDESC_8;
4565 else if (maxdesc >= 4)
4566 return IXGBE_RSCCTL_MAXDESC_4;
4568 return IXGBE_RSCCTL_MAXDESC_1;
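/*
 * Worked example: with the default mbuf data room of
 * RTE_MBUF_DEFAULT_BUF_SIZE (2176 bytes), the usable payload is
 * 2176 - 128 = 2048 bytes, so maxdesc = 65535 / 2048 = 31 and the
 * function returns IXGBE_RSCCTL_MAXDESC_16 (16 * 2048 stays below the
 * 64 KB minus one limit).
 */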
4572 * ixgbe_set_ivar - Setup the correct IVAR register for a particular MSIX
4575 * (Taken from FreeBSD tree)
4576 * (yes this is all very magic and confusing :)
4579 * @entry the register array entry
4580 * @vector the MSIX vector for this queue
4584 ixgbe_set_ivar(struct rte_eth_dev *dev, u8 entry, u8 vector, s8 type)
4586 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4589 vector |= IXGBE_IVAR_ALLOC_VAL;
4591 switch (hw->mac.type) {
4593 case ixgbe_mac_82598EB:
4595 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4597 entry += (type * 64);
4598 index = (entry >> 2) & 0x1F;
4599 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4600 ivar &= ~(0xFF << (8 * (entry & 0x3)));
4601 ivar |= (vector << (8 * (entry & 0x3)));
4602 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4605 case ixgbe_mac_82599EB:
4606 case ixgbe_mac_X540:
4607 if (type == -1) { /* MISC IVAR */
4608 index = (entry & 1) * 8;
4609 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4610 ivar &= ~(0xFF << index);
4611 ivar |= (vector << index);
4612 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4613 } else { /* RX/TX IVARS */
4614 index = (16 * (entry & 1)) + (8 * type);
4615 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4616 ivar &= ~(0xFF << index);
4617 ivar |= (vector << index);
4618 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
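/*
 * Worked example for the 82599/X540 IVAR layout above: each IVAR
 * register holds four 8-bit entries, an Rx/Tx pair per queue. For Rx
 * queue 5 (entry = 5, type = 0): register = IVAR(5 >> 1) = IVAR(2),
 * byte offset = 16 * (5 & 1) + 8 * 0 = 16, so the vector is written
 * to bits 23:16 of IVAR(2).
 */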
4628 void __attribute__((cold))
4629 ixgbe_set_rx_function(struct rte_eth_dev *dev)
4631 uint16_t i, rx_using_sse;
4632 struct ixgbe_adapter *adapter = dev->data->dev_private;
4635 * In order to allow Vector Rx there are a few configuration
4636 * conditions to be met and Rx Bulk Allocation should be allowed.
4638 if (ixgbe_rx_vec_dev_conf_condition_check(dev) ||
4639 !adapter->rx_bulk_alloc_allowed) {
4640 PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx "
4641 "preconditions or RTE_IXGBE_INC_VECTOR is "
4643 dev->data->port_id);
4645 adapter->rx_vec_allowed = false;
4649 * Initialize the appropriate LRO callback.
4651 * If all queues satisfy the bulk allocation preconditions
4652 * (hw->rx_bulk_alloc_allowed is TRUE) then we may use bulk allocation.
4653 * Otherwise use a single allocation version.
4655 if (dev->data->lro) {
4656 if (adapter->rx_bulk_alloc_allowed) {
4657 PMD_INIT_LOG(DEBUG, "LRO is requested. Using a bulk "
4658 "allocation version");
4659 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
4661 PMD_INIT_LOG(DEBUG, "LRO is requested. Using a single "
4662 "allocation version");
4663 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
4665 } else if (dev->data->scattered_rx) {
4667 * Set the non-LRO scattered callback: there are Vector and
4668 * single allocation versions.
4670 if (adapter->rx_vec_allowed) {
4671 PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
4672 "callback (port=%d).",
4673 dev->data->port_id);
4675 dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
4676 } else if (adapter->rx_bulk_alloc_allowed) {
4677 PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
4678 "allocation callback (port=%d).",
4679 dev->data->port_id);
4680 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
4682 PMD_INIT_LOG(DEBUG, "Using Regualr (non-vector, "
4683 "single allocation) "
4684 "Scattered Rx callback "
4686 dev->data->port_id);
4688 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
4691 * Below we set "simple" callbacks according to port/queues parameters.
4692 * If parameters allow we are going to choose between the following
4696 * - Single buffer allocation (the simplest one)
4698 } else if (adapter->rx_vec_allowed) {
4699 PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
4700 "burst size no less than %d (port=%d).",
4701 RTE_IXGBE_DESCS_PER_LOOP,
4702 dev->data->port_id);
4704 dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
4705 } else if (adapter->rx_bulk_alloc_allowed) {
4706 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
4707 "satisfied. Rx Burst Bulk Alloc function "
4708 "will be used on port=%d.",
4709 dev->data->port_id);
4711 dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
4713 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
4714 "satisfied, or Scattered Rx is requested "
4716 dev->data->port_id);
4718 dev->rx_pkt_burst = ixgbe_recv_pkts;
4721 /* Propagate information about RX function choice through all queues. */
4724 (dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec ||
4725 dev->rx_pkt_burst == ixgbe_recv_pkts_vec);
4727 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4728 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
4730 rxq->rx_using_sse = rx_using_sse;
4731 #ifdef RTE_LIBRTE_SECURITY
4732 rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
4733 DEV_RX_OFFLOAD_SECURITY);
4739 * ixgbe_set_rsc - configure RSC related port HW registers
4741 * Configures the port's RSC related registers according to chapter
4742 * 4.6.7.2 of the 82599 Spec (x540 configuration is virtually the same).
4746 * Returns 0 in case of success or a non-zero error code
4749 ixgbe_set_rsc(struct rte_eth_dev *dev)
4751 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
4752 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4753 struct rte_eth_dev_info dev_info = { 0 };
4754 bool rsc_capable = false;
4760 dev->dev_ops->dev_infos_get(dev, &dev_info);
4761 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
4764 if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
4765 PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
4770 /* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
4772 if ((rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC) &&
4773 (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
4775 * According to chapter 4.6.7.2.1 of the Spec Rev.
4776 * 3.0 RSC configuration requires HW CRC stripping being
4777 * enabled. If user requested both HW CRC stripping off
4778 * and RSC on - return an error.
4780 PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC "
4785 /* RFCTL configuration */
4786 rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
4787 if ((rsc_capable) && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
4789 * Since NFS packet coalescing is not supported - clear
4790 * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
4793 rfctl &= ~(IXGBE_RFCTL_RSC_DIS | IXGBE_RFCTL_NFSW_DIS |
4794 IXGBE_RFCTL_NFSR_DIS);
4796 rfctl |= IXGBE_RFCTL_RSC_DIS;
4797 IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
4799 /* If LRO hasn't been requested - we are done here. */
4800 if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
4803 /* Set RDRXCTL.RSCACKC bit */
4804 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
4805 rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
4806 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
4808 /* Per-queue RSC configuration (chapter 4.6.7.2.2 of 82599 Spec) */
4809 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4810 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
4812 IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxq->reg_idx));
4814 IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxq->reg_idx));
4816 IXGBE_READ_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx));
4818 IXGBE_READ_REG(hw, IXGBE_EITR(rxq->reg_idx));
4821 * ixgbe PMD doesn't support header-split at the moment.
4823 * Following the 4.6.7.2.1 chapter of the 82599/x540
4824 * Spec if RSC is enabled the SRRCTL[n].BSIZEHEADER
4825 * should be configured even if header split is not
4826 * enabled. We will configure it to 128 bytes following the
4827 * recommendation in the spec.
4829 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
4830 srrctl |= (128 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
4831 IXGBE_SRRCTL_BSIZEHDR_MASK;
4834 * TODO: Consider setting the Receive Descriptor Minimum
4835 * Threshold Size for an RSC case. This is not an obviously
4836 * beneficial option, but one worth considering...
4839 rscctl |= IXGBE_RSCCTL_RSCEN;
4840 rscctl |= ixgbe_get_rscctl_maxdesc(rxq->mb_pool);
4841 psrtype |= IXGBE_PSRTYPE_TCPHDR;
4844 * RSC: Set ITR interval corresponding to 2K ints/s.
4846 * Full-sized RSC aggregations for a 10Gb/s link will
4847 * arrive at about 20K aggregation/s rate.
4849 * A 2K ints/s rate will cause only 10% of the
4850 * aggregations to be closed due to the interrupt timer
4851 * expiration when streaming at wire speed.
4853 * For a sparse streaming case this setting will yield
4854 * at most 500us latency for a single RSC aggregation.
4856 eitr &= ~IXGBE_EITR_ITR_INT_MASK;
4857 eitr |= IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT);
4858 eitr |= IXGBE_EITR_CNT_WDIS;
4860 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
4861 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxq->reg_idx), rscctl);
4862 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
4863 IXGBE_WRITE_REG(hw, IXGBE_EITR(rxq->reg_idx), eitr);
4866 * RSC requires the mapping of the queue to the
4869 ixgbe_set_ivar(dev, rxq->reg_idx, i, 0);
4874 PMD_INIT_LOG(DEBUG, "enabling LRO mode");
4880 * Initializes Receive Unit.
4882 int __attribute__((cold))
4883 ixgbe_dev_rx_init(struct rte_eth_dev *dev)
4885 struct ixgbe_hw *hw;
4886 struct ixgbe_rx_queue *rxq;
4897 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
4900 PMD_INIT_FUNC_TRACE();
4901 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4904 * Make sure receives are disabled while setting
4905 * up the RX context (registers, descriptor rings, etc.).
4907 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4908 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
4910 /* Enable receipt of broadcast frames */
4911 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4912 fctrl |= IXGBE_FCTRL_BAM;
4913 fctrl |= IXGBE_FCTRL_DPF;
4914 fctrl |= IXGBE_FCTRL_PMCF;
4915 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4918 * Configure CRC stripping, if any.
4920 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4921 if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
4922 hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
4924 hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
4927 * Configure jumbo frame support, if any.
4929 if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
4930 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
4931 maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
4932 maxfrs &= 0x0000FFFF;
4933 maxfrs |= (rx_conf->max_rx_pkt_len << 16);
4934 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
4936 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
4939 * If loopback mode is configured, set LPBK bit.
4941 if (dev->data->dev_conf.lpbk_mode != 0) {
4942 rc = ixgbe_check_supported_loopback_mode(dev);
4944 PMD_INIT_LOG(ERR, "Unsupported loopback mode");
4947 hlreg0 |= IXGBE_HLREG0_LPBK;
4949 hlreg0 &= ~IXGBE_HLREG0_LPBK;
4952 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4955 * Assume no header split and no VLAN strip support
4956 * on any Rx queue first.
4958 rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
4959 /* Setup RX queues */
4960 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4961 rxq = dev->data->rx_queues[i];
4964 * Reset crc_len in case it was changed after queue setup by a
4965 * call to configure.
4967 if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
4968 rxq->crc_len = RTE_ETHER_CRC_LEN;
4972 /* Setup the Base and Length of the Rx Descriptor Rings */
4973 bus_addr = rxq->rx_ring_phys_addr;
4974 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rxq->reg_idx),
4975 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4976 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rxq->reg_idx),
4977 (uint32_t)(bus_addr >> 32));
4978 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rxq->reg_idx),
4979 rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
4980 IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
4981 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), 0);
4983 /* Configure the SRRCTL register */
4984 srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
4986 /* Set if packets are dropped when no descriptors available */
4988 srrctl |= IXGBE_SRRCTL_DROP_EN;
4991 * Configure the RX buffer size in the BSIZEPACKET field of
4992 * the SRRCTL register of the queue.
4993 * The value is in 1 KB resolution. Valid values can be from
4996 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
4997 RTE_PKTMBUF_HEADROOM);
4998 srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
4999 IXGBE_SRRCTL_BSIZEPKT_MASK);
5001 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
5003 buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
5004 IXGBE_SRRCTL_BSIZEPKT_SHIFT);
5006 /* Account for dual VLAN tag length when checking the buffer size */
5007 if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
5008 2 * IXGBE_VLAN_TAG_SIZE > buf_size)
5009 dev->data->scattered_rx = 1;
5010 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
5011 rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
5014 if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
5015 dev->data->scattered_rx = 1;
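/*
 * Worked example for the sizing above (illustrative): a pool created
 * with RTE_MBUF_DEFAULT_BUF_SIZE gives buf_size = 2176 - 128 = 1920
 * bytes before programming. BSIZEPKT is in 1 KB units, so
 * 1920 >> 10 = 1 is written and the effective buffer read back is
 * 1 << 10 = 1024 bytes; any max_rx_pkt_len above 1024 - 8 (two VLAN
 * tags) then forces scattered Rx.
 */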
5018 * Device configured with multiple RX queues.
5020 ixgbe_dev_mq_rx_configure(dev);
5023 * Setup the Checksum Register.
5024 * Disable Full-Packet Checksum which is mutually exclusive with RSS.
5025 * Enable IP/L4 checksum computation by hardware if requested to do so.
5027 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
5028 rxcsum |= IXGBE_RXCSUM_PCSD;
5029 if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
5030 rxcsum |= IXGBE_RXCSUM_IPPCSE;
5032 rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
5034 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
5036 if (hw->mac.type == ixgbe_mac_82599EB ||
5037 hw->mac.type == ixgbe_mac_X540) {
5038 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
5039 if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
5040 rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
5042 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
5043 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
5044 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
5047 rc = ixgbe_set_rsc(dev);
5051 ixgbe_set_rx_function(dev);
5057 * Initializes Transmit Unit.
5059 void __attribute__((cold))
5060 ixgbe_dev_tx_init(struct rte_eth_dev *dev)
5062 struct ixgbe_hw *hw;
5063 struct ixgbe_tx_queue *txq;
5069 PMD_INIT_FUNC_TRACE();
5070 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5072 /* Enable TX CRC (checksum offload requirement) and hw padding
5075 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
5076 hlreg0 |= (IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN);
5077 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
5079 /* Setup the Base and Length of the Tx Descriptor Rings */
5080 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5081 txq = dev->data->tx_queues[i];
5083 bus_addr = txq->tx_ring_phys_addr;
5084 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(txq->reg_idx),
5085 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
5086 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(txq->reg_idx),
5087 (uint32_t)(bus_addr >> 32));
5088 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(txq->reg_idx),
5089 txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
5090 /* Setup the HW Tx Head and TX Tail descriptor pointers */
5091 IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
5092 IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
5095 * Disable Tx Head Writeback RO bit, since this hoses
5096 * bookkeeping if things aren't delivered in order.
5098 switch (hw->mac.type) {
5099 case ixgbe_mac_82598EB:
5100 txctrl = IXGBE_READ_REG(hw,
5101 IXGBE_DCA_TXCTRL(txq->reg_idx));
5102 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
5103 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx),
5107 case ixgbe_mac_82599EB:
5108 case ixgbe_mac_X540:
5109 case ixgbe_mac_X550:
5110 case ixgbe_mac_X550EM_x:
5111 case ixgbe_mac_X550EM_a:
5113 txctrl = IXGBE_READ_REG(hw,
5114 IXGBE_DCA_TXCTRL_82599(txq->reg_idx));
5115 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
5116 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx),
5122 /* Device configured with multiple TX queues. */
5123 ixgbe_dev_mq_tx_configure(dev);
5127 * Check if requested loopback mode is supported
5130 ixgbe_check_supported_loopback_mode(struct rte_eth_dev *dev)
5132 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5134 if (dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_TX_RX)
5135 if (hw->mac.type == ixgbe_mac_82599EB ||
5136 hw->mac.type == ixgbe_mac_X540 ||
5137 hw->mac.type == ixgbe_mac_X550 ||
5138 hw->mac.type == ixgbe_mac_X550EM_x ||
5139 hw->mac.type == ixgbe_mac_X550EM_a)
5146 * Set up link for 82599 loopback mode Tx->Rx.
5148 static inline void __attribute__((cold))
5149 ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
5151 PMD_INIT_FUNC_TRACE();
5153 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
5154 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM) !=
5156 PMD_INIT_LOG(ERR, "Could not enable loopback mode");
5165 IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU);
5166 ixgbe_reset_pipeline_82599(hw);
5168 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
5174 * Start Transmit and Receive Units.
5176 int __attribute__((cold))
5177 ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
5179 struct ixgbe_hw *hw;
5180 struct ixgbe_tx_queue *txq;
5181 struct ixgbe_rx_queue *rxq;
5188 PMD_INIT_FUNC_TRACE();
5189 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5191 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5192 txq = dev->data->tx_queues[i];
5193 /* Setup Transmit Threshold Registers */
5194 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
5195 txdctl |= txq->pthresh & 0x7F;
5196 txdctl |= ((txq->hthresh & 0x7F) << 8);
5197 txdctl |= ((txq->wthresh & 0x7F) << 16);
5198 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
5201 if (hw->mac.type != ixgbe_mac_82598EB) {
5202 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
5203 dmatxctl |= IXGBE_DMATXCTL_TE;
5204 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
5207 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5208 txq = dev->data->tx_queues[i];
5209 if (!txq->tx_deferred_start) {
5210 ret = ixgbe_dev_tx_queue_start(dev, i);
5216 for (i = 0; i < dev->data->nb_rx_queues; i++) {
5217 rxq = dev->data->rx_queues[i];
5218 if (!rxq->rx_deferred_start) {
5219 ret = ixgbe_dev_rx_queue_start(dev, i);
5225 /* Enable Receive engine */
5226 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
5227 if (hw->mac.type == ixgbe_mac_82598EB)
5228 rxctrl |= IXGBE_RXCTRL_DMBYPS;
5229 rxctrl |= IXGBE_RXCTRL_RXEN;
5230 hw->mac.ops.enable_rx_dma(hw, rxctrl);
5232 /* If loopback mode is enabled, set up the link accordingly */
5233 if (dev->data->dev_conf.lpbk_mode != 0) {
5234 if (hw->mac.type == ixgbe_mac_82599EB)
5235 ixgbe_setup_loopback_link_82599(hw);
5236 else if (hw->mac.type == ixgbe_mac_X540 ||
5237 hw->mac.type == ixgbe_mac_X550 ||
5238 hw->mac.type == ixgbe_mac_X550EM_x ||
5239 hw->mac.type == ixgbe_mac_X550EM_a)
5240 ixgbe_setup_loopback_link_x540_x550(hw, true);
5243 #ifdef RTE_LIBRTE_SECURITY
5244 if ((dev->data->dev_conf.rxmode.offloads &
5245 DEV_RX_OFFLOAD_SECURITY) ||
5246 (dev->data->dev_conf.txmode.offloads &
5247 DEV_TX_OFFLOAD_SECURITY)) {
5248 ret = ixgbe_crypto_enable_ipsec(dev);
5251 "ixgbe_crypto_enable_ipsec fails with %d.",
5262 * Start Receive Units for specified queue.
5264 int __attribute__((cold))
5265 ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
5267 struct ixgbe_hw *hw;
5268 struct ixgbe_rx_queue *rxq;
5272 PMD_INIT_FUNC_TRACE();
5273 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5275 rxq = dev->data->rx_queues[rx_queue_id];
5277 /* Allocate buffers for descriptor rings */
5278 if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
5279 PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
5283 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5284 rxdctl |= IXGBE_RXDCTL_ENABLE;
5285 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
5287 /* Wait until RX Enable ready */
5288 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5291 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5292 } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
5294 PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", rx_queue_id);
5296 IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
5297 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
5298 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
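/*
 * Hedged usage sketch (guarded out of the build): deferring a queue
 * at setup time and starting it later through the function above.
 * The descriptor count, port_id and queue_id are illustrative.
 */
#ifdef IXGBE_RXTX_USAGE_EXAMPLES
static int
example_deferred_rx_start(uint16_t port_id, uint16_t queue_id,
			  struct rte_mempool *mp)
{
	struct rte_eth_rxconf rx_conf;
	int ret;

	memset(&rx_conf, 0, sizeof(rx_conf));
	rx_conf.rx_deferred_start = 1;
	ret = rte_eth_rx_queue_setup(port_id, queue_id, 512,
			rte_eth_dev_socket_id(port_id), &rx_conf, mp);
	if (ret != 0)
		return ret;
	/* rte_eth_dev_start() will skip this queue; start it explicitly. */
	return rte_eth_dev_rx_queue_start(port_id, queue_id);
}
#endif /* IXGBE_RXTX_USAGE_EXAMPLES */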
5304 * Stop Receive Units for specified queue.
5306 int __attribute__((cold))
5307 ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
5309 struct ixgbe_hw *hw;
5310 struct ixgbe_adapter *adapter = dev->data->dev_private;
5311 struct ixgbe_rx_queue *rxq;
5315 PMD_INIT_FUNC_TRACE();
5316 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5318 rxq = dev->data->rx_queues[rx_queue_id];
5320 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5321 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
5322 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
5324 /* Wait until RX Enable bit clear */
5325 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5328 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5329 } while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE));
5331 PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id);
5333 rte_delay_us(RTE_IXGBE_WAIT_100_US);
5335 ixgbe_rx_queue_release_mbufs(rxq);
5336 ixgbe_reset_rx_queue(adapter, rxq);
5337 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
5344 * Start Transmit Units for specified queue.
5346 int __attribute__((cold))
5347 ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
5349 struct ixgbe_hw *hw;
5350 struct ixgbe_tx_queue *txq;
5354 PMD_INIT_FUNC_TRACE();
5355 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5357 txq = dev->data->tx_queues[tx_queue_id];
5358 IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
5359 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
5360 txdctl |= IXGBE_TXDCTL_ENABLE;
5361 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
5363 /* Wait until TX Enable ready */
5364 if (hw->mac.type == ixgbe_mac_82599EB) {
5365 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5368 txdctl = IXGBE_READ_REG(hw,
5369 IXGBE_TXDCTL(txq->reg_idx));
5370 } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
5372 PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d",
5376 IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
5377 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
5383 * Stop Transmit Units for specified queue.
5385 int __attribute__((cold))
5386 ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
5388 struct ixgbe_hw *hw;
5389 struct ixgbe_tx_queue *txq;
5391 uint32_t txtdh, txtdt;
5394 PMD_INIT_FUNC_TRACE();
5395 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5397 txq = dev->data->tx_queues[tx_queue_id];
5399 /* Wait until TX queue is empty */
5400 if (hw->mac.type == ixgbe_mac_82599EB) {
5401 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5403 rte_delay_us(RTE_IXGBE_WAIT_100_US);
5404 txtdh = IXGBE_READ_REG(hw,
5405 IXGBE_TDH(txq->reg_idx));
5406 txtdt = IXGBE_READ_REG(hw,
5407 IXGBE_TDT(txq->reg_idx));
5408 } while (--poll_ms && (txtdh != txtdt));
5411 "Tx Queue %d is not empty when stopping.",
5415 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
5416 txdctl &= ~IXGBE_TXDCTL_ENABLE;
5417 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
5419 /* Wait until TX Enable bit clear */
5420 if (hw->mac.type == ixgbe_mac_82599EB) {
5421 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5424 txdctl = IXGBE_READ_REG(hw,
5425 IXGBE_TXDCTL(txq->reg_idx));
5426 } while (--poll_ms && (txdctl & IXGBE_TXDCTL_ENABLE));
5428 PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d",
5432 if (txq->ops != NULL) {
5433 txq->ops->release_mbufs(txq);
5434 txq->ops->reset(txq);
5436 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
5442 ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
5443 struct rte_eth_rxq_info *qinfo)
5445 struct ixgbe_rx_queue *rxq;
5447 rxq = dev->data->rx_queues[queue_id];
5449 qinfo->mp = rxq->mb_pool;
5450 qinfo->scattered_rx = dev->data->scattered_rx;
5451 qinfo->nb_desc = rxq->nb_rx_desc;
5453 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
5454 qinfo->conf.rx_drop_en = rxq->drop_en;
5455 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
5456 qinfo->conf.offloads = rxq->offloads;
5460 ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
5461 struct rte_eth_txq_info *qinfo)
5463 struct ixgbe_tx_queue *txq;
5465 txq = dev->data->tx_queues[queue_id];
5467 qinfo->nb_desc = txq->nb_tx_desc;
5469 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
5470 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
5471 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
5473 qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
5474 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
5475 qinfo->conf.offloads = txq->offloads;
5476 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
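/*
 * Hedged usage sketch (guarded out of the build): the generic ethdev
 * calls that land in the two *_info_get callbacks above.
 */
#ifdef IXGBE_RXTX_USAGE_EXAMPLES
static void
example_query_queue_info(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_rxq_info rx_info;
	struct rte_eth_txq_info tx_info;

	if (rte_eth_rx_queue_info_get(port_id, queue_id, &rx_info) == 0)
		PMD_INIT_LOG(DEBUG, "rxq %u: %u descriptors",
			     queue_id, rx_info.nb_desc);
	if (rte_eth_tx_queue_info_get(port_id, queue_id, &tx_info) == 0)
		PMD_INIT_LOG(DEBUG, "txq %u: tx_rs_thresh %u",
			     queue_id, tx_info.conf.tx_rs_thresh);
}
#endif /* IXGBE_RXTX_USAGE_EXAMPLES */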
5480 * [VF] Initializes Receive Unit.
5482 int __attribute__((cold))
5483 ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
5485 struct ixgbe_hw *hw;
5486 struct ixgbe_rx_queue *rxq;
5487 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
5489 uint32_t srrctl, psrtype = 0;
5494 PMD_INIT_FUNC_TRACE();
5495 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5497 if (rte_is_power_of_2(dev->data->nb_rx_queues) == 0) {
5498 PMD_INIT_LOG(ERR, "The number of Rx queue invalid, "
5499 "it should be power of 2");
5503 if (dev->data->nb_rx_queues > hw->mac.max_rx_queues) {
5504 PMD_INIT_LOG(ERR, "The number of Rx queue invalid, "
5505 "it should be equal to or less than %d",
5506 hw->mac.max_rx_queues);
5511 * When the VF driver issues a IXGBE_VF_RESET request, the PF driver
5512 * disables the VF receipt of packets if the PF MTU is > 1500.
5513 * This is done to deal with the 82599 limitation that forces
5514 * the PF and all VFs to share the same MTU.
5515 * Then, the PF driver re-enables VF receipt of packets when
5516 * the VF driver issues a IXGBE_VF_SET_LPE request.
5517 * In the meantime, the VF device cannot be used, even if the VF driver
5518 * and the Guest VM network stack are ready to accept packets with a
5519 * size up to the PF MTU.
5520 * As a work-around to this PF behaviour, force the call to
5521 * ixgbevf_rlpml_set_vf even if jumbo frames are not used. This way,
5522 * VF packet reception works in all cases.
5524 ixgbevf_rlpml_set_vf(hw,
5525 (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
5528 * Assume no header split and no VLAN strip support
5529 * on any Rx queue first.
5531 rxmode->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
5532 /* Setup RX queues */
5533 for (i = 0; i < dev->data->nb_rx_queues; i++) {
5534 rxq = dev->data->rx_queues[i];
5536 /* Allocate buffers for descriptor rings */
5537 ret = ixgbe_alloc_rx_queue_mbufs(rxq);
5541 /* Setup the Base and Length of the Rx Descriptor Rings */
5542 bus_addr = rxq->rx_ring_phys_addr;
5544 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
5545 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
5546 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
5547 (uint32_t)(bus_addr >> 32));
5548 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
5549 rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
5550 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0);
5551 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0);
5554 /* Configure the SRRCTL register */
5555 srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
5557 /* Set if packets are dropped when no descriptors available */
5559 srrctl |= IXGBE_SRRCTL_DROP_EN;
5562 * Configure the RX buffer size in the BSIZEPACKET field of
5563 * the SRRCTL register of the queue.
5564 * The value is in 1 KB resolution. Valid values can be from
5567 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
5568 RTE_PKTMBUF_HEADROOM);
5569 srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
5570 IXGBE_SRRCTL_BSIZEPKT_MASK);
5573 * VF modification to write virtual function SRRCTL register
5575 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), srrctl);
5577 buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
5578 IXGBE_SRRCTL_BSIZEPKT_SHIFT);
5580 if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
5581 /* Account for dual VLAN tag length when checking the buffer size */
5582 (rxmode->max_rx_pkt_len +
5583 2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
5584 if (!dev->data->scattered_rx)
5585 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
5586 dev->data->scattered_rx = 1;
5589 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
5590 rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
5593 /* Set RQPL for VF RSS according to max Rx queue */
5594 psrtype |= (dev->data->nb_rx_queues >> 1) <<
5595 IXGBE_PSRTYPE_RQPL_SHIFT;
5596 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
5598 ixgbe_set_rx_function(dev);
5604 * [VF] Initializes Transmit Unit.
5606 void __attribute__((cold))
5607 ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
5609 struct ixgbe_hw *hw;
5610 struct ixgbe_tx_queue *txq;
5615 PMD_INIT_FUNC_TRACE();
5616 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5618 /* Setup the Base and Length of the Tx Descriptor Rings */
5619 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5620 txq = dev->data->tx_queues[i];
5621 bus_addr = txq->tx_ring_phys_addr;
5622 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
5623 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
5624 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i),
5625 (uint32_t)(bus_addr >> 32));
5626 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
5627 txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
5628 /* Setup the HW Tx Head and TX Tail descriptor pointers */
5629 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
5630 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
5633 * Disable Tx Head Writeback RO bit, since this hoses
5634 * bookkeeping if things aren't delivered in order.
5636 txctrl = IXGBE_READ_REG(hw,
5637 IXGBE_VFDCA_TXCTRL(i));
5638 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
5639 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i),
5645 * [VF] Start Transmit and Receive Units.
5647 void __attribute__((cold))
5648 ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
5650 struct ixgbe_hw *hw;
5651 struct ixgbe_tx_queue *txq;
5652 struct ixgbe_rx_queue *rxq;
5658 PMD_INIT_FUNC_TRACE();
5659 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5661 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5662 txq = dev->data->tx_queues[i];
5663 /* Setup Transmit Threshold Registers */
5664 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
5665 txdctl |= txq->pthresh & 0x7F;
5666 txdctl |= ((txq->hthresh & 0x7F) << 8);
5667 txdctl |= ((txq->wthresh & 0x7F) << 16);
5668 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
5671 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5673 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
5674 txdctl |= IXGBE_TXDCTL_ENABLE;
5675 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
5678 /* Wait until TX Enable ready */
5681 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
5682 } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
5684 PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i);
5686 for (i = 0; i < dev->data->nb_rx_queues; i++) {
5688 rxq = dev->data->rx_queues[i];
5690 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
5691 rxdctl |= IXGBE_RXDCTL_ENABLE;
5692 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
5694 /* Wait until RX Enable ready */
5698 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
5699 } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
5701 PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i);
5703 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1);
5709 ixgbe_rss_conf_init(struct ixgbe_rte_flow_rss_conf *out,
5710 const struct rte_flow_action_rss *in)
5712 if (in->key_len > RTE_DIM(out->key) ||
5713 in->queue_num > RTE_DIM(out->queue))
5715 out->conf = (struct rte_flow_action_rss){
5719 .key_len = in->key_len,
5720 .queue_num = in->queue_num,
5721 .key = memcpy(out->key, in->key, in->key_len),
5722 .queue = memcpy(out->queue, in->queue,
5723 sizeof(*in->queue) * in->queue_num),
5729 ixgbe_action_rss_same(const struct rte_flow_action_rss *comp,
5730 const struct rte_flow_action_rss *with)
5732 return (comp->func == with->func &&
5733 comp->level == with->level &&
5734 comp->types == with->types &&
5735 comp->key_len == with->key_len &&
5736 comp->queue_num == with->queue_num &&
5737 !memcmp(comp->key, with->key, with->key_len) &&
5738 !memcmp(comp->queue, with->queue,
5739 sizeof(*with->queue) * with->queue_num));
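/*
 * Hedged sketch of the rte_flow_action_rss shape that
 * ixgbe_rss_conf_init() and ixgbe_action_rss_same() above operate on.
 * Queue numbers and hash types are illustrative; the guard macro is
 * hypothetical.
 */
#ifdef IXGBE_RXTX_USAGE_EXAMPLES
static void
example_flow_rss_action(void)
{
	static const uint16_t queues[2] = { 0, 1 };
	struct rte_flow_action_rss rss = {
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
		.level = 0,
		.types = ETH_RSS_IP,
		.key_len = 0,	/* fall back to the default RSS key */
		.key = NULL,
		.queue_num = 2,
		.queue = queues,
	};

	(void)rss;	/* would be attached to a flow rule's actions */
}
#endif /* IXGBE_RXTX_USAGE_EXAMPLES */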
5743 ixgbe_config_rss_filter(struct rte_eth_dev *dev,
5744 struct ixgbe_rte_flow_rss_conf *conf, bool add)
5746 struct ixgbe_hw *hw;
5750 uint16_t sp_reta_size;
5752 struct rte_eth_rss_conf rss_conf = {
5753 .rss_key = conf->conf.key_len ?
5754 (void *)(uintptr_t)conf->conf.key : NULL,
5755 .rss_key_len = conf->conf.key_len,
5756 .rss_hf = conf->conf.types,
5758 struct ixgbe_filter_info *filter_info =
5759 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5761 PMD_INIT_FUNC_TRACE();
5762 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5764 sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
5767 if (ixgbe_action_rss_same(&filter_info->rss_info.conf,
5769 ixgbe_rss_disable(dev);
5770 memset(&filter_info->rss_info, 0,
5771 sizeof(struct ixgbe_rte_flow_rss_conf));
5777 if (filter_info->rss_info.conf.queue_num)
5779 /* Fill in redirection table
5780 * The byte-swap is needed because NIC registers are in
5781 * little-endian order.
5784 for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
5785 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
5787 if (j == conf->conf.queue_num)
5789 reta = (reta << 8) | conf->conf.queue[j];
5791 IXGBE_WRITE_REG(hw, reta_reg,
5795 /* Configure the RSS key and the RSS protocols used to compute
5796 * the RSS hash of input packets.
5798 if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
5799 ixgbe_rss_disable(dev);
5802 if (rss_conf.rss_key == NULL)
5803 rss_conf.rss_key = rss_intel_key; /* Default hash key */
5804 ixgbe_hw_rss_hash_set(hw, &rss_conf);
5806 if (ixgbe_rss_conf_init(&filter_info->rss_info, &conf->conf))
5812 /* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */
5814 ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
5820 ixgbe_recv_pkts_vec(
5821 void __rte_unused *rx_queue,
5822 struct rte_mbuf __rte_unused **rx_pkts,
5823 uint16_t __rte_unused nb_pkts)
5829 ixgbe_recv_scattered_pkts_vec(
5830 void __rte_unused *rx_queue,
5831 struct rte_mbuf __rte_unused **rx_pkts,
5832 uint16_t __rte_unused nb_pkts)
5838 ixgbe_rxq_vec_setup(struct ixgbe_rx_queue __rte_unused *rxq)