4 * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5 * Copyright 2014 6WIND S.A.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * * Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * * Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
18 * * Neither the name of Intel Corporation nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 #include <sys/queue.h>
46 #include <rte_byteorder.h>
47 #include <rte_common.h>
48 #include <rte_cycles.h>
50 #include <rte_debug.h>
51 #include <rte_interrupts.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_launch.h>
57 #include <rte_per_lcore.h>
58 #include <rte_lcore.h>
59 #include <rte_atomic.h>
60 #include <rte_branch_prediction.h>
62 #include <rte_mempool.h>
63 #include <rte_malloc.h>
65 #include <rte_ether.h>
66 #include <rte_ethdev.h>
67 #include <rte_prefetch.h>
71 #include <rte_string_fns.h>
72 #include <rte_errno.h>
75 #include "ixgbe_logs.h"
76 #include "base/ixgbe_api.h"
77 #include "base/ixgbe_vf.h"
78 #include "ixgbe_ethdev.h"
79 #include "base/ixgbe_dcb.h"
80 #include "base/ixgbe_common.h"
81 #include "ixgbe_rxtx.h"
83 /* Bit mask to indicate which bits are required for building the TX context */
84 #define IXGBE_TX_OFFLOAD_MASK ( \
89 PKT_TX_OUTER_IP_CKSUM)
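/*
 * Illustrative note: this mask is ANDed with an mbuf's ol_flags in
 * ixgbe_xmit_pkts() below (tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK)
 * to decide whether a hardware context descriptor has to be set up
 * for the packet before its data descriptors are written.
 */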
91 static inline struct rte_mbuf *
92 rte_rxmbuf_alloc(struct rte_mempool *mp)
96 m = __rte_mbuf_raw_alloc(mp);
97 __rte_mbuf_sanity_check_raw(m, 0);
103 #define RTE_PMD_USE_PREFETCH
106 #ifdef RTE_PMD_USE_PREFETCH
108 * Prefetch a cache line into all cache levels.
110 #define rte_ixgbe_prefetch(p) rte_prefetch0(p)
112 #define rte_ixgbe_prefetch(p) do {} while (0)
115 /*********************************************************************
119 **********************************************************************/
122 * Check for descriptors with their DD bit set and free mbufs.
123 * Return the total number of buffers freed.
125 static inline int __attribute__((always_inline))
126 ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
128 struct ixgbe_tx_entry *txep;
131 struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];
133 /* check DD bit on threshold descriptor */
134 status = txq->tx_ring[txq->tx_next_dd].wb.status;
135 if (!(status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD)))
139 * first buffer to free from S/W ring is at index
140 * tx_next_dd - (tx_rs_thresh-1)
142 txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]);
144 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
145 /* free buffers one at a time */
146 m = __rte_pktmbuf_prefree_seg(txep->mbuf);
149 if (unlikely(m == NULL))
152 if (nb_free >= RTE_IXGBE_TX_MAX_FREE_BUF_SZ ||
153 (nb_free > 0 && m->pool != free[0]->pool)) {
154 rte_mempool_put_bulk(free[0]->pool,
155 (void **)free, nb_free);
163 rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
165 /* buffers were freed, update counters */
166 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
167 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
168 if (txq->tx_next_dd >= txq->nb_tx_desc)
169 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
171 return txq->tx_rs_thresh;
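/*
 * Worked example, assuming the default tx_rs_thresh of 32 defined below:
 * only the threshold descriptor at tx_next_dd is checked for DD; when it
 * is set, the 32 mbufs at sw_ring[tx_next_dd - 31 .. tx_next_dd] are
 * released in one pass, using rte_mempool_put_bulk() for runs of mbufs
 * that come from the same mempool.
 */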
174 /* Populate 4 descriptors with data from 4 mbufs */
176 tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
178 uint64_t buf_dma_addr;
182 for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
183 buf_dma_addr = rte_mbuf_data_dma_addr(*pkts);
184 pkt_len = (*pkts)->data_len;
186 /* write data to descriptor */
187 txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
189 txdp->read.cmd_type_len =
190 rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
192 txdp->read.olinfo_status =
193 rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
195 rte_prefetch0(&(*pkts)->pool);
199 /* Populate 1 descriptor with data from 1 mbuf */
201 tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
203 uint64_t buf_dma_addr;
206 buf_dma_addr = rte_mbuf_data_dma_addr(*pkts);
207 pkt_len = (*pkts)->data_len;
209 /* write data to descriptor */
210 txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
211 txdp->read.cmd_type_len =
212 rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
213 txdp->read.olinfo_status =
214 rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
215 rte_prefetch0(&(*pkts)->pool);
219 * Fill H/W descriptor ring with mbuf data.
220 * Copy mbuf pointers to the S/W ring.
223 ixgbe_tx_fill_hw_ring(struct ixgbe_tx_queue *txq, struct rte_mbuf **pkts,
226 volatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
227 struct ixgbe_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
228 const int N_PER_LOOP = 4;
229 const int N_PER_LOOP_MASK = N_PER_LOOP-1;
230 int mainpart, leftover;
234 * Process most of the packets in chunks of N pkts. Any
235 * leftover packets will get processed one at a time.
237 mainpart = (nb_pkts & ((uint32_t) ~N_PER_LOOP_MASK));
238 leftover = (nb_pkts & ((uint32_t) N_PER_LOOP_MASK));
239 for (i = 0; i < mainpart; i += N_PER_LOOP) {
240 /* Copy N mbuf pointers to the S/W ring */
241 for (j = 0; j < N_PER_LOOP; ++j) {
242 (txep + i + j)->mbuf = *(pkts + i + j);
244 tx4(txdp + i, pkts + i);
247 if (unlikely(leftover > 0)) {
248 for (i = 0; i < leftover; ++i) {
249 (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
250 tx1(txdp + mainpart + i, pkts + mainpart + i);
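/*
 * Example of the mainpart/leftover split above: for nb_pkts = 19 and
 * N_PER_LOOP = 4, mainpart = 19 & ~3 = 16 packets are written four at a
 * time with tx4(), and leftover = 19 & 3 = 3 packets are written one at
 * a time with tx1().
 */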
255 static inline uint16_t
256 tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
259 struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
260 volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring;
264 * Begin scanning the H/W ring for done descriptors when the
265 * number of available descriptors drops below tx_free_thresh. For
266 * each done descriptor, free the associated buffer.
268 if (txq->nb_tx_free < txq->tx_free_thresh)
269 ixgbe_tx_free_bufs(txq);
271 /* Only use descriptors that are available */
272 nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
273 if (unlikely(nb_pkts == 0))
276 /* Use exactly nb_pkts descriptors */
277 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
280 * At this point, we know there are enough descriptors in the
281 * ring to transmit all the packets. This assumes that each
282 * mbuf contains a single segment, and that no new offloads
283 * are expected, which would require a new context descriptor.
287 * See if we're going to wrap-around. If so, handle the top
288 * of the descriptor ring first, then do the bottom. If not,
289 * the processing looks just like the "bottom" part anyway...
291 if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
292 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
293 ixgbe_tx_fill_hw_ring(txq, tx_pkts, n);
296 * We know that the last descriptor in the ring will need to
297 * have its RS bit set because tx_rs_thresh has to be
298 * a divisor of the ring size
300 tx_r[txq->tx_next_rs].read.cmd_type_len |=
301 rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
302 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
307 /* Fill H/W descriptor ring with mbuf data */
308 ixgbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
309 txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
312 * Determine if RS bit should be set
313 * This is what we actually want:
314 * if ((txq->tx_tail - 1) >= txq->tx_next_rs)
315 * but instead of subtracting 1 and doing >=, we can just do
316 * greater than without subtracting.
318 if (txq->tx_tail > txq->tx_next_rs) {
319 tx_r[txq->tx_next_rs].read.cmd_type_len |=
320 rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
321 txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
323 if (txq->tx_next_rs >= txq->nb_tx_desc)
324 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
328 * Check for wrap-around. This would only happen if we used
329 * up to the last descriptor in the ring, no more, no less.
331 if (txq->tx_tail >= txq->nb_tx_desc)
334 /* update tail pointer */
336 IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);
342 ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
347 /* Try to transmit at least chunks of TX_MAX_BURST pkts */
348 if (likely(nb_pkts <= RTE_PMD_IXGBE_TX_MAX_BURST))
349 return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
351 /* transmit more than the max burst, in chunks of TX_MAX_BURST */
355 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_TX_MAX_BURST);
356 ret = tx_xmit_pkts(tx_queue, &(tx_pkts[nb_tx]), n);
357 nb_tx = (uint16_t)(nb_tx + ret);
358 nb_pkts = (uint16_t)(nb_pkts - ret);
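/*
 * Illustration: bursts larger than RTE_PMD_IXGBE_TX_MAX_BURST are split
 * into chunks of at most that size. Assuming a maximum burst of 32, a
 * request for 100 packets is passed to tx_xmit_pkts() as chunks of
 * 32, 32, 32 and 4 (fewer if a chunk is accepted only partially because
 * the ring runs out of free descriptors).
 */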
367 ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
368 volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
369 uint64_t ol_flags, union ixgbe_tx_offload tx_offload)
371 uint32_t type_tucmd_mlhl;
372 uint32_t mss_l4len_idx = 0;
374 uint32_t vlan_macip_lens;
375 union ixgbe_tx_offload tx_offload_mask;
376 uint32_t seqnum_seed = 0;
378 ctx_idx = txq->ctx_curr;
379 tx_offload_mask.data[0] = 0;
380 tx_offload_mask.data[1] = 0;
383 /* Specify which HW CTX to upload. */
384 mss_l4len_idx |= (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT);
386 if (ol_flags & PKT_TX_VLAN_PKT) {
387 tx_offload_mask.vlan_tci |= ~0;
390 /* check if TCP segmentation is required for this packet */
391 if (ol_flags & PKT_TX_TCP_SEG) {
392 /* implies IP cksum in IPv4 */
393 if (ol_flags & PKT_TX_IP_CKSUM)
394 type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4 |
395 IXGBE_ADVTXD_TUCMD_L4T_TCP |
396 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
398 type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV6 |
399 IXGBE_ADVTXD_TUCMD_L4T_TCP |
400 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
402 tx_offload_mask.l2_len |= ~0;
403 tx_offload_mask.l3_len |= ~0;
404 tx_offload_mask.l4_len |= ~0;
405 tx_offload_mask.tso_segsz |= ~0;
406 mss_l4len_idx |= tx_offload.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT;
407 mss_l4len_idx |= tx_offload.l4_len << IXGBE_ADVTXD_L4LEN_SHIFT;
408 } else { /* no TSO, check if hardware checksum is needed */
409 if (ol_flags & PKT_TX_IP_CKSUM) {
410 type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4;
411 tx_offload_mask.l2_len |= ~0;
412 tx_offload_mask.l3_len |= ~0;
415 switch (ol_flags & PKT_TX_L4_MASK) {
416 case PKT_TX_UDP_CKSUM:
417 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP |
418 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
419 mss_l4len_idx |= sizeof(struct udp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
420 tx_offload_mask.l2_len |= ~0;
421 tx_offload_mask.l3_len |= ~0;
423 case PKT_TX_TCP_CKSUM:
424 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP |
425 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
426 mss_l4len_idx |= sizeof(struct tcp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
427 tx_offload_mask.l2_len |= ~0;
428 tx_offload_mask.l3_len |= ~0;
430 case PKT_TX_SCTP_CKSUM:
431 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP |
432 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
433 mss_l4len_idx |= sizeof(struct sctp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
434 tx_offload_mask.l2_len |= ~0;
435 tx_offload_mask.l3_len |= ~0;
438 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_RSV |
439 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
444 if (ol_flags & PKT_TX_OUTER_IP_CKSUM) {
445 tx_offload_mask.outer_l2_len |= ~0;
446 tx_offload_mask.outer_l3_len |= ~0;
447 tx_offload_mask.l2_len |= ~0;
448 seqnum_seed |= tx_offload.outer_l3_len
449 << IXGBE_ADVTXD_OUTER_IPLEN;
450 seqnum_seed |= tx_offload.l2_len
451 << IXGBE_ADVTXD_TUNNEL_LEN;
454 txq->ctx_cache[ctx_idx].flags = ol_flags;
455 txq->ctx_cache[ctx_idx].tx_offload.data[0] =
456 tx_offload_mask.data[0] & tx_offload.data[0];
457 txq->ctx_cache[ctx_idx].tx_offload.data[1] =
458 tx_offload_mask.data[1] & tx_offload.data[1];
459 txq->ctx_cache[ctx_idx].tx_offload_mask = tx_offload_mask;
461 ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
462 vlan_macip_lens = tx_offload.l3_len;
463 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
464 vlan_macip_lens |= (tx_offload.outer_l2_len <<
465 IXGBE_ADVTXD_MACLEN_SHIFT);
467 vlan_macip_lens |= (tx_offload.l2_len <<
468 IXGBE_ADVTXD_MACLEN_SHIFT);
469 vlan_macip_lens |= ((uint32_t)tx_offload.vlan_tci << IXGBE_ADVTXD_VLAN_SHIFT);
470 ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
471 ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
472 ctx_txd->seqnum_seed = seqnum_seed;
476 * Check which hardware context can be used. Use the existing match
477 * or create a new context descriptor.
479 static inline uint32_t
480 what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
481 union ixgbe_tx_offload tx_offload)
483 /* If it matches the currently used context */
484 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
485 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
486 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
487 & tx_offload.data[0])) &&
488 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
489 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
490 & tx_offload.data[1])))) {
491 return txq->ctx_curr;
494 /* Check whether the next context matches */
496 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
497 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
498 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
499 & tx_offload.data[0])) &&
500 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
501 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
502 & tx_offload.data[1])))) {
503 return txq->ctx_curr;
506 /* Mismatch: signal that a new context descriptor must be built */
507 return IXGBE_CTX_NUM;
510 static inline uint32_t
511 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
514 if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM)
515 tmp |= IXGBE_ADVTXD_POPTS_TXSM;
516 if (ol_flags & PKT_TX_IP_CKSUM)
517 tmp |= IXGBE_ADVTXD_POPTS_IXSM;
518 if (ol_flags & PKT_TX_TCP_SEG)
519 tmp |= IXGBE_ADVTXD_POPTS_TXSM;
523 static inline uint32_t
524 tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
526 uint32_t cmdtype = 0;
527 if (ol_flags & PKT_TX_VLAN_PKT)
528 cmdtype |= IXGBE_ADVTXD_DCMD_VLE;
529 if (ol_flags & PKT_TX_TCP_SEG)
530 cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
531 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
532 cmdtype |= (1 << IXGBE_ADVTXD_OUTERIPCS_SHIFT);
536 /* Default RS bit threshold values */
537 #ifndef DEFAULT_TX_RS_THRESH
538 #define DEFAULT_TX_RS_THRESH 32
540 #ifndef DEFAULT_TX_FREE_THRESH
541 #define DEFAULT_TX_FREE_THRESH 32
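/*
 * Note: these defaults are applied in ixgbe_dev_tx_queue_setup() below
 * when the application passes 0 for tx_rs_thresh or tx_free_thresh in
 * struct rte_eth_txconf.
 */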
544 /* Reset transmit descriptors after they have been used */
546 ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
548 struct ixgbe_tx_entry *sw_ring = txq->sw_ring;
549 volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring;
550 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
551 uint16_t nb_tx_desc = txq->nb_tx_desc;
552 uint16_t desc_to_clean_to;
553 uint16_t nb_tx_to_clean;
556 /* Determine the last descriptor needing to be cleaned */
557 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
558 if (desc_to_clean_to >= nb_tx_desc)
559 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
561 /* Check to make sure the last descriptor to clean is done */
562 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
563 status = txr[desc_to_clean_to].wb.status;
564 if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD)))
566 PMD_TX_FREE_LOG(DEBUG,
567 "TX descriptor %4u is not done"
568 "(port=%d queue=%d)",
570 txq->port_id, txq->queue_id);
571 /* Failed to clean any descriptors, better luck next time */
575 /* Figure out how many descriptors will be cleaned */
576 if (last_desc_cleaned > desc_to_clean_to)
577 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
580 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
583 PMD_TX_FREE_LOG(DEBUG,
584 "Cleaning %4u TX descriptors: %4u to %4u "
585 "(port=%d queue=%d)",
586 nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
587 txq->port_id, txq->queue_id);
590 * The last descriptor to clean is done, so that means all the
591 * descriptors from the last descriptor that was cleaned
592 * up to the last descriptor with the RS bit set
593 * are done. Only reset the threshold descriptor.
595 txr[desc_to_clean_to].wb.status = 0;
597 /* Update the txq to reflect the last descriptor that was cleaned */
598 txq->last_desc_cleaned = desc_to_clean_to;
599 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
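/*
 * Worked example of the wrap-around arithmetic above (ignoring the
 * last_id indirection): with nb_tx_desc = 512, tx_rs_thresh = 32 and
 * last_desc_cleaned = 500, desc_to_clean_to = 532 - 512 = 20. Since
 * last_desc_cleaned > desc_to_clean_to, nb_tx_to_clean =
 * (512 - 500) + 20 = 32 descriptors are reclaimed once the DD bit of
 * descriptor 20 is observed.
 */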
606 ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
609 struct ixgbe_tx_queue *txq;
610 struct ixgbe_tx_entry *sw_ring;
611 struct ixgbe_tx_entry *txe, *txn;
612 volatile union ixgbe_adv_tx_desc *txr;
613 volatile union ixgbe_adv_tx_desc *txd, *txp;
614 struct rte_mbuf *tx_pkt;
615 struct rte_mbuf *m_seg;
616 uint64_t buf_dma_addr;
617 uint32_t olinfo_status;
618 uint32_t cmd_type_len;
629 union ixgbe_tx_offload tx_offload;
631 tx_offload.data[0] = 0;
632 tx_offload.data[1] = 0;
634 sw_ring = txq->sw_ring;
636 tx_id = txq->tx_tail;
637 txe = &sw_ring[tx_id];
640 /* Determine if the descriptor ring needs to be cleaned. */
641 if (txq->nb_tx_free < txq->tx_free_thresh)
642 ixgbe_xmit_cleanup(txq);
644 rte_prefetch0(&txe->mbuf->pool);
647 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
650 pkt_len = tx_pkt->pkt_len;
653 * Determine how many (if any) context descriptors
654 * are needed for offload functionality.
656 ol_flags = tx_pkt->ol_flags;
658 /* If hardware offload required */
659 tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK;
661 tx_offload.l2_len = tx_pkt->l2_len;
662 tx_offload.l3_len = tx_pkt->l3_len;
663 tx_offload.l4_len = tx_pkt->l4_len;
664 tx_offload.vlan_tci = tx_pkt->vlan_tci;
665 tx_offload.tso_segsz = tx_pkt->tso_segsz;
666 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
667 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
669 /* Decide whether a new context descriptor must be built or an existing one reused. */
670 ctx = what_advctx_update(txq, tx_ol_req,
672 /* Only allocate a context descriptor if required */
673 new_ctx = (ctx == IXGBE_CTX_NUM);
678 * Keep track of how many descriptors are used in this loop.
679 * This will always be the number of segments + the number of
680 * context descriptors required to transmit the packet.
682 nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
685 nb_used + txq->nb_tx_used >= txq->tx_rs_thresh)
686 /* set RS on the previous packet in the burst */
687 txp->read.cmd_type_len |=
688 rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
691 * The number of descriptors that must be allocated for a
692 * packet is the number of segments of that packet, plus 1
693 * Context Descriptor for the hardware offload, if any.
694 * Determine the last TX descriptor to allocate in the TX ring
695 * for the packet, starting from the current position (tx_id)
698 tx_last = (uint16_t) (tx_id + nb_used - 1);
701 if (tx_last >= txq->nb_tx_desc)
702 tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
704 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
705 " tx_first=%u tx_last=%u",
706 (unsigned) txq->port_id,
707 (unsigned) txq->queue_id,
713 * Make sure there are enough TX descriptors available to
714 * transmit the entire packet.
715 * nb_used better be less than or equal to txq->tx_rs_thresh
717 if (nb_used > txq->nb_tx_free) {
718 PMD_TX_FREE_LOG(DEBUG,
719 "Not enough free TX descriptors "
720 "nb_used=%4u nb_free=%4u "
721 "(port=%d queue=%d)",
722 nb_used, txq->nb_tx_free,
723 txq->port_id, txq->queue_id);
725 if (ixgbe_xmit_cleanup(txq) != 0) {
726 /* Could not clean any descriptors */
732 /* nb_used better be <= txq->tx_rs_thresh */
733 if (unlikely(nb_used > txq->tx_rs_thresh)) {
734 PMD_TX_FREE_LOG(DEBUG,
735 "The number of descriptors needed to "
736 "transmit the packet exceeds the "
737 "RS bit threshold. This will impact "
739 "nb_used=%4u nb_free=%4u "
741 "(port=%d queue=%d)",
742 nb_used, txq->nb_tx_free,
744 txq->port_id, txq->queue_id);
746 * Loop here until there are enough TX
747 * descriptors or until the ring cannot be cleaned any further.
750 while (nb_used > txq->nb_tx_free) {
751 if (ixgbe_xmit_cleanup(txq) != 0) {
753 * Could not clean any
765 * By now there are enough free TX descriptors to transmit
770 * Set common flags of all TX Data Descriptors.
772 * The following bits must be set in all Data Descriptors:
773 * - IXGBE_ADVTXD_DTYP_DATA
774 * - IXGBE_ADVTXD_DCMD_DEXT
776 * The following bits must be set in the first Data Descriptor
777 * and are ignored in the other ones:
778 * - IXGBE_ADVTXD_DCMD_IFCS
779 * - IXGBE_ADVTXD_MAC_1588
780 * - IXGBE_ADVTXD_DCMD_VLE
782 * The following bits must only be set in the last Data
784 * - IXGBE_TXD_CMD_EOP
786 * The following bits can be set in any Data Descriptor, but
787 * are only set in the last Data Descriptor:
790 cmd_type_len = IXGBE_ADVTXD_DTYP_DATA |
791 IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
793 #ifdef RTE_LIBRTE_IEEE1588
794 if (ol_flags & PKT_TX_IEEE1588_TMST)
795 cmd_type_len |= IXGBE_ADVTXD_MAC_1588;
801 if (ol_flags & PKT_TX_TCP_SEG) {
802 /* when TSO is on, the paylen in the descriptor is
803 * not the packet length but the TCP payload length */
804 pkt_len -= (tx_offload.l2_len +
805 tx_offload.l3_len + tx_offload.l4_len);
809 * Setup the TX Advanced Context Descriptor if required
812 volatile struct ixgbe_adv_tx_context_desc *
815 ctx_txd = (volatile struct
816 ixgbe_adv_tx_context_desc *)
819 txn = &sw_ring[txe->next_id];
820 rte_prefetch0(&txn->mbuf->pool);
822 if (txe->mbuf != NULL) {
823 rte_pktmbuf_free_seg(txe->mbuf);
827 ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
830 txe->last_id = tx_last;
831 tx_id = txe->next_id;
836 * Set up the TX Advanced Data Descriptor.
837 * This path is taken whether a new context
838 * descriptor was created or an existing one is reused.
840 cmd_type_len |= tx_desc_ol_flags_to_cmdtype(ol_flags);
841 olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
842 olinfo_status |= ctx << IXGBE_ADVTXD_IDX_SHIFT;
845 olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
850 txn = &sw_ring[txe->next_id];
851 rte_prefetch0(&txn->mbuf->pool);
853 if (txe->mbuf != NULL)
854 rte_pktmbuf_free_seg(txe->mbuf);
858 * Set up Transmit Data Descriptor.
860 slen = m_seg->data_len;
861 buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
862 txd->read.buffer_addr =
863 rte_cpu_to_le_64(buf_dma_addr);
864 txd->read.cmd_type_len =
865 rte_cpu_to_le_32(cmd_type_len | slen);
866 txd->read.olinfo_status =
867 rte_cpu_to_le_32(olinfo_status);
868 txe->last_id = tx_last;
869 tx_id = txe->next_id;
872 } while (m_seg != NULL);
875 * The last packet data descriptor needs End Of Packet (EOP)
877 cmd_type_len |= IXGBE_TXD_CMD_EOP;
878 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
879 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
881 /* Set RS bit only on threshold packets' last descriptor */
882 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
883 PMD_TX_FREE_LOG(DEBUG,
884 "Setting RS bit on TXD id="
885 "%4u (port=%d queue=%d)",
886 tx_last, txq->port_id, txq->queue_id);
888 cmd_type_len |= IXGBE_TXD_CMD_RS;
890 /* Update txq RS bit counters */
896 txd->read.cmd_type_len |= rte_cpu_to_le_32(cmd_type_len);
900 /* set RS on last packet in the burst */
902 txp->read.cmd_type_len |= rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
907 * Set the Transmit Descriptor Tail (TDT)
909 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
910 (unsigned) txq->port_id, (unsigned) txq->queue_id,
911 (unsigned) tx_id, (unsigned) nb_tx);
912 IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
913 txq->tx_tail = tx_id;
918 /*********************************************************************
922 **********************************************************************/
923 #define IXGBE_PACKET_TYPE_IPV4 0X01
924 #define IXGBE_PACKET_TYPE_IPV4_TCP 0X11
925 #define IXGBE_PACKET_TYPE_IPV4_UDP 0X21
926 #define IXGBE_PACKET_TYPE_IPV4_SCTP 0X41
927 #define IXGBE_PACKET_TYPE_IPV4_EXT 0X03
928 #define IXGBE_PACKET_TYPE_IPV4_EXT_SCTP 0X43
929 #define IXGBE_PACKET_TYPE_IPV6 0X04
930 #define IXGBE_PACKET_TYPE_IPV6_TCP 0X14
931 #define IXGBE_PACKET_TYPE_IPV6_UDP 0X24
932 #define IXGBE_PACKET_TYPE_IPV6_EXT 0X0C
933 #define IXGBE_PACKET_TYPE_IPV6_EXT_TCP 0X1C
934 #define IXGBE_PACKET_TYPE_IPV6_EXT_UDP 0X2C
935 #define IXGBE_PACKET_TYPE_IPV4_IPV6 0X05
936 #define IXGBE_PACKET_TYPE_IPV4_IPV6_TCP 0X15
937 #define IXGBE_PACKET_TYPE_IPV4_IPV6_UDP 0X25
938 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT 0X0D
939 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
940 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
941 #define IXGBE_PACKET_TYPE_MAX 0X80
942 #define IXGBE_PACKET_TYPE_MASK 0X7F
943 #define IXGBE_PACKET_TYPE_SHIFT 0X04
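/*
 * Index derivation example: the table below is indexed with
 * (pkt_info >> IXGBE_PACKET_TYPE_SHIFT) & IXGBE_PACKET_TYPE_MASK.
 * A pkt_info value of 0x0110, for instance, yields index 0x11, i.e.
 * IXGBE_PACKET_TYPE_IPV4_TCP, which translates to
 * RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP.
 */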
945 /* @note: fix ixgbe_dev_supported_ptypes_get() if anything is changed here. */
946 static inline uint32_t
947 ixgbe_rxd_pkt_info_to_pkt_type(uint16_t pkt_info)
949 static const uint32_t
950 ptype_table[IXGBE_PACKET_TYPE_MAX] __rte_cache_aligned = {
951 [IXGBE_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
953 [IXGBE_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
954 RTE_PTYPE_L3_IPV4_EXT,
955 [IXGBE_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
957 [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
958 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
959 RTE_PTYPE_INNER_L3_IPV6,
960 [IXGBE_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
961 RTE_PTYPE_L3_IPV6_EXT,
962 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
963 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
964 RTE_PTYPE_INNER_L3_IPV6_EXT,
965 [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
966 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
967 [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
968 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
969 [IXGBE_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
970 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
971 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
972 [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
973 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
974 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
975 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
976 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
977 [IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
978 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
979 [IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
980 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
981 [IXGBE_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
982 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
983 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
984 [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
985 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
986 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
987 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
988 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
989 [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
990 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
991 [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
992 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
994 if (unlikely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
995 return RTE_PTYPE_UNKNOWN;
997 pkt_info = (pkt_info >> IXGBE_PACKET_TYPE_SHIFT) &
998 IXGBE_PACKET_TYPE_MASK;
1000 return ptype_table[pkt_info];
1003 static inline uint64_t
1004 ixgbe_rxd_pkt_info_to_pkt_flags(uint16_t pkt_info)
1006 static uint64_t ip_rss_types_map[16] __rte_cache_aligned = {
1007 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
1008 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
1009 PKT_RX_RSS_HASH, 0, 0, 0,
1010 0, 0, 0, PKT_RX_FDIR,
1012 #ifdef RTE_LIBRTE_IEEE1588
1013 static uint64_t ip_pkt_etqf_map[8] = {
1014 0, 0, 0, PKT_RX_IEEE1588_PTP,
1018 if (likely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
1019 return ip_pkt_etqf_map[(pkt_info >> 4) & 0X07] |
1020 ip_rss_types_map[pkt_info & 0XF];
1022 return ip_rss_types_map[pkt_info & 0XF];
1024 return ip_rss_types_map[pkt_info & 0XF];
1028 static inline uint64_t
1029 rx_desc_status_to_pkt_flags(uint32_t rx_status)
1034 * Check only whether a VLAN is present.
1035 * Do not check whether the L3/L4 RX checksum was done by the NIC;
1036 * that can be found from the rte_eth_rxmode.hw_ip_checksum flag.
1038 pkt_flags = (rx_status & IXGBE_RXD_STAT_VP) ? PKT_RX_VLAN_PKT : 0;
1040 #ifdef RTE_LIBRTE_IEEE1588
1041 if (rx_status & IXGBE_RXD_STAT_TMST)
1042 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
1047 static inline uint64_t
1048 rx_desc_error_to_pkt_flags(uint32_t rx_status)
1053 * Bit 31: IPE, IPv4 checksum error
1054 * Bit 30: L4I, L4 integrity error
1056 static uint64_t error_to_pkt_flags_map[4] = {
1057 0, PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
1058 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
1060 pkt_flags = error_to_pkt_flags_map[(rx_status >>
1061 IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK];
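/*
 * Example of the lookup above, assuming IXGBE_RXDADV_ERR_CKSUM_BIT/MSK
 * select the two-bit L4E/IPE field: index 1 (L4 error only) yields
 * PKT_RX_L4_CKSUM_BAD, index 2 (IP error only) yields
 * PKT_RX_IP_CKSUM_BAD, and index 3 yields both flags.
 */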
1063 if ((rx_status & IXGBE_RXD_STAT_OUTERIPCS) &&
1064 (rx_status & IXGBE_RXDADV_ERR_OUTERIPER)) {
1065 pkt_flags |= PKT_RX_EIP_CKSUM_BAD;
1072 * LOOK_AHEAD defines how many descriptor statuses to check beyond
1073 * the current descriptor.
1074 * It must be a compile-time #define for optimal performance.
1075 * Do not change the value of LOOK_AHEAD, as the ixgbe_rx_scan_hw_ring
1076 * function only works with LOOK_AHEAD=8.
1078 #define LOOK_AHEAD 8
1079 #if (LOOK_AHEAD != 8)
1080 #error "PMD IXGBE: LOOK_AHEAD must be 8\n"
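/*
 * Scan sketch: ixgbe_rx_scan_hw_ring() below walks the ring in groups of
 * LOOK_AHEAD (8) descriptors. For each group it reads the 8 status words
 * backwards, counts how many have the DD bit set, converts that many
 * descriptors into mbufs, and stops at the first group in which fewer
 * than 8 descriptors are done. E.g. if only 5 descriptors of a group are
 * complete, 5 packets are staged and the scan ends there.
 */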
1083 ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
1085 volatile union ixgbe_adv_rx_desc *rxdp;
1086 struct ixgbe_rx_entry *rxep;
1087 struct rte_mbuf *mb;
1091 uint32_t s[LOOK_AHEAD];
1092 uint16_t pkt_info[LOOK_AHEAD];
1093 int i, j, nb_rx = 0;
1096 /* get references to current descriptor and S/W ring entry */
1097 rxdp = &rxq->rx_ring[rxq->rx_tail];
1098 rxep = &rxq->sw_ring[rxq->rx_tail];
1100 status = rxdp->wb.upper.status_error;
1101 /* check to make sure there is at least 1 packet to receive */
1102 if (!(status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
1106 * Scan LOOK_AHEAD descriptors at a time to determine which descriptors
1107 * reference packets that are ready to be received.
1109 for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST;
1110 i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD)
1112 /* Read descriptor statuses backwards to avoid a race condition */
1113 for (j = LOOK_AHEAD-1; j >= 0; --j)
1114 s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error);
1116 for (j = LOOK_AHEAD - 1; j >= 0; --j)
1117 pkt_info[j] = rxdp[j].wb.lower.lo_dword.
1120 /* Compute how many status bits were set */
1122 for (j = 0; j < LOOK_AHEAD; ++j)
1123 nb_dd += s[j] & IXGBE_RXDADV_STAT_DD;
1127 /* Translate descriptor info to mbuf format */
1128 for (j = 0; j < nb_dd; ++j) {
1130 pkt_len = rte_le_to_cpu_16(rxdp[j].wb.upper.length) -
1132 mb->data_len = pkt_len;
1133 mb->pkt_len = pkt_len;
1134 mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
1136 /* convert descriptor fields to rte mbuf flags */
1137 pkt_flags = rx_desc_status_to_pkt_flags(s[j]);
1138 pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
1140 ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info[j]);
1141 mb->ol_flags = pkt_flags;
1143 ixgbe_rxd_pkt_info_to_pkt_type(pkt_info[j]);
1145 if (likely(pkt_flags & PKT_RX_RSS_HASH))
1146 mb->hash.rss = rte_le_to_cpu_32(
1147 rxdp[j].wb.lower.hi_dword.rss);
1148 else if (pkt_flags & PKT_RX_FDIR) {
1149 mb->hash.fdir.hash = rte_le_to_cpu_16(
1150 rxdp[j].wb.lower.hi_dword.csum_ip.csum) &
1151 IXGBE_ATR_HASH_MASK;
1152 mb->hash.fdir.id = rte_le_to_cpu_16(
1153 rxdp[j].wb.lower.hi_dword.csum_ip.ip_id);
1157 /* Move mbuf pointers from the S/W ring to the stage */
1158 for (j = 0; j < LOOK_AHEAD; ++j) {
1159 rxq->rx_stage[i + j] = rxep[j].mbuf;
1162 /* stop scanning if not all LOOK_AHEAD descriptors in this group were done */
1163 if (nb_dd != LOOK_AHEAD)
1167 /* clear software ring entries so we can cleanup correctly */
1168 for (i = 0; i < nb_rx; ++i) {
1169 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1177 ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf)
1179 volatile union ixgbe_adv_rx_desc *rxdp;
1180 struct ixgbe_rx_entry *rxep;
1181 struct rte_mbuf *mb;
1186 /* allocate buffers in bulk directly into the S/W ring */
1187 alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
1188 rxep = &rxq->sw_ring[alloc_idx];
1189 diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
1190 rxq->rx_free_thresh);
1191 if (unlikely(diag != 0))
1194 rxdp = &rxq->rx_ring[alloc_idx];
1195 for (i = 0; i < rxq->rx_free_thresh; ++i) {
1196 /* populate the static rte mbuf fields */
1201 mb->port = rxq->port_id;
1204 rte_mbuf_refcnt_set(mb, 1);
1205 mb->data_off = RTE_PKTMBUF_HEADROOM;
1207 /* populate the descriptors */
1208 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mb));
1209 rxdp[i].read.hdr_addr = 0;
1210 rxdp[i].read.pkt_addr = dma_addr;
1213 /* update state of internal queue structure */
1214 rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh;
1215 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1216 rxq->rx_free_trigger = rxq->rx_free_thresh - 1;
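/*
 * Trigger progression example: with rx_free_thresh = 32, rx_free_trigger
 * advances by 32 after every refill (e.g. 31 -> 63 -> 95 -> ...) and
 * wraps back to rx_free_thresh - 1 = 31 at the end of the ring; each
 * refill repopulates exactly rx_free_thresh descriptors starting at
 * alloc_idx = rx_free_trigger - (rx_free_thresh - 1).
 */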
1222 static inline uint16_t
1223 ixgbe_rx_fill_from_stage(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
1226 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1229 /* how many packets are ready to return? */
1230 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1232 /* copy mbuf pointers to the application's packet list */
1233 for (i = 0; i < nb_pkts; ++i)
1234 rx_pkts[i] = stage[i];
1236 /* update internal queue state */
1237 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1238 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
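/*
 * Staging example: if a previous scan left 32 packets in rx_stage[] and
 * the application now requests only 4, the 4 oldest staged mbufs are
 * returned, rx_nb_avail drops to 28 and rx_next_avail advances by 4; the
 * remaining packets are handed out by later calls before the HW ring is
 * scanned again (see rx_recv_pkts() below).
 */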
1243 static inline uint16_t
1244 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1247 struct ixgbe_rx_queue *rxq = (struct ixgbe_rx_queue *)rx_queue;
1250 /* Any previously recv'd pkts will be returned from the Rx stage */
1251 if (rxq->rx_nb_avail)
1252 return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1254 /* Scan the H/W ring for packets to receive */
1255 nb_rx = (uint16_t)ixgbe_rx_scan_hw_ring(rxq);
1257 /* update internal queue state */
1258 rxq->rx_next_avail = 0;
1259 rxq->rx_nb_avail = nb_rx;
1260 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1262 /* if required, allocate new buffers to replenish descriptors */
1263 if (rxq->rx_tail > rxq->rx_free_trigger) {
1264 uint16_t cur_free_trigger = rxq->rx_free_trigger;
1266 if (ixgbe_rx_alloc_bufs(rxq, true) != 0) {
1268 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1269 "queue_id=%u", (unsigned) rxq->port_id,
1270 (unsigned) rxq->queue_id);
1272 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
1273 rxq->rx_free_thresh;
1276 * Need to rewind any previous receives if we cannot
1277 * allocate new buffers to replenish the old ones.
1279 rxq->rx_nb_avail = 0;
1280 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1281 for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j)
1282 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1287 /* update tail pointer */
1289 IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, cur_free_trigger);
1292 if (rxq->rx_tail >= rxq->nb_rx_desc)
1295 /* received any packets this loop? */
1296 if (rxq->rx_nb_avail)
1297 return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1302 /* split requests into chunks of size RTE_PMD_IXGBE_RX_MAX_BURST */
1304 ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1309 if (unlikely(nb_pkts == 0))
1312 if (likely(nb_pkts <= RTE_PMD_IXGBE_RX_MAX_BURST))
1313 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1315 /* request is relatively large, chunk it up */
1319 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_RX_MAX_BURST);
1320 ret = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1321 nb_rx = (uint16_t)(nb_rx + ret);
1322 nb_pkts = (uint16_t)(nb_pkts - ret);
1331 ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1334 struct ixgbe_rx_queue *rxq;
1335 volatile union ixgbe_adv_rx_desc *rx_ring;
1336 volatile union ixgbe_adv_rx_desc *rxdp;
1337 struct ixgbe_rx_entry *sw_ring;
1338 struct ixgbe_rx_entry *rxe;
1339 struct rte_mbuf *rxm;
1340 struct rte_mbuf *nmb;
1341 union ixgbe_adv_rx_desc rxd;
1354 rx_id = rxq->rx_tail;
1355 rx_ring = rxq->rx_ring;
1356 sw_ring = rxq->sw_ring;
1357 while (nb_rx < nb_pkts) {
1359 * The order of operations here is important as the DD status
1360 * bit must not be read after any other descriptor fields.
1361 * rx_ring and rxdp are pointing to volatile data so the order
1362 * of accesses cannot be reordered by the compiler. If they were
1363 * not volatile, they could be reordered which could lead to
1364 * using invalid descriptor fields when read from rxd.
1366 rxdp = &rx_ring[rx_id];
1367 staterr = rxdp->wb.upper.status_error;
1368 if (!(staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
1375 * If the IXGBE_RXDADV_STAT_EOP flag is not set, the RX packet
1376 * is likely to be invalid and to be dropped by the various
1377 * validation checks performed by the network stack.
1379 * Allocate a new mbuf to replenish the RX ring descriptor.
1380 * If the allocation fails:
1381 * - arrange for that RX descriptor to be the first one
1382 * being parsed the next time the receive function is
1383 * invoked [on the same queue].
1385 * - Stop parsing the RX ring and return immediately.
1387 * This policy does not drop the packet received in the RX
1388 * descriptor for which the allocation of a new mbuf failed.
1389 * Thus, it allows that packet to be retrieved later if
1390 * mbufs have been freed in the meantime.
1391 * As a side effect, holding RX descriptors instead of
1392 * systematically giving them back to the NIC may lead to
1393 * RX ring exhaustion situations.
1394 * However, the NIC can gracefully prevent such situations
1395 * from happening by sending specific "back-pressure" flow control
1396 * frames to its peer(s).
1398 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1399 "ext_err_stat=0x%08x pkt_len=%u",
1400 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1401 (unsigned) rx_id, (unsigned) staterr,
1402 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
1404 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
1406 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1407 "queue_id=%u", (unsigned) rxq->port_id,
1408 (unsigned) rxq->queue_id);
1409 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1414 rxe = &sw_ring[rx_id];
1416 if (rx_id == rxq->nb_rx_desc)
1419 /* Prefetch next mbuf while processing current one. */
1420 rte_ixgbe_prefetch(sw_ring[rx_id].mbuf);
1423 * When next RX descriptor is on a cache-line boundary,
1424 * prefetch the next 4 RX descriptors and the next 8 pointers
1427 if ((rx_id & 0x3) == 0) {
1428 rte_ixgbe_prefetch(&rx_ring[rx_id]);
1429 rte_ixgbe_prefetch(&sw_ring[rx_id]);
1435 rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
1436 rxdp->read.hdr_addr = 0;
1437 rxdp->read.pkt_addr = dma_addr;
1440 * Initialize the returned mbuf.
1441 * 1) setup generic mbuf fields:
1442 * - number of segments,
1445 * - RX port identifier.
1446 * 2) integrate hardware offload data, if any:
1447 * - RSS flag & hash,
1448 * - IP checksum flag,
1449 * - VLAN TCI, if any,
1452 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
1454 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1455 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
1458 rxm->pkt_len = pkt_len;
1459 rxm->data_len = pkt_len;
1460 rxm->port = rxq->port_id;
1462 pkt_info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.hs_rss.
1464 /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
1465 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1467 pkt_flags = rx_desc_status_to_pkt_flags(staterr);
1468 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1469 pkt_flags = pkt_flags |
1470 ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
1471 rxm->ol_flags = pkt_flags;
1472 rxm->packet_type = ixgbe_rxd_pkt_info_to_pkt_type(pkt_info);
1474 if (likely(pkt_flags & PKT_RX_RSS_HASH))
1475 rxm->hash.rss = rte_le_to_cpu_32(
1476 rxd.wb.lower.hi_dword.rss);
1477 else if (pkt_flags & PKT_RX_FDIR) {
1478 rxm->hash.fdir.hash = rte_le_to_cpu_16(
1479 rxd.wb.lower.hi_dword.csum_ip.csum) &
1480 IXGBE_ATR_HASH_MASK;
1481 rxm->hash.fdir.id = rte_le_to_cpu_16(
1482 rxd.wb.lower.hi_dword.csum_ip.ip_id);
1485 * Store the mbuf address into the next entry of the array
1486 * of returned packets.
1488 rx_pkts[nb_rx++] = rxm;
1490 rxq->rx_tail = rx_id;
1493 * If the number of free RX descriptors is greater than the RX free
1494 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1496 * Update the RDT with the value of the last processed RX descriptor
1497 * minus 1, to guarantee that the RDT register is never equal to the
1498 * RDH register, which creates a "full" ring situation from the
1499 * hardware point of view...
1501 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1502 if (nb_hold > rxq->rx_free_thresh) {
1503 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1504 "nb_hold=%u nb_rx=%u",
1505 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1506 (unsigned) rx_id, (unsigned) nb_hold,
1508 rx_id = (uint16_t) ((rx_id == 0) ?
1509 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1510 IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1513 rxq->nb_rx_hold = nb_hold;
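/*
 * RDT update example: nb_hold counts descriptors handed back to the
 * driver since the last tail write. Once it exceeds rx_free_thresh
 * (e.g. 32), the tail register is written with rx_id - 1 (or with
 * nb_rx_desc - 1 when rx_id is 0), which keeps RDT one descriptor behind
 * RDH as described above.
 */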
1518 * Detect an RSC descriptor.
1520 static inline uint32_t
1521 ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
1523 return (rte_le_to_cpu_32(rx->wb.lower.lo_dword.data) &
1524 IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
1528 * ixgbe_fill_cluster_head_buf - fill the first mbuf of the returned packet
1530 * Fill the following info in the HEAD buffer of the Rx cluster:
1531 * - RX port identifier
1532 * - hardware offload data, if any:
1534 * - IP checksum flag
1535 * - VLAN TCI, if any
1537 * @head HEAD of the packet cluster
1538 * @desc HW descriptor to get data from
1539 * @port_id Port ID of the Rx queue
1542 ixgbe_fill_cluster_head_buf(
1543 struct rte_mbuf *head,
1544 union ixgbe_adv_rx_desc *desc,
1551 head->port = port_id;
1553 /* The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
1554 * set in the pkt_flags field.
1556 head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
1557 pkt_info = rte_le_to_cpu_32(desc->wb.lower.lo_dword.hs_rss.pkt_info);
1558 pkt_flags = rx_desc_status_to_pkt_flags(staterr);
1559 pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
1560 pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
1561 head->ol_flags = pkt_flags;
1562 head->packet_type = ixgbe_rxd_pkt_info_to_pkt_type(pkt_info);
1564 if (likely(pkt_flags & PKT_RX_RSS_HASH))
1565 head->hash.rss = rte_le_to_cpu_32(desc->wb.lower.hi_dword.rss);
1566 else if (pkt_flags & PKT_RX_FDIR) {
1567 head->hash.fdir.hash =
1568 rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.csum)
1569 & IXGBE_ATR_HASH_MASK;
1570 head->hash.fdir.id =
1571 rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.ip_id);
1576 * ixgbe_recv_pkts_lro - receive handler for scattered and LRO/RSC cases.
1578 * @rx_queue Rx queue handle
1579 * @rx_pkts table of received packets
1580 * @nb_pkts size of rx_pkts table
1581 * @bulk_alloc if TRUE bulk allocation is used for HW ring refilling
1583 * Handles the Rx HW ring completions when RSC feature is configured. Uses an
1584 * additional ring of ixgbe_rsc_entry's that will hold the relevant RSC info.
1586 * We use the same logic as in Linux and in FreeBSD ixgbe drivers:
1587 * 1) When non-EOP RSC completion arrives:
1588 * a) Update the HEAD of the current RSC aggregation cluster with the new
1589 * segment's data length.
1590 * b) Set the "next" pointer of the current segment to point to the segment
1591 * at the NEXTP index.
1592 * c) Pass the HEAD of RSC aggregation cluster on to the next NEXTP entry
1593 * in the sw_rsc_ring.
1594 * 2) When EOP arrives we just update the cluster's total length and offload
1595 * flags and deliver the cluster up to the upper layers. In our case - put it
1596 * in the rx_pkts table.
1598 * Returns the number of received packets/clusters (according to the "bulk
1599 * receive" interface).
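/*
 * Small RSC walk-through (illustrative): for a packet aggregated from
 * three receive buffers, the first two completions carry a non-zero
 * RSCCNT and no EOP, so the HEAD mbuf pointer is carried forward in
 * sw_sc_ring to the entry named by the descriptor's NEXTP field while
 * pkt_len and nb_segs are accumulated; the third completion has EOP set,
 * so the head mbuf is filled in by ixgbe_fill_cluster_head_buf() and
 * placed into rx_pkts[].
 */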
1601 static inline uint16_t
1602 ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
1605 struct ixgbe_rx_queue *rxq = rx_queue;
1606 volatile union ixgbe_adv_rx_desc *rx_ring = rxq->rx_ring;
1607 struct ixgbe_rx_entry *sw_ring = rxq->sw_ring;
1608 struct ixgbe_scattered_rx_entry *sw_sc_ring = rxq->sw_sc_ring;
1609 uint16_t rx_id = rxq->rx_tail;
1611 uint16_t nb_hold = rxq->nb_rx_hold;
1612 uint16_t prev_id = rxq->rx_tail;
1614 while (nb_rx < nb_pkts) {
1616 struct ixgbe_rx_entry *rxe;
1617 struct ixgbe_scattered_rx_entry *sc_entry;
1618 struct ixgbe_scattered_rx_entry *next_sc_entry;
1619 struct ixgbe_rx_entry *next_rxe = NULL;
1620 struct rte_mbuf *first_seg;
1621 struct rte_mbuf *rxm;
1622 struct rte_mbuf *nmb;
1623 union ixgbe_adv_rx_desc rxd;
1626 volatile union ixgbe_adv_rx_desc *rxdp;
1631 * The code in this whole file uses the volatile pointer to
1632 * ensure the read ordering of the status and the rest of the
1633 * descriptor fields (on the compiler level only!!!). This is so
1634 * UGLY - why not to just use the compiler barrier instead? DPDK
1635 * even has the rte_compiler_barrier() for that.
1637 * But most importantly this is just wrong because this doesn't
1638 * ensure memory ordering in a general case at all. For
1639 * instance, DPDK is supposed to work on Power CPUs where
1640 * compiler barrier may just not be enough!
1642 * I tried to write only this function properly to have a
1643 * starting point (as a part of an LRO/RSC series) but the
1644 * compiler cursed at me when I tried to cast away the
1645 * "volatile" from rx_ring (yes, it's volatile too!!!). So, I'm
1646 * keeping it the way it is for now.
1648 * The code in this file is broken in so many other places and
1649 * will just not work on a big endian CPU anyway therefore the
1650 * lines below will have to be revisited together with the rest
1654 * - Get rid of "volatile" crap and let the compiler do its
1656 * - Use the proper memory barrier (rte_rmb()) to ensure the
1657 * memory ordering below.
1659 rxdp = &rx_ring[rx_id];
1660 staterr = rte_le_to_cpu_32(rxdp->wb.upper.status_error);
1662 if (!(staterr & IXGBE_RXDADV_STAT_DD))
1667 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1668 "staterr=0x%x data_len=%u",
1669 rxq->port_id, rxq->queue_id, rx_id, staterr,
1670 rte_le_to_cpu_16(rxd.wb.upper.length));
1673 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
1675 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed "
1676 "port_id=%u queue_id=%u",
1677 rxq->port_id, rxq->queue_id);
1679 rte_eth_devices[rxq->port_id].data->
1680 rx_mbuf_alloc_failed++;
1684 else if (nb_hold > rxq->rx_free_thresh) {
1685 uint16_t next_rdt = rxq->rx_free_trigger;
1687 if (!ixgbe_rx_alloc_bufs(rxq, false)) {
1689 IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr,
1691 nb_hold -= rxq->rx_free_thresh;
1693 PMD_RX_LOG(DEBUG, "RX bulk alloc failed "
1694 "port_id=%u queue_id=%u",
1695 rxq->port_id, rxq->queue_id);
1697 rte_eth_devices[rxq->port_id].data->
1698 rx_mbuf_alloc_failed++;
1704 rxe = &sw_ring[rx_id];
1705 eop = staterr & IXGBE_RXDADV_STAT_EOP;
1707 next_id = rx_id + 1;
1708 if (next_id == rxq->nb_rx_desc)
1711 /* Prefetch next mbuf while processing current one. */
1712 rte_ixgbe_prefetch(sw_ring[next_id].mbuf);
1715 * When next RX descriptor is on a cache-line boundary,
1716 * prefetch the next 4 RX descriptors and the next 4 pointers
1719 if ((next_id & 0x3) == 0) {
1720 rte_ixgbe_prefetch(&rx_ring[next_id]);
1721 rte_ixgbe_prefetch(&sw_ring[next_id]);
1728 rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
1730 * Update RX descriptor with the physical address of the
1731 * new data buffer of the new allocated mbuf.
1735 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1736 rxdp->read.hdr_addr = 0;
1737 rxdp->read.pkt_addr = dma;
1742 * Set data length & data buffer address of mbuf.
1744 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
1745 rxm->data_len = data_len;
1750 * Get next descriptor index:
1751 * - For RSC it's in the NEXTP field.
1752 * - For a scattered packet it's just the following descriptor.
1755 if (ixgbe_rsc_count(&rxd))
1757 (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
1758 IXGBE_RXDADV_NEXTP_SHIFT;
1762 next_sc_entry = &sw_sc_ring[nextp_id];
1763 next_rxe = &sw_ring[nextp_id];
1764 rte_ixgbe_prefetch(next_rxe);
1767 sc_entry = &sw_sc_ring[rx_id];
1768 first_seg = sc_entry->fbuf;
1769 sc_entry->fbuf = NULL;
1772 * If this is the first buffer of the received packet,
1773 * set the pointer to the first mbuf of the packet and
1774 * initialize its context.
1775 * Otherwise, update the total length and the number of segments
1776 * of the current scattered packet, and update the pointer to
1777 * the last mbuf of the current packet.
1779 if (first_seg == NULL) {
1781 first_seg->pkt_len = data_len;
1782 first_seg->nb_segs = 1;
1784 first_seg->pkt_len += data_len;
1785 first_seg->nb_segs++;
1792 * If this is not the last buffer of the received packet, update
1793 * the pointer to the first mbuf at the NEXTP entry in the
1794 * sw_sc_ring and continue to parse the RX ring.
1796 if (!eop && next_rxe) {
1797 rxm->next = next_rxe->mbuf;
1798 next_sc_entry->fbuf = first_seg;
1803 * This is the last buffer of the received packet - return
1804 * the current cluster to the user.
1808 /* Initialize the first mbuf of the returned packet */
1809 ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq->port_id,
1813 * Deal with the case when HW CRC stripping is disabled.
1814 * That cannot happen when LRO is enabled, but it can still
1815 * happen in scattered RX mode.
1817 first_seg->pkt_len -= rxq->crc_len;
1818 if (unlikely(rxm->data_len <= rxq->crc_len)) {
1819 struct rte_mbuf *lp;
1821 for (lp = first_seg; lp->next != rxm; lp = lp->next)
1824 first_seg->nb_segs--;
1825 lp->data_len -= rxq->crc_len - rxm->data_len;
1827 rte_pktmbuf_free_seg(rxm);
1829 rxm->data_len -= rxq->crc_len;
1831 /* Prefetch data of first segment, if configured to do so. */
1832 rte_packet_prefetch((char *)first_seg->buf_addr +
1833 first_seg->data_off);
1836 * Store the mbuf address into the next entry of the array
1837 * of returned packets.
1839 rx_pkts[nb_rx++] = first_seg;
1843 * Record index of the next RX descriptor to probe.
1845 rxq->rx_tail = rx_id;
1848 * If the number of free RX descriptors is greater than the RX free
1849 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1851 * Update the RDT with the value of the last processed RX descriptor
1852 * minus 1, to guarantee that the RDT register is never equal to the
1853 * RDH register, which creates a "full" ring situation from the
1854 * hardware point of view...
1856 if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {
1857 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1858 "nb_hold=%u nb_rx=%u",
1859 rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
1862 IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, prev_id);
1866 rxq->nb_rx_hold = nb_hold;
1871 ixgbe_recv_pkts_lro_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1874 return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, false);
1878 ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1881 return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, true);
1884 /*********************************************************************
1886 * Queue management functions
1888 **********************************************************************/
1890 static void __attribute__((cold))
1891 ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
1895 if (txq->sw_ring != NULL) {
1896 for (i = 0; i < txq->nb_tx_desc; i++) {
1897 if (txq->sw_ring[i].mbuf != NULL) {
1898 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1899 txq->sw_ring[i].mbuf = NULL;
1905 static void __attribute__((cold))
1906 ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
1909 txq->sw_ring != NULL)
1910 rte_free(txq->sw_ring);
1913 static void __attribute__((cold))
1914 ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
1916 if (txq != NULL && txq->ops != NULL) {
1917 txq->ops->release_mbufs(txq);
1918 txq->ops->free_swring(txq);
1923 void __attribute__((cold))
1924 ixgbe_dev_tx_queue_release(void *txq)
1926 ixgbe_tx_queue_release(txq);
1929 /* (Re)set dynamic ixgbe_tx_queue fields to defaults */
1930 static void __attribute__((cold))
1931 ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
1933 static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
1934 struct ixgbe_tx_entry *txe = txq->sw_ring;
1937 /* Zero out HW ring memory */
1938 for (i = 0; i < txq->nb_tx_desc; i++) {
1939 txq->tx_ring[i] = zeroed_desc;
1942 /* Initialize SW ring entries */
1943 prev = (uint16_t) (txq->nb_tx_desc - 1);
1944 for (i = 0; i < txq->nb_tx_desc; i++) {
1945 volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
1946 txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD);
1949 txe[prev].next_id = i;
1953 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
1954 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
1957 txq->nb_tx_used = 0;
1959 * Always allow 1 descriptor to be un-allocated to avoid
1960 * a H/W race condition
1962 txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
1963 txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
1965 memset((void*)&txq->ctx_cache, 0,
1966 IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
1969 static const struct ixgbe_txq_ops def_txq_ops = {
1970 .release_mbufs = ixgbe_tx_queue_release_mbufs,
1971 .free_swring = ixgbe_tx_free_swring,
1972 .reset = ixgbe_reset_tx_queue,
1975 /* Takes an ethdev and a queue and sets up the tx function to be used based on
1976 * the queue parameters. Used in tx_queue_setup by primary process and then
1977 * in dev_init by secondary process when attaching to an existing ethdev.
1979 void __attribute__((cold))
1980 ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
1982 /* Use a simple Tx queue (no offloads, no multi segs) if possible */
1983 if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS)
1984 && (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
1985 PMD_INIT_LOG(DEBUG, "Using simple tx code path");
1986 #ifdef RTE_IXGBE_INC_VECTOR
1987 if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
1988 (rte_eal_process_type() != RTE_PROC_PRIMARY ||
1989 ixgbe_txq_vec_setup(txq) == 0)) {
1990 PMD_INIT_LOG(DEBUG, "Vector tx enabled.");
1991 dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
1994 dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
1996 PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
1998 " - txq_flags = %lx " "[IXGBE_SIMPLE_FLAGS=%lx]",
1999 (unsigned long)txq->txq_flags,
2000 (unsigned long)IXGBE_SIMPLE_FLAGS);
2002 " - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
2003 (unsigned long)txq->tx_rs_thresh,
2004 (unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
2005 dev->tx_pkt_burst = ixgbe_xmit_pkts;
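/*
 * Selection example: a queue configured with txq_flags equal to
 * IXGBE_SIMPLE_FLAGS and tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST gets
 * the simple burst function (or the vector one when available and
 * tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ); any offload flag or a
 * smaller RS threshold selects the full-featured ixgbe_xmit_pkts().
 */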
2009 int __attribute__((cold))
2010 ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
2013 unsigned int socket_id,
2014 const struct rte_eth_txconf *tx_conf)
2016 const struct rte_memzone *tz;
2017 struct ixgbe_tx_queue *txq;
2018 struct ixgbe_hw *hw;
2019 uint16_t tx_rs_thresh, tx_free_thresh;
2021 PMD_INIT_FUNC_TRACE();
2022 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2025 * Validate the number of transmit descriptors.
2026 * It must not exceed the hardware maximum, and must be a multiple of IXGBE_TXD_ALIGN.
2029 if (nb_desc % IXGBE_TXD_ALIGN != 0 ||
2030 (nb_desc > IXGBE_MAX_RING_DESC) ||
2031 (nb_desc < IXGBE_MIN_RING_DESC)) {
2036 * The following two parameters control the setting of the RS bit on
2037 * transmit descriptors.
2038 * TX descriptors will have their RS bit set after txq->tx_rs_thresh
2039 * descriptors have been used.
2040 * The TX descriptor ring will be cleaned after txq->tx_free_thresh
2041 * descriptors are used or if the number of descriptors required
2042 * to transmit a packet is greater than the number of free TX descriptors.
2044 * The following constraints must be satisfied:
2045 * tx_rs_thresh must be greater than 0.
2046 * tx_rs_thresh must be less than the size of the ring minus 2.
2047 * tx_rs_thresh must be less than or equal to tx_free_thresh.
2048 * tx_rs_thresh must be a divisor of the ring size.
2049 * tx_free_thresh must be greater than 0.
2050 * tx_free_thresh must be less than the size of the ring minus 3.
2051 * One descriptor in the TX ring is used as a sentinel to avoid a
2052 * H/W race condition, hence the maximum threshold constraints.
2053 * When set to zero use default values.
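/*
 * Example of a configuration satisfying all of the above constraints:
 * nb_desc = 512, tx_rs_thresh = 32 (a divisor of 512, below 510 and not
 * larger than tx_free_thresh), tx_free_thresh = 32 (below 509) and
 * tx_thresh.wthresh = 0 (required here because tx_rs_thresh > 1).
 */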
2055 tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
2056 tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
2057 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
2058 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
2059 if (tx_rs_thresh >= (nb_desc - 2)) {
2060 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number "
2061 "of TX descriptors minus 2. (tx_rs_thresh=%u "
2062 "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2063 (int)dev->data->port_id, (int)queue_idx);
2066 if (tx_rs_thresh > DEFAULT_TX_RS_THRESH) {
2067 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less or equal than %u. "
2068 "(tx_rs_thresh=%u port=%d queue=%d)",
2069 DEFAULT_TX_RS_THRESH, (unsigned int)tx_rs_thresh,
2070 (int)dev->data->port_id, (int)queue_idx);
2073 if (tx_free_thresh >= (nb_desc - 3)) {
2074 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
2075 "tx_free_thresh must be less than the number of "
2076 "TX descriptors minus 3. (tx_free_thresh=%u "
2077 "port=%d queue=%d)",
2078 (unsigned int)tx_free_thresh,
2079 (int)dev->data->port_id, (int)queue_idx);
2082 if (tx_rs_thresh > tx_free_thresh) {
2083 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to "
2084 "tx_free_thresh. (tx_free_thresh=%u "
2085 "tx_rs_thresh=%u port=%d queue=%d)",
2086 (unsigned int)tx_free_thresh,
2087 (unsigned int)tx_rs_thresh,
2088 (int)dev->data->port_id,
2092 if ((nb_desc % tx_rs_thresh) != 0) {
2093 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
2094 "number of TX descriptors. (tx_rs_thresh=%u "
2095 "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2096 (int)dev->data->port_id, (int)queue_idx);
2101 * If tx_rs_thresh is greater than 1, then TX WTHRESH should be
2102 * set to 0. If WTHRESH is greater than zero, the RS bit is ignored
2103 * by the NIC and all descriptors are written back after the NIC
2104 * accumulates WTHRESH descriptors.
2106 if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
2107 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
2108 "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
2109 "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2110 (int)dev->data->port_id, (int)queue_idx);
2114 /* Free memory prior to re-allocation if needed... */
2115 if (dev->data->tx_queues[queue_idx] != NULL) {
2116 ixgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
2117 dev->data->tx_queues[queue_idx] = NULL;
2120 /* First allocate the tx queue data structure */
2121 txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue),
2122 RTE_CACHE_LINE_SIZE, socket_id);
2127 * Allocate TX ring hardware descriptors. A memzone large enough to
2128 * handle the maximum ring size is allocated in order to allow for
2129 * resizing in later calls to the queue setup function.
2131 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
2132 sizeof(union ixgbe_adv_tx_desc) * IXGBE_MAX_RING_DESC,
2133 IXGBE_ALIGN, socket_id);
2135 ixgbe_tx_queue_release(txq);
2139 txq->nb_tx_desc = nb_desc;
2140 txq->tx_rs_thresh = tx_rs_thresh;
2141 txq->tx_free_thresh = tx_free_thresh;
2142 txq->pthresh = tx_conf->tx_thresh.pthresh;
2143 txq->hthresh = tx_conf->tx_thresh.hthresh;
2144 txq->wthresh = tx_conf->tx_thresh.wthresh;
2145 txq->queue_id = queue_idx;
2146 txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2147 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2148 txq->port_id = dev->data->port_id;
2149 txq->txq_flags = tx_conf->txq_flags;
2150 txq->ops = &def_txq_ops;
2151 txq->tx_deferred_start = tx_conf->tx_deferred_start;
2154 * Set the TDT tail register address: use VFTDT when running as a virtual function.
2156 if (hw->mac.type == ixgbe_mac_82599_vf ||
2157 hw->mac.type == ixgbe_mac_X540_vf ||
2158 hw->mac.type == ixgbe_mac_X550_vf ||
2159 hw->mac.type == ixgbe_mac_X550EM_x_vf ||
2160 hw->mac.type == ixgbe_mac_X550EM_a_vf)
2161 txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx));
2163 txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
2165 txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
2166 txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;
2168 /* Allocate software ring */
2169 txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
2170 sizeof(struct ixgbe_tx_entry) * nb_desc,
2171 RTE_CACHE_LINE_SIZE, socket_id);
2172 if (txq->sw_ring == NULL) {
2173 ixgbe_tx_queue_release(txq);
2176 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
2177 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
2179 /* set up vector or scalar TX function as appropriate */
2180 ixgbe_set_tx_function(dev, txq);
2182 txq->ops->reset(txq);
2184 dev->data->tx_queues[queue_idx] = txq;
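/*
 * Sketch of the matching application call, assuming an already configured
 * port "port_id"; the values are examples only and NULL may be passed instead
 * of &txconf to take the PMD defaults:
 *
 *   struct rte_eth_txconf txconf = {
 *           .tx_thresh      = { .pthresh = 32, .hthresh = 0, .wthresh = 0 },
 *           .tx_rs_thresh   = 32,
 *           .tx_free_thresh = 32,
 *           .txq_flags      = ETH_TXQ_FLAGS_NOMULTSEGS |
 *                             ETH_TXQ_FLAGS_NOOFFLOADS,
 *   };
 *   if (rte_eth_tx_queue_setup(port_id, 0, 512,
 *                              rte_eth_dev_socket_id(port_id), &txconf) != 0)
 *           rte_exit(EXIT_FAILURE, "tx queue setup failed\n");
 *
 * With no offloads and no multi-segment support requested, the simple or
 * vector Tx path selected in ixgbe_set_tx_function() above may be used.
 */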
2191 * ixgbe_free_sc_cluster - free the not-yet-completed scattered cluster
2193 * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
2194 * in the sw_rsc_ring is not set to NULL but rather points to the next
2195 * mbuf of this RSC aggregation (that has not been completed yet and still
2196 * resides on the HW ring). So, instead of calling rte_pktmbuf_free(), we
2197 * just free the first "nb_segs" segments of the cluster explicitly by calling
2198 * rte_pktmbuf_free_seg() on each of them.
2200 * @m scattered cluster head
2202 static void __attribute__((cold))
2203 ixgbe_free_sc_cluster(struct rte_mbuf *m)
2205 uint8_t i, nb_segs = m->nb_segs;
2206 struct rte_mbuf *next_seg;
2208 for (i = 0; i < nb_segs; i++) {
2210 rte_pktmbuf_free_seg(m);
2215 static void __attribute__((cold))
2216 ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
2220 #ifdef RTE_IXGBE_INC_VECTOR
2221 /* SSE Vector driver has a different way of releasing mbufs. */
2222 if (rxq->rx_using_sse) {
2223 ixgbe_rx_queue_release_mbufs_vec(rxq);
2228 if (rxq->sw_ring != NULL) {
2229 for (i = 0; i < rxq->nb_rx_desc; i++) {
2230 if (rxq->sw_ring[i].mbuf != NULL) {
2231 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
2232 rxq->sw_ring[i].mbuf = NULL;
2235 if (rxq->rx_nb_avail) {
2236 for (i = 0; i < rxq->rx_nb_avail; ++i) {
2237 struct rte_mbuf *mb;
2238 mb = rxq->rx_stage[rxq->rx_next_avail + i];
2239 rte_pktmbuf_free_seg(mb);
2241 rxq->rx_nb_avail = 0;
2245 if (rxq->sw_sc_ring)
2246 for (i = 0; i < rxq->nb_rx_desc; i++)
2247 if (rxq->sw_sc_ring[i].fbuf) {
2248 ixgbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
2249 rxq->sw_sc_ring[i].fbuf = NULL;
2253 static void __attribute__((cold))
2254 ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)
2257 ixgbe_rx_queue_release_mbufs(rxq);
2258 rte_free(rxq->sw_ring);
2259 rte_free(rxq->sw_sc_ring);
2264 void __attribute__((cold))
2265 ixgbe_dev_rx_queue_release(void *rxq)
2267 ixgbe_rx_queue_release(rxq);
2271 * Check if Rx Burst Bulk Alloc function can be used.
2273 * 0: the preconditions are satisfied and the bulk allocation function can be used.
2275 * -EINVAL: the preconditions are NOT satisfied and the default Rx burst
2276 * function must be used.
2278 static inline int __attribute__((cold))
2279 check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
2284 * Make sure the following pre-conditions are satisfied:
2285 * rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST
2286 * rxq->rx_free_thresh < rxq->nb_rx_desc
2287 * (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
2288 * rxq->nb_rx_desc<(IXGBE_MAX_RING_DESC-RTE_PMD_IXGBE_RX_MAX_BURST)
2289 * Scattered packets are not supported. This should be checked
2290 * outside of this function.
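/*
 * Illustrative numbers, assuming the usual values RTE_PMD_IXGBE_RX_MAX_BURST
 * == 32 and IXGBE_MAX_RING_DESC == 4096: a queue created with
 * nb_rx_desc = 512 and rx_free_thresh = 64 satisfies every precondition
 * (64 >= 32, 64 < 512, 512 % 64 == 0, 512 < 4096 - 32), while
 * rx_free_thresh = 0 fails the first one and disables the bulk allocation
 * path for the whole port.
 */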
2292 if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) {
2293 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2294 "rxq->rx_free_thresh=%d, "
2295 "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
2296 rxq->rx_free_thresh, RTE_PMD_IXGBE_RX_MAX_BURST);
2298 } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
2299 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2300 "rxq->rx_free_thresh=%d, "
2301 "rxq->nb_rx_desc=%d",
2302 rxq->rx_free_thresh, rxq->nb_rx_desc);
2304 } else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) {
2305 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2306 "rxq->nb_rx_desc=%d, "
2307 "rxq->rx_free_thresh=%d",
2308 rxq->nb_rx_desc, rxq->rx_free_thresh);
2310 } else if (!(rxq->nb_rx_desc <
2311 (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST))) {
2312 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2313 "rxq->nb_rx_desc=%d, "
2314 "IXGBE_MAX_RING_DESC=%d, "
2315 "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
2316 rxq->nb_rx_desc, IXGBE_MAX_RING_DESC,
2317 RTE_PMD_IXGBE_RX_MAX_BURST);
2324 /* Reset dynamic ixgbe_rx_queue fields back to defaults */
2325 static void __attribute__((cold))
2326 ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
2328 static const union ixgbe_adv_rx_desc zeroed_desc = {{0}};
2330 uint16_t len = rxq->nb_rx_desc;
2333 * By default, the Rx queue setup function allocates enough memory for
2334 * IXGBE_MAX_RING_DESC. The Rx Burst bulk allocation function requires
2335 * extra memory at the end of the descriptor ring to be zero'd out. A
2336 * pre-condition for using the Rx burst bulk alloc function is that the
2337 * number of descriptors is less than or equal to
2338 * (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST). Check all the
2339 * constraints here to see if we need to zero out memory after the end
2340 * of the H/W descriptor ring.
2342 if (adapter->rx_bulk_alloc_allowed)
2343 /* zero out extra memory */
2344 len += RTE_PMD_IXGBE_RX_MAX_BURST;
2347 * Zero out HW ring memory. Zero out extra memory at the end of
2348 * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
2349 * reads extra memory as zeros.
2351 for (i = 0; i < len; i++) {
2352 rxq->rx_ring[i] = zeroed_desc;
2356 * initialize extra software ring entries. Space for these extra
2357 * entries is always allocated
2359 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
2360 for (i = rxq->nb_rx_desc; i < len; ++i) {
2361 rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
2364 rxq->rx_nb_avail = 0;
2365 rxq->rx_next_avail = 0;
2366 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
2368 rxq->nb_rx_hold = 0;
2369 rxq->pkt_first_seg = NULL;
2370 rxq->pkt_last_seg = NULL;
2372 #ifdef RTE_IXGBE_INC_VECTOR
2373 rxq->rxrearm_start = 0;
2374 rxq->rxrearm_nb = 0;
2378 int __attribute__((cold))
2379 ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
2382 unsigned int socket_id,
2383 const struct rte_eth_rxconf *rx_conf,
2384 struct rte_mempool *mp)
2386 const struct rte_memzone *rz;
2387 struct ixgbe_rx_queue *rxq;
2388 struct ixgbe_hw *hw;
2390 struct ixgbe_adapter *adapter =
2391 (struct ixgbe_adapter *)dev->data->dev_private;
2393 PMD_INIT_FUNC_TRACE();
2394 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2397 * Validate number of receive descriptors.
2398 * It must not exceed hardware maximum, and must be multiple
2401 if (nb_desc % IXGBE_RXD_ALIGN != 0 ||
2402 (nb_desc > IXGBE_MAX_RING_DESC) ||
2403 (nb_desc < IXGBE_MIN_RING_DESC)) {
2407 /* Free memory prior to re-allocation if needed... */
2408 if (dev->data->rx_queues[queue_idx] != NULL) {
2409 ixgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
2410 dev->data->rx_queues[queue_idx] = NULL;
2413 /* First allocate the rx queue data structure */
2414 rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct ixgbe_rx_queue),
2415 RTE_CACHE_LINE_SIZE, socket_id);
2419 rxq->nb_rx_desc = nb_desc;
2420 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
2421 rxq->queue_id = queue_idx;
2422 rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2423 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2424 rxq->port_id = dev->data->port_id;
2425 rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
2427 rxq->drop_en = rx_conf->rx_drop_en;
2428 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
2431 * Allocate RX ring hardware descriptors. A memzone large enough to
2432 * handle the maximum ring size is allocated in order to allow for
2433 * resizing in later calls to the queue setup function.
2435 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
2436 RX_RING_SZ, IXGBE_ALIGN, socket_id);
2438 ixgbe_rx_queue_release(rxq);
2443 * Zero init all the descriptors in the ring.
2445 memset(rz->addr, 0, RX_RING_SZ);
2448 * Set the RDT/RDH register addresses: use VFRDT/VFRDH for virtual functions.
2450 if (hw->mac.type == ixgbe_mac_82599_vf ||
2451 hw->mac.type == ixgbe_mac_X540_vf ||
2452 hw->mac.type == ixgbe_mac_X550_vf ||
2453 hw->mac.type == ixgbe_mac_X550EM_x_vf ||
2454 hw->mac.type == ixgbe_mac_X550EM_a_vf) {
2456 IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx));
2458 IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDH(queue_idx));
2462 IXGBE_PCI_REG_ADDR(hw, IXGBE_RDT(rxq->reg_idx));
2464 IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx));
2467 rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
2468 rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
2471 * Certain constraints must be met in order to use the bulk buffer
2472 * allocation Rx burst function. If any of the Rx queues doesn't meet them,
2473 * the feature must be disabled for the whole port.
2475 if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
2476 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc "
2477 "preconditions - canceling the feature for "
2478 "the whole port[%d]",
2479 rxq->queue_id, rxq->port_id);
2480 adapter->rx_bulk_alloc_allowed = false;
2484 * Allocate software ring. Allow for space at the end of the
2485 * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
2486 * function does not access an invalid memory region.
2489 if (adapter->rx_bulk_alloc_allowed)
2490 len += RTE_PMD_IXGBE_RX_MAX_BURST;
2492 rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
2493 sizeof(struct ixgbe_rx_entry) * len,
2494 RTE_CACHE_LINE_SIZE, socket_id);
2495 if (!rxq->sw_ring) {
2496 ixgbe_rx_queue_release(rxq);
2501 * Always allocate even if it's not going to be needed in order to
2502 * simplify the code.
2504 * This ring is used in LRO and Scattered Rx cases and Scattered Rx may
2505 * be requested in ixgbe_dev_rx_init(), which is called later from
2509 rte_zmalloc_socket("rxq->sw_sc_ring",
2510 sizeof(struct ixgbe_scattered_rx_entry) * len,
2511 RTE_CACHE_LINE_SIZE, socket_id);
2512 if (!rxq->sw_sc_ring) {
2513 ixgbe_rx_queue_release(rxq);
2517 PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p "
2518 "dma_addr=0x%"PRIx64,
2519 rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
2520 rxq->rx_ring_phys_addr);
2522 if (!rte_is_power_of_2(nb_desc)) {
2523 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
2524 "preconditions - canceling the feature for "
2525 "the whole port[%d]",
2526 rxq->queue_id, rxq->port_id);
2527 adapter->rx_vec_allowed = false;
2529 ixgbe_rxq_vec_setup(rxq);
2531 dev->data->rx_queues[queue_idx] = rxq;
2533 ixgbe_reset_rx_queue(adapter, rxq);
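/*
 * Sketch of the matching application call, assuming a mempool "mb_pool"
 * created earlier with rte_pktmbuf_pool_create(); values are examples only:
 *
 *   struct rte_eth_rxconf rxconf = {
 *           .rx_free_thresh = 32,
 *           .rx_drop_en     = 0,
 *   };
 *   if (rte_eth_rx_queue_setup(port_id, 0, 512,
 *                              rte_eth_dev_socket_id(port_id),
 *                              &rxconf, mb_pool) != 0)
 *           rte_exit(EXIT_FAILURE, "rx queue setup failed\n");
 *
 * A power-of-two nb_desc such as 512 also keeps the vector Rx path available,
 * per the rte_is_power_of_2() check above.
 */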
2539 ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
2541 #define IXGBE_RXQ_SCAN_INTERVAL 4
2542 volatile union ixgbe_adv_rx_desc *rxdp;
2543 struct ixgbe_rx_queue *rxq;
2546 if (rx_queue_id >= dev->data->nb_rx_queues) {
2547 PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
2551 rxq = dev->data->rx_queues[rx_queue_id];
2552 rxdp = &(rxq->rx_ring[rxq->rx_tail]);
2554 while ((desc < rxq->nb_rx_desc) &&
2555 (rxdp->wb.upper.status_error &
2556 rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) {
2557 desc += IXGBE_RXQ_SCAN_INTERVAL;
2558 rxdp += IXGBE_RXQ_SCAN_INTERVAL;
2559 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
2560 rxdp = &(rxq->rx_ring[rxq->rx_tail +
2561 desc - rxq->nb_rx_desc]);
2568 ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
2570 volatile union ixgbe_adv_rx_desc *rxdp;
2571 struct ixgbe_rx_queue *rxq = rx_queue;
2574 if (unlikely(offset >= rxq->nb_rx_desc))
2576 desc = rxq->rx_tail + offset;
2577 if (desc >= rxq->nb_rx_desc)
2578 desc -= rxq->nb_rx_desc;
2580 rxdp = &rxq->rx_ring[desc];
2581 return !!(rxdp->wb.upper.status_error &
2582 rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD));
2585 void __attribute__((cold))
2586 ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
2589 struct ixgbe_adapter *adapter =
2590 (struct ixgbe_adapter *)dev->data->dev_private;
2592 PMD_INIT_FUNC_TRACE();
2594 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2595 struct ixgbe_tx_queue *txq = dev->data->tx_queues[i];
2597 txq->ops->release_mbufs(txq);
2598 txq->ops->reset(txq);
2602 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2603 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
2605 ixgbe_rx_queue_release_mbufs(rxq);
2606 ixgbe_reset_rx_queue(adapter, rxq);
2612 ixgbe_dev_free_queues(struct rte_eth_dev *dev)
2616 PMD_INIT_FUNC_TRACE();
2618 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2619 ixgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
2620 dev->data->rx_queues[i] = NULL;
2622 dev->data->nb_rx_queues = 0;
2624 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2625 ixgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
2626 dev->data->tx_queues[i] = NULL;
2628 dev->data->nb_tx_queues = 0;
2631 /*********************************************************************
2633 * Device RX/TX init functions
2635 **********************************************************************/
2638 * Receive Side Scaling (RSS)
2639 * See section 7.1.2.8 in the following document:
2640 * "Intel 82599 10 GbE Controller Datasheet" - Revision 2.1 October 2009
2643 * The source and destination IP addresses of the IP header and the source
2644 * and destination ports of TCP/UDP headers, if any, of received packets are
2645 * hashed against a configurable random key to compute a 32-bit RSS hash result.
2646 * The seven (7) LSBs of the 32-bit hash result are used as an index into a
2647 * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
2648 * RSS output index which is used as the RX queue index where to store the received packets.
2650 * The following output is supplied in the RX write-back descriptor:
2651 * - 32-bit result of the Microsoft RSS hash function,
2652 * - 4-bit RSS type field.
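/*
 * In rough pseudo-code (a sketch, not a register-accurate model), the queue
 * selection described above is:
 *
 *   hash  = rss_hash(key, src_ip, dst_ip, src_port, dst_port);
 *   queue = reta[hash & 0x7F];    the 7 LSBs index the 128-entry RETA
 *
 * The 32-bit hash itself is written back in the descriptor and ends up in
 * mbuf->hash.rss for the application.
 */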
2656 * RSS random key supplied in section 7.1.2.8.3 of the Intel 82599 datasheet.
2657 * Used as the default key.
2659 static uint8_t rss_intel_key[40] = {
2660 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
2661 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
2662 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
2663 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
2664 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
2668 ixgbe_rss_disable(struct rte_eth_dev *dev)
2670 struct ixgbe_hw *hw;
2674 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2675 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
2676 mrqc = IXGBE_READ_REG(hw, mrqc_reg);
2677 mrqc &= ~IXGBE_MRQC_RSSEN;
2678 IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
2682 ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
2692 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
2693 rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
2695 hash_key = rss_conf->rss_key;
2696 if (hash_key != NULL) {
2697 /* Fill in RSS hash key */
2698 for (i = 0; i < 10; i++) {
2699 rss_key = hash_key[(i * 4)];
2700 rss_key |= hash_key[(i * 4) + 1] << 8;
2701 rss_key |= hash_key[(i * 4) + 2] << 16;
2702 rss_key |= hash_key[(i * 4) + 3] << 24;
2703 IXGBE_WRITE_REG_ARRAY(hw, rssrk_reg, i, rss_key);
2707 /* Set configured hashing protocols in MRQC register */
2708 rss_hf = rss_conf->rss_hf;
2709 mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */
2710 if (rss_hf & ETH_RSS_IPV4)
2711 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
2712 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
2713 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
2714 if (rss_hf & ETH_RSS_IPV6)
2715 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
2716 if (rss_hf & ETH_RSS_IPV6_EX)
2717 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
2718 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
2719 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
2720 if (rss_hf & ETH_RSS_IPV6_TCP_EX)
2721 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
2722 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
2723 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
2724 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
2725 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
2726 if (rss_hf & ETH_RSS_IPV6_UDP_EX)
2727 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2728 IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
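/*
 * Illustrative application-side configuration that ends up in this function
 * through rte_eth_dev_configure(); the key is a placeholder chosen by the
 * application, and leaving rss_key NULL keeps the rss_intel_key default
 * defined above:
 *
 *   static uint8_t my_rss_key[40] = { 0 };  filled with 40 application bytes
 *   struct rte_eth_conf port_conf = {
 *           .rxmode = { .mq_mode = ETH_MQ_RX_RSS },
 *           .rx_adv_conf = {
 *                   .rss_conf = {
 *                           .rss_key = my_rss_key,
 *                           .rss_hf  = ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP,
 *                   },
 *           },
 *   };
 */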
2732 ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
2733 struct rte_eth_rss_conf *rss_conf)
2735 struct ixgbe_hw *hw;
2740 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2742 if (!ixgbe_rss_update_sp(hw->mac.type)) {
2743 PMD_DRV_LOG(ERR, "RSS hash update is not supported on this "
2747 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
2750 * Excerpt from section 7.1.2.8 Receive-Side Scaling (RSS):
2751 * "RSS enabling cannot be done dynamically while it must be
2752 * preceded by a software reset"
2753 * Before changing anything, first check that the update RSS operation
2754 * does not attempt to disable RSS, if RSS was enabled at
2755 * initialization time, or does not attempt to enable RSS, if RSS was
2756 * disabled at initialization time.
2758 rss_hf = rss_conf->rss_hf & IXGBE_RSS_OFFLOAD_ALL;
2759 mrqc = IXGBE_READ_REG(hw, mrqc_reg);
2760 if (!(mrqc & IXGBE_MRQC_RSSEN)) { /* RSS disabled */
2761 if (rss_hf != 0) /* Enable RSS */
2763 return 0; /* Nothing to do */
2766 if (rss_hf == 0) /* Disable RSS */
2768 ixgbe_hw_rss_hash_set(hw, rss_conf);
2773 ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
2774 struct rte_eth_rss_conf *rss_conf)
2776 struct ixgbe_hw *hw;
2785 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2786 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
2787 rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
2788 hash_key = rss_conf->rss_key;
2789 if (hash_key != NULL) {
2790 /* Return RSS hash key */
2791 for (i = 0; i < 10; i++) {
2792 rss_key = IXGBE_READ_REG_ARRAY(hw, rssrk_reg, i);
2793 hash_key[(i * 4)] = rss_key & 0x000000FF;
2794 hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
2795 hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
2796 hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
2800 /* Get RSS functions configured in MRQC register */
2801 mrqc = IXGBE_READ_REG(hw, mrqc_reg);
2802 if ((mrqc & IXGBE_MRQC_RSSEN) == 0) { /* RSS is disabled */
2803 rss_conf->rss_hf = 0;
2807 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4)
2808 rss_hf |= ETH_RSS_IPV4;
2809 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP)
2810 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
2811 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6)
2812 rss_hf |= ETH_RSS_IPV6;
2813 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX)
2814 rss_hf |= ETH_RSS_IPV6_EX;
2815 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP)
2816 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
2817 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP)
2818 rss_hf |= ETH_RSS_IPV6_TCP_EX;
2819 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP)
2820 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
2821 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP)
2822 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
2823 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP)
2824 rss_hf |= ETH_RSS_IPV6_UDP_EX;
2825 rss_conf->rss_hf = rss_hf;
2830 ixgbe_rss_configure(struct rte_eth_dev *dev)
2832 struct rte_eth_rss_conf rss_conf;
2833 struct ixgbe_hw *hw;
2837 uint16_t sp_reta_size;
2840 PMD_INIT_FUNC_TRACE();
2841 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2843 sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
2846 * Fill in redirection table
2847 * The byte-swap is needed because NIC registers are in
2848 * little-endian order.
2851 for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
2852 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
2854 if (j == dev->data->nb_rx_queues)
2856 reta = (reta << 8) | j;
2858 IXGBE_WRITE_REG(hw, reta_reg,
2863 * Configure the RSS key and the RSS protocols used to compute
2864 * the RSS hash of input packets.
2866 rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
2867 if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
2868 ixgbe_rss_disable(dev);
2871 if (rss_conf.rss_key == NULL)
2872 rss_conf.rss_key = rss_intel_key; /* Default hash key */
2873 ixgbe_hw_rss_hash_set(hw, &rss_conf);
2876 #define NUM_VFTA_REGISTERS 128
2877 #define NIC_RX_BUFFER_SIZE 0x200
2878 #define X550_RX_BUFFER_SIZE 0x180
2881 ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
2883 struct rte_eth_vmdq_dcb_conf *cfg;
2884 struct ixgbe_hw *hw;
2885 enum rte_eth_nb_pools num_pools;
2886 uint32_t mrqc, vt_ctl, queue_mapping, vlanctrl;
2888 uint8_t nb_tcs; /* number of traffic classes */
2891 PMD_INIT_FUNC_TRACE();
2892 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2893 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
2894 num_pools = cfg->nb_queue_pools;
2895 /* Check we have a valid number of pools */
2896 if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
2897 ixgbe_rss_disable(dev);
2900 /* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
2901 nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
2905 * split rx buffer up into sections, each for 1 traffic class
2907 switch (hw->mac.type) {
2908 case ixgbe_mac_X550:
2909 case ixgbe_mac_X550EM_x:
2910 case ixgbe_mac_X550EM_a:
2911 pbsize = (uint16_t)(X550_RX_BUFFER_SIZE / nb_tcs);
2914 pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
2917 for (i = 0; i < nb_tcs; i++) {
2918 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
2919 rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
2920 /* clear 10 bits. */
2921 rxpbsize |= (pbsize << IXGBE_RXPBSIZE_SHIFT); /* set value */
2922 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
2924 /* zero alloc all unused TCs */
2925 for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2926 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
2927 rxpbsize &= (~( 0x3FF << IXGBE_RXPBSIZE_SHIFT ));
2928 /* clear 10 bits. */
2929 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
2932 /* MRQC: enable vmdq and dcb */
2933 mrqc = ((num_pools == ETH_16_POOLS) ? \
2934 IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN );
2935 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2937 /* PFVTCTL: turn on virtualisation and set the default pool */
2938 vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2939 if (cfg->enable_default_pool) {
2940 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
2942 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
2945 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
2947 /* RTRUP2TC: mapping user priorities to traffic classes (TCs) */
2949 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
2951 * mapping is done with 3 bits per priority,
2952 * so shift by i*3 each time
2954 queue_mapping |= ((cfg->dcb_tc[i] & 0x07) << (i * 3));
2956 IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, queue_mapping);
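/*
 * Worked example of the packing above: with
 * cfg->dcb_tc[] = { 0, 0, 1, 1, 2, 2, 3, 3 } for user priorities 0..7,
 * queue_mapping becomes
 *   (1 << 6) | (1 << 9) | (2 << 12) | (2 << 15) | (3 << 18) | (3 << 21)
 *   = 0x6D2240
 * i.e. priorities 0-1 map to TC0, 2-3 to TC1, 4-5 to TC2 and 6-7 to TC3.
 */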
2958 /* RTRPCS: DCB related */
2959 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, IXGBE_RMCS_RRM);
2961 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
2962 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2963 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
2964 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
2966 /* VFTA - enable all vlan filters */
2967 for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
2968 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
2971 /* VFRE: pool enabling for receive - 16 or 32 */
2972 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), \
2973 num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
2976 * MPSAR - allow pools to read specific mac addresses
2977 * In this case, all pools should be able to read from mac addr 0
2979 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), 0xFFFFFFFF);
2980 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), 0xFFFFFFFF);
2982 /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
2983 for (i = 0; i < cfg->nb_pool_maps; i++) {
2984 /* set vlan id in VF register and set the valid bit */
2985 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \
2986 (cfg->pool_map[i].vlan_id & 0xFFF)));
2988 * Put the allowed pools in VFB reg. As we only have 16 or 32
2989 * pools, we only need to use the first half of the register
2992 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), cfg->pool_map[i].pools);
2997 * ixgbe_dcb_tx_hw_config - Configure general DCB TX parameters
2998 * @hw: pointer to hardware structure
2999 * @dcb_config: pointer to ixgbe_dcb_config structure
3002 ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw,
3003 struct ixgbe_dcb_config *dcb_config)
3008 PMD_INIT_FUNC_TRACE();
3009 if (hw->mac.type != ixgbe_mac_82598EB) {
3010 /* Disable the Tx desc arbiter so that MTQC can be changed */
3011 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3012 reg |= IXGBE_RTTDCS_ARBDIS;
3013 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3015 /* Enable DCB for Tx with 8 TCs */
3016 if (dcb_config->num_tcs.pg_tcs == 8) {
3017 reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3020 reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3022 if (dcb_config->vt_mode)
3023 reg |= IXGBE_MTQC_VT_ENA;
3024 IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
3026 /* Disable drop for all queues */
3027 for (q = 0; q < 128; q++)
3028 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3029 (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
3031 /* Enable the Tx desc arbiter */
3032 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3033 reg &= ~IXGBE_RTTDCS_ARBDIS;
3034 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3036 /* Enable Security TX Buffer IFG for DCB */
3037 reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
3038 reg |= IXGBE_SECTX_DCB;
3039 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
3045 * ixgbe_vmdq_dcb_hw_tx_config - Configure general VMDQ+DCB TX parameters
3046 * @dev: pointer to rte_eth_dev structure
3047 * @dcb_config: pointer to ixgbe_dcb_config structure
3050 ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
3051 struct ixgbe_dcb_config *dcb_config)
3053 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3054 &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
3055 struct ixgbe_hw *hw =
3056 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3058 PMD_INIT_FUNC_TRACE();
3059 if (hw->mac.type != ixgbe_mac_82598EB)
3060 /*PF VF Transmit Enable*/
3061 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0),
3062 vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
3064 /*Configure general DCB TX parameters*/
3065 ixgbe_dcb_tx_hw_config(hw,dcb_config);
3070 ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
3071 struct ixgbe_dcb_config *dcb_config)
3073 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3074 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
3075 struct ixgbe_dcb_tc_config *tc;
3078 /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
3079 if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS ) {
3080 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
3081 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
3084 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
3085 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
3087 /* User Priority to Traffic Class mapping */
3088 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3089 j = vmdq_rx_conf->dcb_tc[i];
3090 tc = &dcb_config->tc_config[j];
3091 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
3097 ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
3098 struct ixgbe_dcb_config *dcb_config)
3100 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3101 &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
3102 struct ixgbe_dcb_tc_config *tc;
3105 /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
3106 if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ) {
3107 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
3108 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
3111 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
3112 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
3115 /* User Priority to Traffic Class mapping */
3116 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3117 j = vmdq_tx_conf->dcb_tc[i];
3118 tc = &dcb_config->tc_config[j];
3119 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
3126 ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
3127 struct ixgbe_dcb_config *dcb_config)
3129 struct rte_eth_dcb_rx_conf *rx_conf =
3130 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
3131 struct ixgbe_dcb_tc_config *tc;
3134 dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
3135 dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
3137 /* User Priority to Traffic Class mapping */
3138 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3139 j = rx_conf->dcb_tc[i];
3140 tc = &dcb_config->tc_config[j];
3141 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
3147 ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
3148 struct ixgbe_dcb_config *dcb_config)
3150 struct rte_eth_dcb_tx_conf *tx_conf =
3151 &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
3152 struct ixgbe_dcb_tc_config *tc;
3155 dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
3156 dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
3158 /* User Priority to Traffic Class mapping */
3159 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3160 j = tx_conf->dcb_tc[i];
3161 tc = &dcb_config->tc_config[j];
3162 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
3168 * ixgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
3169 * @hw: pointer to hardware structure
3170 * @dcb_config: pointer to ixgbe_dcb_config structure
3173 ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
3174 struct ixgbe_dcb_config *dcb_config)
3180 PMD_INIT_FUNC_TRACE();
3182 * Disable the arbiter before changing parameters
3183 * (always enable recycle mode; WSP)
3185 reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
3186 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
3188 if (hw->mac.type != ixgbe_mac_82598EB) {
3189 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
3190 if (dcb_config->num_tcs.pg_tcs == 4) {
3191 if (dcb_config->vt_mode)
3192 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3193 IXGBE_MRQC_VMDQRT4TCEN;
3195 /* whether the mode is DCB or DCB_RSS, just
3196 * set the MRQE to RSSXTCEN. RSS is controlled
3199 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
3200 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3201 IXGBE_MRQC_RTRSS4TCEN;
3204 if (dcb_config->num_tcs.pg_tcs == 8) {
3205 if (dcb_config->vt_mode)
3206 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3207 IXGBE_MRQC_VMDQRT8TCEN;
3209 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
3210 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3211 IXGBE_MRQC_RTRSS8TCEN;
3215 IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
3218 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
3219 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3220 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
3221 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
3223 /* VFTA - enable all vlan filters */
3224 for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
3225 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
3229 * Configure Rx packet plane (recycle mode; WSP) and
3232 reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
3233 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
3239 ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill,
3240 uint16_t *max,uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
3242 switch (hw->mac.type) {
3243 case ixgbe_mac_82598EB:
3244 ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
3246 case ixgbe_mac_82599EB:
3247 case ixgbe_mac_X540:
3248 case ixgbe_mac_X550:
3249 case ixgbe_mac_X550EM_x:
3250 case ixgbe_mac_X550EM_a:
3251 ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
3260 ixgbe_dcb_hw_arbite_tx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *max,
3261 uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
3263 switch (hw->mac.type) {
3264 case ixgbe_mac_82598EB:
3265 ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id,tsa);
3266 ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id,tsa);
3268 case ixgbe_mac_82599EB:
3269 case ixgbe_mac_X540:
3270 case ixgbe_mac_X550:
3271 case ixgbe_mac_X550EM_x:
3272 case ixgbe_mac_X550EM_a:
3273 ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,tsa);
3274 ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,tsa, map);
3281 #define DCB_RX_CONFIG 1
3282 #define DCB_TX_CONFIG 1
3283 #define DCB_TX_PB 1024
3285 * ixgbe_dcb_hw_configure - Enable DCB and configure
3286 * general DCB in VT mode and non-VT mode parameters
3287 * @dev: pointer to rte_eth_dev structure
3288 * @dcb_config: pointer to ixgbe_dcb_config structure
3291 ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
3292 struct ixgbe_dcb_config *dcb_config)
3295 uint8_t i,pfc_en,nb_tcs;
3296 uint16_t pbsize, rx_buffer_size;
3297 uint8_t config_dcb_rx = 0;
3298 uint8_t config_dcb_tx = 0;
3299 uint8_t tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3300 uint8_t bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3301 uint16_t refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3302 uint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3303 uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3304 struct ixgbe_dcb_tc_config *tc;
3305 uint32_t max_frame = dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
3306 struct ixgbe_hw *hw =
3307 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3309 switch(dev->data->dev_conf.rxmode.mq_mode){
3310 case ETH_MQ_RX_VMDQ_DCB:
3311 dcb_config->vt_mode = true;
3312 if (hw->mac.type != ixgbe_mac_82598EB) {
3313 config_dcb_rx = DCB_RX_CONFIG;
3315 *get dcb and VT rx configuration parameters
3318 ixgbe_vmdq_dcb_rx_config(dev, dcb_config);
3319 /*Configure general VMDQ and DCB RX parameters*/
3320 ixgbe_vmdq_dcb_configure(dev);
3324 case ETH_MQ_RX_DCB_RSS:
3325 dcb_config->vt_mode = false;
3326 config_dcb_rx = DCB_RX_CONFIG;
3327 /* Get DCB RX configuration parameters from rte_eth_conf */
3328 ixgbe_dcb_rx_config(dev, dcb_config);
3329 /*Configure general DCB RX parameters*/
3330 ixgbe_dcb_rx_hw_config(hw, dcb_config);
3333 PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
3336 switch (dev->data->dev_conf.txmode.mq_mode) {
3337 case ETH_MQ_TX_VMDQ_DCB:
3338 dcb_config->vt_mode = true;
3339 config_dcb_tx = DCB_TX_CONFIG;
3340 /* get DCB and VT TX configuration parameters from rte_eth_conf */
3341 ixgbe_dcb_vt_tx_config(dev,dcb_config);
3342 /*Configure general VMDQ and DCB TX parameters*/
3343 ixgbe_vmdq_dcb_hw_tx_config(dev,dcb_config);
3347 dcb_config->vt_mode = false;
3348 config_dcb_tx = DCB_TX_CONFIG;
3349 /*get DCB TX configuration parameters from rte_eth_conf*/
3350 ixgbe_dcb_tx_config(dev, dcb_config);
3351 /*Configure general DCB TX parameters*/
3352 ixgbe_dcb_tx_hw_config(hw, dcb_config);
3355 PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration");
3359 nb_tcs = dcb_config->num_tcs.pfc_tcs;
3361 ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
3362 if (nb_tcs == ETH_4_TCS) {
3363 /* Avoid un-configured priority mapping to TC0 */
3365 uint8_t mask = 0xFF;
3366 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
3367 mask = (uint8_t)(mask & (~ (1 << map[i])));
3368 for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
3369 if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
3373 /* Re-configure 4 TCs BW */
3374 for (i = 0; i < nb_tcs; i++) {
3375 tc = &dcb_config->tc_config[i];
3376 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
3377 (uint8_t)(100 / nb_tcs);
3378 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
3379 (uint8_t)(100 / nb_tcs);
3381 for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
3382 tc = &dcb_config->tc_config[i];
3383 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
3384 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 0;
3388 switch (hw->mac.type) {
3389 case ixgbe_mac_X550:
3390 case ixgbe_mac_X550EM_x:
3391 case ixgbe_mac_X550EM_a:
3392 rx_buffer_size = X550_RX_BUFFER_SIZE;
3395 rx_buffer_size = NIC_RX_BUFFER_SIZE;
3399 if (config_dcb_rx) {
3400 /* Set RX buffer size */
3401 pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
3402 uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT;
3403 for (i = 0; i < nb_tcs; i++) {
3404 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
3406 /* zero alloc all unused TCs */
3407 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3408 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
3411 if (config_dcb_tx) {
3412 /* Only support an equally distributed Tx packet buffer strategy. */
3413 uint32_t txpktsize = IXGBE_TXPBSIZE_MAX / nb_tcs;
3414 uint32_t txpbthresh = (txpktsize / DCB_TX_PB) - IXGBE_TXPKT_SIZE_MAX;
3415 for (i = 0; i < nb_tcs; i++) {
3416 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
3417 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
3419 /* Clear unused TCs, if any, to zero buffer size*/
3420 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3421 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
3422 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
3426 /*Calculates traffic class credits*/
3427 ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame,
3428 IXGBE_DCB_TX_CONFIG);
3429 ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame,
3430 IXGBE_DCB_RX_CONFIG);
3432 if (config_dcb_rx) {
3433 /* Unpack CEE standard containers */
3434 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_RX_CONFIG, refill);
3435 ixgbe_dcb_unpack_max_cee(dcb_config, max);
3436 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_RX_CONFIG, bwgid);
3437 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_RX_CONFIG, tsa);
3438 /* Configure PG(ETS) RX */
3439 ixgbe_dcb_hw_arbite_rx_config(hw,refill,max,bwgid,tsa,map);
3442 if (config_dcb_tx) {
3443 /* Unpack CEE standard containers */
3444 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
3445 ixgbe_dcb_unpack_max_cee(dcb_config, max);
3446 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
3447 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
3448 /* Configure PG(ETS) TX */
3449 ixgbe_dcb_hw_arbite_tx_config(hw,refill,max,bwgid,tsa,map);
3452 /*Configure queue statistics registers*/
3453 ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
3455 /* Check if the PFC is supported */
3456 if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
3457 pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
3458 for (i = 0; i < nb_tcs; i++) {
3460 * If the TC count is 8, the default high_water is 48
3461 * and the default low_water is 16.
3463 hw->fc.high_water[i] = (pbsize * 3 ) / 4;
3464 hw->fc.low_water[i] = pbsize / 4;
3465 /* Enable pfc for this TC */
3466 tc = &dcb_config->tc_config[i];
3467 tc->pfc = ixgbe_dcb_pfc_enabled;
3469 ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
3470 if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
3472 ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
3479 * ixgbe_configure_dcb - Configure DCB Hardware
3480 * @dev: pointer to rte_eth_dev
3482 void ixgbe_configure_dcb(struct rte_eth_dev *dev)
3484 struct ixgbe_dcb_config *dcb_cfg =
3485 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
3486 struct rte_eth_conf *dev_conf = &(dev->data->dev_conf);
3488 PMD_INIT_FUNC_TRACE();
3490 /* check support mq_mode for DCB */
3491 if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
3492 (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) &&
3493 (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS))
3496 if (dev->data->nb_rx_queues != ETH_DCB_NUM_QUEUES)
3499 /** Configure DCB hardware **/
3500 ixgbe_dcb_hw_configure(dev, dcb_cfg);
3506 * VMDq is only supported on 10 GbE NICs.
3509 ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
3511 struct rte_eth_vmdq_rx_conf *cfg;
3512 struct ixgbe_hw *hw;
3513 enum rte_eth_nb_pools num_pools;
3514 uint32_t mrqc, vt_ctl, vlanctrl;
3518 PMD_INIT_FUNC_TRACE();
3519 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3520 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
3521 num_pools = cfg->nb_queue_pools;
3523 ixgbe_rss_disable(dev);
3525 /* MRQC: enable vmdq */
3526 mrqc = IXGBE_MRQC_VMDQEN;
3527 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3529 /* PFVTCTL: turn on virtualisation and set the default pool */
3530 vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
3531 if (cfg->enable_default_pool)
3532 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
3534 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
3536 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
3538 for (i = 0; i < (int)num_pools; i++) {
3539 vmolr = ixgbe_convert_vm_rx_mask_to_val(cfg->rx_mode, vmolr);
3540 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
3543 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
3544 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3545 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
3546 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
3548 /* VFTA - enable all vlan filters */
3549 for (i = 0; i < NUM_VFTA_REGISTERS; i++)
3550 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), UINT32_MAX);
3552 /* VFRE: pool enabling for receive - 64 */
3553 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), UINT32_MAX);
3554 if (num_pools == ETH_64_POOLS)
3555 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), UINT32_MAX);
3558 * MPSAR - allow pools to read specific mac addresses
3559 * In this case, all pools should be able to read from mac addr 0
3561 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), UINT32_MAX);
3562 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), UINT32_MAX);
3564 /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
3565 for (i = 0; i < cfg->nb_pool_maps; i++) {
3566 /* set vlan id in VF register and set the valid bit */
3567 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \
3568 (cfg->pool_map[i].vlan_id & IXGBE_RXD_VLAN_ID_MASK)));
3570 * Put the allowed pools in VFB reg. As we only have 16 or 64
3571 * pools, we only need to use the first half of the register
3574 if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0)
3575 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), \
3576 (cfg->pool_map[i].pools & UINT32_MAX));
3578 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((i*2+1)), \
3579 ((cfg->pool_map[i].pools >> 32) \
3584 /* PFDMA Tx General Switch Control Enables VMDQ loopback */
3585 if (cfg->enable_loop_back) {
3586 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
3587 for (i = 0; i < RTE_IXGBE_VMTXSW_REGISTER_COUNT; i++)
3588 IXGBE_WRITE_REG(hw, IXGBE_VMTXSW(i), UINT32_MAX);
3591 IXGBE_WRITE_FLUSH(hw);
3595 * ixgbe_vmdq_tx_hw_configure - Configure general VMDq TX parameters
3596 * @hw: pointer to hardware structure
3599 ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
3604 PMD_INIT_FUNC_TRACE();
3605 /*PF VF Transmit Enable*/
3606 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), UINT32_MAX);
3607 IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), UINT32_MAX);
3609 /* Disable the Tx desc arbiter so that MTQC can be changed */
3610 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3611 reg |= IXGBE_RTTDCS_ARBDIS;
3612 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3614 reg = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
3615 IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
3617 /* Disable drop for all queues */
3618 for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
3619 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3620 (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
3622 /* Enable the Tx desc arbiter */
3623 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3624 reg &= ~IXGBE_RTTDCS_ARBDIS;
3625 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3627 IXGBE_WRITE_FLUSH(hw);
3632 static int __attribute__((cold))
3633 ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
3635 struct ixgbe_rx_entry *rxe = rxq->sw_ring;
3639 /* Initialize software ring entries */
3640 for (i = 0; i < rxq->nb_rx_desc; i++) {
3641 volatile union ixgbe_adv_rx_desc *rxd;
3642 struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
3644 PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
3645 (unsigned) rxq->queue_id);
3649 rte_mbuf_refcnt_set(mbuf, 1);
3651 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
3653 mbuf->port = rxq->port_id;
3656 rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
3657 rxd = &rxq->rx_ring[i];
3658 rxd->read.hdr_addr = 0;
3659 rxd->read.pkt_addr = dma_addr;
3667 ixgbe_config_vf_rss(struct rte_eth_dev *dev)
3669 struct ixgbe_hw *hw;
3672 ixgbe_rss_configure(dev);
3674 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3676 /* MRQC: enable VF RSS */
3677 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
3678 mrqc &= ~IXGBE_MRQC_MRQE_MASK;
3679 switch (RTE_ETH_DEV_SRIOV(dev).active) {
3681 mrqc |= IXGBE_MRQC_VMDQRSS64EN;
3685 mrqc |= IXGBE_MRQC_VMDQRSS32EN;
3689 PMD_INIT_LOG(ERR, "Invalid pool number in IOV mode with VMDQ RSS");
3693 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3699 ixgbe_config_vf_default(struct rte_eth_dev *dev)
3701 struct ixgbe_hw *hw =
3702 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3704 switch (RTE_ETH_DEV_SRIOV(dev).active) {
3706 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
3711 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
3712 IXGBE_MRQC_VMDQRT4TCEN);
3716 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
3717 IXGBE_MRQC_VMDQRT8TCEN);
3721 "invalid pool number in IOV mode");
3728 ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
3730 struct ixgbe_hw *hw =
3731 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3733 if (hw->mac.type == ixgbe_mac_82598EB)
3736 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3738 * SRIOV inactive scheme
3739 * any DCB/RSS w/o VMDq multi-queue setting
3741 switch (dev->data->dev_conf.rxmode.mq_mode) {
3743 case ETH_MQ_RX_DCB_RSS:
3744 case ETH_MQ_RX_VMDQ_RSS:
3745 ixgbe_rss_configure(dev);
3748 case ETH_MQ_RX_VMDQ_DCB:
3749 ixgbe_vmdq_dcb_configure(dev);
3752 case ETH_MQ_RX_VMDQ_ONLY:
3753 ixgbe_vmdq_rx_hw_configure(dev);
3756 case ETH_MQ_RX_NONE:
3758 /* if mq_mode is none, disable rss mode.*/
3759 ixgbe_rss_disable(dev);
3764 * SRIOV active scheme
3765 * Support RSS together with VMDq & SRIOV
3767 switch (dev->data->dev_conf.rxmode.mq_mode) {
3769 case ETH_MQ_RX_VMDQ_RSS:
3770 ixgbe_config_vf_rss(dev);
3773 /* FIXME if support DCB/RSS together with VMDq & SRIOV */
3774 case ETH_MQ_RX_VMDQ_DCB:
3775 case ETH_MQ_RX_VMDQ_DCB_RSS:
3777 "Could not support DCB with VMDq & SRIOV");
3780 ixgbe_config_vf_default(dev);
3789 ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
3791 struct ixgbe_hw *hw =
3792 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3796 if (hw->mac.type == ixgbe_mac_82598EB)
3799 /* disable arbiter before setting MTQC */
3800 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3801 rttdcs |= IXGBE_RTTDCS_ARBDIS;
3802 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3804 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3806 * SRIOV inactive scheme
3807 * any DCB w/o VMDq multi-queue setting
3809 if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
3810 ixgbe_vmdq_tx_hw_configure(hw);
3812 mtqc = IXGBE_MTQC_64Q_1PB;
3813 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
3816 switch (RTE_ETH_DEV_SRIOV(dev).active) {
3819 * SRIOV active scheme
3820 * FIXME if support DCB together with VMDq & SRIOV
3823 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
3826 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_32VF;
3829 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_RT_ENA |
3833 mtqc = IXGBE_MTQC_64Q_1PB;
3834 PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
3836 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
3839 /* re-enable arbiter */
3840 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
3841 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3847 * ixgbe_get_rscctl_maxdesc - Calculate the RSCCTL[n].MAXDESC for PF
3849 * Return the RSCCTL[n].MAXDESC for 82599 and x540 PF devices according to the
3850 * spec rev. 3.0 chapter 8.2.3.8.13.
3852 * @pool Memory pool of the Rx queue
3854 static inline uint32_t
3855 ixgbe_get_rscctl_maxdesc(struct rte_mempool *pool)
3857 struct rte_pktmbuf_pool_private *mp_priv = rte_mempool_get_priv(pool);
3859 /* MAXDESC * SRRCTL.BSIZEPKT must not exceed 64 KB minus one */
3862 (mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);
3865 return IXGBE_RSCCTL_MAXDESC_16;
3866 else if (maxdesc >= 8)
3867 return IXGBE_RSCCTL_MAXDESC_8;
3868 else if (maxdesc >= 4)
3869 return IXGBE_RSCCTL_MAXDESC_4;
3871 return IXGBE_RSCCTL_MAXDESC_1;
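/*
 * Worked example, assuming a mempool created with the commonly used default
 * of 2048 bytes of data room after the 128-byte headroom: the 64 KB minus one
 * budget gives 65535 / 2048 = 31, which is >= 16, so MAXDESC is capped at
 * IXGBE_RSCCTL_MAXDESC_16. Payloads of roughly 4 KB, 8 KB and 16 KB or more
 * would drop the cap to 8, 4 and 1 descriptors respectively.
 */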
3875 * ixgbe_set_ivar - Setup the correct IVAR register for a particular MSIX
3878 * (Taken from FreeBSD tree)
3879 * (yes this is all very magic and confusing :)
3882 * @entry the register array entry
3883 * @vector the MSIX vector for this queue
3887 ixgbe_set_ivar(struct rte_eth_dev *dev, u8 entry, u8 vector, s8 type)
3889 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3892 vector |= IXGBE_IVAR_ALLOC_VAL;
3894 switch (hw->mac.type) {
3896 case ixgbe_mac_82598EB:
3898 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3900 entry += (type * 64);
3901 index = (entry >> 2) & 0x1F;
3902 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3903 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3904 ivar |= (vector << (8 * (entry & 0x3)));
3905 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
3908 case ixgbe_mac_82599EB:
3909 case ixgbe_mac_X540:
3910 if (type == -1) { /* MISC IVAR */
3911 index = (entry & 1) * 8;
3912 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3913 ivar &= ~(0xFF << index);
3914 ivar |= (vector << index);
3915 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3916 } else { /* RX/TX IVARS */
3917 index = (16 * (entry & 1)) + (8 * type);
3918 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3919 ivar &= ~(0xFF << index);
3920 ivar |= (vector << index);
3921 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
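/*
 * Worked example for the 82599/X540 branch above: mapping RX queue 5
 * (entry = 5, type = 0) to MSI-X vector 3 gives
 *   index = 16 * (5 & 1) + 8 * 0 = 16
 * so bits [23:16] of IVAR(5 >> 1) = IVAR(2) receive 3 | IXGBE_IVAR_ALLOC_VAL.
 * The corresponding TX queue 5 (type = 1) would use bits [31:24] of the same
 * register.
 */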
3931 void __attribute__((cold))
3932 ixgbe_set_rx_function(struct rte_eth_dev *dev)
3934 uint16_t i, rx_using_sse;
3935 struct ixgbe_adapter *adapter =
3936 (struct ixgbe_adapter *)dev->data->dev_private;
3939 * In order to allow Vector Rx there are a few configuration
3940 * conditions to be met and Rx Bulk Allocation should be allowed.
3942 if (ixgbe_rx_vec_dev_conf_condition_check(dev) ||
3943 !adapter->rx_bulk_alloc_allowed) {
3944 PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx "
3945 "preconditions or RTE_IXGBE_INC_VECTOR is "
3947 dev->data->port_id);
3949 adapter->rx_vec_allowed = false;
3953 * Initialize the appropriate LRO callback.
3955 * If all queues satisfy the bulk allocation preconditions
3956 * (hw->rx_bulk_alloc_allowed is TRUE) then we may use bulk allocation.
3957 * Otherwise use a single allocation version.
3959 if (dev->data->lro) {
3960 if (adapter->rx_bulk_alloc_allowed) {
3961 PMD_INIT_LOG(DEBUG, "LRO is requested. Using a bulk "
3962 "allocation version");
3963 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
3965 PMD_INIT_LOG(DEBUG, "LRO is requested. Using a single "
3966 "allocation version");
3967 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
3969 } else if (dev->data->scattered_rx) {
3971 * Set the non-LRO scattered callback: there are Vector and
3972 * single allocation versions.
3974 if (adapter->rx_vec_allowed) {
3975 PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
3976 "callback (port=%d).",
3977 dev->data->port_id);
3979 dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
3980 } else if (adapter->rx_bulk_alloc_allowed) {
3981 PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
3982 "allocation callback (port=%d).",
3983 dev->data->port_id);
3984 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
3986 PMD_INIT_LOG(DEBUG, "Using Regualr (non-vector, "
3987 "single allocation) "
3988 "Scattered Rx callback "
3990 dev->data->port_id);
3992 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
3995 * Below we set "simple" callbacks according to port/queues parameters.
3996 * If parameters allow we are going to choose between the following
4000 * - Single buffer allocation (the simplest one)
4002 } else if (adapter->rx_vec_allowed) {
4003 PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
4004 "burst size no less than %d (port=%d).",
4005 RTE_IXGBE_DESCS_PER_LOOP,
4006 dev->data->port_id);
4008 dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
4009 } else if (adapter->rx_bulk_alloc_allowed) {
4010 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
4011 "satisfied. Rx Burst Bulk Alloc function "
4012 "will be used on port=%d.",
4013 dev->data->port_id);
4015 dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
4017 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
4018 "satisfied, or Scattered Rx is requested "
4020 dev->data->port_id);
4022 dev->rx_pkt_burst = ixgbe_recv_pkts;
4025 /* Propagate information about RX function choice through all queues. */
4028 (dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec ||
4029 dev->rx_pkt_burst == ixgbe_recv_pkts_vec);
4031 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4032 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
4033 rxq->rx_using_sse = rx_using_sse;
4038 * ixgbe_set_rsc - configure RSC related port HW registers
4040 * Configures the port's RSC related registers according to chapter 4.6.7.2
4041 * of the 82599 Spec (x540 configuration is virtually the same).
4045 * Returns 0 in case of success or a non-zero error code
4048 ixgbe_set_rsc(struct rte_eth_dev *dev)
4050 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
4051 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4052 struct rte_eth_dev_info dev_info = { 0 };
4053 bool rsc_capable = false;
4058 dev->dev_ops->dev_infos_get(dev, &dev_info);
4059 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
4062 if (!rsc_capable && rx_conf->enable_lro) {
4063 PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
4068 /* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
4070 if (!rx_conf->hw_strip_crc && rx_conf->enable_lro) {
4072 * According to chapter 4.6.7.2.1 of the Spec Rev.
4073 * 3.0, RSC configuration requires HW CRC stripping to be
4074 * enabled. If the user requested both HW CRC stripping off
4075 * and RSC on - return an error.
4077 PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC "
4082 /* RFCTL configuration */
4084 uint32_t rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
4085 if (rx_conf->enable_lro)
4087 * Since NFS packet coalescing is not supported, clear
4088 * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
4091 rfctl &= ~(IXGBE_RFCTL_RSC_DIS | IXGBE_RFCTL_NFSW_DIS |
4092 IXGBE_RFCTL_NFSR_DIS);
4094 rfctl |= IXGBE_RFCTL_RSC_DIS;
4096 IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
4099 /* If LRO hasn't been requested - we are done here. */
4100 if (!rx_conf->enable_lro)
4103 /* Set RDRXCTL.RSCACKC bit */
4104 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
4105 rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
4106 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
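/*
* As far as we know the 82599 spec requires software to set RSCACKC
* whenever RSC is enabled; noted here for context (assumption based on
* the datasheet, not stated elsewhere in this file).
*/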
4108 /* Per-queue RSC configuration (chapter 4.6.7.2.2 of 82599 Spec) */
4109 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4110 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
4112 IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxq->reg_idx));
4114 IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxq->reg_idx));
4116 IXGBE_READ_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx));
4118 IXGBE_READ_REG(hw, IXGBE_EITR(rxq->reg_idx));
4121 * ixgbe PMD doesn't support header-split at the moment.
4123 * Following chapter 4.6.7.2.1 of the 82599/x540
4124 * Spec, if RSC is enabled the SRRCTL[n].BSIZEHEADER
4125 * field should be configured even if header split is not
4126 * enabled. We configure it to 128 bytes, following the
4127 * recommendation in the spec.
4129 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
4130 srrctl |= (128 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
4131 IXGBE_SRRCTL_BSIZEHDR_MASK;
4134 * TODO: Consider setting the Receive Descriptor Minimum
4135 * Threshold Size for an RSC case. This is not an obviously
4136 * beneficial option, but one worth considering...
4139 rscctl |= IXGBE_RSCCTL_RSCEN;
4140 rscctl |= ixgbe_get_rscctl_maxdesc(rxq->mb_pool);
4141 psrtype |= IXGBE_PSRTYPE_TCPHDR;
4144 * RSC: Set ITR interval corresponding to 2K ints/s.
4146 * Full-sized RSC aggregations for a 10Gb/s link will
4147 * arrive at about a 20K aggregations/s rate.
4149 * A 2K ints/s rate will cause only 10% of the
4150 * aggregations to be closed due to interrupt timer
4151 * expiration when streaming at wire speed.
4153 * For a sparse streaming case this setting will yield
4154 * at most 500us latency for a single RSC aggregation.
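*
* Rough arithmetic behind the numbers above: a 500us EITR interval
* amounts to 1s / 500us = 2000 interrupts/s; with roughly 20K
* aggregations/s at wire speed, about 2K / 20K = 10% of the
* aggregations are closed by the timer rather than by reaching the
* maximum aggregation size.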
4156 eitr &= ~IXGBE_EITR_ITR_INT_MASK;
4157 eitr |= IXGBE_EITR_INTERVAL_US(500) | IXGBE_EITR_CNT_WDIS;
4159 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
4160 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxq->reg_idx), rscctl);
4161 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
4162 IXGBE_WRITE_REG(hw, IXGBE_EITR(rxq->reg_idx), eitr);
4165 * RSC requires the mapping of the queue to the
4168 ixgbe_set_ivar(dev, rxq->reg_idx, i, 0);
4173 PMD_INIT_LOG(DEBUG, "enabling LRO mode");
4179 * Initializes Receive Unit.
4181 int __attribute__((cold))
4182 ixgbe_dev_rx_init(struct rte_eth_dev *dev)
4184 struct ixgbe_hw *hw;
4185 struct ixgbe_rx_queue *rxq;
4196 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
4199 PMD_INIT_FUNC_TRACE();
4200 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4203 * Make sure receives are disabled while setting
4204 * up the RX context (registers, descriptor rings, etc.).
4206 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4207 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
4209 /* Enable receipt of broadcast frames */
4210 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4211 fctrl |= IXGBE_FCTRL_BAM;
4212 fctrl |= IXGBE_FCTRL_DPF;
4213 fctrl |= IXGBE_FCTRL_PMCF;
4214 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
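/*
* For reference (bit meanings assumed from the 82599 datasheet): BAM
* accepts broadcast frames, DPF discards pause frames and PMCF passes
* MAC control frames up to the host.
*/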
4217 * Configure CRC stripping, if any.
4219 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4220 if (rx_conf->hw_strip_crc)
4221 hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
4223 hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
4226 * Configure jumbo frame support, if any.
4228 if (rx_conf->jumbo_frame == 1) {
4229 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
4230 maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
4231 maxfrs &= 0x0000FFFF;
4232 maxfrs |= (rx_conf->max_rx_pkt_len << 16);
4233 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
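/*
* MAXFRS keeps the maximum frame size in its upper 16 bits, hence the
* "<< 16" above; e.g. with max_rx_pkt_len = 9000 the MFS field becomes
* 9000 while the lower half of the register is preserved (field layout
* assumed from the 82599 datasheet).
*/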
4235 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
4238 * If loopback mode is configured for 82599, set LPBK bit.
4240 if (hw->mac.type == ixgbe_mac_82599EB &&
4241 dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
4242 hlreg0 |= IXGBE_HLREG0_LPBK;
4244 hlreg0 &= ~IXGBE_HLREG0_LPBK;
4246 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4248 /* Setup RX queues */
4249 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4250 rxq = dev->data->rx_queues[i];
4253 * Reset crc_len in case it was changed after queue setup by a
4254 * call to configure.
4256 rxq->crc_len = rx_conf->hw_strip_crc ? 0 : ETHER_CRC_LEN;
4258 /* Setup the Base and Length of the Rx Descriptor Rings */
4259 bus_addr = rxq->rx_ring_phys_addr;
4260 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rxq->reg_idx),
4261 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4262 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rxq->reg_idx),
4263 (uint32_t)(bus_addr >> 32));
4264 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rxq->reg_idx),
4265 rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
4266 IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
4267 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), 0);
4269 /* Configure the SRRCTL register */
4270 #ifdef RTE_HEADER_SPLIT_ENABLE
4272 * Configure Header Split
4274 if (rx_conf->header_split) {
4275 if (hw->mac.type == ixgbe_mac_82599EB) {
4276 /* Must setup the PSRTYPE register */
4278 psrtype = IXGBE_PSRTYPE_TCPHDR |
4279 IXGBE_PSRTYPE_UDPHDR |
4280 IXGBE_PSRTYPE_IPV4HDR |
4281 IXGBE_PSRTYPE_IPV6HDR;
4282 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
4284 srrctl = ((rx_conf->split_hdr_size <<
4285 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
4286 IXGBE_SRRCTL_BSIZEHDR_MASK);
4287 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
4290 srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
4292 /* Set whether packets are dropped when no descriptors are available */
4294 srrctl |= IXGBE_SRRCTL_DROP_EN;
4297 * Configure the RX buffer size in the BSIZEPACKET field of
4298 * the SRRCTL register of the queue.
4299 * The value is in 1 KB resolution. Valid values can be from
4302 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
4303 RTE_PKTMBUF_HEADROOM);
4304 srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
4305 IXGBE_SRRCTL_BSIZEPKT_MASK);
4307 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
4309 buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
4310 IXGBE_SRRCTL_BSIZEPKT_SHIFT);
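/*
* Worked example (assuming a common mempool layout): a 2176-byte data
* room minus RTE_PKTMBUF_HEADROOM (128) leaves 2048 bytes; 2048 >> 10
* gives BSIZEPKT = 2, so buf_size recomputed above is 2 KB. The check
* below then turns on scattered Rx whenever max_rx_pkt_len plus two
* VLAN tags does not fit into that buffer.
*/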
4312 /* Add dual VLAN tag length to account for QinQ (dual VLAN) frames */
4313 if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
4314 2 * IXGBE_VLAN_TAG_SIZE > buf_size)
4315 dev->data->scattered_rx = 1;
4318 if (rx_conf->enable_scatter)
4319 dev->data->scattered_rx = 1;
4322 * Device configured with multiple RX queues.
4324 ixgbe_dev_mq_rx_configure(dev);
4327 * Setup the Checksum Register.
4328 * Disable Full-Packet Checksum which is mutually exclusive with RSS.
4329 * Enable IP/L4 checksum computation by hardware if requested to do so.
4331 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
4332 rxcsum |= IXGBE_RXCSUM_PCSD;
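/*
* PCSD is set because the Rx descriptor field that would carry the
* fragment checksum is reused for the RSS hash, so the two features
* cannot coexist (assumed rationale, consistent with the "mutually
* exclusive with RSS" note above).
*/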
4333 if (rx_conf->hw_ip_checksum)
4334 rxcsum |= IXGBE_RXCSUM_IPPCSE;
4336 rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
4338 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
4340 if (hw->mac.type == ixgbe_mac_82599EB ||
4341 hw->mac.type == ixgbe_mac_X540) {
4342 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
4343 if (rx_conf->hw_strip_crc)
4344 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
4346 rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
4347 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
4348 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
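/*
* RSCFRSTSIZE is cleared above; as far as we know the 82599 datasheet
* asks software to zero this field, which is otherwise managed by
* hardware (assumption, noted for context).
*/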
4351 rc = ixgbe_set_rsc(dev);
4355 ixgbe_set_rx_function(dev);
4361 * Initializes Transmit Unit.
4363 void __attribute__((cold))
4364 ixgbe_dev_tx_init(struct rte_eth_dev *dev)
4366 struct ixgbe_hw *hw;
4367 struct ixgbe_tx_queue *txq;
4373 PMD_INIT_FUNC_TRACE();
4374 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4376 /* Enable TX CRC (checksum offload requirement) and hw padding
4377 * (TSO requirement) */
4378 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4379 hlreg0 |= (IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN);
4380 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4382 /* Setup the Base and Length of the Tx Descriptor Rings */
4383 for (i = 0; i < dev->data->nb_tx_queues; i++) {
4384 txq = dev->data->tx_queues[i];
4386 bus_addr = txq->tx_ring_phys_addr;
4387 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(txq->reg_idx),
4388 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4389 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(txq->reg_idx),
4390 (uint32_t)(bus_addr >> 32));
4391 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(txq->reg_idx),
4392 txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
4393 /* Setup the HW Tx Head and TX Tail descriptor pointers */
4394 IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
4395 IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
4398 * Disable Tx Head Writeback RO bit, since this hoses
4399 * bookkeeping if things aren't delivered in order.
4401 switch (hw->mac.type) {
4402 case ixgbe_mac_82598EB:
4403 txctrl = IXGBE_READ_REG(hw,
4404 IXGBE_DCA_TXCTRL(txq->reg_idx));
4405 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4406 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx),
4410 case ixgbe_mac_82599EB:
4411 case ixgbe_mac_X540:
4412 case ixgbe_mac_X550:
4413 case ixgbe_mac_X550EM_x:
4414 case ixgbe_mac_X550EM_a:
4416 txctrl = IXGBE_READ_REG(hw,
4417 IXGBE_DCA_TXCTRL_82599(txq->reg_idx));
4418 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4419 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx),
4425 /* Device configured with multiple TX queues. */
4426 ixgbe_dev_mq_tx_configure(dev);
4430 * Set up link for 82599 loopback mode Tx->Rx.
4432 static inline void __attribute__((cold))
4433 ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
4435 PMD_INIT_FUNC_TRACE();
4437 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
4438 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM) !=
4440 PMD_INIT_LOG(ERR, "Could not enable loopback mode");
4449 IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU);
4450 ixgbe_reset_pipeline_82599(hw);
4452 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
4458 * Start Transmit and Receive Units.
4460 int __attribute__((cold))
4461 ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
4463 struct ixgbe_hw *hw;
4464 struct ixgbe_tx_queue *txq;
4465 struct ixgbe_rx_queue *rxq;
4472 PMD_INIT_FUNC_TRACE();
4473 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4475 for (i = 0; i < dev->data->nb_tx_queues; i++) {
4476 txq = dev->data->tx_queues[i];
4477 /* Setup Transmit Threshold Registers */
4478 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
4479 txdctl |= txq->pthresh & 0x7F;
4480 txdctl |= ((txq->hthresh & 0x7F) << 8);
4481 txdctl |= ((txq->wthresh & 0x7F) << 16);
4482 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
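/*
* TXDCTL packs the prefetch, host and write-back thresholds into bits
* [6:0], [14:8] and [22:16] respectively, which is what the shifts
* above implement (field positions assumed from the datasheet).
*/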
4485 if (hw->mac.type != ixgbe_mac_82598EB) {
4486 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
4487 dmatxctl |= IXGBE_DMATXCTL_TE;
4488 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
4491 for (i = 0; i < dev->data->nb_tx_queues; i++) {
4492 txq = dev->data->tx_queues[i];
4493 if (!txq->tx_deferred_start) {
4494 ret = ixgbe_dev_tx_queue_start(dev, i);
4500 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4501 rxq = dev->data->rx_queues[i];
4502 if (!rxq->rx_deferred_start) {
4503 ret = ixgbe_dev_rx_queue_start(dev, i);
4509 /* Enable Receive engine */
4510 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4511 if (hw->mac.type == ixgbe_mac_82598EB)
4512 rxctrl |= IXGBE_RXCTRL_DMBYPS;
4513 rxctrl |= IXGBE_RXCTRL_RXEN;
4514 hw->mac.ops.enable_rx_dma(hw, rxctrl);
4516 /* If loopback mode is enabled for 82599, set up the link accordingly */
4517 if (hw->mac.type == ixgbe_mac_82599EB &&
4518 dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
4519 ixgbe_setup_loopback_link_82599(hw);
4525 * Start Receive Units for specified queue.
4527 int __attribute__((cold))
4528 ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
4530 struct ixgbe_hw *hw;
4531 struct ixgbe_rx_queue *rxq;
4535 PMD_INIT_FUNC_TRACE();
4536 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4538 if (rx_queue_id < dev->data->nb_rx_queues) {
4539 rxq = dev->data->rx_queues[rx_queue_id];
4541 /* Allocate buffers for descriptor rings */
4542 if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
4543 PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
4547 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
4548 rxdctl |= IXGBE_RXDCTL_ENABLE;
4549 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
4551 /* Wait until RX Enable ready */
4552 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
4555 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
4556 } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
4558 PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d",
4561 IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
4562 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
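/*
* Head stays at 0 while the tail points at the last descriptor, so the
* hardware sees the whole ring (just filled with mbufs above) as
* available for receive.
*/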
4563 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
4571 * Stop Receive Units for specified queue.
4573 int __attribute__((cold))
4574 ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
4576 struct ixgbe_hw *hw;
4577 struct ixgbe_adapter *adapter =
4578 (struct ixgbe_adapter *)dev->data->dev_private;
4579 struct ixgbe_rx_queue *rxq;
4583 PMD_INIT_FUNC_TRACE();
4584 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4586 if (rx_queue_id < dev->data->nb_rx_queues) {
4587 rxq = dev->data->rx_queues[rx_queue_id];
4589 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
4590 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
4591 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
4593 /* Wait until RX Enable ready */
4594 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
4597 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
4598 } while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE));
4600 PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d",
4603 rte_delay_us(RTE_IXGBE_WAIT_100_US);
4605 ixgbe_rx_queue_release_mbufs(rxq);
4606 ixgbe_reset_rx_queue(adapter, rxq);
4607 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
4616 * Start Transmit Units for specified queue.
4618 int __attribute__((cold))
4619 ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
4621 struct ixgbe_hw *hw;
4622 struct ixgbe_tx_queue *txq;
4626 PMD_INIT_FUNC_TRACE();
4627 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4629 if (tx_queue_id < dev->data->nb_tx_queues) {
4630 txq = dev->data->tx_queues[tx_queue_id];
4631 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
4632 txdctl |= IXGBE_TXDCTL_ENABLE;
4633 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
4635 /* Wait until TX Enable ready */
4636 if (hw->mac.type == ixgbe_mac_82599EB) {
4637 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
4640 txdctl = IXGBE_READ_REG(hw,
4641 IXGBE_TXDCTL(txq->reg_idx));
4642 } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
4644 PMD_INIT_LOG(ERR, "Could not enable "
4645 "Tx Queue %d", tx_queue_id);
4648 IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
4649 IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
4650 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
4658 * Stop Transmit Units for specified queue.
4660 int __attribute__((cold))
4661 ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
4663 struct ixgbe_hw *hw;
4664 struct ixgbe_tx_queue *txq;
4666 uint32_t txtdh, txtdt;
4669 PMD_INIT_FUNC_TRACE();
4670 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4672 if (tx_queue_id < dev->data->nb_tx_queues) {
4673 txq = dev->data->tx_queues[tx_queue_id];
4675 /* Wait until TX queue is empty */
4676 if (hw->mac.type == ixgbe_mac_82599EB) {
4677 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
4679 rte_delay_us(RTE_IXGBE_WAIT_100_US);
4680 txtdh = IXGBE_READ_REG(hw,
4681 IXGBE_TDH(txq->reg_idx));
4682 txtdt = IXGBE_READ_REG(hw,
4683 IXGBE_TDT(txq->reg_idx));
4684 } while (--poll_ms && (txtdh != txtdt));
4686 PMD_INIT_LOG(ERR, "Tx Queue %d is not empty "
4687 "when stopping.", tx_queue_id);
4690 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
4691 txdctl &= ~IXGBE_TXDCTL_ENABLE;
4692 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
4694 /* Wait until TX Enable ready */
4695 if (hw->mac.type == ixgbe_mac_82599EB) {
4696 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
4699 txdctl = IXGBE_READ_REG(hw,
4700 IXGBE_TXDCTL(txq->reg_idx));
4701 } while (--poll_ms && (txdctl & IXGBE_TXDCTL_ENABLE));
4703 PMD_INIT_LOG(ERR, "Could not disable "
4704 "Tx Queue %d", tx_queue_id);
4707 if (txq->ops != NULL) {
4708 txq->ops->release_mbufs(txq);
4709 txq->ops->reset(txq);
4711 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
4719 ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
4720 struct rte_eth_rxq_info *qinfo)
4722 struct ixgbe_rx_queue *rxq;
4724 rxq = dev->data->rx_queues[queue_id];
4726 qinfo->mp = rxq->mb_pool;
4727 qinfo->scattered_rx = dev->data->scattered_rx;
4728 qinfo->nb_desc = rxq->nb_rx_desc;
4730 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
4731 qinfo->conf.rx_drop_en = rxq->drop_en;
4732 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
4736 ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
4737 struct rte_eth_txq_info *qinfo)
4739 struct ixgbe_tx_queue *txq;
4741 txq = dev->data->tx_queues[queue_id];
4743 qinfo->nb_desc = txq->nb_tx_desc;
4745 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
4746 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
4747 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
4749 qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
4750 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
4751 qinfo->conf.txq_flags = txq->txq_flags;
4752 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
4756 * [VF] Initializes Receive Unit.
4758 int __attribute__((cold))
4759 ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
4761 struct ixgbe_hw *hw;
4762 struct ixgbe_rx_queue *rxq;
4764 uint32_t srrctl, psrtype = 0;
4769 PMD_INIT_FUNC_TRACE();
4770 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4772 if (rte_is_power_of_2(dev->data->nb_rx_queues) == 0) {
4773 PMD_INIT_LOG(ERR, "The number of Rx queues is invalid, "
4774 "it should be a power of 2");
4778 if (dev->data->nb_rx_queues > hw->mac.max_rx_queues) {
4779 PMD_INIT_LOG(ERR, "The number of Rx queues is invalid, "
4780 "it should be equal to or less than %d",
4781 hw->mac.max_rx_queues);
4786 * When the VF driver issues an IXGBE_VF_RESET request, the PF driver
4787 * disables VF receipt of packets if the PF MTU is > 1500.
4788 * This is done to deal with the 82599 limitation that requires
4789 * the PF and all VFs to share the same MTU.
4790 * Then, the PF driver re-enables VF receipt of packets when
4791 * the VF driver issues an IXGBE_VF_SET_LPE request.
4792 * In the meantime, the VF device cannot be used, even if the VF driver
4793 * and the Guest VM network stack are ready to accept packets with a
4794 * size up to the PF MTU.
4795 * As a work-around to this PF behaviour, force the call to
4796 * ixgbevf_rlpml_set_vf even if jumbo frames are not used. This way,
4797 * VF packet reception works in all cases.
4799 ixgbevf_rlpml_set_vf(hw,
4800 (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
4802 /* Setup RX queues */
4803 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4804 rxq = dev->data->rx_queues[i];
4806 /* Allocate buffers for descriptor rings */
4807 ret = ixgbe_alloc_rx_queue_mbufs(rxq);
4811 /* Setup the Base and Length of the Rx Descriptor Rings */
4812 bus_addr = rxq->rx_ring_phys_addr;
4814 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
4815 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4816 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
4817 (uint32_t)(bus_addr >> 32));
4818 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
4819 rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
4820 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0);
4821 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0);
4824 /* Configure the SRRCTL register */
4825 #ifdef RTE_HEADER_SPLIT_ENABLE
4827 * Configure Header Split
4829 if (dev->data->dev_conf.rxmode.header_split) {
4830 srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
4831 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
4832 IXGBE_SRRCTL_BSIZEHDR_MASK);
4833 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
4836 srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
4838 /* Set whether packets are dropped when no descriptors are available */
4840 srrctl |= IXGBE_SRRCTL_DROP_EN;
4843 * Configure the RX buffer size in the BSIZEPACKET field of
4844 * the SRRCTL register of the queue.
4845 * The value is in 1 KB resolution. Valid values can be from
4848 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
4849 RTE_PKTMBUF_HEADROOM);
4850 srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
4851 IXGBE_SRRCTL_BSIZEPKT_MASK);
4854 * VF modification to write virtual function SRRCTL register
4856 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), srrctl);
4858 buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
4859 IXGBE_SRRCTL_BSIZEPKT_SHIFT);
4861 if (dev->data->dev_conf.rxmode.enable_scatter ||
4862 /* Add dual VLAN tag length to account for QinQ (dual VLAN) frames */
4863 (dev->data->dev_conf.rxmode.max_rx_pkt_len +
4864 2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
4865 if (!dev->data->scattered_rx)
4866 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
4867 dev->data->scattered_rx = 1;
4871 #ifdef RTE_HEADER_SPLIT_ENABLE
4872 if (dev->data->dev_conf.rxmode.header_split)
4873 /* Must setup the PSRTYPE register */
4874 psrtype = IXGBE_PSRTYPE_TCPHDR |
4875 IXGBE_PSRTYPE_UDPHDR |
4876 IXGBE_PSRTYPE_IPV4HDR |
4877 IXGBE_PSRTYPE_IPV6HDR;
4880 /* Set RQPL for VF RSS according to the maximum number of Rx queues */
4881 psrtype |= (dev->data->nb_rx_queues >> 1) <<
4882 IXGBE_PSRTYPE_RQPL_SHIFT;
4883 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
4885 ixgbe_set_rx_function(dev);
4891 * [VF] Initializes Transmit Unit.
4893 void __attribute__((cold))
4894 ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
4896 struct ixgbe_hw *hw;
4897 struct ixgbe_tx_queue *txq;
4902 PMD_INIT_FUNC_TRACE();
4903 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4905 /* Setup the Base and Length of the Tx Descriptor Rings */
4906 for (i = 0; i < dev->data->nb_tx_queues; i++) {
4907 txq = dev->data->tx_queues[i];
4908 bus_addr = txq->tx_ring_phys_addr;
4909 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
4910 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4911 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i),
4912 (uint32_t)(bus_addr >> 32));
4913 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
4914 txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
4915 /* Setup the HW Tx Head and TX Tail descriptor pointers */
4916 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
4917 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
4920 * Disable Tx Head Writeback RO bit, since this hoses
4921 * bookkeeping if things aren't delivered in order.
4923 txctrl = IXGBE_READ_REG(hw,
4924 IXGBE_VFDCA_TXCTRL(i));
4925 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4926 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i),
4932 * [VF] Start Transmit and Receive Units.
4934 void __attribute__((cold))
4935 ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
4937 struct ixgbe_hw *hw;
4938 struct ixgbe_tx_queue *txq;
4939 struct ixgbe_rx_queue *rxq;
4945 PMD_INIT_FUNC_TRACE();
4946 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4948 for (i = 0; i < dev->data->nb_tx_queues; i++) {
4949 txq = dev->data->tx_queues[i];
4950 /* Setup Transmit Threshold Registers */
4951 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
4952 txdctl |= txq->pthresh & 0x7F;
4953 txdctl |= ((txq->hthresh & 0x7F) << 8);
4954 txdctl |= ((txq->wthresh & 0x7F) << 16);
4955 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
4958 for (i = 0; i < dev->data->nb_tx_queues; i++) {
4960 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
4961 txdctl |= IXGBE_TXDCTL_ENABLE;
4962 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
4965 /* Wait until TX Enable ready */
4968 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
4969 } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
4971 PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i);
4973 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4975 rxq = dev->data->rx_queues[i];
4977 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
4978 rxdctl |= IXGBE_RXDCTL_ENABLE;
4979 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
4981 /* Wait until RX Enable ready */
4985 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
4986 } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
4988 PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i);
4990 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1);
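/* As on the PF path, tail = nb_rx_desc - 1 exposes the full ring to HW. */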
4995 /* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */
4996 int __attribute__((weak))
4997 ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
5002 uint16_t __attribute__((weak))
5003 ixgbe_recv_pkts_vec(
5004 void __rte_unused *rx_queue,
5005 struct rte_mbuf __rte_unused **rx_pkts,
5006 uint16_t __rte_unused nb_pkts)
5011 uint16_t __attribute__((weak))
5012 ixgbe_recv_scattered_pkts_vec(
5013 void __rte_unused *rx_queue,
5014 struct rte_mbuf __rte_unused **rx_pkts,
5015 uint16_t __rte_unused nb_pkts)
5020 int __attribute__((weak))
5021 ixgbe_rxq_vec_setup(struct ixgbe_rx_queue __rte_unused *rxq)