4 * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5 * Copyright 2014 6WIND S.A.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * * Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * * Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
18 * * Neither the name of Intel Corporation nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 #include <sys/queue.h>
46 #include <rte_byteorder.h>
47 #include <rte_common.h>
48 #include <rte_cycles.h>
50 #include <rte_debug.h>
51 #include <rte_interrupts.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_launch.h>
57 #include <rte_per_lcore.h>
58 #include <rte_lcore.h>
59 #include <rte_atomic.h>
60 #include <rte_branch_prediction.h>
62 #include <rte_mempool.h>
63 #include <rte_malloc.h>
65 #include <rte_ether.h>
66 #include <rte_ethdev.h>
67 #include <rte_prefetch.h>
71 #include <rte_string_fns.h>
72 #include <rte_errno.h>
75 #include "ixgbe_logs.h"
76 #include "base/ixgbe_api.h"
77 #include "base/ixgbe_vf.h"
78 #include "ixgbe_ethdev.h"
79 #include "base/ixgbe_dcb.h"
80 #include "base/ixgbe_common.h"
81 #include "ixgbe_rxtx.h"
83 /* Bit mask to indicate which bits are required for building the TX context */
84 #define IXGBE_TX_OFFLOAD_MASK ( \
89 PKT_TX_OUTER_IP_CKSUM)
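/*
 * A condensed sketch of how the mask is used by ixgbe_xmit_pkts() below:
 * the packet's offload request is derived from ol_flags, and a context
 * descriptor is only built (or reused) when at least one masked flag,
 * such as PKT_TX_OUTER_IP_CKSUM, is set:
 *
 *     tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK;
 *     if (tx_ol_req)
 *         ctx = what_advctx_update(txq, tx_ol_req, tx_offload);
 */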
91 static inline struct rte_mbuf *
92 rte_rxmbuf_alloc(struct rte_mempool *mp)
96 m = __rte_mbuf_raw_alloc(mp);
97 __rte_mbuf_sanity_check_raw(m, 0);
103 #define RTE_PMD_USE_PREFETCH
106 #ifdef RTE_PMD_USE_PREFETCH
108 * Prefetch a cache line into all cache levels.
110 #define rte_ixgbe_prefetch(p) rte_prefetch0(p)
112 #define rte_ixgbe_prefetch(p) do {} while (0)
115 /*********************************************************************
119 **********************************************************************/
122 * Check for descriptors with their DD bit set and free mbufs.
123 * Return the total number of buffers freed.
125 static inline int __attribute__((always_inline))
126 ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
128 struct ixgbe_tx_entry *txep;
131 struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];
133 /* check DD bit on threshold descriptor */
134 status = txq->tx_ring[txq->tx_next_dd].wb.status;
135 if (!(status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD)))
139 * first buffer to free from S/W ring is at index
140 * tx_next_dd - (tx_rs_thresh-1)
142 txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]);
144 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
145 /* free buffers one at a time */
146 m = __rte_pktmbuf_prefree_seg(txep->mbuf);
149 if (unlikely(m == NULL))
152 if (nb_free >= RTE_IXGBE_TX_MAX_FREE_BUF_SZ ||
153 (nb_free > 0 && m->pool != free[0]->pool)) {
154 rte_mempool_put_bulk(free[0]->pool,
155 (void **)free, nb_free);
163 rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
165 /* buffers were freed, update counters */
166 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
167 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
168 if (txq->tx_next_dd >= txq->nb_tx_desc)
169 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
171 return txq->tx_rs_thresh;
174 /* Populate 4 descriptors with data from 4 mbufs */
176 tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
178 uint64_t buf_dma_addr;
182 for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
183 buf_dma_addr = rte_mbuf_data_dma_addr(*pkts);
184 pkt_len = (*pkts)->data_len;
186 /* write data to descriptor */
187 txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
189 txdp->read.cmd_type_len =
190 rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
192 txdp->read.olinfo_status =
193 rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
195 rte_prefetch0(&(*pkts)->pool);
199 /* Populate 1 descriptor with data from 1 mbuf */
201 tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
203 uint64_t buf_dma_addr;
206 buf_dma_addr = rte_mbuf_data_dma_addr(*pkts);
207 pkt_len = (*pkts)->data_len;
209 /* write data to descriptor */
210 txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
211 txdp->read.cmd_type_len =
212 rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
213 txdp->read.olinfo_status =
214 rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
215 rte_prefetch0(&(*pkts)->pool);
219 * Fill H/W descriptor ring with mbuf data.
220 * Copy mbuf pointers to the S/W ring.
223 ixgbe_tx_fill_hw_ring(struct ixgbe_tx_queue *txq, struct rte_mbuf **pkts,
226 volatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
227 struct ixgbe_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
228 const int N_PER_LOOP = 4;
229 const int N_PER_LOOP_MASK = N_PER_LOOP-1;
230 int mainpart, leftover;
234 * Process most of the packets in chunks of N pkts. Any
235 * leftover packets will get processed one at a time.
237 mainpart = (nb_pkts & ((uint32_t) ~N_PER_LOOP_MASK));
238 leftover = (nb_pkts & ((uint32_t) N_PER_LOOP_MASK));
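/*
 * Worked example: with nb_pkts = 23 and N_PER_LOOP = 4, mainpart is
 * 23 & ~3 = 20 and leftover is 23 & 3 = 3, so five tx4() calls cover
 * packets 0-19 and three tx1() calls cover packets 20-22.
 */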
239 for (i = 0; i < mainpart; i += N_PER_LOOP) {
240 /* Copy N mbuf pointers to the S/W ring */
241 for (j = 0; j < N_PER_LOOP; ++j) {
242 (txep + i + j)->mbuf = *(pkts + i + j);
244 tx4(txdp + i, pkts + i);
247 if (unlikely(leftover > 0)) {
248 for (i = 0; i < leftover; ++i) {
249 (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
250 tx1(txdp + mainpart + i, pkts + mainpart + i);
255 static inline uint16_t
256 tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
259 struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
260 volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring;
264 * Begin scanning the H/W ring for done descriptors when the
265 * number of available descriptors drops below tx_free_thresh. For
266 * each done descriptor, free the associated buffer.
268 if (txq->nb_tx_free < txq->tx_free_thresh)
269 ixgbe_tx_free_bufs(txq);
271 /* Only use descriptors that are available */
272 nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
273 if (unlikely(nb_pkts == 0))
276 /* Use exactly nb_pkts descriptors */
277 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
280 * At this point, we know there are enough descriptors in the
281 * ring to transmit all the packets. This assumes that each
282 * mbuf contains a single segment, and that no new offloads
283 * are expected, which would require a new context descriptor.
287 * See if we're going to wrap-around. If so, handle the top
288 * of the descriptor ring first, then do the bottom. If not,
289 * the processing looks just like the "bottom" part anyway...
291 if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
292 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
293 ixgbe_tx_fill_hw_ring(txq, tx_pkts, n);
296 * We know that the last descriptor in the ring will need to
297 * have its RS bit set because tx_rs_thresh has to be
298 * a divisor of the ring size
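* (e.g. with 512 descriptors and tx_rs_thresh = 32, slot 511 always ends
* an RS group, so setting RS there before wrapping is always valid).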
300 tx_r[txq->tx_next_rs].read.cmd_type_len |=
301 rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
302 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
307 /* Fill H/W descriptor ring with mbuf data */
308 ixgbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
309 txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
312 * Determine if RS bit should be set
313 * This is what we actually want:
314 * if ((txq->tx_tail - 1) >= txq->tx_next_rs)
315 * but instead of subtracting 1 and doing >=, we can just do
316 * greater than without subtracting.
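* e.g. with tx_next_rs = 31 and tx_tail advanced to 32, the tests
* (32 - 1) >= 31 and 32 > 31 are equivalent.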
318 if (txq->tx_tail > txq->tx_next_rs) {
319 tx_r[txq->tx_next_rs].read.cmd_type_len |=
320 rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
321 txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
323 if (txq->tx_next_rs >= txq->nb_tx_desc)
324 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
328 * Check for wrap-around. This would only happen if we used
329 * up to the last descriptor in the ring, no more, no less.
331 if (txq->tx_tail >= txq->nb_tx_desc)
334 /* update tail pointer */
336 IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);
342 ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
347 /* If the whole burst fits within TX_MAX_BURST pkts, transmit it directly */
348 if (likely(nb_pkts <= RTE_PMD_IXGBE_TX_MAX_BURST))
349 return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
351 /* transmit more than the max burst, in chunks of TX_MAX_BURST */
355 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_TX_MAX_BURST);
356 ret = tx_xmit_pkts(tx_queue, &(tx_pkts[nb_tx]), n);
357 nb_tx = (uint16_t)(nb_tx + ret);
358 nb_pkts = (uint16_t)(nb_pkts - ret);
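/*
 * e.g. a burst of 80 packets with RTE_PMD_IXGBE_TX_MAX_BURST = 32 is
 * submitted as chunks of 32, 32 and 16 (fewer if the ring runs out of
 * free descriptors along the way).
 */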
367 ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
368 volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
369 uint64_t ol_flags, union ixgbe_tx_offload tx_offload)
371 uint32_t type_tucmd_mlhl;
372 uint32_t mss_l4len_idx = 0;
374 uint32_t vlan_macip_lens;
375 union ixgbe_tx_offload tx_offload_mask;
376 uint32_t seqnum_seed = 0;
378 ctx_idx = txq->ctx_curr;
379 tx_offload_mask.data[0] = 0;
380 tx_offload_mask.data[1] = 0;
383 /* Specify which HW CTX to upload. */
384 mss_l4len_idx |= (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT);
386 if (ol_flags & PKT_TX_VLAN_PKT) {
387 tx_offload_mask.vlan_tci |= ~0;
390 /* check if TCP segmentation is required for this packet */
391 if (ol_flags & PKT_TX_TCP_SEG) {
392 /* implies IP cksum in IPv4 */
393 if (ol_flags & PKT_TX_IP_CKSUM)
394 type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4 |
395 IXGBE_ADVTXD_TUCMD_L4T_TCP |
396 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
398 type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV6 |
399 IXGBE_ADVTXD_TUCMD_L4T_TCP |
400 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
402 tx_offload_mask.l2_len |= ~0;
403 tx_offload_mask.l3_len |= ~0;
404 tx_offload_mask.l4_len |= ~0;
405 tx_offload_mask.tso_segsz |= ~0;
406 mss_l4len_idx |= tx_offload.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT;
407 mss_l4len_idx |= tx_offload.l4_len << IXGBE_ADVTXD_L4LEN_SHIFT;
408 } else { /* no TSO, check if hardware checksum is needed */
409 if (ol_flags & PKT_TX_IP_CKSUM) {
410 type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4;
411 tx_offload_mask.l2_len |= ~0;
412 tx_offload_mask.l3_len |= ~0;
415 switch (ol_flags & PKT_TX_L4_MASK) {
416 case PKT_TX_UDP_CKSUM:
417 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP |
418 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
419 mss_l4len_idx |= sizeof(struct udp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
420 tx_offload_mask.l2_len |= ~0;
421 tx_offload_mask.l3_len |= ~0;
423 case PKT_TX_TCP_CKSUM:
424 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP |
425 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
426 mss_l4len_idx |= sizeof(struct tcp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
427 tx_offload_mask.l2_len |= ~0;
428 tx_offload_mask.l3_len |= ~0;
430 case PKT_TX_SCTP_CKSUM:
431 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP |
432 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
433 mss_l4len_idx |= sizeof(struct sctp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
434 tx_offload_mask.l2_len |= ~0;
435 tx_offload_mask.l3_len |= ~0;
438 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_RSV |
439 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
444 if (ol_flags & PKT_TX_OUTER_IP_CKSUM) {
445 tx_offload_mask.outer_l2_len |= ~0;
446 tx_offload_mask.outer_l3_len |= ~0;
447 tx_offload_mask.l2_len |= ~0;
448 seqnum_seed |= tx_offload.outer_l3_len
449 << IXGBE_ADVTXD_OUTER_IPLEN;
450 seqnum_seed |= tx_offload.l2_len
451 << IXGBE_ADVTXD_TUNNEL_LEN;
454 txq->ctx_cache[ctx_idx].flags = ol_flags;
455 txq->ctx_cache[ctx_idx].tx_offload.data[0] =
456 tx_offload_mask.data[0] & tx_offload.data[0];
457 txq->ctx_cache[ctx_idx].tx_offload.data[1] =
458 tx_offload_mask.data[1] & tx_offload.data[1];
459 txq->ctx_cache[ctx_idx].tx_offload_mask = tx_offload_mask;
461 ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
462 vlan_macip_lens = tx_offload.l3_len;
463 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
464 vlan_macip_lens |= (tx_offload.outer_l2_len <<
465 IXGBE_ADVTXD_MACLEN_SHIFT);
467 vlan_macip_lens |= (tx_offload.l2_len <<
468 IXGBE_ADVTXD_MACLEN_SHIFT);
469 vlan_macip_lens |= ((uint32_t)tx_offload.vlan_tci << IXGBE_ADVTXD_VLAN_SHIFT);
470 ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
471 ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
472 ctx_txd->seqnum_seed = seqnum_seed;
476 * Check which hardware context can be used. Use the existing match
477 * or create a new context descriptor.
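*
* For example, a stream alternating between VLAN-tagged and untagged packets
* keeps hitting the two cached slots checked below; a packet that introduces
* a new offload combination misses both, IXGBE_CTX_NUM is returned and the
* caller builds a fresh context descriptor.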
479 static inline uint32_t
480 what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
481 union ixgbe_tx_offload tx_offload)
483 /* Check for a match with the currently used context */
484 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
485 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
486 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
487 & tx_offload.data[0])) &&
488 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
489 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
490 & tx_offload.data[1])))) {
491 return txq->ctx_curr;
494 /* Otherwise, check for a match with the next cached context */
496 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
497 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
498 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
499 & tx_offload.data[0])) &&
500 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
501 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
502 & tx_offload.data[1])))) {
503 return txq->ctx_curr;
506 /* Mismatch: signal the caller to build a new context descriptor */
507 return IXGBE_CTX_NUM;
510 static inline uint32_t
511 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
514 if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM)
515 tmp |= IXGBE_ADVTXD_POPTS_TXSM;
516 if (ol_flags & PKT_TX_IP_CKSUM)
517 tmp |= IXGBE_ADVTXD_POPTS_IXSM;
518 if (ol_flags & PKT_TX_TCP_SEG)
519 tmp |= IXGBE_ADVTXD_POPTS_TXSM;
523 static inline uint32_t
524 tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
526 uint32_t cmdtype = 0;
527 if (ol_flags & PKT_TX_VLAN_PKT)
528 cmdtype |= IXGBE_ADVTXD_DCMD_VLE;
529 if (ol_flags & PKT_TX_TCP_SEG)
530 cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
531 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
532 cmdtype |= (1 << IXGBE_ADVTXD_OUTERIPCS_SHIFT);
536 /* Default RS bit threshold values */
537 #ifndef DEFAULT_TX_RS_THRESH
538 #define DEFAULT_TX_RS_THRESH 32
540 #ifndef DEFAULT_TX_FREE_THRESH
541 #define DEFAULT_TX_FREE_THRESH 32
544 /* Reset transmit descriptors after they have been used */
546 ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
548 struct ixgbe_tx_entry *sw_ring = txq->sw_ring;
549 volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring;
550 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
551 uint16_t nb_tx_desc = txq->nb_tx_desc;
552 uint16_t desc_to_clean_to;
553 uint16_t nb_tx_to_clean;
556 /* Determine the last descriptor needing to be cleaned */
557 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
558 if (desc_to_clean_to >= nb_tx_desc)
559 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
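/*
 * e.g. last_desc_cleaned = 500, tx_rs_thresh = 32 and nb_tx_desc = 512:
 * desc_to_clean_to becomes 532 - 512 = 20, i.e. the cleanup wraps past
 * the end of the ring.
 */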
561 /* Check to make sure the last descriptor to clean is done */
562 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
563 status = txr[desc_to_clean_to].wb.status;
564 if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD)))
566 PMD_TX_FREE_LOG(DEBUG,
567 "TX descriptor %4u is not done"
568 "(port=%d queue=%d)",
570 txq->port_id, txq->queue_id);
571 /* Failed to clean any descriptors, better luck next time */
575 /* Figure out how many descriptors will be cleaned */
576 if (last_desc_cleaned > desc_to_clean_to)
577 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
580 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
583 PMD_TX_FREE_LOG(DEBUG,
584 "Cleaning %4u TX descriptors: %4u to %4u "
585 "(port=%d queue=%d)",
586 nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
587 txq->port_id, txq->queue_id);
590 * The last descriptor to clean is done, so that means all the
591 * descriptors from the last descriptor that was cleaned
592 * up to the last descriptor with the RS bit set
593 * are done. Only reset the threshold descriptor.
595 txr[desc_to_clean_to].wb.status = 0;
597 /* Update the txq to reflect the last descriptor that was cleaned */
598 txq->last_desc_cleaned = desc_to_clean_to;
599 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
606 ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
609 struct ixgbe_tx_queue *txq;
610 struct ixgbe_tx_entry *sw_ring;
611 struct ixgbe_tx_entry *txe, *txn;
612 volatile union ixgbe_adv_tx_desc *txr;
613 volatile union ixgbe_adv_tx_desc *txd, *txp;
614 struct rte_mbuf *tx_pkt;
615 struct rte_mbuf *m_seg;
616 uint64_t buf_dma_addr;
617 uint32_t olinfo_status;
618 uint32_t cmd_type_len;
629 union ixgbe_tx_offload tx_offload;
631 tx_offload.data[0] = 0;
632 tx_offload.data[1] = 0;
634 sw_ring = txq->sw_ring;
636 tx_id = txq->tx_tail;
637 txe = &sw_ring[tx_id];
640 /* Determine if the descriptor ring needs to be cleaned. */
641 if (txq->nb_tx_free < txq->tx_free_thresh)
642 ixgbe_xmit_cleanup(txq);
644 rte_prefetch0(&txe->mbuf->pool);
647 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
650 pkt_len = tx_pkt->pkt_len;
653 * Determine how many (if any) context descriptors
654 * are needed for offload functionality.
656 ol_flags = tx_pkt->ol_flags;
658 /* If hardware offload required */
659 tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK;
661 tx_offload.l2_len = tx_pkt->l2_len;
662 tx_offload.l3_len = tx_pkt->l3_len;
663 tx_offload.l4_len = tx_pkt->l4_len;
664 tx_offload.vlan_tci = tx_pkt->vlan_tci;
665 tx_offload.tso_segsz = tx_pkt->tso_segsz;
666 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
667 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
669 /* Decide whether a new context descriptor is needed or an existing one can be reused. */
670 ctx = what_advctx_update(txq, tx_ol_req,
672 /* Only allocate a context descriptor if required */
673 new_ctx = (ctx == IXGBE_CTX_NUM);
678 * Keep track of how many descriptors are used in this loop.
679 * This will always be the number of segments + the number of
680 * Context descriptors required to transmit the packet
682 nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
685 nb_used + txq->nb_tx_used >= txq->tx_rs_thresh)
686 /* set RS on the previous packet in the burst */
687 txp->read.cmd_type_len |=
688 rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
691 * The number of descriptors that must be allocated for a
692 * packet is the number of segments of that packet, plus 1
693 * Context Descriptor for the hardware offload, if any.
694 * Determine the last TX descriptor to allocate in the TX ring
695 * for the packet, starting from the current position (tx_id)
698 tx_last = (uint16_t) (tx_id + nb_used - 1);
701 if (tx_last >= txq->nb_tx_desc)
702 tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
704 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
705 " tx_first=%u tx_last=%u",
706 (unsigned) txq->port_id,
707 (unsigned) txq->queue_id,
713 * Make sure there are enough TX descriptors available to
714 * transmit the entire packet.
715 * nb_used better be less than or equal to txq->tx_rs_thresh
717 if (nb_used > txq->nb_tx_free) {
718 PMD_TX_FREE_LOG(DEBUG,
719 "Not enough free TX descriptors "
720 "nb_used=%4u nb_free=%4u "
721 "(port=%d queue=%d)",
722 nb_used, txq->nb_tx_free,
723 txq->port_id, txq->queue_id);
725 if (ixgbe_xmit_cleanup(txq) != 0) {
726 /* Could not clean any descriptors */
732 /* nb_used better be <= txq->tx_rs_thresh */
733 if (unlikely(nb_used > txq->tx_rs_thresh)) {
734 PMD_TX_FREE_LOG(DEBUG,
735 "The number of descriptors needed to "
736 "transmit the packet exceeds the "
737 "RS bit threshold. This will impact "
739 "nb_used=%4u nb_free=%4u "
741 "(port=%d queue=%d)",
742 nb_used, txq->nb_tx_free,
744 txq->port_id, txq->queue_id);
746 * Loop here until there are enough TX
747 * descriptors or until the ring cannot be cleaned any further.
750 while (nb_used > txq->nb_tx_free) {
751 if (ixgbe_xmit_cleanup(txq) != 0) {
753 * Could not clean any descriptors
765 * By now there are enough free TX descriptors to transmit the whole packet.
770 * Set common flags of all TX Data Descriptors.
772 * The following bits must be set in all Data Descriptors:
773 * - IXGBE_ADVTXD_DTYP_DATA
774 * - IXGBE_ADVTXD_DCMD_DEXT
776 * The following bits must be set in the first Data Descriptor
777 * and are ignored in the other ones:
778 * - IXGBE_ADVTXD_DCMD_IFCS
779 * - IXGBE_ADVTXD_MAC_1588
780 * - IXGBE_ADVTXD_DCMD_VLE
782 * The following bits must only be set in the last Data Descriptor:
784 * - IXGBE_TXD_CMD_EOP
786 * The following bits can be set in any Data Descriptor, but
787 * are only set in the last Data Descriptor:
790 cmd_type_len = IXGBE_ADVTXD_DTYP_DATA |
791 IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
793 #ifdef RTE_LIBRTE_IEEE1588
794 if (ol_flags & PKT_TX_IEEE1588_TMST)
795 cmd_type_len |= IXGBE_ADVTXD_MAC_1588;
801 if (ol_flags & PKT_TX_TCP_SEG) {
802 /* when TSO is on, the paylen in the descriptor is
803 * not the packet len but the TCP payload len */
804 pkt_len -= (tx_offload.l2_len +
805 tx_offload.l3_len + tx_offload.l4_len);
809 * Setup the TX Advanced Context Descriptor if required
812 volatile struct ixgbe_adv_tx_context_desc *
815 ctx_txd = (volatile struct
816 ixgbe_adv_tx_context_desc *)
819 txn = &sw_ring[txe->next_id];
820 rte_prefetch0(&txn->mbuf->pool);
822 if (txe->mbuf != NULL) {
823 rte_pktmbuf_free_seg(txe->mbuf);
827 ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
830 txe->last_id = tx_last;
831 tx_id = txe->next_id;
836 * Set up the TX Advanced Data Descriptor.
837 * This path is taken regardless of whether a new context
838 * descriptor was built or an existing one was reused.
840 cmd_type_len |= tx_desc_ol_flags_to_cmdtype(ol_flags);
841 olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
842 olinfo_status |= ctx << IXGBE_ADVTXD_IDX_SHIFT;
845 olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
850 txn = &sw_ring[txe->next_id];
851 rte_prefetch0(&txn->mbuf->pool);
853 if (txe->mbuf != NULL)
854 rte_pktmbuf_free_seg(txe->mbuf);
858 * Set up Transmit Data Descriptor.
860 slen = m_seg->data_len;
861 buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
862 txd->read.buffer_addr =
863 rte_cpu_to_le_64(buf_dma_addr);
864 txd->read.cmd_type_len =
865 rte_cpu_to_le_32(cmd_type_len | slen);
866 txd->read.olinfo_status =
867 rte_cpu_to_le_32(olinfo_status);
868 txe->last_id = tx_last;
869 tx_id = txe->next_id;
872 } while (m_seg != NULL);
875 * The last packet data descriptor needs End Of Packet (EOP)
877 cmd_type_len |= IXGBE_TXD_CMD_EOP;
878 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
879 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
881 /* Set RS bit only on threshold packets' last descriptor */
882 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
883 PMD_TX_FREE_LOG(DEBUG,
884 "Setting RS bit on TXD id="
885 "%4u (port=%d queue=%d)",
886 tx_last, txq->port_id, txq->queue_id);
888 cmd_type_len |= IXGBE_TXD_CMD_RS;
890 /* Update txq RS bit counters */
896 txd->read.cmd_type_len |= rte_cpu_to_le_32(cmd_type_len);
900 /* set RS on last packet in the burst */
902 txp->read.cmd_type_len |= rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
907 * Set the Transmit Descriptor Tail (TDT)
909 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
910 (unsigned) txq->port_id, (unsigned) txq->queue_id,
911 (unsigned) tx_id, (unsigned) nb_tx);
912 IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
913 txq->tx_tail = tx_id;
918 /*********************************************************************
922 **********************************************************************/
923 #define IXGBE_PACKET_TYPE_IPV4 0X01
924 #define IXGBE_PACKET_TYPE_IPV4_TCP 0X11
925 #define IXGBE_PACKET_TYPE_IPV4_UDP 0X21
926 #define IXGBE_PACKET_TYPE_IPV4_SCTP 0X41
927 #define IXGBE_PACKET_TYPE_IPV4_EXT 0X03
928 #define IXGBE_PACKET_TYPE_IPV4_EXT_SCTP 0X43
929 #define IXGBE_PACKET_TYPE_IPV6 0X04
930 #define IXGBE_PACKET_TYPE_IPV6_TCP 0X14
931 #define IXGBE_PACKET_TYPE_IPV6_UDP 0X24
932 #define IXGBE_PACKET_TYPE_IPV6_EXT 0X0C
933 #define IXGBE_PACKET_TYPE_IPV6_EXT_TCP 0X1C
934 #define IXGBE_PACKET_TYPE_IPV6_EXT_UDP 0X2C
935 #define IXGBE_PACKET_TYPE_IPV4_IPV6 0X05
936 #define IXGBE_PACKET_TYPE_IPV4_IPV6_TCP 0X15
937 #define IXGBE_PACKET_TYPE_IPV4_IPV6_UDP 0X25
938 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT 0X0D
939 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
940 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
941 #define IXGBE_PACKET_TYPE_MAX 0X80
942 #define IXGBE_PACKET_TYPE_MASK 0X7F
943 #define IXGBE_PACKET_TYPE_SHIFT 0X04
944 static inline uint32_t
945 ixgbe_rxd_pkt_info_to_pkt_type(uint16_t pkt_info)
947 static const uint32_t
948 ptype_table[IXGBE_PACKET_TYPE_MAX] __rte_cache_aligned = {
949 [IXGBE_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
951 [IXGBE_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
952 RTE_PTYPE_L3_IPV4_EXT,
953 [IXGBE_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
955 [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
956 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
957 RTE_PTYPE_INNER_L3_IPV6,
958 [IXGBE_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
959 RTE_PTYPE_L3_IPV6_EXT,
960 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
961 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
962 RTE_PTYPE_INNER_L3_IPV6_EXT,
963 [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
964 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
965 [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
966 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
967 [IXGBE_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
968 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
969 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
970 [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
971 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
972 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
973 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
974 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
975 [IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
976 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
977 [IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
978 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
979 [IXGBE_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
980 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
981 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
982 [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
983 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
984 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
985 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
986 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
987 [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
988 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
989 [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
990 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
992 if (unlikely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
993 return RTE_PTYPE_UNKNOWN;
995 pkt_info = (pkt_info >> IXGBE_PACKET_TYPE_SHIFT) &
996 IXGBE_PACKET_TYPE_MASK;
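/*
 * e.g. a pkt_info value that reduces to IXGBE_PACKET_TYPE_IPV4_TCP (0x11)
 * after the shift and mask selects RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
 * RTE_PTYPE_L4_TCP from the table above.
 */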
998 return ptype_table[pkt_info];
1001 static inline uint64_t
1002 ixgbe_rxd_pkt_info_to_pkt_flags(uint16_t pkt_info)
1004 static uint64_t ip_rss_types_map[16] __rte_cache_aligned = {
1005 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
1006 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
1007 PKT_RX_RSS_HASH, 0, 0, 0,
1008 0, 0, 0, PKT_RX_FDIR,
1010 #ifdef RTE_LIBRTE_IEEE1588
1011 static uint64_t ip_pkt_etqf_map[8] = {
1012 0, 0, 0, PKT_RX_IEEE1588_PTP,
1016 if (likely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
1017 return ip_pkt_etqf_map[(pkt_info >> 4) & 0X07] |
1018 ip_rss_types_map[pkt_info & 0XF];
1020 return ip_rss_types_map[pkt_info & 0XF];
1022 return ip_rss_types_map[pkt_info & 0XF];
1026 static inline uint64_t
1027 rx_desc_status_to_pkt_flags(uint32_t rx_status)
1032 * Check only whether a VLAN tag is present.
1033 * Do not check whether the L3/L4 RX checksum was verified by the NIC;
1034 * that can be determined from the rte_eth_rxmode.hw_ip_checksum flag.
1036 pkt_flags = (rx_status & IXGBE_RXD_STAT_VP) ? PKT_RX_VLAN_PKT : 0;
1038 #ifdef RTE_LIBRTE_IEEE1588
1039 if (rx_status & IXGBE_RXD_STAT_TMST)
1040 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
1045 static inline uint64_t
1046 rx_desc_error_to_pkt_flags(uint32_t rx_status)
1051 * Bit 31: IPE, IPv4 checksum error
1052 * Bit 30: L4I, L4 integrity error
1054 static uint64_t error_to_pkt_flags_map[4] = {
1055 0, PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
1056 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
1058 pkt_flags = error_to_pkt_flags_map[(rx_status >>
1059 IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK];
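/*
 * The two error bits form a 2-bit index into the map above: L4I (bit 30)
 * selects bit 0 and IPE (bit 31) selects bit 1. A descriptor with only
 * IPE set therefore yields index 2, i.e. PKT_RX_IP_CKSUM_BAD.
 */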
1061 if ((rx_status & IXGBE_RXD_STAT_OUTERIPCS) &&
1062 (rx_status & IXGBE_RXDADV_ERR_OUTERIPER)) {
1063 pkt_flags |= PKT_RX_EIP_CKSUM_BAD;
1070 * LOOK_AHEAD defines how many desc statuses to check beyond the
1071 * current descriptor.
1072 * It must be a pound define for optimal performance.
1073 * Do not change the value of LOOK_AHEAD, as the ixgbe_rx_scan_hw_ring
1074 * function only works with LOOK_AHEAD=8.
1076 #define LOOK_AHEAD 8
1077 #if (LOOK_AHEAD != 8)
1078 #error "PMD IXGBE: LOOK_AHEAD must be 8\n"
1081 ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
1083 volatile union ixgbe_adv_rx_desc *rxdp;
1084 struct ixgbe_rx_entry *rxep;
1085 struct rte_mbuf *mb;
1089 uint32_t s[LOOK_AHEAD];
1090 uint16_t pkt_info[LOOK_AHEAD];
1091 int i, j, nb_rx = 0;
1094 /* get references to current descriptor and S/W ring entry */
1095 rxdp = &rxq->rx_ring[rxq->rx_tail];
1096 rxep = &rxq->sw_ring[rxq->rx_tail];
1098 status = rxdp->wb.upper.status_error;
1099 /* check to make sure there is at least 1 packet to receive */
1100 if (!(status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
1104 * Scan LOOK_AHEAD descriptors at a time to determine which descriptors
1105 * reference packets that are ready to be received.
1107 for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST;
1108 i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD)
1110 /* Read desc statuses backwards to avoid race condition */
1111 for (j = LOOK_AHEAD-1; j >= 0; --j)
1112 s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error);
1114 for (j = LOOK_AHEAD - 1; j >= 0; --j)
1115 pkt_info[j] = rxdp[j].wb.lower.lo_dword.
1118 /* Compute how many status bits were set */
1120 for (j = 0; j < LOOK_AHEAD; ++j)
1121 nb_dd += s[j] & IXGBE_RXDADV_STAT_DD;
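/*
 * DD is bit 0 of the status word, so each completed descriptor adds
 * exactly 1 here; e.g. if 5 of the 8 look-ahead descriptors are done,
 * nb_dd ends up as 5 and only those 5 are translated below.
 */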
1125 /* Translate descriptor info to mbuf format */
1126 for (j = 0; j < nb_dd; ++j) {
1128 pkt_len = rte_le_to_cpu_16(rxdp[j].wb.upper.length) -
1130 mb->data_len = pkt_len;
1131 mb->pkt_len = pkt_len;
1132 mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
1134 /* convert descriptor fields to rte mbuf flags */
1135 pkt_flags = rx_desc_status_to_pkt_flags(s[j]);
1136 pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
1138 ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info[j]);
1139 mb->ol_flags = pkt_flags;
1141 ixgbe_rxd_pkt_info_to_pkt_type(pkt_info[j]);
1143 if (likely(pkt_flags & PKT_RX_RSS_HASH))
1144 mb->hash.rss = rte_le_to_cpu_32(
1145 rxdp[j].wb.lower.hi_dword.rss);
1146 else if (pkt_flags & PKT_RX_FDIR) {
1147 mb->hash.fdir.hash = rte_le_to_cpu_16(
1148 rxdp[j].wb.lower.hi_dword.csum_ip.csum) &
1149 IXGBE_ATR_HASH_MASK;
1150 mb->hash.fdir.id = rte_le_to_cpu_16(
1151 rxdp[j].wb.lower.hi_dword.csum_ip.ip_id);
1155 /* Move mbuf pointers from the S/W ring to the stage */
1156 for (j = 0; j < LOOK_AHEAD; ++j) {
1157 rxq->rx_stage[i + j] = rxep[j].mbuf;
1160 /* stop scanning if this group of LOOK_AHEAD descriptors was not all done */
1161 if (nb_dd != LOOK_AHEAD)
1165 /* clear software ring entries so we can cleanup correctly */
1166 for (i = 0; i < nb_rx; ++i) {
1167 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1175 ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf)
1177 volatile union ixgbe_adv_rx_desc *rxdp;
1178 struct ixgbe_rx_entry *rxep;
1179 struct rte_mbuf *mb;
1184 /* allocate buffers in bulk directly into the S/W ring */
1185 alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
1186 rxep = &rxq->sw_ring[alloc_idx];
1187 diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
1188 rxq->rx_free_thresh);
1189 if (unlikely(diag != 0))
1192 rxdp = &rxq->rx_ring[alloc_idx];
1193 for (i = 0; i < rxq->rx_free_thresh; ++i) {
1194 /* populate the static rte mbuf fields */
1199 mb->port = rxq->port_id;
1202 rte_mbuf_refcnt_set(mb, 1);
1203 mb->data_off = RTE_PKTMBUF_HEADROOM;
1205 /* populate the descriptors */
1206 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mb));
1207 rxdp[i].read.hdr_addr = 0;
1208 rxdp[i].read.pkt_addr = dma_addr;
1211 /* update state of internal queue structure */
1212 rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh;
1213 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1214 rxq->rx_free_trigger = rxq->rx_free_thresh - 1;
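/*
 * e.g. with rx_free_thresh = 32 and a 512-entry ring, the trigger
 * advances 31, 63, ..., 511 and then wraps back to 31.
 */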
1220 static inline uint16_t
1221 ixgbe_rx_fill_from_stage(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
1224 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1227 /* how many packets are ready to return? */
1228 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1230 /* copy mbuf pointers to the application's packet list */
1231 for (i = 0; i < nb_pkts; ++i)
1232 rx_pkts[i] = stage[i];
1234 /* update internal queue state */
1235 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1236 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1241 static inline uint16_t
1242 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1245 struct ixgbe_rx_queue *rxq = (struct ixgbe_rx_queue *)rx_queue;
1248 /* Any previously recv'd pkts will be returned from the Rx stage */
1249 if (rxq->rx_nb_avail)
1250 return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1252 /* Scan the H/W ring for packets to receive */
1253 nb_rx = (uint16_t)ixgbe_rx_scan_hw_ring(rxq);
1255 /* update internal queue state */
1256 rxq->rx_next_avail = 0;
1257 rxq->rx_nb_avail = nb_rx;
1258 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1260 /* if required, allocate new buffers to replenish descriptors */
1261 if (rxq->rx_tail > rxq->rx_free_trigger) {
1262 uint16_t cur_free_trigger = rxq->rx_free_trigger;
1264 if (ixgbe_rx_alloc_bufs(rxq, true) != 0) {
1266 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1267 "queue_id=%u", (unsigned) rxq->port_id,
1268 (unsigned) rxq->queue_id);
1270 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
1271 rxq->rx_free_thresh;
1274 * Need to rewind any previous receives if we cannot
1275 * allocate new buffers to replenish the old ones.
1277 rxq->rx_nb_avail = 0;
1278 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1279 for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j)
1280 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1285 /* update tail pointer */
1287 IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, cur_free_trigger);
1290 if (rxq->rx_tail >= rxq->nb_rx_desc)
1293 /* received any packets this loop? */
1294 if (rxq->rx_nb_avail)
1295 return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1300 /* split requests into chunks of size RTE_PMD_IXGBE_RX_MAX_BURST */
1302 ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1307 if (unlikely(nb_pkts == 0))
1310 if (likely(nb_pkts <= RTE_PMD_IXGBE_RX_MAX_BURST))
1311 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1313 /* request is relatively large, chunk it up */
1317 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_RX_MAX_BURST);
1318 ret = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1319 nb_rx = (uint16_t)(nb_rx + ret);
1320 nb_pkts = (uint16_t)(nb_pkts - ret);
1329 ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1332 struct ixgbe_rx_queue *rxq;
1333 volatile union ixgbe_adv_rx_desc *rx_ring;
1334 volatile union ixgbe_adv_rx_desc *rxdp;
1335 struct ixgbe_rx_entry *sw_ring;
1336 struct ixgbe_rx_entry *rxe;
1337 struct rte_mbuf *rxm;
1338 struct rte_mbuf *nmb;
1339 union ixgbe_adv_rx_desc rxd;
1352 rx_id = rxq->rx_tail;
1353 rx_ring = rxq->rx_ring;
1354 sw_ring = rxq->sw_ring;
1355 while (nb_rx < nb_pkts) {
1357 * The order of operations here is important as the DD status
1358 * bit must not be read after any other descriptor fields.
1359 * rx_ring and rxdp are pointing to volatile data so the order
1360 * of accesses cannot be reordered by the compiler. If they were
1361 * not volatile, they could be reordered which could lead to
1362 * using invalid descriptor fields when read from rxd.
1364 rxdp = &rx_ring[rx_id];
1365 staterr = rxdp->wb.upper.status_error;
1366 if (!(staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
1373 * If the IXGBE_RXDADV_STAT_EOP flag is not set, the RX packet
1374 * is likely to be invalid and to be dropped by the various
1375 * validation checks performed by the network stack.
1377 * Allocate a new mbuf to replenish the RX ring descriptor.
1378 * If the allocation fails:
1379 * - arrange for that RX descriptor to be the first one
1380 * being parsed the next time the receive function is
1381 * invoked [on the same queue].
1383 * - Stop parsing the RX ring and return immediately.
1385 * This policy does not drop the packet received in the RX
1386 * descriptor for which the allocation of a new mbuf failed.
1387 * Thus, it allows that packet to be later retrieved if
1388 * mbufs have been freed in the meantime.
1389 * As a side effect, holding RX descriptors instead of
1390 * systematically giving them back to the NIC may lead to
1391 * RX ring exhaustion situations.
1392 * However, the NIC can gracefully prevent such situations
1393 * from happening by sending specific "back-pressure" flow control
1394 * frames to its peer(s).
1396 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1397 "ext_err_stat=0x%08x pkt_len=%u",
1398 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1399 (unsigned) rx_id, (unsigned) staterr,
1400 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
1402 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
1404 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1405 "queue_id=%u", (unsigned) rxq->port_id,
1406 (unsigned) rxq->queue_id);
1407 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1412 rxe = &sw_ring[rx_id];
1414 if (rx_id == rxq->nb_rx_desc)
1417 /* Prefetch next mbuf while processing current one. */
1418 rte_ixgbe_prefetch(sw_ring[rx_id].mbuf);
1421 * When next RX descriptor is on a cache-line boundary,
1422 * prefetch the next 4 RX descriptors and the next 8 pointers
1425 if ((rx_id & 0x3) == 0) {
1426 rte_ixgbe_prefetch(&rx_ring[rx_id]);
1427 rte_ixgbe_prefetch(&sw_ring[rx_id]);
1433 rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
1434 rxdp->read.hdr_addr = 0;
1435 rxdp->read.pkt_addr = dma_addr;
1438 * Initialize the returned mbuf.
1439 * 1) setup generic mbuf fields:
1440 * - number of segments,
1443 * - RX port identifier.
1444 * 2) integrate hardware offload data, if any:
1445 * - RSS flag & hash,
1446 * - IP checksum flag,
1447 * - VLAN TCI, if any,
1450 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
1452 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1453 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
1456 rxm->pkt_len = pkt_len;
1457 rxm->data_len = pkt_len;
1458 rxm->port = rxq->port_id;
1460 pkt_info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.hs_rss.
1462 /* Only valid if PKT_RX_VLAN_PKT is set in pkt_flags */
1463 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1465 pkt_flags = rx_desc_status_to_pkt_flags(staterr);
1466 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1467 pkt_flags = pkt_flags |
1468 ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
1469 rxm->ol_flags = pkt_flags;
1470 rxm->packet_type = ixgbe_rxd_pkt_info_to_pkt_type(pkt_info);
1472 if (likely(pkt_flags & PKT_RX_RSS_HASH))
1473 rxm->hash.rss = rte_le_to_cpu_32(
1474 rxd.wb.lower.hi_dword.rss);
1475 else if (pkt_flags & PKT_RX_FDIR) {
1476 rxm->hash.fdir.hash = rte_le_to_cpu_16(
1477 rxd.wb.lower.hi_dword.csum_ip.csum) &
1478 IXGBE_ATR_HASH_MASK;
1479 rxm->hash.fdir.id = rte_le_to_cpu_16(
1480 rxd.wb.lower.hi_dword.csum_ip.ip_id);
1483 * Store the mbuf address into the next entry of the array
1484 * of returned packets.
1486 rx_pkts[nb_rx++] = rxm;
1488 rxq->rx_tail = rx_id;
1491 * If the number of free RX descriptors is greater than the RX free
1492 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1494 * Update the RDT with the value of the last processed RX descriptor
1495 * minus 1, to guarantee that the RDT register is never equal to the
1496 * RDH register, which creates a "full" ring situation from the
1497 * hardware point of view...
1499 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1500 if (nb_hold > rxq->rx_free_thresh) {
1501 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1502 "nb_hold=%u nb_rx=%u",
1503 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1504 (unsigned) rx_id, (unsigned) nb_hold,
1506 rx_id = (uint16_t) ((rx_id == 0) ?
1507 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1508 IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1511 rxq->nb_rx_hold = nb_hold;
1516 * Detect an RSC descriptor.
1518 static inline uint32_t
1519 ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
1521 return (rte_le_to_cpu_32(rx->wb.lower.lo_dword.data) &
1522 IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
1526 * ixgbe_fill_cluster_head_buf - fill the first mbuf of the returned packet
1528 * Fill the following info in the HEAD buffer of the Rx cluster:
1529 * - RX port identifier
1530 * - hardware offload data, if any:
1532 * - IP checksum flag
1533 * - VLAN TCI, if any
1535 * @head HEAD of the packet cluster
1536 * @desc HW descriptor to get data from
1537 * @port_id Port ID of the Rx queue
1540 ixgbe_fill_cluster_head_buf(
1541 struct rte_mbuf *head,
1542 union ixgbe_adv_rx_desc *desc,
1549 head->port = port_id;
1551 /* The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
1552 * set in the pkt_flags field.
1554 head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
1555 pkt_info = rte_le_to_cpu_32(desc->wb.lower.lo_dword.hs_rss.pkt_info);
1556 pkt_flags = rx_desc_status_to_pkt_flags(staterr);
1557 pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
1558 pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
1559 head->ol_flags = pkt_flags;
1560 head->packet_type = ixgbe_rxd_pkt_info_to_pkt_type(pkt_info);
1562 if (likely(pkt_flags & PKT_RX_RSS_HASH))
1563 head->hash.rss = rte_le_to_cpu_32(desc->wb.lower.hi_dword.rss);
1564 else if (pkt_flags & PKT_RX_FDIR) {
1565 head->hash.fdir.hash =
1566 rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.csum)
1567 & IXGBE_ATR_HASH_MASK;
1568 head->hash.fdir.id =
1569 rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.ip_id);
1574 * ixgbe_recv_pkts_lro - receive handler for the scattered and LRO cases.
1576 * @rx_queue Rx queue handle
1577 * @rx_pkts table of received packets
1578 * @nb_pkts size of rx_pkts table
1579 * @bulk_alloc if TRUE, bulk allocation is used for HW ring refilling
1581 * Handles the Rx HW ring completions when RSC feature is configured. Uses an
1582 * additional ring of ixgbe_rsc_entry's that will hold the relevant RSC info.
1584 * We use the same logic as in Linux and in FreeBSD ixgbe drivers:
1585 * 1) When non-EOP RSC completion arrives:
1586 * a) Update the HEAD of the current RSC aggregation cluster with the new
1587 * segment's data length.
1588 * b) Set the "next" pointer of the current segment to point to the segment
1589 * at the NEXTP index.
1590 * c) Pass the HEAD of RSC aggregation cluster on to the next NEXTP entry
1591 * in the sw_rsc_ring.
1592 * 2) When EOP arrives we just update the cluster's total length and offload
1593 * flags and deliver the cluster up to the upper layers. In our case - put it
1594 * in the rx_pkts table.
1596 * Returns the number of received packets/clusters (according to the "bulk
1597 * receive" interface).
1599 static inline uint16_t
1600 ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
1603 struct ixgbe_rx_queue *rxq = rx_queue;
1604 volatile union ixgbe_adv_rx_desc *rx_ring = rxq->rx_ring;
1605 struct ixgbe_rx_entry *sw_ring = rxq->sw_ring;
1606 struct ixgbe_scattered_rx_entry *sw_sc_ring = rxq->sw_sc_ring;
1607 uint16_t rx_id = rxq->rx_tail;
1609 uint16_t nb_hold = rxq->nb_rx_hold;
1610 uint16_t prev_id = rxq->rx_tail;
1612 while (nb_rx < nb_pkts) {
1614 struct ixgbe_rx_entry *rxe;
1615 struct ixgbe_scattered_rx_entry *sc_entry;
1616 struct ixgbe_scattered_rx_entry *next_sc_entry;
1617 struct ixgbe_rx_entry *next_rxe;
1618 struct rte_mbuf *first_seg;
1619 struct rte_mbuf *rxm;
1620 struct rte_mbuf *nmb;
1621 union ixgbe_adv_rx_desc rxd;
1624 volatile union ixgbe_adv_rx_desc *rxdp;
1629 * The code in this whole file uses the volatile pointer to
1630 * ensure the read ordering of the status and the rest of the
1631 * descriptor fields (on the compiler level only!!!). This is so
1632 * UGLY - why not just use the compiler barrier instead? DPDK
1633 * even has the rte_compiler_barrier() for that.
1635 * But most importantly this is just wrong because this doesn't
1636 * ensure memory ordering in a general case at all. For
1637 * instance, DPDK is supposed to work on Power CPUs where
1638 * compiler barrier may just not be enough!
1640 * I tried to write only this function properly to have a
1641 * starting point (as a part of an LRO/RSC series) but the
1642 * compiler cursed at me when I tried to cast away the
1643 * "volatile" from rx_ring (yes, it's volatile too!!!). So, I'm
1644 * keeping it the way it is for now.
1646 * The code in this file is broken in so many other places and
1647 * will just not work on a big endian CPU anyway therefore the
1648 * lines below will have to be revisited together with the rest
1652 * - Get rid of "volatile" crap and let the compiler do its
1654 * - Use the proper memory barrier (rte_rmb()) to ensure the
1655 * memory ordering below.
1657 rxdp = &rx_ring[rx_id];
1658 staterr = rte_le_to_cpu_32(rxdp->wb.upper.status_error);
1660 if (!(staterr & IXGBE_RXDADV_STAT_DD))
1665 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1666 "staterr=0x%x data_len=%u",
1667 rxq->port_id, rxq->queue_id, rx_id, staterr,
1668 rte_le_to_cpu_16(rxd.wb.upper.length));
1671 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
1673 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed "
1674 "port_id=%u queue_id=%u",
1675 rxq->port_id, rxq->queue_id);
1677 rte_eth_devices[rxq->port_id].data->
1678 rx_mbuf_alloc_failed++;
1682 else if (nb_hold > rxq->rx_free_thresh) {
1683 uint16_t next_rdt = rxq->rx_free_trigger;
1685 if (!ixgbe_rx_alloc_bufs(rxq, false)) {
1687 IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr,
1689 nb_hold -= rxq->rx_free_thresh;
1691 PMD_RX_LOG(DEBUG, "RX bulk alloc failed "
1692 "port_id=%u queue_id=%u",
1693 rxq->port_id, rxq->queue_id);
1695 rte_eth_devices[rxq->port_id].data->
1696 rx_mbuf_alloc_failed++;
1702 rxe = &sw_ring[rx_id];
1703 eop = staterr & IXGBE_RXDADV_STAT_EOP;
1705 next_id = rx_id + 1;
1706 if (next_id == rxq->nb_rx_desc)
1709 /* Prefetch next mbuf while processing current one. */
1710 rte_ixgbe_prefetch(sw_ring[next_id].mbuf);
1713 * When next RX descriptor is on a cache-line boundary,
1714 * prefetch the next 4 RX descriptors and the next 4 pointers
1717 if ((next_id & 0x3) == 0) {
1718 rte_ixgbe_prefetch(&rx_ring[next_id]);
1719 rte_ixgbe_prefetch(&sw_ring[next_id]);
1726 rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
1728 * Update RX descriptor with the physical address of the
1729 * new data buffer of the newly allocated mbuf.
1733 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1734 rxdp->read.hdr_addr = 0;
1735 rxdp->read.pkt_addr = dma;
1740 * Set data length & data buffer address of mbuf.
1742 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
1743 rxm->data_len = data_len;
1748 * Get next descriptor index:
1749 * - For RSC it's in the NEXTP field.
1750 * - For a scattered packet - it's just a following
1753 if (ixgbe_rsc_count(&rxd))
1755 (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
1756 IXGBE_RXDADV_NEXTP_SHIFT;
1760 next_sc_entry = &sw_sc_ring[nextp_id];
1761 next_rxe = &sw_ring[nextp_id];
1762 rte_ixgbe_prefetch(next_rxe);
1765 sc_entry = &sw_sc_ring[rx_id];
1766 first_seg = sc_entry->fbuf;
1767 sc_entry->fbuf = NULL;
1770 * If this is the first buffer of the received packet,
1771 * set the pointer to the first mbuf of the packet and
1772 * initialize its context.
1773 * Otherwise, update the total length and the number of segments
1774 * of the current scattered packet, and update the pointer to
1775 * the last mbuf of the current packet.
1777 if (first_seg == NULL) {
1779 first_seg->pkt_len = data_len;
1780 first_seg->nb_segs = 1;
1782 first_seg->pkt_len += data_len;
1783 first_seg->nb_segs++;
1790 * If this is not the last buffer of the received packet, update
1791 * the pointer to the first mbuf at the NEXTP entry in the
1792 * sw_sc_ring and continue to parse the RX ring.
1795 rxm->next = next_rxe->mbuf;
1796 next_sc_entry->fbuf = first_seg;
1801 * This is the last buffer of the received packet - return
1802 * the current cluster to the user.
1806 /* Initialize the first mbuf of the returned packet */
1807 ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq->port_id,
1811 * Deal with the case when HW CRC strip is disabled.
1812 * That can't happen when LRO is enabled, but still could
1813 * happen for scattered RX mode.
1815 first_seg->pkt_len -= rxq->crc_len;
1816 if (unlikely(rxm->data_len <= rxq->crc_len)) {
1817 struct rte_mbuf *lp;
1819 for (lp = first_seg; lp->next != rxm; lp = lp->next)
1822 first_seg->nb_segs--;
1823 lp->data_len -= rxq->crc_len - rxm->data_len;
1825 rte_pktmbuf_free_seg(rxm);
1827 rxm->data_len -= rxq->crc_len;
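/*
 * Worked example with a 4-byte CRC: if the last segment holds only 2
 * bytes, it is freed, nb_segs is decremented and the remaining 2 CRC
 * bytes are trimmed from the new last segment; otherwise the CRC is
 * simply trimmed from the last segment's data_len.
 */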
1829 /* Prefetch data of first segment, if configured to do so. */
1830 rte_packet_prefetch((char *)first_seg->buf_addr +
1831 first_seg->data_off);
1834 * Store the mbuf address into the next entry of the array
1835 * of returned packets.
1837 rx_pkts[nb_rx++] = first_seg;
1841 * Record index of the next RX descriptor to probe.
1843 rxq->rx_tail = rx_id;
1846 * If the number of free RX descriptors is greater than the RX free
1847 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1849 * Update the RDT with the value of the last processed RX descriptor
1850 * minus 1, to guarantee that the RDT register is never equal to the
1851 * RDH register, which creates a "full" ring situation from the
1852 * hardware point of view...
1854 if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {
1855 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1856 "nb_hold=%u nb_rx=%u",
1857 rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
1860 IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, prev_id);
1864 rxq->nb_rx_hold = nb_hold;
1869 ixgbe_recv_pkts_lro_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1872 return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, false);
1876 ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1879 return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, true);
1882 /*********************************************************************
1884 * Queue management functions
1886 **********************************************************************/
1888 static void __attribute__((cold))
1889 ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
1893 if (txq->sw_ring != NULL) {
1894 for (i = 0; i < txq->nb_tx_desc; i++) {
1895 if (txq->sw_ring[i].mbuf != NULL) {
1896 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1897 txq->sw_ring[i].mbuf = NULL;
1903 static void __attribute__((cold))
1904 ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
1907 txq->sw_ring != NULL)
1908 rte_free(txq->sw_ring);
1911 static void __attribute__((cold))
1912 ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
1914 if (txq != NULL && txq->ops != NULL) {
1915 txq->ops->release_mbufs(txq);
1916 txq->ops->free_swring(txq);
1921 void __attribute__((cold))
1922 ixgbe_dev_tx_queue_release(void *txq)
1924 ixgbe_tx_queue_release(txq);
1927 /* (Re)set dynamic ixgbe_tx_queue fields to defaults */
1928 static void __attribute__((cold))
1929 ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
1931 static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
1932 struct ixgbe_tx_entry *txe = txq->sw_ring;
1935 /* Zero out HW ring memory */
1936 for (i = 0; i < txq->nb_tx_desc; i++) {
1937 txq->tx_ring[i] = zeroed_desc;
1940 /* Initialize SW ring entries */
1941 prev = (uint16_t) (txq->nb_tx_desc - 1);
1942 for (i = 0; i < txq->nb_tx_desc; i++) {
1943 volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
1944 txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD);
1947 txe[prev].next_id = i;
1951 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
1952 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
1955 txq->nb_tx_used = 0;
1957 * Always allow 1 descriptor to be un-allocated to avoid
1958 * a H/W race condition
1960 txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
1961 txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
1963 memset((void*)&txq->ctx_cache, 0,
1964 IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
1967 static const struct ixgbe_txq_ops def_txq_ops = {
1968 .release_mbufs = ixgbe_tx_queue_release_mbufs,
1969 .free_swring = ixgbe_tx_free_swring,
1970 .reset = ixgbe_reset_tx_queue,
1973 /* Takes an ethdev and a queue and sets up the tx function to be used based on
1974 * the queue parameters. Used in tx_queue_setup by primary process and then
1975 * in dev_init by secondary process when attaching to an existing ethdev.
1977 void __attribute__((cold))
1978 ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
1980 /* Use a simple Tx queue (no offloads, no multi segs) if possible */
1981 if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS)
1982 && (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
1983 PMD_INIT_LOG(DEBUG, "Using simple tx code path");
1984 #ifdef RTE_IXGBE_INC_VECTOR
1985 if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
1986 (rte_eal_process_type() != RTE_PROC_PRIMARY ||
1987 ixgbe_txq_vec_setup(txq) == 0)) {
1988 PMD_INIT_LOG(DEBUG, "Vector tx enabled.");
1989 dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
1992 dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
1994 PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
1996 " - txq_flags = %lx " "[IXGBE_SIMPLE_FLAGS=%lx]",
1997 (unsigned long)txq->txq_flags,
1998 (unsigned long)IXGBE_SIMPLE_FLAGS);
2000 " - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
2001 (unsigned long)txq->tx_rs_thresh,
2002 (unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
2003 dev->tx_pkt_burst = ixgbe_xmit_pkts;
2007 int __attribute__((cold))
2008 ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
2011 unsigned int socket_id,
2012 const struct rte_eth_txconf *tx_conf)
2014 const struct rte_memzone *tz;
2015 struct ixgbe_tx_queue *txq;
2016 struct ixgbe_hw *hw;
2017 uint16_t tx_rs_thresh, tx_free_thresh;
2019 PMD_INIT_FUNC_TRACE();
2020 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2023 * Validate number of transmit descriptors.
2024 * It must not exceed the hardware maximum and must be a multiple of IXGBE_TXD_ALIGN.
2027 if (nb_desc % IXGBE_TXD_ALIGN != 0 ||
2028 (nb_desc > IXGBE_MAX_RING_DESC) ||
2029 (nb_desc < IXGBE_MIN_RING_DESC)) {
2034 * The following two parameters control the setting of the RS bit on
2035 * transmit descriptors.
2036 * TX descriptors will have their RS bit set after txq->tx_rs_thresh
2037 * descriptors have been used.
2038 * The TX descriptor ring will be cleaned after txq->tx_free_thresh
2039 * descriptors are used or if the number of descriptors required
2040 * to transmit a packet is greater than the number of free TX descriptors.
2042 * The following constraints must be satisfied:
2043 * tx_rs_thresh must be greater than 0.
2044 * tx_rs_thresh must be less than the size of the ring minus 2.
2045 * tx_rs_thresh must be less than or equal to tx_free_thresh.
2046 * tx_rs_thresh must be a divisor of the ring size.
2047 * tx_free_thresh must be greater than 0.
2048 * tx_free_thresh must be less than the size of the ring minus 3.
2049 * One descriptor in the TX ring is used as a sentinel to avoid a
2050 * H/W race condition, hence the maximum threshold constraints.
2051 * When set to zero use default values.
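*
* Example of an accepted configuration: nb_desc = 512 with the default
* tx_rs_thresh = 32 and tx_free_thresh = 32 satisfies every constraint
* above (32 > 0, 32 < 510, 32 <= 32, 512 % 32 == 0 and 32 < 509).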
2053 tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
2054 tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
2055 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
2056 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
2057 if (tx_rs_thresh >= (nb_desc - 2)) {
2058 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number "
2059 "of TX descriptors minus 2. (tx_rs_thresh=%u "
2060 "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2061 (int)dev->data->port_id, (int)queue_idx);
2064 if (tx_rs_thresh > DEFAULT_TX_RS_THRESH) {
2065 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to %u. "
2066 "(tx_rs_thresh=%u port=%d queue=%d)",
2067 DEFAULT_TX_RS_THRESH, (unsigned int)tx_rs_thresh,
2068 (int)dev->data->port_id, (int)queue_idx);
2071 if (tx_free_thresh >= (nb_desc - 3)) {
2072 PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
2073 "number of TX descriptors minus 3. "
2074 "(tx_free_thresh=%u "
2075 "port=%d queue=%d)",
2076 (unsigned int)tx_free_thresh,
2077 (int)dev->data->port_id, (int)queue_idx);
2080 if (tx_rs_thresh > tx_free_thresh) {
2081 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to "
2082 "tx_free_thresh. (tx_free_thresh=%u "
2083 "tx_rs_thresh=%u port=%d queue=%d)",
2084 (unsigned int)tx_free_thresh,
2085 (unsigned int)tx_rs_thresh,
2086 (int)dev->data->port_id,
2090 if ((nb_desc % tx_rs_thresh) != 0) {
2091 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
2092 "number of TX descriptors. (tx_rs_thresh=%u "
2093 "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2094 (int)dev->data->port_id, (int)queue_idx);
2099 * If tx_rs_thresh is greater than 1, then TX WTHRESH should be
2100 * set to 0. If WTHRESH is greater than zero, the RS bit is ignored
2101 * by the NIC and all descriptors are written back after the NIC
2102 * accumulates WTHRESH descriptors.
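* In practice this means descriptor write-back is driven either by the
* RS bit (tx_rs_thresh > 1 with wthresh == 0) or by WTHRESH batching,
* never by both at once.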
2104 if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
2105 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
2106 "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
2107 "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2108 (int)dev->data->port_id, (int)queue_idx);
2112 /* Free memory prior to re-allocation if needed... */
2113 if (dev->data->tx_queues[queue_idx] != NULL) {
2114 ixgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
2115 dev->data->tx_queues[queue_idx] = NULL;
2118 /* First allocate the tx queue data structure */
2119 txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue),
2120 RTE_CACHE_LINE_SIZE, socket_id);
2125 * Allocate TX ring hardware descriptors. A memzone large enough to
2126 * handle the maximum ring size is allocated in order to allow for
2127 * resizing in later calls to the queue setup function.
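* Illustrative sizing: each advanced TX descriptor is 16 bytes, so
* reserving IXGBE_MAX_RING_DESC (4096) entries costs 64 KB of DMA
* memory per queue regardless of the requested nb_desc.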
2129 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
2130 sizeof(union ixgbe_adv_tx_desc) * IXGBE_MAX_RING_DESC,
2131 IXGBE_ALIGN, socket_id);
2133 ixgbe_tx_queue_release(txq);
2137 txq->nb_tx_desc = nb_desc;
2138 txq->tx_rs_thresh = tx_rs_thresh;
2139 txq->tx_free_thresh = tx_free_thresh;
2140 txq->pthresh = tx_conf->tx_thresh.pthresh;
2141 txq->hthresh = tx_conf->tx_thresh.hthresh;
2142 txq->wthresh = tx_conf->tx_thresh.wthresh;
2143 txq->queue_id = queue_idx;
2144 txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2145 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2146 txq->port_id = dev->data->port_id;
2147 txq->txq_flags = tx_conf->txq_flags;
2148 txq->ops = &def_txq_ops;
2149 txq->tx_deferred_start = tx_conf->tx_deferred_start;
2152 * Set VFTDT as the tail register for a virtual function, if a VF is detected
2154 if (hw->mac.type == ixgbe_mac_82599_vf ||
2155 hw->mac.type == ixgbe_mac_X540_vf ||
2156 hw->mac.type == ixgbe_mac_X550_vf ||
2157 hw->mac.type == ixgbe_mac_X550EM_x_vf ||
2158 hw->mac.type == ixgbe_mac_X550EM_a_vf)
2159 txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx));
2161 txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
2163 txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
2164 txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;
2166 /* Allocate software ring */
2167 txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
2168 sizeof(struct ixgbe_tx_entry) * nb_desc,
2169 RTE_CACHE_LINE_SIZE, socket_id);
2170 if (txq->sw_ring == NULL) {
2171 ixgbe_tx_queue_release(txq);
2174 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
2175 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
2177 /* set up vector or scalar TX function as appropriate */
2178 ixgbe_set_tx_function(dev, txq);
2180 txq->ops->reset(txq);
2182 dev->data->tx_queues[queue_idx] = txq;
2189 * ixgbe_free_sc_cluster - free the not-yet-completed scattered cluster
2191 * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
2192 * in the sw_rsc_ring is not set to NULL but rather points to the next
2193 * mbuf of this RSC aggregation (that has not been completed yet and still
2194 * resides on the HW ring). So, instead of calling for rte_pktmbuf_free() we
2195 * will just free first "nb_segs" segments of the cluster explicitly by calling
2196 * an rte_pktmbuf_free_seg().
2198 * @m scattered cluster head
2200 static void __attribute__((cold))
2201 ixgbe_free_sc_cluster(struct rte_mbuf *m)
2203 uint8_t i, nb_segs = m->nb_segs;
2204 struct rte_mbuf *next_seg;
2206 for (i = 0; i < nb_segs; i++) {
2208 rte_pktmbuf_free_seg(m);
2213 static void __attribute__((cold))
2214 ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
2218 #ifdef RTE_IXGBE_INC_VECTOR
2219 /* SSE Vector driver has a different way of releasing mbufs. */
2220 if (rxq->rx_using_sse) {
2221 ixgbe_rx_queue_release_mbufs_vec(rxq);
2226 if (rxq->sw_ring != NULL) {
2227 for (i = 0; i < rxq->nb_rx_desc; i++) {
2228 if (rxq->sw_ring[i].mbuf != NULL) {
2229 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
2230 rxq->sw_ring[i].mbuf = NULL;
2233 if (rxq->rx_nb_avail) {
2234 for (i = 0; i < rxq->rx_nb_avail; ++i) {
2235 struct rte_mbuf *mb;
2236 mb = rxq->rx_stage[rxq->rx_next_avail + i];
2237 rte_pktmbuf_free_seg(mb);
2239 rxq->rx_nb_avail = 0;
2243 if (rxq->sw_sc_ring)
2244 for (i = 0; i < rxq->nb_rx_desc; i++)
2245 if (rxq->sw_sc_ring[i].fbuf) {
2246 ixgbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
2247 rxq->sw_sc_ring[i].fbuf = NULL;
2251 static void __attribute__((cold))
2252 ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)
2255 ixgbe_rx_queue_release_mbufs(rxq);
2256 rte_free(rxq->sw_ring);
2257 rte_free(rxq->sw_sc_ring);
2262 void __attribute__((cold))
2263 ixgbe_dev_rx_queue_release(void *rxq)
2265 ixgbe_rx_queue_release(rxq);
2269 * Check if Rx Burst Bulk Alloc function can be used.
2271 * 0: the preconditions are satisfied and the bulk allocation function
2273 * -EINVAL: the preconditions are NOT satisfied and the default Rx burst
2274 * function must be used.
2276 static inline int __attribute__((cold))
2277 check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
2282 * Make sure the following pre-conditions are satisfied:
2283 * rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST
2284 * rxq->rx_free_thresh < rxq->nb_rx_desc
2285 * (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
2286 * rxq->nb_rx_desc<(IXGBE_MAX_RING_DESC-RTE_PMD_IXGBE_RX_MAX_BURST)
2287 * Scattered packets are not supported. This should be checked
2288 * outside of this function.
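* Example (illustrative): nb_rx_desc = 512 with rx_free_thresh = 32
* passes all of these checks, assuming RTE_PMD_IXGBE_RX_MAX_BURST is
* 32: 32 >= 32, 32 < 512, 512 % 32 == 0 and 512 < 4096 - 32.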
2290 if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) {
2291 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2292 "rxq->rx_free_thresh=%d, "
2293 "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
2294 rxq->rx_free_thresh, RTE_PMD_IXGBE_RX_MAX_BURST);
2296 } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
2297 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2298 "rxq->rx_free_thresh=%d, "
2299 "rxq->nb_rx_desc=%d",
2300 rxq->rx_free_thresh, rxq->nb_rx_desc);
2302 } else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) {
2303 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2304 "rxq->nb_rx_desc=%d, "
2305 "rxq->rx_free_thresh=%d",
2306 rxq->nb_rx_desc, rxq->rx_free_thresh);
2308 } else if (!(rxq->nb_rx_desc <
2309 (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST))) {
2310 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2311 "rxq->nb_rx_desc=%d, "
2312 "IXGBE_MAX_RING_DESC=%d, "
2313 "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
2314 rxq->nb_rx_desc, IXGBE_MAX_RING_DESC,
2315 RTE_PMD_IXGBE_RX_MAX_BURST);
2322 /* Reset dynamic ixgbe_rx_queue fields back to defaults */
2323 static void __attribute__((cold))
2324 ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
2326 static const union ixgbe_adv_rx_desc zeroed_desc = {{0}};
2328 uint16_t len = rxq->nb_rx_desc;
2331 * By default, the Rx queue setup function allocates enough memory for
2332 * IXGBE_MAX_RING_DESC. The Rx Burst bulk allocation function requires
2333 * extra memory at the end of the descriptor ring to be zero'd out. A
2334 * pre-condition for using the Rx burst bulk alloc function is that the
2335 * number of descriptors is less than or equal to
2336 * (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST). Check all the
2337 * constraints here to see if we need to zero out memory after the end
2338 * of the H/W descriptor ring.
2340 if (adapter->rx_bulk_alloc_allowed)
2341 /* zero out extra memory */
2342 len += RTE_PMD_IXGBE_RX_MAX_BURST;
2345 * Zero out HW ring memory. Zero out extra memory at the end of
2346 * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
2347 * reads extra memory as zeros.
2349 for (i = 0; i < len; i++) {
2350 rxq->rx_ring[i] = zeroed_desc;
2354 * initialize extra software ring entries. Space for these extra
2355 * entries is always allocated
2357 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
2358 for (i = rxq->nb_rx_desc; i < len; ++i) {
2359 rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
2362 rxq->rx_nb_avail = 0;
2363 rxq->rx_next_avail = 0;
2364 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
2366 rxq->nb_rx_hold = 0;
2367 rxq->pkt_first_seg = NULL;
2368 rxq->pkt_last_seg = NULL;
2370 #ifdef RTE_IXGBE_INC_VECTOR
2371 rxq->rxrearm_start = 0;
2372 rxq->rxrearm_nb = 0;
2376 int __attribute__((cold))
2377 ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
2380 unsigned int socket_id,
2381 const struct rte_eth_rxconf *rx_conf,
2382 struct rte_mempool *mp)
2384 const struct rte_memzone *rz;
2385 struct ixgbe_rx_queue *rxq;
2386 struct ixgbe_hw *hw;
2388 struct ixgbe_adapter *adapter =
2389 (struct ixgbe_adapter *)dev->data->dev_private;
2391 PMD_INIT_FUNC_TRACE();
2392 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2395 * Validate number of receive descriptors.
2396 * It must not exceed hardware maximum, and must be multiple
2399 if (nb_desc % IXGBE_RXD_ALIGN != 0 ||
2400 (nb_desc > IXGBE_MAX_RING_DESC) ||
2401 (nb_desc < IXGBE_MIN_RING_DESC)) {
2405 /* Free memory prior to re-allocation if needed... */
2406 if (dev->data->rx_queues[queue_idx] != NULL) {
2407 ixgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
2408 dev->data->rx_queues[queue_idx] = NULL;
2411 /* First allocate the rx queue data structure */
2412 rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct ixgbe_rx_queue),
2413 RTE_CACHE_LINE_SIZE, socket_id);
2417 rxq->nb_rx_desc = nb_desc;
2418 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
2419 rxq->queue_id = queue_idx;
2420 rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2421 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2422 rxq->port_id = dev->data->port_id;
2423 rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
2425 rxq->drop_en = rx_conf->rx_drop_en;
2426 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
2429 * Allocate RX ring hardware descriptors. A memzone large enough to
2430 * handle the maximum ring size is allocated in order to allow for
2431 * resizing in later calls to the queue setup function.
2433 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
2434 RX_RING_SZ, IXGBE_ALIGN, socket_id);
2436 ixgbe_rx_queue_release(rxq);
2441 * Zero init all the descriptors in the ring.
2443 memset (rz->addr, 0, RX_RING_SZ);
2446 * Set up VFRDT as the tail register for a Virtual Function, if a VF is detected
2448 if (hw->mac.type == ixgbe_mac_82599_vf ||
2449 hw->mac.type == ixgbe_mac_X540_vf ||
2450 hw->mac.type == ixgbe_mac_X550_vf ||
2451 hw->mac.type == ixgbe_mac_X550EM_x_vf ||
2452 hw->mac.type == ixgbe_mac_X550EM_a_vf) {
2454 IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx));
2456 IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDH(queue_idx));
2460 IXGBE_PCI_REG_ADDR(hw, IXGBE_RDT(rxq->reg_idx));
2462 IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx));
2465 rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
2466 rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
2469 * Certain constraints must be met in order to use the bulk buffer
2470 * allocation Rx burst function. If any Rx queue doesn't meet them,
2471 * the feature should be disabled for the whole port.
2473 if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
2474 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc "
2475 "preconditions - canceling the feature for "
2476 "the whole port[%d]",
2477 rxq->queue_id, rxq->port_id);
2478 adapter->rx_bulk_alloc_allowed = false;
2482 * Allocate software ring. Allow for space at the end of the
2483 * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
2484 * function does not access an invalid memory region.
2487 if (adapter->rx_bulk_alloc_allowed)
2488 len += RTE_PMD_IXGBE_RX_MAX_BURST;
2490 rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
2491 sizeof(struct ixgbe_rx_entry) * len,
2492 RTE_CACHE_LINE_SIZE, socket_id);
2493 if (!rxq->sw_ring) {
2494 ixgbe_rx_queue_release(rxq);
2499 * Always allocate even if it's not going to be needed in order to
2500 * simplify the code.
2502 * This ring is used in LRO and Scattered Rx cases and Scattered Rx may
2503 * be requested in ixgbe_dev_rx_init(), which is called later from
2507 rte_zmalloc_socket("rxq->sw_sc_ring",
2508 sizeof(struct ixgbe_scattered_rx_entry) * len,
2509 RTE_CACHE_LINE_SIZE, socket_id);
2510 if (!rxq->sw_sc_ring) {
2511 ixgbe_rx_queue_release(rxq);
2515 PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p "
2516 "dma_addr=0x%"PRIx64,
2517 rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
2518 rxq->rx_ring_phys_addr);
2520 if (!rte_is_power_of_2(nb_desc)) {
2521 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
2522 "preconditions - canceling the feature for "
2523 "the whole port[%d]",
2524 rxq->queue_id, rxq->port_id);
2525 adapter->rx_vec_allowed = false;
2527 ixgbe_rxq_vec_setup(rxq);
2529 dev->data->rx_queues[queue_idx] = rxq;
2531 ixgbe_reset_rx_queue(adapter, rxq);
2537 ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
2539 #define IXGBE_RXQ_SCAN_INTERVAL 4
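/* The DD bit is only sampled every IXGBE_RXQ_SCAN_INTERVAL descriptors,
* so the count returned below is an approximation rounded to that step.
*/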
2540 volatile union ixgbe_adv_rx_desc *rxdp;
2541 struct ixgbe_rx_queue *rxq;
2544 if (rx_queue_id >= dev->data->nb_rx_queues) {
2545 PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
2549 rxq = dev->data->rx_queues[rx_queue_id];
2550 rxdp = &(rxq->rx_ring[rxq->rx_tail]);
2552 while ((desc < rxq->nb_rx_desc) &&
2553 (rxdp->wb.upper.status_error &
2554 rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) {
2555 desc += IXGBE_RXQ_SCAN_INTERVAL;
2556 rxdp += IXGBE_RXQ_SCAN_INTERVAL;
2557 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
2558 rxdp = &(rxq->rx_ring[rxq->rx_tail +
2559 desc - rxq->nb_rx_desc]);
2566 ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
2568 volatile union ixgbe_adv_rx_desc *rxdp;
2569 struct ixgbe_rx_queue *rxq = rx_queue;
2572 if (unlikely(offset >= rxq->nb_rx_desc))
2574 desc = rxq->rx_tail + offset;
2575 if (desc >= rxq->nb_rx_desc)
2576 desc -= rxq->nb_rx_desc;
2578 rxdp = &rxq->rx_ring[desc];
2579 return !!(rxdp->wb.upper.status_error &
2580 rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD));
2583 void __attribute__((cold))
2584 ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
2587 struct ixgbe_adapter *adapter =
2588 (struct ixgbe_adapter *)dev->data->dev_private;
2590 PMD_INIT_FUNC_TRACE();
2592 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2593 struct ixgbe_tx_queue *txq = dev->data->tx_queues[i];
2595 txq->ops->release_mbufs(txq);
2596 txq->ops->reset(txq);
2600 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2601 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
2603 ixgbe_rx_queue_release_mbufs(rxq);
2604 ixgbe_reset_rx_queue(adapter, rxq);
2610 ixgbe_dev_free_queues(struct rte_eth_dev *dev)
2614 PMD_INIT_FUNC_TRACE();
2616 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2617 ixgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
2618 dev->data->rx_queues[i] = NULL;
2620 dev->data->nb_rx_queues = 0;
2622 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2623 ixgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
2624 dev->data->tx_queues[i] = NULL;
2626 dev->data->nb_tx_queues = 0;
2629 /*********************************************************************
2631 * Device RX/TX init functions
2633 **********************************************************************/
2636 * Receive Side Scaling (RSS)
2637 * See section 7.1.2.8 in the following document:
2638 * "Intel 82599 10 GbE Controller Datasheet" - Revision 2.1 October 2009
2641 * The source and destination IP addresses of the IP header and the source
2642 * and destination ports of TCP/UDP headers, if any, of received packets are
2643 * hashed against a configurable random key to compute a 32-bit RSS hash result.
2644 * The seven (7) LSBs of the 32-bit hash result are used as an index into a
2645 * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
2646 * RSS output index which is used as the RX queue index in which to store the received packet.
2648 * The following output is supplied in the RX write-back descriptor:
2649 * - 32-bit result of the Microsoft RSS hash function,
2650 * - 4-bit RSS type field.
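* Put differently, the destination queue is effectively
* queue = RETA[hash & 0x7F], since the 7 LSBs index the 128-entry table.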
2654 * RSS random key supplied in section 7.1.2.8.3 of the Intel 82599 datasheet.
2655 * Used as the default key.
2657 static uint8_t rss_intel_key[40] = {
2658 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
2659 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
2660 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
2661 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
2662 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
2666 ixgbe_rss_disable(struct rte_eth_dev *dev)
2668 struct ixgbe_hw *hw;
2672 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2673 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
2674 mrqc = IXGBE_READ_REG(hw, mrqc_reg);
2675 mrqc &= ~IXGBE_MRQC_RSSEN;
2676 IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
2680 ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
2690 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
2691 rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
2693 hash_key = rss_conf->rss_key;
2694 if (hash_key != NULL) {
2695 /* Fill in RSS hash key */
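/* Each RSSRK register packs 4 key bytes little-endian; e.g. the first
* four bytes of the default key (6D 5A 56 DA) would be written as
* 0xDA565A6D (illustrative).
*/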
2696 for (i = 0; i < 10; i++) {
2697 rss_key = hash_key[(i * 4)];
2698 rss_key |= hash_key[(i * 4) + 1] << 8;
2699 rss_key |= hash_key[(i * 4) + 2] << 16;
2700 rss_key |= hash_key[(i * 4) + 3] << 24;
2701 IXGBE_WRITE_REG_ARRAY(hw, rssrk_reg, i, rss_key);
2705 /* Set configured hashing protocols in MRQC register */
2706 rss_hf = rss_conf->rss_hf;
2707 mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */
2708 if (rss_hf & ETH_RSS_IPV4)
2709 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
2710 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
2711 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
2712 if (rss_hf & ETH_RSS_IPV6)
2713 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
2714 if (rss_hf & ETH_RSS_IPV6_EX)
2715 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
2716 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
2717 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
2718 if (rss_hf & ETH_RSS_IPV6_TCP_EX)
2719 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
2720 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
2721 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
2722 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
2723 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
2724 if (rss_hf & ETH_RSS_IPV6_UDP_EX)
2725 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2726 IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
2730 ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
2731 struct rte_eth_rss_conf *rss_conf)
2733 struct ixgbe_hw *hw;
2738 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2740 if (!ixgbe_rss_update_sp(hw->mac.type)) {
2741 PMD_DRV_LOG(ERR, "RSS hash update is not supported on this "
2745 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
2748 * Excerpt from section 7.1.2.8 Receive-Side Scaling (RSS):
2749 * "RSS enabling cannot be done dynamically while it must be
2750 * preceded by a software reset"
2751 * Before changing anything, first check that the update RSS operation
2752 * does not attempt to disable RSS, if RSS was enabled at
2753 * initialization time, or does not attempt to enable RSS, if RSS was
2754 * disabled at initialization time.
2756 rss_hf = rss_conf->rss_hf & IXGBE_RSS_OFFLOAD_ALL;
2757 mrqc = IXGBE_READ_REG(hw, mrqc_reg);
2758 if (!(mrqc & IXGBE_MRQC_RSSEN)) { /* RSS disabled */
2759 if (rss_hf != 0) /* Enable RSS */
2761 return 0; /* Nothing to do */
2764 if (rss_hf == 0) /* Disable RSS */
2766 ixgbe_hw_rss_hash_set(hw, rss_conf);
2771 ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
2772 struct rte_eth_rss_conf *rss_conf)
2774 struct ixgbe_hw *hw;
2783 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2784 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
2785 rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
2786 hash_key = rss_conf->rss_key;
2787 if (hash_key != NULL) {
2788 /* Return RSS hash key */
2789 for (i = 0; i < 10; i++) {
2790 rss_key = IXGBE_READ_REG_ARRAY(hw, rssrk_reg, i);
2791 hash_key[(i * 4)] = rss_key & 0x000000FF;
2792 hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
2793 hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
2794 hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
2798 /* Get RSS functions configured in MRQC register */
2799 mrqc = IXGBE_READ_REG(hw, mrqc_reg);
2800 if ((mrqc & IXGBE_MRQC_RSSEN) == 0) { /* RSS is disabled */
2801 rss_conf->rss_hf = 0;
2805 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4)
2806 rss_hf |= ETH_RSS_IPV4;
2807 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP)
2808 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
2809 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6)
2810 rss_hf |= ETH_RSS_IPV6;
2811 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX)
2812 rss_hf |= ETH_RSS_IPV6_EX;
2813 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP)
2814 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
2815 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP)
2816 rss_hf |= ETH_RSS_IPV6_TCP_EX;
2817 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP)
2818 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
2819 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP)
2820 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
2821 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP)
2822 rss_hf |= ETH_RSS_IPV6_UDP_EX;
2823 rss_conf->rss_hf = rss_hf;
2828 ixgbe_rss_configure(struct rte_eth_dev *dev)
2830 struct rte_eth_rss_conf rss_conf;
2831 struct ixgbe_hw *hw;
2835 uint16_t sp_reta_size;
2838 PMD_INIT_FUNC_TRACE();
2839 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2841 sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
2844 * Fill in redirection table
2845 * The byte-swap is needed because NIC registers are in
2846 * little-endian order.
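* Example (illustrative): with 4 Rx queues the table holds the repeating
* pattern 0,1,2,3,... and one 32-bit RETA register is written for every
* four entries accumulated in 'reta'.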
2849 for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
2850 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
2852 if (j == dev->data->nb_rx_queues)
2854 reta = (reta << 8) | j;
2856 IXGBE_WRITE_REG(hw, reta_reg,
2861 * Configure the RSS key and the RSS protocols used to compute
2862 * the RSS hash of input packets.
2864 rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
2865 if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
2866 ixgbe_rss_disable(dev);
2869 if (rss_conf.rss_key == NULL)
2870 rss_conf.rss_key = rss_intel_key; /* Default hash key */
2871 ixgbe_hw_rss_hash_set(hw, &rss_conf);
2874 #define NUM_VFTA_REGISTERS 128
2875 #define NIC_RX_BUFFER_SIZE 0x200
2876 #define X550_RX_BUFFER_SIZE 0x180
2879 ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
2881 struct rte_eth_vmdq_dcb_conf *cfg;
2882 struct ixgbe_hw *hw;
2883 enum rte_eth_nb_pools num_pools;
2884 uint32_t mrqc, vt_ctl, queue_mapping, vlanctrl;
2886 uint8_t nb_tcs; /* number of traffic classes */
2889 PMD_INIT_FUNC_TRACE();
2890 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2891 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
2892 num_pools = cfg->nb_queue_pools;
2893 /* Check we have a valid number of pools */
2894 if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
2895 ixgbe_rss_disable(dev);
2898 /* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
2899 nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
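/* Illustrative arithmetic: ETH_VMDQ_DCB_NUM_QUEUES (128) / 16 pools
* yields 8 TCs, and 128 / 32 pools yields 4 TCs.
*/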
2903 * split rx buffer up into sections, each for 1 traffic class
2905 switch (hw->mac.type) {
2906 case ixgbe_mac_X550:
2907 case ixgbe_mac_X550EM_x:
2908 case ixgbe_mac_X550EM_a:
2909 pbsize = (uint16_t)(X550_RX_BUFFER_SIZE / nb_tcs);
2912 pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
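/* RXPBSIZE is expressed in KB, so e.g. a 0x200 (512 KB) packet buffer
* split across 8 TCs leaves 64 KB per traffic class (illustrative).
*/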
2915 for (i = 0; i < nb_tcs; i++) {
2916 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
2917 rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
2918 /* clear 10 bits. */
2919 rxpbsize |= (pbsize << IXGBE_RXPBSIZE_SHIFT); /* set value */
2920 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
2922 /* zero alloc all unused TCs */
2923 for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2924 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
2925 rxpbsize &= (~( 0x3FF << IXGBE_RXPBSIZE_SHIFT ));
2926 /* clear 10 bits. */
2927 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
2930 /* MRQC: enable vmdq and dcb */
2931 mrqc = ((num_pools == ETH_16_POOLS) ? \
2932 IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN );
2933 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2935 /* PFVTCTL: turn on virtualisation and set the default pool */
2936 vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2937 if (cfg->enable_default_pool) {
2938 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
2940 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
2943 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
2945 /* RTRUP2TC: mapping user priorities to traffic classes (TCs) */
2947 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
2949 * mapping is done with 3 bits per priority,
2950 * so shift by i*3 each time
2952 queue_mapping |= ((cfg->dcb_tc[i] & 0x07) << (i * 3));
2954 IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, queue_mapping);
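/* Illustrative example: dcb_tc = {0,0,1,1,2,2,3,3} packs to
* queue_mapping = 0x006D2240, 3 bits per user priority.
*/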
2956 /* RTRPCS: DCB related */
2957 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, IXGBE_RMCS_RRM);
2959 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
2960 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2961 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
2962 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
2964 /* VFTA - enable all vlan filters */
2965 for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
2966 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
2969 /* VFRE: pool enabling for receive - 16 or 32 */
2970 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), \
2971 num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
2974 * MPSAR - allow pools to read specific mac addresses
2975 * In this case, all pools should be able to read from mac addr 0
2977 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), 0xFFFFFFFF);
2978 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), 0xFFFFFFFF);
2980 /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
2981 for (i = 0; i < cfg->nb_pool_maps; i++) {
2982 /* set vlan id in VF register and set the valid bit */
2983 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \
2984 (cfg->pool_map[i].vlan_id & 0xFFF)));
2986 * Put the allowed pools in VFB reg. As we only have 16 or 32
2987 * pools, we only need to use the first half of the register
2990 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), cfg->pool_map[i].pools);
2995 * ixgbe_dcb_tx_hw_config - Configure general DCB TX parameters
2996 * @hw: pointer to hardware structure
2997 * @dcb_config: pointer to ixgbe_dcb_config structure
3000 ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw,
3001 struct ixgbe_dcb_config *dcb_config)
3006 PMD_INIT_FUNC_TRACE();
3007 if (hw->mac.type != ixgbe_mac_82598EB) {
3008 /* Disable the Tx desc arbiter so that MTQC can be changed */
3009 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3010 reg |= IXGBE_RTTDCS_ARBDIS;
3011 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3013 /* Enable DCB for Tx with 8 TCs */
3014 if (dcb_config->num_tcs.pg_tcs == 8) {
3015 reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3018 reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3020 if (dcb_config->vt_mode)
3021 reg |= IXGBE_MTQC_VT_ENA;
3022 IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
3024 /* Disable drop for all queues */
3025 for (q = 0; q < 128; q++)
3026 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3027 (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
3029 /* Enable the Tx desc arbiter */
3030 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3031 reg &= ~IXGBE_RTTDCS_ARBDIS;
3032 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3034 /* Enable Security TX Buffer IFG for DCB */
3035 reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
3036 reg |= IXGBE_SECTX_DCB;
3037 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
3043 * ixgbe_vmdq_dcb_hw_tx_config - Configure general VMDQ+DCB TX parameters
3044 * @dev: pointer to rte_eth_dev structure
3045 * @dcb_config: pointer to ixgbe_dcb_config structure
3048 ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
3049 struct ixgbe_dcb_config *dcb_config)
3051 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3052 &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
3053 struct ixgbe_hw *hw =
3054 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3056 PMD_INIT_FUNC_TRACE();
3057 if (hw->mac.type != ixgbe_mac_82598EB)
3058 /*PF VF Transmit Enable*/
3059 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0),
3060 vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
3062 /*Configure general DCB TX parameters*/
3063 ixgbe_dcb_tx_hw_config(hw,dcb_config);
3068 ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
3069 struct ixgbe_dcb_config *dcb_config)
3071 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3072 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
3073 struct ixgbe_dcb_tc_config *tc;
3076 /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
3077 if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS ) {
3078 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
3079 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
3082 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
3083 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
3085 /* User Priority to Traffic Class mapping */
3086 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3087 j = vmdq_rx_conf->dcb_tc[i];
3088 tc = &dcb_config->tc_config[j];
3089 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
3095 ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
3096 struct ixgbe_dcb_config *dcb_config)
3098 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3099 &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
3100 struct ixgbe_dcb_tc_config *tc;
3103 /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
3104 if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ) {
3105 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
3106 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
3109 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
3110 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
3113 /* User Priority to Traffic Class mapping */
3114 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3115 j = vmdq_tx_conf->dcb_tc[i];
3116 tc = &dcb_config->tc_config[j];
3117 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
3124 ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
3125 struct ixgbe_dcb_config *dcb_config)
3127 struct rte_eth_dcb_rx_conf *rx_conf =
3128 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
3129 struct ixgbe_dcb_tc_config *tc;
3132 dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
3133 dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
3135 /* User Priority to Traffic Class mapping */
3136 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3137 j = rx_conf->dcb_tc[i];
3138 tc = &dcb_config->tc_config[j];
3139 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
3145 ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
3146 struct ixgbe_dcb_config *dcb_config)
3148 struct rte_eth_dcb_tx_conf *tx_conf =
3149 &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
3150 struct ixgbe_dcb_tc_config *tc;
3153 dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
3154 dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
3156 /* User Priority to Traffic Class mapping */
3157 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3158 j = tx_conf->dcb_tc[i];
3159 tc = &dcb_config->tc_config[j];
3160 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
3166 * ixgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
3167 * @hw: pointer to hardware structure
3168 * @dcb_config: pointer to ixgbe_dcb_config structure
3171 ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
3172 struct ixgbe_dcb_config *dcb_config)
3178 PMD_INIT_FUNC_TRACE();
3180 * Disable the arbiter before changing parameters
3181 * (always enable recycle mode; WSP)
3183 reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
3184 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
3186 if (hw->mac.type != ixgbe_mac_82598EB) {
3187 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
3188 if (dcb_config->num_tcs.pg_tcs == 4) {
3189 if (dcb_config->vt_mode)
3190 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3191 IXGBE_MRQC_VMDQRT4TCEN;
3193 /* no matter whether the mode is DCB or DCB_RSS, just
3194 * set the MRQE to RSSXTCEN; RSS is controlled
3197 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
3198 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3199 IXGBE_MRQC_RTRSS4TCEN;
3202 if (dcb_config->num_tcs.pg_tcs == 8) {
3203 if (dcb_config->vt_mode)
3204 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3205 IXGBE_MRQC_VMDQRT8TCEN;
3207 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
3208 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3209 IXGBE_MRQC_RTRSS8TCEN;
3213 IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
3216 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
3217 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3218 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
3219 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
3221 /* VFTA - enable all vlan filters */
3222 for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
3223 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
3227 * Configure Rx packet plane (recycle mode; WSP) and re-enable the arbiter.
3230 reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
3231 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
3237 ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill,
3238 uint16_t *max,uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
3240 switch (hw->mac.type) {
3241 case ixgbe_mac_82598EB:
3242 ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
3244 case ixgbe_mac_82599EB:
3245 case ixgbe_mac_X540:
3246 case ixgbe_mac_X550:
3247 case ixgbe_mac_X550EM_x:
3248 case ixgbe_mac_X550EM_a:
3249 ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
3258 ixgbe_dcb_hw_arbite_tx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *max,
3259 uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
3261 switch (hw->mac.type) {
3262 case ixgbe_mac_82598EB:
3263 ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id,tsa);
3264 ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id,tsa);
3266 case ixgbe_mac_82599EB:
3267 case ixgbe_mac_X540:
3268 case ixgbe_mac_X550:
3269 case ixgbe_mac_X550EM_x:
3270 case ixgbe_mac_X550EM_a:
3271 ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,tsa);
3272 ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,tsa, map);
3279 #define DCB_RX_CONFIG 1
3280 #define DCB_TX_CONFIG 1
3281 #define DCB_TX_PB 1024
3283 * ixgbe_dcb_hw_configure - Enable DCB and configure
3284 * general DCB in VT mode and non-VT mode parameters
3285 * @dev: pointer to rte_eth_dev structure
3286 * @dcb_config: pointer to ixgbe_dcb_config structure
3289 ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
3290 struct ixgbe_dcb_config *dcb_config)
3293 uint8_t i, pfc_en, nb_tcs;
3294 uint16_t pbsize, rx_buffer_size;
3295 uint8_t config_dcb_rx = 0;
3296 uint8_t config_dcb_tx = 0;
3297 uint8_t tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3298 uint8_t bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3299 uint16_t refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3300 uint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3301 uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3302 struct ixgbe_dcb_tc_config *tc;
3303 uint32_t max_frame = dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
3304 struct ixgbe_hw *hw =
3305 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3307 switch (dev->data->dev_conf.rxmode.mq_mode) {
3308 case ETH_MQ_RX_VMDQ_DCB:
3309 dcb_config->vt_mode = true;
3310 if (hw->mac.type != ixgbe_mac_82598EB) {
3311 config_dcb_rx = DCB_RX_CONFIG;
3313 * get DCB and VT RX configuration parameters from rte_eth_conf
3316 ixgbe_vmdq_dcb_rx_config(dev, dcb_config);
3317 /*Configure general VMDQ and DCB RX parameters*/
3318 ixgbe_vmdq_dcb_configure(dev);
3322 case ETH_MQ_RX_DCB_RSS:
3323 dcb_config->vt_mode = false;
3324 config_dcb_rx = DCB_RX_CONFIG;
3325 /* Get DCB RX configuration parameters from rte_eth_conf */
3326 ixgbe_dcb_rx_config(dev, dcb_config);
3327 /*Configure general DCB RX parameters*/
3328 ixgbe_dcb_rx_hw_config(hw, dcb_config);
3331 PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
3334 switch (dev->data->dev_conf.txmode.mq_mode) {
3335 case ETH_MQ_TX_VMDQ_DCB:
3336 dcb_config->vt_mode = true;
3337 config_dcb_tx = DCB_TX_CONFIG;
3338 /* get DCB and VT TX configuration parameters from rte_eth_conf */
3339 ixgbe_dcb_vt_tx_config(dev,dcb_config);
3340 /*Configure general VMDQ and DCB TX parameters*/
3341 ixgbe_vmdq_dcb_hw_tx_config(dev,dcb_config);
3345 dcb_config->vt_mode = false;
3346 config_dcb_tx = DCB_TX_CONFIG;
3347 /* get DCB TX configuration parameters from rte_eth_conf */
3348 ixgbe_dcb_tx_config(dev, dcb_config);
3349 /*Configure general DCB TX parameters*/
3350 ixgbe_dcb_tx_hw_config(hw, dcb_config);
3353 PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration");
3357 nb_tcs = dcb_config->num_tcs.pfc_tcs;
3359 ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
3360 if (nb_tcs == ETH_4_TCS) {
3361 /* Avoid un-configured priority mapping to TC0 */
3363 uint8_t mask = 0xFF;
3364 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
3365 mask = (uint8_t)(mask & (~ (1 << map[i])));
3366 for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
3367 if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
3371 /* Re-configure 4 TCs BW */
3372 for (i = 0; i < nb_tcs; i++) {
3373 tc = &dcb_config->tc_config[i];
3374 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
3375 (uint8_t)(100 / nb_tcs);
3376 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
3377 (uint8_t)(100 / nb_tcs);
3379 for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
3380 tc = &dcb_config->tc_config[i];
3381 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
3382 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 0;
3386 switch (hw->mac.type) {
3387 case ixgbe_mac_X550:
3388 case ixgbe_mac_X550EM_x:
3389 case ixgbe_mac_X550EM_a:
3390 rx_buffer_size = X550_RX_BUFFER_SIZE;
3393 rx_buffer_size = NIC_RX_BUFFER_SIZE;
3397 if (config_dcb_rx) {
3398 /* Set RX buffer size */
3399 pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
3400 uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT;
3401 for (i = 0; i < nb_tcs; i++) {
3402 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
3404 /* zero alloc all unused TCs */
3405 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3406 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
3409 if (config_dcb_tx) {
3410 /* Only support an equally distributed Tx packet buffer strategy. */
3411 uint32_t txpktsize = IXGBE_TXPBSIZE_MAX / nb_tcs;
3412 uint32_t txpbthresh = (txpktsize / DCB_TX_PB) - IXGBE_TXPKT_SIZE_MAX;
3413 for (i = 0; i < nb_tcs; i++) {
3414 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
3415 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
3417 /* Clear unused TCs, if any, to zero buffer size*/
3418 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3419 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
3420 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
3424 /*Calculates traffic class credits*/
3425 ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame,
3426 IXGBE_DCB_TX_CONFIG);
3427 ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame,
3428 IXGBE_DCB_RX_CONFIG);
3430 if (config_dcb_rx) {
3431 /* Unpack CEE standard containers */
3432 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_RX_CONFIG, refill);
3433 ixgbe_dcb_unpack_max_cee(dcb_config, max);
3434 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_RX_CONFIG, bwgid);
3435 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_RX_CONFIG, tsa);
3436 /* Configure PG(ETS) RX */
3437 ixgbe_dcb_hw_arbite_rx_config(hw,refill,max,bwgid,tsa,map);
3440 if (config_dcb_tx) {
3441 /* Unpack CEE standard containers */
3442 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
3443 ixgbe_dcb_unpack_max_cee(dcb_config, max);
3444 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
3445 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
3446 /* Configure PG(ETS) TX */
3447 ixgbe_dcb_hw_arbite_tx_config(hw,refill,max,bwgid,tsa,map);
3450 /*Configure queue statistics registers*/
3451 ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
3453 /* Check if the PFC is supported */
3454 if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
3455 pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
3456 for (i = 0; i < nb_tcs; i++) {
3458 * If the TC count is 8 and the default high_water is 48,
3459 * the low_water defaults to 16.
3461 hw->fc.high_water[i] = (pbsize * 3 ) / 4;
3462 hw->fc.low_water[i] = pbsize / 4;
3463 /* Enable pfc for this TC */
3464 tc = &dcb_config->tc_config[i];
3465 tc->pfc = ixgbe_dcb_pfc_enabled;
3467 ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
3468 if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
3470 ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
3477 * ixgbe_configure_dcb - Configure DCB Hardware
3478 * @dev: pointer to rte_eth_dev
3480 void ixgbe_configure_dcb(struct rte_eth_dev *dev)
3482 struct ixgbe_dcb_config *dcb_cfg =
3483 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
3484 struct rte_eth_conf *dev_conf = &(dev->data->dev_conf);
3486 PMD_INIT_FUNC_TRACE();
3488 /* check support mq_mode for DCB */
3489 if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
3490 (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) &&
3491 (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS))
3494 if (dev->data->nb_rx_queues != ETH_DCB_NUM_QUEUES)
3497 /** Configure DCB hardware **/
3498 ixgbe_dcb_hw_configure(dev, dcb_cfg);
3504 * VMDq is only supported on 10 GbE NICs.
3507 ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
3509 struct rte_eth_vmdq_rx_conf *cfg;
3510 struct ixgbe_hw *hw;
3511 enum rte_eth_nb_pools num_pools;
3512 uint32_t mrqc, vt_ctl, vlanctrl;
3516 PMD_INIT_FUNC_TRACE();
3517 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3518 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
3519 num_pools = cfg->nb_queue_pools;
3521 ixgbe_rss_disable(dev);
3523 /* MRQC: enable vmdq */
3524 mrqc = IXGBE_MRQC_VMDQEN;
3525 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3527 /* PFVTCTL: turn on virtualisation and set the default pool */
3528 vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
3529 if (cfg->enable_default_pool)
3530 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
3532 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
3534 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
3536 for (i = 0; i < (int)num_pools; i++) {
3537 vmolr = ixgbe_convert_vm_rx_mask_to_val(cfg->rx_mode, vmolr);
3538 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
3541 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
3542 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3543 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
3544 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
3546 /* VFTA - enable all vlan filters */
3547 for (i = 0; i < NUM_VFTA_REGISTERS; i++)
3548 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), UINT32_MAX);
3550 /* VFRE: pool enabling for receive - 64 */
3551 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), UINT32_MAX);
3552 if (num_pools == ETH_64_POOLS)
3553 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), UINT32_MAX);
3556 * MPSAR - allow pools to read specific mac addresses
3557 * In this case, all pools should be able to read from mac addr 0
3559 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), UINT32_MAX);
3560 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), UINT32_MAX);
3562 /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
3563 for (i = 0; i < cfg->nb_pool_maps; i++) {
3564 /* set vlan id in VF register and set the valid bit */
3565 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \
3566 (cfg->pool_map[i].vlan_id & IXGBE_RXD_VLAN_ID_MASK)));
3568 * Put the allowed pools in VFB reg. With up to 64 pools the mask
3569 * may span both 32-bit halves of the VLVFB register pair.
3572 if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0)
3573 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), \
3574 (cfg->pool_map[i].pools & UINT32_MAX));
3576 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((i*2+1)), \
3577 ((cfg->pool_map[i].pools >> 32) \
3582 /* PFDMA Tx General Switch Control Enables VMDQ loopback */
3583 if (cfg->enable_loop_back) {
3584 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
3585 for (i = 0; i < RTE_IXGBE_VMTXSW_REGISTER_COUNT; i++)
3586 IXGBE_WRITE_REG(hw, IXGBE_VMTXSW(i), UINT32_MAX);
3589 IXGBE_WRITE_FLUSH(hw);
3593 * ixgbe_vmdq_tx_hw_configure - Configure general VMDq TX parameters
3594 * @hw: pointer to hardware structure
3597 ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
3602 PMD_INIT_FUNC_TRACE();
3603 /*PF VF Transmit Enable*/
3604 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), UINT32_MAX);
3605 IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), UINT32_MAX);
3607 /* Disable the Tx desc arbiter so that MTQC can be changed */
3608 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3609 reg |= IXGBE_RTTDCS_ARBDIS;
3610 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3612 reg = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
3613 IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
3615 /* Disable drop for all queues */
3616 for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
3617 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3618 (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
3620 /* Enable the Tx desc arbiter */
3621 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3622 reg &= ~IXGBE_RTTDCS_ARBDIS;
3623 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3625 IXGBE_WRITE_FLUSH(hw);
3630 static int __attribute__((cold))
3631 ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
3633 struct ixgbe_rx_entry *rxe = rxq->sw_ring;
3637 /* Initialize software ring entries */
3638 for (i = 0; i < rxq->nb_rx_desc; i++) {
3639 volatile union ixgbe_adv_rx_desc *rxd;
3640 struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
3642 PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
3643 (unsigned) rxq->queue_id);
3647 rte_mbuf_refcnt_set(mbuf, 1);
3649 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
3651 mbuf->port = rxq->port_id;
3654 rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
3655 rxd = &rxq->rx_ring[i];
3656 rxd->read.hdr_addr = 0;
3657 rxd->read.pkt_addr = dma_addr;
3665 ixgbe_config_vf_rss(struct rte_eth_dev *dev)
3667 struct ixgbe_hw *hw;
3670 ixgbe_rss_configure(dev);
3672 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3674 /* MRQC: enable VF RSS */
3675 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
3676 mrqc &= ~IXGBE_MRQC_MRQE_MASK;
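/* The number of active SR-IOV pools selects the MRQE mode written
* below: 64 pools use VMDQRSS64EN and 32 pools use VMDQRSS32EN; any
* other pool count is rejected.
*/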
3677 switch (RTE_ETH_DEV_SRIOV(dev).active) {
3679 mrqc |= IXGBE_MRQC_VMDQRSS64EN;
3683 mrqc |= IXGBE_MRQC_VMDQRSS32EN;
3687 PMD_INIT_LOG(ERR, "Invalid pool number in IOV mode with VMDQ RSS");
3691 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3697 ixgbe_config_vf_default(struct rte_eth_dev *dev)
3699 struct ixgbe_hw *hw =
3700 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3702 switch (RTE_ETH_DEV_SRIOV(dev).active) {
3704 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
3709 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
3710 IXGBE_MRQC_VMDQRT4TCEN);
3714 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
3715 IXGBE_MRQC_VMDQRT8TCEN);
3719 "invalid pool number in IOV mode");
3726 ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
3728 struct ixgbe_hw *hw =
3729 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3731 if (hw->mac.type == ixgbe_mac_82598EB)
3734 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3736 * SRIOV inactive scheme
3737 * any DCB/RSS w/o VMDq multi-queue setting
3739 switch (dev->data->dev_conf.rxmode.mq_mode) {
3741 case ETH_MQ_RX_DCB_RSS:
3742 case ETH_MQ_RX_VMDQ_RSS:
3743 ixgbe_rss_configure(dev);
3746 case ETH_MQ_RX_VMDQ_DCB:
3747 ixgbe_vmdq_dcb_configure(dev);
3750 case ETH_MQ_RX_VMDQ_ONLY:
3751 ixgbe_vmdq_rx_hw_configure(dev);
3754 case ETH_MQ_RX_NONE:
3756 /* if mq_mode is none, disable rss mode.*/
3757 ixgbe_rss_disable(dev);
3762 * SRIOV active scheme
3763 * Support RSS together with VMDq & SRIOV
3765 switch (dev->data->dev_conf.rxmode.mq_mode) {
3767 case ETH_MQ_RX_VMDQ_RSS:
3768 ixgbe_config_vf_rss(dev);
3771 /* FIXME if support DCB/RSS together with VMDq & SRIOV */
3772 case ETH_MQ_RX_VMDQ_DCB:
3773 case ETH_MQ_RX_VMDQ_DCB_RSS:
3775 "Could not support DCB with VMDq & SRIOV");
3778 ixgbe_config_vf_default(dev);
3787 ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
3789 struct ixgbe_hw *hw =
3790 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3794 if (hw->mac.type == ixgbe_mac_82598EB)
3797 /* disable arbiter before setting MTQC */
3798 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3799 rttdcs |= IXGBE_RTTDCS_ARBDIS;
3800 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3802 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3804 * SRIOV inactive scheme
3805 * any DCB w/o VMDq multi-queue setting
3807 if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
3808 ixgbe_vmdq_tx_hw_configure(hw);
3810 mtqc = IXGBE_MTQC_64Q_1PB;
3811 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
3814 switch (RTE_ETH_DEV_SRIOV(dev).active) {
3817 * SRIOV active scheme
3818 * FIXME if support DCB together with VMDq & SRIOV
3821 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
3824 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_32VF;
3827 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_RT_ENA |
3831 mtqc = IXGBE_MTQC_64Q_1PB;
3832 PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
3834 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
3837 /* re-enable arbiter */
3838 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
3839 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3845 * ixgbe_get_rscctl_maxdesc - Calculate the RSCCTL[n].MAXDESC for PF
3847 * Return the RSCCTL[n].MAXDESC for 82599 and x540 PF devices according to the
3848 * spec rev. 3.0 chapter 8.2.3.8.13.
3850 * @pool Memory pool of the Rx queue
3852 static inline uint32_t
3853 ixgbe_get_rscctl_maxdesc(struct rte_mempool *pool)
3855 struct rte_pktmbuf_pool_private *mp_priv = rte_mempool_get_priv(pool);
3857 /* MAXDESC * SRRCTL.BSIZEPKT must not exceed 64 KB minus one */
3860 (mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);
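/* Illustrative: with a 2 KiB mbuf data room and 128 B of headroom the
* divisor is 1920 B, so roughly 64 KB / 1920 B = 34 buffers fit under
* the limit and IXGBE_RSCCTL_MAXDESC_16 is selected below.
*/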
3863 return IXGBE_RSCCTL_MAXDESC_16;
3864 else if (maxdesc >= 8)
3865 return IXGBE_RSCCTL_MAXDESC_8;
3866 else if (maxdesc >= 4)
3867 return IXGBE_RSCCTL_MAXDESC_4;
3869 return IXGBE_RSCCTL_MAXDESC_1;
3873 * ixgbe_set_ivar - Setup the correct IVAR register for a particular MSIX
3876 * (Taken from FreeBSD tree)
3877 * (yes this is all very magic and confusing :)
3880 * @entry the register array entry
3881 * @vector the MSIX vector for this queue
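* Example (illustrative, 82599/X540 RX/TX case): for Rx queue 5
* (type 0) the code selects IVAR(5 >> 1) = IVAR(2) and writes the
* vector at bit offset 16 * (5 & 1) + 8 * 0 = 16 of that register.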
3885 ixgbe_set_ivar(struct rte_eth_dev *dev, u8 entry, u8 vector, s8 type)
3887 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3890 vector |= IXGBE_IVAR_ALLOC_VAL;
3892 switch (hw->mac.type) {
3894 case ixgbe_mac_82598EB:
3896 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3898 entry += (type * 64);
3899 index = (entry >> 2) & 0x1F;
3900 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3901 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3902 ivar |= (vector << (8 * (entry & 0x3)));
3903 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
3906 case ixgbe_mac_82599EB:
3907 case ixgbe_mac_X540:
3908 if (type == -1) { /* MISC IVAR */
3909 index = (entry & 1) * 8;
3910 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3911 ivar &= ~(0xFF << index);
3912 ivar |= (vector << index);
3913 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3914 } else { /* RX/TX IVARS */
3915 index = (16 * (entry & 1)) + (8 * type);
3916 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3917 ivar &= ~(0xFF << index);
3918 ivar |= (vector << index);
3919 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3929 void __attribute__((cold))
3930 ixgbe_set_rx_function(struct rte_eth_dev *dev)
3932 uint16_t i, rx_using_sse;
3933 struct ixgbe_adapter *adapter =
3934 (struct ixgbe_adapter *)dev->data->dev_private;
3937 * In order to allow Vector Rx there are a few configuration
3938 * conditions to be met and Rx Bulk Allocation should be allowed.
3940 if (ixgbe_rx_vec_dev_conf_condition_check(dev) ||
3941 !adapter->rx_bulk_alloc_allowed) {
3942 PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx "
3943 "preconditions or RTE_IXGBE_INC_VECTOR is "
3945 dev->data->port_id);
3947 adapter->rx_vec_allowed = false;
3951 * Initialize the appropriate LRO callback.
3953 * If all queues satisfy the bulk allocation preconditions
3954 * (hw->rx_bulk_alloc_allowed is TRUE) then we may use bulk allocation.
3955 * Otherwise use a single allocation version.
3957 if (dev->data->lro) {
3958 if (adapter->rx_bulk_alloc_allowed) {
3959 PMD_INIT_LOG(DEBUG, "LRO is requested. Using a bulk "
3960 "allocation version");
3961 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
3963 PMD_INIT_LOG(DEBUG, "LRO is requested. Using a single "
3964 "allocation version");
3965 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
3967 } else if (dev->data->scattered_rx) {
3969 * Set the non-LRO scattered callback: there are Vector and
3970 * single allocation versions.
3972 if (adapter->rx_vec_allowed) {
3973 PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
3974 "callback (port=%d).",
3975 dev->data->port_id);
3977 dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
3978 } else if (adapter->rx_bulk_alloc_allowed) {
3979 PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
3980 "allocation callback (port=%d).",
3981 dev->data->port_id);
3982 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
3984 PMD_INIT_LOG(DEBUG, "Using Regualr (non-vector, "
3985 "single allocation) "
3986 "Scattered Rx callback "
3988 dev->data->port_id);
3990 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
3993 * Below we set "simple" callbacks according to port/queues parameters.
3994 * If parameters allow we are going to choose between the following
3998 * callbacks: Vector, Bulk Allocation, or Single buffer allocation (the simplest one).
4000 } else if (adapter->rx_vec_allowed) {
4001 PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
4002 "burst size no less than %d (port=%d).",
4003 RTE_IXGBE_DESCS_PER_LOOP,
4004 dev->data->port_id);
4006 dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
4007 } else if (adapter->rx_bulk_alloc_allowed) {
4008 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
4009 "satisfied. Rx Burst Bulk Alloc function "
4010 "will be used on port=%d.",
4011 dev->data->port_id);
4013 dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
4015 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
4016 "satisfied, or Scattered Rx is requested "
4018 dev->data->port_id);
4020 dev->rx_pkt_burst = ixgbe_recv_pkts;
4023 /* Propagate information about RX function choice through all queues. */
4026 (dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec ||
4027 dev->rx_pkt_burst == ixgbe_recv_pkts_vec);
4029 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4030 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
4031 rxq->rx_using_sse = rx_using_sse;
4036 * ixgbe_set_rsc - configure RSC related port HW registers
4038 * Configures the port's RSC related registers according to the 4.6.7.2 chapter
4039 * of 82599 Spec (x540 configuration is virtually the same).
4043 * Returns 0 in case of success or a non-zero error code
4046 ixgbe_set_rsc(struct rte_eth_dev *dev)
4048 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
4049 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4050 struct rte_eth_dev_info dev_info = { 0 };
4051 bool rsc_capable = false;
4056 dev->dev_ops->dev_infos_get(dev, &dev_info);
4057 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
4060 if (!rsc_capable && rx_conf->enable_lro) {
4061 PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
4066 /* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
4068 if (!rx_conf->hw_strip_crc && rx_conf->enable_lro) {
4070 * According to chapter 4.6.7.2.1 of the Spec Rev.
4071 * 3.0, RSC configuration requires HW CRC stripping to be
4072 * enabled. If the user requested both HW CRC stripping off
4073 * and RSC on, return an error.
4075 PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC "
4080 /* RFCTL configuration */
4082 uint32_t rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
4083 if (rx_conf->enable_lro)
4085 * Since NFS packet coalescing is not supported - clear
4086 * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
4089 rfctl &= ~(IXGBE_RFCTL_RSC_DIS | IXGBE_RFCTL_NFSW_DIS |
4090 IXGBE_RFCTL_NFSR_DIS);
4092 rfctl |= IXGBE_RFCTL_RSC_DIS;
4094 IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
4097 /* If LRO hasn't been requested - we are done here. */
4098 if (!rx_conf->enable_lro)
4101 /* Set RDRXCTL.RSCACKC bit */
4102 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
4103 rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
4104 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
4106 /* Per-queue RSC configuration (chapter 4.6.7.2.2 of 82599 Spec) */
4107 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4108 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
4110 IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxq->reg_idx));
4112 IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxq->reg_idx));
4114 IXGBE_READ_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx));
4116 IXGBE_READ_REG(hw, IXGBE_EITR(rxq->reg_idx));
4119 * ixgbe PMD doesn't support header-split at the moment.
4121 * Following chapter 4.6.7.2.1 of the 82599/x540
4122 * Spec, if RSC is enabled the SRRCTL[n].BSIZEHEADER
4123 * should be configured even if header split is not
4124 * enabled. We configure it to 128 bytes, following the
4125 * recommendation in the spec.
4127 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
4128 srrctl |= (128 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
4129 IXGBE_SRRCTL_BSIZEHDR_MASK;
4132 * TODO: Consider setting the Receive Descriptor Minimum
4133 * Threshold Size for the RSC case. This is not an obviously
4134 * beneficial option, but one worth considering...
4137 rscctl |= IXGBE_RSCCTL_RSCEN;
4138 rscctl |= ixgbe_get_rscctl_maxdesc(rxq->mb_pool);
4139 psrtype |= IXGBE_PSRTYPE_TCPHDR;
4142 * RSC: Set ITR interval corresponding to 2K ints/s.
4144 * Full-sized RSC aggregations for a 10Gb/s link will
4145 * arrive at a rate of about 20K aggregations/s.
4147 * A 2K ints/s rate will cause only about 10% of the
4148 * aggregations to be closed due to interrupt timer
4149 * expiration in the wire-speed streaming case.
4151 * For a sparse streaming case this setting will yield
4152 * at most 500us latency for a single RSC aggregation.
4154 eitr &= ~IXGBE_EITR_ITR_INT_MASK;
4155 eitr |= IXGBE_EITR_INTERVAL_US(500) | IXGBE_EITR_CNT_WDIS;
4157 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
4158 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxq->reg_idx), rscctl);
4159 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
4160 IXGBE_WRITE_REG(hw, IXGBE_EITR(rxq->reg_idx), eitr);
4163 * RSC requires the mapping of the queue to the
4166 ixgbe_set_ivar(dev, rxq->reg_idx, i, 0);
4171 PMD_INIT_LOG(DEBUG, "enabling LRO mode");
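/*
 * Illustration only, not part of the driver: the arithmetic behind the
 * 500us EITR interval programmed in ixgbe_set_rsc() above. Assuming a
 * 10Gb/s link and full-sized (~64KB) RSC aggregations, aggregations
 * complete at roughly 20K/s, so a 2K ints/s timer expires on only about
 * 10% of them while bounding sparse-stream latency to ~500us. The guard
 * macro and helper name are hypothetical.
 */
#ifdef IXGBE_RXTX_DOC_EXAMPLES
static double
example_rsc_timer_expiry_ratio(double link_gbps, double aggr_bytes,
			       double eitr_us)
{
	/* Full-sized aggregations completed per second at wire speed. */
	double aggr_per_s = (link_gbps * 1e9 / 8.0) / aggr_bytes;
	/* Timer interrupts per second for the chosen EITR interval. */
	double timer_ints_per_s = 1e6 / eitr_us;

	/* ~0.1 for a 10G link, 64KB aggregations and a 500us interval. */
	return timer_ints_per_s / aggr_per_s;
}
#endif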
4177 * Initializes Receive Unit.
4179 int __attribute__((cold))
4180 ixgbe_dev_rx_init(struct rte_eth_dev *dev)
4182 struct ixgbe_hw *hw;
4183 struct ixgbe_rx_queue *rxq;
4194 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
4197 PMD_INIT_FUNC_TRACE();
4198 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4201 * Make sure receives are disabled while setting
4202 * up the RX context (registers, descriptor rings, etc.).
4204 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4205 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
4207 /* Enable receipt of broadcast frames */
4208 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4209 fctrl |= IXGBE_FCTRL_BAM;
4210 fctrl |= IXGBE_FCTRL_DPF;
4211 fctrl |= IXGBE_FCTRL_PMCF;
4212 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4215 * Configure CRC stripping, if requested.
4217 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4218 if (rx_conf->hw_strip_crc)
4219 hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
4221 hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
4224 * Configure jumbo frame support, if requested.
4226 if (rx_conf->jumbo_frame == 1) {
4227 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
4228 maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
4229 maxfrs &= 0x0000FFFF;
4230 maxfrs |= (rx_conf->max_rx_pkt_len << 16);
4231 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
4233 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
4236 * If loopback mode is configured for 82599, set LPBK bit.
4238 if (hw->mac.type == ixgbe_mac_82599EB &&
4239 dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
4240 hlreg0 |= IXGBE_HLREG0_LPBK;
4242 hlreg0 &= ~IXGBE_HLREG0_LPBK;
4244 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4246 /* Setup RX queues */
4247 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4248 rxq = dev->data->rx_queues[i];
4251 * Reset crc_len in case it was changed after queue setup by a
4252 * call to configure.
4254 rxq->crc_len = rx_conf->hw_strip_crc ? 0 : ETHER_CRC_LEN;
4256 /* Setup the Base and Length of the Rx Descriptor Rings */
4257 bus_addr = rxq->rx_ring_phys_addr;
4258 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rxq->reg_idx),
4259 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4260 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rxq->reg_idx),
4261 (uint32_t)(bus_addr >> 32));
4262 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rxq->reg_idx),
4263 rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
4264 IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
4265 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), 0);
4267 /* Configure the SRRCTL register */
4268 #ifdef RTE_HEADER_SPLIT_ENABLE
4270 * Configure Header Split
4272 if (rx_conf->header_split) {
4273 if (hw->mac.type == ixgbe_mac_82599EB) {
4274 /* Must setup the PSRTYPE register */
4276 psrtype = IXGBE_PSRTYPE_TCPHDR |
4277 IXGBE_PSRTYPE_UDPHDR |
4278 IXGBE_PSRTYPE_IPV4HDR |
4279 IXGBE_PSRTYPE_IPV6HDR;
4280 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
4282 srrctl = ((rx_conf->split_hdr_size <<
4283 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
4284 IXGBE_SRRCTL_BSIZEHDR_MASK);
4285 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
4288 srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
4290 /* Set if packets are to be dropped when no descriptors are available */
4292 srrctl |= IXGBE_SRRCTL_DROP_EN;
4295 * Configure the RX buffer size in the BSIZEPACKET field of
4296 * the SRRCTL register of the queue.
4297 * The value is in 1 KB resolution. Valid values can be from
4300 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
4301 RTE_PKTMBUF_HEADROOM);
4302 srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
4303 IXGBE_SRRCTL_BSIZEPKT_MASK);
4305 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
4307 buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
4308 IXGBE_SRRCTL_BSIZEPKT_SHIFT);
4310 /* Account for two VLAN tag lengths to support dual VLAN */
4311 if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
4312 2 * IXGBE_VLAN_TAG_SIZE > buf_size)
4313 dev->data->scattered_rx = 1;
4316 if (rx_conf->enable_scatter)
4317 dev->data->scattered_rx = 1;
4320 * Device configured with multiple RX queues.
4322 ixgbe_dev_mq_rx_configure(dev);
4325 * Setup the Checksum Register.
4326 * Disable Full-Packet Checksum which is mutually exclusive with RSS.
4327 * Enable IP/L4 checksum computation by hardware if requested to do so.
4329 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
4330 rxcsum |= IXGBE_RXCSUM_PCSD;
4331 if (rx_conf->hw_ip_checksum)
4332 rxcsum |= IXGBE_RXCSUM_IPPCSE;
4334 rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
4336 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
4338 if (hw->mac.type == ixgbe_mac_82599EB ||
4339 hw->mac.type == ixgbe_mac_X540) {
4340 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
4341 if (rx_conf->hw_strip_crc)
4342 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
4344 rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
4345 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
4346 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
4349 rc = ixgbe_set_rsc(dev);
4353 ixgbe_set_rx_function(dev);
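/*
 * Illustration only, not part of the driver: how the Rx buffer size derived
 * in ixgbe_dev_rx_init() above maps to SRRCTL.BSIZEPACKET (1KB units) and
 * to the scattered-Rx decision. With a typical 2048-byte mbuf data room
 * (plus headroom), BSIZEPACKET is 2 and frames longer than
 * 2048 - 2 * IXGBE_VLAN_TAG_SIZE force scattered Rx. The guard macro and
 * helper name are hypothetical.
 */
#ifdef IXGBE_RXTX_DOC_EXAMPLES
static int
example_needs_scattered_rx(struct rte_mempool *mp, uint32_t max_rx_pkt_len)
{
	/* Usable Rx buffer size: mbuf data room minus the headroom,
	 * rounded down to 1KB as the BSIZEPACKET encoding does. */
	uint16_t buf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
				       RTE_PKTMBUF_HEADROOM);
	buf_size = (buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) <<
		   IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	/* Dual VLAN tag lengths are accounted for, as in the code above. */
	return (max_rx_pkt_len + 2 * IXGBE_VLAN_TAG_SIZE) > buf_size;
}
#endif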
4359 * Initializes Transmit Unit.
4361 void __attribute__((cold))
4362 ixgbe_dev_tx_init(struct rte_eth_dev *dev)
4364 struct ixgbe_hw *hw;
4365 struct ixgbe_tx_queue *txq;
4371 PMD_INIT_FUNC_TRACE();
4372 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4374 /* Enable TX CRC (checksum offload requirement) and hw padding
4375 * (TSO requirement) */
4376 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4377 hlreg0 |= (IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN);
4378 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4380 /* Setup the Base and Length of the Tx Descriptor Rings */
4381 for (i = 0; i < dev->data->nb_tx_queues; i++) {
4382 txq = dev->data->tx_queues[i];
4384 bus_addr = txq->tx_ring_phys_addr;
4385 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(txq->reg_idx),
4386 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4387 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(txq->reg_idx),
4388 (uint32_t)(bus_addr >> 32));
4389 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(txq->reg_idx),
4390 txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
4391 /* Setup the HW Tx Head and TX Tail descriptor pointers */
4392 IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
4393 IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
4396 * Disable Tx Head Writeback RO bit, since this hoses
4397 * bookkeeping if things aren't delivered in order.
4399 switch (hw->mac.type) {
4400 case ixgbe_mac_82598EB:
4401 txctrl = IXGBE_READ_REG(hw,
4402 IXGBE_DCA_TXCTRL(txq->reg_idx));
4403 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4404 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx),
4408 case ixgbe_mac_82599EB:
4409 case ixgbe_mac_X540:
4410 case ixgbe_mac_X550:
4411 case ixgbe_mac_X550EM_x:
4412 case ixgbe_mac_X550EM_a:
4414 txctrl = IXGBE_READ_REG(hw,
4415 IXGBE_DCA_TXCTRL_82599(txq->reg_idx));
4416 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4417 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx),
4423 /* Device configured with multiple TX queues. */
4424 ixgbe_dev_mq_tx_configure(dev);
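/*
 * Illustration only, not part of the driver: the base-address split used
 * for both the Rx and Tx descriptor ring registers above. The 64-bit ring
 * bus address is programmed as two 32-bit halves (xDBAL = low 32 bits,
 * xDBAH = high 32 bits). The guard macro and helper name are hypothetical.
 */
#ifdef IXGBE_RXTX_DOC_EXAMPLES
static void
example_split_ring_base(uint64_t bus_addr, uint32_t *bal, uint32_t *bah)
{
	*bal = (uint32_t)(bus_addr & 0x00000000ffffffffULL);
	*bah = (uint32_t)(bus_addr >> 32);
}
#endif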
4428 * Set up link for 82599 loopback mode Tx->Rx.
4430 static inline void __attribute__((cold))
4431 ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
4433 PMD_INIT_FUNC_TRACE();
4435 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
4436 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM) !=
4438 PMD_INIT_LOG(ERR, "Could not enable loopback mode");
4447 IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU);
4448 ixgbe_reset_pipeline_82599(hw);
4450 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
4456 * Start Transmit and Receive Units.
4458 int __attribute__((cold))
4459 ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
4461 struct ixgbe_hw *hw;
4462 struct ixgbe_tx_queue *txq;
4463 struct ixgbe_rx_queue *rxq;
4470 PMD_INIT_FUNC_TRACE();
4471 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4473 for (i = 0; i < dev->data->nb_tx_queues; i++) {
4474 txq = dev->data->tx_queues[i];
4475 /* Setup Transmit Threshold Registers */
4476 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
4477 txdctl |= txq->pthresh & 0x7F;
4478 txdctl |= ((txq->hthresh & 0x7F) << 8);
4479 txdctl |= ((txq->wthresh & 0x7F) << 16);
4480 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
4483 if (hw->mac.type != ixgbe_mac_82598EB) {
4484 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
4485 dmatxctl |= IXGBE_DMATXCTL_TE;
4486 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
4489 for (i = 0; i < dev->data->nb_tx_queues; i++) {
4490 txq = dev->data->tx_queues[i];
4491 if (!txq->tx_deferred_start) {
4492 ret = ixgbe_dev_tx_queue_start(dev, i);
4498 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4499 rxq = dev->data->rx_queues[i];
4500 if (!rxq->rx_deferred_start) {
4501 ret = ixgbe_dev_rx_queue_start(dev, i);
4507 /* Enable Receive engine */
4508 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4509 if (hw->mac.type == ixgbe_mac_82598EB)
4510 rxctrl |= IXGBE_RXCTRL_DMBYPS;
4511 rxctrl |= IXGBE_RXCTRL_RXEN;
4512 hw->mac.ops.enable_rx_dma(hw, rxctrl);
4514 /* If loopback mode is enabled for 82599, set up the link accordingly */
4515 if (hw->mac.type == ixgbe_mac_82599EB &&
4516 dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
4517 ixgbe_setup_loopback_link_82599(hw);
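/*
 * Illustration only, not part of the driver: the TXDCTL layout programmed
 * in ixgbe_dev_rxtx_start() above. PTHRESH occupies bits 6:0, HTHRESH bits
 * 14:8 and WTHRESH bits 22:16, which is why each 7-bit threshold is masked
 * and shifted. The guard macro and helper name are hypothetical.
 */
#ifdef IXGBE_RXTX_DOC_EXAMPLES
static uint32_t
example_pack_txdctl_thresholds(uint8_t pthresh, uint8_t hthresh,
			       uint8_t wthresh)
{
	uint32_t txdctl = 0;

	txdctl |= pthresh & 0x7F;
	txdctl |= ((uint32_t)(hthresh & 0x7F)) << 8;
	txdctl |= ((uint32_t)(wthresh & 0x7F)) << 16;
	return txdctl;
}
#endif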
4523 * Start Receive Units for specified queue.
4525 int __attribute__((cold))
4526 ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
4528 struct ixgbe_hw *hw;
4529 struct ixgbe_rx_queue *rxq;
4533 PMD_INIT_FUNC_TRACE();
4534 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4536 if (rx_queue_id < dev->data->nb_rx_queues) {
4537 rxq = dev->data->rx_queues[rx_queue_id];
4539 /* Allocate buffers for descriptor rings */
4540 if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
4541 PMD_INIT_LOG(ERR, "Could not allocate mbufs for queue %d",
4545 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
4546 rxdctl |= IXGBE_RXDCTL_ENABLE;
4547 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
4549 /* Wait until RX Enable ready */
4550 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
4553 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
4554 } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
4556 PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d",
4559 IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
4560 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
4561 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
4569 * Stop Receive Units for specified queue.
4571 int __attribute__((cold))
4572 ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
4574 struct ixgbe_hw *hw;
4575 struct ixgbe_adapter *adapter =
4576 (struct ixgbe_adapter *)dev->data->dev_private;
4577 struct ixgbe_rx_queue *rxq;
4581 PMD_INIT_FUNC_TRACE();
4582 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4584 if (rx_queue_id < dev->data->nb_rx_queues) {
4585 rxq = dev->data->rx_queues[rx_queue_id];
4587 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
4588 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
4589 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
4591 /* Wait until RX Enable ready */
4592 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
4595 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
4596 } while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE));
4598 PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d",
4601 rte_delay_us(RTE_IXGBE_WAIT_100_US);
4603 ixgbe_rx_queue_release_mbufs(rxq);
4604 ixgbe_reset_rx_queue(adapter, rxq);
4605 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
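/*
 * Illustration only, not part of the driver: the register-poll pattern the
 * queue start/stop paths above rely on. The register is re-read every
 * millisecond for at most RTE_IXGBE_REGISTER_POLL_WAIT_10_MS iterations,
 * and failure is reported if the expected ENABLE-bit state is never seen.
 * The guard macro and helper name are hypothetical.
 */
#ifdef IXGBE_RXTX_DOC_EXAMPLES
static int
example_poll_reg_bit(struct ixgbe_hw *hw, uint32_t reg, uint32_t bit,
		     int wait_set)
{
	uint32_t val;
	int poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;

	do {
		rte_delay_ms(1);
		val = IXGBE_READ_REG(hw, reg);
	} while (--poll_ms && ((val & bit) ? !wait_set : wait_set));

	return poll_ms ? 0 : -1;	/* 0: state reached, -1: timed out */
}
#endif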
4614 * Start Transmit Units for specified queue.
4616 int __attribute__((cold))
4617 ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
4619 struct ixgbe_hw *hw;
4620 struct ixgbe_tx_queue *txq;
4624 PMD_INIT_FUNC_TRACE();
4625 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4627 if (tx_queue_id < dev->data->nb_tx_queues) {
4628 txq = dev->data->tx_queues[tx_queue_id];
4629 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
4630 txdctl |= IXGBE_TXDCTL_ENABLE;
4631 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
4633 /* Wait until TX Enable ready */
4634 if (hw->mac.type == ixgbe_mac_82599EB) {
4635 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
4638 txdctl = IXGBE_READ_REG(hw,
4639 IXGBE_TXDCTL(txq->reg_idx));
4640 } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
4642 PMD_INIT_LOG(ERR, "Could not enable "
4643 "Tx Queue %d", tx_queue_id);
4646 IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
4647 IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
4648 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
4656 * Stop Transmit Units for specified queue.
4658 int __attribute__((cold))
4659 ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
4661 struct ixgbe_hw *hw;
4662 struct ixgbe_tx_queue *txq;
4664 uint32_t txtdh, txtdt;
4667 PMD_INIT_FUNC_TRACE();
4668 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4670 if (tx_queue_id < dev->data->nb_tx_queues) {
4671 txq = dev->data->tx_queues[tx_queue_id];
4673 /* Wait until TX queue is empty */
4674 if (hw->mac.type == ixgbe_mac_82599EB) {
4675 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
4677 rte_delay_us(RTE_IXGBE_WAIT_100_US);
4678 txtdh = IXGBE_READ_REG(hw,
4679 IXGBE_TDH(txq->reg_idx));
4680 txtdt = IXGBE_READ_REG(hw,
4681 IXGBE_TDT(txq->reg_idx));
4682 } while (--poll_ms && (txtdh != txtdt));
4684 PMD_INIT_LOG(ERR, "Tx Queue %d is not empty "
4685 "when stopping.", tx_queue_id);
4688 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
4689 txdctl &= ~IXGBE_TXDCTL_ENABLE;
4690 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
4692 /* Wait until TX Enable ready */
4693 if (hw->mac.type == ixgbe_mac_82599EB) {
4694 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
4697 txdctl = IXGBE_READ_REG(hw,
4698 IXGBE_TXDCTL(txq->reg_idx));
4699 } while (--poll_ms && (txdctl & IXGBE_TXDCTL_ENABLE));
4701 PMD_INIT_LOG(ERR, "Could not disable "
4702 "Tx Queue %d", tx_queue_id);
4705 if (txq->ops != NULL) {
4706 txq->ops->release_mbufs(txq);
4707 txq->ops->reset(txq);
4709 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
4717 ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
4718 struct rte_eth_rxq_info *qinfo)
4720 struct ixgbe_rx_queue *rxq;
4722 rxq = dev->data->rx_queues[queue_id];
4724 qinfo->mp = rxq->mb_pool;
4725 qinfo->scattered_rx = dev->data->scattered_rx;
4726 qinfo->nb_desc = rxq->nb_rx_desc;
4728 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
4729 qinfo->conf.rx_drop_en = rxq->drop_en;
4730 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
4734 ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
4735 struct rte_eth_txq_info *qinfo)
4737 struct ixgbe_tx_queue *txq;
4739 txq = dev->data->tx_queues[queue_id];
4741 qinfo->nb_desc = txq->nb_tx_desc;
4743 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
4744 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
4745 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
4747 qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
4748 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
4749 qinfo->conf.txq_flags = txq->txq_flags;
4750 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
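/*
 * Illustration only, not part of the driver: ixgbe_rxq_info_get() and
 * ixgbe_txq_info_get() above back the generic
 * rte_eth_rx_queue_info_get()/rte_eth_tx_queue_info_get() ethdev calls.
 * A minimal application-side sketch with hypothetical port/queue ids and a
 * hypothetical guard macro:
 */
#ifdef IXGBE_RXTX_DOC_EXAMPLES
static void
example_dump_queue_info(uint8_t port_id, uint16_t queue_id)
{
	struct rte_eth_rxq_info rx_qinfo;
	struct rte_eth_txq_info tx_qinfo;

	if (rte_eth_rx_queue_info_get(port_id, queue_id, &rx_qinfo) == 0)
		printf("rxq %u: %u descriptors\n", queue_id,
		       rx_qinfo.nb_desc);
	if (rte_eth_tx_queue_info_get(port_id, queue_id, &tx_qinfo) == 0)
		printf("txq %u: %u descriptors\n", queue_id,
		       tx_qinfo.nb_desc);
}
#endif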
4754 * [VF] Initializes Receive Unit.
4756 int __attribute__((cold))
4757 ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
4759 struct ixgbe_hw *hw;
4760 struct ixgbe_rx_queue *rxq;
4762 uint32_t srrctl, psrtype = 0;
4767 PMD_INIT_FUNC_TRACE();
4768 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4770 if (rte_is_power_of_2(dev->data->nb_rx_queues) == 0) {
4771 PMD_INIT_LOG(ERR, "Invalid number of Rx queues, "
4772 "it should be a power of 2");
4776 if (dev->data->nb_rx_queues > hw->mac.max_rx_queues) {
4777 PMD_INIT_LOG(ERR, "Invalid number of Rx queues, "
4778 "it should be less than or equal to %d",
4779 hw->mac.max_rx_queues);
4784 * When the VF driver issues an IXGBE_VF_RESET request, the PF driver
4785 * disables VF packet reception if the PF MTU is > 1500.
4786 * This is done to deal with an 82599 limitation that requires
4787 * the PF and all VFs to share the same MTU.
4788 * The PF driver then re-enables VF packet reception when
4789 * the VF driver issues an IXGBE_VF_SET_LPE request.
4790 * In the meantime, the VF device cannot be used, even if the VF driver
4791 * and the Guest VM network stack are ready to accept packets with a
4792 * size up to the PF MTU.
4793 * As a work-around to this PF behaviour, force the call to
4794 * ixgbevf_rlpml_set_vf even if jumbo frames are not used. This way,
4795 * VF packet reception works in all cases.
4797 ixgbevf_rlpml_set_vf(hw,
4798 (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
4800 /* Setup RX queues */
4801 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4802 rxq = dev->data->rx_queues[i];
4804 /* Allocate buffers for descriptor rings */
4805 ret = ixgbe_alloc_rx_queue_mbufs(rxq);
4809 /* Setup the Base and Length of the Rx Descriptor Rings */
4810 bus_addr = rxq->rx_ring_phys_addr;
4812 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
4813 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4814 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
4815 (uint32_t)(bus_addr >> 32));
4816 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
4817 rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
4818 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0);
4819 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0);
4822 /* Configure the SRRCTL register */
4823 #ifdef RTE_HEADER_SPLIT_ENABLE
4825 * Configure Header Split
4827 if (dev->data->dev_conf.rxmode.header_split) {
4828 srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
4829 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
4830 IXGBE_SRRCTL_BSIZEHDR_MASK);
4831 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
4834 srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
4836 /* Set if packets are to be dropped when no descriptors are available */
4838 srrctl |= IXGBE_SRRCTL_DROP_EN;
4841 * Configure the RX buffer size in the BSIZEPACKET field of
4842 * the SRRCTL register of the queue.
4843 * The value is in 1 KB resolution. Valid values can be from
4846 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
4847 RTE_PKTMBUF_HEADROOM);
4848 srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
4849 IXGBE_SRRCTL_BSIZEPKT_MASK);
4852 * VF modification to write virtual function SRRCTL register
4854 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), srrctl);
4856 buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
4857 IXGBE_SRRCTL_BSIZEPKT_SHIFT);
4859 if (dev->data->dev_conf.rxmode.enable_scatter ||
4860 /* Account for two VLAN tag lengths to support dual VLAN */
4861 (dev->data->dev_conf.rxmode.max_rx_pkt_len +
4862 2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
4863 if (!dev->data->scattered_rx)
4864 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
4865 dev->data->scattered_rx = 1;
4869 #ifdef RTE_HEADER_SPLIT_ENABLE
4870 if (dev->data->dev_conf.rxmode.header_split)
4871 /* Must setup the PSRTYPE register */
4872 psrtype = IXGBE_PSRTYPE_TCPHDR |
4873 IXGBE_PSRTYPE_UDPHDR |
4874 IXGBE_PSRTYPE_IPV4HDR |
4875 IXGBE_PSRTYPE_IPV6HDR;
4878 /* Set RQPL for VF RSS according to the max Rx queue count */
4879 psrtype |= (dev->data->nb_rx_queues >> 1) <<
4880 IXGBE_PSRTYPE_RQPL_SHIFT;
4881 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
4883 ixgbe_set_rx_function(dev);
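/*
 * Illustration only, not part of the driver: the VF Rx queue count
 * constraints checked at the top of ixgbevf_dev_rx_init() above -- the
 * number of Rx queues must be a power of 2 and must not exceed the per-VF
 * maximum reported by the hardware. The guard macro and helper name are
 * hypothetical.
 */
#ifdef IXGBE_RXTX_DOC_EXAMPLES
static int
example_vf_nb_rx_queues_valid(uint16_t nb_rx_queues, uint16_t max_rx_queues)
{
	return rte_is_power_of_2(nb_rx_queues) &&
	       nb_rx_queues <= max_rx_queues;
}
#endif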
4889 * [VF] Initializes Transmit Unit.
4891 void __attribute__((cold))
4892 ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
4894 struct ixgbe_hw *hw;
4895 struct ixgbe_tx_queue *txq;
4900 PMD_INIT_FUNC_TRACE();
4901 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4903 /* Setup the Base and Length of the Tx Descriptor Rings */
4904 for (i = 0; i < dev->data->nb_tx_queues; i++) {
4905 txq = dev->data->tx_queues[i];
4906 bus_addr = txq->tx_ring_phys_addr;
4907 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
4908 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4909 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i),
4910 (uint32_t)(bus_addr >> 32));
4911 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
4912 txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
4913 /* Setup the HW Tx Head and TX Tail descriptor pointers */
4914 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
4915 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
4918 * Disable Tx Head Writeback RO bit, since this hoses
4919 * bookkeeping if things aren't delivered in order.
4921 txctrl = IXGBE_READ_REG(hw,
4922 IXGBE_VFDCA_TXCTRL(i));
4923 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4924 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i),
4930 * [VF] Start Transmit and Receive Units.
4932 void __attribute__((cold))
4933 ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
4935 struct ixgbe_hw *hw;
4936 struct ixgbe_tx_queue *txq;
4937 struct ixgbe_rx_queue *rxq;
4943 PMD_INIT_FUNC_TRACE();
4944 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4946 for (i = 0; i < dev->data->nb_tx_queues; i++) {
4947 txq = dev->data->tx_queues[i];
4948 /* Setup Transmit Threshold Registers */
4949 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
4950 txdctl |= txq->pthresh & 0x7F;
4951 txdctl |= ((txq->hthresh & 0x7F) << 8);
4952 txdctl |= ((txq->wthresh & 0x7F) << 16);
4953 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
4956 for (i = 0; i < dev->data->nb_tx_queues; i++) {
4958 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
4959 txdctl |= IXGBE_TXDCTL_ENABLE;
4960 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
4963 /* Wait until TX Enable ready */
4966 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
4967 } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
4969 PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i);
4971 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4973 rxq = dev->data->rx_queues[i];
4975 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
4976 rxdctl |= IXGBE_RXDCTL_ENABLE;
4977 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
4979 /* Wait until RX Enable ready */
4983 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
4984 } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
4986 PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i);
4988 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1);
4993 /* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */
4994 int __attribute__((weak))
4995 ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
5000 uint16_t __attribute__((weak))
5001 ixgbe_recv_pkts_vec(
5002 void __rte_unused *rx_queue,
5003 struct rte_mbuf __rte_unused **rx_pkts,
5004 uint16_t __rte_unused nb_pkts)
5009 uint16_t __attribute__((weak))
5010 ixgbe_recv_scattered_pkts_vec(
5011 void __rte_unused *rx_queue,
5012 struct rte_mbuf __rte_unused **rx_pkts,
5013 uint16_t __rte_unused nb_pkts)
5018 int __attribute__((weak))
5019 ixgbe_rxq_vec_setup(struct ixgbe_rx_queue __rte_unused *rxq)