/*-
 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>

#include <stdint.h>
#include <string.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_prefetch.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_string_fns.h>
#include <rte_errno.h>

#include "ixgbe_logs.h"
#include "ixgbe/ixgbe_api.h"
#include "ixgbe/ixgbe_vf.h"
#include "ixgbe_ethdev.h"
#include "ixgbe/ixgbe_dcb.h"
#include "ixgbe/ixgbe_common.h"
#include "ixgbe_rxtx.h"
#define IXGBE_RSS_OFFLOAD_ALL ( \
    ETH_RSS_IPV6_TCP_EX | \
static inline struct rte_mbuf *
rte_rxmbuf_alloc(struct rte_mempool *mp)
{
    struct rte_mbuf *m;

    m = __rte_mbuf_raw_alloc(mp);
    __rte_mbuf_sanity_check_raw(m, RTE_MBUF_PKT, 0);
    return m;
}

#define RTE_PMD_USE_PREFETCH

#ifdef RTE_PMD_USE_PREFETCH
/*
 * Prefetch a cache line into all cache levels.
 */
#define rte_ixgbe_prefetch(p)   rte_prefetch0(p)
#else
#define rte_ixgbe_prefetch(p)   do {} while (0)
#endif
/*********************************************************************
 *
 *  TX functions
 *
 **********************************************************************/

/*
 * Check for descriptors with their DD bit set and free mbufs.
 * Return the total number of buffers freed.
 */
static inline int __attribute__((always_inline))
ixgbe_tx_free_bufs(struct igb_tx_queue *txq)
{
    struct igb_tx_entry *txep;
    uint32_t status;
    int i;

    /* check DD bit on threshold descriptor */
    status = txq->tx_ring[txq->tx_next_dd].wb.status;
    if (!(status & IXGBE_ADVTXD_STAT_DD))
        return 0;

    /*
     * first buffer to free from S/W ring is at index
     * tx_next_dd - (tx_rs_thresh - 1)
     */
    txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]);

    /* prefetch the mbufs that are about to be freed */
    for (i = 0; i < txq->tx_rs_thresh; ++i)
        rte_prefetch0((txep + i)->mbuf);

    /* free buffers one at a time */
    if ((txq->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOREFCOUNT) != 0) {
        for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
            rte_mempool_put(txep->mbuf->pool, txep->mbuf);
            txep->mbuf = NULL;
        }
    } else {
        for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
            rte_pktmbuf_free_seg(txep->mbuf);
            txep->mbuf = NULL;
        }
    }

    /* buffers were freed, update counters */
    txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
    txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
    if (txq->tx_next_dd >= txq->nb_tx_desc)
        txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);

    return txq->tx_rs_thresh;
}
/* Populate 4 descriptors with data from 4 mbufs */
static inline void
tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
{
    uint64_t buf_dma_addr;
    uint32_t pkt_len;
    int i;

    for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
        buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
        pkt_len = (*pkts)->pkt.data_len;

        /* write data to descriptor */
        txdp->read.buffer_addr = buf_dma_addr;
        txdp->read.cmd_type_len =
            ((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
        txdp->read.olinfo_status =
            (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
    }
}

/* Populate 1 descriptor with data from 1 mbuf */
static inline void
tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
{
    uint64_t buf_dma_addr;
    uint32_t pkt_len;

    buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
    pkt_len = (*pkts)->pkt.data_len;

    /* write data to descriptor */
    txdp->read.buffer_addr = buf_dma_addr;
    txdp->read.cmd_type_len =
        ((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
    txdp->read.olinfo_status =
        (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
}

/*
 * Fill H/W descriptor ring with mbuf data.
 * Copy mbuf pointers to the S/W ring.
 */
static inline void
ixgbe_tx_fill_hw_ring(struct igb_tx_queue *txq, struct rte_mbuf **pkts,
                      uint16_t nb_pkts)
{
    volatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
    struct igb_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
    const int N_PER_LOOP = 4;
    const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
    int mainpart, leftover;
    int i, j;

    /*
     * Process most of the packets in chunks of N pkts. Any
     * leftover packets will get processed one at a time.
     */
    mainpart = (nb_pkts & ((uint32_t)~N_PER_LOOP_MASK));
    leftover = (nb_pkts & ((uint32_t)N_PER_LOOP_MASK));
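    /*
     * Since N_PER_LOOP is 4, N_PER_LOOP_MASK is 0x3, so the two masks
     * above simply split nb_pkts into a multiple of 4 plus a remainder.
     * For example, nb_pkts = 7 gives mainpart = 4 and leftover = 3:
     * one tx4() call below followed by three tx1() calls.
     */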
    for (i = 0; i < mainpart; i += N_PER_LOOP) {
        /* Copy N mbuf pointers to the S/W ring */
        for (j = 0; j < N_PER_LOOP; ++j) {
            (txep + i + j)->mbuf = *(pkts + i + j);
        }
        tx4(txdp + i, pkts + i);
    }

    if (unlikely(leftover > 0)) {
        for (i = 0; i < leftover; ++i) {
            (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
            tx1(txdp + mainpart + i, pkts + mainpart + i);
        }
    }
}

static inline uint16_t
tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
             uint16_t nb_pkts)
{
    struct igb_tx_queue *txq = (struct igb_tx_queue *)tx_queue;
    volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring;
    uint16_t n = 0;

    /*
     * Begin scanning the H/W ring for done descriptors when the
     * number of available descriptors drops below tx_free_thresh. For
     * each done descriptor, free the associated buffer.
     */
    if (txq->nb_tx_free < txq->tx_free_thresh)
        ixgbe_tx_free_bufs(txq);

    /* Only use descriptors that are available */
    nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
    if (unlikely(nb_pkts == 0))
        return 0;

    /* Use exactly nb_pkts descriptors */
    txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);

    /*
     * At this point, we know there are enough descriptors in the
     * ring to transmit all the packets. This assumes that each
     * mbuf contains a single segment, and that no new offloads
     * are expected, which would require a new context descriptor.
     */

    /*
     * See if we're going to wrap-around. If so, handle the top
     * of the descriptor ring first, then do the bottom. If not,
     * the processing looks just like the "bottom" part anyway...
     */
    if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
        n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
        ixgbe_tx_fill_hw_ring(txq, tx_pkts, n);

        /*
         * We know that the last descriptor in the ring will need to
         * have its RS bit set because tx_rs_thresh has to be
         * a divisor of the ring size
         */
        tx_r[txq->tx_next_rs].read.cmd_type_len |=
            rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
        txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

        txq->tx_tail = 0;
    }

    /* Fill H/W descriptor ring with mbuf data */
    ixgbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
    txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));

    /*
     * Determine if RS bit should be set
     * This is what we actually want:
     *     if ((txq->tx_tail - 1) >= txq->tx_next_rs)
     * but instead of subtracting 1 and doing >=, we can just do
     * greater than without subtracting.
     */
    if (txq->tx_tail > txq->tx_next_rs) {
        tx_r[txq->tx_next_rs].read.cmd_type_len |=
            rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
        txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
                                     txq->tx_rs_thresh);
        if (txq->tx_next_rs >= txq->nb_tx_desc)
            txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
    }

    /*
     * Check for wrap-around. This would only happen if we used
     * up to the last descriptor in the ring, no more, no less.
     */
    if (txq->tx_tail >= txq->nb_tx_desc)
        txq->tx_tail = 0;

    /* update tail pointer */
    rte_wmb();
    IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);

    return nb_pkts;
}

static uint16_t
ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
                       uint16_t nb_pkts)
{
    uint16_t nb_tx;

    /* Try to transmit at least chunks of TX_MAX_BURST pkts */
    if (likely(nb_pkts <= RTE_PMD_IXGBE_TX_MAX_BURST))
        return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);

    /* transmit more than the max burst, in chunks of TX_MAX_BURST */
    nb_tx = 0;
    while (nb_pkts) {
        uint16_t ret, n;

        n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_TX_MAX_BURST);
        ret = tx_xmit_pkts(tx_queue, &(tx_pkts[nb_tx]), n);
        nb_tx = (uint16_t)(nb_tx + ret);
        nb_pkts = (uint16_t)(nb_pkts - ret);
        if (ret < n)
            break;
    }

    return nb_tx;
}
static void
ixgbe_set_xmit_ctx(struct igb_tx_queue *txq,
        volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
        uint16_t ol_flags, uint32_t vlan_macip_lens)
{
    uint32_t type_tucmd_mlhl;
    uint32_t mss_l4len_idx;
    uint32_t ctx_idx;
    uint32_t cmp_mask;

    ctx_idx = txq->ctx_curr;
    cmp_mask = 0;
    type_tucmd_mlhl = 0;

    if (ol_flags & PKT_TX_VLAN_PKT) {
        cmp_mask |= TX_VLAN_CMP_MASK;
    }

    if (ol_flags & PKT_TX_IP_CKSUM) {
        type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4;
        cmp_mask |= TX_MAC_LEN_CMP_MASK;
    }

    /* Specify which HW CTX to upload. */
    mss_l4len_idx = (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT);
    switch (ol_flags & PKT_TX_L4_MASK) {
    case PKT_TX_UDP_CKSUM:
        type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP |
                IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
        mss_l4len_idx |= sizeof(struct udp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
        cmp_mask |= TX_MACIP_LEN_CMP_MASK;
        break;
    case PKT_TX_TCP_CKSUM:
        type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP |
                IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
        mss_l4len_idx |= sizeof(struct tcp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
        cmp_mask |= TX_MACIP_LEN_CMP_MASK;
        break;
    case PKT_TX_SCTP_CKSUM:
        type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP |
                IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
        mss_l4len_idx |= sizeof(struct sctp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
        cmp_mask |= TX_MACIP_LEN_CMP_MASK;
        break;
    default:
        type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_RSV |
                IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
        break;
    }

    txq->ctx_cache[ctx_idx].flags = ol_flags;
    txq->ctx_cache[ctx_idx].cmp_mask = cmp_mask;
    txq->ctx_cache[ctx_idx].vlan_macip_lens.data =
        vlan_macip_lens & cmp_mask;

    ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
    ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
    ctx_txd->mss_l4len_idx   = rte_cpu_to_le_32(mss_l4len_idx);
    ctx_txd->seqnum_seed     = 0;
}

/*
 * Check which hardware context can be used. Use the existing match
 * or create a new context descriptor.
 */
static inline uint32_t
what_advctx_update(struct igb_tx_queue *txq, uint16_t flags,
        uint32_t vlan_macip_lens)
{
    /* If match with the currently used context */
    if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
        (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
        (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
        return txq->ctx_curr;
    }

    /* Check whether the other cached context matches */
    txq->ctx_curr ^= 1;
    if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
        (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
        (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
        return txq->ctx_curr;
    }

    /* Mismatch: a new context descriptor is required */
    return IXGBE_CTX_NUM;
}
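/*
 * The two helpers below translate mbuf TX offload flags into descriptor
 * field bits without branching: each boolean expression evaluates to 0 or 1
 * and is used to index a two-entry lookup table.
 */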
static inline uint32_t
tx_desc_cksum_flags_to_olinfo(uint16_t ol_flags)
{
    static const uint32_t l4_olinfo[2] = {0, IXGBE_ADVTXD_POPTS_TXSM};
    static const uint32_t l3_olinfo[2] = {0, IXGBE_ADVTXD_POPTS_IXSM};
    uint32_t tmp;

    tmp  = l4_olinfo[(ol_flags & PKT_TX_L4_MASK)  != PKT_TX_L4_NO_CKSUM];
    tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
    return tmp;
}

static inline uint32_t
tx_desc_vlan_flags_to_cmdtype(uint16_t ol_flags)
{
    static const uint32_t vlan_cmd[2] = {0, IXGBE_ADVTXD_DCMD_VLE};

    return vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
}

/* Default RS bit threshold values */
#ifndef DEFAULT_TX_RS_THRESH
#define DEFAULT_TX_RS_THRESH   32
#endif
#ifndef DEFAULT_TX_FREE_THRESH
#define DEFAULT_TX_FREE_THRESH 32
#endif
/* Reset transmit descriptors after they have been used */
static inline int
ixgbe_xmit_cleanup(struct igb_tx_queue *txq)
{
    struct igb_tx_entry *sw_ring = txq->sw_ring;
    volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring;
    uint16_t last_desc_cleaned = txq->last_desc_cleaned;
    uint16_t nb_tx_desc = txq->nb_tx_desc;
    uint16_t desc_to_clean_to;
    uint16_t nb_tx_to_clean;

    /* Determine the last descriptor needing to be cleaned */
    desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
    if (desc_to_clean_to >= nb_tx_desc)
        desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);

    /* Check to make sure the last descriptor to clean is done */
    desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
    if (!(txr[desc_to_clean_to].wb.status & IXGBE_TXD_STAT_DD)) {
        PMD_TX_FREE_LOG(DEBUG,
                "TX descriptor %4u is not done "
                "(port=%d queue=%d)",
                desc_to_clean_to,
                txq->port_id, txq->queue_id);
        /* Failed to clean any descriptors, better luck next time */
        return -1;
    }

    /* Figure out how many descriptors will be cleaned */
    if (last_desc_cleaned > desc_to_clean_to)
        nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
                                    desc_to_clean_to);
    else
        nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
                                    last_desc_cleaned);

    PMD_TX_FREE_LOG(DEBUG,
            "Cleaning %4u TX descriptors: %4u to %4u "
            "(port=%d queue=%d)",
            nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
            txq->port_id, txq->queue_id);

    /*
     * The last descriptor to clean is done, so that means all the
     * descriptors from the last descriptor that was cleaned
     * up to the last descriptor with the RS bit set
     * are done. Only reset the threshold descriptor.
     */
    txr[desc_to_clean_to].wb.status = 0;

    /* Update the txq to reflect the last descriptor that was cleaned */
    txq->last_desc_cleaned = desc_to_clean_to;
    txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);

    /* No error */
    return 0;
}
uint16_t
ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
        uint16_t nb_pkts)
{
    struct igb_tx_queue *txq;
    struct igb_tx_entry *sw_ring;
    struct igb_tx_entry *txe, *txn;
    volatile union ixgbe_adv_tx_desc *txr;
    volatile union ixgbe_adv_tx_desc *txd;
    struct rte_mbuf *tx_pkt;
    struct rte_mbuf *m_seg;
    uint64_t buf_dma_addr;
    uint32_t olinfo_status;
    uint32_t cmd_type_len;
    uint32_t pkt_len;
    uint16_t slen;
    uint16_t ol_flags;
    uint16_t tx_id;
    uint16_t tx_last;
    uint16_t nb_tx;
    uint16_t nb_used;
    uint16_t tx_ol_req;
    uint32_t vlan_macip_lens;
    uint32_t ctx = 0;
    uint32_t new_ctx;

    txq = tx_queue;
    sw_ring = txq->sw_ring;
    txr = txq->tx_ring;
    tx_id = txq->tx_tail;
    txe = &sw_ring[tx_id];

    /* Determine if the descriptor ring needs to be cleaned. */
    if ((txq->nb_tx_desc - txq->nb_tx_free) > txq->tx_free_thresh) {
        ixgbe_xmit_cleanup(txq);
    }

    /* TX loop */
    for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
        new_ctx = 0;
        tx_pkt = *tx_pkts++;
        pkt_len = tx_pkt->pkt.pkt_len;

        RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);

        /*
         * Determine how many (if any) context descriptors
         * are needed for offload functionality.
         */
        ol_flags = tx_pkt->ol_flags;
        vlan_macip_lens = tx_pkt->pkt.vlan_macip.data;

        /* If hardware offload required */
        tx_ol_req = (uint16_t)(ol_flags & PKT_TX_OFFLOAD_MASK);
        if (tx_ol_req) {
            /* Decide whether a new context descriptor must be
             * built or an existing one can be reused. */
            ctx = what_advctx_update(txq, tx_ol_req,
                                     vlan_macip_lens);
            /* Only allocate context descriptor if required */
            new_ctx = (ctx == IXGBE_CTX_NUM);
            ctx = txq->ctx_curr;
        }

        /*
         * Keep track of how many descriptors are used this loop
         * This will always be the number of segments + the number of
         * Context descriptors required to transmit the packet
         */
        nb_used = (uint16_t)(tx_pkt->pkt.nb_segs + new_ctx);

        /*
         * The number of descriptors that must be allocated for a
         * packet is the number of segments of that packet, plus 1
         * Context Descriptor for the hardware offload, if any.
         * Determine the last TX descriptor to allocate in the TX ring
         * for the packet, starting from the current position (tx_id)
         * in the ring.
         */
        tx_last = (uint16_t) (tx_id + nb_used - 1);

        /* Circular ring */
        if (tx_last >= txq->nb_tx_desc)
            tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);

        PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
                   " tx_first=%u tx_last=%u\n",
                   (unsigned) txq->port_id,
                   (unsigned) txq->queue_id,
                   (unsigned) pkt_len,
                   (unsigned) tx_id,
                   (unsigned) tx_last);

        /*
         * Make sure there are enough TX descriptors available to
         * transmit the entire packet.
         * nb_used better be less than or equal to txq->tx_rs_thresh
         */
        if (nb_used > txq->nb_tx_free) {
            PMD_TX_FREE_LOG(DEBUG,
                    "Not enough free TX descriptors "
                    "nb_used=%4u nb_free=%4u "
                    "(port=%d queue=%d)",
                    nb_used, txq->nb_tx_free,
                    txq->port_id, txq->queue_id);

            if (ixgbe_xmit_cleanup(txq) != 0) {
                /* Could not clean any descriptors */
                if (nb_tx == 0)
                    return 0;
                goto end_of_tx;
            }

            /* nb_used better be <= txq->tx_rs_thresh */
            if (unlikely(nb_used > txq->tx_rs_thresh)) {
                PMD_TX_FREE_LOG(DEBUG,
                        "The number of descriptors needed to "
                        "transmit the packet exceeds the "
                        "RS bit threshold. This will impact "
                        "performance. "
                        "nb_used=%4u nb_free=%4u "
                        "(port=%d queue=%d)",
                        nb_used, txq->nb_tx_free,
                        txq->port_id, txq->queue_id);
                /*
                 * Loop here until there are enough TX
                 * descriptors or until the ring cannot be
                 * cleaned.
                 */
                while (nb_used > txq->nb_tx_free) {
                    if (ixgbe_xmit_cleanup(txq) != 0) {
                        /*
                         * Could not clean any
                         * descriptors
                         */
                        if (nb_tx == 0)
                            return 0;
                        goto end_of_tx;
                    }
                }
            }
        }

        /*
         * By now there are enough free TX descriptors to transmit
         * the packet.
         */

        /*
         * Set common flags of all TX Data Descriptors.
         *
         * The following bits must be set in all Data Descriptors:
         *   - IXGBE_ADVTXD_DTYP_DATA
         *   - IXGBE_ADVTXD_DCMD_DEXT
         *
         * The following bits must be set in the first Data Descriptor
         * and are ignored in the other ones:
         *   - IXGBE_ADVTXD_DCMD_IFCS
         *   - IXGBE_ADVTXD_MAC_1588
         *   - IXGBE_ADVTXD_DCMD_VLE
         *
         * The following bits must only be set in the last Data
         * Descriptor:
         *   - IXGBE_TXD_CMD_EOP
         *
         * The following bits can be set in any Data Descriptor, but
         * are only set in the last Data Descriptor:
         *   - IXGBE_TXD_CMD_RS
         */
        cmd_type_len = IXGBE_ADVTXD_DTYP_DATA |
            IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
        olinfo_status = (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
#ifdef RTE_LIBRTE_IEEE1588
        if (ol_flags & PKT_TX_IEEE1588_TMST)
            cmd_type_len |= IXGBE_ADVTXD_MAC_1588;
#endif

        if (tx_ol_req) {
            /*
             * Setup the TX Advanced Context Descriptor if required
             */
            if (new_ctx) {
                volatile struct ixgbe_adv_tx_context_desc *
                    ctx_txd;

                ctx_txd = (volatile struct
                    ixgbe_adv_tx_context_desc *)
                    &txr[tx_id];

                txn = &sw_ring[txe->next_id];
                RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);

                if (txe->mbuf != NULL) {
                    rte_pktmbuf_free_seg(txe->mbuf);
                    txe->mbuf = NULL;
                }

                ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
                    vlan_macip_lens);

                txe->last_id = tx_last;
                tx_id = txe->next_id;
                txe = txn;
            }

            /*
             * Setup the TX Advanced Data Descriptor.
             * This path is taken whether the context descriptor is
             * newly created or an existing one is reused.
             */
            cmd_type_len  |= tx_desc_vlan_flags_to_cmdtype(ol_flags);
            olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
            olinfo_status |= ctx << IXGBE_ADVTXD_IDX_SHIFT;
        }

        m_seg = tx_pkt;
        do {
            txd = &txr[tx_id];
            txn = &sw_ring[txe->next_id];

            if (txe->mbuf != NULL)
                rte_pktmbuf_free_seg(txe->mbuf);
            txe->mbuf = m_seg;

            /*
             * Set up Transmit Data Descriptor.
             */
            slen = m_seg->pkt.data_len;
            buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
            txd->read.buffer_addr =
                rte_cpu_to_le_64(buf_dma_addr);
            txd->read.cmd_type_len =
                rte_cpu_to_le_32(cmd_type_len | slen);
            txd->read.olinfo_status =
                rte_cpu_to_le_32(olinfo_status);
            txe->last_id = tx_last;
            tx_id = txe->next_id;
            txe = txn;
            m_seg = m_seg->pkt.next;
        } while (m_seg != NULL);

        /*
         * The last packet data descriptor needs End Of Packet (EOP)
         */
        cmd_type_len |= IXGBE_TXD_CMD_EOP;
        txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
        txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);

        /* Set RS bit only on threshold packets' last descriptor */
        if (txq->nb_tx_used >= txq->tx_rs_thresh) {
            PMD_TX_FREE_LOG(DEBUG,
                    "Setting RS bit on TXD id="
                    "%4u (port=%d queue=%d)",
                    tx_last, txq->port_id, txq->queue_id);

            cmd_type_len |= IXGBE_TXD_CMD_RS;

            /* Update txq RS bit counters */
            txq->nb_tx_used = 0;
        }
        txd->read.cmd_type_len |= rte_cpu_to_le_32(cmd_type_len);
    }

end_of_tx:
    rte_wmb();

    /*
     * Set the Transmit Descriptor Tail (TDT)
     */
    PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
               (unsigned) txq->port_id, (unsigned) txq->queue_id,
               (unsigned) tx_id, (unsigned) nb_tx);
    IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
    txq->tx_tail = tx_id;

    return nb_tx;
}
/*********************************************************************
 *
 *  RX functions
 *
 **********************************************************************/
static inline uint16_t
rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
{
    uint16_t pkt_flags;

    static uint16_t ip_pkt_types_map[16] = {
        0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
        PKT_RX_IPV6_HDR, 0, 0, 0,
        PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
        PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
    };

    static uint16_t ip_rss_types_map[16] = {
        0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
        0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
        PKT_RX_RSS_HASH, 0, 0, 0,
        0, 0, 0, PKT_RX_FDIR,
    };

#ifdef RTE_LIBRTE_IEEE1588
    static uint32_t ip_pkt_etqf_map[8] = {
        0, 0, 0, PKT_RX_IEEE1588_PTP,
        0, 0, 0, 0,
    };

    pkt_flags = (uint16_t) ((hl_tp_rs & IXGBE_RXDADV_PKTTYPE_ETQF) ?
                ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
                ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
#else
    pkt_flags = (uint16_t) ((hl_tp_rs & IXGBE_RXDADV_PKTTYPE_ETQF) ? 0 :
                ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
#endif

    return (uint16_t)(pkt_flags | ip_rss_types_map[hl_tp_rs & 0xF]);
}

static inline uint16_t
rx_desc_status_to_pkt_flags(uint32_t rx_status)
{
    uint16_t pkt_flags;

    /*
     * Check if VLAN present only.
     * Do not check whether the L3/L4 RX checksum was done by the NIC;
     * that can be found from the rte_eth_rxmode.hw_ip_checksum flag.
     */
    pkt_flags = (uint16_t)((rx_status & IXGBE_RXD_STAT_VP) ?
                           PKT_RX_VLAN_PKT : 0);

#ifdef RTE_LIBRTE_IEEE1588
    if (rx_status & IXGBE_RXD_STAT_TMST)
        pkt_flags = (uint16_t)(pkt_flags | PKT_RX_IEEE1588_TMST);
#endif
    return pkt_flags;
}

static inline uint16_t
rx_desc_error_to_pkt_flags(uint32_t rx_status)
{
    /*
     * Bit 31: IPE, IPv4 checksum error
     * Bit 30: L4I, L4 integrity error
     */
    static uint16_t error_to_pkt_flags_map[4] = {
        0, PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
        PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
    };

    return error_to_pkt_flags_map[(rx_status >>
        IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK];
}
#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
/*
 * LOOK_AHEAD defines how many desc statuses to check beyond the
 * current descriptor.
 * It must be a pound define for optimal performance.
 * Do not change the value of LOOK_AHEAD, as the ixgbe_rx_scan_hw_ring
 * function only works with LOOK_AHEAD=8.
 */
#define LOOK_AHEAD 8
#if (LOOK_AHEAD != 8)
#error "PMD IXGBE: LOOK_AHEAD must be 8\n"
#endif

static inline uint16_t
ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)
{
    volatile union ixgbe_adv_rx_desc *rxdp;
    struct igb_rx_entry *rxep;
    struct rte_mbuf *mb;
    uint16_t pkt_len;
    int s[LOOK_AHEAD], nb_dd;
    int i, j, nb_rx = 0;

    /* get references to current descriptor and S/W ring entry */
    rxdp = &rxq->rx_ring[rxq->rx_tail];
    rxep = &rxq->sw_ring[rxq->rx_tail];

    /* check to make sure there is at least 1 packet to receive */
    if (!(rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD))
        return 0;

    /*
     * Scan LOOK_AHEAD descriptors at a time to determine which descriptors
     * reference packets that are ready to be received.
     */
    for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST;
         i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD) {
        /* Read desc statuses backwards to avoid race condition */
        for (j = LOOK_AHEAD - 1; j >= 0; --j)
            s[j] = rxdp[j].wb.upper.status_error;

        /* Compute how many status bits were set */
        nb_dd = 0;
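        /*
         * IXGBE_RXDADV_STAT_DD is the least-significant bit of the status
         * word, so the mask in the loop below contributes exactly 0 or 1
         * per descriptor and the sum is the number of descriptors in this
         * LOOK_AHEAD window that the hardware has marked as done.
         */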
        for (j = 0; j < LOOK_AHEAD; ++j)
            nb_dd += s[j] & IXGBE_RXDADV_STAT_DD;

        nb_rx += nb_dd;

        /* Translate descriptor info to mbuf format */
        for (j = 0; j < nb_dd; ++j) {
            mb = rxep[j].mbuf;
            pkt_len = (uint16_t)(rxdp[j].wb.upper.length -
                                 rxq->crc_len);
            mb->pkt.data_len = pkt_len;
            mb->pkt.pkt_len = pkt_len;
            mb->pkt.vlan_macip.f.vlan_tci = rxdp[j].wb.upper.vlan;
            mb->pkt.hash.rss = rxdp[j].wb.lower.hi_dword.rss;

            /* convert descriptor fields to rte mbuf flags */
            mb->ol_flags = rx_desc_hlen_type_rss_to_pkt_flags(
                    rxdp[j].wb.lower.lo_dword.data);
            /* reuse status field from scan list */
            mb->ol_flags = (uint16_t)(mb->ol_flags |
                    rx_desc_status_to_pkt_flags(s[j]));
            mb->ol_flags = (uint16_t)(mb->ol_flags |
                    rx_desc_error_to_pkt_flags(s[j]));
        }

        /* Move mbuf pointers from the S/W ring to the stage */
        for (j = 0; j < LOOK_AHEAD; ++j) {
            rxq->rx_stage[i + j] = rxep[j].mbuf;
        }

        /* stop if all requested packets could not be received */
        if (nb_dd != LOOK_AHEAD)
            break;
    }

    /* clear software ring entries so we can cleanup correctly */
    for (i = 0; i < nb_rx; ++i) {
        rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
    }

    return nb_rx;
}

static inline int
ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)
{
    volatile union ixgbe_adv_rx_desc *rxdp;
    struct igb_rx_entry *rxep;
    struct rte_mbuf *mb;
    uint16_t alloc_idx;
    uint64_t dma_addr;
    int diag, i;

    /* allocate buffers in bulk directly into the S/W ring */
    alloc_idx = (uint16_t)(rxq->rx_free_trigger -
                           (rxq->rx_free_thresh - 1));
    rxep = &rxq->sw_ring[alloc_idx];
    diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
                                rxq->rx_free_thresh);
    if (unlikely(diag != 0))
        return -ENOMEM;

    rxdp = &rxq->rx_ring[alloc_idx];
    for (i = 0; i < rxq->rx_free_thresh; ++i) {
        /* populate the static rte mbuf fields */
        mb = rxep[i].mbuf;
        rte_mbuf_refcnt_set(mb, 1);
        mb->type = RTE_MBUF_PKT;
        mb->pkt.next = NULL;
        mb->pkt.data = (char *)mb->buf_addr + RTE_PKTMBUF_HEADROOM;
        mb->pkt.nb_segs = 1;
        mb->pkt.in_port = rxq->port_id;

        /* populate the descriptors */
        dma_addr = (uint64_t)mb->buf_physaddr + RTE_PKTMBUF_HEADROOM;
        rxdp[i].read.hdr_addr = dma_addr;
        rxdp[i].read.pkt_addr = dma_addr;
    }

    /* update tail pointer */
    rte_wmb();
    IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rxq->rx_free_trigger);

    /* update state of internal queue structure */
    rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_trigger +
                                      rxq->rx_free_thresh);
    if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
        rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);

    /* no errors */
    return 0;
}
static inline uint16_t
ixgbe_rx_fill_from_stage(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
                         uint16_t nb_pkts)
{
    struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
    int i;

    /* how many packets are ready to return? */
    nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);

    /* copy mbuf pointers to the application's packet list */
    for (i = 0; i < nb_pkts; ++i)
        rx_pkts[i] = stage[i];

    /* update internal queue state */
    rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
    rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);

    return nb_pkts;
}

static inline uint16_t
rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
             uint16_t nb_pkts)
{
    struct igb_rx_queue *rxq = (struct igb_rx_queue *)rx_queue;
    uint16_t nb_rx = 0;

    /* Any previously recv'd pkts will be returned from the Rx stage */
    if (rxq->rx_nb_avail)
        return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);

    /* Scan the H/W ring for packets to receive */
    nb_rx = (uint16_t)ixgbe_rx_scan_hw_ring(rxq);

    /* update internal queue state */
    rxq->rx_next_avail = 0;
    rxq->rx_nb_avail = nb_rx;
    rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);

    /* if required, allocate new buffers to replenish descriptors */
    if (rxq->rx_tail > rxq->rx_free_trigger) {
        if (ixgbe_rx_alloc_bufs(rxq) != 0) {
            int i, j;

            PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
                       "queue_id=%u\n", (unsigned) rxq->port_id,
                       (unsigned) rxq->queue_id);

            rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
                rxq->rx_free_thresh;

            /*
             * Need to rewind any previous receives if we cannot
             * allocate new buffers to replenish the old ones.
             */
            rxq->rx_nb_avail = 0;
            rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
            for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j)
                rxq->sw_ring[j].mbuf = rxq->rx_stage[i];

            return 0;
        }
    }

    if (rxq->rx_tail >= rxq->nb_rx_desc)
        rxq->rx_tail = 0;

    /* received any packets this loop? */
    if (rxq->rx_nb_avail)
        return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);

    return 0;
}

/* split requests into chunks of size RTE_PMD_IXGBE_RX_MAX_BURST */
uint16_t
ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
                           uint16_t nb_pkts)
{
    uint16_t nb_rx;

    if (unlikely(nb_pkts == 0))
        return 0;

    if (likely(nb_pkts <= RTE_PMD_IXGBE_RX_MAX_BURST))
        return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);

    /* request is relatively large, chunk it up */
    nb_rx = 0;
    while (nb_pkts) {
        uint16_t ret, n;

        n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_RX_MAX_BURST);
        ret = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
        nb_rx = (uint16_t)(nb_rx + ret);
        nb_pkts = (uint16_t)(nb_pkts - ret);
        if (ret < n)
            break;
    }

    return nb_rx;
}
#endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */
uint16_t
ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
        uint16_t nb_pkts)
{
    struct igb_rx_queue *rxq;
    volatile union ixgbe_adv_rx_desc *rx_ring;
    volatile union ixgbe_adv_rx_desc *rxdp;
    struct igb_rx_entry *sw_ring;
    struct igb_rx_entry *rxe;
    struct rte_mbuf *rxm;
    struct rte_mbuf *nmb;
    union ixgbe_adv_rx_desc rxd;
    uint64_t dma_addr;
    uint32_t staterr;
    uint32_t hlen_type_rss;
    uint16_t pkt_len;
    uint16_t rx_id;
    uint16_t nb_rx;
    uint16_t nb_hold;
    uint16_t pkt_flags;

    nb_rx = 0;
    nb_hold = 0;
    rxq = rx_queue;
    rx_id = rxq->rx_tail;
    rx_ring = rxq->rx_ring;
    sw_ring = rxq->sw_ring;
    while (nb_rx < nb_pkts) {
        /*
         * The order of operations here is important as the DD status
         * bit must not be read after any other descriptor fields.
         * rx_ring and rxdp are pointing to volatile data so the order
         * of accesses cannot be reordered by the compiler. If they were
         * not volatile, they could be reordered which could lead to
         * using invalid descriptor fields when read from rxd.
         */
        rxdp = &rx_ring[rx_id];
        staterr = rxdp->wb.upper.status_error;
        if (!(staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
            break;
        rxd = *rxdp;

        /*
         * If the IXGBE_RXDADV_STAT_EOP flag is not set, the RX packet
         * is likely to be invalid and to be dropped by the various
         * validation checks performed by the network stack.
         *
         * Allocate a new mbuf to replenish the RX ring descriptor.
         * If the allocation fails:
         *    - arrange for that RX descriptor to be the first one
         *      being parsed the next time the receive function is
         *      invoked [on the same queue].
         *
         *    - Stop parsing the RX ring and return immediately.
         *
         * This policy does not drop the packet received in the RX
         * descriptor for which the allocation of a new mbuf failed.
         * Thus, it allows that packet to be later retrieved if
         * mbufs have been freed in the meantime.
         * As a side effect, holding RX descriptors instead of
         * systematically giving them back to the NIC may lead to
         * RX ring exhaustion situations.
         * However, the NIC can gracefully prevent such situations
         * from happening by sending specific "back-pressure" flow
         * control frames to its peer(s).
         */
        PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
                   "ext_err_stat=0x%08x pkt_len=%u\n",
                   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
                   (unsigned) rx_id, (unsigned) staterr,
                   (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));

        nmb = rte_rxmbuf_alloc(rxq->mb_pool);
        if (nmb == NULL) {
            PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
                       "queue_id=%u\n", (unsigned) rxq->port_id,
                       (unsigned) rxq->queue_id);
            rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
            break;
        }

        nb_hold++;
        rxe = &sw_ring[rx_id];
        rx_id++;
        if (rx_id == rxq->nb_rx_desc)
            rx_id = 0;

        /* Prefetch next mbuf while processing current one. */
        rte_ixgbe_prefetch(sw_ring[rx_id].mbuf);

        /*
         * When next RX descriptor is on a cache-line boundary,
         * prefetch the next 4 RX descriptors and the next 8 pointers
         * to mbufs.
         */
        if ((rx_id & 0x3) == 0) {
            rte_ixgbe_prefetch(&rx_ring[rx_id]);
            rte_ixgbe_prefetch(&sw_ring[rx_id]);
        }

        rxm = rxe->mbuf;
        rxe->mbuf = nmb;
        dma_addr =
            rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
        rxdp->read.hdr_addr = dma_addr;
        rxdp->read.pkt_addr = dma_addr;

        /*
         * Initialize the returned mbuf.
         * 1) setup generic mbuf fields:
         *    - number of segments,
         *    - next segment,
         *    - packet length,
         *    - RX port identifier.
         * 2) integrate hardware offload data, if any:
         *    - RSS flag & hash,
         *    - IP checksum flag,
         *    - VLAN TCI, if any,
         *    - error flags.
         */
        pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
                              rxq->crc_len);
        rxm->pkt.data = (char *) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
        rte_packet_prefetch(rxm->pkt.data);
        rxm->pkt.nb_segs = 1;
        rxm->pkt.next = NULL;
        rxm->pkt.pkt_len = pkt_len;
        rxm->pkt.data_len = pkt_len;
        rxm->pkt.in_port = rxq->port_id;

        hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
        /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
        rxm->pkt.vlan_macip.f.vlan_tci =
            rte_le_to_cpu_16(rxd.wb.upper.vlan);

        pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
        pkt_flags = (uint16_t)(pkt_flags |
                rx_desc_status_to_pkt_flags(staterr));
        pkt_flags = (uint16_t)(pkt_flags |
                rx_desc_error_to_pkt_flags(staterr));
        rxm->ol_flags = pkt_flags;

        if (likely(pkt_flags & PKT_RX_RSS_HASH))
            rxm->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
        else if (pkt_flags & PKT_RX_FDIR) {
            rxm->pkt.hash.fdir.hash =
                (uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
                           & IXGBE_ATR_HASH_MASK);
            rxm->pkt.hash.fdir.id = rxd.wb.lower.hi_dword.csum_ip.ip_id;
        }

        /*
         * Store the mbuf address into the next entry of the array
         * of returned packets.
         */
        rx_pkts[nb_rx++] = rxm;
    }
    rxq->rx_tail = rx_id;

    /*
     * If the number of free RX descriptors is greater than the RX free
     * threshold of the queue, advance the Receive Descriptor Tail (RDT)
     * register.
     * Update the RDT with the value of the last processed RX descriptor
     * minus 1, to guarantee that the RDT register is never equal to the
     * RDH register, which creates a "full" ring situation from the
     * hardware point of view...
     */
    nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
    if (nb_hold > rxq->rx_free_thresh) {
        PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
                   "nb_hold=%u nb_rx=%u\n",
                   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
                   (unsigned) rx_id, (unsigned) nb_hold,
                   (unsigned) nb_rx);
        rx_id = (uint16_t) ((rx_id == 0) ?
                            (rxq->nb_rx_desc - 1) : (rx_id - 1));
        IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
        nb_hold = 0;
    }
    rxq->nb_rx_hold = nb_hold;
    return nb_rx;
}
uint16_t
ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
        uint16_t nb_pkts)
{
    struct igb_rx_queue *rxq;
    volatile union ixgbe_adv_rx_desc *rx_ring;
    volatile union ixgbe_adv_rx_desc *rxdp;
    struct igb_rx_entry *sw_ring;
    struct igb_rx_entry *rxe;
    struct rte_mbuf *first_seg;
    struct rte_mbuf *last_seg;
    struct rte_mbuf *rxm;
    struct rte_mbuf *nmb;
    union ixgbe_adv_rx_desc rxd;
    uint64_t dma; /* Physical address of mbuf data buffer */
    uint32_t staterr;
    uint32_t hlen_type_rss;
    uint16_t data_len;
    uint16_t rx_id;
    uint16_t nb_rx;
    uint16_t nb_hold;
    uint16_t pkt_flags;

    nb_rx = 0;
    nb_hold = 0;
    rxq = rx_queue;
    rx_id = rxq->rx_tail;
    rx_ring = rxq->rx_ring;
    sw_ring = rxq->sw_ring;

    /*
     * Retrieve RX context of current packet, if any.
     */
    first_seg = rxq->pkt_first_seg;
    last_seg = rxq->pkt_last_seg;

    while (nb_rx < nb_pkts) {
        /*
         * The order of operations here is important as the DD status
         * bit must not be read after any other descriptor fields.
         * rx_ring and rxdp are pointing to volatile data so the order
         * of accesses cannot be reordered by the compiler. If they were
         * not volatile, they could be reordered which could lead to
         * using invalid descriptor fields when read from rxd.
         */
        rxdp = &rx_ring[rx_id];
        staterr = rxdp->wb.upper.status_error;
        if (!(staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
            break;
        rxd = *rxdp;

        /*
         * Allocate a new mbuf to replenish the RX ring descriptor.
         * If the allocation fails:
         *    - arrange for that RX descriptor to be the first one
         *      being parsed the next time the receive function is
         *      invoked [on the same queue].
         *
         *    - Stop parsing the RX ring and return immediately.
         *
         * This policy does not drop the packet received in the RX
         * descriptor for which the allocation of a new mbuf failed.
         * Thus, it allows that packet to be later retrieved if
         * mbufs have been freed in the meantime.
         * As a side effect, holding RX descriptors instead of
         * systematically giving them back to the NIC may lead to
         * RX ring exhaustion situations.
         * However, the NIC can gracefully prevent such situations
         * from happening by sending specific "back-pressure" flow
         * control frames to its peer(s).
         */
        PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
                   "staterr=0x%x data_len=%u\n",
                   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
                   (unsigned) rx_id, (unsigned) staterr,
                   (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));

        nmb = rte_rxmbuf_alloc(rxq->mb_pool);
        if (nmb == NULL) {
            PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
                       "queue_id=%u\n", (unsigned) rxq->port_id,
                       (unsigned) rxq->queue_id);
            rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
            break;
        }

        nb_hold++;
        rxe = &sw_ring[rx_id];
        rx_id++;
        if (rx_id == rxq->nb_rx_desc)
            rx_id = 0;

        /* Prefetch next mbuf while processing current one. */
        rte_ixgbe_prefetch(sw_ring[rx_id].mbuf);

        /*
         * When next RX descriptor is on a cache-line boundary,
         * prefetch the next 4 RX descriptors and the next 8 pointers
         * to mbufs.
         */
        if ((rx_id & 0x3) == 0) {
            rte_ixgbe_prefetch(&rx_ring[rx_id]);
            rte_ixgbe_prefetch(&sw_ring[rx_id]);
        }

        /*
         * Update RX descriptor with the physical address of the new
         * data buffer of the new allocated mbuf.
         */
        rxm = rxe->mbuf;
        rxe->mbuf = nmb;
        dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
        rxdp->read.hdr_addr = dma;
        rxdp->read.pkt_addr = dma;

        /*
         * Set data length & data buffer address of mbuf.
         */
        data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
        rxm->pkt.data_len = data_len;
        rxm->pkt.data = (char *) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;

        /*
         * If this is the first buffer of the received packet,
         * set the pointer to the first mbuf of the packet and
         * initialize its context.
         * Otherwise, update the total length and the number of segments
         * of the current scattered packet, and update the pointer to
         * the last mbuf of the current packet.
         */
        if (first_seg == NULL) {
            first_seg = rxm;
            first_seg->pkt.pkt_len = data_len;
            first_seg->pkt.nb_segs = 1;
        } else {
            first_seg->pkt.pkt_len = (uint16_t)(first_seg->pkt.pkt_len
                                                + data_len);
            first_seg->pkt.nb_segs++;
            last_seg->pkt.next = rxm;
        }

        /*
         * If this is not the last buffer of the received packet,
         * update the pointer to the last mbuf of the current scattered
         * packet and continue to parse the RX ring.
         */
        if (!(staterr & IXGBE_RXDADV_STAT_EOP)) {
            last_seg = rxm;
            continue;
        }

        /*
         * This is the last buffer of the received packet.
         * If the CRC is not stripped by the hardware:
         *   - Subtract the CRC length from the total packet length.
         *   - If the last buffer only contains the whole CRC or a part
         *     of it, free the mbuf associated to the last buffer.
         *     If part of the CRC is also contained in the previous
         *     mbuf, subtract the length of that CRC part from the
         *     data length of the previous mbuf.
         */
        rxm->pkt.next = NULL;
        if (unlikely(rxq->crc_len > 0)) {
            first_seg->pkt.pkt_len -= ETHER_CRC_LEN;
            if (data_len <= ETHER_CRC_LEN) {
                rte_pktmbuf_free_seg(rxm);
                first_seg->pkt.nb_segs--;
                last_seg->pkt.data_len = (uint16_t)
                    (last_seg->pkt.data_len -
                     (ETHER_CRC_LEN - data_len));
                last_seg->pkt.next = NULL;
            } else
                rxm->pkt.data_len =
                    (uint16_t) (data_len - ETHER_CRC_LEN);
        }

        /*
         * Initialize the first mbuf of the returned packet:
         *   - RX port identifier,
         *   - hardware offload data, if any:
         *     - RSS flag & hash,
         *     - IP checksum flag,
         *     - VLAN TCI, if any,
         *     - error flags.
         */
        first_seg->pkt.in_port = rxq->port_id;

        /*
         * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
         * set in the pkt_flags field.
         */
        first_seg->pkt.vlan_macip.f.vlan_tci =
            rte_le_to_cpu_16(rxd.wb.upper.vlan);
        hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
        pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
        pkt_flags = (uint16_t)(pkt_flags |
                rx_desc_status_to_pkt_flags(staterr));
        pkt_flags = (uint16_t)(pkt_flags |
                rx_desc_error_to_pkt_flags(staterr));
        first_seg->ol_flags = pkt_flags;

        if (likely(pkt_flags & PKT_RX_RSS_HASH))
            first_seg->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
        else if (pkt_flags & PKT_RX_FDIR) {
            first_seg->pkt.hash.fdir.hash =
                (uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
                           & IXGBE_ATR_HASH_MASK);
            first_seg->pkt.hash.fdir.id =
                rxd.wb.lower.hi_dword.csum_ip.ip_id;
        }

        /* Prefetch data of first segment, if configured to do so. */
        rte_packet_prefetch(first_seg->pkt.data);

        /*
         * Store the mbuf address into the next entry of the array
         * of returned packets.
         */
        rx_pkts[nb_rx++] = first_seg;

        /*
         * Setup receipt context for a new packet.
         */
        first_seg = NULL;
    }

    /*
     * Record index of the next RX descriptor to probe.
     */
    rxq->rx_tail = rx_id;

    /*
     * Save receive context.
     */
    rxq->pkt_first_seg = first_seg;
    rxq->pkt_last_seg = last_seg;

    /*
     * If the number of free RX descriptors is greater than the RX free
     * threshold of the queue, advance the Receive Descriptor Tail (RDT)
     * register.
     * Update the RDT with the value of the last processed RX descriptor
     * minus 1, to guarantee that the RDT register is never equal to the
     * RDH register, which creates a "full" ring situation from the
     * hardware point of view...
     */
    nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
    if (nb_hold > rxq->rx_free_thresh) {
        PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
                   "nb_hold=%u nb_rx=%u\n",
                   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
                   (unsigned) rx_id, (unsigned) nb_hold,
                   (unsigned) nb_rx);
        rx_id = (uint16_t) ((rx_id == 0) ?
                            (rxq->nb_rx_desc - 1) : (rx_id - 1));
        IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
        nb_hold = 0;
    }
    rxq->nb_rx_hold = nb_hold;
    return nb_rx;
}
/*********************************************************************
 *
 *  Queue management functions
 *
 **********************************************************************/

/*
 * Rings setup and release.
 *
 * TDBA/RDBA should be aligned on a 16-byte boundary, but TDLEN/RDLEN must be
 * a multiple of 128 bytes, so we align TDBA/RDBA on a 128-byte boundary.
 * This also optimizes cache line size effects: the hardware supports cache
 * line sizes up to 128 bytes.
 */
#define IXGBE_ALIGN 128

/*
 * Maximum number of Ring Descriptors.
 *
 * Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
 * descriptors should meet the following condition:
 *     (num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0
 */
#define IXGBE_MIN_RING_DESC 32
#define IXGBE_MAX_RING_DESC 4096
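/*
 * Note: an advanced RX/TX descriptor is a 16-byte union, so the
 * multiple-of-128-bytes rule above is equivalent to requiring the descriptor
 * count to be a multiple of 8. The setup functions below check the byte-size
 * condition directly rather than the derived count condition.
 */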
/*
 * Create memzone for HW rings. malloc can't be used as the physical address is
 * needed. If the memzone is already created, then this function returns a ptr
 * to the old one.
 */
static const struct rte_memzone *
ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
                      uint16_t queue_id, uint32_t ring_size, int socket_id)
{
    char z_name[RTE_MEMZONE_NAMESIZE];
    const struct rte_memzone *mz;

    rte_snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
                 dev->driver->pci_drv.name, ring_name,
                 dev->data->port_id, queue_id);

    mz = rte_memzone_lookup(z_name);
    if (mz)
        return mz;

#ifdef RTE_LIBRTE_XEN_DOM0
    return rte_memzone_reserve_bounded(z_name, ring_size,
        socket_id, 0, IXGBE_ALIGN, RTE_PGSIZE_2M);
#else
    return rte_memzone_reserve_aligned(z_name, ring_size,
        socket_id, 0, IXGBE_ALIGN);
#endif
}

static void
ixgbe_tx_queue_release_mbufs(struct igb_tx_queue *txq)
{
    unsigned i;

    if (txq->sw_ring != NULL) {
        for (i = 0; i < txq->nb_tx_desc; i++) {
            if (txq->sw_ring[i].mbuf != NULL) {
                rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
                txq->sw_ring[i].mbuf = NULL;
            }
        }
    }
}

static void
ixgbe_tx_free_swring(struct igb_tx_queue *txq)
{
    if (txq != NULL &&
        txq->sw_ring != NULL)
        rte_free(txq->sw_ring);
}

static void
ixgbe_tx_queue_release(struct igb_tx_queue *txq)
{
    if (txq != NULL && txq->ops != NULL) {
        txq->ops->release_mbufs(txq);
        txq->ops->free_swring(txq);
        rte_free(txq);
    }
}

static void
ixgbe_dev_tx_queue_release(void *txq)
{
    ixgbe_tx_queue_release(txq);
}

/* (Re)set dynamic igb_tx_queue fields to defaults */
static void
ixgbe_reset_tx_queue(struct igb_tx_queue *txq)
{
    static const union ixgbe_adv_tx_desc zeroed_desc = { .read = {
            .buffer_addr = 0} };
    struct igb_tx_entry *txe = txq->sw_ring;
    uint16_t prev, i;

    /* Zero out HW ring memory */
    for (i = 0; i < txq->nb_tx_desc; i++) {
        txq->tx_ring[i] = zeroed_desc;
    }

    /* Initialize SW ring entries */
    prev = (uint16_t) (txq->nb_tx_desc - 1);
    for (i = 0; i < txq->nb_tx_desc; i++) {
        volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];

        txd->wb.status = IXGBE_TXD_STAT_DD;
        txe[i].mbuf = NULL;
        txe[i].last_id = i;
        txe[prev].next_id = i;
        prev = i;
    }

    txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
    txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

    txq->tx_tail = 0;
    txq->nb_tx_used = 0;
    /*
     * Always allow 1 descriptor to be un-allocated to avoid
     * a H/W race condition
     */
    txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
    txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
    txq->ctx_curr = 0;
    memset((void *)&txq->ctx_cache, 0,
           IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
}

static struct ixgbe_txq_ops def_txq_ops = {
    .release_mbufs = ixgbe_tx_queue_release_mbufs,
    .free_swring = ixgbe_tx_free_swring,
    .reset = ixgbe_reset_tx_queue,
};
int
ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
                         uint16_t queue_idx,
                         uint16_t nb_desc,
                         unsigned int socket_id,
                         const struct rte_eth_txconf *tx_conf)
{
    const struct rte_memzone *tz;
    struct igb_tx_queue *txq;
    struct ixgbe_hw *hw;
    uint16_t tx_rs_thresh, tx_free_thresh;

    PMD_INIT_FUNC_TRACE();
    hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

    /*
     * Validate number of transmit descriptors.
     * It must not exceed hardware maximum, and must be a multiple
     * of IXGBE_ALIGN.
     */
    if (((nb_desc * sizeof(union ixgbe_adv_tx_desc)) % IXGBE_ALIGN) != 0 ||
        (nb_desc > IXGBE_MAX_RING_DESC) ||
        (nb_desc < IXGBE_MIN_RING_DESC)) {
        return -EINVAL;
    }

    /*
     * The following two parameters control the setting of the RS bit on
     * transmit descriptors.
     * TX descriptors will have their RS bit set after txq->tx_rs_thresh
     * descriptors have been used.
     * The TX descriptor ring will be cleaned after txq->tx_free_thresh
     * descriptors are used or if the number of descriptors required
     * to transmit a packet is greater than the number of free TX
     * descriptors.
     * The following constraints must be satisfied:
     *  - tx_rs_thresh must be greater than 0.
     *  - tx_rs_thresh must be less than the size of the ring minus 2.
     *  - tx_rs_thresh must be less than or equal to tx_free_thresh.
     *  - tx_rs_thresh must be a divisor of the ring size.
     *  - tx_free_thresh must be greater than 0.
     *  - tx_free_thresh must be less than the size of the ring minus 3.
     * One descriptor in the TX ring is used as a sentinel to avoid a
     * H/W race condition, hence the maximum threshold constraints.
     * When set to zero use default values.
     */
    tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
            tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
    tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
            tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
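    /*
     * Example with the defaults: a 512-descriptor ring with both thresholds
     * left at 0 gets tx_rs_thresh = tx_free_thresh = 32, so the RS bit is
     * requested roughly once every 32 descriptors and cleanup is triggered
     * once about 32 descriptors are in use, per the rules described above.
     * 512 is also a multiple of 32, satisfying the divisor constraint
     * checked below.
     */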
    if (tx_rs_thresh >= (nb_desc - 2)) {
        RTE_LOG(ERR, PMD, "tx_rs_thresh must be less than the number "
            "of TX descriptors minus 2. (tx_rs_thresh=%u port=%d "
            "queue=%d)\n", (unsigned int)tx_rs_thresh,
            (int)dev->data->port_id, (int)queue_idx);
        return -EINVAL;
    }
    if (tx_free_thresh >= (nb_desc - 3)) {
        RTE_LOG(ERR, PMD, "tx_free_thresh must be less than the "
            "number of TX descriptors minus 3. (tx_free_thresh=%u "
            "port=%d queue=%d)\n", (unsigned int)tx_free_thresh,
            (int)dev->data->port_id, (int)queue_idx);
        return -EINVAL;
    }
    if (tx_rs_thresh > tx_free_thresh) {
        RTE_LOG(ERR, PMD, "tx_rs_thresh must be less than or equal to "
            "tx_free_thresh. (tx_free_thresh=%u tx_rs_thresh=%u "
            "port=%d queue=%d)\n", (unsigned int)tx_free_thresh,
            (unsigned int)tx_rs_thresh, (int)dev->data->port_id,
            (int)queue_idx);
        return -EINVAL;
    }
    if ((nb_desc % tx_rs_thresh) != 0) {
        RTE_LOG(ERR, PMD, "tx_rs_thresh must be a divisor of the "
            "number of TX descriptors. (tx_rs_thresh=%u port=%d "
            "queue=%d)\n", (unsigned int)tx_rs_thresh,
            (int)dev->data->port_id, (int)queue_idx);
        return -EINVAL;
    }

    /*
     * If tx_rs_thresh is greater than 1, then TX WTHRESH should be
     * set to 0. If WTHRESH is greater than zero, the RS bit is ignored
     * by the NIC and all descriptors are written back after the NIC
     * accumulates WTHRESH descriptors.
     */
    if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
        RTE_LOG(ERR, PMD, "TX WTHRESH must be set to 0 if "
            "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
            "port=%d queue=%d)\n", (unsigned int)tx_rs_thresh,
            (int)dev->data->port_id, (int)queue_idx);
        return -EINVAL;
    }

    /* Free memory prior to re-allocation if needed... */
    if (dev->data->tx_queues[queue_idx] != NULL) {
        ixgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
        dev->data->tx_queues[queue_idx] = NULL;
    }

    /* First allocate the tx queue data structure */
    txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct igb_tx_queue),
                             CACHE_LINE_SIZE, socket_id);
    if (txq == NULL)
        return -ENOMEM;

    /*
     * Allocate TX ring hardware descriptors. A memzone large enough to
     * handle the maximum ring size is allocated in order to allow for
     * resizing in later calls to the queue setup function.
     */
    tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
            sizeof(union ixgbe_adv_tx_desc) * IXGBE_MAX_RING_DESC,
            socket_id);
    if (tz == NULL) {
        ixgbe_tx_queue_release(txq);
        return -ENOMEM;
    }

    txq->nb_tx_desc = nb_desc;
    txq->tx_rs_thresh = tx_rs_thresh;
    txq->tx_free_thresh = tx_free_thresh;
    txq->pthresh = tx_conf->tx_thresh.pthresh;
    txq->hthresh = tx_conf->tx_thresh.hthresh;
    txq->wthresh = tx_conf->tx_thresh.wthresh;
    txq->queue_id = queue_idx;
    txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
        queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
    txq->port_id = dev->data->port_id;
    txq->txq_flags = tx_conf->txq_flags;
    txq->ops = &def_txq_ops;
    txq->start_tx_per_q = tx_conf->start_tx_per_q;

    /*
     * Modification to set VFTDT for virtual function if vf is detected
     */
    if (hw->mac.type == ixgbe_mac_82599_vf)
        txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx));
    else
        txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));

#ifndef RTE_LIBRTE_XEN_DOM0
    txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
#else
    txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
#endif
    txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;

    /* Allocate software ring */
    txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
                                      sizeof(struct igb_tx_entry) * nb_desc,
                                      CACHE_LINE_SIZE, socket_id);
    if (txq->sw_ring == NULL) {
        ixgbe_tx_queue_release(txq);
        return -ENOMEM;
    }
    PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
                 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);

    /* Use a simple Tx queue (no offloads, no multi segs) if possible */
    if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) &&
        (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
        PMD_INIT_LOG(INFO, "Using simple tx code path\n");
#ifdef RTE_IXGBE_INC_VECTOR
        if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
            ixgbe_txq_vec_setup(txq, socket_id) == 0) {
            PMD_INIT_LOG(INFO, "Vector tx enabled.\n");
            dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
        } else
#endif
            dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
    } else {
        PMD_INIT_LOG(INFO, "Using full-featured tx code path\n");
        PMD_INIT_LOG(INFO, " - txq_flags = %lx [IXGBE_SIMPLE_FLAGS=%lx]\n",
                     (long unsigned)txq->txq_flags,
                     (long unsigned)IXGBE_SIMPLE_FLAGS);
        PMD_INIT_LOG(INFO, " - tx_rs_thresh = %lu [RTE_PMD_IXGBE_TX_MAX_BURST=%lu]\n",
                     (long unsigned)txq->tx_rs_thresh,
                     (long unsigned)RTE_PMD_IXGBE_TX_MAX_BURST);
        dev->tx_pkt_burst = ixgbe_xmit_pkts;
    }

    txq->ops->reset(txq);

    dev->data->tx_queues[queue_idx] = txq;

    return 0;
}
static void
ixgbe_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
{
    unsigned i;

    if (rxq->sw_ring != NULL) {
        for (i = 0; i < rxq->nb_rx_desc; i++) {
            if (rxq->sw_ring[i].mbuf != NULL) {
                rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
                rxq->sw_ring[i].mbuf = NULL;
            }
        }
#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
        if (rxq->rx_nb_avail) {
            for (i = 0; i < rxq->rx_nb_avail; ++i) {
                struct rte_mbuf *mb;

                mb = rxq->rx_stage[rxq->rx_next_avail + i];
                rte_pktmbuf_free_seg(mb);
            }
            rxq->rx_nb_avail = 0;
        }
#endif
    }
}

static void
ixgbe_rx_queue_release(struct igb_rx_queue *rxq)
{
    if (rxq != NULL) {
        ixgbe_rx_queue_release_mbufs(rxq);
        rte_free(rxq->sw_ring);
        rte_free(rxq);
    }
}

static void
ixgbe_dev_rx_queue_release(void *rxq)
{
    ixgbe_rx_queue_release(rxq);
}

/*
 * Check if the Rx Burst Bulk Alloc function can be used.
 * Return
 *        0: the preconditions are satisfied and the bulk allocation function
 *           can be used.
 *  -EINVAL: the preconditions are NOT satisfied and the default Rx burst
 *           function must be used.
 */
static inline int
#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
check_rx_burst_bulk_alloc_preconditions(struct igb_rx_queue *rxq)
#else
check_rx_burst_bulk_alloc_preconditions(__rte_unused struct igb_rx_queue *rxq)
#endif
{
    int ret = 0;

    /*
     * Make sure the following pre-conditions are satisfied:
     *   rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST
     *   rxq->rx_free_thresh < rxq->nb_rx_desc
     *   (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
     *   rxq->nb_rx_desc < (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST)
     * Scattered packets are not supported. This should be checked
     * outside of this function.
     */
#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
    if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST))
        ret = -EINVAL;
    else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc))
        ret = -EINVAL;
    else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0))
        ret = -EINVAL;
    else if (!(rxq->nb_rx_desc <
               (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST)))
        ret = -EINVAL;
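    /*
     * For example, nb_rx_desc = 128 with rx_free_thresh = 32 satisfies all
     * of the conditions listed above (assuming RTE_PMD_IXGBE_RX_MAX_BURST
     * is 32): 32 >= 32, 32 < 128, 128 % 32 == 0 and
     * 128 < IXGBE_MAX_RING_DESC - 32.
     */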
/* Reset dynamic igb_rx_queue fields back to defaults */
static void
ixgbe_reset_rx_queue(struct igb_rx_queue *rxq)
{
    static const union ixgbe_adv_rx_desc zeroed_desc = { .read = {
            .pkt_addr = 0} };
    unsigned i;
    uint16_t len;

    /*
     * By default, the Rx queue setup function allocates enough memory for
     * IXGBE_MAX_RING_DESC. The Rx Burst bulk allocation function requires
     * extra memory at the end of the descriptor ring to be zero'd out. A
     * pre-condition for using the Rx burst bulk alloc function is that the
     * number of descriptors is less than or equal to
     * (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST). Check all the
     * constraints here to see if we need to zero out memory after the end
     * of the H/W descriptor ring.
     */
#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
    if (check_rx_burst_bulk_alloc_preconditions(rxq) == 0)
        /* zero out extra memory */
        len = (uint16_t)(rxq->nb_rx_desc + RTE_PMD_IXGBE_RX_MAX_BURST);
    else
#endif
        /* do not zero out extra memory */
        len = rxq->nb_rx_desc;

    /*
     * Zero out HW ring memory. Zero out extra memory at the end of
     * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
     * reads extra memory as zeros.
     */
    for (i = 0; i < len; i++) {
        rxq->rx_ring[i] = zeroed_desc;
    }

#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
    /*
     * initialize extra software ring entries. Space for these extra
     * entries is always allocated
     */
    memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
    for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST; ++i) {
        rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
    }

    rxq->rx_nb_avail = 0;
    rxq->rx_next_avail = 0;
    rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
#endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */

    rxq->rx_tail = 0;
    rxq->nb_rx_hold = 0;
    rxq->pkt_first_seg = NULL;
    rxq->pkt_last_seg = NULL;
}
2046 ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
2049 unsigned int socket_id,
2050 const struct rte_eth_rxconf *rx_conf,
2051 struct rte_mempool *mp)
2053 const struct rte_memzone *rz;
2054 struct igb_rx_queue *rxq;
2055 struct ixgbe_hw *hw;
2056 int use_def_burst_func = 1;
2059 PMD_INIT_FUNC_TRACE();
2060 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2063 * Validate number of receive descriptors.
2064 * It must not exceed hardware maximum, and must be multiple
2067 if (((nb_desc * sizeof(union ixgbe_adv_rx_desc)) % IXGBE_ALIGN) != 0 ||
2068 (nb_desc > IXGBE_MAX_RING_DESC) ||
2069 (nb_desc < IXGBE_MIN_RING_DESC)) {
2073 /* Free memory prior to re-allocation if needed... */
2074 if (dev->data->rx_queues[queue_idx] != NULL) {
2075 ixgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
2076 dev->data->rx_queues[queue_idx] = NULL;
2079 /* First allocate the rx queue data structure */
2080 rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct igb_rx_queue),
2081 CACHE_LINE_SIZE, socket_id);
2085 rxq->nb_rx_desc = nb_desc;
2086 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
2087 rxq->queue_id = queue_idx;
2088 rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2089 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2090 rxq->port_id = dev->data->port_id;
2091 rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
2093 rxq->drop_en = rx_conf->rx_drop_en;
2094 rxq->start_rx_per_q = rx_conf->start_rx_per_q;
2097 * Allocate RX ring hardware descriptors. A memzone large enough to
2098 * handle the maximum ring size is allocated in order to allow for
2099 * resizing in later calls to the queue setup function.
2101 rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx,
2102 RX_RING_SZ, socket_id);
2104 ixgbe_rx_queue_release(rxq);
2109 * Zero init all the descriptors in the ring.
memset(rz->addr, 0, RX_RING_SZ);
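/*
 * Illustrative sizing note (assumption, not taken from the code above):
 * with IXGBE_MAX_RING_DESC = 4096 and a 16-byte union ixgbe_adv_rx_desc,
 * the reserved zone is roughly 4096 * 16 = 64 KB, so a later setup call
 * that asks for a larger nb_desc can reuse the same memzone.
 */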
2114 * Modified to setup VFRDT for Virtual Function
2116 if (hw->mac.type == ixgbe_mac_82599_vf) {
2118 IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx));
2120 IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDH(queue_idx));
2124 IXGBE_PCI_REG_ADDR(hw, IXGBE_RDT(rxq->reg_idx));
2126 IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx));
2128 #ifndef RTE_LIBRTE_XEN_DOM0
2129 rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
2131 rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
2133 rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
2136 * Allocate software ring. Allow for space at the end of the
2137 * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
2138 * function does not access an invalid memory region.
2140 #ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
2141 len = (uint16_t)(nb_desc + RTE_PMD_IXGBE_RX_MAX_BURST);
2145 rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
2146 sizeof(struct igb_rx_entry) * len,
2147 CACHE_LINE_SIZE, socket_id);
2148 if (rxq->sw_ring == NULL) {
2149 ixgbe_rx_queue_release(rxq);
2152 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
2153 rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
2156 * Certain constraints must be met in order to use the bulk buffer
2157 * allocation Rx burst function.
2159 use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq);
2161 /* Check if pre-conditions are satisfied, and no Scattered Rx */
2162 if (!use_def_burst_func && !dev->data->scattered_rx) {
2163 #ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
2164 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
2165 "satisfied. Rx Burst Bulk Alloc function will be "
2166 "used on port=%d, queue=%d.\n",
2167 rxq->port_id, rxq->queue_id);
2168 dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
2169 #ifdef RTE_IXGBE_INC_VECTOR
2170 if (!ixgbe_rx_vec_condition_check(dev)) {
PMD_INIT_LOG(INFO, "Vector RX enabled, please make "
"sure the RX burst size is no less than 32.\n");
2173 ixgbe_rxq_vec_setup(rxq, socket_id);
2174 dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
2179 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions "
2180 "are not satisfied, Scattered Rx is requested, "
2181 "or RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC is not "
2182 "enabled (port=%d, queue=%d).\n",
2183 rxq->port_id, rxq->queue_id);
2185 dev->data->rx_queues[queue_idx] = rxq;
2187 ixgbe_reset_rx_queue(rxq);
2193 ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
2195 #define IXGBE_RXQ_SCAN_INTERVAL 4
2196 volatile union ixgbe_adv_rx_desc *rxdp;
2197 struct igb_rx_queue *rxq;
2200 if (rx_queue_id >= dev->data->nb_rx_queues) {
2201 PMD_RX_LOG(ERR, "Invalid RX queue id=%d\n", rx_queue_id);
2205 rxq = dev->data->rx_queues[rx_queue_id];
2206 rxdp = &(rxq->rx_ring[rxq->rx_tail]);
2208 while ((desc < rxq->nb_rx_desc) &&
2209 (rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD)) {
2210 desc += IXGBE_RXQ_SCAN_INTERVAL;
2211 rxdp += IXGBE_RXQ_SCAN_INTERVAL;
2212 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
2213 rxdp = &(rxq->rx_ring[rxq->rx_tail +
2214 desc - rxq->nb_rx_desc]);
2221 ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
2223 volatile union ixgbe_adv_rx_desc *rxdp;
2224 struct igb_rx_queue *rxq = rx_queue;
2227 if (unlikely(offset >= rxq->nb_rx_desc))
2229 desc = rxq->rx_tail + offset;
2230 if (desc >= rxq->nb_rx_desc)
2231 desc -= rxq->nb_rx_desc;
2233 rxdp = &rxq->rx_ring[desc];
2234 return !!(rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD);
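/*
 * Illustrative usage sketch (application side, not driver code): the helper
 * above backs the generic rte_eth_rx_descriptor_done() call, which lets an
 * application check whether the descriptor 'offset' entries beyond the next
 * one to be processed has already been written back by the NIC.
 */
static inline int
example_rx_desc_ready(uint8_t port_id, uint16_t queue_id, uint16_t offset)
{
	/* 1 if the descriptor is done, 0 if not, negative on error */
	return rte_eth_rx_descriptor_done(port_id, queue_id, offset);
}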
2238 ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
2242 PMD_INIT_FUNC_TRACE();
2244 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2245 struct igb_tx_queue *txq = dev->data->tx_queues[i];
2247 txq->ops->release_mbufs(txq);
2248 txq->ops->reset(txq);
2252 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2253 struct igb_rx_queue *rxq = dev->data->rx_queues[i];
2255 ixgbe_rx_queue_release_mbufs(rxq);
2256 ixgbe_reset_rx_queue(rxq);
2261 /*********************************************************************
2263 * Device RX/TX init functions
2265 **********************************************************************/
2268 * Receive Side Scaling (RSS)
2269 * See section 7.1.2.8 in the following document:
2270 * "Intel 82599 10 GbE Controller Datasheet" - Revision 2.1 October 2009
2273 * The source and destination IP addresses of the IP header and the source
2274 * and destination ports of TCP/UDP headers, if any, of received packets are
2275 * hashed against a configurable random key to compute a 32-bit RSS hash result.
2276 * The seven (7) LSBs of the 32-bit hash result are used as an index into a
2277 * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
* RSS output index, which is used as the RX queue index in which to store the
* received packet.
2280 * The following output is supplied in the RX write-back descriptor:
2281 * - 32-bit result of the Microsoft RSS hash function,
2282 * - 4-bit RSS type field.
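/*
 * Illustrative sketch (not part of the driver): how the RETA lookup described
 * above turns a 32-bit RSS hash into an RX queue index. The 7 LSBs of the
 * hash select one of the 128 RETA entries, and the entry value is the queue.
 */
static inline uint8_t
example_rss_hash_to_queue(uint32_t rss_hash, const uint8_t reta[128])
{
	return reta[rss_hash & 0x7F]; /* 7 LSBs -> RETA entry -> RX queue */
}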
2286 * RSS random key supplied in section 7.1.2.8.3 of the Intel 82599 datasheet.
2287 * Used as the default key.
2289 static uint8_t rss_intel_key[40] = {
2290 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
2291 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
2292 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
2293 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
2294 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
2298 ixgbe_rss_disable(struct rte_eth_dev *dev)
2300 struct ixgbe_hw *hw;
2303 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2304 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
2305 mrqc &= ~IXGBE_MRQC_RSSEN;
2306 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2310 ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
2318 hash_key = rss_conf->rss_key;
2319 if (hash_key != NULL) {
2320 /* Fill in RSS hash key */
2321 for (i = 0; i < 10; i++) {
2322 rss_key = hash_key[(i * 4)];
2323 rss_key |= hash_key[(i * 4) + 1] << 8;
2324 rss_key |= hash_key[(i * 4) + 2] << 16;
2325 rss_key |= hash_key[(i * 4) + 3] << 24;
2326 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RSSRK(0), i, rss_key);
2330 /* Set configured hashing protocols in MRQC register */
2331 rss_hf = rss_conf->rss_hf;
2332 mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */
2333 if (rss_hf & ETH_RSS_IPV4)
2334 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
2335 if (rss_hf & ETH_RSS_IPV4_TCP)
2336 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
2337 if (rss_hf & ETH_RSS_IPV6)
2338 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
2339 if (rss_hf & ETH_RSS_IPV6_EX)
2340 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
2341 if (rss_hf & ETH_RSS_IPV6_TCP)
2342 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
2343 if (rss_hf & ETH_RSS_IPV6_TCP_EX)
2344 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
2345 if (rss_hf & ETH_RSS_IPV4_UDP)
2346 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
2347 if (rss_hf & ETH_RSS_IPV6_UDP)
2348 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
2349 if (rss_hf & ETH_RSS_IPV6_UDP_EX)
2350 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2351 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2355 ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
2356 struct rte_eth_rss_conf *rss_conf)
2358 struct ixgbe_hw *hw;
2362 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2365 * Excerpt from section 7.1.2.8 Receive-Side Scaling (RSS):
2366 * "RSS enabling cannot be done dynamically while it must be
2367 * preceded by a software reset"
2368 * Before changing anything, first check that the update RSS operation
2369 * does not attempt to disable RSS, if RSS was enabled at
2370 * initialization time, or does not attempt to enable RSS, if RSS was
2371 * disabled at initialization time.
2373 rss_hf = rss_conf->rss_hf & IXGBE_RSS_OFFLOAD_ALL;
2374 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
2375 if (!(mrqc & IXGBE_MRQC_RSSEN)) { /* RSS disabled */
2376 if (rss_hf != 0) /* Enable RSS */
2378 return 0; /* Nothing to do */
2381 if (rss_hf == 0) /* Disable RSS */
2383 ixgbe_hw_rss_hash_set(hw, rss_conf);
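/*
 * Illustrative usage sketch (application side, not driver code): updating the
 * hash protocols at runtime through the generic ethdev API, which ends up in
 * ixgbe_dev_rss_hash_update() above. Per the constraint described above, the
 * call only succeeds if it neither enables nor disables RSS relative to the
 * configuration chosen at initialization time.
 */
static inline int
example_update_rss_hf(uint8_t port_id)
{
	struct rte_eth_rss_conf conf = {
		.rss_key = NULL,	/* keep the currently programmed key */
		.rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV4_TCP,
	};

	return rte_eth_dev_rss_hash_update(port_id, &conf);
}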
2388 ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
2389 struct rte_eth_rss_conf *rss_conf)
2391 struct ixgbe_hw *hw;
2398 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2399 hash_key = rss_conf->rss_key;
2400 if (hash_key != NULL) {
2401 /* Return RSS hash key */
2402 for (i = 0; i < 10; i++) {
2403 rss_key = IXGBE_READ_REG_ARRAY(hw, IXGBE_RSSRK(0), i);
2404 hash_key[(i * 4)] = rss_key & 0x000000FF;
2405 hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
2406 hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
2407 hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
2411 /* Get RSS functions configured in MRQC register */
2412 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
2413 if ((mrqc & IXGBE_MRQC_RSSEN) == 0) { /* RSS is disabled */
2414 rss_conf->rss_hf = 0;
2418 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4)
2419 rss_hf |= ETH_RSS_IPV4;
2420 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP)
2421 rss_hf |= ETH_RSS_IPV4_TCP;
2422 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6)
2423 rss_hf |= ETH_RSS_IPV6;
2424 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX)
2425 rss_hf |= ETH_RSS_IPV6_EX;
2426 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP)
2427 rss_hf |= ETH_RSS_IPV6_TCP;
2428 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP)
2429 rss_hf |= ETH_RSS_IPV6_TCP_EX;
2430 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP)
2431 rss_hf |= ETH_RSS_IPV4_UDP;
2432 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP)
2433 rss_hf |= ETH_RSS_IPV6_UDP;
2434 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP)
2435 rss_hf |= ETH_RSS_IPV6_UDP_EX;
2436 rss_conf->rss_hf = rss_hf;
2441 ixgbe_rss_configure(struct rte_eth_dev *dev)
2443 struct rte_eth_rss_conf rss_conf;
2444 struct ixgbe_hw *hw;
2449 PMD_INIT_FUNC_TRACE();
2450 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2453 * Fill in redirection table
2454 * The byte-swap is needed because NIC registers are in
2455 * little-endian order.
2458 for (i = 0, j = 0; i < 128; i++, j++) {
2459 if (j == dev->data->nb_rx_queues)
2461 reta = (reta << 8) | j;
if ((i & 3) == 3)
IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), rte_bswap32(reta));
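/*
 * Worked example (illustrative): with 4 RX queues the entries cycle
 * 0,1,2,3,0,1,... and every fourth iteration packs four 8-bit entries into
 * one 32-bit RETA register, e.g. the first register holds
 * (0 << 24) | (1 << 16) | (2 << 8) | 3 = 0x00010203 before the byte-swap
 * mentioned above.
 */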
2468 * Configure the RSS key and the RSS protocols used to compute
2469 * the RSS hash of input packets.
2471 rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
2472 if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
2473 ixgbe_rss_disable(dev);
2476 if (rss_conf.rss_key == NULL)
2477 rss_conf.rss_key = rss_intel_key; /* Default hash key */
2478 ixgbe_hw_rss_hash_set(hw, &rss_conf);
2481 #define NUM_VFTA_REGISTERS 128
2482 #define NIC_RX_BUFFER_SIZE 0x200
2485 ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
2487 struct rte_eth_vmdq_dcb_conf *cfg;
2488 struct ixgbe_hw *hw;
2489 enum rte_eth_nb_pools num_pools;
2490 uint32_t mrqc, vt_ctl, queue_mapping, vlanctrl;
2492 uint8_t nb_tcs; /* number of traffic classes */
2495 PMD_INIT_FUNC_TRACE();
2496 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2497 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
2498 num_pools = cfg->nb_queue_pools;
2499 /* Check we have a valid number of pools */
2500 if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
2501 ixgbe_rss_disable(dev);
2504 /* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
2505 nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
2509 * split rx buffer up into sections, each for 1 traffic class
2511 pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
2512 for (i = 0 ; i < nb_tcs; i++) {
2513 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
2514 rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
2515 /* clear 10 bits. */
2516 rxpbsize |= (pbsize << IXGBE_RXPBSIZE_SHIFT); /* set value */
2517 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
2519 /* zero alloc all unused TCs */
2520 for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2521 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
2523 /* clear 10 bits. */
2524 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
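/*
 * Worked example (illustrative): with NIC_RX_BUFFER_SIZE = 0x200 (512 KB of
 * RX packet buffer) and 16 pools (8 TCs), each enabled TC gets pbsize = 64 KB,
 * programmed as 64 << IXGBE_RXPBSIZE_SHIFT; any remaining RXPBSIZE registers
 * are cleared to 0 by the loop above.
 */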
2527 /* MRQC: enable vmdq and dcb */
mrqc = (num_pools == ETH_16_POOLS) ?
IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN;
2530 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2532 /* PFVTCTL: turn on virtualisation and set the default pool */
2533 vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2534 if (cfg->enable_default_pool) {
2535 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
2537 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
2540 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
2542 /* RTRUP2TC: mapping user priorities to traffic classes (TCs) */
2544 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
2546 * mapping is done with 3 bits per priority,
2547 * so shift by i*3 each time
2549 queue_mapping |= ((cfg->dcb_queue[i] & 0x07) << (i * 3));
2551 IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, queue_mapping);
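/*
 * Worked example (illustrative): with dcb_queue[] = {0,0,0,0,1,1,1,1} the
 * packing above yields queue_mapping = 0x00249000, i.e. user priorities 0-3
 * map to TC0 and priorities 4-7 map to TC1, three bits per priority.
 */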
2553 /* RTRPCS: DCB related */
2554 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, IXGBE_RMCS_RRM);
2556 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
2557 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
2559 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
2561 /* VFTA - enable all vlan filters */
2562 for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
2563 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
2566 /* VFRE: pool enabling for receive - 16 or 32 */
2567 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), \
2568 num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
2571 * MPSAR - allow pools to read specific mac addresses
2572 * In this case, all pools should be able to read from mac addr 0
2574 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), 0xFFFFFFFF);
2575 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), 0xFFFFFFFF);
2577 /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
2578 for (i = 0; i < cfg->nb_pool_maps; i++) {
2579 /* set vlan id in VF register and set the valid bit */
2580 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \
2581 (cfg->pool_map[i].vlan_id & 0xFFF)));
2583 * Put the allowed pools in VFB reg. As we only have 16 or 32
2584 * pools, we only need to use the first half of the register
2587 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), cfg->pool_map[i].pools);
* ixgbe_dcb_tx_hw_config - Configure general DCB TX parameters
2593 * @hw: pointer to hardware structure
2594 * @dcb_config: pointer to ixgbe_dcb_config structure
2597 ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw,
2598 struct ixgbe_dcb_config *dcb_config)
2603 PMD_INIT_FUNC_TRACE();
2604 if (hw->mac.type != ixgbe_mac_82598EB) {
2605 /* Disable the Tx desc arbiter so that MTQC can be changed */
2606 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2607 reg |= IXGBE_RTTDCS_ARBDIS;
2608 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
2610 /* Enable DCB for Tx with 8 TCs */
2611 if (dcb_config->num_tcs.pg_tcs == 8) {
2612 reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
2615 reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
2617 if (dcb_config->vt_mode)
2618 reg |= IXGBE_MTQC_VT_ENA;
2619 IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
2621 /* Disable drop for all queues */
2622 for (q = 0; q < 128; q++)
2623 IXGBE_WRITE_REG(hw, IXGBE_QDE,
2624 (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
2626 /* Enable the Tx desc arbiter */
2627 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2628 reg &= ~IXGBE_RTTDCS_ARBDIS;
2629 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
2631 /* Enable Security TX Buffer IFG for DCB */
2632 reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
2633 reg |= IXGBE_SECTX_DCB;
2634 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
2640 * ixgbe_vmdq_dcb_hw_tx_config - Configure general VMDQ+DCB TX parameters
2641 * @dev: pointer to rte_eth_dev structure
2642 * @dcb_config: pointer to ixgbe_dcb_config structure
2645 ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
2646 struct ixgbe_dcb_config *dcb_config)
2648 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2649 &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
2650 struct ixgbe_hw *hw =
2651 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2653 PMD_INIT_FUNC_TRACE();
2654 if (hw->mac.type != ixgbe_mac_82598EB)
2655 /*PF VF Transmit Enable*/
2656 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0),
2657 vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
/* Configure general DCB TX parameters */
ixgbe_dcb_tx_hw_config(hw, dcb_config);
2665 ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
2666 struct ixgbe_dcb_config *dcb_config)
2668 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2669 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
2670 struct ixgbe_dcb_tc_config *tc;
2673 /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) {
2675 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
2676 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
2679 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
2680 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
2682 /* User Priority to Traffic Class mapping */
2683 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2684 j = vmdq_rx_conf->dcb_queue[i];
2685 tc = &dcb_config->tc_config[j];
2686 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
2692 ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
2693 struct ixgbe_dcb_config *dcb_config)
2695 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2696 &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
2697 struct ixgbe_dcb_tc_config *tc;
2700 /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) {
2702 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
2703 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
2706 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
2707 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
2710 /* User Priority to Traffic Class mapping */
2711 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2712 j = vmdq_tx_conf->dcb_queue[i];
2713 tc = &dcb_config->tc_config[j];
2714 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
2721 ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
2722 struct ixgbe_dcb_config *dcb_config)
2724 struct rte_eth_dcb_rx_conf *rx_conf =
2725 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
2726 struct ixgbe_dcb_tc_config *tc;
2729 dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
2730 dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
2732 /* User Priority to Traffic Class mapping */
2733 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2734 j = rx_conf->dcb_queue[i];
2735 tc = &dcb_config->tc_config[j];
2736 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
2742 ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
2743 struct ixgbe_dcb_config *dcb_config)
2745 struct rte_eth_dcb_tx_conf *tx_conf =
2746 &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
2747 struct ixgbe_dcb_tc_config *tc;
2750 dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
2751 dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
2753 /* User Priority to Traffic Class mapping */
2754 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2755 j = tx_conf->dcb_queue[i];
2756 tc = &dcb_config->tc_config[j];
2757 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
2763 * ixgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
2764 * @hw: pointer to hardware structure
2765 * @dcb_config: pointer to ixgbe_dcb_config structure
2768 ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
2769 struct ixgbe_dcb_config *dcb_config)
2775 PMD_INIT_FUNC_TRACE();
2777 * Disable the arbiter before changing parameters
2778 * (always enable recycle mode; WSP)
2780 reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
2781 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
2783 if (hw->mac.type != ixgbe_mac_82598EB) {
2784 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
2785 if (dcb_config->num_tcs.pg_tcs == 4) {
2786 if (dcb_config->vt_mode)
2787 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
2788 IXGBE_MRQC_VMDQRT4TCEN;
2790 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
2791 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
2795 if (dcb_config->num_tcs.pg_tcs == 8) {
2796 if (dcb_config->vt_mode)
2797 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
2798 IXGBE_MRQC_VMDQRT8TCEN;
2800 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
2801 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
2806 IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
2809 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
2810 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
2812 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
2814 /* VFTA - enable all vlan filters */
2815 for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
2816 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
2820 * Configure Rx packet plane (recycle mode; WSP) and
2823 reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
2824 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
2830 ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill,
uint16_t *max, uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
2833 switch (hw->mac.type) {
2834 case ixgbe_mac_82598EB:
2835 ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
2837 case ixgbe_mac_82599EB:
2838 case ixgbe_mac_X540:
2839 ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
2848 ixgbe_dcb_hw_arbite_tx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *max,
2849 uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
2851 switch (hw->mac.type) {
2852 case ixgbe_mac_82598EB:
ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, tsa);
ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, tsa);
2856 case ixgbe_mac_82599EB:
2857 case ixgbe_mac_X540:
ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, tsa);
ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, tsa, map);
2866 #define DCB_RX_CONFIG 1
2867 #define DCB_TX_CONFIG 1
2868 #define DCB_TX_PB 1024
2870 * ixgbe_dcb_hw_configure - Enable DCB and configure
2871 * general DCB in VT mode and non-VT mode parameters
2872 * @dev: pointer to rte_eth_dev structure
2873 * @dcb_config: pointer to ixgbe_dcb_config structure
2876 ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
2877 struct ixgbe_dcb_config *dcb_config)
uint8_t i, pfc_en, nb_tcs;
2882 uint8_t config_dcb_rx = 0;
2883 uint8_t config_dcb_tx = 0;
2884 uint8_t tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
2885 uint8_t bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
2886 uint16_t refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
2887 uint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
2888 uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
2889 struct ixgbe_dcb_tc_config *tc;
2890 uint32_t max_frame = dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2891 struct ixgbe_hw *hw =
2892 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
switch (dev->data->dev_conf.rxmode.mq_mode) {
2895 case ETH_MQ_RX_VMDQ_DCB:
2896 dcb_config->vt_mode = true;
2897 if (hw->mac.type != ixgbe_mac_82598EB) {
2898 config_dcb_rx = DCB_RX_CONFIG;
/* get DCB and VT RX configuration parameters from rte_eth_conf */
ixgbe_vmdq_dcb_rx_config(dev, dcb_config);
/* Configure general VMDQ and DCB RX parameters */
ixgbe_vmdq_dcb_configure(dev);
2909 dcb_config->vt_mode = false;
2910 config_dcb_rx = DCB_RX_CONFIG;
/* Get DCB RX configuration parameters from rte_eth_conf */
ixgbe_dcb_rx_config(dev, dcb_config);
/* Configure general DCB RX parameters */
2914 ixgbe_dcb_rx_hw_config(hw, dcb_config);
2917 PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration\n");
2920 switch (dev->data->dev_conf.txmode.mq_mode) {
2921 case ETH_MQ_TX_VMDQ_DCB:
2922 dcb_config->vt_mode = true;
2923 config_dcb_tx = DCB_TX_CONFIG;
/* get DCB and VT TX configuration parameters from rte_eth_conf */
ixgbe_dcb_vt_tx_config(dev, dcb_config);
/* Configure general VMDQ and DCB TX parameters */
ixgbe_vmdq_dcb_hw_tx_config(dev, dcb_config);
2931 dcb_config->vt_mode = false;
2932 config_dcb_tx = DCB_TX_CONFIG;
/* get DCB TX configuration parameters from rte_eth_conf */
ixgbe_dcb_tx_config(dev, dcb_config);
/* Configure general DCB TX parameters */
ixgbe_dcb_tx_hw_config(hw, dcb_config);
2939 PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration\n");
2943 nb_tcs = dcb_config->num_tcs.pfc_tcs;
2945 ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
if (nb_tcs == ETH_4_TCS) {
2947 /* Avoid un-configured priority mapping to TC0 */
2949 uint8_t mask = 0xFF;
2950 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
2951 mask = (uint8_t)(mask & (~ (1 << map[i])));
2952 for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
2953 if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
2957 /* Re-configure 4 TCs BW */
2958 for (i = 0; i < nb_tcs; i++) {
2959 tc = &dcb_config->tc_config[i];
2960 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
2961 (uint8_t)(100 / nb_tcs);
2962 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
2963 (uint8_t)(100 / nb_tcs);
2965 for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2966 tc = &dcb_config->tc_config[i];
2967 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
2968 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 0;
2973 /* Set RX buffer size */
2974 pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
2975 uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT;
2976 for (i = 0 ; i < nb_tcs; i++) {
2977 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
2979 /* zero alloc all unused TCs */
2980 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2981 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
2985 /* Only support an equally distributed Tx packet buffer strategy. */
2986 uint32_t txpktsize = IXGBE_TXPBSIZE_MAX / nb_tcs;
2987 uint32_t txpbthresh = (txpktsize / DCB_TX_PB) - IXGBE_TXPKT_SIZE_MAX;
2988 for (i = 0; i < nb_tcs; i++) {
2989 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
2990 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
2992 /* Clear unused TCs, if any, to zero buffer size*/
2993 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2994 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
2995 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
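/*
 * Worked example (illustrative, assuming the usual IXGBE_TXPBSIZE_MAX of
 * 0x28000, i.e. 160 KB, and IXGBE_TXPKT_SIZE_MAX of 0xA): with 8 TCs each
 * TXPBSIZE register gets 0x5000 (20 KB) and TXPBTHRESH becomes
 * 20480 / 1024 - 10 = 10; unused TCs are cleared by the loop above.
 */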
/* Calculate traffic class credits */
ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
IXGBE_DCB_TX_CONFIG);
ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
IXGBE_DCB_RX_CONFIG);
3006 /* Unpack CEE standard containers */
3007 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_RX_CONFIG, refill);
3008 ixgbe_dcb_unpack_max_cee(dcb_config, max);
3009 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_RX_CONFIG, bwgid);
3010 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_RX_CONFIG, tsa);
3011 /* Configure PG(ETS) RX */
ixgbe_dcb_hw_arbite_rx_config(hw, refill, max, bwgid, tsa, map);
3016 /* Unpack CEE standard containers */
3017 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
3018 ixgbe_dcb_unpack_max_cee(dcb_config, max);
3019 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
3020 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
3021 /* Configure PG(ETS) TX */
ixgbe_dcb_hw_arbite_tx_config(hw, refill, max, bwgid, tsa, map);
/* Configure queue statistics registers */
3026 ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
3028 /* Check if the PFC is supported */
if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
3030 pbsize = (uint16_t) (NIC_RX_BUFFER_SIZE / nb_tcs);
3031 for (i = 0; i < nb_tcs; i++) {
* For example, with 8 TCs pbsize is 64 KB, so the default
* high_water is 48 KB and the default low_water is 16 KB.
3036 hw->fc.high_water[i] = (pbsize * 3 ) / 4;
3037 hw->fc.low_water[i] = pbsize / 4;
3038 /* Enable pfc for this TC */
3039 tc = &dcb_config->tc_config[i];
3040 tc->pfc = ixgbe_dcb_pfc_enabled;
3042 ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
3045 ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
3052 * ixgbe_configure_dcb - Configure DCB Hardware
3053 * @dev: pointer to rte_eth_dev
3055 void ixgbe_configure_dcb(struct rte_eth_dev *dev)
3057 struct ixgbe_dcb_config *dcb_cfg =
3058 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
3059 struct rte_eth_conf *dev_conf = &(dev->data->dev_conf);
3061 PMD_INIT_FUNC_TRACE();
3063 /* check support mq_mode for DCB */
3064 if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
3065 (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB))
3068 if (dev->data->nb_rx_queues != ETH_DCB_NUM_QUEUES)
/* Configure DCB hardware */
ixgbe_dcb_hw_configure(dev, dcb_cfg);
* VMDq is only supported on 10 GbE NICs.
3081 ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
3083 struct rte_eth_vmdq_rx_conf *cfg;
3084 struct ixgbe_hw *hw;
3085 enum rte_eth_nb_pools num_pools;
3086 uint32_t mrqc, vt_ctl, vlanctrl;
3089 PMD_INIT_FUNC_TRACE();
3090 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3091 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
3092 num_pools = cfg->nb_queue_pools;
3094 ixgbe_rss_disable(dev);
3096 /* MRQC: enable vmdq */
3097 mrqc = IXGBE_MRQC_VMDQEN;
3098 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3100 /* PFVTCTL: turn on virtualisation and set the default pool */
3101 vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
3102 if (cfg->enable_default_pool)
3103 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
3105 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
3107 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
3109 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
3110 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
3112 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
3114 /* VFTA - enable all vlan filters */
3115 for (i = 0; i < NUM_VFTA_REGISTERS; i++)
3116 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), UINT32_MAX);
3118 /* VFRE: pool enabling for receive - 64 */
3119 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), UINT32_MAX);
3120 if (num_pools == ETH_64_POOLS)
3121 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), UINT32_MAX);
3124 * MPSAR - allow pools to read specific mac addresses
3125 * In this case, all pools should be able to read from mac addr 0
3127 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), UINT32_MAX);
3128 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), UINT32_MAX);
3130 /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
3131 for (i = 0; i < cfg->nb_pool_maps; i++) {
3132 /* set vlan id in VF register and set the valid bit */
3133 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \
3134 (cfg->pool_map[i].vlan_id & IXGBE_RXD_VLAN_ID_MASK)));
3136 * Put the allowed pools in VFB reg. As we only have 16 or 64
3137 * pools, we only need to use the first half of the register
3140 if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0)
3141 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), \
3142 (cfg->pool_map[i].pools & UINT32_MAX));
3144 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((i*2+1)), \
3145 ((cfg->pool_map[i].pools >> 32) \
3150 /* PFDMA Tx General Switch Control Enables VMDQ loopback */
3151 if (cfg->enable_loop_back) {
3152 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
3153 for (i = 0; i < RTE_IXGBE_VMTXSW_REGISTER_COUNT; i++)
3154 IXGBE_WRITE_REG(hw, IXGBE_VMTXSW(i), UINT32_MAX);
3157 IXGBE_WRITE_FLUSH(hw);
* ixgbe_vmdq_tx_hw_configure - Configure general VMDq TX parameters
3162 * @hw: pointer to hardware structure
3165 ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
3170 PMD_INIT_FUNC_TRACE();
3171 /*PF VF Transmit Enable*/
3172 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), UINT32_MAX);
3173 IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), UINT32_MAX);
3175 /* Disable the Tx desc arbiter so that MTQC can be changed */
3176 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3177 reg |= IXGBE_RTTDCS_ARBDIS;
3178 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3180 reg = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
3181 IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
3183 /* Disable drop for all queues */
3184 for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
3185 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3186 (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
3188 /* Enable the Tx desc arbiter */
3189 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3190 reg &= ~IXGBE_RTTDCS_ARBDIS;
3191 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3193 IXGBE_WRITE_FLUSH(hw);
3199 ixgbe_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
3201 struct igb_rx_entry *rxe = rxq->sw_ring;
3205 /* Initialize software ring entries */
3206 for (i = 0; i < rxq->nb_rx_desc; i++) {
3207 volatile union ixgbe_adv_rx_desc *rxd;
3208 struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
3210 PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u\n",
3211 (unsigned) rxq->queue_id);
3215 rte_mbuf_refcnt_set(mbuf, 1);
3216 mbuf->type = RTE_MBUF_PKT;
3217 mbuf->pkt.next = NULL;
3218 mbuf->pkt.data = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;
3219 mbuf->pkt.nb_segs = 1;
3220 mbuf->pkt.in_port = rxq->port_id;
3223 rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
3224 rxd = &rxq->rx_ring[i];
3225 rxd->read.hdr_addr = dma_addr;
3226 rxd->read.pkt_addr = dma_addr;
3234 ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
3236 struct ixgbe_hw *hw =
3237 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3239 if (hw->mac.type == ixgbe_mac_82598EB)
3242 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3244 * SRIOV inactive scheme
3245 * any DCB/RSS w/o VMDq multi-queue setting
3247 switch (dev->data->dev_conf.rxmode.mq_mode) {
3249 ixgbe_rss_configure(dev);
3252 case ETH_MQ_RX_VMDQ_DCB:
3253 ixgbe_vmdq_dcb_configure(dev);
3256 case ETH_MQ_RX_VMDQ_ONLY:
3257 ixgbe_vmdq_rx_hw_configure(dev);
3260 case ETH_MQ_RX_NONE:
3261 /* if mq_mode is none, disable rss mode.*/
3262 default: ixgbe_rss_disable(dev);
3265 switch (RTE_ETH_DEV_SRIOV(dev).active) {
3267 * SRIOV active scheme
* FIXME: add support for DCB/RSS together with VMDq & SRIOV
3271 IXGBE_WRITE_REG(hw, IXGBE_MRQC, IXGBE_MRQC_VMDQEN);
3275 IXGBE_WRITE_REG(hw, IXGBE_MRQC, IXGBE_MRQC_VMDQRT4TCEN);
3279 IXGBE_WRITE_REG(hw, IXGBE_MRQC, IXGBE_MRQC_VMDQRT8TCEN);
3282 RTE_LOG(ERR, PMD, "invalid pool number in IOV mode\n");
3290 ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
3292 struct ixgbe_hw *hw =
3293 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3297 if (hw->mac.type == ixgbe_mac_82598EB)
3300 /* disable arbiter before setting MTQC */
3301 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3302 rttdcs |= IXGBE_RTTDCS_ARBDIS;
3303 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3305 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3307 * SRIOV inactive scheme
3308 * any DCB w/o VMDq multi-queue setting
3310 if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
3311 ixgbe_vmdq_tx_hw_configure(hw);
3313 mtqc = IXGBE_MTQC_64Q_1PB;
3314 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
3317 switch (RTE_ETH_DEV_SRIOV(dev).active) {
3320 * SRIOV active scheme
* FIXME: add support for DCB together with VMDq & SRIOV
3324 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
3327 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_32VF;
3330 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_RT_ENA |
3334 mtqc = IXGBE_MTQC_64Q_1PB;
3335 RTE_LOG(ERR, PMD, "invalid pool number in IOV mode\n");
3337 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
3340 /* re-enable arbiter */
3341 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
3342 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3348 * Initializes Receive Unit.
3351 ixgbe_dev_rx_init(struct rte_eth_dev *dev)
3353 struct ixgbe_hw *hw;
3354 struct igb_rx_queue *rxq;
3355 struct rte_pktmbuf_pool_private *mbp_priv;
3367 PMD_INIT_FUNC_TRACE();
3368 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3371 * Make sure receives are disabled while setting
3372 * up the RX context (registers, descriptor rings, etc.).
3374 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3375 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
3377 /* Enable receipt of broadcasted frames */
3378 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3379 fctrl |= IXGBE_FCTRL_BAM;
3380 fctrl |= IXGBE_FCTRL_DPF;
3381 fctrl |= IXGBE_FCTRL_PMCF;
3382 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3385 * Configure CRC stripping, if any.
3387 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3388 if (dev->data->dev_conf.rxmode.hw_strip_crc)
3389 hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
3391 hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
3394 * Configure jumbo frame support, if any.
3396 if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
3397 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
3398 maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
3399 maxfrs &= 0x0000FFFF;
3400 maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
3401 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
3403 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
3406 * If loopback mode is configured for 82599, set LPBK bit.
3408 if (hw->mac.type == ixgbe_mac_82599EB &&
3409 dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
3410 hlreg0 |= IXGBE_HLREG0_LPBK;
3412 hlreg0 &= ~IXGBE_HLREG0_LPBK;
3414 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
3416 /* Setup RX queues */
3417 for (i = 0; i < dev->data->nb_rx_queues; i++) {
3418 rxq = dev->data->rx_queues[i];
3421 * Reset crc_len in case it was changed after queue setup by a
3422 * call to configure.
3424 rxq->crc_len = (uint8_t)
3425 ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
3428 /* Setup the Base and Length of the Rx Descriptor Rings */
3429 bus_addr = rxq->rx_ring_phys_addr;
3430 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rxq->reg_idx),
3431 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
3432 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rxq->reg_idx),
3433 (uint32_t)(bus_addr >> 32));
3434 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rxq->reg_idx),
3435 rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
3436 IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
3437 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), 0);
3439 /* Configure the SRRCTL register */
3440 #ifdef RTE_HEADER_SPLIT_ENABLE
3442 * Configure Header Split
3444 if (dev->data->dev_conf.rxmode.header_split) {
3445 if (hw->mac.type == ixgbe_mac_82599EB) {
3446 /* Must setup the PSRTYPE register */
3448 psrtype = IXGBE_PSRTYPE_TCPHDR |
3449 IXGBE_PSRTYPE_UDPHDR |
3450 IXGBE_PSRTYPE_IPV4HDR |
3451 IXGBE_PSRTYPE_IPV6HDR;
3452 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
3454 srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
3455 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
3456 IXGBE_SRRCTL_BSIZEHDR_MASK);
srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
3460 srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3462 /* Set if packets are dropped when no descriptors available */
3464 srrctl |= IXGBE_SRRCTL_DROP_EN;
3467 * Configure the RX buffer size in the BSIZEPACKET field of
3468 * the SRRCTL register of the queue.
3469 * The value is in 1 KB resolution. Valid values can be from
3472 mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
3473 buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
3474 RTE_PKTMBUF_HEADROOM);
3475 srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
3476 IXGBE_SRRCTL_BSIZEPKT_MASK);
3477 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
3479 buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
3480 IXGBE_SRRCTL_BSIZEPKT_SHIFT);
3482 /* It adds dual VLAN length for supporting dual VLAN */
3483 if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
3484 2 * IXGBE_VLAN_TAG_SIZE) > buf_size){
3485 dev->data->scattered_rx = 1;
3486 dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
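/*
 * Worked example (illustrative): with an mbuf data room of 2176 bytes and the
 * default 128-byte headroom, buf_size starts at 2048, the BSIZEPACKET field
 * becomes 2048 >> 10 = 2 (2 KB), and the effective buf_size read back is
 * 2048. A default max_rx_pkt_len of 1518 plus 2 * 4 bytes of VLAN tags fits,
 * so scattered RX stays off; a jumbo max_rx_pkt_len of 9000 would not fit and
 * would switch the queue to ixgbe_recv_scattered_pkts as done above.
 */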
3490 if (dev->data->dev_conf.rxmode.enable_scatter) {
3491 dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
3492 dev->data->scattered_rx = 1;
3496 * Device configured with multiple RX queues.
3498 ixgbe_dev_mq_rx_configure(dev);
3501 * Setup the Checksum Register.
3502 * Disable Full-Packet Checksum which is mutually exclusive with RSS.
* Enable IP/L4 checksum computation by hardware if requested to do so.
3505 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3506 rxcsum |= IXGBE_RXCSUM_PCSD;
3507 if (dev->data->dev_conf.rxmode.hw_ip_checksum)
3508 rxcsum |= IXGBE_RXCSUM_IPPCSE;
3510 rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
3512 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3514 if (hw->mac.type == ixgbe_mac_82599EB) {
3515 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
3516 if (dev->data->dev_conf.rxmode.hw_strip_crc)
3517 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
3519 rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
3520 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
3521 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
3528 * Initializes Transmit Unit.
3531 ixgbe_dev_tx_init(struct rte_eth_dev *dev)
3533 struct ixgbe_hw *hw;
3534 struct igb_tx_queue *txq;
3540 PMD_INIT_FUNC_TRACE();
3541 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3543 /* Enable TX CRC (checksum offload requirement) */
3544 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3545 hlreg0 |= IXGBE_HLREG0_TXCRCEN;
3546 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
3548 /* Setup the Base and Length of the Tx Descriptor Rings */
3549 for (i = 0; i < dev->data->nb_tx_queues; i++) {
3550 txq = dev->data->tx_queues[i];
3552 bus_addr = txq->tx_ring_phys_addr;
3553 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(txq->reg_idx),
3554 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
3555 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(txq->reg_idx),
3556 (uint32_t)(bus_addr >> 32));
3557 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(txq->reg_idx),
3558 txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
3559 /* Setup the HW Tx Head and TX Tail descriptor pointers */
3560 IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
3561 IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
3564 * Disable Tx Head Writeback RO bit, since this hoses
3565 * bookkeeping if things aren't delivered in order.
3567 switch (hw->mac.type) {
3568 case ixgbe_mac_82598EB:
3569 txctrl = IXGBE_READ_REG(hw,
3570 IXGBE_DCA_TXCTRL(txq->reg_idx));
3571 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
3572 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx),
3576 case ixgbe_mac_82599EB:
3577 case ixgbe_mac_X540:
3579 txctrl = IXGBE_READ_REG(hw,
3580 IXGBE_DCA_TXCTRL_82599(txq->reg_idx));
3581 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
3582 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx),
3588 /* Device configured with multiple TX queues. */
3589 ixgbe_dev_mq_tx_configure(dev);
3593 * Set up link for 82599 loopback mode Tx->Rx.
3596 ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
3598 DEBUGFUNC("ixgbe_setup_loopback_link_82599");
3600 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
3601 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM) !=
3603 PMD_INIT_LOG(ERR, "Could not enable loopback mode\n");
3612 IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU);
3613 ixgbe_reset_pipeline_82599(hw);
3615 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
3621 * Start Transmit and Receive Units.
3624 ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
3626 struct ixgbe_hw *hw;
3627 struct igb_tx_queue *txq;
3628 struct igb_rx_queue *rxq;
3634 PMD_INIT_FUNC_TRACE();
3635 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3637 for (i = 0; i < dev->data->nb_tx_queues; i++) {
3638 txq = dev->data->tx_queues[i];
3639 /* Setup Transmit Threshold Registers */
3640 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
3641 txdctl |= txq->pthresh & 0x7F;
3642 txdctl |= ((txq->hthresh & 0x7F) << 8);
3643 txdctl |= ((txq->wthresh & 0x7F) << 16);
3644 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
3647 if (hw->mac.type != ixgbe_mac_82598EB) {
3648 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
3649 dmatxctl |= IXGBE_DMATXCTL_TE;
3650 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
3653 for (i = 0; i < dev->data->nb_tx_queues; i++) {
3654 txq = dev->data->tx_queues[i];
3655 if (!txq->start_tx_per_q)
3656 ixgbe_dev_tx_queue_start(dev, i);
3659 for (i = 0; i < dev->data->nb_rx_queues; i++) {
3660 rxq = dev->data->rx_queues[i];
3661 if (!rxq->start_rx_per_q)
3662 ixgbe_dev_rx_queue_start(dev, i);
3665 /* Enable Receive engine */
3666 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3667 if (hw->mac.type == ixgbe_mac_82598EB)
3668 rxctrl |= IXGBE_RXCTRL_DMBYPS;
3669 rxctrl |= IXGBE_RXCTRL_RXEN;
3670 hw->mac.ops.enable_rx_dma(hw, rxctrl);
3672 /* If loopback mode is enabled for 82599, set up the link accordingly */
3673 if (hw->mac.type == ixgbe_mac_82599EB &&
3674 dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
3675 ixgbe_setup_loopback_link_82599(hw);
3680 * Start Receive Units for specified queue.
3683 ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
3685 struct ixgbe_hw *hw;
3686 struct igb_rx_queue *rxq;
3690 PMD_INIT_FUNC_TRACE();
3691 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3693 if (rx_queue_id < dev->data->nb_rx_queues) {
3694 rxq = dev->data->rx_queues[rx_queue_id];
3696 /* Allocate buffers for descriptor rings */
3697 if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
3699 "Could not alloc mbuf for queue:%d\n",
3703 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
3704 rxdctl |= IXGBE_RXDCTL_ENABLE;
3705 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
3707 /* Wait until RX Enable ready */
3708 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
3711 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
3712 } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
3714 PMD_INIT_LOG(ERR, "Could not enable "
3715 "Rx Queue %d\n", rx_queue_id);
3717 IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
3718 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
3726 * Stop Receive Units for specified queue.
3729 ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
3731 struct ixgbe_hw *hw;
3732 struct igb_rx_queue *rxq;
3736 PMD_INIT_FUNC_TRACE();
3737 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3739 if (rx_queue_id < dev->data->nb_rx_queues) {
3740 rxq = dev->data->rx_queues[rx_queue_id];
3742 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
3743 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
3744 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
3746 /* Wait until RX Enable ready */
3747 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
3750 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
} while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE));
3753 PMD_INIT_LOG(ERR, "Could not disable "
3754 "Rx Queue %d\n", rx_queue_id);
3756 rte_delay_us(RTE_IXGBE_WAIT_100_US);
3758 ixgbe_rx_queue_release_mbufs(rxq);
3759 ixgbe_reset_rx_queue(rxq);
3768 * Start Transmit Units for specified queue.
3771 ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
3773 struct ixgbe_hw *hw;
3774 struct igb_tx_queue *txq;
3778 PMD_INIT_FUNC_TRACE();
3779 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3781 if (tx_queue_id < dev->data->nb_tx_queues) {
3782 txq = dev->data->tx_queues[tx_queue_id];
3783 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
3784 txdctl |= IXGBE_TXDCTL_ENABLE;
3785 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
3787 /* Wait until TX Enable ready */
3788 if (hw->mac.type == ixgbe_mac_82599EB) {
3789 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
3792 txdctl = IXGBE_READ_REG(hw,
3793 IXGBE_TXDCTL(txq->reg_idx));
3794 } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
3796 PMD_INIT_LOG(ERR, "Could not enable "
3797 "Tx Queue %d\n", tx_queue_id);
3800 IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
3801 IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
3809 * Stop Transmit Units for specified queue.
3812 ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
3814 struct ixgbe_hw *hw;
3815 struct igb_tx_queue *txq;
3817 uint32_t txtdh, txtdt;
3820 PMD_INIT_FUNC_TRACE();
3821 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3823 if (tx_queue_id < dev->data->nb_tx_queues) {
3824 txq = dev->data->tx_queues[tx_queue_id];
3826 /* Wait until TX queue is empty */
3827 if (hw->mac.type == ixgbe_mac_82599EB) {
3828 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
3830 rte_delay_us(RTE_IXGBE_WAIT_100_US);
3831 txtdh = IXGBE_READ_REG(hw,
3832 IXGBE_TDH(txq->reg_idx));
3833 txtdt = IXGBE_READ_REG(hw,
3834 IXGBE_TDT(txq->reg_idx));
3835 } while (--poll_ms && (txtdh != txtdt));
3838 "Tx Queue %d is not empty when stopping.\n",
3842 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
3843 txdctl &= ~IXGBE_TXDCTL_ENABLE;
3844 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
3846 /* Wait until TX Enable ready */
3847 if (hw->mac.type == ixgbe_mac_82599EB) {
3848 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
3851 txdctl = IXGBE_READ_REG(hw,
3852 IXGBE_TXDCTL(txq->reg_idx));
} while (--poll_ms && (txdctl & IXGBE_TXDCTL_ENABLE));
3855 PMD_INIT_LOG(ERR, "Could not disable "
3856 "Tx Queue %d\n", tx_queue_id);
3859 if (txq->ops != NULL) {
3860 txq->ops->release_mbufs(txq);
3861 txq->ops->reset(txq);
3870 * [VF] Initializes Receive Unit.
3873 ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
3875 struct ixgbe_hw *hw;
3876 struct igb_rx_queue *rxq;
3877 struct rte_pktmbuf_pool_private *mbp_priv;
3884 PMD_INIT_FUNC_TRACE();
3885 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
* When the VF driver issues an IXGBE_VF_RESET request, the PF driver
* disables VF receipt of packets if the PF MTU is > 1500.
* This is done to deal with the 82599 limitation that requires the PF
* and all VFs to share the same MTU.
* The PF driver re-enables VF packet receipt only when the VF driver
* issues an IXGBE_VF_SET_LPE request.
* In the meantime, the VF device cannot be used, even if the VF driver
* and the Guest VM network stack are ready to accept packets with a
* size up to the PF MTU.
* As a workaround to this PF behaviour, force the call to
* ixgbevf_rlpml_set_vf even if jumbo frames are not used. This way,
* VF packet reception works in all cases.
3901 ixgbevf_rlpml_set_vf(hw,
3902 (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
3904 /* Setup RX queues */
3905 dev->rx_pkt_burst = ixgbe_recv_pkts;
3906 for (i = 0; i < dev->data->nb_rx_queues; i++) {
3907 rxq = dev->data->rx_queues[i];
3909 /* Allocate buffers for descriptor rings */
3910 ret = ixgbe_alloc_rx_queue_mbufs(rxq);
3914 /* Setup the Base and Length of the Rx Descriptor Rings */
3915 bus_addr = rxq->rx_ring_phys_addr;
3917 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
3918 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
3919 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
3920 (uint32_t)(bus_addr >> 32));
3921 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
3922 rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
3923 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0);
3924 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0);
3927 /* Configure the SRRCTL register */
3928 #ifdef RTE_HEADER_SPLIT_ENABLE
3930 * Configure Header Split
3932 if (dev->data->dev_conf.rxmode.header_split) {
3934 /* Must setup the PSRTYPE register */
3936 psrtype = IXGBE_PSRTYPE_TCPHDR |
3937 IXGBE_PSRTYPE_UDPHDR |
3938 IXGBE_PSRTYPE_IPV4HDR |
3939 IXGBE_PSRTYPE_IPV6HDR;
3941 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE(i), psrtype);
3943 srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
3944 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
3945 IXGBE_SRRCTL_BSIZEHDR_MASK);
srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
3949 srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3951 /* Set if packets are dropped when no descriptors available */
3953 srrctl |= IXGBE_SRRCTL_DROP_EN;
3956 * Configure the RX buffer size in the BSIZEPACKET field of
3957 * the SRRCTL register of the queue.
3958 * The value is in 1 KB resolution. Valid values can be from
3961 mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
3962 buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
3963 RTE_PKTMBUF_HEADROOM);
3964 srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
3965 IXGBE_SRRCTL_BSIZEPKT_MASK);
3968 * VF modification to write virtual function SRRCTL register
3970 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), srrctl);
3972 buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
3973 IXGBE_SRRCTL_BSIZEPKT_SHIFT);
3975 /* It adds dual VLAN length for supporting dual VLAN */
3976 if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
3977 2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
3978 dev->data->scattered_rx = 1;
3979 dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
3983 if (dev->data->dev_conf.rxmode.enable_scatter) {
3984 dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
3985 dev->data->scattered_rx = 1;
3992 * [VF] Initializes Transmit Unit.
3995 ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
3997 struct ixgbe_hw *hw;
3998 struct igb_tx_queue *txq;
4003 PMD_INIT_FUNC_TRACE();
4004 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4006 /* Setup the Base and Length of the Tx Descriptor Rings */
4007 for (i = 0; i < dev->data->nb_tx_queues; i++) {
4008 txq = dev->data->tx_queues[i];
4009 bus_addr = txq->tx_ring_phys_addr;
4010 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
4011 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4012 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i),
4013 (uint32_t)(bus_addr >> 32));
4014 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
4015 txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
4016 /* Setup the HW Tx Head and TX Tail descriptor pointers */
4017 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
4018 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
4021 * Disable Tx Head Writeback RO bit, since this hoses
4022 * bookkeeping if things aren't delivered in order.
4024 txctrl = IXGBE_READ_REG(hw,
4025 IXGBE_VFDCA_TXCTRL(i));
4026 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4027 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i),
4033 * [VF] Start Transmit and Receive Units.
4036 ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
4038 struct ixgbe_hw *hw;
4039 struct igb_tx_queue *txq;
4040 struct igb_rx_queue *rxq;
4046 PMD_INIT_FUNC_TRACE();
4047 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4049 for (i = 0; i < dev->data->nb_tx_queues; i++) {
4050 txq = dev->data->tx_queues[i];
4051 /* Setup Transmit Threshold Registers */
4052 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
4053 txdctl |= txq->pthresh & 0x7F;
4054 txdctl |= ((txq->hthresh & 0x7F) << 8);
4055 txdctl |= ((txq->wthresh & 0x7F) << 16);
4056 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
4059 for (i = 0; i < dev->data->nb_tx_queues; i++) {
4061 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
4062 txdctl |= IXGBE_TXDCTL_ENABLE;
4063 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
4066 /* Wait until TX Enable ready */
4069 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
4070 } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
4072 PMD_INIT_LOG(ERR, "Could not enable "
4073 "Tx Queue %d\n", i);
4075 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4077 rxq = dev->data->rx_queues[i];
4079 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
4080 rxdctl |= IXGBE_RXDCTL_ENABLE;
4081 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
4083 /* Wait until RX Enable ready */
4087 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
4088 } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
4090 PMD_INIT_LOG(ERR, "Could not enable "
4091 "Rx Queue %d\n", i);
4093 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1);