/*-
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>
#include <rte_errno.h>

#include "ixgbe_logs.h"
#include "ixgbe/ixgbe_api.h"
#include "ixgbe/ixgbe_vf.h"
#include "ixgbe_ethdev.h"
#include "ixgbe/ixgbe_dcb.h"
#include "ixgbe/ixgbe_common.h"
#include "ixgbe_rxtx.h"
#define IXGBE_RSS_OFFLOAD_ALL ( \
	ETH_RSS_IPV6_TCP_EX | \
static inline struct rte_mbuf *
rte_rxmbuf_alloc(struct rte_mempool *mp)
{
	struct rte_mbuf *m;

	m = __rte_mbuf_raw_alloc(mp);
	__rte_mbuf_sanity_check_raw(m, 0);
	return m;
}
#define RTE_PMD_USE_PREFETCH

#ifdef RTE_PMD_USE_PREFETCH
/*
 * Prefetch a cache line into all cache levels.
 */
#define rte_ixgbe_prefetch(p)   rte_prefetch0(p)
#else
#define rte_ixgbe_prefetch(p)   do {} while (0)
#endif
/*********************************************************************
 *
 *  TX functions
 *
 **********************************************************************/
/*
 * Check for descriptors with their DD bit set and free mbufs.
 * Return the total number of buffers freed.
 */
static inline int __attribute__((always_inline))
ixgbe_tx_free_bufs(struct igb_tx_queue *txq)
{
	struct igb_tx_entry *txep;
	uint32_t status;
	int i;

	/* check DD bit on threshold descriptor */
	status = txq->tx_ring[txq->tx_next_dd].wb.status;
	if (! (status & IXGBE_ADVTXD_STAT_DD))
		return 0;

	/*
	 * first buffer to free from S/W ring is at index
	 * tx_next_dd - (tx_rs_thresh-1)
	 */
	txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]);

	/* prefetch the mbufs that are about to be freed */
	for (i = 0; i < txq->tx_rs_thresh; ++i)
		rte_prefetch0((txep + i)->mbuf);

	/* free buffers one at a time */
	if ((txq->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOREFCOUNT) != 0) {
		for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
			rte_mempool_put(txep->mbuf->pool, txep->mbuf);
			txep->mbuf = NULL;
		}
	} else {
		for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
			rte_pktmbuf_free_seg(txep->mbuf);
			txep->mbuf = NULL;
		}
	}

	/* buffers were freed, update counters */
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
	if (txq->tx_next_dd >= txq->nb_tx_desc)
		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);

	return txq->tx_rs_thresh;
}
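/*
 * Illustrative note (not part of the original source): with the default
 * tx_rs_thresh of 32, a single DD-bit test on the threshold descriptor
 * above is enough to know that the whole batch has completed, so 32 mbufs
 * are released per call instead of polling every descriptor individually.
 */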
/* Populate 4 descriptors with data from 4 mbufs */
static inline void
tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
{
	uint64_t buf_dma_addr;
	uint32_t pkt_len;
	int i;

	for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
		buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
		pkt_len = (*pkts)->data_len;

		/* write data to descriptor */
		txdp->read.buffer_addr = buf_dma_addr;
		txdp->read.cmd_type_len =
				((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
		txdp->read.olinfo_status =
				(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
	}
}
/* Populate 1 descriptor with data from 1 mbuf */
static inline void
tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
{
	uint64_t buf_dma_addr;
	uint32_t pkt_len;

	buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
	pkt_len = (*pkts)->data_len;

	/* write data to descriptor */
	txdp->read.buffer_addr = buf_dma_addr;
	txdp->read.cmd_type_len =
			((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
	txdp->read.olinfo_status =
			(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
}
/*
 * Fill H/W descriptor ring with mbuf data.
 * Copy mbuf pointers to the S/W ring.
 */
static inline void
ixgbe_tx_fill_hw_ring(struct igb_tx_queue *txq, struct rte_mbuf **pkts,
		      uint16_t nb_pkts)
{
	volatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
	struct igb_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
	const int N_PER_LOOP = 4;
	const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
	int mainpart, leftover;
	int i, j;

	/*
	 * Process most of the packets in chunks of N pkts.  Any
	 * leftover packets will get processed one at a time.
	 */
	mainpart = (nb_pkts & ((uint32_t) ~N_PER_LOOP_MASK));
	leftover = (nb_pkts & ((uint32_t) N_PER_LOOP_MASK));
	for (i = 0; i < mainpart; i += N_PER_LOOP) {
		/* Copy N mbuf pointers to the S/W ring */
		for (j = 0; j < N_PER_LOOP; ++j) {
			(txep + i + j)->mbuf = *(pkts + i + j);
		}
		tx4(txdp + i, pkts + i);
	}

	if (unlikely(leftover > 0)) {
		for (i = 0; i < leftover; ++i) {
			(txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
			tx1(txdp + mainpart + i, pkts + mainpart + i);
		}
	}
}
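/*
 * Worked example for the split above (not part of the original source):
 * for nb_pkts = 23 and N_PER_LOOP = 4, mainpart = 23 & ~3 = 20 packets are
 * written through tx4() in groups of four, and leftover = 23 & 3 = 3
 * packets are written one at a time through tx1().
 */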
static inline uint16_t
tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
	     uint16_t nb_pkts)
{
	struct igb_tx_queue *txq = (struct igb_tx_queue *)tx_queue;
	volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring;
	uint16_t n = 0;

	/*
	 * Begin scanning the H/W ring for done descriptors when the
	 * number of available descriptors drops below tx_free_thresh.  For
	 * each done descriptor, free the associated buffer.
	 */
	if (txq->nb_tx_free < txq->tx_free_thresh)
		ixgbe_tx_free_bufs(txq);

	/* Only use descriptors that are available */
	nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
	if (unlikely(nb_pkts == 0))
		return 0;

	/* Use exactly nb_pkts descriptors */
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);

	/*
	 * At this point, we know there are enough descriptors in the
	 * ring to transmit all the packets.  This assumes that each
	 * mbuf contains a single segment, and that no new offloads
	 * are expected, which would require a new context descriptor.
	 */

	/*
	 * See if we're going to wrap-around. If so, handle the top
	 * of the descriptor ring first, then do the bottom.  If not,
	 * the processing looks just like the "bottom" part anyway...
	 */
	if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
		n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
		ixgbe_tx_fill_hw_ring(txq, tx_pkts, n);

		/*
		 * We know that the last descriptor in the ring will need to
		 * have its RS bit set because tx_rs_thresh has to be
		 * a divisor of the ring size
		 */
		tx_r[txq->tx_next_rs].read.cmd_type_len |=
			rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

		txq->tx_tail = 0;
	}

	/* Fill H/W descriptor ring with mbuf data */
	ixgbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
	txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));

	/*
	 * Determine if RS bit should be set
	 * This is what we actually want:
	 *     if ((txq->tx_tail - 1) >= txq->tx_next_rs)
	 * but instead of subtracting 1 and doing >=, we can just do
	 * greater than without subtracting.
	 */
	if (txq->tx_tail > txq->tx_next_rs) {
		tx_r[txq->tx_next_rs].read.cmd_type_len |=
			rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
		txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
						txq->tx_rs_thresh);
		if (txq->tx_next_rs >= txq->nb_tx_desc)
			txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
	}

	/*
	 * Check for wrap-around. This would only happen if we used
	 * up to the last descriptor in the ring, no more, no less.
	 */
	if (txq->tx_tail >= txq->nb_tx_desc)
		txq->tx_tail = 0;

	/* update tail pointer */
	IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);

	return nb_pkts;
}
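/*
 * Example of the RS-bit bookkeeping above (not part of the original source):
 * with a 512-entry ring and tx_rs_thresh = 32, tx_next_rs walks through
 * 31, 63, 95, ... and one descriptor in every 32 asks the NIC for a
 * write-back, which is what ixgbe_tx_free_bufs() later checks for.
 */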
static uint16_t
ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
		       uint16_t nb_pkts)
{
	uint16_t nb_tx = 0;

	/* Try to transmit at least chunks of TX_MAX_BURST pkts */
	if (likely(nb_pkts <= RTE_PMD_IXGBE_TX_MAX_BURST))
		return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);

	/* transmit more than the max burst, in chunks of TX_MAX_BURST */
	while (nb_pkts) {
		uint16_t ret, n;
		n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_TX_MAX_BURST);
		ret = tx_xmit_pkts(tx_queue, &(tx_pkts[nb_tx]), n);
		nb_tx = (uint16_t)(nb_tx + ret);
		nb_pkts = (uint16_t)(nb_pkts - ret);
		if (ret < n)
			break;
	}
	return nb_tx;
}
static inline void
ixgbe_set_xmit_ctx(struct igb_tx_queue *txq,
		volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
		uint16_t ol_flags, uint32_t vlan_macip_lens)
{
	uint32_t type_tucmd_mlhl;
	uint32_t mss_l4len_idx;
	uint32_t ctx_idx;
	uint32_t cmp_mask = 0;

	ctx_idx = txq->ctx_curr;
	type_tucmd_mlhl = 0;

	if (ol_flags & PKT_TX_VLAN_PKT) {
		cmp_mask |= TX_VLAN_CMP_MASK;
	}

	if (ol_flags & PKT_TX_IP_CKSUM) {
		type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4;
		cmp_mask |= TX_MAC_LEN_CMP_MASK;
	}

	/* Specify which HW CTX to upload. */
	mss_l4len_idx = (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT);
	switch (ol_flags & PKT_TX_L4_MASK) {
	case PKT_TX_UDP_CKSUM:
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP |
				IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
		mss_l4len_idx |= sizeof(struct udp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
		cmp_mask |= TX_MACIP_LEN_CMP_MASK;
		break;
	case PKT_TX_TCP_CKSUM:
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP |
				IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
		mss_l4len_idx |= sizeof(struct tcp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
		cmp_mask |= TX_MACIP_LEN_CMP_MASK;
		break;
	case PKT_TX_SCTP_CKSUM:
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP |
				IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
		mss_l4len_idx |= sizeof(struct sctp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
		cmp_mask |= TX_MACIP_LEN_CMP_MASK;
		break;
	default:
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_RSV |
				IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
		break;
	}

	txq->ctx_cache[ctx_idx].flags = ol_flags;
	txq->ctx_cache[ctx_idx].cmp_mask = cmp_mask;
	txq->ctx_cache[ctx_idx].vlan_macip_lens.data =
		vlan_macip_lens & cmp_mask;

	ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
	ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
	ctx_txd->mss_l4len_idx   = rte_cpu_to_le_32(mss_l4len_idx);
	ctx_txd->seqnum_seed     = 0;
}
/*
 * Check which hardware context can be used. Use the existing match
 * or create a new context descriptor.
 */
static inline uint32_t
what_advctx_update(struct igb_tx_queue *txq, uint16_t flags,
		uint32_t vlan_macip_lens)
{
	/* If match with the current used context */
	if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
		(txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
		(txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
			return txq->ctx_curr;
	}

	/* What if match with the next context  */
	txq->ctx_curr ^= 1;
	if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
		(txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
		(txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
			return txq->ctx_curr;
	}

	/* Mismatch, use the previous context */
	return (IXGBE_CTX_NUM);
}
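/*
 * Note (not part of the original source): the hardware offers a small
 * number of per-queue context slots (IXGBE_CTX_NUM of them).  The cache
 * checked above avoids writing a new context descriptor when consecutive
 * packets carry the same offload flags and vlan/macip lengths; returning
 * IXGBE_CTX_NUM signals the caller that a fresh context descriptor is
 * required.
 */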
static inline uint32_t
tx_desc_cksum_flags_to_olinfo(uint16_t ol_flags)
{
	static const uint32_t l4_olinfo[2] = {0, IXGBE_ADVTXD_POPTS_TXSM};
	static const uint32_t l3_olinfo[2] = {0, IXGBE_ADVTXD_POPTS_IXSM};
	uint32_t tmp;

	tmp  = l4_olinfo[(ol_flags & PKT_TX_L4_MASK)  != PKT_TX_L4_NO_CKSUM];
	tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
	return tmp;
}

static inline uint32_t
tx_desc_vlan_flags_to_cmdtype(uint16_t ol_flags)
{
	static const uint32_t vlan_cmd[2] = {0, IXGBE_ADVTXD_DCMD_VLE};
	return vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
}
/* Default RS bit threshold values */
#ifndef DEFAULT_TX_RS_THRESH
#define DEFAULT_TX_RS_THRESH   32
#endif
#ifndef DEFAULT_TX_FREE_THRESH
#define DEFAULT_TX_FREE_THRESH 32
#endif
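/*
 * Example of the defaults above (not part of the original source): with
 * tx_rs_thresh = 32 the RS bit is requested on every 32nd descriptor, and
 * with tx_free_thresh = 32 the driver starts scanning for completed
 * descriptors once fewer than 32 entries remain free in the ring.
 */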
/* Reset transmit descriptors after they have been used */
static int
ixgbe_xmit_cleanup(struct igb_tx_queue *txq)
{
	struct igb_tx_entry *sw_ring = txq->sw_ring;
	volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring;
	uint16_t last_desc_cleaned = txq->last_desc_cleaned;
	uint16_t nb_tx_desc = txq->nb_tx_desc;
	uint16_t desc_to_clean_to;
	uint16_t nb_tx_to_clean;

	/* Determine the last descriptor needing to be cleaned */
	desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
	if (desc_to_clean_to >= nb_tx_desc)
		desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);

	/* Check to make sure the last descriptor to clean is done */
	desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
	if (! (txr[desc_to_clean_to].wb.status & IXGBE_TXD_STAT_DD)) {
		PMD_TX_FREE_LOG(DEBUG,
				"TX descriptor %4u is not done "
				"(port=%d queue=%d)",
				desc_to_clean_to,
				txq->port_id, txq->queue_id);
		/* Failed to clean any descriptors, better luck next time */
		return -(1);
	}

	/* Figure out how many descriptors will be cleaned */
	if (last_desc_cleaned > desc_to_clean_to)
		nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
							desc_to_clean_to);
	else
		nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
							last_desc_cleaned);

	PMD_TX_FREE_LOG(DEBUG,
			"Cleaning %4u TX descriptors: %4u to %4u "
			"(port=%d queue=%d)",
			nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
			txq->port_id, txq->queue_id);

	/*
	 * The last descriptor to clean is done, so that means all the
	 * descriptors from the last descriptor that was cleaned
	 * up to the last descriptor with the RS bit set
	 * are done. Only reset the threshold descriptor.
	 */
	txr[desc_to_clean_to].wb.status = 0;

	/* Update the txq to reflect the last descriptor that was cleaned */
	txq->last_desc_cleaned = desc_to_clean_to;
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);

	return 0;
}
ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct igb_tx_queue *txq;
	struct igb_tx_entry *sw_ring;
	struct igb_tx_entry *txe, *txn;
	volatile union ixgbe_adv_tx_desc *txr;
	volatile union ixgbe_adv_tx_desc *txd;
	struct rte_mbuf *tx_pkt;
	struct rte_mbuf *m_seg;
	union ixgbe_vlan_macip vlan_macip_lens;
	uint64_t buf_dma_addr;
	uint32_t olinfo_status;
	uint32_t cmd_type_len;

	sw_ring = txq->sw_ring;
	tx_id = txq->tx_tail;
	txe = &sw_ring[tx_id];

	/* Determine if the descriptor ring needs to be cleaned. */
	if ((txq->nb_tx_desc - txq->nb_tx_free) > txq->tx_free_thresh) {
		ixgbe_xmit_cleanup(txq);
	}

	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		pkt_len = tx_pkt->pkt_len;

		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);

		/*
		 * Determine how many (if any) context descriptors
		 * are needed for offload functionality.
		 */
		ol_flags = tx_pkt->ol_flags;
		vlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci;
		vlan_macip_lens.f.l2_l3_len = tx_pkt->l2_l3_len;

		/* If hardware offload required */
		tx_ol_req = (uint16_t)(ol_flags & PKT_TX_OFFLOAD_MASK);

		/* If new context need be built or reuse the exist ctx. */
		ctx = what_advctx_update(txq, tx_ol_req,
			vlan_macip_lens.data);
		/* Only allocate context descriptor if required*/
		new_ctx = (ctx == IXGBE_CTX_NUM);

		/*
		 * Keep track of how many descriptors are used this loop
		 * This will always be the number of segments + the number of
		 * Context descriptors required to transmit the packet
		 */
		nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);

		/*
		 * The number of descriptors that must be allocated for a
		 * packet is the number of segments of that packet, plus 1
		 * Context Descriptor for the hardware offload, if any.
		 * Determine the last TX descriptor to allocate in the TX ring
		 * for the packet, starting from the current position (tx_id)
		 * in the ring.
		 */
		tx_last = (uint16_t) (tx_id + nb_used - 1);

		if (tx_last >= txq->nb_tx_desc)
			tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);

		PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
			   " tx_first=%u tx_last=%u\n",
			   (unsigned) txq->port_id,
			   (unsigned) txq->queue_id,

		/*
		 * Make sure there are enough TX descriptors available to
		 * transmit the entire packet.
		 * nb_used better be less than or equal to txq->tx_rs_thresh
		 */
		if (nb_used > txq->nb_tx_free) {
			PMD_TX_FREE_LOG(DEBUG,
					"Not enough free TX descriptors "
					"nb_used=%4u nb_free=%4u "
					"(port=%d queue=%d)",
					nb_used, txq->nb_tx_free,
					txq->port_id, txq->queue_id);

			if (ixgbe_xmit_cleanup(txq) != 0) {
				/* Could not clean any descriptors */
			}

			/* nb_used better be <= txq->tx_rs_thresh */
			if (unlikely(nb_used > txq->tx_rs_thresh)) {
				PMD_TX_FREE_LOG(DEBUG,
					"The number of descriptors needed to "
					"transmit the packet exceeds the "
					"RS bit threshold. This will impact "
					"nb_used=%4u nb_free=%4u "
					"(port=%d queue=%d)",
					nb_used, txq->nb_tx_free,
					txq->port_id, txq->queue_id);
				/*
				 * Loop here until there are enough TX
				 * descriptors or until the ring cannot be
				 * cleaned.
				 */
				while (nb_used > txq->nb_tx_free) {
					if (ixgbe_xmit_cleanup(txq) != 0) {
						/*
						 * Could not clean any
						 * descriptors
						 */
					}
				}
			}
		}

		/*
		 * By now there are enough free TX descriptors to transmit
		 * the packet.
		 */

		/*
		 * Set common flags of all TX Data Descriptors.
		 *
		 * The following bits must be set in all Data Descriptors:
		 *   - IXGBE_ADVTXD_DTYP_DATA
		 *   - IXGBE_ADVTXD_DCMD_DEXT
		 *
		 * The following bits must be set in the first Data Descriptor
		 * and are ignored in the other ones:
		 *   - IXGBE_ADVTXD_DCMD_IFCS
		 *   - IXGBE_ADVTXD_MAC_1588
		 *   - IXGBE_ADVTXD_DCMD_VLE
		 *
		 * The following bits must only be set in the last Data
		 * Descriptor:
		 *   - IXGBE_TXD_CMD_EOP
		 *
		 * The following bits can be set in any Data Descriptor, but
		 * are only set in the last Data Descriptor:
		 *   - IXGBE_TXD_CMD_RS
		 */
		cmd_type_len = IXGBE_ADVTXD_DTYP_DATA |
			IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
		olinfo_status = (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
#ifdef RTE_LIBRTE_IEEE1588
		if (ol_flags & PKT_TX_IEEE1588_TMST)
			cmd_type_len |= IXGBE_ADVTXD_MAC_1588;
#endif

		/*
		 * Setup the TX Advanced Context Descriptor if required
		 */
		volatile struct ixgbe_adv_tx_context_desc *
			ctx_txd;

		ctx_txd = (volatile struct
			ixgbe_adv_tx_context_desc *)

		txn = &sw_ring[txe->next_id];
		RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);

		if (txe->mbuf != NULL) {
			rte_pktmbuf_free_seg(txe->mbuf);
		}

		ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
			vlan_macip_lens.data);

		txe->last_id = tx_last;
		tx_id = txe->next_id;

		/*
		 * Setup the TX Advanced Data Descriptor,
		 * This path will go through
		 * whatever new/reuse the context descriptor
		 */
		cmd_type_len  |= tx_desc_vlan_flags_to_cmdtype(ol_flags);
		olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
		olinfo_status |= ctx << IXGBE_ADVTXD_IDX_SHIFT;

			txn = &sw_ring[txe->next_id];

			if (txe->mbuf != NULL)
				rte_pktmbuf_free_seg(txe->mbuf);

			/*
			 * Set up Transmit Data Descriptor.
			 */
			slen = m_seg->data_len;
			buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
			txd->read.buffer_addr =
				rte_cpu_to_le_64(buf_dma_addr);
			txd->read.cmd_type_len =
				rte_cpu_to_le_32(cmd_type_len | slen);
			txd->read.olinfo_status =
				rte_cpu_to_le_32(olinfo_status);
			txe->last_id = tx_last;
			tx_id = txe->next_id;
		} while (m_seg != NULL);

		/*
		 * The last packet data descriptor needs End Of Packet (EOP)
		 */
		cmd_type_len |= IXGBE_TXD_CMD_EOP;
		txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
		txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);

		/* Set RS bit only on threshold packets' last descriptor */
		if (txq->nb_tx_used >= txq->tx_rs_thresh) {
			PMD_TX_FREE_LOG(DEBUG,
					"Setting RS bit on TXD id="
					"%4u (port=%d queue=%d)",
					tx_last, txq->port_id, txq->queue_id);

			cmd_type_len |= IXGBE_TXD_CMD_RS;

			/* Update txq RS bit counters */
		}
		txd->read.cmd_type_len |= rte_cpu_to_le_32(cmd_type_len);
	}

	/*
	 * Set the Transmit Descriptor Tail (TDT)
	 */
	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
		   (unsigned) txq->port_id, (unsigned) txq->queue_id,
		   (unsigned) tx_id, (unsigned) nb_tx);
	IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
	txq->tx_tail = tx_id;
/*********************************************************************
 *
 *  RX functions
 *
 **********************************************************************/
static inline uint16_t
rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
{
	uint16_t pkt_flags;

	static uint16_t ip_pkt_types_map[16] = {
		0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
		PKT_RX_IPV6_HDR, 0, 0, 0,
		PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
		PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
	};

	static uint16_t ip_rss_types_map[16] = {
		0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
		0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
		PKT_RX_RSS_HASH, 0, 0, 0,
		0, 0, 0, PKT_RX_FDIR,
	};

#ifdef RTE_LIBRTE_IEEE1588
	static uint32_t ip_pkt_etqf_map[8] = {
		0, 0, 0, PKT_RX_IEEE1588_PTP,

	pkt_flags = (uint16_t) ((hl_tp_rs & IXGBE_RXDADV_PKTTYPE_ETQF) ?
				ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
				ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
#else
	pkt_flags = (uint16_t) ((hl_tp_rs & IXGBE_RXDADV_PKTTYPE_ETQF) ? 0 :
				ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
#endif
	return (uint16_t)(pkt_flags | ip_rss_types_map[hl_tp_rs & 0xF]);
}
static inline uint16_t
rx_desc_status_to_pkt_flags(uint32_t rx_status)
{
	uint16_t pkt_flags;

	/*
	 * Check if VLAN present only.
	 * Do not check whether L3/L4 rx checksum done by NIC or not;
	 * that can be found from the rte_eth_rxmode.hw_ip_checksum flag.
	 */
	pkt_flags = (uint16_t)((rx_status & IXGBE_RXD_STAT_VP) ?
						PKT_RX_VLAN_PKT : 0);

#ifdef RTE_LIBRTE_IEEE1588
	if (rx_status & IXGBE_RXD_STAT_TMST)
		pkt_flags = (uint16_t)(pkt_flags | PKT_RX_IEEE1588_TMST);
#endif
	return pkt_flags;
}
static inline uint16_t
rx_desc_error_to_pkt_flags(uint32_t rx_status)
{
	/*
	 * Bit 31: IPE, IPv4 checksum error
	 * Bit 30: L4I, L4 integrity error
	 */
	static uint16_t error_to_pkt_flags_map[4] = {
		0, PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
		PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
	};
	return error_to_pkt_flags_map[(rx_status >>
		IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK];
}
#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
/*
 * LOOK_AHEAD defines how many desc statuses to check beyond the
 * current descriptor.
 * It must be a pound define for optimal performance.
 * Do not change the value of LOOK_AHEAD, as the ixgbe_rx_scan_hw_ring
 * function only works with LOOK_AHEAD=8.
 */
#define LOOK_AHEAD 8
#if (LOOK_AHEAD != 8)
#error "PMD IXGBE: LOOK_AHEAD must be 8\n"
#endif
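/*
 * Example (not part of the original source, and assuming the usual
 * RTE_PMD_IXGBE_RX_MAX_BURST of 32): the scan loop below reads descriptor
 * status in four groups of eight and stops at the first group that is not
 * completely done.
 */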
static inline int
ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)
{
	volatile union ixgbe_adv_rx_desc *rxdp;
	struct igb_rx_entry *rxep;
	struct rte_mbuf *mb;
	uint16_t pkt_len;
	int s[LOOK_AHEAD], nb_dd;
	int i, j, nb_rx = 0;

	/* get references to current descriptor and S/W ring entry */
	rxdp = &rxq->rx_ring[rxq->rx_tail];
	rxep = &rxq->sw_ring[rxq->rx_tail];

	/* check to make sure there is at least 1 packet to receive */
	if (! (rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD))
		return 0;

	/*
	 * Scan LOOK_AHEAD descriptors at a time to determine which descriptors
	 * reference packets that are ready to be received.
	 */
	for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST;
	     i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD)
	{
		/* Read desc statuses backwards to avoid race condition */
		for (j = LOOK_AHEAD-1; j >= 0; --j)
			s[j] = rxdp[j].wb.upper.status_error;

		/* Compute how many status bits were set */
		nb_dd = 0;
		for (j = 0; j < LOOK_AHEAD; ++j)
			nb_dd += s[j] & IXGBE_RXDADV_STAT_DD;

		nb_rx += nb_dd;

		/* Translate descriptor info to mbuf format */
		for (j = 0; j < nb_dd; ++j) {
			mb = rxep[j].mbuf;
			pkt_len = (uint16_t)(rxdp[j].wb.upper.length -
							rxq->crc_len);
			mb->data_len = pkt_len;
			mb->pkt_len = pkt_len;
			mb->vlan_tci = rxdp[j].wb.upper.vlan;
			mb->hash.rss = rxdp[j].wb.lower.hi_dword.rss;

			/* convert descriptor fields to rte mbuf flags */
			mb->ol_flags = rx_desc_hlen_type_rss_to_pkt_flags(
					rxdp[j].wb.lower.lo_dword.data);
			/* reuse status field from scan list */
			mb->ol_flags = (uint16_t)(mb->ol_flags |
					rx_desc_status_to_pkt_flags(s[j]));
			mb->ol_flags = (uint16_t)(mb->ol_flags |
					rx_desc_error_to_pkt_flags(s[j]));
		}

		/* Move mbuf pointers from the S/W ring to the stage */
		for (j = 0; j < LOOK_AHEAD; ++j) {
			rxq->rx_stage[i + j] = rxep[j].mbuf;
		}

		/* stop if all requested packets could not be received */
		if (nb_dd != LOOK_AHEAD)
			break;
	}

	/* clear software ring entries so we can cleanup correctly */
	for (i = 0; i < nb_rx; ++i) {
		rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
	}

	return nb_rx;
}
static inline int
ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)
{
	volatile union ixgbe_adv_rx_desc *rxdp;
	struct igb_rx_entry *rxep;
	struct rte_mbuf *mb;
	uint16_t alloc_idx;
	uint64_t dma_addr;
	int diag, i;

	/* allocate buffers in bulk directly into the S/W ring */
	alloc_idx = (uint16_t)(rxq->rx_free_trigger -
				(rxq->rx_free_thresh - 1));
	rxep = &rxq->sw_ring[alloc_idx];
	diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
				    rxq->rx_free_thresh);
	if (unlikely(diag != 0))
		return -ENOMEM;

	rxdp = &rxq->rx_ring[alloc_idx];
	for (i = 0; i < rxq->rx_free_thresh; ++i) {
		/* populate the static rte mbuf fields */
		mb = rxep[i].mbuf;
		rte_mbuf_refcnt_set(mb, 1);
		mb->data = (char *)mb->buf_addr + RTE_PKTMBUF_HEADROOM;
		mb->port = rxq->port_id;

		/* populate the descriptors */
		dma_addr = (uint64_t)mb->buf_physaddr + RTE_PKTMBUF_HEADROOM;
		rxdp[i].read.hdr_addr = dma_addr;
		rxdp[i].read.pkt_addr = dma_addr;
	}

	/* update tail pointer */
	IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rxq->rx_free_trigger);

	/* update state of internal queue structure */
	rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_trigger +
						rxq->rx_free_thresh);
	if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
		rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);

	return 0;
}
static inline uint16_t
ixgbe_rx_fill_from_stage(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
			 uint16_t nb_pkts)
{
	struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
	int i;

	/* how many packets are ready to return? */
	nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);

	/* copy mbuf pointers to the application's packet list */
	for (i = 0; i < nb_pkts; ++i)
		rx_pkts[i] = stage[i];

	/* update internal queue state */
	rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
	rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);

	return nb_pkts;
}
static inline uint16_t
rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
	     uint16_t nb_pkts)
{
	struct igb_rx_queue *rxq = (struct igb_rx_queue *)rx_queue;
	uint16_t nb_rx = 0;

	/* Any previously recv'd pkts will be returned from the Rx stage */
	if (rxq->rx_nb_avail)
		return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);

	/* Scan the H/W ring for packets to receive */
	nb_rx = (uint16_t)ixgbe_rx_scan_hw_ring(rxq);

	/* update internal queue state */
	rxq->rx_next_avail = 0;
	rxq->rx_nb_avail = nb_rx;
	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);

	/* if required, allocate new buffers to replenish descriptors */
	if (rxq->rx_tail > rxq->rx_free_trigger) {
		if (ixgbe_rx_alloc_bufs(rxq) != 0) {
			int i, j;
			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
				   "queue_id=%u\n", (unsigned) rxq->port_id,
				   (unsigned) rxq->queue_id);

			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
				rxq->rx_free_thresh;

			/*
			 * Need to rewind any previous receives if we cannot
			 * allocate new buffers to replenish the old ones.
			 */
			rxq->rx_nb_avail = 0;
			rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
			for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j)
				rxq->sw_ring[j].mbuf = rxq->rx_stage[i];

			return 0;
		}
	}

	if (rxq->rx_tail >= rxq->nb_rx_desc)
		rxq->rx_tail = 0;

	/* received any packets this loop? */
	if (rxq->rx_nb_avail)
		return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);

	return 0;
}
/* split requests into chunks of size RTE_PMD_IXGBE_RX_MAX_BURST */
uint16_t
ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
			   uint16_t nb_pkts)
{
	uint16_t nb_rx = 0;

	if (unlikely(nb_pkts == 0))
		return 0;

	if (likely(nb_pkts <= RTE_PMD_IXGBE_RX_MAX_BURST))
		return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);

	/* request is relatively large, chunk it up */
	while (nb_pkts) {
		uint16_t ret, n;
		n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_RX_MAX_BURST);
		ret = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
		nb_rx = (uint16_t)(nb_rx + ret);
		nb_pkts = (uint16_t)(nb_pkts - ret);
		if (ret < n)
			break;
	}

	return nb_rx;
}

#endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */
ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	struct igb_rx_queue *rxq;
	volatile union ixgbe_adv_rx_desc *rx_ring;
	volatile union ixgbe_adv_rx_desc *rxdp;
	struct igb_rx_entry *sw_ring;
	struct igb_rx_entry *rxe;
	struct rte_mbuf *rxm;
	struct rte_mbuf *nmb;
	union ixgbe_adv_rx_desc rxd;
	uint32_t hlen_type_rss;

	rx_id = rxq->rx_tail;
	rx_ring = rxq->rx_ring;
	sw_ring = rxq->sw_ring;
	while (nb_rx < nb_pkts) {
		/*
		 * The order of operations here is important as the DD status
		 * bit must not be read after any other descriptor fields.
		 * rx_ring and rxdp are pointing to volatile data so the order
		 * of accesses cannot be reordered by the compiler. If they were
		 * not volatile, they could be reordered which could lead to
		 * using invalid descriptor fields when read from rxd.
		 */
		rxdp = &rx_ring[rx_id];
		staterr = rxdp->wb.upper.status_error;
		if (! (staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
			break;

		/*
		 * If the IXGBE_RXDADV_STAT_EOP flag is not set, the RX packet
		 * is likely to be invalid and to be dropped by the various
		 * validation checks performed by the network stack.
		 *
		 * Allocate a new mbuf to replenish the RX ring descriptor.
		 * If the allocation fails:
		 *    - arrange for that RX descriptor to be the first one
		 *      being parsed the next time the receive function is
		 *      invoked [on the same queue].
		 *
		 *    - Stop parsing the RX ring and return immediately.
		 *
		 * This policy does not drop the packet received in the RX
		 * descriptor for which the allocation of a new mbuf failed.
		 * Thus, it allows that packet to be later retrieved if
		 * mbufs have been freed in the meantime.
		 * As a side effect, holding RX descriptors instead of
		 * systematically giving them back to the NIC may lead to
		 * RX ring exhaustion situations.
		 * However, the NIC can gracefully prevent such situations
		 * to happen by sending specific "back-pressure" flow control
		 * frames to its peer(s).
		 */
		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
			   "ext_err_stat=0x%08x pkt_len=%u\n",
			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
			   (unsigned) rx_id, (unsigned) staterr,
			   (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));

		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
				   "queue_id=%u\n", (unsigned) rxq->port_id,
				   (unsigned) rxq->queue_id);
			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;

		rxe = &sw_ring[rx_id];
		if (rx_id == rxq->nb_rx_desc)

		/* Prefetch next mbuf while processing current one. */
		rte_ixgbe_prefetch(sw_ring[rx_id].mbuf);

		/*
		 * When next RX descriptor is on a cache-line boundary,
		 * prefetch the next 4 RX descriptors and the next 8 pointers
		 * to mbufs.
		 */
		if ((rx_id & 0x3) == 0) {
			rte_ixgbe_prefetch(&rx_ring[rx_id]);
			rte_ixgbe_prefetch(&sw_ring[rx_id]);
		}

			rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
		rxdp->read.hdr_addr = dma_addr;
		rxdp->read.pkt_addr = dma_addr;

		/*
		 * Initialize the returned mbuf.
		 * 1) setup generic mbuf fields:
		 *    - number of segments,
		 *    - RX port identifier.
		 * 2) integrate hardware offload data, if any:
		 *    - RSS flag & hash,
		 *    - IP checksum flag,
		 *    - VLAN TCI, if any,
		 */
		pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
		rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
		rte_packet_prefetch(rxm->data);
		rxm->pkt_len = pkt_len;
		rxm->data_len = pkt_len;
		rxm->port = rxq->port_id;

		hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
		rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);

		pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
		pkt_flags = (uint16_t)(pkt_flags |
				rx_desc_status_to_pkt_flags(staterr));
		pkt_flags = (uint16_t)(pkt_flags |
				rx_desc_error_to_pkt_flags(staterr));
		rxm->ol_flags = pkt_flags;

		if (likely(pkt_flags & PKT_RX_RSS_HASH))
			rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
		else if (pkt_flags & PKT_RX_FDIR) {
			rxm->hash.fdir.hash =
				(uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
					   & IXGBE_ATR_HASH_MASK);
			rxm->hash.fdir.id = rxd.wb.lower.hi_dword.csum_ip.ip_id;
		}

		/*
		 * Store the mbuf address into the next entry of the array
		 * of returned packets.
		 */
		rx_pkts[nb_rx++] = rxm;
	}
	rxq->rx_tail = rx_id;

	/*
	 * If the number of free RX descriptors is greater than the RX free
	 * threshold of the queue, advance the Receive Descriptor Tail (RDT).
	 * Update the RDT with the value of the last processed RX descriptor
	 * minus 1, to guarantee that the RDT register is never equal to the
	 * RDH register, which creates a "full" ring situation from the
	 * hardware point of view...
	 */
	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
	if (nb_hold > rxq->rx_free_thresh) {
		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
			   "nb_hold=%u nb_rx=%u\n",
			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
			   (unsigned) rx_id, (unsigned) nb_hold,
		rx_id = (uint16_t) ((rx_id == 0) ?
				     (rxq->nb_rx_desc - 1) : (rx_id - 1));
		IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
	}
	rxq->nb_rx_hold = nb_hold;
ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			  uint16_t nb_pkts)
{
	struct igb_rx_queue *rxq;
	volatile union ixgbe_adv_rx_desc *rx_ring;
	volatile union ixgbe_adv_rx_desc *rxdp;
	struct igb_rx_entry *sw_ring;
	struct igb_rx_entry *rxe;
	struct rte_mbuf *first_seg;
	struct rte_mbuf *last_seg;
	struct rte_mbuf *rxm;
	struct rte_mbuf *nmb;
	union ixgbe_adv_rx_desc rxd;
	uint64_t dma; /* Physical address of mbuf data buffer */
	uint32_t hlen_type_rss;

	rx_id = rxq->rx_tail;
	rx_ring = rxq->rx_ring;
	sw_ring = rxq->sw_ring;

	/*
	 * Retrieve RX context of current packet, if any.
	 */
	first_seg = rxq->pkt_first_seg;
	last_seg = rxq->pkt_last_seg;

	while (nb_rx < nb_pkts) {
		/*
		 * The order of operations here is important as the DD status
		 * bit must not be read after any other descriptor fields.
		 * rx_ring and rxdp are pointing to volatile data so the order
		 * of accesses cannot be reordered by the compiler. If they were
		 * not volatile, they could be reordered which could lead to
		 * using invalid descriptor fields when read from rxd.
		 */
		rxdp = &rx_ring[rx_id];
		staterr = rxdp->wb.upper.status_error;
		if (! (staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
			break;

		/*
		 * Allocate a new mbuf to replenish the RX ring descriptor.
		 * If the allocation fails:
		 *    - arrange for that RX descriptor to be the first one
		 *      being parsed the next time the receive function is
		 *      invoked [on the same queue].
		 *
		 *    - Stop parsing the RX ring and return immediately.
		 *
		 * This policy does not drop the packet received in the RX
		 * descriptor for which the allocation of a new mbuf failed.
		 * Thus, it allows that packet to be later retrieved if
		 * mbufs have been freed in the meantime.
		 * As a side effect, holding RX descriptors instead of
		 * systematically giving them back to the NIC may lead to
		 * RX ring exhaustion situations.
		 * However, the NIC can gracefully prevent such situations
		 * to happen by sending specific "back-pressure" flow control
		 * frames to its peer(s).
		 */
		PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
			   "staterr=0x%x data_len=%u\n",
			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
			   (unsigned) rx_id, (unsigned) staterr,
			   (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));

		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
				   "queue_id=%u\n", (unsigned) rxq->port_id,
				   (unsigned) rxq->queue_id);
			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;

		rxe = &sw_ring[rx_id];
		if (rx_id == rxq->nb_rx_desc)

		/* Prefetch next mbuf while processing current one. */
		rte_ixgbe_prefetch(sw_ring[rx_id].mbuf);

		/*
		 * When next RX descriptor is on a cache-line boundary,
		 * prefetch the next 4 RX descriptors and the next 8 pointers
		 * to mbufs.
		 */
		if ((rx_id & 0x3) == 0) {
			rte_ixgbe_prefetch(&rx_ring[rx_id]);
			rte_ixgbe_prefetch(&sw_ring[rx_id]);
		}

		/*
		 * Update RX descriptor with the physical address of the new
		 * data buffer of the new allocated mbuf.
		 */
		dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
		rxdp->read.hdr_addr = dma;
		rxdp->read.pkt_addr = dma;

		/*
		 * Set data length & data buffer address of mbuf.
		 */
		data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
		rxm->data_len = data_len;
		rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;

		/*
		 * If this is the first buffer of the received packet,
		 * set the pointer to the first mbuf of the packet and
		 * initialize its context.
		 * Otherwise, update the total length and the number of segments
		 * of the current scattered packet, and update the pointer to
		 * the last mbuf of the current packet.
		 */
		if (first_seg == NULL) {
			first_seg->pkt_len = data_len;
			first_seg->nb_segs = 1;
			first_seg->pkt_len = (uint16_t)(first_seg->pkt_len
			first_seg->nb_segs++;
			last_seg->next = rxm;

		/*
		 * If this is not the last buffer of the received packet,
		 * update the pointer to the last mbuf of the current scattered
		 * packet and continue to parse the RX ring.
		 */
		if (! (staterr & IXGBE_RXDADV_STAT_EOP)) {

		/*
		 * This is the last buffer of the received packet.
		 * If the CRC is not stripped by the hardware:
		 *   - Subtract the CRC length from the total packet length.
		 *   - If the last buffer only contains the whole CRC or a part
		 *     of it, free the mbuf associated to the last buffer.
		 *     If part of the CRC is also contained in the previous
		 *     mbuf, subtract the length of that CRC part from the
		 *     data length of the previous mbuf.
		 */
		if (unlikely(rxq->crc_len > 0)) {
			first_seg->pkt_len -= ETHER_CRC_LEN;
			if (data_len <= ETHER_CRC_LEN) {
				rte_pktmbuf_free_seg(rxm);
				first_seg->nb_segs--;
				last_seg->data_len = (uint16_t)
					(last_seg->data_len -
					 (ETHER_CRC_LEN - data_len));
				last_seg->next = NULL;
					(uint16_t) (data_len - ETHER_CRC_LEN);

		/*
		 * Initialize the first mbuf of the returned packet:
		 *    - RX port identifier,
		 *    - hardware offload data, if any:
		 *      - RSS flag & hash,
		 *      - IP checksum flag,
		 *      - VLAN TCI, if any,
		 */
		first_seg->port = rxq->port_id;

		/*
		 * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
		 * set in the pkt_flags field.
		 */
		first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
		hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
		pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
		pkt_flags = (uint16_t)(pkt_flags |
				rx_desc_status_to_pkt_flags(staterr));
		pkt_flags = (uint16_t)(pkt_flags |
				rx_desc_error_to_pkt_flags(staterr));
		first_seg->ol_flags = pkt_flags;

		if (likely(pkt_flags & PKT_RX_RSS_HASH))
			first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
		else if (pkt_flags & PKT_RX_FDIR) {
			first_seg->hash.fdir.hash =
				(uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
					   & IXGBE_ATR_HASH_MASK);
			first_seg->hash.fdir.id =
				rxd.wb.lower.hi_dword.csum_ip.ip_id;
		}

		/* Prefetch data of first segment, if configured to do so. */
		rte_packet_prefetch(first_seg->data);

		/*
		 * Store the mbuf address into the next entry of the array
		 * of returned packets.
		 */
		rx_pkts[nb_rx++] = first_seg;

		/*
		 * Setup receipt context for a new packet.
		 */
	}

	/*
	 * Record index of the next RX descriptor to probe.
	 */
	rxq->rx_tail = rx_id;

	/*
	 * Save receive context.
	 */
	rxq->pkt_first_seg = first_seg;
	rxq->pkt_last_seg = last_seg;

	/*
	 * If the number of free RX descriptors is greater than the RX free
	 * threshold of the queue, advance the Receive Descriptor Tail (RDT).
	 * Update the RDT with the value of the last processed RX descriptor
	 * minus 1, to guarantee that the RDT register is never equal to the
	 * RDH register, which creates a "full" ring situation from the
	 * hardware point of view...
	 */
	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
	if (nb_hold > rxq->rx_free_thresh) {
		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
			   "nb_hold=%u nb_rx=%u\n",
			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
			   (unsigned) rx_id, (unsigned) nb_hold,
		rx_id = (uint16_t) ((rx_id == 0) ?
				     (rxq->nb_rx_desc - 1) : (rx_id - 1));
		IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
	}
	rxq->nb_rx_hold = nb_hold;
/*********************************************************************
 *
 *  Queue management functions
 *
 **********************************************************************/

/*
 * Rings setup and release.
 *
 * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
 * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. This will
 * also optimize cache line size effect. H/W supports up to cache line size 128.
 */
#define IXGBE_ALIGN 128

/*
 * Maximum number of Ring Descriptors.
 *
 * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
 * descriptors should meet the following condition:
 *      (num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0
 */
#define IXGBE_MIN_RING_DESC 32
#define IXGBE_MAX_RING_DESC 4096
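/*
 * Sanity check of the bounds above (not part of the original source): an
 * advanced RX/TX descriptor is 16 bytes, so the 128-byte RDLEN/TDLEN
 * granularity corresponds to multiples of 8 descriptors, and
 * 4096 descriptors * 16 bytes = 64 KB per ring.
 */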
/*
 * Create memzone for HW rings. malloc can't be used as the physical address is
 * needed. If the memzone is already created, then this function returns a ptr
 * to the old one.
 */
static const struct rte_memzone *
ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
		      uint16_t queue_id, uint32_t ring_size, int socket_id)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
			dev->driver->pci_drv.name, ring_name,
			dev->data->port_id, queue_id);

	mz = rte_memzone_lookup(z_name);
	if (mz)
		return mz;

#ifdef RTE_LIBRTE_XEN_DOM0
	return rte_memzone_reserve_bounded(z_name, ring_size,
		socket_id, 0, IXGBE_ALIGN, RTE_PGSIZE_2M);
#else
	return rte_memzone_reserve_aligned(z_name, ring_size,
		socket_id, 0, IXGBE_ALIGN);
#endif
}
static void
ixgbe_tx_queue_release_mbufs(struct igb_tx_queue *txq)
{
	unsigned i;

	if (txq->sw_ring != NULL) {
		for (i = 0; i < txq->nb_tx_desc; i++) {
			if (txq->sw_ring[i].mbuf != NULL) {
				rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
				txq->sw_ring[i].mbuf = NULL;
			}
		}
	}
}

static void
ixgbe_tx_free_swring(struct igb_tx_queue *txq)
{
	if (txq != NULL &&
	    txq->sw_ring != NULL)
		rte_free(txq->sw_ring);
}

static void
ixgbe_tx_queue_release(struct igb_tx_queue *txq)
{
	if (txq != NULL && txq->ops != NULL) {
		txq->ops->release_mbufs(txq);
		txq->ops->free_swring(txq);
	}
}

void
ixgbe_dev_tx_queue_release(void *txq)
{
	ixgbe_tx_queue_release(txq);
}
/* (Re)set dynamic igb_tx_queue fields to defaults */
static void
ixgbe_reset_tx_queue(struct igb_tx_queue *txq)
{
	static const union ixgbe_adv_tx_desc zeroed_desc = { .read = {
			0} };
	struct igb_tx_entry *txe = txq->sw_ring;
	uint16_t prev, i;

	/* Zero out HW ring memory */
	for (i = 0; i < txq->nb_tx_desc; i++) {
		txq->tx_ring[i] = zeroed_desc;
	}

	/* Initialize SW ring entries */
	prev = (uint16_t) (txq->nb_tx_desc - 1);
	for (i = 0; i < txq->nb_tx_desc; i++) {
		volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
		txd->wb.status = IXGBE_TXD_STAT_DD;
		txe[prev].next_id = i;
		prev = i;
	}

	txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
	txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

	txq->nb_tx_used = 0;
	/*
	 * Always allow 1 descriptor to be un-allocated to avoid
	 * a H/W race condition
	 */
	txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
	memset((void*)&txq->ctx_cache, 0,
		IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
}
static struct ixgbe_txq_ops def_txq_ops = {
	.release_mbufs = ixgbe_tx_queue_release_mbufs,
	.free_swring = ixgbe_tx_free_swring,
	.reset = ixgbe_reset_tx_queue,
};
ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
			 unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf)
{
	const struct rte_memzone *tz;
	struct igb_tx_queue *txq;
	struct ixgbe_hw *hw;
	uint16_t tx_rs_thresh, tx_free_thresh;

	PMD_INIT_FUNC_TRACE();
	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/*
	 * Validate number of transmit descriptors.
	 * It must not exceed hardware maximum, and must be multiple
	 * of IXGBE_ALIGN.
	 */
	if (((nb_desc * sizeof(union ixgbe_adv_tx_desc)) % IXGBE_ALIGN) != 0 ||
	    (nb_desc > IXGBE_MAX_RING_DESC) ||
	    (nb_desc < IXGBE_MIN_RING_DESC)) {

	/*
	 * The following two parameters control the setting of the RS bit on
	 * transmit descriptors.
	 * TX descriptors will have their RS bit set after txq->tx_rs_thresh
	 * descriptors have been used.
	 * The TX descriptor ring will be cleaned after txq->tx_free_thresh
	 * descriptors are used or if the number of descriptors required
	 * to transmit a packet is greater than the number of free TX
	 * descriptors.
	 * The following constraints must be satisfied:
	 *  tx_rs_thresh must be greater than 0.
	 *  tx_rs_thresh must be less than the size of the ring minus 2.
	 *  tx_rs_thresh must be less than or equal to tx_free_thresh.
	 *  tx_rs_thresh must be a divisor of the ring size.
	 *  tx_free_thresh must be greater than 0.
	 *  tx_free_thresh must be less than the size of the ring minus 3.
	 * One descriptor in the TX ring is used as a sentinel to avoid a
	 * H/W race condition, hence the maximum threshold constraints.
	 * When set to zero use default values.
	 */
	tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
			tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
			tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
	if (tx_rs_thresh >= (nb_desc - 2)) {
		RTE_LOG(ERR, PMD, "tx_rs_thresh must be less than the number "
			"of TX descriptors minus 2. (tx_rs_thresh=%u port=%d "
			"queue=%d)\n", (unsigned int)tx_rs_thresh,
			(int)dev->data->port_id, (int)queue_idx);
	if (tx_free_thresh >= (nb_desc - 3)) {
		RTE_LOG(ERR, PMD, "tx_free_thresh must be less than the "
			"number of TX descriptors minus 3. "
			"(tx_free_thresh=%u port=%d "
			"queue=%d)\n", (unsigned int)tx_free_thresh,
			(int)dev->data->port_id, (int)queue_idx);
	if (tx_rs_thresh > tx_free_thresh) {
		RTE_LOG(ERR, PMD, "tx_rs_thresh must be less than or equal to "
			"tx_free_thresh. (tx_free_thresh=%u tx_rs_thresh=%u "
			"port=%d queue=%d)\n", (unsigned int)tx_free_thresh,
			(unsigned int)tx_rs_thresh, (int)dev->data->port_id,
	if ((nb_desc % tx_rs_thresh) != 0) {
		RTE_LOG(ERR, PMD, "tx_rs_thresh must be a divisor of the "
			"number of TX descriptors. (tx_rs_thresh=%u port=%d "
			"queue=%d)\n", (unsigned int)tx_rs_thresh,
			(int)dev->data->port_id, (int)queue_idx);

	/*
	 * If rs_bit_thresh is greater than 1, then TX WTHRESH should be
	 * set to 0. If WTHRESH is greater than zero, the RS bit is ignored
	 * by the NIC and all descriptors are written back after the NIC
	 * accumulates WTHRESH descriptors.
	 */
	if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
		RTE_LOG(ERR, PMD, "TX WTHRESH must be set to 0 if "
			"tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
			"port=%d queue=%d)\n", (unsigned int)tx_rs_thresh,
			(int)dev->data->port_id, (int)queue_idx);

	/* Free memory prior to re-allocation if needed... */
	if (dev->data->tx_queues[queue_idx] != NULL) {
		ixgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
		dev->data->tx_queues[queue_idx] = NULL;
	}

	/* First allocate the tx queue data structure */
	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct igb_tx_queue),
				 CACHE_LINE_SIZE, socket_id);

	/*
	 * Allocate TX ring hardware descriptors. A memzone large enough to
	 * handle the maximum ring size is allocated in order to allow for
	 * resizing in later calls to the queue setup function.
	 */
	tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
			sizeof(union ixgbe_adv_tx_desc) * IXGBE_MAX_RING_DESC,
		ixgbe_tx_queue_release(txq);

	txq->nb_tx_desc = nb_desc;
	txq->tx_rs_thresh = tx_rs_thresh;
	txq->tx_free_thresh = tx_free_thresh;
	txq->pthresh = tx_conf->tx_thresh.pthresh;
	txq->hthresh = tx_conf->tx_thresh.hthresh;
	txq->wthresh = tx_conf->tx_thresh.wthresh;
	txq->queue_id = queue_idx;
	txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
	txq->port_id = dev->data->port_id;
	txq->txq_flags = tx_conf->txq_flags;
	txq->ops = &def_txq_ops;
	txq->start_tx_per_q = tx_conf->start_tx_per_q;

	/*
	 * Modification to set VFTDT for virtual function if vf is detected
	 */
	if (hw->mac.type == ixgbe_mac_82599_vf)
		txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx));
	else
		txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
#ifndef RTE_LIBRTE_XEN_DOM0
	txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
#else
	txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
#endif
	txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;

	/* Allocate software ring */
	txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
				sizeof(struct igb_tx_entry) * nb_desc,
				CACHE_LINE_SIZE, socket_id);
	if (txq->sw_ring == NULL) {
		ixgbe_tx_queue_release(txq);
	}
	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
		     txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);

	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
	if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) &&
	    (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
		PMD_INIT_LOG(INFO, "Using simple tx code path\n");
#ifdef RTE_IXGBE_INC_VECTOR
		if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
		    ixgbe_txq_vec_setup(txq, socket_id) == 0) {
			PMD_INIT_LOG(INFO, "Vector tx enabled.\n");
			dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
		} else
#endif
		dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
	} else {
		PMD_INIT_LOG(INFO, "Using full-featured tx code path\n");
		PMD_INIT_LOG(INFO, " - txq_flags = %lx [IXGBE_SIMPLE_FLAGS=%lx]\n",
			     (long unsigned)txq->txq_flags,
			     (long unsigned)IXGBE_SIMPLE_FLAGS);
		PMD_INIT_LOG(INFO, " - tx_rs_thresh = %lu [RTE_PMD_IXGBE_TX_MAX_BURST=%lu]\n",
			     (long unsigned)txq->tx_rs_thresh,
			     (long unsigned)RTE_PMD_IXGBE_TX_MAX_BURST);
		dev->tx_pkt_burst = ixgbe_xmit_pkts;
	}

	txq->ops->reset(txq);

	dev->data->tx_queues[queue_idx] = txq;
ixgbe_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
{
	unsigned i;

	if (rxq->sw_ring != NULL) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_ring[i].mbuf != NULL) {
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
				rxq->sw_ring[i].mbuf = NULL;
			}
		}
#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
		if (rxq->rx_nb_avail) {
			for (i = 0; i < rxq->rx_nb_avail; ++i) {
				struct rte_mbuf *mb;
				mb = rxq->rx_stage[rxq->rx_next_avail + i];
				rte_pktmbuf_free_seg(mb);
			}
			rxq->rx_nb_avail = 0;
		}
#endif
	}
}

static void
ixgbe_rx_queue_release(struct igb_rx_queue *rxq)
{
	if (rxq != NULL) {
		ixgbe_rx_queue_release_mbufs(rxq);
		rte_free(rxq->sw_ring);
		rte_free(rxq);
	}
}

void
ixgbe_dev_rx_queue_release(void *rxq)
{
	ixgbe_rx_queue_release(rxq);
}
/*
 * Check if Rx Burst Bulk Alloc function can be used.
 * Return
 *        0: the preconditions are satisfied and the bulk allocation function
 *           can be used.
 *  -EINVAL: the preconditions are NOT satisfied and the default Rx burst
 *           function must be used.
 */
static inline int
#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
check_rx_burst_bulk_alloc_preconditions(struct igb_rx_queue *rxq)
#else
check_rx_burst_bulk_alloc_preconditions(__rte_unused struct igb_rx_queue *rxq)
#endif
{
	int ret = 0;

	/*
	 * Make sure the following pre-conditions are satisfied:
	 *   rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST
	 *   rxq->rx_free_thresh < rxq->nb_rx_desc
	 *   (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
	 *   rxq->nb_rx_desc < (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST)
	 * Scattered packets are not supported.  This should be checked
	 * outside of this function.
	 */
#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
	if (! (rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST))
		ret = -EINVAL;
	else if (! (rxq->rx_free_thresh < rxq->nb_rx_desc))
		ret = -EINVAL;
	else if (! ((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0))
		ret = -EINVAL;
	else if (! (rxq->nb_rx_desc <
		(IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST)))
		ret = -EINVAL;
#else
	ret = -EINVAL;
#endif

	return ret;
}
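/*
 * Example of a configuration that satisfies the checks above (not part of
 * the original source, assuming RTE_PMD_IXGBE_RX_MAX_BURST is 32):
 * nb_rx_desc = 512 with rx_free_thresh = 64, since 64 >= 32, 64 < 512,
 * 512 % 64 == 0 and 512 < (IXGBE_MAX_RING_DESC - 32).
 */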
/* Reset dynamic igb_rx_queue fields back to defaults */
static void
ixgbe_reset_rx_queue(struct igb_rx_queue *rxq)
{
	static const union ixgbe_adv_rx_desc zeroed_desc = { .read = {
			0} };
	unsigned i;
	uint16_t len;

	/*
	 * By default, the Rx queue setup function allocates enough memory for
	 * IXGBE_MAX_RING_DESC.  The Rx Burst bulk allocation function requires
	 * extra memory at the end of the descriptor ring to be zero'd out. A
	 * pre-condition for using the Rx burst bulk alloc function is that the
	 * number of descriptors is less than or equal to
	 * (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST). Check all the
	 * constraints here to see if we need to zero out memory after the end
	 * of the H/W descriptor ring.
	 */
#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
	if (check_rx_burst_bulk_alloc_preconditions(rxq) == 0)
		/* zero out extra memory */
		len = (uint16_t)(rxq->nb_rx_desc + RTE_PMD_IXGBE_RX_MAX_BURST);
	else
#endif
		/* do not zero out extra memory */
		len = rxq->nb_rx_desc;

	/*
	 * Zero out HW ring memory. Zero out extra memory at the end of
	 * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
	 * reads extra memory as zeros.
	 */
	for (i = 0; i < len; i++) {
		rxq->rx_ring[i] = zeroed_desc;
	}

#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
	/*
	 * initialize extra software ring entries. Space for these extra
	 * entries is always allocated
	 */
	memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
	for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST; ++i) {
		rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
	}

	rxq->rx_nb_avail = 0;
	rxq->rx_next_avail = 0;
	rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
#endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */

	rxq->nb_rx_hold = 0;
	rxq->pkt_first_seg = NULL;
	rxq->pkt_last_seg = NULL;
}
2044 ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
2047 unsigned int socket_id,
2048 const struct rte_eth_rxconf *rx_conf,
2049 struct rte_mempool *mp)
2051 const struct rte_memzone *rz;
2052 struct igb_rx_queue *rxq;
2053 struct ixgbe_hw *hw;
2054 int use_def_burst_func = 1;
2057 PMD_INIT_FUNC_TRACE();
2058 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2061 * Validate number of receive descriptors.
2062 * It must not exceed hardware maximum, and must be multiple
2065 if (((nb_desc * sizeof(union ixgbe_adv_rx_desc)) % IXGBE_ALIGN) != 0 ||
2066 (nb_desc > IXGBE_MAX_RING_DESC) ||
2067 (nb_desc < IXGBE_MIN_RING_DESC)) {
2071 /* Free memory prior to re-allocation if needed... */
2072 if (dev->data->rx_queues[queue_idx] != NULL) {
2073 ixgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
2074 dev->data->rx_queues[queue_idx] = NULL;
2077 /* First allocate the rx queue data structure */
2078 rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct igb_rx_queue),
2079 CACHE_LINE_SIZE, socket_id);
2083 rxq->nb_rx_desc = nb_desc;
2084 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
2085 rxq->queue_id = queue_idx;
2086 rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2087 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2088 rxq->port_id = dev->data->port_id;
2089 rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
2091 rxq->drop_en = rx_conf->rx_drop_en;
2092 rxq->start_rx_per_q = rx_conf->start_rx_per_q;
2095 * Allocate RX ring hardware descriptors. A memzone large enough to
2096 * handle the maximum ring size is allocated in order to allow for
2097 * resizing in later calls to the queue setup function.
2099 rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx,
2100 RX_RING_SZ, socket_id);
2102 ixgbe_rx_queue_release(rxq);
2107 * Zero init all the descriptors in the ring.
2109 memset (rz->addr, 0, RX_RING_SZ);
2112 * Modified to setup VFRDT for Virtual Function
2114 if (hw->mac.type == ixgbe_mac_82599_vf) {
2116 IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx));
2118 IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDH(queue_idx));
2122 IXGBE_PCI_REG_ADDR(hw, IXGBE_RDT(rxq->reg_idx));
2124 IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx));
2126 #ifndef RTE_LIBRTE_XEN_DOM0
2127 rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
2129 rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
2131 rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
2134 * Allocate software ring. Allow for space at the end of the
2135 * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
2136 * function does not access an invalid memory region.
2138 #ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
2139 len = (uint16_t)(nb_desc + RTE_PMD_IXGBE_RX_MAX_BURST);
2143 rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
2144 sizeof(struct igb_rx_entry) * len,
2145 CACHE_LINE_SIZE, socket_id);
2146 if (rxq->sw_ring == NULL) {
2147 ixgbe_rx_queue_release(rxq);
2150 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
2151 rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
2154 * Certain constraints must be met in order to use the bulk buffer
2155 * allocation Rx burst function.
2157 use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq);
2159 /* Check if pre-conditions are satisfied, and no Scattered Rx */
2160 if (!use_def_burst_func && !dev->data->scattered_rx) {
2161 #ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
2162 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
2163 "satisfied. Rx Burst Bulk Alloc function will be "
2164 "used on port=%d, queue=%d.\n",
2165 rxq->port_id, rxq->queue_id);
2166 dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
2167 #ifdef RTE_IXGBE_INC_VECTOR
2168 if (!ixgbe_rx_vec_condition_check(dev)) {
2169 PMD_INIT_LOG(INFO, "Vector rx enabled, please make "
2170 "sure RX burst size no less than 32.\n");
2171 ixgbe_rxq_vec_setup(rxq, socket_id);
2172 dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
2177 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions "
2178 "are not satisfied, Scattered Rx is requested, "
2179 "or RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC is not "
2180 "enabled (port=%d, queue=%d).\n",
2181 rxq->port_id, rxq->queue_id);
2183 dev->data->rx_queues[queue_idx] = rxq;
2185 ixgbe_reset_rx_queue(rxq);
2191 ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
2193 #define IXGBE_RXQ_SCAN_INTERVAL 4
2194 volatile union ixgbe_adv_rx_desc *rxdp;
2195 struct igb_rx_queue *rxq;
2198 if (rx_queue_id >= dev->data->nb_rx_queues) {
2199 PMD_RX_LOG(ERR, "Invalid RX queue id=%d\n", rx_queue_id);
2203 rxq = dev->data->rx_queues[rx_queue_id];
2204 rxdp = &(rxq->rx_ring[rxq->rx_tail]);
2206 while ((desc < rxq->nb_rx_desc) &&
2207 (rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD)) {
2208 desc += IXGBE_RXQ_SCAN_INTERVAL;
2209 rxdp += IXGBE_RXQ_SCAN_INTERVAL;
2210 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
2211 rxdp = &(rxq->rx_ring[rxq->rx_tail +
2212 desc - rxq->nb_rx_desc]);
2219 ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
2221 volatile union ixgbe_adv_rx_desc *rxdp;
2222 struct igb_rx_queue *rxq = rx_queue;
2225 if (unlikely(offset >= rxq->nb_rx_desc))
2227 desc = rxq->rx_tail + offset;
2228 if (desc >= rxq->nb_rx_desc)
2229 desc -= rxq->nb_rx_desc;
2231 rxdp = &rxq->rx_ring[desc];
2232 return !!(rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD);
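/*
 * Illustrative usage (a sketch, not part of the original file): this handler
 * backs the generic rte_eth_rx_descriptor_done() ethdev call, e.g.
 *
 *     if (rte_eth_rx_descriptor_done(port_id, queue_id, 0))
 *         ; // the next descriptor to be processed has been filled by HW
 */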
2236 ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
2240 PMD_INIT_FUNC_TRACE();
2242 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2243 struct igb_tx_queue *txq = dev->data->tx_queues[i];
2245 txq->ops->release_mbufs(txq);
2246 txq->ops->reset(txq);
2250 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2251 struct igb_rx_queue *rxq = dev->data->rx_queues[i];
2253 ixgbe_rx_queue_release_mbufs(rxq);
2254 ixgbe_reset_rx_queue(rxq);
2259 /*********************************************************************
2261 * Device RX/TX init functions
2263 **********************************************************************/
2266 * Receive Side Scaling (RSS)
2267 * See section 7.1.2.8 in the following document:
2268 * "Intel 82599 10 GbE Controller Datasheet" - Revision 2.1 October 2009
2271 * The source and destination IP addresses of the IP header and the source
2272 * and destination ports of TCP/UDP headers, if any, of received packets are
2273 * hashed against a configurable random key to compute a 32-bit RSS hash result.
2274 * The seven (7) LSBs of the 32-bit hash result are used as an index into a
2275 * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
2276 * RSS output index which is used as the RX queue index where to store the
2278 * The following output is supplied in the RX write-back descriptor:
2279 * - 32-bit result of the Microsoft RSS hash function,
2280 * - 4-bit RSS type field.
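/*
 * For illustration, the queue selection described above amounts to:
 *
 *     rss_hash = rss(key, src_ip, dst_ip, src_port, dst_port);
 *     rx_queue = reta[rss_hash & 0x7F];   // 7 LSBs index the 128-entry RETA
 *
 * where reta[] is the redirection table programmed by ixgbe_rss_configure()
 * further below.
 */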
2284 * RSS random key supplied in section 7.1.2.8.3 of the Intel 82599 datasheet.
2285 * Used as the default key.
2287 static uint8_t rss_intel_key[40] = {
2288 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
2289 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
2290 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
2291 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
2292 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
2296 ixgbe_rss_disable(struct rte_eth_dev *dev)
2298 struct ixgbe_hw *hw;
2301 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2302 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
2303 mrqc &= ~IXGBE_MRQC_RSSEN;
2304 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2308 ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
2316 hash_key = rss_conf->rss_key;
2317 if (hash_key != NULL) {
2318 /* Fill in RSS hash key */
2319 for (i = 0; i < 10; i++) {
2320 rss_key = hash_key[(i * 4)];
2321 rss_key |= hash_key[(i * 4) + 1] << 8;
2322 rss_key |= hash_key[(i * 4) + 2] << 16;
2323 rss_key |= hash_key[(i * 4) + 3] << 24;
2324 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RSSRK(0), i, rss_key);
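/*
 * Each RSSRK register thus holds four consecutive key bytes, least
 * significant byte first.  For example, with the default rss_intel_key
 * above, the first register is written as
 *
 *     RSSRK(0) = 0x6D | (0x5A << 8) | (0x56 << 16) | (0xDA << 24)
 *              = 0xDA565A6D
 */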
2328 /* Set configured hashing protocols in MRQC register */
2329 rss_hf = rss_conf->rss_hf;
2330 mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */
2331 if (rss_hf & ETH_RSS_IPV4)
2332 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
2333 if (rss_hf & ETH_RSS_IPV4_TCP)
2334 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
2335 if (rss_hf & ETH_RSS_IPV6)
2336 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
2337 if (rss_hf & ETH_RSS_IPV6_EX)
2338 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
2339 if (rss_hf & ETH_RSS_IPV6_TCP)
2340 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
2341 if (rss_hf & ETH_RSS_IPV6_TCP_EX)
2342 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
2343 if (rss_hf & ETH_RSS_IPV4_UDP)
2344 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
2345 if (rss_hf & ETH_RSS_IPV6_UDP)
2346 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
2347 if (rss_hf & ETH_RSS_IPV6_UDP_EX)
2348 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2349 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2353 ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
2354 struct rte_eth_rss_conf *rss_conf)
2356 struct ixgbe_hw *hw;
2360 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2363 * Excerpt from section 7.1.2.8 Receive-Side Scaling (RSS):
2364 * "RSS enabling cannot be done dynamically while it must be
2365 * preceded by a software reset"
2366 * Before changing anything, first check that the update RSS operation
2367 * does not attempt to disable RSS, if RSS was enabled at
2368 * initialization time, or does not attempt to enable RSS, if RSS was
2369 * disabled at initialization time.
2371 rss_hf = rss_conf->rss_hf & IXGBE_RSS_OFFLOAD_ALL;
2372 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
2373 if (!(mrqc & IXGBE_MRQC_RSSEN)) { /* RSS disabled */
2374 if (rss_hf != 0) /* Enable RSS */
2376 return 0; /* Nothing to do */
2379 if (rss_hf == 0) /* Disable RSS */
2381 ixgbe_hw_rss_hash_set(hw, rss_conf);
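/*
 * Illustrative application-level usage (a sketch, not part of the original
 * file): this handler is normally reached through the generic ethdev API,
 * e.g.
 *
 *     struct rte_eth_rss_conf conf = {
 *         .rss_key = NULL,                        // keep the current key
 *         .rss_hf  = ETH_RSS_IPV4 | ETH_RSS_IPV4_TCP,
 *     };
 *     rte_eth_dev_rss_hash_update(port_id, &conf);
 *
 * which, as explained above, only succeeds if RSS was already enabled at
 * initialization time.
 */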
2386 ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
2387 struct rte_eth_rss_conf *rss_conf)
2389 struct ixgbe_hw *hw;
2396 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2397 hash_key = rss_conf->rss_key;
2398 if (hash_key != NULL) {
2399 /* Return RSS hash key */
2400 for (i = 0; i < 10; i++) {
2401 rss_key = IXGBE_READ_REG_ARRAY(hw, IXGBE_RSSRK(0), i);
2402 hash_key[(i * 4)] = rss_key & 0x000000FF;
2403 hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
2404 hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
2405 hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
2409 /* Get RSS functions configured in MRQC register */
2410 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
2411 if ((mrqc & IXGBE_MRQC_RSSEN) == 0) { /* RSS is disabled */
2412 rss_conf->rss_hf = 0;
2416 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4)
2417 rss_hf |= ETH_RSS_IPV4;
2418 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP)
2419 rss_hf |= ETH_RSS_IPV4_TCP;
2420 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6)
2421 rss_hf |= ETH_RSS_IPV6;
2422 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX)
2423 rss_hf |= ETH_RSS_IPV6_EX;
2424 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP)
2425 rss_hf |= ETH_RSS_IPV6_TCP;
2426 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP)
2427 rss_hf |= ETH_RSS_IPV6_TCP_EX;
2428 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP)
2429 rss_hf |= ETH_RSS_IPV4_UDP;
2430 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP)
2431 rss_hf |= ETH_RSS_IPV6_UDP;
2432 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP)
2433 rss_hf |= ETH_RSS_IPV6_UDP_EX;
2434 rss_conf->rss_hf = rss_hf;
2439 ixgbe_rss_configure(struct rte_eth_dev *dev)
2441 struct rte_eth_rss_conf rss_conf;
2442 struct ixgbe_hw *hw;
2447 PMD_INIT_FUNC_TRACE();
2448 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2451 * Fill in redirection table
2452 * The byte-swap is needed because NIC registers are in
2453 * little-endian order.
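/*
 * For illustration: each 32-bit RETA register holds four one-byte entries,
 * filled most significant byte first by the shift in the loop below.  With
 * four RX queues the first register becomes
 *
 *     RETA(0) = (0 << 24) | (1 << 16) | (2 << 8) | 3 = 0x00010203
 *
 * before the byte-swap mentioned above is applied for the register write.
 */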
2456 for (i = 0, j = 0; i < 128; i++, j++) {
2457 if (j == dev->data->nb_rx_queues)
2459 reta = (reta << 8) | j;
2461 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2),
2466 * Configure the RSS key and the RSS protocols used to compute
2467 * the RSS hash of input packets.
2469 rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
2470 if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
2471 ixgbe_rss_disable(dev);
2474 if (rss_conf.rss_key == NULL)
2475 rss_conf.rss_key = rss_intel_key; /* Default hash key */
2476 ixgbe_hw_rss_hash_set(hw, &rss_conf);
2479 #define NUM_VFTA_REGISTERS 128
2480 #define NIC_RX_BUFFER_SIZE 0x200
2483 ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
2485 struct rte_eth_vmdq_dcb_conf *cfg;
2486 struct ixgbe_hw *hw;
2487 enum rte_eth_nb_pools num_pools;
2488 uint32_t mrqc, vt_ctl, queue_mapping, vlanctrl;
2490 uint8_t nb_tcs; /* number of traffic classes */
2493 PMD_INIT_FUNC_TRACE();
2494 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2495 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
2496 num_pools = cfg->nb_queue_pools;
2497 /* Check we have a valid number of pools */
2498 if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
2499 ixgbe_rss_disable(dev);
2502 /* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
2503 nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
2507 * split rx buffer up into sections, each for 1 traffic class
2509 pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
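/*
 * For example, with NIC_RX_BUFFER_SIZE of 0x200 (the 512 KB RX packet
 * buffer of the 82599, expressed in KB) and 8 traffic classes, each enabled
 * TC is given pbsize = 512 / 8 = 64 KB in the loop below.
 */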
2510 for (i = 0 ; i < nb_tcs; i++) {
2511 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
2512 rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
2513 /* clear 10 bits. */
2514 rxpbsize |= (pbsize << IXGBE_RXPBSIZE_SHIFT); /* set value */
2515 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
2517 /* zero alloc all unused TCs */
2518 for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2519 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
2520 rxpbsize &= (~( 0x3FF << IXGBE_RXPBSIZE_SHIFT ));
2521 /* clear 10 bits. */
2522 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
2525 /* MRQC: enable vmdq and dcb */
2526 mrqc = ((num_pools == ETH_16_POOLS) ? \
2527 IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN );
2528 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2530 /* PFVTCTL: turn on virtualisation and set the default pool */
2531 vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2532 if (cfg->enable_default_pool) {
2533 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
2535 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
2538 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
2540 /* RTRUP2TC: mapping user priorities to traffic classes (TCs) */
2542 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
2544 * mapping is done with 3 bits per priority,
2545 * so shift by i*3 each time
2547 queue_mapping |= ((cfg->dcb_queue[i] & 0x07) << (i * 3));
2549 IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, queue_mapping);
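/*
 * For illustration: with dcb_queue[] = {0, 0, 0, 0, 1, 1, 1, 1} (user
 * priorities 0-3 mapped to TC0, 4-7 to TC1), the 3-bit fields packed above
 * give
 *
 *     queue_mapping = (1 << 12) | (1 << 15) | (1 << 18) | (1 << 21)
 *                   = 0x00249000
 */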
2551 /* RTRPCS: DCB related */
2552 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, IXGBE_RMCS_RRM);
2554 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
2555 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2556 vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
2557 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
2559 /* VFTA - enable all vlan filters */
2560 for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
2561 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
2564 /* VFRE: pool enabling for receive - 16 or 32 */
2565 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), \
2566 num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
2569 * MPSAR - allow pools to read specific mac addresses
2570 * In this case, all pools should be able to read from mac addr 0
2572 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), 0xFFFFFFFF);
2573 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), 0xFFFFFFFF);
2575 /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
2576 for (i = 0; i < cfg->nb_pool_maps; i++) {
2577 /* set vlan id in VF register and set the valid bit */
2578 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \
2579 (cfg->pool_map[i].vlan_id & 0xFFF)));
2581 * Put the allowed pools in VFB reg. As we only have 16 or 32
2582 * pools, we only need to use the first half of the register
2585 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), cfg->pool_map[i].pools);
2590 * ixgbe_dcb_tx_hw_config - Configure general DCB TX parameters
2591 * @hw: pointer to hardware structure
2592 * @dcb_config: pointer to ixgbe_dcb_config structure
2595 ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw,
2596 struct ixgbe_dcb_config *dcb_config)
2601 PMD_INIT_FUNC_TRACE();
2602 if (hw->mac.type != ixgbe_mac_82598EB) {
2603 /* Disable the Tx desc arbiter so that MTQC can be changed */
2604 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2605 reg |= IXGBE_RTTDCS_ARBDIS;
2606 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
2608 /* Enable DCB for Tx with 8 TCs */
2609 if (dcb_config->num_tcs.pg_tcs == 8) {
2610 reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
2613 reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
2615 if (dcb_config->vt_mode)
2616 reg |= IXGBE_MTQC_VT_ENA;
2617 IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
2619 /* Disable drop for all queues */
2620 for (q = 0; q < 128; q++)
2621 IXGBE_WRITE_REG(hw, IXGBE_QDE,
2622 (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
2624 /* Enable the Tx desc arbiter */
2625 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2626 reg &= ~IXGBE_RTTDCS_ARBDIS;
2627 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
2629 /* Enable Security TX Buffer IFG for DCB */
2630 reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
2631 reg |= IXGBE_SECTX_DCB;
2632 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
2638 * ixgbe_vmdq_dcb_hw_tx_config - Configure general VMDQ+DCB TX parameters
2639 * @dev: pointer to rte_eth_dev structure
2640 * @dcb_config: pointer to ixgbe_dcb_config structure
2643 ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
2644 struct ixgbe_dcb_config *dcb_config)
2646 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2647 &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
2648 struct ixgbe_hw *hw =
2649 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2651 PMD_INIT_FUNC_TRACE();
2652 if (hw->mac.type != ixgbe_mac_82598EB)
2653 /*PF VF Transmit Enable*/
2654 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0),
2655 vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
2657 /*Configure general DCB TX parameters*/
2658 ixgbe_dcb_tx_hw_config(hw,dcb_config);
2663 ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
2664 struct ixgbe_dcb_config *dcb_config)
2666 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2667 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
2668 struct ixgbe_dcb_tc_config *tc;
2671 /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
2672 if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS ) {
2673 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
2674 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
2677 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
2678 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
2680 /* User Priority to Traffic Class mapping */
2681 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2682 j = vmdq_rx_conf->dcb_queue[i];
2683 tc = &dcb_config->tc_config[j];
2684 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
2690 ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
2691 struct ixgbe_dcb_config *dcb_config)
2693 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2694 &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
2695 struct ixgbe_dcb_tc_config *tc;
2698 /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
2699 if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ) {
2700 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
2701 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
2704 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
2705 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
2708 /* User Priority to Traffic Class mapping */
2709 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2710 j = vmdq_tx_conf->dcb_queue[i];
2711 tc = &dcb_config->tc_config[j];
2712 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
2719 ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
2720 struct ixgbe_dcb_config *dcb_config)
2722 struct rte_eth_dcb_rx_conf *rx_conf =
2723 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
2724 struct ixgbe_dcb_tc_config *tc;
2727 dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
2728 dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
2730 /* User Priority to Traffic Class mapping */
2731 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2732 j = rx_conf->dcb_queue[i];
2733 tc = &dcb_config->tc_config[j];
2734 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
2740 ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
2741 struct ixgbe_dcb_config *dcb_config)
2743 struct rte_eth_dcb_tx_conf *tx_conf =
2744 &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
2745 struct ixgbe_dcb_tc_config *tc;
2748 dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
2749 dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
2751 /* User Priority to Traffic Class mapping */
2752 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2753 j = tx_conf->dcb_queue[i];
2754 tc = &dcb_config->tc_config[j];
2755 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
2761 * ixgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
2762 * @hw: pointer to hardware structure
2763 * @dcb_config: pointer to ixgbe_dcb_config structure
2766 ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
2767 struct ixgbe_dcb_config *dcb_config)
2773 PMD_INIT_FUNC_TRACE();
2775 * Disable the arbiter before changing parameters
2776 * (always enable recycle mode; WSP)
2778 reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
2779 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
2781 if (hw->mac.type != ixgbe_mac_82598EB) {
2782 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
2783 if (dcb_config->num_tcs.pg_tcs == 4) {
2784 if (dcb_config->vt_mode)
2785 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
2786 IXGBE_MRQC_VMDQRT4TCEN;
2788 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
2789 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
2793 if (dcb_config->num_tcs.pg_tcs == 8) {
2794 if (dcb_config->vt_mode)
2795 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
2796 IXGBE_MRQC_VMDQRT8TCEN;
2798 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
2799 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
2804 IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
2807 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
2808 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2809 vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
2810 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
2812 /* VFTA - enable all vlan filters */
2813 for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
2814 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
2818 * Configure Rx packet plane (recycle mode; WSP) and
2821 reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
2822 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
2828 ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill,
2829 uint16_t *max,uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
2831 switch (hw->mac.type) {
2832 case ixgbe_mac_82598EB:
2833 ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
2835 case ixgbe_mac_82599EB:
2836 case ixgbe_mac_X540:
2837 ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
2846 ixgbe_dcb_hw_arbite_tx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *max,
2847 uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
2849 switch (hw->mac.type) {
2850 case ixgbe_mac_82598EB:
2851 ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id,tsa);
2852 ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id,tsa);
2854 case ixgbe_mac_82599EB:
2855 case ixgbe_mac_X540:
2856 ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,tsa);
2857 ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,tsa, map);
2864 #define DCB_RX_CONFIG 1
2865 #define DCB_TX_CONFIG 1
2866 #define DCB_TX_PB 1024
2868 * ixgbe_dcb_hw_configure - Enable DCB and configure
2869 * general DCB parameters, in both VT and non-VT mode
2870 * @dev: pointer to rte_eth_dev structure
2871 * @dcb_config: pointer to ixgbe_dcb_config structure
2874 ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
2875 struct ixgbe_dcb_config *dcb_config)
2878 uint8_t i,pfc_en,nb_tcs;
2880 uint8_t config_dcb_rx = 0;
2881 uint8_t config_dcb_tx = 0;
2882 uint8_t tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
2883 uint8_t bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
2884 uint16_t refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
2885 uint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
2886 uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
2887 struct ixgbe_dcb_tc_config *tc;
2888 uint32_t max_frame = dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2889 struct ixgbe_hw *hw =
2890 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2892 switch(dev->data->dev_conf.rxmode.mq_mode){
2893 case ETH_MQ_RX_VMDQ_DCB:
2894 dcb_config->vt_mode = true;
2895 if (hw->mac.type != ixgbe_mac_82598EB) {
2896 config_dcb_rx = DCB_RX_CONFIG;
2898 * get DCB and VT RX configuration parameters
2901 ixgbe_vmdq_dcb_rx_config(dev,dcb_config);
2902 /*Configure general VMDQ and DCB RX parameters*/
2903 ixgbe_vmdq_dcb_configure(dev);
2907 dcb_config->vt_mode = false;
2908 config_dcb_rx = DCB_RX_CONFIG;
2909 /* Get DCB RX configuration parameters from rte_eth_conf */
2910 ixgbe_dcb_rx_config(dev,dcb_config);
2911 /*Configure general DCB RX parameters*/
2912 ixgbe_dcb_rx_hw_config(hw, dcb_config);
2915 PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration\n");
2918 switch (dev->data->dev_conf.txmode.mq_mode) {
2919 case ETH_MQ_TX_VMDQ_DCB:
2920 dcb_config->vt_mode = true;
2921 config_dcb_tx = DCB_TX_CONFIG;
2922 /* get DCB and VT TX configuration parameters from rte_eth_conf */
2923 ixgbe_dcb_vt_tx_config(dev,dcb_config);
2924 /*Configure general VMDQ and DCB TX parameters*/
2925 ixgbe_vmdq_dcb_hw_tx_config(dev,dcb_config);
2929 dcb_config->vt_mode = false;
2930 config_dcb_tx = DCB_TX_CONFIG;
2931 /*get DCB TX configuration parameters from rte_eth_conf*/
2932 ixgbe_dcb_tx_config(dev,dcb_config);
2933 /*Configure general DCB TX parameters*/
2934 ixgbe_dcb_tx_hw_config(hw, dcb_config);
2937 PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration\n");
2941 nb_tcs = dcb_config->num_tcs.pfc_tcs;
2943 ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
2944 if(nb_tcs == ETH_4_TCS) {
2945 /* Avoid un-configured priority mapping to TC0 */
2947 uint8_t mask = 0xFF;
2948 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
2949 mask = (uint8_t)(mask & (~ (1 << map[i])));
2950 for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
2951 if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
2955 /* Re-configure 4 TCs BW */
2956 for (i = 0; i < nb_tcs; i++) {
2957 tc = &dcb_config->tc_config[i];
2958 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
2959 (uint8_t)(100 / nb_tcs);
2960 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
2961 (uint8_t)(100 / nb_tcs);
2963 for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2964 tc = &dcb_config->tc_config[i];
2965 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
2966 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 0;
2971 /* Set RX buffer size */
2972 pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
2973 uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT;
2974 for (i = 0 ; i < nb_tcs; i++) {
2975 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
2977 /* zero alloc all unused TCs */
2978 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2979 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
2983 /* Only an equally distributed Tx packet buffer strategy is supported. */
2984 uint32_t txpktsize = IXGBE_TXPBSIZE_MAX / nb_tcs;
2985 uint32_t txpbthresh = (txpktsize / DCB_TX_PB) - IXGBE_TXPKT_SIZE_MAX;
2986 for (i = 0; i < nb_tcs; i++) {
2987 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
2988 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
2990 /* Clear unused TCs, if any, to zero buffer size*/
2991 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2992 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
2993 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
2997 /*Calculates traffic class credits*/
2998 ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame,
2999 IXGBE_DCB_TX_CONFIG);
3000 ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame,
3001 IXGBE_DCB_RX_CONFIG);
3004 /* Unpack CEE standard containers */
3005 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_RX_CONFIG, refill);
3006 ixgbe_dcb_unpack_max_cee(dcb_config, max);
3007 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_RX_CONFIG, bwgid);
3008 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_RX_CONFIG, tsa);
3009 /* Configure PG(ETS) RX */
3010 ixgbe_dcb_hw_arbite_rx_config(hw,refill,max,bwgid,tsa,map);
3014 /* Unpack CEE standard containers */
3015 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
3016 ixgbe_dcb_unpack_max_cee(dcb_config, max);
3017 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
3018 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
3019 /* Configure PG(ETS) TX */
3020 ixgbe_dcb_hw_arbite_tx_config(hw,refill,max,bwgid,tsa,map);
3023 /*Configure queue statistics registers*/
3024 ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
3026 /* Check if the PFC is supported */
3027 if(dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
3028 pbsize = (uint16_t) (NIC_RX_BUFFER_SIZE / nb_tcs);
3029 for (i = 0; i < nb_tcs; i++) {
3031 * If the TC count is 8, the default high_water is 48 and
3032 * the default low_water is 16.
3034 hw->fc.high_water[i] = (pbsize * 3 ) / 4;
3035 hw->fc.low_water[i] = pbsize / 4;
3036 /* Enable pfc for this TC */
3037 tc = &dcb_config->tc_config[i];
3038 tc->pfc = ixgbe_dcb_pfc_enabled;
3040 ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
3041 if(dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
3043 ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
3050 * ixgbe_configure_dcb - Configure DCB Hardware
3051 * @dev: pointer to rte_eth_dev
3053 void ixgbe_configure_dcb(struct rte_eth_dev *dev)
3055 struct ixgbe_dcb_config *dcb_cfg =
3056 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
3057 struct rte_eth_conf *dev_conf = &(dev->data->dev_conf);
3059 PMD_INIT_FUNC_TRACE();
3061 /* check support mq_mode for DCB */
3062 if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
3063 (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB))
3066 if (dev->data->nb_rx_queues != ETH_DCB_NUM_QUEUES)
3069 /** Configure DCB hardware **/
3070 ixgbe_dcb_hw_configure(dev,dcb_cfg);
3076 * VMDq is only supported on 10 GbE NICs.
3079 ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
3081 struct rte_eth_vmdq_rx_conf *cfg;
3082 struct ixgbe_hw *hw;
3083 enum rte_eth_nb_pools num_pools;
3084 uint32_t mrqc, vt_ctl, vlanctrl;
3087 PMD_INIT_FUNC_TRACE();
3088 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3089 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
3090 num_pools = cfg->nb_queue_pools;
3092 ixgbe_rss_disable(dev);
3094 /* MRQC: enable vmdq */
3095 mrqc = IXGBE_MRQC_VMDQEN;
3096 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3098 /* PFVTCTL: turn on virtualisation and set the default pool */
3099 vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
3100 if (cfg->enable_default_pool)
3101 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
3103 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
3105 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
3107 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
3108 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3109 vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
3110 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
3112 /* VFTA - enable all vlan filters */
3113 for (i = 0; i < NUM_VFTA_REGISTERS; i++)
3114 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), UINT32_MAX);
3116 /* VFRE: pool enabling for receive - 64 */
3117 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), UINT32_MAX);
3118 if (num_pools == ETH_64_POOLS)
3119 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), UINT32_MAX);
3122 * MPSAR - allow pools to read specific mac addresses
3123 * In this case, all pools should be able to read from mac addr 0
3125 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), UINT32_MAX);
3126 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), UINT32_MAX);
3128 /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
3129 for (i = 0; i < cfg->nb_pool_maps; i++) {
3130 /* set vlan id in VF register and set the valid bit */
3131 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \
3132 (cfg->pool_map[i].vlan_id & IXGBE_RXD_VLAN_ID_MASK)));
3134 * Put the allowed pools in VFB reg. As we only have 16 or 64
3135 * pools, we only need to use the first half of the register
3138 if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0)
3139 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), \
3140 (cfg->pool_map[i].pools & UINT32_MAX));
3142 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((i*2+1)), \
3143 ((cfg->pool_map[i].pools >> 32) \
3148 /* PFDMA Tx General Switch Control Enables VMDQ loopback */
3149 if (cfg->enable_loop_back) {
3150 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
3151 for (i = 0; i < RTE_IXGBE_VMTXSW_REGISTER_COUNT; i++)
3152 IXGBE_WRITE_REG(hw, IXGBE_VMTXSW(i), UINT32_MAX);
3155 IXGBE_WRITE_FLUSH(hw);
3159 * ixgbe_vmdq_tx_hw_configure - Configure general VMDq TX parameters
3160 * @hw: pointer to hardware structure
3163 ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
3168 PMD_INIT_FUNC_TRACE();
3169 /*PF VF Transmit Enable*/
3170 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), UINT32_MAX);
3171 IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), UINT32_MAX);
3173 /* Disable the Tx desc arbiter so that MTQC can be changed */
3174 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3175 reg |= IXGBE_RTTDCS_ARBDIS;
3176 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3178 reg = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
3179 IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
3181 /* Disable drop for all queues */
3182 for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
3183 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3184 (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
3186 /* Enable the Tx desc arbiter */
3187 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3188 reg &= ~IXGBE_RTTDCS_ARBDIS;
3189 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3191 IXGBE_WRITE_FLUSH(hw);
3197 ixgbe_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
3199 struct igb_rx_entry *rxe = rxq->sw_ring;
3203 /* Initialize software ring entries */
3204 for (i = 0; i < rxq->nb_rx_desc; i++) {
3205 volatile union ixgbe_adv_rx_desc *rxd;
3206 struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
3208 PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u\n",
3209 (unsigned) rxq->queue_id);
3213 rte_mbuf_refcnt_set(mbuf, 1);
3215 mbuf->data = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;
3217 mbuf->port = rxq->port_id;
3220 rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
3221 rxd = &rxq->rx_ring[i];
3222 rxd->read.hdr_addr = dma_addr;
3223 rxd->read.pkt_addr = dma_addr;
3231 ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
3233 struct ixgbe_hw *hw =
3234 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3236 if (hw->mac.type == ixgbe_mac_82598EB)
3239 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3241 * SRIOV inactive scheme
3242 * any DCB/RSS w/o VMDq multi-queue setting
3244 switch (dev->data->dev_conf.rxmode.mq_mode) {
3246 ixgbe_rss_configure(dev);
3249 case ETH_MQ_RX_VMDQ_DCB:
3250 ixgbe_vmdq_dcb_configure(dev);
3253 case ETH_MQ_RX_VMDQ_ONLY:
3254 ixgbe_vmdq_rx_hw_configure(dev);
3257 case ETH_MQ_RX_NONE:
3258 /* if mq_mode is none, disable rss mode.*/
3259 default: ixgbe_rss_disable(dev);
3262 switch (RTE_ETH_DEV_SRIOV(dev).active) {
3264 * SRIOV active scheme
3265 * FIXME: add support for DCB/RSS together with VMDq & SRIOV
3268 IXGBE_WRITE_REG(hw, IXGBE_MRQC, IXGBE_MRQC_VMDQEN);
3272 IXGBE_WRITE_REG(hw, IXGBE_MRQC, IXGBE_MRQC_VMDQRT4TCEN);
3276 IXGBE_WRITE_REG(hw, IXGBE_MRQC, IXGBE_MRQC_VMDQRT8TCEN);
3279 RTE_LOG(ERR, PMD, "invalid pool number in IOV mode\n");
3287 ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
3289 struct ixgbe_hw *hw =
3290 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3294 if (hw->mac.type == ixgbe_mac_82598EB)
3297 /* disable arbiter before setting MTQC */
3298 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3299 rttdcs |= IXGBE_RTTDCS_ARBDIS;
3300 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3302 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3304 * SRIOV inactive scheme
3305 * any DCB w/o VMDq multi-queue setting
3307 if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
3308 ixgbe_vmdq_tx_hw_configure(hw);
3310 mtqc = IXGBE_MTQC_64Q_1PB;
3311 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
3314 switch (RTE_ETH_DEV_SRIOV(dev).active) {
3317 * SRIOV active scheme
3318 * FIXME: add support for DCB together with VMDq & SRIOV
3321 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
3324 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_32VF;
3327 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_RT_ENA |
3331 mtqc = IXGBE_MTQC_64Q_1PB;
3332 RTE_LOG(ERR, PMD, "invalid pool number in IOV mode\n");
3334 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
3337 /* re-enable arbiter */
3338 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
3339 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
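/*
 * Illustrative application-level configuration (a sketch, not part of the
 * original file): the multi-queue modes handled by ixgbe_dev_mq_rx_configure()
 * and ixgbe_dev_mq_tx_configure() above are selected through rte_eth_conf
 * before rte_eth_dev_configure() is called, e.g.
 *
 *     struct rte_eth_conf port_conf = {
 *         .rxmode = { .mq_mode = ETH_MQ_RX_RSS },
 *         .txmode = { .mq_mode = ETH_MQ_TX_NONE },
 *         .rx_adv_conf.rss_conf = {
 *             .rss_key = NULL,
 *             .rss_hf  = ETH_RSS_IPV4 | ETH_RSS_IPV6,
 *         },
 *     };
 *     rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 */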
3345 * Initializes Receive Unit.
3348 ixgbe_dev_rx_init(struct rte_eth_dev *dev)
3350 struct ixgbe_hw *hw;
3351 struct igb_rx_queue *rxq;
3352 struct rte_pktmbuf_pool_private *mbp_priv;
3364 PMD_INIT_FUNC_TRACE();
3365 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3368 * Make sure receives are disabled while setting
3369 * up the RX context (registers, descriptor rings, etc.).
3371 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3372 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
3374 /* Enable receipt of broadcasted frames */
3375 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3376 fctrl |= IXGBE_FCTRL_BAM;
3377 fctrl |= IXGBE_FCTRL_DPF;
3378 fctrl |= IXGBE_FCTRL_PMCF;
3379 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3382 * Configure CRC stripping, if any.
3384 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3385 if (dev->data->dev_conf.rxmode.hw_strip_crc)
3386 hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
3388 hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
3391 * Configure jumbo frame support, if any.
3393 if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
3394 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
3395 maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
3396 maxfrs &= 0x0000FFFF;
3397 maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
3398 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
3400 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
3403 * If loopback mode is configured for 82599, set LPBK bit.
3405 if (hw->mac.type == ixgbe_mac_82599EB &&
3406 dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
3407 hlreg0 |= IXGBE_HLREG0_LPBK;
3409 hlreg0 &= ~IXGBE_HLREG0_LPBK;
3411 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
3413 /* Setup RX queues */
3414 for (i = 0; i < dev->data->nb_rx_queues; i++) {
3415 rxq = dev->data->rx_queues[i];
3418 * Reset crc_len in case it was changed after queue setup by a
3419 * call to configure.
3421 rxq->crc_len = (uint8_t)
3422 ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
3425 /* Setup the Base and Length of the Rx Descriptor Rings */
3426 bus_addr = rxq->rx_ring_phys_addr;
3427 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rxq->reg_idx),
3428 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
3429 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rxq->reg_idx),
3430 (uint32_t)(bus_addr >> 32));
3431 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rxq->reg_idx),
3432 rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
3433 IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
3434 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), 0);
3436 /* Configure the SRRCTL register */
3437 #ifdef RTE_HEADER_SPLIT_ENABLE
3439 * Configure Header Split
3441 if (dev->data->dev_conf.rxmode.header_split) {
3442 if (hw->mac.type == ixgbe_mac_82599EB) {
3443 /* Must setup the PSRTYPE register */
3445 psrtype = IXGBE_PSRTYPE_TCPHDR |
3446 IXGBE_PSRTYPE_UDPHDR |
3447 IXGBE_PSRTYPE_IPV4HDR |
3448 IXGBE_PSRTYPE_IPV6HDR;
3449 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
3451 srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
3452 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
3453 IXGBE_SRRCTL_BSIZEHDR_MASK);
3454 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
3457 srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3459 /* Set if packets are dropped when no descriptors available */
3461 srrctl |= IXGBE_SRRCTL_DROP_EN;
3464 * Configure the RX buffer size in the BSIZEPACKET field of
3465 * the SRRCTL register of the queue.
3466 * The value is in 1 KB resolution. Valid values range from 1 KB to 16 KB.
3469 mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
3470 buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
3471 RTE_PKTMBUF_HEADROOM);
3472 srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
3473 IXGBE_SRRCTL_BSIZEPKT_MASK);
3474 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
3476 buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
3477 IXGBE_SRRCTL_BSIZEPKT_SHIFT);
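/*
 * For example, with a typical 2048-byte mbuf data room and the default
 * RTE_PKTMBUF_HEADROOM of 128 bytes, buf_size is 1920; shifting by
 * IXGBE_SRRCTL_BSIZEPKT_SHIFT (10) programs a 1 KB receive buffer, and the
 * read-back above rounds buf_size down to 1024 accordingly, which in turn
 * decides whether scattered RX must be enabled below.
 */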
3479 /* It adds dual VLAN length for supporting dual VLAN */
3480 if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
3481 2 * IXGBE_VLAN_TAG_SIZE) > buf_size){
3482 dev->data->scattered_rx = 1;
3483 dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
3487 if (dev->data->dev_conf.rxmode.enable_scatter) {
3488 dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
3489 dev->data->scattered_rx = 1;
3493 * Device configured with multiple RX queues.
3495 ixgbe_dev_mq_rx_configure(dev);
3498 * Setup the Checksum Register.
3499 * Disable Full-Packet Checksum which is mutually exclusive with RSS.
3500 * Enable IP/L4 checksum computation by hardware if requested to do so.
3502 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3503 rxcsum |= IXGBE_RXCSUM_PCSD;
3504 if (dev->data->dev_conf.rxmode.hw_ip_checksum)
3505 rxcsum |= IXGBE_RXCSUM_IPPCSE;
3507 rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
3509 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3511 if (hw->mac.type == ixgbe_mac_82599EB) {
3512 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
3513 if (dev->data->dev_conf.rxmode.hw_strip_crc)
3514 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
3516 rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
3517 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
3518 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
3525 * Initializes Transmit Unit.
3528 ixgbe_dev_tx_init(struct rte_eth_dev *dev)
3530 struct ixgbe_hw *hw;
3531 struct igb_tx_queue *txq;
3537 PMD_INIT_FUNC_TRACE();
3538 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3540 /* Enable TX CRC (checksum offload requirement) */
3541 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3542 hlreg0 |= IXGBE_HLREG0_TXCRCEN;
3543 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
3545 /* Setup the Base and Length of the Tx Descriptor Rings */
3546 for (i = 0; i < dev->data->nb_tx_queues; i++) {
3547 txq = dev->data->tx_queues[i];
3549 bus_addr = txq->tx_ring_phys_addr;
3550 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(txq->reg_idx),
3551 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
3552 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(txq->reg_idx),
3553 (uint32_t)(bus_addr >> 32));
3554 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(txq->reg_idx),
3555 txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
3556 /* Setup the HW Tx Head and TX Tail descriptor pointers */
3557 IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
3558 IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
3561 * Disable the Tx Head Writeback RO bit, since it breaks the internal
3562 * bookkeeping if descriptor write-backs are not delivered in order.
3564 switch (hw->mac.type) {
3565 case ixgbe_mac_82598EB:
3566 txctrl = IXGBE_READ_REG(hw,
3567 IXGBE_DCA_TXCTRL(txq->reg_idx));
3568 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
3569 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx),
3573 case ixgbe_mac_82599EB:
3574 case ixgbe_mac_X540:
3576 txctrl = IXGBE_READ_REG(hw,
3577 IXGBE_DCA_TXCTRL_82599(txq->reg_idx));
3578 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
3579 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx),
3585 /* Device configured with multiple TX queues. */
3586 ixgbe_dev_mq_tx_configure(dev);
3590 * Set up link for 82599 loopback mode Tx->Rx.
3593 ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
3595 DEBUGFUNC("ixgbe_setup_loopback_link_82599");
3597 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
3598 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM) !=
3600 PMD_INIT_LOG(ERR, "Could not enable loopback mode\n");
3609 IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU);
3610 ixgbe_reset_pipeline_82599(hw);
3612 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
3618 * Start Transmit and Receive Units.
3621 ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
3623 struct ixgbe_hw *hw;
3624 struct igb_tx_queue *txq;
3625 struct igb_rx_queue *rxq;
3631 PMD_INIT_FUNC_TRACE();
3632 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3634 for (i = 0; i < dev->data->nb_tx_queues; i++) {
3635 txq = dev->data->tx_queues[i];
3636 /* Setup Transmit Threshold Registers */
3637 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
3638 txdctl |= txq->pthresh & 0x7F;
3639 txdctl |= ((txq->hthresh & 0x7F) << 8);
3640 txdctl |= ((txq->wthresh & 0x7F) << 16);
3641 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
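/*
 * For reference, the three thresholds land in separate TXDCTL bit fields:
 * PTHRESH in bits 6:0, HTHRESH in bits 14:8 and WTHRESH in bits 22:16,
 * which is what the masks and shifts above implement.
 */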
3644 if (hw->mac.type != ixgbe_mac_82598EB) {
3645 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
3646 dmatxctl |= IXGBE_DMATXCTL_TE;
3647 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
3650 for (i = 0; i < dev->data->nb_tx_queues; i++) {
3651 txq = dev->data->tx_queues[i];
3652 if (!txq->start_tx_per_q)
3653 ixgbe_dev_tx_queue_start(dev, i);
3656 for (i = 0; i < dev->data->nb_rx_queues; i++) {
3657 rxq = dev->data->rx_queues[i];
3658 if (!rxq->start_rx_per_q)
3659 ixgbe_dev_rx_queue_start(dev, i);
3662 /* Enable Receive engine */
3663 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3664 if (hw->mac.type == ixgbe_mac_82598EB)
3665 rxctrl |= IXGBE_RXCTRL_DMBYPS;
3666 rxctrl |= IXGBE_RXCTRL_RXEN;
3667 hw->mac.ops.enable_rx_dma(hw, rxctrl);
3669 /* If loopback mode is enabled for 82599, set up the link accordingly */
3670 if (hw->mac.type == ixgbe_mac_82599EB &&
3671 dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
3672 ixgbe_setup_loopback_link_82599(hw);
3677 * Start Receive Units for specified queue.
3680 ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
3682 struct ixgbe_hw *hw;
3683 struct igb_rx_queue *rxq;
3687 PMD_INIT_FUNC_TRACE();
3688 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3690 if (rx_queue_id < dev->data->nb_rx_queues) {
3691 rxq = dev->data->rx_queues[rx_queue_id];
3693 /* Allocate buffers for descriptor rings */
3694 if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
3696 "Could not alloc mbuf for queue:%d\n",
3700 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
3701 rxdctl |= IXGBE_RXDCTL_ENABLE;
3702 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
3704 /* Wait until RX Enable ready */
3705 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
3708 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
3709 } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
3711 PMD_INIT_LOG(ERR, "Could not enable "
3712 "Rx Queue %d\n", rx_queue_id);
3714 IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
3715 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
3723 * Stop Receive Units for specified queue.
3726 ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
3728 struct ixgbe_hw *hw;
3729 struct igb_rx_queue *rxq;
3733 PMD_INIT_FUNC_TRACE();
3734 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3736 if (rx_queue_id < dev->data->nb_rx_queues) {
3737 rxq = dev->data->rx_queues[rx_queue_id];
3739 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
3740 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
3741 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
3743 /* Wait until the RX Enable bit has cleared */
3744 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
3747 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
3748 } while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE));
3750 PMD_INIT_LOG(ERR, "Could not disable "
3751 "Rx Queue %d\n", rx_queue_id);
3753 rte_delay_us(RTE_IXGBE_WAIT_100_US);
3755 ixgbe_rx_queue_release_mbufs(rxq);
3756 ixgbe_reset_rx_queue(rxq);
3765 * Start Transmit Units for specified queue.
3768 ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
3770 struct ixgbe_hw *hw;
3771 struct igb_tx_queue *txq;
3775 PMD_INIT_FUNC_TRACE();
3776 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3778 if (tx_queue_id < dev->data->nb_tx_queues) {
3779 txq = dev->data->tx_queues[tx_queue_id];
3780 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
3781 txdctl |= IXGBE_TXDCTL_ENABLE;
3782 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
3784 /* Wait until TX Enable ready */
3785 if (hw->mac.type == ixgbe_mac_82599EB) {
3786 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
3789 txdctl = IXGBE_READ_REG(hw,
3790 IXGBE_TXDCTL(txq->reg_idx));
3791 } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
3793 PMD_INIT_LOG(ERR, "Could not enable "
3794 "Tx Queue %d\n", tx_queue_id);
3797 IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
3798 IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
3806 * Stop Transmit Units for specified queue.
3809 ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
3811 struct ixgbe_hw *hw;
3812 struct igb_tx_queue *txq;
3814 uint32_t txtdh, txtdt;
3817 PMD_INIT_FUNC_TRACE();
3818 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3820 if (tx_queue_id < dev->data->nb_tx_queues) {
3821 txq = dev->data->tx_queues[tx_queue_id];
3823 /* Wait until TX queue is empty */
3824 if (hw->mac.type == ixgbe_mac_82599EB) {
3825 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
3827 rte_delay_us(RTE_IXGBE_WAIT_100_US);
3828 txtdh = IXGBE_READ_REG(hw,
3829 IXGBE_TDH(txq->reg_idx));
3830 txtdt = IXGBE_READ_REG(hw,
3831 IXGBE_TDT(txq->reg_idx));
3832 } while (--poll_ms && (txtdh != txtdt));
3835 "Tx Queue %d is not empty when stopping.\n",
3839 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
3840 txdctl &= ~IXGBE_TXDCTL_ENABLE;
3841 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
3843 /* Wait until the TX Enable bit has cleared */
3844 if (hw->mac.type == ixgbe_mac_82599EB) {
3845 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
3848 txdctl = IXGBE_READ_REG(hw,
3849 IXGBE_TXDCTL(txq->reg_idx));
3850 } while (--poll_ms && (txdctl & IXGBE_TXDCTL_ENABLE));
3852 PMD_INIT_LOG(ERR, "Could not disable "
3853 "Tx Queue %d\n", tx_queue_id);
3856 if (txq->ops != NULL) {
3857 txq->ops->release_mbufs(txq);
3858 txq->ops->reset(txq);
3867 * [VF] Initializes Receive Unit.
3870 ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
3872 struct ixgbe_hw *hw;
3873 struct igb_rx_queue *rxq;
3874 struct rte_pktmbuf_pool_private *mbp_priv;
3881 PMD_INIT_FUNC_TRACE();
3882 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3885 * When the VF driver issues an IXGBE_VF_RESET request, the PF driver
3886 * disables the VF receipt of packets if the PF MTU is > 1500.
3887 * This is done to deal with a 82599 limitation that requires
3888 * the PF and all VFs to share the same MTU.
3889 * The PF driver then re-enables VF receipt of packets when the VF
3890 * driver issues an IXGBE_VF_SET_LPE request.
3891 * In the meantime, the VF device cannot be used, even if the VF driver
3892 * and the Guest VM network stack are ready to accept packets with a
3893 * size up to the PF MTU.
3894 * As a work-around to this PF behaviour, force the call to
3895 * ixgbevf_rlpml_set_vf even if jumbo frames are not used. This way,
3896 * the VF can receive packets in all cases.
3898 ixgbevf_rlpml_set_vf(hw,
3899 (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
3901 /* Setup RX queues */
3902 dev->rx_pkt_burst = ixgbe_recv_pkts;
3903 for (i = 0; i < dev->data->nb_rx_queues; i++) {
3904 rxq = dev->data->rx_queues[i];
3906 /* Allocate buffers for descriptor rings */
3907 ret = ixgbe_alloc_rx_queue_mbufs(rxq);
3911 /* Setup the Base and Length of the Rx Descriptor Rings */
3912 bus_addr = rxq->rx_ring_phys_addr;
3914 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
3915 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
3916 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
3917 (uint32_t)(bus_addr >> 32));
3918 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
3919 rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
3920 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0);
3921 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0);
3924 /* Configure the SRRCTL register */
3925 #ifdef RTE_HEADER_SPLIT_ENABLE
3927 * Configure Header Split
3929 if (dev->data->dev_conf.rxmode.header_split) {
3931 /* Must setup the PSRTYPE register */
3933 psrtype = IXGBE_PSRTYPE_TCPHDR |
3934 IXGBE_PSRTYPE_UDPHDR |
3935 IXGBE_PSRTYPE_IPV4HDR |
3936 IXGBE_PSRTYPE_IPV6HDR;
3938 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE(i), psrtype);
3940 srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
3941 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
3942 IXGBE_SRRCTL_BSIZEHDR_MASK);
3943 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
3946 srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3948 /* Set if packets are dropped when no descriptors available */
3950 srrctl |= IXGBE_SRRCTL_DROP_EN;
3953 * Configure the RX buffer size in the BSIZEPACKET field of
3954 * the SRRCTL register of the queue.
3955 * The value is in 1 KB resolution. Valid values range from 1 KB to 16 KB.
3958 mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
3959 buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
3960 RTE_PKTMBUF_HEADROOM);
3961 srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
3962 IXGBE_SRRCTL_BSIZEPKT_MASK);
3965 * VF modification to write virtual function SRRCTL register
3967 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), srrctl);
3969 buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
3970 IXGBE_SRRCTL_BSIZEPKT_SHIFT);
3972 /* It adds dual VLAN length for supporting dual VLAN */
3973 if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
3974 2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
3975 dev->data->scattered_rx = 1;
3976 dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
3980 if (dev->data->dev_conf.rxmode.enable_scatter) {
3981 dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
3982 dev->data->scattered_rx = 1;
3989 * [VF] Initializes Transmit Unit.
3992 ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
3994 struct ixgbe_hw *hw;
3995 struct igb_tx_queue *txq;
4000 PMD_INIT_FUNC_TRACE();
4001 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4003 /* Setup the Base and Length of the Tx Descriptor Rings */
4004 for (i = 0; i < dev->data->nb_tx_queues; i++) {
4005 txq = dev->data->tx_queues[i];
4006 bus_addr = txq->tx_ring_phys_addr;
4007 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
4008 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4009 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i),
4010 (uint32_t)(bus_addr >> 32));
4011 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
4012 txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
4013 /* Setup the HW Tx Head and TX Tail descriptor pointers */
4014 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
4015 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
4018 * Disable the Tx Head Writeback RO bit, since it breaks the internal
4019 * bookkeeping if descriptor write-backs are not delivered in order.
4021 txctrl = IXGBE_READ_REG(hw,
4022 IXGBE_VFDCA_TXCTRL(i));
4023 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4024 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i),
4030 * [VF] Start Transmit and Receive Units.
4033 ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
4035 struct ixgbe_hw *hw;
4036 struct igb_tx_queue *txq;
4037 struct igb_rx_queue *rxq;
4043 PMD_INIT_FUNC_TRACE();
4044 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4046 for (i = 0; i < dev->data->nb_tx_queues; i++) {
4047 txq = dev->data->tx_queues[i];
4048 /* Setup Transmit Threshold Registers */
4049 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
4050 txdctl |= txq->pthresh & 0x7F;
4051 txdctl |= ((txq->hthresh & 0x7F) << 8);
4052 txdctl |= ((txq->wthresh & 0x7F) << 16);
4053 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
4056 for (i = 0; i < dev->data->nb_tx_queues; i++) {
4058 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
4059 txdctl |= IXGBE_TXDCTL_ENABLE;
4060 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
4063 /* Wait until TX Enable ready */
4066 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
4067 } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
4069 PMD_INIT_LOG(ERR, "Could not enable "
4070 "Tx Queue %d\n", i);
4072 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4074 rxq = dev->data->rx_queues[i];
4076 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
4077 rxdctl |= IXGBE_RXDCTL_ENABLE;
4078 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
4080 /* Wait until RX Enable ready */
4084 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
4085 } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
4087 PMD_INIT_LOG(ERR, "Could not enable "
4088 "Rx Queue %d\n", i);
4090 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1);