/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   Copyright 2014 6WIND S.A.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_prefetch.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_ip.h>
#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "ixgbe_ethdev.h"
#include "base/ixgbe_dcb.h"
#include "base/ixgbe_common.h"
#include "ixgbe_rxtx.h"
/* Bit Mask to indicate what bits are required for building a TX context */
#define IXGBE_TX_OFFLOAD_MASK (PKT_TX_VLAN_PKT | PKT_TX_IP_CKSUM | \
		PKT_TX_L4_MASK | PKT_TX_TCP_SEG)
static inline struct rte_mbuf *
rte_rxmbuf_alloc(struct rte_mempool *mp)
{
	struct rte_mbuf *m;
	m = __rte_mbuf_raw_alloc(mp);
	__rte_mbuf_sanity_check_raw(m, 0);
	return m;
}
#define RTE_PMD_USE_PREFETCH

#ifdef RTE_PMD_USE_PREFETCH
/*
 * Prefetch a cache line into all cache levels.
 */
#define rte_ixgbe_prefetch(p)   rte_prefetch0(p)
#else
#define rte_ixgbe_prefetch(p)   do {} while (0)
#endif
/*********************************************************************
 *
 *  TX functions
 *
 **********************************************************************/
/*
 * Check for descriptors with their DD bit set and free mbufs.
 * Return the total number of buffers freed.
 */
static inline int __attribute__((always_inline))
ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
{
	struct ixgbe_tx_entry *txep;
	uint32_t status;
	int i;

	/* check DD bit on threshold descriptor */
	status = txq->tx_ring[txq->tx_next_dd].wb.status;
	if (!(status & IXGBE_ADVTXD_STAT_DD))
		return 0;

	/*
	 * first buffer to free from S/W ring is at index
	 * tx_next_dd - (tx_rs_thresh-1)
	 */
	txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]);

	/* free buffers one at a time */
	if ((txq->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOREFCOUNT) != 0) {
		for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
			txep->mbuf->next = NULL;
			rte_mempool_put(txep->mbuf->pool, txep->mbuf);
			txep->mbuf = NULL;
		}
	} else {
		for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
			rte_pktmbuf_free_seg(txep->mbuf);
			txep->mbuf = NULL;
		}
	}

	/* buffers were freed, update counters */
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
	if (txq->tx_next_dd >= txq->nb_tx_desc)
		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);

	return txq->tx_rs_thresh;
}
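/*
 * Illustration (not part of the original source): with nb_tx_desc = 512 and
 * tx_rs_thresh = 32, the DD bit is polled only on descriptors 31, 63, 95, ...
 * A set DD bit on descriptor 63 implies descriptors 32..63 are all done, so
 * the 32 mbufs at sw_ring[32..63] can be released in a single pass.
 */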
/* Populate 4 descriptors with data from 4 mbufs */
static inline void
tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
{
	uint64_t buf_dma_addr;
	uint32_t pkt_len;
	int i;

	for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
		buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
		pkt_len = (*pkts)->data_len;

		/* write data to descriptor */
		txdp->read.buffer_addr = buf_dma_addr;
		txdp->read.cmd_type_len =
				((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
		txdp->read.olinfo_status =
				(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
		rte_prefetch0(&(*pkts)->pool);
	}
}
/* Populate 1 descriptor with data from 1 mbuf */
static inline void
tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
{
	uint64_t buf_dma_addr;
	uint32_t pkt_len;

	buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
	pkt_len = (*pkts)->data_len;

	/* write data to descriptor */
	txdp->read.buffer_addr = buf_dma_addr;
	txdp->read.cmd_type_len =
			((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
	txdp->read.olinfo_status =
			(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
	rte_prefetch0(&(*pkts)->pool);
}
/*
 * Fill H/W descriptor ring with mbuf data.
 * Copy mbuf pointers to the S/W ring.
 */
static inline void
ixgbe_tx_fill_hw_ring(struct ixgbe_tx_queue *txq, struct rte_mbuf **pkts,
		      uint16_t nb_pkts)
{
	volatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
	struct ixgbe_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
	const int N_PER_LOOP = 4;
	const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
	int mainpart, leftover;
	int i, j;

	/*
	 * Process most of the packets in chunks of N pkts.  Any
	 * leftover packets will get processed one at a time.
	 */
	mainpart = (nb_pkts & ((uint32_t) ~N_PER_LOOP_MASK));
	leftover = (nb_pkts & ((uint32_t) N_PER_LOOP_MASK));
	for (i = 0; i < mainpart; i += N_PER_LOOP) {
		/* Copy N mbuf pointers to the S/W ring */
		for (j = 0; j < N_PER_LOOP; ++j) {
			(txep + i + j)->mbuf = *(pkts + i + j);
		}
		tx4(txdp + i, pkts + i);
	}

	if (unlikely(leftover > 0)) {
		for (i = 0; i < leftover; ++i) {
			(txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
			tx1(txdp + mainpart + i, pkts + mainpart + i);
		}
	}
}
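/*
 * Worked example (illustrative, not in the original): for nb_pkts = 30,
 * mainpart = 30 & ~3 = 28 and leftover = 30 & 3 = 2, so seven tx4() calls
 * fill descriptors 0..27 and two tx1() calls fill descriptors 28..29.
 */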
static inline uint16_t
tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
	     uint16_t nb_pkts)
{
	struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
	volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring;
	uint16_t n = 0;

	/*
	 * Begin scanning the H/W ring for done descriptors when the
	 * number of available descriptors drops below tx_free_thresh.  For
	 * each done descriptor, free the associated buffer.
	 */
	if (txq->nb_tx_free < txq->tx_free_thresh)
		ixgbe_tx_free_bufs(txq);

	/* Only use descriptors that are available */
	nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
	if (unlikely(nb_pkts == 0))
		return 0;

	/* Use exactly nb_pkts descriptors */
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);

	/*
	 * At this point, we know there are enough descriptors in the
	 * ring to transmit all the packets.  This assumes that each
	 * mbuf contains a single segment, and that no new offloads
	 * are expected, which would require a new context descriptor.
	 */

	/*
	 * See if we're going to wrap-around. If so, handle the top
	 * of the descriptor ring first, then do the bottom.  If not,
	 * the processing looks just like the "bottom" part anyway...
	 */
	if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
		n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
		ixgbe_tx_fill_hw_ring(txq, tx_pkts, n);

		/*
		 * We know that the last descriptor in the ring will need to
		 * have its RS bit set because tx_rs_thresh has to be
		 * a divisor of the ring size
		 */
		tx_r[txq->tx_next_rs].read.cmd_type_len |=
			rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

		txq->tx_tail = 0;
	}

	/* Fill H/W descriptor ring with mbuf data */
	ixgbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
	txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));

	/*
	 * Determine if the RS bit should be set.
	 * This is what we actually want:
	 *     if ((txq->tx_tail - 1) >= txq->tx_next_rs)
	 * but instead of subtracting 1 and doing >=, we can just do
	 * greater than without subtracting.
	 */
	if (txq->tx_tail > txq->tx_next_rs) {
		tx_r[txq->tx_next_rs].read.cmd_type_len |=
			rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
		txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
						txq->tx_rs_thresh);
		if (txq->tx_next_rs >= txq->nb_tx_desc)
			txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
	}

	/*
	 * Check for wrap-around. This would only happen if we used
	 * up to the last descriptor in the ring, no more, no less.
	 */
	if (txq->tx_tail >= txq->nb_tx_desc)
		txq->tx_tail = 0;

	/* update tail pointer */
	rte_wmb();
	IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);

	return nb_pkts;
}
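/*
 * Note (illustrative): because tx_rs_thresh must evenly divide the ring size,
 * e.g. 32 into 512, the RS positions 31, 63, ..., 511 always include the very
 * last descriptor, which is why the wrap-around branch above can set RS on it
 * unconditionally.
 */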
static uint16_t
ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
		       uint16_t nb_pkts)
{
	uint16_t nb_tx = 0;

	/* Try to transmit at least chunks of TX_MAX_BURST pkts */
	if (likely(nb_pkts <= RTE_PMD_IXGBE_TX_MAX_BURST))
		return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);

	/* transmit more than the max burst, in chunks of TX_MAX_BURST */
	while (nb_pkts) {
		uint16_t ret, n;
		n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_TX_MAX_BURST);
		ret = tx_xmit_pkts(tx_queue, &(tx_pkts[nb_tx]), n);
		nb_tx = (uint16_t)(nb_tx + ret);
		nb_pkts = (uint16_t)(nb_pkts - ret);
		if (ret < n)
			break;
	}
	return nb_tx;
}
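/*
 * Usage sketch (hypothetical values): a caller passing nb_pkts = 100 with
 * RTE_PMD_IXGBE_TX_MAX_BURST = 32 results in chunks of 32, 32, 32 and 4;
 * the loop stops early if any chunk transmits fewer packets than requested,
 * i.e. when the ring has run out of free descriptors.
 */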
static inline void
ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
		volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
		uint64_t ol_flags, union ixgbe_tx_offload tx_offload)
{
	uint32_t type_tucmd_mlhl;
	uint32_t mss_l4len_idx = 0;
	uint32_t ctx_idx;
	uint32_t vlan_macip_lens;
	union ixgbe_tx_offload tx_offload_mask;

	ctx_idx = txq->ctx_curr;
	tx_offload_mask.data = 0;
	type_tucmd_mlhl = 0;

	/* Specify which HW CTX to upload. */
	mss_l4len_idx |= (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT);

	if (ol_flags & PKT_TX_VLAN_PKT) {
		tx_offload_mask.vlan_tci |= ~0;
	}

	/* check if TCP segmentation is required for this packet */
	if (ol_flags & PKT_TX_TCP_SEG) {
		/* implies IP cksum in IPv4 */
		if (ol_flags & PKT_TX_IP_CKSUM)
			type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4 |
				IXGBE_ADVTXD_TUCMD_L4T_TCP |
				IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
		else
			type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV6 |
				IXGBE_ADVTXD_TUCMD_L4T_TCP |
				IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;

		tx_offload_mask.l2_len |= ~0;
		tx_offload_mask.l3_len |= ~0;
		tx_offload_mask.l4_len |= ~0;
		tx_offload_mask.tso_segsz |= ~0;
		mss_l4len_idx |= tx_offload.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT;
		mss_l4len_idx |= tx_offload.l4_len << IXGBE_ADVTXD_L4LEN_SHIFT;
	} else { /* no TSO, check if hardware checksum is needed */
		if (ol_flags & PKT_TX_IP_CKSUM) {
			type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4;
			tx_offload_mask.l2_len |= ~0;
			tx_offload_mask.l3_len |= ~0;
		}

		switch (ol_flags & PKT_TX_L4_MASK) {
		case PKT_TX_UDP_CKSUM:
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP |
				IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
			mss_l4len_idx |= sizeof(struct udp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
			tx_offload_mask.l2_len |= ~0;
			tx_offload_mask.l3_len |= ~0;
			break;
		case PKT_TX_TCP_CKSUM:
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP |
				IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
			mss_l4len_idx |= sizeof(struct tcp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
			tx_offload_mask.l2_len |= ~0;
			tx_offload_mask.l3_len |= ~0;
			tx_offload_mask.l4_len |= ~0;
			break;
		case PKT_TX_SCTP_CKSUM:
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP |
				IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
			mss_l4len_idx |= sizeof(struct sctp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
			tx_offload_mask.l2_len |= ~0;
			tx_offload_mask.l3_len |= ~0;
			break;
		default:
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_RSV |
				IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
			break;
		}
	}

	txq->ctx_cache[ctx_idx].flags = ol_flags;
	txq->ctx_cache[ctx_idx].tx_offload.data =
		tx_offload_mask.data & tx_offload.data;
	txq->ctx_cache[ctx_idx].tx_offload_mask = tx_offload_mask;

	ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
	vlan_macip_lens = tx_offload.l3_len;
	vlan_macip_lens |= (tx_offload.l2_len << IXGBE_ADVTXD_MACLEN_SHIFT);
	vlan_macip_lens |= ((uint32_t)tx_offload.vlan_tci << IXGBE_ADVTXD_VLAN_SHIFT);
	ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
	ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
	ctx_txd->seqnum_seed = 0;
}
/*
 * Check which hardware context can be used. Use the existing match
 * or create a new context descriptor.
 */
static inline uint32_t
what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
		union ixgbe_tx_offload tx_offload)
{
	/* If match with the current used context */
	if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
		(txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
		(txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
			return txq->ctx_curr;
	}

	/* What if match with the next context  */
	txq->ctx_curr ^= 1;
	if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
		(txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
		(txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
			return txq->ctx_curr;
	}

	/* Mismatch, use the previous context */
	return IXGBE_CTX_NUM;
}
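/*
 * Background (illustrative): the hardware keeps two context slots per queue,
 * so the driver caches both (IXGBE_CTX_NUM == 2) and toggles ctx_curr between
 * them.  A return value of IXGBE_CTX_NUM means neither cached slot matches
 * and a fresh context descriptor must be written.
 */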
static inline uint32_t
tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
{
	uint32_t tmp = 0;

	if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM)
		tmp |= IXGBE_ADVTXD_POPTS_TXSM;
	if (ol_flags & PKT_TX_IP_CKSUM)
		tmp |= IXGBE_ADVTXD_POPTS_IXSM;
	if (ol_flags & PKT_TX_TCP_SEG)
		tmp |= IXGBE_ADVTXD_POPTS_TXSM;
	return tmp;
}
static inline uint32_t
tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
{
	uint32_t cmdtype = 0;

	if (ol_flags & PKT_TX_VLAN_PKT)
		cmdtype |= IXGBE_ADVTXD_DCMD_VLE;
	if (ol_flags & PKT_TX_TCP_SEG)
		cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
	return cmdtype;
}
/* Default RS bit threshold values */
#ifndef DEFAULT_TX_RS_THRESH
#define DEFAULT_TX_RS_THRESH   32
#endif
#ifndef DEFAULT_TX_FREE_THRESH
#define DEFAULT_TX_FREE_THRESH 32
#endif
/* Reset transmit descriptors after they have been used */
static inline int
ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
{
	struct ixgbe_tx_entry *sw_ring = txq->sw_ring;
	volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring;
	uint16_t last_desc_cleaned = txq->last_desc_cleaned;
	uint16_t nb_tx_desc = txq->nb_tx_desc;
	uint16_t desc_to_clean_to;
	uint16_t nb_tx_to_clean;

	/* Determine the last descriptor needing to be cleaned */
	desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
	if (desc_to_clean_to >= nb_tx_desc)
		desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);

	/* Check to make sure the last descriptor to clean is done */
	desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
	if (!(txr[desc_to_clean_to].wb.status & IXGBE_TXD_STAT_DD)) {
		PMD_TX_FREE_LOG(DEBUG,
				"TX descriptor %4u is not done "
				"(port=%d queue=%d)",
				desc_to_clean_to,
				txq->port_id, txq->queue_id);
		/* Failed to clean any descriptors, better luck next time */
		return -(1);
	}

	/* Figure out how many descriptors will be cleaned */
	if (last_desc_cleaned > desc_to_clean_to)
		nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
							desc_to_clean_to);
	else
		nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
						last_desc_cleaned);

	PMD_TX_FREE_LOG(DEBUG,
			"Cleaning %4u TX descriptors: %4u to %4u "
			"(port=%d queue=%d)",
			nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
			txq->port_id, txq->queue_id);

	/*
	 * The last descriptor to clean is done, so that means all the
	 * descriptors from the last descriptor that was cleaned
	 * up to the last descriptor with the RS bit set
	 * are done. Only reset the threshold descriptor.
	 */
	txr[desc_to_clean_to].wb.status = 0;

	/* Update the txq to reflect the last descriptor that was cleaned */
	txq->last_desc_cleaned = desc_to_clean_to;
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);

	/* No Error */
	return 0;
}
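/*
 * Worked example (illustrative): with nb_tx_desc = 512, last_desc_cleaned =
 * 500 and tx_rs_thresh = 32, desc_to_clean_to wraps to 20 and the count is
 * (512 - 500) + 20 = 32 descriptors reclaimed, matching the threshold.
 */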
uint16_t
ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct ixgbe_tx_queue *txq;
	struct ixgbe_tx_entry *sw_ring;
	struct ixgbe_tx_entry *txe, *txn;
	volatile union ixgbe_adv_tx_desc *txr;
	volatile union ixgbe_adv_tx_desc *txd;
	struct rte_mbuf *tx_pkt;
	struct rte_mbuf *m_seg;
	uint64_t buf_dma_addr;
	uint32_t olinfo_status;
	uint32_t cmd_type_len;
	uint32_t pkt_len;
	uint16_t slen;
	uint64_t ol_flags;
	uint16_t tx_id;
	uint16_t tx_last;
	uint16_t nb_tx;
	uint16_t nb_used;
	uint64_t tx_ol_req;
	uint32_t ctx = 0;
	uint32_t new_ctx;
	union ixgbe_tx_offload tx_offload = {0};

	txq = tx_queue;
	sw_ring = txq->sw_ring;
	txr = txq->tx_ring;
	tx_id = txq->tx_tail;
	txe = &sw_ring[tx_id];

	/* Determine if the descriptor ring needs to be cleaned. */
	if (txq->nb_tx_free < txq->tx_free_thresh)
		ixgbe_xmit_cleanup(txq);

	rte_prefetch0(&txe->mbuf->pool);

	/* TX loop */
	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		new_ctx = 0;
		tx_pkt = *tx_pkts++;
		pkt_len = tx_pkt->pkt_len;

		/*
		 * Determine how many (if any) context descriptors
		 * are needed for offload functionality.
		 */
		ol_flags = tx_pkt->ol_flags;

		/* If hardware offload required */
		tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK;
		if (tx_ol_req) {
			tx_offload.l2_len = tx_pkt->l2_len;
			tx_offload.l3_len = tx_pkt->l3_len;
			tx_offload.l4_len = tx_pkt->l4_len;
			tx_offload.vlan_tci = tx_pkt->vlan_tci;
			tx_offload.tso_segsz = tx_pkt->tso_segsz;

			/* Check if a new context must be built or the
			 * existing one can be reused. */
			ctx = what_advctx_update(txq, tx_ol_req,
				tx_offload);
			/* Only allocate context descriptor if required */
			new_ctx = (ctx == IXGBE_CTX_NUM);
			ctx = txq->ctx_curr;
		}

		/*
		 * Keep track of how many descriptors are used this loop
		 * This will always be the number of segments + the number of
		 * Context descriptors required to transmit the packet
		 */
		nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);

		/*
		 * The number of descriptors that must be allocated for a
		 * packet is the number of segments of that packet, plus 1
		 * Context Descriptor for the hardware offload, if any.
		 * Determine the last TX descriptor to allocate in the TX ring
		 * for the packet, starting from the current position (tx_id)
		 * in the ring.
		 */
		tx_last = (uint16_t) (tx_id + nb_used - 1);

		/* Circular ring */
		if (tx_last >= txq->nb_tx_desc)
			tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);

		PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
			   " tx_first=%u tx_last=%u",
			   (unsigned) txq->port_id,
			   (unsigned) txq->queue_id,
			   (unsigned) pkt_len,
			   (unsigned) tx_id,
			   (unsigned) tx_last);

		/*
		 * Make sure there are enough TX descriptors available to
		 * transmit the entire packet.
		 * nb_used better be less than or equal to txq->tx_rs_thresh
		 */
		if (nb_used > txq->nb_tx_free) {
			PMD_TX_FREE_LOG(DEBUG,
					"Not enough free TX descriptors "
					"nb_used=%4u nb_free=%4u "
					"(port=%d queue=%d)",
					nb_used, txq->nb_tx_free,
					txq->port_id, txq->queue_id);

			if (ixgbe_xmit_cleanup(txq) != 0) {
				/* Could not clean any descriptors */
				if (nb_tx == 0)
					return 0;
				goto end_of_tx;
			}

			/* nb_used better be <= txq->tx_rs_thresh */
			if (unlikely(nb_used > txq->tx_rs_thresh)) {
				PMD_TX_FREE_LOG(DEBUG,
					"The number of descriptors needed to "
					"transmit the packet exceeds the "
					"RS bit threshold. This will impact "
					"performance. "
					"nb_used=%4u nb_free=%4u "
					"tx_rs_thresh=%4u "
					"(port=%d queue=%d)",
					nb_used, txq->nb_tx_free,
					txq->tx_rs_thresh,
					txq->port_id, txq->queue_id);
				/*
				 * Loop here until there are enough TX
				 * descriptors or until the ring cannot be
				 * cleaned.
				 */
				while (nb_used > txq->nb_tx_free) {
					if (ixgbe_xmit_cleanup(txq) != 0) {
						/*
						 * Could not clean any
						 * descriptors
						 */
						if (nb_tx == 0)
							return 0;
						goto end_of_tx;
					}
				}
			}
		}

		/*
		 * By now there are enough free TX descriptors to transmit
		 * the packet.
		 */

		/*
		 * Set common flags of all TX Data Descriptors.
		 *
		 * The following bits must be set in all Data Descriptors:
		 *   - IXGBE_ADVTXD_DTYP_DATA
		 *   - IXGBE_ADVTXD_DCMD_DEXT
		 *
		 * The following bits must be set in the first Data Descriptor
		 * and are ignored in the other ones:
		 *   - IXGBE_ADVTXD_DCMD_IFCS
		 *   - IXGBE_ADVTXD_MAC_1588
		 *   - IXGBE_ADVTXD_DCMD_VLE
		 *
		 * The following bits must only be set in the last Data
		 * Descriptor:
		 *   - IXGBE_TXD_CMD_EOP
		 *
		 * The following bits can be set in any Data Descriptor, but
		 * are only set in the last Data Descriptor:
		 *   - IXGBE_TXD_CMD_RS
		 */
		cmd_type_len = IXGBE_ADVTXD_DTYP_DATA |
			IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;

#ifdef RTE_LIBRTE_IEEE1588
		if (ol_flags & PKT_TX_IEEE1588_TMST)
			cmd_type_len |= IXGBE_ADVTXD_MAC_1588;
#endif

		olinfo_status = 0;
		if (tx_ol_req) {

			if (ol_flags & PKT_TX_TCP_SEG) {
				/* when TSO is on, paylen in the descriptor is
				 * not the packet len but the tcp payload len */
				pkt_len -= (tx_offload.l2_len +
					tx_offload.l3_len + tx_offload.l4_len);
			}

			/*
			 * Setup the TX Advanced Context Descriptor if required
			 */
			if (new_ctx) {
				volatile struct ixgbe_adv_tx_context_desc *
					ctx_txd;

				ctx_txd = (volatile struct
					ixgbe_adv_tx_context_desc *)
					&txr[tx_id];

				txn = &sw_ring[txe->next_id];
				rte_prefetch0(&txn->mbuf->pool);

				if (txe->mbuf != NULL) {
					rte_pktmbuf_free_seg(txe->mbuf);
					txe->mbuf = NULL;
				}

				ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
					tx_offload);

				txe->last_id = tx_last;
				tx_id = txe->next_id;
				txe = txn;
			}

			/*
			 * Setup the TX Advanced Data Descriptor.
			 * This path is taken whether the context descriptor
			 * is newly built or reused.
			 */
			cmd_type_len |= tx_desc_ol_flags_to_cmdtype(ol_flags);
			olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
			olinfo_status |= ctx << IXGBE_ADVTXD_IDX_SHIFT;
		}

		olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);

		m_seg = tx_pkt;
		do {
			txd = &txr[tx_id];
			txn = &sw_ring[txe->next_id];
			rte_prefetch0(&txn->mbuf->pool);

			if (txe->mbuf != NULL)
				rte_pktmbuf_free_seg(txe->mbuf);
			txe->mbuf = m_seg;

			/*
			 * Set up Transmit Data Descriptor.
			 */
			slen = m_seg->data_len;
			buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
			txd->read.buffer_addr =
				rte_cpu_to_le_64(buf_dma_addr);
			txd->read.cmd_type_len =
				rte_cpu_to_le_32(cmd_type_len | slen);
			txd->read.olinfo_status =
				rte_cpu_to_le_32(olinfo_status);
			txe->last_id = tx_last;
			tx_id = txe->next_id;
			txe = txn;
			m_seg = m_seg->next;
		} while (m_seg != NULL);

		/*
		 * The last packet data descriptor needs End Of Packet (EOP)
		 */
		cmd_type_len |= IXGBE_TXD_CMD_EOP;
		txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
		txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);

		/* Set RS bit only on threshold packets' last descriptor */
		if (txq->nb_tx_used >= txq->tx_rs_thresh) {
			PMD_TX_FREE_LOG(DEBUG,
					"Setting RS bit on TXD id="
					"%4u (port=%d queue=%d)",
					tx_last, txq->port_id, txq->queue_id);

			cmd_type_len |= IXGBE_TXD_CMD_RS;

			/* Update txq RS bit counters */
			txq->nb_tx_used = 0;
		}
		txd->read.cmd_type_len |= rte_cpu_to_le_32(cmd_type_len);
	}
end_of_tx:
	rte_wmb();

	/*
	 * Set the Transmit Descriptor Tail (TDT)
	 */
	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
		   (unsigned) txq->port_id, (unsigned) txq->queue_id,
		   (unsigned) tx_id, (unsigned) nb_tx);
	IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
	txq->tx_tail = tx_id;

	return nb_tx;
}
/*********************************************************************
 *
 *  RX functions
 *
 **********************************************************************/
static inline uint64_t
rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
{
	uint64_t pkt_flags;

	static const uint64_t ip_pkt_types_map[16] = {
		0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
		PKT_RX_IPV6_HDR, 0, 0, 0,
		PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
		PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
	};

	static const uint64_t ip_rss_types_map[16] = {
		0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
		0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
		PKT_RX_RSS_HASH, 0, 0, 0,
		0, 0, 0, PKT_RX_FDIR,
	};

#ifdef RTE_LIBRTE_IEEE1588
	static uint64_t ip_pkt_etqf_map[8] = {
		0, 0, 0, PKT_RX_IEEE1588_PTP,
		0, 0, 0, 0,
	};

	pkt_flags = (hl_tp_rs & IXGBE_RXDADV_PKTTYPE_ETQF) ?
			ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
			ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
#else
	pkt_flags = (hl_tp_rs & IXGBE_RXDADV_PKTTYPE_ETQF) ? 0 :
			ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
#endif
	return pkt_flags | ip_rss_types_map[hl_tp_rs & 0xF];
}
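/*
 * Decoding example (illustrative): for hl_tp_rs = 0x11, bits 7:4 give packet
 * type 1 (PKT_RX_IPV4_HDR) and bits 3:0 give RSS type 1 (hashed), so the
 * function returns PKT_RX_IPV4_HDR | PKT_RX_RSS_HASH.
 */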
static inline uint64_t
rx_desc_status_to_pkt_flags(uint32_t rx_status)
{
	uint64_t pkt_flags;

	/*
	 * Check if VLAN present only.
	 * Do not check whether the L3/L4 rx checksum was done by the NIC;
	 * that can be found from the rte_eth_rxmode.hw_ip_checksum flag.
	 */
	pkt_flags = (rx_status & IXGBE_RXD_STAT_VP) ? PKT_RX_VLAN_PKT : 0;

#ifdef RTE_LIBRTE_IEEE1588
	if (rx_status & IXGBE_RXD_STAT_TMST)
		pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
#endif
	return pkt_flags;
}
static inline uint64_t
rx_desc_error_to_pkt_flags(uint32_t rx_status)
{
	/*
	 * Bit 31: IPE, IPv4 checksum error
	 * Bit 30: L4I, L4 integrity error
	 */
	static uint64_t error_to_pkt_flags_map[4] = {
		0, PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
		PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
	};
	return error_to_pkt_flags_map[(rx_status >>
		IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK];
}
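/*
 * Lookup example (illustrative): the two checksum-error bits form an index
 * 0..3, so a descriptor with only L4I set (index 1) maps to
 * PKT_RX_L4_CKSUM_BAD, while both bits set (index 3) maps to
 * PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD.
 */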
#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
/*
 * LOOK_AHEAD defines how many desc statuses to check beyond the
 * current descriptor.
 * It must be a pound define for optimal performance.
 * Do not change the value of LOOK_AHEAD, as the ixgbe_rx_scan_hw_ring
 * function only works with LOOK_AHEAD=8.
 */
#define LOOK_AHEAD 8
#if (LOOK_AHEAD != 8)
#error "PMD IXGBE: LOOK_AHEAD must be 8\n"
#endif
static inline int
ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
{
	volatile union ixgbe_adv_rx_desc *rxdp;
	struct ixgbe_rx_entry *rxep;
	struct rte_mbuf *mb;
	uint16_t pkt_len;
	uint64_t pkt_flags;
	int s[LOOK_AHEAD], nb_dd;
	int i, j, nb_rx = 0;

	/* get references to current descriptor and S/W ring entry */
	rxdp = &rxq->rx_ring[rxq->rx_tail];
	rxep = &rxq->sw_ring[rxq->rx_tail];

	/* check to make sure there is at least 1 packet to receive */
	if (!(rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD))
		return 0;

	/*
	 * Scan LOOK_AHEAD descriptors at a time to determine which descriptors
	 * reference packets that are ready to be received.
	 */
	for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST;
	     i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD) {
		/* Read desc statuses backwards to avoid race condition */
		for (j = LOOK_AHEAD - 1; j >= 0; --j)
			s[j] = rxdp[j].wb.upper.status_error;

		/* Compute how many status bits were set */
		nb_dd = 0;
		for (j = 0; j < LOOK_AHEAD; ++j)
			nb_dd += s[j] & IXGBE_RXDADV_STAT_DD;

		nb_rx += nb_dd;

		/* Translate descriptor info to mbuf format */
		for (j = 0; j < nb_dd; ++j) {
			mb = rxep[j].mbuf;
			pkt_len = (uint16_t)(rxdp[j].wb.upper.length - rxq->crc_len);
			mb->data_len = pkt_len;
			mb->pkt_len = pkt_len;
			mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);

			/* convert descriptor fields to rte mbuf flags */
			pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(
					rxdp[j].wb.lower.lo_dword.data);
			/* reuse status field from scan list */
			pkt_flags |= rx_desc_status_to_pkt_flags(s[j]);
			pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
			mb->ol_flags = pkt_flags;

			if (likely(pkt_flags & PKT_RX_RSS_HASH))
				mb->hash.rss = rxdp[j].wb.lower.hi_dword.rss;
			else if (pkt_flags & PKT_RX_FDIR) {
				mb->hash.fdir.hash =
					(uint16_t)((rxdp[j].wb.lower.hi_dword.csum_ip.csum)
						& IXGBE_ATR_HASH_MASK);
				mb->hash.fdir.id = rxdp[j].wb.lower.hi_dword.csum_ip.ip_id;
			}
		}

		/* Move mbuf pointers from the S/W ring to the stage */
		for (j = 0; j < LOOK_AHEAD; ++j) {
			rxq->rx_stage[i + j] = rxep[j].mbuf;
		}

		/* stop if all requested packets could not be received */
		if (nb_dd != LOOK_AHEAD)
			break;
	}

	/* clear software ring entries so we can cleanup correctly */
	for (i = 0; i < nb_rx; ++i) {
		rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
	}

	return nb_rx;
}
static inline int
ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf)
{
	volatile union ixgbe_adv_rx_desc *rxdp;
	struct ixgbe_rx_entry *rxep;
	struct rte_mbuf *mb;
	uint16_t alloc_idx;
	__le64 dma_addr;
	int diag, i;

	/* allocate buffers in bulk directly into the S/W ring */
	alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
	rxep = &rxq->sw_ring[alloc_idx];
	diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
				    rxq->rx_free_thresh);
	if (unlikely(diag != 0))
		return -ENOMEM;

	rxdp = &rxq->rx_ring[alloc_idx];
	for (i = 0; i < rxq->rx_free_thresh; ++i) {
		/* populate the static rte mbuf fields */
		mb = rxep[i].mbuf;
		if (reset_mbuf) {
			mb->next = NULL;
			mb->nb_segs = 1;
			mb->port = rxq->port_id;
		}

		rte_mbuf_refcnt_set(mb, 1);
		mb->data_off = RTE_PKTMBUF_HEADROOM;

		/* populate the descriptors */
		dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
		rxdp[i].read.hdr_addr = dma_addr;
		rxdp[i].read.pkt_addr = dma_addr;
	}

	/* update state of internal queue structure */
	rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh;
	if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
		rxq->rx_free_trigger = rxq->rx_free_thresh - 1;

	return 0;
}
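/*
 * Trigger arithmetic (illustrative): with rx_free_thresh = 32 the trigger
 * starts at descriptor 31 and advances 31 -> 63 -> 95 ... wrapping back to
 * 31 after the last slice of the ring has been refilled.
 */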
static inline uint16_t
ixgbe_rx_fill_from_stage(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
			 uint16_t nb_pkts)
{
	struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
	int i;

	/* how many packets are ready to return? */
	nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);

	/* copy mbuf pointers to the application's packet list */
	for (i = 0; i < nb_pkts; ++i)
		rx_pkts[i] = stage[i];

	/* update internal queue state */
	rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
	rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);

	return nb_pkts;
}
static inline uint16_t
rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
	     uint16_t nb_pkts)
{
	struct ixgbe_rx_queue *rxq = (struct ixgbe_rx_queue *)rx_queue;
	uint16_t nb_rx = 0;

	/* Any previously recv'd pkts will be returned from the Rx stage */
	if (rxq->rx_nb_avail)
		return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);

	/* Scan the H/W ring for packets to receive */
	nb_rx = (uint16_t)ixgbe_rx_scan_hw_ring(rxq);

	/* update internal queue state */
	rxq->rx_next_avail = 0;
	rxq->rx_nb_avail = nb_rx;
	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);

	/* if required, allocate new buffers to replenish descriptors */
	if (rxq->rx_tail > rxq->rx_free_trigger) {
		uint16_t cur_free_trigger = rxq->rx_free_trigger;

		if (ixgbe_rx_alloc_bufs(rxq, true) != 0) {
			uint16_t i, j;

			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
				   "queue_id=%u", (unsigned) rxq->port_id,
				   (unsigned) rxq->queue_id);

			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
				rxq->rx_free_thresh;

			/*
			 * Need to rewind any previous receives if we cannot
			 * allocate new buffers to replenish the old ones.
			 */
			rxq->rx_nb_avail = 0;
			rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
			for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j)
				rxq->sw_ring[j].mbuf = rxq->rx_stage[i];

			return 0;
		}

		/* update tail pointer */
		rte_wmb();
		IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, cur_free_trigger);
	}

	if (rxq->rx_tail >= rxq->nb_rx_desc)
		rxq->rx_tail = 0;

	/* received any packets this loop? */
	if (rxq->rx_nb_avail)
		return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);

	return 0;
}
/* split requests into chunks of size RTE_PMD_IXGBE_RX_MAX_BURST */
static uint16_t
ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
			   uint16_t nb_pkts)
{
	uint16_t nb_rx = 0;

	if (unlikely(nb_pkts == 0))
		return 0;

	if (likely(nb_pkts <= RTE_PMD_IXGBE_RX_MAX_BURST))
		return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);

	/* request is relatively large, chunk it up */
	while (nb_pkts) {
		uint16_t ret, n;
		n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_RX_MAX_BURST);
		ret = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
		nb_rx = (uint16_t)(nb_rx + ret);
		nb_pkts = (uint16_t)(nb_pkts - ret);
		if (ret < n)
			break;
	}
	return nb_rx;
}
#else

/* Stub to avoid extra ifdefs */
static uint16_t
ixgbe_recv_pkts_bulk_alloc(__rte_unused void *rx_queue,
	__rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts)
{
	return 0;
}

static inline int
ixgbe_rx_alloc_bufs(__rte_unused struct ixgbe_rx_queue *rxq,
		    __rte_unused bool reset_mbuf)
{
	return -ENOMEM;
}
#endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */
uint16_t
ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	struct ixgbe_rx_queue *rxq;
	volatile union ixgbe_adv_rx_desc *rx_ring;
	volatile union ixgbe_adv_rx_desc *rxdp;
	struct ixgbe_rx_entry *sw_ring;
	struct ixgbe_rx_entry *rxe;
	struct rte_mbuf *rxm;
	struct rte_mbuf *nmb;
	union ixgbe_adv_rx_desc rxd;
	uint64_t dma_addr;
	uint32_t staterr;
	uint32_t hlen_type_rss;
	uint16_t pkt_len;
	uint16_t rx_id;
	uint16_t nb_rx;
	uint16_t nb_hold;
	uint64_t pkt_flags;

	nb_rx = 0;
	nb_hold = 0;
	rxq = rx_queue;
	rx_id = rxq->rx_tail;
	rx_ring = rxq->rx_ring;
	sw_ring = rxq->sw_ring;
	while (nb_rx < nb_pkts) {
		/*
		 * The order of operations here is important as the DD status
		 * bit must not be read after any other descriptor fields.
		 * rx_ring and rxdp are pointing to volatile data so the order
		 * of accesses cannot be reordered by the compiler. If they were
		 * not volatile, they could be reordered which could lead to
		 * using invalid descriptor fields when read from rxd.
		 */
		rxdp = &rx_ring[rx_id];
		staterr = rxdp->wb.upper.status_error;
		if (!(staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
			break;
		rxd = *rxdp;

		/*
		 * End of packet.
		 *
		 * If the IXGBE_RXDADV_STAT_EOP flag is not set, the RX packet
		 * is likely to be invalid and to be dropped by the various
		 * validation checks performed by the network stack.
		 *
		 * Allocate a new mbuf to replenish the RX ring descriptor.
		 * If the allocation fails:
		 *    - arrange for that RX descriptor to be the first one
		 *      being parsed the next time the receive function is
		 *      invoked [on the same queue].
		 *
		 *    - Stop parsing the RX ring and return immediately.
		 *
		 * This policy does not drop the packet received in the RX
		 * descriptor for which the allocation of a new mbuf failed.
		 * Thus, it allows that packet to be later retrieved if
		 * mbufs have been freed in the meantime.
		 * As a side effect, holding RX descriptors instead of
		 * systematically giving them back to the NIC may lead to
		 * RX ring exhaustion situations.
		 * However, the NIC can gracefully prevent such situations
		 * from happening by sending specific "back-pressure" flow
		 * control frames to its peer(s).
		 */
		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
			   "ext_err_stat=0x%08x pkt_len=%u",
			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
			   (unsigned) rx_id, (unsigned) staterr,
			   (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));

		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
		if (nmb == NULL) {
			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
				   "queue_id=%u", (unsigned) rxq->port_id,
				   (unsigned) rxq->queue_id);
			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
			break;
		}

		nb_hold++;
		rxe = &sw_ring[rx_id];
		rx_id++;
		if (rx_id == rxq->nb_rx_desc)
			rx_id = 0;

		/* Prefetch next mbuf while processing current one. */
		rte_ixgbe_prefetch(sw_ring[rx_id].mbuf);

		/*
		 * When next RX descriptor is on a cache-line boundary,
		 * prefetch the next 4 RX descriptors and the next 8 pointers
		 * to mbufs.
		 */
		if ((rx_id & 0x3) == 0) {
			rte_ixgbe_prefetch(&rx_ring[rx_id]);
			rte_ixgbe_prefetch(&sw_ring[rx_id]);
		}

		rxm = rxe->mbuf;
		rxe->mbuf = nmb;
		dma_addr =
			rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
		rxdp->read.hdr_addr = dma_addr;
		rxdp->read.pkt_addr = dma_addr;

		/*
		 * Initialize the returned mbuf.
		 * 1) setup generic mbuf fields:
		 *    - number of segments,
		 *    - next segment,
		 *    - packet length,
		 *    - RX port identifier.
		 * 2) integrate hardware offload data, if any:
		 *    - RSS flag & hash,
		 *    - IP checksum flag,
		 *    - VLAN TCI, if any,
		 *    - error flags.
		 */
		pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
				      rxq->crc_len);
		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
		rxm->nb_segs = 1;
		rxm->next = NULL;
		rxm->pkt_len = pkt_len;
		rxm->data_len = pkt_len;
		rxm->port = rxq->port_id;

		hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
		rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);

		pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
		pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
		pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
		rxm->ol_flags = pkt_flags;

		if (likely(pkt_flags & PKT_RX_RSS_HASH))
			rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
		else if (pkt_flags & PKT_RX_FDIR) {
			rxm->hash.fdir.hash =
				(uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
					   & IXGBE_ATR_HASH_MASK);
			rxm->hash.fdir.id = rxd.wb.lower.hi_dword.csum_ip.ip_id;
		}

		/*
		 * Store the mbuf address into the next entry of the array
		 * of returned packets.
		 */
		rx_pkts[nb_rx++] = rxm;
	}
	rxq->rx_tail = rx_id;

	/*
	 * If the number of free RX descriptors is greater than the RX free
	 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
	 * register.
	 * Update the RDT with the value of the last processed RX descriptor
	 * minus 1, to guarantee that the RDT register is never equal to the
	 * RDH register, which creates a "full" ring situation from the
	 * hardware point of view...
	 */
	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
	if (nb_hold > rxq->rx_free_thresh) {
		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
			   "nb_hold=%u nb_rx=%u",
			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
			   (unsigned) rx_id, (unsigned) nb_hold,
			   (unsigned) nb_rx);
		rx_id = (uint16_t) ((rx_id == 0) ?
				     (rxq->nb_rx_desc - 1) : (rx_id - 1));
		IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
		nb_hold = 0;
	}
	rxq->nb_rx_hold = nb_hold;
	return nb_rx;
}
/*
 * Detect an RSC descriptor.
 */
static inline uint32_t
ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
{
	return (rte_le_to_cpu_32(rx->wb.lower.lo_dword.data) &
		IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
}
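/*
 * Field note (illustrative): RSCCNT is a small counter inside the
 * descriptor's lo_dword; a non-zero value marks the completion as part of an
 * RSC (hardware receive-side coalescing) aggregation rather than a plain
 * scattered packet, which is how ixgbe_recv_pkts_lro() picks the NEXTP path.
 */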
/**
 * ixgbe_fill_cluster_head_buf - fill the first mbuf of the returned packet
 *
 * Fill the following info in the HEAD buffer of the Rx cluster:
 *    - RX port identifier
 *    - hardware offload data, if any:
 *      - RSS flag & hash
 *      - IP checksum flag
 *      - VLAN TCI, if any
 *      - error flags
 * @head HEAD of the packet cluster
 * @desc HW descriptor to get data from
 * @port_id Port ID of the Rx queue
 * @staterr Status/error word of the descriptor
 */
static inline void
ixgbe_fill_cluster_head_buf(
	struct rte_mbuf *head,
	union ixgbe_adv_rx_desc *desc,
	uint8_t port_id,
	uint32_t staterr)
{
	uint32_t hlen_type_rss;
	uint64_t pkt_flags;

	head->port = port_id;

	/*
	 * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
	 * set in the pkt_flags field.
	 */
	head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
	hlen_type_rss = rte_le_to_cpu_32(desc->wb.lower.lo_dword.data);
	pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
	pkt_flags |= rx_desc_status_to_pkt_flags(staterr);
	pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
	head->ol_flags = pkt_flags;

	if (likely(pkt_flags & PKT_RX_RSS_HASH))
		head->hash.rss = rte_le_to_cpu_32(desc->wb.lower.hi_dword.rss);
	else if (pkt_flags & PKT_RX_FDIR) {
		head->hash.fdir.hash =
			rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.csum)
			& IXGBE_ATR_HASH_MASK;
		head->hash.fdir.id =
			rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.ip_id);
	}
}
/**
 * ixgbe_recv_pkts_lro - receive handler for the LRO case.
 *
 * @rx_queue Rx queue handle
 * @rx_pkts table of received packets
 * @nb_pkts size of rx_pkts table
 * @bulk_alloc if TRUE bulk allocation is used for a HW ring refilling
 *
 * Handles the Rx HW ring completions when RSC feature is configured. Uses an
 * additional ring of ixgbe_rsc_entry's that will hold the relevant RSC info.
 *
 * We use the same logic as in Linux and in FreeBSD ixgbe drivers:
 * 1) When non-EOP RSC completion arrives:
 *    a) Update the HEAD of the current RSC aggregation cluster with the new
 *       segment's data length.
 *    b) Set the "next" pointer of the current segment to point to the segment
 *       at the NEXTP index.
 *    c) Pass the HEAD of RSC aggregation cluster on to the next NEXTP entry
 *       in the sw_rsc_ring.
 * 2) When EOP arrives we just update the cluster's total length and offload
 *    flags and deliver the cluster up to the upper layers. In our case - put
 *    it in the rx_pkts table.
 *
 * Returns the number of received packets/clusters (according to the "bulk
 * receive" interface).
 */
static inline uint16_t
ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
		    bool bulk_alloc)
{
	struct ixgbe_rx_queue *rxq = rx_queue;
	volatile union ixgbe_adv_rx_desc *rx_ring = rxq->rx_ring;
	struct ixgbe_rx_entry *sw_ring = rxq->sw_ring;
	struct ixgbe_scattered_rx_entry *sw_sc_ring = rxq->sw_sc_ring;
	uint16_t rx_id = rxq->rx_tail;
	uint16_t nb_rx = 0;
	uint16_t nb_hold = rxq->nb_rx_hold;
	uint16_t prev_id = rxq->rx_tail;

	while (nb_rx < nb_pkts) {
		bool eop;
		struct ixgbe_rx_entry *rxe;
		struct ixgbe_scattered_rx_entry *sc_entry;
		struct ixgbe_scattered_rx_entry *next_sc_entry;
		struct ixgbe_rx_entry *next_rxe;
		struct rte_mbuf *first_seg;
		struct rte_mbuf *rxm;
		struct rte_mbuf *nmb;
		union ixgbe_adv_rx_desc rxd;
		uint16_t data_len;
		uint16_t next_id;
		volatile union ixgbe_adv_rx_desc *rxdp;
		uint32_t staterr;

next_desc:
		/*
		 * The code in this whole file uses the volatile pointer to
		 * ensure the read ordering of the status and the rest of the
		 * descriptor fields (on the compiler level only!!!). This is so
		 * UGLY - why not to just use the compiler barrier instead? DPDK
		 * even has the rte_compiler_barrier() for that.
		 *
		 * But most importantly this is just wrong because this doesn't
		 * ensure memory ordering in a general case at all. For
		 * instance, DPDK is supposed to work on Power CPUs where
		 * compiler barrier may just not be enough!
		 *
		 * I tried to write only this function properly to have a
		 * starting point (as a part of an LRO/RSC series) but the
		 * compiler cursed at me when I tried to cast away the
		 * "volatile" from rx_ring (yes, it's volatile too!!!). So, I'm
		 * keeping it the way it is for now.
		 *
		 * The code in this file is broken in so many other places and
		 * will just not work on a big endian CPU anyway therefore the
		 * lines below will have to be revisited together with the rest
		 * of the ixgbe PMD.
		 *
		 * TODO:
		 *    - Get rid of "volatile" crap and let the compiler do its
		 *      job.
		 *    - Use the proper memory barrier (rte_rmb()) to ensure the
		 *      memory ordering below.
		 */
		rxdp = &rx_ring[rx_id];
		staterr = rte_le_to_cpu_32(rxdp->wb.upper.status_error);

		if (!(staterr & IXGBE_RXDADV_STAT_DD))
			break;

		rxd = *rxdp;

		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
				  "staterr=0x%x data_len=%u",
			   rxq->port_id, rxq->queue_id, rx_id, staterr,
			   rte_le_to_cpu_16(rxd.wb.upper.length));

		if (!bulk_alloc) {
			nmb = rte_rxmbuf_alloc(rxq->mb_pool);
			if (nmb == NULL) {
				PMD_RX_LOG(DEBUG, "RX mbuf alloc failed "
						  "port_id=%u queue_id=%u",
					   rxq->port_id, rxq->queue_id);

				rte_eth_devices[rxq->port_id].data->
							rx_mbuf_alloc_failed++;
				break;
			}
		} else if (nb_hold > rxq->rx_free_thresh) {
			uint16_t next_rdt = rxq->rx_free_trigger;

			if (!ixgbe_rx_alloc_bufs(rxq, false)) {
				rte_wmb();
				IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr,
						    next_rdt);
				nb_hold -= rxq->rx_free_thresh;
			} else {
				PMD_RX_LOG(DEBUG, "RX bulk alloc failed "
						  "port_id=%u queue_id=%u",
					   rxq->port_id, rxq->queue_id);

				rte_eth_devices[rxq->port_id].data->
							rx_mbuf_alloc_failed++;
				break;
			}
		}

		nb_hold++;
		rxe = &sw_ring[rx_id];
		eop = staterr & IXGBE_RXDADV_STAT_EOP;

		next_id = rx_id + 1;
		if (next_id == rxq->nb_rx_desc)
			next_id = 0;

		/* Prefetch next mbuf while processing current one. */
		rte_ixgbe_prefetch(sw_ring[next_id].mbuf);

		/*
		 * When next RX descriptor is on a cache-line boundary,
		 * prefetch the next 4 RX descriptors and the next 4 pointers
		 * to mbufs.
		 */
		if ((next_id & 0x3) == 0) {
			rte_ixgbe_prefetch(&rx_ring[next_id]);
			rte_ixgbe_prefetch(&sw_ring[next_id]);
		}

		rxm = rxe->mbuf;

		if (!bulk_alloc) {
			__le64 dma =
			  rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
			/*
			 * Update RX descriptor with the physical address of the
			 * new data buffer of the new allocated mbuf.
			 */
			rxe->mbuf = nmb;

			rxm->data_off = RTE_PKTMBUF_HEADROOM;
			rxdp->read.hdr_addr = dma;
			rxdp->read.pkt_addr = dma;
		} else
			rxe->mbuf = NULL;

		/*
		 * Set data length & data buffer address of mbuf.
		 */
		data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
		rxm->data_len = data_len;

		if (!eop) {
			uint16_t nextp_id;
			/*
			 * Get next descriptor index:
			 *  - For RSC it's in the NEXTP field.
			 *  - For a scattered packet - it's just a following
			 *    descriptor.
			 */
			if (ixgbe_rsc_count(&rxd))
				nextp_id =
					(staterr & IXGBE_RXDADV_NEXTP_MASK) >>
						       IXGBE_RXDADV_NEXTP_SHIFT;
			else
				nextp_id = next_id;

			next_sc_entry = &sw_sc_ring[nextp_id];
			next_rxe = &sw_ring[nextp_id];
			rte_ixgbe_prefetch(next_rxe);
		}

		sc_entry = &sw_sc_ring[rx_id];
		first_seg = sc_entry->fbuf;
		sc_entry->fbuf = NULL;

		/*
		 * If this is the first buffer of the received packet,
		 * set the pointer to the first mbuf of the packet and
		 * initialize its context.
		 * Otherwise, update the total length and the number of segments
		 * of the current scattered packet, and update the pointer to
		 * the last mbuf of the current packet.
		 */
		if (first_seg == NULL) {
			first_seg = rxm;
			first_seg->pkt_len = data_len;
			first_seg->nb_segs = 1;
		} else {
			first_seg->pkt_len += data_len;
			first_seg->nb_segs++;
		}

		prev_id = rx_id;
		rx_id = next_id;

		/*
		 * If this is not the last buffer of the received packet, update
		 * the pointer to the first mbuf at the NEXTP entry in the
		 * sw_sc_ring and continue to parse the RX ring.
		 */
		if (!eop) {
			rxm->next = next_rxe->mbuf;
			next_sc_entry->fbuf = first_seg;
			goto next_desc;
		}

		/*
		 * This is the last buffer of the received packet - return
		 * the current cluster to the user.
		 */
		rxm->next = NULL;

		/* Initialize the first mbuf of the returned packet */
		ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq->port_id,
					    staterr);

		/* Prefetch data of first segment, if configured to do so. */
		rte_packet_prefetch((char *)first_seg->buf_addr +
				    first_seg->data_off);

		/*
		 * Store the mbuf address into the next entry of the array
		 * of returned packets.
		 */
		rx_pkts[nb_rx++] = first_seg;
	}

	/*
	 * Record index of the next RX descriptor to probe.
	 */
	rxq->rx_tail = rx_id;

	/*
	 * If the number of free RX descriptors is greater than the RX free
	 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
	 * register.
	 * Update the RDT with the value of the last processed RX descriptor
	 * minus 1, to guarantee that the RDT register is never equal to the
	 * RDH register, which creates a "full" ring situation from the
	 * hardware point of view...
	 */
	if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {
		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
			   "nb_hold=%u nb_rx=%u",
			   rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);

		rte_wmb();
		IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, prev_id);
		nb_hold = 0;
	}

	rxq->nb_rx_hold = nb_hold;
	return nb_rx;
}
static uint16_t
ixgbe_recv_pkts_lro_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
				 uint16_t nb_pkts)
{
	return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, false);
}

static uint16_t
ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
			       uint16_t nb_pkts)
{
	return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, true);
}
/*********************************************************************
 *
 *  Queue management functions
 *
 **********************************************************************/

/*
 * Rings setup and release.
 *
 * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
 * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. This will
 * also optimize cache line size effect. H/W supports up to cache line size 128.
 */
#define IXGBE_ALIGN 128

/*
 * Maximum number of Ring Descriptors.
 *
 * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
 * descriptors should meet the following condition:
 *      (num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0
 */
#define IXGBE_MIN_RING_DESC 32
#define IXGBE_MAX_RING_DESC 4096
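/*
 * Alignment check (illustrative): both descriptor formats are 16 bytes, so
 * any multiple of 8 descriptors satisfies (n * 16) % 128 == 0; the 32..4096
 * bounds above therefore also respect the TDLEN/RDLEN rule.
 */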
/*
 * Create memzone for HW rings. malloc can't be used as the physical address is
 * needed. If the memzone is already created, then this function returns a ptr
 * to the old one.
 */
static const struct rte_memzone * __attribute__((cold))
ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
		      uint16_t queue_id, uint32_t ring_size, int socket_id)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
			dev->driver->pci_drv.name, ring_name,
			dev->data->port_id, queue_id);

	mz = rte_memzone_lookup(z_name);
	if (mz)
		return mz;

#ifdef RTE_LIBRTE_XEN_DOM0
	return rte_memzone_reserve_bounded(z_name, ring_size,
		socket_id, 0, IXGBE_ALIGN, RTE_PGSIZE_2M);
#else
	return rte_memzone_reserve_aligned(z_name, ring_size,
		socket_id, 0, IXGBE_ALIGN);
#endif
}
static void __attribute__((cold))
ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
{
	unsigned i;

	if (txq->sw_ring != NULL) {
		for (i = 0; i < txq->nb_tx_desc; i++) {
			if (txq->sw_ring[i].mbuf != NULL) {
				rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
				txq->sw_ring[i].mbuf = NULL;
			}
		}
	}
}
static void __attribute__((cold))
ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
{
	if (txq != NULL &&
	    txq->sw_ring != NULL)
		rte_free(txq->sw_ring);
}
static void __attribute__((cold))
ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
{
	if (txq != NULL && txq->ops != NULL) {
		txq->ops->release_mbufs(txq);
		txq->ops->free_swring(txq);
		rte_free(txq);
	}
}

void __attribute__((cold))
ixgbe_dev_tx_queue_release(void *txq)
{
	ixgbe_tx_queue_release(txq);
}
/* (Re)set dynamic ixgbe_tx_queue fields to defaults */
static void __attribute__((cold))
ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
{
	static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
	struct ixgbe_tx_entry *txe = txq->sw_ring;
	uint16_t prev, i;

	/* Zero out HW ring memory */
	for (i = 0; i < txq->nb_tx_desc; i++) {
		txq->tx_ring[i] = zeroed_desc;
	}

	/* Initialize SW ring entries */
	prev = (uint16_t) (txq->nb_tx_desc - 1);
	for (i = 0; i < txq->nb_tx_desc; i++) {
		volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
		txd->wb.status = IXGBE_TXD_STAT_DD;
		txe[i].mbuf = NULL;
		txe[i].last_id = i;
		txe[prev].next_id = i;
		prev = i;
	}

	txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
	txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

	txq->tx_tail = 0;
	txq->nb_tx_used = 0;
	/*
	 * Always allow 1 descriptor to be un-allocated to avoid
	 * a H/W race condition
	 */
	txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
	txq->ctx_curr = 0;
	memset((void *)&txq->ctx_cache, 0,
		IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
}
static const struct ixgbe_txq_ops def_txq_ops = {
	.release_mbufs = ixgbe_tx_queue_release_mbufs,
	.free_swring = ixgbe_tx_free_swring,
	.reset = ixgbe_reset_tx_queue,
};
/* Takes an ethdev and a queue and sets up the tx function to be used based on
 * the queue parameters. Used in tx_queue_setup by primary process and then
 * in dev_init by secondary process when attaching to an existing ethdev.
 */
void __attribute__((cold))
ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
{
	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
	if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS)
			&& (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
		PMD_INIT_LOG(INFO, "Using simple tx code path");
#ifdef RTE_IXGBE_INC_VECTOR
		if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
				(rte_eal_process_type() != RTE_PROC_PRIMARY ||
					ixgbe_txq_vec_setup(txq) == 0)) {
			PMD_INIT_LOG(INFO, "Vector tx enabled.");
			dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
		} else
#endif
		dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
	} else {
		PMD_INIT_LOG(INFO, "Using full-featured tx code path");
		PMD_INIT_LOG(INFO,
				" - txq_flags = %lx " "[IXGBE_SIMPLE_FLAGS=%lx]",
				(unsigned long)txq->txq_flags,
				(unsigned long)IXGBE_SIMPLE_FLAGS);
		PMD_INIT_LOG(INFO,
				" - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
				(unsigned long)txq->tx_rs_thresh,
				(unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
		dev->tx_pkt_burst = ixgbe_xmit_pkts;
	}
}
int __attribute__((cold))
ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx,
			 uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf)
{
	const struct rte_memzone *tz;
	struct ixgbe_tx_queue *txq;
	struct ixgbe_hw *hw;
	uint16_t tx_rs_thresh, tx_free_thresh;

	PMD_INIT_FUNC_TRACE();
	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/*
	 * Validate number of transmit descriptors.
	 * It must not exceed hardware maximum, and must be a multiple
	 * of IXGBE_ALIGN.
	 */
	if (((nb_desc * sizeof(union ixgbe_adv_tx_desc)) % IXGBE_ALIGN) != 0 ||
	    (nb_desc > IXGBE_MAX_RING_DESC) ||
	    (nb_desc < IXGBE_MIN_RING_DESC)) {
		return -EINVAL;
	}

	/*
	 * The following two parameters control the setting of the RS bit on
	 * transmit descriptors.
	 * TX descriptors will have their RS bit set after txq->tx_rs_thresh
	 * descriptors have been used.
	 * The TX descriptor ring will be cleaned after txq->tx_free_thresh
	 * descriptors are used or if the number of descriptors required
	 * to transmit a packet is greater than the number of free TX
	 * descriptors.
	 * The following constraints must be satisfied:
	 *  tx_rs_thresh must be greater than 0.
	 *  tx_rs_thresh must be less than the size of the ring minus 2.
	 *  tx_rs_thresh must be less than or equal to tx_free_thresh.
	 *  tx_rs_thresh must be a divisor of the ring size.
	 *  tx_free_thresh must be greater than 0.
	 *  tx_free_thresh must be less than the size of the ring minus 3.
	 * One descriptor in the TX ring is used as a sentinel to avoid a
	 * H/W race condition, hence the maximum threshold constraints.
	 * When set to zero use default values.
	 */
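	/*
	 * Example (illustrative, not in the original): with nb_desc = 512 the
	 * defaults tx_rs_thresh = 32 and tx_free_thresh = 32 satisfy every
	 * constraint above: 32 < 510, 32 <= 32, and 512 % 32 == 0.
	 */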
	tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
			tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
			tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
	if (tx_rs_thresh >= (nb_desc - 2)) {
		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number "
			     "of TX descriptors minus 2. (tx_rs_thresh=%u "
			     "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
			     (int)dev->data->port_id, (int)queue_idx);
		return -(EINVAL);
	}
	if (tx_free_thresh >= (nb_desc - 3)) {
		PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the number "
			     "of TX descriptors minus 3. (tx_free_thresh=%u "
			     "port=%d queue=%d)",
			     (unsigned int)tx_free_thresh,
			     (int)dev->data->port_id, (int)queue_idx);
		return -(EINVAL);
	}
	if (tx_rs_thresh > tx_free_thresh) {
		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to "
			     "tx_free_thresh. (tx_free_thresh=%u "
			     "tx_rs_thresh=%u port=%d queue=%d)",
			     (unsigned int)tx_free_thresh,
			     (unsigned int)tx_rs_thresh,
			     (int)dev->data->port_id,
			     (int)queue_idx);
		return -(EINVAL);
	}
	if ((nb_desc % tx_rs_thresh) != 0) {
		PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
			     "number of TX descriptors. (tx_rs_thresh=%u "
			     "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
			     (int)dev->data->port_id, (int)queue_idx);
		return -(EINVAL);
	}

	/*
	 * If rs_bit_thresh is greater than 1, then TX WTHRESH should be
	 * set to 0. If WTHRESH is greater than zero, the RS bit is ignored
	 * by the NIC and all descriptors are written back after the NIC
	 * accumulates WTHRESH descriptors.
	 */
	if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
		PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
			     "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
			     "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
			     (int)dev->data->port_id, (int)queue_idx);
		return -(EINVAL);
	}
1999 /* Free memory prior to re-allocation if needed... */
2000 if (dev->data->tx_queues[queue_idx] != NULL) {
2001 ixgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
2002 dev->data->tx_queues[queue_idx] = NULL;
2005 /* First allocate the tx queue data structure */
2006 txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue),
2007 RTE_CACHE_LINE_SIZE, socket_id);
2012 * Allocate TX ring hardware descriptors. A memzone large enough to
2013 * handle the maximum ring size is allocated in order to allow for
2014 * resizing in later calls to the queue setup function.
2016 tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
2017 sizeof(union ixgbe_adv_tx_desc) * IXGBE_MAX_RING_DESC,
2020 ixgbe_tx_queue_release(txq);
2024 txq->nb_tx_desc = nb_desc;
2025 txq->tx_rs_thresh = tx_rs_thresh;
2026 txq->tx_free_thresh = tx_free_thresh;
2027 txq->pthresh = tx_conf->tx_thresh.pthresh;
2028 txq->hthresh = tx_conf->tx_thresh.hthresh;
2029 txq->wthresh = tx_conf->tx_thresh.wthresh;
2030 txq->queue_id = queue_idx;
2031 txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2032 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2033 txq->port_id = dev->data->port_id;
2034 txq->txq_flags = tx_conf->txq_flags;
2035 txq->ops = &def_txq_ops;
2036 txq->tx_deferred_start = tx_conf->tx_deferred_start;
2039 * Modification to set VFTDT for virtual function if vf is detected
2041 if (hw->mac.type == ixgbe_mac_82599_vf ||
2042 hw->mac.type == ixgbe_mac_X540_vf ||
2043 hw->mac.type == ixgbe_mac_X550_vf ||
2044 hw->mac.type == ixgbe_mac_X550EM_x_vf)
2045 txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx));
2047 txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
2048 #ifndef RTE_LIBRTE_XEN_DOM0
2049 txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
2051 txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
2053 txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;
2055 /* Allocate software ring */
2056 txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
2057 sizeof(struct ixgbe_tx_entry) * nb_desc,
2058 RTE_CACHE_LINE_SIZE, socket_id);
2059 if (txq->sw_ring == NULL) {
2060 ixgbe_tx_queue_release(txq);
2063 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
2064 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
2066 /* set up vector or scalar TX function as appropriate */
2067 ixgbe_set_tx_function(dev, txq);
2069 txq->ops->reset(txq);
2071 dev->data->tx_queues[queue_idx] = txq;
2073 return (0);
2074 }
2077 /**
2078 * ixgbe_free_sc_cluster - free the not-yet-completed scattered cluster
2080 * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
2081 * in the sw_rsc_ring is not set to NULL but rather points to the next
2082 * mbuf of this RSC aggregation (that has not been completed yet and still
2083 * resides on the HW ring). So, instead of calling rte_pktmbuf_free(), we
2084 * just free the first "nb_segs" segments of the cluster explicitly, calling
2085 * rte_pktmbuf_free_seg() on each of them.
2086 *
2087 * @m scattered cluster head
2089 static void __attribute__((cold))
2090 ixgbe_free_sc_cluster(struct rte_mbuf *m)
2092 uint8_t i, nb_segs = m->nb_segs;
2093 struct rte_mbuf *next_seg;
2095 for (i = 0; i < nb_segs; i++) {
2096 next_seg = m->next;
2097 rte_pktmbuf_free_seg(m);
2098 m = next_seg;
2099 }
2100 }
2102 static void __attribute__((cold))
2103 ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
2107 if (rxq->sw_ring != NULL) {
2108 for (i = 0; i < rxq->nb_rx_desc; i++) {
2109 if (rxq->sw_ring[i].mbuf != NULL &&
2110 rte_mbuf_refcnt_read(rxq->sw_ring[i].mbuf)) {
2111 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
2112 rxq->sw_ring[i].mbuf = NULL;
2115 #ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
2116 if (rxq->rx_nb_avail) {
2117 for (i = 0; i < rxq->rx_nb_avail; ++i) {
2118 struct rte_mbuf *mb;
2119 mb = rxq->rx_stage[rxq->rx_next_avail + i];
2120 rte_pktmbuf_free_seg(mb);
2122 rxq->rx_nb_avail = 0;
2127 if (rxq->sw_sc_ring)
2128 for (i = 0; i < rxq->nb_rx_desc; i++)
2129 if (rxq->sw_sc_ring[i].fbuf) {
2130 ixgbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
2131 rxq->sw_sc_ring[i].fbuf = NULL;
2135 static void __attribute__((cold))
2136 ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)
2139 ixgbe_rx_queue_release_mbufs(rxq);
2140 rte_free(rxq->sw_ring);
2141 rte_free(rxq->sw_sc_ring);
2146 void __attribute__((cold))
2147 ixgbe_dev_rx_queue_release(void *rxq)
2149 ixgbe_rx_queue_release(rxq);
2153 * Check if Rx Burst Bulk Alloc function can be used.
2154 * Return
2155 *   0: the preconditions are satisfied and the bulk allocation function
2156 *      can be used.
2157 *   -EINVAL: the preconditions are NOT satisfied and the default Rx burst
2158 *      function must be used.
2159 */
2160 static inline int __attribute__((cold))
2161 #ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
2162 check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
2164 check_rx_burst_bulk_alloc_preconditions(__rte_unused struct ixgbe_rx_queue *rxq)
2170 * Make sure the following pre-conditions are satisfied:
2171 * rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST
2172 * rxq->rx_free_thresh < rxq->nb_rx_desc
2173 * (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
2174 * rxq->nb_rx_desc<(IXGBE_MAX_RING_DESC-RTE_PMD_IXGBE_RX_MAX_BURST)
2175 * Scattered packets are not supported. This should be checked
2176 * outside of this function.
2177 */
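/*
 * For instance, assuming RTE_PMD_IXGBE_RX_MAX_BURST is 32, a queue
 * with nb_rx_desc = 128 and rx_free_thresh = 32 satisfies all four
 * preconditions: 32 >= 32, 32 < 128, 128 % 32 == 0 and
 * 128 < (IXGBE_MAX_RING_DESC - 32).
 */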
2178 #ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
2179 if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) {
2180 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2181 "rxq->rx_free_thresh=%d, "
2182 "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
2183 rxq->rx_free_thresh, RTE_PMD_IXGBE_RX_MAX_BURST);
2185 } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
2186 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2187 "rxq->rx_free_thresh=%d, "
2188 "rxq->nb_rx_desc=%d",
2189 rxq->rx_free_thresh, rxq->nb_rx_desc);
2191 } else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) {
2192 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2193 "rxq->nb_rx_desc=%d, "
2194 "rxq->rx_free_thresh=%d",
2195 rxq->nb_rx_desc, rxq->rx_free_thresh);
2197 } else if (!(rxq->nb_rx_desc <
2198 (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST))) {
2199 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2200 "rxq->nb_rx_desc=%d, "
2201 "IXGBE_MAX_RING_DESC=%d, "
2202 "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
2203 rxq->nb_rx_desc, IXGBE_MAX_RING_DESC,
2204 RTE_PMD_IXGBE_RX_MAX_BURST);
2214 /* Reset dynamic ixgbe_rx_queue fields back to defaults */
2215 static void __attribute__((cold))
2216 ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
2218 static const union ixgbe_adv_rx_desc zeroed_desc = {{0}};
2220 uint16_t len = rxq->nb_rx_desc;
2223 * By default, the Rx queue setup function allocates enough memory for
2224 * IXGBE_MAX_RING_DESC. The Rx Burst bulk allocation function requires
2225 * extra memory at the end of the descriptor ring to be zero'd out. A
2226 * pre-condition for using the Rx burst bulk alloc function is that the
2227 * number of descriptors is less than or equal to
2228 * (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST). Check all the
2229 * constraints here to see if we need to zero out memory after the end
2230 * of the H/W descriptor ring.
2232 if (adapter->rx_bulk_alloc_allowed)
2233 /* zero out extra memory */
2234 len += RTE_PMD_IXGBE_RX_MAX_BURST;
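/*
 * E.g. with nb_rx_desc = 128 and RTE_PMD_IXGBE_RX_MAX_BURST = 32
 * (illustrative values), len becomes 160, so 32 descriptors past
 * the ring end are zeroed for the look-ahead logic below.
 */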
2237 * Zero out HW ring memory. Zero out extra memory at the end of
2238 * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
2239 * reads extra memory as zeros.
2241 for (i = 0; i < len; i++) {
2242 rxq->rx_ring[i] = zeroed_desc;
2245 #ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
2247 * initialize extra software ring entries. Space for these extra
2248 * entries is always allocated
2250 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
2251 for (i = rxq->nb_rx_desc; i < len; ++i) {
2252 rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
2255 rxq->rx_nb_avail = 0;
2256 rxq->rx_next_avail = 0;
2257 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
2258 #endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */
2260 rxq->nb_rx_hold = 0;
2261 rxq->pkt_first_seg = NULL;
2262 rxq->pkt_last_seg = NULL;
2265 int __attribute__((cold))
2266 ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
2269 unsigned int socket_id,
2270 const struct rte_eth_rxconf *rx_conf,
2271 struct rte_mempool *mp)
2273 const struct rte_memzone *rz;
2274 struct ixgbe_rx_queue *rxq;
2275 struct ixgbe_hw *hw;
2277 struct ixgbe_adapter *adapter =
2278 (struct ixgbe_adapter *)dev->data->dev_private;
2280 PMD_INIT_FUNC_TRACE();
2281 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2284 * Validate number of receive descriptors.
2285 * It must not exceed hardware maximum, and must be multiple
2288 if (((nb_desc * sizeof(union ixgbe_adv_rx_desc)) % IXGBE_ALIGN) != 0 ||
2289 (nb_desc > IXGBE_MAX_RING_DESC) ||
2290 (nb_desc < IXGBE_MIN_RING_DESC)) {
2294 /* Free memory prior to re-allocation if needed... */
2295 if (dev->data->rx_queues[queue_idx] != NULL) {
2296 ixgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
2297 dev->data->rx_queues[queue_idx] = NULL;
2300 /* First allocate the rx queue data structure */
2301 rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct ixgbe_rx_queue),
2302 RTE_CACHE_LINE_SIZE, socket_id);
2306 rxq->nb_rx_desc = nb_desc;
2307 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
2308 rxq->queue_id = queue_idx;
2309 rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2310 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2311 rxq->port_id = dev->data->port_id;
2312 rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
2314 rxq->drop_en = rx_conf->rx_drop_en;
2315 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
2318 * Allocate RX ring hardware descriptors. A memzone large enough to
2319 * handle the maximum ring size is allocated in order to allow for
2320 * resizing in later calls to the queue setup function.
2322 rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx,
2323 RX_RING_SZ, socket_id);
2325 ixgbe_rx_queue_release(rxq);
2329 /*
2330 * Zero init all the descriptors in the ring.
2331 */
2332 memset(rz->addr, 0, RX_RING_SZ);
2334 /*
2335 * Use the VFRDT/VFRDH tail and head registers for virtual functions.
2336 */
2337 if (hw->mac.type == ixgbe_mac_82599_vf ||
2338 hw->mac.type == ixgbe_mac_X540_vf ||
2339 hw->mac.type == ixgbe_mac_X550_vf ||
2340 hw->mac.type == ixgbe_mac_X550EM_x_vf) {
2342 IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx));
2344 IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDH(queue_idx));
2348 IXGBE_PCI_REG_ADDR(hw, IXGBE_RDT(rxq->reg_idx));
2350 IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx));
2352 #ifndef RTE_LIBRTE_XEN_DOM0
2353 rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
2355 rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
2357 rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
2360 * Certain constraints must be met in order to use the bulk buffer
2361 * allocation Rx burst function. If any Rx queue doesn't meet them,
2362 * the feature is disabled for the whole port.
2363 */
2364 if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
2365 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc "
2366 "preconditions - canceling the feature for "
2367 "the whole port[%d]",
2368 rxq->queue_id, rxq->port_id);
2369 adapter->rx_bulk_alloc_allowed = false;
2373 * Allocate software ring. Allow for space at the end of the
2374 * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
2375 * function does not access an invalid memory region.
2378 if (adapter->rx_bulk_alloc_allowed)
2379 len += RTE_PMD_IXGBE_RX_MAX_BURST;
2381 rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
2382 sizeof(struct ixgbe_rx_entry) * len,
2383 RTE_CACHE_LINE_SIZE, socket_id);
2384 if (!rxq->sw_ring) {
2385 ixgbe_rx_queue_release(rxq);
2390 * Always allocate even if it's not going to be needed in order to
2391 * simplify the code.
2393 * This ring is used in LRO and Scattered Rx cases and Scattered Rx may
2394 * be requested in ixgbe_dev_rx_init(), which is called later from
2398 rte_zmalloc_socket("rxq->sw_sc_ring",
2399 sizeof(struct ixgbe_scattered_rx_entry) * len,
2400 RTE_CACHE_LINE_SIZE, socket_id);
2401 if (!rxq->sw_sc_ring) {
2402 ixgbe_rx_queue_release(rxq);
2406 PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p "
2407 "dma_addr=0x%"PRIx64,
2408 rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
2409 rxq->rx_ring_phys_addr);
2411 if (!rte_is_power_of_2(nb_desc)) {
2412 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
2413 "preconditions - canceling the feature for "
2414 "the whole port[%d]",
2415 rxq->queue_id, rxq->port_id);
2416 adapter->rx_vec_allowed = false;
2418 ixgbe_rxq_vec_setup(rxq);
2420 dev->data->rx_queues[queue_idx] = rxq;
2422 ixgbe_reset_rx_queue(adapter, rxq);
2424 return 0;
2425 }
2427 uint32_t
2428 ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
2430 #define IXGBE_RXQ_SCAN_INTERVAL 4
2431 volatile union ixgbe_adv_rx_desc *rxdp;
2432 struct ixgbe_rx_queue *rxq;
2435 if (rx_queue_id >= dev->data->nb_rx_queues) {
2436 PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
2440 rxq = dev->data->rx_queues[rx_queue_id];
2441 rxdp = &(rxq->rx_ring[rxq->rx_tail]);
2443 while ((desc < rxq->nb_rx_desc) &&
2444 (rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD)) {
2445 desc += IXGBE_RXQ_SCAN_INTERVAL;
2446 rxdp += IXGBE_RXQ_SCAN_INTERVAL;
2447 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
2448 rxdp = &(rxq->rx_ring[rxq->rx_tail +
2449 desc - rxq->nb_rx_desc]);
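/* The scan advances 4 descriptors at a time, so the returned count
 * is a multiple of IXGBE_RXQ_SCAN_INTERVAL and only approximate. */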
2456 ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
2458 volatile union ixgbe_adv_rx_desc *rxdp;
2459 struct ixgbe_rx_queue *rxq = rx_queue;
2462 if (unlikely(offset >= rxq->nb_rx_desc))
2463 return 0;
2464 desc = rxq->rx_tail + offset;
2465 if (desc >= rxq->nb_rx_desc)
2466 desc -= rxq->nb_rx_desc;
2468 rxdp = &rxq->rx_ring[desc];
2469 return !!(rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD);
2472 void __attribute__((cold))
2473 ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
2476 struct ixgbe_adapter *adapter =
2477 (struct ixgbe_adapter *)dev->data->dev_private;
2479 PMD_INIT_FUNC_TRACE();
2481 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2482 struct ixgbe_tx_queue *txq = dev->data->tx_queues[i];
2484 txq->ops->release_mbufs(txq);
2485 txq->ops->reset(txq);
2489 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2490 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
2492 ixgbe_rx_queue_release_mbufs(rxq);
2493 ixgbe_reset_rx_queue(adapter, rxq);
2498 /*********************************************************************
2500 * Device RX/TX init functions
2502 **********************************************************************/
2505 * Receive Side Scaling (RSS)
2506 * See section 7.1.2.8 in the following document:
2507 * "Intel 82599 10 GbE Controller Datasheet" - Revision 2.1 October 2009
2510 * The source and destination IP addresses of the IP header and the source
2511 * and destination ports of TCP/UDP headers, if any, of received packets are
2512 * hashed against a configurable random key to compute a 32-bit RSS hash result.
2513 * The seven (7) LSBs of the 32-bit hash result are used as an index into a
2514 * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
2515 * RSS output index which is used as the RX queue index where to store the
2517 * The following output is supplied in the RX write-back descriptor:
2518 * - 32-bit result of the Microsoft RSS hash function,
2519 * - 4-bit RSS type field.
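*
* For example (illustrative): a packet whose 32-bit hash is 0x1234ABCD
* uses RETA index 0x1234ABCD & 0x7F = 0x4D (77), and is steered to the
* RX queue stored in RETA entry 77.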
2523 * RSS random key supplied in section 7.1.2.8.3 of the Intel 82599 datasheet.
2524 * Used as the default key.
2526 static uint8_t rss_intel_key[40] = {
2527 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
2528 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
2529 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
2530 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
2531 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
2535 ixgbe_rss_disable(struct rte_eth_dev *dev)
2537 struct ixgbe_hw *hw;
2540 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2541 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
2542 mrqc &= ~IXGBE_MRQC_RSSEN;
2543 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2547 ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
2555 hash_key = rss_conf->rss_key;
2556 if (hash_key != NULL) {
2557 /* Fill in RSS hash key */
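/*
 * Each RSSRK register packs four key bytes in little-endian order;
 * e.g. the first four bytes of the default rss_intel_key
 * (0x6D 0x5A 0x56 0xDA) yield RSSRK[0] = 0xDA565A6D.
 */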
2558 for (i = 0; i < 10; i++) {
2559 rss_key = hash_key[(i * 4)];
2560 rss_key |= hash_key[(i * 4) + 1] << 8;
2561 rss_key |= hash_key[(i * 4) + 2] << 16;
2562 rss_key |= hash_key[(i * 4) + 3] << 24;
2563 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RSSRK(0), i, rss_key);
2567 /* Set configured hashing protocols in MRQC register */
2568 rss_hf = rss_conf->rss_hf;
2569 mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */
2570 if (rss_hf & ETH_RSS_IPV4)
2571 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
2572 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
2573 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
2574 if (rss_hf & ETH_RSS_IPV6)
2575 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
2576 if (rss_hf & ETH_RSS_IPV6_EX)
2577 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
2578 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
2579 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
2580 if (rss_hf & ETH_RSS_IPV6_TCP_EX)
2581 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
2582 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
2583 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
2584 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
2585 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
2586 if (rss_hf & ETH_RSS_IPV6_UDP_EX)
2587 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2588 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2592 ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
2593 struct rte_eth_rss_conf *rss_conf)
2595 struct ixgbe_hw *hw;
2599 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2602 * Excerpt from section 7.1.2.8 Receive-Side Scaling (RSS):
2603 * "RSS enabling cannot be done dynamically while it must be
2604 * preceded by a software reset"
2605 * Before changing anything, first check that the update RSS operation
2606 * does not attempt to disable RSS, if RSS was enabled at
2607 * initialization time, or does not attempt to enable RSS, if RSS was
2608 * disabled at initialization time.
2610 rss_hf = rss_conf->rss_hf & IXGBE_RSS_OFFLOAD_ALL;
2611 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
2612 if (!(mrqc & IXGBE_MRQC_RSSEN)) { /* RSS disabled */
2613 if (rss_hf != 0) /* Enable RSS */
2615 return 0; /* Nothing to do */
2618 if (rss_hf == 0) /* Disable RSS */
2620 ixgbe_hw_rss_hash_set(hw, rss_conf);
2625 ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
2626 struct rte_eth_rss_conf *rss_conf)
2628 struct ixgbe_hw *hw;
2635 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2636 hash_key = rss_conf->rss_key;
2637 if (hash_key != NULL) {
2638 /* Return RSS hash key */
2639 for (i = 0; i < 10; i++) {
2640 rss_key = IXGBE_READ_REG_ARRAY(hw, IXGBE_RSSRK(0), i);
2641 hash_key[(i * 4)] = rss_key & 0x000000FF;
2642 hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
2643 hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
2644 hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
2648 /* Get RSS functions configured in MRQC register */
2649 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
2650 if ((mrqc & IXGBE_MRQC_RSSEN) == 0) { /* RSS is disabled */
2651 rss_conf->rss_hf = 0;
2655 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4)
2656 rss_hf |= ETH_RSS_IPV4;
2657 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP)
2658 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
2659 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6)
2660 rss_hf |= ETH_RSS_IPV6;
2661 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX)
2662 rss_hf |= ETH_RSS_IPV6_EX;
2663 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP)
2664 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
2665 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP)
2666 rss_hf |= ETH_RSS_IPV6_TCP_EX;
2667 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP)
2668 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
2669 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP)
2670 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
2671 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP)
2672 rss_hf |= ETH_RSS_IPV6_UDP_EX;
2673 rss_conf->rss_hf = rss_hf;
2678 ixgbe_rss_configure(struct rte_eth_dev *dev)
2680 struct rte_eth_rss_conf rss_conf;
2681 struct ixgbe_hw *hw;
2686 PMD_INIT_FUNC_TRACE();
2687 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2690 * Fill in redirection table
2691 * The byte-swap is needed because NIC registers are in
2692 * little-endian order.
2695 for (i = 0, j = 0; i < 128; i++, j++) {
2696 if (j == dev->data->nb_rx_queues)
2697 j = 0;
2698 reta = (reta << 8) | j;
2699 if ((i & 3) == 3)
2700 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2),
2701 rte_bswap32(reta));
2702 }
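/*
 * E.g. with 4 RX queues the pattern 0,1,2,3 repeats through the
 * table: for the first four entries reta = 0x00010203, and
 * rte_bswap32(reta) = 0x03020100 is written to RETA(0), i.e.
 * queues 0..3 occupy the register's bytes from low to high.
 */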
2704 /*
2705 * Configure the RSS key and the RSS protocols used to compute
2706 * the RSS hash of input packets.
2707 */
2708 rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
2709 if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
2710 ixgbe_rss_disable(dev);
2713 if (rss_conf.rss_key == NULL)
2714 rss_conf.rss_key = rss_intel_key; /* Default hash key */
2715 ixgbe_hw_rss_hash_set(hw, &rss_conf);
2718 #define NUM_VFTA_REGISTERS 128
2719 #define NIC_RX_BUFFER_SIZE 0x200
2722 ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
2724 struct rte_eth_vmdq_dcb_conf *cfg;
2725 struct ixgbe_hw *hw;
2726 enum rte_eth_nb_pools num_pools;
2727 uint32_t mrqc, vt_ctl, queue_mapping, vlanctrl;
2729 uint8_t nb_tcs; /* number of traffic classes */
2732 PMD_INIT_FUNC_TRACE();
2733 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2734 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
2735 num_pools = cfg->nb_queue_pools;
2736 /* Check we have a valid number of pools */
2737 if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
2738 ixgbe_rss_disable(dev);
2741 /* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
2742 nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
2746 * split rx buffer up into sections, each for 1 traffic class
2748 pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
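/*
 * E.g. assuming the 512 KB RX packet buffer of the 82599
 * (NIC_RX_BUFFER_SIZE = 0x200, in KB units) and 8 TCs, each TC
 * gets a 64 KB section.
 */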
2749 for (i = 0 ; i < nb_tcs; i++) {
2750 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
2751 rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
2752 /* clear 10 bits. */
2753 rxpbsize |= (pbsize << IXGBE_RXPBSIZE_SHIFT); /* set value */
2754 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
2756 /* zero alloc all unused TCs */
2757 for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2758 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
2759 rxpbsize &= (~( 0x3FF << IXGBE_RXPBSIZE_SHIFT ));
2760 /* clear 10 bits. */
2761 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
2764 /* MRQC: enable vmdq and dcb */
2765 mrqc = ((num_pools == ETH_16_POOLS) ? \
2766 IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN );
2767 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2769 /* PFVTCTL: turn on virtualisation and set the default pool */
2770 vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2771 if (cfg->enable_default_pool) {
2772 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
2774 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
2777 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
2779 /* RTRUP2TC: mapping user priorities to traffic classes (TCs) */
2781 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
2783 * mapping is done with 3 bits per priority,
2784 * so shift by i*3 each time
2786 queue_mapping |= ((cfg->dcb_queue[i] & 0x07) << (i * 3));
2788 IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, queue_mapping);
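/*
 * E.g. with a hypothetical dcb_queue map {0,0,1,1,2,2,3,3},
 * priority 2 maps to TC1 and contributes (1 & 0x07) << 6, i.e.
 * bits 8:6 of RTRUP2TC.
 */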
2790 /* RTRPCS: DCB related */
2791 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, IXGBE_RMCS_RRM);
2793 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
2794 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2795 vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
2796 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
2798 /* VFTA - enable all vlan filters */
2799 for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
2800 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
2803 /* VFRE: pool enabling for receive - 16 or 32 */
2804 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), \
2805 num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
2808 * MPSAR - allow pools to read specific mac addresses
2809 * In this case, all pools should be able to read from mac addr 0
2811 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), 0xFFFFFFFF);
2812 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), 0xFFFFFFFF);
2814 /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
2815 for (i = 0; i < cfg->nb_pool_maps; i++) {
2816 /* set vlan id in VF register and set the valid bit */
2817 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \
2818 (cfg->pool_map[i].vlan_id & 0xFFF)));
2820 * Put the allowed pools in VFB reg. As we only have 16 or 32
2821 * pools, we only need to use the first half of the register
2824 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), cfg->pool_map[i].pools);
2829 * ixgbe_dcb_config_tx_hw_config - Configure general DCB TX parameters
2830 * @hw: pointer to hardware structure
2831 * @dcb_config: pointer to ixgbe_dcb_config structure
2834 ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw,
2835 struct ixgbe_dcb_config *dcb_config)
2840 PMD_INIT_FUNC_TRACE();
2841 if (hw->mac.type != ixgbe_mac_82598EB) {
2842 /* Disable the Tx desc arbiter so that MTQC can be changed */
2843 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2844 reg |= IXGBE_RTTDCS_ARBDIS;
2845 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
2847 /* Enable DCB for Tx with 8 TCs */
2848 if (dcb_config->num_tcs.pg_tcs == 8) {
2849 reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
2852 reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
2854 if (dcb_config->vt_mode)
2855 reg |= IXGBE_MTQC_VT_ENA;
2856 IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
2858 /* Disable drop for all queues */
2859 for (q = 0; q < 128; q++)
2860 IXGBE_WRITE_REG(hw, IXGBE_QDE,
2861 (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
2863 /* Enable the Tx desc arbiter */
2864 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2865 reg &= ~IXGBE_RTTDCS_ARBDIS;
2866 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
2868 /* Enable Security TX Buffer IFG for DCB */
2869 reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
2870 reg |= IXGBE_SECTX_DCB;
2871 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
2877 * ixgbe_vmdq_dcb_hw_tx_config - Configure general VMDQ+DCB TX parameters
2878 * @dev: pointer to rte_eth_dev structure
2879 * @dcb_config: pointer to ixgbe_dcb_config structure
2882 ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
2883 struct ixgbe_dcb_config *dcb_config)
2885 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2886 &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
2887 struct ixgbe_hw *hw =
2888 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2890 PMD_INIT_FUNC_TRACE();
2891 if (hw->mac.type != ixgbe_mac_82598EB)
2892 /*PF VF Transmit Enable*/
2893 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0),
2894 vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
2896 /*Configure general DCB TX parameters*/
2897 ixgbe_dcb_tx_hw_config(hw,dcb_config);
2902 ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
2903 struct ixgbe_dcb_config *dcb_config)
2905 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2906 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
2907 struct ixgbe_dcb_tc_config *tc;
2910 /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
2911 if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS ) {
2912 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
2913 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
2916 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
2917 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
2919 /* User Priority to Traffic Class mapping */
2920 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2921 j = vmdq_rx_conf->dcb_queue[i];
2922 tc = &dcb_config->tc_config[j];
2923 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
2929 ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
2930 struct ixgbe_dcb_config *dcb_config)
2932 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2933 &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
2934 struct ixgbe_dcb_tc_config *tc;
2937 /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
2938 if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ) {
2939 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
2940 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
2943 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
2944 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
2947 /* User Priority to Traffic Class mapping */
2948 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2949 j = vmdq_tx_conf->dcb_queue[i];
2950 tc = &dcb_config->tc_config[j];
2951 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
2958 ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
2959 struct ixgbe_dcb_config *dcb_config)
2961 struct rte_eth_dcb_rx_conf *rx_conf =
2962 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
2963 struct ixgbe_dcb_tc_config *tc;
2966 dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
2967 dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
2969 /* User Priority to Traffic Class mapping */
2970 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2971 j = rx_conf->dcb_queue[i];
2972 tc = &dcb_config->tc_config[j];
2973 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
2979 ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
2980 struct ixgbe_dcb_config *dcb_config)
2982 struct rte_eth_dcb_tx_conf *tx_conf =
2983 &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
2984 struct ixgbe_dcb_tc_config *tc;
2987 dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
2988 dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
2990 /* User Priority to Traffic Class mapping */
2991 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2992 j = tx_conf->dcb_queue[i];
2993 tc = &dcb_config->tc_config[j];
2994 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
3000 * ixgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
3001 * @hw: pointer to hardware structure
3002 * @dcb_config: pointer to ixgbe_dcb_config structure
3005 ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
3006 struct ixgbe_dcb_config *dcb_config)
3012 PMD_INIT_FUNC_TRACE();
3014 * Disable the arbiter before changing parameters
3015 * (always enable recycle mode; WSP)
3017 reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
3018 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
3020 if (hw->mac.type != ixgbe_mac_82598EB) {
3021 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
3022 if (dcb_config->num_tcs.pg_tcs == 4) {
3023 if (dcb_config->vt_mode)
3024 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3025 IXGBE_MRQC_VMDQRT4TCEN;
3027 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
3028 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3032 if (dcb_config->num_tcs.pg_tcs == 8) {
3033 if (dcb_config->vt_mode)
3034 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3035 IXGBE_MRQC_VMDQRT8TCEN;
3037 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
3038 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3043 IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
3046 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
3047 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3048 vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
3049 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
3051 /* VFTA - enable all vlan filters */
3052 for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
3053 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
3057 * Configure Rx packet plane (recycle mode; WSP) and
3060 reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
3061 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
3067 ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill,
3068 uint16_t *max,uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
3070 switch (hw->mac.type) {
3071 case ixgbe_mac_82598EB:
3072 ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
3074 case ixgbe_mac_82599EB:
3075 case ixgbe_mac_X540:
3076 case ixgbe_mac_X550:
3077 case ixgbe_mac_X550EM_x:
3078 ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
3087 ixgbe_dcb_hw_arbite_tx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *max,
3088 uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
3090 switch (hw->mac.type) {
3091 case ixgbe_mac_82598EB:
3092 ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id,tsa);
3093 ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id,tsa);
3095 case ixgbe_mac_82599EB:
3096 case ixgbe_mac_X540:
3097 case ixgbe_mac_X550:
3098 case ixgbe_mac_X550EM_x:
3099 ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,tsa);
3100 ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,tsa, map);
3107 #define DCB_RX_CONFIG 1
3108 #define DCB_TX_CONFIG 1
3109 #define DCB_TX_PB 1024
3111 * ixgbe_dcb_hw_configure - Enable DCB and configure
3112 * general DCB in VT mode and non-VT mode parameters
3113 * @dev: pointer to rte_eth_dev structure
3114 * @dcb_config: pointer to ixgbe_dcb_config structure
3117 ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
3118 struct ixgbe_dcb_config *dcb_config)
3121 uint8_t i,pfc_en,nb_tcs;
3123 uint8_t config_dcb_rx = 0;
3124 uint8_t config_dcb_tx = 0;
3125 uint8_t tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3126 uint8_t bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3127 uint16_t refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3128 uint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3129 uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3130 struct ixgbe_dcb_tc_config *tc;
3131 uint32_t max_frame = dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
3132 struct ixgbe_hw *hw =
3133 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3135 switch(dev->data->dev_conf.rxmode.mq_mode){
3136 case ETH_MQ_RX_VMDQ_DCB:
3137 dcb_config->vt_mode = true;
3138 if (hw->mac.type != ixgbe_mac_82598EB) {
3139 config_dcb_rx = DCB_RX_CONFIG;
3140 /*
3141 * Get DCB and VT RX configuration parameters
3142 * from rte_eth_conf.
3143 */
3144 ixgbe_vmdq_dcb_rx_config(dev,dcb_config);
3145 /*Configure general VMDQ and DCB RX parameters*/
3146 ixgbe_vmdq_dcb_configure(dev);
3150 dcb_config->vt_mode = false;
3151 config_dcb_rx = DCB_RX_CONFIG;
3152 /* Get DCB RX configuration parameters from rte_eth_conf */
3153 ixgbe_dcb_rx_config(dev,dcb_config);
3154 /*Configure general DCB RX parameters*/
3155 ixgbe_dcb_rx_hw_config(hw, dcb_config);
3158 PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
3161 switch (dev->data->dev_conf.txmode.mq_mode) {
3162 case ETH_MQ_TX_VMDQ_DCB:
3163 dcb_config->vt_mode = true;
3164 config_dcb_tx = DCB_TX_CONFIG;
3165 /* get DCB and VT TX configuration parameters from rte_eth_conf */
3166 ixgbe_dcb_vt_tx_config(dev,dcb_config);
3167 /*Configure general VMDQ and DCB TX parameters*/
3168 ixgbe_vmdq_dcb_hw_tx_config(dev,dcb_config);
3172 dcb_config->vt_mode = false;
3173 config_dcb_tx = DCB_TX_CONFIG;
3174 /*get DCB TX configuration parameters from rte_eth_conf*/
3175 ixgbe_dcb_tx_config(dev,dcb_config);
3176 /*Configure general DCB TX parameters*/
3177 ixgbe_dcb_tx_hw_config(hw, dcb_config);
3180 PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration");
3184 nb_tcs = dcb_config->num_tcs.pfc_tcs;
3186 ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
3187 if(nb_tcs == ETH_4_TCS) {
3188 /* Avoid un-configured priority mapping to TC0 */
3190 uint8_t mask = 0xFF;
3191 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
3192 mask = (uint8_t)(mask & (~ (1 << map[i])));
3193 for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
3194 if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
3198 /* Re-configure 4 TCs BW */
3199 for (i = 0; i < nb_tcs; i++) {
3200 tc = &dcb_config->tc_config[i];
3201 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
3202 (uint8_t)(100 / nb_tcs);
3203 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
3204 (uint8_t)(100 / nb_tcs);
3206 for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
3207 tc = &dcb_config->tc_config[i];
3208 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
3209 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 0;
3214 /* Set RX buffer size */
3215 pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
3216 uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT;
3217 for (i = 0 ; i < nb_tcs; i++) {
3218 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
3220 /* zero alloc all unused TCs */
3221 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3222 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
3226 /* Only support an equally distributed Tx packet buffer strategy. */
3227 uint32_t txpktsize = IXGBE_TXPBSIZE_MAX / nb_tcs;
3228 uint32_t txpbthresh = (txpktsize / DCB_TX_PB) - IXGBE_TXPKT_SIZE_MAX;
3229 for (i = 0; i < nb_tcs; i++) {
3230 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
3231 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
3233 /* Clear unused TCs, if any, to zero buffer size*/
3234 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3235 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
3236 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
3240 /*Calculates traffic class credits*/
3241 ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame,
3242 IXGBE_DCB_TX_CONFIG);
3243 ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame,
3244 IXGBE_DCB_RX_CONFIG);
3247 /* Unpack CEE standard containers */
3248 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_RX_CONFIG, refill);
3249 ixgbe_dcb_unpack_max_cee(dcb_config, max);
3250 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_RX_CONFIG, bwgid);
3251 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_RX_CONFIG, tsa);
3252 /* Configure PG(ETS) RX */
3253 ixgbe_dcb_hw_arbite_rx_config(hw,refill,max,bwgid,tsa,map);
3257 /* Unpack CEE standard containers */
3258 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
3259 ixgbe_dcb_unpack_max_cee(dcb_config, max);
3260 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
3261 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
3262 /* Configure PG(ETS) TX */
3263 ixgbe_dcb_hw_arbite_tx_config(hw,refill,max,bwgid,tsa,map);
3266 /*Configure queue statistics registers*/
3267 ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
3269 /* Check if the PFC is supported */
3270 if(dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
3271 pbsize = (uint16_t) (NIC_RX_BUFFER_SIZE / nb_tcs);
3272 for (i = 0; i < nb_tcs; i++) {
3274 * If the TC count is 8 and the default high_water is 48,
3275 * then the default low_water is 16.
3276 */
3277 hw->fc.high_water[i] = (pbsize * 3 ) / 4;
3278 hw->fc.low_water[i] = pbsize / 4;
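/*
 * E.g. 512 KB of packet buffer split across 8 TCs gives
 * pbsize = 64 (KB), hence the default 48/16 watermarks noted above.
 */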
3279 /* Enable pfc for this TC */
3280 tc = &dcb_config->tc_config[i];
3281 tc->pfc = ixgbe_dcb_pfc_enabled;
3283 ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
3284 if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
3285 pfc_en &= 0x0F;
3286 ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
3293 * ixgbe_configure_dcb - Configure DCB Hardware
3294 * @dev: pointer to rte_eth_dev
3296 void ixgbe_configure_dcb(struct rte_eth_dev *dev)
3298 struct ixgbe_dcb_config *dcb_cfg =
3299 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
3300 struct rte_eth_conf *dev_conf = &(dev->data->dev_conf);
3302 PMD_INIT_FUNC_TRACE();
3304 /* check support mq_mode for DCB */
3305 if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
3306 (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB))
3309 if (dev->data->nb_rx_queues != ETH_DCB_NUM_QUEUES)
3312 /** Configure DCB hardware **/
3313 ixgbe_dcb_hw_configure(dev,dcb_cfg);
3319 * VMDq is only supported on 10 GbE NICs.
3322 ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
3324 struct rte_eth_vmdq_rx_conf *cfg;
3325 struct ixgbe_hw *hw;
3326 enum rte_eth_nb_pools num_pools;
3327 uint32_t mrqc, vt_ctl, vlanctrl;
3331 PMD_INIT_FUNC_TRACE();
3332 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3333 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
3334 num_pools = cfg->nb_queue_pools;
3336 ixgbe_rss_disable(dev);
3338 /* MRQC: enable vmdq */
3339 mrqc = IXGBE_MRQC_VMDQEN;
3340 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3342 /* PFVTCTL: turn on virtualisation and set the default pool */
3343 vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
3344 if (cfg->enable_default_pool)
3345 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
3347 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
3349 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
3351 for (i = 0; i < (int)num_pools; i++) {
3352 vmolr = ixgbe_convert_vm_rx_mask_to_val(cfg->rx_mode, vmolr);
3353 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
3356 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
3357 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3358 vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
3359 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
3361 /* VFTA - enable all vlan filters */
3362 for (i = 0; i < NUM_VFTA_REGISTERS; i++)
3363 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), UINT32_MAX);
3365 /* VFRE: pool enabling for receive - 64 */
3366 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), UINT32_MAX);
3367 if (num_pools == ETH_64_POOLS)
3368 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), UINT32_MAX);
3371 * MPSAR - allow pools to read specific mac addresses
3372 * In this case, all pools should be able to read from mac addr 0
3374 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), UINT32_MAX);
3375 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), UINT32_MAX);
3377 /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
3378 for (i = 0; i < cfg->nb_pool_maps; i++) {
3379 /* set vlan id in VF register and set the valid bit */
3380 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \
3381 (cfg->pool_map[i].vlan_id & IXGBE_RXD_VLAN_ID_MASK)));
3383 * Put the allowed pools in VFB reg. As we only have 16 or 64
3384 * pools, we only need to use the first half of the register
3387 if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0)
3388 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), \
3389 (cfg->pool_map[i].pools & UINT32_MAX));
3391 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((i*2+1)), \
3392 ((cfg->pool_map[i].pools >> 32) \
3397 /* PFDMA Tx General Switch Control Enables VMDQ loopback */
3398 if (cfg->enable_loop_back) {
3399 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
3400 for (i = 0; i < RTE_IXGBE_VMTXSW_REGISTER_COUNT; i++)
3401 IXGBE_WRITE_REG(hw, IXGBE_VMTXSW(i), UINT32_MAX);
3404 IXGBE_WRITE_FLUSH(hw);
3408 * ixgbe_dcb_config_tx_hw_config - Configure general VMDq TX parameters
3409 * @hw: pointer to hardware structure
3412 ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
3417 PMD_INIT_FUNC_TRACE();
3418 /*PF VF Transmit Enable*/
3419 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), UINT32_MAX);
3420 IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), UINT32_MAX);
3422 /* Disable the Tx desc arbiter so that MTQC can be changed */
3423 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3424 reg |= IXGBE_RTTDCS_ARBDIS;
3425 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3427 reg = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
3428 IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
3430 /* Disable drop for all queues */
3431 for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
3432 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3433 (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
3435 /* Enable the Tx desc arbiter */
3436 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3437 reg &= ~IXGBE_RTTDCS_ARBDIS;
3438 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3440 IXGBE_WRITE_FLUSH(hw);
3445 static int __attribute__((cold))
3446 ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
3448 struct ixgbe_rx_entry *rxe = rxq->sw_ring;
3452 /* Initialize software ring entries */
3453 for (i = 0; i < rxq->nb_rx_desc; i++) {
3454 volatile union ixgbe_adv_rx_desc *rxd;
3455 struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
3456 if (mbuf == NULL) {
3457 PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
3458 (unsigned) rxq->queue_id);
3459 return (-ENOMEM);
3460 }
3462 rte_mbuf_refcnt_set(mbuf, 1);
3464 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
3466 mbuf->port = rxq->port_id;
3469 rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
3470 rxd = &rxq->rx_ring[i];
3471 rxd->read.hdr_addr = dma_addr;
3472 rxd->read.pkt_addr = dma_addr;
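/*
 * Header split is not used here, so both the header and packet
 * addresses point at the start of the mbuf data area and the NIC
 * writes the whole frame to pkt_addr.
 */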
3480 ixgbe_config_vf_rss(struct rte_eth_dev *dev)
3482 struct ixgbe_hw *hw;
3485 ixgbe_rss_configure(dev);
3487 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3489 /* MRQC: enable VF RSS */
3490 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
3491 mrqc &= ~IXGBE_MRQC_MRQE_MASK;
3492 switch (RTE_ETH_DEV_SRIOV(dev).active) {
3493 case ETH_64_POOLS:
3494 mrqc |= IXGBE_MRQC_VMDQRSS64EN;
3495 break;
3497 case ETH_32_POOLS:
3498 mrqc |= IXGBE_MRQC_VMDQRSS32EN;
3499 break;
3501 default:
3502 PMD_INIT_LOG(ERR, "Invalid pool number in IOV mode with VMDQ RSS");
3503 return -EINVAL;
3504 }
3506 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3512 ixgbe_config_vf_default(struct rte_eth_dev *dev)
3514 struct ixgbe_hw *hw =
3515 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3517 switch (RTE_ETH_DEV_SRIOV(dev).active) {
3518 case ETH_64_POOLS:
3519 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
3520 IXGBE_MRQC_VMDQEN);
3521 break;
3523 case ETH_32_POOLS:
3524 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
3525 IXGBE_MRQC_VMDQRT4TCEN);
3526 break;
3528 case ETH_16_POOLS:
3529 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
3530 IXGBE_MRQC_VMDQRT8TCEN);
3531 break;
3532 default:
3533 PMD_INIT_LOG(ERR,
3534 "invalid pool number in IOV mode");
3535 break;
3536 }
3537 return 0;
3538 }
3540 static int
3541 ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
3543 struct ixgbe_hw *hw =
3544 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3546 if (hw->mac.type == ixgbe_mac_82598EB)
3549 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3551 * SRIOV inactive scheme
3552 * any DCB/RSS w/o VMDq multi-queue setting
3554 switch (dev->data->dev_conf.rxmode.mq_mode) {
3556 ixgbe_rss_configure(dev);
3559 case ETH_MQ_RX_VMDQ_DCB:
3560 ixgbe_vmdq_dcb_configure(dev);
3563 case ETH_MQ_RX_VMDQ_ONLY:
3564 ixgbe_vmdq_rx_hw_configure(dev);
3567 case ETH_MQ_RX_NONE:
3568 /* if mq_mode is none, disable rss mode.*/
3569 default: ixgbe_rss_disable(dev);
3573 * SRIOV active scheme
3574 * Support RSS together with VMDq & SRIOV
3576 switch (dev->data->dev_conf.rxmode.mq_mode) {
3578 case ETH_MQ_RX_VMDQ_RSS:
3579 ixgbe_config_vf_rss(dev);
3582 /* FIXME if support DCB/RSS together with VMDq & SRIOV */
3583 case ETH_MQ_RX_VMDQ_DCB:
3584 case ETH_MQ_RX_VMDQ_DCB_RSS:
3586 "Could not support DCB with VMDq & SRIOV");
3589 ixgbe_config_vf_default(dev);
3598 ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
3600 struct ixgbe_hw *hw =
3601 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3605 if (hw->mac.type == ixgbe_mac_82598EB)
3608 /* disable arbiter before setting MTQC */
3609 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3610 rttdcs |= IXGBE_RTTDCS_ARBDIS;
3611 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3613 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3615 * SRIOV inactive scheme
3616 * any DCB w/o VMDq multi-queue setting
3618 if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
3619 ixgbe_vmdq_tx_hw_configure(hw);
3621 mtqc = IXGBE_MTQC_64Q_1PB;
3622 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
3625 switch (RTE_ETH_DEV_SRIOV(dev).active) {
3628 * SRIOV active scheme
3629 * FIXME if support DCB together with VMDq & SRIOV
3632 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
3635 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_32VF;
3638 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_RT_ENA |
3642 mtqc = IXGBE_MTQC_64Q_1PB;
3643 PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
3645 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
3648 /* re-enable arbiter */
3649 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
3650 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3656 * ixgbe_get_rscctl_maxdesc - Calculate the RSCCTL[n].MAXDESC for PF
3658 * Return the RSCCTL[n].MAXDESC for 82599 and x540 PF devices according to the
3659 * spec rev. 3.0 chapter 8.2.3.8.13.
3661 * @pool Memory pool of the Rx queue
3663 static inline uint32_t
3664 ixgbe_get_rscctl_maxdesc(struct rte_mempool *pool)
3666 struct rte_pktmbuf_pool_private *mp_priv = rte_mempool_get_priv(pool);
3668 /* MAXDESC * SRRCTL.BSIZEPKT must not exceed 64 KB minus one */
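/*
 * E.g. assuming a mempool with a 2048-byte data room and the default
 * 128-byte headroom: 65535 / (2048 - 128) = 34, which selects
 * IXGBE_RSCCTL_MAXDESC_16 below.
 */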
3669 uint16_t maxdesc =
3670 IPV4_MAX_PKT_LEN /
3671 (mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);
3673 if (maxdesc >= 16)
3674 return IXGBE_RSCCTL_MAXDESC_16;
3675 else if (maxdesc >= 8)
3676 return IXGBE_RSCCTL_MAXDESC_8;
3677 else if (maxdesc >= 4)
3678 return IXGBE_RSCCTL_MAXDESC_4;
3680 return IXGBE_RSCCTL_MAXDESC_1;
3684 * ixgbe_set_ivar - Setup the correct IVAR register for a particular MSIX
3687 * (Taken from FreeBSD tree)
3688 * (yes this is all very magic and confusing :)
3691 * @entry the register array entry
3692 * @vector the MSIX vector for this queue
3696 ixgbe_set_ivar(struct rte_eth_dev *dev, u8 entry, u8 vector, s8 type)
3698 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3701 vector |= IXGBE_IVAR_ALLOC_VAL;
3703 switch (hw->mac.type) {
3705 case ixgbe_mac_82598EB:
3707 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3709 entry += (type * 64);
3710 index = (entry >> 2) & 0x1F;
3711 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3712 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3713 ivar |= (vector << (8 * (entry & 0x3)));
3714 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
3717 case ixgbe_mac_82599EB:
3718 case ixgbe_mac_X540:
3719 if (type == -1) { /* MISC IVAR */
3720 index = (entry & 1) * 8;
3721 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3722 ivar &= ~(0xFF << index);
3723 ivar |= (vector << index);
3724 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3725 } else { /* RX/TX IVARS */
3726 index = (16 * (entry & 1)) + (8 * type);
3727 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3728 ivar &= ~(0xFF << index);
3729 ivar |= (vector << index);
3730 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
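/*
 * E.g. for an RX queue (type 0) with entry 5 on 82599:
 * index = (16 * (5 & 1)) + (8 * 0) = 16, so the vector is placed
 * in bits 23:16 of IVAR(5 >> 1) = IVAR(2).
 */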
3740 void __attribute__((cold))
3741 ixgbe_set_rx_function(struct rte_eth_dev *dev)
3743 struct ixgbe_adapter *adapter =
3744 (struct ixgbe_adapter *)dev->data->dev_private;
3747 * In order to allow Vector Rx there are a few configuration
3748 * conditions to be met and Rx Bulk Allocation should be allowed.
3750 if (ixgbe_rx_vec_dev_conf_condition_check(dev) ||
3751 !adapter->rx_bulk_alloc_allowed) {
3752 PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx "
3753 "preconditions or RTE_IXGBE_INC_VECTOR is "
3755 dev->data->port_id);
3757 adapter->rx_vec_allowed = false;
3761 * Initialize the appropriate LRO callback.
3763 * If all queues satisfy the bulk allocation preconditions
3764 * (adapter->rx_bulk_alloc_allowed is TRUE) then we may use bulk allocation.
3765 * Otherwise use a single allocation version.
3767 if (dev->data->lro) {
3768 if (adapter->rx_bulk_alloc_allowed) {
3769 PMD_INIT_LOG(INFO, "LRO is requested. Using a bulk "
3770 "allocation version");
3771 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
3773 PMD_INIT_LOG(INFO, "LRO is requested. Using a single "
3774 "allocation version");
3775 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
3777 } else if (dev->data->scattered_rx) {
3779 * Set the non-LRO scattered callback: there are Vector and
3780 * single allocation versions.
3782 if (adapter->rx_vec_allowed) {
3783 PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
3784 "callback (port=%d).",
3785 dev->data->port_id);
3787 dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
3788 } else if (adapter->rx_bulk_alloc_allowed) {
3789 PMD_INIT_LOG(INFO, "Using a Scattered with bulk "
3790 "allocation callback (port=%d).",
3791 dev->data->port_id);
3792 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
3794 PMD_INIT_LOG(DEBUG, "Using Regular (non-vector, "
3795 "single allocation) "
3796 "Scattered Rx callback "
3797 "(port=%d).",
3798 dev->data->port_id);
3800 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
3803 * Below we set "simple" callbacks according to port/queues parameters.
3804 * If parameters allow we are going to choose between the following
3808 * - Single buffer allocation (the simplest one)
3810 } else if (adapter->rx_vec_allowed) {
3811 PMD_INIT_LOG(INFO, "Vector rx enabled, please make sure RX "
3812 "burst size no less than 32.");
3814 dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
3815 } else if (adapter->rx_bulk_alloc_allowed) {
3816 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
3817 "satisfied. Rx Burst Bulk Alloc function "
3818 "will be used on port=%d.",
3819 dev->data->port_id);
3821 dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
3823 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
3824 "satisfied, or Scattered Rx is requested, "
3825 "or RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC "
3826 "is not enabled (port=%d).",
3827 dev->data->port_id);
3829 dev->rx_pkt_burst = ixgbe_recv_pkts;
3834 * ixgbe_set_rsc - configure RSC related port HW registers
3836 * Configures the port's RSC related registers according to the 4.6.7.2 chapter
3837 * of 82599 Spec (x540 configuration is virtually the same).
3841 * Returns 0 in case of success or a non-zero error code
3844 ixgbe_set_rsc(struct rte_eth_dev *dev)
3846 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
3847 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3848 struct rte_eth_dev_info dev_info = { 0 };
3849 bool rsc_capable = false;
3854 dev->dev_ops->dev_infos_get(dev, &dev_info);
3855 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
3858 if (!rsc_capable && rx_conf->enable_lro) {
3859 PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
3860 "support it");
3861 return -EINVAL;
3862 }
3864 /* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
3866 if (!rx_conf->hw_strip_crc && rx_conf->enable_lro) {
3868 * According to chapter of 4.6.7.2.1 of the Spec Rev.
3869 * 3.0 RSC configuration requires HW CRC stripping being
3870 * enabled. If user requested both HW CRC stripping off
3871 * and RSC on - return an error.
3873 PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC "
3874 "stripping is disabled");
3875 return -EINVAL;
3876 }
3878 /* RFCTL configuration */
3880 uint32_t rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
3881 if (rx_conf->enable_lro)
3882 /*
3883 * Since NFS packet coalescing is not supported, clear
3884 * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
3885 * enabled.
3886 */
3887 rfctl &= ~(IXGBE_RFCTL_RSC_DIS | IXGBE_RFCTL_NFSW_DIS |
3888 IXGBE_RFCTL_NFSR_DIS);
3889 else
3890 rfctl |= IXGBE_RFCTL_RSC_DIS;
3892 IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
3895 /* If LRO hasn't been requested - we are done here. */
3896 if (!rx_conf->enable_lro)
3899 /* Set RDRXCTL.RSCACKC bit */
3900 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
3901 rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
3902 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
3904 /* Per-queue RSC configuration (chapter 4.6.7.2.2 of 82599 Spec) */
3905 for (i = 0; i < dev->data->nb_rx_queues; i++) {
3906 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
3908 IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxq->reg_idx));
3910 IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxq->reg_idx));
3912 IXGBE_READ_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx));
3914 IXGBE_READ_REG(hw, IXGBE_EITR(rxq->reg_idx));
3917 * ixgbe PMD doesn't support header-split at the moment.
3919 * Following the 4.6.7.2.1 chapter of the 82599/x540
3920 * Spec if RSC is enabled the SRRCTL[n].BSIZEHEADER
3921 * should be configured even if header split is not
3922 * enabled. We configure it to 128 bytes, following the
3923 * recommendation in the spec.
3925 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
3926 srrctl |= (128 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
3927 IXGBE_SRRCTL_BSIZEHDR_MASK;
3930 * TODO: Consider setting the Receive Descriptor Minimum
3931 * Threshold Size for an RSC case. This is not an obviously
3932 * beneficial option but one worth considering...
3935 rscctl |= IXGBE_RSCCTL_RSCEN;
3936 rscctl |= ixgbe_get_rscctl_maxdesc(rxq->mb_pool);
3937 psrtype |= IXGBE_PSRTYPE_TCPHDR;
3940 * RSC: Set ITR interval corresponding to 2K ints/s.
3942 * Full-sized RSC aggregations for a 10Gb/s link will
3943 * arrive at about 20K aggregation/s rate.
3945 * A 2K ints/s rate will cause only 10% of the
3946 * aggregations to be closed due to interrupt timer
3947 * expiration when streaming at wire speed.
3949 * For a sparse streaming case this setting will yield
3950 * at most 500us latency for a single RSC aggregation.
3952 eitr &= ~IXGBE_EITR_ITR_INT_MASK;
3953 eitr |= IXGBE_EITR_INTERVAL_US(500) | IXGBE_EITR_CNT_WDIS;
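/*
 * A 500 us minimum inter-interrupt interval corresponds to the
 * ~2K ints/s rate discussed above.
 */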
3955 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
3956 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxq->reg_idx), rscctl);
3957 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
3958 IXGBE_WRITE_REG(hw, IXGBE_EITR(rxq->reg_idx), eitr);
3961 * RSC requires the mapping of the queue to the
3962 * interrupt vector.
3963 */
3964 ixgbe_set_ivar(dev, rxq->reg_idx, i, 0);
3969 PMD_INIT_LOG(INFO, "enabling LRO mode");
3975 * Initializes Receive Unit.
3977 int __attribute__((cold))
3978 ixgbe_dev_rx_init(struct rte_eth_dev *dev)
3980 struct ixgbe_hw *hw;
3981 struct ixgbe_rx_queue *rxq;
3992 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
3995 PMD_INIT_FUNC_TRACE();
3996 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3999 * Make sure receives are disabled while setting
4000 * up the RX context (registers, descriptor rings, etc.).
4002 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4003 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
4005 /* Enable receipt of broadcast frames */
4006 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4007 fctrl |= IXGBE_FCTRL_BAM;
4008 fctrl |= IXGBE_FCTRL_DPF;
4009 fctrl |= IXGBE_FCTRL_PMCF;
4010 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4013 * Configure CRC stripping, if any.
4015 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4016 if (rx_conf->hw_strip_crc)
4017 hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
4019 hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
4022 * Configure jumbo frame support, if any.
4024 if (rx_conf->jumbo_frame == 1) {
4025 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
4026 maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
4027 maxfrs &= 0x0000FFFF;
4028 maxfrs |= (rx_conf->max_rx_pkt_len << 16);
4029 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
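/*
 * MAXFRS holds the maximum frame size in its upper 16 bits;
 * e.g. an illustrative max_rx_pkt_len of 9000 is written as
 * 9000 << 16.
 */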
4031 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
4034 * If loopback mode is configured for 82599, set LPBK bit.
4036 if (hw->mac.type == ixgbe_mac_82599EB &&
4037 dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
4038 hlreg0 |= IXGBE_HLREG0_LPBK;
4040 hlreg0 &= ~IXGBE_HLREG0_LPBK;
4042 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4044 /* Setup RX queues */
4045 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4046 rxq = dev->data->rx_queues[i];
4049 * Reset crc_len in case it was changed after queue setup by a
4050 * call to configure.
4052 rxq->crc_len = rx_conf->hw_strip_crc ? 0 : ETHER_CRC_LEN;
4054 /* Setup the Base and Length of the Rx Descriptor Rings */
4055 bus_addr = rxq->rx_ring_phys_addr;
4056 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rxq->reg_idx),
4057 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4058 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rxq->reg_idx),
4059 (uint32_t)(bus_addr >> 32));
4060 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rxq->reg_idx),
4061 rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
4062 IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
4063 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), 0);
4065 /* Configure the SRRCTL register */
4066 #ifdef RTE_HEADER_SPLIT_ENABLE
4068 * Configure Header Split
4070 if (rx_conf->header_split) {
4071 if (hw->mac.type == ixgbe_mac_82599EB) {
4072 /* Must setup the PSRTYPE register */
4074 psrtype = IXGBE_PSRTYPE_TCPHDR |
4075 IXGBE_PSRTYPE_UDPHDR |
4076 IXGBE_PSRTYPE_IPV4HDR |
4077 IXGBE_PSRTYPE_IPV6HDR;
4078 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
4080 srrctl = ((rx_conf->split_hdr_size <<
4081 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
4082 IXGBE_SRRCTL_BSIZEHDR_MASK);
4083 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
4086 srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
4088 /* Set if packets are dropped when no descriptors available */
4090 srrctl |= IXGBE_SRRCTL_DROP_EN;
4093 * Configure the RX buffer size in the BSIZEPACKET field of
4094 * the SRRCTL register of the queue.
4095 * The value is in 1 KB resolution. Valid values can be from
4098 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
4099 RTE_PKTMBUF_HEADROOM);
4100 srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
4101 IXGBE_SRRCTL_BSIZEPKT_MASK);
4103 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
4105 buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
4106 IXGBE_SRRCTL_BSIZEPKT_SHIFT);
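/*
 * E.g. a 2048-byte data room minus the 128-byte headroom gives
 * 1920 bytes, which truncates to BSIZEPKT = 1, so the effective
 * buf_size recomputed above is 1024 bytes and larger frames fall
 * back to scattered RX below.
 */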
4108 /* It adds dual VLAN length for supporting dual VLAN */
4109 if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
4110 2 * IXGBE_VLAN_TAG_SIZE > buf_size)
4111 dev->data->scattered_rx = 1;
4114 if (rx_conf->enable_scatter)
4115 dev->data->scattered_rx = 1;
4118 * Device configured with multiple RX queues.
4120 ixgbe_dev_mq_rx_configure(dev);
4123 * Setup the Checksum Register.
4124 * Disable Full-Packet Checksum which is mutually exclusive with RSS.
4125 * Enable IP/L4 checksum computation by hardware if requested to do so.
4126 */
4127 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
4128 rxcsum |= IXGBE_RXCSUM_PCSD;
4129 if (rx_conf->hw_ip_checksum)
4130 rxcsum |= IXGBE_RXCSUM_IPPCSE;
4132 rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
4134 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
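	/*
	 * Note (added for clarity, hedged against the 82599/X540 datasheets):
	 * on these MACs the CRC strip policy must also be mirrored into
	 * RDRXCTL, and software is expected to write RDRXCTL.RSCFRSTSIZE as
	 * zero before receive-side coalescing is enabled, which is why the
	 * field is cleared below.
	 */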
	if (hw->mac.type == ixgbe_mac_82599EB ||
	    hw->mac.type == ixgbe_mac_X540) {
		rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
		if (rx_conf->hw_strip_crc)
			rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
		else
			rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
		rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
		IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
	}

	rc = ixgbe_set_rsc(dev);
	if (rc)
		return rc;

	ixgbe_set_rx_function(dev);

	return 0;
}
/*
 * Initializes Transmit Unit.
 */
void __attribute__((cold))
ixgbe_dev_tx_init(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw;
	struct ixgbe_tx_queue *txq;
	uint64_t bus_addr;
	uint32_t hlreg0;
	uint32_t txctrl;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();
	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Enable TX CRC (checksum offload requirement) and hw padding
	 * (TSO requirement) */
	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	hlreg0 |= (IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
	/* Setup the Base and Length of the Tx Descriptor Rings */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];

		bus_addr = txq->tx_ring_phys_addr;
		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(txq->reg_idx),
				(uint32_t)(bus_addr & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(txq->reg_idx),
				(uint32_t)(bus_addr >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(txq->reg_idx),
				txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
		/* Setup the HW Tx Head and TX Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
		/*
		 * Disable Tx Head Writeback RO bit, since this hoses
		 * bookkeeping if things aren't delivered in order.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw,
					IXGBE_DCA_TXCTRL(txq->reg_idx));
			txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx),
					txctrl);
			break;

		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
		default:
			txctrl = IXGBE_READ_REG(hw,
					IXGBE_DCA_TXCTRL_82599(txq->reg_idx));
			txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
			IXGBE_WRITE_REG(hw,
					IXGBE_DCA_TXCTRL_82599(txq->reg_idx),
					txctrl);
			break;
		}
	}

	/* Device configured with multiple TX queues. */
	ixgbe_dev_mq_tx_configure(dev);
}
/*
 * Set up link for 82599 loopback mode Tx->Rx.
 */
static inline void __attribute__((cold))
ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
{
	PMD_INIT_FUNC_TRACE();

	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM) !=
				IXGBE_SUCCESS) {
			PMD_INIT_LOG(ERR, "Could not enable loopback mode");
			/* ignore error */
			return;
		}
	}

	/* Restart link */
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
			IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU);
	ixgbe_reset_pipeline_82599(hw);

	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
	msec_delay(50);
}
/*
 * Start Transmit and Receive Units.
 */
int __attribute__((cold))
ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw;
	struct ixgbe_tx_queue *txq;
	struct ixgbe_rx_queue *rxq;
	uint32_t txdctl, dmatxctl, rxctrl;
	uint16_t i;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();
	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		/* Setup Transmit Threshold Registers */
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
		txdctl |= txq->pthresh & 0x7F;
		txdctl |= ((txq->hthresh & 0x7F) << 8);
		txdctl |= ((txq->wthresh & 0x7F) << 16);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
	}
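	/*
	 * Layout note (illustrative, per the 82599 TXDCTL definition): the
	 * prefetch, host and write-back thresholds share one register, with
	 * PTHRESH in bits [6:0], HTHRESH in bits [14:8] and WTHRESH in bits
	 * [22:16] -- hence the 0x7F masks and the 8- and 16-bit shifts above.
	 */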
	if (hw->mac.type != ixgbe_mac_82598EB) {
		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (!txq->tx_deferred_start) {
			ret = ixgbe_dev_tx_queue_start(dev, i);
			if (ret < 0)
				return ret;
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (!rxq->rx_deferred_start) {
			ret = ixgbe_dev_rx_queue_start(dev, i);
			if (ret < 0)
				return ret;
		}
	}

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	hw->mac.ops.enable_rx_dma(hw, rxctrl);

	/* If loopback mode is enabled for 82599, set up the link accordingly */
	if (hw->mac.type == ixgbe_mac_82599EB &&
			dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
		ixgbe_setup_loopback_link_82599(hw);

	return 0;
}
/*
 * Start Receive Units for specified queue.
 */
int __attribute__((cold))
ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct ixgbe_hw *hw;
	struct ixgbe_rx_queue *rxq;
	uint32_t rxdctl;
	int poll_ms;

	PMD_INIT_FUNC_TRACE();
	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (rx_queue_id < dev->data->nb_rx_queues) {
		rxq = dev->data->rx_queues[rx_queue_id];

		/* Allocate buffers for descriptor rings */
		if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
			PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
				     rx_queue_id);
			return -1;
		}
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);

		/* Wait until RX Enable ready */
		poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
		do {
			rte_delay_ms(1);
			rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
		} while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
		if (!poll_ms)
			PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d",
				     rx_queue_id);
		rte_wmb();
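		/*
		 * Note (added for clarity): the writes below start the head
		 * pointer at 0 and place the tail on the last descriptor,
		 * handing every descriptor but one to hardware; leaving
		 * RDH == RDT would instead mean "ring empty" to the device.
		 */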
		IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx),
				rxq->nb_rx_desc - 1);
	} else
		return -1;

	return 0;
}

/*
 * Stop Receive Units for specified queue.
 */
int __attribute__((cold))
ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct ixgbe_hw *hw;
	struct ixgbe_adapter *adapter =
		(struct ixgbe_adapter *)dev->data->dev_private;
	struct ixgbe_rx_queue *rxq;
	uint32_t rxdctl;
	int poll_ms;

	PMD_INIT_FUNC_TRACE();
	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (rx_queue_id < dev->data->nb_rx_queues) {
		rxq = dev->data->rx_queues[rx_queue_id];

		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);

		/* Wait until the RX Enable bit clears */
		poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
		do {
			rte_delay_ms(1);
			rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
		} while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE));
		if (!poll_ms)
			PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d",
				     rx_queue_id);

		rte_delay_us(RTE_IXGBE_WAIT_100_US);

		ixgbe_rx_queue_release_mbufs(rxq);
		ixgbe_reset_rx_queue(adapter, rxq);
	} else
		return -1;

	return 0;
}
/*
 * Start Transmit Units for specified queue.
 */
int __attribute__((cold))
ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct ixgbe_hw *hw;
	struct ixgbe_tx_queue *txq;
	uint32_t txdctl;
	int poll_ms;

	PMD_INIT_FUNC_TRACE();
	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (tx_queue_id < dev->data->nb_tx_queues) {
		txq = dev->data->tx_queues[tx_queue_id];
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);

		/* Wait until TX Enable ready */
		if (hw->mac.type == ixgbe_mac_82599EB) {
			poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
			do {
				rte_delay_ms(1);
				txdctl = IXGBE_READ_REG(hw,
						IXGBE_TXDCTL(txq->reg_idx));
			} while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
			if (!poll_ms)
				PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d",
					     tx_queue_id);
		}
		rte_wmb();
		IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
	} else
		return -1;

	return 0;
}
/*
 * Stop Transmit Units for specified queue.
 */
int __attribute__((cold))
ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct ixgbe_hw *hw;
	struct ixgbe_tx_queue *txq;
	uint32_t txdctl;
	uint32_t txtdh, txtdt;
	int poll_ms;

	PMD_INIT_FUNC_TRACE();
	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (tx_queue_id < dev->data->nb_tx_queues) {
		txq = dev->data->tx_queues[tx_queue_id];

		/* Wait until TX queue is empty */
		if (hw->mac.type == ixgbe_mac_82599EB) {
			poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
			do {
				rte_delay_us(RTE_IXGBE_WAIT_100_US);
				txtdh = IXGBE_READ_REG(hw,
						IXGBE_TDH(txq->reg_idx));
				txtdt = IXGBE_READ_REG(hw,
						IXGBE_TDT(txq->reg_idx));
			} while (--poll_ms && (txtdh != txtdt));
			if (!poll_ms)
				PMD_INIT_LOG(ERR,
					"Tx Queue %d is not empty when stopping.",
					tx_queue_id);
		}

		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
		txdctl &= ~IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);

		/* Wait until the TX Enable bit clears */
		if (hw->mac.type == ixgbe_mac_82599EB) {
			poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
			do {
				rte_delay_ms(1);
				txdctl = IXGBE_READ_REG(hw,
						IXGBE_TXDCTL(txq->reg_idx));
			} while (--poll_ms && (txdctl & IXGBE_TXDCTL_ENABLE));
			if (!poll_ms)
				PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d",
					     tx_queue_id);
		}

		if (txq->ops != NULL) {
			txq->ops->release_mbufs(txq);
			txq->ops->reset(txq);
		}
	} else
		return -1;

	return 0;
}
/*
 * [VF] Initializes Receive Unit.
 */
int __attribute__((cold))
ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw;
	struct ixgbe_rx_queue *rxq;
	uint64_t bus_addr;
	uint32_t srrctl, psrtype = 0;
	uint16_t buf_size;
	uint16_t i;
	int ret;

	PMD_INIT_FUNC_TRACE();
	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (rte_is_power_of_2(dev->data->nb_rx_queues) == 0) {
		PMD_INIT_LOG(ERR, "The number of Rx queues is invalid; "
			     "it must be a power of 2");
		return -1;
	}

	if (dev->data->nb_rx_queues > hw->mac.max_rx_queues) {
		PMD_INIT_LOG(ERR, "The number of Rx queues is invalid; "
			     "it must be less than or equal to %d",
			     hw->mac.max_rx_queues);
		return -1;
	}
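	/*
	 * Illustrative note: these two checks reject configurations such as
	 * nb_rx_queues == 3 up front; the VF RSS queue-per-pool encoding
	 * written into PSRTYPE below assumes a power-of-2 queue count.
	 */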
	/*
	 * When the VF driver issues a IXGBE_VF_RESET request, the PF driver
	 * disables VF packet reception if the PF MTU is > 1500.
	 * This is done to deal with a 82599 limitation that imposes
	 * the same MTU on the PF and all of its VFs.
	 * The PF driver re-enables VF packet reception only when the VF
	 * driver issues a IXGBE_VF_SET_LPE request.
	 * In the meantime, the VF device cannot be used, even if the VF
	 * driver and the Guest VM network stack are ready to accept packets
	 * with a size up to the PF MTU.
	 * As a workaround to this PF behaviour, force the call to
	 * ixgbevf_rlpml_set_vf even if jumbo frames are not used. This way,
	 * VF packet reception works in all cases.
	 */
	ixgbevf_rlpml_set_vf(hw,
		(uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
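	/*
	 * Note (added for clarity): ixgbevf_rlpml_set_vf() sends an
	 * IXGBE_VF_SET_LPE mailbox message to the PF carrying the maximum
	 * receive packet length, which is what re-arms VF reception as
	 * described in the workaround above.
	 */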
	/* Setup RX queues */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];

		/* Allocate buffers for descriptor rings */
		ret = ixgbe_alloc_rx_queue_mbufs(rxq);
		if (ret)
			return ret;

		/* Setup the Base and Length of the Rx Descriptor Rings */
		bus_addr = rxq->rx_ring_phys_addr;

		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
				(uint32_t)(bus_addr & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
				(uint32_t)(bus_addr >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
				rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0);
		/* Configure the SRRCTL register */
#ifdef RTE_HEADER_SPLIT_ENABLE
		/*
		 * Configure Header Split
		 */
		if (dev->data->dev_conf.rxmode.header_split) {
			srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
				   IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
				  IXGBE_SRRCTL_BSIZEHDR_MASK);
			srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
		} else
#endif
			srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/* Set if packets are dropped when no descriptors available */
		if (rxq->drop_en)
			srrctl |= IXGBE_SRRCTL_DROP_EN;

		/*
		 * Configure the RX buffer size in the BSIZEPACKET field of
		 * the SRRCTL register of the queue.
		 * The value is in 1 KB resolution. Valid values can be from
		 * 1 KB to 16 KB.
		 */
		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
				      RTE_PKTMBUF_HEADROOM);
		srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
			   IXGBE_SRRCTL_BSIZEPKT_MASK);

		/*
		 * VF modification to write virtual function SRRCTL register
		 */
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), srrctl);

		buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
				       IXGBE_SRRCTL_BSIZEPKT_SHIFT);

		if (dev->data->dev_conf.rxmode.enable_scatter ||
		    /* Add dual VLAN (QinQ) tag length on top of the maximum
		     * packet length when checking the buffer size. */
		    (dev->data->dev_conf.rxmode.max_rx_pkt_len +
				2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
			if (!dev->data->scattered_rx)
				PMD_INIT_LOG(DEBUG, "forcing scatter mode");
			dev->data->scattered_rx = 1;
		}
	}
#ifdef RTE_HEADER_SPLIT_ENABLE
	if (dev->data->dev_conf.rxmode.header_split)
		/* Must setup the PSRTYPE register */
		psrtype = IXGBE_PSRTYPE_TCPHDR |
			IXGBE_PSRTYPE_UDPHDR |
			IXGBE_PSRTYPE_IPV4HDR |
			IXGBE_PSRTYPE_IPV6HDR;
#endif

	/* Set RQPL for VF RSS according to max Rx queue */
	psrtype |= (dev->data->nb_rx_queues >> 1) <<
		IXGBE_PSRTYPE_RQPL_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
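	/*
	 * Note (added for clarity, hedged against the 82599 datasheet): the
	 * RQPL field of PSRTYPE selects the number of RSS queues per pool;
	 * with the shift above, 2 Rx queues write RQPL = 1 and 4 Rx queues
	 * write RQPL = 2, matching the power-of-2 counts accepted earlier.
	 */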
	ixgbe_set_rx_function(dev);

	return 0;
}
/*
 * [VF] Initializes Transmit Unit.
 */
void __attribute__((cold))
ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw;
	struct ixgbe_tx_queue *txq;
	uint64_t bus_addr;
	uint32_t txctrl;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();
	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Setup the Base and Length of the Tx Descriptor Rings */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		bus_addr = txq->tx_ring_phys_addr;
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
				(uint32_t)(bus_addr & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i),
				(uint32_t)(bus_addr >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
				txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
		/* Setup the HW Tx Head and TX Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);

		/*
		 * Disable Tx Head Writeback RO bit, since this hoses
		 * bookkeeping if things aren't delivered in order.
		 */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
	}
}
/*
 * [VF] Start Transmit and Receive Units.
 */
void __attribute__((cold))
ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw;
	struct ixgbe_tx_queue *txq;
	struct ixgbe_rx_queue *rxq;
	uint32_t txdctl;
	uint32_t rxdctl;
	uint16_t i;
	int poll_ms;

	PMD_INIT_FUNC_TRACE();
	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		/* Setup Transmit Threshold Registers */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= txq->pthresh & 0x7F;
		txdctl |= ((txq->hthresh & 0x7F) << 8);
		txdctl |= ((txq->wthresh & 0x7F) << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);

		/* Wait until TX Enable ready */
		poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
		do {
			rte_delay_ms(1);
			txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		} while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
		if (!poll_ms)
			PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];

		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);

		/* Wait until RX Enable ready */
		poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
		do {
			rte_delay_ms(1);
			rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		} while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
		if (!poll_ms)
			PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i);
		rte_wmb();
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1);
	}
}
/* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */
int __attribute__((weak))
ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
{
	return -1;
}

uint16_t __attribute__((weak))
ixgbe_recv_pkts_vec(
	void __rte_unused *rx_queue,
	struct rte_mbuf __rte_unused **rx_pkts,
	uint16_t __rte_unused nb_pkts)
{
	return 0;
}

uint16_t __attribute__((weak))
ixgbe_recv_scattered_pkts_vec(
	void __rte_unused *rx_queue,
	struct rte_mbuf __rte_unused **rx_pkts,
	uint16_t __rte_unused nb_pkts)
{
	return 0;
}

int __attribute__((weak))
ixgbe_rxq_vec_setup(struct ixgbe_rx_queue __rte_unused *rxq)
{
	return -1;
}