4 * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5 * Copyright 2014 6WIND S.A.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * * Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * * Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
18 * * Neither the name of Intel Corporation nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 #include <sys/queue.h>
46 #include <rte_byteorder.h>
47 #include <rte_common.h>
48 #include <rte_cycles.h>
50 #include <rte_debug.h>
51 #include <rte_interrupts.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_launch.h>
57 #include <rte_per_lcore.h>
58 #include <rte_lcore.h>
59 #include <rte_atomic.h>
60 #include <rte_branch_prediction.h>
62 #include <rte_mempool.h>
63 #include <rte_malloc.h>
65 #include <rte_ether.h>
66 #include <rte_ethdev.h>
67 #include <rte_prefetch.h>
71 #include <rte_string_fns.h>
72 #include <rte_errno.h>
75 #include "ixgbe_logs.h"
76 #include "base/ixgbe_api.h"
77 #include "base/ixgbe_vf.h"
78 #include "ixgbe_ethdev.h"
79 #include "base/ixgbe_dcb.h"
80 #include "base/ixgbe_common.h"
81 #include "ixgbe_rxtx.h"
83 /* Bit mask to indicate which bits are required for building the TX context */
84 #define IXGBE_TX_OFFLOAD_MASK ( \
90 static inline struct rte_mbuf *
91 rte_rxmbuf_alloc(struct rte_mempool *mp)
95 m = __rte_mbuf_raw_alloc(mp);
96 __rte_mbuf_sanity_check_raw(m, 0);
102 #define RTE_PMD_USE_PREFETCH
105 #ifdef RTE_PMD_USE_PREFETCH
107 * Prefetch a cache line into all cache levels.
109 #define rte_ixgbe_prefetch(p) rte_prefetch0(p)
111 #define rte_ixgbe_prefetch(p) do {} while(0)
114 /*********************************************************************
118 **********************************************************************/
121 * Check for descriptors with their DD bit set and free mbufs.
122 * Return the total number of buffers freed.
124 static inline int __attribute__((always_inline))
125 ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
127 struct ixgbe_tx_entry *txep;
131 /* check DD bit on threshold descriptor */
132 status = txq->tx_ring[txq->tx_next_dd].wb.status;
133 if (!(status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD)))
137 * first buffer to free from S/W ring is at index
138 * tx_next_dd - (tx_rs_thresh-1)
140 txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]);
142 /* free buffers one at a time */
143 if ((txq->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOREFCOUNT) != 0) {
144 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
145 txep->mbuf->next = NULL;
146 rte_mempool_put(txep->mbuf->pool, txep->mbuf);
150 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
151 rte_pktmbuf_free_seg(txep->mbuf);
156 /* buffers were freed, update counters */
157 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
158 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
159 if (txq->tx_next_dd >= txq->nb_tx_desc)
160 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
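/*
 * Illustrative example (added note): with nb_tx_desc = 512 and
 * tx_rs_thresh = 32, tx_next_dd advances 31 -> 63 -> ... -> 511 and then
 * wraps back to 31 on the next pass.
 */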
162 return txq->tx_rs_thresh;
165 /* Populate 4 descriptors with data from 4 mbufs */
167 tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
169 uint64_t buf_dma_addr;
173 for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
174 buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
175 pkt_len = (*pkts)->data_len;
177 /* write data to descriptor */
178 txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
180 txdp->read.cmd_type_len =
181 rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
183 txdp->read.olinfo_status =
184 rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
186 rte_prefetch0(&(*pkts)->pool);
190 /* Populate 1 descriptor with data from 1 mbuf */
192 tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
194 uint64_t buf_dma_addr;
197 buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
198 pkt_len = (*pkts)->data_len;
200 /* write data to descriptor */
201 txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
202 txdp->read.cmd_type_len =
203 rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
204 txdp->read.olinfo_status =
205 rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
206 rte_prefetch0(&(*pkts)->pool);
210 * Fill H/W descriptor ring with mbuf data.
211 * Copy mbuf pointers to the S/W ring.
214 ixgbe_tx_fill_hw_ring(struct ixgbe_tx_queue *txq, struct rte_mbuf **pkts,
217 volatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
218 struct ixgbe_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
219 const int N_PER_LOOP = 4;
220 const int N_PER_LOOP_MASK = N_PER_LOOP-1;
221 int mainpart, leftover;
225 * Process most of the packets in chunks of N pkts. Any
226 * leftover packets will get processed one at a time.
228 mainpart = (nb_pkts & ((uint32_t) ~N_PER_LOOP_MASK));
229 leftover = (nb_pkts & ((uint32_t) N_PER_LOOP_MASK));
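/*
 * Worked example (illustrative): for nb_pkts = 33, mainpart = 33 & ~3 = 32
 * and leftover = 33 & 3 = 1, so eight tx4() calls handle 32 packets and a
 * single tx1() call handles the last one.
 */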
230 for (i = 0; i < mainpart; i += N_PER_LOOP) {
231 /* Copy N mbuf pointers to the S/W ring */
232 for (j = 0; j < N_PER_LOOP; ++j) {
233 (txep + i + j)->mbuf = *(pkts + i + j);
235 tx4(txdp + i, pkts + i);
238 if (unlikely(leftover > 0)) {
239 for (i = 0; i < leftover; ++i) {
240 (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
241 tx1(txdp + mainpart + i, pkts + mainpart + i);
246 static inline uint16_t
247 tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
250 struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
251 volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring;
255 * Begin scanning the H/W ring for done descriptors when the
256 * number of available descriptors drops below tx_free_thresh. For
257 * each done descriptor, free the associated buffer.
259 if (txq->nb_tx_free < txq->tx_free_thresh)
260 ixgbe_tx_free_bufs(txq);
262 /* Only use descriptors that are available */
263 nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
264 if (unlikely(nb_pkts == 0))
267 /* Use exactly nb_pkts descriptors */
268 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
271 * At this point, we know there are enough descriptors in the
272 * ring to transmit all the packets. This assumes that each
273 * mbuf contains a single segment, and that no new offloads
274 * are expected, which would require a new context descriptor.
278 * See if we're going to wrap-around. If so, handle the top
279 * of the descriptor ring first, then do the bottom. If not,
280 * the processing looks just like the "bottom" part anyway...
282 if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
283 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
284 ixgbe_tx_fill_hw_ring(txq, tx_pkts, n);
287 * We know that the last descriptor in the ring will need to
288 * have its RS bit set because tx_rs_thresh has to be
289 * a divisor of the ring size
291 tx_r[txq->tx_next_rs].read.cmd_type_len |=
292 rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
293 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
298 /* Fill H/W descriptor ring with mbuf data */
299 ixgbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
300 txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
303 * Determine if RS bit should be set
304 * This is what we actually want:
305 * if ((txq->tx_tail - 1) >= txq->tx_next_rs)
306 * but instead of subtracting 1 and doing >=, we can just do
307 * greater than without subtracting.
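 *
 * Illustrative example (added note): with the default tx_rs_thresh of 32,
 * tx_next_rs starts at 31; once tx_tail advances past it, the RS bit is set
 * on descriptor 31 and tx_next_rs then moves forward by tx_rs_thresh to 63.
 */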
309 if (txq->tx_tail > txq->tx_next_rs) {
310 tx_r[txq->tx_next_rs].read.cmd_type_len |=
311 rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
312 txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
314 if (txq->tx_next_rs >= txq->nb_tx_desc)
315 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
319 * Check for wrap-around. This would only happen if we used
320 * up to the last descriptor in the ring, no more, no less.
322 if (txq->tx_tail >= txq->nb_tx_desc)
325 /* update tail pointer */
327 IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);
333 ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
338 /* Try to transmit at least chunks of TX_MAX_BURST pkts */
339 if (likely(nb_pkts <= RTE_PMD_IXGBE_TX_MAX_BURST))
340 return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
342 /* transmit more than the max burst, in chunks of TX_MAX_BURST */
346 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_TX_MAX_BURST);
347 ret = tx_xmit_pkts(tx_queue, &(tx_pkts[nb_tx]), n);
348 nb_tx = (uint16_t)(nb_tx + ret);
349 nb_pkts = (uint16_t)(nb_pkts - ret);
358 ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
359 volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
360 uint64_t ol_flags, union ixgbe_tx_offload tx_offload)
362 uint32_t type_tucmd_mlhl;
363 uint32_t mss_l4len_idx = 0;
365 uint32_t vlan_macip_lens;
366 union ixgbe_tx_offload tx_offload_mask;
368 ctx_idx = txq->ctx_curr;
369 tx_offload_mask.data = 0;
372 /* Specify which HW CTX to upload. */
373 mss_l4len_idx |= (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT);
375 if (ol_flags & PKT_TX_VLAN_PKT) {
376 tx_offload_mask.vlan_tci |= ~0;
379 /* check if TCP segmentation is required for this packet */
380 if (ol_flags & PKT_TX_TCP_SEG) {
381 /* implies IP cksum in IPv4 */
382 if (ol_flags & PKT_TX_IP_CKSUM)
383 type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4 |
384 IXGBE_ADVTXD_TUCMD_L4T_TCP |
385 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
387 type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV6 |
388 IXGBE_ADVTXD_TUCMD_L4T_TCP |
389 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
391 tx_offload_mask.l2_len |= ~0;
392 tx_offload_mask.l3_len |= ~0;
393 tx_offload_mask.l4_len |= ~0;
394 tx_offload_mask.tso_segsz |= ~0;
395 mss_l4len_idx |= tx_offload.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT;
396 mss_l4len_idx |= tx_offload.l4_len << IXGBE_ADVTXD_L4LEN_SHIFT;
397 } else { /* no TSO, check if hardware checksum is needed */
398 if (ol_flags & PKT_TX_IP_CKSUM) {
399 type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4;
400 tx_offload_mask.l2_len |= ~0;
401 tx_offload_mask.l3_len |= ~0;
404 switch (ol_flags & PKT_TX_L4_MASK) {
405 case PKT_TX_UDP_CKSUM:
406 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP |
407 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
408 mss_l4len_idx |= sizeof(struct udp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
409 tx_offload_mask.l2_len |= ~0;
410 tx_offload_mask.l3_len |= ~0;
412 case PKT_TX_TCP_CKSUM:
413 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP |
414 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
415 mss_l4len_idx |= sizeof(struct tcp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
416 tx_offload_mask.l2_len |= ~0;
417 tx_offload_mask.l3_len |= ~0;
419 case PKT_TX_SCTP_CKSUM:
420 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP |
421 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
422 mss_l4len_idx |= sizeof(struct sctp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
423 tx_offload_mask.l2_len |= ~0;
424 tx_offload_mask.l3_len |= ~0;
427 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_RSV |
428 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
433 txq->ctx_cache[ctx_idx].flags = ol_flags;
434 txq->ctx_cache[ctx_idx].tx_offload.data =
435 tx_offload_mask.data & tx_offload.data;
436 txq->ctx_cache[ctx_idx].tx_offload_mask = tx_offload_mask;
438 ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
439 vlan_macip_lens = tx_offload.l3_len;
440 vlan_macip_lens |= (tx_offload.l2_len << IXGBE_ADVTXD_MACLEN_SHIFT);
441 vlan_macip_lens |= ((uint32_t)tx_offload.vlan_tci << IXGBE_ADVTXD_VLAN_SHIFT);
442 ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
443 ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
444 ctx_txd->seqnum_seed = 0;
448 * Check which hardware context can be used. Use the existing match
449 * or create a new context descriptor.
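 *
 * A return value of IXGBE_CTX_NUM means that no cached context matched and
 * that a new context descriptor must be built; the caller checks
 * ctx == IXGBE_CTX_NUM for exactly this purpose.
 */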
451 static inline uint32_t
452 what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
453 union ixgbe_tx_offload tx_offload)
455 /* If it matches the currently used context */
456 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
457 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
458 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
459 return txq->ctx_curr;
462 /* Otherwise, check for a match with the next context */
464 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
465 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
466 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
467 return txq->ctx_curr;
470 /* Mismatch: a new context descriptor has to be built */
471 return (IXGBE_CTX_NUM);
474 static inline uint32_t
475 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
478 if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM)
479 tmp |= IXGBE_ADVTXD_POPTS_TXSM;
480 if (ol_flags & PKT_TX_IP_CKSUM)
481 tmp |= IXGBE_ADVTXD_POPTS_IXSM;
482 if (ol_flags & PKT_TX_TCP_SEG)
483 tmp |= IXGBE_ADVTXD_POPTS_TXSM;
487 static inline uint32_t
488 tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
490 uint32_t cmdtype = 0;
491 if (ol_flags & PKT_TX_VLAN_PKT)
492 cmdtype |= IXGBE_ADVTXD_DCMD_VLE;
493 if (ol_flags & PKT_TX_TCP_SEG)
494 cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
498 /* Default RS bit threshold values */
499 #ifndef DEFAULT_TX_RS_THRESH
500 #define DEFAULT_TX_RS_THRESH 32
502 #ifndef DEFAULT_TX_FREE_THRESH
503 #define DEFAULT_TX_FREE_THRESH 32
506 /* Reset transmit descriptors after they have been used */
508 ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
510 struct ixgbe_tx_entry *sw_ring = txq->sw_ring;
511 volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring;
512 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
513 uint16_t nb_tx_desc = txq->nb_tx_desc;
514 uint16_t desc_to_clean_to;
515 uint16_t nb_tx_to_clean;
518 /* Determine the last descriptor needing to be cleaned */
519 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
520 if (desc_to_clean_to >= nb_tx_desc)
521 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
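/*
 * Worked example (illustrative): with nb_tx_desc = 512, tx_rs_thresh = 32
 * and last_desc_cleaned = 500, desc_to_clean_to is first 532 and then
 * wraps to 532 - 512 = 20.
 */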
523 /* Check to make sure the last descriptor to clean is done */
524 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
525 status = txr[desc_to_clean_to].wb.status;
526 if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD)))
528 PMD_TX_FREE_LOG(DEBUG,
529 "TX descriptor %4u is not done"
530 "(port=%d queue=%d)",
532 txq->port_id, txq->queue_id);
533 /* Failed to clean any descriptors, better luck next time */
537 /* Figure out how many descriptors will be cleaned */
538 if (last_desc_cleaned > desc_to_clean_to)
539 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
542 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
545 PMD_TX_FREE_LOG(DEBUG,
546 "Cleaning %4u TX descriptors: %4u to %4u "
547 "(port=%d queue=%d)",
548 nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
549 txq->port_id, txq->queue_id);
552 * The last descriptor to clean is done, so that means all the
553 * descriptors from the last descriptor that was cleaned
554 * up to the last descriptor with the RS bit set
555 * are done. Only reset the threshold descriptor.
557 txr[desc_to_clean_to].wb.status = 0;
559 /* Update the txq to reflect the last descriptor that was cleaned */
560 txq->last_desc_cleaned = desc_to_clean_to;
561 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
568 ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
571 struct ixgbe_tx_queue *txq;
572 struct ixgbe_tx_entry *sw_ring;
573 struct ixgbe_tx_entry *txe, *txn;
574 volatile union ixgbe_adv_tx_desc *txr;
575 volatile union ixgbe_adv_tx_desc *txd;
576 struct rte_mbuf *tx_pkt;
577 struct rte_mbuf *m_seg;
578 uint64_t buf_dma_addr;
579 uint32_t olinfo_status;
580 uint32_t cmd_type_len;
591 union ixgbe_tx_offload tx_offload = {0};
594 sw_ring = txq->sw_ring;
596 tx_id = txq->tx_tail;
597 txe = &sw_ring[tx_id];
599 /* Determine if the descriptor ring needs to be cleaned. */
600 if (txq->nb_tx_free < txq->tx_free_thresh)
601 ixgbe_xmit_cleanup(txq);
603 rte_prefetch0(&txe->mbuf->pool);
606 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
609 pkt_len = tx_pkt->pkt_len;
612 * Determine how many (if any) context descriptors
613 * are needed for offload functionality.
615 ol_flags = tx_pkt->ol_flags;
617 /* If hardware offload required */
618 tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK;
620 tx_offload.l2_len = tx_pkt->l2_len;
621 tx_offload.l3_len = tx_pkt->l3_len;
622 tx_offload.l4_len = tx_pkt->l4_len;
623 tx_offload.vlan_tci = tx_pkt->vlan_tci;
624 tx_offload.tso_segsz = tx_pkt->tso_segsz;
626 /* Decide whether a new context must be built or the existing one can be reused. */
627 ctx = what_advctx_update(txq, tx_ol_req,
629 /* Only allocate a context descriptor if required */
630 new_ctx = (ctx == IXGBE_CTX_NUM);
635 * Keep track of how many descriptors are used this loop
636 * This will always be the number of segments + the number of
637 * Context descriptors required to transmit the packet
639 nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
642 * The number of descriptors that must be allocated for a
643 * packet is the number of segments of that packet, plus 1
644 * Context Descriptor for the hardware offload, if any.
645 * Determine the last TX descriptor to allocate in the TX ring
646 * for the packet, starting from the current position (tx_id)
649 tx_last = (uint16_t) (tx_id + nb_used - 1);
652 if (tx_last >= txq->nb_tx_desc)
653 tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
655 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
656 " tx_first=%u tx_last=%u",
657 (unsigned) txq->port_id,
658 (unsigned) txq->queue_id,
664 * Make sure there are enough TX descriptors available to
665 * transmit the entire packet.
666 * nb_used better be less than or equal to txq->tx_rs_thresh
668 if (nb_used > txq->nb_tx_free) {
669 PMD_TX_FREE_LOG(DEBUG,
670 "Not enough free TX descriptors "
671 "nb_used=%4u nb_free=%4u "
672 "(port=%d queue=%d)",
673 nb_used, txq->nb_tx_free,
674 txq->port_id, txq->queue_id);
676 if (ixgbe_xmit_cleanup(txq) != 0) {
677 /* Could not clean any descriptors */
683 /* nb_used better be <= txq->tx_rs_thresh */
684 if (unlikely(nb_used > txq->tx_rs_thresh)) {
685 PMD_TX_FREE_LOG(DEBUG,
686 "The number of descriptors needed to "
687 "transmit the packet exceeds the "
688 "RS bit threshold. This will impact "
690 "nb_used=%4u nb_free=%4u "
692 "(port=%d queue=%d)",
693 nb_used, txq->nb_tx_free,
695 txq->port_id, txq->queue_id);
697 * Loop here until there are enough TX
698 * descriptors or until the ring cannot be
701 while (nb_used > txq->nb_tx_free) {
702 if (ixgbe_xmit_cleanup(txq) != 0) {
704 * Could not clean any
716 * By now there are enough free TX descriptors to transmit
721 * Set common flags of all TX Data Descriptors.
723 * The following bits must be set in all Data Descriptors:
724 * - IXGBE_ADVTXD_DTYP_DATA
725 * - IXGBE_ADVTXD_DCMD_DEXT
727 * The following bits must be set in the first Data Descriptor
728 * and are ignored in the other ones:
729 * - IXGBE_ADVTXD_DCMD_IFCS
730 * - IXGBE_ADVTXD_MAC_1588
731 * - IXGBE_ADVTXD_DCMD_VLE
733 * The following bits must only be set in the last Data
735 * - IXGBE_TXD_CMD_EOP
737 * The following bits can be set in any Data Descriptor, but
738 * are only set in the last Data Descriptor:
741 cmd_type_len = IXGBE_ADVTXD_DTYP_DATA |
742 IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
744 #ifdef RTE_LIBRTE_IEEE1588
745 if (ol_flags & PKT_TX_IEEE1588_TMST)
746 cmd_type_len |= IXGBE_ADVTXD_MAC_1588;
752 if (ol_flags & PKT_TX_TCP_SEG) {
753 /* when TSO is on, the paylen in the descriptor is
754 * not the packet len but the TCP payload len */
755 pkt_len -= (tx_offload.l2_len +
756 tx_offload.l3_len + tx_offload.l4_len);
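/*
 * Example (added note): a 1514-byte TCP/IPv4 frame with l2_len = 14,
 * l3_len = 20 and l4_len = 20 yields a descriptor payload length of 1460.
 */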
760 * Setup the TX Advanced Context Descriptor if required
763 volatile struct ixgbe_adv_tx_context_desc *
766 ctx_txd = (volatile struct
767 ixgbe_adv_tx_context_desc *)
770 txn = &sw_ring[txe->next_id];
771 rte_prefetch0(&txn->mbuf->pool);
773 if (txe->mbuf != NULL) {
774 rte_pktmbuf_free_seg(txe->mbuf);
778 ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
781 txe->last_id = tx_last;
782 tx_id = txe->next_id;
787 * Setup the TX Advanced Data Descriptor.
788 * This path is taken whether a new context descriptor
789 * was built or an existing one is reused.
791 cmd_type_len |= tx_desc_ol_flags_to_cmdtype(ol_flags);
792 olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
793 olinfo_status |= ctx << IXGBE_ADVTXD_IDX_SHIFT;
796 olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
801 txn = &sw_ring[txe->next_id];
802 rte_prefetch0(&txn->mbuf->pool);
804 if (txe->mbuf != NULL)
805 rte_pktmbuf_free_seg(txe->mbuf);
809 * Set up Transmit Data Descriptor.
811 slen = m_seg->data_len;
812 buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
813 txd->read.buffer_addr =
814 rte_cpu_to_le_64(buf_dma_addr);
815 txd->read.cmd_type_len =
816 rte_cpu_to_le_32(cmd_type_len | slen);
817 txd->read.olinfo_status =
818 rte_cpu_to_le_32(olinfo_status);
819 txe->last_id = tx_last;
820 tx_id = txe->next_id;
823 } while (m_seg != NULL);
826 * The last packet data descriptor needs End Of Packet (EOP)
828 cmd_type_len |= IXGBE_TXD_CMD_EOP;
829 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
830 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
832 /* Set RS bit only on threshold packets' last descriptor */
833 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
834 PMD_TX_FREE_LOG(DEBUG,
835 "Setting RS bit on TXD id="
836 "%4u (port=%d queue=%d)",
837 tx_last, txq->port_id, txq->queue_id);
839 cmd_type_len |= IXGBE_TXD_CMD_RS;
841 /* Update txq RS bit counters */
844 txd->read.cmd_type_len |= rte_cpu_to_le_32(cmd_type_len);
850 * Set the Transmit Descriptor Tail (TDT)
852 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
853 (unsigned) txq->port_id, (unsigned) txq->queue_id,
854 (unsigned) tx_id, (unsigned) nb_tx);
855 IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
856 txq->tx_tail = tx_id;
861 /*********************************************************************
865 **********************************************************************/
866 #define IXGBE_PACKET_TYPE_IPV4 0X01
867 #define IXGBE_PACKET_TYPE_IPV4_TCP 0X11
868 #define IXGBE_PACKET_TYPE_IPV4_UDP 0X21
869 #define IXGBE_PACKET_TYPE_IPV4_SCTP 0X41
870 #define IXGBE_PACKET_TYPE_IPV4_EXT 0X03
871 #define IXGBE_PACKET_TYPE_IPV4_EXT_SCTP 0X43
872 #define IXGBE_PACKET_TYPE_IPV6 0X04
873 #define IXGBE_PACKET_TYPE_IPV6_TCP 0X14
874 #define IXGBE_PACKET_TYPE_IPV6_UDP 0X24
875 #define IXGBE_PACKET_TYPE_IPV6_EXT 0X0C
876 #define IXGBE_PACKET_TYPE_IPV6_EXT_TCP 0X1C
877 #define IXGBE_PACKET_TYPE_IPV6_EXT_UDP 0X2C
878 #define IXGBE_PACKET_TYPE_IPV4_IPV6 0X05
879 #define IXGBE_PACKET_TYPE_IPV4_IPV6_TCP 0X15
880 #define IXGBE_PACKET_TYPE_IPV4_IPV6_UDP 0X25
881 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT 0X0D
882 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
883 #define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
884 #define IXGBE_PACKET_TYPE_MAX 0X80
885 #define IXGBE_PACKET_TYPE_MASK 0X7F
886 #define IXGBE_PACKET_TYPE_SHIFT 0X04
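/*
 * The packet type value extracted from the RX descriptor is shifted right
 * by IXGBE_PACKET_TYPE_SHIFT and masked with IXGBE_PACKET_TYPE_MASK before
 * being used as an index into ptype_table below.
 */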
887 static inline uint32_t
888 ixgbe_rxd_pkt_info_to_pkt_type(uint16_t pkt_info)
890 static const uint32_t
891 ptype_table[IXGBE_PACKET_TYPE_MAX] __rte_cache_aligned = {
892 [IXGBE_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
894 [IXGBE_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
895 RTE_PTYPE_L3_IPV4_EXT,
896 [IXGBE_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
898 [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
899 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
900 RTE_PTYPE_INNER_L3_IPV6,
901 [IXGBE_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
902 RTE_PTYPE_L3_IPV6_EXT,
903 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
904 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
905 RTE_PTYPE_INNER_L3_IPV6_EXT,
906 [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
907 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
908 [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
909 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
910 [IXGBE_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
911 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
912 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
913 [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
914 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
915 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
916 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
917 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
918 [IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
919 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
920 [IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
921 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
922 [IXGBE_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
923 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
924 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
925 [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
926 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
927 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
928 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
929 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
930 [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
931 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
932 [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
933 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
935 if (unlikely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
936 return RTE_PTYPE_UNKNOWN;
938 pkt_info = (pkt_info >> IXGBE_PACKET_TYPE_SHIFT) &
939 IXGBE_PACKET_TYPE_MASK;
941 return ptype_table[pkt_info];
944 static inline uint64_t
945 ixgbe_rxd_pkt_info_to_pkt_flags(uint16_t pkt_info)
947 static uint64_t ip_rss_types_map[16] __rte_cache_aligned = {
948 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
949 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
950 PKT_RX_RSS_HASH, 0, 0, 0,
951 0, 0, 0, PKT_RX_FDIR,
953 #ifdef RTE_LIBRTE_IEEE1588
954 static uint64_t ip_pkt_etqf_map[8] = {
955 0, 0, 0, PKT_RX_IEEE1588_PTP,
959 if (likely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
960 return ip_pkt_etqf_map[(pkt_info >> 4) & 0X07] |
961 ip_rss_types_map[pkt_info & 0XF];
963 return ip_rss_types_map[pkt_info & 0XF];
965 return ip_rss_types_map[pkt_info & 0XF];
969 static inline uint64_t
970 rx_desc_status_to_pkt_flags(uint32_t rx_status)
975 * Check only whether a VLAN is present.
976 * Do not check whether the L3/L4 RX checksum was done by the NIC;
977 * that can be found from the rte_eth_rxmode.hw_ip_checksum flag.
979 pkt_flags = (rx_status & IXGBE_RXD_STAT_VP) ? PKT_RX_VLAN_PKT : 0;
981 #ifdef RTE_LIBRTE_IEEE1588
982 if (rx_status & IXGBE_RXD_STAT_TMST)
983 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
988 static inline uint64_t
989 rx_desc_error_to_pkt_flags(uint32_t rx_status)
992 * Bit 31: IPE, IPv4 checksum error
993 * Bit 30: L4I, L4 integrity error
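 *
 * Example (added note): after shifting by IXGBE_RXDADV_ERR_CKSUM_BIT, bit 0
 * holds the L4 error and bit 1 the IPv4 checksum error, so an index of 3
 * maps to PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD in the table below.
 */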
995 static uint64_t error_to_pkt_flags_map[4] = {
996 0, PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
997 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
999 return error_to_pkt_flags_map[(rx_status >>
1000 IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK];
1004 * LOOK_AHEAD defines how many desc statuses to check beyond the
1005 * current descriptor.
1006 * It must be a pound define for optimal performance.
1007 * Do not change the value of LOOK_AHEAD, as the ixgbe_rx_scan_hw_ring
1008 * function only works with LOOK_AHEAD=8.
1010 #define LOOK_AHEAD 8
1011 #if (LOOK_AHEAD != 8)
1012 #error "PMD IXGBE: LOOK_AHEAD must be 8\n"
1015 ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
1017 volatile union ixgbe_adv_rx_desc *rxdp;
1018 struct ixgbe_rx_entry *rxep;
1019 struct rte_mbuf *mb;
1023 uint32_t s[LOOK_AHEAD];
1024 uint16_t pkt_info[LOOK_AHEAD];
1025 int i, j, nb_rx = 0;
1028 /* get references to current descriptor and S/W ring entry */
1029 rxdp = &rxq->rx_ring[rxq->rx_tail];
1030 rxep = &rxq->sw_ring[rxq->rx_tail];
1032 status = rxdp->wb.upper.status_error;
1033 /* check to make sure there is at least 1 packet to receive */
1034 if (!(status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
1038 * Scan LOOK_AHEAD descriptors at a time to determine which descriptors
1039 * reference packets that are ready to be received.
1041 for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST;
1042 i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD)
1044 /* Read desc statuses backwards to avoid race condition */
1045 for (j = LOOK_AHEAD-1; j >= 0; --j)
1046 s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error);
1048 for (j = LOOK_AHEAD - 1; j >= 0; --j)
1049 pkt_info[j] = rxdp[j].wb.lower.lo_dword.
1052 /* Compute how many status bits were set */
1054 for (j = 0; j < LOOK_AHEAD; ++j)
1055 nb_dd += s[j] & IXGBE_RXDADV_STAT_DD;
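/*
 * IXGBE_RXDADV_STAT_DD is bit 0 of the status word, so summing the masked
 * values gives the number of done descriptors in this LOOK_AHEAD group.
 */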
1059 /* Translate descriptor info to mbuf format */
1060 for (j = 0; j < nb_dd; ++j) {
1062 pkt_len = rte_le_to_cpu_16(rxdp[j].wb.upper.length) -
1064 mb->data_len = pkt_len;
1065 mb->pkt_len = pkt_len;
1066 mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
1068 /* convert descriptor fields to rte mbuf flags */
1069 pkt_flags = rx_desc_status_to_pkt_flags(s[j]);
1070 pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
1072 ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info[j]);
1073 mb->ol_flags = pkt_flags;
1075 ixgbe_rxd_pkt_info_to_pkt_type(pkt_info[j]);
1077 if (likely(pkt_flags & PKT_RX_RSS_HASH))
1078 mb->hash.rss = rte_le_to_cpu_32(
1079 rxdp[j].wb.lower.hi_dword.rss);
1080 else if (pkt_flags & PKT_RX_FDIR) {
1081 mb->hash.fdir.hash = rte_le_to_cpu_16(
1082 rxdp[j].wb.lower.hi_dword.csum_ip.csum) &
1083 IXGBE_ATR_HASH_MASK;
1084 mb->hash.fdir.id = rte_le_to_cpu_16(
1085 rxdp[j].wb.lower.hi_dword.csum_ip.ip_id);
1089 /* Move mbuf pointers from the S/W ring to the stage */
1090 for (j = 0; j < LOOK_AHEAD; ++j) {
1091 rxq->rx_stage[i + j] = rxep[j].mbuf;
1094 /* stop scanning if not all descriptors in this group were done */
1095 if (nb_dd != LOOK_AHEAD)
1099 /* clear software ring entries so we can cleanup correctly */
1100 for (i = 0; i < nb_rx; ++i) {
1101 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1109 ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf)
1111 volatile union ixgbe_adv_rx_desc *rxdp;
1112 struct ixgbe_rx_entry *rxep;
1113 struct rte_mbuf *mb;
1118 /* allocate buffers in bulk directly into the S/W ring */
1119 alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
1120 rxep = &rxq->sw_ring[alloc_idx];
1121 diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
1122 rxq->rx_free_thresh);
1123 if (unlikely(diag != 0))
1126 rxdp = &rxq->rx_ring[alloc_idx];
1127 for (i = 0; i < rxq->rx_free_thresh; ++i) {
1128 /* populate the static rte mbuf fields */
1133 mb->port = rxq->port_id;
1136 rte_mbuf_refcnt_set(mb, 1);
1137 mb->data_off = RTE_PKTMBUF_HEADROOM;
1139 /* populate the descriptors */
1140 dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
1141 rxdp[i].read.hdr_addr = 0;
1142 rxdp[i].read.pkt_addr = dma_addr;
1145 /* update state of internal queue structure */
1146 rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh;
1147 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1148 rxq->rx_free_trigger = rxq->rx_free_thresh - 1;
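/*
 * Illustrative example (added note): with nb_rx_desc = 512 and
 * rx_free_thresh = 32, rx_free_trigger advances 31 -> 63 -> ... -> 511 and
 * then wraps back to 31 (assuming it starts at rx_free_thresh - 1).
 */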
1154 static inline uint16_t
1155 ixgbe_rx_fill_from_stage(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
1158 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1161 /* how many packets are ready to return? */
1162 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1164 /* copy mbuf pointers to the application's packet list */
1165 for (i = 0; i < nb_pkts; ++i)
1166 rx_pkts[i] = stage[i];
1168 /* update internal queue state */
1169 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1170 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1175 static inline uint16_t
1176 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1179 struct ixgbe_rx_queue *rxq = (struct ixgbe_rx_queue *)rx_queue;
1182 /* Any previously recv'd pkts will be returned from the Rx stage */
1183 if (rxq->rx_nb_avail)
1184 return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1186 /* Scan the H/W ring for packets to receive */
1187 nb_rx = (uint16_t)ixgbe_rx_scan_hw_ring(rxq);
1189 /* update internal queue state */
1190 rxq->rx_next_avail = 0;
1191 rxq->rx_nb_avail = nb_rx;
1192 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1194 /* if required, allocate new buffers to replenish descriptors */
1195 if (rxq->rx_tail > rxq->rx_free_trigger) {
1196 uint16_t cur_free_trigger = rxq->rx_free_trigger;
1198 if (ixgbe_rx_alloc_bufs(rxq, true) != 0) {
1200 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1201 "queue_id=%u", (unsigned) rxq->port_id,
1202 (unsigned) rxq->queue_id);
1204 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
1205 rxq->rx_free_thresh;
1208 * Need to rewind any previous receives if we cannot
1209 * allocate new buffers to replenish the old ones.
1211 rxq->rx_nb_avail = 0;
1212 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1213 for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j)
1214 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1219 /* update tail pointer */
1221 IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, cur_free_trigger);
1224 if (rxq->rx_tail >= rxq->nb_rx_desc)
1227 /* received any packets this loop? */
1228 if (rxq->rx_nb_avail)
1229 return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1234 /* split requests into chunks of size RTE_PMD_IXGBE_RX_MAX_BURST */
1236 ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1241 if (unlikely(nb_pkts == 0))
1244 if (likely(nb_pkts <= RTE_PMD_IXGBE_RX_MAX_BURST))
1245 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1247 /* request is relatively large, chunk it up */
1251 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_RX_MAX_BURST);
1252 ret = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1253 nb_rx = (uint16_t)(nb_rx + ret);
1254 nb_pkts = (uint16_t)(nb_pkts - ret);
1263 ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1266 struct ixgbe_rx_queue *rxq;
1267 volatile union ixgbe_adv_rx_desc *rx_ring;
1268 volatile union ixgbe_adv_rx_desc *rxdp;
1269 struct ixgbe_rx_entry *sw_ring;
1270 struct ixgbe_rx_entry *rxe;
1271 struct rte_mbuf *rxm;
1272 struct rte_mbuf *nmb;
1273 union ixgbe_adv_rx_desc rxd;
1286 rx_id = rxq->rx_tail;
1287 rx_ring = rxq->rx_ring;
1288 sw_ring = rxq->sw_ring;
1289 while (nb_rx < nb_pkts) {
1291 * The order of operations here is important as the DD status
1292 * bit must not be read after any other descriptor fields.
1293 * rx_ring and rxdp are pointing to volatile data so the order
1294 * of accesses cannot be reordered by the compiler. If they were
1295 * not volatile, they could be reordered which could lead to
1296 * using invalid descriptor fields when read from rxd.
1298 rxdp = &rx_ring[rx_id];
1299 staterr = rxdp->wb.upper.status_error;
1300 if (!(staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
1307 * If the IXGBE_RXDADV_STAT_EOP flag is not set, the RX packet
1308 * is likely to be invalid and to be dropped by the various
1309 * validation checks performed by the network stack.
1311 * Allocate a new mbuf to replenish the RX ring descriptor.
1312 * If the allocation fails:
1313 * - arrange for that RX descriptor to be the first one
1314 * being parsed the next time the receive function is
1315 * invoked [on the same queue].
1317 * - Stop parsing the RX ring and return immediately.
1319 * This policy does not drop the packet received in the RX
1320 * descriptor for which the allocation of a new mbuf failed.
1321 * Thus, it allows that packet to be retrieved later if
1322 * mbufs have been freed in the meantime.
1323 * As a side effect, holding RX descriptors instead of
1324 * systematically giving them back to the NIC may lead to
1325 * RX ring exhaustion situations.
1326 * However, the NIC can gracefully prevent such situations
1327 * from happening by sending specific "back-pressure" flow control
1328 * frames to its peer(s).
1330 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1331 "ext_err_stat=0x%08x pkt_len=%u",
1332 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1333 (unsigned) rx_id, (unsigned) staterr,
1334 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
1336 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
1338 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1339 "queue_id=%u", (unsigned) rxq->port_id,
1340 (unsigned) rxq->queue_id);
1341 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1346 rxe = &sw_ring[rx_id];
1348 if (rx_id == rxq->nb_rx_desc)
1351 /* Prefetch next mbuf while processing current one. */
1352 rte_ixgbe_prefetch(sw_ring[rx_id].mbuf);
1355 * When next RX descriptor is on a cache-line boundary,
1356 * prefetch the next 4 RX descriptors and the next 8 pointers
1359 if ((rx_id & 0x3) == 0) {
1360 rte_ixgbe_prefetch(&rx_ring[rx_id]);
1361 rte_ixgbe_prefetch(&sw_ring[rx_id]);
1367 rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
1368 rxdp->read.hdr_addr = 0;
1369 rxdp->read.pkt_addr = dma_addr;
1372 * Initialize the returned mbuf.
1373 * 1) setup generic mbuf fields:
1374 * - number of segments,
1377 * - RX port identifier.
1378 * 2) integrate hardware offload data, if any:
1379 * - RSS flag & hash,
1380 * - IP checksum flag,
1381 * - VLAN TCI, if any,
1384 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
1386 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1387 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
1390 rxm->pkt_len = pkt_len;
1391 rxm->data_len = pkt_len;
1392 rxm->port = rxq->port_id;
1394 pkt_info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.hs_rss.
1396 /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
1397 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1399 pkt_flags = rx_desc_status_to_pkt_flags(staterr);
1400 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1401 pkt_flags = pkt_flags |
1402 ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
1403 rxm->ol_flags = pkt_flags;
1404 rxm->packet_type = ixgbe_rxd_pkt_info_to_pkt_type(pkt_info);
1406 if (likely(pkt_flags & PKT_RX_RSS_HASH))
1407 rxm->hash.rss = rte_le_to_cpu_32(
1408 rxd.wb.lower.hi_dword.rss);
1409 else if (pkt_flags & PKT_RX_FDIR) {
1410 rxm->hash.fdir.hash = rte_le_to_cpu_16(
1411 rxd.wb.lower.hi_dword.csum_ip.csum) &
1412 IXGBE_ATR_HASH_MASK;
1413 rxm->hash.fdir.id = rte_le_to_cpu_16(
1414 rxd.wb.lower.hi_dword.csum_ip.ip_id);
1417 * Store the mbuf address into the next entry of the array
1418 * of returned packets.
1420 rx_pkts[nb_rx++] = rxm;
1422 rxq->rx_tail = rx_id;
1425 * If the number of free RX descriptors is greater than the RX free
1426 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1428 * Update the RDT with the value of the last processed RX descriptor
1429 * minus 1, to guarantee that the RDT register is never equal to the
1430 * RDH register, which creates a "full" ring situation from the
1431 * hardware point of view...
1433 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1434 if (nb_hold > rxq->rx_free_thresh) {
1435 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1436 "nb_hold=%u nb_rx=%u",
1437 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1438 (unsigned) rx_id, (unsigned) nb_hold,
1440 rx_id = (uint16_t) ((rx_id == 0) ?
1441 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1442 IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1445 rxq->nb_rx_hold = nb_hold;
1450 * Detect an RSC descriptor.
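 *
 * A non-zero RSC count means the descriptor is part of an RSC (LRO)
 * aggregation; in that case the index of the next buffer comes from the
 * NEXTP field of the descriptor rather than from the next ring position.
 */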
1452 static inline uint32_t
1453 ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
1455 return (rte_le_to_cpu_32(rx->wb.lower.lo_dword.data) &
1456 IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
1460 * ixgbe_fill_cluster_head_buf - fill the first mbuf of the returned packet
1462 * Fill the following info in the HEAD buffer of the Rx cluster:
1463 * - RX port identifier
1464 * - hardware offload data, if any:
1466 * - IP checksum flag
1467 * - VLAN TCI, if any
1469 * @head HEAD of the packet cluster
1470 * @desc HW descriptor to get data from
1471 * @port_id Port ID of the Rx queue
1474 ixgbe_fill_cluster_head_buf(
1475 struct rte_mbuf *head,
1476 union ixgbe_adv_rx_desc *desc,
1483 head->port = port_id;
1485 /* The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
1486 * set in the pkt_flags field.
1488 head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
1489 pkt_info = rte_le_to_cpu_32(desc->wb.lower.lo_dword.hs_rss.pkt_info);
1490 pkt_flags = rx_desc_status_to_pkt_flags(staterr);
1491 pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
1492 pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
1493 head->ol_flags = pkt_flags;
1494 head->packet_type = ixgbe_rxd_pkt_info_to_pkt_type(pkt_info);
1496 if (likely(pkt_flags & PKT_RX_RSS_HASH))
1497 head->hash.rss = rte_le_to_cpu_32(desc->wb.lower.hi_dword.rss);
1498 else if (pkt_flags & PKT_RX_FDIR) {
1499 head->hash.fdir.hash =
1500 rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.csum)
1501 & IXGBE_ATR_HASH_MASK;
1502 head->hash.fdir.id =
1503 rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.ip_id);
1508 * ixgbe_recv_pkts_lro - receive handler for the LRO case.
1510 * @rx_queue Rx queue handle
1511 * @rx_pkts table of received packets
1512 * @nb_pkts size of rx_pkts table
1513 * @bulk_alloc if TRUE, bulk allocation is used for HW ring refilling
1515 * Handles the Rx HW ring completions when RSC feature is configured. Uses an
1516 * additional ring of ixgbe_rsc_entry's that will hold the relevant RSC info.
1518 * We use the same logic as in Linux and in FreeBSD ixgbe drivers:
1519 * 1) When non-EOP RSC completion arrives:
1520 * a) Update the HEAD of the current RSC aggregation cluster with the new
1521 * segment's data length.
1522 * b) Set the "next" pointer of the current segment to point to the segment
1523 * at the NEXTP index.
1524 * c) Pass the HEAD of RSC aggregation cluster on to the next NEXTP entry
1525 * in the sw_rsc_ring.
1526 * 2) When EOP arrives we just update the cluster's total length and offload
1527 * flags and deliver the cluster up to the upper layers. In our case - put it
1528 * in the rx_pkts table.
1530 * Returns the number of received packets/clusters (according to the "bulk
1531 * receive" interface).
1533 static inline uint16_t
1534 ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
1537 struct ixgbe_rx_queue *rxq = rx_queue;
1538 volatile union ixgbe_adv_rx_desc *rx_ring = rxq->rx_ring;
1539 struct ixgbe_rx_entry *sw_ring = rxq->sw_ring;
1540 struct ixgbe_scattered_rx_entry *sw_sc_ring = rxq->sw_sc_ring;
1541 uint16_t rx_id = rxq->rx_tail;
1543 uint16_t nb_hold = rxq->nb_rx_hold;
1544 uint16_t prev_id = rxq->rx_tail;
1546 while (nb_rx < nb_pkts) {
1548 struct ixgbe_rx_entry *rxe;
1549 struct ixgbe_scattered_rx_entry *sc_entry;
1550 struct ixgbe_scattered_rx_entry *next_sc_entry;
1551 struct ixgbe_rx_entry *next_rxe;
1552 struct rte_mbuf *first_seg;
1553 struct rte_mbuf *rxm;
1554 struct rte_mbuf *nmb;
1555 union ixgbe_adv_rx_desc rxd;
1558 volatile union ixgbe_adv_rx_desc *rxdp;
1563 * The code in this whole file uses the volatile pointer to
1564 * ensure the read ordering of the status and the rest of the
1565 * descriptor fields (on the compiler level only!!!). This is so
1566 * UGLY - why not just use the compiler barrier instead? DPDK
1567 * even has the rte_compiler_barrier() for that.
1569 * But most importantly this is just wrong because this doesn't
1570 * ensure memory ordering in a general case at all. For
1571 * instance, DPDK is supposed to work on Power CPUs where
1572 * compiler barrier may just not be enough!
1574 * I tried to write only this function properly to have a
1575 * starting point (as a part of an LRO/RSC series) but the
1576 * compiler cursed at me when I tried to cast away the
1577 * "volatile" from rx_ring (yes, it's volatile too!!!). So, I'm
1578 * keeping it the way it is for now.
1580 * The code in this file is broken in so many other places and
1581 * will just not work on a big endian CPU anyway therefore the
1582 * lines below will have to be revisited together with the rest
1586 * - Get rid of "volatile" crap and let the compiler do its
1588 * - Use the proper memory barrier (rte_rmb()) to ensure the
1589 * memory ordering below.
1591 rxdp = &rx_ring[rx_id];
1592 staterr = rte_le_to_cpu_32(rxdp->wb.upper.status_error);
1594 if (!(staterr & IXGBE_RXDADV_STAT_DD))
1599 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1600 "staterr=0x%x data_len=%u",
1601 rxq->port_id, rxq->queue_id, rx_id, staterr,
1602 rte_le_to_cpu_16(rxd.wb.upper.length));
1605 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
1607 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed "
1608 "port_id=%u queue_id=%u",
1609 rxq->port_id, rxq->queue_id);
1611 rte_eth_devices[rxq->port_id].data->
1612 rx_mbuf_alloc_failed++;
1616 else if (nb_hold > rxq->rx_free_thresh) {
1617 uint16_t next_rdt = rxq->rx_free_trigger;
1619 if (!ixgbe_rx_alloc_bufs(rxq, false)) {
1621 IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr,
1623 nb_hold -= rxq->rx_free_thresh;
1625 PMD_RX_LOG(DEBUG, "RX bulk alloc failed "
1626 "port_id=%u queue_id=%u",
1627 rxq->port_id, rxq->queue_id);
1629 rte_eth_devices[rxq->port_id].data->
1630 rx_mbuf_alloc_failed++;
1636 rxe = &sw_ring[rx_id];
1637 eop = staterr & IXGBE_RXDADV_STAT_EOP;
1639 next_id = rx_id + 1;
1640 if (next_id == rxq->nb_rx_desc)
1643 /* Prefetch next mbuf while processing current one. */
1644 rte_ixgbe_prefetch(sw_ring[next_id].mbuf);
1647 * When next RX descriptor is on a cache-line boundary,
1648 * prefetch the next 4 RX descriptors and the next 4 pointers
1651 if ((next_id & 0x3) == 0) {
1652 rte_ixgbe_prefetch(&rx_ring[next_id]);
1653 rte_ixgbe_prefetch(&sw_ring[next_id]);
1660 rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
1662 * Update RX descriptor with the physical address of the
1663 * new data buffer of the new allocated mbuf.
1667 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1668 rxdp->read.hdr_addr = 0;
1669 rxdp->read.pkt_addr = dma;
1674 * Set data length & data buffer address of mbuf.
1676 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
1677 rxm->data_len = data_len;
1682 * Get next descriptor index:
1683 * - For RSC it's in the NEXTP field.
1684 * - For a scattered packet - it's just a following
1687 if (ixgbe_rsc_count(&rxd))
1689 (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
1690 IXGBE_RXDADV_NEXTP_SHIFT;
1694 next_sc_entry = &sw_sc_ring[nextp_id];
1695 next_rxe = &sw_ring[nextp_id];
1696 rte_ixgbe_prefetch(next_rxe);
1699 sc_entry = &sw_sc_ring[rx_id];
1700 first_seg = sc_entry->fbuf;
1701 sc_entry->fbuf = NULL;
1704 * If this is the first buffer of the received packet,
1705 * set the pointer to the first mbuf of the packet and
1706 * initialize its context.
1707 * Otherwise, update the total length and the number of segments
1708 * of the current scattered packet, and update the pointer to
1709 * the last mbuf of the current packet.
1711 if (first_seg == NULL) {
1713 first_seg->pkt_len = data_len;
1714 first_seg->nb_segs = 1;
1716 first_seg->pkt_len += data_len;
1717 first_seg->nb_segs++;
1724 * If this is not the last buffer of the received packet, update
1725 * the pointer to the first mbuf at the NEXTP entry in the
1726 * sw_sc_ring and continue to parse the RX ring.
1729 rxm->next = next_rxe->mbuf;
1730 next_sc_entry->fbuf = first_seg;
1735 * This is the last buffer of the received packet - return
1736 * the current cluster to the user.
1740 /* Initialize the first mbuf of the returned packet */
1741 ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq->port_id,
1745 * Deal with the case when HW CRC stripping is disabled.
1746 * That can't happen when LRO is enabled, but could still
1747 * happen for scattered RX mode.
1749 first_seg->pkt_len -= rxq->crc_len;
1750 if (unlikely(rxm->data_len <= rxq->crc_len)) {
1751 struct rte_mbuf *lp;
1753 for (lp = first_seg; lp->next != rxm; lp = lp->next)
1756 first_seg->nb_segs--;
1757 lp->data_len -= rxq->crc_len - rxm->data_len;
1759 rte_pktmbuf_free_seg(rxm);
1761 rxm->data_len -= rxq->crc_len;
1763 /* Prefetch data of first segment, if configured to do so. */
1764 rte_packet_prefetch((char *)first_seg->buf_addr +
1765 first_seg->data_off);
1768 * Store the mbuf address into the next entry of the array
1769 * of returned packets.
1771 rx_pkts[nb_rx++] = first_seg;
1775 * Record index of the next RX descriptor to probe.
1777 rxq->rx_tail = rx_id;
1780 * If the number of free RX descriptors is greater than the RX free
1781 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1783 * Update the RDT with the value of the last processed RX descriptor
1784 * minus 1, to guarantee that the RDT register is never equal to the
1785 * RDH register, which creates a "full" ring situation from the
1786 * hardware point of view...
1788 if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {
1789 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1790 "nb_hold=%u nb_rx=%u",
1791 rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
1794 IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, prev_id);
1798 rxq->nb_rx_hold = nb_hold;
1803 ixgbe_recv_pkts_lro_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1806 return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, false);
1810 ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1813 return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, true);
1816 /*********************************************************************
1818 * Queue management functions
1820 **********************************************************************/
1823 * Create memzone for HW rings. malloc can't be used as the physical address is
1824 * needed. If the memzone is already created, then this function returns a ptr
1827 static const struct rte_memzone * __attribute__((cold))
1828 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
1829 uint16_t queue_id, uint32_t ring_size, int socket_id)
1831 char z_name[RTE_MEMZONE_NAMESIZE];
1832 const struct rte_memzone *mz;
1834 snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
1835 dev->driver->pci_drv.name, ring_name,
1836 dev->data->port_id, queue_id);
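/*
 * The resulting name is, for example, something like
 * "rte_ixgbe_pmd_tx_ring_0_0" (the exact prefix depends on the registered
 * PCI driver name); this keeps the memzone unique per port and queue.
 */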
1838 mz = rte_memzone_lookup(z_name);
1842 #ifdef RTE_LIBRTE_XEN_DOM0
1843 return rte_memzone_reserve_bounded(z_name, ring_size,
1844 socket_id, 0, IXGBE_ALIGN, RTE_PGSIZE_2M);
1846 return rte_memzone_reserve_aligned(z_name, ring_size,
1847 socket_id, 0, IXGBE_ALIGN);
1851 static void __attribute__((cold))
1852 ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
1856 if (txq->sw_ring != NULL) {
1857 for (i = 0; i < txq->nb_tx_desc; i++) {
1858 if (txq->sw_ring[i].mbuf != NULL) {
1859 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1860 txq->sw_ring[i].mbuf = NULL;
1866 static void __attribute__((cold))
1867 ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
1870 txq->sw_ring != NULL)
1871 rte_free(txq->sw_ring);
1874 static void __attribute__((cold))
1875 ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
1877 if (txq != NULL && txq->ops != NULL) {
1878 txq->ops->release_mbufs(txq);
1879 txq->ops->free_swring(txq);
1884 void __attribute__((cold))
1885 ixgbe_dev_tx_queue_release(void *txq)
1887 ixgbe_tx_queue_release(txq);
1890 /* (Re)set dynamic ixgbe_tx_queue fields to defaults */
1891 static void __attribute__((cold))
1892 ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
1894 static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
1895 struct ixgbe_tx_entry *txe = txq->sw_ring;
1898 /* Zero out HW ring memory */
1899 for (i = 0; i < txq->nb_tx_desc; i++) {
1900 txq->tx_ring[i] = zeroed_desc;
1903 /* Initialize SW ring entries */
1904 prev = (uint16_t) (txq->nb_tx_desc - 1);
1905 for (i = 0; i < txq->nb_tx_desc; i++) {
1906 volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
1907 txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD);
1910 txe[prev].next_id = i;
1914 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
1915 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
1918 txq->nb_tx_used = 0;
1920 * Always allow 1 descriptor to be un-allocated to avoid
1921 * a H/W race condition
1923 txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
1924 txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
1926 memset((void*)&txq->ctx_cache, 0,
1927 IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
1930 static const struct ixgbe_txq_ops def_txq_ops = {
1931 .release_mbufs = ixgbe_tx_queue_release_mbufs,
1932 .free_swring = ixgbe_tx_free_swring,
1933 .reset = ixgbe_reset_tx_queue,
1936 /* Takes an ethdev and a queue and sets up the tx function to be used based on
1937 * the queue parameters. Used in tx_queue_setup by primary process and then
1938 * in dev_init by secondary process when attaching to an existing ethdev.
1940 void __attribute__((cold))
1941 ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
1943 /* Use a simple Tx queue (no offloads, no multi segs) if possible */
1944 if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS)
1945 && (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
1946 PMD_INIT_LOG(DEBUG, "Using simple tx code path");
1947 #ifdef RTE_IXGBE_INC_VECTOR
1948 if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
1949 (rte_eal_process_type() != RTE_PROC_PRIMARY ||
1950 ixgbe_txq_vec_setup(txq) == 0)) {
1951 PMD_INIT_LOG(DEBUG, "Vector tx enabled.");
1952 dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
1955 dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
1957 PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
1959 " - txq_flags = %lx " "[IXGBE_SIMPLE_FLAGS=%lx]",
1960 (unsigned long)txq->txq_flags,
1961 (unsigned long)IXGBE_SIMPLE_FLAGS);
1963 " - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
1964 (unsigned long)txq->tx_rs_thresh,
1965 (unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
1966 dev->tx_pkt_burst = ixgbe_xmit_pkts;
1970 int __attribute__((cold))
1971 ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
1974 unsigned int socket_id,
1975 const struct rte_eth_txconf *tx_conf)
1977 const struct rte_memzone *tz;
1978 struct ixgbe_tx_queue *txq;
1979 struct ixgbe_hw *hw;
1980 uint16_t tx_rs_thresh, tx_free_thresh;
1982 PMD_INIT_FUNC_TRACE();
1983 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1986 * Validate number of transmit descriptors.
1987 * It must not exceed hardware maximum, and must be multiple
1990 if (nb_desc % IXGBE_TXD_ALIGN != 0 ||
1991 (nb_desc > IXGBE_MAX_RING_DESC) ||
1992 (nb_desc < IXGBE_MIN_RING_DESC)) {
1997 * The following two parameters control the setting of the RS bit on
1998 * transmit descriptors.
1999 * TX descriptors will have their RS bit set after txq->tx_rs_thresh
2000 * descriptors have been used.
2001 * The TX descriptor ring will be cleaned after txq->tx_free_thresh
2002 * descriptors are used or if the number of descriptors required
2003 * to transmit a packet is greater than the number of free TX
2005 * The following constraints must be satisfied:
2006 * tx_rs_thresh must be greater than 0.
2007 * tx_rs_thresh must be less than the size of the ring minus 2.
2008 * tx_rs_thresh must be less than or equal to tx_free_thresh.
2009 * tx_rs_thresh must be a divisor of the ring size.
2010 * tx_free_thresh must be greater than 0.
2011 * tx_free_thresh must be less than the size of the ring minus 3.
2012 * One descriptor in the TX ring is used as a sentinel to avoid a
2013 * H/W race condition, hence the maximum threshold constraints.
2014 * When set to zero use default values.
2016 tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
2017 tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
2018 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
2019 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
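/*
 * Worked example (illustrative): with nb_desc = 512 and the defaults above,
 * tx_rs_thresh = tx_free_thresh = 32, which satisfies all of the
 * constraints: 32 < 510, 32 < 509, 32 <= 32, and 512 % 32 == 0.
 */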
2020 if (tx_rs_thresh >= (nb_desc - 2)) {
2021 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number "
2022 "of TX descriptors minus 2. (tx_rs_thresh=%u "
2023 "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2024 (int)dev->data->port_id, (int)queue_idx);
2027 if (tx_free_thresh >= (nb_desc - 3)) {
2028 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
2029 "tx_free_thresh must be less than the number of "
2030 "TX descriptors minus 3. (tx_free_thresh=%u "
2031 "port=%d queue=%d)",
2032 (unsigned int)tx_free_thresh,
2033 (int)dev->data->port_id, (int)queue_idx);
2036 if (tx_rs_thresh > tx_free_thresh) {
2037 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to "
2038 "tx_free_thresh. (tx_free_thresh=%u "
2039 "tx_rs_thresh=%u port=%d queue=%d)",
2040 (unsigned int)tx_free_thresh,
2041 (unsigned int)tx_rs_thresh,
2042 (int)dev->data->port_id,
2046 if ((nb_desc % tx_rs_thresh) != 0) {
2047 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
2048 "number of TX descriptors. (tx_rs_thresh=%u "
2049 "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2050 (int)dev->data->port_id, (int)queue_idx);
2055 * If tx_rs_thresh is greater than 1, then TX WTHRESH should be
2056 * set to 0. If WTHRESH is greater than zero, the RS bit is ignored
2057 * by the NIC and all descriptors are written back after the NIC
2058 * accumulates WTHRESH descriptors.
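* Illustrative example (hypothetical tx_conf values): wthresh = 0 with
* tx_rs_thresh = 32 passes the check below, while wthresh = 8 with the
* same tx_rs_thresh is rejected, since write-back would then be driven
* by WTHRESH instead of the RS bit.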
2060 if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
2061 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
2062 "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
2063 "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2064 (int)dev->data->port_id, (int)queue_idx);
2068 /* Free memory prior to re-allocation if needed... */
2069 if (dev->data->tx_queues[queue_idx] != NULL) {
2070 ixgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
2071 dev->data->tx_queues[queue_idx] = NULL;
2074 /* First allocate the tx queue data structure */
2075 txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue),
2076 RTE_CACHE_LINE_SIZE, socket_id);
2081 * Allocate TX ring hardware descriptors. A memzone large enough to
2082 * handle the maximum ring size is allocated in order to allow for
2083 * resizing in later calls to the queue setup function.
2085 tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
2086 sizeof(union ixgbe_adv_tx_desc) * IXGBE_MAX_RING_DESC,
2089 ixgbe_tx_queue_release(txq);
2093 txq->nb_tx_desc = nb_desc;
2094 txq->tx_rs_thresh = tx_rs_thresh;
2095 txq->tx_free_thresh = tx_free_thresh;
2096 txq->pthresh = tx_conf->tx_thresh.pthresh;
2097 txq->hthresh = tx_conf->tx_thresh.hthresh;
2098 txq->wthresh = tx_conf->tx_thresh.wthresh;
2099 txq->queue_id = queue_idx;
2100 txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2101 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2102 txq->port_id = dev->data->port_id;
2103 txq->txq_flags = tx_conf->txq_flags;
2104 txq->ops = &def_txq_ops;
2105 txq->tx_deferred_start = tx_conf->tx_deferred_start;
2108 * Use VFTDT as the TX tail register when running as a virtual function.
2110 if (hw->mac.type == ixgbe_mac_82599_vf ||
2111 hw->mac.type == ixgbe_mac_X540_vf ||
2112 hw->mac.type == ixgbe_mac_X550_vf ||
2113 hw->mac.type == ixgbe_mac_X550EM_x_vf)
2114 txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx));
2115 else
2116 txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
2117 #ifndef RTE_LIBRTE_XEN_DOM0
2118 txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
2119 #else
2120 txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
2121 #endif
2122 txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;
2124 /* Allocate software ring */
2125 txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
2126 sizeof(struct ixgbe_tx_entry) * nb_desc,
2127 RTE_CACHE_LINE_SIZE, socket_id);
2128 if (txq->sw_ring == NULL) {
2129 ixgbe_tx_queue_release(txq);
2132 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
2133 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
2135 /* set up vector or scalar TX function as appropriate */
2136 ixgbe_set_tx_function(dev, txq);
2138 txq->ops->reset(txq);
2140 dev->data->tx_queues[queue_idx] = txq;
2147 * ixgbe_free_sc_cluster - free the not-yet-completed scattered cluster
2149 * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
2150 * in the sw_sc_ring is not set to NULL but rather points to the next
2151 * mbuf of this RSC aggregation (that has not been completed yet and still
2152 * resides on the HW ring). So, instead of calling rte_pktmbuf_free(), we
2153 * just free the first "nb_segs" segments of the cluster explicitly, one at a
2154 * time, with rte_pktmbuf_free_seg().
2156 * @m scattered cluster head
2158 static void __attribute__((cold))
2159 ixgbe_free_sc_cluster(struct rte_mbuf *m)
2161 uint8_t i, nb_segs = m->nb_segs;
2162 struct rte_mbuf *next_seg;
2164 for (i = 0; i < nb_segs; i++) {
2165 next_seg = m->next;
2166 rte_pktmbuf_free_seg(m);
2167 m = next_seg;
2171 static void __attribute__((cold))
2172 ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
2176 #ifdef RTE_IXGBE_INC_VECTOR
2177 /* SSE Vector driver has a different way of releasing mbufs. */
2178 if (rxq->rx_using_sse) {
2179 ixgbe_rx_queue_release_mbufs_vec(rxq);
2184 if (rxq->sw_ring != NULL) {
2185 for (i = 0; i < rxq->nb_rx_desc; i++) {
2186 if (rxq->sw_ring[i].mbuf != NULL) {
2187 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
2188 rxq->sw_ring[i].mbuf = NULL;
2191 if (rxq->rx_nb_avail) {
2192 for (i = 0; i < rxq->rx_nb_avail; ++i) {
2193 struct rte_mbuf *mb;
2194 mb = rxq->rx_stage[rxq->rx_next_avail + i];
2195 rte_pktmbuf_free_seg(mb);
2197 rxq->rx_nb_avail = 0;
2201 if (rxq->sw_sc_ring)
2202 for (i = 0; i < rxq->nb_rx_desc; i++)
2203 if (rxq->sw_sc_ring[i].fbuf) {
2204 ixgbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
2205 rxq->sw_sc_ring[i].fbuf = NULL;
2209 static void __attribute__((cold))
2210 ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)
2213 ixgbe_rx_queue_release_mbufs(rxq);
2214 rte_free(rxq->sw_ring);
2215 rte_free(rxq->sw_sc_ring);
2220 void __attribute__((cold))
2221 ixgbe_dev_rx_queue_release(void *rxq)
2223 ixgbe_rx_queue_release(rxq);
2227 * Check if Rx Burst Bulk Alloc function can be used.
2229 * 0: the preconditions are satisfied and the bulk allocation function can be used.
2231 * -EINVAL: the preconditions are NOT satisfied and the default Rx burst
2232 * function must be used.
2234 static inline int __attribute__((cold))
2235 check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
2240 * Make sure the following pre-conditions are satisfied:
2241 * rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST
2242 * rxq->rx_free_thresh < rxq->nb_rx_desc
2243 * (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
2244 * rxq->nb_rx_desc<(IXGBE_MAX_RING_DESC-RTE_PMD_IXGBE_RX_MAX_BURST)
2245 * Scattered packets are not supported. This should be checked
2246 * outside of this function.
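* Worked example (illustrative; assumes RTE_PMD_IXGBE_RX_MAX_BURST is 32
* and IXGBE_MAX_RING_DESC is 4096): nb_rx_desc = 512 with
* rx_free_thresh = 64 meets all four preconditions:
* 64 >= 32, 64 < 512, 512 % 64 == 0 and 512 < 4096 - 32.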
2248 if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) {
2249 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2250 "rxq->rx_free_thresh=%d, "
2251 "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
2252 rxq->rx_free_thresh, RTE_PMD_IXGBE_RX_MAX_BURST);
2254 } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
2255 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2256 "rxq->rx_free_thresh=%d, "
2257 "rxq->nb_rx_desc=%d",
2258 rxq->rx_free_thresh, rxq->nb_rx_desc);
2260 } else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) {
2261 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2262 "rxq->nb_rx_desc=%d, "
2263 "rxq->rx_free_thresh=%d",
2264 rxq->nb_rx_desc, rxq->rx_free_thresh);
2266 } else if (!(rxq->nb_rx_desc <
2267 (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST))) {
2268 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2269 "rxq->nb_rx_desc=%d, "
2270 "IXGBE_MAX_RING_DESC=%d, "
2271 "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
2272 rxq->nb_rx_desc, IXGBE_MAX_RING_DESC,
2273 RTE_PMD_IXGBE_RX_MAX_BURST);
2280 /* Reset dynamic ixgbe_rx_queue fields back to defaults */
2281 static void __attribute__((cold))
2282 ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
2284 static const union ixgbe_adv_rx_desc zeroed_desc = {{0}};
2286 uint16_t len = rxq->nb_rx_desc;
2289 * By default, the Rx queue setup function allocates enough memory for
2290 * IXGBE_MAX_RING_DESC. The Rx Burst bulk allocation function requires
2291 * extra memory at the end of the descriptor ring to be zero'd out. A
2292 * pre-condition for using the Rx burst bulk alloc function is that the
2293 * number of descriptors is less than or equal to
2294 * (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST). Check all the
2295 * constraints here to see if we need to zero out memory after the end
2296 * of the H/W descriptor ring.
2298 if (adapter->rx_bulk_alloc_allowed)
2299 /* zero out extra memory */
2300 len += RTE_PMD_IXGBE_RX_MAX_BURST;
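/*
 * Example (assuming RTE_PMD_IXGBE_RX_MAX_BURST is 32): with
 * nb_rx_desc = 512 the loop below zeroes 512 + 32 = 544 descriptors,
 * so the bulk alloc look-ahead never reads stale descriptor data.
 */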
2303 * Zero out HW ring memory. Zero out extra memory at the end of
2304 * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
2305 * reads extra memory as zeros.
2307 for (i = 0; i < len; i++) {
2308 rxq->rx_ring[i] = zeroed_desc;
2312 * initialize extra software ring entries. Space for these extra
2313 * entries is always allocated
2315 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
2316 for (i = rxq->nb_rx_desc; i < len; ++i) {
2317 rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
2320 rxq->rx_nb_avail = 0;
2321 rxq->rx_next_avail = 0;
2322 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
2324 rxq->nb_rx_hold = 0;
2325 rxq->pkt_first_seg = NULL;
2326 rxq->pkt_last_seg = NULL;
2328 #ifdef RTE_IXGBE_INC_VECTOR
2329 rxq->rxrearm_start = 0;
2330 rxq->rxrearm_nb = 0;
2334 int __attribute__((cold))
2335 ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
2338 unsigned int socket_id,
2339 const struct rte_eth_rxconf *rx_conf,
2340 struct rte_mempool *mp)
2342 const struct rte_memzone *rz;
2343 struct ixgbe_rx_queue *rxq;
2344 struct ixgbe_hw *hw;
2346 struct ixgbe_adapter *adapter =
2347 (struct ixgbe_adapter *)dev->data->dev_private;
2349 PMD_INIT_FUNC_TRACE();
2350 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2353 * Validate number of receive descriptors.
2354 * It must not exceed the hardware maximum and must be a multiple of IXGBE_RXD_ALIGN.
2357 if (nb_desc % IXGBE_RXD_ALIGN != 0 ||
2358 (nb_desc > IXGBE_MAX_RING_DESC) ||
2359 (nb_desc < IXGBE_MIN_RING_DESC)) {
2363 /* Free memory prior to re-allocation if needed... */
2364 if (dev->data->rx_queues[queue_idx] != NULL) {
2365 ixgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
2366 dev->data->rx_queues[queue_idx] = NULL;
2369 /* First allocate the rx queue data structure */
2370 rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct ixgbe_rx_queue),
2371 RTE_CACHE_LINE_SIZE, socket_id);
2375 rxq->nb_rx_desc = nb_desc;
2376 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
2377 rxq->queue_id = queue_idx;
2378 rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2379 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2380 rxq->port_id = dev->data->port_id;
2381 rxq->crc_len = (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ? 0 : ETHER_CRC_LEN);
2383 rxq->drop_en = rx_conf->rx_drop_en;
2384 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
2387 * Allocate RX ring hardware descriptors. A memzone large enough to
2388 * handle the maximum ring size is allocated in order to allow for
2389 * resizing in later calls to the queue setup function.
2391 rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx,
2392 RX_RING_SZ, socket_id);
2394 ixgbe_rx_queue_release(rxq);
2399 * Zero init all the descriptors in the ring.
2401 memset (rz->addr, 0, RX_RING_SZ);
2404 * Use VFRDT/VFRDH as the RX queue registers when running as a virtual function.
2406 if (hw->mac.type == ixgbe_mac_82599_vf ||
2407 hw->mac.type == ixgbe_mac_X540_vf ||
2408 hw->mac.type == ixgbe_mac_X550_vf ||
2409 hw->mac.type == ixgbe_mac_X550EM_x_vf) {
2411 rxq->rdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx));
2413 rxq->rdh_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDH(queue_idx));
2415 } else {
2417 rxq->rdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_RDT(rxq->reg_idx));
2419 rxq->rdh_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx));
2420 }
2421 #ifndef RTE_LIBRTE_XEN_DOM0
2422 rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
2423 #else
2424 rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
2425 #endif
2426 rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
2429 * Certain constraints must be met in order to use the bulk buffer
2430 * allocation Rx burst function. If any of the Rx queues does not meet them,
2431 * the feature is disabled for the whole port.
2433 if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
2434 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc "
2435 "preconditions - canceling the feature for "
2436 "the whole port[%d]",
2437 rxq->queue_id, rxq->port_id);
2438 adapter->rx_bulk_alloc_allowed = false;
2442 * Allocate software ring. Allow for space at the end of the
2443 * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
2444 * function does not access an invalid memory region.
2447 if (adapter->rx_bulk_alloc_allowed)
2448 len += RTE_PMD_IXGBE_RX_MAX_BURST;
2450 rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
2451 sizeof(struct ixgbe_rx_entry) * len,
2452 RTE_CACHE_LINE_SIZE, socket_id);
2453 if (!rxq->sw_ring) {
2454 ixgbe_rx_queue_release(rxq);
2459 * Always allocate even if it's not going to be needed in order to
2460 * simplify the code.
2462 * This ring is used in LRO and Scattered Rx cases and Scattered Rx may
2463 * be requested in ixgbe_dev_rx_init(), which is called later during device start.
2467 rte_zmalloc_socket("rxq->sw_sc_ring",
2468 sizeof(struct ixgbe_scattered_rx_entry) * len,
2469 RTE_CACHE_LINE_SIZE, socket_id);
2470 if (!rxq->sw_sc_ring) {
2471 ixgbe_rx_queue_release(rxq);
2475 PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p "
2476 "dma_addr=0x%"PRIx64,
2477 rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
2478 rxq->rx_ring_phys_addr);
2480 if (!rte_is_power_of_2(nb_desc)) {
2481 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
2482 "preconditions - canceling the feature for "
2483 "the whole port[%d]",
2484 rxq->queue_id, rxq->port_id);
2485 adapter->rx_vec_allowed = false;
2487 ixgbe_rxq_vec_setup(rxq);
2489 dev->data->rx_queues[queue_idx] = rxq;
2491 ixgbe_reset_rx_queue(adapter, rxq);
2497 ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
2499 #define IXGBE_RXQ_SCAN_INTERVAL 4
2500 volatile union ixgbe_adv_rx_desc *rxdp;
2501 struct ixgbe_rx_queue *rxq;
2504 if (rx_queue_id >= dev->data->nb_rx_queues) {
2505 PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
2509 rxq = dev->data->rx_queues[rx_queue_id];
2510 rxdp = &(rxq->rx_ring[rxq->rx_tail]);
2512 while ((desc < rxq->nb_rx_desc) &&
2513 (rxdp->wb.upper.status_error &
2514 rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) {
2515 desc += IXGBE_RXQ_SCAN_INTERVAL;
2516 rxdp += IXGBE_RXQ_SCAN_INTERVAL;
2517 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
2518 rxdp = &(rxq->rx_ring[rxq->rx_tail +
2519 desc - rxq->nb_rx_desc]);
2526 ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
2528 volatile union ixgbe_adv_rx_desc *rxdp;
2529 struct ixgbe_rx_queue *rxq = rx_queue;
2532 if (unlikely(offset >= rxq->nb_rx_desc))
2534 desc = rxq->rx_tail + offset;
2535 if (desc >= rxq->nb_rx_desc)
2536 desc -= rxq->nb_rx_desc;
2538 rxdp = &rxq->rx_ring[desc];
2539 return !!(rxdp->wb.upper.status_error &
2540 rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD));
2543 void __attribute__((cold))
2544 ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
2547 struct ixgbe_adapter *adapter =
2548 (struct ixgbe_adapter *)dev->data->dev_private;
2550 PMD_INIT_FUNC_TRACE();
2552 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2553 struct ixgbe_tx_queue *txq = dev->data->tx_queues[i];
2555 txq->ops->release_mbufs(txq);
2556 txq->ops->reset(txq);
2560 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2561 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
2563 ixgbe_rx_queue_release_mbufs(rxq);
2564 ixgbe_reset_rx_queue(adapter, rxq);
2570 ixgbe_dev_free_queues(struct rte_eth_dev *dev)
2574 PMD_INIT_FUNC_TRACE();
2576 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2577 ixgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
2578 dev->data->rx_queues[i] = NULL;
2580 dev->data->nb_rx_queues = 0;
2582 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2583 ixgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
2584 dev->data->tx_queues[i] = NULL;
2586 dev->data->nb_tx_queues = 0;
2589 /*********************************************************************
2591 * Device RX/TX init functions
2593 **********************************************************************/
2596 * Receive Side Scaling (RSS)
2597 * See section 7.1.2.8 in the following document:
2598 * "Intel 82599 10 GbE Controller Datasheet" - Revision 2.1 October 2009
2601 * The source and destination IP addresses of the IP header and the source
2602 * and destination ports of TCP/UDP headers, if any, of received packets are
2603 * hashed against a configurable random key to compute a 32-bit RSS hash result.
2604 * The seven (7) LSBs of the 32-bit hash result are used as an index into a
2605 * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
2606 * RSS output index, which is used as the RX queue index in which to store the received packet.
2608 * The following output is supplied in the RX write-back descriptor:
2609 * - 32-bit result of the Microsoft RSS hash function,
2610 * - 4-bit RSS type field.
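* Worked example (illustrative): a packet whose 32-bit RSS hash is
* 0x1234ABCD has 0xCD & 0x7F = 0x4D (77) as its seven LSBs, so RETA
* entry 77 selects the RX queue for that packet.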
2614 * RSS random key supplied in section 7.1.2.8.3 of the Intel 82599 datasheet.
2615 * Used as the default key.
2617 static uint8_t rss_intel_key[40] = {
2618 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
2619 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
2620 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
2621 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
2622 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
2626 ixgbe_rss_disable(struct rte_eth_dev *dev)
2628 struct ixgbe_hw *hw;
2632 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2633 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
2634 mrqc = IXGBE_READ_REG(hw, mrqc_reg);
2635 mrqc &= ~IXGBE_MRQC_RSSEN;
2636 IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
2640 ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
2650 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
2651 rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
2653 hash_key = rss_conf->rss_key;
2654 if (hash_key != NULL) {
2655 /* Fill in RSS hash key */
2656 for (i = 0; i < 10; i++) {
2657 rss_key = hash_key[(i * 4)];
2658 rss_key |= hash_key[(i * 4) + 1] << 8;
2659 rss_key |= hash_key[(i * 4) + 2] << 16;
2660 rss_key |= hash_key[(i * 4) + 3] << 24;
2661 IXGBE_WRITE_REG_ARRAY(hw, rssrk_reg, i, rss_key);
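/*
 * Example of the packing above: if the default rss_intel_key defined
 * earlier is used, its first four bytes (0x6D, 0x5A, 0x56, 0xDA) are
 * written to RSSRK(0) as 0xDA565A6D, i.e. the first key byte ends up
 * in the least significant byte of the register.
 */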
2665 /* Set configured hashing protocols in MRQC register */
2666 rss_hf = rss_conf->rss_hf;
2667 mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */
2668 if (rss_hf & ETH_RSS_IPV4)
2669 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
2670 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
2671 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
2672 if (rss_hf & ETH_RSS_IPV6)
2673 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
2674 if (rss_hf & ETH_RSS_IPV6_EX)
2675 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
2676 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
2677 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
2678 if (rss_hf & ETH_RSS_IPV6_TCP_EX)
2679 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
2680 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
2681 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
2682 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
2683 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
2684 if (rss_hf & ETH_RSS_IPV6_UDP_EX)
2685 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2686 IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
2690 ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
2691 struct rte_eth_rss_conf *rss_conf)
2693 struct ixgbe_hw *hw;
2698 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2700 if (!ixgbe_rss_update_sp(hw->mac.type)) {
2701 PMD_DRV_LOG(ERR, "RSS hash update is not supported on this "
2705 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
2708 * Excerpt from section 7.1.2.8 Receive-Side Scaling (RSS):
2709 * "RSS enabling cannot be done dynamically while it must be
2710 * preceded by a software reset"
2711 * Before changing anything, first check that the update RSS operation
2712 * does not attempt to disable RSS, if RSS was enabled at
2713 * initialization time, or does not attempt to enable RSS, if RSS was
2714 * disabled at initialization time.
2716 rss_hf = rss_conf->rss_hf & IXGBE_RSS_OFFLOAD_ALL;
2717 mrqc = IXGBE_READ_REG(hw, mrqc_reg);
2718 if (!(mrqc & IXGBE_MRQC_RSSEN)) { /* RSS disabled */
2719 if (rss_hf != 0) /* Enable RSS */
2721 return 0; /* Nothing to do */
2724 if (rss_hf == 0) /* Disable RSS */
2726 ixgbe_hw_rss_hash_set(hw, rss_conf);
2731 ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
2732 struct rte_eth_rss_conf *rss_conf)
2734 struct ixgbe_hw *hw;
2743 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2744 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
2745 rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
2746 hash_key = rss_conf->rss_key;
2747 if (hash_key != NULL) {
2748 /* Return RSS hash key */
2749 for (i = 0; i < 10; i++) {
2750 rss_key = IXGBE_READ_REG_ARRAY(hw, rssrk_reg, i);
2751 hash_key[(i * 4)] = rss_key & 0x000000FF;
2752 hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
2753 hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
2754 hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
2758 /* Get RSS functions configured in MRQC register */
2759 mrqc = IXGBE_READ_REG(hw, mrqc_reg);
2760 if ((mrqc & IXGBE_MRQC_RSSEN) == 0) { /* RSS is disabled */
2761 rss_conf->rss_hf = 0;
2765 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4)
2766 rss_hf |= ETH_RSS_IPV4;
2767 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP)
2768 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
2769 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6)
2770 rss_hf |= ETH_RSS_IPV6;
2771 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX)
2772 rss_hf |= ETH_RSS_IPV6_EX;
2773 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP)
2774 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
2775 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP)
2776 rss_hf |= ETH_RSS_IPV6_TCP_EX;
2777 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP)
2778 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
2779 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP)
2780 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
2781 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP)
2782 rss_hf |= ETH_RSS_IPV6_UDP_EX;
2783 rss_conf->rss_hf = rss_hf;
2788 ixgbe_rss_configure(struct rte_eth_dev *dev)
2790 struct rte_eth_rss_conf rss_conf;
2791 struct ixgbe_hw *hw;
2795 uint16_t sp_reta_size;
2798 PMD_INIT_FUNC_TRACE();
2799 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2801 sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
2804 * Fill in redirection table
2805 * The byte-swap is needed because NIC registers are in
2806 * little-endian order.
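* Worked example (illustrative, assuming the swap is rte_bswap32() and
* that each group of four entries is written together): with 4 RX
* queues the first four RETA entries are 0, 1, 2, 3, accumulated below
* as 0x00010203 and stored as 0x03020100, so entry 0 occupies the
* least significant byte of the register.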
2809 for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
2810 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
2812 if (j == dev->data->nb_rx_queues)
2814 reta = (reta << 8) | j;
2816 IXGBE_WRITE_REG(hw, reta_reg,
2821 * Configure the RSS key and the RSS protocols used to compute
2822 * the RSS hash of input packets.
2824 rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
2825 if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
2826 ixgbe_rss_disable(dev);
2829 if (rss_conf.rss_key == NULL)
2830 rss_conf.rss_key = rss_intel_key; /* Default hash key */
2831 ixgbe_hw_rss_hash_set(hw, &rss_conf);
2834 #define NUM_VFTA_REGISTERS 128
2835 #define NIC_RX_BUFFER_SIZE 0x200
2836 #define X550_RX_BUFFER_SIZE 0x180
2839 ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
2841 struct rte_eth_vmdq_dcb_conf *cfg;
2842 struct ixgbe_hw *hw;
2843 enum rte_eth_nb_pools num_pools;
2844 uint32_t mrqc, vt_ctl, queue_mapping, vlanctrl;
2846 uint8_t nb_tcs; /* number of traffic classes */
2849 PMD_INIT_FUNC_TRACE();
2850 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2851 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
2852 num_pools = cfg->nb_queue_pools;
2853 /* Check we have a valid number of pools */
2854 if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
2855 ixgbe_rss_disable(dev);
2858 /* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
2859 nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
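/*
 * Example (assuming ETH_VMDQ_DCB_NUM_QUEUES is 128): 128 / 16 pools
 * gives 8 traffic classes and 128 / 32 pools gives 4, matching the
 * comment above.
 */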
2863 * split rx buffer up into sections, each for 1 traffic class
2865 switch (hw->mac.type) {
2866 case ixgbe_mac_X550:
2867 case ixgbe_mac_X550EM_x:
2868 pbsize = (uint16_t)(X550_RX_BUFFER_SIZE / nb_tcs);
2871 pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
2874 for (i = 0 ; i < nb_tcs; i++) {
2875 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
2876 rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
2877 /* clear 10 bits. */
2878 rxpbsize |= (pbsize << IXGBE_RXPBSIZE_SHIFT); /* set value */
2879 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
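/*
 * Worked example: on a non-X550 device with 8 traffic classes each TC
 * gets NIC_RX_BUFFER_SIZE / 8 = 0x200 / 8 = 0x40 units of packet
 * buffer; on X550 the split is 0x180 / 8 = 0x30 per TC.
 */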
2881 /* zero alloc all unused TCs */
2882 for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2883 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
2884 rxpbsize &= ~(0x3FF << IXGBE_RXPBSIZE_SHIFT);
2885 /* clear 10 bits. */
2886 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
2889 /* MRQC: enable vmdq and dcb */
2890 mrqc = (num_pools == ETH_16_POOLS) ?
2891 IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN;
2892 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2894 /* PFVTCTL: turn on virtualisation and set the default pool */
2895 vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2896 if (cfg->enable_default_pool) {
2897 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
2899 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
2902 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
2904 /* RTRUP2TC: mapping user priorities to traffic classes (TCs) */
2906 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
2908 * mapping is done with 3 bits per priority,
2909 * so shift by i*3 each time
2911 queue_mapping |= ((cfg->dcb_tc[i] & 0x07) << (i * 3));
2913 IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, queue_mapping);
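/*
 * Worked example (hypothetical mapping): dcb_tc = {0, 0, 0, 0, 1, 1, 1, 1}
 * maps priorities 4-7 to TC1, so queue_mapping becomes
 * (1 << 12) | (1 << 15) | (1 << 18) | (1 << 21) = 0x249000.
 */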
2915 /* RTRPCS: DCB related */
2916 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, IXGBE_RMCS_RRM);
2918 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
2919 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2920 vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
2921 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
2923 /* VFTA - enable all vlan filters */
2924 for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
2925 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
2928 /* VFRE: pool enabling for receive - 16 or 32 */
2929 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), \
2930 num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
2933 * MPSAR - allow pools to read specific mac addresses
2934 * In this case, all pools should be able to read from mac addr 0
2936 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), 0xFFFFFFFF);
2937 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), 0xFFFFFFFF);
2939 /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
2940 for (i = 0; i < cfg->nb_pool_maps; i++) {
2941 /* set vlan id in VF register and set the valid bit */
2942 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \
2943 (cfg->pool_map[i].vlan_id & 0xFFF)));
2945 * Put the allowed pools in VFB reg. As we only have 16 or 32
2946 * pools, we only need to use the first half of the register
2949 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), cfg->pool_map[i].pools);
2954 * ixgbe_dcb_config_tx_hw_config - Configure general DCB TX parameters
2955 * @hw: pointer to hardware structure
2956 * @dcb_config: pointer to ixgbe_dcb_config structure
2959 ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw,
2960 struct ixgbe_dcb_config *dcb_config)
2965 PMD_INIT_FUNC_TRACE();
2966 if (hw->mac.type != ixgbe_mac_82598EB) {
2967 /* Disable the Tx desc arbiter so that MTQC can be changed */
2968 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2969 reg |= IXGBE_RTTDCS_ARBDIS;
2970 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
2972 /* Enable DCB for Tx with 8 TCs */
2973 if (dcb_config->num_tcs.pg_tcs == 8) {
2974 reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
2977 reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
2979 if (dcb_config->vt_mode)
2980 reg |= IXGBE_MTQC_VT_ENA;
2981 IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
2983 /* Disable drop for all queues */
2984 for (q = 0; q < 128; q++)
2985 IXGBE_WRITE_REG(hw, IXGBE_QDE,
2986 (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
2988 /* Enable the Tx desc arbiter */
2989 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2990 reg &= ~IXGBE_RTTDCS_ARBDIS;
2991 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
2993 /* Enable Security TX Buffer IFG for DCB */
2994 reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
2995 reg |= IXGBE_SECTX_DCB;
2996 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
3002 * ixgbe_vmdq_dcb_hw_tx_config - Configure general VMDQ+DCB TX parameters
3003 * @dev: pointer to rte_eth_dev structure
3004 * @dcb_config: pointer to ixgbe_dcb_config structure
3007 ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
3008 struct ixgbe_dcb_config *dcb_config)
3010 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3011 &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
3012 struct ixgbe_hw *hw =
3013 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3015 PMD_INIT_FUNC_TRACE();
3016 if (hw->mac.type != ixgbe_mac_82598EB)
3017 /*PF VF Transmit Enable*/
3018 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0),
3019 vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
3021 /*Configure general DCB TX parameters*/
3022 ixgbe_dcb_tx_hw_config(hw,dcb_config);
3027 ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
3028 struct ixgbe_dcb_config *dcb_config)
3030 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3031 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
3032 struct ixgbe_dcb_tc_config *tc;
3035 /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
3036 if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS ) {
3037 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
3038 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
3041 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
3042 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
3044 /* User Priority to Traffic Class mapping */
3045 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3046 j = vmdq_rx_conf->dcb_tc[i];
3047 tc = &dcb_config->tc_config[j];
3048 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
3054 ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
3055 struct ixgbe_dcb_config *dcb_config)
3057 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3058 &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
3059 struct ixgbe_dcb_tc_config *tc;
3062 /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
3063 if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ) {
3064 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
3065 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
3068 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
3069 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
3072 /* User Priority to Traffic Class mapping */
3073 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3074 j = vmdq_tx_conf->dcb_tc[i];
3075 tc = &dcb_config->tc_config[j];
3076 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
3083 ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
3084 struct ixgbe_dcb_config *dcb_config)
3086 struct rte_eth_dcb_rx_conf *rx_conf =
3087 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
3088 struct ixgbe_dcb_tc_config *tc;
3091 dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
3092 dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
3094 /* User Priority to Traffic Class mapping */
3095 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3096 j = rx_conf->dcb_tc[i];
3097 tc = &dcb_config->tc_config[j];
3098 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
3104 ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
3105 struct ixgbe_dcb_config *dcb_config)
3107 struct rte_eth_dcb_tx_conf *tx_conf =
3108 &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
3109 struct ixgbe_dcb_tc_config *tc;
3112 dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
3113 dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
3115 /* User Priority to Traffic Class mapping */
3116 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3117 j = tx_conf->dcb_tc[i];
3118 tc = &dcb_config->tc_config[j];
3119 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
3125 * ixgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
3126 * @hw: pointer to hardware structure
3127 * @dcb_config: pointer to ixgbe_dcb_config structure
3130 ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
3131 struct ixgbe_dcb_config *dcb_config)
3137 PMD_INIT_FUNC_TRACE();
3139 * Disable the arbiter before changing parameters
3140 * (always enable recycle mode; WSP)
3142 reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
3143 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
3145 if (hw->mac.type != ixgbe_mac_82598EB) {
3146 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
3147 if (dcb_config->num_tcs.pg_tcs == 4) {
3148 if (dcb_config->vt_mode)
3149 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3150 IXGBE_MRQC_VMDQRT4TCEN;
3152 /* No matter whether the mode is DCB or DCB_RSS, just
3153 * set the MRQE to RSSXTCEN. RSS itself is controlled
3154 * by the MRQC RSS_FIELD bits.
3156 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
3157 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3158 IXGBE_MRQC_RTRSS4TCEN;
3161 if (dcb_config->num_tcs.pg_tcs == 8) {
3162 if (dcb_config->vt_mode)
3163 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3164 IXGBE_MRQC_VMDQRT8TCEN;
3166 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
3167 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3168 IXGBE_MRQC_RTRSS8TCEN;
3172 IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
3175 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
3176 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3177 vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
3178 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
3180 /* VFTA - enable all vlan filters */
3181 for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
3182 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
3186 * Configure Rx packet plane (recycle mode; WSP) and re-enable the arbiter.
3189 reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
3190 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
3196 ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill,
3197 uint16_t *max,uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
3199 switch (hw->mac.type) {
3200 case ixgbe_mac_82598EB:
3201 ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
3203 case ixgbe_mac_82599EB:
3204 case ixgbe_mac_X540:
3205 case ixgbe_mac_X550:
3206 case ixgbe_mac_X550EM_x:
3207 ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
3216 ixgbe_dcb_hw_arbite_tx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *max,
3217 uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
3219 switch (hw->mac.type) {
3220 case ixgbe_mac_82598EB:
3221 ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id,tsa);
3222 ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id,tsa);
3224 case ixgbe_mac_82599EB:
3225 case ixgbe_mac_X540:
3226 case ixgbe_mac_X550:
3227 case ixgbe_mac_X550EM_x:
3228 ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,tsa);
3229 ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,tsa, map);
3236 #define DCB_RX_CONFIG 1
3237 #define DCB_TX_CONFIG 1
3238 #define DCB_TX_PB 1024
3240 * ixgbe_dcb_hw_configure - Enable DCB and configure
3241 * general DCB in VT mode and non-VT mode parameters
3242 * @dev: pointer to rte_eth_dev structure
3243 * @dcb_config: pointer to ixgbe_dcb_config structure
3246 ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
3247 struct ixgbe_dcb_config *dcb_config)
3250 uint8_t i,pfc_en,nb_tcs;
3251 uint16_t pbsize, rx_buffer_size;
3252 uint8_t config_dcb_rx = 0;
3253 uint8_t config_dcb_tx = 0;
3254 uint8_t tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3255 uint8_t bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3256 uint16_t refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3257 uint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3258 uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3259 struct ixgbe_dcb_tc_config *tc;
3260 uint32_t max_frame = dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
3261 struct ixgbe_hw *hw =
3262 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3264 switch(dev->data->dev_conf.rxmode.mq_mode){
3265 case ETH_MQ_RX_VMDQ_DCB:
3266 dcb_config->vt_mode = true;
3267 if (hw->mac.type != ixgbe_mac_82598EB) {
3268 config_dcb_rx = DCB_RX_CONFIG;
3270 * Get DCB and VT RX configuration parameters
3273 ixgbe_vmdq_dcb_rx_config(dev, dcb_config);
3274 /*Configure general VMDQ and DCB RX parameters*/
3275 ixgbe_vmdq_dcb_configure(dev);
3279 case ETH_MQ_RX_DCB_RSS:
3280 dcb_config->vt_mode = false;
3281 config_dcb_rx = DCB_RX_CONFIG;
3282 /* Get DCB RX configuration parameters from rte_eth_conf */
3283 ixgbe_dcb_rx_config(dev, dcb_config);
3284 /*Configure general DCB RX parameters*/
3285 ixgbe_dcb_rx_hw_config(hw, dcb_config);
3288 PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
3291 switch (dev->data->dev_conf.txmode.mq_mode) {
3292 case ETH_MQ_TX_VMDQ_DCB:
3293 dcb_config->vt_mode = true;
3294 config_dcb_tx = DCB_TX_CONFIG;
3295 /* get DCB and VT TX configuration parameters from rte_eth_conf */
3296 ixgbe_dcb_vt_tx_config(dev,dcb_config);
3297 /*Configure general VMDQ and DCB TX parameters*/
3298 ixgbe_vmdq_dcb_hw_tx_config(dev,dcb_config);
3302 dcb_config->vt_mode = false;
3303 config_dcb_tx = DCB_TX_CONFIG;
3304 /*get DCB TX configuration parameters from rte_eth_conf*/
3305 ixgbe_dcb_tx_config(dev, dcb_config);
3306 /*Configure general DCB TX parameters*/
3307 ixgbe_dcb_tx_hw_config(hw, dcb_config);
3310 PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration");
3314 nb_tcs = dcb_config->num_tcs.pfc_tcs;
3316 ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
3317 if(nb_tcs == ETH_4_TCS) {
3318 /* Avoid un-configured priority mapping to TC0 */
3320 uint8_t mask = 0xFF;
3321 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
3322 mask = (uint8_t)(mask & (~ (1 << map[i])));
3323 for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
3324 if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
3328 /* Re-configure 4 TCs BW */
3329 for (i = 0; i < nb_tcs; i++) {
3330 tc = &dcb_config->tc_config[i];
3331 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
3332 (uint8_t)(100 / nb_tcs);
3333 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
3334 (uint8_t)(100 / nb_tcs);
3336 for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
3337 tc = &dcb_config->tc_config[i];
3338 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
3339 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 0;
3343 switch (hw->mac.type) {
3344 case ixgbe_mac_X550:
3345 case ixgbe_mac_X550EM_x:
3346 rx_buffer_size = X550_RX_BUFFER_SIZE;
3349 rx_buffer_size = NIC_RX_BUFFER_SIZE;
3354 /* Set RX buffer size */
3355 pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
3356 uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT;
3357 for (i = 0 ; i < nb_tcs; i++) {
3358 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
3360 /* zero alloc all unused TCs */
3361 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3362 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
3366 /* Only support an equally distributed Tx packet buffer strategy. */
3367 uint32_t txpktsize = IXGBE_TXPBSIZE_MAX / nb_tcs;
3368 uint32_t txpbthresh = (txpktsize / DCB_TX_PB) - IXGBE_TXPKT_SIZE_MAX;
3369 for (i = 0; i < nb_tcs; i++) {
3370 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
3371 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
3373 /* Clear unused TCs, if any, to zero buffer size*/
3374 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3375 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
3376 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
3380 /*Calculates traffic class credits*/
3381 ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame,
3382 IXGBE_DCB_TX_CONFIG);
3383 ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame,
3384 IXGBE_DCB_RX_CONFIG);
3387 /* Unpack CEE standard containers */
3388 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_RX_CONFIG, refill);
3389 ixgbe_dcb_unpack_max_cee(dcb_config, max);
3390 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_RX_CONFIG, bwgid);
3391 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_RX_CONFIG, tsa);
3392 /* Configure PG(ETS) RX */
3393 ixgbe_dcb_hw_arbite_rx_config(hw,refill,max,bwgid,tsa,map);
3397 /* Unpack CEE standard containers */
3398 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
3399 ixgbe_dcb_unpack_max_cee(dcb_config, max);
3400 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
3401 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
3402 /* Configure PG(ETS) TX */
3403 ixgbe_dcb_hw_arbite_tx_config(hw,refill,max,bwgid,tsa,map);
3406 /*Configure queue statistics registers*/
3407 ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
3409 /* Check if the PFC is supported */
3410 if(dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
3411 pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
3412 for (i = 0; i < nb_tcs; i++) {
3414 * For example, with a TC count of 8 the default high_water is 48
3415 * and the default low_water is 16.
3417 hw->fc.high_water[i] = (pbsize * 3) / 4;
3418 hw->fc.low_water[i] = pbsize / 4;
3419 /* Enable pfc for this TC */
3420 tc = &dcb_config->tc_config[i];
3421 tc->pfc = ixgbe_dcb_pfc_enabled;
3423 ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
3424 if(dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
3426 ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
3433 * ixgbe_configure_dcb - Configure DCB Hardware
3434 * @dev: pointer to rte_eth_dev
3436 void ixgbe_configure_dcb(struct rte_eth_dev *dev)
3438 struct ixgbe_dcb_config *dcb_cfg =
3439 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
3440 struct rte_eth_conf *dev_conf = &(dev->data->dev_conf);
3442 PMD_INIT_FUNC_TRACE();
3444 /* check support mq_mode for DCB */
3445 if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
3446 (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) &&
3447 (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS))
3450 if (dev->data->nb_rx_queues != ETH_DCB_NUM_QUEUES)
3453 /** Configure DCB hardware **/
3454 ixgbe_dcb_hw_configure(dev, dcb_cfg);
3460 * VMDq is only supported on 10 GbE NICs.
3463 ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
3465 struct rte_eth_vmdq_rx_conf *cfg;
3466 struct ixgbe_hw *hw;
3467 enum rte_eth_nb_pools num_pools;
3468 uint32_t mrqc, vt_ctl, vlanctrl;
3472 PMD_INIT_FUNC_TRACE();
3473 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3474 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
3475 num_pools = cfg->nb_queue_pools;
3477 ixgbe_rss_disable(dev);
3479 /* MRQC: enable vmdq */
3480 mrqc = IXGBE_MRQC_VMDQEN;
3481 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3483 /* PFVTCTL: turn on virtualisation and set the default pool */
3484 vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
3485 if (cfg->enable_default_pool)
3486 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
3488 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
3490 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
3492 for (i = 0; i < (int)num_pools; i++) {
3493 vmolr = ixgbe_convert_vm_rx_mask_to_val(cfg->rx_mode, vmolr);
3494 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
3497 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
3498 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3499 vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
3500 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
3502 /* VFTA - enable all vlan filters */
3503 for (i = 0; i < NUM_VFTA_REGISTERS; i++)
3504 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), UINT32_MAX);
3506 /* VFRE: pool enabling for receive - 64 */
3507 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), UINT32_MAX);
3508 if (num_pools == ETH_64_POOLS)
3509 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), UINT32_MAX);
3512 * MPSAR - allow pools to read specific mac addresses
3513 * In this case, all pools should be able to read from mac addr 0
3515 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), UINT32_MAX);
3516 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), UINT32_MAX);
3518 /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
3519 for (i = 0; i < cfg->nb_pool_maps; i++) {
3520 /* set vlan id in VF register and set the valid bit */
3521 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \
3522 (cfg->pool_map[i].vlan_id & IXGBE_RXD_VLAN_ID_MASK)));
3524 * Put the allowed pools in VFB reg. As we only have 16 or 64
3525 * pools, we only need to use the first half of the register
3528 if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0)
3529 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), \
3530 (cfg->pool_map[i].pools & UINT32_MAX));
3532 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((i*2+1)), \
3533 ((cfg->pool_map[i].pools >> 32) \
3538 /* PFDMA Tx General Switch Control Enables VMDQ loopback */
3539 if (cfg->enable_loop_back) {
3540 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
3541 for (i = 0; i < RTE_IXGBE_VMTXSW_REGISTER_COUNT; i++)
3542 IXGBE_WRITE_REG(hw, IXGBE_VMTXSW(i), UINT32_MAX);
3545 IXGBE_WRITE_FLUSH(hw);
3549 * ixgbe_dcb_config_tx_hw_config - Configure general VMDq TX parameters
3550 * @hw: pointer to hardware structure
3553 ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
3558 PMD_INIT_FUNC_TRACE();
3559 /*PF VF Transmit Enable*/
3560 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), UINT32_MAX);
3561 IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), UINT32_MAX);
3563 /* Disable the Tx desc arbiter so that MTQC can be changed */
3564 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3565 reg |= IXGBE_RTTDCS_ARBDIS;
3566 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3568 reg = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
3569 IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
3571 /* Disable drop for all queues */
3572 for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
3573 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3574 (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
3576 /* Enable the Tx desc arbiter */
3577 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3578 reg &= ~IXGBE_RTTDCS_ARBDIS;
3579 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3581 IXGBE_WRITE_FLUSH(hw);
3586 static int __attribute__((cold))
3587 ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
3589 struct ixgbe_rx_entry *rxe = rxq->sw_ring;
3593 /* Initialize software ring entries */
3594 for (i = 0; i < rxq->nb_rx_desc; i++) {
3595 volatile union ixgbe_adv_rx_desc *rxd;
3596 struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
3597 if (mbuf == NULL) {
3598 PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
3599 (unsigned) rxq->queue_id);
3603 rte_mbuf_refcnt_set(mbuf, 1);
3605 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
3607 mbuf->port = rxq->port_id;
3610 dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
3611 rxd = &rxq->rx_ring[i];
3612 rxd->read.hdr_addr = 0;
3613 rxd->read.pkt_addr = dma_addr;
3621 ixgbe_config_vf_rss(struct rte_eth_dev *dev)
3623 struct ixgbe_hw *hw;
3626 ixgbe_rss_configure(dev);
3628 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3630 /* MRQC: enable VF RSS */
3631 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
3632 mrqc &= ~IXGBE_MRQC_MRQE_MASK;
3633 switch (RTE_ETH_DEV_SRIOV(dev).active) {
3635 mrqc |= IXGBE_MRQC_VMDQRSS64EN;
3639 mrqc |= IXGBE_MRQC_VMDQRSS32EN;
3643 PMD_INIT_LOG(ERR, "Invalid pool number in IOV mode with VMDQ RSS");
3647 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3653 ixgbe_config_vf_default(struct rte_eth_dev *dev)
3655 struct ixgbe_hw *hw =
3656 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3658 switch (RTE_ETH_DEV_SRIOV(dev).active) {
3660 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
3665 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
3666 IXGBE_MRQC_VMDQRT4TCEN);
3670 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
3671 IXGBE_MRQC_VMDQRT8TCEN);
3675 "invalid pool number in IOV mode");
3682 ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
3684 struct ixgbe_hw *hw =
3685 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3687 if (hw->mac.type == ixgbe_mac_82598EB)
3690 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3692 * SRIOV inactive scheme
3693 * any DCB/RSS w/o VMDq multi-queue setting
3695 switch (dev->data->dev_conf.rxmode.mq_mode) {
3697 case ETH_MQ_RX_DCB_RSS:
3698 case ETH_MQ_RX_VMDQ_RSS:
3699 ixgbe_rss_configure(dev);
3702 case ETH_MQ_RX_VMDQ_DCB:
3703 ixgbe_vmdq_dcb_configure(dev);
3706 case ETH_MQ_RX_VMDQ_ONLY:
3707 ixgbe_vmdq_rx_hw_configure(dev);
3710 case ETH_MQ_RX_NONE:
3712 /* if mq_mode is none, disable rss mode.*/
3713 ixgbe_rss_disable(dev);
3718 * SRIOV active scheme
3719 * Support RSS together with VMDq & SRIOV
3721 switch (dev->data->dev_conf.rxmode.mq_mode) {
3723 case ETH_MQ_RX_VMDQ_RSS:
3724 ixgbe_config_vf_rss(dev);
3727 /* FIXME if support DCB/RSS together with VMDq & SRIOV */
3728 case ETH_MQ_RX_VMDQ_DCB:
3729 case ETH_MQ_RX_VMDQ_DCB_RSS:
3731 "Could not support DCB with VMDq & SRIOV");
3734 ixgbe_config_vf_default(dev);
3743 ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
3745 struct ixgbe_hw *hw =
3746 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3750 if (hw->mac.type == ixgbe_mac_82598EB)
3753 /* disable arbiter before setting MTQC */
3754 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3755 rttdcs |= IXGBE_RTTDCS_ARBDIS;
3756 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3758 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3760 * SRIOV inactive scheme
3761 * any DCB w/o VMDq multi-queue setting
3763 if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
3764 ixgbe_vmdq_tx_hw_configure(hw);
3766 mtqc = IXGBE_MTQC_64Q_1PB;
3767 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
3770 switch (RTE_ETH_DEV_SRIOV(dev).active) {
3773 * SRIOV active scheme
3774 * FIXME if support DCB together with VMDq & SRIOV
3777 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
3780 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_32VF;
3783 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_RT_ENA |
3787 mtqc = IXGBE_MTQC_64Q_1PB;
3788 PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
3790 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
3793 /* re-enable arbiter */
3794 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
3795 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3801 * ixgbe_get_rscctl_maxdesc - Calculate the RSCCTL[n].MAXDESC for PF
3803 * Return the RSCCTL[n].MAXDESC for 82599 and x540 PF devices according to the
3804 * spec rev. 3.0 chapter 8.2.3.8.13.
3806 * @pool Memory pool of the Rx queue
3808 static inline uint32_t
3809 ixgbe_get_rscctl_maxdesc(struct rte_mempool *pool)
3811 struct rte_pktmbuf_pool_private *mp_priv = rte_mempool_get_priv(pool);
3813 /* MAXDESC * SRRCTL.BSIZEPKT must not exceed 64 KB minus one */
3816 (mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);
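/*
 * Worked example (illustrative): with roughly 2 KB of packet data per
 * mbuf (2048 bytes after the headroom), at most 65535 / 2048 = 31
 * descriptors fit under the 64 KB minus one limit noted above, so the
 * largest allowed setting, IXGBE_RSCCTL_MAXDESC_16, is returned.
 */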
3819 return IXGBE_RSCCTL_MAXDESC_16;
3820 else if (maxdesc >= 8)
3821 return IXGBE_RSCCTL_MAXDESC_8;
3822 else if (maxdesc >= 4)
3823 return IXGBE_RSCCTL_MAXDESC_4;
3825 return IXGBE_RSCCTL_MAXDESC_1;
3829 * ixgbe_set_ivar - Setup the correct IVAR register for a particular MSIX
3832 * (Taken from FreeBSD tree)
3833 * (yes this is all very magic and confusing :)
3836 * @entry the register array entry
3837 * @vector the MSIX vector for this queue
3841 ixgbe_set_ivar(struct rte_eth_dev *dev, u8 entry, u8 vector, s8 type)
3843 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3846 vector |= IXGBE_IVAR_ALLOC_VAL;
3848 switch (hw->mac.type) {
3850 case ixgbe_mac_82598EB:
3852 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3854 entry += (type * 64);
3855 index = (entry >> 2) & 0x1F;
3856 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3857 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3858 ivar |= (vector << (8 * (entry & 0x3)));
3859 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
3862 case ixgbe_mac_82599EB:
3863 case ixgbe_mac_X540:
3864 if (type == -1) { /* MISC IVAR */
3865 index = (entry & 1) * 8;
3866 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3867 ivar &= ~(0xFF << index);
3868 ivar |= (vector << index);
3869 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3870 } else { /* RX/TX IVARS */
3871 index = (16 * (entry & 1)) + (8 * type);
3872 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3873 ivar &= ~(0xFF << index);
3874 ivar |= (vector << index);
3875 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
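/*
 * Worked example for the 82599/X540 case: RX queue 5 (entry = 5,
 * type = 0) uses IVAR(5 >> 1) = IVAR(2) with a bit offset of
 * 16 * (5 & 1) + 8 * 0 = 16, so the vector (with IXGBE_IVAR_ALLOC_VAL
 * set) lands in bits 23:16 of that register.
 */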
3885 void __attribute__((cold))
3886 ixgbe_set_rx_function(struct rte_eth_dev *dev)
3888 uint16_t i, rx_using_sse;
3889 struct ixgbe_adapter *adapter =
3890 (struct ixgbe_adapter *)dev->data->dev_private;
3893 * In order to allow Vector Rx there are a few configuration
3894 * conditions to be met and Rx Bulk Allocation should be allowed.
3896 if (ixgbe_rx_vec_dev_conf_condition_check(dev) ||
3897 !adapter->rx_bulk_alloc_allowed) {
3898 PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx "
3899 "preconditions or RTE_IXGBE_INC_VECTOR is "
3901 dev->data->port_id);
3903 adapter->rx_vec_allowed = false;
3907 * Initialize the appropriate LRO callback.
3909 * If all queues satisfy the bulk allocation preconditions
3910 * (adapter->rx_bulk_alloc_allowed is TRUE) then we may use bulk allocation.
3911 * Otherwise use a single allocation version.
3913 if (dev->data->lro) {
3914 if (adapter->rx_bulk_alloc_allowed) {
3915 PMD_INIT_LOG(DEBUG, "LRO is requested. Using a bulk "
3916 "allocation version");
3917 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
3919 PMD_INIT_LOG(DEBUG, "LRO is requested. Using a single "
3920 "allocation version");
3921 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
3923 } else if (dev->data->scattered_rx) {
3925 * Set the non-LRO scattered callback: there are Vector and
3926 * single allocation versions.
3928 if (adapter->rx_vec_allowed) {
3929 PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
3930 "callback (port=%d).",
3931 dev->data->port_id);
3933 dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
3934 } else if (adapter->rx_bulk_alloc_allowed) {
3935 PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
3936 "allocation callback (port=%d).",
3937 dev->data->port_id);
3938 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
3940 PMD_INIT_LOG(DEBUG, "Using Regular (non-vector, "
3941 "single allocation) "
3942 "Scattered Rx callback "
3943 "(port=%d).",
3944 dev->data->port_id);
3946 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
3949 * Below we set "simple" callbacks according to port/queues parameters.
3950 * If parameters allow we are going to choose between the following options:
3951 * - Vector
3952 * - Bulk Allocation
3954 * - Single buffer allocation (the simplest one)
3956 } else if (adapter->rx_vec_allowed) {
3957 PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
3958 "burst size no less than %d (port=%d).",
3959 RTE_IXGBE_DESCS_PER_LOOP,
3960 dev->data->port_id);
3962 dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
3963 } else if (adapter->rx_bulk_alloc_allowed) {
3964 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
3965 "satisfied. Rx Burst Bulk Alloc function "
3966 "will be used on port=%d.",
3967 dev->data->port_id);
3969 dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
3971 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
3972 "satisfied, or Scattered Rx is requested "
3974 dev->data->port_id);
3976 dev->rx_pkt_burst = ixgbe_recv_pkts;
3979 /* Propagate information about RX function choice through all queues. */
3981 rx_using_sse =
3982 (dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec ||
3983 dev->rx_pkt_burst == ixgbe_recv_pkts_vec);
3985 for (i = 0; i < dev->data->nb_rx_queues; i++) {
3986 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
3987 rxq->rx_using_sse = rx_using_sse;
3992 * ixgbe_set_rsc - configure RSC related port HW registers
3994 * Configures the port's RSC related registers according to the 4.6.7.2 chapter
3995 * of 82599 Spec (x540 configuration is virtually the same).
3999 * Returns 0 in case of success or a non-zero error code
4002 ixgbe_set_rsc(struct rte_eth_dev *dev)
4004 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
4005 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4006 struct rte_eth_dev_info dev_info = { 0 };
4007 bool rsc_capable = false;
4012 dev->dev_ops->dev_infos_get(dev, &dev_info);
4013 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
4014 rsc_capable = true;
4016 if (!rsc_capable && rx_conf->enable_lro) {
4017 PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
4018 "support it");
4022 /* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
4024 if (!rx_conf->hw_strip_crc && rx_conf->enable_lro) {
4026 * According to chapter of 4.6.7.2.1 of the Spec Rev.
4027 * 3.0 RSC configuration requires HW CRC stripping being
4028 * enabled. If user requested both HW CRC stripping off
4029 * and RSC on - return an error.
4031 PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC "
4032 "stripping is disabled");
4036 /* RFCTL configuration */
4038 uint32_t rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
4039 if (rx_conf->enable_lro)
4041 * Since NFS packet coalescing is not supported, clear
4042 * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is enabled.
4045 rfctl &= ~(IXGBE_RFCTL_RSC_DIS | IXGBE_RFCTL_NFSW_DIS |
4046 IXGBE_RFCTL_NFSR_DIS);
4048 rfctl |= IXGBE_RFCTL_RSC_DIS;
4050 IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
4053 /* If LRO hasn't been requested - we are done here. */
4054 if (!rx_conf->enable_lro)
4057 /* Set RDRXCTL.RSCACKC bit */
4058 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
4059 rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
4060 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
4062 /* Per-queue RSC configuration (chapter 4.6.7.2.2 of 82599 Spec) */
4063 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4064 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
4066 IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxq->reg_idx));
4068 IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxq->reg_idx));
4070 IXGBE_READ_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx));
4072 IXGBE_READ_REG(hw, IXGBE_EITR(rxq->reg_idx));
4075 * ixgbe PMD doesn't support header-split at the moment.
4077 * Following the 4.6.7.2.1 chapter of the 82599/x540
4078 * Spec if RSC is enabled the SRRCTL[n].BSIZEHEADER
4079 * should be configured even if header split is not
4080 * enabled. We will configure it to 128 bytes, following the
4081 * recommendation in the spec.
4083 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
4084 srrctl |= (128 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
4085 IXGBE_SRRCTL_BSIZEHDR_MASK;
4088 * TODO: Consider setting the Receive Descriptor Minimum
4089 * Threshold Size for an RSC case. This is not an obviously
4090 * beneficial option, but one worth considering...
4093 rscctl |= IXGBE_RSCCTL_RSCEN;
4094 rscctl |= ixgbe_get_rscctl_maxdesc(rxq->mb_pool);
4095 psrtype |= IXGBE_PSRTYPE_TCPHDR;
4098 * RSC: Set ITR interval corresponding to 2K ints/s.
4100 * Full-sized RSC aggregations for a 10Gb/s link will
4101 * arrive at about 20K aggregation/s rate.
4103 * A 2K ints/s rate will cause only 10% of the
4104 * aggregations to be closed due to the interrupt timer
4105 * expiring when streaming at wire speed.
4107 * For a sparse streaming case this setting will yield
4108 * at most 500us latency for a single RSC aggregation.
4110 eitr &= ~IXGBE_EITR_ITR_INT_MASK;
4111 eitr |= IXGBE_EITR_INTERVAL_US(500) | IXGBE_EITR_CNT_WDIS;
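/*
 * Illustrative arithmetic for the interval chosen above (a sanity check,
 * not taken from the spec): a 500us EITR interval is 1s / 500us = 2000
 * interrupts/s. Assuming ~64KB full-sized aggregations on a 10Gb/s link,
 * aggregations complete at roughly 10e9 / (64K * 8) ~= 20K per second, so
 * the timer closes only about 2K / 20K = 10% of them, matching the
 * comment above.
 */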
4113 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
4114 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxq->reg_idx), rscctl);
4115 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
4116 IXGBE_WRITE_REG(hw, IXGBE_EITR(rxq->reg_idx), eitr);
4119 * RSC requires the mapping of the queue to the
4122 ixgbe_set_ivar(dev, rxq->reg_idx, i, 0);
4127 PMD_INIT_LOG(DEBUG, "enabling LRO mode");
4133 * Initializes Receive Unit.
4135 int __attribute__((cold))
4136 ixgbe_dev_rx_init(struct rte_eth_dev *dev)
4138 struct ixgbe_hw *hw;
4139 struct ixgbe_rx_queue *rxq;
4150 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
4153 PMD_INIT_FUNC_TRACE();
4154 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4157 * Make sure receives are disabled while setting
4158 * up the RX context (registers, descriptor rings, etc.).
4160 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4161 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
4163 /* Enable receipt of broadcast frames */
4164 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4165 fctrl |= IXGBE_FCTRL_BAM;
4166 fctrl |= IXGBE_FCTRL_DPF;
4167 fctrl |= IXGBE_FCTRL_PMCF;
4168 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4171 * Configure CRC stripping, if any.
4173 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4174 if (rx_conf->hw_strip_crc)
4175 hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
4177 hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
4180 * Configure jumbo frame support, if any.
4182 if (rx_conf->jumbo_frame == 1) {
4183 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
4184 maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
4185 maxfrs &= 0x0000FFFF;
4186 maxfrs |= (rx_conf->max_rx_pkt_len << 16);
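/*
 * Worked example (hypothetical configuration): with max_rx_pkt_len set to
 * 9000, the MFS field in MAXFRS[31:16] becomes 9000 (0x2328) and the MAC
 * accepts received frames up to that length.
 */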
4187 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
4189 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
4192 * If loopback mode is configured for 82599, set LPBK bit.
4194 if (hw->mac.type == ixgbe_mac_82599EB &&
4195 dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
4196 hlreg0 |= IXGBE_HLREG0_LPBK;
4198 hlreg0 &= ~IXGBE_HLREG0_LPBK;
4200 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4202 /* Setup RX queues */
4203 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4204 rxq = dev->data->rx_queues[i];
4207 * Reset crc_len in case it was changed after queue setup by a
4208 * call to configure.
4210 rxq->crc_len = rx_conf->hw_strip_crc ? 0 : ETHER_CRC_LEN;
4212 /* Setup the Base and Length of the Rx Descriptor Rings */
4213 bus_addr = rxq->rx_ring_phys_addr;
4214 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rxq->reg_idx),
4215 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4216 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rxq->reg_idx),
4217 (uint32_t)(bus_addr >> 32));
4218 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rxq->reg_idx),
4219 rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
4220 IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
4221 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), 0);
4223 /* Configure the SRRCTL register */
4224 #ifdef RTE_HEADER_SPLIT_ENABLE
4226 * Configure Header Split
4228 if (rx_conf->header_split) {
4229 if (hw->mac.type == ixgbe_mac_82599EB) {
4230 /* Must setup the PSRTYPE register */
4232 psrtype = IXGBE_PSRTYPE_TCPHDR |
4233 IXGBE_PSRTYPE_UDPHDR |
4234 IXGBE_PSRTYPE_IPV4HDR |
4235 IXGBE_PSRTYPE_IPV6HDR;
4236 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
4238 srrctl = ((rx_conf->split_hdr_size <<
4239 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
4240 IXGBE_SRRCTL_BSIZEHDR_MASK);
4241 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
4244 srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
4246 /* Set if packets are dropped when no descriptors are available */
4248 srrctl |= IXGBE_SRRCTL_DROP_EN;
4251 * Configure the RX buffer size in the BSIZEPACKET field of
4252 * the SRRCTL register of the queue.
4253 * The value is in 1 KB resolution. Valid values can be from
4256 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
4257 RTE_PKTMBUF_HEADROOM);
4258 srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
4259 IXGBE_SRRCTL_BSIZEPKT_MASK);
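/*
 * Worked example (assuming a pool created with the default
 * RTE_MBUF_DEFAULT_BUF_SIZE): the data room is 2176 bytes, so buf_size is
 * 2176 - 128 = 2048, and 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT (10)
 * programs a 2 KB receive buffer into the BSIZEPACKET field.
 */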
4261 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
4263 buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
4264 IXGBE_SRRCTL_BSIZEPKT_SHIFT);
4266 /* Add dual VLAN tag length to support dual VLAN */
4267 if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
4268 2 * IXGBE_VLAN_TAG_SIZE > buf_size)
4269 dev->data->scattered_rx = 1;
4272 if (rx_conf->enable_scatter)
4273 dev->data->scattered_rx = 1;
4276 * Device configured with multiple RX queues.
4278 ixgbe_dev_mq_rx_configure(dev);
4281 * Setup the Checksum Register.
4282 * Disable Full-Packet Checksum, which is mutually exclusive with RSS.
4283 * Enable IP/L4 checksum computation by hardware if requested to do so.
4285 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
4286 rxcsum |= IXGBE_RXCSUM_PCSD;
4287 if (rx_conf->hw_ip_checksum)
4288 rxcsum |= IXGBE_RXCSUM_IPPCSE;
4290 rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
4292 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
4294 if (hw->mac.type == ixgbe_mac_82599EB ||
4295 hw->mac.type == ixgbe_mac_X540) {
4296 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
4297 if (rx_conf->hw_strip_crc)
4298 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
4300 rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
4301 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
4302 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
4305 rc = ixgbe_set_rsc(dev);
4309 ixgbe_set_rx_function(dev);
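/*
 * A minimal sketch of how these routines are typically sequenced during
 * device start (assuming the usual ixgbe_dev_start() flow; error handling
 * elided):
 *
 *	ixgbe_dev_tx_init(dev);			// program TX rings and HLREG0
 *	rc = ixgbe_dev_rx_init(dev);		// RX rings, RSC, checksum, ...
 *	if (rc == 0)
 *		rc = ixgbe_dev_rxtx_start(dev);	// enable queues and RX/TX engines
 */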
4315 * Initializes Transmit Unit.
4317 void __attribute__((cold))
4318 ixgbe_dev_tx_init(struct rte_eth_dev *dev)
4320 struct ixgbe_hw *hw;
4321 struct ixgbe_tx_queue *txq;
4327 PMD_INIT_FUNC_TRACE();
4328 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4330 /* Enable TX CRC (checksum offload requirement) and HW padding
4331 * (TSO requirement) */
4332 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4333 hlreg0 |= (IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN);
4334 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4336 /* Setup the Base and Length of the Tx Descriptor Rings */
4337 for (i = 0; i < dev->data->nb_tx_queues; i++) {
4338 txq = dev->data->tx_queues[i];
4340 bus_addr = txq->tx_ring_phys_addr;
4341 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(txq->reg_idx),
4342 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4343 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(txq->reg_idx),
4344 (uint32_t)(bus_addr >> 32));
4345 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(txq->reg_idx),
4346 txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
4347 /* Setup the HW Tx Head and TX Tail descriptor pointers */
4348 IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
4349 IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
4352 * Disable the Tx Head Writeback relaxed-ordering (RO) bit, since it
4353 * breaks bookkeeping if writebacks aren't delivered in order.
4355 switch (hw->mac.type) {
4356 case ixgbe_mac_82598EB:
4357 txctrl = IXGBE_READ_REG(hw,
4358 IXGBE_DCA_TXCTRL(txq->reg_idx));
4359 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4360 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx),
4364 case ixgbe_mac_82599EB:
4365 case ixgbe_mac_X540:
4366 case ixgbe_mac_X550:
4367 case ixgbe_mac_X550EM_x:
4369 txctrl = IXGBE_READ_REG(hw,
4370 IXGBE_DCA_TXCTRL_82599(txq->reg_idx));
4371 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4372 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx),
4378 /* Device configured with multiple TX queues. */
4379 ixgbe_dev_mq_tx_configure(dev);
4383 * Set up link for 82599 loopback mode Tx->Rx.
4385 static inline void __attribute__((cold))
4386 ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
4388 PMD_INIT_FUNC_TRACE();
4390 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
4391 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM) !=
4393 PMD_INIT_LOG(ERR, "Could not enable loopback mode");
4402 IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU);
4403 ixgbe_reset_pipeline_82599(hw);
4405 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
4411 * Start Transmit and Receive Units.
4413 int __attribute__((cold))
4414 ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
4416 struct ixgbe_hw *hw;
4417 struct ixgbe_tx_queue *txq;
4418 struct ixgbe_rx_queue *rxq;
4425 PMD_INIT_FUNC_TRACE();
4426 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4428 for (i = 0; i < dev->data->nb_tx_queues; i++) {
4429 txq = dev->data->tx_queues[i];
4430 /* Setup Transmit Threshold Registers */
4431 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
4432 txdctl |= txq->pthresh & 0x7F;
4433 txdctl |= ((txq->hthresh & 0x7F) << 8);
4434 txdctl |= ((txq->wthresh & 0x7F) << 16);
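/*
 * Illustrative packing (hypothetical thresholds): with pthresh = 32,
 * hthresh = 0 and wthresh = 0, the statements above contribute 0x20 to
 * txdctl, i.e. PTHRESH lives in bits [6:0], HTHRESH in bits [14:8] and
 * WTHRESH in bits [22:16].
 */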
4435 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
4438 if (hw->mac.type != ixgbe_mac_82598EB) {
4439 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
4440 dmatxctl |= IXGBE_DMATXCTL_TE;
4441 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
4444 for (i = 0; i < dev->data->nb_tx_queues; i++) {
4445 txq = dev->data->tx_queues[i];
4446 if (!txq->tx_deferred_start) {
4447 ret = ixgbe_dev_tx_queue_start(dev, i);
4453 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4454 rxq = dev->data->rx_queues[i];
4455 if (!rxq->rx_deferred_start) {
4456 ret = ixgbe_dev_rx_queue_start(dev, i);
4462 /* Enable Receive engine */
4463 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4464 if (hw->mac.type == ixgbe_mac_82598EB)
4465 rxctrl |= IXGBE_RXCTRL_DMBYPS;
4466 rxctrl |= IXGBE_RXCTRL_RXEN;
4467 hw->mac.ops.enable_rx_dma(hw, rxctrl);
4469 /* If loopback mode is enabled for 82599, set up the link accordingly */
4470 if (hw->mac.type == ixgbe_mac_82599EB &&
4471 dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
4472 ixgbe_setup_loopback_link_82599(hw);
4478 * Start Receive Units for specified queue.
4480 int __attribute__((cold))
4481 ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
4483 struct ixgbe_hw *hw;
4484 struct ixgbe_rx_queue *rxq;
4488 PMD_INIT_FUNC_TRACE();
4489 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4491 if (rx_queue_id < dev->data->nb_rx_queues) {
4492 rxq = dev->data->rx_queues[rx_queue_id];
4494 /* Allocate buffers for descriptor rings */
4495 if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
4496 PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
4500 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
4501 rxdctl |= IXGBE_RXDCTL_ENABLE;
4502 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
4504 /* Wait until RX Enable ready */
4505 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
4508 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
4509 } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
4511 PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d",
4514 IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
4515 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
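/*
 * The tail is programmed to nb_rx_desc - 1 rather than nb_rx_desc: the
 * hardware treats RDH == RDT as an empty ring, so the driver always
 * leaves at least one descriptor unposted.
 */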
4516 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
4524 * Stop Receive Units for specified queue.
4526 int __attribute__((cold))
4527 ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
4529 struct ixgbe_hw *hw;
4530 struct ixgbe_adapter *adapter =
4531 (struct ixgbe_adapter *)dev->data->dev_private;
4532 struct ixgbe_rx_queue *rxq;
4536 PMD_INIT_FUNC_TRACE();
4537 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4539 if (rx_queue_id < dev->data->nb_rx_queues) {
4540 rxq = dev->data->rx_queues[rx_queue_id];
4542 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
4543 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
4544 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
4546 /* Wait until the RX Enable bit has cleared */
4547 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
4550 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
4551 } while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE));
4553 PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d",
4556 rte_delay_us(RTE_IXGBE_WAIT_100_US);
4558 ixgbe_rx_queue_release_mbufs(rxq);
4559 ixgbe_reset_rx_queue(adapter, rxq);
4560 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
4569 * Start Transmit Units for specified queue.
4571 int __attribute__((cold))
4572 ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
4574 struct ixgbe_hw *hw;
4575 struct ixgbe_tx_queue *txq;
4579 PMD_INIT_FUNC_TRACE();
4580 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4582 if (tx_queue_id < dev->data->nb_tx_queues) {
4583 txq = dev->data->tx_queues[tx_queue_id];
4584 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
4585 txdctl |= IXGBE_TXDCTL_ENABLE;
4586 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
4588 /* Wait until TX Enable ready */
4589 if (hw->mac.type == ixgbe_mac_82599EB) {
4590 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
4593 txdctl = IXGBE_READ_REG(hw,
4594 IXGBE_TXDCTL(txq->reg_idx));
4595 } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
4597 PMD_INIT_LOG(ERR, "Could not enable "
4598 "Tx Queue %d", tx_queue_id);
4601 IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
4602 IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
4603 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
4611 * Stop Transmit Units for specified queue.
4613 int __attribute__((cold))
4614 ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
4616 struct ixgbe_hw *hw;
4617 struct ixgbe_tx_queue *txq;
4619 uint32_t txtdh, txtdt;
4622 PMD_INIT_FUNC_TRACE();
4623 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4625 if (tx_queue_id < dev->data->nb_tx_queues) {
4626 txq = dev->data->tx_queues[tx_queue_id];
4628 /* Wait until TX queue is empty */
4629 if (hw->mac.type == ixgbe_mac_82599EB) {
4630 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
4632 rte_delay_us(RTE_IXGBE_WAIT_100_US);
4633 txtdh = IXGBE_READ_REG(hw,
4634 IXGBE_TDH(txq->reg_idx));
4635 txtdt = IXGBE_READ_REG(hw,
4636 IXGBE_TDT(txq->reg_idx));
4637 } while (--poll_ms && (txtdh != txtdt));
4639 PMD_INIT_LOG(ERR, "Tx Queue %d is not empty "
4640 "when stopping.", tx_queue_id);
4643 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
4644 txdctl &= ~IXGBE_TXDCTL_ENABLE;
4645 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
4647 /* Wait until the TX Enable bit has cleared */
4648 if (hw->mac.type == ixgbe_mac_82599EB) {
4649 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
4652 txdctl = IXGBE_READ_REG(hw,
4653 IXGBE_TXDCTL(txq->reg_idx));
4654 } while (--poll_ms && (txdctl & IXGBE_TXDCTL_ENABLE));
4656 PMD_INIT_LOG(ERR, "Could not disable "
4657 "Tx Queue %d", tx_queue_id);
4660 if (txq->ops != NULL) {
4661 txq->ops->release_mbufs(txq);
4662 txq->ops->reset(txq);
4664 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
4672 ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
4673 struct rte_eth_rxq_info *qinfo)
4675 struct ixgbe_rx_queue *rxq;
4677 rxq = dev->data->rx_queues[queue_id];
4679 qinfo->mp = rxq->mb_pool;
4680 qinfo->scattered_rx = dev->data->scattered_rx;
4681 qinfo->nb_desc = rxq->nb_rx_desc;
4683 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
4684 qinfo->conf.rx_drop_en = rxq->drop_en;
4685 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
4689 ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
4690 struct rte_eth_txq_info *qinfo)
4692 struct ixgbe_tx_queue *txq;
4694 txq = dev->data->tx_queues[queue_id];
4696 qinfo->nb_desc = txq->nb_tx_desc;
4698 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
4699 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
4700 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
4702 qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
4703 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
4704 qinfo->conf.txq_flags = txq->txq_flags;
4705 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
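/*
 * These two callbacks back the generic ethdev queue-info API. A minimal,
 * hypothetical application-side sketch (assuming a configured and started
 * ixgbe port) could look like:
 *
 *	#include <stdio.h>
 *	#include <rte_ethdev.h>
 *
 *	static void
 *	dump_rxq_info(uint8_t port_id, uint16_t queue_id)
 *	{
 *		struct rte_eth_rxq_info qinfo;
 *
 *		// Dispatches to ixgbe_rxq_info_get() for ixgbe ports.
 *		if (rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo) == 0)
 *			printf("rxq %u: %u descriptors, scattered_rx=%u\n",
 *			       queue_id, qinfo.nb_desc, qinfo.scattered_rx);
 *	}
 */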
4709 * [VF] Initializes Receive Unit.
4711 int __attribute__((cold))
4712 ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
4714 struct ixgbe_hw *hw;
4715 struct ixgbe_rx_queue *rxq;
4717 uint32_t srrctl, psrtype = 0;
4722 PMD_INIT_FUNC_TRACE();
4723 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4725 if (rte_is_power_of_2(dev->data->nb_rx_queues) == 0) {
4726 PMD_INIT_LOG(ERR, "The number of Rx queues is invalid, "
4727 "it should be a power of 2");
4731 if (dev->data->nb_rx_queues > hw->mac.max_rx_queues) {
4732 PMD_INIT_LOG(ERR, "The number of Rx queues is invalid, "
4733 "it should be equal to or less than %d",
4734 hw->mac.max_rx_queues);
4739 * When the VF driver issues an IXGBE_VF_RESET request, the PF driver
4740 * disables VF receipt of packets if the PF MTU is > 1500.
4741 * This is done to deal with an 82599 limitation that forces
4742 * the PF and all VFs to share the same MTU.
4743 * The PF driver then re-enables VF receipt of packets when
4744 * the VF driver issues an IXGBE_VF_SET_LPE request.
4745 * In the meantime, the VF device cannot be used, even if the VF driver
4746 * and the Guest VM network stack are ready to accept packets with a
4747 * size up to the PF MTU.
4748 * As a work-around to this PF behaviour, force the call to
4749 * ixgbevf_rlpml_set_vf even if jumbo frames are not used. This way,
4750 * VF packet reception works in all cases.
4752 ixgbevf_rlpml_set_vf(hw,
4753 (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
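/*
 * ixgbevf_rlpml_set_vf() implements the work-around described above: it
 * sends the IXGBE_VF_SET_LPE mailbox request to the PF with the maximum
 * receive packet length, so the PF re-enables packet delivery to this VF.
 */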
4755 /* Setup RX queues */
4756 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4757 rxq = dev->data->rx_queues[i];
4759 /* Allocate buffers for descriptor rings */
4760 ret = ixgbe_alloc_rx_queue_mbufs(rxq);
4764 /* Setup the Base and Length of the Rx Descriptor Rings */
4765 bus_addr = rxq->rx_ring_phys_addr;
4767 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
4768 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4769 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
4770 (uint32_t)(bus_addr >> 32));
4771 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
4772 rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
4773 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0);
4774 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0);
4777 /* Configure the SRRCTL register */
4778 #ifdef RTE_HEADER_SPLIT_ENABLE
4780 * Configure Header Split
4782 if (dev->data->dev_conf.rxmode.header_split) {
4783 srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
4784 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
4785 IXGBE_SRRCTL_BSIZEHDR_MASK);
4786 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
4789 srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
4791 /* Set if packets are dropped when no descriptors are available */
4793 srrctl |= IXGBE_SRRCTL_DROP_EN;
4796 * Configure the RX buffer size in the BSIZEPACKET field of
4797 * the SRRCTL register of the queue.
4798 * The value is in 1 KB resolution. Valid values can be from
4801 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
4802 RTE_PKTMBUF_HEADROOM);
4803 srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
4804 IXGBE_SRRCTL_BSIZEPKT_MASK);
4807 * VF modification to write virtual function SRRCTL register
4809 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), srrctl);
4811 buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
4812 IXGBE_SRRCTL_BSIZEPKT_SHIFT);
4814 if (dev->data->dev_conf.rxmode.enable_scatter ||
4815 /* Add dual VLAN tag length to support dual VLAN */
4816 (dev->data->dev_conf.rxmode.max_rx_pkt_len +
4817 2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
4818 if (!dev->data->scattered_rx)
4819 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
4820 dev->data->scattered_rx = 1;
4824 #ifdef RTE_HEADER_SPLIT_ENABLE
4825 if (dev->data->dev_conf.rxmode.header_split)
4826 /* Must setup the PSRTYPE register */
4827 psrtype = IXGBE_PSRTYPE_TCPHDR |
4828 IXGBE_PSRTYPE_UDPHDR |
4829 IXGBE_PSRTYPE_IPV4HDR |
4830 IXGBE_PSRTYPE_IPV6HDR;
4833 /* Set RQPL for VF RSS according to max Rx queue */
4834 psrtype |= (dev->data->nb_rx_queues >> 1) <<
4835 IXGBE_PSRTYPE_RQPL_SHIFT;
4836 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
4838 ixgbe_set_rx_function(dev);
4844 * [VF] Initializes Transmit Unit.
4846 void __attribute__((cold))
4847 ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
4849 struct ixgbe_hw *hw;
4850 struct ixgbe_tx_queue *txq;
4855 PMD_INIT_FUNC_TRACE();
4856 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4858 /* Setup the Base and Length of the Tx Descriptor Rings */
4859 for (i = 0; i < dev->data->nb_tx_queues; i++) {
4860 txq = dev->data->tx_queues[i];
4861 bus_addr = txq->tx_ring_phys_addr;
4862 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
4863 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4864 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i),
4865 (uint32_t)(bus_addr >> 32));
4866 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
4867 txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
4868 /* Setup the HW Tx Head and TX Tail descriptor pointers */
4869 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
4870 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
4873 * Disable the Tx Head Writeback relaxed-ordering (RO) bit, since it
4874 * breaks bookkeeping if writebacks aren't delivered in order.
4876 txctrl = IXGBE_READ_REG(hw,
4877 IXGBE_VFDCA_TXCTRL(i));
4878 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4879 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i),
4885 * [VF] Start Transmit and Receive Units.
4887 void __attribute__((cold))
4888 ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
4890 struct ixgbe_hw *hw;
4891 struct ixgbe_tx_queue *txq;
4892 struct ixgbe_rx_queue *rxq;
4898 PMD_INIT_FUNC_TRACE();
4899 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4901 for (i = 0; i < dev->data->nb_tx_queues; i++) {
4902 txq = dev->data->tx_queues[i];
4903 /* Setup Transmit Threshold Registers */
4904 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
4905 txdctl |= txq->pthresh & 0x7F;
4906 txdctl |= ((txq->hthresh & 0x7F) << 8);
4907 txdctl |= ((txq->wthresh & 0x7F) << 16);
4908 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
4911 for (i = 0; i < dev->data->nb_tx_queues; i++) {
4913 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
4914 txdctl |= IXGBE_TXDCTL_ENABLE;
4915 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
4918 /* Wait until TX Enable ready */
4921 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
4922 } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
4924 PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i);
4926 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4928 rxq = dev->data->rx_queues[i];
4930 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
4931 rxdctl |= IXGBE_RXDCTL_ENABLE;
4932 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
4934 /* Wait until RX Enable ready */
4938 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
4939 } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
4941 PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i);
4943 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1);
4948 /* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */
4949 int __attribute__((weak))
4950 ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
4955 uint16_t __attribute__((weak))
4956 ixgbe_recv_pkts_vec(
4957 void __rte_unused *rx_queue,
4958 struct rte_mbuf __rte_unused **rx_pkts,
4959 uint16_t __rte_unused nb_pkts)
4964 uint16_t __attribute__((weak))
4965 ixgbe_recv_scattered_pkts_vec(
4966 void __rte_unused *rx_queue,
4967 struct rte_mbuf __rte_unused **rx_pkts,
4968 uint16_t __rte_unused nb_pkts)
4973 int __attribute__((weak))
4974 ixgbe_rxq_vec_setup(struct ixgbe_rx_queue __rte_unused *rxq)