/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
#include "ixgbe/ixgbe_api.h"
#include "ixgbe/ixgbe_vf.h"
#include "ixgbe_ethdev.h"
+#include "ixgbe/ixgbe_dcb.h"
+
+
+#define RTE_PMD_IXGBE_TX_MAX_BURST 32
+
+#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
+#define RTE_PMD_IXGBE_RX_MAX_BURST 32
+#endif
static inline struct rte_mbuf *
rte_rxmbuf_alloc(struct rte_mempool *mp)
uint16_t nb_rx_desc; /**< number of RX descriptors. */
uint16_t rx_tail; /**< current value of RDT register. */
uint16_t nb_rx_hold; /**< number of held free RX desc. */
+#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
+ uint16_t rx_nb_avail; /**< number of staged packets ready to return */
+ uint16_t rx_next_avail; /**< index of next staged packet to return */
+ uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
+#endif
uint16_t rx_free_thresh; /**< max free RX desc to hold. */
uint16_t queue_id; /**< RX queue index. */
uint8_t port_id; /**< Device port identifier. */
uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
+ uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
+#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
+ /** dummy mbuf allocated for wrap-around when scanning the H/W ring */
+ struct rte_mbuf fake_mbuf;
+ /** hold packets to return to application */
+ struct rte_mbuf *rx_stage[RTE_PMD_IXGBE_RX_MAX_BURST*2];
+#endif
};
/**
/**
* Structure to check if a new context needs to be built
*/
+
struct ixgbe_advctx_info {
uint16_t flags; /**< ol_flags for context build. */
uint32_t cmp_mask; /**< compare mask for vlan_macip_lens */
- uint32_t vlan_macip_lens; /**< vlan, mac ip length. */
+ union rte_vlan_macip vlan_macip_lens; /**< vlan, mac ip length. */
};
/**
uint16_t last_desc_cleaned;
/** Total number of TX descriptors ready to be allocated. */
uint16_t nb_tx_free;
+ uint16_t tx_next_dd; /**< next desc to scan for DD bit */
+ uint16_t tx_next_rs; /**< next desc to set RS bit */
uint16_t queue_id; /**< TX queue index. */
uint8_t port_id; /**< Device port identifier. */
uint8_t pthresh; /**< Prefetch threshold register. */
uint8_t hthresh; /**< Host threshold register. */
uint8_t wthresh; /**< Write-back threshold reg. */
+ uint32_t txq_flags; /**< Holds flags for this TXq */
uint32_t ctx_curr; /**< Hardware context states. */
/** Hardware context0 history. */
struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
* TX functions
*
**********************************************************************/
+
+/*
+ * The "simple" TX queue functions require that the following
+ * flags are set when the TX queue is configured:
+ * - ETH_TXQ_FLAGS_NOMULTSEGS
+ * - ETH_TXQ_FLAGS_NOVLANOFFL
+ * - ETH_TXQ_FLAGS_NOXSUMSCTP
+ * - ETH_TXQ_FLAGS_NOXSUMUDP
+ * - ETH_TXQ_FLAGS_NOXSUMTCP
+ * and that the RS bit threshold (tx_rs_thresh) is at least equal to
+ * RTE_PMD_IXGBE_TX_MAX_BURST.
+ * (ETH_TXQ_FLAGS_NOOFFLOADS, used below, combines the four offload
+ * flags listed above.)
+ */
+#define IXGBE_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
+ ETH_TXQ_FLAGS_NOOFFLOADS)
+
+/*
+ * Check for descriptors with their DD bit set and free mbufs.
+ * Return the total number of buffers freed.
+ */
+static inline int
+ixgbe_tx_free_bufs(struct igb_tx_queue *txq)
+{
+ struct igb_tx_entry *txep;
+ uint32_t status;
+ int i;
+
+ /* check DD bit on threshold descriptor */
+ status = txq->tx_ring[txq->tx_next_dd].wb.status;
+ if (! (status & IXGBE_ADVTXD_STAT_DD))
+ return 0;
+
+ /*
+ * first buffer to free from S/W ring is at index
+ * tx_next_dd - (tx_rs_thresh-1)
+ */
+ txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]);
+
+ /* prefetch the mbufs that are about to be freed */
+ for (i = 0; i < txq->tx_rs_thresh; ++i)
+ rte_prefetch0((txep + i)->mbuf);
+
+ /* free buffers one at a time */
+ if ((txq->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOREFCOUNT) != 0) {
+ for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
+ rte_mempool_put(txep->mbuf->pool, txep->mbuf);
+ txep->mbuf = NULL;
+ }
+ } else {
+ for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
+ rte_pktmbuf_free_seg(txep->mbuf);
+ txep->mbuf = NULL;
+ }
+ }
+
+ /* buffers were freed, update counters */
+ txq->nb_tx_free += txq->tx_rs_thresh;
+ txq->tx_next_dd += txq->tx_rs_thresh;
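+ /* tx_rs_thresh divides the ring size, so after wrap-around the
+ * next threshold descriptor is again at index tx_rs_thresh - 1 */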
+ if (txq->tx_next_dd >= txq->nb_tx_desc)
+ txq->tx_next_dd = txq->tx_rs_thresh - 1;
+
+ return txq->tx_rs_thresh;
+}
+
+/*
+ * Populate descriptors with the following info:
+ * 1.) buffer_addr = phys_addr + headroom
+ * 2.) cmd_type_len = DCMD_DTYP_FLAGS | pkt_len
+ * 3.) olinfo_status = pkt_len << PAYLEN_SHIFT
+ */
+
+/* Defines for Tx descriptor */
+#define DCMD_DTYP_FLAGS (IXGBE_ADVTXD_DTYP_DATA |\
+ IXGBE_ADVTXD_DCMD_IFCS |\
+ IXGBE_ADVTXD_DCMD_DEXT |\
+ IXGBE_ADVTXD_DCMD_EOP)
+
+/* Populate 4 descriptors with data from 4 mbufs */
+static inline void
+tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
+{
+ uint64_t buf_dma_addr;
+ uint32_t pkt_len;
+ int i;
+
+ for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
+ buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
+ pkt_len = (*pkts)->pkt.data_len;
+
+ /* write data to descriptor */
+ txdp->read.buffer_addr = buf_dma_addr;
+ txdp->read.cmd_type_len =
+ ((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+ txdp->read.olinfo_status =
+ (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+ }
+}
+
+/* Populate 1 descriptor with data from 1 mbuf */
+static inline void
+tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
+{
+ uint64_t buf_dma_addr;
+ uint32_t pkt_len;
+
+ buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
+ pkt_len = (*pkts)->pkt.data_len;
+
+ /* write data to descriptor */
+ txdp->read.buffer_addr = buf_dma_addr;
+ txdp->read.cmd_type_len =
+ ((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+ txdp->read.olinfo_status =
+ (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+}
+
+/*
+ * Fill H/W descriptor ring with mbuf data.
+ * Copy mbuf pointers to the S/W ring.
+ */
+static inline void
+ixgbe_tx_fill_hw_ring(struct igb_tx_queue *txq, struct rte_mbuf **pkts,
+ uint16_t nb_pkts)
+{
+ volatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
+ struct igb_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
+ const int N_PER_LOOP = 4;
+ const int N_PER_LOOP_MASK = N_PER_LOOP-1;
+ int mainpart, leftover;
+ int i, j;
+
+ /*
+ * Process most of the packets in chunks of N pkts. Any
+ * leftover packets will get processed one at a time.
+ */
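+ /* mainpart is nb_pkts rounded down to a multiple of N_PER_LOOP */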
+ mainpart = (nb_pkts & ((uint32_t) ~N_PER_LOOP_MASK));
+ leftover = (nb_pkts & ((uint32_t) N_PER_LOOP_MASK));
+ for (i = 0; i < mainpart; i += N_PER_LOOP) {
+ /* Copy N mbuf pointers to the S/W ring */
+ for (j = 0; j < N_PER_LOOP; ++j) {
+ (txep + i + j)->mbuf = *(pkts + i + j);
+ }
+ tx4(txdp + i, pkts + i);
+ }
+
+ if (unlikely(leftover > 0)) {
+ for (i = 0; i < leftover; ++i) {
+ (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
+ tx1(txdp + mainpart + i, pkts + mainpart + i);
+ }
+ }
+}
+
+static inline uint16_t
+tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct igb_tx_queue *txq = (struct igb_tx_queue *)tx_queue;
+ volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring;
+ uint16_t n = 0;
+
+ /*
+ * Begin scanning the H/W ring for done descriptors when the
+ * number of available descriptors drops below tx_free_thresh. For
+ * each done descriptor, free the associated buffer.
+ */
+ if (txq->nb_tx_free < txq->tx_free_thresh)
+ ixgbe_tx_free_bufs(txq);
+
+ /* Only use descriptors that are available */
+ nb_pkts = RTE_MIN(txq->nb_tx_free, nb_pkts);
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ /* Use exactly nb_pkts descriptors */
+ txq->nb_tx_free -= nb_pkts;
+
+ /*
+ * At this point, we know there are enough descriptors in the
+ * ring to transmit all the packets. This assumes that each
+ * mbuf contains a single segment, and that no new offloads
+ * are expected, which would require a new context descriptor.
+ */
+
+ /*
+ * See if we're going to wrap-around. If so, handle the top
+ * of the descriptor ring first, then do the bottom. If not,
+ * the processing looks just like the "bottom" part anyway...
+ */
+ if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
+ n = txq->nb_tx_desc - txq->tx_tail;
+ ixgbe_tx_fill_hw_ring(txq, tx_pkts, n);
+
+ /*
+ * We know that the last descriptor in the ring will need to
+ * have its RS bit set because tx_rs_thresh has to be
+ * a divisor of the ring size
+ */
+ tx_r[txq->tx_next_rs].read.cmd_type_len |=
+ rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
+ txq->tx_next_rs = txq->tx_rs_thresh - 1;
+
+ txq->tx_tail = 0;
+ }
+
+ /* Fill H/W descriptor ring with mbuf data */
+ ixgbe_tx_fill_hw_ring(txq, tx_pkts + n, nb_pkts - n);
+ txq->tx_tail += (nb_pkts - n);
+
+ /*
+ * Determine if RS bit should be set
+ * This is what we actually want:
+ * if ((txq->tx_tail - 1) >= txq->tx_next_rs)
+ * but instead of subtracting 1 and doing >=, we can just do
+ * greater than without subtracting.
+ */
+ if (txq->tx_tail > txq->tx_next_rs) {
+ tx_r[txq->tx_next_rs].read.cmd_type_len |=
+ rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
+ txq->tx_next_rs += txq->tx_rs_thresh;
+ if (txq->tx_next_rs >= txq->nb_tx_desc)
+ txq->tx_next_rs = txq->tx_rs_thresh - 1;
+ }
+
+ /*
+ * Check for wrap-around. This would only happen if we used
+ * up to the last descriptor in the ring, no more, no less.
+ */
+ if (txq->tx_tail >= txq->nb_tx_desc)
+ txq->tx_tail = 0;
+
+ /* update tail pointer */
+ rte_wmb();
+ IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);
+
+ return nb_pkts;
+}
+
+uint16_t
+ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_tx;
+
+ /* Transmit the whole request directly if it fits in one TX_MAX_BURST chunk */
+ if (likely(nb_pkts <= RTE_PMD_IXGBE_TX_MAX_BURST))
+ return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
+
+ /* transmit more than the max burst, in chunks of TX_MAX_BURST */
+ nb_tx = 0;
+ while (nb_pkts) {
+ uint16_t ret, n;
+ n = RTE_MIN(nb_pkts, RTE_PMD_IXGBE_TX_MAX_BURST);
+ ret = tx_xmit_pkts(tx_queue, &(tx_pkts[nb_tx]), n);
+ nb_tx += ret;
+ nb_pkts -= ret;
+ if (ret < n)
+ break;
+ }
+
+ return nb_tx;
+}
+
static inline void
ixgbe_set_xmit_ctx(struct igb_tx_queue* txq,
volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
txq->ctx_cache[ctx_idx].flags = ol_flags;
txq->ctx_cache[ctx_idx].cmp_mask = cmp_mask;
- txq->ctx_cache[ctx_idx].vlan_macip_lens = vlan_macip_lens & cmp_mask;
+ txq->ctx_cache[ctx_idx].vlan_macip_lens.data =
+ vlan_macip_lens & cmp_mask;
ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
{
/* If match with the current used context */
if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
- (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens ==
+ (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
(txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
return txq->ctx_curr;
}
/* What if match with the next context */
txq->ctx_curr ^= 1;
if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
- (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens ==
+ (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
(txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
return txq->ctx_curr;
}
uint16_t nb_used;
uint16_t tx_ol_req;
uint32_t vlan_macip_lens;
- uint32_t ctx;
+ uint32_t ctx = 0;
uint32_t new_ctx;
txq = tx_queue;
* are needed for offload functionality.
*/
ol_flags = tx_pkt->ol_flags;
- vlan_macip_lens = tx_pkt->pkt.vlan_tci << 16 |
- tx_pkt->pkt.l2_len << IXGBE_ADVTXD_MACLEN_SHIFT |
- tx_pkt->pkt.l3_len;
+ vlan_macip_lens = tx_pkt->pkt.vlan_macip.data;
/* If hardware offload required */
tx_ol_req = ol_flags & PKT_TX_OFFLOAD_MASK;
if (tx_ol_req) {
/* If new context need be built or reuse the exist ctx. */
- ctx = what_advctx_update(txq, tx_ol_req, vlan_macip_lens);
+ ctx = what_advctx_update(txq, tx_ol_req,
+ vlan_macip_lens);
/* Only allocate context descriptor if required*/
new_ctx = (ctx == IXGBE_CTX_NUM);
ctx = txq->ctx_curr;
IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK];
}
+#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
+/*
+ * LOOK_AHEAD defines how many descriptor statuses to check beyond the
+ * current descriptor.
+ * It must be a compile-time constant (#define) for optimal performance.
+ * Do not change the value of LOOK_AHEAD, as the ixgbe_rx_scan_hw_ring
+ * function only works with LOOK_AHEAD=8.
+ */
+#define LOOK_AHEAD 8
+#if (LOOK_AHEAD != 8)
+#error "PMD IXGBE: LOOK_AHEAD must be 8\n"
+#endif
+static inline int
+ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)
+{
+ volatile union ixgbe_adv_rx_desc *rxdp;
+ struct igb_rx_entry *rxep;
+ struct rte_mbuf *mb;
+ uint16_t pkt_len;
+ int s[LOOK_AHEAD], nb_dd;
+ int i, j, nb_rx = 0;
+
+ /* get references to current descriptor and S/W ring entry */
+ rxdp = &rxq->rx_ring[rxq->rx_tail];
+ rxep = &rxq->sw_ring[rxq->rx_tail];
+
+ /* check to make sure there is at least 1 packet to receive */
+ if (! (rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD))
+ return 0;
+
+ /*
+ * Scan LOOK_AHEAD descriptors at a time to determine which descriptors
+ * reference packets that are ready to be received.
+ */
+ for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST;
+ i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD)
+ {
+ /* Read desc statuses backwards to avoid race condition */
+ for (j = LOOK_AHEAD-1; j >= 0; --j)
+ s[j] = rxdp[j].wb.upper.status_error;
+
+ /* Clear everything but the status bits (LSB) */
+ for (j = 0; j < LOOK_AHEAD; ++j)
+ s[j] &= IXGBE_RXDADV_STAT_DD;
+
+ /* Compute how many status bits were set */
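+ /* Each s[j] is now 0 or 1 (IXGBE_RXDADV_STAT_DD is bit 0), so the
+ * sum counts the completed descriptors in this group of LOOK_AHEAD */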
+ nb_dd = s[0]+s[1]+s[2]+s[3]+s[4]+s[5]+s[6]+s[7];
+ nb_rx += nb_dd;
+
+ /* Translate descriptor info to mbuf format */
+ for (j = 0; j < nb_dd; ++j) {
+ mb = rxep[j].mbuf;
+ pkt_len = rxdp[j].wb.upper.length - rxq->crc_len;
+ mb->pkt.data_len = pkt_len;
+ mb->pkt.pkt_len = pkt_len;
+ mb->pkt.vlan_macip.f.vlan_tci = rxdp[j].wb.upper.vlan;
+ mb->pkt.hash.rss = rxdp[j].wb.lower.hi_dword.rss;
+
+ /* convert descriptor fields to rte mbuf flags */
+ mb->ol_flags = rx_desc_hlen_type_rss_to_pkt_flags(
+ rxdp[j].wb.lower.lo_dword.data);
+ /* reuse status field from scan list */
+ mb->ol_flags |= rx_desc_status_to_pkt_flags(s[j]);
+ mb->ol_flags |= rx_desc_error_to_pkt_flags(s[j]);
+ }
+
+ /* Move mbuf pointers from the S/W ring to the stage */
+ for (j = 0; j < LOOK_AHEAD; ++j) {
+ rxq->rx_stage[i + j] = rxep[j].mbuf;
+ }
+
+ /* stop if all requested packets could not be received */
+ if (nb_dd != LOOK_AHEAD)
+ break;
+ }
+
+ /* clear software ring entries so we can cleanup correctly */
+ for (i = 0; i < nb_rx; ++i)
+ rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
+
+ return nb_rx;
+}
+
+static inline int
+ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)
+{
+ volatile union ixgbe_adv_rx_desc *rxdp;
+ struct igb_rx_entry *rxep;
+ struct rte_mbuf *mb;
+ uint16_t alloc_idx;
+ uint64_t dma_addr;
+ int diag, i;
+
+ /* allocate buffers in bulk directly into the S/W ring */
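+ /* rx_free_trigger is the index of the last descriptor in the
+ * rx_free_thresh-sized block being refilled */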
+ alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
+ rxep = &rxq->sw_ring[alloc_idx];
+ diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
+ rxq->rx_free_thresh);
+ if (unlikely(diag != 0))
+ return (-ENOMEM);
+
+ rxdp = &rxq->rx_ring[alloc_idx];
+ for (i = 0; i < rxq->rx_free_thresh; ++i) {
+ /* populate the static rte mbuf fields */
+ mb = rxep[i].mbuf;
+ rte_mbuf_refcnt_set(mb, 1);
+ mb->type = RTE_MBUF_PKT;
+ mb->pkt.next = NULL;
+ mb->pkt.data = (char *)mb->buf_addr + RTE_PKTMBUF_HEADROOM;
+ mb->pkt.nb_segs = 1;
+ mb->pkt.in_port = rxq->port_id;
+
+ /* populate the descriptors */
+ dma_addr = (uint64_t)mb->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+ rxdp[i].read.hdr_addr = dma_addr;
+ rxdp[i].read.pkt_addr = dma_addr;
+ }
+
+ /* update tail pointer */
+ rte_wmb();
+ IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rxq->rx_free_trigger);
+
+ /* update state of internal queue structure */
+ rxq->rx_free_trigger += rxq->rx_free_thresh;
+ if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
+ rxq->rx_free_trigger = (rxq->rx_free_thresh - 1);
+
+ /* no errors */
+ return 0;
+}
+
+static inline uint16_t
+ixgbe_rx_fill_from_stage(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
+ int i;
+
+ /* how many packets are ready to return? */
+ nb_pkts = RTE_MIN(nb_pkts, rxq->rx_nb_avail);
+
+ /* copy mbuf pointers to the application's packet list */
+ for (i = 0; i < nb_pkts; ++i)
+ rx_pkts[i] = stage[i];
+
+ /* update internal queue state */
+ rxq->rx_nb_avail -= nb_pkts;
+ rxq->rx_next_avail += nb_pkts;
+
+ return nb_pkts;
+}
+
+static inline uint16_t
+rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct igb_rx_queue *rxq = (struct igb_rx_queue *)rx_queue;
+ uint16_t nb_rx = 0;
+
+ /* Any previously received packets are returned from the RX stage first */
+ if (rxq->rx_nb_avail)
+ return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
+
+ /* Scan the H/W ring for packets to receive */
+ nb_rx = ixgbe_rx_scan_hw_ring(rxq);
+
+ /* update internal queue state */
+ rxq->rx_next_avail = 0;
+ rxq->rx_nb_avail = nb_rx;
+ rxq->rx_tail += nb_rx;
+
+ /* if required, allocate new buffers to replenish descriptors */
+ if (rxq->rx_tail > rxq->rx_free_trigger) {
+ if (ixgbe_rx_alloc_bufs(rxq) != 0) {
+ int i, j;
+ PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+ "queue_id=%u\n", (unsigned) rxq->port_id,
+ (unsigned) rxq->queue_id);
+
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+ rxq->rx_free_thresh;
+
+ /*
+ * Need to rewind any previous receives if we cannot
+ * allocate new buffers to replenish the old ones.
+ */
+ rxq->rx_nb_avail = 0;
+ rxq->rx_tail -= nb_rx;
+ for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j)
+ rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
+
+ return 0;
+ }
+ }
+
+ if (rxq->rx_tail >= rxq->nb_rx_desc)
+ rxq->rx_tail = 0;
+
+ /* received any packets this loop? */
+ if (rxq->rx_nb_avail)
+ return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
+
+ return 0;
+}
+
+/* split requests into chunks of size RTE_PMD_IXGBE_RX_MAX_BURST */
+uint16_t
+ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_rx;
+
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ if (likely(nb_pkts <= RTE_PMD_IXGBE_RX_MAX_BURST))
+ return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
+
+ /* request is relatively large, chunk it up */
+ nb_rx = 0;
+ while (nb_pkts) {
+ uint16_t ret, n;
+ n = RTE_MIN(nb_pkts, RTE_PMD_IXGBE_RX_MAX_BURST);
+ ret = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
+ nb_rx += ret;
+ nb_pkts -= ret;
+ if (ret < n)
+ break;
+ }
+
+ return nb_rx;
+}
+#endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */
+
uint16_t
ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
- rxm->pkt.vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
+ rxm->pkt.vlan_macip.f.vlan_tci =
+ rte_le_to_cpu_16(rxd.wb.upper.vlan);
pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
pkt_flags = (pkt_flags | rx_desc_status_to_pkt_flags(staterr));
* The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
* set in the pkt_flags field.
*/
- first_seg->pkt.vlan_tci =
+ first_seg->pkt.vlan_macip.f.vlan_tci =
rte_le_to_cpu_16(rxd.wb.upper.vlan);
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
if (mz)
return mz;
- return rte_memzone_reserve_aligned(z_name, (uint64_t) ring_size,
+ return rte_memzone_reserve_aligned(z_name, ring_size,
socket_id, 0, IXGBE_ALIGN);
}
ixgbe_tx_queue_release(struct igb_tx_queue *txq)
{
if (txq != NULL) {
- ixgbe_tx_queue_release_mbufs(txq);
- rte_free(txq->sw_ring);
- rte_free(txq);
+ ixgbe_tx_queue_release_mbufs(txq);
+ rte_free(txq->sw_ring);
+ rte_free(txq);
}
}
prev = i;
}
+ txq->tx_next_dd = txq->tx_rs_thresh - 1;
+ txq->tx_next_rs = txq->tx_rs_thresh - 1;
+
txq->tx_tail = 0;
txq->nb_tx_used = 0;
/*
* tx_rs_thresh must be greater than 0.
* tx_rs_thresh must be less than the size of the ring minus 2.
* tx_rs_thresh must be less than or equal to tx_free_thresh.
+ * tx_rs_thresh must be a divisor of the ring size.
* tx_free_thresh must be greater than 0.
* tx_free_thresh must be less than the size of the ring minus 3.
* One descriptor in the TX ring is used as a sentinel to avoid a
RTE_LOG(ERR, PMD,
"tx_rs_thresh must be less than the "
"number of TX descriptors minus 2. "
- "(tx_rs_thresh=%u port=%d queue=%d)",
+ "(tx_rs_thresh=%u port=%d queue=%d)\n",
tx_rs_thresh, dev->data->port_id, queue_idx);
return -(EINVAL);
}
"tx_rs_thresh must be less than the "
"tx_free_thresh must be less than the "
"number of TX descriptors minus 3. "
- "(tx_free_thresh=%u port=%d queue=%d)",
+ "(tx_free_thresh=%u port=%d queue=%d)\n",
tx_free_thresh, dev->data->port_id, queue_idx);
return -(EINVAL);
}
"tx_rs_thresh must be less than or equal to "
"tx_free_thresh. "
"(tx_free_thresh=%u tx_rs_thresh=%u "
- "port=%d queue=%d)",
+ "port=%d queue=%d)\n",
tx_free_thresh, tx_rs_thresh,
dev->data->port_id, queue_idx);
return -(EINVAL);
}
+ if ((nb_desc % tx_rs_thresh) != 0) {
+ RTE_LOG(ERR, PMD,
+ "tx_rs_thresh must be a divisor of the"
+ "number of TX descriptors. "
+ "(tx_rs_thresh=%u port=%d queue=%d)\n",
+ tx_rs_thresh, dev->data->port_id, queue_idx);
+ return -(EINVAL);
+ }
/*
* If tx_rs_thresh is greater than 1, then TX WTHRESH should be set to 0.
*/
if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
RTE_LOG(ERR, PMD,
- "TX WTHRESH should be set to 0 if "
+ "TX WTHRESH must be set to 0 if "
"tx_rs_thresh is greater than 1. "
- "TX WTHRESH will be set to 0. "
- "(tx_rs_thresh=%u port=%d queue=%d)",
+ "(tx_rs_thresh=%u port=%d queue=%d)\n",
tx_rs_thresh,
dev->data->port_id, queue_idx);
return -(EINVAL);
txq->wthresh = tx_conf->tx_thresh.wthresh;
txq->queue_id = queue_idx;
txq->port_id = dev->data->port_id;
+ txq->txq_flags = tx_conf->txq_flags;
/*
* Modification to set VFTDT for virtual function if vf is detected
dev->data->tx_queues[queue_idx] = txq;
- dev->tx_pkt_burst = ixgbe_xmit_pkts;
+ /* Use a simple Tx queue (no offloads, no multi segs) if possible */
+ if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) &&
+ (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST))
+ dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
+ else
+ dev->tx_pkt_burst = ixgbe_xmit_pkts;
return (0);
}
rxq->sw_ring[i].mbuf = NULL;
}
}
+#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
+ if (rxq->rx_nb_avail) {
+ for (i = 0; i < rxq->rx_nb_avail; ++i) {
+ struct rte_mbuf *mb;
+ mb = rxq->rx_stage[rxq->rx_next_avail + i];
+ rte_pktmbuf_free_seg(mb);
+ }
+ rxq->rx_nb_avail = 0;
+ }
+#endif
}
}
static void
ixgbe_rx_queue_release(struct igb_rx_queue *rxq)
{
- ixgbe_rx_queue_release_mbufs(rxq);
- rte_free(rxq->sw_ring);
- rte_free(rxq);
+ if (rxq != NULL) {
+ ixgbe_rx_queue_release_mbufs(rxq);
+ rte_free(rxq->sw_ring);
+ rte_free(rxq);
+ }
}
void
ixgbe_rx_queue_release(rxq);
}
+/*
+ * Check if Rx Burst Bulk Alloc function can be used.
+ * Return
+ * 0: the preconditions are satisfied and the bulk allocation function
+ * can be used.
+ * -EINVAL: the preconditions are NOT satisfied and the default Rx burst
+ * function must be used.
+ */
+static inline int
+check_rx_burst_bulk_alloc_preconditions(struct igb_rx_queue *rxq)
+{
+ int ret = 0;
- if (dev->data->rx_queues == NULL) {
- dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
- sizeof(struct igb_rx_queue *) * nb_queues,
- CACHE_LINE_SIZE);
- if (dev->data->rx_queues == NULL) {
- dev->data->nb_rx_queues = 0;
- return -ENOMEM;
- }
- }
- else {
- for (i = nb_queues; i < old_nb_queues; i++)
- ixgbe_rx_queue_release(dev->data->rx_queues[i]);
- rxq = rte_realloc(dev->data->rx_queues,
- sizeof(struct igb_rx_queue *) * nb_queues,
- CACHE_LINE_SIZE);
- if (rxq == NULL)
- return -ENOMEM;
- else
- dev->data->rx_queues = rxq;
- if (nb_queues > old_nb_queues)
- memset(&dev->data->rx_queues[old_nb_queues], 0,
- sizeof(struct igb_rx_queue *) *
- (nb_queues - old_nb_queues));
- }
- dev->data->nb_rx_queues = nb_queues;
- return 0;
+ /*
+ * Make sure the following pre-conditions are satisfied:
+ * rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST
+ * rxq->rx_free_thresh < rxq->nb_rx_desc
+ * (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
+ * rxq->nb_rx_desc<(IXGBE_MAX_RING_DESC-RTE_PMD_IXGBE_RX_MAX_BURST)
+ * Scattered packets are not supported. This should be checked
+ * outside of this function.
+ */
+#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
+ if (! (rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST))
+ ret = -EINVAL;
+ else if (! (rxq->rx_free_thresh < rxq->nb_rx_desc))
+ ret = -EINVAL;
+ else if (! ((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0))
+ ret = -EINVAL;
+ else if (! (rxq->nb_rx_desc <
+ (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST)))
+ ret = -EINVAL;
+#else
+ ret = -EINVAL;
+#endif
+
+ return ret;
}
-/* (Re)set dynamic igb_rx_queue fields to defaults */
+/* Reset dynamic igb_rx_queue fields back to defaults */
static void
ixgbe_reset_rx_queue(struct igb_rx_queue *rxq)
{
unsigned i;
+ uint16_t len;
- /* Zero out HW ring memory */
- for (i = 0; i < rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc); i++) {
+ /*
+ * By default, the Rx queue setup function allocates enough memory for
+ * IXGBE_MAX_RING_DESC. The Rx Burst bulk allocation function requires
+ * extra memory at the end of the descriptor ring to be zero'd out. A
+ * pre-condition for using the Rx burst bulk alloc function is that the
+ * number of descriptors is less than or equal to
+ * (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST). Check all the
+ * constraints here to see if we need to zero out memory after the end
+ * of the H/W descriptor ring.
+ */
+#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
+ if (check_rx_burst_bulk_alloc_preconditions(rxq) == 0)
+ /* zero out extra memory */
+ len = rxq->nb_rx_desc + RTE_PMD_IXGBE_RX_MAX_BURST;
+ else
+#endif
+ /* do not zero out extra memory */
+ len = rxq->nb_rx_desc;
+
+ /*
+ * Zero out HW ring memory. Zero out extra memory at the end of
+ * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
+ * reads extra memory as zeros.
+ */
+ for (i = 0; i < len * sizeof(union ixgbe_adv_rx_desc); i++) {
((volatile char *)rxq->rx_ring)[i] = 0;
}
+#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
+ /*
+ * initialize extra software ring entries. Space for these extra
+ * entries is always allocated
+ */
+ memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
+ for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST; ++i) {
+ rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
+ }
+
+ rxq->rx_nb_avail = 0;
+ rxq->rx_next_avail = 0;
+ rxq->rx_free_trigger = rxq->rx_free_thresh - 1;
+#endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */
rxq->rx_tail = 0;
rxq->nb_rx_hold = 0;
rxq->pkt_first_seg = NULL;
const struct rte_memzone *rz;
struct igb_rx_queue *rxq;
struct ixgbe_hw *hw;
+ int use_def_burst_func = 1;
+ uint16_t len;
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
rxq->port_id = dev->data->port_id;
rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
ETHER_CRC_LEN);
+ rxq->drop_en = rx_conf->rx_drop_en;
/*
- * Allocate TX ring hardware descriptors. A memzone large enough to
+ * Allocate RX ring hardware descriptors. A memzone large enough to
* handle the maximum ring size is allocated in order to allow for
* resizing in later calls to the queue setup function.
*/
rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
- /* Allocate software ring */
+ /*
+ * Allocate software ring. Allow for space at the end of the
+ * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
+ * function does not access an invalid memory region.
+ */
+#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
+ len = nb_desc + RTE_PMD_IXGBE_RX_MAX_BURST;
+#else
+ len = nb_desc;
+#endif
rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
- sizeof(struct igb_rx_entry) * nb_desc,
+ sizeof(struct igb_rx_entry) * len,
CACHE_LINE_SIZE);
if (rxq->sw_ring == NULL) {
ixgbe_rx_queue_release(rxq);
PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
+ /*
+ * Certain constraints must be met in order to use the bulk buffer
+ * allocation Rx burst function.
+ */
+ use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq);
+
+ /* Check if pre-conditions are satisfied, and no Scattered Rx */
+ if (!use_def_burst_func && !dev->data->scattered_rx) {
+#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
+ "satisfied. Rx Burst Bulk Alloc function will be "
+ "used on port=%d, queue=%d.\n",
+ rxq->port_id, rxq->queue_id);
+ dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
+#endif
+ } else {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions "
+ "are not satisfied, Scattered Rx is requested, "
+ "or RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC is not "
+ "enabled (port=%d, queue=%d).\n",
+ rxq->port_id, rxq->queue_id);
+ }
dev->data->rx_queues[queue_idx] = rxq;
ixgbe_reset_rx_queue(rxq);
for (i = 0; i < dev->data->nb_tx_queues; i++) {
struct igb_tx_queue *txq = dev->data->tx_queues[i];
- ixgbe_tx_queue_release_mbufs(txq);
- ixgbe_reset_tx_queue(txq);
+ if (txq != NULL) {
+ ixgbe_tx_queue_release_mbufs(txq);
+ ixgbe_reset_tx_queue(txq);
+ }
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
struct igb_rx_queue *rxq = dev->data->rx_queues[i];
- ixgbe_rx_queue_release_mbufs(rxq);
- ixgbe_reset_rx_queue(rxq);
+ if (rxq != NULL) {
+ ixgbe_rx_queue_release_mbufs(rxq);
+ ixgbe_reset_rx_queue(rxq);
+ }
}
}
}
}
+/**
+ * ixgbe_dcb_tx_hw_config - Configure general DCB TX parameters
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ */
+static void
+ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw,
+ struct ixgbe_dcb_config *dcb_config)
+{
+ uint32_t reg;
+ uint32_t q;
+
+ PMD_INIT_FUNC_TRACE();
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ /* Disable the Tx desc arbiter so that MTQC can be changed */
+ reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+ reg |= IXGBE_RTTDCS_ARBDIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
+
+ /* Enable DCB for Tx with 8 TCs */
+ if (dcb_config->num_tcs.pg_tcs == 8) {
+ reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
+ }
+ else {
+ reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
+ }
+ if (dcb_config->vt_mode)
+ reg |= IXGBE_MTQC_VT_ENA;
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
+
+ /* Disable drop for all queues */
+ for (q = 0; q < 128; q++)
+ IXGBE_WRITE_REG(hw, IXGBE_QDE,
+ (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
+
+ /* Enable the Tx desc arbiter */
+ reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+ reg &= ~IXGBE_RTTDCS_ARBDIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
+
+ /* Enable Security TX Buffer IFG for DCB */
+ reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+ reg |= IXGBE_SECTX_DCB;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
+ }
+ return;
+}
+
+/**
+ * ixgbe_vmdq_dcb_hw_tx_config - Configure general VMDQ+DCB TX parameters
+ * @dev: pointer to rte_eth_dev structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ */
+static void
+ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
+ struct ixgbe_dcb_config *dcb_config)
+{
+ struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
+ &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+ if (hw->mac.type != ixgbe_mac_82598EB)
+ /*PF VF Transmit Enable*/
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(0),
+ vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+
+ /*Configure general DCB TX parameters*/
+ ixgbe_dcb_tx_hw_config(hw,dcb_config);
+ return;
+}
+
+static void
+ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
+ struct ixgbe_dcb_config *dcb_config)
+{
+ struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
+ &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
+ struct ixgbe_dcb_tc_config *tc;
+ uint8_t i,j;
+
+ /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
+ if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS ) {
+ dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
+ dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+ }
+ else {
+ dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
+ dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+ }
+ /* User Priority to Traffic Class mapping */
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ j = vmdq_rx_conf->dcb_queue[i];
+ tc = &dcb_config->tc_config[j];
+ tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = (1 << j);
+ }
+}
+
+static void
+ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
+ struct ixgbe_dcb_config *dcb_config)
+{
+ struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
+ &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
+ struct ixgbe_dcb_tc_config *tc;
+ uint8_t i,j;
+
+ /* convert rte_eth_conf.tx_adv_conf to struct ixgbe_dcb_config */
+ if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ) {
+ dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
+ dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+ }
+ else {
+ dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
+ dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+ }
+
+ /* User Priority to Traffic Class mapping */
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ j = vmdq_tx_conf->dcb_queue[i];
+ tc = &dcb_config->tc_config[j];
+ tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = (1 << j);
+ }
+ return;
+}
+
+static void
+ixgbe_dcb_rx_config(struct rte_eth_dev *dev,struct ixgbe_dcb_config *dcb_config)
+{
+ struct rte_eth_dcb_rx_conf *rx_conf =
+ &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
+ struct ixgbe_dcb_tc_config *tc;
+ uint8_t i,j;
+
+ dcb_config->num_tcs.pg_tcs = rx_conf->nb_tcs;
+ dcb_config->num_tcs.pfc_tcs = rx_conf->nb_tcs;
+
+ /* User Priority to Traffic Class mapping */
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ j = rx_conf->dcb_queue[i];
+ tc = &dcb_config->tc_config[j];
+ tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = (1 << j);
+ }
+}
+
+static void
+ixgbe_dcb_tx_config(struct rte_eth_dev *dev,struct ixgbe_dcb_config *dcb_config)
+{
+ struct rte_eth_dcb_tx_conf *tx_conf =
+ &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
+ struct ixgbe_dcb_tc_config *tc;
+ uint8_t i,j;
+
+ dcb_config->num_tcs.pg_tcs = tx_conf->nb_tcs;
+ dcb_config->num_tcs.pfc_tcs = tx_conf->nb_tcs;
+
+ /* User Priority to Traffic Class mapping */
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ j = tx_conf->dcb_queue[i];
+ tc = &dcb_config->tc_config[j];
+ tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = (1 << j);
+ }
+}
+
+/**
+ * ixgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ */
+static void
+ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
+ struct ixgbe_dcb_config *dcb_config)
+{
+ uint32_t reg;
+ uint32_t vlanctrl;
+ uint8_t i;
+
+ PMD_INIT_FUNC_TRACE();
+ /*
+ * Disable the arbiter before changing parameters
+ * (always enable recycle mode; WSP)
+ */
+ reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
+
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ if (dcb_config->num_tcs.pg_tcs == 4) {
+ if (dcb_config->vt_mode)
+ reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+ IXGBE_MRQC_VMDQRT4TCEN;
+ else {
+ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
+ reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+ IXGBE_MRQC_RT4TCEN;
+ }
+ }
+ if (dcb_config->num_tcs.pg_tcs == 8) {
+ if (dcb_config->vt_mode)
+ reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+ IXGBE_MRQC_VMDQRT8TCEN;
+ else {
+ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
+ reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+ IXGBE_MRQC_RT8TCEN;
+ }
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
+ }
+
+ /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
+ vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
+
+ /* VFTA - enable all vlan filters */
+ for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
+ }
+
+ /*
+ * Configure Rx packet plane (recycle mode; WSP) and
+ * enable arbiter
+ */
+ reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
+ IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
+
+ return;
+}
+
+static void
+ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill,
+ uint16_t *max,uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
+{
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
+ tsa, map);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+ixgbe_dcb_hw_arbite_tx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *max,
+ uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
+{
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id,tsa);
+ ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id,tsa);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,tsa);
+ ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,tsa, map);
+ break;
+ default:
+ break;
+ }
+}
+
+#define DCB_RX_CONFIG 1
+#define DCB_TX_CONFIG 1
+#define DCB_TX_PB 1024
+/**
+ * ixgbe_dcb_hw_configure - Enable DCB and configure
+ * general DCB in VT mode and non-VT mode parameters
+ * @dev: pointer to rte_eth_dev structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ */
+static int
+ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
+ struct ixgbe_dcb_config *dcb_config)
+{
+ int ret = 0;
+ uint8_t i,pfc_en,nb_tcs;
+ uint16_t pbsize;
+ uint8_t config_dcb_rx = 0;
+ uint8_t config_dcb_tx = 0;
+ uint8_t tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
+ uint8_t bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
+ uint16_t refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
+ uint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
+ uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
+ struct ixgbe_dcb_tc_config *tc;
+ uint32_t max_frame = dev->data->max_frame_size;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ switch(dev->data->dev_conf.rxmode.mq_mode){
+ case ETH_VMDQ_DCB:
+ dcb_config->vt_mode = true;
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ config_dcb_rx = DCB_RX_CONFIG;
+ /*
+ *get dcb and VT rx configuration parameters
+ *from rte_eth_conf
+ */
+ ixgbe_vmdq_dcb_rx_config(dev,dcb_config);
+ /*Configure general VMDQ and DCB RX parameters*/
+ ixgbe_vmdq_dcb_configure(dev);
+ }
+ break;
+ case ETH_DCB_RX:
+ dcb_config->vt_mode = false;
+ config_dcb_rx = DCB_RX_CONFIG;
+ /* Get dcb RX configuration parameters from rte_eth_conf */
+ ixgbe_dcb_rx_config(dev,dcb_config);
+ /*Configure general DCB RX parameters*/
+ ixgbe_dcb_rx_hw_config(hw, dcb_config);
+ break;
+ default:
+ PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration\n");
+ break;
+ }
+ switch (dev->data->dev_conf.txmode.mq_mode) {
+ case ETH_VMDQ_DCB_TX:
+ dcb_config->vt_mode = true;
+ config_dcb_tx = DCB_TX_CONFIG;
+ /* get DCB and VT TX configuration parameters from rte_eth_conf */
+ ixgbe_dcb_vt_tx_config(dev,dcb_config);
+ /*Configure general VMDQ and DCB TX parameters*/
+ ixgbe_vmdq_dcb_hw_tx_config(dev,dcb_config);
+ break;
+
+ case ETH_DCB_TX:
+ dcb_config->vt_mode = false;
+ config_dcb_tx = DCB_TX_CONFIG;
+ /*get DCB TX configuration parameters from rte_eth_conf*/
+ ixgbe_dcb_tx_config(dev,dcb_config);
+ /*Configure general DCB TX parameters*/
+ ixgbe_dcb_tx_hw_config(hw, dcb_config);
+ break;
+ default:
+ PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration\n");
+ break;
+ }
+
+ nb_tcs = dcb_config->num_tcs.pfc_tcs;
+ /* Unpack map */
+ ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
+ if(nb_tcs == ETH_4_TCS) {
+ /* Avoid un-configured priority mapping to TC0 */
+ uint8_t j = 4;
+ uint8_t mask = 0xFF;
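+ /* Clear the bits of TCs already used by priorities 0-3, then map
+ * priorities 4-7 onto the TCs that remain */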
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
+ mask &= ~ (1 << map[i]);
+ for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
+ if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
+ map[j++] = i;
+ mask >>= 1;
+ }
+ /* Re-configure 4 TCs BW */
+ for (i = 0; i < nb_tcs; i++) {
+ tc = &dcb_config->tc_config[i];
+ tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 100 / nb_tcs;
+ tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 100 / nb_tcs;
+ }
+ for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ tc = &dcb_config->tc_config[i];
+ tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
+ tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 0;
+ }
+ }
+
+ if(config_dcb_rx) {
+ /* Set RX buffer size */
+ pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
+ uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT;
+ for (i = 0 ; i < nb_tcs; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
+ }
+ /* zero alloc all unused TCs */
+ for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
+ }
+ }
+ if(config_dcb_tx) {
+ /* Only support an equally distributed Tx packet buffer strategy. */
+ uint32_t txpktsize = IXGBE_TXPBSIZE_MAX / nb_tcs;
+ uint32_t txpbthresh = (txpktsize / DCB_TX_PB) - IXGBE_TXPKT_SIZE_MAX;
+ for (i = 0; i < nb_tcs; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
+ }
+ /* Clear unused TCs, if any, to zero buffer size*/
+ for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
+ }
+ }
+
+ /*Calculates traffic class credits*/
+ ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame,
+ IXGBE_DCB_TX_CONFIG);
+ ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame,
+ IXGBE_DCB_RX_CONFIG);
+
+ if(config_dcb_rx) {
+ /* Unpack CEE standard containers */
+ ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_RX_CONFIG, refill);
+ ixgbe_dcb_unpack_max_cee(dcb_config, max);
+ ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_RX_CONFIG, bwgid);
+ ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_RX_CONFIG, tsa);
+ /* Configure PG(ETS) RX */
+ ixgbe_dcb_hw_arbite_rx_config(hw,refill,max,bwgid,tsa,map);
+ }
+
+ if(config_dcb_tx) {
+ /* Unpack CEE standard containers */
+ ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
+ ixgbe_dcb_unpack_max_cee(dcb_config, max);
+ ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
+ ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
+ /* Configure PG(ETS) TX */
+ ixgbe_dcb_hw_arbite_tx_config(hw,refill,max,bwgid,tsa,map);
+ }
+
+ /*Configure queue statistics registers*/
+ ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
+
+ /* Check if the PFC is supported */
+ if(dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+ pbsize = (uint16_t) (NIC_RX_BUFFER_SIZE / nb_tcs);
+ for (i = 0; i < nb_tcs; i++) {
+ /*
+ * If the TC count is 8 and the default high_water is 48,
+ * the low_water defaults to 16.
+ */
+ hw->fc.high_water[i] = (pbsize * 3 ) / 4;
+ hw->fc.low_water[i] = pbsize / 4;
+ /* Enable pfc for this TC */
+ tc = &dcb_config->tc_config[i];
+ tc->pfc = ixgbe_dcb_pfc_enabled;
+ }
+ ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
+ if(dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
+ pfc_en &= 0x0F;
+ ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
+ }
+
+ return ret;
+}
+
+/**
+ * ixgbe_configure_dcb - Configure DCB Hardware
+ * @dev: pointer to rte_eth_dev
+ */
+void ixgbe_configure_dcb(struct rte_eth_dev *dev)
+{
+ struct ixgbe_dcb_config *dcb_cfg =
+ IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+ /** Configure DCB hardware **/
+ if(((dev->data->dev_conf.rxmode.mq_mode != ETH_RSS) &&
+ (dev->data->nb_rx_queues == ETH_DCB_NUM_QUEUES))||
+ ((dev->data->dev_conf.txmode.mq_mode != ETH_DCB_NONE) &&
+ (dev->data->nb_tx_queues == ETH_DCB_NUM_QUEUES))) {
+ ixgbe_dcb_hw_configure(dev,dcb_cfg);
+ }
+ return;
+}
+
static int
ixgbe_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
{
(unsigned) rxq->queue_id);
return (-ENOMEM);
}
+
+ rte_mbuf_refcnt_set(mbuf, 1);
+ mbuf->type = RTE_MBUF_PKT;
+ mbuf->pkt.next = NULL;
+ mbuf->pkt.data = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;
+ mbuf->pkt.nb_segs = 1;
+ mbuf->pkt.in_port = rxq->port_id;
+
dma_addr =
rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
rxd = &rxq->rx_ring[i];
#endif
srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+ /* Set if packets are dropped when no descriptors available */
+ if (rxq->drop_en)
+ srrctl |= IXGBE_SRRCTL_DROP_EN;
+
/*
* Configure the RX buffer size in the BSIZEPACKET field of
* the SRRCTL register of the queue.
buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
IXGBE_SRRCTL_BSIZEPKT_SHIFT);
- if (dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size){
+ if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ IXGBE_RX_BUF_THRESHOLD > buf_size){
dev->data->scattered_rx = 1;
dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
}
case ixgbe_mac_82598EB:
txctrl = IXGBE_READ_REG(hw,
IXGBE_DCA_TXCTRL(i));
- txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i),
txctrl);
break;
default:
txctrl = IXGBE_READ_REG(hw,
IXGBE_DCA_TXCTRL_82599(i));
- txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i),
txctrl);
break;
/* Allocate buffers for descriptor rings */
ret = ixgbe_alloc_rx_queue_mbufs(rxq);
- if (ret){
- return -1;
- }
+ if (ret)
+ return ret;
+
/* Setup the Base and Length of the Rx Descriptor Rings */
bus_addr = rxq->rx_ring_phys_addr;
#endif
srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+ /* Set if packets are dropped when no descriptors available */
+ if (rxq->drop_en)
+ srrctl |= IXGBE_SRRCTL_DROP_EN;
+
/*
* Configure the RX buffer size in the BSIZEPACKET field of
* the SRRCTL register of the queue.
dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
}
}
+
return 0;
}
*/
txctrl = IXGBE_READ_REG(hw,
IXGBE_VFDCA_TXCTRL(i));
- txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i),
txctrl);
}