/*-
* BSD LICENSE
- *
- * Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
* All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
* are met:
- *
- * * Redistributions of source code must retain the above copyright
+ *
+ * * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <sys/queue.h>
-#include <endian.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "ixgbe/ixgbe_vf.h"
#include "ixgbe_ethdev.h"
#include "ixgbe/ixgbe_dcb.h"
+#include "ixgbe/ixgbe_common.h"
-#define RTE_PMD_IXGBE_TX_MAX_BURST 32
+#include "ixgbe_rxtx.h"
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
-#define RTE_PMD_IXGBE_RX_MAX_BURST 32
-#endif
static inline struct rte_mbuf *
rte_rxmbuf_alloc(struct rte_mempool *mp)
return (m);
}
-#define RTE_MBUF_DATA_DMA_ADDR(mb) \
- (uint64_t) ((mb)->buf_physaddr + (uint64_t)((char *)((mb)->pkt.data) - \
- (char *)(mb)->buf_addr))
-
-#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
- (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
-
-/**
- * Structure associated with each descriptor of the RX ring of a RX queue.
- */
-struct igb_rx_entry {
- struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
-};
-
-/**
- * Structure associated with each descriptor of the TX ring of a TX queue.
- */
-struct igb_tx_entry {
- struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
- uint16_t next_id; /**< Index of next descriptor in ring. */
- uint16_t last_id; /**< Index of last scattered descriptor. */
-};
-
-/**
- * Structure associated with each RX queue.
- */
-struct igb_rx_queue {
- struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
- volatile union ixgbe_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
- uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
- volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
- struct igb_rx_entry *sw_ring; /**< address of RX software ring. */
- struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
- struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
- uint16_t nb_rx_desc; /**< number of RX descriptors. */
- uint16_t rx_tail; /**< current value of RDT register. */
- uint16_t nb_rx_hold; /**< number of held free RX desc. */
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
- uint16_t rx_nb_avail; /**< nr of staged pkts ready to ret to app */
- uint16_t rx_next_avail; /**< idx of next staged pkt to ret to app */
- uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
-#endif
- uint16_t rx_free_thresh; /**< max free RX desc to hold. */
- uint16_t queue_id; /**< RX queue index. */
- uint8_t port_id; /**< Device port identifier. */
- uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
- uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
- /** need to alloc dummy mbuf, for wraparound when scanning hw ring */
- struct rte_mbuf fake_mbuf;
- /** hold packets to return to application */
- struct rte_mbuf *rx_stage[RTE_PMD_IXGBE_RX_MAX_BURST*2];
-#endif
-};
-
-/**
- * IXGBE CTX Constants
- */
-enum ixgbe_advctx_num {
- IXGBE_CTX_0 = 0, /**< CTX0 */
- IXGBE_CTX_1 = 1, /**< CTX1 */
- IXGBE_CTX_NUM = 2, /**< CTX NUMBER */
-};
-
-/**
- * Structure to check if new context need be built
- */
-
-struct ixgbe_advctx_info {
- uint16_t flags; /**< ol_flags for context build. */
- uint32_t cmp_mask; /**< compare mask for vlan_macip_lens */
- union rte_vlan_macip vlan_macip_lens; /**< vlan, mac ip length. */
-};
-
-/**
- * Structure associated with each TX queue.
- */
-struct igb_tx_queue {
- /** TX ring virtual address. */
- volatile union ixgbe_adv_tx_desc *tx_ring;
- uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
- struct igb_tx_entry *sw_ring; /**< virtual address of SW ring. */
- volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
- uint16_t nb_tx_desc; /**< number of TX descriptors. */
- uint16_t tx_tail; /**< current value of TDT reg. */
- uint16_t tx_free_thresh;/**< minimum TX before freeing. */
- /** Number of TX descriptors to use before RS bit is set. */
- uint16_t tx_rs_thresh;
- /** Number of TX descriptors used since RS bit was set. */
- uint16_t nb_tx_used;
- /** Index to last TX descriptor to have been cleaned. */
- uint16_t last_desc_cleaned;
- /** Total number of TX descriptors ready to be allocated. */
- uint16_t nb_tx_free;
- uint16_t tx_next_dd; /**< next desc to scan for DD bit */
- uint16_t tx_next_rs; /**< next desc to set RS bit */
- uint16_t queue_id; /**< TX queue index. */
- uint8_t port_id; /**< Device port identifier. */
- uint8_t pthresh; /**< Prefetch threshold register. */
- uint8_t hthresh; /**< Host threshold register. */
- uint8_t wthresh; /**< Write-back threshold reg. */
- uint32_t txq_flags; /**< Holds flags for this TXq */
- uint32_t ctx_curr; /**< Hardware context states. */
- /** Hardware context0 history. */
- struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
-};
-
#if 1
#define RTE_PMD_USE_PREFETCH
#define rte_ixgbe_prefetch(p) do {} while(0)
#endif
-#ifdef RTE_PMD_PACKET_PREFETCH
-#define rte_packet_prefetch(p) rte_prefetch1(p)
-#else
-#define rte_packet_prefetch(p) do {} while(0)
-#endif
-
/*********************************************************************
*
* TX functions
*
**********************************************************************/
-/*
- * The "simple" TX queue functions require that the following
- * flags are set when the TX queue is configured:
- * - ETH_TXQ_FLAGS_NOMULTSEGS
- * - ETH_TXQ_FLAGS_NOVLANOFFL
- * - ETH_TXQ_FLAGS_NOXSUMSCTP
- * - ETH_TXQ_FLAGS_NOXSUMUDP
- * - ETH_TXQ_FLAGS_NOXSUMTCP
- * and that the RS bit threshold (tx_rs_thresh) is at least equal to
- * RTE_PMD_IXGBE_TX_MAX_BURST.
- */
-#define IXGBE_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
- ETH_TXQ_FLAGS_NOOFFLOADS)
-
/*
* Check for descriptors with their DD bit set and free mbufs.
* Return the total number of buffers freed.
*/
-static inline int
+static inline int __attribute__((always_inline))
ixgbe_tx_free_bufs(struct igb_tx_queue *txq)
{
struct igb_tx_entry *txep;
}
/* buffers were freed, update counters */
- txq->nb_tx_free += txq->tx_rs_thresh;
- txq->tx_next_dd += txq->tx_rs_thresh;
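+	/*
+	 * Note: C integer promotion widens uint16_t operands to int, so the
+	 * results of the arithmetic below are cast back to uint16_t to make
+	 * the truncation explicit (this pattern recurs throughout the file).
+	 */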
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
+ txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
if (txq->tx_next_dd >= txq->nb_tx_desc)
- txq->tx_next_dd = txq->tx_rs_thresh - 1;
+ txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
return txq->tx_rs_thresh;
}
-/*
- * Populate descriptors with the following info:
- * 1.) buffer_addr = phys_addr + headroom
- * 2.) cmd_type_len = DCMD_DTYP_FLAGS | pkt_len
- * 3.) olinfo_status = pkt_len << PAYLEN_SHIFT
- */
-
-/* Defines for Tx descriptor */
-#define DCMD_DTYP_FLAGS (IXGBE_ADVTXD_DTYP_DATA |\
- IXGBE_ADVTXD_DCMD_IFCS |\
- IXGBE_ADVTXD_DCMD_DEXT |\
- IXGBE_ADVTXD_DCMD_EOP)
-
/* Populate 4 descriptors with data from 4 mbufs */
static inline void
tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
ixgbe_tx_free_bufs(txq);
/* Only use descriptors that are available */
- nb_pkts = RTE_MIN(txq->nb_tx_free, nb_pkts);
+ nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
if (unlikely(nb_pkts == 0))
return 0;
/* Use exactly nb_pkts descriptors */
- txq->nb_tx_free -= nb_pkts;
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
/*
* At this point, we know there are enough descriptors in the
* the processing looks just like the "bottom" part anyway...
*/
if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
- n = txq->nb_tx_desc - txq->tx_tail;
+ n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
ixgbe_tx_fill_hw_ring(txq, tx_pkts, n);
/*
*/
tx_r[txq->tx_next_rs].read.cmd_type_len |=
rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
- txq->tx_next_rs = txq->tx_rs_thresh - 1;
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
txq->tx_tail = 0;
}
/* Fill H/W descriptor ring with mbuf data */
- ixgbe_tx_fill_hw_ring(txq, tx_pkts + n, nb_pkts - n);
- txq->tx_tail += (nb_pkts - n);
+ ixgbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
+ txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
/*
* Determine if RS bit should be set
if (txq->tx_tail > txq->tx_next_rs) {
tx_r[txq->tx_next_rs].read.cmd_type_len |=
rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
- txq->tx_next_rs += txq->tx_rs_thresh;
+ txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
+ txq->tx_rs_thresh);
if (txq->tx_next_rs >= txq->nb_tx_desc)
- txq->tx_next_rs = txq->tx_rs_thresh - 1;
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
}
/*
nb_tx = 0;
while (nb_pkts) {
uint16_t ret, n;
- n = RTE_MIN(nb_pkts, RTE_PMD_IXGBE_TX_MAX_BURST);
+ n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_TX_MAX_BURST);
ret = tx_xmit_pkts(tx_queue, &(tx_pkts[nb_tx]), n);
- nb_tx += ret;
- nb_pkts -= ret;
+ nb_tx = (uint16_t)(nb_tx + ret);
+ nb_pkts = (uint16_t)(nb_pkts - ret);
if (ret < n)
break;
}
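+	/*
+	 * Illustration: transmission proceeds in bursts of at most
+	 * RTE_PMD_IXGBE_TX_MAX_BURST packets (32 here), e.g. a request for
+	 * 80 packets goes out as 32 + 32 + 16; the loop stops early if a
+	 * burst is only partially transmitted.
+	 */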
uint16_t nb_tx_to_clean;
/* Determine the last descriptor needing to be cleaned */
- desc_to_clean_to = last_desc_cleaned + txq->tx_rs_thresh;
+ desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
if (desc_to_clean_to >= nb_tx_desc)
- desc_to_clean_to = desc_to_clean_to - nb_tx_desc;
+ desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
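+	/*
+	 * Example: with nb_tx_desc = 512, last_desc_cleaned = 500 and
+	 * tx_rs_thresh = 32, desc_to_clean_to wraps around to 20.
+	 */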
/* Check to make sure the last descriptor to clean is done */
desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
/* Figure out how many descriptors will be cleaned */
if (last_desc_cleaned > desc_to_clean_to)
- nb_tx_to_clean = ((nb_tx_desc - last_desc_cleaned) +
- desc_to_clean_to);
+ nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
+ desc_to_clean_to);
else
- nb_tx_to_clean = desc_to_clean_to - last_desc_cleaned;
+ nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
+ last_desc_cleaned);
PMD_TX_FREE_LOG(DEBUG,
"Cleaning %4u TX descriptors: %4u to %4u "
/* Update the txq to reflect the last descriptor that was cleaned */
txq->last_desc_cleaned = desc_to_clean_to;
- txq->nb_tx_free += nb_tx_to_clean;
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
/* No Error */
return (0);
vlan_macip_lens = tx_pkt->pkt.vlan_macip.data;
/* If hardware offload required */
- tx_ol_req = ol_flags & PKT_TX_OFFLOAD_MASK;
+ tx_ol_req = (uint16_t)(ol_flags & PKT_TX_OFFLOAD_MASK);
if (tx_ol_req) {
/* If new context need be built or reuse the exist ctx. */
ctx = what_advctx_update(txq, tx_ol_req,
* This will always be the number of segments + the number of
* Context descriptors required to transmit the packet
*/
- nb_used = tx_pkt->pkt.nb_segs + new_ctx;
+ nb_used = (uint16_t)(tx_pkt->pkt.nb_segs + new_ctx);
/*
* The number of descriptors that must be allocated for a
* The last packet data descriptor needs End Of Packet (EOP)
*/
cmd_type_len |= IXGBE_TXD_CMD_EOP;
- txq->nb_tx_used += nb_used;
- txq->nb_tx_free -= nb_used;
+ txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
/* Set RS bit only on threshold packets' last descriptor */
if (txq->nb_tx_used >= txq->tx_rs_thresh) {
ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
#endif
- return (pkt_flags | ip_rss_types_map[hl_tp_rs & 0xF]);
+ return (uint16_t)(pkt_flags | ip_rss_types_map[hl_tp_rs & 0xF]);
}
static inline uint16_t
* Do not check whether L3/L4 rx checksum done by NIC or not,
* That can be found from rte_eth_rxmode.hw_ip_checksum flag
*/
- pkt_flags = (uint16_t) (rx_status & IXGBE_RXD_STAT_VP) ? PKT_RX_VLAN_PKT : 0;
+ pkt_flags = (uint16_t)((rx_status & IXGBE_RXD_STAT_VP) ?
+ PKT_RX_VLAN_PKT : 0);
#ifdef RTE_LIBRTE_IEEE1588
if (rx_status & IXGBE_RXD_STAT_TMST)
- pkt_flags = (pkt_flags | PKT_RX_IEEE1588_TMST);
+ pkt_flags = (uint16_t)(pkt_flags | PKT_RX_IEEE1588_TMST);
#endif
return pkt_flags;
}
#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
/*
* LOOK_AHEAD defines how many desc statuses to check beyond the
- * current descriptor.
+ * current descriptor.
* It must be a pound define for optimal performance.
* Do not change the value of LOOK_AHEAD, as the ixgbe_rx_scan_hw_ring
* function only works with LOOK_AHEAD=8.
for (j = LOOK_AHEAD-1; j >= 0; --j)
s[j] = rxdp[j].wb.upper.status_error;
- /* Clear everything but the status bits (LSB) */
+ /* Compute how many status bits were set */
+ nb_dd = 0;
for (j = 0; j < LOOK_AHEAD; ++j)
- s[j] &= IXGBE_RXDADV_STAT_DD;
+ nb_dd += s[j] & IXGBE_RXDADV_STAT_DD;
- /* Compute how many status bits were set */
- nb_dd = s[0]+s[1]+s[2]+s[3]+s[4]+s[5]+s[6]+s[7];
nb_rx += nb_dd;
/* Translate descriptor info to mbuf format */
for (j = 0; j < nb_dd; ++j) {
mb = rxep[j].mbuf;
- pkt_len = rxdp[j].wb.upper.length - rxq->crc_len;
+ pkt_len = (uint16_t)(rxdp[j].wb.upper.length -
+ rxq->crc_len);
mb->pkt.data_len = pkt_len;
mb->pkt.pkt_len = pkt_len;
mb->pkt.vlan_macip.f.vlan_tci = rxdp[j].wb.upper.vlan;
mb->ol_flags = rx_desc_hlen_type_rss_to_pkt_flags(
rxdp[j].wb.lower.lo_dword.data);
/* reuse status field from scan list */
- mb->ol_flags |= rx_desc_status_to_pkt_flags(s[j]);
- mb->ol_flags |= rx_desc_error_to_pkt_flags(s[j]);
+ mb->ol_flags = (uint16_t)(mb->ol_flags |
+ rx_desc_status_to_pkt_flags(s[j]));
+ mb->ol_flags = (uint16_t)(mb->ol_flags |
+ rx_desc_error_to_pkt_flags(s[j]));
}
/* Move mbuf pointers from the S/W ring to the stage */
}
/* clear software ring entries so we can cleanup correctly */
- for (i = 0; i < nb_rx; ++i)
+ for (i = 0; i < nb_rx; ++i) {
rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
+ }
+
return nb_rx;
}
int diag, i;
/* allocate buffers in bulk directly into the S/W ring */
- alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
+ alloc_idx = (uint16_t)(rxq->rx_free_trigger -
+ (rxq->rx_free_thresh - 1));
rxep = &rxq->sw_ring[alloc_idx];
diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
rxq->rx_free_thresh);
IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rxq->rx_free_trigger);
/* update state of internal queue structure */
- rxq->rx_free_trigger += rxq->rx_free_thresh;
+ rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_trigger +
+ rxq->rx_free_thresh);
if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
- rxq->rx_free_trigger = (rxq->rx_free_thresh - 1);
+ rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
/* no errors */
return 0;
int i;
/* how many packets are ready to return? */
- nb_pkts = RTE_MIN(nb_pkts, rxq->rx_nb_avail);
+ nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
/* copy mbuf pointers to the application's packet list */
for (i = 0; i < nb_pkts; ++i)
rx_pkts[i] = stage[i];
/* update internal queue state */
- rxq->rx_nb_avail -= nb_pkts;
- rxq->rx_next_avail += nb_pkts;
+ rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
+ rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
return nb_pkts;
}
return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
/* Scan the H/W ring for packets to receive */
- nb_rx = ixgbe_rx_scan_hw_ring(rxq);
+ nb_rx = (uint16_t)ixgbe_rx_scan_hw_ring(rxq);
/* update internal queue state */
rxq->rx_next_avail = 0;
rxq->rx_nb_avail = nb_rx;
- rxq->rx_tail += nb_rx;
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
/* if required, allocate new buffers to replenish descriptors */
if (rxq->rx_tail > rxq->rx_free_trigger) {
* allocate new buffers to replenish the old ones.
*/
rxq->rx_nb_avail = 0;
- rxq->rx_tail -= nb_rx;
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j)
rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
nb_rx = 0;
while (nb_pkts) {
uint16_t ret, n;
- n = RTE_MIN(nb_pkts, RTE_PMD_IXGBE_RX_MAX_BURST);
+ n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_RX_MAX_BURST);
ret = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
- nb_rx += ret;
- nb_pkts -= ret;
+ nb_rx = (uint16_t)(nb_rx + ret);
+ nb_pkts = (uint16_t)(nb_pkts - ret);
if (ret < n)
break;
}
rte_le_to_cpu_16(rxd.wb.upper.vlan);
pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
- pkt_flags = (pkt_flags | rx_desc_status_to_pkt_flags(staterr));
- pkt_flags = (pkt_flags | rx_desc_error_to_pkt_flags(staterr));
+ pkt_flags = (uint16_t)(pkt_flags |
+ rx_desc_status_to_pkt_flags(staterr));
+ pkt_flags = (uint16_t)(pkt_flags |
+ rx_desc_error_to_pkt_flags(staterr));
rxm->ol_flags = pkt_flags;
if (likely(pkt_flags & PKT_RX_RSS_HASH))
rte_le_to_cpu_16(rxd.wb.upper.vlan);
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
- pkt_flags = (pkt_flags |
- rx_desc_status_to_pkt_flags(staterr));
- pkt_flags = (pkt_flags |
- rx_desc_error_to_pkt_flags(staterr));
+ pkt_flags = (uint16_t)(pkt_flags |
+ rx_desc_status_to_pkt_flags(staterr));
+ pkt_flags = (uint16_t)(pkt_flags |
+ rx_desc_error_to_pkt_flags(staterr));
first_seg->ol_flags = pkt_flags;
if (likely(pkt_flags & PKT_RX_RSS_HASH))
* descriptors should meet the following condition:
* (num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0
*/
-#define IXGBE_MIN_RING_DESC 64
+#define IXGBE_MIN_RING_DESC 32
#define IXGBE_MAX_RING_DESC 4096
/*
if (mz)
return mz;
- return rte_memzone_reserve_aligned(z_name, (uint64_t) ring_size,
- socket_id, 0, IXGBE_ALIGN);
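+	/*
+	 * Xen Dom0 note: the descriptor ring is kept within a 2MB boundary,
+	 * since machine addresses are only assumed contiguous within each
+	 * 2MB memory chunk there.
+	 */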
+#ifdef RTE_LIBRTE_XEN_DOM0
+ return rte_memzone_reserve_bounded(z_name, ring_size,
+ socket_id, 0, IXGBE_ALIGN, RTE_PGSIZE_2M);
+#else
+ return rte_memzone_reserve_aligned(z_name, ring_size,
+ socket_id, 0, IXGBE_ALIGN);
+#endif
}
static void
}
static void
-ixgbe_tx_queue_release(struct igb_tx_queue *txq)
+ixgbe_tx_free_swring(struct igb_tx_queue *txq)
{
- if (txq != NULL) {
- ixgbe_tx_queue_release_mbufs(txq);
+ if (txq != NULL &&
+ txq->sw_ring != NULL)
rte_free(txq->sw_ring);
+}
+
+static void
+ixgbe_tx_queue_release(struct igb_tx_queue *txq)
+{
+ if (txq != NULL && txq->ops != NULL) {
+ txq->ops->release_mbufs(txq);
+ txq->ops->free_swring(txq);
rte_free(txq);
}
}
static void
ixgbe_reset_tx_queue(struct igb_tx_queue *txq)
{
+ static const union ixgbe_adv_tx_desc zeroed_desc = { .read = {
+ .buffer_addr = 0}};
struct igb_tx_entry *txe = txq->sw_ring;
uint16_t prev, i;
/* Zero out HW ring memory */
- for (i = 0; i < sizeof(union ixgbe_adv_tx_desc) * txq->nb_tx_desc; i++) {
- ((volatile char *)txq->tx_ring)[i] = 0;
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ txq->tx_ring[i] = zeroed_desc;
}
/* Initialize SW ring entries */
prev = i;
}
- txq->tx_next_dd = txq->tx_rs_thresh - 1;
- txq->tx_next_rs = txq->tx_rs_thresh - 1;
+ txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
txq->tx_tail = 0;
txq->nb_tx_used = 0;
IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
}
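+
+/*
+ * Default Tx queue operations. Teardown and reset are dispatched through
+ * this table so that an alternative Tx implementation (e.g. the vector
+ * path installed by ixgbe_txq_vec_setup) can substitute its own handlers.
+ */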
+static struct ixgbe_txq_ops def_txq_ops = {
+ .release_mbufs = ixgbe_tx_queue_release_mbufs,
+ .free_swring = ixgbe_tx_free_swring,
+ .reset = ixgbe_reset_tx_queue,
+};
+
int
ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
* H/W race condition, hence the maximum threshold constraints.
* When set to zero use default values.
*/
- tx_rs_thresh = (tx_conf->tx_rs_thresh) ?
- tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH;
- tx_free_thresh = (tx_conf->tx_free_thresh) ?
- tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH;
+ tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
+ tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
+ tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
+ tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
if (tx_rs_thresh >= (nb_desc - 2)) {
- RTE_LOG(ERR, PMD,
- "tx_rs_thresh must be less than the "
- "number of TX descriptors minus 2. "
- "(tx_rs_thresh=%u port=%d queue=%d)\n",
- tx_rs_thresh, dev->data->port_id, queue_idx);
+ RTE_LOG(ERR, PMD, "tx_rs_thresh must be less than the number "
+ "of TX descriptors minus 2. (tx_rs_thresh=%u port=%d "
+ "queue=%d)\n", (unsigned int)tx_rs_thresh,
+ (int)dev->data->port_id, (int)queue_idx);
return -(EINVAL);
}
if (tx_free_thresh >= (nb_desc - 3)) {
- RTE_LOG(ERR, PMD,
- "tx_rs_thresh must be less than the "
- "tx_free_thresh must be less than the "
- "number of TX descriptors minus 3. "
- "(tx_free_thresh=%u port=%d queue=%d)\n",
- tx_free_thresh, dev->data->port_id, queue_idx);
+		RTE_LOG(ERR, PMD, "tx_free_thresh must be less than the "
+			   "number of TX descriptors minus 3. "
+			   "(tx_free_thresh=%u port=%d queue=%d)\n",
+			   (unsigned int)tx_free_thresh,
+			   (int)dev->data->port_id, (int)queue_idx);
return -(EINVAL);
}
if (tx_rs_thresh > tx_free_thresh) {
- RTE_LOG(ERR, PMD,
- "tx_rs_thresh must be less than or equal to "
- "tx_free_thresh. "
- "(tx_free_thresh=%u tx_rs_thresh=%u "
- "port=%d queue=%d)\n",
- tx_free_thresh, tx_rs_thresh,
- dev->data->port_id, queue_idx);
+ RTE_LOG(ERR, PMD, "tx_rs_thresh must be less than or equal to "
+ "tx_free_thresh. (tx_free_thresh=%u tx_rs_thresh=%u "
+ "port=%d queue=%d)\n", (unsigned int)tx_free_thresh,
+ (unsigned int)tx_rs_thresh, (int)dev->data->port_id,
+ (int)queue_idx);
return -(EINVAL);
}
if ((nb_desc % tx_rs_thresh) != 0) {
- RTE_LOG(ERR, PMD,
- "tx_rs_thresh must be a divisor of the"
- "number of TX descriptors. "
- "(tx_rs_thresh=%u port=%d queue=%d)\n",
- tx_rs_thresh, dev->data->port_id, queue_idx);
+ RTE_LOG(ERR, PMD, "tx_rs_thresh must be a divisor of the "
+ "number of TX descriptors. (tx_rs_thresh=%u port=%d "
+ "queue=%d)\n", (unsigned int)tx_rs_thresh,
+ (int)dev->data->port_id, (int)queue_idx);
return -(EINVAL);
}
* accumulates WTHRESH descriptors.
*/
if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
- RTE_LOG(ERR, PMD,
- "TX WTHRESH must be set to 0 if "
- "tx_rs_thresh is greater than 1. "
- "(tx_rs_thresh=%u port=%d queue=%d)\n",
- tx_rs_thresh,
- dev->data->port_id, queue_idx);
+ RTE_LOG(ERR, PMD, "TX WTHRESH must be set to 0 if "
+ "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
+ "port=%d queue=%d)\n", (unsigned int)tx_rs_thresh,
+ (int)dev->data->port_id, (int)queue_idx);
return -(EINVAL);
}
/* Free memory prior to re-allocation if needed... */
- if (dev->data->tx_queues[queue_idx] != NULL)
+ if (dev->data->tx_queues[queue_idx] != NULL) {
ixgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ dev->data->tx_queues[queue_idx] = NULL;
+ }
/* First allocate the tx queue data structure */
- txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
- CACHE_LINE_SIZE);
+ txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct igb_tx_queue),
+ CACHE_LINE_SIZE, socket_id);
if (txq == NULL)
return (-ENOMEM);
txq->hthresh = tx_conf->tx_thresh.hthresh;
txq->wthresh = tx_conf->tx_thresh.wthresh;
txq->queue_id = queue_idx;
+ txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
+ queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
txq->port_id = dev->data->port_id;
txq->txq_flags = tx_conf->txq_flags;
+ txq->ops = &def_txq_ops;
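+	/*
+	 * start_tx_per_q: if set, the queue is left stopped at device start
+	 * and must be started individually by the application (deferred
+	 * per-queue start).
+	 */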
+ txq->start_tx_per_q = tx_conf->start_tx_per_q;
/*
* Modification to set VFTDT for virtual function if vf is detected
if (hw->mac.type == ixgbe_mac_82599_vf)
txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx));
else
- txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(queue_idx));
-
+ txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
+#ifndef RTE_LIBRTE_XEN_DOM0
txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
+#else
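+	/* Xen Dom0: convert the guest-physical ring address to a machine
+	 * address usable for DMA */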
+ txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+#endif
txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;
/* Allocate software ring */
- txq->sw_ring = rte_zmalloc("txq->sw_ring",
- sizeof(struct igb_tx_entry) * nb_desc,
- CACHE_LINE_SIZE);
+ txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
+ sizeof(struct igb_tx_entry) * nb_desc,
+ CACHE_LINE_SIZE, socket_id);
if (txq->sw_ring == NULL) {
ixgbe_tx_queue_release(txq);
return (-ENOMEM);
PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
- ixgbe_reset_tx_queue(txq);
-
- dev->data->tx_queues[queue_idx] = txq;
-
/* Use a simple Tx queue (no offloads, no multi segs) if possible */
if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) &&
- (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST))
- dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
- else
+ (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
+ PMD_INIT_LOG(INFO, "Using simple tx code path\n");
+#ifdef RTE_IXGBE_INC_VECTOR
+ if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
+ ixgbe_txq_vec_setup(txq, socket_id) == 0) {
+ PMD_INIT_LOG(INFO, "Vector tx enabled.\n");
+ dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
+ }
+ else
+#endif
+ dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
+ } else {
+ PMD_INIT_LOG(INFO, "Using full-featured tx code path\n");
+		PMD_INIT_LOG(INFO, " - txq_flags = %lx [IXGBE_SIMPLE_FLAGS=%lx]\n",
+				(long unsigned)txq->txq_flags,
+				(long unsigned)IXGBE_SIMPLE_FLAGS);
+		PMD_INIT_LOG(INFO, " - tx_rs_thresh = %lu "
+				"[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]\n",
+				(long unsigned)txq->tx_rs_thresh,
+				(long unsigned)RTE_PMD_IXGBE_TX_MAX_BURST);
dev->tx_pkt_burst = ixgbe_xmit_pkts;
+ }
+
+ txq->ops->reset(txq);
+
+ dev->data->tx_queues[queue_idx] = txq;
+
return (0);
}
* function must be used.
*/
static inline int
+#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
check_rx_burst_bulk_alloc_preconditions(struct igb_rx_queue *rxq)
+#else
+check_rx_burst_bulk_alloc_preconditions(__rte_unused struct igb_rx_queue *rxq)
+#endif
{
int ret = 0;
static void
ixgbe_reset_rx_queue(struct igb_rx_queue *rxq)
{
+ static const union ixgbe_adv_rx_desc zeroed_desc = { .read = {
+ .pkt_addr = 0}};
unsigned i;
uint16_t len;
#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
if (check_rx_burst_bulk_alloc_preconditions(rxq) == 0)
/* zero out extra memory */
- len = rxq->nb_rx_desc + RTE_PMD_IXGBE_RX_MAX_BURST;
+ len = (uint16_t)(rxq->nb_rx_desc + RTE_PMD_IXGBE_RX_MAX_BURST);
else
#endif
/* do not zero out extra memory */
* the H/W ring so look-ahead logic in Rx Burst bulk alloc function
* reads extra memory as zeros.
*/
- for (i = 0; i < len * sizeof(union ixgbe_adv_rx_desc); i++) {
- ((volatile char *)rxq->rx_ring)[i] = 0;
+ for (i = 0; i < len; i++) {
+ rxq->rx_ring[i] = zeroed_desc;
}
#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
rxq->rx_nb_avail = 0;
rxq->rx_next_avail = 0;
- rxq->rx_free_trigger = rxq->rx_free_thresh - 1;
+ rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
#endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */
rxq->rx_tail = 0;
rxq->nb_rx_hold = 0;
}
/* Free memory prior to re-allocation if needed... */
- if (dev->data->rx_queues[queue_idx] != NULL)
+ if (dev->data->rx_queues[queue_idx] != NULL) {
ixgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ dev->data->rx_queues[queue_idx] = NULL;
+ }
/* First allocate the rx queue data structure */
- rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
- CACHE_LINE_SIZE);
+ rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct igb_rx_queue),
+ CACHE_LINE_SIZE, socket_id);
if (rxq == NULL)
return (-ENOMEM);
rxq->mb_pool = mp;
rxq->nb_rx_desc = nb_desc;
rxq->rx_free_thresh = rx_conf->rx_free_thresh;
rxq->queue_id = queue_idx;
+ rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
+ queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
rxq->port_id = dev->data->port_id;
- rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
- ETHER_CRC_LEN);
+ rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
+ 0 : ETHER_CRC_LEN);
rxq->drop_en = rx_conf->rx_drop_en;
+ rxq->start_rx_per_q = rx_conf->start_rx_per_q;
/*
* Allocate RX ring hardware descriptors. A memzone large enough to
* resizing in later calls to the queue setup function.
*/
rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx,
- IXGBE_MAX_RING_DESC * sizeof(union ixgbe_adv_rx_desc),
- socket_id);
+ RX_RING_SZ, socket_id);
if (rz == NULL) {
ixgbe_rx_queue_release(rxq);
return (-ENOMEM);
}
+
/*
- * Modified to setup VFRDT for Virtual Function
+	 * Zero-initialize all the descriptors in the ring.
*/
- if (hw->mac.type == ixgbe_mac_82599_vf)
- rxq->rdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx));
- else
- rxq->rdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_RDT(queue_idx));
+	memset(rz->addr, 0, RX_RING_SZ);
+ /*
+	 * Modified to set up VFRDT for the Virtual Function
+ */
+ if (hw->mac.type == ixgbe_mac_82599_vf) {
+ rxq->rdt_reg_addr =
+ IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx));
+ rxq->rdh_reg_addr =
+ IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDH(queue_idx));
+ }
+ else {
+ rxq->rdt_reg_addr =
+ IXGBE_PCI_REG_ADDR(hw, IXGBE_RDT(rxq->reg_idx));
+ rxq->rdh_reg_addr =
+ IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx));
+ }
+#ifndef RTE_LIBRTE_XEN_DOM0
rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
+#else
+ rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+#endif
rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
/*
- * Allocate software ring. Allow for space at the end of the
+ * Allocate software ring. Allow for space at the end of the
* S/W ring to make sure look-ahead logic in bulk alloc Rx burst
* function does not access an invalid memory region.
*/
#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
- len = nb_desc + RTE_PMD_IXGBE_RX_MAX_BURST;
+ len = (uint16_t)(nb_desc + RTE_PMD_IXGBE_RX_MAX_BURST);
#else
len = nb_desc;
#endif
- rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
- sizeof(struct igb_rx_entry) * len,
- CACHE_LINE_SIZE);
+ rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
+ sizeof(struct igb_rx_entry) * len,
+ CACHE_LINE_SIZE, socket_id);
if (rxq->sw_ring == NULL) {
ixgbe_rx_queue_release(rxq);
return (-ENOMEM);
rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
/*
- * Certain constaints must be met in order to use the bulk buffer
+ * Certain constraints must be met in order to use the bulk buffer
* allocation Rx burst function.
*/
use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq);
"used on port=%d, queue=%d.\n",
rxq->port_id, rxq->queue_id);
dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
+#ifdef RTE_IXGBE_INC_VECTOR
+ if (!ixgbe_rx_vec_condition_check(dev)) {
+ PMD_INIT_LOG(INFO, "Vector rx enabled.\n");
+ ixgbe_rxq_vec_setup(rxq, socket_id);
+ dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
+ }
+#endif
#endif
} else {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions "
return 0;
}
+uint32_t
+ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+#define IXGBE_RXQ_SCAN_INTERVAL 4
+ volatile union ixgbe_adv_rx_desc *rxdp;
+ struct igb_rx_queue *rxq;
+ uint32_t desc = 0;
+
+ if (rx_queue_id >= dev->data->nb_rx_queues) {
+ PMD_RX_LOG(ERR, "Invalid RX queue id=%d\n", rx_queue_id);
+ return 0;
+ }
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+ rxdp = &(rxq->rx_ring[rxq->rx_tail]);
+
+ while ((desc < rxq->nb_rx_desc) &&
+ (rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD)) {
+ desc += IXGBE_RXQ_SCAN_INTERVAL;
+ rxdp += IXGBE_RXQ_SCAN_INTERVAL;
+ if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
+ rxdp = &(rxq->rx_ring[rxq->rx_tail +
+ desc - rxq->nb_rx_desc]);
+ }
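+	/*
+	 * The ring is sampled every IXGBE_RXQ_SCAN_INTERVAL descriptors, so
+	 * the returned count is an approximation in multiples of that
+	 * interval.
+	 */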
+
+ return desc;
+}
+
+int
+ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
+{
+ volatile union ixgbe_adv_rx_desc *rxdp;
+ struct igb_rx_queue *rxq = rx_queue;
+ uint32_t desc;
+
+ if (unlikely(offset >= rxq->nb_rx_desc))
+ return 0;
+ desc = rxq->rx_tail + offset;
+ if (desc >= rxq->nb_rx_desc)
+ desc -= rxq->nb_rx_desc;
+
+ rxdp = &rxq->rx_ring[desc];
+ return !!(rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD);
+}
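+
+/*
+ * Usage sketch (assuming the generic ethdev wrappers of this release,
+ * which reach the two functions above through the dev_ops table):
+ *
+ *	uint32_t backlog = rte_eth_rx_queue_count(port_id, queue_id);
+ *	int done = rte_eth_rx_descriptor_done(port_id, queue_id, 0);
+ */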
+
void
ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
{
for (i = 0; i < dev->data->nb_tx_queues; i++) {
struct igb_tx_queue *txq = dev->data->tx_queues[i];
if (txq != NULL) {
- ixgbe_tx_queue_release_mbufs(txq);
- ixgbe_reset_tx_queue(txq);
+ txq->ops->release_mbufs(txq);
+ txq->ops->reset(txq);
}
}
}
static void
-ixgbe_rss_configure(struct rte_eth_dev *dev)
+ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
{
- struct ixgbe_hw *hw;
- uint8_t *hash_key;
- uint32_t rss_key;
+ uint8_t *hash_key;
uint32_t mrqc;
- uint32_t reta;
+ uint32_t rss_key;
uint16_t rss_hf;
uint16_t i;
- uint16_t j;
-
- PMD_INIT_FUNC_TRACE();
- hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
- rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
- if (rss_hf == 0) { /* Disable RSS */
- ixgbe_rss_disable(dev);
- return;
- }
- hash_key = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
- if (hash_key == NULL)
- hash_key = rss_intel_key; /* Default hash key */
-
- /* Fill in RSS hash key */
- for (i = 0; i < 10; i++) {
- rss_key = hash_key[(i * 4)];
- rss_key |= hash_key[(i * 4) + 1] << 8;
- rss_key |= hash_key[(i * 4) + 2] << 16;
- rss_key |= hash_key[(i * 4) + 3] << 24;
- IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RSSRK(0), i, rss_key);
- }
- /* Fill in redirection table */
- reta = 0;
- for (i = 0, j = 0; i < 128; i++, j++) {
- if (j == dev->data->nb_rx_queues) j = 0;
- reta = (reta << 8) | j;
- if ((i & 3) == 3)
- IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), rte_bswap32(reta));
+ hash_key = rss_conf->rss_key;
+ if (hash_key != NULL) {
+ /* Fill in RSS hash key */
+ for (i = 0; i < 10; i++) {
+ rss_key = hash_key[(i * 4)];
+ rss_key |= hash_key[(i * 4) + 1] << 8;
+ rss_key |= hash_key[(i * 4) + 2] << 16;
+ rss_key |= hash_key[(i * 4) + 3] << 24;
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RSSRK(0), i, rss_key);
+ }
}
- /* Set configured hashing functions in MRQC register */
- mrqc = IXGBE_MRQC_RSSEN; /* RSS enable */
+ /* Set configured hashing protocols in MRQC register */
+ rss_hf = rss_conf->rss_hf;
+ mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */
if (rss_hf & ETH_RSS_IPV4)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
if (rss_hf & ETH_RSS_IPV4_TCP)
IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
}
+int
+ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct ixgbe_hw *hw;
+ uint32_t mrqc;
+ uint16_t rss_hf;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /*
+ * Excerpt from section 7.1.2.8 Receive-Side Scaling (RSS):
+ * "RSS enabling cannot be done dynamically while it must be
+ * preceded by a software reset"
+	 * Before changing anything, first check that the requested RSS update
+	 * does not attempt to disable RSS if it was enabled at initialization
+	 * time, or to enable RSS if it was disabled at initialization time.
+ */
+ rss_hf = rss_conf->rss_hf;
+ mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ if (!(mrqc & IXGBE_MRQC_RSSEN)) { /* RSS disabled */
+ if (rss_hf != 0) /* Enable RSS */
+ return -(EINVAL);
+ return 0; /* Nothing to do */
+ }
+ /* RSS enabled */
+ if (rss_hf == 0) /* Disable RSS */
+ return -(EINVAL);
+ ixgbe_hw_rss_hash_set(hw, rss_conf);
+ return 0;
+}
+
+int
+ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct ixgbe_hw *hw;
+ uint8_t *hash_key;
+ uint32_t mrqc;
+ uint32_t rss_key;
+ uint16_t rss_hf;
+ uint16_t i;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ hash_key = rss_conf->rss_key;
+ if (hash_key != NULL) {
+ /* Return RSS hash key */
+ for (i = 0; i < 10; i++) {
+ rss_key = IXGBE_READ_REG_ARRAY(hw, IXGBE_RSSRK(0), i);
+ hash_key[(i * 4)] = rss_key & 0x000000FF;
+ hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
+ hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
+ hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
+ }
+ }
+
+ /* Get RSS functions configured in MRQC register */
+ mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ if ((mrqc & IXGBE_MRQC_RSSEN) == 0) { /* RSS is disabled */
+ rss_conf->rss_hf = 0;
+ return 0;
+ }
+ rss_hf = 0;
+ if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4)
+ rss_hf |= ETH_RSS_IPV4;
+ if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP)
+ rss_hf |= ETH_RSS_IPV4_TCP;
+ if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6)
+ rss_hf |= ETH_RSS_IPV6;
+ if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX)
+ rss_hf |= ETH_RSS_IPV6_EX;
+ if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP)
+ rss_hf |= ETH_RSS_IPV6_TCP;
+ if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP)
+ rss_hf |= ETH_RSS_IPV6_TCP_EX;
+ if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP)
+ rss_hf |= ETH_RSS_IPV4_UDP;
+ if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP)
+ rss_hf |= ETH_RSS_IPV6_UDP;
+ if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP)
+ rss_hf |= ETH_RSS_IPV6_UDP_EX;
+ rss_conf->rss_hf = rss_hf;
+ return 0;
+}
+
+static void
+ixgbe_rss_configure(struct rte_eth_dev *dev)
+{
+ struct rte_eth_rss_conf rss_conf;
+ struct ixgbe_hw *hw;
+ uint32_t reta;
+ uint16_t i;
+ uint16_t j;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /*
+ * Fill in redirection table
+ * The byte-swap is needed because NIC registers are in
+ * little-endian order.
+ */
+ reta = 0;
+ for (i = 0, j = 0; i < 128; i++, j++) {
+ if (j == dev->data->nb_rx_queues)
+ j = 0;
+ reta = (reta << 8) | j;
+ if ((i & 3) == 3)
+ IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2),
+ rte_bswap32(reta));
+ }
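+	/*
+	 * Example: with 4 RX queues the 128 one-byte entries repeat the
+	 * pattern 0,1,2,3 and are packed four at a time into the 32 RETA
+	 * registers.
+	 */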
+
+ /*
+ * Configure the RSS key and the RSS protocols used to compute
+ * the RSS hash of input packets.
+ */
+ rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
+ if (rss_conf.rss_hf == 0) {
+ ixgbe_rss_disable(dev);
+ return;
+ }
+ if (rss_conf.rss_key == NULL)
+ rss_conf.rss_key = rss_intel_key; /* Default hash key */
+ ixgbe_hw_rss_hash_set(hw, &rss_conf);
+}
+
#define NUM_VFTA_REGISTERS 128
#define NIC_RX_BUFFER_SIZE 0x200
} else {
vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
}
+
IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
/* RTRUP2TC: mapping user priorities to traffic classes (TCs) */
* @hw: pointer to hardware structure
* @dcb_config: pointer to ixgbe_dcb_config structure
*/
-static void
+static void
ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw,
struct ixgbe_dcb_config *dcb_config)
{
uint32_t reg;
uint32_t q;
-
+
PMD_INIT_FUNC_TRACE();
if (hw->mac.type != ixgbe_mac_82598EB) {
/* Disable the Tx desc arbiter so that MTQC can be changed */
{
struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
&dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
- struct ixgbe_hw *hw =
+ struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
+
PMD_INIT_FUNC_TRACE();
- if (hw->mac.type != ixgbe_mac_82598EB)
+ if (hw->mac.type != ixgbe_mac_82598EB)
/*PF VF Transmit Enable*/
IXGBE_WRITE_REG(hw, IXGBE_VFTE(0),
vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
-
+
/*Configure general DCB TX parameters*/
ixgbe_dcb_tx_hw_config(hw,dcb_config);
return;
}
-static void
+static void
ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
struct ixgbe_dcb_config *dcb_config)
{
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
j = vmdq_rx_conf->dcb_queue[i];
tc = &dcb_config->tc_config[j];
- tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = (1 << j);
+ tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
+ (uint8_t)(1 << j);
}
}
-static void
+static void
ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
struct ixgbe_dcb_config *dcb_config)
-{
+{
struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
&dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
struct ixgbe_dcb_tc_config *tc;
uint8_t i,j;
-
+
/* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ) {
dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
j = vmdq_tx_conf->dcb_queue[i];
tc = &dcb_config->tc_config[j];
- tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = (1 << j);
+ tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
+ (uint8_t)(1 << j);
}
return;
}
-static void
-ixgbe_dcb_rx_config(struct rte_eth_dev *dev,struct ixgbe_dcb_config *dcb_config)
+static void
+ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
+ struct ixgbe_dcb_config *dcb_config)
{
struct rte_eth_dcb_rx_conf *rx_conf =
&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
struct ixgbe_dcb_tc_config *tc;
uint8_t i,j;
- dcb_config->num_tcs.pg_tcs = rx_conf->nb_tcs;
- dcb_config->num_tcs.pfc_tcs = rx_conf->nb_tcs;
-
- /* User Priority to Traffic Class mapping */
+ dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
+ dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
+
+ /* User Priority to Traffic Class mapping */
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
j = rx_conf->dcb_queue[i];
tc = &dcb_config->tc_config[j];
- tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = (1 << j);
+ tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
+ (uint8_t)(1 << j);
}
}
-static void
-ixgbe_dcb_tx_config(struct rte_eth_dev *dev,struct ixgbe_dcb_config *dcb_config)
+static void
+ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
+ struct ixgbe_dcb_config *dcb_config)
{
struct rte_eth_dcb_tx_conf *tx_conf =
&dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
struct ixgbe_dcb_tc_config *tc;
uint8_t i,j;
- dcb_config->num_tcs.pg_tcs = tx_conf->nb_tcs;
- dcb_config->num_tcs.pfc_tcs = tx_conf->nb_tcs;
-
- /* User Priority to Traffic Class mapping */
+ dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
+ dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
+
+ /* User Priority to Traffic Class mapping */
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
j = tx_conf->dcb_queue[i];
tc = &dcb_config->tc_config[j];
- tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = (1 << j);
+ tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
+ (uint8_t)(1 << j);
}
}
vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
-
+
/* VFTA - enable all vlan filters */
for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
*/
reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
-
+
return;
}
-static void
+static void
ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill,
uint16_t *max,uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
{
}
}
-static void
+static void
ixgbe_dcb_hw_arbite_tx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *max,
uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
{
#define DCB_TX_CONFIG 1
#define DCB_TX_PB 1024
/**
- * ixgbe_dcb_hw_configure - Enable DCB and configure
+ * ixgbe_dcb_hw_configure - Enable DCB and configure
* general DCB in VT mode and non-VT mode parameters
* @dev: pointer to rte_eth_dev structure
* @dcb_config: pointer to ixgbe_dcb_config structure
uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
struct ixgbe_dcb_tc_config *tc;
uint32_t max_frame = dev->data->max_frame_size;
- struct ixgbe_hw *hw =
+ struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
switch(dev->data->dev_conf.rxmode.mq_mode){
- case ETH_VMDQ_DCB:
+ case ETH_MQ_RX_VMDQ_DCB:
dcb_config->vt_mode = true;
if (hw->mac.type != ixgbe_mac_82598EB) {
config_dcb_rx = DCB_RX_CONFIG;
/*
- *get dcb and VT rx configuration parameters
+		 * get DCB and VT RX configuration parameters
*from rte_eth_conf
*/
ixgbe_vmdq_dcb_rx_config(dev,dcb_config);
ixgbe_vmdq_dcb_configure(dev);
}
break;
- case ETH_DCB_RX:
+ case ETH_MQ_RX_DCB:
dcb_config->vt_mode = false;
config_dcb_rx = DCB_RX_CONFIG;
/* Get dcb TX configuration parameters from rte_eth_conf */
break;
}
switch (dev->data->dev_conf.txmode.mq_mode) {
- case ETH_VMDQ_DCB_TX:
+ case ETH_MQ_TX_VMDQ_DCB:
dcb_config->vt_mode = true;
config_dcb_tx = DCB_TX_CONFIG;
/* get DCB and VT TX configuration parameters from rte_eth_conf */
ixgbe_vmdq_dcb_hw_tx_config(dev,dcb_config);
break;
- case ETH_DCB_TX:
+ case ETH_MQ_TX_DCB:
dcb_config->vt_mode = false;
- config_dcb_tx = DCB_RX_CONFIG;
+ config_dcb_tx = DCB_TX_CONFIG;
/*get DCB TX configuration parameters from rte_eth_conf*/
ixgbe_dcb_tx_config(dev,dcb_config);
/*Configure general DCB TX parameters*/
/* Avoid un-configured priority mapping to TC0 */
uint8_t j = 4;
uint8_t mask = 0xFF;
- for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
- mask &= ~ (1 << map[i]);
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
+ mask = (uint8_t)(mask & (~ (1 << map[i])));
for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
map[j++] = i;
/* Re-configure 4 TCs BW */
for (i = 0; i < nb_tcs; i++) {
tc = &dcb_config->tc_config[i];
- tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 100 / nb_tcs;
- tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 100 / nb_tcs;
+ tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
+ (uint8_t)(100 / nb_tcs);
+ tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
+ (uint8_t)(100 / nb_tcs);
}
for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
tc = &dcb_config->tc_config[i];
void ixgbe_configure_dcb(struct rte_eth_dev *dev)
{
struct ixgbe_dcb_config *dcb_cfg =
- IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
-
- PMD_INIT_FUNC_TRACE();
+ IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
+ struct rte_eth_conf *dev_conf = &(dev->data->dev_conf);
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* check support mq_mode for DCB */
+ if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
+ (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB))
+ return;
+
+ if (dev->data->nb_rx_queues != ETH_DCB_NUM_QUEUES)
+ return;
+
/** Configure DCB hardware **/
- if(((dev->data->dev_conf.rxmode.mq_mode != ETH_RSS) &&
- (dev->data->nb_rx_queues == ETH_DCB_NUM_QUEUES))||
- ((dev->data->dev_conf.txmode.mq_mode != ETH_DCB_NONE) &&
- (dev->data->nb_tx_queues == ETH_DCB_NUM_QUEUES))) {
- ixgbe_dcb_hw_configure(dev,dcb_cfg);
- }
+ ixgbe_dcb_hw_configure(dev,dcb_cfg);
+
return;
}
-static int
-ixgbe_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
+/*
+ * VMDq is only supported on 10 GbE NICs.
+ */
+static void
+ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
{
- struct igb_rx_entry *rxe = rxq->sw_ring;
- uint64_t dma_addr;
- unsigned i;
+ struct rte_eth_vmdq_rx_conf *cfg;
+ struct ixgbe_hw *hw;
+ enum rte_eth_nb_pools num_pools;
+ uint32_t mrqc, vt_ctl, vlanctrl;
+ int i;
- /* Initialize software ring entries */
- for (i = 0; i < rxq->nb_rx_desc; i++) {
- volatile union ixgbe_adv_rx_desc *rxd;
- struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
- if (mbuf == NULL) {
- PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u\n",
- (unsigned) rxq->queue_id);
- return (-ENOMEM);
- }
+ PMD_INIT_FUNC_TRACE();
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
+ num_pools = cfg->nb_queue_pools;
- rte_mbuf_refcnt_set(mbuf, 1);
- mbuf->type = RTE_MBUF_PKT;
- mbuf->pkt.next = NULL;
+ ixgbe_rss_disable(dev);
+
+ /* MRQC: enable vmdq */
+ mrqc = IXGBE_MRQC_VMDQEN;
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+
+ /* PFVTCTL: turn on virtualisation and set the default pool */
+ vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
+ if (cfg->enable_default_pool)
+ vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
+ else
+ vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
+
+ /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
+ vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
+
+ /* VFTA - enable all vlan filters */
+ for (i = 0; i < NUM_VFTA_REGISTERS; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), UINT32_MAX);
+
+ /* VFRE: pool enabling for receive - 64 */
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), UINT32_MAX);
+ if (num_pools == ETH_64_POOLS)
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), UINT32_MAX);
+
+ /*
+ * MPSAR - allow pools to read specific mac addresses
+ * In this case, all pools should be able to read from mac addr 0
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), UINT32_MAX);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), UINT32_MAX);
+
+ /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
+ for (i = 0; i < cfg->nb_pool_maps; i++) {
+ /* set vlan id in VF register and set the valid bit */
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \
+ (cfg->pool_map[i].vlan_id & IXGBE_RXD_VLAN_ID_MASK)));
+ /*
+ * Put the allowed pools in VFB reg. As we only have 16 or 64
+ * pools, we only need to use the first half of the register
+ * i.e. bits 0-31
+ */
+ if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0)
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), \
+ (cfg->pool_map[i].pools & UINT32_MAX));
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB((i*2+1)), \
+ ((cfg->pool_map[i].pools >> 32) \
+ & UINT32_MAX));
+
+ }
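+	/*
+	 * Illustration: pools = (1ULL << 3) enables pool 3 through the even
+	 * VLVFB register; pools = (1ULL << 35) enables pool 35 through the
+	 * odd one.
+	 */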
+
+ /* PFDMA Tx General Switch Control Enables VMDQ loopback */
+ if (cfg->enable_loop_back) {
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+ for (i = 0; i < RTE_IXGBE_VMTXSW_REGISTER_COUNT; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_VMTXSW(i), UINT32_MAX);
+ }
+
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/*
+ * ixgbe_vmdq_tx_hw_configure - Configure general VMDq TX parameters
+ * @hw: pointer to hardware structure
+ */
+static void
+ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
+{
+ uint32_t reg;
+ uint32_t q;
+
+ PMD_INIT_FUNC_TRACE();
+ /*PF VF Transmit Enable*/
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), UINT32_MAX);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), UINT32_MAX);
+
+ /* Disable the Tx desc arbiter so that MTQC can be changed */
+ reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+ reg |= IXGBE_RTTDCS_ARBDIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
+
+ reg = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
+
+ /* Disable drop for all queues */
+ for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
+ IXGBE_WRITE_REG(hw, IXGBE_QDE,
+ (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
+
+ /* Enable the Tx desc arbiter */
+ reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+ reg &= ~IXGBE_RTTDCS_ARBDIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
+
+ IXGBE_WRITE_FLUSH(hw);
+
+ return;
+}
+
+static int
+ixgbe_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
+{
+ struct igb_rx_entry *rxe = rxq->sw_ring;
+ uint64_t dma_addr;
+ unsigned i;
+
+ /* Initialize software ring entries */
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ volatile union ixgbe_adv_rx_desc *rxd;
+ struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
+ if (mbuf == NULL) {
+ PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u\n",
+ (unsigned) rxq->queue_id);
+ return (-ENOMEM);
+ }
+
+ rte_mbuf_refcnt_set(mbuf, 1);
+ mbuf->type = RTE_MBUF_PKT;
+ mbuf->pkt.next = NULL;
mbuf->pkt.data = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;
mbuf->pkt.nb_segs = 1;
mbuf->pkt.in_port = rxq->port_id;
return 0;
}
+static int
+ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return 0;
+
+ if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
+ /*
+ * SRIOV inactive scheme
+ * any DCB/RSS w/o VMDq multi-queue setting
+ */
+ switch (dev->data->dev_conf.rxmode.mq_mode) {
+ case ETH_MQ_RX_RSS:
+ ixgbe_rss_configure(dev);
+ break;
+
+ case ETH_MQ_RX_VMDQ_DCB:
+ ixgbe_vmdq_dcb_configure(dev);
+ break;
+
+ case ETH_MQ_RX_VMDQ_ONLY:
+ ixgbe_vmdq_rx_hw_configure(dev);
+ break;
+
+ case ETH_MQ_RX_NONE:
+			/* if mq_mode is none, disable RSS mode. */
+		default:
+			ixgbe_rss_disable(dev);
+ }
+ } else {
+ switch (RTE_ETH_DEV_SRIOV(dev).active) {
+ /*
+ * SRIOV active scheme
+		 * FIXME: add support for DCB/RSS together with VMDq & SR-IOV
+ */
+ case ETH_64_POOLS:
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, IXGBE_MRQC_VMDQEN);
+ break;
+
+ case ETH_32_POOLS:
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, IXGBE_MRQC_VMDQRT4TCEN);
+ break;
+
+ case ETH_16_POOLS:
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, IXGBE_MRQC_VMDQRT8TCEN);
+ break;
+ default:
+ RTE_LOG(ERR, PMD, "invalid pool number in IOV mode\n");
+ }
+ }
+
+ return 0;
+}
+
+static int
+ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t mtqc;
+ uint32_t rttdcs;
+
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return 0;
+
+ /* disable arbiter before setting MTQC */
+ rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+ rttdcs |= IXGBE_RTTDCS_ARBDIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
+
+ if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
+ /*
+ * SRIOV inactive scheme
+ * any DCB w/o VMDq multi-queue setting
+ */
+ if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
+ ixgbe_vmdq_tx_hw_configure(hw);
+ else {
+ mtqc = IXGBE_MTQC_64Q_1PB;
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
+ }
+ } else {
+ switch (RTE_ETH_DEV_SRIOV(dev).active) {
+
+ /*
+ * SRIOV active scheme
+		 * FIXME: add support for DCB together with VMDq & SR-IOV
+ */
+ case ETH_64_POOLS:
+ mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
+ break;
+ case ETH_32_POOLS:
+ mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_32VF;
+ break;
+ case ETH_16_POOLS:
+ mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_RT_ENA |
+ IXGBE_MTQC_8TC_8TQ;
+ break;
+ default:
+ mtqc = IXGBE_MTQC_64Q_1PB;
+ RTE_LOG(ERR, PMD, "invalid pool number in IOV mode\n");
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
+ }
+
+ /* re-enable arbiter */
+ rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
+
+ return 0;
+}
+
/*
* Initializes Receive Unit.
*/
uint32_t rxcsum;
uint16_t buf_size;
uint16_t i;
- int ret;
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
} else
hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
+ /*
+ * If loopback mode is configured for 82599, set LPBK bit.
+ */
+ if (hw->mac.type == ixgbe_mac_82599EB &&
+ dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
+ hlreg0 |= IXGBE_HLREG0_LPBK;
+ else
+ hlreg0 &= ~IXGBE_HLREG0_LPBK;
+
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
/* Setup RX queues */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- /* Allocate buffers for descriptor rings */
- ret = ixgbe_alloc_rx_queue_mbufs(rxq);
- if (ret)
- return ret;
-
/*
* Reset crc_len in case it was changed after queue setup by a
* call to configure.
/* Setup the Base and Length of the Rx Descriptor Rings */
bus_addr = rxq->rx_ring_phys_addr;
- IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
+ IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rxq->reg_idx),
(uint32_t)(bus_addr & 0x00000000ffffffffULL));
- IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i),
+ IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rxq->reg_idx),
(uint32_t)(bus_addr >> 32));
- IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
+ IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rxq->reg_idx),
rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
- IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
- IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), 0);
/* Configure the SRRCTL register */
#ifdef RTE_HEADER_SPLIT_ENABLE
IXGBE_PSRTYPE_UDPHDR |
IXGBE_PSRTYPE_IPV4HDR |
IXGBE_PSRTYPE_IPV6HDR;
- IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(i), psrtype);
+ IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
}
srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
* The value is in 1 KB resolution. Valid values can be from
* 1 KB to 16 KB.
*/
- mbp_priv = (struct rte_pktmbuf_pool_private *)
- ((char *)rxq->mb_pool + sizeof(struct rte_mempool));
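+		/* use the mempool accessor instead of hard-coding the offset
+		 * of the private data area */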
+ mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
RTE_PKTMBUF_HEADROOM);
srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
IXGBE_SRRCTL_BSIZEPKT_MASK);
- IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
+ IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
IXGBE_SRRCTL_BSIZEPKT_SHIFT);
- if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
- IXGBE_RX_BUF_THRESHOLD > buf_size){
+
+		/* Add room for a second VLAN tag to support dual VLAN */
+		if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
+				2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
dev->data->scattered_rx = 1;
dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
}
}
/*
- * Configure RSS if device configured with multiple RX queues.
+	 * Configure the multi-queue RX mode for the device.
*/
- if (hw->mac.type == ixgbe_mac_82599EB) {
- if (dev->data->nb_rx_queues > 1)
- switch (dev->data->dev_conf.rxmode.mq_mode) {
- case ETH_RSS:
- ixgbe_rss_configure(dev);
- break;
-
- case ETH_VMDQ_DCB:
- ixgbe_vmdq_dcb_configure(dev);
- break;
-
- default: ixgbe_rss_disable(dev);
- }
- else
- ixgbe_rss_disable(dev);
- }
+ ixgbe_dev_mq_rx_configure(dev);
/*
* Setup the Checksum Register.
uint64_t bus_addr;
uint32_t hlreg0;
uint32_t txctrl;
- uint32_t rttdcs;
uint16_t i;
PMD_INIT_FUNC_TRACE();
txq = dev->data->tx_queues[i];
bus_addr = txq->tx_ring_phys_addr;
- IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
+ IXGBE_WRITE_REG(hw, IXGBE_TDBAL(txq->reg_idx),
(uint32_t)(bus_addr & 0x00000000ffffffffULL));
- IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i),
+ IXGBE_WRITE_REG(hw, IXGBE_TDBAH(txq->reg_idx),
(uint32_t)(bus_addr >> 32));
- IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
+ IXGBE_WRITE_REG(hw, IXGBE_TDLEN(txq->reg_idx),
txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
/* Setup the HW Tx Head and TX Tail descriptor pointers */
- IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
- IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
/*
* Disable Tx Head Writeback RO bit, since this hoses
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
txctrl = IXGBE_READ_REG(hw,
- IXGBE_DCA_TXCTRL(i));
+ IXGBE_DCA_TXCTRL(txq->reg_idx));
txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i),
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx),
txctrl);
break;
case ixgbe_mac_X540:
default:
txctrl = IXGBE_READ_REG(hw,
- IXGBE_DCA_TXCTRL_82599(i));
+ IXGBE_DCA_TXCTRL_82599(txq->reg_idx));
txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i),
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx),
txctrl);
break;
}
}
- if (hw->mac.type != ixgbe_mac_82598EB) {
- /* disable arbiter before setting MTQC */
- rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
- rttdcs |= IXGBE_RTTDCS_ARBDIS;
- IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
-
- IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
+	/* Configure the multi-queue TX mode for the device. */
+ ixgbe_dev_mq_tx_configure(dev);
+}
- /* re-enable arbiter */
- rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
- IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
+/*
+ * Set up link for 82599 loopback mode Tx->Rx.
+ */
+static inline void
+ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
+{
+ DEBUGFUNC("ixgbe_setup_loopback_link_82599");
+
+ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM) !=
+ IXGBE_SUCCESS) {
+			PMD_INIT_LOG(ERR, "Could not enable loopback mode\n");
+			/* ignore the error and skip loopback setup */
+			return;
+ }
}
+
+ /* Restart link */
+ IXGBE_WRITE_REG(hw,
+ IXGBE_AUTOC,
+ IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU);
+ ixgbe_reset_pipeline_82599(hw);
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+ msec_delay(50);
}
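+
+/*
+ * Illustrative sketch (not part of this patch): 82599 Tx->Rx loopback is
+ * requested through the port configuration before the port is started;
+ * port_id is a placeholder:
+ *
+ *	struct rte_eth_conf conf;
+ *	memset(&conf, 0, sizeof(conf));
+ *	conf.lpbk_mode = IXGBE_LPBK_82599_TX_RX;
+ *	rte_eth_dev_configure(port_id, 1, 1, &conf);
+ *	rte_eth_dev_start(port_id);
+ */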
+
/*
* Start Transmit and Receive Units.
*/
struct igb_rx_queue *rxq;
uint32_t txdctl;
uint32_t dmatxctl;
- uint32_t rxdctl;
uint32_t rxctrl;
uint16_t i;
- int poll_ms;
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
for (i = 0; i < dev->data->nb_tx_queues; i++) {
txq = dev->data->tx_queues[i];
/* Setup Transmit Threshold Registers */
- txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
+ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
txdctl |= txq->pthresh & 0x7F;
txdctl |= ((txq->hthresh & 0x7F) << 8);
txdctl |= ((txq->wthresh & 0x7F) << 16);
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl);
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
}
if (hw->mac.type != ixgbe_mac_82598EB) {
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
- txdctl |= IXGBE_TXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl);
-
- /* Wait until TX Enable ready */
- if (hw->mac.type == ixgbe_mac_82599EB) {
- poll_ms = 10;
- do {
- rte_delay_ms(1);
- txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
- } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
- if (!poll_ms)
- PMD_INIT_LOG(ERR, "Could not enable "
- "Tx Queue %d\n", i);
- }
+ txq = dev->data->tx_queues[i];
+ if (!txq->start_tx_per_q)
+ ixgbe_dev_tx_queue_start(dev, i);
}
+
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+ if (!rxq->start_rx_per_q)
+ ixgbe_dev_rx_queue_start(dev, i);
+ }
+
+ /* Enable Receive engine */
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ rxctrl |= IXGBE_RXCTRL_DMBYPS;
+ rxctrl |= IXGBE_RXCTRL_RXEN;
+ hw->mac.ops.enable_rx_dma(hw, rxctrl);
+
+ /* If loopback mode is enabled for 82599, set up the link accordingly */
+ if (hw->mac.type == ixgbe_mac_82599EB &&
+ dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
+ ixgbe_setup_loopback_link_82599(hw);
+
+}
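+
+/*
+ * Illustrative sketch (not part of this patch): queues flagged with
+ * start_rx_per_q/start_tx_per_q are skipped by the start routine above
+ * and are expected to be started individually, assuming the generic
+ * ethdev per-queue start API that the handlers below implement;
+ * port_id, deferred_rxq and deferred_txq are placeholders:
+ *
+ *	rte_eth_dev_start(port_id);
+ *	rte_eth_dev_rx_queue_start(port_id, deferred_rxq);
+ *	rte_eth_dev_tx_queue_start(port_id, deferred_txq);
+ */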
+
+/*
+ * Start Receive Units for the specified queue.
+ */
+int
+ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct ixgbe_hw *hw;
+ struct igb_rx_queue *rxq;
+ uint32_t rxdctl;
+ int poll_ms;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (rx_queue_id < dev->data->nb_rx_queues) {
+ rxq = dev->data->rx_queues[rx_queue_id];
+
+ /* Allocate buffers for descriptor rings */
+ if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
+ PMD_INIT_LOG(ERR,
+				"Could not allocate mbufs for queue %d\n",
+ rx_queue_id);
+ return -1;
+ }
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
rxdctl |= IXGBE_RXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
/* Wait until RX Enable ready */
- poll_ms = 10;
+ poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
do {
rte_delay_ms(1);
- rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
} while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
if (!poll_ms)
PMD_INIT_LOG(ERR, "Could not enable "
- "Rx Queue %d\n", i);
+ "Rx Queue %d\n", rx_queue_id);
rte_wmb();
- IXGBE_WRITE_REG(hw, IXGBE_RDT(i), rxq->nb_rx_desc - 1);
- }
+ IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
+ } else
+ return -1;
- /* Enable Receive engine */
- rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
- if (hw->mac.type == ixgbe_mac_82598EB)
- rxctrl |= IXGBE_RXCTRL_DMBYPS;
- rxctrl |= IXGBE_RXCTRL_RXEN;
- hw->mac.ops.enable_rx_dma(hw, rxctrl);
+ return 0;
+}
+
+/*
+ * Stop Receive Units for the specified queue.
+ */
+int
+ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct ixgbe_hw *hw;
+ struct igb_rx_queue *rxq;
+ uint32_t rxdctl;
+ int poll_ms;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (rx_queue_id < dev->data->nb_rx_queues) {
+ rxq = dev->data->rx_queues[rx_queue_id];
+
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+ rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
+
+		/* Wait until the RX Enable bit clears */
+		poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+		do {
+			rte_delay_ms(1);
+			rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+		} while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not disable "
+ "Rx Queue %d\n", rx_queue_id);
+
+ rte_delay_us(RTE_IXGBE_WAIT_100_US);
+
+ ixgbe_rx_queue_release_mbufs(rxq);
+ ixgbe_reset_rx_queue(rxq);
+ } else
+ return -1;
+
+ return 0;
}
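+
+/*
+ * Illustrative sketch (not part of this patch): a stopped RX queue has
+ * its mbufs released and its ring reset, so it can be restarted without
+ * reconfiguring the port, assuming the generic ethdev per-queue hooks
+ * wired to these handlers; port_id and qid are placeholders:
+ *
+ *	if (rte_eth_dev_rx_queue_stop(port_id, qid) == 0)
+ *		rte_eth_dev_rx_queue_start(port_id, qid);
+ */
+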
+/*
+ * Start Transmit Units for the specified queue.
+ */
+int
+ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct ixgbe_hw *hw;
+ struct igb_tx_queue *txq;
+ uint32_t txdctl;
+ int poll_ms;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (tx_queue_id < dev->data->nb_tx_queues) {
+ txq = dev->data->tx_queues[tx_queue_id];
+ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
+ txdctl |= IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
+
+ /* Wait until TX Enable ready */
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_ms(1);
+ txdctl = IXGBE_READ_REG(hw,
+ IXGBE_TXDCTL(txq->reg_idx));
+ } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not enable "
+ "Tx Queue %d\n", tx_queue_id);
+ }
+ rte_wmb();
+ IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
+ } else
+ return -1;
+
+ return 0;
+}
+
+/*
+ * Stop Transmit Units for the specified queue.
+ */
+int
+ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct ixgbe_hw *hw;
+ struct igb_tx_queue *txq;
+ uint32_t txdctl;
+ uint32_t txtdh, txtdt;
+ int poll_ms;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (tx_queue_id < dev->data->nb_tx_queues) {
+ txq = dev->data->tx_queues[tx_queue_id];
+
+ /* Wait until TX queue is empty */
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_us(RTE_IXGBE_WAIT_100_US);
+ txtdh = IXGBE_READ_REG(hw,
+ IXGBE_TDH(txq->reg_idx));
+ txtdt = IXGBE_READ_REG(hw,
+ IXGBE_TDT(txq->reg_idx));
+ } while (--poll_ms && (txtdh != txtdt));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR,
+					"Tx Queue %d is not empty when stopping\n",
+ tx_queue_id);
+ }
+
+ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
+ txdctl &= ~IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
+
+		/* Wait until the TX Enable bit clears */
+		if (hw->mac.type == ixgbe_mac_82599EB) {
+			poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+			do {
+				rte_delay_ms(1);
+				txdctl = IXGBE_READ_REG(hw,
+					IXGBE_TXDCTL(txq->reg_idx));
+			} while (--poll_ms && (txdctl & IXGBE_TXDCTL_ENABLE));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not disable "
+ "Tx Queue %d\n", tx_queue_id);
+ }
+
+ if (txq->ops != NULL) {
+ txq->ops->release_mbufs(txq);
+ txq->ops->reset(txq);
+ }
+ } else
+ return -1;
+
+ return 0;
+}
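+
+/*
+ * Illustrative sketch (not part of this patch): the drain loop in
+ * ixgbe_dev_tx_queue_stop() treats the ring as empty once the head
+ * pointer has caught up with the tail; the same check could be factored
+ * into a helper (the name is a placeholder):
+ *
+ *	static inline int
+ *	ixgbe_tx_ring_empty(struct ixgbe_hw *hw, uint16_t reg_idx)
+ *	{
+ *		return IXGBE_READ_REG(hw, IXGBE_TDH(reg_idx)) ==
+ *			IXGBE_READ_REG(hw, IXGBE_TDT(reg_idx));
+ *	}
+ */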
+
/*
* [VF] Initializes Receive Unit.
*/
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ /* setup MTU */
+ ixgbevf_rlpml_set_vf(hw,
+ (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
+
/* Setup RX queues */
dev->rx_pkt_burst = ixgbe_recv_pkts;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
* The value is in 1 KB resolution. Valid values can be from
* 1 KB to 16 KB.
*/
- mbp_priv = (struct rte_pktmbuf_pool_private *)
- ((char *)rxq->mb_pool + sizeof(struct rte_mempool));
+ mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
RTE_PKTMBUF_HEADROOM);
srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
IXGBE_SRRCTL_BSIZEPKT_SHIFT);
- if (dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size){
+
+	/* Add room for a second VLAN tag to support dual VLAN */
+ if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ 2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
dev->data->scattered_rx = 1;
dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
}