#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
-#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
* Return the total number of buffers freed.
*/
static inline int __attribute__((always_inline))
-ixgbe_tx_free_bufs(struct igb_tx_queue *txq)
+ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
{
- struct igb_tx_entry *txep;
+ struct ixgbe_tx_entry *txep;
uint32_t status;
int i;
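/* Hardware sets the DD bit in a descriptor's status word when it is
 * done with it; only then can the attached mbufs be recycled. */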
* Copy mbuf pointers to the S/W ring.
*/
static inline void
-ixgbe_tx_fill_hw_ring(struct igb_tx_queue *txq, struct rte_mbuf **pkts,
+ixgbe_tx_fill_hw_ring(struct ixgbe_tx_queue *txq, struct rte_mbuf **pkts,
uint16_t nb_pkts)
{
volatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
- struct igb_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
+ struct ixgbe_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
const int N_PER_LOOP = 4;
const int N_PER_LOOP_MASK = N_PER_LOOP-1;
int mainpart, leftover;
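/* Descriptors are written N_PER_LOOP (4) at a time: mainpart is nb_pkts
 * rounded down to a multiple of four, leftover is the remainder. */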
tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
- struct igb_tx_queue *txq = (struct igb_tx_queue *)tx_queue;
+ struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring;
uint16_t n = 0;
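/* Simple transmit path: single-mbuf packets with no offload context,
 * so descriptors can be filled back-to-back and freed in bulk. */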
}
static inline void
-ixgbe_set_xmit_ctx(struct igb_tx_queue* txq,
+ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
uint64_t ol_flags, union ixgbe_tx_offload tx_offload)
{
* or create a new context descriptor.
*/
static inline uint32_t
-what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
+what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
union ixgbe_tx_offload tx_offload)
{
/* If match with the current used context */
/* Reset transmit descriptors after they have been used */
static inline int
-ixgbe_xmit_cleanup(struct igb_tx_queue *txq)
+ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
{
- struct igb_tx_entry *sw_ring = txq->sw_ring;
+ struct ixgbe_tx_entry *sw_ring = txq->sw_ring;
volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring;
uint16_t last_desc_cleaned = txq->last_desc_cleaned;
uint16_t nb_tx_desc = txq->nb_tx_desc;
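/* Cleanup advances from last_desc_cleaned to the next RS-threshold
 * boundary and checks that descriptor's DD bit before recycling. */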
ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
- struct igb_tx_queue *txq;
- struct igb_tx_entry *sw_ring;
- struct igb_tx_entry *txe, *txn;
+ struct ixgbe_tx_queue *txq;
+ struct ixgbe_tx_entry *sw_ring;
+ struct ixgbe_tx_entry *txe, *txn;
volatile union ixgbe_adv_tx_desc *txr;
volatile union ixgbe_adv_tx_desc *txd;
struct rte_mbuf *tx_pkt;
{
uint64_t pkt_flags;
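/* Four-bit fields of the descriptor's hlen_type_rss word index these
 * tables to translate hardware packet-type and RSS info into mbuf flags. */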
- static uint64_t ip_pkt_types_map[16] = {
+ static const uint64_t ip_pkt_types_map[16] = {
0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
PKT_RX_IPV6_HDR, 0, 0, 0,
PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
};
- static uint64_t ip_rss_types_map[16] = {
+ static const uint64_t ip_rss_types_map[16] = {
0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
PKT_RX_RSS_HASH, 0, 0, 0,
#error "PMD IXGBE: LOOK_AHEAD must be 8\n"
#endif
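/* Scan the RX ring LOOK_AHEAD descriptors at a time and stage completed
 * packets; the unrolled loops rely on LOOK_AHEAD being exactly 8. */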
static inline int
-ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)
+ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
{
volatile union ixgbe_adv_rx_desc *rxdp;
- struct igb_rx_entry *rxep;
+ struct ixgbe_rx_entry *rxep;
struct rte_mbuf *mb;
uint16_t pkt_len;
uint64_t pkt_flags;
}
static inline int
-ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)
+ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq)
{
volatile union ixgbe_adv_rx_desc *rxdp;
- struct igb_rx_entry *rxep;
+ struct ixgbe_rx_entry *rxep;
struct rte_mbuf *mb;
uint16_t alloc_idx;
- uint64_t dma_addr;
+ __le64 dma_addr;
int diag, i;
/* allocate buffers in bulk directly into the S/W ring */
mb->port = rxq->port_id;
/* populate the descriptors */
- dma_addr = (uint64_t)mb->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+ dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
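+ /* Descriptor fields are little-endian. Rewriting hdr_addr also
+ * overwrites the write-back status word, clearing the stale DD bit. */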
rxdp[i].read.hdr_addr = dma_addr;
rxdp[i].read.pkt_addr = dma_addr;
}
}
static inline uint16_t
-ixgbe_rx_fill_from_stage(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ixgbe_rx_fill_from_stage(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
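/* Hand out packets already staged by the ring scan; rx_next_avail
 * tracks the next staged entry to return. */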
rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
- struct igb_rx_queue *rxq = (struct igb_rx_queue *)rx_queue;
+ struct ixgbe_rx_queue *rxq = (struct ixgbe_rx_queue *)rx_queue;
uint16_t nb_rx = 0;
/* Any previously recv'd pkts will be returned from the Rx stage */
}
/* split requests into chunks of size RTE_PMD_IXGBE_RX_MAX_BURST */
-uint16_t
+static uint16_t
ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
- struct igb_rx_queue *rxq;
+ struct ixgbe_rx_queue *rxq;
volatile union ixgbe_adv_rx_desc *rx_ring;
volatile union ixgbe_adv_rx_desc *rxdp;
- struct igb_rx_entry *sw_ring;
- struct igb_rx_entry *rxe;
+ struct ixgbe_rx_entry *sw_ring;
+ struct ixgbe_rx_entry *rxe;
struct rte_mbuf *rxm;
struct rte_mbuf *nmb;
union ixgbe_adv_rx_desc rxd;
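/* rxd holds a stack copy of the descriptor so that the volatile
 * write-back fields are read from the ring only once. */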
ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
- struct igb_rx_queue *rxq;
+ struct ixgbe_rx_queue *rxq;
volatile union ixgbe_adv_rx_desc *rx_ring;
volatile union ixgbe_adv_rx_desc *rxdp;
- struct igb_rx_entry *sw_ring;
- struct igb_rx_entry *rxe;
+ struct ixgbe_rx_entry *sw_ring;
+ struct ixgbe_rx_entry *rxe;
struct rte_mbuf *first_seg;
struct rte_mbuf *last_seg;
struct rte_mbuf *rxm;
first_seg->ol_flags = pkt_flags;
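/* The write-back fields below are little-endian; convert them before
 * storing into the CPU-order mbuf fields. */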
if (likely(pkt_flags & PKT_RX_RSS_HASH))
- first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
+ first_seg->hash.rss =
+ rte_le_to_cpu_32(rxd.wb.lower.hi_dword.rss);
else if (pkt_flags & PKT_RX_FDIR) {
first_seg->hash.fdir.hash =
- (uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
- & IXGBE_ATR_HASH_MASK);
+ rte_le_to_cpu_16(rxd.wb.lower.hi_dword.csum_ip.csum)
+ & IXGBE_ATR_HASH_MASK;
first_seg->hash.fdir.id =
- rxd.wb.lower.hi_dword.csum_ip.ip_id;
+ rte_le_to_cpu_16(rxd.wb.lower.hi_dword.csum_ip.ip_id);
}
/* Prefetch data of first segment, if configured to do so. */
}
static void
-ixgbe_tx_queue_release_mbufs(struct igb_tx_queue *txq)
+ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
{
unsigned i;
}
static void
-ixgbe_tx_free_swring(struct igb_tx_queue *txq)
+ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
{
if (txq != NULL &&
txq->sw_ring != NULL)
}
static void
-ixgbe_tx_queue_release(struct igb_tx_queue *txq)
+ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
{
if (txq != NULL && txq->ops != NULL) {
txq->ops->release_mbufs(txq);
ixgbe_tx_queue_release(txq);
}
-/* (Re)set dynamic igb_tx_queue fields to defaults */
+/* (Re)set dynamic ixgbe_tx_queue fields to defaults */
static void
-ixgbe_reset_tx_queue(struct igb_tx_queue *txq)
+ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
{
static const union ixgbe_adv_tx_desc zeroed_desc = { .read = {
.buffer_addr = 0}};
- struct igb_tx_entry *txe = txq->sw_ring;
+ struct ixgbe_tx_entry *txe = txq->sw_ring;
uint16_t prev, i;
/* Zero out HW ring memory */
IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
}
-static struct ixgbe_txq_ops def_txq_ops = {
+static const struct ixgbe_txq_ops def_txq_ops = {
.release_mbufs = ixgbe_tx_queue_release_mbufs,
.free_swring = ixgbe_tx_free_swring,
.reset = ixgbe_reset_tx_queue,
* in dev_init by secondary process when attaching to an existing ethdev.
*/
void
-set_tx_function(struct rte_eth_dev *dev, struct igb_tx_queue *txq)
+ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
{
/* Use a simple Tx queue (no offloads, no multi segs) if possible */
if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS)
const struct rte_eth_txconf *tx_conf)
{
const struct rte_memzone *tz;
- struct igb_tx_queue *txq;
+ struct ixgbe_tx_queue *txq;
struct ixgbe_hw *hw;
uint16_t tx_rs_thresh, tx_free_thresh;
}
/* First allocate the tx queue data structure */
- txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct igb_tx_queue),
+ txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (txq == NULL)
return (-ENOMEM);
* Modification to set VFTDT for virtual function if vf is detected
*/
if (hw->mac.type == ixgbe_mac_82599_vf ||
- hw->mac.type == ixgbe_mac_X540_vf)
+ hw->mac.type == ixgbe_mac_X540_vf ||
+ hw->mac.type == ixgbe_mac_X550_vf ||
+ hw->mac.type == ixgbe_mac_X550EM_x_vf)
txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx));
else
txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
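/* Cache the tail register's mapped address so the transmit path can
 * notify hardware with a single register write. */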
/* Allocate software ring */
txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
- sizeof(struct igb_tx_entry) * nb_desc,
+ sizeof(struct ixgbe_tx_entry) * nb_desc,
RTE_CACHE_LINE_SIZE, socket_id);
if (txq->sw_ring == NULL) {
ixgbe_tx_queue_release(txq);
txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
/* set up vector or scalar TX function as appropriate */
- set_tx_function(dev, txq);
+ ixgbe_set_tx_function(dev, txq);
txq->ops->reset(txq);
}
static void
-ixgbe_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
+ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
{
unsigned i;
}
static void
-ixgbe_rx_queue_release(struct igb_rx_queue *rxq)
+ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)
{
if (rxq != NULL) {
ixgbe_rx_queue_release_mbufs(rxq);
*/
static inline int
#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
-check_rx_burst_bulk_alloc_preconditions(struct igb_rx_queue *rxq)
+check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
#else
-check_rx_burst_bulk_alloc_preconditions(__rte_unused struct igb_rx_queue *rxq)
+check_rx_burst_bulk_alloc_preconditions(__rte_unused struct ixgbe_rx_queue *rxq)
#endif
{
int ret = 0;
return ret;
}
-/* Reset dynamic igb_rx_queue fields back to defaults */
+/* Reset dynamic ixgbe_rx_queue fields back to defaults */
static void
-ixgbe_reset_rx_queue(struct igb_rx_queue *rxq)
+ixgbe_reset_rx_queue(struct ixgbe_rx_queue *rxq)
{
static const union ixgbe_adv_rx_desc zeroed_desc = { .read = {
.pkt_addr = 0}};
struct rte_mempool *mp)
{
const struct rte_memzone *rz;
- struct igb_rx_queue *rxq;
+ struct ixgbe_rx_queue *rxq;
struct ixgbe_hw *hw;
int use_def_burst_func = 1;
uint16_t len;
}
/* First allocate the rx queue data structure */
- rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct igb_rx_queue),
+ rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct ixgbe_rx_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (rxq == NULL)
return (-ENOMEM);
* Modified to setup VFRDT for Virtual Function
*/
if (hw->mac.type == ixgbe_mac_82599_vf ||
- hw->mac.type == ixgbe_mac_X540_vf) {
+ hw->mac.type == ixgbe_mac_X540_vf ||
+ hw->mac.type == ixgbe_mac_X550_vf ||
+ hw->mac.type == ixgbe_mac_X550EM_x_vf) {
rxq->rdt_reg_addr =
IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx));
rxq->rdh_reg_addr =
len = nb_desc;
#endif
rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
- sizeof(struct igb_rx_entry) * len,
+ sizeof(struct ixgbe_rx_entry) * len,
RTE_CACHE_LINE_SIZE, socket_id);
if (rxq->sw_ring == NULL) {
ixgbe_rx_queue_release(rxq);
rxq->port_id, rxq->queue_id);
dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
#ifdef RTE_IXGBE_INC_VECTOR
- if (!ixgbe_rx_vec_condition_check(dev)) {
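+ /* The vector RX routines assume a power-of-two ring size; fall back
+ * to the scalar path for other descriptor counts. */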
+ if (!ixgbe_rx_vec_condition_check(dev) &&
+ (rte_is_power_of_2(nb_desc))) {
PMD_INIT_LOG(INFO, "Vector rx enabled, please make "
"sure RX burst size no less than 32.");
dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
{
#define IXGBE_RXQ_SCAN_INTERVAL 4
volatile union ixgbe_adv_rx_desc *rxdp;
- struct igb_rx_queue *rxq;
+ struct ixgbe_rx_queue *rxq;
uint32_t desc = 0;
if (rx_queue_id >= dev->data->nb_rx_queues) {
ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
{
volatile union ixgbe_adv_rx_desc *rxdp;
- struct igb_rx_queue *rxq = rx_queue;
+ struct ixgbe_rx_queue *rxq = rx_queue;
uint32_t desc;
if (unlikely(offset >= rxq->nb_rx_desc))
PMD_INIT_FUNC_TRACE();
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- struct igb_tx_queue *txq = dev->data->tx_queues[i];
+ struct ixgbe_tx_queue *txq = dev->data->tx_queues[i];
if (txq != NULL) {
txq->ops->release_mbufs(txq);
txq->ops->reset(txq);
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- struct igb_rx_queue *rxq = dev->data->rx_queues[i];
+ struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
if (rxq != NULL) {
ixgbe_rx_queue_release_mbufs(rxq);
ixgbe_reset_rx_queue(rxq);
}
static int
-ixgbe_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
+ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
{
- struct igb_rx_entry *rxe = rxq->sw_ring;
+ struct ixgbe_rx_entry *rxe = rxq->sw_ring;
uint64_t dma_addr;
unsigned i;
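/* Allocate one mbuf per descriptor and program its DMA address into
 * the hardware ring before the queue is started. */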
ixgbe_dev_rx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
- struct igb_rx_queue *rxq;
+ struct ixgbe_rx_queue *rxq;
struct rte_pktmbuf_pool_private *mbp_priv;
uint64_t bus_addr;
uint32_t rxctrl;
buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
IXGBE_SRRCTL_BSIZEPKT_SHIFT);
- /* It adds dual VLAN length for supporting dual VLAN */
- if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ if (dev->data->dev_conf.rxmode.enable_scatter ||
+ /* It adds dual VLAN length for supporting dual VLAN */
+ (dev->data->dev_conf.rxmode.max_rx_pkt_len +
2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
dev->data->scattered_rx = 1;
#ifdef RTE_IXGBE_INC_VECTOR
- dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
-#else
- dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
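+ /* The vector scattered routine likewise requires a power-of-two
+ * ring size; otherwise use the scalar scattered path. */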
+ if (rte_is_power_of_2(rxq->nb_rx_desc))
+ dev->rx_pkt_burst =
+ ixgbe_recv_scattered_pkts_vec;
+ else
#endif
+ dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
}
}
- if (dev->data->dev_conf.rxmode.enable_scatter) {
- if (!dev->data->scattered_rx)
- PMD_INIT_LOG(DEBUG, "forcing scatter mode");
-#ifdef RTE_IXGBE_INC_VECTOR
- dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
-#else
- dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
-#endif
- dev->data->scattered_rx = 1;
- }
-
/*
* Device configured with multiple RX queues.
*/
IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
- if (hw->mac.type == ixgbe_mac_82599EB) {
+ if (hw->mac.type == ixgbe_mac_82599EB ||
+ hw->mac.type == ixgbe_mac_X540) {
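+ /* X540 uses the same RDRXCTL CRC-strip control as 82599, so both
+ * are programmed here. */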
rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
if (dev->data->dev_conf.rxmode.hw_strip_crc)
rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
ixgbe_dev_tx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
- struct igb_tx_queue *txq;
+ struct ixgbe_tx_queue *txq;
uint64_t bus_addr;
uint32_t hlreg0;
uint32_t txctrl;
ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
- struct igb_tx_queue *txq;
- struct igb_rx_queue *rxq;
+ struct ixgbe_tx_queue *txq;
+ struct ixgbe_rx_queue *rxq;
uint32_t txdctl;
uint32_t dmatxctl;
uint32_t rxctrl;
ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct ixgbe_hw *hw;
- struct igb_rx_queue *rxq;
+ struct ixgbe_rx_queue *rxq;
uint32_t rxdctl;
int poll_ms;
ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct ixgbe_hw *hw;
- struct igb_rx_queue *rxq;
+ struct ixgbe_rx_queue *rxq;
uint32_t rxdctl;
int poll_ms;
ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
struct ixgbe_hw *hw;
- struct igb_tx_queue *txq;
+ struct ixgbe_tx_queue *txq;
uint32_t txdctl;
int poll_ms;
ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
struct ixgbe_hw *hw;
- struct igb_tx_queue *txq;
+ struct ixgbe_tx_queue *txq;
uint32_t txdctl;
uint32_t txtdh, txtdt;
int poll_ms;
ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
- struct igb_rx_queue *rxq;
+ struct ixgbe_rx_queue *rxq;
struct rte_pktmbuf_pool_private *mbp_priv;
uint64_t bus_addr;
uint32_t srrctl, psrtype = 0;
buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
IXGBE_SRRCTL_BSIZEPKT_SHIFT);
- /* It adds dual VLAN length for supporting dual VLAN */
- if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ if (dev->data->dev_conf.rxmode.enable_scatter ||
+ /* It adds dual VLAN length for supporting dual VLAN */
+ (dev->data->dev_conf.rxmode.max_rx_pkt_len +
2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
dev->data->scattered_rx = 1;
#ifdef RTE_IXGBE_INC_VECTOR
- dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
-#else
- dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
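+ /* Same power-of-two constraint as the PF path above. */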
+ if (rte_is_power_of_2(rxq->nb_rx_desc))
+ dev->rx_pkt_burst =
+ ixgbe_recv_scattered_pkts_vec;
+ else
#endif
+ dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
}
}
IXGBE_PSRTYPE_RQPL_SHIFT;
IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
- if (dev->data->dev_conf.rxmode.enable_scatter) {
- if (!dev->data->scattered_rx)
- PMD_INIT_LOG(DEBUG, "forcing scatter mode");
-#ifdef RTE_IXGBE_INC_VECTOR
- dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
-#else
- dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
-#endif
- dev->data->scattered_rx = 1;
- }
-
return 0;
}
ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
- struct igb_tx_queue *txq;
+ struct ixgbe_tx_queue *txq;
uint64_t bus_addr;
uint32_t txctrl;
uint16_t i;
ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
- struct igb_tx_queue *txq;
- struct igb_rx_queue *rxq;
+ struct ixgbe_tx_queue *txq;
+ struct ixgbe_rx_queue *rxq;
uint32_t txdctl;
uint32_t rxdctl;
uint16_t i;