ixgbe: clean up and refactor the Rx/Tx code
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index e6766b3..7173db8 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -53,7 +53,6 @@
 #include <rte_memory.h>
 #include <rte_memzone.h>
 #include <rte_launch.h>
-#include <rte_tailq.h>
 #include <rte_eal.h>
 #include <rte_per_lcore.h>
 #include <rte_lcore.h>
 #include "ixgbe/ixgbe_common.h"
 #include "ixgbe_rxtx.h"
 
-#define IXGBE_RSS_OFFLOAD_ALL ( \
-               ETH_RSS_IPV4 | \
-               ETH_RSS_IPV4_TCP | \
-               ETH_RSS_IPV6 | \
-               ETH_RSS_IPV6_EX | \
-               ETH_RSS_IPV6_TCP | \
-               ETH_RSS_IPV6_TCP_EX | \
-               ETH_RSS_IPV4_UDP | \
-               ETH_RSS_IPV6_UDP | \
-               ETH_RSS_IPV6_UDP_EX)
-
 /* Bit Mask to indicate what bits required for building TX context */
 #define IXGBE_TX_OFFLOAD_MASK (                         \
                PKT_TX_VLAN_PKT |                \
@@ -133,9 +121,9 @@ rte_rxmbuf_alloc(struct rte_mempool *mp)
  * Return the total number of buffers freed.
  */
 static inline int __attribute__((always_inline))
-ixgbe_tx_free_bufs(struct igb_tx_queue *txq)
+ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
 {
-       struct igb_tx_entry *txep;
+       struct ixgbe_tx_entry *txep;
        uint32_t status;
        int i;
 
@@ -219,11 +207,11 @@ tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
  * Copy mbuf pointers to the S/W ring.
  */
 static inline void
-ixgbe_tx_fill_hw_ring(struct igb_tx_queue *txq, struct rte_mbuf **pkts,
+ixgbe_tx_fill_hw_ring(struct ixgbe_tx_queue *txq, struct rte_mbuf **pkts,
                      uint16_t nb_pkts)
 {
        volatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
-       struct igb_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
+       struct ixgbe_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
        const int N_PER_LOOP = 4;
        const int N_PER_LOOP_MASK = N_PER_LOOP-1;
        int mainpart, leftover;
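The mainpart/leftover split declared here is a standard unrolling idiom: ixgbe_tx_fill_hw_ring processes the largest multiple of N_PER_LOOP entries in an unrolled loop and then copies the remaining 0..3 entries one by one. A minimal standalone sketch of the idiom (plain C, hypothetical names, not driver code):

#include <stdio.h>

#define N_PER_LOOP      4
#define N_PER_LOOP_MASK (N_PER_LOOP - 1)

/* Copy n integers from src to dst, four at a time, then the remainder. */
static void copy_unrolled(int *dst, const int *src, unsigned n)
{
        unsigned mainpart = n & ~(unsigned)N_PER_LOOP_MASK; /* largest multiple of 4 <= n */
        unsigned leftover = n & N_PER_LOOP_MASK;            /* 0..3 remaining entries */
        unsigned i, j;

        for (i = 0; i < mainpart; i += N_PER_LOOP)
                for (j = 0; j < N_PER_LOOP; j++)
                        dst[i + j] = src[i + j];
        for (i = 0; i < leftover; i++)
                dst[mainpart + i] = src[mainpart + i];
}

int main(void)
{
        int src[10] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, dst[10] = {0};

        copy_unrolled(dst, src, 10);
        printf("%d %d\n", dst[7], dst[9]); /* prints "7 9" */
        return 0;
}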
@@ -255,7 +243,7 @@ static inline uint16_t
 tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
             uint16_t nb_pkts)
 {
-       struct igb_tx_queue *txq = (struct igb_tx_queue *)tx_queue;
+       struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
        volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring;
        uint16_t n = 0;
 
@@ -363,7 +351,7 @@ ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
 }
 
 static inline void
-ixgbe_set_xmit_ctx(struct igb_tx_queue* txq,
+ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
                volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
                uint64_t ol_flags, union ixgbe_tx_offload tx_offload)
 {
@@ -453,7 +441,7 @@ ixgbe_set_xmit_ctx(struct igb_tx_queue* txq,
  * or create a new context descriptor.
  */
 static inline uint32_t
-what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
+what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
                union ixgbe_tx_offload tx_offload)
 {
        /* If match with the current used context */
@@ -509,9 +497,9 @@ tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
 
 /* Reset transmit descriptors after they have been used */
 static inline int
-ixgbe_xmit_cleanup(struct igb_tx_queue *txq)
+ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
 {
-       struct igb_tx_entry *sw_ring = txq->sw_ring;
+       struct ixgbe_tx_entry *sw_ring = txq->sw_ring;
        volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring;
        uint16_t last_desc_cleaned = txq->last_desc_cleaned;
        uint16_t nb_tx_desc = txq->nb_tx_desc;
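ixgbe_xmit_cleanup (like ixgbe_tx_free_bufs earlier in this file) recycles Tx descriptors in batches: it computes the index of the descriptor that ends the next batch, checks whether the hardware has marked it done (the DD bit), and only then advances last_desc_cleaned and the free count. A standalone sketch of just that batching and wrap-around arithmetic, with a plain bool standing in for the DD bit and all names hypothetical:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define NB_DESC    8u
#define RS_THRESH  4u   /* clean in batches of 4 descriptors */

struct ring {
        bool     done[NB_DESC];     /* stands in for the hardware DD bit */
        uint16_t last_desc_cleaned; /* last descriptor already recycled */
        uint16_t nb_tx_free;
};

/* Try to recycle one batch; return 0 on success, -1 if hardware isn't done. */
static int cleanup(struct ring *r)
{
        uint16_t desc_to_clean_to = (uint16_t)(r->last_desc_cleaned + RS_THRESH);

        if (desc_to_clean_to >= NB_DESC)
                desc_to_clean_to = (uint16_t)(desc_to_clean_to - NB_DESC);
        if (!r->done[desc_to_clean_to])
                return -1;                    /* batch still owned by hardware */

        r->done[desc_to_clean_to] = false;    /* re-arm for the next round */
        r->last_desc_cleaned = desc_to_clean_to;
        r->nb_tx_free = (uint16_t)(r->nb_tx_free + RS_THRESH);
        return 0;
}

int main(void)
{
        struct ring r = { .last_desc_cleaned = NB_DESC - 1 };
        int ret;

        r.done[RS_THRESH - 1] = true;         /* pretend descriptors 0..3 completed */
        ret = cleanup(&r);
        printf("%d %d\n", ret, r.nb_tx_free); /* prints "0 4" */
        return 0;
}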
@@ -570,9 +558,9 @@ uint16_t
 ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                uint16_t nb_pkts)
 {
-       struct igb_tx_queue *txq;
-       struct igb_tx_entry *sw_ring;
-       struct igb_tx_entry *txe, *txn;
+       struct ixgbe_tx_queue *txq;
+       struct ixgbe_tx_entry *sw_ring;
+       struct ixgbe_tx_entry *txe, *txn;
        volatile union ixgbe_adv_tx_desc *txr;
        volatile union ixgbe_adv_tx_desc *txd;
        struct rte_mbuf     *tx_pkt;
@@ -871,14 +859,14 @@ rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
 {
        uint64_t pkt_flags;
 
-       static uint64_t ip_pkt_types_map[16] = {
+       static const uint64_t ip_pkt_types_map[16] = {
                0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
                PKT_RX_IPV6_HDR, 0, 0, 0,
                PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
                PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
        };
 
-       static uint64_t ip_rss_types_map[16] = {
+       static const uint64_t ip_rss_types_map[16] = {
                0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
                0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
                PKT_RX_RSS_HASH, 0, 0, 0,
@@ -949,10 +937,10 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status)
 #error "PMD IXGBE: LOOK_AHEAD must be 8\n"
 #endif
 static inline int
-ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)
+ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
 {
        volatile union ixgbe_adv_rx_desc *rxdp;
-       struct igb_rx_entry *rxep;
+       struct ixgbe_rx_entry *rxep;
        struct rte_mbuf *mb;
        uint16_t pkt_len;
        uint64_t pkt_flags;
@@ -1033,18 +1021,17 @@ ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)
 }
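The ip_pkt_types_map / ip_rss_types_map tables a few hunks above (now static const, so they live in read-only data and are shared across calls) use a simple technique: a 4-bit field extracted from the Rx descriptor indexes a constant table that yields the mbuf flag combination. A self-contained sketch of that table lookup, with hypothetical flag values and bit positions:

#include <stdint.h>
#include <stdio.h>

#define PKT_RX_IPV4 (1ULL << 0)
#define PKT_RX_IPV6 (1ULL << 1)
#define PKT_RX_EXT  (1ULL << 2)

/* One entry per possible value of a 4-bit "packet type" field. */
static const uint64_t pkt_type_to_flags[16] = {
        [1] = PKT_RX_IPV4,
        [2] = PKT_RX_IPV4 | PKT_RX_EXT,
        [4] = PKT_RX_IPV6,
        [8] = PKT_RX_IPV6 | PKT_RX_EXT,
        /* all other encodings map to 0 (no flags) */
};

static uint64_t desc_to_pkt_flags(uint32_t hl_tp_rs)
{
        /* Assume the type field lives in bits 4..7 of the status word. */
        return pkt_type_to_flags[(hl_tp_rs >> 4) & 0x0F];
}

int main(void)
{
        printf("%llu\n", (unsigned long long)desc_to_pkt_flags(0x20)); /* type 2 -> 5 */
        return 0;
}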
 
 static inline int
-ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)
+ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq)
 {
        volatile union ixgbe_adv_rx_desc *rxdp;
-       struct igb_rx_entry *rxep;
+       struct ixgbe_rx_entry *rxep;
        struct rte_mbuf *mb;
        uint16_t alloc_idx;
-       uint64_t dma_addr;
+       __le64 dma_addr;
        int diag, i;
 
        /* allocate buffers in bulk directly into the S/W ring */
-       alloc_idx = (uint16_t)(rxq->rx_free_trigger -
-                               (rxq->rx_free_thresh - 1));
+       alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
        rxep = &rxq->sw_ring[alloc_idx];
        diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
                                    rxq->rx_free_thresh);
@@ -1062,7 +1049,7 @@ ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)
                mb->port = rxq->port_id;
 
                /* populate the descriptors */
-               dma_addr = (uint64_t)mb->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+               dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
                rxdp[i].read.hdr_addr = dma_addr;
                rxdp[i].read.pkt_addr = dma_addr;
        }
@@ -1072,17 +1059,16 @@ ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)
        IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rxq->rx_free_trigger);
 
        /* update state of internal queue structure */
-       rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_trigger +
-                                               rxq->rx_free_thresh);
+       rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh;
        if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
-               rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
+               rxq->rx_free_trigger = rxq->rx_free_thresh - 1;
 
        /* no errors */
        return 0;
 }
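Two details in ixgbe_rx_alloc_bufs above are easy to miss: descriptor addresses are stored little-endian (hence the switch to __le64 and rte_cpu_to_le_64()), and the queue is refilled one rx_free_thresh-sized block at a time, with rx_free_trigger marking the last index of the block currently being refilled. A standalone sketch of only the trigger arithmetic and its wrap-around (hypothetical sizes, no mbufs or registers):

#include <stdint.h>
#include <stdio.h>

#define NB_RX_DESC      16u
#define RX_FREE_THRESH   4u   /* refill four descriptors at a time */

struct rxq {
        uint16_t rx_free_trigger;  /* last index of the block being refilled */
};

/* Return the first index of the next block to refill and advance the trigger. */
static uint16_t alloc_block(struct rxq *q)
{
        uint16_t alloc_idx = (uint16_t)(q->rx_free_trigger - (RX_FREE_THRESH - 1));

        q->rx_free_trigger = (uint16_t)(q->rx_free_trigger + RX_FREE_THRESH);
        if (q->rx_free_trigger >= NB_RX_DESC)
                q->rx_free_trigger = RX_FREE_THRESH - 1;   /* wrap to the first block */
        return alloc_idx;
}

int main(void)
{
        struct rxq q = { .rx_free_trigger = RX_FREE_THRESH - 1 };
        unsigned i;

        for (i = 0; i < 5; i++)
                printf("%d ", alloc_block(&q));   /* prints "0 4 8 12 0 " */
        printf("\n");
        return 0;
}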
 
 static inline uint16_t
-ixgbe_rx_fill_from_stage(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ixgbe_rx_fill_from_stage(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
                         uint16_t nb_pkts)
 {
        struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
@@ -1106,7 +1092,7 @@ static inline uint16_t
 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
             uint16_t nb_pkts)
 {
-       struct igb_rx_queue *rxq = (struct igb_rx_queue *)rx_queue;
+       struct ixgbe_rx_queue *rxq = (struct ixgbe_rx_queue *)rx_queue;
        uint16_t nb_rx = 0;
 
        /* Any previously recv'd pkts will be returned from the Rx stage */
@@ -1156,7 +1142,7 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 }
 
 /* split requests into chunks of size RTE_PMD_IXGBE_RX_MAX_BURST */
-uint16_t
+static uint16_t
 ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
                           uint16_t nb_pkts)
 {
@@ -1182,17 +1168,28 @@ ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
 
        return nb_rx;
 }
+
+#else
+
+/* Stub to avoid extra ifdefs */
+static uint16_t
+ixgbe_recv_pkts_bulk_alloc(__rte_unused void *rx_queue,
+       __rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts)
+{
+       return 0;
+}
+
 #endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */
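The #else stub just added keeps ixgbe_recv_pkts_bulk_alloc defined even when RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC is compiled out, so the callback-selection code later in this patch can reference it without its own #ifdef. The same pattern in a self-contained example (the feature macro and names below are hypothetical):

#include <stdio.h>

/* #define HAVE_FAST_PATH 1 */   /* normally set by the build system */

#ifdef HAVE_FAST_PATH
static int fast_path(int x)
{
        return x * 2;            /* the real optimized implementation */
}
#else
/* Stub to avoid extra ifdefs at every call/selection site. */
static int fast_path(int x)
{
        (void)x;
        return -1;               /* "not available" sentinel */
}
#endif

int main(void)
{
        /* The selection logic can reference fast_path unconditionally. */
        int (*cb)(int) = fast_path;

        printf("%d\n", cb(21));  /* -1 here, 42 when HAVE_FAST_PATH is defined */
        return 0;
}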
 
 uint16_t
 ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts)
 {
-       struct igb_rx_queue *rxq;
+       struct ixgbe_rx_queue *rxq;
        volatile union ixgbe_adv_rx_desc *rx_ring;
        volatile union ixgbe_adv_rx_desc *rxdp;
-       struct igb_rx_entry *sw_ring;
-       struct igb_rx_entry *rxe;
+       struct ixgbe_rx_entry *sw_ring;
+       struct ixgbe_rx_entry *rxe;
        struct rte_mbuf *rxm;
        struct rte_mbuf *nmb;
        union ixgbe_adv_rx_desc rxd;
@@ -1370,11 +1367,11 @@ uint16_t
 ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                          uint16_t nb_pkts)
 {
-       struct igb_rx_queue *rxq;
+       struct ixgbe_rx_queue *rxq;
        volatile union ixgbe_adv_rx_desc *rx_ring;
        volatile union ixgbe_adv_rx_desc *rxdp;
-       struct igb_rx_entry *sw_ring;
-       struct igb_rx_entry *rxe;
+       struct ixgbe_rx_entry *sw_ring;
+       struct ixgbe_rx_entry *rxe;
        struct rte_mbuf *first_seg;
        struct rte_mbuf *last_seg;
        struct rte_mbuf *rxm;
@@ -1570,13 +1567,14 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                first_seg->ol_flags = pkt_flags;
 
                if (likely(pkt_flags & PKT_RX_RSS_HASH))
-                       first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
+                       first_seg->hash.rss =
+                                   rte_le_to_cpu_32(rxd.wb.lower.hi_dword.rss);
                else if (pkt_flags & PKT_RX_FDIR) {
                        first_seg->hash.fdir.hash =
-                               (uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
-                                          & IXGBE_ATR_HASH_MASK);
+                           rte_le_to_cpu_16(rxd.wb.lower.hi_dword.csum_ip.csum)
+                                          & IXGBE_ATR_HASH_MASK;
                        first_seg->hash.fdir.id =
-                               rxd.wb.lower.hi_dword.csum_ip.ip_id;
+                         rte_le_to_cpu_16(rxd.wb.lower.hi_dword.csum_ip.ip_id);
                }
 
                /* Prefetch data of first segment, if configured to do so. */
@@ -1686,7 +1684,7 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
 }
 
 static void
-ixgbe_tx_queue_release_mbufs(struct igb_tx_queue *txq)
+ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
 {
        unsigned i;
 
@@ -1701,7 +1699,7 @@ ixgbe_tx_queue_release_mbufs(struct igb_tx_queue *txq)
 }
 
 static void
-ixgbe_tx_free_swring(struct igb_tx_queue *txq)
+ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
 {
        if (txq != NULL &&
            txq->sw_ring != NULL)
@@ -1709,7 +1707,7 @@ ixgbe_tx_free_swring(struct igb_tx_queue *txq)
 }
 
 static void
-ixgbe_tx_queue_release(struct igb_tx_queue *txq)
+ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
 {
        if (txq != NULL && txq->ops != NULL) {
                txq->ops->release_mbufs(txq);
@@ -1724,13 +1722,13 @@ ixgbe_dev_tx_queue_release(void *txq)
        ixgbe_tx_queue_release(txq);
 }
 
-/* (Re)set dynamic igb_tx_queue fields to defaults */
+/* (Re)set dynamic ixgbe_tx_queue fields to defaults */
 static void
-ixgbe_reset_tx_queue(struct igb_tx_queue *txq)
+ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
 {
        static const union ixgbe_adv_tx_desc zeroed_desc = { .read = {
                        .buffer_addr = 0}};
-       struct igb_tx_entry *txe = txq->sw_ring;
+       struct ixgbe_tx_entry *txe = txq->sw_ring;
        uint16_t prev, i;
 
        /* Zero out HW ring memory */
@@ -1765,7 +1763,7 @@ ixgbe_reset_tx_queue(struct igb_tx_queue *txq)
                IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
 }
 
-static struct ixgbe_txq_ops def_txq_ops = {
+static const struct ixgbe_txq_ops def_txq_ops = {
        .release_mbufs = ixgbe_tx_queue_release_mbufs,
        .free_swring = ixgbe_tx_free_swring,
        .reset = ixgbe_reset_tx_queue,
@@ -1776,7 +1774,7 @@ static struct ixgbe_txq_ops def_txq_ops = {
  * in dev_init by secondary process when attaching to an existing ethdev.
  */
 void
-set_tx_function(struct rte_eth_dev *dev, struct igb_tx_queue *txq)
+ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
 {
        /* Use a simple Tx queue (no offloads, no multi segs) if possible */
        if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS)
@@ -1813,7 +1811,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
                         const struct rte_eth_txconf *tx_conf)
 {
        const struct rte_memzone *tz;
-       struct igb_tx_queue *txq;
+       struct ixgbe_tx_queue *txq;
        struct ixgbe_hw     *hw;
        uint16_t tx_rs_thresh, tx_free_thresh;
 
@@ -1910,7 +1908,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
        }
 
        /* First allocate the tx queue data structure */
-       txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct igb_tx_queue),
+       txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (txq == NULL)
                return (-ENOMEM);
@@ -1946,7 +1944,9 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
         * Modification to set VFTDT for virtual function if vf is detected
         */
        if (hw->mac.type == ixgbe_mac_82599_vf ||
-           hw->mac.type == ixgbe_mac_X540_vf)
+           hw->mac.type == ixgbe_mac_X540_vf ||
+           hw->mac.type == ixgbe_mac_X550_vf ||
+           hw->mac.type == ixgbe_mac_X550EM_x_vf)
                txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx));
        else
                txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
@@ -1959,7 +1959,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
        /* Allocate software ring */
        txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
-                               sizeof(struct igb_tx_entry) * nb_desc,
+                               sizeof(struct ixgbe_tx_entry) * nb_desc,
                                RTE_CACHE_LINE_SIZE, socket_id);
        if (txq->sw_ring == NULL) {
                ixgbe_tx_queue_release(txq);
@@ -1969,7 +1969,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
                     txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
 
        /* set up vector or scalar TX function as appropriate */
-       set_tx_function(dev, txq);
+       ixgbe_set_tx_function(dev, txq);
 
        txq->ops->reset(txq);
 
@@ -1980,7 +1980,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 }
 
 static void
-ixgbe_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
+ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
 {
        unsigned i;
 
@@ -2005,7 +2005,7 @@ ixgbe_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
 }
 
 static void
-ixgbe_rx_queue_release(struct igb_rx_queue *rxq)
+ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)
 {
        if (rxq != NULL) {
                ixgbe_rx_queue_release_mbufs(rxq);
@@ -2030,9 +2030,9 @@ ixgbe_dev_rx_queue_release(void *rxq)
  */
 static inline int
 #ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
-check_rx_burst_bulk_alloc_preconditions(struct igb_rx_queue *rxq)
+check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
 #else
-check_rx_burst_bulk_alloc_preconditions(__rte_unused struct igb_rx_queue *rxq)
+check_rx_burst_bulk_alloc_preconditions(__rte_unused struct ixgbe_rx_queue *rxq)
 #endif
 {
        int ret = 0;
@@ -2082,14 +2082,14 @@ check_rx_burst_bulk_alloc_preconditions(__rte_unused struct igb_rx_queue *rxq)
        return ret;
 }
 
-/* Reset dynamic igb_rx_queue fields back to defaults */
+/* Reset dynamic ixgbe_rx_queue fields back to defaults */
 static void
-ixgbe_reset_rx_queue(struct igb_rx_queue *rxq)
+ixgbe_reset_rx_queue(struct ixgbe_hw *hw, struct ixgbe_rx_queue *rxq)
 {
        static const union ixgbe_adv_rx_desc zeroed_desc = { .read = {
                        .pkt_addr = 0}};
        unsigned i;
-       uint16_t len;
+       uint16_t len = rxq->nb_rx_desc;
 
        /*
         * By default, the Rx queue setup function allocates enough memory for
@@ -2101,14 +2101,9 @@ ixgbe_reset_rx_queue(struct igb_rx_queue *rxq)
         * constraints here to see if we need to zero out memory after the end
         * of the H/W descriptor ring.
         */
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
-       if (check_rx_burst_bulk_alloc_preconditions(rxq) == 0)
+       if (hw->rx_bulk_alloc_allowed)
                /* zero out extra memory */
-               len = (uint16_t)(rxq->nb_rx_desc + RTE_PMD_IXGBE_RX_MAX_BURST);
-       else
-#endif
-               /* do not zero out extra memory */
-               len = rxq->nb_rx_desc;
+               len += RTE_PMD_IXGBE_RX_MAX_BURST;
 
        /*
         * Zero out HW ring memory. Zero out extra memory at the end of
@@ -2125,8 +2120,8 @@ ixgbe_reset_rx_queue(struct igb_rx_queue *rxq)
         * entries is always allocated
         */
        memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
-       for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST; ++i) {
-               rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
+       for (i = rxq->nb_rx_desc; i < len; ++i) {
+               rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
        }
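The loop above (now bounded by len instead of a fixed burst count) points every software-ring slot past the real ring end at a shared fake mbuf, so the LOOK_AHEAD scan can read a fixed-size window without a bounds check on each step. The sentinel idea in a standalone sketch (hypothetical sizes and names):

#include <stdio.h>

#define NB_DESC    8
#define MAX_BURST  4

struct entry { int *buf; };

static int fake_buf;                              /* shared sentinel target */
static struct entry sw_ring[NB_DESC + MAX_BURST]; /* real slots + look-ahead pad */

int main(void)
{
        int i;

        /* Point every padding slot at the sentinel so a scan that runs up to
         * MAX_BURST entries past the ring end still dereferences valid memory. */
        for (i = NB_DESC; i < NB_DESC + MAX_BURST; i++)
                sw_ring[i].buf = &fake_buf;

        printf("%d\n", sw_ring[NB_DESC + MAX_BURST - 1].buf == &fake_buf); /* prints 1 */
        return 0;
}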
 
        rxq->rx_nb_avail = 0;
@@ -2148,9 +2143,8 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
                         struct rte_mempool *mp)
 {
        const struct rte_memzone *rz;
-       struct igb_rx_queue *rxq;
+       struct ixgbe_rx_queue *rxq;
        struct ixgbe_hw     *hw;
-       int use_def_burst_func = 1;
        uint16_t len;
 
        PMD_INIT_FUNC_TRACE();
@@ -2174,7 +2168,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
        }
 
        /* First allocate the rx queue data structure */
-       rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct igb_rx_queue),
+       rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct ixgbe_rx_queue),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (rxq == NULL)
                return (-ENOMEM);
@@ -2211,7 +2205,9 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
         * Modified to setup VFRDT for Virtual Function
         */
        if (hw->mac.type == ixgbe_mac_82599_vf ||
-           hw->mac.type == ixgbe_mac_X540_vf) {
+           hw->mac.type == ixgbe_mac_X540_vf ||
+           hw->mac.type == ixgbe_mac_X550_vf ||
+           hw->mac.type == ixgbe_mac_X550EM_x_vf) {
                rxq->rdt_reg_addr =
                        IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx));
                rxq->rdh_reg_addr =
@@ -2230,18 +2226,30 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 #endif
        rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
 
+       /*
+        * Certain constraints must be met in order to use the bulk buffer
+        * allocation Rx burst function. If any of the Rx queues does not meet
+        * them, the feature must be disabled for the whole port.
+        */
+       if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
+               PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc "
+                                   "preconditions - canceling the feature for "
+                                   "the whole port[%d]",
+                            rxq->queue_id, rxq->port_id);
+               hw->rx_bulk_alloc_allowed = false;
+       }
+
        /*
         * Allocate software ring. Allow for space at the end of the
         * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
         * function does not access an invalid memory region.
         */
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
-       len = (uint16_t)(nb_desc + RTE_PMD_IXGBE_RX_MAX_BURST);
-#else
        len = nb_desc;
-#endif
+       if (hw->rx_bulk_alloc_allowed)
+               len += RTE_PMD_IXGBE_RX_MAX_BURST;
+
        rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
-                                         sizeof(struct igb_rx_entry) * len,
+                                         sizeof(struct ixgbe_rx_entry) * len,
                                          RTE_CACHE_LINE_SIZE, socket_id);
        if (rxq->sw_ring == NULL) {
                ixgbe_rx_queue_release(rxq);
@@ -2250,41 +2258,18 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
        PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
                     rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
 
-       /*
-        * Certain constraints must be met in order to use the bulk buffer
-        * allocation Rx burst function.
-        */
-       use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq);
+       if (!rte_is_power_of_2(nb_desc)) {
+               PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
+                                   "preconditions - canceling the feature for "
+                                   "the whole port[%d]",
+                            rxq->queue_id, rxq->port_id);
+               hw->rx_vec_allowed = false;
+       } else
+               ixgbe_rxq_vec_setup(rxq);
 
-#ifdef RTE_IXGBE_INC_VECTOR
-       ixgbe_rxq_vec_setup(rxq);
-#endif
-       /* Check if pre-conditions are satisfied, and no Scattered Rx */
-       if (!use_def_burst_func && !dev->data->scattered_rx) {
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
-               PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
-                            "satisfied. Rx Burst Bulk Alloc function will be "
-                            "used on port=%d, queue=%d.",
-                            rxq->port_id, rxq->queue_id);
-               dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
-#ifdef RTE_IXGBE_INC_VECTOR
-               if (!ixgbe_rx_vec_condition_check(dev)) {
-                       PMD_INIT_LOG(INFO, "Vector rx enabled, please make "
-                                    "sure RX burst size no less than 32.");
-                       dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
-               }
-#endif
-#endif
-       } else {
-               PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions "
-                            "are not satisfied, Scattered Rx is requested, "
-                            "or RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC is not "
-                            "enabled (port=%d, queue=%d).",
-                            rxq->port_id, rxq->queue_id);
-       }
        dev->data->rx_queues[queue_idx] = rxq;
 
-       ixgbe_reset_rx_queue(rxq);
+       ixgbe_reset_rx_queue(hw, rxq);
 
        return 0;
 }
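This queue-setup path replaces the old per-queue burst-function decision with port-wide capability flags: rx_bulk_alloc_allowed and rx_vec_allowed start out permissive, any single queue that fails a precondition clears the corresponding flag for the whole port, and ixgbe_set_rx_function() later reads the flags once. A condensed standalone sketch of that gating (the checks below are simplified stand-ins, not the driver's actual preconditions):

#include <stdbool.h>
#include <stdio.h>

struct port {
        bool bulk_alloc_allowed;   /* assume allowed until a queue says otherwise */
        bool vec_allowed;
};

static void rx_queue_setup(struct port *p, unsigned nb_desc, unsigned free_thresh)
{
        if (free_thresh == 0 || nb_desc % free_thresh != 0)
                p->bulk_alloc_allowed = false;       /* disable for the whole port */
        if (nb_desc & (nb_desc - 1))                 /* not a power of two */
                p->vec_allowed = false;
}

int main(void)
{
        struct port p = { .bulk_alloc_allowed = true, .vec_allowed = true };

        rx_queue_setup(&p, 512, 32);   /* fine: both features stay enabled */
        rx_queue_setup(&p, 500, 25);   /* 500 is not a power of two */
        printf("bulk=%d vec=%d\n", p.bulk_alloc_allowed, p.vec_allowed); /* bulk=1 vec=0 */
        return 0;
}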
@@ -2294,7 +2279,7 @@ ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
 #define IXGBE_RXQ_SCAN_INTERVAL 4
        volatile union ixgbe_adv_rx_desc *rxdp;
-       struct igb_rx_queue *rxq;
+       struct ixgbe_rx_queue *rxq;
        uint32_t desc = 0;
 
        if (rx_queue_id >= dev->data->nb_rx_queues) {
@@ -2321,7 +2306,7 @@ int
 ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
 {
        volatile union ixgbe_adv_rx_desc *rxdp;
-       struct igb_rx_queue *rxq = rx_queue;
+       struct ixgbe_rx_queue *rxq = rx_queue;
        uint32_t desc;
 
        if (unlikely(offset >= rxq->nb_rx_desc))
@@ -2338,11 +2323,12 @@ void
 ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
 {
        unsigned i;
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        PMD_INIT_FUNC_TRACE();
 
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               struct igb_tx_queue *txq = dev->data->tx_queues[i];
+               struct ixgbe_tx_queue *txq = dev->data->tx_queues[i];
                if (txq != NULL) {
                        txq->ops->release_mbufs(txq);
                        txq->ops->reset(txq);
@@ -2350,10 +2336,10 @@ ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
        }
 
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               struct igb_rx_queue *rxq = dev->data->rx_queues[i];
+               struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
                if (rxq != NULL) {
                        ixgbe_rx_queue_release_mbufs(rxq);
-                       ixgbe_reset_rx_queue(rxq);
+                       ixgbe_reset_rx_queue(hw, rxq);
                }
        }
 }
@@ -2432,19 +2418,19 @@ ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
        mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */
        if (rss_hf & ETH_RSS_IPV4)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
-       if (rss_hf & ETH_RSS_IPV4_TCP)
+       if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
        if (rss_hf & ETH_RSS_IPV6)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
        if (rss_hf & ETH_RSS_IPV6_EX)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
-       if (rss_hf & ETH_RSS_IPV6_TCP)
+       if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
        if (rss_hf & ETH_RSS_IPV6_TCP_EX)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
-       if (rss_hf & ETH_RSS_IPV4_UDP)
+       if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
-       if (rss_hf & ETH_RSS_IPV6_UDP)
+       if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
        if (rss_hf & ETH_RSS_IPV6_UDP_EX)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
@@ -2518,19 +2504,19 @@ ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
        if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4)
                rss_hf |= ETH_RSS_IPV4;
        if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP)
-               rss_hf |= ETH_RSS_IPV4_TCP;
+               rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
        if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6)
                rss_hf |= ETH_RSS_IPV6;
        if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX)
                rss_hf |= ETH_RSS_IPV6_EX;
        if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP)
-               rss_hf |= ETH_RSS_IPV6_TCP;
+               rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
        if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP)
                rss_hf |= ETH_RSS_IPV6_TCP_EX;
        if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP)
-               rss_hf |= ETH_RSS_IPV4_UDP;
+               rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
        if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP)
-               rss_hf |= ETH_RSS_IPV6_UDP;
+               rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
        if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP)
                rss_hf |= ETH_RSS_IPV6_UDP_EX;
        rss_conf->rss_hf = rss_hf;
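The two if-chains above map between the renamed ETH_RSS_NONFRAG_* bits and the IXGBE_MRQC_RSS_FIELD_* register bits, one flag per line. The patch keeps the explicit chains; purely as an illustration, the forward mapping could also be expressed as a small table, as in this sketch with made-up bit values (none of these constants are the real DPDK or ixgbe ones):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the ETH_RSS_* and IXGBE_MRQC_* bits. */
#define RSS_IPV4      (1u << 0)
#define RSS_IPV4_TCP  (1u << 1)
#define RSS_IPV6      (1u << 2)
#define MRQC_IPV4     (1u << 16)
#define MRQC_IPV4_TCP (1u << 17)
#define MRQC_IPV6     (1u << 20)

static const struct { uint32_t hf; uint32_t mrqc; } rss_map[] = {
        { RSS_IPV4,     MRQC_IPV4     },
        { RSS_IPV4_TCP, MRQC_IPV4_TCP },
        { RSS_IPV6,     MRQC_IPV6     },
};

static uint32_t hf_to_mrqc(uint32_t rss_hf)
{
        uint32_t mrqc = 0;
        unsigned i;

        for (i = 0; i < sizeof(rss_map) / sizeof(rss_map[0]); i++)
                if (rss_hf & rss_map[i].hf)
                        mrqc |= rss_map[i].mrqc;
        return mrqc;
}

int main(void)
{
        printf("0x%x\n", hf_to_mrqc(RSS_IPV4 | RSS_IPV6)); /* prints 0x110000 */
        return 0;
}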
@@ -3306,9 +3292,9 @@ ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
 }
 
 static int
-ixgbe_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
+ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
 {
-       struct igb_rx_entry *rxe = rxq->sw_ring;
+       struct ixgbe_rx_entry *rxe = rxq->sw_ring;
        uint64_t dma_addr;
        unsigned i;
 
@@ -3515,6 +3501,74 @@ ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
        return 0;
 }
 
+void ixgbe_set_rx_function(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       /*
+        * In order to allow Vector Rx, a few configuration conditions must be
+        * met and Rx Bulk Allocation must be allowed.
+        */
+       if (ixgbe_rx_vec_dev_conf_condition_check(dev) ||
+           !hw->rx_bulk_alloc_allowed) {
+               PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx "
+                                   "preconditions or RTE_IXGBE_INC_VECTOR is "
+                                   "not enabled",
+                            dev->data->port_id);
+
+               hw->rx_vec_allowed = false;
+       }
+
+       if (dev->data->scattered_rx) {
+               /*
+                * Set the non-LRO scattered callback: there are Vector and
+                * single allocation versions.
+                */
+               if (hw->rx_vec_allowed) {
+                       PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
+                                           "callback (port=%d).",
+                                    dev->data->port_id);
+
+                       dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
+               } else {
+                       PMD_INIT_LOG(DEBUG, "Using Regular (non-vector) "
+                                           "Scattered Rx callback "
+                                           "(port=%d).",
+                                    dev->data->port_id);
+
+                       dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
+               }
+       /*
+        * Below we set "simple" callbacks according to port/queues parameters.
+        * If parameters allow we are going to choose between the following
+        * callbacks:
+        *    - Vector
+        *    - Bulk Allocation
+        *    - Single buffer allocation (the simplest one)
+        */
+       } else if (hw->rx_vec_allowed) {
+               PMD_INIT_LOG(INFO, "Vector rx enabled, please make sure the RX "
+                                  "burst size is no less than 32.");
+
+               dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
+       } else if (hw->rx_bulk_alloc_allowed) {
+               PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
+                                   "satisfied. Rx Burst Bulk Alloc function "
+                                   "will be used on port=%d.",
+                            dev->data->port_id);
+
+               dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
+       } else {
+               PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
+                                   "satisfied, or Scattered Rx is requested, "
+                                   "or RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC "
+                                   "is not enabled (port=%d).",
+                            dev->data->port_id);
+
+               dev->rx_pkt_burst = ixgbe_recv_pkts;
+       }
+}
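ixgbe_set_rx_function() above centralizes what used to be scattered #ifdef blocks: it installs exactly one rx_pkt_burst callback per port, chosen in priority order from the scattered, vector, and bulk-allocation flags. The shape of that selection, reduced to a standalone sketch (the callback and field names here are placeholders, not the driver's):

#include <stdbool.h>
#include <stdio.h>

typedef unsigned (*rx_burst_t)(void *queue, void **pkts, unsigned n);

static unsigned recv_scattered(void *q, void **p, unsigned n) { (void)q; (void)p; return n; }
static unsigned recv_vec(void *q, void **p, unsigned n)       { (void)q; (void)p; return n; }
static unsigned recv_bulk(void *q, void **p, unsigned n)      { (void)q; (void)p; return n; }
static unsigned recv_single(void *q, void **p, unsigned n)    { (void)q; (void)p; return n; }

struct port {
        bool scattered_rx, vec_allowed, bulk_alloc_allowed;
        rx_burst_t rx_pkt_burst;
};

static void set_rx_function(struct port *p)
{
        if (p->scattered_rx)                 /* multi-segment receive needed */
                p->rx_pkt_burst = recv_scattered;
        else if (p->vec_allowed)             /* fastest path when preconditions hold */
                p->rx_pkt_burst = recv_vec;
        else if (p->bulk_alloc_allowed)      /* batched buffer allocation */
                p->rx_pkt_burst = recv_bulk;
        else                                 /* simplest, always-valid fallback */
                p->rx_pkt_burst = recv_single;
}

int main(void)
{
        struct port p = { .vec_allowed = true, .bulk_alloc_allowed = true };

        set_rx_function(&p);
        printf("%d\n", p.rx_pkt_burst == recv_vec);  /* prints 1 */
        return 0;
}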
+
 /*
  * Initializes Receive Unit.
  */
@@ -3522,7 +3576,7 @@ int
 ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw     *hw;
-       struct igb_rx_queue *rxq;
+       struct ixgbe_rx_queue *rxq;
        struct rte_pktmbuf_pool_private *mbp_priv;
        uint64_t bus_addr;
        uint32_t rxctrl;
@@ -3534,6 +3588,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
        uint32_t rxcsum;
        uint16_t buf_size;
        uint16_t i;
+       struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
 
        PMD_INIT_FUNC_TRACE();
        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -3556,7 +3611,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
         * Configure CRC stripping, if any.
         */
        hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
-       if (dev->data->dev_conf.rxmode.hw_strip_crc)
+       if (rx_conf->hw_strip_crc)
                hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
        else
                hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
@@ -3564,11 +3619,11 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
        /*
         * Configure jumbo frame support, if any.
         */
-       if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+       if (rx_conf->jumbo_frame == 1) {
                hlreg0 |= IXGBE_HLREG0_JUMBOEN;
                maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
                maxfrs &= 0x0000FFFF;
-               maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
+               maxfrs |= (rx_conf->max_rx_pkt_len << 16);
                IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
        } else
                hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
@@ -3592,9 +3647,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
                 * Reset crc_len in case it was changed after queue setup by a
                 * call to configure.
                 */
-               rxq->crc_len = (uint8_t)
-                               ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
-                               ETHER_CRC_LEN);
+               rxq->crc_len = rx_conf->hw_strip_crc ? 0 : ETHER_CRC_LEN;
 
                /* Setup the Base and Length of the Rx Descriptor Rings */
                bus_addr = rxq->rx_ring_phys_addr;
@@ -3612,7 +3665,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
                /*
                 * Configure Header Split
                 */
-               if (dev->data->dev_conf.rxmode.header_split) {
+               if (rx_conf->header_split) {
                        if (hw->mac.type == ixgbe_mac_82599EB) {
                                /* Must setup the PSRTYPE register */
                                uint32_t psrtype;
@@ -3622,7 +3675,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
                                        IXGBE_PSRTYPE_IPV6HDR;
                                IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
                        }
-                       srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
+                       srrctl = ((rx_conf->split_hdr_size <<
                                IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
                                IXGBE_SRRCTL_BSIZEHDR_MASK);
                        srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
@@ -3651,29 +3704,15 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
                                       IXGBE_SRRCTL_BSIZEPKT_SHIFT);
 
                /* It adds dual VLAN length for supporting dual VLAN */
-               if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
-                               2 * IXGBE_VLAN_TAG_SIZE) > buf_size){
-                       if (!dev->data->scattered_rx)
-                               PMD_INIT_LOG(DEBUG, "forcing scatter mode");
+               if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
+                                           2 * IXGBE_VLAN_TAG_SIZE > buf_size)
                        dev->data->scattered_rx = 1;
-#ifdef RTE_IXGBE_INC_VECTOR
-                       dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
-#else
-                       dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
-#endif
-               }
        }
 
-       if (dev->data->dev_conf.rxmode.enable_scatter) {
-               if (!dev->data->scattered_rx)
-                       PMD_INIT_LOG(DEBUG, "forcing scatter mode");
-#ifdef RTE_IXGBE_INC_VECTOR
-               dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
-#else
-               dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
-#endif
+       if (rx_conf->enable_scatter)
                dev->data->scattered_rx = 1;
-       }
+
+       ixgbe_set_rx_function(dev);
 
        /*
         * Device configured with multiple RX queues.
@@ -3687,16 +3726,17 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
         */
        rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
        rxcsum |= IXGBE_RXCSUM_PCSD;
-       if (dev->data->dev_conf.rxmode.hw_ip_checksum)
+       if (rx_conf->hw_ip_checksum)
                rxcsum |= IXGBE_RXCSUM_IPPCSE;
        else
                rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
 
        IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
 
-       if (hw->mac.type == ixgbe_mac_82599EB) {
+       if (hw->mac.type == ixgbe_mac_82599EB ||
+           hw->mac.type == ixgbe_mac_X540) {
                rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
-               if (dev->data->dev_conf.rxmode.hw_strip_crc)
+               if (rx_conf->hw_strip_crc)
                        rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
                else
                        rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
@@ -3714,7 +3754,7 @@ void
 ixgbe_dev_tx_init(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw     *hw;
-       struct igb_tx_queue *txq;
+       struct ixgbe_tx_queue *txq;
        uint64_t bus_addr;
        uint32_t hlreg0;
        uint32_t txctrl;
@@ -3810,8 +3850,8 @@ int
 ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw     *hw;
-       struct igb_tx_queue *txq;
-       struct igb_rx_queue *rxq;
+       struct ixgbe_tx_queue *txq;
+       struct ixgbe_rx_queue *rxq;
        uint32_t txdctl;
        uint32_t dmatxctl;
        uint32_t rxctrl;
@@ -3877,7 +3917,7 @@ int
 ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
        struct ixgbe_hw     *hw;
-       struct igb_rx_queue *rxq;
+       struct ixgbe_rx_queue *rxq;
        uint32_t rxdctl;
        int poll_ms;
 
@@ -3922,7 +3962,7 @@ int
 ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
        struct ixgbe_hw     *hw;
-       struct igb_rx_queue *rxq;
+       struct ixgbe_rx_queue *rxq;
        uint32_t rxdctl;
        int poll_ms;
 
@@ -3949,7 +3989,7 @@ ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
                rte_delay_us(RTE_IXGBE_WAIT_100_US);
 
                ixgbe_rx_queue_release_mbufs(rxq);
-               ixgbe_reset_rx_queue(rxq);
+               ixgbe_reset_rx_queue(hw, rxq);
        } else
                return -1;
 
@@ -3964,7 +4004,7 @@ int
 ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
        struct ixgbe_hw     *hw;
-       struct igb_tx_queue *txq;
+       struct ixgbe_tx_queue *txq;
        uint32_t txdctl;
        int poll_ms;
 
@@ -4005,7 +4045,7 @@ int
 ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
        struct ixgbe_hw     *hw;
-       struct igb_tx_queue *txq;
+       struct ixgbe_tx_queue *txq;
        uint32_t txdctl;
        uint32_t txtdh, txtdt;
        int poll_ms;
@@ -4065,7 +4105,7 @@ int
 ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw     *hw;
-       struct igb_rx_queue *rxq;
+       struct ixgbe_rx_queue *rxq;
        struct rte_pktmbuf_pool_private *mbp_priv;
        uint64_t bus_addr;
        uint32_t srrctl, psrtype = 0;
@@ -4167,17 +4207,20 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
                buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
                                       IXGBE_SRRCTL_BSIZEPKT_SHIFT);
 
-               /* It adds dual VLAN length for supporting dual VLAN */
-               if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
+               if (dev->data->dev_conf.rxmode.enable_scatter ||
+                   /* It adds dual VLAN length for supporting dual VLAN */
+                   (dev->data->dev_conf.rxmode.max_rx_pkt_len +
                                2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
                        if (!dev->data->scattered_rx)
                                PMD_INIT_LOG(DEBUG, "forcing scatter mode");
                        dev->data->scattered_rx = 1;
 #ifdef RTE_IXGBE_INC_VECTOR
-                       dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
-#else
-                       dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
+                       if (rte_is_power_of_2(rxq->nb_rx_desc))
+                               dev->rx_pkt_burst =
+                                       ixgbe_recv_scattered_pkts_vec;
+                       else
 #endif
+                               dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
                }
        }
 
@@ -4195,17 +4238,6 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
                IXGBE_PSRTYPE_RQPL_SHIFT;
        IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
 
-       if (dev->data->dev_conf.rxmode.enable_scatter) {
-               if (!dev->data->scattered_rx)
-                       PMD_INIT_LOG(DEBUG, "forcing scatter mode");
-#ifdef RTE_IXGBE_INC_VECTOR
-               dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
-#else
-               dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
-#endif
-               dev->data->scattered_rx = 1;
-       }
-
        return 0;
 }
 
@@ -4216,7 +4248,7 @@ void
 ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw     *hw;
-       struct igb_tx_queue *txq;
+       struct ixgbe_tx_queue *txq;
        uint64_t bus_addr;
        uint32_t txctrl;
        uint16_t i;
@@ -4257,8 +4289,8 @@ void
 ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw     *hw;
-       struct igb_tx_queue *txq;
-       struct igb_rx_queue *rxq;
+       struct ixgbe_tx_queue *txq;
+       struct ixgbe_rx_queue *rxq;
        uint32_t txdctl;
        uint32_t rxdctl;
        uint16_t i;
@@ -4313,3 +4345,34 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
 
        }
 }
+
+/* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */
+int __attribute__((weak))
+ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
+{
+       return -1;
+}
+
+uint16_t __attribute__((weak))
+ixgbe_recv_pkts_vec(
+       void __rte_unused *rx_queue,
+       struct rte_mbuf __rte_unused **rx_pkts,
+       uint16_t __rte_unused nb_pkts)
+{
+       return 0;
+}
+
+uint16_t __attribute__((weak))
+ixgbe_recv_scattered_pkts_vec(
+       void __rte_unused *rx_queue,
+       struct rte_mbuf __rte_unused **rx_pkts,
+       uint16_t __rte_unused nb_pkts)
+{
+       return 0;
+}
+
+int __attribute__((weak))
+ixgbe_rxq_vec_setup(struct ixgbe_rx_queue __rte_unused *rxq)
+{
+       return -1;
+}
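The weak definitions above let the PMD link when the vector Rx object file is left out of the build; when it is compiled in, its ordinary (strong) definitions of the same symbols override these stubs at link time. A self-contained GCC/Clang illustration of the mechanism, with hypothetical names:

/* stub.c -- default, weak definition; links even if no other file defines it. */
#include <stdio.h>

int __attribute__((weak)) vec_setup(void)
{
        return -1;                /* "vector path not available" */
}

/* vec.c (optional) -- compiling and linking this file overrides the weak stub:
 *
 *     int vec_setup(void) { return 0; }
 */

int main(void)
{
        /* Prints -1 when only stub.c is linked, 0 when vec.c is linked too. */
        printf("%d\n", vec_setup());
        return 0;
}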