ixgbe: fix Rx CRC stripping for X540
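
Receive CRC stripping is programmed through RDRXCTL in ixgbe_dev_rx_init(),
but only when hw->mac.type was ixgbe_mac_82599EB, so the hw_strip_crc
setting was silently ignored on X540 ports. Extend the check to
ixgbe_mac_X540 so RDRXCTL is programmed there as well.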
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index 5c36bff..92be61e 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -53,7 +53,6 @@
 #include <rte_memory.h>
 #include <rte_memzone.h>
 #include <rte_launch.h>
-#include <rte_tailq.h>
 #include <rte_eal.h>
 #include <rte_per_lcore.h>
 #include <rte_lcore.h>
 #include "ixgbe/ixgbe_common.h"
 #include "ixgbe_rxtx.h"
 
-#define IXGBE_RSS_OFFLOAD_ALL ( \
-               ETH_RSS_IPV4 | \
-               ETH_RSS_IPV4_TCP | \
-               ETH_RSS_IPV6 | \
-               ETH_RSS_IPV6_EX | \
-               ETH_RSS_IPV6_TCP | \
-               ETH_RSS_IPV6_TCP_EX | \
-               ETH_RSS_IPV4_UDP | \
-               ETH_RSS_IPV6_UDP | \
-               ETH_RSS_IPV6_UDP_EX)
-
 /* Bit Mask to indicate what bits required for building TX context */
 #define IXGBE_TX_OFFLOAD_MASK (                         \
                PKT_TX_VLAN_PKT |                \
@@ -133,9 +121,9 @@ rte_rxmbuf_alloc(struct rte_mempool *mp)
  * Return the total number of buffers freed.
  */
 static inline int __attribute__((always_inline))
-ixgbe_tx_free_bufs(struct igb_tx_queue *txq)
+ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
 {
-       struct igb_tx_entry *txep;
+       struct ixgbe_tx_entry *txep;
        uint32_t status;
        int i;
 
@@ -219,11 +207,11 @@ tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
  * Copy mbuf pointers to the S/W ring.
  */
 static inline void
-ixgbe_tx_fill_hw_ring(struct igb_tx_queue *txq, struct rte_mbuf **pkts,
+ixgbe_tx_fill_hw_ring(struct ixgbe_tx_queue *txq, struct rte_mbuf **pkts,
                      uint16_t nb_pkts)
 {
        volatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
-       struct igb_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
+       struct ixgbe_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
        const int N_PER_LOOP = 4;
        const int N_PER_LOOP_MASK = N_PER_LOOP-1;
        int mainpart, leftover;
@@ -255,7 +243,7 @@ static inline uint16_t
 tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
             uint16_t nb_pkts)
 {
-       struct igb_tx_queue *txq = (struct igb_tx_queue *)tx_queue;
+       struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
        volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring;
        uint16_t n = 0;
 
@@ -363,7 +351,7 @@ ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
 }
 
 static inline void
-ixgbe_set_xmit_ctx(struct igb_tx_queue* txq,
+ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
                volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
                uint64_t ol_flags, union ixgbe_tx_offload tx_offload)
 {
@@ -453,7 +441,7 @@ ixgbe_set_xmit_ctx(struct igb_tx_queue* txq,
  * or create a new context descriptor.
  */
 static inline uint32_t
-what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
+what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
                union ixgbe_tx_offload tx_offload)
 {
        /* If match with the current used context */
@@ -509,9 +497,9 @@ tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
 
 /* Reset transmit descriptors after they have been used */
 static inline int
-ixgbe_xmit_cleanup(struct igb_tx_queue *txq)
+ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
 {
-       struct igb_tx_entry *sw_ring = txq->sw_ring;
+       struct ixgbe_tx_entry *sw_ring = txq->sw_ring;
        volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring;
        uint16_t last_desc_cleaned = txq->last_desc_cleaned;
        uint16_t nb_tx_desc = txq->nb_tx_desc;
@@ -570,9 +558,9 @@ uint16_t
 ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                uint16_t nb_pkts)
 {
-       struct igb_tx_queue *txq;
-       struct igb_tx_entry *sw_ring;
-       struct igb_tx_entry *txe, *txn;
+       struct ixgbe_tx_queue *txq;
+       struct ixgbe_tx_entry *sw_ring;
+       struct ixgbe_tx_entry *txe, *txn;
        volatile union ixgbe_adv_tx_desc *txr;
        volatile union ixgbe_adv_tx_desc *txd;
        struct rte_mbuf     *tx_pkt;
@@ -871,14 +859,14 @@ rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
 {
        uint64_t pkt_flags;
 
-       static uint64_t ip_pkt_types_map[16] = {
+       static const uint64_t ip_pkt_types_map[16] = {
                0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
                PKT_RX_IPV6_HDR, 0, 0, 0,
                PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
                PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
        };
 
-       static uint64_t ip_rss_types_map[16] = {
+       static const uint64_t ip_rss_types_map[16] = {
                0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
                0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
                PKT_RX_RSS_HASH, 0, 0, 0,
@@ -949,10 +937,10 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status)
 #error "PMD IXGBE: LOOK_AHEAD must be 8\n"
 #endif
 static inline int
-ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)
+ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
 {
        volatile union ixgbe_adv_rx_desc *rxdp;
-       struct igb_rx_entry *rxep;
+       struct ixgbe_rx_entry *rxep;
        struct rte_mbuf *mb;
        uint16_t pkt_len;
        uint64_t pkt_flags;
@@ -1033,13 +1021,13 @@ ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)
 }
 
 static inline int
-ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)
+ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq)
 {
        volatile union ixgbe_adv_rx_desc *rxdp;
-       struct igb_rx_entry *rxep;
+       struct ixgbe_rx_entry *rxep;
        struct rte_mbuf *mb;
        uint16_t alloc_idx;
-       uint64_t dma_addr;
+       __le64 dma_addr;
        int diag, i;
 
        /* allocate buffers in bulk directly into the S/W ring */
@@ -1062,7 +1050,7 @@ ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)
                mb->port = rxq->port_id;
 
                /* populate the descriptors */
-               dma_addr = (uint64_t)mb->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+               dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
                rxdp[i].read.hdr_addr = dma_addr;
                rxdp[i].read.pkt_addr = dma_addr;
        }
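
RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) expands to the same buf_physaddr +
RTE_PKTMBUF_HEADROOM computation as the removed open-coded line, and the
rte_cpu_to_le_64() wrapper keeps the descriptor address little-endian: it
byte-swaps on big-endian hosts and compiles away on little-endian ones.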
@@ -1082,7 +1070,7 @@ ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)
 }
 
 static inline uint16_t
-ixgbe_rx_fill_from_stage(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ixgbe_rx_fill_from_stage(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
                         uint16_t nb_pkts)
 {
        struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
@@ -1106,7 +1094,7 @@ static inline uint16_t
 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
             uint16_t nb_pkts)
 {
-       struct igb_rx_queue *rxq = (struct igb_rx_queue *)rx_queue;
+       struct ixgbe_rx_queue *rxq = (struct ixgbe_rx_queue *)rx_queue;
        uint16_t nb_rx = 0;
 
        /* Any previously recv'd pkts will be returned from the Rx stage */
@@ -1156,7 +1144,7 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 }
 
 /* split requests into chunks of size RTE_PMD_IXGBE_RX_MAX_BURST */
-uint16_t
+static uint16_t
 ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
                           uint16_t nb_pkts)
 {
@@ -1188,11 +1176,11 @@ uint16_t
 ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts)
 {
-       struct igb_rx_queue *rxq;
+       struct ixgbe_rx_queue *rxq;
        volatile union ixgbe_adv_rx_desc *rx_ring;
        volatile union ixgbe_adv_rx_desc *rxdp;
-       struct igb_rx_entry *sw_ring;
-       struct igb_rx_entry *rxe;
+       struct ixgbe_rx_entry *sw_ring;
+       struct ixgbe_rx_entry *rxe;
        struct rte_mbuf *rxm;
        struct rte_mbuf *nmb;
        union ixgbe_adv_rx_desc rxd;
@@ -1370,11 +1358,11 @@ uint16_t
 ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                          uint16_t nb_pkts)
 {
-       struct igb_rx_queue *rxq;
+       struct ixgbe_rx_queue *rxq;
        volatile union ixgbe_adv_rx_desc *rx_ring;
        volatile union ixgbe_adv_rx_desc *rxdp;
-       struct igb_rx_entry *sw_ring;
-       struct igb_rx_entry *rxe;
+       struct ixgbe_rx_entry *sw_ring;
+       struct ixgbe_rx_entry *rxe;
        struct rte_mbuf *first_seg;
        struct rte_mbuf *last_seg;
        struct rte_mbuf *rxm;
@@ -1570,13 +1558,14 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                first_seg->ol_flags = pkt_flags;
 
                if (likely(pkt_flags & PKT_RX_RSS_HASH))
-                       first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
+                       first_seg->hash.rss =
+                                   rte_le_to_cpu_32(rxd.wb.lower.hi_dword.rss);
                else if (pkt_flags & PKT_RX_FDIR) {
                        first_seg->hash.fdir.hash =
-                               (uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
-                                          & IXGBE_ATR_HASH_MASK);
+                           rte_le_to_cpu_16(rxd.wb.lower.hi_dword.csum_ip.csum)
+                                          & IXGBE_ATR_HASH_MASK;
                        first_seg->hash.fdir.id =
-                               rxd.wb.lower.hi_dword.csum_ip.ip_id;
+                         rte_le_to_cpu_16(rxd.wb.lower.hi_dword.csum_ip.ip_id);
                }
 
                /* Prefetch data of first segment, if configured to do so. */
@@ -1686,7 +1675,7 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
 }
 
 static void
-ixgbe_tx_queue_release_mbufs(struct igb_tx_queue *txq)
+ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
 {
        unsigned i;
 
@@ -1701,7 +1690,7 @@ ixgbe_tx_queue_release_mbufs(struct igb_tx_queue *txq)
 }
 
 static void
-ixgbe_tx_free_swring(struct igb_tx_queue *txq)
+ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
 {
        if (txq != NULL &&
            txq->sw_ring != NULL)
@@ -1709,7 +1698,7 @@ ixgbe_tx_free_swring(struct igb_tx_queue *txq)
 }
 
 static void
-ixgbe_tx_queue_release(struct igb_tx_queue *txq)
+ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
 {
        if (txq != NULL && txq->ops != NULL) {
                txq->ops->release_mbufs(txq);
@@ -1724,13 +1713,13 @@ ixgbe_dev_tx_queue_release(void *txq)
        ixgbe_tx_queue_release(txq);
 }
 
-/* (Re)set dynamic igb_tx_queue fields to defaults */
+/* (Re)set dynamic ixgbe_tx_queue fields to defaults */
 static void
-ixgbe_reset_tx_queue(struct igb_tx_queue *txq)
+ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
 {
        static const union ixgbe_adv_tx_desc zeroed_desc = { .read = {
                        .buffer_addr = 0}};
-       struct igb_tx_entry *txe = txq->sw_ring;
+       struct ixgbe_tx_entry *txe = txq->sw_ring;
        uint16_t prev, i;
 
        /* Zero out HW ring memory */
@@ -1765,12 +1754,46 @@ ixgbe_reset_tx_queue(struct igb_tx_queue *txq)
                IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
 }
 
-static struct ixgbe_txq_ops def_txq_ops = {
+static const struct ixgbe_txq_ops def_txq_ops = {
        .release_mbufs = ixgbe_tx_queue_release_mbufs,
        .free_swring = ixgbe_tx_free_swring,
        .reset = ixgbe_reset_tx_queue,
 };
 
+/* Takes an ethdev and a queue and sets up the tx function to be used based on
+ * the queue parameters. Used in tx_queue_setup by the primary process and then
+ * in dev_init by a secondary process when attaching to an existing ethdev.
+ */
+void
+ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
+{
+       /* Use a simple Tx queue (no offloads, no multi segs) if possible */
+       if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS)
+                       && (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
+               PMD_INIT_LOG(INFO, "Using simple tx code path");
+#ifdef RTE_IXGBE_INC_VECTOR
+               if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
+                               (rte_eal_process_type() != RTE_PROC_PRIMARY ||
+                                       ixgbe_txq_vec_setup(txq) == 0)) {
+                       PMD_INIT_LOG(INFO, "Vector tx enabled.");
+                       dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
+               } else
+#endif
+               dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
+       } else {
+               PMD_INIT_LOG(INFO, "Using full-featured tx code path");
+               PMD_INIT_LOG(INFO,
+                               " - txq_flags = %lx " "[IXGBE_SIMPLE_FLAGS=%lx]",
+                               (unsigned long)txq->txq_flags,
+                               (unsigned long)IXGBE_SIMPLE_FLAGS);
+               PMD_INIT_LOG(INFO,
+                               " - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
+                               (unsigned long)txq->tx_rs_thresh,
+                               (unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
+               dev->tx_pkt_burst = ixgbe_xmit_pkts;
+       }
+}
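
For reference, a minimal sketch of the secondary-process path the comment
above describes, assuming a helper called from dev_init (the function name
and the eth_dev parameter are illustrative, not part of this patch). The
device-level Tx burst function is set by the last queue initialized, so a
single call suffices:

/* Illustrative helper for a secondary process attaching to an ethdev
 * already configured by the primary: re-derive dev->tx_pkt_burst from
 * the last initialized Tx queue. */
static void
secondary_attach_tx(struct rte_eth_dev *eth_dev)
{
        struct ixgbe_tx_queue *txq;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY ||
            eth_dev->data->nb_tx_queues == 0)
                return;

        txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues - 1];
        ixgbe_set_tx_function(eth_dev, txq);
}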
+
 int
 ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
                         uint16_t queue_idx,
@@ -1779,7 +1802,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
                         const struct rte_eth_txconf *tx_conf)
 {
        const struct rte_memzone *tz;
-       struct igb_tx_queue *txq;
+       struct ixgbe_tx_queue *txq;
        struct ixgbe_hw     *hw;
        uint16_t tx_rs_thresh, tx_free_thresh;
 
@@ -1876,7 +1899,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
        }
 
        /* First allocate the tx queue data structure */
-       txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct igb_tx_queue),
+       txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (txq == NULL)
                return (-ENOMEM);
@@ -1911,7 +1934,10 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
        /*
         * Modification to set VFTDT for virtual function if vf is detected
         */
-       if (hw->mac.type == ixgbe_mac_82599_vf)
+       if (hw->mac.type == ixgbe_mac_82599_vf ||
+           hw->mac.type == ixgbe_mac_X540_vf ||
+           hw->mac.type == ixgbe_mac_X550_vf ||
+           hw->mac.type == ixgbe_mac_X550EM_x_vf)
                txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx));
        else
                txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
@@ -1924,7 +1950,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
        /* Allocate software ring */
        txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
-                               sizeof(struct igb_tx_entry) * nb_desc,
+                               sizeof(struct ixgbe_tx_entry) * nb_desc,
                                RTE_CACHE_LINE_SIZE, socket_id);
        if (txq->sw_ring == NULL) {
                ixgbe_tx_queue_release(txq);
@@ -1933,31 +1959,8 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
        PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
                     txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
 
-       /* Use a simple Tx queue (no offloads, no multi segs) if possible */
-       if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) &&
-           (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
-               PMD_INIT_LOG(INFO, "Using simple tx code path");
-#ifdef RTE_IXGBE_INC_VECTOR
-               if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
-                   ixgbe_txq_vec_setup(txq) == 0) {
-                       PMD_INIT_LOG(INFO, "Vector tx enabled.");
-                       dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
-               }
-               else
-#endif
-                       dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
-       } else {
-               PMD_INIT_LOG(INFO, "Using full-featured tx code path");
-               PMD_INIT_LOG(INFO, " - txq_flags = %lx "
-                            "[IXGBE_SIMPLE_FLAGS=%lx]",
-                            (long unsigned)txq->txq_flags,
-                            (long unsigned)IXGBE_SIMPLE_FLAGS);
-               PMD_INIT_LOG(INFO, " - tx_rs_thresh = %lu "
-                            "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
-                            (long unsigned)txq->tx_rs_thresh,
-                            (long unsigned)RTE_PMD_IXGBE_TX_MAX_BURST);
-               dev->tx_pkt_burst = ixgbe_xmit_pkts;
-       }
+       /* set up vector or scalar TX function as appropriate */
+       ixgbe_set_tx_function(dev, txq);
 
        txq->ops->reset(txq);
 
@@ -1968,7 +1971,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 }
 
 static void
-ixgbe_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
+ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
 {
        unsigned i;
 
@@ -1993,7 +1996,7 @@ ixgbe_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
 }
 
 static void
-ixgbe_rx_queue_release(struct igb_rx_queue *rxq)
+ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)
 {
        if (rxq != NULL) {
                ixgbe_rx_queue_release_mbufs(rxq);
@@ -2018,9 +2021,9 @@ ixgbe_dev_rx_queue_release(void *rxq)
  */
 static inline int
 #ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
-check_rx_burst_bulk_alloc_preconditions(struct igb_rx_queue *rxq)
+check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
 #else
-check_rx_burst_bulk_alloc_preconditions(__rte_unused struct igb_rx_queue *rxq)
+check_rx_burst_bulk_alloc_preconditions(__rte_unused struct ixgbe_rx_queue *rxq)
 #endif
 {
        int ret = 0;
@@ -2070,9 +2073,9 @@ check_rx_burst_bulk_alloc_preconditions(__rte_unused struct igb_rx_queue *rxq)
        return ret;
 }
 
-/* Reset dynamic igb_rx_queue fields back to defaults */
+/* Reset dynamic ixgbe_rx_queue fields back to defaults */
 static void
-ixgbe_reset_rx_queue(struct igb_rx_queue *rxq)
+ixgbe_reset_rx_queue(struct ixgbe_rx_queue *rxq)
 {
        static const union ixgbe_adv_rx_desc zeroed_desc = { .read = {
                        .pkt_addr = 0}};
@@ -2136,7 +2139,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
                         struct rte_mempool *mp)
 {
        const struct rte_memzone *rz;
-       struct igb_rx_queue *rxq;
+       struct ixgbe_rx_queue *rxq;
        struct ixgbe_hw     *hw;
        int use_def_burst_func = 1;
        uint16_t len;
@@ -2162,7 +2165,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
        }
 
        /* First allocate the rx queue data structure */
-       rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct igb_rx_queue),
+       rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct ixgbe_rx_queue),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (rxq == NULL)
                return (-ENOMEM);
@@ -2198,7 +2201,10 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
        /*
         * Modified to setup VFRDT for Virtual Function
         */
-       if (hw->mac.type == ixgbe_mac_82599_vf) {
+       if (hw->mac.type == ixgbe_mac_82599_vf ||
+           hw->mac.type == ixgbe_mac_X540_vf ||
+           hw->mac.type == ixgbe_mac_X550_vf ||
+           hw->mac.type == ixgbe_mac_X550EM_x_vf) {
                rxq->rdt_reg_addr =
                        IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx));
                rxq->rdh_reg_addr =
@@ -2228,7 +2234,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
        len = nb_desc;
 #endif
        rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
-                                         sizeof(struct igb_rx_entry) * len,
+                                         sizeof(struct ixgbe_rx_entry) * len,
                                          RTE_CACHE_LINE_SIZE, socket_id);
        if (rxq->sw_ring == NULL) {
                ixgbe_rx_queue_release(rxq);
@@ -2255,7 +2261,8 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
                             rxq->port_id, rxq->queue_id);
                dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
 #ifdef RTE_IXGBE_INC_VECTOR
-               if (!ixgbe_rx_vec_condition_check(dev)) {
+               if (!ixgbe_rx_vec_condition_check(dev) &&
+                   (rte_is_power_of_2(nb_desc))) {
                        PMD_INIT_LOG(INFO, "Vector rx enabled, please make "
                                     "sure RX burst size no less than 32.");
                        dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
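
The added rte_is_power_of_2(nb_desc) guard reflects that the vector receive
path relies on a power-of-two ring size (ring indices can then wrap with a
simple mask); rings of any other size now stay on the scalar bulk-alloc path.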
@@ -2281,7 +2288,7 @@ ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
 #define IXGBE_RXQ_SCAN_INTERVAL 4
        volatile union ixgbe_adv_rx_desc *rxdp;
-       struct igb_rx_queue *rxq;
+       struct ixgbe_rx_queue *rxq;
        uint32_t desc = 0;
 
        if (rx_queue_id >= dev->data->nb_rx_queues) {
@@ -2308,7 +2315,7 @@ int
 ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
 {
        volatile union ixgbe_adv_rx_desc *rxdp;
-       struct igb_rx_queue *rxq = rx_queue;
+       struct ixgbe_rx_queue *rxq = rx_queue;
        uint32_t desc;
 
        if (unlikely(offset >= rxq->nb_rx_desc))
@@ -2329,7 +2336,7 @@ ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
        PMD_INIT_FUNC_TRACE();
 
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               struct igb_tx_queue *txq = dev->data->tx_queues[i];
+               struct ixgbe_tx_queue *txq = dev->data->tx_queues[i];
                if (txq != NULL) {
                        txq->ops->release_mbufs(txq);
                        txq->ops->reset(txq);
@@ -2337,7 +2344,7 @@ ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
        }
 
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               struct igb_rx_queue *rxq = dev->data->rx_queues[i];
+               struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
                if (rxq != NULL) {
                        ixgbe_rx_queue_release_mbufs(rxq);
                        ixgbe_reset_rx_queue(rxq);
@@ -2419,19 +2426,19 @@ ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
        mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */
        if (rss_hf & ETH_RSS_IPV4)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
-       if (rss_hf & ETH_RSS_IPV4_TCP)
+       if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
        if (rss_hf & ETH_RSS_IPV6)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
        if (rss_hf & ETH_RSS_IPV6_EX)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
-       if (rss_hf & ETH_RSS_IPV6_TCP)
+       if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
        if (rss_hf & ETH_RSS_IPV6_TCP_EX)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
-       if (rss_hf & ETH_RSS_IPV4_UDP)
+       if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
-       if (rss_hf & ETH_RSS_IPV6_UDP)
+       if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
        if (rss_hf & ETH_RSS_IPV6_UDP_EX)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
@@ -2505,19 +2512,19 @@ ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
        if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4)
                rss_hf |= ETH_RSS_IPV4;
        if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP)
-               rss_hf |= ETH_RSS_IPV4_TCP;
+               rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
        if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6)
                rss_hf |= ETH_RSS_IPV6;
        if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX)
                rss_hf |= ETH_RSS_IPV6_EX;
        if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP)
-               rss_hf |= ETH_RSS_IPV6_TCP;
+               rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
        if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP)
                rss_hf |= ETH_RSS_IPV6_TCP_EX;
        if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP)
-               rss_hf |= ETH_RSS_IPV4_UDP;
+               rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
        if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP)
-               rss_hf |= ETH_RSS_IPV6_UDP;
+               rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
        if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP)
                rss_hf |= ETH_RSS_IPV6_UDP_EX;
        rss_conf->rss_hf = rss_hf;
@@ -3293,9 +3300,9 @@ ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
 }
 
 static int
-ixgbe_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
+ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
 {
-       struct igb_rx_entry *rxe = rxq->sw_ring;
+       struct ixgbe_rx_entry *rxe = rxq->sw_ring;
        uint64_t dma_addr;
        unsigned i;
 
@@ -3326,6 +3333,67 @@ ixgbe_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
        return 0;
 }
 
+static int
+ixgbe_config_vf_rss(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw;
+       uint32_t mrqc;
+
+       ixgbe_rss_configure(dev);
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       /* MRQC: enable VF RSS */
+       mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+       mrqc &= ~IXGBE_MRQC_MRQE_MASK;
+       switch (RTE_ETH_DEV_SRIOV(dev).active) {
+       case ETH_64_POOLS:
+               mrqc |= IXGBE_MRQC_VMDQRSS64EN;
+               break;
+
+       case ETH_32_POOLS:
+               mrqc |= IXGBE_MRQC_VMDQRSS32EN;
+               break;
+
+       default:
+               PMD_INIT_LOG(ERR, "Invalid pool number in IOV mode with VMDQ RSS");
+               return -EINVAL;
+       }
+
+       IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+
+       return 0;
+}
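
A minimal sketch of the application-side configuration that exercises this
path, assuming standard rte_ethdev calls of this era (the wrapper function,
port_id, and queue counts are illustrative):

/* Illustrative setup: request RSS under VMDq/SR-IOV so that
 * ixgbe_dev_mq_rx_configure() selects the VF RSS path below. */
static int
configure_vf_rss_port(uint8_t port_id)
{
        struct rte_eth_conf port_conf = {
                .rxmode = { .mq_mode = ETH_MQ_RX_VMDQ_RSS },
                .rx_adv_conf = {
                        .rss_conf = {
                                .rss_hf = ETH_RSS_IPV4 |
                                          ETH_RSS_NONFRAG_IPV4_TCP,
                        },
                },
        };

        /* With SR-IOV active and 32 or 64 pools, ixgbe_config_vf_rss()
         * then sets IXGBE_MRQC_VMDQRSS32EN or IXGBE_MRQC_VMDQRSS64EN. */
        return rte_eth_dev_configure(port_id, 4, 4, &port_conf);
}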
+
+static int
+ixgbe_config_vf_default(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       switch (RTE_ETH_DEV_SRIOV(dev).active) {
+       case ETH_64_POOLS:
+               IXGBE_WRITE_REG(hw, IXGBE_MRQC,
+                       IXGBE_MRQC_VMDQEN);
+               break;
+
+       case ETH_32_POOLS:
+               IXGBE_WRITE_REG(hw, IXGBE_MRQC,
+                       IXGBE_MRQC_VMDQRT4TCEN);
+               break;
+
+       case ETH_16_POOLS:
+               IXGBE_WRITE_REG(hw, IXGBE_MRQC,
+                       IXGBE_MRQC_VMDQRT8TCEN);
+               break;
+       default:
+               PMD_INIT_LOG(ERR,
+                       "invalid pool number in IOV mode");
+               break;
+       }
+       return 0;
+}
+
 static int
 ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
 {
@@ -3358,24 +3426,25 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
                        default: ixgbe_rss_disable(dev);
                }
        } else {
-               switch (RTE_ETH_DEV_SRIOV(dev).active) {
                /*
                 * SRIOV active scheme
-                * FIXME if support DCB/RSS together with VMDq & SRIOV
+                * Support RSS together with VMDq & SRIOV
                 */
-               case ETH_64_POOLS:
-                       IXGBE_WRITE_REG(hw, IXGBE_MRQC, IXGBE_MRQC_VMDQEN);
-                       break;
-
-               case ETH_32_POOLS:
-                       IXGBE_WRITE_REG(hw, IXGBE_MRQC, IXGBE_MRQC_VMDQRT4TCEN);
+               switch (dev->data->dev_conf.rxmode.mq_mode) {
+               case ETH_MQ_RX_RSS:
+               case ETH_MQ_RX_VMDQ_RSS:
+                       ixgbe_config_vf_rss(dev);
                        break;
 
-               case ETH_16_POOLS:
-                       IXGBE_WRITE_REG(hw, IXGBE_MRQC, IXGBE_MRQC_VMDQRT8TCEN);
-                       break;
+               /* FIXME: DCB/RSS together with VMDq & SRIOV is not yet supported */
+               case ETH_MQ_RX_VMDQ_DCB:
+               case ETH_MQ_RX_VMDQ_DCB_RSS:
+                       PMD_INIT_LOG(ERR,
+                               "DCB is not supported with VMDq & SRIOV");
+                       return -1;
                default:
-                       PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
+                       ixgbe_config_vf_default(dev);
+                       break;
                }
        }
 
@@ -3447,7 +3516,7 @@ int
 ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw     *hw;
-       struct igb_rx_queue *rxq;
+       struct ixgbe_rx_queue *rxq;
        struct rte_pktmbuf_pool_private *mbp_priv;
        uint64_t bus_addr;
        uint32_t rxctrl;
@@ -3548,9 +3617,9 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
                                IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
                        }
                        srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
-                                  IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
-                                 IXGBE_SRRCTL_BSIZEHDR_MASK);
-                       srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+                               IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+                               IXGBE_SRRCTL_BSIZEHDR_MASK);
+                       srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
                } else
 #endif
                        srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
@@ -3575,31 +3644,23 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
                buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
                                       IXGBE_SRRCTL_BSIZEPKT_SHIFT);
 
-               /* It adds dual VLAN length for supporting dual VLAN */
-               if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
+               if (dev->data->dev_conf.rxmode.enable_scatter ||
+                   /* account for the extra length of two VLAN tags (dual VLAN) */
+                   (dev->data->dev_conf.rxmode.max_rx_pkt_len +
                                2 * IXGBE_VLAN_TAG_SIZE) > buf_size){
                        if (!dev->data->scattered_rx)
                                PMD_INIT_LOG(DEBUG, "forcing scatter mode");
                        dev->data->scattered_rx = 1;
 #ifdef RTE_IXGBE_INC_VECTOR
-                       dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
-#else
-                       dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
+                       if (rte_is_power_of_2(rxq->nb_rx_desc))
+                               dev->rx_pkt_burst =
+                                       ixgbe_recv_scattered_pkts_vec;
+                       else
 #endif
+                               dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
                }
        }
 
-       if (dev->data->dev_conf.rxmode.enable_scatter) {
-               if (!dev->data->scattered_rx)
-                       PMD_INIT_LOG(DEBUG, "forcing scatter mode");
-#ifdef RTE_IXGBE_INC_VECTOR
-               dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
-#else
-               dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
-#endif
-               dev->data->scattered_rx = 1;
-       }
-
        /*
         * Device configured with multiple RX queues.
         */
@@ -3619,7 +3680,8 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 
        IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
 
-       if (hw->mac.type == ixgbe_mac_82599EB) {
+       if (hw->mac.type == ixgbe_mac_82599EB ||
+           hw->mac.type == ixgbe_mac_X540) {
                rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
                if (dev->data->dev_conf.rxmode.hw_strip_crc)
                        rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
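
For context, applications request CRC stripping through the Rx mode flags;
a minimal sketch using this era's rte_ethdev fields (the wrapper function
and port_id are illustrative):

/* Illustrative: request hardware CRC stripping at configure time.
 * With this change the setting takes effect on X540 as well as 82599EB. */
static int
configure_crc_strip_port(uint8_t port_id)
{
        struct rte_eth_conf port_conf = {
                .rxmode = { .hw_strip_crc = 1 },
        };

        return rte_eth_dev_configure(port_id, 1, 1, &port_conf);
}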
@@ -3639,7 +3701,7 @@ void
 ixgbe_dev_tx_init(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw     *hw;
-       struct igb_tx_queue *txq;
+       struct ixgbe_tx_queue *txq;
        uint64_t bus_addr;
        uint32_t hlreg0;
        uint32_t txctrl;
@@ -3731,16 +3793,17 @@ ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
 /*
  * Start Transmit and Receive Units.
  */
-void
+int
 ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw     *hw;
-       struct igb_tx_queue *txq;
-       struct igb_rx_queue *rxq;
+       struct ixgbe_tx_queue *txq;
+       struct ixgbe_rx_queue *rxq;
        uint32_t txdctl;
        uint32_t dmatxctl;
        uint32_t rxctrl;
        uint16_t i;
+       int ret = 0;
 
        PMD_INIT_FUNC_TRACE();
        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -3763,14 +3826,20 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
 
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                txq = dev->data->tx_queues[i];
-               if (!txq->tx_deferred_start)
-                       ixgbe_dev_tx_queue_start(dev, i);
+               if (!txq->tx_deferred_start) {
+                       ret = ixgbe_dev_tx_queue_start(dev, i);
+                       if (ret < 0)
+                               return ret;
+               }
        }
 
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
-               if (!rxq->rx_deferred_start)
-                       ixgbe_dev_rx_queue_start(dev, i);
+               if (!rxq->rx_deferred_start) {
+                       ret = ixgbe_dev_rx_queue_start(dev, i);
+                       if (ret < 0)
+                               return ret;
+               }
        }
 
        /* Enable Receive engine */
@@ -3785,6 +3854,7 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
                        dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
                ixgbe_setup_loopback_link_82599(hw);
 
+       return 0;
 }
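
Since ixgbe_dev_rxtx_start() now reports failure, the caller can stop cleanly
instead of continuing with half-started queues; a sketch of the call-site
handling, assuming locals (err, an error label) from the surrounding start
routine:

/* Sketch, abbreviated from an ixgbe_dev_start()-style flow. */
err = ixgbe_dev_rxtx_start(dev);
if (err < 0) {
        PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
        goto error;
}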
 
 /*
@@ -3794,7 +3864,7 @@ int
 ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
        struct ixgbe_hw     *hw;
-       struct igb_rx_queue *rxq;
+       struct ixgbe_rx_queue *rxq;
        uint32_t rxdctl;
        int poll_ms;
 
@@ -3839,7 +3909,7 @@ int
 ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
        struct ixgbe_hw     *hw;
-       struct igb_rx_queue *rxq;
+       struct ixgbe_rx_queue *rxq;
        uint32_t rxdctl;
        int poll_ms;
 
@@ -3881,7 +3951,7 @@ int
 ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
        struct ixgbe_hw     *hw;
-       struct igb_tx_queue *txq;
+       struct ixgbe_tx_queue *txq;
        uint32_t txdctl;
        int poll_ms;
 
@@ -3922,7 +3992,7 @@ int
 ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
        struct ixgbe_hw     *hw;
-       struct igb_tx_queue *txq;
+       struct ixgbe_tx_queue *txq;
        uint32_t txdctl;
        uint32_t txtdh, txtdt;
        int poll_ms;
@@ -3982,10 +4052,10 @@ int
 ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw     *hw;
-       struct igb_rx_queue *rxq;
+       struct ixgbe_rx_queue *rxq;
        struct rte_pktmbuf_pool_private *mbp_priv;
        uint64_t bus_addr;
-       uint32_t srrctl;
+       uint32_t srrctl, psrtype = 0;
        uint16_t buf_size;
        uint16_t i;
        int ret;
@@ -3993,6 +4063,19 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
        PMD_INIT_FUNC_TRACE();
        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+       if (!rte_is_power_of_2(dev->data->nb_rx_queues)) {
+               PMD_INIT_LOG(ERR, "The number of Rx queues is invalid; "
+                       "it should be a power of 2");
+               return -1;
+       }
+
+       if (dev->data->nb_rx_queues > hw->mac.max_rx_queues) {
+               PMD_INIT_LOG(ERR, "The number of Rx queues is invalid; "
+                       "it should be less than or equal to %d",
+                       hw->mac.max_rx_queues);
+               return -1;
+       }
+
        /*
         * When the VF driver issues a IXGBE_VF_RESET request, the PF driver
         * disables the VF receipt of packets if the PF MTU is > 1500.
@@ -4039,20 +4122,10 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
                 * Configure Header Split
                 */
                if (dev->data->dev_conf.rxmode.header_split) {
-
-                       /* Must setup the PSRTYPE register */
-                       uint32_t psrtype;
-                       psrtype = IXGBE_PSRTYPE_TCPHDR |
-                               IXGBE_PSRTYPE_UDPHDR   |
-                               IXGBE_PSRTYPE_IPV4HDR  |
-                               IXGBE_PSRTYPE_IPV6HDR;
-
-                       IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE(i), psrtype);
-
                        srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
-                                  IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
-                                 IXGBE_SRRCTL_BSIZEHDR_MASK);
-                       srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+                               IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+                               IXGBE_SRRCTL_BSIZEHDR_MASK);
+                       srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
                } else
 #endif
                        srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
@@ -4081,30 +4154,36 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
                buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
                                       IXGBE_SRRCTL_BSIZEPKT_SHIFT);
 
-               /* It adds dual VLAN length for supporting dual VLAN */
-               if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
+               if (dev->data->dev_conf.rxmode.enable_scatter ||
+                   /* account for the extra length of two VLAN tags (dual VLAN) */
+                   (dev->data->dev_conf.rxmode.max_rx_pkt_len +
                                2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
                        if (!dev->data->scattered_rx)
                                PMD_INIT_LOG(DEBUG, "forcing scatter mode");
                        dev->data->scattered_rx = 1;
 #ifdef RTE_IXGBE_INC_VECTOR
-                       dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
-#else
-                       dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
+                       if (rte_is_power_of_2(rxq->nb_rx_desc))
+                               dev->rx_pkt_burst =
+                                       ixgbe_recv_scattered_pkts_vec;
+                       else
 #endif
+                               dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
                }
        }
 
-       if (dev->data->dev_conf.rxmode.enable_scatter) {
-               if (!dev->data->scattered_rx)
-                       PMD_INIT_LOG(DEBUG, "forcing scatter mode");
-#ifdef RTE_IXGBE_INC_VECTOR
-               dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
-#else
-               dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
+#ifdef RTE_HEADER_SPLIT_ENABLE
+       if (dev->data->dev_conf.rxmode.header_split)
+               /* Must setup the PSRTYPE register */
+               psrtype = IXGBE_PSRTYPE_TCPHDR |
+                       IXGBE_PSRTYPE_UDPHDR   |
+                       IXGBE_PSRTYPE_IPV4HDR  |
+                       IXGBE_PSRTYPE_IPV6HDR;
 #endif
-               dev->data->scattered_rx = 1;
-       }
+
+       /* Set RQPL (RSS queues per pool) according to the number of Rx queues */
+       psrtype |= (dev->data->nb_rx_queues >> 1) <<
+               IXGBE_PSRTYPE_RQPL_SHIFT;
+       IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
 
        return 0;
 }
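
Worked example for the RQPL computation above: with nb_rx_queues = 4,
(4 >> 1) = 2 lands in the RQPL field, and with 2 queues the field becomes 1.
This assumes the 82599/X540-style encoding in which RQPL = 1 selects two RSS
queues per pool and RQPL = 2 selects four.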
@@ -4116,7 +4195,7 @@ void
 ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw     *hw;
-       struct igb_tx_queue *txq;
+       struct ixgbe_tx_queue *txq;
        uint64_t bus_addr;
        uint32_t txctrl;
        uint16_t i;
@@ -4157,8 +4236,8 @@ void
 ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw     *hw;
-       struct igb_tx_queue *txq;
-       struct igb_rx_queue *rxq;
+       struct ixgbe_tx_queue *txq;
+       struct ixgbe_rx_queue *rxq;
        uint32_t txdctl;
        uint32_t rxdctl;
        uint16_t i;