mbuf: replace data pointer by an offset
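
The mbuf "data" field, a pointer to the start of packet data inside the
buffer, is replaced by a 16-bit "data_off" offset from buf_addr: an
offset stays valid when the buffer is mapped at a different virtual
address (e.g. in a secondary process) and is smaller than a pointer.
Across this diff the former "pkt" sub-structure is also flattened into
direct fields (data_len, pkt_len, nb_segs, next, port, vlan_tci, hash),
and the queue structures and helper macros deleted from this file move
to the new ixgbe_rxtx.h header. A minimal sketch of the access-pattern
change, using the field names as they appear in the hunks below (the
snippet itself is not part of the patch):

    /* before: the mbuf carried a raw data pointer */
    char *data = (char *)mb->pkt.data;

    /* after: the data start is derived from the buffer base */
    char *data = (char *)mb->buf_addr + mb->data_off;

    /* DMA addresses follow the same rule; RTE_MBUF_DATA_DMA_ADDR() is
     * presumably redefined accordingly in ixgbe_rxtx.h */
    uint64_t dma = mb->buf_physaddr + mb->data_off;
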
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index fd0885a..c661335 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -1,13 +1,13 @@
 /*-
  *   BSD LICENSE
- * 
- *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
+ *
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
  *   All rights reserved.
- * 
+ *
  *   Redistribution and use in source and binary forms, with or without
  *   modification, are permitted provided that the following conditions
  *   are met:
- * 
+ *
  *     * Redistributions of source code must retain the above copyright
  *       notice, this list of conditions and the following disclaimer.
  *     * Redistributions in binary form must reproduce the above copyright
@@ -17,7 +17,7 @@
  *     * Neither the name of Intel Corporation nor the names of its
  *       contributors may be used to endorse or promote products derived
  *       from this software without specific prior written permission.
- * 
+ *
  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -33,7 +33,6 @@
 
 #include <sys/queue.h>
 
-#include <endian.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
 #include "ixgbe/ixgbe_vf.h"
 #include "ixgbe_ethdev.h"
 #include "ixgbe/ixgbe_dcb.h"
-
-
-#define RTE_PMD_IXGBE_TX_MAX_BURST 32
-
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
-#define RTE_PMD_IXGBE_RX_MAX_BURST 32
-#endif
+#include "ixgbe/ixgbe_common.h"
+#include "ixgbe_rxtx.h"
+
+#define IXGBE_RSS_OFFLOAD_ALL ( \
+               ETH_RSS_IPV4 | \
+               ETH_RSS_IPV4_TCP | \
+               ETH_RSS_IPV6 | \
+               ETH_RSS_IPV6_EX | \
+               ETH_RSS_IPV6_TCP | \
+               ETH_RSS_IPV6_TCP_EX | \
+               ETH_RSS_IPV4_UDP | \
+               ETH_RSS_IPV6_UDP | \
+               ETH_RSS_IPV6_UDP_EX)
 
 static inline struct rte_mbuf *
 rte_rxmbuf_alloc(struct rte_mempool *mp)
@@ -91,120 +96,10 @@ rte_rxmbuf_alloc(struct rte_mempool *mp)
        struct rte_mbuf *m;
 
        m = __rte_mbuf_raw_alloc(mp);
-       __rte_mbuf_sanity_check_raw(m, RTE_MBUF_PKT, 0);
+       __rte_mbuf_sanity_check_raw(m, 0);
        return (m);
 }
 
-#define RTE_MBUF_DATA_DMA_ADDR(mb) \
-       (uint64_t) ((mb)->buf_physaddr + (uint64_t)((char *)((mb)->pkt.data) - \
-       (char *)(mb)->buf_addr))
-
-#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
-       (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
-
-/**
- * Structure associated with each descriptor of the RX ring of a RX queue.
- */
-struct igb_rx_entry {
-       struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
-};
-
-/**
- * Structure associated with each descriptor of the TX ring of a TX queue.
- */
-struct igb_tx_entry {
-       struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
-       uint16_t next_id; /**< Index of next descriptor in ring. */
-       uint16_t last_id; /**< Index of last scattered descriptor. */
-};
-
-/**
- * Structure associated with each RX queue.
- */
-struct igb_rx_queue {
-       struct rte_mempool  *mb_pool; /**< mbuf pool to populate RX ring. */
-       volatile union ixgbe_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
-       uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
-       volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
-       volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
-       struct igb_rx_entry *sw_ring; /**< address of RX software ring. */
-       struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
-       struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
-       uint16_t            nb_rx_desc; /**< number of RX descriptors. */
-       uint16_t            rx_tail;  /**< current value of RDT register. */
-       uint16_t            nb_rx_hold; /**< number of held free RX desc. */
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
-       uint16_t rx_nb_avail; /**< nr of staged pkts ready to ret to app */
-       uint16_t rx_next_avail; /**< idx of next staged pkt to ret to app */
-       uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
-#endif
-       uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
-       uint16_t            queue_id; /**< RX queue index. */
-       uint16_t            reg_idx;  /**< RX queue register index. */
-       uint8_t             port_id;  /**< Device port identifier. */
-       uint8_t             crc_len;  /**< 0 if CRC stripped, 4 otherwise. */
-       uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
-       /** need to alloc dummy mbuf, for wraparound when scanning hw ring */
-       struct rte_mbuf fake_mbuf;
-       /** hold packets to return to application */
-       struct rte_mbuf *rx_stage[RTE_PMD_IXGBE_RX_MAX_BURST*2];
-#endif
-};
-
-/**
- * IXGBE CTX Constants
- */
-enum ixgbe_advctx_num {
-       IXGBE_CTX_0    = 0, /**< CTX0 */
-       IXGBE_CTX_1    = 1, /**< CTX1  */
-       IXGBE_CTX_NUM  = 2, /**< CTX NUMBER  */
-};
-
-/**
- * Structure to check if new context need be built
- */
-
-struct ixgbe_advctx_info {
-       uint16_t flags;           /**< ol_flags for context build. */
-       uint32_t cmp_mask;        /**< compare mask for vlan_macip_lens */
-       union rte_vlan_macip vlan_macip_lens; /**< vlan, mac ip length. */
-};
-
-/**
- * Structure associated with each TX queue.
- */
-struct igb_tx_queue {
-       /** TX ring virtual address. */
-       volatile union ixgbe_adv_tx_desc *tx_ring;
-       uint64_t            tx_ring_phys_addr; /**< TX ring DMA address. */
-       struct igb_tx_entry *sw_ring;      /**< virtual address of SW ring. */
-       volatile uint32_t   *tdt_reg_addr; /**< Address of TDT register. */
-       uint16_t            nb_tx_desc;    /**< number of TX descriptors. */
-       uint16_t            tx_tail;       /**< current value of TDT reg. */
-       uint16_t            tx_free_thresh;/**< minimum TX before freeing. */
-       /** Number of TX descriptors to use before RS bit is set. */
-       uint16_t            tx_rs_thresh;
-       /** Number of TX descriptors used since RS bit was set. */
-       uint16_t            nb_tx_used;
-       /** Index to last TX descriptor to have been cleaned. */
-       uint16_t            last_desc_cleaned;
-       /** Total number of TX descriptors ready to be allocated. */
-       uint16_t            nb_tx_free;
-       uint16_t tx_next_dd; /**< next desc to scan for DD bit */
-       uint16_t tx_next_rs; /**< next desc to set RS bit */
-       uint16_t            queue_id;      /**< TX queue index. */
-       uint16_t            reg_idx;       /**< TX queue register index. */
-       uint8_t             port_id;       /**< Device port identifier. */
-       uint8_t             pthresh;       /**< Prefetch threshold register. */
-       uint8_t             hthresh;       /**< Host threshold register. */
-       uint8_t             wthresh;       /**< Write-back threshold reg. */
-       uint32_t txq_flags; /**< Holds flags for this TXq */
-       uint32_t            ctx_curr;      /**< Hardware context states. */
-       /** Hardware context0 history. */
-       struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
-};
-
 
 #if 1
 #define RTE_PMD_USE_PREFETCH
@@ -219,32 +114,12 @@ struct igb_tx_queue {
 #define rte_ixgbe_prefetch(p)   do {} while(0)
 #endif
 
-#ifdef RTE_PMD_PACKET_PREFETCH
-#define rte_packet_prefetch(p)  rte_prefetch1(p)
-#else
-#define rte_packet_prefetch(p)  do {} while(0)
-#endif
-
 /*********************************************************************
  *
  *  TX functions
  *
  **********************************************************************/
 
-/*
- * The "simple" TX queue functions require that the following
- * flags are set when the TX queue is configured:
- *  - ETH_TXQ_FLAGS_NOMULTSEGS
- *  - ETH_TXQ_FLAGS_NOVLANOFFL
- *  - ETH_TXQ_FLAGS_NOXSUMSCTP
- *  - ETH_TXQ_FLAGS_NOXSUMUDP
- *  - ETH_TXQ_FLAGS_NOXSUMTCP
- * and that the RS bit threshold (tx_rs_thresh) is at least equal to
- * RTE_PMD_IXGBE_TX_MAX_BURST.
- */
-#define IXGBE_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
-                           ETH_TXQ_FLAGS_NOOFFLOADS)
-
 /*
  * Check for descriptors with their DD bit set and free mbufs.
  * Return the total number of buffers freed.
@@ -293,19 +168,6 @@ ixgbe_tx_free_bufs(struct igb_tx_queue *txq)
        return txq->tx_rs_thresh;
 }
 
-/*
- * Populate descriptors with the following info:
- * 1.) buffer_addr = phys_addr + headroom
- * 2.) cmd_type_len = DCMD_DTYP_FLAGS | pkt_len
- * 3.) olinfo_status = pkt_len << PAYLEN_SHIFT
- */
-
-/* Defines for Tx descriptor */
-#define DCMD_DTYP_FLAGS (IXGBE_ADVTXD_DTYP_DATA |\
-                        IXGBE_ADVTXD_DCMD_IFCS |\
-                        IXGBE_ADVTXD_DCMD_DEXT |\
-                        IXGBE_ADVTXD_DCMD_EOP)
-
 /* Populate 4 descriptors with data from 4 mbufs */
 static inline void
 tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
@@ -316,7 +178,7 @@ tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
 
        for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
                buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
-               pkt_len = (*pkts)->pkt.data_len;
+               pkt_len = (*pkts)->data_len;
 
                /* write data to descriptor */
                txdp->read.buffer_addr = buf_dma_addr;
@@ -335,7 +197,7 @@ tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
        uint32_t pkt_len;
 
        buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
-       pkt_len = (*pkts)->pkt.data_len;
+       pkt_len = (*pkts)->data_len;
 
        /* write data to descriptor */
        txdp->read.buffer_addr = buf_dma_addr;
@@ -678,6 +540,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
        volatile union ixgbe_adv_tx_desc *txd;
        struct rte_mbuf     *tx_pkt;
        struct rte_mbuf     *m_seg;
+       union ixgbe_vlan_macip vlan_macip_lens;
        uint64_t buf_dma_addr;
        uint32_t olinfo_status;
        uint32_t cmd_type_len;
@@ -689,7 +552,6 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
        uint16_t nb_tx;
        uint16_t nb_used;
        uint16_t tx_ol_req;
-       uint32_t vlan_macip_lens;
        uint32_t ctx = 0;
        uint32_t new_ctx;
 
@@ -708,7 +570,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
        for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
                new_ctx = 0;
                tx_pkt = *tx_pkts++;
-               pkt_len = tx_pkt->pkt.pkt_len;
+               pkt_len = tx_pkt->pkt_len;
 
                RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
 
@@ -717,14 +579,15 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                 * are needed for offload functionality.
                 */
                ol_flags = tx_pkt->ol_flags;
-               vlan_macip_lens = tx_pkt->pkt.vlan_macip.data;
+               vlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci;
+               vlan_macip_lens.f.l2_l3_len = tx_pkt->l2_l3_len;
 
                /* If hardware offload required */
                tx_ol_req = (uint16_t)(ol_flags & PKT_TX_OFFLOAD_MASK);
                if (tx_ol_req) {
                        /* Build a new context descriptor, or reuse the existing one. */
                        ctx = what_advctx_update(txq, tx_ol_req,
-                               vlan_macip_lens);
+                               vlan_macip_lens.data);
                        /* Only allocate context descriptor if required */
                        new_ctx = (ctx == IXGBE_CTX_NUM);
                        ctx = txq->ctx_curr;
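
With the packed pkt.vlan_macip word gone from the mbuf, the transmit
path rebuilds it locally from the flat vlan_tci and l2_l3_len fields,
so the context-descriptor helpers keep receiving a single 32-bit word.
A sketch of the union this usage implies (the actual definition is
expected in ixgbe_rxtx.h):

    union ixgbe_vlan_macip {
            uint32_t data;              /* combined 32-bit view */
            struct {
                    uint16_t l2_l3_len; /* L2 and L3 header lengths */
                    uint16_t vlan_tci;  /* VLAN tag control id */
            } f;
    };
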
@@ -735,7 +598,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                 * This will always be the number of segments + the number of
                 * Context descriptors required to transmit the packet
                 */
-               nb_used = (uint16_t)(tx_pkt->pkt.nb_segs + new_ctx);
+               nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
 
                /*
                 * The number of descriptors that must be allocated for a
@@ -866,7 +729,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                                }
 
                                ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
-                                   vlan_macip_lens);
+                                   vlan_macip_lens.data);
 
                                txe->last_id = tx_last;
                                tx_id = txe->next_id;
@@ -895,7 +758,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        /*
                         * Set up Transmit Data Descriptor.
                         */
-                       slen = m_seg->pkt.data_len;
+                       slen = m_seg->data_len;
                        buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
                        txd->read.buffer_addr =
                                rte_cpu_to_le_64(buf_dma_addr);
@@ -906,7 +769,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        txe->last_id = tx_last;
                        tx_id = txe->next_id;
                        txe = txn;
-                       m_seg = m_seg->pkt.next;
+                       m_seg = m_seg->next;
                } while (m_seg != NULL);
 
                /*
@@ -1024,7 +887,7 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status)
 #ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
 /*
  * LOOK_AHEAD defines how many desc statuses to check beyond the
- * current descriptor. 
+ * current descriptor.
  * It must be a pound define for optimal performance.
  * Do not change the value of LOOK_AHEAD, as the ixgbe_rx_scan_hw_ring
  * function only works with LOOK_AHEAD=8.
@@ -1063,12 +926,11 @@ ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)
                for (j = LOOK_AHEAD-1; j >= 0; --j)
                        s[j] = rxdp[j].wb.upper.status_error;
 
-               /* Clear everything but the status bits (LSB) */
+               /* Compute how many status bits were set */
+               nb_dd = 0;
                for (j = 0; j < LOOK_AHEAD; ++j)
-                       s[j] &= IXGBE_RXDADV_STAT_DD;
+                       nb_dd += s[j] & IXGBE_RXDADV_STAT_DD;
 
-               /* Compute how many status bits were set */
-               nb_dd = s[0]+s[1]+s[2]+s[3]+s[4]+s[5]+s[6]+s[7];
                nb_rx += nb_dd;
 
                /* Translate descriptor info to mbuf format */
@@ -1076,10 +938,10 @@ ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)
                        mb = rxep[j].mbuf;
                        pkt_len = (uint16_t)(rxdp[j].wb.upper.length -
                                                        rxq->crc_len);
-                       mb->pkt.data_len = pkt_len;
-                       mb->pkt.pkt_len = pkt_len;
-                       mb->pkt.vlan_macip.f.vlan_tci = rxdp[j].wb.upper.vlan;
-                       mb->pkt.hash.rss = rxdp[j].wb.lower.hi_dword.rss;
+                       mb->data_len = pkt_len;
+                       mb->pkt_len = pkt_len;
+                       mb->vlan_tci = rxdp[j].wb.upper.vlan;
+                       mb->hash.rss = rxdp[j].wb.lower.hi_dword.rss;
 
                        /* convert descriptor fields to rte mbuf flags */
                        mb->ol_flags  = rx_desc_hlen_type_rss_to_pkt_flags(
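
The mask-then-sum of the old scan loop is folded into a single pass.
Both forms rely on IXGBE_RXDADV_STAT_DD being the least-significant
status bit (0x1), so each "s[j] & IXGBE_RXDADV_STAT_DD" contributes
exactly 0 or 1 to nb_dd. For example:

    /* s[] = { 0x3, 0x1, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0 } yields nb_dd = 3;
     * the surrounding loop (outside this hunk) stops scanning once a
     * group has fewer than LOOK_AHEAD completed descriptors */
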
@@ -1134,11 +996,10 @@ ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)
                /* populate the static rte mbuf fields */
                mb = rxep[i].mbuf;
                rte_mbuf_refcnt_set(mb, 1);
-               mb->type = RTE_MBUF_PKT;
-               mb->pkt.next = NULL;
-               mb->pkt.data = (char *)mb->buf_addr + RTE_PKTMBUF_HEADROOM;
-               mb->pkt.nb_segs = 1;
-               mb->pkt.in_port = rxq->port_id;
+               mb->next = NULL;
+               mb->data_off = RTE_PKTMBUF_HEADROOM;
+               mb->nb_segs = 1;
+               mb->port = rxq->port_id;
 
                /* populate the descriptors */
                dma_addr = (uint64_t)mb->buf_physaddr + RTE_PKTMBUF_HEADROOM;
@@ -1387,18 +1248,17 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                 */
                pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
                                      rxq->crc_len);
-               rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
-               rte_packet_prefetch(rxm->pkt.data);
-               rxm->pkt.nb_segs = 1;
-               rxm->pkt.next = NULL;
-               rxm->pkt.pkt_len = pkt_len;
-               rxm->pkt.data_len = pkt_len;
-               rxm->pkt.in_port = rxq->port_id;
+               rxm->data_off = RTE_PKTMBUF_HEADROOM;
+               rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
+               rxm->nb_segs = 1;
+               rxm->next = NULL;
+               rxm->pkt_len = pkt_len;
+               rxm->data_len = pkt_len;
+               rxm->port = rxq->port_id;
 
                hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
                /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
-               rxm->pkt.vlan_macip.f.vlan_tci =
-                       rte_le_to_cpu_16(rxd.wb.upper.vlan);
+               rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
 
                pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
                pkt_flags = (uint16_t)(pkt_flags |
@@ -1408,12 +1268,12 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                rxm->ol_flags = pkt_flags;
 
                if (likely(pkt_flags & PKT_RX_RSS_HASH))
-                       rxm->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
+                       rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
                else if (pkt_flags & PKT_RX_FDIR) {
-                       rxm->pkt.hash.fdir.hash =
+                       rxm->hash.fdir.hash =
                                (uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
                                           & IXGBE_ATR_HASH_MASK);
-                       rxm->pkt.hash.fdir.id = rxd.wb.lower.hi_dword.csum_ip.ip_id;
+                       rxm->hash.fdir.id = rxd.wb.lower.hi_dword.csum_ip.ip_id;
                }
                /*
                 * Store the mbuf address into the next entry of the array
@@ -1570,8 +1430,8 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                 * Set data length & data buffer address of mbuf.
                 */
                data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
-               rxm->pkt.data_len = data_len;
-               rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+               rxm->data_len = data_len;
+               rxm->data_off = RTE_PKTMBUF_HEADROOM;
 
                /*
                 * If this is the first buffer of the received packet,
@@ -1583,13 +1443,13 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                 */
                if (first_seg == NULL) {
                        first_seg = rxm;
-                       first_seg->pkt.pkt_len = data_len;
-                       first_seg->pkt.nb_segs = 1;
+                       first_seg->pkt_len = data_len;
+                       first_seg->nb_segs = 1;
                } else {
-                       first_seg->pkt.pkt_len = (uint16_t)(first_seg->pkt.pkt_len
+                       first_seg->pkt_len = (uint16_t)(first_seg->pkt_len
                                        + data_len);
-                       first_seg->pkt.nb_segs++;
-                       last_seg->pkt.next = rxm;
+                       first_seg->nb_segs++;
+                       last_seg->next = rxm;
                }
 
                /*
@@ -1612,18 +1472,18 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                 *     mbuf, subtract the length of that CRC part from the
                 *     data length of the previous mbuf.
                 */
-               rxm->pkt.next = NULL;
+               rxm->next = NULL;
                if (unlikely(rxq->crc_len > 0)) {
-                       first_seg->pkt.pkt_len -= ETHER_CRC_LEN;
+                       first_seg->pkt_len -= ETHER_CRC_LEN;
                        if (data_len <= ETHER_CRC_LEN) {
                                rte_pktmbuf_free_seg(rxm);
-                               first_seg->pkt.nb_segs--;
-                               last_seg->pkt.data_len = (uint16_t)
-                                       (last_seg->pkt.data_len -
+                               first_seg->nb_segs--;
+                               last_seg->data_len = (uint16_t)
+                                       (last_seg->data_len -
                                         (ETHER_CRC_LEN - data_len));
-                               last_seg->pkt.next = NULL;
+                               last_seg->next = NULL;
                        } else
-                               rxm->pkt.data_len =
+                               rxm->data_len =
                                        (uint16_t) (data_len - ETHER_CRC_LEN);
                }
 
@@ -1636,14 +1496,13 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                 *      - VLAN TCI, if any,
                 *      - error flags.
                 */
-               first_seg->pkt.in_port = rxq->port_id;
+               first_seg->port = rxq->port_id;
 
                /*
                 * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
                 * set in the pkt_flags field.
                 */
-               first_seg->pkt.vlan_macip.f.vlan_tci =
-                               rte_le_to_cpu_16(rxd.wb.upper.vlan);
+               first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
                hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
                pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
                pkt_flags = (uint16_t)(pkt_flags |
@@ -1653,17 +1512,18 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                first_seg->ol_flags = pkt_flags;
 
                if (likely(pkt_flags & PKT_RX_RSS_HASH))
-                       first_seg->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
+                       first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
                else if (pkt_flags & PKT_RX_FDIR) {
-                       first_seg->pkt.hash.fdir.hash =
+                       first_seg->hash.fdir.hash =
                                (uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
                                           & IXGBE_ATR_HASH_MASK);
-                       first_seg->pkt.hash.fdir.id =
+                       first_seg->hash.fdir.id =
                                rxd.wb.lower.hi_dword.csum_ip.ip_id;
                }
 
                /* Prefetch data of first segment, if configured to do so. */
-               rte_packet_prefetch(first_seg->pkt.data);
+               rte_packet_prefetch((char *)first_seg->buf_addr +
+                       first_seg->data_off);
 
                /*
                 * Store the mbuf address into the next entry of the array
@@ -1735,7 +1595,7 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
  * descriptors should meet the following condition:
  *      (num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0
  */
-#define IXGBE_MIN_RING_DESC 64
+#define IXGBE_MIN_RING_DESC 32
 #define IXGBE_MAX_RING_DESC 4096
 
 /*
@@ -1750,7 +1610,7 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
        char z_name[RTE_MEMZONE_NAMESIZE];
        const struct rte_memzone *mz;
 
-       rte_snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
+       snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
                        dev->driver->pci_drv.name, ring_name,
                        dev->data->port_id, queue_id);
 
@@ -1758,8 +1618,13 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
        if (mz)
                return mz;
 
+#ifdef RTE_LIBRTE_XEN_DOM0
+       return rte_memzone_reserve_bounded(z_name, ring_size,
+               socket_id, 0, IXGBE_ALIGN, RTE_PGSIZE_2M);
+#else
        return rte_memzone_reserve_aligned(z_name, ring_size,
-                       socket_id, 0, IXGBE_ALIGN);
+               socket_id, 0, IXGBE_ALIGN);
+#endif
 }
 
 static void
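
Under Xen Dom0 the guest's pseudo-physical memory is only guaranteed
machine-contiguous within 2 MB chunks, so the descriptor ring is
reserved with rte_memzone_reserve_bounded(..., RTE_PGSIZE_2M) to keep
it inside one chunk, and the address programmed into the NIC must be
the machine address:

    /* sketch of the translation used in the queue-setup hunks below */
    ring_phys = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
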
@@ -1778,11 +1643,19 @@ ixgbe_tx_queue_release_mbufs(struct igb_tx_queue *txq)
 }
 
 static void
-ixgbe_tx_queue_release(struct igb_tx_queue *txq)
+ixgbe_tx_free_swring(struct igb_tx_queue *txq)
 {
-       if (txq != NULL) {
-               ixgbe_tx_queue_release_mbufs(txq);
+       if (txq != NULL &&
+           txq->sw_ring != NULL)
                rte_free(txq->sw_ring);
+}
+
+static void
+ixgbe_tx_queue_release(struct igb_tx_queue *txq)
+{
+       if (txq != NULL && txq->ops != NULL) {
+               txq->ops->release_mbufs(txq);
+               txq->ops->free_swring(txq);
                rte_free(txq);
        }
 }
@@ -1797,12 +1670,14 @@ ixgbe_dev_tx_queue_release(void *txq)
 static void
 ixgbe_reset_tx_queue(struct igb_tx_queue *txq)
 {
+       static const union ixgbe_adv_tx_desc zeroed_desc = { .read = {
+                       .buffer_addr = 0}};
        struct igb_tx_entry *txe = txq->sw_ring;
        uint16_t prev, i;
 
        /* Zero out HW ring memory */
-       for (i = 0; i < sizeof(union ixgbe_adv_tx_desc) * txq->nb_tx_desc; i++) {
-               ((volatile char *)txq->tx_ring)[i] = 0;
+       for (i = 0; i < txq->nb_tx_desc; i++) {
+               txq->tx_ring[i] = zeroed_desc;
        }
 
        /* Initialize SW ring entries */
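
Clearing the ring by assigning a static all-zero descriptor replaces
the byte-by-byte clear through a volatile char pointer: the stores are
descriptor-sized, and the loop bound is simply the descriptor count
rather than size * count bytes. The RX reset path below gets the same
treatment.
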
@@ -1832,6 +1707,12 @@ ixgbe_reset_tx_queue(struct igb_tx_queue *txq)
                IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
 }
 
+static struct ixgbe_txq_ops def_txq_ops = {
+       .release_mbufs = ixgbe_tx_queue_release_mbufs,
+       .free_swring = ixgbe_tx_free_swring,
+       .reset = ixgbe_reset_tx_queue,
+};
+
 int
 ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
                         uint16_t queue_idx,
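
Queue teardown and reset now go through a small per-queue ops table so
that the vector TX implementation can install its own handlers. From
the def_txq_ops initializer above, the table presumably looks like this
in ixgbe_rxtx.h:

    struct ixgbe_txq_ops {
            void (*release_mbufs)(struct igb_tx_queue *txq);
            void (*free_swring)(struct igb_tx_queue *txq);
            void (*reset)(struct igb_tx_queue *txq);
    };
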
@@ -1928,8 +1809,10 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
        }
 
        /* Free memory prior to re-allocation if needed... */
-       if (dev->data->tx_queues[queue_idx] != NULL)
+       if (dev->data->tx_queues[queue_idx] != NULL) {
                ixgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
+               dev->data->tx_queues[queue_idx] = NULL;
+       }
 
        /* First allocate the tx queue data structure */
        txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct igb_tx_queue),
@@ -1957,10 +1840,12 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
        txq->hthresh = tx_conf->tx_thresh.hthresh;
        txq->wthresh = tx_conf->tx_thresh.wthresh;
        txq->queue_id = queue_idx;
-       txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ? 
+       txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
                queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
        txq->port_id = dev->data->port_id;
        txq->txq_flags = tx_conf->txq_flags;
+       txq->ops = &def_txq_ops;
+       txq->start_tx_per_q = tx_conf->start_tx_per_q;
 
        /*
         * Modification to set VFTDT for virtual function if vf is detected
@@ -1969,14 +1854,17 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
                txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx));
        else
                txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
-
+#ifndef RTE_LIBRTE_XEN_DOM0
        txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
+#else
+       txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+#endif
        txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;
 
        /* Allocate software ring */
        txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
-                                  sizeof(struct igb_tx_entry) * nb_desc,
-                                  CACHE_LINE_SIZE, socket_id);
+                               sizeof(struct igb_tx_entry) * nb_desc,
+                               CACHE_LINE_SIZE, socket_id);
        if (txq->sw_ring == NULL) {
                ixgbe_tx_queue_release(txq);
                return (-ENOMEM);
@@ -1984,15 +1872,19 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
        PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
                     txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
 
-       ixgbe_reset_tx_queue(txq);
-
-       dev->data->tx_queues[queue_idx] = txq;
-
        /* Use a simple Tx queue (no offloads, no multi segs) if possible */
        if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) &&
            (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
                PMD_INIT_LOG(INFO, "Using simple tx code path\n");
-               dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
+#ifdef RTE_IXGBE_INC_VECTOR
+               if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
+                   ixgbe_txq_vec_setup(txq, socket_id) == 0) {
+                       PMD_INIT_LOG(INFO, "Vector tx enabled.\n");
+                       dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
+               }
+               else
+#endif
+                       dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
        } else {
                PMD_INIT_LOG(INFO, "Using full-featured tx code path\n");
                PMD_INIT_LOG(INFO, " - txq_flags = %lx [IXGBE_SIMPLE_FLAGS=%lx]\n", (long unsigned)txq->txq_flags, (long unsigned)IXGBE_SIMPLE_FLAGS);
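
The simple path still requires every IXGBE_SIMPLE_FLAGS bit and
tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST; the vector path further
requires tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ and a successful
ixgbe_txq_vec_setup(). An illustrative queue configuration that
satisfies the simple-path test (threshold values are an assumption):

    struct rte_eth_txconf txconf = {
            .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
                         ETH_TXQ_FLAGS_NOOFFLOADS,
            .tx_rs_thresh = 32,
            .tx_free_thresh = 32,
    };
    rte_eth_tx_queue_setup(port_id, 0, nb_txd, socket_id, &txconf);
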
@@ -2000,6 +1892,11 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
                dev->tx_pkt_burst = ixgbe_xmit_pkts;
        }
 
+       txq->ops->reset(txq);
+
+       dev->data->tx_queues[queue_idx] = txq;
+
+
        return (0);
 }
 
@@ -2091,6 +1988,8 @@ check_rx_burst_bulk_alloc_preconditions(__rte_unused struct igb_rx_queue *rxq)
 static void
 ixgbe_reset_rx_queue(struct igb_rx_queue *rxq)
 {
+       static const union ixgbe_adv_rx_desc zeroed_desc = { .read = {
+                       .pkt_addr = 0}};
        unsigned i;
        uint16_t len;
 
@@ -2118,8 +2017,8 @@ ixgbe_reset_rx_queue(struct igb_rx_queue *rxq)
         * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
         * reads extra memory as zeros.
         */
-       for (i = 0; i < len * sizeof(union ixgbe_adv_rx_desc); i++) {
-               ((volatile char *)rxq->rx_ring)[i] = 0;
+       for (i = 0; i < len; i++) {
+               rxq->rx_ring[i] = zeroed_desc;
        }
 
 #ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
@@ -2171,8 +2070,10 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
        }
 
        /* Free memory prior to re-allocation if needed... */
-       if (dev->data->rx_queues[queue_idx] != NULL)
+       if (dev->data->rx_queues[queue_idx] != NULL) {
                ixgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
+               dev->data->rx_queues[queue_idx] = NULL;
+       }
 
        /* First allocate the rx queue data structure */
        rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct igb_rx_queue),
@@ -2183,12 +2084,13 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
        rxq->nb_rx_desc = nb_desc;
        rxq->rx_free_thresh = rx_conf->rx_free_thresh;
        rxq->queue_id = queue_idx;
-       rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ? 
+       rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
                queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
        rxq->port_id = dev->data->port_id;
        rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
                                                        0 : ETHER_CRC_LEN);
        rxq->drop_en = rx_conf->rx_drop_en;
+       rxq->start_rx_per_q = rx_conf->start_rx_per_q;
 
        /*
         * Allocate RX ring hardware descriptors. A memzone large enough to
@@ -2196,12 +2098,17 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
         * resizing in later calls to the queue setup function.
         */
        rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx,
-                       IXGBE_MAX_RING_DESC * sizeof(union ixgbe_adv_rx_desc),
-                       socket_id);
+                                  RX_RING_SZ, socket_id);
        if (rz == NULL) {
                ixgbe_rx_queue_release(rxq);
                return (-ENOMEM);
        }
+
+       /*
+        * Zero init all the descriptors in the ring.
+        */
+       memset(rz->addr, 0, RX_RING_SZ);
+
        /*
         * Modified to setup VFRDT for Virtual Function
         */
@@ -2217,12 +2124,15 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
                rxq->rdh_reg_addr =
                        IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx));
        }
-
+#ifndef RTE_LIBRTE_XEN_DOM0
        rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
+#else
+       rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+#endif
        rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
 
        /*
-        * Allocate software ring. Allow for space at the end of the 
+        * Allocate software ring. Allow for space at the end of the
         * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
         * function does not access an invalid memory region.
         */
@@ -2242,7 +2152,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
                     rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
 
        /*
-        * Certain constaints must be met in order to use the bulk buffer
+        * Certain constraints must be met in order to use the bulk buffer
         * allocation Rx burst function.
         */
        use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq);
@@ -2255,6 +2165,14 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
                             "used on port=%d, queue=%d.\n",
                             rxq->port_id, rxq->queue_id);
                dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
+#ifdef RTE_IXGBE_INC_VECTOR
+               if (!ixgbe_rx_vec_condition_check(dev)) {
+                       PMD_INIT_LOG(INFO, "Vector rx enabled, please make "
+                                    "sure the RX burst size is no less than 32.\n");
+                       ixgbe_rxq_vec_setup(rxq, socket_id);
+                       dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
+               }
+#endif
 #endif
        } else {
                PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions "
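
Vector RX piggybacks on the bulk-allocation preconditions and, per the
log message above, expects the application to poll in bursts of at
least 32 packets. Illustrative caller side (port and queue numbers are
placeholders):

    struct rte_mbuf *pkts[32];
    uint16_t nb = rte_eth_rx_burst(port_id, 0, pkts, 32);
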
@@ -2325,8 +2243,8 @@ ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                struct igb_tx_queue *txq = dev->data->tx_queues[i];
                if (txq != NULL) {
-                       ixgbe_tx_queue_release_mbufs(txq);
-                       ixgbe_reset_tx_queue(txq);
+                       txq->ops->release_mbufs(txq);
+                       txq->ops->reset(txq);
                }
        }
 
@@ -2388,49 +2306,29 @@ ixgbe_rss_disable(struct rte_eth_dev *dev)
 }
 
 static void
-ixgbe_rss_configure(struct rte_eth_dev *dev)
+ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
 {
-       struct ixgbe_hw *hw;
-       uint8_t *hash_key;
-       uint32_t rss_key;
+       uint8_t  *hash_key;
        uint32_t mrqc;
-       uint32_t reta;
-       uint16_t rss_hf;
+       uint32_t rss_key;
+       uint64_t rss_hf;
        uint16_t i;
-       uint16_t j;
-
-       PMD_INIT_FUNC_TRACE();
-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-       rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
-       if (rss_hf == 0) { /* Disable RSS */
-               ixgbe_rss_disable(dev);
-               return;
-       }
-       hash_key = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
-       if (hash_key == NULL)
-               hash_key = rss_intel_key; /* Default hash key */
 
-       /* Fill in RSS hash key */
-       for (i = 0; i < 10; i++) {
-               rss_key  = hash_key[(i * 4)];
-               rss_key |= hash_key[(i * 4) + 1] << 8;
-               rss_key |= hash_key[(i * 4) + 2] << 16;
-               rss_key |= hash_key[(i * 4) + 3] << 24;
-               IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RSSRK(0), i, rss_key);
-       }
-
-       /* Fill in redirection table */
-       reta = 0;
-       for (i = 0, j = 0; i < 128; i++, j++) {
-               if (j == dev->data->nb_rx_queues) j = 0;
-               reta = (reta << 8) | j;
-               if ((i & 3) == 3)
-                       IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), rte_bswap32(reta));
+       hash_key = rss_conf->rss_key;
+       if (hash_key != NULL) {
+               /* Fill in RSS hash key */
+               for (i = 0; i < 10; i++) {
+                       rss_key  = hash_key[(i * 4)];
+                       rss_key |= hash_key[(i * 4) + 1] << 8;
+                       rss_key |= hash_key[(i * 4) + 2] << 16;
+                       rss_key |= hash_key[(i * 4) + 3] << 24;
+                       IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RSSRK(0), i, rss_key);
+               }
        }
 
-       /* Set configured hashing functions in MRQC register */
-       mrqc = IXGBE_MRQC_RSSEN; /* RSS enable */
+       /* Set configured hashing protocols in MRQC register */
+       rss_hf = rss_conf->rss_hf;
+       mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */
        if (rss_hf & ETH_RSS_IPV4)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
        if (rss_hf & ETH_RSS_IPV4_TCP)
@@ -2452,6 +2350,133 @@ ixgbe_rss_configure(struct rte_eth_dev *dev)
        IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
 }
 
+int
+ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
+                         struct rte_eth_rss_conf *rss_conf)
+{
+       struct ixgbe_hw *hw;
+       uint32_t mrqc;
+       uint64_t rss_hf;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       /*
+        * Excerpt from section 7.1.2.8 Receive-Side Scaling (RSS):
+        *     "RSS enabling cannot be done dynamically while it must be
+        *      preceded by a software reset"
+        * Before changing anything, first check that the update RSS operation
+        * does not attempt to disable RSS, if RSS was enabled at
+        * initialization time, or does not attempt to enable RSS, if RSS was
+        * disabled at initialization time.
+        */
+       rss_hf = rss_conf->rss_hf & IXGBE_RSS_OFFLOAD_ALL;
+       mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+       if (!(mrqc & IXGBE_MRQC_RSSEN)) { /* RSS disabled */
+               if (rss_hf != 0) /* Enable RSS */
+                       return -(EINVAL);
+               return 0; /* Nothing to do */
+       }
+       /* RSS enabled */
+       if (rss_hf == 0) /* Disable RSS */
+               return -(EINVAL);
+       ixgbe_hw_rss_hash_set(hw, rss_conf);
+       return 0;
+}
+
+int
+ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+                           struct rte_eth_rss_conf *rss_conf)
+{
+       struct ixgbe_hw *hw;
+       uint8_t *hash_key;
+       uint32_t mrqc;
+       uint32_t rss_key;
+       uint64_t rss_hf;
+       uint16_t i;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hash_key = rss_conf->rss_key;
+       if (hash_key != NULL) {
+               /* Return RSS hash key */
+               for (i = 0; i < 10; i++) {
+                       rss_key = IXGBE_READ_REG_ARRAY(hw, IXGBE_RSSRK(0), i);
+                       hash_key[(i * 4)] = rss_key & 0x000000FF;
+                       hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
+                       hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
+                       hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
+               }
+       }
+
+       /* Get RSS functions configured in MRQC register */
+       mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+       if ((mrqc & IXGBE_MRQC_RSSEN) == 0) { /* RSS is disabled */
+               rss_conf->rss_hf = 0;
+               return 0;
+       }
+       rss_hf = 0;
+       if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4)
+               rss_hf |= ETH_RSS_IPV4;
+       if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP)
+               rss_hf |= ETH_RSS_IPV4_TCP;
+       if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6)
+               rss_hf |= ETH_RSS_IPV6;
+       if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX)
+               rss_hf |= ETH_RSS_IPV6_EX;
+       if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP)
+               rss_hf |= ETH_RSS_IPV6_TCP;
+       if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP)
+               rss_hf |= ETH_RSS_IPV6_TCP_EX;
+       if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP)
+               rss_hf |= ETH_RSS_IPV4_UDP;
+       if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP)
+               rss_hf |= ETH_RSS_IPV6_UDP;
+       if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP)
+               rss_hf |= ETH_RSS_IPV6_UDP_EX;
+       rss_conf->rss_hf = rss_hf;
+       return 0;
+}
+
+static void
+ixgbe_rss_configure(struct rte_eth_dev *dev)
+{
+       struct rte_eth_rss_conf rss_conf;
+       struct ixgbe_hw *hw;
+       uint32_t reta;
+       uint16_t i;
+       uint16_t j;
+
+       PMD_INIT_FUNC_TRACE();
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       /*
+        * Fill in redirection table
+        * The byte-swap is needed because NIC registers are in
+        * little-endian order.
+        */
+       reta = 0;
+       for (i = 0, j = 0; i < 128; i++, j++) {
+               if (j == dev->data->nb_rx_queues)
+                       j = 0;
+               reta = (reta << 8) | j;
+               if ((i & 3) == 3)
+                       IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2),
+                                       rte_bswap32(reta));
+       }
+
+       /*
+        * Configure the RSS key and the RSS protocols used to compute
+        * the RSS hash of input packets.
+        */
+       rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
+       if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
+               ixgbe_rss_disable(dev);
+               return;
+       }
+       if (rss_conf.rss_key == NULL)
+               rss_conf.rss_key = rss_intel_key; /* Default hash key */
+       ixgbe_hw_rss_hash_set(hw, &rss_conf);
+}
+
 #define NUM_VFTA_REGISTERS 128
 #define NIC_RX_BUFFER_SIZE 0x200
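
ixgbe_rss_configure() now delegates key and hash-protocol programming
to ixgbe_hw_rss_hash_set(), which the new ixgbe_dev_rss_hash_update()
entry point reuses; ixgbe_dev_rss_hash_conf_get() reads the same state
back from the RSSRK and MRQC registers. A worked example of the
redirection-table packing above, assuming two RX queues:

    /* entries 0..3 map to queues 0,1,0,1: the loop accumulates
     * reta = 0x00010001 and rte_bswap32() writes 0x01000100, so byte k
     * of IXGBE_RETA(0) holds the queue index for table entry k */
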
 
@@ -2567,13 +2592,13 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
  * @hw: pointer to hardware structure
  * @dcb_config: pointer to ixgbe_dcb_config structure
  */
-static void 
+static void
 ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw,
                struct ixgbe_dcb_config *dcb_config)
 {
        uint32_t reg;
        uint32_t q;
-       
+
        PMD_INIT_FUNC_TRACE();
        if (hw->mac.type != ixgbe_mac_82598EB) {
                /* Disable the Tx desc arbiter so that MTQC can be changed */
@@ -2621,21 +2646,21 @@ ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
 {
        struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
                        &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
-       struct ixgbe_hw *hw = 
+       struct ixgbe_hw *hw =
                        IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       
+
        PMD_INIT_FUNC_TRACE();
-       if (hw->mac.type != ixgbe_mac_82598EB)  
+       if (hw->mac.type != ixgbe_mac_82598EB)
                /*PF VF Transmit Enable*/
                IXGBE_WRITE_REG(hw, IXGBE_VFTE(0),
                        vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
-    
+
        /*Configure general DCB TX parameters*/
        ixgbe_dcb_tx_hw_config(hw,dcb_config);
        return;
 }
 
-static void 
+static void
 ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
                         struct ixgbe_dcb_config *dcb_config)
 {
@@ -2662,15 +2687,15 @@ ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
        }
 }
 
-static void 
+static void
 ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
                         struct ixgbe_dcb_config *dcb_config)
-{ 
+{
        struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
                        &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
        struct ixgbe_dcb_tc_config *tc;
        uint8_t i,j;
-       
+
        /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
        if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ) {
                dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
@@ -2691,7 +2716,7 @@ ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
        return;
 }
 
-static void 
+static void
 ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
                struct ixgbe_dcb_config *dcb_config)
 {
@@ -2702,8 +2727,8 @@ ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
 
        dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
        dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
-       
-       /* User Priority to Traffic Class mapping */ 
+
+       /* User Priority to Traffic Class mapping */
        for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
                j = rx_conf->dcb_queue[i];
                tc = &dcb_config->tc_config[j];
@@ -2712,7 +2737,7 @@ ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
        }
 }
 
-static void 
+static void
 ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
                struct ixgbe_dcb_config *dcb_config)
 {
@@ -2723,8 +2748,8 @@ ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
 
        dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
        dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
-    
-       /* User Priority to Traffic Class mapping */ 
+
+       /* User Priority to Traffic Class mapping */
        for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
                j = tx_conf->dcb_queue[i];
                tc = &dcb_config->tc_config[j];
@@ -2784,7 +2809,7 @@ ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
        vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
        vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
        IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
+
        /* VFTA - enable all vlan filters */
        for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
                IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
@@ -2796,11 +2821,11 @@ ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
         */
        reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
        IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
+
        return;
 }
 
-static void 
+static void
 ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill,
                        uint16_t *max,uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
 {
@@ -2818,7 +2843,7 @@ ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill,
        }
 }
 
-static void 
+static void
 ixgbe_dcb_hw_arbite_tx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *max,
                            uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
 {
@@ -2841,7 +2866,7 @@ ixgbe_dcb_hw_arbite_tx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *m
 #define DCB_TX_CONFIG  1
 #define DCB_TX_PB      1024
 /**
- * ixgbe_dcb_hw_configure - Enable DCB and configure 
+ * ixgbe_dcb_hw_configure - Enable DCB and configure
  * general DCB in VT mode and non-VT mode parameters
  * @dev: pointer to rte_eth_dev structure
  * @dcb_config: pointer to ixgbe_dcb_config structure
@@ -2861,8 +2886,8 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
        uint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
        uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
        struct ixgbe_dcb_tc_config *tc;
-       uint32_t max_frame = dev->data->max_frame_size;
-       struct ixgbe_hw *hw = 
+       uint32_t max_frame = dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+       struct ixgbe_hw *hw =
                        IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        switch(dev->data->dev_conf.rxmode.mq_mode){
@@ -2871,7 +2896,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
                if (hw->mac.type != ixgbe_mac_82598EB) {
                        config_dcb_rx = DCB_RX_CONFIG;
                        /*
-                        *get dcb and VT rx configuration parameters 
+                        *get dcb and VT rx configuration parameters
                         *from rte_eth_conf
                         */
                        ixgbe_vmdq_dcb_rx_config(dev,dcb_config);
@@ -2921,7 +2946,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
                /* Avoid un-configured priority mapping to TC0 */
                uint8_t j = 4;
                uint8_t mask = 0xFF;
-               for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++) 
+               for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
                        mask = (uint8_t)(mask & (~ (1 << map[i])));
                for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
                        if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
@@ -3029,14 +3054,14 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 void ixgbe_configure_dcb(struct rte_eth_dev *dev)
 {
        struct ixgbe_dcb_config *dcb_cfg =
-                       IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); 
+                       IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
        struct rte_eth_conf *dev_conf = &(dev->data->dev_conf);
-       
-       PMD_INIT_FUNC_TRACE();  
-       
+
+       PMD_INIT_FUNC_TRACE();
+
        /* check support mq_mode for DCB */
-       if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) && 
-           (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB)) 
+       if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
+           (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB))
                return;
 
        if (dev->data->nb_rx_queues != ETH_DCB_NUM_QUEUES)
@@ -3044,7 +3069,7 @@ void ixgbe_configure_dcb(struct rte_eth_dev *dev)
 
        /** Configure DCB hardware **/
        ixgbe_dcb_hw_configure(dev,dcb_cfg);
-       
+
        return;
 }
 
@@ -3086,7 +3111,7 @@ ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
        IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
 
        /* VFTA - enable all vlan filters */
-       for (i = 0; i < NUM_VFTA_REGISTERS; i++) 
+       for (i = 0; i < NUM_VFTA_REGISTERS; i++)
                IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), UINT32_MAX);
 
        /* VFRE: pool enabling for receive - 64 */
@@ -3111,7 +3136,7 @@ ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
                 * pools, we only need to use the first half of the register
                 * i.e. bits 0-31
                 */
-               if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0) 
+               if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0)
                        IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), \
                                        (cfg->pool_map[i].pools & UINT32_MAX));
                else
@@ -3121,6 +3146,13 @@ ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
 
        }
 
+       /* PFDMA Tx General Switch Control Enables VMDQ loopback */
+       if (cfg->enable_loop_back) {
+               IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+               for (i = 0; i < RTE_IXGBE_VMTXSW_REGISTER_COUNT; i++)
+                       IXGBE_WRITE_REG(hw, IXGBE_VMTXSW(i), UINT32_MAX);
+       }
+
        IXGBE_WRITE_FLUSH(hw);
 }
 
@@ -3128,12 +3160,12 @@ ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
  * ixgbe_dcb_config_tx_hw_config - Configure general VMDq TX parameters
  * @hw: pointer to hardware structure
  */
-static void 
+static void
 ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
 {
        uint32_t reg;
        uint32_t q;
-       
+
        PMD_INIT_FUNC_TRACE();
        /*PF VF Transmit Enable*/
        IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), UINT32_MAX);
@@ -3180,11 +3212,10 @@ ixgbe_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
                }
 
                rte_mbuf_refcnt_set(mbuf, 1);
-               mbuf->type = RTE_MBUF_PKT;
-               mbuf->pkt.next = NULL;
-               mbuf->pkt.data = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;
-               mbuf->pkt.nb_segs = 1;
-               mbuf->pkt.in_port = rxq->port_id;
+               mbuf->next = NULL;
+               mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+               mbuf->nb_segs = 1;
+               mbuf->port = rxq->port_id;
 
                dma_addr =
                        rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
@@ -3200,21 +3231,18 @@ ixgbe_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
 static int
 ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
 {
-       struct ixgbe_hw *hw = 
+       struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        if (hw->mac.type == ixgbe_mac_82598EB)
                return 0;
 
        if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
-               /* 
-                * SRIOV inactive scheme
+               /*
+                * SRIOV inactive scheme
                 * any DCB/RSS w/o VMDq multi-queue setting
                 */
-               if (dev->data->nb_rx_queues > 1)
-                       switch (dev->data->dev_conf.rxmode.mq_mode) {
-                       case ETH_MQ_RX_NONE:
-                               /* if mq_mode not assign, we use rss mode.*/
+               switch (dev->data->dev_conf.rxmode.mq_mode) {
                        case ETH_MQ_RX_RSS:
                                ixgbe_rss_configure(dev);
                                break;
@@ -3222,15 +3250,15 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
                        case ETH_MQ_RX_VMDQ_DCB:
                                ixgbe_vmdq_dcb_configure(dev);
                                break;
-       
+
                        case ETH_MQ_RX_VMDQ_ONLY:
                                ixgbe_vmdq_rx_hw_configure(dev);
                                break;
-                       
+
+                       case ETH_MQ_RX_NONE:
+                               /* if mq_mode is none, disable RSS. */
                        default: ixgbe_rss_disable(dev);
-                       }
-               else
-                       ixgbe_rss_disable(dev);
+               }
        } else {
                switch (RTE_ETH_DEV_SRIOV(dev).active) {
                /*
@@ -3244,7 +3272,7 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
                case ETH_32_POOLS:
                        IXGBE_WRITE_REG(hw, IXGBE_MRQC, IXGBE_MRQC_VMDQRT4TCEN);
                        break;
-               
+
                case ETH_16_POOLS:
                        IXGBE_WRITE_REG(hw, IXGBE_MRQC, IXGBE_MRQC_VMDQRT8TCEN);
                        break;
@@ -3259,7 +3287,7 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
 static int
 ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
 {
-       struct ixgbe_hw *hw = 
+       struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t mtqc;
        uint32_t rttdcs;
@@ -3273,10 +3301,10 @@ ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
        IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
 
        if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
-               /* 
-                * SRIOV inactive scheme
+               /*
+                * SRIOV inactive scheme
                 * any DCB w/o VMDq multi-queue setting
-                */
+                */
                if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
                        ixgbe_vmdq_tx_hw_configure(hw);
                else {
@@ -3297,7 +3325,7 @@ ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
                        mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_32VF;
                        break;
                case ETH_16_POOLS:
-                       mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_RT_ENA | 
+                       mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_RT_ENA |
                                IXGBE_MTQC_8TC_8TQ;
                        break;
                default:
@@ -3333,8 +3361,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
        uint32_t rxcsum;
        uint16_t buf_size;
        uint16_t i;
-       int ret;
-       
+
        PMD_INIT_FUNC_TRACE();
        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -3373,17 +3400,21 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
        } else
                hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
 
+       /*
+        * If loopback mode is configured for 82599, set LPBK bit.
+        */
+       if (hw->mac.type == ixgbe_mac_82599EB &&
+                       dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
+               hlreg0 |= IXGBE_HLREG0_LPBK;
+       else
+               hlreg0 &= ~IXGBE_HLREG0_LPBK;
+
        IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
 
        /* Setup RX queues */
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
 
-               /* Allocate buffers for descriptor rings */
-               ret = ixgbe_alloc_rx_queue_mbufs(rxq);
-               if (ret)
-                       return ret;
-
                /*
                 * Reset crc_len in case it was changed after queue setup by a
                 * call to configure.
@@ -3436,8 +3467,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
                 * The value is in 1 KB resolution. Valid values can be from
                 * 1 KB to 16 KB.
                 */
-               mbp_priv = (struct rte_pktmbuf_pool_private *)
-                       ((char *)rxq->mb_pool + sizeof(struct rte_mempool));
+               mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
                buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
                                       RTE_PKTMBUF_HEADROOM);
                srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
@@ -3455,6 +3485,11 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
                }
        }
 
+       if (dev->data->dev_conf.rxmode.enable_scatter) {
+               dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
+               dev->data->scattered_rx = 1;
+       }
+
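The enable_scatter flag above swaps the burst receive handler for ixgbe_recv_scattered_pkts, which chains several mbufs per packet when a frame exceeds the per-mbuf data room. A hedged application-side sketch (frame size is illustrative; the struct is later passed to rte_eth_dev_configure()):

#include <string.h>
#include <rte_ethdev.h>

static void
enable_scattered_rx(struct rte_eth_conf *conf)
{
	memset(conf, 0, sizeof(*conf));
	conf->rxmode.jumbo_frame = 1;		/* accept frames above 1518 bytes */
	conf->rxmode.max_rx_pkt_len = 9000;	/* example jumbo frame size */
	/* frames larger than one mbuf's data room are chained across mbufs */
	conf->rxmode.enable_scatter = 1;
}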
        /*
         * Device configured with multiple RX queues.
         */
@@ -3483,7 +3518,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
                rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
                IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
        }
-       
+
        return 0;
 }
 
@@ -3552,6 +3587,34 @@ ixgbe_dev_tx_init(struct rte_eth_dev *dev)
        ixgbe_dev_mq_tx_configure(dev);
 }
 
+/*
+ * Set up link for 82599 loopback mode Tx->Rx.
+ */
+static inline void
+ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
+{
+       DEBUGFUNC("ixgbe_setup_loopback_link_82599");
+
+       if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+               if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM) !=
+                               IXGBE_SUCCESS) {
+                       PMD_INIT_LOG(ERR, "Could not enable loopback mode\n");
+                       /* ignore the error: loopback link setup is simply skipped */
+                       return;
+               }
+       }
+
+       /* Restart link */
+       IXGBE_WRITE_REG(hw,
+                       IXGBE_AUTOC,
+                       IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU);
+       ixgbe_reset_pipeline_82599(hw);
+
+       hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+       msec_delay(50);
+}
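Applications request this mode through the generic device configuration. A sketch, assuming an 82599 port and that the IXGBE_LPBK_82599_TX_RX constant is available from ixgbe_ethdev.h:

#include <string.h>
#include <rte_ethdev.h>
#include "ixgbe_ethdev.h"	/* IXGBE_LPBK_82599_TX_RX (assumed location) */

static int
configure_loopback(uint8_t port_id)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	/* ixgbe_dev_rx_init() sets HLREG0.LPBK and ixgbe_dev_rxtx_start()
	 * invokes ixgbe_setup_loopback_link_82599() when this is set. */
	conf.lpbk_mode = IXGBE_LPBK_82599_TX_RX;

	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}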
+
 /*
  * Start Transmit and Receive Units.
  */
@@ -3563,10 +3626,8 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
        struct igb_rx_queue *rxq;
        uint32_t txdctl;
        uint32_t dmatxctl;
-       uint32_t rxdctl;
        uint32_t rxctrl;
        uint16_t i;
-       int poll_ms;
 
        PMD_INIT_FUNC_TRACE();
        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -3589,50 +3650,220 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
 
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                txq = dev->data->tx_queues[i];
-               txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
-               txdctl |= IXGBE_TXDCTL_ENABLE;
-               IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
-
-               /* Wait until TX Enable ready */
-               if (hw->mac.type == ixgbe_mac_82599EB) {
-                       poll_ms = 10;
-                       do {
-                               rte_delay_ms(1);
-                               txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
-                       } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
-                       if (!poll_ms)
-                               PMD_INIT_LOG(ERR, "Could not enable "
-                                            "Tx Queue %d\n", i);
-               }
+               if (!txq->start_tx_per_q)
+                       ixgbe_dev_tx_queue_start(dev, i);
        }
+
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
+               if (!rxq->start_rx_per_q)
+                       ixgbe_dev_rx_queue_start(dev, i);
+       }
+
+       /* Enable Receive engine */
+       rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+       if (hw->mac.type == ixgbe_mac_82598EB)
+               rxctrl |= IXGBE_RXCTRL_DMBYPS;
+       rxctrl |= IXGBE_RXCTRL_RXEN;
+       hw->mac.ops.enable_rx_dma(hw, rxctrl);
+
+       /* If loopback mode is enabled for 82599, set up the link accordingly */
+       if (hw->mac.type == ixgbe_mac_82599EB &&
+                       dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
+               ixgbe_setup_loopback_link_82599(hw);
+}
+
+/*
+ * Start Receive Units for specified queue.
+ */
+int
+ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+       struct ixgbe_hw     *hw;
+       struct igb_rx_queue *rxq;
+       uint32_t rxdctl;
+       int poll_ms;
+
+       PMD_INIT_FUNC_TRACE();
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if (rx_queue_id < dev->data->nb_rx_queues) {
+               rxq = dev->data->rx_queues[rx_queue_id];
+
+               /* Allocate buffers for descriptor rings */
+               if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
+                       PMD_INIT_LOG(ERR,
+                               "Could not alloc mbuf for queue %d\n",
+                               rx_queue_id);
+                       return -1;
+               }
                rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
                rxdctl |= IXGBE_RXDCTL_ENABLE;
                IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
 
                /* Wait until RX Enable ready */
-               poll_ms = 10;
+               poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
                do {
                        rte_delay_ms(1);
                        rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
                } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
                if (!poll_ms)
                        PMD_INIT_LOG(ERR, "Could not enable "
-                                    "Rx Queue %d\n", i);
+                                    "Rx Queue %d\n", rx_queue_id);
                rte_wmb();
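+               /* the barrier above orders ring writes before the head/tail update */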
+               IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
                IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
-       }
+       } else
+               return -1;
 
-       /* Enable Receive engine */
-       rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
-       if (hw->mac.type == ixgbe_mac_82598EB)
-               rxctrl |= IXGBE_RXCTRL_DMBYPS;
-       rxctrl |= IXGBE_RXCTRL_RXEN;
-       hw->mac.ops.enable_rx_dma(hw, rxctrl);
+       return 0;
+}
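From the application, these handlers are reached through the generic per-queue control calls. A sketch, assuming the rte_eth_dev_rx_queue_start()/_stop() wrappers introduced alongside these dev_ops dispatch here (port and queue ids are hypothetical):

#include <stdio.h>
#include <rte_ethdev.h>

static void
cycle_rx_queue(uint8_t port_id, uint16_t rx_q)
{
	/* stop: mbufs are released and the ring is reset */
	if (rte_eth_dev_rx_queue_stop(port_id, rx_q) != 0)
		printf("could not stop RX queue %u\n", (unsigned)rx_q);
	/* start: the ring is re-populated with fresh mbufs */
	if (rte_eth_dev_rx_queue_start(port_id, rx_q) != 0)
		printf("could not start RX queue %u\n", (unsigned)rx_q);
}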
+
+/*
+ * Stop Receive Units for specified queue.
+ */
+int
+ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+       struct ixgbe_hw     *hw;
+       struct igb_rx_queue *rxq;
+       uint32_t rxdctl;
+       int poll_ms;
+
+       PMD_INIT_FUNC_TRACE();
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if (rx_queue_id < dev->data->nb_rx_queues) {
+               rxq = dev->data->rx_queues[rx_queue_id];
+
+               rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+               rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+               IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
+
+               /* Wait until the RX Enable bit clears */
+               poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+               do {
+                       rte_delay_ms(1);
+                       rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+               } while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE));
+               if (!poll_ms)
+                       PMD_INIT_LOG(ERR, "Could not disable "
+                                    "Rx Queue %d\n", rx_queue_id);
+
+               rte_delay_us(RTE_IXGBE_WAIT_100_US);
+
+               ixgbe_rx_queue_release_mbufs(rxq);
+               ixgbe_reset_rx_queue(rxq);
+       } else
+               return -1;
+
+       return 0;
 }
 
 
+/*
+ * Start Transmit Units for specified queue.
+ */
+int
+ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+       struct ixgbe_hw     *hw;
+       struct igb_tx_queue *txq;
+       uint32_t txdctl;
+       int poll_ms;
+
+       PMD_INIT_FUNC_TRACE();
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if (tx_queue_id < dev->data->nb_tx_queues) {
+               txq = dev->data->tx_queues[tx_queue_id];
+               txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
+               txdctl |= IXGBE_TXDCTL_ENABLE;
+               IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
+
+               /* Wait until TX Enable ready */
+               if (hw->mac.type == ixgbe_mac_82599EB) {
+                       poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+                       do {
+                               rte_delay_ms(1);
+                               txdctl = IXGBE_READ_REG(hw,
+                                       IXGBE_TXDCTL(txq->reg_idx));
+                       } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
+                       if (!poll_ms)
+                               PMD_INIT_LOG(ERR, "Could not enable "
+                                            "Tx Queue %d\n", tx_queue_id);
+               }
+               rte_wmb();
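+               /* order ring initialization before exposing TDH/TDT to hardware */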
+               IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
+       } else
+               return -1;
+
+       return 0;
+}
+
+/*
+ * Stop Transmit Units for specified queue.
+ */
+int
+ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+       struct ixgbe_hw     *hw;
+       struct igb_tx_queue *txq;
+       uint32_t txdctl;
+       uint32_t txtdh, txtdt;
+       int poll_ms;
+
+       PMD_INIT_FUNC_TRACE();
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if (tx_queue_id < dev->data->nb_tx_queues) {
+               txq = dev->data->tx_queues[tx_queue_id];
+
+               /* Wait until TX queue is empty */
+               if (hw->mac.type == ixgbe_mac_82599EB) {
+                       poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+                       do {
+                               rte_delay_us(RTE_IXGBE_WAIT_100_US);
+                               txtdh = IXGBE_READ_REG(hw,
+                                               IXGBE_TDH(txq->reg_idx));
+                               txtdt = IXGBE_READ_REG(hw,
+                                               IXGBE_TDT(txq->reg_idx));
+                       } while (--poll_ms && (txtdh != txtdt));
+                       if (!poll_ms)
+                               PMD_INIT_LOG(ERR,
+                                       "Tx Queue %d is not empty when stopping.\n",
+                                       tx_queue_id);
+               }
+
+               txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
+               txdctl &= ~IXGBE_TXDCTL_ENABLE;
+               IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
+
+               /* Wait until the TX Enable bit clears */
+               if (hw->mac.type == ixgbe_mac_82599EB) {
+                       poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+                       do {
+                               rte_delay_ms(1);
+                               txdctl = IXGBE_READ_REG(hw,
+                                               IXGBE_TXDCTL(txq->reg_idx));
+                       } while (--poll_ms && (txdctl & IXGBE_TXDCTL_ENABLE));
+                       if (!poll_ms)
+                               PMD_INIT_LOG(ERR, "Could not disable "
+                                            "Tx Queue %d\n", tx_queue_id);
+               }
+
+               if (txq->ops != NULL) {
+                       txq->ops->release_mbufs(txq);
+                       txq->ops->reset(txq);
+               }
+       } else
+               return -1;
+
+       return 0;
+}
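The TX counterpart mirrors the RX flow: drain until TDH == TDT, clear the enable bit, then release and reset the ring. A short usage sketch under the same wrapper assumption as the RX example above (port_id and tx_q are hypothetical):

	/* hypothetical runtime reconfiguration of one TX queue */
	if (rte_eth_dev_tx_queue_stop(port_id, tx_q) == 0) {
		/* ... adjust application state while the queue is quiesced ... */
		rte_eth_dev_tx_queue_start(port_id, tx_q);
	}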
+
 /*
  * [VF] Initializes Receive Unit.
  */
@@ -3651,6 +3882,23 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
        PMD_INIT_FUNC_TRACE();
        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+       /*
+        * When the VF driver issues an IXGBE_VF_RESET request, the PF driver
+        * disables VF receipt of packets if the PF MTU is > 1500.
+        * This is done to deal with a limitation of the 82599 that forces
+        * the PF and all VFs to share the same MTU.
+        * The PF driver re-enables VF receipt of packets once the VF driver
+        * issues an IXGBE_VF_SET_LPE request.
+        * In the meantime, the VF device cannot be used, even if the VF
+        * driver and the Guest VM network stack are ready to accept packets
+        * with a size up to the PF MTU.
+        * As a workaround to this PF behaviour, force the call to
+        * ixgbevf_rlpml_set_vf even if jumbo frames are not used. This way,
+        * VF packet reception works in all cases.
+        */
+       ixgbevf_rlpml_set_vf(hw,
+               (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
+
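Given this workaround, a VF application should always supply a meaningful max_rx_pkt_len, since the value is forwarded to the PF even with jumbo frames disabled. A sketch using the standard Ethernet frame size (ETHER_MAX_LEN from rte_ether.h; conf is the rte_eth_conf passed to rte_eth_dev_configure()):

	#include <rte_ether.h>

	/* forwarded to the PF via ixgbevf_rlpml_set_vf() at RX init */
	conf.rxmode.jumbo_frame = 0;
	conf.rxmode.max_rx_pkt_len = ETHER_MAX_LEN;	/* 1518 bytes */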
        /* Setup RX queues */
        dev->rx_pkt_burst = ixgbe_recv_pkts;
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
@@ -3708,8 +3956,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
                 * The value is in 1 KB resolution. Valid values can be from
                 * 1 KB to 16 KB.
                 */
-               mbp_priv = (struct rte_pktmbuf_pool_private *)
-                       ((char *)rxq->mb_pool + sizeof(struct rte_mempool));
+               mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
                buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
                                       RTE_PKTMBUF_HEADROOM);
                srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
@@ -3731,6 +3978,11 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
                }
        }
 
+       if (dev->data->dev_conf.rxmode.enable_scatter) {
+               dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
+               dev->data->scattered_rx = 1;
+       }
+
        return 0;
 }