mbuf: replace data pointer by an offset
diff --git a/lib/librte_pmd_e1000/igb_rxtx.c b/lib/librte_pmd_e1000/igb_rxtx.c
index 4608595..41727e7 100644
@@ -1,13 +1,13 @@
 /*-
  *   BSD LICENSE
- * 
+ *
  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
  *   All rights reserved.
- * 
+ *
  *   Redistribution and use in source and binary forms, with or without
  *   modification, are permitted provided that the following conditions
  *   are met:
- * 
+ *
  *     * Redistributions of source code must retain the above copyright
  *       notice, this list of conditions and the following disclaimer.
  *     * Redistributions in binary form must reproduce the above copyright
@@ -17,7 +17,7 @@
  *     * Neither the name of Intel Corporation nor the names of its
  *       contributors may be used to endorse or promote products derived
  *       from this software without specific prior written permission.
- * 
+ *
  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 #include "e1000/e1000_api.h"
 #include "e1000_ethdev.h"
 
+#define IGB_RSS_OFFLOAD_ALL ( \
+               ETH_RSS_IPV4 | \
+               ETH_RSS_IPV4_TCP | \
+               ETH_RSS_IPV6 | \
+               ETH_RSS_IPV6_EX | \
+               ETH_RSS_IPV6_TCP | \
+               ETH_RSS_IPV6_TCP_EX | \
+               ETH_RSS_IPV4_UDP | \
+               ETH_RSS_IPV6_UDP | \
+               ETH_RSS_IPV6_UDP_EX)
+
 static inline struct rte_mbuf *
 rte_rxmbuf_alloc(struct rte_mempool *mp)
 {
        struct rte_mbuf *m;
 
        m = __rte_mbuf_raw_alloc(mp);
-       __rte_mbuf_sanity_check_raw(m, RTE_MBUF_PKT, 0);
+       __rte_mbuf_sanity_check_raw(m, 0);
        return (m);
 }
 
 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
-       (uint64_t) ((mb)->buf_physaddr +                   \
-                       (uint64_t) ((char *)((mb)->pkt.data) -     \
-                               (char *)(mb)->buf_addr))
+       (uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
 
 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
        (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
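
With the reworked mbuf, the start of packet data is stored as an offset into the
buffer rather than as a pointer, so the DMA address macro above collapses to a
single addition. A minimal sketch of the equivalence, using only fields touched
by this patch (the helper names are illustrative, not part of the driver):

    /* Both helpers describe the same byte: the first byte of packet data. */
    static inline char *
    mbuf_data_virt(const struct rte_mbuf *m)   /* what m->pkt.data used to hold */
    {
            return (char *)m->buf_addr + m->data_off;
    }

    static inline uint64_t
    mbuf_data_phys(const struct rte_mbuf *m)   /* what RTE_MBUF_DATA_DMA_ADDR() yields */
    {
            return (uint64_t)m->buf_physaddr + m->data_off;
    }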
@@ -142,13 +151,33 @@ enum igb_advctx_num {
        IGB_CTX_NUM  = 2, /**< CTX_NUM */
 };
 
+/** Offload features */
+union igb_vlan_macip {
+       uint32_t data;
+       struct {
+               uint16_t l2_l3_len; /**< 7-bit L2 and 9-bit L3 lengths combined. */
+               uint16_t vlan_tci;
+               /**< VLAN Tag Control Identifier (CPU order). */
+       } f;
+};
+
+/*
+ * Compare mask for vlan_macip_lens.data;
+ * must be kept in sync with the igb_vlan_macip.f layout.
+ */
+#define TX_VLAN_CMP_MASK        0xFFFF0000  /**< VLAN length - 16-bits. */
+#define TX_MAC_LEN_CMP_MASK     0x0000FE00  /**< MAC length - 7-bits. */
+#define TX_IP_LEN_CMP_MASK      0x000001FF  /**< IP  length - 9-bits. */
+/** MAC+IP  length. */
+#define TX_MACIP_LEN_CMP_MASK   (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
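
The masks above pin down the packing of the combined length field: the 9-bit IP
header length sits in bits 0-8 and the 7-bit MAC header length in bits 9-15 of
l2_l3_len. A hedged sketch of filling the field for a plain IPv4 frame, assuming
that little-endian layout (the shift of 9 follows directly from
TX_MAC_LEN_CMP_MASK and TX_IP_LEN_CMP_MASK; the helper name is illustrative):

    static uint16_t
    ipv4_l2_l3_len(void)
    {
            uint16_t l2_len = 14;   /* Ethernet header, must fit in 7 bits */
            uint16_t l3_len = 20;   /* IPv4 header,     must fit in 9 bits */

            return (uint16_t)((l2_len << 9) | l3_len);   /* == 0x1C14 */
    }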
+
 /**
  * Structure to check if a new context needs to be built
  */
 struct igb_advctx_info {
        uint16_t flags;           /**< ol_flags related to context build. */
        uint32_t cmp_mask;        /**< compare mask for vlan_macip_lens */
-       union rte_vlan_macip vlan_macip_lens; /**< vlan, mac & ip length. */
+       union igb_vlan_macip vlan_macip_lens; /**< vlan, mac & ip length. */
 };
 
 /**
@@ -331,6 +360,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
        volatile union e1000_adv_tx_desc *txd;
        struct rte_mbuf     *tx_pkt;
        struct rte_mbuf     *m_seg;
+       union igb_vlan_macip vlan_macip_lens;
        uint64_t buf_dma_addr;
        uint32_t olinfo_status;
        uint32_t cmd_type_len;
@@ -344,7 +374,6 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
        uint16_t tx_ol_req;
        uint32_t new_ctx = 0;
        uint32_t ctx = 0;
-       uint32_t vlan_macip_lens;
 
        txq = tx_queue;
        sw_ring = txq->sw_ring;
@@ -354,7 +383,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 
        for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
                tx_pkt = *tx_pkts++;
-               pkt_len = tx_pkt->pkt.pkt_len;
+               pkt_len = tx_pkt->pkt_len;
 
                RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
 
@@ -366,16 +395,17 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                 * for the packet, starting from the current position (tx_id)
                 * in the ring.
                 */
-               tx_last = (uint16_t) (tx_id + tx_pkt->pkt.nb_segs - 1);
+               tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
 
                ol_flags = tx_pkt->ol_flags;
-               vlan_macip_lens = tx_pkt->pkt.vlan_macip.data;
+               vlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci;
+               vlan_macip_lens.f.l2_l3_len = tx_pkt->l2_l3_len;
                tx_ol_req = (uint16_t)(ol_flags & PKT_TX_OFFLOAD_MASK);
 
                /* If a Context Descriptor needs to be built. */
                if (tx_ol_req) {
                        ctx = what_advctx_update(txq, tx_ol_req,
-                               vlan_macip_lens);
+                               vlan_macip_lens.data);
                        /* Only allocate a context descriptor if required */
                        new_ctx = (ctx == IGB_CTX_NUM);
                        ctx = txq->ctx_curr;
@@ -491,7 +521,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                                }
 
                                igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
-                                   vlan_macip_lens);
+                                   vlan_macip_lens.data);
 
                                txe->last_id = tx_last;
                                tx_id = txe->next_id;
@@ -516,7 +546,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        /*
                         * Set up transmit descriptor.
                         */
-                       slen = (uint16_t) m_seg->pkt.data_len;
+                       slen = (uint16_t) m_seg->data_len;
                        buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
                        txd->read.buffer_addr =
                                rte_cpu_to_le_64(buf_dma_addr);
@@ -527,7 +557,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        txe->last_id = tx_last;
                        tx_id = txe->next_id;
                        txe = txn;
-                       m_seg = m_seg->pkt.next;
+                       m_seg = m_seg->next;
                } while (m_seg != NULL);
 
                /*
@@ -742,19 +772,18 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                 */
                pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
                                      rxq->crc_len);
-               rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
-               rte_packet_prefetch(rxm->pkt.data);
-               rxm->pkt.nb_segs = 1;
-               rxm->pkt.next = NULL;
-               rxm->pkt.pkt_len = pkt_len;
-               rxm->pkt.data_len = pkt_len;
-               rxm->pkt.in_port = rxq->port_id;
-
-               rxm->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
+               rxm->data_off = RTE_PKTMBUF_HEADROOM;
+               rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
+               rxm->nb_segs = 1;
+               rxm->next = NULL;
+               rxm->pkt_len = pkt_len;
+               rxm->data_len = pkt_len;
+               rxm->port = rxq->port_id;
+
+               rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
                hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
                /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
-               rxm->pkt.vlan_macip.f.vlan_tci =
-                       rte_le_to_cpu_16(rxd.wb.upper.vlan);
+               rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
 
                pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
                pkt_flags = (uint16_t)(pkt_flags |
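
On the receive side the driver now records where the frame starts by writing
data_off instead of a data pointer. Applications reach the packet contents the
same way the prefetch above does; a short sketch for an mbuf returned by
eth_igb_recv_pkts(), assuming struct ether_hdr from rte_ether.h and that
rte_pktmbuf_mtod() resolves to buf_addr plus data_off after this mbuf rework
(the function name is illustrative):

    static struct ether_hdr *
    first_eth_hdr(struct rte_mbuf *m)
    {
            /* Equivalent to rte_pktmbuf_mtod(m, struct ether_hdr *). */
            return (struct ether_hdr *)((char *)m->buf_addr + m->data_off);
    }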
@@ -918,8 +947,8 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                 * Set data length & data buffer address of mbuf.
                 */
                data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
-               rxm->pkt.data_len = data_len;
-               rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+               rxm->data_len = data_len;
+               rxm->data_off = RTE_PKTMBUF_HEADROOM;
 
                /*
                 * If this is the first buffer of the received packet,
@@ -931,12 +960,12 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                 */
                if (first_seg == NULL) {
                        first_seg = rxm;
-                       first_seg->pkt.pkt_len = data_len;
-                       first_seg->pkt.nb_segs = 1;
+                       first_seg->pkt_len = data_len;
+                       first_seg->nb_segs = 1;
                } else {
-                       first_seg->pkt.pkt_len += data_len;
-                       first_seg->pkt.nb_segs++;
-                       last_seg->pkt.next = rxm;
+                       first_seg->pkt_len += data_len;
+                       first_seg->nb_segs++;
+                       last_seg->next = rxm;
                }
 
                /*
@@ -959,18 +988,18 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                 *     mbuf, subtract the length of that CRC part from the
                 *     data length of the previous mbuf.
                 */
-               rxm->pkt.next = NULL;
+               rxm->next = NULL;
                if (unlikely(rxq->crc_len > 0)) {
-                       first_seg->pkt.pkt_len -= ETHER_CRC_LEN;
+                       first_seg->pkt_len -= ETHER_CRC_LEN;
                        if (data_len <= ETHER_CRC_LEN) {
                                rte_pktmbuf_free_seg(rxm);
-                               first_seg->pkt.nb_segs--;
-                               last_seg->pkt.data_len = (uint16_t)
-                                       (last_seg->pkt.data_len -
+                               first_seg->nb_segs--;
+                               last_seg->data_len = (uint16_t)
+                                       (last_seg->data_len -
                                         (ETHER_CRC_LEN - data_len));
-                               last_seg->pkt.next = NULL;
+                               last_seg->next = NULL;
                        } else
-                               rxm->pkt.data_len =
+                               rxm->data_len =
                                        (uint16_t) (data_len - ETHER_CRC_LEN);
                }
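
Worked example of the branch above: with a 4-byte Ethernet CRC and a final
segment carrying only data_len = 2 bytes, the whole last segment is CRC, so it
is freed, nb_segs is decremented, and the previous segment's data_len is reduced
by the remaining ETHER_CRC_LEN - data_len = 2 bytes; pkt_len was already
shortened by the full 4 bytes, so the segment lengths again add up to pkt_len.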
 
@@ -983,15 +1012,14 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                 *      - VLAN TCI, if any,
                 *      - error flags.
                 */
-               first_seg->pkt.in_port = rxq->port_id;
-               first_seg->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
+               first_seg->port = rxq->port_id;
+               first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
 
                /*
                 * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
                 * set in the pkt_flags field.
                 */
-               first_seg->pkt.vlan_macip.f.vlan_tci =
-                       rte_le_to_cpu_16(rxd.wb.upper.vlan);
+               first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
                hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
                pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
                pkt_flags = (uint16_t)(pkt_flags |
@@ -1001,7 +1029,8 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                first_seg->ol_flags = pkt_flags;
 
                /* Prefetch data of first segment, if configured to do so. */
-               rte_packet_prefetch(first_seg->pkt.data);
+               rte_packet_prefetch((char *)first_seg->buf_addr +
+                       first_seg->data_off);
 
                /*
                 * Store the mbuf address into the next entry of the array
@@ -1078,7 +1107,7 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
        char z_name[RTE_MEMZONE_NAMESIZE];
        const struct rte_memzone *mz;
 
-       rte_snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
+       snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
                        dev->driver->pci_drv.name, ring_name,
                                dev->data->port_id, queue_id);
        mz = rte_memzone_lookup(z_name);
@@ -1212,8 +1241,10 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
                        "the TX WTHRESH value to 4, 8, or 16.\n");
 
        /* Free memory prior to re-allocation if needed */
-       if (dev->data->tx_queues[queue_idx] != NULL)
+       if (dev->data->tx_queues[queue_idx] != NULL) {
                igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
+               dev->data->tx_queues[queue_idx] = NULL;
+       }
 
        /* First allocate the tx queue data structure */
        txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
@@ -1386,8 +1417,8 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 #ifndef RTE_LIBRTE_XEN_DOM0
        rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
 #else
-       rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr); 
-#endif 
+       rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+#endif
        rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
 
        /* Allocate software ring. */
@@ -1407,7 +1438,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
        return 0;
 }
 
-uint32_t 
+uint32_t
 eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
 #define IGB_RXQ_SCAN_INTERVAL 4
@@ -1519,53 +1550,28 @@ igb_rss_disable(struct rte_eth_dev *dev)
 }
 
 static void
-igb_rss_configure(struct rte_eth_dev *dev)
+igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
 {
-       struct e1000_hw *hw;
-       uint8_t *hash_key;
+       uint8_t  *hash_key;
        uint32_t rss_key;
        uint32_t mrqc;
-       uint32_t shift;
-       uint16_t rss_hf;
+       uint64_t rss_hf;
        uint16_t i;
 
-       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-       rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
-       if (rss_hf == 0) /* Disable RSS. */ {
-               igb_rss_disable(dev);
-               return;
-       }
-       hash_key = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
-       if (hash_key == NULL)
-               hash_key = rss_intel_key; /* Default hash key. */
-
-       /* Fill in RSS hash key. */
-       for (i = 0; i < 10; i++) {
-               rss_key  = hash_key[(i * 4)];
-               rss_key |= hash_key[(i * 4) + 1] << 8;
-               rss_key |= hash_key[(i * 4) + 2] << 16;
-               rss_key |= hash_key[(i * 4) + 3] << 24;
-               E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
-       }
-
-       /* Fill in redirection table. */
-       shift = (hw->mac.type == e1000_82575) ? 6 : 0;
-       for (i = 0; i < 128; i++) {
-               union e1000_reta {
-                       uint32_t dword;
-                       uint8_t  bytes[4];
-               } reta;
-               uint8_t q_idx;
-
-               q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
-                                  i % dev->data->nb_rx_queues : 0);
-               reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
-               if ((i & 3) == 3)
-                       E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
+       hash_key = rss_conf->rss_key;
+       if (hash_key != NULL) {
+               /* Fill in RSS hash key */
+               for (i = 0; i < 10; i++) {
+                       rss_key  = hash_key[(i * 4)];
+                       rss_key |= hash_key[(i * 4) + 1] << 8;
+                       rss_key |= hash_key[(i * 4) + 2] << 16;
+                       rss_key |= hash_key[(i * 4) + 3] << 24;
+                       E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
+               }
        }
 
-       /* Set configured hashing functions in MRQC register. */
+       /* Set configured hashing protocols in MRQC register */
+       rss_hf = rss_conf->rss_hf;
        mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
        if (rss_hf & ETH_RSS_IPV4)
                mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
@@ -1588,6 +1594,128 @@ igb_rss_configure(struct rte_eth_dev *dev)
        E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
 }
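
igb_hw_rss_hash_set() programs the 40-byte RSS key four bytes at a time into the
ten RSSRK registers, least-significant byte first; eth_igb_rss_hash_conf_get()
below reverses the same packing. A small host-side sketch of the byte-to-register
mapping (the helper name is illustrative and performs no hardware access):

    /* Value written to RSSRK[i] for a 40-byte key: bytes 4*i..4*i+3,
     * low key byte in the low bits. */
    static uint32_t
    rssrk_word(const uint8_t *key, unsigned int i)
    {
            return (uint32_t)key[4 * i] |
                   ((uint32_t)key[4 * i + 1] << 8) |
                   ((uint32_t)key[4 * i + 2] << 16) |
                   ((uint32_t)key[4 * i + 3] << 24);
    }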
 
+int
+eth_igb_rss_hash_update(struct rte_eth_dev *dev,
+                       struct rte_eth_rss_conf *rss_conf)
+{
+       struct e1000_hw *hw;
+       uint32_t mrqc;
+       uint64_t rss_hf;
+
+       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       /*
+        * Before changing anything, check that this update does not try to
+        * disable RSS if RSS was enabled at initialization time, nor to
+        * enable RSS if it was disabled at initialization time.
+        */
+       rss_hf = rss_conf->rss_hf & IGB_RSS_OFFLOAD_ALL;
+       mrqc = E1000_READ_REG(hw, E1000_MRQC);
+       if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
+               if (rss_hf != 0) /* Enable RSS */
+                       return -(EINVAL);
+               return 0; /* Nothing to do */
+       }
+       /* RSS enabled */
+       if (rss_hf == 0) /* Disable RSS */
+               return -(EINVAL);
+       igb_hw_rss_hash_set(hw, rss_conf);
+       return 0;
+}
+
+int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
+                             struct rte_eth_rss_conf *rss_conf)
+{
+       struct e1000_hw *hw;
+       uint8_t *hash_key;
+       uint32_t rss_key;
+       uint32_t mrqc;
+       uint64_t rss_hf;
+       uint16_t i;
+
+       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hash_key = rss_conf->rss_key;
+       if (hash_key != NULL) {
+               /* Return RSS hash key */
+               for (i = 0; i < 10; i++) {
+                       rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i);
+                       hash_key[(i * 4)] = rss_key & 0x000000FF;
+                       hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
+                       hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
+                       hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
+               }
+       }
+
+       /* Get RSS functions configured in MRQC register */
+       mrqc = E1000_READ_REG(hw, E1000_MRQC);
+       if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */
+               rss_conf->rss_hf = 0;
+               return 0;
+       }
+       rss_hf = 0;
+       if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
+               rss_hf |= ETH_RSS_IPV4;
+       if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
+               rss_hf |= ETH_RSS_IPV4_TCP;
+       if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
+               rss_hf |= ETH_RSS_IPV6;
+       if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
+               rss_hf |= ETH_RSS_IPV6_EX;
+       if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
+               rss_hf |= ETH_RSS_IPV6_TCP;
+       if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
+               rss_hf |= ETH_RSS_IPV6_TCP_EX;
+       if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
+               rss_hf |= ETH_RSS_IPV4_UDP;
+       if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
+               rss_hf |= ETH_RSS_IPV6_UDP;
+       if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
+               rss_hf |= ETH_RSS_IPV6_UDP_EX;
+       rss_conf->rss_hf = rss_hf;
+       return 0;
+}
+
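
Together these two entry points allow the RSS key and the hashed protocols to be
changed at run time without reconfiguring the port. A hedged usage sketch that
calls the driver entry points directly, given dev pointing at an igb port with
RSS already enabled; a real application would normally go through the
corresponding ethdev API instead (the function name is illustrative):

    static int
    igb_drop_plain_ipv6_hash(struct rte_eth_dev *dev)
    {
            struct rte_eth_rss_conf conf;
            uint8_t key[40];        /* igb key: 10 RSSRK registers * 4 bytes */

            conf.rss_key = key;     /* non-NULL: also read back the current key */
            eth_igb_rss_hash_conf_get(dev, &conf);

            conf.rss_hf &= ~ETH_RSS_IPV6;   /* stop hashing plain IPv6 */

            /* Returns -EINVAL if the remaining rss_hf would disable RSS. */
            return eth_igb_rss_hash_update(dev, &conf);
    }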
+static void
+igb_rss_configure(struct rte_eth_dev *dev)
+{
+       struct rte_eth_rss_conf rss_conf;
+       struct e1000_hw *hw;
+       uint32_t shift;
+       uint16_t i;
+
+       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       /* Fill in redirection table. */
+       shift = (hw->mac.type == e1000_82575) ? 6 : 0;
+       for (i = 0; i < 128; i++) {
+               union e1000_reta {
+                       uint32_t dword;
+                       uint8_t  bytes[4];
+               } reta;
+               uint8_t q_idx;
+
+               q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
+                                  i % dev->data->nb_rx_queues : 0);
+               reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
+               if ((i & 3) == 3)
+                       E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
+       }
+
+       /*
+        * Configure the RSS key and the RSS protocols used to compute
+        * the RSS hash of input packets.
+        */
+       rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
+       if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
+               igb_rss_disable(dev);
+               return;
+       }
+       if (rss_conf.rss_key == NULL)
+               rss_conf.rss_key = rss_intel_key; /* Default hash key */
+       igb_hw_rss_hash_set(hw, &rss_conf);
+}
+
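
igb_rss_configure() now only owns the redirection table and delegates key and
protocol programming to igb_hw_rss_hash_set(). The RETA loop spreads the 128
entries round-robin over the configured RX queues (with the extra left shift on
82575), so the queue a flow lands in can be predicted from its RSS hash. A sketch
of that mapping, assuming the usual 7-bit RETA indexing; the helper is
illustrative and not part of the driver:

    /* Expected destination queue for a packet whose 32-bit RSS hash is
     * rss_hash, given the round-robin table written by igb_rss_configure()
     * for a port with nb_rx_queues (>= 1) RX queues. */
    static uint16_t
    igb_expected_queue(uint32_t rss_hash, uint16_t nb_rx_queues)
    {
            uint32_t reta_idx = rss_hash & 0x7F;    /* 128-entry table */

            return (uint16_t)(reta_idx % nb_rx_queues);
    }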
 /*
  * Check if the mac type supports VMDq or not.
  * Return 1 if it does, otherwise return 0.
@@ -1596,27 +1724,27 @@ static int
 igb_is_vmdq_supported(const struct rte_eth_dev *dev)
 {
        const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       
-       switch (hw->mac.type) { 
-       case e1000_82576: 
-       case e1000_82580: 
-       case e1000_i350: 
+
+       switch (hw->mac.type) {
+       case e1000_82576:
+       case e1000_82580:
+       case e1000_i350:
                return 1;
-       case e1000_82540: 
-       case e1000_82541: 
-       case e1000_82542: 
-       case e1000_82543: 
-       case e1000_82544: 
-       case e1000_82545: 
-       case e1000_82546: 
-       case e1000_82547: 
-       case e1000_82571: 
-       case e1000_82572: 
-       case e1000_82573: 
-       case e1000_82574: 
-       case e1000_82583: 
-       case e1000_i210: 
-       case e1000_i211: 
+       case e1000_82540:
+       case e1000_82541:
+       case e1000_82542:
+       case e1000_82543:
+       case e1000_82544:
+       case e1000_82545:
+       case e1000_82546:
+       case e1000_82547:
+       case e1000_82571:
+       case e1000_82572:
+       case e1000_82573:
+       case e1000_82574:
+       case e1000_82583:
+       case e1000_i210:
+       case e1000_i211:
        default:
                PMD_INIT_LOG(ERR, "Cannot support VMDq feature\n");
                return 0;
@@ -1630,8 +1758,8 @@ igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
        struct e1000_hw *hw;
        uint32_t mrqc, vt_ctl, vmolr, rctl;
        int i;
-       PMD_INIT_LOG(DEBUG, ">>");
+
+       PMD_INIT_LOG(DEBUG, ">>");
        hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
 
@@ -1640,7 +1768,7 @@ igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
                return -1;
 
        igb_rss_disable(dev);
-       
+
        /* RCTL: enable VLAN filter */
        rctl = E1000_READ_REG(hw, E1000_RCTL);
        rctl |= E1000_RCTL_VFE;
@@ -1648,20 +1776,20 @@ igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
 
        /* MRQC: enable vmdq */
        mrqc = E1000_READ_REG(hw, E1000_MRQC);
-       mrqc |= E1000_MRQC_ENABLE_VMDQ; 
+       mrqc |= E1000_MRQC_ENABLE_VMDQ;
        E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
+
        /* VTCTL:  pool selection according to VLAN tag */
        vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
-       if (cfg->enable_default_pool) 
+       if (cfg->enable_default_pool)
                vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
        vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
        E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
-       
-       /* 
+
+       /*
         * VMOLR: set STRVLAN as 1 if IGMAC in VTCTL is set as 1
-        * Both 82576 and 82580 support it 
-        */
+        * Both 82576 and 82580 support it
+        */
        if (hw->mac.type != e1000_i350) {
                for (i = 0; i < E1000_VMOLR_SIZE; i++) {
                        vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
@@ -1671,13 +1799,13 @@ igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
        }
 
        /* VFTA - enable all vlan filters */
-       for (i = 0; i < IGB_VFTA_SIZE; i++) 
+       for (i = 0; i < IGB_VFTA_SIZE; i++)
                E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
-       
+
        /* VFRE: enable RX on all 8 pools; both 82576 and i350 support it */
        if (hw->mac.type != e1000_82580)
                E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
+
        /*
         * RAH/RAL - allow pools to read specific mac addresses
         * In this case, all pools should be able to read from mac addr 0
@@ -1695,7 +1823,7 @@ igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
        }
 
        E1000_WRITE_FLUSH(hw);
-       
+
        return 0;
 }
 
@@ -1721,7 +1849,6 @@ igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
                if (mbuf == NULL) {
                        PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
                                "queue_id=%hu\n", rxq->queue_id);
-                       igb_rx_queue_release(rxq);
                        return (-ENOMEM);
                }
                dma_addr =
@@ -1742,39 +1869,39 @@ igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t mrqc;
+
        if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
                /*
-               * SRIOV active scheme
-               * FIXME if support RSS together with VMDq & SRIOV
-               */
+                * SRIOV active scheme
+                * FIXME if support RSS together with VMDq & SRIOV
+                */
                mrqc = E1000_MRQC_ENABLE_VMDQ;
                /* 011b Def_Q ignore, according to VT_CTL.DEF_PL */
                mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
                E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
-       } else if(RTE_ETH_DEV_SRIOV(dev).active == 0) { 
+       } else if(RTE_ETH_DEV_SRIOV(dev).active == 0) {
                /*
-               * SRIOV inactive scheme
-               */
+                * SRIOV inactive scheme
+                */
                switch (dev->data->dev_conf.rxmode.mq_mode) {
                        case ETH_MQ_RX_RSS:
                                igb_rss_configure(dev);
                                break;
                        case ETH_MQ_RX_VMDQ_ONLY:
                                /* Configure general VMDQ-only RX parameters */
-                               igb_vmdq_rx_hw_configure(dev); 
+                               igb_vmdq_rx_hw_configure(dev);
                                break;
                        case ETH_MQ_RX_NONE:
                                /* if mq_mode is none, disable rss mode.*/
-                       default: 
+                       default:
                                igb_rss_disable(dev);
                                break;
                }
        }
+
        return 0;
 }
+
 int
 eth_igb_rx_init(struct rte_eth_dev *dev)
 {
@@ -1899,6 +2026,11 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
                E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
        }
 
+       if (dev->data->dev_conf.rxmode.enable_scatter) {
+               dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
+               dev->data->scattered_rx = 1;
+       }
+
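
The new check lets an application request the scattered receive path explicitly,
even when max_rx_pkt_len alone would fit in a single buffer. A minimal
configuration sketch, assuming the rte_eth_conf layout of this DPDK generation:

    struct rte_eth_conf port_conf = {
            .rxmode = {
                    .enable_scatter = 1,   /* select eth_igb_recv_scattered_pkts */
            },
    };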
        /*
         * Setup BSIZE field of RCTL register, if needed.
         * Buffer sizes >= 1024 are not [supposed to be] setup in the RCTL
@@ -2077,6 +2209,11 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
 
        hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+       /* setup MTU */
+       e1000_rlpml_set_vf(hw,
+               (uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
+               VLAN_TAG_SIZE));
+
        /* Configure and enable each RX queue. */
        rctl_bsize = 0;
        dev->rx_pkt_burst = eth_igb_recv_pkts;
@@ -2149,10 +2286,10 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
                rxdctl &= 0xFFF00000;
                rxdctl |= (rxq->pthresh & 0x1F);
                rxdctl |= ((rxq->hthresh & 0x1F) << 8);
-               if (hw->mac.type == e1000_82576) {
-                       /* 
+               if (hw->mac.type == e1000_vfadapt) {
+                       /*
                         * Workaround of 82576 VF Erratum
-                        * force set WTHRESH to 1 
+                        * force set WTHRESH to 1
                         * to avoid Write-Back not triggered sometimes
                         */
                        rxdctl |= 0x10000;
@@ -2163,6 +2300,11 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
                E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
        }
 
+       if (dev->data->dev_conf.rxmode.enable_scatter) {
+               dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
+               dev->data->scattered_rx = 1;
+       }
+
        /*
         * Setup the HW Rx Head and Tail Descriptor Pointers.
         * This needs to be done after enable.
@@ -2213,12 +2355,12 @@ eth_igbvf_tx_init(struct rte_eth_dev *dev)
                txdctl |= txq->pthresh & 0x1F;
                txdctl |= ((txq->hthresh & 0x1F) << 8);
                if (hw->mac.type == e1000_82576) {
-                       /* 
+                       /*
                         * Workaround of 82576 VF Erratum
-                        * force set WTHRESH to 1 
+                        * force set WTHRESH to 1
                         * to avoid Write-Back not triggered sometimes
                         */
-                       txdctl |= 0x10000; 
+                       txdctl |= 0x10000;
                        PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !\n");
                }
                else