fix typos using codespell utility
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index f47e96c..2192c7f 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -31,7 +31,6 @@
  * POSSIBILITY OF SUCH DAMAGE.
  *
  */
-#ident "$Id$"
 
 #include <stdio.h>
 
@@ -80,14 +79,22 @@ static int is_eth_addr_valid(uint8_t *addr)
        return !is_mcast_addr(addr) && !is_zero_addr(addr);
 }
 
-static inline struct rte_mbuf *
-enic_rxmbuf_alloc(struct rte_mempool *mp)
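+/* Free any mbufs still held in an RQ's mbuf ring (used when a receive queue
+ * is released or when bringing it up fails part way through).
+ */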
+static void
+enic_rxmbuf_queue_release(__rte_unused struct enic *enic, struct vnic_rq *rq)
 {
-       struct rte_mbuf *m;
+       uint16_t i;
+
+       if (!rq || !rq->mbuf_ring) {
+               dev_debug(enic, "Pointer to rq or mbuf_ring is NULL\n");
+               return;
+       }
 
-       m = __rte_mbuf_raw_alloc(mp);
-       __rte_mbuf_sanity_check_raw(m, 0);
-       return m;
+       for (i = 0; i < rq->ring.desc_count; i++) {
+               if (rq->mbuf_ring[i]) {
+                       rte_pktmbuf_free_seg(rq->mbuf_ring[i]);
+                       rq->mbuf_ring[i] = NULL;
+               }
+       }
 }
 
 void enic_set_hdr_split_size(struct enic *enic, u16 split_hdr_size)
@@ -95,32 +102,12 @@ void enic_set_hdr_split_size(struct enic *enic, u16 split_hdr_size)
        vnic_set_hdr_split_size(enic->vdev, split_hdr_size);
 }
 
-static void enic_free_wq_buf(__rte_unused struct vnic_wq *wq, struct vnic_wq_buf *buf)
-{
-       struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->os_buf;
-
-       rte_mempool_put(mbuf->pool, mbuf);
-       buf->os_buf = NULL;
-}
-
-static void enic_wq_free_buf(struct vnic_wq *wq,
-       __rte_unused struct cq_desc *cq_desc,
-       struct vnic_wq_buf *buf,
-       __rte_unused void *opaque)
-{
-       enic_free_wq_buf(wq, buf);
-}
-
-static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
-       __rte_unused u8 type, u16 q_number, u16 completed_index, void *opaque)
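+/* Release the mbuf attached to a completed transmit (WQ) descriptor. */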
+static void enic_free_wq_buf(struct vnic_wq_buf *buf)
 {
-       struct enic *enic = vnic_dev_priv(vdev);
-
-       vnic_wq_service(&enic->wq[q_number], cq_desc,
-               completed_index, enic_wq_free_buf,
-               opaque);
+       struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->mb;
 
-       return 0;
+       rte_pktmbuf_free_seg(mbuf);
+       buf->mb = NULL;
 }
 
 static void enic_log_q_error(struct enic *enic)
@@ -135,7 +122,9 @@ static void enic_log_q_error(struct enic *enic)
                                error_status);
        }
 
-       for (i = 0; i < enic->rq_count; i++) {
+       for (i = 0; i < enic_vnic_rq_count(enic); i++) {
+               if (!enic->rq[i].in_use)
+                       continue;
                error_status = vnic_rq_error_status(&enic->rq[i]);
                if (error_status)
                        dev_err(enic, "RQ[%d] error_status %d\n", i,
@@ -143,131 +132,99 @@ static void enic_log_q_error(struct enic *enic)
        }
 }
 
-unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq)
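+/* Soft stats: counters maintained by the driver itself rather than read from
+ * the vNIC (rx_nombuf, rx_packet_errors, tx_oversized).
+ */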
+static void enic_clear_soft_stats(struct enic *enic)
 {
-       unsigned int cq = enic_cq_wq(enic, wq->index);
-
-       /* Return the work done */
-       return vnic_cq_service(&enic->cq[cq],
-               -1 /*wq_work_to_do*/, enic_wq_service, NULL);
+       struct enic_soft_stats *soft_stats = &enic->soft_stats;
+       rte_atomic64_clear(&soft_stats->rx_nombuf);
+       rte_atomic64_clear(&soft_stats->rx_packet_errors);
+       rte_atomic64_clear(&soft_stats->tx_oversized);
 }
 
-
-int enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
-       struct rte_mbuf *tx_pkt, unsigned short len,
-       uint8_t sop, uint8_t eop,
-       uint16_t ol_flags, uint16_t vlan_tag)
+static void enic_init_soft_stats(struct enic *enic)
 {
-       struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
-       uint16_t mss = 0;
-       uint8_t cq_entry = eop;
-       uint8_t vlan_tag_insert = 0;
-       uint64_t bus_addr = (dma_addr_t)
-           (tx_pkt->buf_physaddr + RTE_PKTMBUF_HEADROOM);
-
-       if (sop) {
-               if (ol_flags & PKT_TX_VLAN_PKT)
-                       vlan_tag_insert = 1;
-
-               if (enic->hw_ip_checksum) {
-                       if (ol_flags & PKT_TX_IP_CKSUM)
-                               mss |= ENIC_CALC_IP_CKSUM;
-
-                       if (ol_flags & PKT_TX_TCP_UDP_CKSUM)
-                               mss |= ENIC_CALC_TCP_UDP_CKSUM;
-               }
-       }
-
-       wq_enet_desc_enc(desc,
-               bus_addr,
-               len,
-               mss,
-               0 /* header_length */,
-               0 /* offload_mode WQ_ENET_OFFLOAD_MODE_CSUM */,
-               eop,
-               cq_entry,
-               0 /* fcoe_encap */,
-               vlan_tag_insert,
-               vlan_tag,
-               0 /* loopback */);
-
-       vnic_wq_post(wq, (void *)tx_pkt, bus_addr, len,
-               sop, eop,
-               1 /*desc_skip_cnt*/,
-               cq_entry,
-               0 /*compressed send*/,
-               0 /*wrid*/);
-
-       return 0;
+       struct enic_soft_stats *soft_stats = &enic->soft_stats;
+       rte_atomic64_init(&soft_stats->rx_nombuf);
+       rte_atomic64_init(&soft_stats->rx_packet_errors);
+       rte_atomic64_init(&soft_stats->tx_oversized);
+       enic_clear_soft_stats(enic);
 }
 
 void enic_dev_stats_clear(struct enic *enic)
 {
        if (vnic_dev_stats_clear(enic->vdev))
                dev_err(enic, "Error in clearing stats\n");
+       enic_clear_soft_stats(enic);
 }
 
 void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
 {
        struct vnic_stats *stats;
+       struct enic_soft_stats *soft_stats = &enic->soft_stats;
+       int64_t rx_truncated;
+       uint64_t rx_packet_errors;
 
        if (vnic_dev_stats_dump(enic->vdev, &stats)) {
                dev_err(enic, "Error in getting stats\n");
                return;
        }
 
-       r_stats->ipackets = stats->rx.rx_frames_ok;
+       /* The number of truncated packets can only be calculated by
+        * subtracting a hardware counter from error packets received by
+        * the driver. Note: this causes transient inaccuracies in the
+        * ipackets count. Also, the length of truncated packets is
+        * counted in ibytes even though truncated packets are dropped,
+        * which can make ibytes slightly higher than it should be.
+        */
+       rx_packet_errors = rte_atomic64_read(&soft_stats->rx_packet_errors);
+       rx_truncated = rx_packet_errors - stats->rx.rx_errors;
+
+       r_stats->ipackets = stats->rx.rx_frames_ok - rx_truncated;
        r_stats->opackets = stats->tx.tx_frames_ok;
 
        r_stats->ibytes = stats->rx.rx_bytes_ok;
        r_stats->obytes = stats->tx.tx_bytes_ok;
 
-       r_stats->ierrors = stats->rx.rx_errors;
-       r_stats->oerrors = stats->tx.tx_errors;
+       r_stats->ierrors = stats->rx.rx_errors + stats->rx.rx_drop;
+       r_stats->oerrors = stats->tx.tx_errors
+                          + rte_atomic64_read(&soft_stats->tx_oversized);
 
-       r_stats->imcasts = stats->rx.rx_multicast_frames_ok;
-       r_stats->rx_nombuf = stats->rx.rx_no_bufs;
+       r_stats->imissed = stats->rx.rx_no_bufs + rx_truncated;
+
+       r_stats->rx_nombuf = rte_atomic64_read(&soft_stats->rx_nombuf);
 }
 
-void enic_del_mac_address(struct enic *enic)
+void enic_del_mac_address(struct enic *enic, int mac_index)
 {
-       if (vnic_dev_del_addr(enic->vdev, enic->mac_addr))
+       struct rte_eth_dev *eth_dev = enic->rte_dev;
+       uint8_t *mac_addr = eth_dev->data->mac_addrs[mac_index].addr_bytes;
+
+       if (vnic_dev_del_addr(enic->vdev, mac_addr))
                dev_err(enic, "del mac addr failed\n");
 }
 
-void enic_set_mac_address(struct enic *enic, uint8_t *mac_addr)
+int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr)
 {
        int err;
 
        if (!is_eth_addr_valid(mac_addr)) {
                dev_err(enic, "invalid mac address\n");
-               return;
+               return -EINVAL;
        }
 
-       err = vnic_dev_del_addr(enic->vdev, mac_addr);
-       if (err) {
-               dev_err(enic, "del mac addr failed\n");
-               return;
-       }
-
-       ether_addr_copy((struct ether_addr *)mac_addr,
-               (struct ether_addr *)enic->mac_addr);
-
        err = vnic_dev_add_addr(enic->vdev, mac_addr);
-       if (err) {
+       if (err)
                dev_err(enic, "add mac addr failed\n");
-               return;
-       }
+       return err;
 }
 
 static void
-enic_free_rq_buf(__rte_unused struct vnic_rq *rq, struct vnic_rq_buf *buf)
+enic_free_rq_buf(struct rte_mbuf **mbuf)
 {
-       if (!buf->os_buf)
+       if (*mbuf == NULL)
                return;
 
-       rte_pktmbuf_free((struct rte_mbuf *)buf->os_buf);
-       buf->os_buf = NULL;
+       rte_pktmbuf_free(*mbuf);
+       *mbuf = NULL;
 }
 
 void enic_init_vnic_resources(struct enic *enic)
@@ -275,12 +232,35 @@ void enic_init_vnic_resources(struct enic *enic)
        unsigned int error_interrupt_enable = 1;
        unsigned int error_interrupt_offset = 0;
        unsigned int index = 0;
+       unsigned int cq_idx;
+       struct vnic_rq *data_rq;
 
        for (index = 0; index < enic->rq_count; index++) {
-               vnic_rq_init(&enic->rq[index],
-                       enic_cq_rq(enic, index),
+               cq_idx = enic_cq_rq(enic, enic_rte_rq_idx_to_sop_idx(index));
+
+               vnic_rq_init(&enic->rq[enic_rte_rq_idx_to_sop_idx(index)],
+                       cq_idx,
                        error_interrupt_enable,
                        error_interrupt_offset);
+
+               data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(index)];
+               if (data_rq->in_use)
+                       vnic_rq_init(data_rq,
+                                    cq_idx,
+                                    error_interrupt_enable,
+                                    error_interrupt_offset);
+
+               vnic_cq_init(&enic->cq[cq_idx],
+                       0 /* flow_control_enable */,
+                       1 /* color_enable */,
+                       0 /* cq_head */,
+                       0 /* cq_tail */,
+                       1 /* cq_tail_color */,
+                       0 /* interrupt_enable */,
+                       1 /* cq_entry_enable */,
+                       0 /* cq_message_enable */,
+                       0 /* interrupt offset */,
+                       0 /* cq_message_addr */);
        }
 
        for (index = 0; index < enic->wq_count; index++) {
@@ -288,22 +268,19 @@ void enic_init_vnic_resources(struct enic *enic)
                        enic_cq_wq(enic, index),
                        error_interrupt_enable,
                        error_interrupt_offset);
-       }
-
-       vnic_dev_stats_clear(enic->vdev);
 
-       for (index = 0; index < enic->cq_count; index++) {
-               vnic_cq_init(&enic->cq[index],
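+               /* WQ completions are reported as a CQ message written to the
+                * per-WQ cqmsg_rz memzone rather than as CQ entries.
+                */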
+               cq_idx = enic_cq_wq(enic, index);
+               vnic_cq_init(&enic->cq[cq_idx],
                        0 /* flow_control_enable */,
                        1 /* color_enable */,
                        0 /* cq_head */,
                        0 /* cq_tail */,
                        1 /* cq_tail_color */,
                        0 /* interrupt_enable */,
-                       1 /* cq_entry_enable */,
-                       0 /* cq_message_enable */,
+                       0 /* cq_entry_enable */,
+                       1 /* cq_message_enable */,
                        0 /* interrupt offset */,
-                       0 /* cq_message_addr */);
+                       (u64)enic->wq[index].cqmsg_rz->phys_addr);
        }
 
        vnic_intr_init(&enic->intr,
@@ -313,260 +290,70 @@ void enic_init_vnic_resources(struct enic *enic)
 }
 
 
-static int enic_rq_alloc_buf(struct vnic_rq *rq)
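+/* Fill every descriptor of an RQ with a freshly allocated mbuf and post the
+ * buffers to the VIC; called when a receive queue is started or restarted.
+ */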
+static int
+enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
 {
-       struct enic *enic = vnic_dev_priv(rq->vdev);
+       struct rte_mbuf *mb;
+       struct rq_enet_desc *rqd = rq->ring.descs;
+       unsigned i;
        dma_addr_t dma_addr;
-       struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
-       uint8_t type = RQ_ENET_TYPE_ONLY_SOP;
-       u16 split_hdr_size = vnic_get_hdr_split_size(enic->vdev);
-       struct rte_mbuf *mbuf = enic_rxmbuf_alloc(rq->mp);
-       struct rte_mbuf *hdr_mbuf = NULL;
-
-       if (!mbuf) {
-               dev_err(enic, "mbuf alloc in enic_rq_alloc_buf failed\n");
-               return -1;
-       }
-
-       if (unlikely(split_hdr_size)) {
-               if (vnic_rq_desc_avail(rq) < 2) {
-                       rte_mempool_put(mbuf->pool, mbuf);
-                       return -1;
-               }
-               hdr_mbuf = enic_rxmbuf_alloc(rq->mp);
-               if (!hdr_mbuf) {
-                       rte_mempool_put(mbuf->pool, mbuf);
-                       dev_err(enic,
-                               "hdr_mbuf alloc in enic_rq_alloc_buf failed\n");
-                       return -1;
-               }
 
-               hdr_mbuf->data_off = RTE_PKTMBUF_HEADROOM;
-
-               hdr_mbuf->nb_segs = 2;
-               hdr_mbuf->port = enic->port_id;
-               hdr_mbuf->next = mbuf;
-
-               dma_addr = (dma_addr_t)
-                   (hdr_mbuf->buf_physaddr + hdr_mbuf->data_off);
-
-               rq_enet_desc_enc(desc, dma_addr, type, split_hdr_size);
-
-               vnic_rq_post(rq, (void *)hdr_mbuf, 0 /*os_buf_index*/, dma_addr,
-                       (unsigned int)split_hdr_size, 0 /*wrid*/);
-
-               desc = vnic_rq_next_desc(rq);
-               type = RQ_ENET_TYPE_NOT_SOP;
-       } else {
-               mbuf->nb_segs = 1;
-               mbuf->port = enic->port_id;
-       }
-
-       mbuf->data_off = RTE_PKTMBUF_HEADROOM;
-       mbuf->next = NULL;
-
-       dma_addr = (dma_addr_t)
-           (mbuf->buf_physaddr + mbuf->data_off);
-
-       rq_enet_desc_enc(desc, dma_addr, type, mbuf->buf_len);
-
-       vnic_rq_post(rq, (void *)mbuf, 0 /*os_buf_index*/, dma_addr,
-               (unsigned int)mbuf->buf_len, 0 /*wrid*/);
-
-       return 0;
-}
-
-static int enic_rq_indicate_buf(struct vnic_rq *rq,
-       struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
-       int skipped, void *opaque)
-{
-       struct enic *enic = vnic_dev_priv(rq->vdev);
-       struct rte_mbuf **rx_pkt_bucket = (struct rte_mbuf **)opaque;
-       struct rte_mbuf *rx_pkt = NULL;
-       struct rte_mbuf *hdr_rx_pkt = NULL;
-
-       u8 type, color, eop, sop, ingress_port, vlan_stripped;
-       u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
-       u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
-       u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
-       u8 packet_error;
-       u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
-       u32 rss_hash;
-
-       cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
-               &type, &color, &q_number, &completed_index,
-               &ingress_port, &fcoe, &eop, &sop, &rss_type,
-               &csum_not_calc, &rss_hash, &bytes_written,
-               &packet_error, &vlan_stripped, &vlan_tci, &checksum,
-               &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
-               &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
-               &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
-               &fcs_ok);
-
-       rx_pkt = (struct rte_mbuf *)buf->os_buf;
-       buf->os_buf = NULL;
-
-       if (unlikely(packet_error)) {
-               dev_err(enic, "packet error\n");
-               rx_pkt->data_len = 0;
+       if (!rq->in_use)
                return 0;
-       }
-
-       if (unlikely(skipped)) {
-               rx_pkt->data_len = 0;
-               return 0;
-       }
 
-       if (likely(!vnic_get_hdr_split_size(enic->vdev))) {
-               /* No header split configured */
-               *rx_pkt_bucket = rx_pkt;
-               rx_pkt->pkt_len = bytes_written;
-
-               if (ipv4) {
-#ifdef RTE_NEXT_ABI
-                       rx_pkt->packet_type = RTE_PTYPE_L3_IPV4;
-#else
-                       rx_pkt->ol_flags |= PKT_RX_IPV4_HDR;
-#endif
-                       if (!csum_not_calc) {
-                               if (unlikely(!ipv4_csum_ok))
-                                       rx_pkt->ol_flags |= PKT_RX_IP_CKSUM_BAD;
-
-                               if ((tcp || udp) && (!tcp_udp_csum_ok))
-                                       rx_pkt->ol_flags |= PKT_RX_L4_CKSUM_BAD;
-                       }
-               } else if (ipv6)
-#ifdef RTE_NEXT_ABI
-                       rx_pkt->packet_type = RTE_PTYPE_L3_IPV6;
-#else
-                       rx_pkt->ol_flags |= PKT_RX_IPV6_HDR;
-#endif
-       } else {
-               /* Header split */
-               if (sop && !eop) {
-                       /* This piece is header */
-                       *rx_pkt_bucket = rx_pkt;
-                       rx_pkt->pkt_len = bytes_written;
-               } else {
-                       if (sop && eop) {
-                               /* The packet is smaller than split_hdr_size */
-                               *rx_pkt_bucket = rx_pkt;
-                               rx_pkt->pkt_len = bytes_written;
-                               if (ipv4) {
-#ifdef RTE_NEXT_ABI
-                                       rx_pkt->packet_type = RTE_PTYPE_L3_IPV4;
-#else
-                                       rx_pkt->ol_flags |= PKT_RX_IPV4_HDR;
-#endif
-                                       if (!csum_not_calc) {
-                                               if (unlikely(!ipv4_csum_ok))
-                                                       rx_pkt->ol_flags |=
-                                                           PKT_RX_IP_CKSUM_BAD;
-
-                                               if ((tcp || udp) &&
-                                                   (!tcp_udp_csum_ok))
-                                                       rx_pkt->ol_flags |=
-                                                           PKT_RX_L4_CKSUM_BAD;
-                                       }
-                               } else if (ipv6)
-#ifdef RTE_NEXT_ABI
-                                       rx_pkt->packet_type = RTE_PTYPE_L3_IPV6;
-#else
-                                       rx_pkt->ol_flags |= PKT_RX_IPV6_HDR;
-#endif
-                       } else {
-                               /* Payload */
-                               hdr_rx_pkt = *rx_pkt_bucket;
-                               hdr_rx_pkt->pkt_len += bytes_written;
-                               if (ipv4) {
-#ifdef RTE_NEXT_ABI
-                                       hdr_rx_pkt->packet_type =
-                                               RTE_PTYPE_L3_IPV4;
-#else
-                                       hdr_rx_pkt->ol_flags |= PKT_RX_IPV4_HDR;
-#endif
-                                       if (!csum_not_calc) {
-                                               if (unlikely(!ipv4_csum_ok))
-                                                       hdr_rx_pkt->ol_flags |=
-                                                           PKT_RX_IP_CKSUM_BAD;
-
-                                               if ((tcp || udp) &&
-                                                   (!tcp_udp_csum_ok))
-                                                       hdr_rx_pkt->ol_flags |=
-                                                           PKT_RX_L4_CKSUM_BAD;
-                                       }
-                               } else if (ipv6)
-#ifdef RTE_NEXT_ABI
-                                       hdr_rx_pkt->packet_type =
-                                               RTE_PTYPE_L3_IPV6;
-#else
-                                       hdr_rx_pkt->ol_flags |= PKT_RX_IPV6_HDR;
-#endif
+       dev_debug(enic, "queue %u, allocating %u rx queue mbufs\n", rq->index,
+                 rq->ring.desc_count);
 
-                       }
+       for (i = 0; i < rq->ring.desc_count; i++, rqd++) {
+               mb = rte_mbuf_raw_alloc(rq->mp);
+               if (mb == NULL) {
+                       dev_err(enic, "RX mbuf alloc failed queue_id=%u\n",
+                               (unsigned)rq->index);
+                       return -ENOMEM;
                }
-       }
-
-       rx_pkt->data_len = bytes_written;
-
-       if (rss_hash) {
-               rx_pkt->ol_flags |= PKT_RX_RSS_HASH;
-               rx_pkt->hash.rss = rss_hash;
-       }
 
-       if (vlan_tci) {
-               rx_pkt->ol_flags |= PKT_RX_VLAN_PKT;
-               rx_pkt->vlan_tci = vlan_tci;
+               mb->data_off = RTE_PKTMBUF_HEADROOM;
+               dma_addr = (dma_addr_t)(mb->buf_physaddr
+                          + RTE_PKTMBUF_HEADROOM);
+               rq_enet_desc_enc(rqd, dma_addr,
+                               (rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
+                               : RQ_ENET_TYPE_NOT_SOP),
+                               mb->buf_len - RTE_PKTMBUF_HEADROOM);
+               rq->mbuf_ring[i] = mb;
        }
 
-       return eop;
-}
-
-static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
-       __rte_unused u8 type, u16 q_number, u16 completed_index, void *opaque)
-{
-       struct enic *enic = vnic_dev_priv(vdev);
-
-       return vnic_rq_service(&enic->rq[q_number], cq_desc,
-               completed_index, VNIC_RQ_RETURN_DESC,
-               enic_rq_indicate_buf, opaque);
+       /* make sure all prior writes are complete before doing the PIO write */
+       rte_rmb();
 
-}
+       /* Post all but the last buffer to VIC. */
+       rq->posted_index = rq->ring.desc_count - 1;
 
-int enic_poll(struct vnic_rq *rq, struct rte_mbuf **rx_pkts,
-       unsigned int budget, unsigned int *work_done)
-{
-       struct enic *enic = vnic_dev_priv(rq->vdev);
-       unsigned int cq = enic_cq_rq(enic, rq->index);
-       int err = 0;
+       rq->rx_nb_hold = 0;
 
-       *work_done = vnic_cq_service(&enic->cq[cq],
-               budget, enic_rq_service, (void *)rx_pkts);
+       dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n",
+               enic->port_id, rq->index, rq->posted_index, rq->rx_nb_hold);
+       iowrite32(rq->posted_index, &rq->ctrl->posted_index);
+       iowrite32(0, &rq->ctrl->fetch_index);
+       rte_rmb();
 
-       if (*work_done) {
-               vnic_rq_fill(rq, enic_rq_alloc_buf);
+       return 0;
 
-               /* Need at least one buffer on ring to get going */
-               if (vnic_rq_desc_used(rq) == 0) {
-                       dev_err(enic, "Unable to alloc receive buffers\n");
-                       err = -1;
-               }
-       }
-       return err;
 }
 
 static void *
-enic_alloc_consistent(__rte_unused void *priv, size_t size,
+enic_alloc_consistent(void *priv, size_t size,
        dma_addr_t *dma_handle, u8 *name)
 {
        void *vaddr;
        const struct rte_memzone *rz;
        *dma_handle = 0;
+       struct enic *enic = (struct enic *)priv;
+       struct enic_memzone_entry *mze;
 
        rz = rte_memzone_reserve_aligned((const char *)name,
-               size, 0, 0, ENIC_ALIGN);
+                                        size, SOCKET_ID_ANY, 0, ENIC_ALIGN);
        if (!rz) {
-               pr_err("%s : Failed to allocate memory requested for %s",
+               pr_err("%s : Failed to allocate memory requested for %s\n",
                        __func__, name);
                return NULL;
        }
@@ -574,58 +361,124 @@ enic_alloc_consistent(__rte_unused void *priv, size_t size,
        vaddr = rz->addr;
        *dma_handle = (dma_addr_t)rz->phys_addr;
 
+       mze = rte_malloc("enic memzone entry",
+                        sizeof(struct enic_memzone_entry), 0);
+
+       if (!mze) {
+               pr_err("%s : Failed to allocate memory for memzone list\n",
+                      __func__);
+               rte_memzone_free(rz);
+               return NULL;
+       }
+
+       mze->rz = rz;
+
+       rte_spinlock_lock(&enic->memzone_list_lock);
+       LIST_INSERT_HEAD(&enic->memzone_list, mze, entries);
+       rte_spinlock_unlock(&enic->memzone_list_lock);
+
        return vaddr;
 }
 
 static void
-enic_free_consistent(__rte_unused struct rte_pci_device *hwdev,
-       __rte_unused size_t size,
-       __rte_unused void *vaddr,
-       __rte_unused dma_addr_t dma_handle)
+enic_free_consistent(void *priv,
+                    __rte_unused size_t size,
+                    void *vaddr,
+                    dma_addr_t dma_handle)
 {
-       /* Nothing to be done */
+       struct enic_memzone_entry *mze;
+       struct enic *enic = (struct enic *)priv;
+
+       rte_spinlock_lock(&enic->memzone_list_lock);
+       LIST_FOREACH(mze, &enic->memzone_list, entries) {
+               if (mze->rz->addr == vaddr &&
+                   mze->rz->phys_addr == dma_handle)
+                       break;
+       }
+       if (mze == NULL) {
+               rte_spinlock_unlock(&enic->memzone_list_lock);
+               dev_warning(enic,
+                           "Tried to free memory, but couldn't find it in the memzone list\n");
+               return;
+       }
+       LIST_REMOVE(mze, entries);
+       rte_spinlock_unlock(&enic->memzone_list_lock);
+       rte_memzone_free(mze->rz);
+       rte_free(mze);
+}
+
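+/* Read the current link state from the vNIC and refresh dev_link. */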
+int enic_link_update(struct enic *enic)
+{
+       struct rte_eth_dev *eth_dev = enic->rte_dev;
+       int ret;
+       int link_status = 0;
+
+       link_status = enic_get_link_status(enic);
+       ret = (link_status == enic->link_status);
+       enic->link_status = link_status;
+       eth_dev->data->dev_link.link_status = link_status;
+       eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+       eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
+       return ret;
 }
 
 static void
-enic_intr_handler(__rte_unused struct rte_intr_handle *handle,
-       void *arg)
+enic_intr_handler(void *arg)
 {
-       struct enic *enic = pmd_priv((struct rte_eth_dev *)arg);
+       struct rte_eth_dev *dev = (struct rte_eth_dev *)arg;
+       struct enic *enic = pmd_priv(dev);
 
        vnic_intr_return_all_credits(&enic->intr);
 
+       enic_link_update(enic);
+       _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
        enic_log_q_error(enic);
 }
 
 int enic_enable(struct enic *enic)
 {
        unsigned int index;
+       int err;
        struct rte_eth_dev *eth_dev = enic->rte_dev;
 
        eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
        eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
-       vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */
+
+       /* vnic notification of link status has already been turned on in
+        * enic_dev_init() which is called during probe time.  Here we are
+        * just turning on interrupt vector 0 if needed.
+        */
+       if (eth_dev->data->dev_conf.intr_conf.lsc)
+               vnic_dev_notify_set(enic->vdev, 0);
 
        if (enic_clsf_init(enic))
                dev_warning(enic, "Init of hash table for clsf failed. "\
                        "Flow director feature will not work\n");
 
-       /* Fill RQ bufs */
        for (index = 0; index < enic->rq_count; index++) {
-               vnic_rq_fill(&enic->rq[index], enic_rq_alloc_buf);
-
-               /* Need at least one buffer on ring to get going
-               */
-               if (vnic_rq_desc_used(&enic->rq[index]) == 0) {
-                       dev_err(enic, "Unable to alloc receive buffers\n");
-                       return -1;
+               err = enic_alloc_rx_queue_mbufs(enic,
+                       &enic->rq[enic_rte_rq_idx_to_sop_idx(index)]);
+               if (err) {
+                       dev_err(enic, "Failed to alloc sop RX queue mbufs\n");
+                       return err;
+               }
+               err = enic_alloc_rx_queue_mbufs(enic,
+                       &enic->rq[enic_rte_rq_idx_to_data_idx(index)]);
+               if (err) {
+                       /* release the allocated mbufs for the sop rq*/
+                       enic_rxmbuf_queue_release(enic,
+                               &enic->rq[enic_rte_rq_idx_to_sop_idx(index)]);
+
+                       dev_err(enic, "Failed to alloc data RX queue mbufs\n");
+                       return err;
                }
        }
 
        for (index = 0; index < enic->wq_count; index++)
-               vnic_wq_enable(&enic->wq[index]);
+               enic_start_wq(enic, index);
        for (index = 0; index < enic->rq_count; index++)
-               vnic_rq_enable(&enic->rq[index]);
+               enic_start_rq(enic, index);
+
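+       /* Install the primary MAC address in the vNIC. */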
+       vnic_dev_add_addr(enic->vdev, enic->mac_addr);
 
        vnic_dev_enable_wait(enic->vdev);
 
@@ -645,7 +498,7 @@ int enic_alloc_intr_resources(struct enic *enic)
 
        dev_info(enic, "vNIC resources used:  "\
                "wq %d rq %d cq %d intr %d\n",
-               enic->wq_count, enic->rq_count,
+               enic->wq_count, enic_vnic_rq_count(enic),
                enic->cq_count, enic->intr_count);
 
        err = vnic_intr_alloc(enic->vdev, &enic->intr, 0);
@@ -657,84 +510,260 @@ int enic_alloc_intr_resources(struct enic *enic)
 
 void enic_free_rq(void *rxq)
 {
-       struct vnic_rq *rq = (struct vnic_rq *)rxq;
-       struct enic *enic = vnic_dev_priv(rq->vdev);
+       struct vnic_rq *rq_sop, *rq_data;
+       struct enic *enic;
+
+       if (rxq == NULL)
+               return;
+
+       rq_sop = (struct vnic_rq *)rxq;
+       enic = vnic_dev_priv(rq_sop->vdev);
+       rq_data = &enic->rq[rq_sop->data_queue_idx];
+
+       enic_rxmbuf_queue_release(enic, rq_sop);
+       if (rq_data->in_use)
+               enic_rxmbuf_queue_release(enic, rq_data);
+
+       rte_free(rq_sop->mbuf_ring);
+       if (rq_data->in_use)
+               rte_free(rq_data->mbuf_ring);
+
+       rq_sop->mbuf_ring = NULL;
+       rq_data->mbuf_ring = NULL;
 
-       vnic_rq_free(rq);
-       vnic_cq_free(&enic->cq[rq->index]);
+       vnic_rq_free(rq_sop);
+       if (rq_data->in_use)
+               vnic_rq_free(rq_data);
+
+       vnic_cq_free(&enic->cq[enic_sop_rq_idx_to_cq_idx(rq_sop->index)]);
+
+       rq_sop->in_use = 0;
+       rq_data->in_use = 0;
 }
 
 void enic_start_wq(struct enic *enic, uint16_t queue_idx)
 {
+       struct rte_eth_dev *eth_dev = enic->rte_dev;
        vnic_wq_enable(&enic->wq[queue_idx]);
+       eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
 }
 
 int enic_stop_wq(struct enic *enic, uint16_t queue_idx)
 {
-       return vnic_wq_disable(&enic->wq[queue_idx]);
+       struct rte_eth_dev *eth_dev = enic->rte_dev;
+       int ret;
+
+       ret = vnic_wq_disable(&enic->wq[queue_idx]);
+       if (ret)
+               return ret;
+
+       eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
+       return 0;
 }
 
 void enic_start_rq(struct enic *enic, uint16_t queue_idx)
 {
-       vnic_rq_enable(&enic->rq[queue_idx]);
+       struct vnic_rq *rq_sop;
+       struct vnic_rq *rq_data;
+       rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
+       rq_data = &enic->rq[rq_sop->data_queue_idx];
+       struct rte_eth_dev *eth_dev = enic->rte_dev;
+
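+       /* Enable the data RQ first (when Rx scatter is in use) so the SOP RQ
+        * is never active while its data RQ is still disabled.
+        */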
+       if (rq_data->in_use)
+               vnic_rq_enable(rq_data);
+       rte_mb();
+       vnic_rq_enable(rq_sop);
+       eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
 }
 
 int enic_stop_rq(struct enic *enic, uint16_t queue_idx)
 {
-       return vnic_rq_disable(&enic->rq[queue_idx]);
+       int ret1 = 0, ret2 = 0;
+       struct rte_eth_dev *eth_dev = enic->rte_dev;
+       struct vnic_rq *rq_sop;
+       struct vnic_rq *rq_data;
+       rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
+       rq_data = &enic->rq[rq_sop->data_queue_idx];
+
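+       /* Disable in the opposite order: the SOP RQ first, then the data RQ. */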
+       ret2 = vnic_rq_disable(rq_sop);
+       rte_mb();
+       if (rq_data->in_use)
+               ret1 = vnic_rq_disable(rq_data);
+
+       if (ret2)
+               return ret2;
+       else if (ret1)
+               return ret1;
+
+       eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
+       return 0;
 }
 
 int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
        unsigned int socket_id, struct rte_mempool *mp,
-       uint16_t nb_desc)
+       uint16_t nb_desc, uint16_t free_thresh)
 {
-       int err;
-       struct vnic_rq *rq = &enic->rq[queue_idx];
-
-       rq->socket_id = socket_id;
-       rq->mp = mp;
+       int rc;
+       uint16_t sop_queue_idx = enic_rte_rq_idx_to_sop_idx(queue_idx);
+       uint16_t data_queue_idx = enic_rte_rq_idx_to_data_idx(queue_idx);
+       struct vnic_rq *rq_sop = &enic->rq[sop_queue_idx];
+       struct vnic_rq *rq_data = &enic->rq[data_queue_idx];
+       unsigned int mbuf_size, mbufs_per_pkt;
+       unsigned int nb_sop_desc, nb_data_desc;
+       uint16_t min_sop, max_sop, min_data, max_data;
+       uint16_t mtu = enic->rte_dev->data->mtu;
+
+       rq_sop->is_sop = 1;
+       rq_sop->data_queue_idx = data_queue_idx;
+       rq_data->is_sop = 0;
+       rq_data->data_queue_idx = 0;
+       rq_sop->socket_id = socket_id;
+       rq_sop->mp = mp;
+       rq_data->socket_id = socket_id;
+       rq_data->mp = mp;
+       rq_sop->in_use = 1;
+       rq_sop->rx_free_thresh = free_thresh;
+       rq_data->rx_free_thresh = free_thresh;
+       dev_debug(enic, "Set queue_id:%u free thresh:%u\n", queue_idx,
+                 free_thresh);
+
+       mbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
+                              RTE_PKTMBUF_HEADROOM);
+
+       if (enic->rte_dev->data->dev_conf.rxmode.enable_scatter) {
+               dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx);
+               /* ceil((mtu + ETHER_HDR_LEN + 4)/mbuf_size) */
+               mbufs_per_pkt = ((mtu + ETHER_HDR_LEN + 4) +
+                                (mbuf_size - 1)) / mbuf_size;
+       } else {
+               dev_info(enic, "Scatter rx mode disabled\n");
+               mbufs_per_pkt = 1;
+       }
 
-       if (nb_desc) {
-               if (nb_desc > enic->config.rq_desc_count) {
-                       dev_warning(enic,
-                               "RQ %d - number of rx desc in cmd line (%d)"\
-                               "is greater than that in the UCSM/CIMC adapter"\
-                               "policy.  Applying the value in the adapter "\
-                               "policy (%d).\n",
-                               queue_idx, nb_desc, enic->config.rq_desc_count);
-               } else if (nb_desc != enic->config.rq_desc_count) {
-                       enic->config.rq_desc_count = nb_desc;
-                       dev_info(enic,
-                               "RX Queues - effective number of descs:%d\n",
-                               nb_desc);
-               }
+       if (mbufs_per_pkt > 1) {
+               dev_info(enic, "Rq %u Scatter rx mode in use\n", queue_idx);
+               rq_sop->data_queue_enable = 1;
+               rq_data->in_use = 1;
+       } else {
+               dev_info(enic, "Rq %u Scatter rx mode not being used\n",
+                        queue_idx);
+               rq_sop->data_queue_enable = 0;
+               rq_data->in_use = 0;
        }
 
-       /* Allocate queue resources */
-       err = vnic_rq_alloc(enic->vdev, &enic->rq[queue_idx], queue_idx,
-               enic->config.rq_desc_count,
-               sizeof(struct rq_enet_desc));
-       if (err) {
-               dev_err(enic, "error in allocation of rq\n");
-               return err;
+       /* number of descriptors has to be a multiple of 32 */
+       nb_sop_desc = (nb_desc / mbufs_per_pkt) & ~0x1F;
+       nb_data_desc = (nb_desc - nb_sop_desc) & ~0x1F;
+
+       rq_sop->max_mbufs_per_pkt = mbufs_per_pkt;
+       rq_data->max_mbufs_per_pkt = mbufs_per_pkt;
+
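+       /* Derive the valid SOP/data descriptor split: with scatter, each packet
+        * needs one SOP descriptor plus (mbufs_per_pkt - 1) data descriptors,
+        * bounded by the adapter's configured rq_desc_count.
+        */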
+       if (mbufs_per_pkt > 1) {
+               min_sop = 64;
+               max_sop = ((enic->config.rq_desc_count /
+                           (mbufs_per_pkt - 1)) & ~0x1F);
+               min_data = min_sop * (mbufs_per_pkt - 1);
+               max_data = enic->config.rq_desc_count;
+       } else {
+               min_sop = 64;
+               max_sop = enic->config.rq_desc_count;
+               min_data = 0;
+               max_data = 0;
        }
 
-       err = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx,
-               socket_id, enic->config.rq_desc_count,
-               sizeof(struct cq_enet_rq_desc));
-       if (err) {
-               vnic_rq_free(rq);
+       if (nb_desc < (min_sop + min_data)) {
+               dev_warning(enic,
+                           "Number of rx descs too low, adjusting to minimum\n");
+               nb_sop_desc = min_sop;
+               nb_data_desc = min_data;
+       } else if (nb_desc > (max_sop + max_data)) {
+               dev_warning(enic,
+                           "Number of rx_descs too high, adjusting to maximum\n");
+               nb_sop_desc = max_sop;
+               nb_data_desc = max_data;
+       }
+       if (mbufs_per_pkt > 1) {
+               dev_info(enic, "For mtu %d and mbuf size %d valid rx descriptor range is %d to %d\n",
+                        mtu, mbuf_size, min_sop + min_data,
+                        max_sop + max_data);
+       }
+       dev_info(enic, "Using %d rx descriptors (sop %d, data %d)\n",
+                nb_sop_desc + nb_data_desc, nb_sop_desc, nb_data_desc);
+
+       /* Allocate sop queue resources */
+       rc = vnic_rq_alloc(enic->vdev, rq_sop, sop_queue_idx,
+               nb_sop_desc, sizeof(struct rq_enet_desc));
+       if (rc) {
+               dev_err(enic, "error in allocation of sop rq\n");
+               goto err_exit;
+       }
+       nb_sop_desc = rq_sop->ring.desc_count;
+
+       if (rq_data->in_use) {
+               /* Allocate data queue resources */
+               rc = vnic_rq_alloc(enic->vdev, rq_data, data_queue_idx,
+                                  nb_data_desc,
+                                  sizeof(struct rq_enet_desc));
+               if (rc) {
+                       dev_err(enic, "error in allocation of data rq\n");
+                       goto err_free_rq_sop;
+               }
+               nb_data_desc = rq_data->ring.desc_count;
+       }
+       rc = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx,
+                          socket_id, nb_sop_desc + nb_data_desc,
+                          sizeof(struct cq_enet_rq_desc));
+       if (rc) {
                dev_err(enic, "error in allocation of cq for rq\n");
+               goto err_free_rq_data;
        }
 
-       return err;
+       /* Allocate the mbuf rings */
+       rq_sop->mbuf_ring = (struct rte_mbuf **)
+               rte_zmalloc_socket("rq->mbuf_ring",
+                                  sizeof(struct rte_mbuf *) * nb_sop_desc,
+                                  RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
+       if (rq_sop->mbuf_ring == NULL)
+               goto err_free_cq;
+
+       if (rq_data->in_use) {
+               rq_data->mbuf_ring = (struct rte_mbuf **)
+                       rte_zmalloc_socket("rq->mbuf_ring",
+                               sizeof(struct rte_mbuf *) * nb_data_desc,
+                               RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
+               if (rq_data->mbuf_ring == NULL)
+                       goto err_free_sop_mbuf;
+       }
+
+       rq_sop->tot_nb_desc = nb_desc; /* squirrel away for MTU update function */
+
+       return 0;
+
+err_free_sop_mbuf:
+       rte_free(rq_sop->mbuf_ring);
+err_free_cq:
+       /* cleanup on error */
+       vnic_cq_free(&enic->cq[queue_idx]);
+err_free_rq_data:
+       if (rq_data->in_use)
+               vnic_rq_free(rq_data);
+err_free_rq_sop:
+       vnic_rq_free(rq_sop);
+err_exit:
+       return -ENOMEM;
 }
 
 void enic_free_wq(void *txq)
 {
-       struct vnic_wq *wq = (struct vnic_wq *)txq;
-       struct enic *enic = vnic_dev_priv(wq->vdev);
+       struct vnic_wq *wq;
+       struct enic *enic;
+
+       if (txq == NULL)
+               return;
 
+       wq = (struct vnic_wq *)txq;
+       enic = vnic_dev_priv(wq->vdev);
+       rte_memzone_free(wq->cqmsg_rz);
        vnic_wq_free(wq);
        vnic_cq_free(&enic->cq[enic->rq_count + wq->index]);
 }
@@ -745,6 +774,8 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
        int err;
        struct vnic_wq *wq = &enic->wq[queue_idx];
        unsigned int cq_index = enic_cq_wq(enic, queue_idx);
+       char name[NAME_MAX];
+       static int instance;
 
        wq->socket_id = socket_id;
        if (nb_desc) {
@@ -780,6 +811,18 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
                dev_err(enic, "error in allocation of cq for wq\n");
        }
 
+       /* set up the CQ message area (the WQ's CQ writes completions here) */
+       snprintf((char *)name, sizeof(name),
+                "vnic_cqmsg-%s-%d-%d", enic->bdf_name, queue_idx,
+               instance++);
+
+       wq->cqmsg_rz = rte_memzone_reserve_aligned((const char *)name,
+                                                  sizeof(uint32_t),
+                                                  SOCKET_ID_ANY, 0,
+                                                  ENIC_ALIGN);
+       if (!wq->cqmsg_rz)
+               return -ENOMEM;
+
        return err;
 }
 
@@ -790,6 +833,10 @@ int enic_disable(struct enic *enic)
 
        vnic_intr_mask(&enic->intr);
        (void)vnic_intr_masked(&enic->intr); /* flush write */
+       rte_intr_disable(&enic->pdev->intr_handle);
+       rte_intr_callback_unregister(&enic->pdev->intr_handle,
+                                    enic_intr_handler,
+                                    (void *)enic->rte_dev);
 
        vnic_dev_disable(enic->vdev);
 
@@ -803,19 +850,29 @@ int enic_disable(struct enic *enic)
                if (err)
                        return err;
        }
-       for (i = 0; i < enic->rq_count; i++) {
-               err = vnic_rq_disable(&enic->rq[i]);
-               if (err)
-                       return err;
+       for (i = 0; i < enic_vnic_rq_count(enic); i++) {
+               if (enic->rq[i].in_use) {
+                       err = vnic_rq_disable(&enic->rq[i]);
+                       if (err)
+                               return err;
+               }
        }
 
+       /* If we were using interrupts, set the interrupt vector to -1
+        * to disable interrupts.  We are not disabling link notifications,
+        * though, as we want the polling of link status to continue working.
+        */
+       if (enic->rte_dev->data->dev_conf.intr_conf.lsc)
+               vnic_dev_notify_set(enic->vdev, -1);
+
        vnic_dev_set_reset_flag(enic->vdev, 1);
-       vnic_dev_notify_unset(enic->vdev);
 
        for (i = 0; i < enic->wq_count; i++)
                vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
-       for (i = 0; i < enic->rq_count; i++)
-               vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
+
+       for (i = 0; i < enic_vnic_rq_count(enic); i++)
+               if (enic->rq[i].in_use)
+                       vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
        for (i = 0; i < enic->cq_count; i++)
                vnic_cq_clean(&enic->cq[i]);
        vnic_intr_clean(&enic->intr);
@@ -888,7 +945,7 @@ static int enic_set_rsskey(struct enic *enic)
                rss_key_buf_pa,
                sizeof(union vnic_rss_key));
 
-       enic_free_consistent(enic->pdev, sizeof(union vnic_rss_key),
+       enic_free_consistent(enic, sizeof(union vnic_rss_key),
                rss_key_buf_va, rss_key_buf_pa);
 
        return err;
@@ -909,13 +966,14 @@ static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
                return -ENOMEM;
 
        for (i = 0; i < (1 << rss_hash_bits); i++)
-               (*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count;
+               (*rss_cpu_buf_va).cpu[i / 4].b[i % 4] =
+                       enic_rte_rq_idx_to_sop_idx(i % enic->rq_count);
 
        err = enic_set_rss_cpu(enic,
                rss_cpu_buf_pa,
                sizeof(union vnic_rss_cpu));
 
-       enic_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu),
+       enic_free_consistent(enic, sizeof(union vnic_rss_cpu),
                rss_cpu_buf_va, rss_cpu_buf_pa);
 
        return err;
@@ -971,14 +1029,14 @@ int enic_setup_finish(struct enic *enic)
 {
        int ret;
 
+       enic_init_soft_stats(enic);
+
        ret = enic_set_rss_nic_cfg(enic);
        if (ret) {
                dev_err(enic, "Failed to config nic, aborting.\n");
                return -1;
        }
 
-       vnic_dev_add_addr(enic->vdev, enic->mac_addr);
-
        /* Default conf */
        vnic_dev_packet_filter(enic->vdev,
                1 /* directed  */,
@@ -1009,6 +1067,9 @@ static void enic_dev_deinit(struct enic *enic)
 {
        struct rte_eth_dev *eth_dev = enic->rte_dev;
 
+       /* stop link status checking */
+       vnic_dev_notify_unset(enic->vdev);
+
        rte_free(eth_dev->data->mac_addrs);
 }
 
@@ -1016,24 +1077,205 @@ static void enic_dev_deinit(struct enic *enic)
 int enic_set_vnic_res(struct enic *enic)
 {
        struct rte_eth_dev *eth_dev = enic->rte_dev;
+       int rc = 0;
 
-       if ((enic->rq_count < eth_dev->data->nb_rx_queues) ||
-               (enic->wq_count < eth_dev->data->nb_tx_queues)) {
-               dev_err(dev, "Not enough resources configured, aborting\n");
-               return -1;
+       /* With Rx scatter support, two RQs are now used per RQ used by
+        * the application.
+        */
+       if (enic->conf_rq_count < eth_dev->data->nb_rx_queues) {
+               dev_err(dev, "Not enough Receive queues. Requested:%u which uses %d RQs on VIC, Configured:%u\n",
+                       eth_dev->data->nb_rx_queues,
+                       eth_dev->data->nb_rx_queues * 2, enic->conf_rq_count);
+               rc = -EINVAL;
+       }
+       if (enic->conf_wq_count < eth_dev->data->nb_tx_queues) {
+               dev_err(dev, "Not enough Transmit queues. Requested:%u, Configured:%u\n",
+                       eth_dev->data->nb_tx_queues, enic->conf_wq_count);
+               rc = -EINVAL;
        }
 
-       enic->rq_count = eth_dev->data->nb_rx_queues;
-       enic->wq_count = eth_dev->data->nb_tx_queues;
-       if (enic->cq_count < (enic->rq_count + enic->wq_count)) {
-               dev_err(dev, "Not enough resources configured, aborting\n");
-               return -1;
+       if (enic->conf_cq_count < (eth_dev->data->nb_rx_queues +
+                                  eth_dev->data->nb_tx_queues)) {
+               dev_err(dev, "Not enough Completion queues. Required:%u, Configured:%u\n",
+                       (eth_dev->data->nb_rx_queues +
+                        eth_dev->data->nb_tx_queues), enic->conf_cq_count);
+               rc = -EINVAL;
+       }
+
+       if (rc == 0) {
+               enic->rq_count = eth_dev->data->nb_rx_queues;
+               enic->wq_count = eth_dev->data->nb_tx_queues;
+               enic->cq_count = enic->rq_count + enic->wq_count;
+       }
+
+       return rc;
+}
+
+/* Re-initialize the RQ pair (SOP and data) and its completion queue for an
+ * MTU change, then repost fresh mbufs.
+ */
+static int
+enic_reinit_rq(struct enic *enic, unsigned int rq_idx)
+{
+       struct vnic_rq *sop_rq, *data_rq;
+       unsigned int cq_idx = enic_cq_rq(enic, rq_idx);
+       int rc = 0;
+
+       sop_rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
+       data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(rq_idx)];
+
+       vnic_cq_clean(&enic->cq[cq_idx]);
+       vnic_cq_init(&enic->cq[cq_idx],
+                    0 /* flow_control_enable */,
+                    1 /* color_enable */,
+                    0 /* cq_head */,
+                    0 /* cq_tail */,
+                    1 /* cq_tail_color */,
+                    0 /* interrupt_enable */,
+                    1 /* cq_entry_enable */,
+                    0 /* cq_message_enable */,
+                    0 /* interrupt offset */,
+                    0 /* cq_message_addr */);
+
+
+       vnic_rq_init_start(sop_rq, enic_cq_rq(enic,
+                          enic_rte_rq_idx_to_sop_idx(rq_idx)), 0,
+                          sop_rq->ring.desc_count - 1, 1, 0);
+       if (data_rq->in_use) {
+               vnic_rq_init_start(data_rq,
+                                  enic_cq_rq(enic,
+                                  enic_rte_rq_idx_to_data_idx(rq_idx)), 0,
+                                  data_rq->ring.desc_count - 1, 1, 0);
+       }
+
+       rc = enic_alloc_rx_queue_mbufs(enic, sop_rq);
+       if (rc)
+               return rc;
+
+       if (data_rq->in_use) {
+               rc = enic_alloc_rx_queue_mbufs(enic, data_rq);
+               if (rc) {
+                       enic_rxmbuf_queue_release(enic, sop_rq);
+                       return rc;
+               }
        }
 
-       enic->cq_count = enic->rq_count + enic->wq_count;
        return 0;
 }
 
+/* The Cisco NIC can send and receive packets up to a max packet size
+ * determined by the NIC type and firmware. There is also an MTU
+ * configured into the NIC via the CIMC/UCSM management interface
+ * which can be overridden by this function (up to the max packet size).
+ * Depending on the network setup, doing so may cause packet drops
+ * and unexpected behavior.
+ */
+int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
+{
+       unsigned int rq_idx;
+       struct vnic_rq *rq;
+       int rc = 0;
+       uint16_t old_mtu;       /* previous setting */
+       uint16_t config_mtu;    /* Value configured into NIC via CIMC/UCSM */
+       struct rte_eth_dev *eth_dev = enic->rte_dev;
+
+       old_mtu = eth_dev->data->mtu;
+       config_mtu = enic->config.mtu;
+
+       if (new_mtu > enic->max_mtu) {
+               dev_err(enic,
+                       "MTU not updated: requested (%u) greater than max (%u)\n",
+                       new_mtu, enic->max_mtu);
+               return -EINVAL;
+       }
+       if (new_mtu < ENIC_MIN_MTU) {
+               dev_info(enic,
+                       "MTU not updated: requested (%u) less than min (%u)\n",
+                       new_mtu, ENIC_MIN_MTU);
+               return -EINVAL;
+       }
+       if (new_mtu > config_mtu)
+               dev_warning(enic,
+                       "MTU (%u) is greater than value configured in NIC (%u)\n",
+                       new_mtu, config_mtu);
+
+       /* The easy case is when scatter is disabled. However if the MTU
+        * becomes greater than the mbuf data size, packet drops will ensue.
+        */
+       if (!enic->rte_dev->data->dev_conf.rxmode.enable_scatter) {
+               eth_dev->data->mtu = new_mtu;
+               goto set_mtu_done;
+       }
+
+       /* Rx scatter is enabled so reconfigure RQs on the fly. The point is to
+        * change Rx scatter mode if necessary for better performance. I.e. if
+        * MTU was greater than the mbuf size and now it's less, scatter Rx
+        * doesn't have to be used and vice versa.
+        */
+       rte_spinlock_lock(&enic->mtu_lock);
+
+       /* Stop traffic on all RQs */
+       for (rq_idx = 0; rq_idx < enic->rq_count * 2; rq_idx++) {
+               rq = &enic->rq[rq_idx];
+               if (rq->is_sop && rq->in_use) {
+                       rc = enic_stop_rq(enic,
+                                         enic_sop_rq_idx_to_rte_idx(rq_idx));
+                       if (rc) {
+                               dev_err(enic, "Failed to stop Rq %u\n", rq_idx);
+                               goto set_mtu_done;
+                       }
+               }
+       }
+
+       /* replace Rx function with a no-op to avoid getting stale pkts */
+       eth_dev->rx_pkt_burst = enic_dummy_recv_pkts;
+       rte_mb();
+
+       /* Allow time for threads to exit the real Rx function. */
+       usleep(100000);
+
+       /* now it is safe to reconfigure the RQs */
+
+       /* update the mtu */
+       eth_dev->data->mtu = new_mtu;
+
+       /* free and reallocate RQs with the new MTU */
+       for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
+               rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
+
+               enic_free_rq(rq);
+               rc = enic_alloc_rq(enic, rq_idx, rq->socket_id, rq->mp,
+                                  rq->tot_nb_desc, rq->rx_free_thresh);
+               if (rc) {
+                       dev_err(enic,
+                               "Fatal MTU alloc error- No traffic will pass\n");
+                       goto set_mtu_done;
+               }
+
+               rc = enic_reinit_rq(enic, rq_idx);
+               if (rc) {
+                       dev_err(enic,
+                               "Fatal MTU RQ reinit- No traffic will pass\n");
+                       goto set_mtu_done;
+               }
+       }
+
+       /* put back the real receive function */
+       rte_mb();
+       eth_dev->rx_pkt_burst = enic_recv_pkts;
+       rte_mb();
+
+       /* restart Rx traffic */
+       for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
+               rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
+               if (rq->is_sop && rq->in_use)
+                       enic_start_rq(enic, rq_idx);
+       }
+
+set_mtu_done:
+       dev_info(enic, "MTU changed from %u to %u\n", old_mtu, new_mtu);
+       rte_spinlock_unlock(&enic->mtu_lock);
+       return rc;
+}
+
 static int enic_dev_init(struct enic *enic)
 {
        int err;
@@ -1049,20 +1291,34 @@ static int enic_dev_init(struct enic *enic)
                return err;
        }
 
-       eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr", ETH_ALEN, 0);
+       /* Get available resource counts */
+       enic_get_res_counts(enic);
+       if (enic->conf_rq_count == 1) {
+               dev_err(enic, "Running with only 1 RQ configured in the vNIC is not supported.\n");
+               dev_err(enic, "Please configure 2 RQs in the vNIC for each Rx queue used by DPDK.\n");
+               dev_err(enic, "See the ENIC PMD guide for more information.\n");
+               return -EINVAL;
+       }
+
+       /* Get the supported filters */
+       enic_fdir_info(enic);
+
+       eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr", ETH_ALEN
+                                               * ENIC_MAX_MAC_ADDR, 0);
        if (!eth_dev->data->mac_addrs) {
                dev_err(enic, "mac addr storage alloc failed, aborting.\n");
                return -1;
        }
        ether_addr_copy((struct ether_addr *) enic->mac_addr,
-               &eth_dev->data->mac_addrs[0]);
+                       eth_dev->data->mac_addrs);
 
+       vnic_dev_set_reset_flag(enic->vdev, 0);
 
-       /* Get available resource counts
-       */
-       enic_get_res_counts(enic);
+       LIST_INIT(&enic->flows);
+       rte_spinlock_init(&enic->flows_lock);
 
-       vnic_dev_set_reset_flag(enic->vdev, 0);
+       /* set up link status checking */
+       vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */
 
        return 0;
 
@@ -1073,7 +1329,7 @@ int enic_probe(struct enic *enic)
        struct rte_pci_device *pdev = enic->pdev;
        int err = -1;
 
-       dev_debug(enic, " Initializing ENIC PMD version %s\n", DRV_VERSION);
+       dev_debug(enic, " Initializing ENIC PMD\n");
 
        enic->bar0.vaddr = (void *)pdev->mem_resource[0].addr;
        enic->bar0.len = pdev->mem_resource[0].len;
@@ -1085,6 +1341,9 @@ int enic_probe(struct enic *enic)
                goto err_out;
        }
 
+       LIST_INIT(&enic->memzone_list);
+       rte_spinlock_init(&enic->memzone_list_lock);
+
        vnic_register_cbacks(enic->vdev,
                enic_alloc_consistent,
                enic_free_consistent);
@@ -1098,7 +1357,7 @@ int enic_probe(struct enic *enic)
 
        /* Set ingress vlan rewrite mode before vnic initialization */
        err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
-               IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN);
+               IG_VLAN_REWRITE_MODE_PASS_THRU);
        if (err) {
                dev_err(enic,
                        "Failed to set ingress vlan rewrite mode, aborting.\n");