drivers: remove direct access to interrupt handle
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index 235ef59..5cc6d9f 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -8,7 +8,6 @@
 #include <sys/stat.h>
 #include <sys/mman.h>
 #include <fcntl.h>
-#include <libgen.h>
 
 #include <rte_pci.h>
 #include <rte_bus_pci.h>
@@ -16,7 +15,8 @@
 #include <rte_malloc.h>
 #include <rte_mbuf.h>
 #include <rte_string_fns.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
+#include <rte_geneve.h>
 
 #include "enic_compat.h"
 #include "enic.h"
@@ -51,7 +51,7 @@ static int is_eth_addr_valid(uint8_t *addr)
        return !is_mcast_addr(addr) && !is_zero_addr(addr);
 }
 
-static void
+void
 enic_rxmbuf_queue_release(__rte_unused struct enic *enic, struct vnic_rq *rq)
 {
        uint16_t i;
@@ -69,18 +69,18 @@ enic_rxmbuf_queue_release(__rte_unused struct enic *enic, struct vnic_rq *rq)
        }
 }
 
-static void enic_free_wq_buf(struct vnic_wq_buf *buf)
+void enic_free_wq_buf(struct rte_mbuf **buf)
 {
-       struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->mb;
+       struct rte_mbuf *mbuf = *buf;
 
        rte_pktmbuf_free_seg(mbuf);
-       buf->mb = NULL;
+       *buf = NULL;
 }
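
For illustration (not part of the patch): the WQ ring now holds bare mbuf
pointers, so a cleanup loop can pass each slot's address straight to this
callback. The loop below is a hedged sketch; the ring and count parameters
are hypothetical, not the actual vnic_wq layout.

	static void wq_ring_cleanup(struct rte_mbuf **bufs, unsigned int count)
	{
		unsigned int i;

		/* enic_free_wq_buf() frees the segment and NULLs the slot */
		for (i = 0; i < count; i++)
			enic_free_wq_buf(&bufs[i]);
	}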
 
 static void enic_log_q_error(struct enic *enic)
 {
        unsigned int i;
-       u32 error_status;
+       uint32_t error_status;
 
        for (i = 0; i < enic->wq_count; i++) {
                error_status = vnic_wq_error_status(&enic->wq[i]);
@@ -116,11 +116,18 @@ static void enic_init_soft_stats(struct enic *enic)
        enic_clear_soft_stats(enic);
 }
 
-void enic_dev_stats_clear(struct enic *enic)
+int enic_dev_stats_clear(struct enic *enic)
 {
-       if (vnic_dev_stats_clear(enic->vdev))
+       int ret;
+
+       ret = vnic_dev_stats_clear(enic->vdev);
+       if (ret != 0) {
                dev_err(enic, "Error in clearing stats\n");
+               return ret;
+       }
        enic_clear_soft_stats(enic);
+
+       return 0;
 }
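
Returning the devcmd status matters because the ethdev stats_reset callback
returns int. A hedged sketch of the ethdev-level wrapper (the wrapper name
here is an assumption, not necessarily the one used in enic_ethdev.c):

	static int enicpmd_dev_stats_reset(struct rte_eth_dev *eth_dev)
	{
		/* propagate the devcmd error up to rte_eth_stats_reset() */
		return enic_dev_stats_clear(pmd_priv(eth_dev));
	}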
 
 int enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
@@ -162,13 +169,12 @@ int enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
        return 0;
 }
 
-void enic_del_mac_address(struct enic *enic, int mac_index)
+int enic_del_mac_address(struct enic *enic, int mac_index)
 {
        struct rte_eth_dev *eth_dev = enic->rte_dev;
        uint8_t *mac_addr = eth_dev->data->mac_addrs[mac_index].addr_bytes;
 
-       if (vnic_dev_del_addr(enic->vdev, mac_addr))
-               dev_err(enic, "del mac addr failed\n");
+       return vnic_dev_del_addr(enic->vdev, mac_addr);
 }
 
 int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr)
@@ -186,8 +192,7 @@ int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr)
        return err;
 }
 
-static void
-enic_free_rq_buf(struct rte_mbuf **mbuf)
+void enic_free_rq_buf(struct rte_mbuf **mbuf)
 {
        if (*mbuf == NULL)
                return;
@@ -200,10 +205,15 @@ void enic_init_vnic_resources(struct enic *enic)
 {
        unsigned int error_interrupt_enable = 1;
        unsigned int error_interrupt_offset = 0;
+       unsigned int rxq_interrupt_enable = 0;
+       unsigned int rxq_interrupt_offset = ENICPMD_RXQ_INTR_OFFSET;
        unsigned int index = 0;
        unsigned int cq_idx;
        struct vnic_rq *data_rq;
 
+       if (enic->rte_dev->data->dev_conf.intr_conf.rxq)
+               rxq_interrupt_enable = 1;
+
        for (index = 0; index < enic->rq_count; index++) {
                cq_idx = enic_cq_rq(enic, enic_rte_rq_idx_to_sop_idx(index));
 
@@ -212,24 +222,25 @@ void enic_init_vnic_resources(struct enic *enic)
                        error_interrupt_enable,
                        error_interrupt_offset);
 
-               data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(index)];
+               data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(index, enic)];
                if (data_rq->in_use)
                        vnic_rq_init(data_rq,
                                     cq_idx,
                                     error_interrupt_enable,
                                     error_interrupt_offset);
-
                vnic_cq_init(&enic->cq[cq_idx],
                        0 /* flow_control_enable */,
                        1 /* color_enable */,
                        0 /* cq_head */,
                        0 /* cq_tail */,
                        1 /* cq_tail_color */,
-                       0 /* interrupt_enable */,
+                       rxq_interrupt_enable,
                        1 /* cq_entry_enable */,
                        0 /* cq_message_enable */,
-                       0 /* interrupt offset */,
+                       rxq_interrupt_offset,
                        0 /* cq_message_addr */);
+               if (rxq_interrupt_enable)
+                       rxq_interrupt_offset++;
        }
 
        for (index = 0; index < enic->wq_count; index++) {
@@ -237,6 +248,9 @@ void enic_init_vnic_resources(struct enic *enic)
                        enic_cq_wq(enic, index),
                        error_interrupt_enable,
                        error_interrupt_offset);
+               /* Compute unsupported ol flags for enic_prep_pkts() */
+               enic->wq[index].tx_offload_notsup_mask =
+                       RTE_MBUF_F_TX_OFFLOAD_MASK ^ enic->tx_offload_mask;
 
                cq_idx = enic_cq_wq(enic, index);
                vnic_cq_init(&enic->cq[cq_idx],
@@ -249,24 +263,26 @@ void enic_init_vnic_resources(struct enic *enic)
                        0 /* cq_entry_enable */,
                        1 /* cq_message_enable */,
                        0 /* interrupt offset */,
-                       (u64)enic->wq[index].cqmsg_rz->iova);
+                       (uint64_t)enic->wq[index].cqmsg_rz->iova);
        }
 
-       vnic_intr_init(&enic->intr,
-               enic->config.intr_timer_usec,
-               enic->config.intr_timer_type,
-               /*mask_on_assertion*/1);
+       for (index = 0; index < enic->intr_count; index++) {
+               vnic_intr_init(&enic->intr[index],
+                              enic->config.intr_timer_usec,
+                              enic->config.intr_timer_type,
+                              /*mask_on_assertion*/1);
+       }
 }
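
The interrupt vector layout implied by this hunk, as an illustrative comment
(anything beyond what the patch itself shows is an assumption):

	/*
	 * intr[ENICPMD_LSC_INTR_OFFSET]      - link status change
	 * intr[ENICPMD_RXQ_INTR_OFFSET + i]  - Rx queue i, only when
	 *                                      intr_conf.rxq is set
	 * Hence intr_count == 1 + nb_rx_queues in the Rx-interrupt case,
	 * matching required_intr computed in enic_set_vnic_res().
	 */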
 
 
-static int
+int
 enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
 {
        struct rte_mbuf *mb;
        struct rq_enet_desc *rqd = rq->ring.descs;
        unsigned i;
        dma_addr_t dma_addr;
-       uint32_t max_rx_pkt_len;
+       uint32_t max_rx_pktlen;
        uint16_t rq_buf_len;
 
        if (!rq->in_use)
@@ -276,17 +292,17 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
                  rq->ring.desc_count);
 
        /*
-        * If *not* using scatter and the mbuf size is smaller than the
-        * requested max packet size (max_rx_pkt_len), then reduce the
-        * posted buffer size to max_rx_pkt_len. HW still receives packets
-        * larger than max_rx_pkt_len, but they will be truncated, which we
+        * If *not* using scatter and the mbuf size is greater than the
+        * requested max packet size (mtu + eth overhead), then reduce the
+        * posted buffer size to max packet size. HW still receives packets
+        * larger than max packet size, but they will be truncated, which we
         * drop in the rx handler. Not ideal, but better than returning
         * large packets when the user is not expecting them.
         */
-       max_rx_pkt_len = enic->rte_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+       max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->rte_dev->data->mtu);
        rq_buf_len = rte_pktmbuf_data_room_size(rq->mp) - RTE_PKTMBUF_HEADROOM;
-       if (max_rx_pkt_len < rq_buf_len && !rq->data_queue_enable)
-               rq_buf_len = max_rx_pkt_len;
+       if (max_rx_pktlen < rq_buf_len && !rq->data_queue_enable)
+               rq_buf_len = max_rx_pktlen;
        for (i = 0; i < rq->ring.desc_count; i++, rqd++) {
                mb = rte_mbuf_raw_alloc(rq->mp);
                if (mb == NULL) {
@@ -304,6 +320,26 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
                                rq_buf_len);
                rq->mbuf_ring[i] = mb;
        }
+       /*
+        * Do not post the buffers to the NIC until we enable the RQ via
+        * enic_start_rq().
+        */
+       rq->need_initial_post = true;
+       /* Initialize fetch index while RQ is disabled */
+       iowrite32(0, &rq->ctrl->fetch_index);
+       return 0;
+}
+
+/*
+ * Post the Rx buffers for the first time. enic_alloc_rx_queue_mbufs() has
+ * allocated the buffers and filled the RQ descriptor ring. Just need to push
+ * the post index to the NIC.
+ */
+static void
+enic_initial_post_rx(struct enic *enic, struct vnic_rq *rq)
+{
+       if (!rq->in_use || !rq->need_initial_post)
+               return;
 
        /* make sure all prior writes are complete before doing the PIO write */
        rte_rmb();
@@ -316,16 +352,13 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
        dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n",
                enic->port_id, rq->index, rq->posted_index, rq->rx_nb_hold);
        iowrite32(rq->posted_index, &rq->ctrl->posted_index);
-       iowrite32(0, &rq->ctrl->fetch_index);
        rte_rmb();
-
-       return 0;
-
+       rq->need_initial_post = false;
 }
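
Usage sketch, mirroring what enic_start_rq() does later in this patch: the
initial buffers are pushed only after the RQ is enabled, so the fetch index
written in enic_alloc_rx_queue_mbufs() is still valid when the NIC starts.

	vnic_rq_enable(rq);		/* bring the RQ out of reset first */
	enic_initial_post_rx(enic, rq);	/* then push posted_index once */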
 
-static void *
+void *
 enic_alloc_consistent(void *priv, size_t size,
-       dma_addr_t *dma_handle, u8 *name)
+       dma_addr_t *dma_handle, uint8_t *name)
 {
        void *vaddr;
        const struct rte_memzone *rz;
@@ -333,8 +366,8 @@ enic_alloc_consistent(void *priv, size_t size,
        struct enic *enic = (struct enic *)priv;
        struct enic_memzone_entry *mze;
 
-       rz = rte_memzone_reserve_aligned((const char *)name,
-                                        size, SOCKET_ID_ANY, 0, ENIC_ALIGN);
+       rz = rte_memzone_reserve_aligned((const char *)name, size,
+                       SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG, ENIC_PAGE_SIZE);
        if (!rz) {
                pr_err("%s : Failed to allocate memory requested for %s\n",
                        __func__, name);
@@ -363,7 +396,7 @@ enic_alloc_consistent(void *priv, size_t size,
        return vaddr;
 }
 
-static void
+void
 enic_free_consistent(void *priv,
                     __rte_unused size_t size,
                     void *vaddr,
@@ -390,19 +423,17 @@ enic_free_consistent(void *priv,
        rte_free(mze);
 }
 
-int enic_link_update(struct enic *enic)
+int enic_link_update(struct rte_eth_dev *eth_dev)
 {
-       struct rte_eth_dev *eth_dev = enic->rte_dev;
-       int ret;
-       int link_status = 0;
+       struct enic *enic = pmd_priv(eth_dev);
+       struct rte_eth_link link;
 
-       link_status = enic_get_link_status(enic);
-       ret = (link_status == enic->link_status);
-       enic->link_status = link_status;
-       eth_dev->data->dev_link.link_status = link_status;
-       eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
-       eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
-       return ret;
+       memset(&link, 0, sizeof(link));
+       link.link_status = enic_get_link_status(enic);
+       link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+       link.link_speed = vnic_dev_port_speed(enic->vdev);
+
+       return rte_eth_linkstatus_set(eth_dev, &link);
 }
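
rte_eth_linkstatus_set() updates dev->data->dev_link atomically and reports
whether the state changed. An app-side sketch of reading the link this
function publishes, assuming a DPDK release where rte_eth_link_get_nowait()
returns int; port_id is a placeholder:

	struct rte_eth_link link;

	if (rte_eth_link_get_nowait(port_id, &link) == 0 && link.link_status)
		printf("port %u is up at %u Mbps\n", port_id, link.link_speed);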
 
 static void
@@ -411,11 +442,133 @@ enic_intr_handler(void *arg)
        struct rte_eth_dev *dev = (struct rte_eth_dev *)arg;
        struct enic *enic = pmd_priv(dev);
 
-       vnic_intr_return_all_credits(&enic->intr);
+       vnic_intr_return_all_credits(&enic->intr[ENICPMD_LSC_INTR_OFFSET]);
 
-       enic_link_update(enic);
-       _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+       enic_link_update(dev);
+       rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
        enic_log_q_error(enic);
+       /* Re-enable irq in case of INTx */
+       rte_intr_ack(enic->pdev->intr_handle);
+}
+
+static int enic_rxq_intr_init(struct enic *enic)
+{
+       struct rte_intr_handle *intr_handle;
+       uint32_t rxq_intr_count, i;
+       int err;
+
+       intr_handle = enic->rte_dev->intr_handle;
+       if (!enic->rte_dev->data->dev_conf.intr_conf.rxq)
+               return 0;
+       /*
+        * Rx queue interrupts only work when we have MSI-X interrupts,
+        * one per queue. Sharing one interrupt is technically
+        * possible with VIC, but it is not worth the complications it brings.
+        */
+       if (!rte_intr_cap_multiple(intr_handle)) {
+               dev_err(enic, "Rx queue interrupts require MSI-X interrupts"
+                       " (vfio-pci driver)\n");
+               return -ENOTSUP;
+       }
+       rxq_intr_count = enic->intr_count - ENICPMD_RXQ_INTR_OFFSET;
+       err = rte_intr_efd_enable(intr_handle, rxq_intr_count);
+       if (err) {
+               dev_err(enic, "Failed to enable event fds for Rx queue"
+                       " interrupts\n");
+               return err;
+       }
+
+       if (rte_intr_vec_list_alloc(intr_handle, "enic_intr_vec",
+                                          rxq_intr_count)) {
+               dev_err(enic, "Failed to allocate intr_vec\n");
+               return -ENOMEM;
+       }
+       for (i = 0; i < rxq_intr_count; i++)
+               if (rte_intr_vec_list_index_set(intr_handle, i,
+                                                  i + ENICPMD_RXQ_INTR_OFFSET))
+                       return -rte_errno;
+       return 0;
+}
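
An app-side sketch (not part of the patch) of consuming the per-queue event
fds set up here, assuming MSI-X via vfio-pci; port_id and queue 0 are
placeholders:

	struct rte_epoll_event ev;

	/* register queue 0's Rx interrupt with this thread's epoll fd */
	rte_eth_dev_rx_intr_ctl_q(port_id, 0, RTE_EPOLL_PER_THREAD,
				  RTE_INTR_EVENT_ADD, NULL);
	rte_eth_dev_rx_intr_enable(port_id, 0);
	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1 /* block */);
	rte_eth_dev_rx_intr_disable(port_id, 0);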
+
+static void enic_rxq_intr_deinit(struct enic *enic)
+{
+       struct rte_intr_handle *intr_handle;
+
+       intr_handle = enic->rte_dev->intr_handle;
+       rte_intr_efd_disable(intr_handle);
+
+       rte_intr_vec_list_free(intr_handle);
+}
+
+static void enic_prep_wq_for_simple_tx(struct enic *enic, uint16_t queue_idx)
+{
+       struct wq_enet_desc *desc;
+       struct vnic_wq *wq;
+       unsigned int i;
+
+       /*
+        * Fill WQ descriptor fields that never change. Every descriptor is
+        * one packet, so set EOP. Also set CQ_ENTRY every ENIC_WQ_CQ_THRESH
+        * descriptors (i.e. request one completion update every 32 packets).
+        */
+       wq = &enic->wq[queue_idx];
+       desc = (struct wq_enet_desc *)wq->ring.descs;
+       for (i = 0; i < wq->ring.desc_count; i++, desc++) {
+               desc->header_length_flags = 1 << WQ_ENET_FLAGS_EOP_SHIFT;
+               if (i % ENIC_WQ_CQ_THRESH == ENIC_WQ_CQ_THRESH - 1)
+                       desc->header_length_flags |=
+                               (1 << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT);
+       }
+}
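
An illustrative note on the completion threshold (the constant's value of 32
is assumed from the comment above):

	/*
	 * With ENIC_WQ_CQ_THRESH == 32, descriptors 31, 63, 95, ... carry
	 * CQ_ENTRY, so the NIC posts one completion per 32 packets rather
	 * than one per packet, cutting CQ traffic on the simple Tx path.
	 */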
+
+/*
+ * The 'strong' version is in enic_rxtx_vec_avx2.c. This weak version is
+ * used when that file is not compiled.
+ */
+__rte_weak bool
+enic_use_vector_rx_handler(__rte_unused struct rte_eth_dev *eth_dev)
+{
+       return false;
+}
+
+void enic_pick_rx_handler(struct rte_eth_dev *eth_dev)
+{
+       struct enic *enic = pmd_priv(eth_dev);
+
+       if (enic->cq64) {
+               ENICPMD_LOG(DEBUG, " use the normal Rx handler for 64B CQ entry");
+               eth_dev->rx_pkt_burst = &enic_recv_pkts_64;
+               return;
+       }
+       /*
+        * Preference order:
+        * 1. The vectorized handler if possible and requested.
+        * 2. The non-scatter, simplified handler if scatter Rx is not used.
+        * 3. The default handler as a fallback.
+        */
+       if (enic_use_vector_rx_handler(eth_dev))
+               return;
+       if (enic->rq_count > 0 && enic->rq[0].data_queue_enable == 0) {
+               ENICPMD_LOG(DEBUG, " use the non-scatter Rx handler");
+               eth_dev->rx_pkt_burst = &enic_noscatter_recv_pkts;
+       } else {
+               ENICPMD_LOG(DEBUG, " use the normal Rx handler");
+               eth_dev->rx_pkt_burst = &enic_recv_pkts;
+       }
+}
+
+/* Secondary process uses this to set the Tx handler */
+void enic_pick_tx_handler(struct rte_eth_dev *eth_dev)
+{
+       struct enic *enic = pmd_priv(eth_dev);
+
+       if (enic->use_simple_tx_handler) {
+               ENICPMD_LOG(DEBUG, " use the simple tx handler");
+               eth_dev->tx_pkt_burst = &enic_simple_xmit_pkts;
+       } else {
+               ENICPMD_LOG(DEBUG, " use the default tx handler");
+               eth_dev->tx_pkt_burst = &enic_xmit_pkts;
+       }
 }
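
Burst function pointers live in per-process memory, so a secondary process
must re-resolve them after attaching. A hedged sketch of that path (its exact
placement in the probe code is an assumption):

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		/* HW is already initialized; only pick local handlers */
		enic_pick_rx_handler(eth_dev);
		enic_pick_tx_handler(eth_dev);
		return 0;
	}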
 
 int enic_enable(struct enic *enic)
@@ -423,9 +576,28 @@ int enic_enable(struct enic *enic)
        unsigned int index;
        int err;
        struct rte_eth_dev *eth_dev = enic->rte_dev;
+       uint64_t simple_tx_offloads;
+       uintptr_t p;
+
+       if (enic->enable_avx2_rx) {
+               struct rte_mbuf mb_def = { .buf_addr = 0 };
+
+               /*
+                * mbuf_initializer contains const-after-init fields of
+                * receive mbufs (i.e. 64 bits of fields from rearm_data).
+                * It is currently used by the vectorized handler.
+                */
+               mb_def.nb_segs = 1;
+               mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+               mb_def.port = enic->port_id;
+               rte_mbuf_refcnt_set(&mb_def, 1);
+               rte_compiler_barrier();
+               p = (uintptr_t)&mb_def.rearm_data;
+               enic->mbuf_initializer = *(uint64_t *)p;
+       }
 
        eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
-       eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+       eth_dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
        /* vnic notification of link status has already been turned on in
         * enic_dev_init() which is called during probe time.  Here we are
@@ -434,9 +606,13 @@ int enic_enable(struct enic *enic)
        if (eth_dev->data->dev_conf.intr_conf.lsc)
                vnic_dev_notify_set(enic->vdev, 0);
 
-       if (enic_clsf_init(enic))
-               dev_warning(enic, "Init of hash table for clsf failed."\
-                       "Flow director feature will not work\n");
+       err = enic_rxq_intr_init(enic);
+       if (err)
+               return err;
+
+       /* Initialize flowman if not already initialized during probe */
+       if (enic->fm == NULL && enic_fm_init(enic))
+               dev_warning(enic, "Init of flowman failed.\n");
 
        for (index = 0; index < enic->rq_count; index++) {
                err = enic_alloc_rx_queue_mbufs(enic,
@@ -446,7 +622,7 @@ int enic_enable(struct enic *enic)
                        return err;
                }
                err = enic_alloc_rx_queue_mbufs(enic,
-                       &enic->rq[enic_rte_rq_idx_to_data_idx(index)]);
+                       &enic->rq[enic_rte_rq_idx_to_data_idx(index, enic)]);
                if (err) {
                        /* release the allocated mbufs for the sop rq*/
                        enic_rxmbuf_queue_release(enic,
@@ -457,6 +633,30 @@ int enic_enable(struct enic *enic)
                }
        }
 
+       /*
+        * Use the simple TX handler if possible. Only checksum offloads
+        * and vlan insertion are supported.
+        */
+       simple_tx_offloads = enic->tx_offload_capa &
+               (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+                RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+                RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+                RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+                RTE_ETH_TX_OFFLOAD_TCP_CKSUM);
+       if ((eth_dev->data->dev_conf.txmode.offloads &
+            ~simple_tx_offloads) == 0) {
+               ENICPMD_LOG(DEBUG, " use the simple tx handler");
+               eth_dev->tx_pkt_burst = &enic_simple_xmit_pkts;
+               for (index = 0; index < enic->wq_count; index++)
+                       enic_prep_wq_for_simple_tx(enic, index);
+               enic->use_simple_tx_handler = 1;
+       } else {
+               ENICPMD_LOG(DEBUG, " use the default tx handler");
+               eth_dev->tx_pkt_burst = &enic_xmit_pkts;
+       }
+
+       enic_pick_rx_handler(eth_dev);
+
        for (index = 0; index < enic->wq_count; index++)
                enic_start_wq(enic, index);
        for (index = 0; index < enic->rq_count; index++)
@@ -467,11 +667,12 @@ int enic_enable(struct enic *enic)
        vnic_dev_enable_wait(enic->vdev);
 
        /* Register and enable error interrupt */
-       rte_intr_callback_register(&(enic->pdev->intr_handle),
+       rte_intr_callback_register(enic->pdev->intr_handle,
                enic_intr_handler, (void *)enic->rte_dev);
 
-       rte_intr_enable(&(enic->pdev->intr_handle));
-       vnic_intr_unmask(&enic->intr);
+       rte_intr_enable(enic->pdev->intr_handle);
+       /* Unmask LSC interrupt */
+       vnic_intr_unmask(&enic->intr[ENICPMD_LSC_INTR_OFFSET]);
 
        return 0;
 }
@@ -479,17 +680,21 @@ int enic_enable(struct enic *enic)
 int enic_alloc_intr_resources(struct enic *enic)
 {
        int err;
+       unsigned int i;
 
        dev_info(enic, "vNIC resources used:  "\
                "wq %d rq %d cq %d intr %d\n",
                enic->wq_count, enic_vnic_rq_count(enic),
                enic->cq_count, enic->intr_count);
 
-       err = vnic_intr_alloc(enic->vdev, &enic->intr, 0);
-       if (err)
-               enic_free_vnic_resources(enic);
-
-       return err;
+       for (i = 0; i < enic->intr_count; i++) {
+               err = vnic_intr_alloc(enic->vdev, &enic->intr[i], i);
+               if (err) {
+                       enic_free_vnic_resources(enic);
+                       return err;
+               }
+       }
+       return 0;
 }
 
 void enic_free_rq(void *rxq)
@@ -504,6 +709,19 @@ void enic_free_rq(void *rxq)
        enic = vnic_dev_priv(rq_sop->vdev);
        rq_data = &enic->rq[rq_sop->data_queue_idx];
 
+       if (rq_sop->free_mbufs) {
+               struct rte_mbuf **mb;
+               int i;
+
+               mb = rq_sop->free_mbufs;
+               for (i = ENIC_RX_BURST_MAX - rq_sop->num_free_mbufs;
+                    i < ENIC_RX_BURST_MAX; i++)
+                       rte_pktmbuf_free(mb[i]);
+               rte_free(rq_sop->free_mbufs);
+               rq_sop->free_mbufs = NULL;
+               rq_sop->num_free_mbufs = 0;
+       }
+
        enic_rxmbuf_queue_release(enic, rq_sop);
        if (rq_data->in_use)
                enic_rxmbuf_queue_release(enic, rq_data);
@@ -527,43 +745,46 @@ void enic_free_rq(void *rxq)
 
 void enic_start_wq(struct enic *enic, uint16_t queue_idx)
 {
-       struct rte_eth_dev *eth_dev = enic->rte_dev;
+       struct rte_eth_dev_data *data = enic->dev_data;
        vnic_wq_enable(&enic->wq[queue_idx]);
-       eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
+       data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
 }
 
 int enic_stop_wq(struct enic *enic, uint16_t queue_idx)
 {
-       struct rte_eth_dev *eth_dev = enic->rte_dev;
+       struct rte_eth_dev_data *data = enic->dev_data;
        int ret;
 
        ret = vnic_wq_disable(&enic->wq[queue_idx]);
        if (ret)
                return ret;
 
-       eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
+       data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
        return 0;
 }
 
 void enic_start_rq(struct enic *enic, uint16_t queue_idx)
 {
+       struct rte_eth_dev_data *data = enic->dev_data;
        struct vnic_rq *rq_sop;
        struct vnic_rq *rq_data;
        rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
        rq_data = &enic->rq[rq_sop->data_queue_idx];
-       struct rte_eth_dev *eth_dev = enic->rte_dev;
 
-       if (rq_data->in_use)
+       if (rq_data->in_use) {
                vnic_rq_enable(rq_data);
+               enic_initial_post_rx(enic, rq_data);
+       }
        rte_mb();
        vnic_rq_enable(rq_sop);
-       eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
+       enic_initial_post_rx(enic, rq_sop);
+       data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
 }
 
 int enic_stop_rq(struct enic *enic, uint16_t queue_idx)
 {
+       struct rte_eth_dev_data *data = enic->dev_data;
        int ret1 = 0, ret2 = 0;
-       struct rte_eth_dev *eth_dev = enic->rte_dev;
        struct vnic_rq *rq_sop;
        struct vnic_rq *rq_data;
        rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
@@ -579,7 +800,7 @@ int enic_stop_rq(struct enic *enic, uint16_t queue_idx)
        else if (ret1)
                return ret1;
 
-       eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
+       data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
        return 0;
 }
 
@@ -587,16 +808,36 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
        unsigned int socket_id, struct rte_mempool *mp,
        uint16_t nb_desc, uint16_t free_thresh)
 {
+       struct enic_vf_representor *vf;
        int rc;
-       uint16_t sop_queue_idx = enic_rte_rq_idx_to_sop_idx(queue_idx);
-       uint16_t data_queue_idx = enic_rte_rq_idx_to_data_idx(queue_idx);
-       struct vnic_rq *rq_sop = &enic->rq[sop_queue_idx];
-       struct vnic_rq *rq_data = &enic->rq[data_queue_idx];
+       uint16_t sop_queue_idx;
+       uint16_t data_queue_idx;
+       uint16_t cq_idx;
+       struct vnic_rq *rq_sop;
+       struct vnic_rq *rq_data;
        unsigned int mbuf_size, mbufs_per_pkt;
        unsigned int nb_sop_desc, nb_data_desc;
        uint16_t min_sop, max_sop, min_data, max_data;
-       uint32_t max_rx_pkt_len;
+       uint32_t max_rx_pktlen;
 
+       /*
+        * Representor uses a reserved PF queue. Translate representor
+        * queue number to PF queue number.
+        */
+       if (enic_is_vf_rep(enic)) {
+               RTE_ASSERT(queue_idx == 0);
+               vf = VF_ENIC_TO_VF_REP(enic);
+               sop_queue_idx = vf->pf_rq_sop_idx;
+               data_queue_idx = vf->pf_rq_data_idx;
+               enic = vf->pf;
+               queue_idx = sop_queue_idx;
+       } else {
+               sop_queue_idx = enic_rte_rq_idx_to_sop_idx(queue_idx);
+               data_queue_idx = enic_rte_rq_idx_to_data_idx(queue_idx, enic);
+       }
+       cq_idx = enic_cq_rq(enic, sop_queue_idx);
+       rq_sop = &enic->rq[sop_queue_idx];
+       rq_data = &enic->rq[data_queue_idx];
        rq_sop->is_sop = 1;
        rq_sop->data_queue_idx = data_queue_idx;
        rq_data->is_sop = 0;
@@ -613,23 +854,23 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
 
        mbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
                               RTE_PKTMBUF_HEADROOM);
-       /* max_rx_pkt_len includes the ethernet header and CRC. */
-       max_rx_pkt_len = enic->rte_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+       /* max_rx_pktlen includes the ethernet header and CRC. */
+       max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->rte_dev->data->mtu);
 
        if (enic->rte_dev->data->dev_conf.rxmode.offloads &
-           DEV_RX_OFFLOAD_SCATTER) {
+           RTE_ETH_RX_OFFLOAD_SCATTER) {
                dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx);
                /* ceil((max pkt len)/mbuf_size) */
-               mbufs_per_pkt = (max_rx_pkt_len + mbuf_size - 1) / mbuf_size;
+               mbufs_per_pkt = (max_rx_pktlen + mbuf_size - 1) / mbuf_size;
        } else {
                dev_info(enic, "Scatter rx mode disabled\n");
                mbufs_per_pkt = 1;
-               if (max_rx_pkt_len > mbuf_size) {
+               if (max_rx_pktlen > mbuf_size) {
                        dev_warning(enic, "The maximum Rx packet size (%u) is"
                                    " larger than the mbuf size (%u), and"
                                    " scatter is disabled. Larger packets will"
                                    " be truncated.\n",
-                                   max_rx_pkt_len, mbuf_size);
+                                   max_rx_pktlen, mbuf_size);
                }
        }
 
@@ -638,16 +879,15 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
                rq_sop->data_queue_enable = 1;
                rq_data->in_use = 1;
                /*
-                * HW does not directly support rxmode.max_rx_pkt_len. HW always
+                * HW does not directly support MTU. HW always
                 * receives packet sizes up to the "max" MTU.
                 * If not using scatter, we can achieve the effect of dropping
                 * larger packets by reducing the size of posted buffers.
                 * See enic_alloc_rx_queue_mbufs().
                 */
-               if (max_rx_pkt_len <
-                   enic_mtu_to_max_rx_pktlen(enic->rte_dev->data->mtu)) {
-                       dev_warning(enic, "rxmode.max_rx_pkt_len is ignored"
-                                   " when scatter rx mode is in use.\n");
+               if (enic->rte_dev->data->mtu < enic->max_mtu) {
+                       dev_warning(enic,
+                               "mtu is ignored when scatter rx mode is in use.\n");
                }
        } else {
                dev_info(enic, "Rq %u Scatter rx mode not being used\n",
@@ -657,20 +897,20 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
        }
 
        /* number of descriptors has to be a multiple of 32 */
-       nb_sop_desc = (nb_desc / mbufs_per_pkt) & ~0x1F;
-       nb_data_desc = (nb_desc - nb_sop_desc) & ~0x1F;
+       nb_sop_desc = (nb_desc / mbufs_per_pkt) & ENIC_ALIGN_DESCS_MASK;
+       nb_data_desc = (nb_desc - nb_sop_desc) & ENIC_ALIGN_DESCS_MASK;
 
        rq_sop->max_mbufs_per_pkt = mbufs_per_pkt;
        rq_data->max_mbufs_per_pkt = mbufs_per_pkt;
 
        if (mbufs_per_pkt > 1) {
-               min_sop = 64;
+               min_sop = ENIC_RX_BURST_MAX;
                max_sop = ((enic->config.rq_desc_count /
-                           (mbufs_per_pkt - 1)) & ~0x1F);
+                           (mbufs_per_pkt - 1)) & ENIC_ALIGN_DESCS_MASK);
                min_data = min_sop * (mbufs_per_pkt - 1);
                max_data = enic->config.rq_desc_count;
        } else {
-               min_sop = 64;
+               min_sop = ENIC_RX_BURST_MAX;
                max_sop = enic->config.rq_desc_count;
                min_data = 0;
                max_data = 0;
@@ -690,7 +930,7 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
        if (mbufs_per_pkt > 1) {
                dev_info(enic, "For max packet size %u and mbuf size %u valid"
                         " rx descriptor range is %u to %u\n",
-                        max_rx_pkt_len, mbuf_size, min_sop + min_data,
+                        max_rx_pktlen, mbuf_size, min_sop + min_data,
                         max_sop + max_data);
        }
        dev_info(enic, "Using %d rx descriptors (sop %d, data %d)\n",
@@ -716,8 +956,22 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
                }
                nb_data_desc = rq_data->ring.desc_count;
        }
-       rc = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx,
+       /* Enable 64B CQ entry if requested */
+       if (enic->cq64 && vnic_dev_set_cq_entry_size(enic->vdev,
+                               sop_queue_idx, VNIC_RQ_CQ_ENTRY_SIZE_64)) {
+               dev_err(enic, "failed to enable 64B CQ entry on sop rq\n");
+               goto err_free_rq_data;
+       }
+       if (rq_data->in_use && enic->cq64 &&
+           vnic_dev_set_cq_entry_size(enic->vdev, data_queue_idx,
+               VNIC_RQ_CQ_ENTRY_SIZE_64)) {
+               dev_err(enic, "failed to enable 64B CQ entry on data rq\n");
+               goto err_free_rq_data;
+       }
+
+       rc = vnic_cq_alloc(enic->vdev, &enic->cq[cq_idx], cq_idx,
                           socket_id, nb_sop_desc + nb_data_desc,
+                          enic->cq64 ? sizeof(struct cq_enet_rq_desc_64) :
                           sizeof(struct cq_enet_rq_desc));
        if (rc) {
                dev_err(enic, "error in allocation of cq for rq\n");
@@ -741,15 +995,26 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
                        goto err_free_sop_mbuf;
        }
 
+       rq_sop->free_mbufs = (struct rte_mbuf **)
+               rte_zmalloc_socket("rq->free_mbufs",
+                                  sizeof(struct rte_mbuf *) *
+                                  ENIC_RX_BURST_MAX,
+                                  RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
+       if (rq_sop->free_mbufs == NULL)
+               goto err_free_data_mbuf;
+       rq_sop->num_free_mbufs = 0;
+
        rq_sop->tot_nb_desc = nb_desc; /* squirrel away for MTU update function */
 
        return 0;
 
+err_free_data_mbuf:
+       rte_free(rq_data->mbuf_ring);
 err_free_sop_mbuf:
        rte_free(rq_sop->mbuf_ring);
 err_free_cq:
        /* cleanup on error */
-       vnic_cq_free(&enic->cq[queue_idx]);
+       vnic_cq_free(&enic->cq[cq_idx]);
 err_free_rq_data:
        if (rq_data->in_use)
                vnic_rq_free(rq_data);
@@ -777,32 +1042,37 @@ void enic_free_wq(void *txq)
 int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
        unsigned int socket_id, uint16_t nb_desc)
 {
+       struct enic_vf_representor *vf;
        int err;
-       struct vnic_wq *wq = &enic->wq[queue_idx];
-       unsigned int cq_index = enic_cq_wq(enic, queue_idx);
-       char name[NAME_MAX];
+       struct vnic_wq *wq;
+       unsigned int cq_index;
+       char name[RTE_MEMZONE_NAMESIZE];
        static int instance;
 
-       wq->socket_id = socket_id;
-       if (nb_desc) {
-               if (nb_desc > enic->config.wq_desc_count) {
-                       dev_warning(enic,
-                               "WQ %d - number of tx desc in cmd line (%d)"\
-                               "is greater than that in the UCSM/CIMC adapter"\
-                               "policy.  Applying the value in the adapter "\
-                               "policy (%d)\n",
-                               queue_idx, nb_desc, enic->config.wq_desc_count);
-               } else if (nb_desc != enic->config.wq_desc_count) {
-                       enic->config.wq_desc_count = nb_desc;
-                       dev_info(enic,
-                               "TX Queues - effective number of descs:%d\n",
-                               nb_desc);
-               }
+       /*
+        * Representor uses a reserved PF queue. Translate representor
+        * queue number to PF queue number.
+        */
+       if (enic_is_vf_rep(enic)) {
+               RTE_ASSERT(queue_idx == 0);
+               vf = VF_ENIC_TO_VF_REP(enic);
+               queue_idx = vf->pf_wq_idx;
+               cq_index = vf->pf_wq_cq_idx;
+               enic = vf->pf;
+       } else {
+               cq_index = enic_cq_wq(enic, queue_idx);
        }
+       wq = &enic->wq[queue_idx];
+       wq->socket_id = socket_id;
+       /*
+        * rte_eth_tx_queue_setup() checks min, max, and alignment. So just
+        * print an info message for diagnostics.
+        */
+       dev_info(enic, "TX Queues - effective number of descs:%d\n", nb_desc);
 
        /* Allocate queue resources */
        err = vnic_wq_alloc(enic->vdev, &enic->wq[queue_idx], queue_idx,
-               enic->config.wq_desc_count,
+               nb_desc,
                sizeof(struct wq_enet_desc));
        if (err) {
                dev_err(enic, "error in allocation of wq\n");
@@ -810,7 +1080,7 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
        }
 
        err = vnic_cq_alloc(enic->vdev, &enic->cq[cq_index], cq_index,
-               socket_id, enic->config.wq_desc_count,
+               socket_id, nb_desc,
                sizeof(struct cq_enet_wq_desc));
        if (err) {
                vnic_wq_free(wq);
@@ -823,9 +1093,8 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
                instance++);
 
        wq->cqmsg_rz = rte_memzone_reserve_aligned((const char *)name,
-                                                  sizeof(uint32_t),
-                                                  SOCKET_ID_ANY, 0,
-                                                  ENIC_ALIGN);
+                       sizeof(uint32_t), SOCKET_ID_ANY,
+                       RTE_MEMZONE_IOVA_CONTIG, ENIC_PAGE_SIZE);
        if (!wq->cqmsg_rz)
                return -ENOMEM;
 
@@ -837,16 +1106,19 @@ int enic_disable(struct enic *enic)
        unsigned int i;
        int err;
 
-       vnic_intr_mask(&enic->intr);
-       (void)vnic_intr_masked(&enic->intr); /* flush write */
-       rte_intr_disable(&enic->pdev->intr_handle);
-       rte_intr_callback_unregister(&enic->pdev->intr_handle,
+       for (i = 0; i < enic->intr_count; i++) {
+               vnic_intr_mask(&enic->intr[i]);
+               (void)vnic_intr_masked(&enic->intr[i]); /* flush write */
+       }
+       enic_rxq_intr_deinit(enic);
+       rte_intr_disable(enic->pdev->intr_handle);
+       rte_intr_callback_unregister(enic->pdev->intr_handle,
                                     enic_intr_handler,
                                     (void *)enic->rte_dev);
 
        vnic_dev_disable(enic->vdev);
 
-       enic_clsf_destroy(enic);
+       enic_fm_destroy(enic);
 
        if (!enic_is_sriov_vf(enic))
                vnic_dev_del_addr(enic->vdev, enic->mac_addr);
@@ -881,7 +1153,8 @@ int enic_disable(struct enic *enic)
                        vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
        for (i = 0; i < enic->cq_count; i++)
                vnic_cq_clean(&enic->cq[i]);
-       vnic_intr_clean(&enic->intr);
+       for (i = 0; i < enic->intr_count; i++)
+               vnic_intr_clean(&enic->intr[i]);
 
        return 0;
 }
@@ -914,9 +1187,10 @@ static int enic_dev_wait(struct vnic_dev *vdev,
 static int enic_dev_open(struct enic *enic)
 {
        int err;
+       int flags = CMD_OPENF_IG_DESCCACHE;
 
        err = enic_dev_wait(enic->vdev, vnic_dev_open,
-               vnic_dev_open_done, 0);
+               vnic_dev_open_done, flags);
        if (err)
                dev_err(enic_get_dev(enic),
                        "vNIC device open failed, err %d\n", err);
@@ -929,10 +1203,10 @@ static int enic_set_rsskey(struct enic *enic, uint8_t *user_key)
        dma_addr_t rss_key_buf_pa;
        union vnic_rss_key *rss_key_buf_va = NULL;
        int err, i;
-       u8 name[NAME_MAX];
+       uint8_t name[RTE_MEMZONE_NAMESIZE];
 
        RTE_ASSERT(user_key != NULL);
-       snprintf((char *)name, NAME_MAX, "rss_key-%s", enic->bdf_name);
+       snprintf((char *)name, sizeof(name), "rss_key-%s", enic->bdf_name);
        rss_key_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_key),
                &rss_key_buf_pa, name);
        if (!rss_key_buf_va)
@@ -961,9 +1235,9 @@ int enic_set_rss_reta(struct enic *enic, union vnic_rss_cpu *rss_cpu)
        dma_addr_t rss_cpu_buf_pa;
        union vnic_rss_cpu *rss_cpu_buf_va = NULL;
        int err;
-       u8 name[NAME_MAX];
+       uint8_t name[RTE_MEMZONE_NAMESIZE];
 
-       snprintf((char *)name, NAME_MAX, "rss_cpu-%s", enic->bdf_name);
+       snprintf((char *)name, sizeof(name), "rss_cpu-%s", enic->bdf_name);
        rss_cpu_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_cpu),
                &rss_cpu_buf_pa, name);
        if (!rss_cpu_buf_va)
@@ -984,10 +1258,11 @@ int enic_set_rss_reta(struct enic *enic, union vnic_rss_cpu *rss_cpu)
        return err;
 }
 
-static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
-       u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
+static int enic_set_niccfg(struct enic *enic, uint8_t rss_default_cpu,
+       uint8_t rss_hash_type, uint8_t rss_hash_bits, uint8_t rss_base_cpu,
+       uint8_t rss_enable)
 {
-       const u8 tso_ipid_split_en = 0;
+       const uint8_t tso_ipid_split_en = 0;
        int err;
 
        err = enic_set_nic_cfg(enic,
@@ -1043,6 +1318,18 @@ int enic_setup_finish(struct enic *enic)
 {
        enic_init_soft_stats(enic);
 
+       /* switchdev: enable promisc mode on PF */
+       if (enic->switchdev_mode) {
+               vnic_dev_packet_filter(enic->vdev,
+                                      0 /* directed  */,
+                                      0 /* multicast */,
+                                      0 /* broadcast */,
+                                      1 /* promisc   */,
+                                      0 /* allmulti  */);
+               enic->promisc = 1;
+               enic->allmulti = 0;
+               return 0;
+       }
        /* Default conf */
        vnic_dev_packet_filter(enic->vdev,
                1 /* directed  */,
@@ -1083,8 +1370,8 @@ int enic_set_rss_conf(struct enic *enic, struct rte_eth_rss_conf *rss_conf)
 {
        struct rte_eth_dev *eth_dev;
        uint64_t rss_hf;
-       u8 rss_hash_type;
-       u8 rss_enable;
+       uint8_t rss_hash_type;
+       uint8_t rss_enable;
        int ret;
 
        RTE_ASSERT(rss_conf != NULL);
@@ -1098,21 +1385,35 @@ int enic_set_rss_conf(struct enic *enic, struct rte_eth_rss_conf *rss_conf)
        rss_hash_type = 0;
        rss_hf = rss_conf->rss_hf & enic->flow_type_rss_offloads;
        if (enic->rq_count > 1 &&
-           (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) &&
+           (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) &&
            rss_hf != 0) {
                rss_enable = 1;
-               if (rss_hf & ETH_RSS_IPV4)
+               if (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+                             RTE_ETH_RSS_NONFRAG_IPV4_OTHER))
                        rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV4;
-               if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+               if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
                        rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
-               if (rss_hf & ETH_RSS_IPV6)
+               if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
+                       rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV4;
+                       if (enic->udp_rss_weak) {
+                               /*
+                                * 'TCP' is not a typo. The "weak" version of
+                                * UDP RSS requires both the TCP and UDP bits
+                                * be set. It does enable TCP RSS as well.
+                                */
+                               rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
+                       }
+               }
+               if (rss_hf & (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_IPV6_EX |
+                             RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_OTHER))
                        rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV6;
-               if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+               if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_IPV6_TCP_EX))
                        rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
-               if (rss_hf & ETH_RSS_IPV6_EX)
-                       rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV6_EX;
-               if (rss_hf & ETH_RSS_IPV6_TCP_EX)
-                       rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX;
+               if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_UDP_EX)) {
+                       rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV6;
+                       if (enic->udp_rss_weak)
+                               rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
+               }
        } else {
                rss_enable = 0;
                rss_hf = 0;
@@ -1134,8 +1435,11 @@ int enic_set_rss_conf(struct enic *enic, struct rte_eth_rss_conf *rss_conf)
                enic->rss_hf = rss_hf;
                enic->rss_hash_type = rss_hash_type;
                enic->rss_enable = rss_enable;
+       } else {
+               dev_err(enic, "Failed to update RSS configurations."
+                       " hash=0x%x\n", rss_hash_type);
        }
-       return 0;
+       return ret;
 }
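
An app-side sketch of requesting the hash types this function maps to
NIC_CFG_RSS_HASH_TYPE_* bits; port_id and the queue counts are placeholders:

	struct rte_eth_conf conf = {
		.rxmode = { .mq_mode = RTE_ETH_MQ_RX_RSS },
		.rx_adv_conf.rss_conf = {
			.rss_hf = RTE_ETH_RSS_IPV4 |
				  RTE_ETH_RSS_NONFRAG_IPV4_TCP |
				  RTE_ETH_RSS_NONFRAG_IPV4_UDP,
		},
	};

	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);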
 
 int enic_set_vlan_strip(struct enic *enic)
@@ -1149,10 +1453,15 @@ int enic_set_vlan_strip(struct enic *enic)
                               enic->rss_enable);
 }
 
-void enic_add_packet_filter(struct enic *enic)
+int enic_add_packet_filter(struct enic *enic)
 {
+       /* switchdev ignores packet filters */
+       if (enic->switchdev_mode) {
+               ENICPMD_LOG(DEBUG, " switchdev: ignore packet filter");
+               return 0;
+       }
        /* Args -> directed, multicast, broadcast, promisc, allmulti */
-       vnic_dev_packet_filter(enic->vdev, 1, 1, 1,
+       return vnic_dev_packet_filter(enic->vdev, 1, 1, 1,
                enic->promisc, enic->allmulti);
 }
 
@@ -1163,13 +1472,12 @@ int enic_get_link_status(struct enic *enic)
 
 static void enic_dev_deinit(struct enic *enic)
 {
-       struct rte_eth_dev *eth_dev = enic->rte_dev;
-
        /* stop link status checking */
        vnic_dev_notify_unset(enic->vdev);
 
-       rte_free(eth_dev->data->mac_addrs);
+       /* mac_addrs is freed by rte_eth_dev_release_port() */
        rte_free(enic->cq);
+       rte_free(enic->intr);
        rte_free(enic->rq);
        rte_free(enic->wq);
 }
@@ -1179,12 +1487,27 @@ int enic_set_vnic_res(struct enic *enic)
 {
        struct rte_eth_dev *eth_dev = enic->rte_dev;
        int rc = 0;
-       unsigned int required_rq, required_wq, required_cq;
+       unsigned int required_rq, required_wq, required_cq, required_intr;
 
        /* Always use two vNIC RQs per eth_dev RQ, regardless of Rx scatter. */
        required_rq = eth_dev->data->nb_rx_queues * 2;
        required_wq = eth_dev->data->nb_tx_queues;
        required_cq = eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues;
+       required_intr = 1; /* 1 for LSC even if intr_conf.lsc is 0 */
+       if (eth_dev->data->dev_conf.intr_conf.rxq) {
+               required_intr += eth_dev->data->nb_rx_queues;
+       }
+       ENICPMD_LOG(DEBUG, "Required queues for PF: rq %u wq %u cq %u",
+                   required_rq, required_wq, required_cq);
+       if (enic->vf_required_rq) {
+               /* Queues needed for VF representors */
+               required_rq += enic->vf_required_rq;
+               required_wq += enic->vf_required_wq;
+               required_cq += enic->vf_required_cq;
+               ENICPMD_LOG(DEBUG, "Required queues for VF representors: rq %u wq %u cq %u",
+                           enic->vf_required_rq, enic->vf_required_wq,
+                           enic->vf_required_cq);
+       }
 
        if (enic->conf_rq_count < required_rq) {
                dev_err(dev, "Not enough Receive queues. Requested:%u which uses %d RQs on VIC, Configured:%u\n",
@@ -1203,11 +1526,18 @@ int enic_set_vnic_res(struct enic *enic)
                        required_cq, enic->conf_cq_count);
                rc = -EINVAL;
        }
+       if (enic->conf_intr_count < required_intr) {
+               dev_err(dev, "Not enough Interrupts to support Rx queue"
+                       " interrupts. Required:%u, Configured:%u\n",
+                       required_intr, enic->conf_intr_count);
+               rc = -EINVAL;
+       }
 
        if (rc == 0) {
                enic->rq_count = eth_dev->data->nb_rx_queues;
                enic->wq_count = eth_dev->data->nb_tx_queues;
                enic->cq_count = enic->rq_count + enic->wq_count;
+               enic->intr_count = required_intr;
        }
 
        return rc;
@@ -1222,8 +1552,8 @@ enic_reinit_rq(struct enic *enic, unsigned int rq_idx)
        int rc = 0;
 
        sop_rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
-       data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(rq_idx)];
-       cq_idx = rq_idx;
+       data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(rq_idx, enic)];
+       cq_idx = enic_cq_rq(enic, rq_idx);
 
        vnic_cq_clean(&enic->cq[cq_idx]);
        vnic_cq_init(&enic->cq[cq_idx],
@@ -1245,8 +1575,8 @@ enic_reinit_rq(struct enic *enic, unsigned int rq_idx)
        if (data_rq->in_use) {
                vnic_rq_init_start(data_rq,
                                   enic_cq_rq(enic,
-                                  enic_rte_rq_idx_to_data_idx(rq_idx)), 0,
-                                  data_rq->ring.desc_count - 1, 1, 0);
+                                  enic_rte_rq_idx_to_data_idx(rq_idx, enic)),
+                                  0, data_rq->ring.desc_count - 1, 1, 0);
        }
 
        rc = enic_alloc_rx_queue_mbufs(enic, sop_rq);
@@ -1303,20 +1633,21 @@ int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
                        "MTU (%u) is greater than value configured in NIC (%u)\n",
                        new_mtu, config_mtu);
 
-       /* The easy case is when scatter is disabled. However if the MTU
-        * becomes greater than the mbuf data size, packet drops will ensue.
+       /*
+        * If the device has not started (enic_enable), nothing to do.
+        * Later, enic_enable() will set up RQs reflecting the new maximum
+        * packet length.
         */
-       if (!(enic->rte_dev->data->dev_conf.rxmode.offloads &
-             DEV_RX_OFFLOAD_SCATTER)) {
-               eth_dev->data->mtu = new_mtu;
+       if (!eth_dev->data->dev_started)
                goto set_mtu_done;
-       }
 
-       /* Rx scatter is enabled so reconfigure RQ's on the fly. The point is to
-        * change Rx scatter mode if necessary for better performance. I.e. if
-        * MTU was greater than the mbuf size and now it's less, scatter Rx
-        * doesn't have to be used and vice versa.
-         */
+       /*
+        * The device has started, re-do RQs on the fly. In the process, we
+        * pick up the new maximum packet length.
+        *
+        * Some applications rely on the ability to change MTU without stopping
+        * the device. So keep this behavior for now.
+        */
        rte_spinlock_lock(&enic->mtu_lock);
 
        /* Stop traffic on all RQs */
@@ -1341,12 +1672,12 @@ int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
 
        /* now it is safe to reconfigure the RQs */
 
-       /* update the mtu */
-       eth_dev->data->mtu = new_mtu;
 
        /* free and reallocate RQs with the new MTU */
        for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
                rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
+               if (!rq->in_use)
+                       continue;
 
                enic_free_rq(rq);
                rc = enic_alloc_rq(enic, rq_idx, rq->socket_id, rq->mp,
@@ -1367,7 +1698,7 @@ int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
 
        /* put back the real receive function */
        rte_mb();
-       eth_dev->rx_pkt_burst = enic_recv_pkts;
+       enic_pick_rx_handler(eth_dev);
        rte_mb();
 
        /* restart Rx traffic */
@@ -1383,6 +1714,85 @@ set_mtu_done:
        return rc;
 }
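
With this change the MTU can be updated while the port is running; the PMD
re-creates the RQs on the fly as described above. App-side sketch (port_id
and the 9000-byte value are placeholders; the value must not exceed the
MTU configured in the NIC):

	ret = rte_eth_dev_set_mtu(port_id, 9000);
	if (ret != 0)
		printf("MTU update failed: %d\n", ret);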
 
+static void
+enic_disable_overlay_offload(struct enic *enic)
+{
+       /*
+        * Disabling fails if the feature is provisioned but
+        * not enabled. So ignore result and do not log error.
+        */
+       if (enic->vxlan) {
+               vnic_dev_overlay_offload_ctrl(enic->vdev,
+                       OVERLAY_FEATURE_VXLAN, OVERLAY_OFFLOAD_DISABLE);
+       }
+       if (enic->geneve) {
+               vnic_dev_overlay_offload_ctrl(enic->vdev,
+                       OVERLAY_FEATURE_GENEVE, OVERLAY_OFFLOAD_DISABLE);
+       }
+}
+
+static int
+enic_enable_overlay_offload(struct enic *enic)
+{
+       if (enic->vxlan && vnic_dev_overlay_offload_ctrl(enic->vdev,
+                       OVERLAY_FEATURE_VXLAN, OVERLAY_OFFLOAD_ENABLE) != 0) {
+               dev_err(NULL, "failed to enable VXLAN offload\n");
+               return -EINVAL;
+       }
+       if (enic->geneve && vnic_dev_overlay_offload_ctrl(enic->vdev,
+                       OVERLAY_FEATURE_GENEVE, OVERLAY_OFFLOAD_ENABLE) != 0) {
+               dev_err(NULL, "failed to enable Geneve offload\n");
+               return -EINVAL;
+       }
+       enic->tx_offload_capa |=
+               RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+               (enic->geneve ? RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO : 0) |
+               (enic->vxlan ? RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO : 0);
+       enic->tx_offload_mask |=
+               RTE_MBUF_F_TX_OUTER_IPV6 |
+               RTE_MBUF_F_TX_OUTER_IPV4 |
+               RTE_MBUF_F_TX_OUTER_IP_CKSUM |
+               RTE_MBUF_F_TX_TUNNEL_MASK;
+       enic->overlay_offload = true;
+
+       if (enic->vxlan && enic->geneve)
+               dev_info(NULL, "Overlay offload is enabled (VxLAN, Geneve)\n");
+       else if (enic->vxlan)
+               dev_info(NULL, "Overlay offload is enabled (VxLAN)\n");
+       else
+               dev_info(NULL, "Overlay offload is enabled (Geneve)\n");
+
+       return 0;
+}
+
+static int
+enic_reset_overlay_port(struct enic *enic)
+{
+       if (enic->vxlan) {
+               enic->vxlan_port = RTE_VXLAN_DEFAULT_PORT;
+               /*
+                * Reset the vxlan port to the default, as the NIC firmware
+                * does not reset it automatically and keeps the old setting.
+                */
+               if (vnic_dev_overlay_offload_cfg(enic->vdev,
+                                                OVERLAY_CFG_VXLAN_PORT_UPDATE,
+                                                RTE_VXLAN_DEFAULT_PORT)) {
+                       dev_err(enic, "failed to update vxlan port\n");
+                       return -EINVAL;
+               }
+       }
+       if (enic->geneve) {
+               enic->geneve_port = RTE_GENEVE_DEFAULT_PORT;
+               if (vnic_dev_overlay_offload_cfg(enic->vdev,
+                                                OVERLAY_CFG_GENEVE_PORT_UPDATE,
+                                                RTE_GENEVE_DEFAULT_PORT)) {
+                       dev_err(enic, "failed to update vxlan port\n");
+                       return -EINVAL;
+               }
+       }
+       return 0;
+}
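
Applications can still move the tunnel port away from the default restored
here. App-side sketch using the generic UDP tunnel API (port_id is a
placeholder and 6081 is simply the Geneve default repeated as an example):

	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = 6081,
		.prot_type = RTE_ETH_TUNNEL_TYPE_GENEVE,
	};

	rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);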
+
 static int enic_dev_init(struct enic *enic)
 {
        int err;
@@ -1409,6 +1819,8 @@ static int enic_dev_init(struct enic *enic)
        /* Queue counts may be zero. rte_zmalloc returns NULL in that case. */
        enic->cq = rte_zmalloc("enic_vnic_cq", sizeof(struct vnic_cq) *
                               enic->conf_cq_count, 8);
+       enic->intr = rte_zmalloc("enic_vnic_intr", sizeof(struct vnic_intr) *
+                                enic->conf_intr_count, 8);
        enic->rq = rte_zmalloc("enic_vnic_rq", sizeof(struct vnic_rq) *
                               enic->conf_rq_count, 8);
        enic->wq = rte_zmalloc("enic_vnic_wq", sizeof(struct vnic_wq) *
@@ -1417,6 +1829,10 @@ static int enic_dev_init(struct enic *enic)
                dev_err(enic, "failed to allocate vnic_cq, aborting.\n");
                return -1;
        }
+       if (enic->conf_intr_count > 0 && enic->intr == NULL) {
+               dev_err(enic, "failed to allocate vnic_intr, aborting.\n");
+               return -1;
+       }
        if (enic->conf_rq_count > 0 && enic->rq == NULL) {
                dev_err(enic, "failed to allocate vnic_rq, aborting.\n");
                return -1;
@@ -1426,28 +1842,63 @@ static int enic_dev_init(struct enic *enic)
                return -1;
        }
 
-       /* Get the supported filters */
-       enic_fdir_info(enic);
-
-       eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr", ETH_ALEN
-                                               * ENIC_MAX_MAC_ADDR, 0);
+       eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr",
+                                       sizeof(struct rte_ether_addr) *
+                                       ENIC_UNICAST_PERFECT_FILTERS, 0);
        if (!eth_dev->data->mac_addrs) {
                dev_err(enic, "mac addr storage alloc failed, aborting.\n");
                return -1;
        }
-       ether_addr_copy((struct ether_addr *) enic->mac_addr,
+       rte_ether_addr_copy((struct rte_ether_addr *)enic->mac_addr,
                        eth_dev->data->mac_addrs);
 
        vnic_dev_set_reset_flag(enic->vdev, 0);
 
        LIST_INIT(&enic->flows);
-       rte_spinlock_init(&enic->flows_lock);
 
        /* set up link status checking */
        vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */
 
+       enic->overlay_offload = false;
+       /*
+        * First, explicitly disable overlay offload as the setting is
+        * sticky, and resetting vNIC may not disable it.
+        */
+       enic_disable_overlay_offload(enic);
+       /* Then, enable overlay offload according to vNIC flags */
+       if (!enic->disable_overlay && (enic->vxlan || enic->geneve)) {
+               err = enic_enable_overlay_offload(enic);
+               if (err) {
+                       dev_info(NULL, "failed to enable overlay offload\n");
+                       return err;
+               }
+       }
+       /*
+        * Reset the vxlan/geneve port if HW parsing is available. It
+        * is always enabled regardless of overlay offload
+        * enable/disable.
+        */
+       err = enic_reset_overlay_port(enic);
+       if (err)
+               return err;
+
+       if (enic_fm_init(enic))
+               dev_warning(enic, "Init of flowman failed.\n");
        return 0;
+}
 
+static void lock_devcmd(void *priv)
+{
+       struct enic *enic = priv;
+
+       rte_spinlock_lock(&enic->devcmd_lock);
+}
+
+static void unlock_devcmd(void *priv)
+{
+       struct enic *enic = priv;
+
+       rte_spinlock_unlock(&enic->devcmd_lock);
 }
 
 int enic_probe(struct enic *enic)
@@ -1455,7 +1906,7 @@ int enic_probe(struct enic *enic)
        struct rte_pci_device *pdev = enic->pdev;
        int err = -1;
 
-       dev_debug(enic, " Initializing ENIC PMD\n");
+       dev_debug(enic, "Initializing ENIC PMD\n");
 
        /* if this is a secondary process the hardware is already initialized */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
@@ -1495,8 +1946,10 @@ int enic_probe(struct enic *enic)
        }
 
        /* Set ingress vlan rewrite mode before vnic initialization */
+       dev_debug(enic, "Set ig_vlan_rewrite_mode=%u\n",
+                 enic->ig_vlan_rewrite_mode);
        err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
-               IG_VLAN_REWRITE_MODE_PASS_THRU);
+               enic->ig_vlan_rewrite_mode);
        if (err) {
                dev_err(enic,
                        "Failed to set ingress vlan rewrite mode, aborting.\n");
@@ -1523,6 +1976,11 @@ int enic_probe(struct enic *enic)
                goto err_out_dev_close;
        }
 
+       /* Use a PF spinlock to serialize devcmd from PF and VF representors */
+       if (enic->switchdev_mode) {
+               rte_spinlock_init(&enic->devcmd_lock);
+               vnic_register_lock(enic->vdev, lock_devcmd, unlock_devcmd);
+       }
        return 0;
 
 err_out_dev_close: