drivers: advertise kmod dependencies in pmdinfo
diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index 0d85fa4..e315dd8 100644
  * Netronome vNIC DPDK Poll-Mode Driver: Main entry point
  */
 
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include <unistd.h>
-#include <string.h>
-#include <sys/mman.h>
-#include <sys/socket.h>
-#include <sys/io.h>
-#include <assert.h>
-#include <time.h>
 #include <math.h>
-#include <inttypes.h>
 
 #include <rte_byteorder.h>
 #include <rte_common.h>
@@ -65,6 +54,7 @@
 #include <rte_version.h>
 #include <rte_string_fns.h>
 #include <rte_alarm.h>
+#include <rte_spinlock.h>
 
 #include "nfp_net_pmd.h"
 #include "nfp_net_logs.h"
 /* Prototypes */
 static void nfp_net_close(struct rte_eth_dev *dev);
 static int nfp_net_configure(struct rte_eth_dev *dev);
+static void nfp_net_dev_interrupt_handler(struct rte_intr_handle *handle,
+                                         void *param);
+static void nfp_net_dev_interrupt_delayed_handler(void *param);
+static int nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+static void nfp_net_infos_get(struct rte_eth_dev *dev,
+                             struct rte_eth_dev_info *dev_info);
 static int nfp_net_init(struct rte_eth_dev *eth_dev);
+static int nfp_net_link_update(struct rte_eth_dev *dev, int wait_to_complete);
+static void nfp_net_promisc_enable(struct rte_eth_dev *dev);
+static void nfp_net_promisc_disable(struct rte_eth_dev *dev);
 static int nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq);
 static uint32_t nfp_net_rx_queue_count(struct rte_eth_dev *dev,
                                       uint16_t queue_idx);
@@ -90,6 +89,9 @@ static int nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                                  uint16_t nb_desc, unsigned int socket_id,
                                  const struct rte_eth_txconf *tx_conf);
 static int nfp_net_start(struct rte_eth_dev *dev);
+static void nfp_net_stats_get(struct rte_eth_dev *dev,
+                             struct rte_eth_stats *stats);
+static void nfp_net_stats_reset(struct rte_eth_dev *dev);
 static void nfp_net_stop(struct rte_eth_dev *dev);
 static uint16_t nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                                  uint16_t nb_pkts);
@@ -212,7 +214,7 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
        const struct rte_memzone *mz;
 
        snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
-                dev->driver->pci_drv.name,
+                dev->driver->pci_drv.driver.name,
                 ring_name, dev->data->port_id, queue_id);
 
        mz = rte_memzone_lookup(z_name);
@@ -223,6 +225,57 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
                                           NFP_MEMZONE_ALIGN);
 }
 
+/*
+ * Atomically reads link status information from global structure rte_eth_dev.
+ *
+ * @param dev
+ *   Pointer to the structure rte_eth_dev to read from.
+ * @param link
+ *   Pointer to the buffer where the link status will be saved.
+ *
+ * @return
+ *   - On success, zero.
+ *   - On failure, negative value.
+ */
+static inline int
+nfp_net_dev_atomic_read_link_status(struct rte_eth_dev *dev,
+                                   struct rte_eth_link *link)
+{
+       struct rte_eth_link *dst = link;
+       struct rte_eth_link *src = &dev->data->dev_link;
+
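+       /* The 8-byte rte_eth_link is copied with a single 64-bit
+        * compare-and-set, mirroring the pattern used by other PMDs.
+        */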
+       if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+                               *(uint64_t *)src) == 0)
+               return -1;
+
+       return 0;
+}
+
+/*
+ * Atomically writes the link status information into global
+ * structure rte_eth_dev.
+ *
+ * @param dev
+ *   Pointer to the structure rte_eth_dev to write to.
+ * @param link
+ *   Pointer to the buffer holding the link status to be saved.
+ *
+ * @return
+ *   - On success, zero.
+ *   - On failure, negative value.
+ */
+static inline int
+nfp_net_dev_atomic_write_link_status(struct rte_eth_dev *dev,
+                                    struct rte_eth_link *link)
+{
+       struct rte_eth_link *dst = &dev->data->dev_link;
+       struct rte_eth_link *src = link;
+
+       if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+                               *(uint64_t *)src) == 0)
+               return -1;
+
+       return 0;
+}
+
 static void
 nfp_net_rx_queue_release_mbufs(struct nfp_net_rxq *rxq)
 {
@@ -270,7 +323,7 @@ nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq)
 
        for (i = 0; i < txq->tx_count; i++) {
                if (txq->txbufs[i].mbuf) {
-                       rte_pktmbuf_free_seg(txq->txbufs[i].mbuf);
+                       rte_pktmbuf_free(txq->txbufs[i].mbuf);
                        txq->txbufs[i].mbuf = NULL;
                }
        }
@@ -295,6 +348,7 @@ nfp_net_reset_tx_queue(struct nfp_net_txq *txq)
        txq->wr_p = 0;
        txq->rd_p = 0;
        txq->tail = 0;
+       txq->qcp_rd_p = 0;
 }
 
 static int
@@ -354,6 +408,8 @@ nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update)
        PMD_DRV_LOG(DEBUG, "nfp_net_reconfig: ctrl=%08x update=%08x\n",
                    ctrl, update);
 
+       rte_spinlock_lock(&hw->reconfig_lock);
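+       /*
+        * Only one reconfig (the CTRL/UPDATE writes plus the completion poll
+        * in __nfp_net_reconfig) may be in flight at a time.
+        */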
+
        nn_cfg_writel(hw, NFP_NET_CFG_CTRL, ctrl);
        nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, update);
 
@@ -361,6 +417,8 @@ nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update)
 
        err = __nfp_net_reconfig(hw, update);
 
+       rte_spinlock_unlock(&hw->reconfig_lock);
+
        if (!err)
                return 0;
 
@@ -549,18 +607,8 @@ nfp_net_rx_freelist_setup(struct rte_eth_dev *dev)
 static void
 nfp_net_params_setup(struct nfp_net_hw *hw)
 {
-       uint32_t *mac_address;
-
        nn_cfg_writel(hw, NFP_NET_CFG_MTU, hw->mtu);
        nn_cfg_writel(hw, NFP_NET_CFG_FLBUFSZ, hw->flbufsz);
-
-       /* A MAC address is 8 bytes long */
-       mac_address = (uint32_t *)(hw->mac_addr);
-
-       nn_cfg_writel(hw, NFP_NET_CFG_MACADDR,
-                     rte_cpu_to_be_32(*mac_address));
-       nn_cfg_writel(hw, NFP_NET_CFG_MACADDR + 4,
-                     rte_cpu_to_be_32(*(mac_address + 4)));
 }
 
 static void
@@ -569,6 +617,17 @@ nfp_net_cfg_queue_setup(struct nfp_net_hw *hw)
        hw->qcp_cfg = hw->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
 }
 
+static void nfp_net_read_mac(struct nfp_net_hw *hw)
+{
+       uint32_t tmp;
+
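+       /* The MAC address lives in the config BAR as 4 bytes at
+        * NFP_NET_CFG_MACADDR plus 2 bytes at NFP_NET_CFG_MACADDR + 4,
+        * in big-endian order.
+        */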
+       tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR));
+       memcpy(&hw->mac_addr[0], &tmp, 4);
+
+       tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR + 4));
+       memcpy(&hw->mac_addr[4], &tmp, 2);
+}
+
 static int
 nfp_net_start(struct rte_eth_dev *dev)
 {
@@ -671,14 +730,351 @@ nfp_net_close(struct rte_eth_dev *dev)
 
        nfp_net_stop(dev);
 
+       rte_intr_disable(&dev->pci_dev->intr_handle);
        nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);
 
+       /* unregister callback func from eal lib */
+       rte_intr_callback_unregister(&dev->pci_dev->intr_handle,
+                                    nfp_net_dev_interrupt_handler,
+                                    (void *)dev);
+
        /*
         * The ixgbe PMD driver disables the pcie master on the
         * device. The i40e does not...
         */
 }
 
+static void
+nfp_net_promisc_enable(struct rte_eth_dev *dev)
+{
+       uint32_t new_ctrl, update = 0;
+       struct nfp_net_hw *hw;
+
+       PMD_DRV_LOG(DEBUG, "Promiscuous mode enable\n");
+
+       hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if (!(hw->cap & NFP_NET_CFG_CTRL_PROMISC)) {
+               PMD_INIT_LOG(INFO, "Promiscuous mode not supported\n");
+               return;
+       }
+
+       if (hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) {
+               PMD_DRV_LOG(INFO, "Promiscuous mode already enabled\n");
+               return;
+       }
+
+       new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC;
+       update = NFP_NET_CFG_UPDATE_GEN;
+
+       /*
+        * DPDK sets promiscuous mode on just after this call assuming
+        * it can not fail ...
+        */
+       if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
+               return;
+
+       hw->ctrl = new_ctrl;
+}
+
+static void
+nfp_net_promisc_disable(struct rte_eth_dev *dev)
+{
+       uint32_t new_ctrl, update = 0;
+       struct nfp_net_hw *hw;
+
+       hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) == 0) {
+               PMD_DRV_LOG(INFO, "Promiscuous mode already disabled\n");
+               return;
+       }
+
+       new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_PROMISC;
+       update = NFP_NET_CFG_UPDATE_GEN;
+
+       /*
+        * DPDK sets promiscuous mode off just before this call
+        * assuming it can not fail ...
+        */
+       if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
+               return;
+
+       hw->ctrl = new_ctrl;
+}
+
+/*
+ * Returns 0 when the link status changed, -1 when it did not change.
+ *
+ * Wait to complete is needed as it can take up to 9 seconds to get the Link
+ * status.
+ */
+static int
+nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
+{
+       struct nfp_net_hw *hw;
+       struct rte_eth_link link, old;
+       uint32_t nn_link_status;
+
+       PMD_DRV_LOG(DEBUG, "Link update\n");
+
+       hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       memset(&old, 0, sizeof(old));
+       nfp_net_dev_atomic_read_link_status(dev, &old);
+
+       nn_link_status = nn_cfg_readl(hw, NFP_NET_CFG_STS);
+
+       memset(&link, 0, sizeof(struct rte_eth_link));
+
+       if (nn_link_status & NFP_NET_CFG_STS_LINK)
+               link.link_status = ETH_LINK_UP;
+
+       link.link_duplex = ETH_LINK_FULL_DUPLEX;
+       /* Other cards can limit the tx and rx rate per VF */
+       link.link_speed = ETH_SPEED_NUM_40G;
+
+       if (old.link_status != link.link_status) {
+               nfp_net_dev_atomic_write_link_status(dev, &link);
+               if (link.link_status)
+                       PMD_DRV_LOG(INFO, "NIC Link is Up\n");
+               else
+                       PMD_DRV_LOG(INFO, "NIC Link is Down\n");
+               return 0;
+       }
+
+       return -1;
+}
+
+static void
+nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+       int i;
+       struct nfp_net_hw *hw;
+       struct rte_eth_stats nfp_dev_stats;
+
+       hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
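+       /*
+        * The config BAR counters are never cleared by the driver; everything
+        * below is reported relative to the snapshot taken in
+        * nfp_net_stats_reset() (hw->eth_stats_base).
+        */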
+       /* RTE_ETHDEV_QUEUE_STAT_CNTRS default value is 16 */
+
+       /* reading per RX ring stats */
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
+                       break;
+
+               nfp_dev_stats.q_ipackets[i] =
+                       nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));
+
+               nfp_dev_stats.q_ipackets[i] -=
+                       hw->eth_stats_base.q_ipackets[i];
+
+               nfp_dev_stats.q_ibytes[i] =
+                       nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
+
+               nfp_dev_stats.q_ibytes[i] -=
+                       hw->eth_stats_base.q_ibytes[i];
+       }
+
+       /* reading per TX ring stats */
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
+                       break;
+
+               nfp_dev_stats.q_opackets[i] =
+                       nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));
+
+               nfp_dev_stats.q_opackets[i] -=
+                       hw->eth_stats_base.q_opackets[i];
+
+               nfp_dev_stats.q_obytes[i] =
+                       nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
+
+               nfp_dev_stats.q_obytes[i] -=
+                       hw->eth_stats_base.q_obytes[i];
+       }
+
+       nfp_dev_stats.ipackets =
+               nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);
+
+       nfp_dev_stats.ipackets -= hw->eth_stats_base.ipackets;
+
+       nfp_dev_stats.ibytes =
+               nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
+
+       nfp_dev_stats.ibytes -= hw->eth_stats_base.ibytes;
+
+       nfp_dev_stats.opackets =
+               nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);
+
+       nfp_dev_stats.opackets -= hw->eth_stats_base.opackets;
+
+       nfp_dev_stats.obytes =
+               nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
+
+       nfp_dev_stats.obytes -= hw->eth_stats_base.obytes;
+
+       /* reading general device stats */
+       nfp_dev_stats.ierrors =
+               nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
+
+       nfp_dev_stats.ierrors -= hw->eth_stats_base.ierrors;
+
+       nfp_dev_stats.oerrors =
+               nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
+
+       nfp_dev_stats.oerrors -= hw->eth_stats_base.oerrors;
+
+       /* RX ring mbuf allocation failures */
+       nfp_dev_stats.rx_nombuf = dev->data->rx_mbuf_alloc_failed;
+
+       nfp_dev_stats.imissed =
+               nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
+
+       nfp_dev_stats.imissed -= hw->eth_stats_base.imissed;
+
+       if (stats)
+               memcpy(stats, &nfp_dev_stats, sizeof(*stats));
+}
+
+static void
+nfp_net_stats_reset(struct rte_eth_dev *dev)
+{
+       int i;
+       struct nfp_net_hw *hw;
+
+       hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       /*
+        * hw->eth_stats_base records the per-counter starting point.
+        * Let's update it now.
+        */
+
+       /* reading per RX ring stats */
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
+                       break;
+
+               hw->eth_stats_base.q_ipackets[i] =
+                       nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));
+
+               hw->eth_stats_base.q_ibytes[i] =
+                       nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
+       }
+
+       /* reading per TX ring stats */
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
+                       break;
+
+               hw->eth_stats_base.q_opackets[i] =
+                       nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));
+
+               hw->eth_stats_base.q_obytes[i] =
+                       nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
+       }
+
+       hw->eth_stats_base.ipackets =
+               nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);
+
+       hw->eth_stats_base.ibytes =
+               nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
+
+       hw->eth_stats_base.opackets =
+               nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);
+
+       hw->eth_stats_base.obytes =
+               nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
+
+       /* reading general device stats */
+       hw->eth_stats_base.ierrors =
+               nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
+
+       hw->eth_stats_base.oerrors =
+               nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
+
+       /* RX ring mbuf allocation failures */
+       dev->data->rx_mbuf_alloc_failed = 0;
+
+       hw->eth_stats_base.imissed =
+               nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
+}
+
+static void
+nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+       struct nfp_net_hw *hw;
+
+       hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       dev_info->driver_name = dev->driver->pci_drv.driver.name;
+       dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
+       dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
+       dev_info->min_rx_bufsize = ETHER_MIN_MTU;
+       dev_info->max_rx_pktlen = hw->mtu;
+       /* Next should change when PF support is implemented */
+       dev_info->max_mac_addrs = 1;
+
+       if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
+               dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+
+       if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
+               dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IPV4_CKSUM |
+                                            DEV_RX_OFFLOAD_UDP_CKSUM |
+                                            DEV_RX_OFFLOAD_TCP_CKSUM;
+
+       if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
+               dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
+
+       if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
+               dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM |
+                                            DEV_TX_OFFLOAD_UDP_CKSUM |
+                                            DEV_TX_OFFLOAD_TCP_CKSUM;
+
+       dev_info->default_rxconf = (struct rte_eth_rxconf) {
+               .rx_thresh = {
+                       .pthresh = DEFAULT_RX_PTHRESH,
+                       .hthresh = DEFAULT_RX_HTHRESH,
+                       .wthresh = DEFAULT_RX_WTHRESH,
+               },
+               .rx_free_thresh = DEFAULT_RX_FREE_THRESH,
+               .rx_drop_en = 0,
+       };
+
+       dev_info->default_txconf = (struct rte_eth_txconf) {
+               .tx_thresh = {
+                       .pthresh = DEFAULT_TX_PTHRESH,
+                       .hthresh = DEFAULT_TX_HTHRESH,
+                       .wthresh = DEFAULT_TX_WTHRESH,
+               },
+               .tx_free_thresh = DEFAULT_TX_FREE_THRESH,
+               .tx_rs_thresh = DEFAULT_TX_RSBIT_THRESH,
+               .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
+                            ETH_TXQ_FLAGS_NOOFFLOADS,
+       };
+
+       dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
+       dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
+
+       dev_info->speed_capa = ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G;
+}
+
+static const uint32_t *
+nfp_net_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+       static const uint32_t ptypes[] = {
+               /* refers to nfp_net_set_hash() */
+               RTE_PTYPE_INNER_L3_IPV4,
+               RTE_PTYPE_INNER_L3_IPV6,
+               RTE_PTYPE_INNER_L3_IPV6_EXT,
+               RTE_PTYPE_INNER_L4_MASK,
+               RTE_PTYPE_UNKNOWN
+       };
+
+       if (dev->rx_pkt_burst == nfp_net_recv_pkts)
+               return ptypes;
+       return NULL;
+}
+
 static uint32_t
 nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
@@ -723,6 +1119,142 @@ nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
        return count;
 }
 
+static void
+nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
+{
+       struct rte_eth_link link;
+
+       memset(&link, 0, sizeof(link));
+       nfp_net_dev_atomic_read_link_status(dev, &link);
+       if (link.link_status)
+               RTE_LOG(INFO, PMD, "Port %d: Link Up - speed %u Mbps - %s\n",
+                       (int)(dev->data->port_id), (unsigned)link.link_speed,
+                       link.link_duplex == ETH_LINK_FULL_DUPLEX
+                       ? "full-duplex" : "half-duplex");
+       else
+               RTE_LOG(INFO, PMD, " Port %d: Link Down\n",
+                       (int)(dev->data->port_id));
+
+       RTE_LOG(INFO, PMD, "PCI Address: %04d:%02d:%02d:%d\n",
+               dev->pci_dev->addr.domain, dev->pci_dev->addr.bus,
+               dev->pci_dev->addr.devid, dev->pci_dev->addr.function);
+}
+
+/* Interrupt configuration and handling */
+
+/*
+ * nfp_net_irq_unmask - Unmask an interrupt
+ *
+ * If MSI-X auto-masking is enabled clear the mask bit, otherwise
+ * clear the ICR for the entry.
+ */
+static void
+nfp_net_irq_unmask(struct rte_eth_dev *dev)
+{
+       struct nfp_net_hw *hw;
+
+       hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if (hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) {
+               /* If MSI-X auto-masking is used, clear the entry */
+               rte_wmb();
+               rte_intr_enable(&dev->pci_dev->intr_handle);
+       } else {
+               /* Make sure all updates are written before un-masking */
+               rte_wmb();
+               nn_cfg_writeb(hw, NFP_NET_CFG_ICR(NFP_NET_IRQ_LSC_IDX),
+                             NFP_NET_CFG_ICR_UNMASKED);
+       }
+}
+
+static void
+nfp_net_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
+                             void *param)
+{
+       int64_t timeout;
+       struct rte_eth_link link;
+       struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+
+       PMD_DRV_LOG(DEBUG, "We got a LSC interrupt!!!\n");
+
+       /* get the link status */
+       memset(&link, 0, sizeof(link));
+       nfp_net_dev_atomic_read_link_status(dev, &link);
+
+       nfp_net_link_update(dev, 0);
+
+       if (!link.link_status) {
+               /* The link was down, so it is likely coming up: check again
+                * in 1 second, once the state has settled.
+                */
+               timeout = NFP_NET_LINK_UP_CHECK_TIMEOUT;
+       } else {
+               /* The link was up, so it is likely going down: check again
+                * in 4 seconds, once the state has settled.
+                */
+               timeout = NFP_NET_LINK_DOWN_CHECK_TIMEOUT;
+       }
+
+       if (rte_eal_alarm_set(timeout * 1000,
+                             nfp_net_dev_interrupt_delayed_handler,
+                             (void *)dev) < 0) {
+               RTE_LOG(ERR, PMD, "Error setting alarm\n");
+               /* Unmasking */
+               nfp_net_irq_unmask(dev);
+       }
+}
+
+/*
+ * Interrupt handler registered as an alarm callback so that the LSC
+ * interrupt is handled with a delay, once the NIC state is stable. The NFP
+ * link state is not reliable right after the link goes down, so the check
+ * is postponed for up to 4 seconds.
+ *
+ * @param param    The address of the parameter (struct rte_eth_dev *)
+ *
+ * @return  void
+ */
+static void
+nfp_net_dev_interrupt_delayed_handler(void *param)
+{
+       struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+
+       nfp_net_link_update(dev, 0);
+       _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+
+       nfp_net_dev_link_status_print(dev);
+
+       /* Unmasking */
+       nfp_net_irq_unmask(dev);
+}
+
+static int
+nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+       struct nfp_net_hw *hw;
+
+       hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       /* check that mtu is within the allowed range */
+       if ((mtu < ETHER_MIN_MTU) || ((uint32_t)mtu > hw->max_mtu))
+               return -EINVAL;
+
+       /* switch to jumbo mode if needed */
+       if ((uint32_t)mtu > ETHER_MAX_LEN)
+               dev->data->dev_conf.rxmode.jumbo_frame = 1;
+       else
+               dev->data->dev_conf.rxmode.jumbo_frame = 0;
+
+       /* update max frame size */
+       dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)mtu;
+
+       /* writing to configuration space */
+       nn_cfg_writel(hw, NFP_NET_CFG_MTU, (uint32_t)mtu);
+
+       hw->mtu = mtu;
+
+       return 0;
+}
+
 static int
 nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
                       uint16_t queue_idx, uint16_t nb_desc,
@@ -743,7 +1275,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
            (nb_desc > NFP_NET_MAX_RX_DESC) ||
            (nb_desc < NFP_NET_MIN_RX_DESC)) {
                RTE_LOG(ERR, PMD, "Wrong nb_desc value\n");
-               return (-EINVAL);
+               return -EINVAL;
        }
 
        /*
@@ -759,7 +1291,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
        rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct nfp_net_rxq),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (rxq == NULL)
-               return (-ENOMEM);
+               return -ENOMEM;
 
        /* Hw queues mapping based on firmware configuration */
        rxq->qidx = queue_idx;
@@ -796,7 +1328,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
        if (tz == NULL) {
                RTE_LOG(ERR, PMD, "Error allocating rx dma\n");
                nfp_net_rx_queue_release(rxq);
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        /* Saving physical and virtual addresses for the RX ring */
@@ -809,7 +1341,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
                                         RTE_CACHE_LINE_SIZE, socket_id);
        if (rxq->rxbufs == NULL) {
                nfp_net_rx_queue_release(rxq);
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        PMD_RX_LOG(DEBUG, "rxbufs=%p hw_ring=%p dma_addr=0x%" PRIx64 "\n",
@@ -847,7 +1379,7 @@ nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq)
                if (mbuf == NULL) {
                        RTE_LOG(ERR, PMD, "RX mbuf alloc failed queue_id=%u\n",
                                (unsigned)rxq->qidx);
-                       return (-ENOMEM);
+                       return -ENOMEM;
                }
 
                dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(mbuf));
@@ -925,7 +1457,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (txq == NULL) {
                RTE_LOG(ERR, PMD, "Error allocating tx dma\n");
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        /*
@@ -939,7 +1471,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        if (tz == NULL) {
                RTE_LOG(ERR, PMD, "Error allocating tx dma\n");
                nfp_net_tx_queue_release(txq);
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        txq->tx_count = nb_desc;
@@ -967,7 +1499,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                                         RTE_CACHE_LINE_SIZE, socket_id);
        if (txq->txbufs == NULL) {
                nfp_net_tx_queue_release(txq);
-               return (-ENOMEM);
+               return -ENOMEM;
        }
        PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64 "\n",
                   txq->txbufs, txq->txds, (unsigned long int)txq->dma);
@@ -992,7 +1524,7 @@ static inline void
 nfp_net_tx_cksum(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
                 struct rte_mbuf *mb)
 {
-       uint16_t ol_flags;
+       uint64_t ol_flags;
        struct nfp_net_hw *hw = txq->hw;
 
        if (!(hw->cap & NFP_NET_CFG_CTRL_TXCSUM))
@@ -1013,7 +1545,8 @@ nfp_net_tx_cksum(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
                break;
        }
 
-       txd->flags |= PCIE_DESC_TX_CSUM;
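+       /* Request hardware checksumming only when the application asked for
+        * an IP or L4 checksum offload on this mbuf.
+        */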
+       if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
+               txd->flags |= PCIE_DESC_TX_CSUM;
 }
 
 /* nfp_net_rx_cksum - set mbuf checksum flags based on RX descriptor flags */
@@ -1174,7 +1707,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                 * DPDK just checks the queue is lower than max queues
                 * enabled. But the queue needs to be configured
                 */
-               RTE_LOG(ERR, PMD, "RX Bad queue\n");
+               RTE_LOG_DP(ERR, PMD, "RX Bad queue\n");
                return -EINVAL;
        }
 
@@ -1187,7 +1720,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
                rxb = &rxq->rxbufs[idx];
                if (unlikely(rxb == NULL)) {
-                       RTE_LOG(ERR, PMD, "rxb does not exist!\n");
+                       RTE_LOG_DP(ERR, PMD, "rxb does not exist!\n");
                        break;
                }
 
@@ -1207,7 +1740,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                 */
                new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
                if (unlikely(new_mb == NULL)) {
-                       RTE_LOG(DEBUG, PMD, "RX mbuf alloc failed port_id=%u "
+                       RTE_LOG_DP(DEBUG, PMD, "RX mbuf alloc failed port_id=%u "
                                "queue_id=%u\n", (unsigned)rxq->port_id,
                                (unsigned)rxq->qidx);
                        nfp_net_mbuf_alloc_failed(rxq);
@@ -1238,7 +1771,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                         * responsibility of avoiding it. But we have
                         * to give some info about the error
                         */
-                       RTE_LOG(ERR, PMD,
+                       RTE_LOG_DP(ERR, PMD,
                                "mbuf overflow likely due to the RX offset.\n"
                                "\t\tYour mbuf size should have extra space for"
                                " RX offset=%u bytes.\n"
@@ -1273,7 +1806,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                if ((rxds->rxd.flags & PCIE_DESC_RX_VLAN) &&
                    (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) {
                        mb->vlan_tci = rte_cpu_to_le_32(rxds->rxd.vlan);
-                       mb->ol_flags |= PKT_RX_VLAN_PKT;
+                       mb->ol_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
                }
 
                /* Adding the mbuff to the mbuff array passed by the app */
@@ -1449,13 +1982,18 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                 */
                pkt_size = pkt->pkt_len;
 
-               while (pkt_size) {
-                       /* Releasing mbuf which was prefetched above */
-                       if (*lmbuf)
-                               rte_pktmbuf_free_seg(*lmbuf);
+               /* Releasing mbuf which was prefetched above */
+               if (*lmbuf)
+                       rte_pktmbuf_free(*lmbuf);
+               /*
+                * Link the mbuf to this descriptor so it is freed the next
+                * time the descriptor is reused
+                */
+               *lmbuf = pkt;
 
+               while (pkt_size) {
                        dma_size = pkt->data_len;
-                       dma_addr = RTE_MBUF_DATA_DMA_ADDR(pkt);
+                       dma_addr = rte_mbuf_data_dma_addr(pkt);
                        PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
                                   "%" PRIx64 "\n", dma_addr);
 
@@ -1467,12 +2005,6 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                        ASSERT(free_descs > 0);
                        free_descs--;
 
-                       /*
-                        * Linking mbuf with descriptor for being released
-                        * next time descriptor is used
-                        */
-                       *lmbuf = pkt;
-
                        txq->wr_p++;
                        txq->tail++;
                        if (unlikely(txq->tail == txq->tx_count)) /* wrapping?*/
@@ -1501,12 +2033,274 @@ xmit_end:
        return i;
 }
 
+static void
+nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+       uint32_t new_ctrl, update;
+       struct nfp_net_hw *hw;
+
+       hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       new_ctrl = 0;
+
+       if ((mask & ETH_VLAN_FILTER_OFFLOAD) ||
+           (mask & ETH_VLAN_EXTEND_OFFLOAD))
+               RTE_LOG(INFO, PMD, "No support for ETH_VLAN_FILTER_OFFLOAD or"
+                       " ETH_VLAN_EXTEND_OFFLOAD");
+
+       /* Enable vlan strip if it is not configured yet */
+       if ((mask & ETH_VLAN_STRIP_OFFLOAD) &&
+           !(hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
+               new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_RXVLAN;
+
+       /* Disable vlan strip just if it is configured */
+       if (!(mask & ETH_VLAN_STRIP_OFFLOAD) &&
+           (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
+               new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_RXVLAN;
+
+       if (new_ctrl == 0)
+               return;
+
+       update = NFP_NET_CFG_UPDATE_GEN;
+
+       if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
+               return;
+
+       hw->ctrl = new_ctrl;
+}
+
+/* Update Redirection Table(RETA) of Receive Side Scaling of Ethernet device */
+static int
+nfp_net_reta_update(struct rte_eth_dev *dev,
+                   struct rte_eth_rss_reta_entry64 *reta_conf,
+                   uint16_t reta_size)
+{
+       uint32_t reta, mask;
+       int i, j;
+       int idx, shift;
+       uint32_t update;
+       struct nfp_net_hw *hw =
+               NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
+               return -EINVAL;
+
+       if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
+               RTE_LOG(ERR, PMD, "The size of the hash lookup table configured "
+                       "(%d) does not match the number supported by the "
+                       "hardware (%d)\n", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
+               return -EINVAL;
+       }
+
+       /*
+        * Update Redirection Table. There are 128 8-bit entries which can be
+        * managed as 32 32-bit entries.
+        */
+       for (i = 0; i < reta_size; i += 4) {
+               /* Handling 4 RSS entries per loop */
+               idx = i / RTE_RETA_GROUP_SIZE;
+               shift = i % RTE_RETA_GROUP_SIZE;
+               mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
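+               /* e.g. i = 68: idx = 1, shift = 4, so entries 68..71 come
+                * from reta_conf[1].reta[4..7] and land at ITBL byte offset 68
+                */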
+
+               if (!mask)
+                       continue;
+
+               reta = 0;
+               /* If all 4 entries are written, no need to read RETA first */
+               if (mask != 0xF)
+                       reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + i);
+
+               for (j = 0; j < 4; j++) {
+                       if (!(mask & (0x1 << j)))
+                               continue;
+                       if (mask != 0xF)
+                               /* Clearing the entry bits */
+                               reta &= ~(0xFF << (8 * j));
+                       reta |= reta_conf[idx].reta[shift + j] << (8 * j);
+               }
+               nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + i, reta);
+       }
+
+       update = NFP_NET_CFG_UPDATE_RSS;
+
+       if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
+               return -EIO;
+
+       return 0;
+}
+
+ /* Query Redirection Table(RETA) of Receive Side Scaling of Ethernet device. */
+static int
+nfp_net_reta_query(struct rte_eth_dev *dev,
+                  struct rte_eth_rss_reta_entry64 *reta_conf,
+                  uint16_t reta_size)
+{
+       uint8_t i, j, mask;
+       int idx, shift;
+       uint32_t reta;
+       struct nfp_net_hw *hw;
+
+       hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
+               return -EINVAL;
+
+       if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
+               RTE_LOG(ERR, PMD, "The size of the hash lookup table configured "
+                       "(%d) does not match the number supported by the "
+                       "hardware (%d)\n", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
+               return -EINVAL;
+       }
+
+       /*
+        * Reading Redirection Table. There are 128 8-bit entries which can be
+        * managed as 32 32-bit entries.
+        */
+       for (i = 0; i < reta_size; i += 4) {
+               /* Handling 4 RSS entries per loop */
+               idx = i / RTE_RETA_GROUP_SIZE;
+               shift = i % RTE_RETA_GROUP_SIZE;
+               mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
+
+               if (!mask)
+                       continue;
+
+               reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + i);
+               for (j = 0; j < 4; j++) {
+                       if (!(mask & (0x1 << j)))
+                               continue;
+                       reta_conf[idx].reta[shift + j] =
+                               (uint8_t)((reta >> (8 * j)) & 0xFF);
+               }
+       }
+       return 0;
+}
+
+static int
+nfp_net_rss_hash_update(struct rte_eth_dev *dev,
+                       struct rte_eth_rss_conf *rss_conf)
+{
+       uint32_t update;
+       uint32_t cfg_rss_ctrl = 0;
+       uint8_t key;
+       uint64_t rss_hf;
+       int i;
+       struct nfp_net_hw *hw;
+
+       hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       rss_hf = rss_conf->rss_hf;
+
+       /* Checking if RSS is enabled */
+       if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) {
+               if (rss_hf != 0) { /* Enable RSS? */
+                       RTE_LOG(ERR, PMD, "RSS unsupported\n");
+                       return -EINVAL;
+               }
+               return 0; /* Nothing to do */
+       }
+
+       if (rss_conf->rss_key_len > NFP_NET_CFG_RSS_KEY_SZ) {
+               RTE_LOG(ERR, PMD, "hash key too long\n");
+               return -EINVAL;
+       }
+
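+       /* Map the DPDK RSS hash types onto the NFP RSS control bits; the
+        * plain IPv4/IPv6 types also enable the TCP and UDP variants.
+        */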
+       if (rss_hf & ETH_RSS_IPV4)
+               cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4 |
+                               NFP_NET_CFG_RSS_IPV4_TCP |
+                               NFP_NET_CFG_RSS_IPV4_UDP;
+
+       if (rss_hf & ETH_RSS_IPV6)
+               cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6 |
+                               NFP_NET_CFG_RSS_IPV6_TCP |
+                               NFP_NET_CFG_RSS_IPV6_UDP;
+
+       /* configuring where to apply the RSS hash */
+       nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl);
+
+       /* Writing the key byte by byte */
+       for (i = 0; i < rss_conf->rss_key_len; i++) {
+               memcpy(&key, &rss_conf->rss_key[i], 1);
+               nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY + i, key);
+       }
+
+       /* Writing the key size */
+       nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY_SZ, rss_conf->rss_key_len);
+
+       update = NFP_NET_CFG_UPDATE_RSS;
+
+       if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
+               return -EIO;
+
+       return 0;
+}
+
+static int
+nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
+                         struct rte_eth_rss_conf *rss_conf)
+{
+       uint64_t rss_hf;
+       uint32_t cfg_rss_ctrl;
+       uint8_t key;
+       int i;
+       struct nfp_net_hw *hw;
+
+       hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
+               return -EINVAL;
+
+       rss_hf = rss_conf->rss_hf;
+       cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL);
+
+       if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4)
+               rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP;
+
+       if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP)
+               rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+
+       if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_TCP)
+               rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+
+       if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_UDP)
+               rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+
+       if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_UDP)
+               rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+
+       if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6)
+               rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_NONFRAG_IPV6_UDP;
+
+       /* Reading the key size */
+       rss_conf->rss_key_len = nn_cfg_readl(hw, NFP_NET_CFG_RSS_KEY_SZ);
+
+       /* Reading the key byte by byte */
+       for (i = 0; i < rss_conf->rss_key_len; i++) {
+               key = nn_cfg_readb(hw, NFP_NET_CFG_RSS_KEY + i);
+               memcpy(&rss_conf->rss_key[i], &key, 1);
+       }
+
+       return 0;
+}
+
 /* Initialise and register driver with DPDK Application */
-static struct eth_dev_ops nfp_net_eth_dev_ops = {
+static const struct eth_dev_ops nfp_net_eth_dev_ops = {
        .dev_configure          = nfp_net_configure,
        .dev_start              = nfp_net_start,
        .dev_stop               = nfp_net_stop,
        .dev_close              = nfp_net_close,
+       .promiscuous_enable     = nfp_net_promisc_enable,
+       .promiscuous_disable    = nfp_net_promisc_disable,
+       .link_update            = nfp_net_link_update,
+       .stats_get              = nfp_net_stats_get,
+       .stats_reset            = nfp_net_stats_reset,
+       .dev_infos_get          = nfp_net_infos_get,
+       .dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
+       .mtu_set                = nfp_net_dev_mtu_set,
+       .vlan_offload_set       = nfp_net_vlan_offload_set,
+       .reta_update            = nfp_net_reta_update,
+       .reta_query             = nfp_net_reta_query,
+       .rss_hash_update        = nfp_net_rss_hash_update,
+       .rss_hash_conf_get      = nfp_net_rss_hash_conf_get,
        .rx_queue_setup         = nfp_net_rx_queue_setup,
        .rx_queue_release       = nfp_net_rx_queue_release,
        .rx_queue_count         = nfp_net_rx_queue_count,
@@ -1537,6 +2331,8 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
                return 0;
 
        pci_dev = eth_dev->pci_dev;
+       rte_eth_copy_pci_info(eth_dev, pci_dev);
+
        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
        hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
@@ -1613,6 +2409,9 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
        PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u\n",
                     hw->max_rx_queues, hw->max_tx_queues);
 
+       /* Initializing spinlock for reconfigs */
+       rte_spinlock_init(&hw->reconfig_lock);
+
        /* Allocating memory for mac addr */
        eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
        if (eth_dev->data->mac_addrs == NULL) {
@@ -1620,12 +2419,15 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
                return -ENOMEM;
        }
 
-       /* Using random mac addresses for VFs */
-       eth_random_addr(&hw->mac_addr[0]);
+       nfp_net_read_mac(hw);
+
+       if (!is_valid_assigned_ether_addr((struct ether_addr *)&hw->mac_addr))
+               /* Using random mac addresses for VFs */
+               eth_random_addr(&hw->mac_addr[0]);
 
        /* Copying mac address to DPDK eth_dev struct */
-       ether_addr_copy(&eth_dev->data->mac_addrs[0],
-                       (struct ether_addr *)hw->mac_addr);
+       ether_addr_copy((struct ether_addr *)hw->mac_addr,
+                       &eth_dev->data->mac_addrs[0]);
 
        PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
                     "mac=%02x:%02x:%02x:%02x:%02x:%02x",
@@ -1634,21 +2436,31 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
                     hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
                     hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
 
+       /* Registering LSC interrupt handler */
+       rte_intr_callback_register(&pci_dev->intr_handle,
+                                  nfp_net_dev_interrupt_handler,
+                                  (void *)eth_dev);
+
+       /* enable uio intr after callback register */
+       rte_intr_enable(&pci_dev->intr_handle);
+
+       /* Telling the firmware about the LSC interrupt entry */
+       nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
+
+       /* Recording current stats counters values */
+       nfp_net_stats_reset(eth_dev);
+
        return 0;
 }
 
 static struct rte_pci_id pci_id_nfp_net_map[] = {
        {
-               .vendor_id = PCI_VENDOR_ID_NETRONOME,
-               .device_id = PCI_DEVICE_ID_NFP6000_PF_NIC,
-               .subsystem_vendor_id = PCI_ANY_ID,
-               .subsystem_device_id = PCI_ANY_ID,
+               RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
+                              PCI_DEVICE_ID_NFP6000_PF_NIC)
        },
        {
-               .vendor_id = PCI_VENDOR_ID_NETRONOME,
-               .device_id = PCI_DEVICE_ID_NFP6000_VF_NIC,
-               .subsystem_vendor_id = PCI_ANY_ID,
-               .subsystem_device_id = PCI_ANY_ID,
+               RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
+                              PCI_DEVICE_ID_NFP6000_VF_NIC)
        },
        {
                .vendor_id = 0,
@@ -1656,33 +2468,20 @@ static struct rte_pci_id pci_id_nfp_net_map[] = {
 };
 
 static struct eth_driver rte_nfp_net_pmd = {
-       {
-               .name = "rte_nfp_net_pmd",
+       .pci_drv = {
                .id_table = pci_id_nfp_net_map,
-               .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+               .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+                            RTE_PCI_DRV_DETACHABLE,
+               .probe = rte_eth_dev_pci_probe,
+               .remove = rte_eth_dev_pci_remove,
        },
        .eth_dev_init = nfp_net_init,
        .dev_private_size = sizeof(struct nfp_net_adapter),
 };
 
-static int
-nfp_net_pmd_init(const char *name __rte_unused,
-                const char *params __rte_unused)
-{
-       PMD_INIT_FUNC_TRACE();
-       PMD_INIT_LOG(INFO, "librte_pmd_nfp_net version %s\n",
-                    NFP_NET_PMD_VERSION);
-
-       rte_eth_driver_register(&rte_nfp_net_pmd);
-       return 0;
-}
-
-static struct rte_driver rte_nfp_net_driver = {
-       .type = PMD_PDEV,
-       .init = nfp_net_pmd_init,
-};
-
-PMD_REGISTER_DRIVER(rte_nfp_net_driver);
+RTE_PMD_REGISTER_PCI(net_nfp, rte_nfp_net_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_nfp, pci_id_nfp_net_map);
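+/*
+ * Advertise in the pmdinfo metadata which kernel modules this PMD can be
+ * used with: any one of the listed UIO/VFIO drivers is sufficient.
+ */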
+RTE_PMD_REGISTER_KMOD_DEP(net_nfp, "* igb_uio | uio_pci_generic | vfio");
 
 /*
  * Local variables: