drivers: remove direct access to interrupt handle
diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index 879d889..fc334cf 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -23,8 +23,8 @@
 #include <rte_dev.h>
 #include <rte_eal.h>
 #include <rte_ether.h>
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
 #include <rte_interrupts.h>
 #include <rte_log.h>
 #include <rte_memory.h>
 #include "nicvf_svf.h"
 #include "nicvf_logs.h"
 
-int nicvf_logtype_mbox;
-int nicvf_logtype_init;
-int nicvf_logtype_driver;
-
-static void nicvf_dev_stop(struct rte_eth_dev *dev);
+static int nicvf_dev_stop(struct rte_eth_dev *dev);
 static void nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup);
 static void nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic,
                          bool cleanup);
 static int nicvf_vlan_offload_config(struct rte_eth_dev *dev, int mask);
 static int nicvf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
 
-RTE_INIT(nicvf_init_log)
-{
-       nicvf_logtype_mbox = rte_log_register("pmd.net.thunderx.mbox");
-       if (nicvf_logtype_mbox >= 0)
-               rte_log_set_level(nicvf_logtype_mbox, RTE_LOG_NOTICE);
-
-       nicvf_logtype_init = rte_log_register("pmd.net.thunderx.init");
-       if (nicvf_logtype_init >= 0)
-               rte_log_set_level(nicvf_logtype_init, RTE_LOG_NOTICE);
-
-       nicvf_logtype_driver = rte_log_register("pmd.net.thunderx.driver");
-       if (nicvf_logtype_driver >= 0)
-               rte_log_set_level(nicvf_logtype_driver, RTE_LOG_NOTICE);
-}
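+/* Register dynamic log types with default level NOTICE */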
+RTE_LOG_REGISTER_SUFFIX(nicvf_logtype_mbox, mbox, NOTICE);
+RTE_LOG_REGISTER_SUFFIX(nicvf_logtype_init, init, NOTICE);
+RTE_LOG_REGISTER_SUFFIX(nicvf_logtype_driver, driver, NOTICE);
 
 static void
 nicvf_link_status_update(struct nicvf *nic,
@@ -76,14 +61,14 @@ nicvf_link_status_update(struct nicvf *nic,
 {
        memset(link, 0, sizeof(*link));
 
-       link->link_status = nic->link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+       link->link_status = nic->link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 
        if (nic->duplex == NICVF_HALF_DUPLEX)
-               link->link_duplex = ETH_LINK_HALF_DUPLEX;
+               link->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
        else if (nic->duplex == NICVF_FULL_DUPLEX)
-               link->link_duplex = ETH_LINK_FULL_DUPLEX;
+               link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
        link->link_speed = nic->speed;
-       link->link_autoneg = ETH_LINK_AUTONEG;
+       link->link_autoneg = RTE_ETH_LINK_AUTONEG;
 }
 
 static void
@@ -98,9 +83,9 @@ nicvf_interrupt(void *arg)
                        nicvf_link_status_update(nic, &link);
                        rte_eth_linkstatus_set(dev, &link);
 
-                       _rte_eth_dev_callback_process(dev,
-                                                     RTE_ETH_EVENT_INTR_LSC,
-                                                     NULL);
+                       rte_eth_dev_callback_process(dev,
+                                                    RTE_ETH_EVENT_INTR_LSC,
+                                                    NULL);
                }
        }
 
@@ -149,7 +134,7 @@ nicvf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
                /* rte_eth_link_get() might need to wait up to 9 seconds */
                for (i = 0; i < MAX_CHECK_TIME; i++) {
                        nicvf_link_status_update(nic, &link);
-                       if (link.link_status == ETH_LINK_UP)
+                       if (link.link_status == RTE_ETH_LINK_UP)
                                break;
                        rte_delay_ms(CHECK_INTERVAL);
                }
@@ -166,16 +151,9 @@ nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
        struct nicvf *nic = nicvf_pmd_priv(dev);
        uint32_t buffsz, frame_size = mtu + NIC_HW_L2_OVERHEAD;
        size_t i;
-       struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
 
        PMD_INIT_FUNC_TRACE();
 
-       if (frame_size > NIC_HW_MAX_FRS)
-               return -EINVAL;
-
-       if (frame_size < NIC_HW_MIN_FRS)
-               return -EINVAL;
-
        buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
 
        /*
@@ -191,16 +169,9 @@ nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
                (frame_size + 2 * VLAN_TAG_SIZE > buffsz * NIC_HW_MAX_SEGS))
                return -EINVAL;
 
-       if (frame_size > ETHER_MAX_LEN)
-               rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
-       else
-               rxmode->offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
-
        if (nicvf_mbox_update_hw_max_frs(nic, mtu))
                return -EINVAL;
 
-       /* Update max_rx_pkt_len */
-       rxmode->max_rx_pkt_len = mtu + ETHER_HDR_LEN;
        nic->mtu = mtu;
 
        for (i = 0; i < nic->sqs_count; i++)
@@ -362,7 +333,7 @@ nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
        return ptypes;
 }
 
-static void
+static int
 nicvf_dev_stats_reset(struct rte_eth_dev *dev)
 {
        int i;
@@ -370,6 +341,7 @@ nicvf_dev_stats_reset(struct rte_eth_dev *dev)
        struct nicvf *nic = nicvf_pmd_priv(dev);
        uint16_t rx_start, rx_end;
        uint16_t tx_start, tx_end;
+       int ret;
 
        /* Reset all primary nic counters */
        nicvf_rx_range(dev, nic, &rx_start, &rx_end);
@@ -380,7 +352,9 @@ nicvf_dev_stats_reset(struct rte_eth_dev *dev)
        for (i = tx_start; i <= tx_end; i++)
                txqs |= (0x3 << (i * 2));
 
-       nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, rxqs, txqs);
+       ret = nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, rxqs, txqs);
+       if (ret != 0)
+               return ret;
 
        /* Reset secondary nic queue counters */
        for (i = 0; i < nic->sqs_count; i++) {
@@ -396,14 +370,19 @@ nicvf_dev_stats_reset(struct rte_eth_dev *dev)
                for (i = tx_start; i <= tx_end; i++)
                        txqs |= (0x3 << ((i % MAX_SND_QUEUES_PER_QS) * 2));
 
-               nicvf_mbox_reset_stat_counters(snic, 0, 0, rxqs, txqs);
+               ret = nicvf_mbox_reset_stat_counters(snic, 0, 0, rxqs, txqs);
+               if (ret != 0)
+                       return ret;
        }
+
+       return 0;
 }
 
 /* Promiscuous mode enabled by default in LMAC to VF 1:1 map configuration */
-static void
+static int
 nicvf_dev_promisc_enable(struct rte_eth_dev *dev __rte_unused)
 {
+       return 0;
 }
 
 static inline uint64_t
@@ -411,35 +390,35 @@ nicvf_rss_ethdev_to_nic(struct nicvf *nic, uint64_t ethdev_rss)
 {
        uint64_t nic_rss = 0;
 
-       if (ethdev_rss & ETH_RSS_IPV4)
+       if (ethdev_rss & RTE_ETH_RSS_IPV4)
                nic_rss |= RSS_IP_ENA;
 
-       if (ethdev_rss & ETH_RSS_IPV6)
+       if (ethdev_rss & RTE_ETH_RSS_IPV6)
                nic_rss |= RSS_IP_ENA;
 
-       if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_UDP)
+       if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
                nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
 
-       if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_TCP)
+       if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
                nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
 
-       if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_UDP)
+       if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
                nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
 
-       if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_TCP)
+       if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
                nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
 
-       if (ethdev_rss & ETH_RSS_PORT)
+       if (ethdev_rss & RTE_ETH_RSS_PORT)
                nic_rss |= RSS_L2_EXTENDED_HASH_ENA;
 
        if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
-               if (ethdev_rss & ETH_RSS_VXLAN)
+               if (ethdev_rss & RTE_ETH_RSS_VXLAN)
                        nic_rss |= RSS_TUN_VXLAN_ENA;
 
-               if (ethdev_rss & ETH_RSS_GENEVE)
+               if (ethdev_rss & RTE_ETH_RSS_GENEVE)
                        nic_rss |= RSS_TUN_GENEVE_ENA;
 
-               if (ethdev_rss & ETH_RSS_NVGRE)
+               if (ethdev_rss & RTE_ETH_RSS_NVGRE)
                        nic_rss |= RSS_TUN_NVGRE_ENA;
        }
 
@@ -452,28 +431,28 @@ nicvf_rss_nic_to_ethdev(struct nicvf *nic,  uint64_t nic_rss)
        uint64_t ethdev_rss = 0;
 
        if (nic_rss & RSS_IP_ENA)
-               ethdev_rss |= (ETH_RSS_IPV4 | ETH_RSS_IPV6);
+               ethdev_rss |= (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6);
 
        if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_TCP_ENA))
-               ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_TCP |
-                               ETH_RSS_NONFRAG_IPV6_TCP);
+               ethdev_rss |= (RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+                               RTE_ETH_RSS_NONFRAG_IPV6_TCP);
 
        if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_UDP_ENA))
-               ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_UDP |
-                               ETH_RSS_NONFRAG_IPV6_UDP);
+               ethdev_rss |= (RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+                               RTE_ETH_RSS_NONFRAG_IPV6_UDP);
 
        if (nic_rss & RSS_L2_EXTENDED_HASH_ENA)
-               ethdev_rss |= ETH_RSS_PORT;
+               ethdev_rss |= RTE_ETH_RSS_PORT;
 
        if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
                if (nic_rss & RSS_TUN_VXLAN_ENA)
-                       ethdev_rss |= ETH_RSS_VXLAN;
+                       ethdev_rss |= RTE_ETH_RSS_VXLAN;
 
                if (nic_rss & RSS_TUN_GENEVE_ENA)
-                       ethdev_rss |= ETH_RSS_GENEVE;
+                       ethdev_rss |= RTE_ETH_RSS_GENEVE;
 
                if (nic_rss & RSS_TUN_NVGRE_ENA)
-                       ethdev_rss |= ETH_RSS_NVGRE;
+                       ethdev_rss |= RTE_ETH_RSS_NVGRE;
        }
        return ethdev_rss;
 }
@@ -488,9 +467,10 @@ nicvf_dev_reta_query(struct rte_eth_dev *dev,
        int ret, i, j;
 
        if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
-               RTE_LOG(ERR, PMD, "The size of hash lookup table configured "
-                       "(%d) doesn't match the number hardware can supported "
-                       "(%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
+               PMD_DRV_LOG(ERR,
+                           "The size of the configured hash lookup table "
+                           "(%u) doesn't match the number supported by hardware "
+                           "(%u)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
                return -EINVAL;
        }
 
@@ -499,8 +479,8 @@ nicvf_dev_reta_query(struct rte_eth_dev *dev,
                return ret;
 
        /* Copy RETA table */
-       for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
-               for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+       for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_ETH_RETA_GROUP_SIZE); i++) {
+               for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
                        if ((reta_conf[i].mask >> j) & 0x01)
                                reta_conf[i].reta[j] = tbl[j];
        }
@@ -518,9 +498,9 @@ nicvf_dev_reta_update(struct rte_eth_dev *dev,
        int ret, i, j;
 
        if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
-               RTE_LOG(ERR, PMD, "The size of hash lookup table configured "
-                       "(%d) doesn't match the number hardware can supported "
-                       "(%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
+               PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
+                       "(%u) doesn't match the number supported by hardware "
+                       "(%u)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
                return -EINVAL;
        }
 
@@ -529,8 +509,8 @@ nicvf_dev_reta_update(struct rte_eth_dev *dev,
                return ret;
 
        /* Copy RETA table */
-       for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
-               for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+       for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_ETH_RETA_GROUP_SIZE); i++) {
+               for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
                        if ((reta_conf[i].mask >> j) & 0x01)
                                tbl[j] = reta_conf[i].reta[j];
        }
@@ -561,8 +541,8 @@ nicvf_dev_rss_hash_update(struct rte_eth_dev *dev,
 
        if (rss_conf->rss_key &&
                rss_conf->rss_key_len != RSS_HASH_KEY_BYTE_SIZE) {
-               RTE_LOG(ERR, PMD, "Hash key size mismatch %d",
-                               rss_conf->rss_key_len);
+               PMD_DRV_LOG(ERR, "Hash key size mismatch %u",
+                           rss_conf->rss_key_len);
                return -EINVAL;
        }
 
@@ -644,6 +624,7 @@ nicvf_qset_rbdr_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
                                      NICVF_RBDR_BASE_ALIGN_BYTES, nic->node);
        if (rz == NULL) {
                PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr desc ring");
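+               /* Also free the rbdr struct so it does not leak */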
+               rte_free(rbdr);
                return -ENOMEM;
        }
 
@@ -826,9 +807,9 @@ nicvf_configure_rss(struct rte_eth_dev *dev)
                    dev->data->nb_rx_queues,
                    dev->data->dev_conf.lpbk_mode, rsshf);
 
-       if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
+       if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_NONE)
                ret = nicvf_rss_term(nic);
-       else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+       else if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
                ret = nicvf_rss_config(nic, dev->data->nb_rx_queues, rsshf);
        if (ret)
                PMD_INIT_LOG(ERR, "Failed to configure RSS %d", ret);
@@ -863,13 +844,12 @@ nicvf_configure_rss_reta(struct rte_eth_dev *dev)
 }
 
 static void
-nicvf_dev_tx_queue_release(void *sq)
+nicvf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       struct nicvf_txq *txq;
+       struct nicvf_txq *txq = dev->data->tx_queues[qid];
 
        PMD_INIT_FUNC_TRACE();
 
-       txq = (struct nicvf_txq *)sq;
        if (txq) {
                if (txq->txbuffs != NULL) {
                        nicvf_tx_queue_release_mbufs(txq);
@@ -877,6 +857,7 @@ nicvf_dev_tx_queue_release(void *sq)
                        txq->txbuffs = NULL;
                }
                rte_free(txq);
+               dev->data->tx_queues[qid] = NULL;
        }
 }
 
@@ -889,7 +870,7 @@ nicvf_set_tx_function(struct rte_eth_dev *dev)
 
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                txq = dev->data->tx_queues[i];
-               if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
+               if (txq->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) {
                        multiseg = true;
                        break;
                }
@@ -990,8 +971,7 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
        if (dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
                PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
                                nicvf_netdev_qidx(nic, qidx));
-               nicvf_dev_tx_queue_release(
-                       dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)]);
+               nicvf_dev_tx_queue_release(dev, nicvf_netdev_qidx(nic, qidx));
                dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
        }
 
@@ -1012,7 +992,7 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
        offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
        txq->offloads = offloads;
 
-       is_single_pool = !!(offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
+       is_single_pool = !!(offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE);
 
        /* Choose optimum free threshold value for multipool case */
        if (!is_single_pool) {
@@ -1025,19 +1005,21 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
                txq->pool_free = nicvf_single_pool_free_xmited_buffers;
        }
 
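+       /* Publish txq first so the release callback can find it on error */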
+       dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = txq;
+
        /* Allocate software ring */
        txq->txbuffs = rte_zmalloc_socket("txq->txbuffs",
                                nb_desc * sizeof(struct rte_mbuf *),
                                RTE_CACHE_LINE_SIZE, nic->node);
 
        if (txq->txbuffs == NULL) {
-               nicvf_dev_tx_queue_release(txq);
+               nicvf_dev_tx_queue_release(dev, nicvf_netdev_qidx(nic, qidx));
                return -ENOMEM;
        }
 
        if (nicvf_qset_sq_alloc(dev, nic, txq, qidx, nb_desc)) {
                PMD_INIT_LOG(ERR, "Failed to allocate mem for sq %d", qidx);
-               nicvf_dev_tx_queue_release(txq);
+               nicvf_dev_tx_queue_release(dev, nicvf_netdev_qidx(nic, qidx));
                return -ENOMEM;
        }
 
@@ -1048,7 +1030,6 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
                        nicvf_netdev_qidx(nic, qidx), txq, nb_desc, txq->desc,
                        txq->phys, txq->offloads);
 
-       dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = txq;
        dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
                RTE_ETH_QUEUE_STATE_STOPPED;
        return 0;
@@ -1065,8 +1046,7 @@ nicvf_rx_queue_release_mbufs(struct rte_eth_dev *dev, struct nicvf_rxq *rxq)
        if (dev->rx_pkt_burst == NULL)
                return;
 
-       while ((rxq_cnt = nicvf_dev_rx_queue_count(dev,
-                               nicvf_netdev_qidx(rxq->nic, rxq->queue_id)))) {
+       while ((rxq_cnt = nicvf_dev_rx_queue_count(rxq))) {
                nb_pkts = dev->rx_pkt_burst(rxq, rx_pkts,
                                        NICVF_MAX_RX_FREE_THRESH);
                PMD_DRV_LOG(INFO, "nb_pkts=%d  rxq_cnt=%d", nb_pkts, rxq_cnt);
@@ -1166,11 +1146,11 @@ nicvf_vf_stop_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
 }
 
 static void
-nicvf_dev_rx_queue_release(void *rx_queue)
+nicvf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
        PMD_INIT_FUNC_TRACE();
 
-       rte_free(rx_queue);
+       rte_free(dev->data->rx_queues[qid]);
 }
 
 static int
@@ -1307,7 +1287,7 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
        }
 
        /* Mempool memory must be physically contiguous */
-       if (mp->flags & MEMPOOL_F_NO_IOVA_CONTIG) {
+       if (mp->flags & RTE_MEMPOOL_F_NO_IOVA_CONTIG) {
                PMD_INIT_LOG(ERR, "Mempool memory must be physically contiguous");
                return -EINVAL;
        }
@@ -1341,8 +1321,7 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
        if (dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
                PMD_RX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
                                nicvf_netdev_qidx(nic, qidx));
-               nicvf_dev_rx_queue_release(
-                       dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)]);
+               nicvf_dev_rx_queue_release(dev, nicvf_netdev_qidx(nic, qidx));
                dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
        }
 
@@ -1370,12 +1349,14 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
        else
                rxq->rbptr_offset = NICVF_CQE_RBPTR_WORD;
 
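+       /* Publish rxq first so the release callback can find it on error */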
+       dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;
+
        nicvf_rxq_mbuf_setup(rxq);
 
        /* Alloc completion queue */
        if (nicvf_qset_cq_alloc(dev, nic, rxq, rxq->queue_id, nb_desc)) {
                PMD_INIT_LOG(ERR, "failed to allocate cq %u", rxq->queue_id);
-               nicvf_dev_rx_queue_release(rxq);
+               nicvf_dev_rx_queue_release(dev, nicvf_netdev_qidx(nic, qidx));
                return -ENOMEM;
        }
 
@@ -1387,13 +1368,12 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
                        nicvf_netdev_qidx(nic, qidx), rxq, mp->name, nb_desc,
                        rte_mempool_avail_count(mp), rxq->phys, offloads);
 
-       dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;
        dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
                RTE_ETH_QUEUE_STATE_STOPPED;
        return 0;
 }
 
-static void
+static int
 nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
        struct nicvf *nic = nicvf_pmd_priv(dev);
@@ -1402,14 +1382,14 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        PMD_INIT_FUNC_TRACE();
 
        /* Autonegotiation may be disabled */
-       dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
-       dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M |
-                                ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+       dev_info->speed_capa = RTE_ETH_LINK_SPEED_FIXED;
+       dev_info->speed_capa |= RTE_ETH_LINK_SPEED_10M | RTE_ETH_LINK_SPEED_100M |
+                                RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
        if (nicvf_hw_version(nic) != PCI_SUB_DEVICE_ID_CN81XX_NICVF)
-               dev_info->speed_capa |= ETH_LINK_SPEED_40G;
+               dev_info->speed_capa |= RTE_ETH_LINK_SPEED_40G;
 
-       dev_info->min_rx_bufsize = ETHER_MIN_MTU;
-       dev_info->max_rx_pktlen = NIC_HW_MAX_MTU + ETHER_HDR_LEN;
+       dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU;
+       dev_info->max_rx_pktlen = NIC_HW_MAX_MTU + RTE_ETHER_HDR_LEN;
        dev_info->max_rx_queues =
                        (uint16_t)MAX_RCV_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
        dev_info->max_tx_queues =
@@ -1435,11 +1415,13 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
        dev_info->default_txconf = (struct rte_eth_txconf) {
                .tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
-               .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE |
-                       DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM   |
-                       DEV_TX_OFFLOAD_UDP_CKSUM          |
-                       DEV_TX_OFFLOAD_TCP_CKSUM,
+               .offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
+                       RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM   |
+                       RTE_ETH_TX_OFFLOAD_UDP_CKSUM          |
+                       RTE_ETH_TX_OFFLOAD_TCP_CKSUM,
        };
+
+       return 0;
 }
 
 static nicvf_iova_addr_t
@@ -1600,8 +1582,8 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
                     nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);
 
        /* Configure VLAN Strip */
-       mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-               ETH_VLAN_EXTEND_MASK;
+       mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+               RTE_ETH_VLAN_EXTEND_MASK;
        ret = nicvf_vlan_offload_config(dev, mask);
 
        /* Based on the packet type(IPv4 or IPv6), the nicvf HW aligns L3 data
@@ -1727,16 +1709,13 @@ nicvf_dev_start(struct rte_eth_dev *dev)
        }
 
        /* Setup scatter mode if needed by jumbo */
-       if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
-                                           2 * VLAN_TAG_SIZE > buffsz)
+       if (dev->data->mtu + (uint32_t)NIC_HW_L2_OVERHEAD + 2 * VLAN_TAG_SIZE > buffsz)
                dev->data->scattered_rx = 1;
-       if ((rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) != 0)
+       if ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) != 0)
                dev->data->scattered_rx = 1;
 
-       /* Setup MTU based on max_rx_pkt_len or default */
-       mtu = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ?
-               dev->data->dev_conf.rxmode.max_rx_pkt_len
-                       -  ETHER_HDR_LEN : ETHER_MTU;
+       /* Setup MTU */
+       mtu = dev->data->mtu;
 
        if (nicvf_dev_set_mtu(dev, mtu)) {
                PMD_INIT_LOG(ERR, "Failed to set default mtu size");
@@ -1770,6 +1749,7 @@ nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup)
        struct nicvf *nic = nicvf_pmd_priv(dev);
 
        PMD_INIT_FUNC_TRACE();
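+       /* Mark the port stopped before tearing anything down */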
+       dev->data->dev_started = 0;
 
        /* Teardown secondary vf first */
        for (i = 0; i < nic->sqs_count; i++) {
@@ -1793,12 +1773,14 @@ nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup)
                PMD_INIT_LOG(ERR, "Failed to reclaim CPI config %d", ret);
 }
 
-static void
+static int
 nicvf_dev_stop(struct rte_eth_dev *dev)
 {
        PMD_INIT_FUNC_TRACE();
 
        nicvf_dev_stop_cleanup(dev, false);
+
+       return 0;
 }
 
 static void
@@ -1856,13 +1838,15 @@ nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic, bool cleanup)
        }
 }
 
-static void
+static int
 nicvf_dev_close(struct rte_eth_dev *dev)
 {
        size_t i;
        struct nicvf *nic = nicvf_pmd_priv(dev);
 
        PMD_INIT_FUNC_TRACE();
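+       /* Only the primary process performs the actual teardown */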
+       if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+               return 0;
 
        nicvf_dev_stop_cleanup(dev, true);
        nicvf_periodic_alarm_stop(nicvf_interrupt, dev);
@@ -1873,6 +1857,10 @@ nicvf_dev_close(struct rte_eth_dev *dev)
 
                nicvf_periodic_alarm_stop(nicvf_vf_interrupt, nic->snicvf[i]);
        }
+
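+       /* Free the interrupt instance allocated at init time */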
+       rte_intr_instance_free(nic->intr_handle);
+
+       return 0;
 }
 
 static int
@@ -1910,6 +1898,9 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 
        PMD_INIT_FUNC_TRACE();
 
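+       /* Any RSS mode implies the per-packet RSS hash offload */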
+       if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+               rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
+
        if (!rte_eal_has_hugepages()) {
                PMD_INIT_LOG(INFO, "Huge page is not configured");
                return -EINVAL;
@@ -1920,8 +1911,8 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
                return -EINVAL;
        }
 
-       if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
-               rxmode->mq_mode != ETH_MQ_RX_RSS) {
+       if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
+               rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
                PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode);
                return -EINVAL;
        }
@@ -1931,7 +1922,7 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
                return -EINVAL;
        }
 
-       if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
+       if (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
                PMD_INIT_LOG(INFO, "Setting link speed/duplex not supported");
                return -EINVAL;
        }
@@ -1966,7 +1957,7 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
                }
        }
 
-       if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+       if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
                nic->offload_cksum = 1;
 
        PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64,
@@ -1975,6 +1966,37 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
        return 0;
 }
 
+static int
+nicvf_dev_set_link_up(struct rte_eth_dev *dev)
+{
+       struct nicvf *nic = nicvf_pmd_priv(dev);
+       int rc, i;
+
+       rc = nicvf_mbox_set_link_up_down(nic, true);
+       if (rc)
+               goto done;
+
+       /* Start Tx queues */
+       for (i = 0; i < dev->data->nb_tx_queues; i++)
+               nicvf_dev_tx_queue_start(dev, i);
+
+done:
+       return rc;
+}
+
+static int
+nicvf_dev_set_link_down(struct rte_eth_dev *dev)
+{
+       struct nicvf *nic = nicvf_pmd_priv(dev);
+       int i;
+
+       /* Stop Tx queues */
+       for (i = 0; i < dev->data->nb_tx_queues; i++)
+               nicvf_dev_tx_queue_stop(dev, i);
+
+       return nicvf_mbox_set_link_up_down(nic, false);
+}
+
 /* Initialize and register driver with DPDK Application */
 static const struct eth_dev_ops nicvf_eth_dev_ops = {
        .dev_configure            = nicvf_dev_configure,
@@ -1999,9 +2021,10 @@ static const struct eth_dev_ops nicvf_eth_dev_ops = {
        .tx_queue_stop            = nicvf_dev_tx_queue_stop,
        .rx_queue_setup           = nicvf_dev_rx_queue_setup,
        .rx_queue_release         = nicvf_dev_rx_queue_release,
-       .rx_queue_count           = nicvf_dev_rx_queue_count,
        .tx_queue_setup           = nicvf_dev_tx_queue_setup,
        .tx_queue_release         = nicvf_dev_tx_queue_release,
+       .dev_set_link_up          = nicvf_dev_set_link_up,
+       .dev_set_link_down        = nicvf_dev_set_link_down,
        .get_reg                  = nicvf_dev_get_regs,
 };
 
@@ -2011,8 +2034,8 @@ nicvf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
        struct rte_eth_rxmode *rxmode;
        struct nicvf *nic = nicvf_pmd_priv(dev);
        rxmode = &dev->data->dev_conf.rxmode;
-       if (mask & ETH_VLAN_STRIP_MASK) {
-               if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+       if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+               if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
                        nicvf_vlan_hw_strip(nic, true);
                else
                        nicvf_vlan_hw_strip(nic, false);
@@ -2083,6 +2106,13 @@ kvlist_free:
        return ret;
 }
 static int
+nicvf_eth_dev_uninit(struct rte_eth_dev *dev)
+{
+       PMD_INIT_FUNC_TRACE();
+       nicvf_dev_close(dev);
+       return 0;
+}
+
+static int
 nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
 {
        int ret;
@@ -2092,6 +2122,7 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
        PMD_INIT_FUNC_TRACE();
 
        eth_dev->dev_ops = &nicvf_eth_dev_ops;
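+       /* rx_queue_count is a fast-path op, set directly on the ethdev */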
+       eth_dev->rx_queue_count = nicvf_dev_rx_queue_count;
 
        /* For secondary processes, the primary has done all the work */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
@@ -2109,6 +2140,7 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
 
        pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        rte_eth_copy_pci_info(eth_dev, pci_dev);
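+       /* Let the ethdev layer auto-fill per-queue basic xstats */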
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        nic->device_id = pci_dev->id.device_id;
        nic->vendor_id = pci_dev->id.vendor_id;
@@ -2127,6 +2159,14 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
                goto fail;
        }
 
+       /* Allocate interrupt instance */
+       nic->intr_handle = rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
+       if (nic->intr_handle == NULL) {
+               PMD_INIT_LOG(ERR, "Failed to allocate intr handle");
+               ret = -ENODEV;
+               goto fail;
+       }
+
        nicvf_disable_all_interrupts(nic);
 
        ret = nicvf_periodic_alarm_start(nicvf_interrupt, eth_dev);
@@ -2173,16 +2213,17 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
                return ENOTSUP;
        }
 
-       eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
+       eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
+                                       RTE_ETHER_ADDR_LEN, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR, "Failed to allocate memory for mac addr");
                ret = -ENOMEM;
                goto alarm_fail;
        }
-       if (is_zero_ether_addr((struct ether_addr *)nic->mac_addr))
-               eth_random_addr(&nic->mac_addr[0]);
+       if (rte_is_zero_ether_addr((struct rte_ether_addr *)nic->mac_addr))
+               rte_eth_random_addr(&nic->mac_addr[0]);
 
-       ether_addr_copy((struct ether_addr *)nic->mac_addr,
+       rte_ether_addr_copy((struct rte_ether_addr *)nic->mac_addr,
                        &eth_dev->data->mac_addrs[0]);
 
        ret = nicvf_mbox_set_mac_addr(nic, nic->mac_addr);
@@ -2196,7 +2237,7 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
                PMD_INIT_LOG(ERR, "Failed to configure first skip");
                goto malloc_fail;
        }
-       PMD_INIT_LOG(INFO, "Port %d (%x:%x) mac=%02x:%02x:%02x:%02x:%02x:%02x",
+       PMD_INIT_LOG(INFO, "Port %d (%x:%x) mac=" RTE_ETHER_ADDR_PRT_FMT,
                eth_dev->data->port_id, nic->vendor_id, nic->device_id,
                nic->mac_addr[0], nic->mac_addr[1], nic->mac_addr[2],
                nic->mac_addr[3], nic->mac_addr[4], nic->mac_addr[5]);
@@ -2205,6 +2246,7 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
 
 malloc_fail:
        rte_free(eth_dev->data->mac_addrs);
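+       /* Clear the pointer to avoid a double free at port release */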
+       eth_dev->data->mac_addrs = NULL;
 alarm_fail:
        nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev);
 fail:
@@ -2254,7 +2296,7 @@ static int nicvf_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 
 static int nicvf_eth_pci_remove(struct rte_pci_device *pci_dev)
 {
-       return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
+       return rte_eth_dev_pci_generic_remove(pci_dev, nicvf_eth_dev_uninit);
 }
 
 static struct rte_pci_driver rte_nicvf_pmd = {