mempool: rename functions with confusing names

diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index e20f0d9..48ed381 100644
 #include "base/nicvf_plat.h"
 
 #include "nicvf_ethdev.h"
-
+#include "nicvf_rxtx.h"
 #include "nicvf_logs.h"
 
+static void nicvf_dev_stop(struct rte_eth_dev *dev);
+
 static inline int
 nicvf_atomic_write_link_status(struct rte_eth_dev *dev,
                               struct rte_eth_link *link)
@@ -260,6 +262,45 @@ nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
        stats->oerrors = port_stats.tx_drops;
 }
 
+static const uint32_t *
+nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+       size_t copied;
+       static uint32_t ptypes[32];
+       struct nicvf *nic = nicvf_pmd_priv(dev);
+       static const uint32_t ptypes_pass1[] = {
+               RTE_PTYPE_L3_IPV4,
+               RTE_PTYPE_L3_IPV4_EXT,
+               RTE_PTYPE_L3_IPV6,
+               RTE_PTYPE_L3_IPV6_EXT,
+               RTE_PTYPE_L4_TCP,
+               RTE_PTYPE_L4_UDP,
+               RTE_PTYPE_L4_FRAG,
+       };
+       static const uint32_t ptypes_pass2[] = {
+               RTE_PTYPE_TUNNEL_GRE,
+               RTE_PTYPE_TUNNEL_GENEVE,
+               RTE_PTYPE_TUNNEL_VXLAN,
+               RTE_PTYPE_TUNNEL_NVGRE,
+       };
+       static const uint32_t ptypes_end = RTE_PTYPE_UNKNOWN;
+
+       copied = sizeof(ptypes_pass1);
+       memcpy(ptypes, ptypes_pass1, copied);
+       if (nicvf_hw_version(nic) == NICVF_PASS2) {
+               memcpy((char *)ptypes + copied, ptypes_pass2,
+                       sizeof(ptypes_pass2));
+               copied += sizeof(ptypes_pass2);
+       }
+
+       memcpy((char *)ptypes + copied, &ptypes_end, sizeof(ptypes_end));
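+
+       /* Packet types are reported only for the default Rx handlers, which
+        * actually parse and set mbuf packet types.
+        */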
+       if (dev->rx_pkt_burst == nicvf_recv_pkts ||
+               dev->rx_pkt_burst == nicvf_recv_pkts_multiseg)
+               return ptypes;
+
+       return NULL;
+}
+
 static void
 nicvf_dev_stats_reset(struct rte_eth_dev *dev)
 {
@@ -495,6 +536,82 @@ nicvf_qset_sq_alloc(struct nicvf *nic,  struct nicvf_txq *sq, uint16_t qidx,
        return 0;
 }
 
+static int
+nicvf_qset_rbdr_alloc(struct nicvf *nic, uint32_t desc_cnt, uint32_t buffsz)
+{
+       struct nicvf_rbdr *rbdr;
+       const struct rte_memzone *rz;
+       uint32_t ring_size;
+
+       assert(nic->rbdr == NULL);
+       rbdr = rte_zmalloc_socket("rbdr", sizeof(struct nicvf_rbdr),
+                                 RTE_CACHE_LINE_SIZE, nic->node);
+       if (rbdr == NULL) {
+               PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr");
+               return -ENOMEM;
+       }
+
+       ring_size = sizeof(struct rbdr_entry_t) * desc_cnt;
+       rz = rte_eth_dma_zone_reserve(nic->eth_dev, "rbdr", 0, ring_size,
+                                  NICVF_RBDR_BASE_ALIGN_BYTES, nic->node);
+       if (rz == NULL) {
+               PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr desc ring");
+               rte_free(rbdr);
+               return -ENOMEM;
+       }
+
+       memset(rz->addr, 0, ring_size);
+
+       rbdr->phys = rz->phys_addr;
+       rbdr->tail = 0;
+       rbdr->next_tail = 0;
+       rbdr->desc = rz->addr;
+       rbdr->buffsz = buffsz;
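+       /* desc_cnt must be a power of two so qlen_mask can wrap the ring */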
+       rbdr->qlen_mask = desc_cnt - 1;
+       rbdr->rbdr_status =
+               nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_STATUS0;
+       rbdr->rbdr_door =
+               nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_DOOR;
+
+       nic->rbdr = rbdr;
+       return 0;
+}
+
+static void
+nicvf_rbdr_release_mbuf(struct nicvf *nic, nicvf_phys_addr_t phy)
+{
+       uint16_t qidx;
+       void *obj;
+       struct nicvf_rxq *rxq;
+
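+       /* All RX pools share the same phys/virt offset (checked at dev start),
+        * so the buffer can be returned via any queue with precharged buffers
+        * still outstanding.
+        */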
+       for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++) {
+               rxq = nic->eth_dev->data->rx_queues[qidx];
+               if (rxq->precharge_cnt) {
+                       obj = (void *)nicvf_mbuff_phy2virt(phy,
+                                                          rxq->mbuf_phys_off);
+                       rte_mempool_put(rxq->pool, obj);
+                       rxq->precharge_cnt--;
+                       break;
+               }
+       }
+}
+
+static inline void
+nicvf_rbdr_release_mbufs(struct nicvf *nic)
+{
+       uint32_t qlen_mask, head;
+       struct rbdr_entry_t *entry;
+       struct nicvf_rbdr *rbdr = nic->rbdr;
+
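+       /* Return every buffer between head and tail back to its mempool */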
+       qlen_mask = rbdr->qlen_mask;
+       head = rbdr->head;
+       while (head != rbdr->tail) {
+               entry = rbdr->desc + head;
+               nicvf_rbdr_release_mbuf(nic, entry->full_addr);
+               head++;
+               head = head & qlen_mask;
+       }
+}
+
 static inline void
 nicvf_tx_queue_release_mbufs(struct nicvf_txq *txq)
 {
@@ -523,6 +640,124 @@ nicvf_tx_queue_reset(struct nicvf_txq *txq)
        txq->xmit_bufs = 0;
 }
 
+static inline int
+nicvf_start_tx_queue(struct rte_eth_dev *dev, uint16_t qidx)
+{
+       struct nicvf_txq *txq;
+       int ret;
+
+       if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
+               return 0;
+
+       txq = dev->data->tx_queues[qidx];
+       txq->pool = NULL;
+       ret = nicvf_qset_sq_config(nicvf_pmd_priv(dev), qidx, txq);
+       if (ret) {
+               PMD_INIT_LOG(ERR, "Failed to configure sq %d %d", qidx, ret);
+               goto config_sq_error;
+       }
+
+       dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
+       return ret;
+
+config_sq_error:
+       nicvf_qset_sq_reclaim(nicvf_pmd_priv(dev), qidx);
+       return ret;
+}
+
+static inline int
+nicvf_stop_tx_queue(struct rte_eth_dev *dev, uint16_t qidx)
+{
+       struct nicvf_txq *txq;
+       int ret;
+
+       if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED)
+               return 0;
+
+       ret = nicvf_qset_sq_reclaim(nicvf_pmd_priv(dev), qidx);
+       if (ret)
+               PMD_INIT_LOG(ERR, "Failed to reclaim sq %d %d", qidx, ret);
+
+       txq = dev->data->tx_queues[qidx];
+       nicvf_tx_queue_release_mbufs(txq);
+       nicvf_tx_queue_reset(txq);
+
+       dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+       return ret;
+}
+
+static inline int
+nicvf_configure_cpi(struct rte_eth_dev *dev)
+{
+       struct nicvf *nic = nicvf_pmd_priv(dev);
+       uint16_t qidx, qcnt;
+       int ret;
+
+       /* Count started rx queues */
+       for (qidx = qcnt = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++)
+               if (dev->data->rx_queue_state[qidx] ==
+                   RTE_ETH_QUEUE_STATE_STARTED)
+                       qcnt++;
+
+       nic->cpi_alg = CPI_ALG_NONE;
+       ret = nicvf_mbox_config_cpi(nic, qcnt);
+       if (ret)
+               PMD_INIT_LOG(ERR, "Failed to configure CPI %d", ret);
+
+       return ret;
+}
+
+static inline int
+nicvf_configure_rss(struct rte_eth_dev *dev)
+{
+       struct nicvf *nic = nicvf_pmd_priv(dev);
+       uint64_t rsshf;
+       int ret = -EINVAL;
+
+       rsshf = nicvf_rss_ethdev_to_nic(nic,
+                       dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf);
+       PMD_DRV_LOG(INFO, "mode=%d rx_queues=%d loopback=%d rsshf=0x%" PRIx64,
+                   dev->data->dev_conf.rxmode.mq_mode,
+                   nic->eth_dev->data->nb_rx_queues,
+                   nic->eth_dev->data->dev_conf.lpbk_mode, rsshf);
+
+       if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
+               ret = nicvf_rss_term(nic);
+       else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+               ret = nicvf_rss_config(nic,
+                                      nic->eth_dev->data->nb_rx_queues, rsshf);
+       if (ret)
+               PMD_INIT_LOG(ERR, "Failed to configure RSS %d", ret);
+
+       return ret;
+}
+
+static int
+nicvf_configure_rss_reta(struct rte_eth_dev *dev)
+{
+       struct nicvf *nic = nicvf_pmd_priv(dev);
+       unsigned int idx, qmap_size;
+       uint8_t qmap[RTE_MAX_QUEUES_PER_PORT];
+       uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE];
+
+       if (nic->cpi_alg != CPI_ALG_NONE)
+               return -EINVAL;
+
+       /* Prepare queue map */
+       for (idx = 0, qmap_size = 0; idx < dev->data->nb_rx_queues; idx++) {
+               if (dev->data->rx_queue_state[idx] ==
+                               RTE_ETH_QUEUE_STATE_STARTED)
+                       qmap[qmap_size++] = idx;
+       }
+
+       /* Nothing to program if no RX queue is started */
+       if (qmap_size == 0)
+               return 0;
+
+       /* Update default RSS RETA */
+       for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
+               default_reta[idx] = qmap[idx % qmap_size];
+
+       return nicvf_rss_reta_update(nic, default_reta,
+                                    NIC_MAX_RSS_IDR_TBL_SIZE);
+}
+
 static void
 nicvf_dev_tx_queue_release(void *sq)
 {
@@ -541,6 +776,48 @@ nicvf_dev_tx_queue_release(void *sq)
        }
 }
 
+static void
+nicvf_set_tx_function(struct rte_eth_dev *dev)
+{
+       struct nicvf_txq *txq = NULL;
+       size_t i;
+       bool multiseg = false;
+
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               txq = dev->data->tx_queues[i];
+               if ((txq->txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS) == 0) {
+                       multiseg = true;
+                       break;
+               }
+       }
+
+       /* Use a simple Tx queue (no offloads, no multi segs) if possible */
+       if (multiseg) {
+               PMD_DRV_LOG(DEBUG, "Using multi-segment tx callback");
+               dev->tx_pkt_burst = nicvf_xmit_pkts_multiseg;
+       } else {
+               PMD_DRV_LOG(DEBUG, "Using single-segment tx callback");
+               dev->tx_pkt_burst = nicvf_xmit_pkts;
+       }
+
+       /* txq is NULL only when no Tx queue has been set up yet */
+       if (txq == NULL)
+               return;
+
+       if (txq->pool_free == nicvf_single_pool_free_xmited_buffers)
+               PMD_DRV_LOG(DEBUG, "Using single-mempool tx free method");
+       else
+               PMD_DRV_LOG(DEBUG, "Using multi-mempool tx free method");
+}
+
+static void
+nicvf_set_rx_function(struct rte_eth_dev *dev)
+{
+       if (dev->data->scattered_rx) {
+               PMD_DRV_LOG(DEBUG, "Using multi-segment rx callback");
+               dev->rx_pkt_burst = nicvf_recv_pkts_multiseg;
+       } else {
+               PMD_DRV_LOG(DEBUG, "Using single-segment rx callback");
+               dev->rx_pkt_burst = nicvf_recv_pkts;
+       }
+}
+
 static int
 nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
                         uint16_t nb_desc, unsigned int socket_id,
@@ -617,6 +894,9 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
                (tx_conf->tx_free_thresh == NICVF_DEFAULT_TX_FREE_THRESH ?
                                NICVF_TX_FREE_MPOOL_THRESH :
                                tx_conf->tx_free_thresh);
+               txq->pool_free = nicvf_multi_pool_free_xmited_buffers;
+       } else {
+               txq->pool_free = nicvf_single_pool_free_xmited_buffers;
        }
 
        /* Allocate software ring */
@@ -645,6 +925,33 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
        return 0;
 }
 
+static inline void
+nicvf_rx_queue_release_mbufs(struct nicvf_rxq *rxq)
+{
+       uint32_t rxq_cnt;
+       uint32_t nb_pkts, released_pkts = 0;
+       uint32_t refill_cnt = 0;
+       struct rte_eth_dev *dev = rxq->nic->eth_dev;
+       struct rte_mbuf *rx_pkts[NICVF_MAX_RX_FREE_THRESH];
+
+       if (dev->rx_pkt_burst == NULL)
+               return;
+
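+       /* Drain completions via the Rx handler and free the received mbufs */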
+       while ((rxq_cnt = nicvf_dev_rx_queue_count(dev, rxq->queue_id))) {
+               nb_pkts = dev->rx_pkt_burst(rxq, rx_pkts,
+                                       NICVF_MAX_RX_FREE_THRESH);
+               PMD_DRV_LOG(INFO, "nb_pkts=%d  rxq_cnt=%d", nb_pkts, rxq_cnt);
+               while (nb_pkts) {
+                       rte_pktmbuf_free_seg(rx_pkts[--nb_pkts]);
+                       released_pkts++;
+               }
+       }
+
+       refill_cnt += nicvf_dev_rbdr_refill(dev, rxq->queue_id);
+       PMD_DRV_LOG(INFO, "free_cnt=%d  refill_cnt=%d",
+                   released_pkts, refill_cnt);
+}
+
 static void
 nicvf_rx_queue_reset(struct nicvf_rxq *rxq)
 {
@@ -653,6 +960,69 @@ nicvf_rx_queue_reset(struct nicvf_rxq *rxq)
        rxq->recv_buffers = 0;
 }
 
+static inline int
+nicvf_start_rx_queue(struct rte_eth_dev *dev, uint16_t qidx)
+{
+       struct nicvf *nic = nicvf_pmd_priv(dev);
+       struct nicvf_rxq *rxq;
+       int ret;
+
+       if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
+               return 0;
+
+       /* Update rbdr pointer to all rxq */
+       rxq = dev->data->rx_queues[qidx];
+       rxq->shared_rbdr = nic->rbdr;
+
+       ret = nicvf_qset_rq_config(nic, qidx, rxq);
+       if (ret) {
+               PMD_INIT_LOG(ERR, "Failed to configure rq %d %d", qidx, ret);
+               goto config_rq_error;
+       }
+       ret = nicvf_qset_cq_config(nic, qidx, rxq);
+       if (ret) {
+               PMD_INIT_LOG(ERR, "Failed to configure cq %d %d", qidx, ret);
+               goto config_cq_error;
+       }
+
+       dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
+       return 0;
+
+config_cq_error:
+       nicvf_qset_cq_reclaim(nic, qidx);
+config_rq_error:
+       nicvf_qset_rq_reclaim(nic, qidx);
+       return ret;
+}
+
+static inline int
+nicvf_stop_rx_queue(struct rte_eth_dev *dev, uint16_t qidx)
+{
+       struct nicvf *nic = nicvf_pmd_priv(dev);
+       struct nicvf_rxq *rxq;
+       int ret, other_error;
+
+       if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED)
+               return 0;
+
+       ret = nicvf_qset_rq_reclaim(nic, qidx);
+       if (ret)
+               PMD_INIT_LOG(ERR, "Failed to reclaim rq %d %d", qidx, ret);
+
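+       /* Record the error but continue releasing the queue resources */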
+       other_error = ret;
+       rxq = dev->data->rx_queues[qidx];
+       nicvf_rx_queue_release_mbufs(rxq);
+       nicvf_rx_queue_reset(rxq);
+
+       ret = nicvf_qset_cq_reclaim(nic, qidx);
+       if (ret)
+               PMD_INIT_LOG(ERR, "Failed to reclaim cq %d %d", qidx, ret);
+
+       other_error |= ret;
+       dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+       return other_error;
+}
+
 static void
 nicvf_dev_rx_queue_release(void *rx_queue)
 {
@@ -664,6 +1034,45 @@ nicvf_dev_rx_queue_release(void *rx_queue)
                rte_free(rxq);
 }
 
+static int
+nicvf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
+{
+       int ret;
+
+       ret = nicvf_start_rx_queue(dev, qidx);
+       if (ret)
+               return ret;
+
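+       /* Redistribute CPI and the RSS RETA over the started RX queues */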
+       ret = nicvf_configure_cpi(dev);
+       if (ret)
+               return ret;
+
+       return nicvf_configure_rss_reta(dev);
+}
+
+static int
+nicvf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
+{
+       int ret;
+
+       ret = nicvf_stop_rx_queue(dev, qidx);
+       ret |= nicvf_configure_cpi(dev);
+       ret |= nicvf_configure_rss_reta(dev);
+       return ret;
+}
+
+static int
+nicvf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
+{
+       return nicvf_start_tx_queue(dev, qidx);
+}
+
+static int
+nicvf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
+{
+       return nicvf_stop_tx_queue(dev, qidx);
+}
+
 static int
 nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
                         uint16_t nb_desc, unsigned int socket_id,
@@ -749,7 +1158,7 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 
        PMD_RX_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d) phy=%" PRIx64,
                        qidx, rxq, mp->name, nb_desc,
-                       rte_mempool_count(mp), rxq->phys);
+                       rte_mempool_avail_count(mp), rxq->phys);
 
        dev->data->rx_queues[qidx] = rxq;
        dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
@@ -800,6 +1209,317 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        };
 }
 
+static nicvf_phys_addr_t
+rbdr_rte_mempool_get(void *opaque)
+{
+       uint16_t qidx;
+       uintptr_t mbuf;
+       struct nicvf_rxq *rxq;
+       struct nicvf *nic = nicvf_pmd_priv((struct rte_eth_dev *)opaque);
+
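+       /* Callback for nicvf_qset_rbdr_precharge(): hand out one buffer at a
+        * time, spreading the charge evenly across all RX queue pools.
+        */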
+       for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++) {
+               rxq = nic->eth_dev->data->rx_queues[qidx];
+               /* Maintain equal buffer count across all pools */
+               if (rxq->precharge_cnt >= rxq->qlen_mask)
+                       continue;
+               rxq->precharge_cnt++;
+               mbuf = (uintptr_t)rte_pktmbuf_alloc(rxq->pool);
+               if (mbuf)
+                       return nicvf_mbuff_virt2phy(mbuf, rxq->mbuf_phys_off);
+       }
+       return 0;
+}
+
+static int
+nicvf_dev_start(struct rte_eth_dev *dev)
+{
+       int ret;
+       uint16_t qidx;
+       uint32_t buffsz = 0, rbdrsz = 0;
+       uint32_t total_rxq_desc, nb_rbdr_desc, exp_buffs;
+       uint64_t mbuf_phys_off = 0;
+       struct nicvf_rxq *rxq;
+       struct rte_pktmbuf_pool_private *mbp_priv;
+       struct rte_mbuf *mbuf;
+       struct nicvf *nic = nicvf_pmd_priv(dev);
+       struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
+       uint16_t mtu;
+
+       PMD_INIT_FUNC_TRACE();
+
+       /* Userspace process exited without proper shutdown in last run */
+       if (nicvf_qset_rbdr_active(nic, 0))
+               nicvf_dev_stop(dev);
+
+       /*
+        * Thunderx nicvf PMD can support more than one pool per port only when
+        * 1) Data payload size is the same across all the pools in a given port
+        * AND
+        * 2) All mbufs in the pools come from the same hugepage
+        * AND
+        * 3) Mbuf metadata size is the same across all the pools in a given port
+        *
+        * This is to support existing applications that use multiple pools per
+        * port. However, using multiple pools for QoS purposes is not addressed.
+        */
+
+       /* Validate RBDR buff size */
+       for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++) {
+               rxq = dev->data->rx_queues[qidx];
+               mbp_priv = rte_mempool_get_priv(rxq->pool);
+               buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
+               if (buffsz % 128) {
+                       PMD_INIT_LOG(ERR, "rxbuf size must be a multiple of 128");
+                       return -EINVAL;
+               }
+               if (rbdrsz == 0)
+                       rbdrsz = buffsz;
+               if (rbdrsz != buffsz) {
+                       PMD_INIT_LOG(ERR, "buffsz not same, qid=%d (%d/%d)",
+                                    qidx, rbdrsz, buffsz);
+                       return -EINVAL;
+               }
+       }
+
+       /* Validate mempool attributes */
+       for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++) {
+               rxq = dev->data->rx_queues[qidx];
+               rxq->mbuf_phys_off = nicvf_mempool_phy_offset(rxq->pool);
+               mbuf = rte_pktmbuf_alloc(rxq->pool);
+               if (mbuf == NULL) {
+                       PMD_INIT_LOG(ERR, "Failed to allocate mbuf qid=%d pool=%s",
+                                    qidx, rxq->pool->name);
+                       return -ENOMEM;
+               }
+               rxq->mbuf_phys_off -= nicvf_mbuff_meta_length(mbuf);
+               rxq->mbuf_phys_off -= RTE_PKTMBUF_HEADROOM;
+               rte_pktmbuf_free(mbuf);
+
+               if (mbuf_phys_off == 0)
+                       mbuf_phys_off = rxq->mbuf_phys_off;
+               if (mbuf_phys_off != rxq->mbuf_phys_off) {
+                       PMD_INIT_LOG(ERR, "pool params not same, %s %" PRIx64,
+                                    rxq->pool->name, mbuf_phys_off);
+                       return -EINVAL;
+               }
+       }
+
+       /* Check the level of buffers in the pool */
+       total_rxq_desc = 0;
+       for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++) {
+               rxq = dev->data->rx_queues[qidx];
+               /* Count the total number of rxq descs */
+               total_rxq_desc += rxq->qlen_mask + 1;
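+               /* The pool must keep enough free buffers to cover the mempool
+                * cache and the refill threshold of every RX queue.
+                */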
+               exp_buffs = RTE_MEMPOOL_CACHE_MAX_SIZE + rxq->rx_free_thresh;
+               exp_buffs *= nic->eth_dev->data->nb_rx_queues;
+               if (rte_mempool_avail_count(rxq->pool) < exp_buffs) {
+                       PMD_INIT_LOG(ERR, "Buff shortage in pool=%s (%d/%d)",
+                                    rxq->pool->name,
+                                    rte_mempool_avail_count(rxq->pool),
+                                    exp_buffs);
+                       return -ENOENT;
+               }
+       }
+
+       /* Check RBDR desc overflow */
+       ret = nicvf_qsize_rbdr_roundup(total_rxq_desc);
+       if (ret == 0) {
+               PMD_INIT_LOG(ERR, "Reached RBDR desc limit, reduce nr desc");
+               return -ENOMEM;
+       }
+
+       /* Enable qset */
+       ret = nicvf_qset_config(nic);
+       if (ret) {
+               PMD_INIT_LOG(ERR, "Failed to enable qset %d", ret);
+               return ret;
+       }
+
+       /* Allocate RBDR and RBDR ring desc */
+       nb_rbdr_desc = nicvf_qsize_rbdr_roundup(total_rxq_desc);
+       ret = nicvf_qset_rbdr_alloc(nic, nb_rbdr_desc, rbdrsz);
+       if (ret) {
+               PMD_INIT_LOG(ERR, "Failed to allocate memory for rbdr");
+               goto qset_reclaim;
+       }
+
+       /* Enable and configure RBDR registers */
+       ret = nicvf_qset_rbdr_config(nic, 0);
+       if (ret) {
+               PMD_INIT_LOG(ERR, "Failed to configure rbdr %d", ret);
+               goto qset_rbdr_free;
+       }
+
+       /* Fill rte_mempool buffers in RBDR pool and precharge it */
+       ret = nicvf_qset_rbdr_precharge(nic, 0, rbdr_rte_mempool_get,
+                                       dev, total_rxq_desc);
+       if (ret) {
+               PMD_INIT_LOG(ERR, "Failed to fill rbdr %d", ret);
+               goto qset_rbdr_reclaim;
+       }
+
+       PMD_DRV_LOG(INFO, "Filled %d out of %d entries in RBDR",
+                    nic->rbdr->tail, nb_rbdr_desc);
+
+       /* Configure RX queues */
+       for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++) {
+               ret = nicvf_start_rx_queue(dev, qidx);
+               if (ret)
+                       goto start_rxq_error;
+       }
+
+       /* Configure VLAN Strip */
+       nicvf_vlan_hw_strip(nic, dev->data->dev_conf.rxmode.hw_vlan_strip);
+
+       /* Configure TX queues */
+       for (qidx = 0; qidx < nic->eth_dev->data->nb_tx_queues; qidx++) {
+               ret = nicvf_start_tx_queue(dev, qidx);
+               if (ret)
+                       goto start_txq_error;
+       }
+
+       /* Configure CPI algorithm */
+       ret = nicvf_configure_cpi(dev);
+       if (ret)
+               goto start_txq_error;
+
+       /* Configure RSS */
+       ret = nicvf_configure_rss(dev);
+       if (ret)
+               goto qset_rss_error;
+
+       /* Configure loopback */
+       ret = nicvf_loopback_config(nic, dev->data->dev_conf.lpbk_mode);
+       if (ret) {
+               PMD_INIT_LOG(ERR, "Failed to configure loopback %d", ret);
+               goto qset_rss_error;
+       }
+
+       /* Reset all statistics counters attached to this port */
+       ret = nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, 0xFFFF, 0xFFFF);
+       if (ret) {
+               PMD_INIT_LOG(ERR, "Failed to reset stat counters %d", ret);
+               goto qset_rss_error;
+       }
+
+       /* Setup scatter mode if needed by jumbo */
+       if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
+                                           2 * VLAN_TAG_SIZE > buffsz)
+               dev->data->scattered_rx = 1;
+       if (rx_conf->enable_scatter)
+               dev->data->scattered_rx = 1;
+
+       /* Setup MTU based on max_rx_pkt_len or default */
+       mtu = dev->data->dev_conf.rxmode.jumbo_frame ?
+               dev->data->dev_conf.rxmode.max_rx_pkt_len
+                       -  ETHER_HDR_LEN - ETHER_CRC_LEN
+               : ETHER_MTU;
+
+       if (nicvf_dev_set_mtu(dev, mtu)) {
+               PMD_INIT_LOG(ERR, "Failed to set default mtu size");
+               return -EBUSY;
+       }
+
+       /* Configure callbacks based on scatter mode */
+       nicvf_set_tx_function(dev);
+       nicvf_set_rx_function(dev);
+
+       /* Done; let the PF turn the BGX's RX and TX switches to the ON position */
+       nicvf_mbox_cfg_done(nic);
+       return 0;
+
+qset_rss_error:
+       nicvf_rss_term(nic);
+start_txq_error:
+       for (qidx = 0; qidx < nic->eth_dev->data->nb_tx_queues; qidx++)
+               nicvf_stop_tx_queue(dev, qidx);
+start_rxq_error:
+       for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++)
+               nicvf_stop_rx_queue(dev, qidx);
+qset_rbdr_reclaim:
+       nicvf_qset_rbdr_reclaim(nic, 0);
+       nicvf_rbdr_release_mbufs(nic);
+qset_rbdr_free:
+       if (nic->rbdr) {
+               rte_free(nic->rbdr);
+               nic->rbdr = NULL;
+       }
+qset_reclaim:
+       nicvf_qset_reclaim(nic);
+       return ret;
+}
+
+static void
+nicvf_dev_stop(struct rte_eth_dev *dev)
+{
+       int ret;
+       uint16_t qidx;
+       struct nicvf *nic = nicvf_pmd_priv(dev);
+
+       PMD_INIT_FUNC_TRACE();
+
+       /* Let the PF turn the BGX's RX and TX switches to the OFF position */
+       nicvf_mbox_shutdown(nic);
+
+       /* Disable loopback */
+       ret = nicvf_loopback_config(nic, 0);
+       if (ret)
+               PMD_INIT_LOG(ERR, "Failed to disable loopback %d", ret);
+
+       /* Disable VLAN Strip */
+       nicvf_vlan_hw_strip(nic, 0);
+
+       /* Reclaim sq */
+       for (qidx = 0; qidx < dev->data->nb_tx_queues; qidx++)
+               nicvf_stop_tx_queue(dev, qidx);
+
+       /* Reclaim rq */
+       for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++)
+               nicvf_stop_rx_queue(dev, qidx);
+
+       /* Reclaim RBDR */
+       ret = nicvf_qset_rbdr_reclaim(nic, 0);
+       if (ret)
+               PMD_INIT_LOG(ERR, "Failed to reclaim RBDR %d", ret);
+
+       /* Move all charged buffers in RBDR back to pool */
+       if (nic->rbdr != NULL)
+               nicvf_rbdr_release_mbufs(nic);
+
+       /* Reclaim CPI configuration */
+       if (!nic->sqs_mode) {
+               ret = nicvf_mbox_config_cpi(nic, 0);
+               if (ret)
+                       PMD_INIT_LOG(ERR, "Failed to reclaim CPI config");
+       }
+
+       /* Disable qset */
+       ret = nicvf_qset_config(nic);
+       if (ret)
+               PMD_INIT_LOG(ERR, "Failed to disable qset %d", ret);
+
+       /* Disable all interrupts */
+       nicvf_disable_all_interrupts(nic);
+
+       /* Free RBDR SW structure */
+       if (nic->rbdr) {
+               rte_free(nic->rbdr);
+               nic->rbdr = NULL;
+       }
+}
+
+static void
+nicvf_dev_close(struct rte_eth_dev *dev)
+{
+       struct nicvf *nic = nicvf_pmd_priv(dev);
+
+       PMD_INIT_FUNC_TRACE();
+
+       nicvf_dev_stop(dev);
+       nicvf_periodic_alarm_stop(nic);
+}
+
 static int
 nicvf_dev_configure(struct rte_eth_dev *dev)
 {
@@ -880,18 +1600,27 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 /* Initialize and register driver with DPDK Application */
 static const struct eth_dev_ops nicvf_eth_dev_ops = {
        .dev_configure            = nicvf_dev_configure,
+       .dev_start                = nicvf_dev_start,
+       .dev_stop                 = nicvf_dev_stop,
        .link_update              = nicvf_dev_link_update,
+       .dev_close                = nicvf_dev_close,
        .stats_get                = nicvf_dev_stats_get,
        .stats_reset              = nicvf_dev_stats_reset,
        .promiscuous_enable       = nicvf_dev_promisc_enable,
        .dev_infos_get            = nicvf_dev_info_get,
+       .dev_supported_ptypes_get = nicvf_dev_supported_ptypes_get,
        .mtu_set                  = nicvf_dev_set_mtu,
        .reta_update              = nicvf_dev_reta_update,
        .reta_query               = nicvf_dev_reta_query,
        .rss_hash_update          = nicvf_dev_rss_hash_update,
        .rss_hash_conf_get        = nicvf_dev_rss_hash_conf_get,
+       .rx_queue_start           = nicvf_dev_rx_queue_start,
+       .rx_queue_stop            = nicvf_dev_rx_queue_stop,
+       .tx_queue_start           = nicvf_dev_tx_queue_start,
+       .tx_queue_stop            = nicvf_dev_tx_queue_stop,
        .rx_queue_setup           = nicvf_dev_rx_queue_setup,
        .rx_queue_release         = nicvf_dev_rx_queue_release,
+       .rx_queue_count           = nicvf_dev_rx_queue_count,
        .tx_queue_setup           = nicvf_dev_tx_queue_setup,
        .tx_queue_release         = nicvf_dev_tx_queue_release,
        .get_reg_length           = nicvf_dev_get_reg_length,
@@ -909,6 +1638,14 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
 
        eth_dev->dev_ops = &nicvf_eth_dev_ops;
 
+       /* For secondary processes, the primary has done all the work */
+       if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+               /* Setup callbacks for secondary process */
+               nicvf_set_tx_function(eth_dev);
+               nicvf_set_rx_function(eth_dev);
+               return 0;
+       }
+
        pci_dev = eth_dev->pci_dev;
        rte_eth_copy_pci_info(eth_dev, pci_dev);