ethdev: add device flag to bypass auto-filled queue xstats
diff --git a/drivers/net/bnxt/bnxt_reps.c b/drivers/net/bnxt/bnxt_reps.c
index bdbce6e..b4566c9 100644
 #include "bnxt_txr.h"
 #include "bnxt_hwrm.h"
 #include "hsi_struct_def_dpdk.h"
-
-static const struct eth_dev_ops bnxt_vf_rep_dev_ops = {
-       .dev_infos_get = bnxt_vf_rep_dev_info_get_op,
-       .dev_configure = bnxt_vf_rep_dev_configure_op,
-       .dev_start = bnxt_vf_rep_dev_start_op,
-       .rx_queue_setup = bnxt_vf_rep_rx_queue_setup_op,
-       .rx_queue_release = bnxt_vf_rep_rx_queue_release_op,
-       .tx_queue_setup = bnxt_vf_rep_tx_queue_setup_op,
-       .tx_queue_release = bnxt_vf_rep_tx_queue_release_op,
-       .link_update = bnxt_vf_rep_link_update_op,
-       .dev_close = bnxt_vf_rep_dev_close_op,
-       .dev_stop = bnxt_vf_rep_dev_stop_op,
-       .stats_get = bnxt_vf_rep_stats_get_op,
-       .stats_reset = bnxt_vf_rep_stats_reset_op,
+#include "bnxt_tf_common.h"
+#include "ulp_port_db.h"
+#include "ulp_flow_db.h"
+
+static const struct eth_dev_ops bnxt_rep_dev_ops = {
+       .dev_infos_get = bnxt_rep_dev_info_get_op,
+       .dev_configure = bnxt_rep_dev_configure_op,
+       .dev_start = bnxt_rep_dev_start_op,
+       .rx_queue_setup = bnxt_rep_rx_queue_setup_op,
+       .rx_queue_release = bnxt_rep_rx_queue_release_op,
+       .tx_queue_setup = bnxt_rep_tx_queue_setup_op,
+       .tx_queue_release = bnxt_rep_tx_queue_release_op,
+       .link_update = bnxt_rep_link_update_op,
+       .dev_close = bnxt_rep_dev_close_op,
+       .dev_stop = bnxt_rep_dev_stop_op,
+       .stats_get = bnxt_rep_stats_get_op,
+       .stats_reset = bnxt_rep_stats_reset_op,
+       .filter_ctrl = bnxt_filter_ctrl_op
 };
 
 uint16_t
-bnxt_vfr_recv(struct bnxt *bp, uint16_t cfa_code, uint16_t queue_id,
-             struct rte_mbuf *mbuf)
+bnxt_vfr_recv(uint16_t port_id, uint16_t queue_id, struct rte_mbuf *mbuf)
 {
-       struct bnxt_sw_rx_bd *prod_rx_buf;
+       struct rte_mbuf **prod_rx_buf;
        struct bnxt_rx_ring_info *rep_rxr;
        struct bnxt_rx_queue *rep_rxq;
        struct rte_eth_dev *vfr_eth_dev;
-       struct bnxt_vf_representor *vfr_bp;
-       uint16_t vf_id;
+       struct bnxt_representor *vfr_bp;
        uint16_t mask;
        uint8_t que;
 
-       vf_id = bp->cfa_code_map[cfa_code];
-       /* cfa_code is invalid OR vf_id > MAX REP. Assume normal Rx */
-       if (vf_id == BNXT_VF_IDX_INVALID || vf_id > BNXT_MAX_VF_REPS)
-               return 1;
-       vfr_eth_dev = bp->rep_info[vf_id].vfr_eth_dev;
-       if (!vfr_eth_dev)
-               return 1;
+       vfr_eth_dev = &rte_eth_devices[port_id];
        vfr_bp = vfr_eth_dev->data->dev_private;
-       if (vfr_bp->rx_cfa_code != cfa_code) {
-               /* cfa_code not meant for this VF rep!!?? */
-               return 1;
-       }
-       /* If rxq_id happens to be > max rep_queue, use rxq0 */
-       que = queue_id < BNXT_MAX_VF_REP_RINGS ? queue_id : 0;
+       /* If rxq_id happens to be >= nr_rings, use ring 0 */
+       que = queue_id < vfr_bp->rx_nr_rings ? queue_id : 0;
        rep_rxq = vfr_bp->rx_queues[que];
+       /* Ideally should not happen now, paranoid check */
+       if (!rep_rxq)
+               return 1;
        rep_rxr = rep_rxq->rx_ring;
        mask = rep_rxr->rx_ring_struct->ring_mask;
 
        /* Put this mbuf on the RxQ of the Representor */
-       prod_rx_buf =
-               &rep_rxr->rx_buf_ring[rep_rxr->rx_prod++ & mask];
-       if (!prod_rx_buf->mbuf) {
-               prod_rx_buf->mbuf = mbuf;
+       prod_rx_buf = &rep_rxr->rx_buf_ring[rep_rxr->rx_prod & mask];
+       if (!*prod_rx_buf) {
+               *prod_rx_buf = mbuf;
                vfr_bp->rx_bytes[que] += mbuf->pkt_len;
                vfr_bp->rx_pkts[que]++;
+               rep_rxr->rx_prod++;
        } else {
+               /* Representor Rx ring full, drop pkt */
                vfr_bp->rx_drop_bytes[que] += mbuf->pkt_len;
                vfr_bp->rx_drop_pkts[que]++;
-               rte_free(mbuf); /* Representor Rx ring full, drop pkt */
+               rte_pktmbuf_free(mbuf);
        }
 
        return 0;
 }
 
 static uint16_t
-bnxt_vf_rep_rx_burst(void *rx_queue,
+bnxt_rep_rx_burst(void *rx_queue,
                     struct rte_mbuf **rx_pkts,
                     uint16_t nb_pkts)
 {
        struct bnxt_rx_queue *rxq = rx_queue;
-       struct bnxt_sw_rx_bd *cons_rx_buf;
+       struct rte_mbuf **cons_rx_buf;
        struct bnxt_rx_ring_info *rxr;
        uint16_t nb_rx_pkts = 0;
        uint16_t mask, i;
@@ -93,11 +89,11 @@ bnxt_vf_rep_rx_burst(void *rx_queue,
        mask = rxr->rx_ring_struct->ring_mask;
        for (i = 0; i < nb_pkts; i++) {
                cons_rx_buf = &rxr->rx_buf_ring[rxr->rx_cons & mask];
-               if (!cons_rx_buf->mbuf)
+               if (*cons_rx_buf == NULL)
                        return nb_rx_pkts;
-               rx_pkts[nb_rx_pkts] = cons_rx_buf->mbuf;
+               rx_pkts[nb_rx_pkts] = *cons_rx_buf;
                rx_pkts[nb_rx_pkts]->port = rxq->port_id;
-               cons_rx_buf->mbuf = NULL;
+               *cons_rx_buf = NULL;
                nb_rx_pkts++;
                rxr->rx_cons++;
        }
@@ -106,14 +102,14 @@ bnxt_vf_rep_rx_burst(void *rx_queue,
 }
 
 static uint16_t
-bnxt_vf_rep_tx_burst(void *tx_queue,
+bnxt_rep_tx_burst(void *tx_queue,
                     struct rte_mbuf **tx_pkts,
                     __rte_unused uint16_t nb_pkts)
 {
        struct bnxt_vf_rep_tx_queue *vfr_txq = tx_queue;
        struct bnxt_tx_queue *ptxq;
        struct bnxt *parent;
-       struct  bnxt_vf_representor *vf_rep_bp;
+       struct bnxt_representor *vf_rep_bp;
        int qid;
        int rc;
        int i;
@@ -127,7 +123,7 @@ bnxt_vf_rep_tx_burst(void *tx_queue,
        pthread_mutex_lock(&parent->rep_info->vfr_lock);
        ptxq = parent->tx_queues[qid];
 
-       ptxq->tx_cfa_action = vf_rep_bp->tx_cfa_action;
+       ptxq->vfr_tx_cfa_action = vf_rep_bp->vfr_tx_cfa_action;
 
        for (i = 0; i < nb_pkts; i++) {
                vf_rep_bp->tx_bytes[qid] += tx_pkts[i]->pkt_len;
@@ -135,28 +131,61 @@ bnxt_vf_rep_tx_burst(void *tx_queue,
        }
 
        rc = bnxt_xmit_pkts(ptxq, tx_pkts, nb_pkts);
-       ptxq->tx_cfa_action = 0;
+       ptxq->vfr_tx_cfa_action = 0;
        pthread_mutex_unlock(&parent->rep_info->vfr_lock);
 
        return rc;
+}
 
-       return 0;
+static int
+bnxt_get_dflt_vnic_svif(struct bnxt *bp, struct bnxt_representor *vf_rep_bp)
+{
+       struct bnxt_rep_info *rep_info;
+       int rc;
+
+       rc = bnxt_hwrm_get_dflt_vnic_svif(bp, vf_rep_bp->fw_fid,
+                                         &vf_rep_bp->dflt_vnic_id,
+                                         &vf_rep_bp->svif);
+       if (rc) {
+               PMD_DRV_LOG(ERR, "Failed to get default vnic id of VF\n");
+               vf_rep_bp->dflt_vnic_id = BNXT_DFLT_VNIC_ID_INVALID;
+               vf_rep_bp->svif = BNXT_SVIF_INVALID;
+       } else {
+               PMD_DRV_LOG(INFO, "vf_rep->dflt_vnic_id = %d\n",
+                               vf_rep_bp->dflt_vnic_id);
+       }
+       if (vf_rep_bp->dflt_vnic_id != BNXT_DFLT_VNIC_ID_INVALID &&
+           vf_rep_bp->svif != BNXT_SVIF_INVALID) {
+               rep_info = &bp->rep_info[vf_rep_bp->vf_id];
+               rep_info->conduit_valid = true;
+       }
+
+       return rc;
 }
 
-int bnxt_vf_representor_init(struct rte_eth_dev *eth_dev, void *params)
+int bnxt_representor_init(struct rte_eth_dev *eth_dev, void *params)
 {
-       struct bnxt_vf_representor *vf_rep_bp = eth_dev->data->dev_private;
-       struct bnxt_vf_representor *rep_params =
-                                (struct bnxt_vf_representor *)params;
+       struct bnxt_representor *vf_rep_bp = eth_dev->data->dev_private;
+       struct bnxt_representor *rep_params =
+                                (struct bnxt_representor *)params;
        struct rte_eth_link *link;
        struct bnxt *parent_bp;
+       uint16_t first_vf_id;
        int rc = 0;
 
+       PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR init\n", eth_dev->data->port_id);
        vf_rep_bp->vf_id = rep_params->vf_id;
        vf_rep_bp->switch_domain_id = rep_params->switch_domain_id;
        vf_rep_bp->parent_dev = rep_params->parent_dev;
-
-       eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+       vf_rep_bp->rep_based_pf = rep_params->rep_based_pf;
+       vf_rep_bp->flags = rep_params->flags;
+       vf_rep_bp->rep_q_r2f = rep_params->rep_q_r2f;
+       vf_rep_bp->rep_q_f2r = rep_params->rep_q_f2r;
+       vf_rep_bp->rep_fc_r2f = rep_params->rep_fc_r2f;
+       vf_rep_bp->rep_fc_f2r = rep_params->rep_fc_f2r;
+
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR |
+                                       RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
        eth_dev->data->representor_id = rep_params->vf_id;
 
        rte_eth_random_addr(vf_rep_bp->dflt_mac_addr);
@@ -164,13 +193,13 @@ int bnxt_vf_representor_init(struct rte_eth_dev *eth_dev, void *params)
               sizeof(vf_rep_bp->mac_addr));
        eth_dev->data->mac_addrs =
                (struct rte_ether_addr *)&vf_rep_bp->mac_addr;
-       eth_dev->dev_ops = &bnxt_vf_rep_dev_ops;
+       eth_dev->dev_ops = &bnxt_rep_dev_ops;
 
        /* No data-path, but need stub Rx/Tx functions to avoid crash
         * when testing with ovs-dpdk
         */
-       eth_dev->rx_pkt_burst = bnxt_vf_rep_rx_burst;
-       eth_dev->tx_pkt_burst = bnxt_vf_rep_tx_burst;
+       eth_dev->rx_pkt_burst = bnxt_rep_rx_burst;
+       eth_dev->tx_pkt_burst = bnxt_rep_tx_burst;
        /* Link state. Inherited from PF or trusted VF */
        parent_bp = vf_rep_bp->parent_dev->data->dev_private;
        link = &parent_bp->eth_dev->data->dev_link;
@@ -180,45 +209,60 @@ int bnxt_vf_representor_init(struct rte_eth_dev *eth_dev, void *params)
        eth_dev->data->dev_link.link_status = link->link_status;
        eth_dev->data->dev_link.link_autoneg = link->link_autoneg;
 
-       vf_rep_bp->fw_fid = rep_params->vf_id + parent_bp->first_vf_id;
-       PMD_DRV_LOG(INFO, "vf_rep->fw_fid = %d\n", vf_rep_bp->fw_fid);
-       rc = bnxt_hwrm_get_dflt_vnic_svif(parent_bp, vf_rep_bp->fw_fid,
-                                         &vf_rep_bp->dflt_vnic_id,
-                                         &vf_rep_bp->svif);
-       if (rc)
-               PMD_DRV_LOG(ERR, "Failed to get default vnic id of VF\n");
-       else
-               PMD_DRV_LOG(INFO, "vf_rep->dflt_vnic_id = %d\n",
-                           vf_rep_bp->dflt_vnic_id);
-
        PMD_DRV_LOG(INFO, "calling bnxt_print_link_info\n");
        bnxt_print_link_info(eth_dev);
 
-       /* Pass the information to the rte_eth_dev_close() that it should also
-        * release the private port resources.
-        */
-       eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
        PMD_DRV_LOG(INFO,
                    "Switch domain id %d: Representor Device %d init done\n",
                    vf_rep_bp->switch_domain_id, vf_rep_bp->vf_id);
 
+       if (vf_rep_bp->rep_based_pf) {
+               vf_rep_bp->fw_fid = vf_rep_bp->rep_based_pf + 1;
+               if (!(BNXT_REP_PF(vf_rep_bp))) {
+                       /* VF representor for the remote PF, get first_vf_id */
+                       rc = bnxt_hwrm_first_vf_id_query(parent_bp,
+                                                        vf_rep_bp->fw_fid,
+                                                        &first_vf_id);
+                       if (rc)
+                               return rc;
+                       if (first_vf_id == 0xffff) {
+                               PMD_DRV_LOG(ERR,
+                                           "Invalid first_vf_id fid:%x\n",
+                                           vf_rep_bp->fw_fid);
+                               return -EINVAL;
+                       }
+                       PMD_DRV_LOG(INFO, "first_vf_id = %x parent_fid:%x\n",
+                                   first_vf_id, vf_rep_bp->fw_fid);
+                       vf_rep_bp->fw_fid = rep_params->vf_id + first_vf_id;
+               }
+       } else {
+               vf_rep_bp->fw_fid = rep_params->vf_id + parent_bp->first_vf_id;
+       }
+
+       PMD_DRV_LOG(INFO, "vf_rep->fw_fid = %d\n", vf_rep_bp->fw_fid);
+
        return 0;
 }
 
-int bnxt_vf_representor_uninit(struct rte_eth_dev *eth_dev)
+int bnxt_representor_uninit(struct rte_eth_dev *eth_dev)
 {
        struct bnxt *parent_bp;
-       struct bnxt_vf_representor *rep =
-               (struct bnxt_vf_representor *)eth_dev->data->dev_private;
-
+       struct bnxt_representor *rep =
+               (struct bnxt_representor *)eth_dev->data->dev_private;
        uint16_t vf_id;
 
+       if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+               return 0;
+
+       PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR uninit\n", eth_dev->data->port_id);
        eth_dev->data->mac_addrs = NULL;
-       eth_dev->dev_ops = NULL;
 
        parent_bp = rep->parent_dev->data->dev_private;
-       if (!parent_bp)
+       if (!parent_bp) {
+               PMD_DRV_LOG(DEBUG, "BNXT Port:%d already freed\n",
+                           eth_dev->data->port_id);
                return 0;
+       }
 
        parent_bp->num_reps--;
        vf_id = rep->vf_id;
@@ -229,15 +273,18 @@ int bnxt_vf_representor_uninit(struct rte_eth_dev *eth_dev)
        return 0;
 }
 
-int bnxt_vf_rep_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_compl)
+int bnxt_rep_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_compl)
 {
        struct bnxt *parent_bp;
-       struct bnxt_vf_representor *rep =
-               (struct bnxt_vf_representor *)eth_dev->data->dev_private;
+       struct bnxt_representor *rep =
+               (struct bnxt_representor *)eth_dev->data->dev_private;
        struct rte_eth_link *link;
        int rc;
 
        parent_bp = rep->parent_dev->data->dev_private;
+       if (!parent_bp)
+               return 0;
+
        rc = bnxt_link_update_op(parent_bp->eth_dev, wait_to_compl);
 
        /* Link state. Inherited from PF or trusted VF */
@@ -252,21 +299,72 @@ int bnxt_vf_rep_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_compl)
        return rc;
 }
 
-static int bnxt_vfr_alloc(struct bnxt_vf_representor *vfr)
+static int bnxt_tf_vfr_alloc(struct rte_eth_dev *vfr_ethdev)
+{
+       int rc;
+       struct bnxt_representor *vfr = vfr_ethdev->data->dev_private;
+       struct rte_eth_dev *parent_dev = vfr->parent_dev;
+       struct bnxt *parent_bp = parent_dev->data->dev_private;
+
+       if (!parent_bp || !parent_bp->ulp_ctx) {
+               BNXT_TF_DBG(ERR, "Invalid arguments\n");
+               return 0;
+       }
+
+       /* Update the ULP port database with the new VFR interface */
+       rc = ulp_port_db_dev_port_intf_update(parent_bp->ulp_ctx, vfr_ethdev);
+       if (rc) {
+               BNXT_TF_DBG(ERR, "Failed to update ulp port details vfr:%u\n",
+                           vfr->vf_id);
+               return rc;
+       }
+
+       /* Create the default rules for the VFR */
+       rc = bnxt_ulp_create_vfr_default_rules(vfr_ethdev);
+       if (rc) {
+               BNXT_TF_DBG(ERR, "Failed to create VFR default rules vfr:%u\n",
+                           vfr->vf_id);
+               return rc;
+       }
+       /* update the port id so you can backtrack to ethdev */
+       vfr->dpdk_port_id = vfr_ethdev->data->port_id;
+
+       if (BNXT_STINGRAY(parent_bp)) {
+               rc = bnxt_hwrm_cfa_pair_alloc(parent_bp, vfr);
+       } else {
+               rc = bnxt_hwrm_cfa_vfr_alloc(parent_bp, vfr->vf_id);
+       }
+       if (rc) {
+               BNXT_TF_DBG(ERR, "Failed in hwrm vfr alloc vfr:%u rc=%d\n",
+                           vfr->vf_id, rc);
+               (void)bnxt_ulp_delete_vfr_default_rules(vfr);
+       }
+       BNXT_TF_DBG(DEBUG, "BNXT Port:%d VFR created and initialized\n",
+                   vfr->dpdk_port_id);
+       return rc;
+}
+
+static int bnxt_vfr_alloc(struct rte_eth_dev *vfr_ethdev)
 {
        int rc = 0;
+       struct bnxt_representor *vfr = vfr_ethdev->data->dev_private;
        struct bnxt *parent_bp;
 
        if (!vfr || !vfr->parent_dev) {
                PMD_DRV_LOG(ERR,
-                           "No memory allocated for representor\n");
+                               "No memory allocated for representor\n");
                return -ENOMEM;
        }
 
        parent_bp = vfr->parent_dev->data->dev_private;
+       if (parent_bp && !parent_bp->ulp_ctx) {
+               PMD_DRV_LOG(ERR,
+                           "ulp context not allocated for parent\n");
+               return -EIO;
+       }
 
        /* Check if representor has been already allocated in FW */
-       if (vfr->tx_cfa_action && vfr->rx_cfa_code)
+       if (vfr->vfr_tx_cfa_action)
                return 0;
 
        /*
@@ -274,29 +372,19 @@ static int bnxt_vfr_alloc(struct bnxt_vf_representor *vfr)
         * Otherwise the FW will create the VF-rep rules with
         * default drop action.
         */
-
-       /*
-        * This is where we need to replace invoking an HWRM cmd
-        * with the new TFLIB ULP API to do more/less the same job
-       rc = bnxt_hwrm_cfa_vfr_alloc(parent_bp,
-                                    vfr->vf_id,
-                                    &vfr->tx_cfa_action,
-                                    &vfr->rx_cfa_code);
-        */
-       if (!rc) {
-               parent_bp->cfa_code_map[vfr->rx_cfa_code] = vfr->vf_id;
+       rc = bnxt_tf_vfr_alloc(vfr_ethdev);
+       if (!rc)
                PMD_DRV_LOG(DEBUG, "allocated representor %d in FW\n",
                            vfr->vf_id);
-       } else {
+       else
                PMD_DRV_LOG(ERR,
                            "Failed to alloc representor %d in FW\n",
                            vfr->vf_id);
-       }
 
        return rc;
 }
 
-static void bnxt_vf_rep_free_rx_mbufs(struct bnxt_vf_representor *rep_bp)
+static void bnxt_rep_free_rx_mbufs(struct bnxt_representor *rep_bp)
 {
        struct bnxt_rx_queue *rxq;
        unsigned int i;
@@ -307,27 +395,47 @@ static void bnxt_vf_rep_free_rx_mbufs(struct bnxt_vf_representor *rep_bp)
        }
 }
 
-int bnxt_vf_rep_dev_start_op(struct rte_eth_dev *eth_dev)
+int bnxt_rep_dev_start_op(struct rte_eth_dev *eth_dev)
 {
-       struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
+       struct bnxt_representor *rep_bp = eth_dev->data->dev_private;
+       struct bnxt_rep_info *rep_info;
+       struct bnxt *parent_bp;
        int rc;
 
-       rc = bnxt_vfr_alloc(rep_bp);
-
-       if (!rc) {
-               eth_dev->rx_pkt_burst = &bnxt_vf_rep_rx_burst;
-               eth_dev->tx_pkt_burst = &bnxt_vf_rep_tx_burst;
+       parent_bp = rep_bp->parent_dev->data->dev_private;
+       rep_info = &parent_bp->rep_info[rep_bp->vf_id];
+
+       BNXT_TF_DBG(DEBUG, "BNXT Port:%d VFR start\n", eth_dev->data->port_id);
+       pthread_mutex_lock(&rep_info->vfr_start_lock);
+       if (!rep_info->conduit_valid) {
+               rc = bnxt_get_dflt_vnic_svif(parent_bp, rep_bp);
+               if (rc || !rep_info->conduit_valid) {
+                       pthread_mutex_unlock(&rep_info->vfr_start_lock);
+                       return rc;
+               }
+       }
+       pthread_mutex_unlock(&rep_info->vfr_start_lock);
 
-               bnxt_vf_rep_link_update_op(eth_dev, 1);
-       } else {
+       rc = bnxt_vfr_alloc(eth_dev);
+       if (rc) {
                eth_dev->data->dev_link.link_status = 0;
-               bnxt_vf_rep_free_rx_mbufs(rep_bp);
+               bnxt_rep_free_rx_mbufs(rep_bp);
+               return rc;
        }
+       eth_dev->rx_pkt_burst = &bnxt_rep_rx_burst;
+       eth_dev->tx_pkt_burst = &bnxt_rep_tx_burst;
+       bnxt_rep_link_update_op(eth_dev, 1);
 
-       return rc;
+       return 0;
 }
 
-static int bnxt_vfr_free(struct bnxt_vf_representor *vfr)
+static int bnxt_tf_vfr_free(struct bnxt_representor *vfr)
+{
+       BNXT_TF_DBG(DEBUG, "BNXT Port:%d VFR ulp free\n", vfr->dpdk_port_id);
+       return bnxt_ulp_delete_vfr_default_rules(vfr);
+}
+
+static int bnxt_vfr_free(struct bnxt_representor *vfr)
 {
        int rc = 0;
        struct bnxt *parent_bp;
@@ -339,60 +447,67 @@ static int bnxt_vfr_free(struct bnxt_vf_representor *vfr)
        }
 
        parent_bp = vfr->parent_dev->data->dev_private;
+       if (!parent_bp) {
+               PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR already freed\n",
+                           vfr->dpdk_port_id);
+               return 0;
+       }
 
        /* Check if representor has been already freed in FW */
-       if (!vfr->tx_cfa_action && !vfr->rx_cfa_code)
+       if (!vfr->vfr_tx_cfa_action)
                return 0;
 
-       /*
-        * This is where we need to replace invoking an HWRM cmd
-        * with the new TFLIB ULP API to do more/less the same job
-       rc = bnxt_hwrm_cfa_vfr_free(parent_bp,
-                                   vfr->vf_id);
-        */
+       rc = bnxt_tf_vfr_free(vfr);
        if (rc) {
                PMD_DRV_LOG(ERR,
                            "Failed to free representor %d in FW\n",
                            vfr->vf_id);
-               return rc;
        }
 
-       parent_bp->cfa_code_map[vfr->rx_cfa_code] = BNXT_VF_IDX_INVALID;
        PMD_DRV_LOG(DEBUG, "freed representor %d in FW\n",
                    vfr->vf_id);
-       vfr->tx_cfa_action = 0;
-       vfr->rx_cfa_code = 0;
+       vfr->vfr_tx_cfa_action = 0;
+
+       if (BNXT_STINGRAY(parent_bp))
+               rc = bnxt_hwrm_cfa_pair_free(parent_bp, vfr);
+       else
+               rc = bnxt_hwrm_cfa_vfr_free(parent_bp, vfr->vf_id);
 
        return rc;
 }
 
-void bnxt_vf_rep_dev_stop_op(struct rte_eth_dev *eth_dev)
+int bnxt_rep_dev_stop_op(struct rte_eth_dev *eth_dev)
 {
-       struct bnxt_vf_representor *vfr_bp = eth_dev->data->dev_private;
+       struct bnxt_representor *vfr_bp = eth_dev->data->dev_private;
 
        /* Avoid crashes as we are about to free queues */
        eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
        eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts;
 
+       BNXT_TF_DBG(DEBUG, "BNXT Port:%d VFR stop\n", eth_dev->data->port_id);
+
        bnxt_vfr_free(vfr_bp);
 
        if (eth_dev->data->dev_started)
                eth_dev->data->dev_link.link_status = 0;
 
-       bnxt_vf_rep_free_rx_mbufs(vfr_bp);
+       bnxt_rep_free_rx_mbufs(vfr_bp);
+
+       return 0;
 }
 
-void bnxt_vf_rep_dev_close_op(struct rte_eth_dev *eth_dev)
+int bnxt_rep_dev_close_op(struct rte_eth_dev *eth_dev)
 {
-       bnxt_vf_representor_uninit(eth_dev);
+       BNXT_TF_DBG(DEBUG, "BNXT Port:%d VFR close\n", eth_dev->data->port_id);
+       bnxt_representor_uninit(eth_dev);
+       return 0;
 }
 
-int bnxt_vf_rep_dev_info_get_op(struct rte_eth_dev *eth_dev,
+int bnxt_rep_dev_info_get_op(struct rte_eth_dev *eth_dev,
                                struct rte_eth_dev_info *dev_info)
 {
-       struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
+       struct bnxt_representor *rep_bp = eth_dev->data->dev_private;
        struct bnxt *parent_bp;
-       uint16_t max_vnics, i, j, vpool, vrxq;
        unsigned int max_rx_rings;
        int rc = 0;
 
@@ -412,7 +527,6 @@ int bnxt_vf_rep_dev_info_get_op(struct rte_eth_dev *eth_dev,
        dev_info->max_tx_queues = max_rx_rings;
        dev_info->reta_size = bnxt_rss_hash_tbl_size(parent_bp);
        dev_info->hash_key_size = 40;
-       max_vnics = parent_bp->max_vnics;
 
        /* MTU specifics */
        dev_info->min_mtu = RTE_ETHER_MIN_MTU;
@@ -428,74 +542,17 @@ int bnxt_vf_rep_dev_info_get_op(struct rte_eth_dev *eth_dev,
        dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
        dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
 
-       /* *INDENT-OFF* */
-       dev_info->default_rxconf = (struct rte_eth_rxconf) {
-               .rx_thresh = {
-                       .pthresh = 8,
-                       .hthresh = 8,
-                       .wthresh = 0,
-               },
-               .rx_free_thresh = 32,
-               /* If no descriptors available, pkts are dropped by default */
-               .rx_drop_en = 1,
-       };
-
-       dev_info->default_txconf = (struct rte_eth_txconf) {
-               .tx_thresh = {
-                       .pthresh = 32,
-                       .hthresh = 0,
-                       .wthresh = 0,
-               },
-               .tx_free_thresh = 32,
-               .tx_rs_thresh = 32,
-       };
-       eth_dev->data->dev_conf.intr_conf.lsc = 1;
-
-       eth_dev->data->dev_conf.intr_conf.rxq = 1;
-       dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
-       dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC;
-       dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
-       dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC;
-
-       /* *INDENT-ON* */
-
-       /*
-        * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
-        *       need further investigation.
-        */
-
-       /* VMDq resources */
-       vpool = 64; /* ETH_64_POOLS */
-       vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
-       for (i = 0; i < 4; vpool >>= 1, i++) {
-               if (max_vnics > vpool) {
-                       for (j = 0; j < 5; vrxq >>= 1, j++) {
-                               if (dev_info->max_rx_queues > vrxq) {
-                                       if (vpool > vrxq)
-                                               vpool = vrxq;
-                                       goto found;
-                               }
-                       }
-                       /* Not enough resources to support VMDq */
-                       break;
-               }
-       }
-       /* Not enough resources to support VMDq */
-       vpool = 0;
-       vrxq = 0;
-found:
-       dev_info->max_vmdq_pools = vpool;
-       dev_info->vmdq_queue_num = vrxq;
-
-       dev_info->vmdq_pool_base = 0;
-       dev_info->vmdq_queue_base = 0;
+       dev_info->switch_info.name = eth_dev->device->name;
+       dev_info->switch_info.domain_id = rep_bp->switch_domain_id;
+       dev_info->switch_info.port_id =
+                       rep_bp->vf_id & BNXT_SWITCH_PORT_ID_VF_MASK;
 
        return 0;
 }
 
-int bnxt_vf_rep_dev_configure_op(__rte_unused struct rte_eth_dev *eth_dev)
+int bnxt_rep_dev_configure_op(__rte_unused struct rte_eth_dev *eth_dev)
 {
-       struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
+       struct bnxt_representor *rep_bp = eth_dev->data->dev_private;
 
        PMD_DRV_LOG(DEBUG, "Representor dev_configure_op\n");
        rep_bp->rx_queues = (void *)eth_dev->data->rx_queues;
@@ -505,18 +562,43 @@ int bnxt_vf_rep_dev_configure_op(__rte_unused struct rte_eth_dev *eth_dev)
        return 0;
 }
 
-int bnxt_vf_rep_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
-                         uint16_t queue_idx,
-                         uint16_t nb_desc,
-                         unsigned int socket_id,
-                         __rte_unused const struct rte_eth_rxconf *rx_conf,
-                         __rte_unused struct rte_mempool *mp)
+static int bnxt_init_rep_rx_ring(struct bnxt_rx_queue *rxq,
+                                unsigned int socket_id)
+{
+       struct bnxt_rx_ring_info *rxr;
+       struct bnxt_ring *ring;
+
+       rxr = rte_zmalloc_socket("bnxt_rep_rx_ring",
+                                sizeof(struct bnxt_rx_ring_info),
+                                RTE_CACHE_LINE_SIZE, socket_id);
+       if (rxr == NULL)
+               return -ENOMEM;
+       rxq->rx_ring = rxr;
+
+       ring = rte_zmalloc_socket("bnxt_rep_rx_ring_struct",
+                                 sizeof(struct bnxt_ring),
+                                 RTE_CACHE_LINE_SIZE, socket_id);
+       if (ring == NULL)
+               return -ENOMEM;
+       rxr->rx_ring_struct = ring;
+       ring->ring_size = rte_align32pow2(rxq->nb_rx_desc);
+       ring->ring_mask = ring->ring_size - 1;
+
+       return 0;
+}
+
+int bnxt_rep_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
+                              uint16_t queue_idx,
+                              uint16_t nb_desc,
+                              unsigned int socket_id,
+                              __rte_unused const struct rte_eth_rxconf *rx_conf,
+                              __rte_unused struct rte_mempool *mp)
 {
-       struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
+       struct bnxt_representor *rep_bp = eth_dev->data->dev_private;
        struct bnxt *parent_bp = rep_bp->parent_dev->data->dev_private;
        struct bnxt_rx_queue *parent_rxq;
        struct bnxt_rx_queue *rxq;
-       struct bnxt_sw_rx_bd *buf_ring;
+       struct rte_mbuf **buf_ring;
        int rc = 0;
 
        if (queue_idx >= BNXT_MAX_VF_REP_RINGS) {
@@ -531,6 +613,11 @@ int bnxt_vf_rep_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
                return -EINVAL;
        }
 
+       if (!parent_bp->rx_queues) {
+               PMD_DRV_LOG(ERR, "Parent Rx qs not configured yet\n");
+               return -EINVAL;
+       }
+
        parent_rxq = parent_bp->rx_queues[queue_idx];
        if (!parent_rxq) {
                PMD_DRV_LOG(ERR, "Parent RxQ has not been configured yet\n");
@@ -558,12 +645,12 @@ int bnxt_vf_rep_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
 
        rxq->nb_rx_desc = nb_desc;
 
-       rc = bnxt_init_rx_ring_struct(rxq, socket_id);
+       rc = bnxt_init_rep_rx_ring(rxq, socket_id);
        if (rc)
                goto out;
 
        buf_ring = rte_zmalloc_socket("bnxt_rx_vfr_buf_ring",
-                                     sizeof(struct bnxt_sw_rx_bd) *
+                                     sizeof(struct rte_mbuf *) *
                                      rxq->rx_ring->rx_ring_struct->ring_size,
                                      RTE_CACHE_LINE_SIZE, socket_id);
        if (!buf_ring) {
@@ -581,12 +668,12 @@ int bnxt_vf_rep_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
 
 out:
        if (rxq)
-               bnxt_rx_queue_release_op(rxq);
+               bnxt_rep_rx_queue_release_op(rxq);
 
        return rc;
 }
 
-void bnxt_vf_rep_rx_queue_release_op(void *rx_queue)
+void bnxt_rep_rx_queue_release_op(void *rx_queue)
 {
        struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
 
@@ -596,19 +683,19 @@ void bnxt_vf_rep_rx_queue_release_op(void *rx_queue)
        bnxt_rx_queue_release_mbufs(rxq);
 
        bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
-       bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
-       bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
+       rte_free(rxq->rx_ring->rx_ring_struct);
+       rte_free(rxq->rx_ring);
 
        rte_free(rxq);
 }
 
-int bnxt_vf_rep_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
-                         uint16_t queue_idx,
-                         uint16_t nb_desc,
-                         unsigned int socket_id,
-                         __rte_unused const struct rte_eth_txconf *tx_conf)
+int bnxt_rep_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
+                              uint16_t queue_idx,
+                              uint16_t nb_desc,
+                              unsigned int socket_id,
+                              __rte_unused const struct rte_eth_txconf *tx_conf)
 {
-       struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
+       struct bnxt_representor *rep_bp = eth_dev->data->dev_private;
        struct bnxt *parent_bp = rep_bp->parent_dev->data->dev_private;
        struct bnxt_tx_queue *parent_txq, *txq;
        struct bnxt_vf_rep_tx_queue *vfr_txq;
@@ -625,6 +712,11 @@ int bnxt_vf_rep_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
                return -EINVAL;
        }
 
+       if (!parent_bp->tx_queues) {
+               PMD_DRV_LOG(ERR, "Parent Tx qs not configured yet\n");
+               return -EINVAL;
+       }
+
        parent_txq = parent_bp->tx_queues[queue_idx];
        if (!parent_txq) {
                PMD_DRV_LOG(ERR, "Parent TxQ has not been configured yet\n");
@@ -638,7 +730,7 @@ int bnxt_vf_rep_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
 
        if (eth_dev->data->tx_queues) {
                vfr_txq = eth_dev->data->tx_queues[queue_idx];
-               bnxt_vf_rep_tx_queue_release_op(vfr_txq);
+               bnxt_rep_tx_queue_release_op(vfr_txq);
                vfr_txq = NULL;
        }
 
@@ -668,7 +760,7 @@ int bnxt_vf_rep_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
        return 0;
 }
 
-void bnxt_vf_rep_tx_queue_release_op(void *tx_queue)
+void bnxt_rep_tx_queue_release_op(void *tx_queue)
 {
        struct bnxt_vf_rep_tx_queue *vfr_txq = tx_queue;
 
@@ -679,10 +771,10 @@ void bnxt_vf_rep_tx_queue_release_op(void *tx_queue)
        rte_free(vfr_txq);
 }
 
-int bnxt_vf_rep_stats_get_op(struct rte_eth_dev *eth_dev,
+int bnxt_rep_stats_get_op(struct rte_eth_dev *eth_dev,
                             struct rte_eth_stats *stats)
 {
-       struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
+       struct bnxt_representor *rep_bp = eth_dev->data->dev_private;
        int i;
 
        memset(stats, 0, sizeof(*stats));
@@ -703,9 +795,9 @@ int bnxt_vf_rep_stats_get_op(struct rte_eth_dev *eth_dev,
        return 0;
 }
 
-int bnxt_vf_rep_stats_reset_op(struct rte_eth_dev *eth_dev)
+int bnxt_rep_stats_reset_op(struct rte_eth_dev *eth_dev)
 {
-       struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
+       struct bnxt_representor *rep_bp = eth_dev->data->dev_private;
        int i;
 
        for (i = 0; i < BNXT_MAX_VF_REP_RINGS; i++) {
@@ -717,3 +809,25 @@ int bnxt_vf_rep_stats_reset_op(struct rte_eth_dev *eth_dev)
        }
        return 0;
 }
+
+int bnxt_rep_stop_all(struct bnxt *bp)
+{
+       uint16_t vf_id;
+       struct rte_eth_dev *rep_eth_dev;
+       int ret;
+
+       /* No vfrep ports, just exit */
+       if (!bp->rep_info)
+               return 0;
+
+       for (vf_id = 0; vf_id < BNXT_MAX_VF_REPS; vf_id++) {
+               rep_eth_dev = bp->rep_info[vf_id].vfr_eth_dev;
+               if (!rep_eth_dev)
+                       continue;
+               ret = bnxt_rep_dev_stop_op(rep_eth_dev);
+               if (ret != 0)
+                       return ret;
+       }
+
+       return 0;
+}