net/bnxt: get IDs for port representor endpoint
[dpdk.git] drivers/net/bnxt/bnxt_hwrm.c
index 93b2ea7..ed42e58 100644
@@ -164,8 +164,9 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
                    rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
                        return -ETIMEDOUT;
 
-               PMD_DRV_LOG(ERR, "Error(timeout) sending msg 0x%04x\n",
-                           req->req_type);
+               PMD_DRV_LOG(ERR,
+                           "Error(timeout) sending msg 0x%04x, seq_id %d\n",
+                           req->req_type, req->seq_id);
                return -ETIMEDOUT;
        }
        return 0;
@@ -221,6 +222,8 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
                        rc = -EINVAL; \
                else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
                        rc = -ENOTSUP; \
+               else if (rc == HWRM_ERR_CODE_HOT_RESET_PROGRESS) \
+                       rc = -EAGAIN; \
                else if (rc > 0) \
                        rc = -EIO; \
                return rc; \
@@ -249,6 +252,8 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
                        rc = -EINVAL; \
                else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
                        rc = -ENOTSUP; \
+               else if (rc == HWRM_ERR_CODE_HOT_RESET_PROGRESS) \
+                       rc = -EAGAIN; \
                else if (rc > 0) \
                        rc = -EIO; \
                return rc; \
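Note: both variants of the HWRM result-check macro now map
HWRM_ERR_CODE_HOT_RESET_PROGRESS to -EAGAIN, so every HWRM-backed helper can
surface a retryable error while firmware is mid hot-reset. A minimal retry
sketch (hypothetical caller, assuming a struct bnxt *bp in scope; not part of
this patch):

	int rc;

	do {
		rc = bnxt_hwrm_func_qcfg(bp, NULL);
		if (rc == -EAGAIN)
			rte_delay_ms(100); /* let the hot reset finish */
	} while (rc == -EAGAIN);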
@@ -257,6 +262,89 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
 
 #define HWRM_UNLOCK()          rte_spinlock_unlock(&bp->hwrm_lock)
 
+int bnxt_hwrm_tf_message_direct(struct bnxt *bp,
+                               bool use_kong_mb,
+                               uint16_t msg_type,
+                               void *msg,
+                               uint32_t msg_len,
+                               void *resp_msg,
+                               uint32_t resp_len)
+{
+       int rc = 0;
+       bool mailbox = BNXT_USE_CHIMP_MB;
+       struct input *req = msg;
+       struct output *resp = bp->hwrm_cmd_resp_addr;
+
+       if (use_kong_mb)
+               mailbox = BNXT_USE_KONG(bp);
+
+       HWRM_PREP(req, msg_type, mailbox);
+
+       rc = bnxt_hwrm_send_message(bp, req, msg_len, mailbox);
+
+       HWRM_CHECK_RESULT();
+
+       if (resp_msg)
+               memcpy(resp_msg, resp, resp_len);
+
+       HWRM_UNLOCK();
+
+       return rc;
+}
+
+int bnxt_hwrm_tf_message_tunneled(struct bnxt *bp,
+                                 bool use_kong_mb,
+                                 uint16_t tf_type,
+                                 uint16_t tf_subtype,
+                                 uint32_t *tf_response_code,
+                                 void *msg,
+                                 uint32_t msg_len,
+                                 void *response,
+                                 uint32_t response_len)
+{
+       int rc = 0;
+       struct hwrm_cfa_tflib_input req = { .req_type = 0 };
+       struct hwrm_cfa_tflib_output *resp = bp->hwrm_cmd_resp_addr;
+       bool mailbox = BNXT_USE_CHIMP_MB;
+
+       if (msg_len > sizeof(req.tf_req))
+               return -ENOMEM;
+
+       if (use_kong_mb)
+               mailbox = BNXT_USE_KONG(bp);
+
+       HWRM_PREP(&req, HWRM_TF, mailbox);
+       /* Build request using the user supplied request payload.
+        * TLV request size is checked at build time against HWRM
+        * request max size, thus no checking required.
+        */
+       req.tf_type = tf_type;
+       req.tf_subtype = tf_subtype;
+       memcpy(req.tf_req, msg, msg_len);
+
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), mailbox);
+       HWRM_CHECK_RESULT();
+
+       /* Copy the resp to user provided response buffer */
+       if (response != NULL)
+               /* Post process response data. We need to copy only
+                * the 'payload' as the HWRM data structure really is
+                * HWRM header + msg header + payload and the TFLIB
+                * only provided a payload place holder.
+                */
+               if (response_len != 0) {
+                       memcpy(response,
+                              resp->tf_resp,
+                              response_len);
+               }
+
+       /* Extract the internal tflib response code */
+       *tf_response_code = resp->tf_resp_code;
+       HWRM_UNLOCK();
+
+       return rc;
+}
+
 int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 {
        int rc = 0;
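Note: the two helpers added above give the TruFlow (TF) core two paths onto
the HWRM channel. bnxt_hwrm_tf_message_direct() sends an already-formed HWRM
request, optionally via the KONG mailbox, and copies the raw response back to
the caller. bnxt_hwrm_tf_message_tunneled() instead wraps a TFLIB payload in
an HWRM_TF envelope and reports the inner TFLIB status through
*tf_response_code, separately from the HWRM-level return code. A hedged usage
sketch (the payload structs and type/subtype values are placeholders, not
real TFLIB definitions):

	struct { uint32_t flags; } treq = { 0 };	/* hypothetical request */
	struct { uint32_t id; } tresp;			/* hypothetical response */
	uint32_t tf_rc = 0;
	int rc;

	rc = bnxt_hwrm_tf_message_tunneled(bp, false, 1 /* tf_type */,
					   2 /* tf_subtype */, &tf_rc,
					   &treq, sizeof(treq),
					   &tresp, sizeof(tresp));
	if (!rc && tf_rc)
		PMD_DRV_LOG(ERR, "TFLIB error %u\n", tf_rc);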
@@ -538,7 +626,7 @@ static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
 
        HWRM_PREP(&req, HWRM_PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB);
 
-       req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
+       req.port_id = rte_cpu_to_le_16(bp->pf->port_id);
 
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
@@ -602,47 +690,53 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        flags = rte_le_to_cpu_32(resp->flags);
        if (BNXT_PF(bp)) {
-               bp->pf.port_id = resp->port_id;
-               bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
-               bp->pf.total_vfs = rte_le_to_cpu_16(resp->max_vfs);
+               bp->pf->port_id = resp->port_id;
+               bp->pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
+               bp->pf->total_vfs = rte_le_to_cpu_16(resp->max_vfs);
                new_max_vfs = bp->pdev->max_vfs;
-               if (new_max_vfs != bp->pf.max_vfs) {
-                       if (bp->pf.vf_info)
-                               rte_free(bp->pf.vf_info);
-                       bp->pf.vf_info = rte_malloc("bnxt_vf_info",
-                           sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
-                       bp->pf.max_vfs = new_max_vfs;
+               if (new_max_vfs != bp->pf->max_vfs) {
+                       if (bp->pf->vf_info)
+                               rte_free(bp->pf->vf_info);
+                       bp->pf->vf_info = rte_malloc("bnxt_vf_info",
+                           sizeof(bp->pf->vf_info[0]) * new_max_vfs, 0);
+                       bp->pf->max_vfs = new_max_vfs;
                        for (i = 0; i < new_max_vfs; i++) {
-                               bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
-                               bp->pf.vf_info[i].vlan_table =
+                               bp->pf->vf_info[i].fid =
+                                       bp->pf->first_vf_id + i;
+                               bp->pf->vf_info[i].vlan_table =
                                        rte_zmalloc("VF VLAN table",
                                                    getpagesize(),
                                                    getpagesize());
-                               if (bp->pf.vf_info[i].vlan_table == NULL)
+                               if (bp->pf->vf_info[i].vlan_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Fail to alloc VLAN table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
-                                               bp->pf.vf_info[i].vlan_table);
-                               bp->pf.vf_info[i].vlan_as_table =
+                                               bp->pf->vf_info[i].vlan_table);
+                               bp->pf->vf_info[i].vlan_as_table =
                                        rte_zmalloc("VF VLAN AS table",
                                                    getpagesize(),
                                                    getpagesize());
-                               if (bp->pf.vf_info[i].vlan_as_table == NULL)
+                               if (bp->pf->vf_info[i].vlan_as_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Alloc VLAN AS table for VF %d fail\n",
                                        i);
                                else
                                        rte_mem_lock_page(
-                                              bp->pf.vf_info[i].vlan_as_table);
-                               STAILQ_INIT(&bp->pf.vf_info[i].filter);
+                                             bp->pf->vf_info[i].vlan_as_table);
+                               STAILQ_INIT(&bp->pf->vf_info[i].filter);
                        }
                }
        }
 
        bp->fw_fid = rte_le_to_cpu_32(resp->fid);
-       memcpy(bp->dflt_mac_addr, &resp->mac_address, RTE_ETHER_ADDR_LEN);
+       if (!bnxt_check_zero_bytes(resp->mac_address, RTE_ETHER_ADDR_LEN)) {
+               bp->flags |= BNXT_FLAG_DFLT_MAC_SET;
+               memcpy(bp->mac_addr, &resp->mac_address, RTE_ETHER_ADDR_LEN);
+       } else {
+               bp->flags &= ~BNXT_FLAG_DFLT_MAC_SET;
+       }
        bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
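Note: most of the churn in this hunk is the mechanical conversion of bp->pf
from an embedded struct to a pointer (bp->pf.x becomes bp->pf->x), which
repeats throughout the rest of the file. The substantive change is the
default-MAC handling: instead of unconditionally caching the firmware MAC in
bp->dflt_mac_addr, the driver now copies it into bp->mac_addr and raises
BNXT_FLAG_DFLT_MAC_SET only when firmware reports a non-zero address, so a
VF without a firmware-assigned MAC can be detected and handled later.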
@@ -654,16 +748,18 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
                bp->max_l2_ctx += bp->max_rx_em_flows;
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        if (BNXT_PF(bp)) {
-               if (bp->pf.max_vfs)
+               if (bp->pf->max_vfs)
                        bp->max_vnics = 1;
                else
                        bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        } else {
                bp->max_vnics = 1;
        }
+       PMD_DRV_LOG(DEBUG, "Max l2_cntxts is %d vnics is %d\n",
+                   bp->max_l2_ctx, bp->max_vnics);
        bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        if (BNXT_PF(bp)) {
-               bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
+               bp->pf->total_vnics = rte_le_to_cpu_16(resp->max_vnics);
                if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
                        bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
                        PMD_DRV_LOG(DEBUG, "PTP SUPPORTED\n");
@@ -791,9 +887,9 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
        if (BNXT_PF(bp)) {
                req.enables |= rte_cpu_to_le_32(
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
-               memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
+               memcpy(req.vf_req_fwd, bp->pf->vf_req_fwd,
                       RTE_MIN(sizeof(req.vf_req_fwd),
-                              sizeof(bp->pf.vf_req_fwd)));
+                              sizeof(bp->pf->vf_req_fwd)));
 
                /*
                 * PF can sniff HWRM API issued by VF. This can be set up by
@@ -1019,7 +1115,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout)
        dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);
 
        if (bp->max_resp_len != max_resp_len) {
-               sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
+               sprintf(type, "bnxt_hwrm_" PCI_PRI_FMT,
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);
 
@@ -1054,7 +1150,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout)
             (dev_caps_cfg &
              HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) ||
            bp->hwrm_max_ext_req_len > HWRM_MAX_REQ_LEN) {
-               sprintf(type, "bnxt_hwrm_short_%04x:%02x:%02x:%02x",
+               sprintf(type, "bnxt_hwrm_short_" PCI_PRI_FMT,
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);
 
@@ -1086,10 +1182,16 @@ int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout)
                PMD_DRV_LOG(DEBUG, "FW supports Trusted VFs\n");
        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) {
-               bp->flags |= BNXT_FLAG_ADV_FLOW_MGMT;
+               bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_MGMT;
                PMD_DRV_LOG(DEBUG, "FW supports advanced flow management\n");
        }
 
+       if (dev_caps_cfg &
+           HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED) {
+               PMD_DRV_LOG(DEBUG, "FW supports advanced flow counters\n");
+               bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_COUNTERS;
+       }
+
 error:
        HWRM_UNLOCK();
        return rc;
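Note: advertised-capability bits move out of bp->flags into a dedicated
bp->fw_cap mask: BNXT_FLAG_ADV_FLOW_MGMT becomes BNXT_FW_CAP_ADV_FLOW_MGMT,
and the new BNXT_FW_CAP_ADV_FLOW_COUNTERS bit records firmware support for
the flow-counter commands added at the end of this patch.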
@@ -1126,7 +1229,7 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
 
        if (conf->link_up) {
                /* Setting Fixed Speed. But AutoNeg is ON, So disable it */
-               if (bp->link_info.auto_mode && conf->link_speed) {
+               if (bp->link_info->auto_mode && conf->link_speed) {
                        req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
                        PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
                }
@@ -2137,11 +2240,11 @@ int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
        struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        int rc;
 
-       req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
+       req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
        req.enables = rte_cpu_to_le_32(
                        HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
        memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
-       req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+       req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
 
        HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
 
@@ -2149,7 +2252,7 @@ int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();
 
-       bp->pf.vf_info[vf].random_mac = false;
+       bp->pf->vf_info[vf].random_mac = false;
 
        return rc;
 }
@@ -2178,7 +2281,8 @@ int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
 }
 
 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
-                         struct rte_eth_stats *stats)
+                         struct rte_eth_stats *stats,
+                         struct hwrm_func_qstats_output *func_qstats)
 {
        int rc = 0;
        struct hwrm_func_qstats_input req = {.req_type = 0};
@@ -2191,6 +2295,12 @@ int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
        HWRM_CHECK_RESULT();
+       if (func_qstats)
+               memcpy(func_qstats, resp,
+                      sizeof(struct hwrm_func_qstats_output));
+
+       if (!stats)
+               goto exit;
 
        stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
        stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
@@ -2210,6 +2320,7 @@ int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
        stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
        stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
 
+exit:
        HWRM_UNLOCK();
 
        return rc;
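Note: bnxt_hwrm_func_qstats() gains an optional pointer to the raw
hwrm_func_qstats_output, and the stats argument may now be NULL, so callers
can fetch the unconverted firmware counters from the same query. A hedged
caller sketch (0xffff as the own-function fid follows the convention used
elsewhere in this file):

	struct hwrm_func_qstats_output raw;
	int rc;

	/* Raw firmware counters only; skip the rte_eth_stats mapping. */
	rc = bnxt_hwrm_func_qstats(bp, 0xffff, NULL, &raw);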
@@ -2372,13 +2483,6 @@ void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
                if (BNXT_HAS_RING_GRPS(bp))
                        bp->grp_info[queue_index].rx_fw_ring_id =
                                                        INVALID_HW_RING_ID;
-               memset(rxr->rx_desc_ring, 0,
-                      rxr->rx_ring_struct->ring_size *
-                      sizeof(*rxr->rx_desc_ring));
-               memset(rxr->rx_buf_ring, 0,
-                      rxr->rx_ring_struct->ring_size *
-                      sizeof(*rxr->rx_buf_ring));
-               rxr->rx_prod = 0;
        }
        ring = rxr->ag_ring_struct;
        if (ring->fw_ring_id != INVALID_HW_RING_ID) {
@@ -2386,11 +2490,6 @@ void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
                                    BNXT_CHIP_THOR(bp) ?
                                    HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG :
                                    HWRM_RING_FREE_INPUT_RING_TYPE_RX);
-               ring->fw_ring_id = INVALID_HW_RING_ID;
-               memset(rxr->ag_buf_ring, 0,
-                      rxr->ag_ring_struct->ring_size *
-                      sizeof(*rxr->ag_buf_ring));
-               rxr->ag_prod = 0;
                if (BNXT_HAS_RING_GRPS(bp))
                        bp->grp_info[queue_index].ag_fw_ring_id =
                                                        INVALID_HW_RING_ID;
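Note: bnxt_free_hwrm_rx_ring() is narrowed to releasing firmware ring IDs.
The descriptor/buffer-ring memsets and the rx_prod/ag_prod resets are dropped
from the free path, implying that ring-state cleanup now belongs to the ring
(re)initialization code rather than to HWRM teardown.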
@@ -2474,7 +2573,7 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp)
        struct rte_pci_device *pdev = bp->pdev;
        char type[RTE_MEMZONE_NAMESIZE];
 
-       sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
+       sprintf(type, "bnxt_hwrm_" PCI_PRI_FMT, pdev->addr.domain,
                pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
        bp->max_resp_len = HWRM_MAX_RESP_LEN;
        bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
@@ -2670,6 +2769,10 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
                break;
+       case ETH_LINK_SPEED_200G:
+               eth_link_speed =
+                       HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_200GB;
+               break;
        default:
                PMD_DRV_LOG(ERR,
                        "Unsupported link speed %d; default to AUTO\n",
@@ -2682,15 +2785,21 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
                ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
                ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
-               ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G)
+               ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | \
+               ETH_LINK_SPEED_100G | ETH_LINK_SPEED_200G)
 
-static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
+static int bnxt_validate_link_speed(struct bnxt *bp)
 {
+       uint32_t link_speed = bp->eth_dev->data->dev_conf.link_speeds;
+       uint16_t port_id = bp->eth_dev->data->port_id;
+       uint32_t link_speed_capa;
        uint32_t one_speed;
 
        if (link_speed == ETH_LINK_SPEED_AUTONEG)
                return 0;
 
+       link_speed_capa = bnxt_get_speed_capabilities(bp);
+
        if (link_speed & ETH_LINK_SPEED_FIXED) {
                one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
 
@@ -2700,14 +2809,14 @@ static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
                                link_speed, port_id);
                        return -EINVAL;
                }
-               if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
+               if ((one_speed & link_speed_capa) != one_speed) {
                        PMD_DRV_LOG(ERR,
                                "Unsupported advertised speed (%u) for port %u\n",
                                link_speed, port_id);
                        return -EINVAL;
                }
        } else {
-               if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
+               if (!(link_speed & link_speed_capa)) {
                        PMD_DRV_LOG(ERR,
                                "Unsupported advertised speeds (%u) for port %u\n",
                                link_speed, port_id);
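Note: link-speed validation is reworked around live PHY capabilities. The old
bnxt_valid_link_speed() compared requests against the static
BNXT_SUPPORTED_SPEEDS mask; bnxt_validate_link_speed() takes the configured
link_speeds and port_id from the device itself and checks them against
bnxt_get_speed_capabilities(bp), so the newly added 200G speed (see the
parse/mask hunks around this one) is only accepted when the hardware
advertises it.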
@@ -2723,8 +2832,8 @@ bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
        uint16_t ret = 0;
 
        if (link_speed == ETH_LINK_SPEED_AUTONEG) {
-               if (bp->link_info.support_speeds)
-                       return bp->link_info.support_speeds;
+               if (bp->link_info->support_speeds)
+                       return bp->link_info->support_speeds;
                link_speed = BNXT_SUPPORTED_SPEEDS;
        }
 
@@ -2748,6 +2857,8 @@ bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
        if (link_speed & ETH_LINK_SPEED_100G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
+       if (link_speed & ETH_LINK_SPEED_200G)
+               ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_200GB;
        return ret;
 }
 
@@ -2783,6 +2894,9 @@ static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
                eth_link_speed = ETH_SPEED_NUM_100G;
                break;
+       case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
+               eth_link_speed = ETH_SPEED_NUM_200G;
+               break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
        default:
                PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
@@ -2816,7 +2930,7 @@ static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
 {
        int rc = 0;
-       struct bnxt_link_info *link_info = &bp->link_info;
+       struct bnxt_link_info *link_info = bp->link_info;
 
        rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
        if (rc) {
@@ -2848,8 +2962,7 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
        if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
                return 0;
 
-       rc = bnxt_valid_link_speed(dev_conf->link_speeds,
-                       bp->eth_dev->data->port_id);
+       rc = bnxt_validate_link_speed(bp);
        if (rc)
                goto error;
 
@@ -2878,19 +2991,19 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
         */
        if (autoneg == 1 &&
            !(!BNXT_CHIP_THOR(bp) &&
-             (bp->link_info.auto_link_speed ||
-              bp->link_info.force_link_speed))) {
+             (bp->link_info->auto_link_speed ||
+              bp->link_info->force_link_speed))) {
                link_req.phy_flags |=
                                HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
                link_req.auto_link_speed_mask =
                        bnxt_parse_eth_link_speed_mask(bp,
                                                       dev_conf->link_speeds);
        } else {
-               if (bp->link_info.phy_type ==
+               if (bp->link_info->phy_type ==
                    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
-                   bp->link_info.phy_type ==
+                   bp->link_info->phy_type ==
                    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
-                   bp->link_info.media_type ==
+                   bp->link_info->media_type ==
                    HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
                        PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
                        return -EINVAL;
@@ -2900,14 +3013,14 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
                /* If user wants a particular speed try that first. */
                if (speed)
                        link_req.link_speed = speed;
-               else if (bp->link_info.force_link_speed)
-                       link_req.link_speed = bp->link_info.force_link_speed;
+               else if (bp->link_info->force_link_speed)
+                       link_req.link_speed = bp->link_info->force_link_speed;
                else
-                       link_req.link_speed = bp->link_info.auto_link_speed;
+                       link_req.link_speed = bp->link_info->auto_link_speed;
        }
        link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
-       link_req.auto_pause = bp->link_info.auto_pause;
-       link_req.force_pause = bp->link_info.force_pause;
+       link_req.auto_pause = bp->link_info->auto_pause;
+       link_req.force_pause = bp->link_info->force_pause;
 
 port_phy_cfg:
        rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
@@ -2927,6 +3040,8 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
        struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t flags;
        int rc = 0;
+       uint16_t svif_info;
+       bp->func_svif = BNXT_SVIF_INVALID;
 
        HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
        req.fid = rte_cpu_to_le_16(0xffff);
@@ -2937,6 +3052,12 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
 
        /* Hard Coded.. 0xfff VLAN ID mask */
        bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
+
+       svif_info = rte_le_to_cpu_16(resp->svif_info);
+       if (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID)
+               bp->func_svif = svif_info &
+                                    HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_MASK;
+
        flags = rte_le_to_cpu_16(resp->flags);
        if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
                bp->flags |= BNXT_FLAG_MULTI_HOST;
@@ -2973,6 +3094,62 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
        return rc;
 }
 
+int bnxt_hwrm_get_dflt_vnic_svif(struct bnxt *bp, uint16_t fid,
+                                uint16_t *vnic_id, uint16_t *svif)
+{
+       struct hwrm_func_qcfg_input req = {0};
+       struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+       uint16_t svif_info;
+       int rc = 0;
+
+       HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
+       req.fid = rte_cpu_to_le_16(fid);
+
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+
+       HWRM_CHECK_RESULT();
+
+       if (vnic_id)
+               *vnic_id = rte_le_to_cpu_16(resp->dflt_vnic_id);
+
+       svif_info = rte_le_to_cpu_16(resp->svif_info);
+       if (svif && (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID))
+               *svif = svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_MASK;
+
+       HWRM_UNLOCK();
+
+       return rc;
+}
+
+int bnxt_hwrm_port_mac_qcfg(struct bnxt *bp)
+{
+       struct hwrm_port_mac_qcfg_input req = {0};
+       struct hwrm_port_mac_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+       uint16_t port_svif_info;
+       int rc;
+
+       bp->port_svif = BNXT_SVIF_INVALID;
+
+       if (!BNXT_PF(bp))
+               return 0;
+
+       HWRM_PREP(&req, HWRM_PORT_MAC_QCFG, BNXT_USE_CHIMP_MB);
+
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+
+       HWRM_CHECK_RESULT();
+
+       port_svif_info = rte_le_to_cpu_16(resp->port_svif_info);
+       if (port_svif_info &
+           HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_VALID)
+               bp->port_svif = port_svif_info &
+                       HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_MASK;
+
+       HWRM_UNLOCK();
+
+       return 0;
+}
+
 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
                                   struct hwrm_func_qcaps_output *qcaps)
 {
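Note: these two helpers are the core of the commit subject. A port
representor needs its endpoint's identifiers to steer traffic:
bnxt_hwrm_get_dflt_vnic_svif() queries a function's default VNIC ID and SVIF
(source virtual interface) via HWRM_FUNC_QCFG, and bnxt_hwrm_port_mac_qcfg()
retrieves the PF's port-level SVIF, complementing the bp->func_svif capture
added to bnxt_hwrm_func_qcfg() above. A hedged sketch of how a representor
init path might consume them (parent_bp and vf_fid are placeholders):

	uint16_t vnic_id = 0, svif = 0;
	int rc;

	rc = bnxt_hwrm_get_dflt_vnic_svif(parent_bp, vf_fid, &vnic_id, &svif);
	if (!rc)
		PMD_DRV_LOG(DEBUG, "endpoint vnic %u svif %u\n", vnic_id, svif);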
@@ -3024,7 +3201,7 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
                req.num_msix = rte_cpu_to_le_16(bp->max_nq_rings);
        }
 
-       req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
+       req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
        req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
        req.mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
        req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
@@ -3093,7 +3270,7 @@ static void add_random_mac_if_needed(struct bnxt *bp,
                cfg_req->enables |=
                rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
                rte_eth_random_addr(cfg_req->dflt_mac_addr);
-               bp->pf.vf_info[vf].random_mac = true;
+               bp->pf->vf_info[vf].random_mac = true;
        } else {
                memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes,
                        RTE_ETHER_ADDR_LEN);
@@ -3110,7 +3287,7 @@ static int reserve_resources_from_vf(struct bnxt *bp,
 
        /* Get the actual allocated values now */
        HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);
-       req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+       req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
        if (rc) {
@@ -3148,7 +3325,7 @@ int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
 
        /* Check for zero MAC address */
        HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
-       req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+       req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
        HWRM_CHECK_RESULT();
        rc = rte_le_to_cpu_16(resp->vlan);
@@ -3172,7 +3349,7 @@ static int update_pf_resource_max(struct bnxt *bp)
 
        /* Only TX ring value reflects actual allocation? TODO */
        bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
-       bp->pf.evb_mode = resp->evb_mode;
+       bp->pf->evb_mode = resp->evb_mode;
 
        HWRM_UNLOCK();
 
@@ -3192,10 +3369,10 @@ int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
        if (rc)
                return rc;
 
-       bp->pf.func_cfg_flags &=
+       bp->pf->func_cfg_flags &=
                ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
                  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
-       bp->pf.func_cfg_flags |=
+       bp->pf->func_cfg_flags |=
                HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
        rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
        rc = __bnxt_hwrm_func_qcaps(bp);
@@ -3221,7 +3398,7 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
        if (rc)
                return rc;
 
-       bp->pf.active_vfs = num_vfs;
+       bp->pf->active_vfs = num_vfs;
 
        /*
         * First, configure the PF to only use one TX ring.  This ensures that
@@ -3233,10 +3410,10 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
         *
         * This has been fixed with firmware versions above 20.6.54
         */
-       bp->pf.func_cfg_flags &=
+       bp->pf->func_cfg_flags &=
                ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
                  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
-       bp->pf.func_cfg_flags |=
+       bp->pf->func_cfg_flags |=
                HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
        rc = bnxt_hwrm_pf_func_cfg(bp, 1);
        if (rc)
@@ -3246,16 +3423,16 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
         * Now, create and register a buffer to hold forwarded VF requests
         */
        req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
-       bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
+       bp->pf->vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
                page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
-       if (bp->pf.vf_req_buf == NULL) {
+       if (bp->pf->vf_req_buf == NULL) {
                rc = -ENOMEM;
                goto error_free;
        }
        for (sz = 0; sz < req_buf_sz; sz += getpagesize())
-               rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
+               rte_mem_lock_page(((char *)bp->pf->vf_req_buf) + sz);
        for (i = 0; i < num_vfs; i++)
-               bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
+               bp->pf->vf_info[i].req_buf = ((char *)bp->pf->vf_req_buf) +
                                        (i * HWRM_MAX_REQ_LEN);
 
        rc = bnxt_hwrm_func_buf_rgtr(bp);
@@ -3264,13 +3441,13 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
 
        populate_vf_func_cfg_req(bp, &req, num_vfs);
 
-       bp->pf.active_vfs = 0;
+       bp->pf->active_vfs = 0;
        for (i = 0; i < num_vfs; i++) {
                add_random_mac_if_needed(bp, &req, i);
 
                HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
-               req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
-               req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
+               req.flags = rte_cpu_to_le_32(bp->pf->vf_info[i].func_cfg_flags);
+               req.fid = rte_cpu_to_le_16(bp->pf->vf_info[i].fid);
                rc = bnxt_hwrm_send_message(bp,
                                            &req,
                                            sizeof(req),
@@ -3293,8 +3470,8 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
                HWRM_UNLOCK();
 
                reserve_resources_from_vf(bp, &req, i);
-               bp->pf.active_vfs++;
-               bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
+               bp->pf->active_vfs++;
+               bnxt_hwrm_func_clr_stats(bp, bp->pf->vf_info[i].fid);
        }
 
        /*
@@ -3328,7 +3505,7 @@ int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
 
        req.fid = rte_cpu_to_le_16(0xffff);
        req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
-       req.evb_mode = bp->pf.evb_mode;
+       req.evb_mode = bp->pf->evb_mode;
 
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
        HWRM_CHECK_RESULT();
@@ -3396,7 +3573,7 @@ int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
 
        HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
 
-       req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+       req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
        req.flags = rte_cpu_to_le_32(flags);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
@@ -3428,10 +3605,10 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
 
        req.req_buf_num_pages = rte_cpu_to_le_16(1);
        req.req_buf_page_size = rte_cpu_to_le_16(
-                        page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
+                        page_getenum(bp->pf->active_vfs * HWRM_MAX_REQ_LEN));
        req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
        req.req_buf_page_addr0 =
-               rte_cpu_to_le_64(rte_malloc_virt2iova(bp->pf.vf_req_buf));
+               rte_cpu_to_le_64(rte_malloc_virt2iova(bp->pf->vf_req_buf));
        if (req.req_buf_page_addr0 == RTE_BAD_IOVA) {
                PMD_DRV_LOG(ERR,
                        "unable to map buffer address to physical memory\n");
@@ -3474,7 +3651,7 @@ int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
        HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
 
        req.fid = rte_cpu_to_le_16(0xffff);
-       req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
+       req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
        req.enables = rte_cpu_to_le_32(
                        HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
        req.async_event_cr = rte_cpu_to_le_16(
@@ -3518,12 +3695,12 @@ int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
        HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
 
        if (is_vf) {
-               dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
-               fid = bp->pf.vf_info[vf].fid;
-               func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
+               dflt_vlan = bp->pf->vf_info[vf].dflt_vlan;
+               fid = bp->pf->vf_info[vf].fid;
+               func_cfg_flags = bp->pf->vf_info[vf].func_cfg_flags;
        } else {
                fid = rte_cpu_to_le_16(0xffff);
-               func_cfg_flags = bp->pf.func_cfg_flags;
+               func_cfg_flags = bp->pf->func_cfg_flags;
                dflt_vlan = bp->vlan;
        }
 
@@ -3549,9 +3726,9 @@ int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
 
        HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
 
-       req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+       req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
        req.enables |= rte_cpu_to_le_32(enables);
-       req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
+       req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
        req.max_bw = rte_cpu_to_le_32(max_bw);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
@@ -3569,10 +3746,10 @@ int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
 
        HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
 
-       req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
-       req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+       req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
+       req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
        req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
-       req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
+       req.dflt_vlan = rte_cpu_to_le_16(bp->pf->vf_info[vf].dflt_vlan);
 
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
@@ -3626,7 +3803,7 @@ int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
 
        HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
 
-       req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+       req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
        HWRM_CHECK_RESULT();
@@ -3703,7 +3880,7 @@ int bnxt_hwrm_port_qstats(struct bnxt *bp)
 {
        struct hwrm_port_qstats_input req = {0};
        struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
-       struct bnxt_pf_info *pf = &bp->pf;
+       struct bnxt_pf_info *pf = bp->pf;
        int rc;
 
        HWRM_PREP(&req, HWRM_PORT_QSTATS, BNXT_USE_CHIMP_MB);
@@ -3723,7 +3900,7 @@ int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
 {
        struct hwrm_port_clr_stats_input req = {0};
        struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
-       struct bnxt_pf_info *pf = &bp->pf;
+       struct bnxt_pf_info *pf = bp->pf;
        int rc;
 
        /* Not allowed on NS2 device, NPAR, MultiHost, VF */
@@ -3752,7 +3929,7 @@ int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
                return 0;
 
        HWRM_PREP(&req, HWRM_PORT_LED_QCAPS, BNXT_USE_CHIMP_MB);
-       req.port_id = bp->pf.port_id;
+       req.port_id = bp->pf->port_id;
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
        HWRM_CHECK_RESULT();
@@ -3760,17 +3937,17 @@ int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
        if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
                unsigned int i;
 
-               bp->num_leds = resp->num_leds;
+               bp->leds->num_leds = resp->num_leds;
                memcpy(bp->leds, &resp->led0_id,
-                       sizeof(bp->leds[0]) * bp->num_leds);
-               for (i = 0; i < bp->num_leds; i++) {
+                       sizeof(bp->leds[0]) * bp->leds->num_leds);
+               for (i = 0; i < bp->leds->num_leds; i++) {
                        struct bnxt_led_info *led = &bp->leds[i];
 
                        uint16_t caps = led->led_state_caps;
 
                        if (!led->led_group_id ||
                                !BNXT_LED_ALT_BLINK_CAP(caps)) {
-                               bp->num_leds = 0;
+                               bp->leds->num_leds = 0;
                                break;
                        }
                }
@@ -3790,7 +3967,7 @@ int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
        uint16_t duration = 0;
        int rc, i;
 
-       if (!bp->num_leds || BNXT_VF(bp))
+       if (!bp->leds->num_leds || BNXT_VF(bp))
                return -EOPNOTSUPP;
 
        HWRM_PREP(&req, HWRM_PORT_LED_CFG, BNXT_USE_CHIMP_MB);
@@ -3799,10 +3976,10 @@ int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
                led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
                duration = rte_cpu_to_le_16(500);
        }
-       req.port_id = bp->pf.port_id;
-       req.num_leds = bp->num_leds;
+       req.port_id = bp->pf->port_id;
+       req.num_leds = bp->leds->num_leds;
        led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
-       for (i = 0; i < bp->num_leds; i++, led_cfg++) {
+       for (i = 0; i < bp->leds->num_leds; i++, led_cfg++) {
                req.enables |= BNXT_LED_DFLT_ENABLES(i);
                led_cfg->led_id = bp->leds[i].led_id;
                led_cfg->led_state = led_state;
@@ -4011,8 +4188,8 @@ static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
        /* First query all VNIC ids */
        HWRM_PREP(&req, HWRM_FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB);
 
-       req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
-       req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
+       req.vf_id = rte_cpu_to_le_16(bp->pf->first_vf_id + vf);
+       req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf->total_vnics);
        req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_malloc_virt2iova(vnic_ids));
 
        if (req.vnic_id_tbl_addr == RTE_BAD_IOVA) {
@@ -4047,7 +4224,7 @@ int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
        size_t sz;
 
        /* First query all VNIC ids */
-       vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
+       vnic_id_sz = bp->pf->total_vnics * sizeof(*vnic_ids);
        vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
                        RTE_CACHE_LINE_SIZE);
        if (vnic_ids == NULL)
@@ -4066,7 +4243,7 @@ int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
        for (i = 0; i < num_vnic_ids; i++) {
                memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
                vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
-               rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
+               rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf->first_vf_id + vf);
                if (rc)
                        break;
                if (vnic.mru <= 4)      /* Indicates unallocated */
@@ -4093,7 +4270,7 @@ int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
 
        HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
 
-       req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+       req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
        req.enables |= rte_cpu_to_le_32(
                        HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
        req.vlan_antispoof_mode = on ?
@@ -4116,7 +4293,7 @@ int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
        size_t sz;
        int rc;
 
-       vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
+       vnic_id_sz = bp->pf->total_vnics * sizeof(*vnic_ids);
        vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
                        RTE_CACHE_LINE_SIZE);
        if (vnic_ids == NULL)
@@ -4139,7 +4316,7 @@ int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
                memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
                vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
                rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
-                                       bp->pf.first_vf_id + vf);
+                                       bp->pf->first_vf_id + vf);
                if (rc)
                        goto exit;
                if (vnic.func_default) {
@@ -4565,7 +4742,7 @@ int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
        struct bnxt_ctx_pg_info *ctx_pg;
        struct bnxt_ctx_mem_info *ctx;
        int total_alloc_len;
-       int rc, i;
+       int rc, i, tqm_rings;
 
        if (!BNXT_CHIP_THOR(bp) ||
            bp->hwrm_spec_code < HWRM_VERSION_1_9_2 ||
@@ -4585,17 +4762,6 @@ int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
                goto ctx_err;
        }
 
-       ctx_pg = rte_malloc("bnxt_ctx_pg_mem",
-                           sizeof(*ctx_pg) * BNXT_MAX_Q,
-                           RTE_CACHE_LINE_SIZE);
-       if (!ctx_pg) {
-               rc = -ENOMEM;
-               goto ctx_err;
-       }
-       for (i = 0; i < BNXT_MAX_Q; i++, ctx_pg++)
-               ctx->tqm_mem[i] = ctx_pg;
-
-       bp->ctx = ctx;
        ctx->qp_max_entries = rte_le_to_cpu_32(resp->qp_max_entries);
        ctx->qp_min_qp1_entries =
                rte_le_to_cpu_16(resp->qp_min_qp1_entries);
@@ -4631,6 +4797,24 @@ int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
        ctx->mrav_entry_size = rte_le_to_cpu_16(resp->mrav_entry_size);
        ctx->tim_entry_size = rte_le_to_cpu_16(resp->tim_entry_size);
        ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries);
+       ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
+
+       if (!ctx->tqm_fp_rings_count)
+               ctx->tqm_fp_rings_count = bp->max_q;
+
+       tqm_rings = ctx->tqm_fp_rings_count + 1;
+
+       ctx_pg = rte_malloc("bnxt_ctx_pg_mem",
+                           sizeof(*ctx_pg) * tqm_rings,
+                           RTE_CACHE_LINE_SIZE);
+       if (!ctx_pg) {
+               rc = -ENOMEM;
+               goto ctx_err;
+       }
+       for (i = 0; i < tqm_rings; i++, ctx_pg++)
+               ctx->tqm_mem[i] = ctx_pg;
+
+       bp->ctx = ctx;
 ctx_err:
        HWRM_UNLOCK();
        return rc;
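Note: the backing-store query now sizes the TQM context array from the
firmware-reported tqm_fp_rings_count, falling back to bp->max_q when older
firmware leaves the field zero, plus one extra slot for the slow path TQM
ring. Allocating after the response is parsed also means bp->ctx is only
published once the query has succeeded.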
@@ -4738,7 +4922,7 @@ int bnxt_hwrm_ext_port_qstats(struct bnxt *bp)
 {
        struct hwrm_port_qstats_ext_input req = {0};
        struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
-       struct bnxt_pf_info *pf = &bp->pf;
+       struct bnxt_pf_info *pf = bp->pf;
        int rc;
 
        if (!(bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS ||
@@ -4879,7 +5063,6 @@ int bnxt_hwrm_set_mac(struct bnxt *bp)
 
        HWRM_CHECK_RESULT();
 
-       memcpy(bp->dflt_mac_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);
        HWRM_UNLOCK();
 
        return rc;
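Note: consistent with the default-MAC rework in __bnxt_hwrm_func_qcaps()
above, bnxt_hwrm_set_mac() no longer mirrors the address into a separate
bp->dflt_mac_addr cache.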
@@ -4938,16 +5121,6 @@ int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
        if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
                return 0;
 
-       if (!info) {
-               info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg",
-                                  sizeof(*info), 0);
-               bp->recovery_info = info;
-               if (info == NULL)
-                       return -ENOMEM;
-       } else {
-               memset(info, 0, sizeof(*info));
-       }
-
        HWRM_PREP(&req, HWRM_ERROR_RECOVERY_QCFG, BNXT_USE_CHIMP_MB);
 
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
@@ -5067,7 +5240,7 @@ int bnxt_hwrm_port_ts_query(struct bnxt *bp, uint8_t path, uint64_t *timestamp)
        }
 
        req.flags = rte_cpu_to_le_32(flags);
-       req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
+       req.port_id = rte_cpu_to_le_16(bp->pf->port_id);
 
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
@@ -5091,7 +5264,7 @@ int bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(struct bnxt *bp)
        uint32_t flags = 0;
        int rc = 0;
 
-       if (!(bp->flags & BNXT_FLAG_ADV_FLOW_MGMT))
+       if (!(bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_MGMT))
                return rc;
 
        if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
@@ -5114,3 +5287,158 @@ int bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(struct bnxt *bp)
 
        return rc;
 }
+
+int bnxt_hwrm_cfa_counter_qcaps(struct bnxt *bp, uint16_t *max_fc)
+{
+       int rc = 0;
+
+       struct hwrm_cfa_counter_qcaps_input req = {0};
+       struct hwrm_cfa_counter_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+
+       if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
+               PMD_DRV_LOG(DEBUG,
+                           "Not a PF or trusted VF. Command not supported\n");
+               return 0;
+       }
+
+       HWRM_PREP(&req, HWRM_CFA_COUNTER_QCAPS, BNXT_USE_KONG(bp));
+       req.target_id = rte_cpu_to_le_16(bp->fw_fid);
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
+
+       HWRM_CHECK_RESULT();
+       if (max_fc)
+               *max_fc = rte_le_to_cpu_16(resp->max_rx_fc);
+       HWRM_UNLOCK();
+
+       return 0;
+}
+
+int bnxt_hwrm_ctx_rgtr(struct bnxt *bp, rte_iova_t dma_addr, uint16_t *ctx_id)
+{
+       int rc = 0;
+       struct hwrm_cfa_ctx_mem_rgtr_input req = {.req_type = 0 };
+       struct hwrm_cfa_ctx_mem_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
+
+       if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
+               PMD_DRV_LOG(DEBUG,
+                           "Not a PF or trusted VF. Command not supported\n");
+               return 0;
+       }
+
+       HWRM_PREP(&req, HWRM_CFA_CTX_MEM_RGTR, BNXT_USE_KONG(bp));
+
+       req.page_level = HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_0;
+       req.page_size = HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_2M;
+       req.page_dir = rte_cpu_to_le_64(dma_addr);
+
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
+
+       HWRM_CHECK_RESULT();
+       if (ctx_id) {
+               *ctx_id = rte_le_to_cpu_16(resp->ctx_id);
+               PMD_DRV_LOG(DEBUG, "ctx_id = %d\n", *ctx_id);
+       }
+       HWRM_UNLOCK();
+
+       return 0;
+}
+
+int bnxt_hwrm_ctx_unrgtr(struct bnxt *bp, uint16_t ctx_id)
+{
+       int rc = 0;
+       struct hwrm_cfa_ctx_mem_unrgtr_input req = {.req_type = 0 };
+       struct hwrm_cfa_ctx_mem_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
+
+       if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
+               PMD_DRV_LOG(DEBUG,
+                           "Not a PF or trusted VF. Command not supported\n");
+               return 0;
+       }
+
+       HWRM_PREP(&req, HWRM_CFA_CTX_MEM_UNRGTR, BNXT_USE_KONG(bp));
+
+       req.ctx_id = rte_cpu_to_le_16(ctx_id);
+
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
+
+       HWRM_CHECK_RESULT();
+       HWRM_UNLOCK();
+
+       return rc;
+}
+
+int bnxt_hwrm_cfa_counter_cfg(struct bnxt *bp, enum bnxt_flow_dir dir,
+                             uint16_t cntr, uint16_t ctx_id,
+                             uint32_t num_entries, bool enable)
+{
+       struct hwrm_cfa_counter_cfg_input req = {0};
+       struct hwrm_cfa_counter_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+       uint16_t flags = 0;
+       int rc;
+
+       if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
+               PMD_DRV_LOG(DEBUG,
+                           "Not a PF or trusted VF. Command not supported\n");
+               return 0;
+       }
+
+       HWRM_PREP(&req, HWRM_CFA_COUNTER_CFG, BNXT_USE_KONG(bp));
+
+       req.target_id = rte_cpu_to_le_16(bp->fw_fid);
+       req.counter_type = rte_cpu_to_le_16(cntr);
+       flags = enable ? HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_ENABLE :
+               HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_DISABLE;
+       flags |= HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_DATA_TRANSFER_MODE_PULL;
+       if (dir == BNXT_DIR_RX)
+               flags |= HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_RX;
+       else if (dir == BNXT_DIR_TX)
+               flags |= HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_TX;
+       req.flags = rte_cpu_to_le_16(flags);
+       req.ctx_id = rte_cpu_to_le_16(ctx_id);
+       req.num_entries = rte_cpu_to_le_32(num_entries);
+
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
+       HWRM_CHECK_RESULT();
+       HWRM_UNLOCK();
+
+       return 0;
+}
+
+int bnxt_hwrm_cfa_counter_qstats(struct bnxt *bp,
+                                enum bnxt_flow_dir dir,
+                                uint16_t cntr,
+                                uint16_t num_entries)
+{
+       struct hwrm_cfa_counter_qstats_output *resp = bp->hwrm_cmd_resp_addr;
+       struct hwrm_cfa_counter_qstats_input req = {0};
+       uint16_t flow_ctx_id = 0;
+       uint16_t flags = 0;
+       int rc = 0;
+
+       if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
+               PMD_DRV_LOG(DEBUG,
+                           "Not a PF or trusted VF. Command not supported\n");
+               return 0;
+       }
+
+       if (dir == BNXT_DIR_RX) {
+               flow_ctx_id = bp->flow_stat->rx_fc_in_tbl.ctx_id;
+               flags = HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_RX;
+       } else if (dir == BNXT_DIR_TX) {
+               flow_ctx_id = bp->flow_stat->tx_fc_in_tbl.ctx_id;
+               flags = HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_TX;
+       }
+
+       HWRM_PREP(&req, HWRM_CFA_COUNTER_QSTATS, BNXT_USE_KONG(bp));
+       req.target_id = rte_cpu_to_le_16(bp->fw_fid);
+       req.counter_type = rte_cpu_to_le_16(cntr);
+       req.input_flow_ctx_id = rte_cpu_to_le_16(flow_ctx_id);
+       req.num_entries = rte_cpu_to_le_16(num_entries);
+       req.flags = rte_cpu_to_le_16(flags);
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
+
+       HWRM_CHECK_RESULT();
+       HWRM_UNLOCK();
+
+       return 0;
+}
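Note: the new CFA flow-counter plumbing targets the KONG mailbox and is
limited to PFs and trusted VFs: qcaps discovers the Rx counter budget,
ctx_rgtr/ctx_unrgtr register the DMA table firmware writes into, counter_cfg
enables pull-mode transfer per direction, and counter_qstats asks firmware to
DMA num_entries counters into the registered table (it reads the context ID
from bp->flow_stat, which must be set up first). A hedged ordering sketch
(dma_addr and cntr are placeholders for the caller's DMA region and the
counter-type constant):

	uint16_t max_fc = 0, ctx_id = 0;
	uint16_t cntr = 0;	/* placeholder counter-type value */

	if (!bnxt_hwrm_cfa_counter_qcaps(bp, &max_fc) &&
	    !bnxt_hwrm_ctx_rgtr(bp, dma_addr, &ctx_id) &&
	    !bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX, cntr,
				       ctx_id, max_fc, true))
		bnxt_hwrm_cfa_counter_qstats(bp, BNXT_DIR_RX, cntr, max_fc);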