net/bnxt: free the aggregation ring
drivers/net/bnxt/bnxt_hwrm.c
index 7f146d6..d88061c 100644
@@ -259,7 +259,7 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
         * by ethtool.
         */
        if (vnic->flags & BNXT_VNIC_INFO_BCAST)
-               mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
+               mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
        if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
@@ -277,7 +277,7 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
                if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
                        mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
                req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
-                        rte_mem_virt2phy(vlan_table));
+                        rte_mem_virt2iova(vlan_table));
                req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
        }
        req.mask = rte_cpu_to_le_32(mask);
@@ -318,7 +318,7 @@ int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
        req.fid = rte_cpu_to_le_16(fid);
 
        req.vlan_tag_mask_tbl_addr =
-               rte_cpu_to_le_64(rte_mem_virt2phy(vlan_table));
+               rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
        req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
 
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -360,7 +360,24 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+       struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+       const struct rte_eth_vmdq_rx_conf *conf =
+                   &dev_conf->rx_adv_conf.vmdq_rx_conf;
        uint32_t enables = 0;
+       uint16_t j = dst_id - 1;
+
+       /* TODO: Is there a better way to add VLANs to each VNIC for VMDQ? */
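+       /* VNIC dst_id maps to VMDq pool index j = dst_id - 1 */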
+       if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
+           conf->pool_map[j].pools & (1UL << j)) {
+               RTE_LOG(DEBUG, PMD,
+                       "Add vlan %u to vmdq pool %u\n",
+                       conf->pool_map[j].vlan_id, j);
+
+               filter->l2_ivlan = conf->pool_map[j].vlan_id;
+               filter->enables |=
+                       HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
+                       HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
+       }
 
        if (filter->fw_l2_filter_id != UINT64_MAX)
                bnxt_hwrm_clear_l2_filter(bp, filter);
@@ -384,9 +401,15 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
+       if (enables &
+           HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
+               req.l2_ivlan = filter->l2_ivlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;
+       if (enables &
+           HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
+               req.l2_ivlan_mask = filter->l2_ivlan_mask;
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
                req.src_id = rte_cpu_to_le_32(filter->src_id);
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
@@ -404,12 +427,95 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
        return rc;
 }
 
+int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
+{
+       struct hwrm_port_mac_cfg_input req = {.req_type = 0};
+       struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+       uint32_t flags = 0;
+       int rc;
+
+       if (!ptp)
+               return 0;
+
+       HWRM_PREP(req, PORT_MAC_CFG);
+
+       if (ptp->rx_filter)
+               flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
+       else
+               flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
+       if (ptp->tx_tstamp_en)
+               flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
+       else
+               flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
+       req.flags = rte_cpu_to_le_32(flags);
+       req.enables =
+       rte_cpu_to_le_32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
+       req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);
+
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+       HWRM_UNLOCK();
+
+       return rc;
+}
+
+static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
+{
+       int rc = 0;
+       struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
+       struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+       struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+/*     if (bp->hwrm_spec_code < 0x10801 || ptp)  TBD  */
+       if (ptp)
+               return 0;
+
+       HWRM_PREP(req, PORT_MAC_PTP_QCFG);
+
+       req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
+
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+       HWRM_CHECK_RESULT();
+
+       if (!(resp->flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS))
+               return 0;
+
+       ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
+       if (!ptp)
+               return -ENOMEM;
+
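+       /* Record the timestamp register offsets reported by firmware */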
+       ptp->rx_regs[BNXT_PTP_RX_TS_L] =
+               rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
+       ptp->rx_regs[BNXT_PTP_RX_TS_H] =
+               rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
+       ptp->rx_regs[BNXT_PTP_RX_SEQ] =
+               rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
+       ptp->rx_regs[BNXT_PTP_RX_FIFO] =
+               rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
+       ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
+               rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
+       ptp->tx_regs[BNXT_PTP_TX_TS_L] =
+               rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
+       ptp->tx_regs[BNXT_PTP_TX_TS_H] =
+               rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
+       ptp->tx_regs[BNXT_PTP_TX_SEQ] =
+               rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
+       ptp->tx_regs[BNXT_PTP_TX_FIFO] =
+               rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
+
+       ptp->bp = bp;
+       bp->ptp_cfg = ptp;
+
+       return 0;
+}
+
 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 {
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t new_max_vfs;
+       uint32_t flags;
        int i;
 
        HWRM_PREP(req, FUNC_QCAPS);
@@ -421,6 +527,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
        HWRM_CHECK_RESULT();
 
        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
+       flags = rte_le_to_cpu_32(resp->flags);
        if (BNXT_PF(bp)) {
                bp->pf.port_id = resp->port_id;
                bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
@@ -477,8 +584,16 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
                bp->max_vnics = 1;
        }
        bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
-       if (BNXT_PF(bp))
+       if (BNXT_PF(bp)) {
                bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
+               if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
+                       bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
+                       RTE_LOG(INFO, PMD, "PTP SUPPORTED\n");
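+                       /* The nested PORT_MAC_PTP_QCFG request takes the
+                        * HWRM lock itself, so release it first.
+                        */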
+                       HWRM_UNLOCK();
+                       bnxt_hwrm_ptp_qcfg(bp);
+               }
+       }
+
        HWRM_UNLOCK();
 
        return rc;
@@ -527,7 +642,7 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
        }
 
        req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1);   /* TODO: Use MACRO */
-       memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));
+       /* memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd)); */
 
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
@@ -621,7 +736,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
                }
                rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
                bp->hwrm_cmd_resp_dma_addr =
-                       rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
+                       rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
                if (bp->hwrm_cmd_resp_dma_addr == 0) {
                        RTE_LOG(ERR, PMD,
                        "Unable to map response buffer to physical memory.\n");
@@ -647,7 +762,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
                }
                rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
                bp->hwrm_short_cmd_req_dma_addr =
-                       rte_mem_virt2phy(bp->hwrm_short_cmd_req_addr);
+                       rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
                if (bp->hwrm_short_cmd_req_dma_addr == 0) {
                        rte_free(bp->hwrm_short_cmd_req_addr);
                        RTE_LOG(ERR, PMD,
@@ -692,34 +807,38 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
        struct hwrm_port_phy_cfg_input req = {0};
        struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;
-       uint32_t link_speed_mask =
-               HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
 
        HWRM_PREP(req, PORT_PHY_CFG);
 
        if (conf->link_up) {
+               /* A fixed speed is requested while autoneg is on, so disable it */
+               if (bp->link_info.auto_mode && conf->link_speed) {
+                       req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
+                       RTE_LOG(DEBUG, PMD, "Disabling AutoNeg\n");
+               }
+
                req.flags = rte_cpu_to_le_32(conf->phy_flags);
                req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
+               enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
                /*
                 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
                 * any auto mode, even "none".
                 */
                if (!conf->link_speed) {
-                       req.auto_mode = conf->auto_mode;
-                       enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
-                       if (conf->auto_mode ==
-                           HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK) {
-                               req.auto_link_speed_mask =
-                                       conf->auto_link_speed_mask;
-                               enables |= link_speed_mask;
-                       }
-                       if (bp->link_info.auto_link_speed) {
-                               req.auto_link_speed =
-                                       bp->link_info.auto_link_speed;
-                               enables |=
-                               HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
-                       }
+                       /* No speeds specified. Enable AutoNeg - all speeds */
+                       req.auto_mode =
+                               HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
                }
+               /* AutoNeg - Advertise speeds specified. */
+               if (conf->auto_link_speed_mask) {
+                       req.auto_mode =
+                               HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
+                       req.auto_link_speed_mask =
+                               conf->auto_link_speed_mask;
+                       enables |=
+                       HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
+               }
+
                req.auto_duplex = conf->duplex;
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
                req.auto_pause = conf->auto_pause;
@@ -763,11 +882,13 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
                (link_info->phy_link_status ==
                 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
        link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
-       link_info->duplex = resp->duplex;
+       link_info->duplex = resp->duplex_cfg;
        link_info->pause = resp->pause;
        link_info->auto_pause = resp->auto_pause;
        link_info->force_pause = resp->force_pause;
        link_info->auto_mode = resp->auto_mode;
+       link_info->phy_type = resp->phy_type;
+       link_info->media_type = resp->media_type;
 
        link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
        link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
@@ -1017,7 +1138,6 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
        cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
 
        HWRM_UNLOCK();
-       bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
 
        return rc;
 }
@@ -1540,19 +1660,15 @@ int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
 
        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
 
-               if (i >= bp->rx_cp_nr_rings)
+               if (i >= bp->rx_cp_nr_rings) {
                        cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
-               else
+               } else {
                        cpr = bp->rx_queues[i]->cp_ring;
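+                       /* grp_info tracks stats contexts for Rx rings only */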
+                       bp->grp_info[i].fw_stats_ctx = -1;
+               }
                if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
                        rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
                        cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
-                       /*
-                        * TODO. Need a better way to reset grp_info.stats_ctx
-                        * for Rx rings only. stats_ctx is not saved for Tx
-                        * in grp_info.
-                        */
-                       bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
                        if (rc)
                                return rc;
                }
@@ -1612,7 +1728,6 @@ static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
        bnxt_hwrm_ring_free(bp, cp_ring,
                        HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
        cp_ring->fw_ring_id = INVALID_HW_RING_ID;
-       bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
        memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
                        sizeof(*cpr->cp_desc_ring));
        cpr->cp_raw_cons = 0;
@@ -1668,10 +1783,17 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp)
                                        rxr->rx_ring_struct->ring_size *
                                        sizeof(*rxr->rx_buf_ring));
                        rxr->rx_prod = 0;
+               }
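+               /* Free the aggregation ring for this Rx queue as well */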
+               ring = rxr->ag_ring_struct;
+               if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+                       bnxt_hwrm_ring_free(bp, ring,
+                                           HWRM_RING_FREE_INPUT_RING_TYPE_RX);
+                       ring->fw_ring_id = INVALID_HW_RING_ID;
                        memset(rxr->ag_buf_ring, 0,
-                                       rxr->ag_ring_struct->ring_size *
-                                       sizeof(*rxr->ag_buf_ring));
+                              rxr->ag_ring_struct->ring_size *
+                              sizeof(*rxr->ag_buf_ring));
                        rxr->ag_prod = 0;
+                       bp->grp_info[i].ag_fw_ring_id = INVALID_HW_RING_ID;
                }
                if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
                        bnxt_free_cp_ring(bp, cpr, idx);
@@ -1730,7 +1852,7 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp)
        if (bp->hwrm_cmd_resp_addr == NULL)
                return -ENOMEM;
        bp->hwrm_cmd_resp_dma_addr =
-               rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
+               rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
        if (bp->hwrm_cmd_resp_dma_addr == 0) {
                RTE_LOG(ERR, PMD,
                        "unable to map response address to physical memory\n");
@@ -1863,6 +1985,11 @@ static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
        return hw_link_duplex;
 }
 
+static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
+{
+       return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
+}
+
 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
 {
        uint16_t eth_link_speed = 0;
@@ -1918,7 +2045,7 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
                ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
                ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
 
-static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
+static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
 {
        uint32_t one_speed;
 
@@ -2071,9 +2198,9 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
        int rc = 0;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        struct bnxt_link_info link_req;
-       uint16_t speed;
+       uint16_t speed, autoneg;
 
-       if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
+       if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
                return 0;
 
        rc = bnxt_valid_link_speed(dev_conf->link_speeds,
@@ -2086,20 +2213,28 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
        if (!link_up)
                goto port_phy_cfg;
 
+       autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
        speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
        link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
-       if (speed == 0) {
+       if (autoneg == 1) {
                link_req.phy_flags |=
                                HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
-               link_req.auto_mode =
-                               HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
                link_req.auto_link_speed_mask =
                        bnxt_parse_eth_link_speed_mask(bp,
                                                       dev_conf->link_speeds);
        } else {
+               if (bp->link_info.phy_type ==
+                   HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
+                   bp->link_info.phy_type ==
+                   HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
+                   bp->link_info.media_type ==
+                   HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
+                       RTE_LOG(ERR, PMD, "10GBase-T devices must autoneg\n");
+                       return -EINVAL;
+               }
+
                link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
                link_req.link_speed = speed;
-               RTE_LOG(INFO, PMD, "Set Link Speed %x\n", speed);
        }
        link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
        link_req.auto_pause = bp->link_info.auto_pause;
@@ -2121,6 +2256,7 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp)
 {
        struct hwrm_func_qcfg_input req = {0};
        struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+       uint16_t flags;
        int rc = 0;
 
        HWRM_PREP(req, FUNC_QCFG);
@@ -2132,6 +2268,9 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp)
 
        /* Hard Coded.. 0xfff VLAN ID mask */
        bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
+       flags = rte_le_to_cpu_16(resp->flags);
+       if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
+               bp->flags |= BNXT_FLAG_MULTI_HOST;
 
        switch (resp->port_partition_type) {
        case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
@@ -2599,7 +2738,7 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
                         page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
        req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
        req.req_buf_page_addr[0] =
-               rte_cpu_to_le_64(rte_mem_virt2phy(bp->pf.vf_req_buf));
+               rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
        if (req.req_buf_page_addr[0] == 0) {
                RTE_LOG(ERR, PMD,
                        "unable to map buffer address to physical memory\n");
@@ -2975,6 +3114,167 @@ int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
        return rc;
 }
 
+int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
+                              uint32_t *length)
+{
+       int rc;
+       struct hwrm_nvm_get_dir_info_input req = {0};
+       struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
+
+       HWRM_PREP(req, NVM_GET_DIR_INFO);
+
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+       HWRM_CHECK_RESULT();
+       HWRM_UNLOCK();
+
+       if (!rc) {
+               *entries = rte_le_to_cpu_32(resp->entries);
+               *length = rte_le_to_cpu_32(resp->entry_length);
+       }
+       return rc;
+}
+
+int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
+{
+       int rc;
+       uint32_t dir_entries;
+       uint32_t entry_length;
+       uint8_t *buf;
+       size_t buflen;
+       rte_iova_t dma_handle;
+       struct hwrm_nvm_get_dir_entries_input req = {0};
+       struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
+
+       rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
+       if (rc != 0)
+               return rc;
+
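+       /* First two output bytes: directory entry count and entry length */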
+       *data++ = dir_entries;
+       *data++ = entry_length;
+       len -= 2;
+       memset(data, 0xff, len);
+
+       buflen = dir_entries * entry_length;
+       buf = rte_malloc("nvm_dir", buflen, 0);
+       if (buf == NULL)
+               return -ENOMEM;
+       rte_mem_lock_page(buf);
+       dma_handle = rte_mem_virt2iova(buf);
+       if (dma_handle == 0) {
+               RTE_LOG(ERR, PMD,
+                       "unable to map response address to physical memory\n");
+               rte_free(buf);
+               return -ENOMEM;
+       }
+       HWRM_PREP(req, NVM_GET_DIR_ENTRIES);
+       req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+       HWRM_CHECK_RESULT();
+       HWRM_UNLOCK();
+
+       if (rc == 0)
+               memcpy(data, buf, len > buflen ? buflen : len);
+
+       rte_free(buf);
+
+       return rc;
+}
+
+int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
+                            uint32_t offset, uint32_t length,
+                            uint8_t *data)
+{
+       int rc;
+       uint8_t *buf;
+       rte_iova_t dma_handle;
+       struct hwrm_nvm_read_input req = {0};
+       struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
+
+       buf = rte_malloc("nvm_item", length, 0);
+       if (!buf)
+               return -ENOMEM;
+       rte_mem_lock_page(buf);
+
+       dma_handle = rte_mem_virt2iova(buf);
+       if (dma_handle == 0) {
+               RTE_LOG(ERR, PMD,
+                       "unable to map response address to physical memory\n");
+               rte_free(buf);
+               return -ENOMEM;
+       }
+       HWRM_PREP(req, NVM_READ);
+       req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
+       req.dir_idx = rte_cpu_to_le_16(index);
+       req.offset = rte_cpu_to_le_32(offset);
+       req.len = rte_cpu_to_le_32(length);
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+       HWRM_CHECK_RESULT();
+       HWRM_UNLOCK();
+       if (rc == 0)
+               memcpy(data, buf, length);
+
+       rte_free(buf);
+       return rc;
+}
+
+int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
+{
+       int rc;
+       struct hwrm_nvm_erase_dir_entry_input req = {0};
+       struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
+
+       HWRM_PREP(req, NVM_ERASE_DIR_ENTRY);
+       req.dir_idx = rte_cpu_to_le_16(index);
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+       HWRM_CHECK_RESULT();
+       HWRM_UNLOCK();
+
+       return rc;
+}
+
+int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
+                         uint16_t dir_ordinal, uint16_t dir_ext,
+                         uint16_t dir_attr, const uint8_t *data,
+                         size_t data_len)
+{
+       int rc;
+       struct hwrm_nvm_write_input req = {0};
+       struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
+       rte_iova_t dma_handle;
+       uint8_t *buf;
+
+       HWRM_PREP(req, NVM_WRITE);
+
+       req.dir_type = rte_cpu_to_le_16(dir_type);
+       req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
+       req.dir_ext = rte_cpu_to_le_16(dir_ext);
+       req.dir_attr = rte_cpu_to_le_16(dir_attr);
+       req.dir_data_length = rte_cpu_to_le_32(data_len);
+
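+       /* Stage the data in a DMA-able buffer for the NVM_WRITE request */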
+       buf = rte_malloc("nvm_write", data_len, 0);
+       if (!buf) {
+               HWRM_UNLOCK();
+               return -ENOMEM;
+       }
+       rte_mem_lock_page(buf);
+
+       dma_handle = rte_mem_virt2iova(buf);
+       if (dma_handle == 0) {
+               RTE_LOG(ERR, PMD,
+                       "unable to map response address to physical memory\n");
+               rte_free(buf);
+               HWRM_UNLOCK();
+               return -ENOMEM;
+       }
+       memcpy(buf, data, data_len);
+       req.host_src_addr = rte_cpu_to_le_64(dma_handle);
+
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+       HWRM_CHECK_RESULT();
+       HWRM_UNLOCK();
+
+       rte_free(buf);
+       return rc;
+}
+
 static void
 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
 {
@@ -3012,7 +3312,7 @@ static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
 
        req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
        req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
-       req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2phy(vnic_ids));
+       req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
 
        if (req.vnic_id_tbl_addr == 0) {
                HWRM_UNLOCK();
@@ -3372,7 +3672,6 @@ int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
        HWRM_UNLOCK();
 
        filter->fw_ntuple_filter_id = -1;
-       filter->fw_l2_filter_id = -1;
 
        return 0;
 }