net/bnxt: check for VNIC ID in rollback
[dpdk.git] drivers/net/bnxt/bnxt_hwrm.c
index f691922..e5f8fda 100644
@@ -27,6 +27,7 @@
 #include <rte_io.h>
 
 #define HWRM_CMD_TIMEOUT               6000000
+#define HWRM_SHORT_CMD_TIMEOUT         50000
 #define HWRM_SPEC_CODE_1_8_3           0x10803
 #define HWRM_VERSION_1_9_1             0x10901
 #define HWRM_VERSION_1_9_2             0x10903
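
For reference, these timeout values are iteration counts for the 1 us polling loop later in this diff, so they translate to wall-clock time roughly as follows (a back-of-the-envelope check, assuming rte_delay_us(1) per iteration):

    /* Poll-iteration counts at ~1 us per iteration:
     *   HWRM_CMD_TIMEOUT:       6000000 * 1 us ~= 6 s
     *   HWRM_SHORT_CMD_TIMEOUT:   50000 * 1 us ~= 50 ms (VER_GET only)
     */
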
@@ -97,6 +98,17 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
                GRCPF_REG_KONG_CHANNEL_OFFSET : GRCPF_REG_CHIMP_CHANNEL_OFFSET;
        uint16_t mb_trigger_offset = use_kong_mb ?
                GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER;
+       uint32_t timeout;
+
+       /* Do not send HWRM commands to firmware in error state */
+       if (bp->flags & BNXT_FLAG_FATAL_ERROR)
+               return 0;
+
+       /* For the VER_GET command, set the timeout to 50 ms */
+       if (rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
+               timeout = HWRM_SHORT_CMD_TIMEOUT;
+       else
+               timeout = HWRM_CMD_TIMEOUT;
 
        if (bp->flags & BNXT_FLAG_SHORT_CMD ||
            msg_len > bp->max_req_len) {
@@ -115,9 +127,6 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
                data = (uint32_t *)&short_input;
                msg_len = sizeof(short_input);
 
-               /* Sync memory write before updating doorbell */
-               rte_wmb();
-
                max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
        }
 
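A plausible rationale for dropping the explicit rte_wmb() here: the doorbell is rung with rte_write32(), and DPDK's non-relaxed MMIO writers already order prior stores before the register write. A sketch of the generic definition from rte_io.h (architectures may override it with equivalent semantics):

    static __rte_always_inline void
    rte_write32(uint32_t value, volatile void *addr)
    {
            rte_io_wmb();   /* prior stores complete before the MMIO write */
            rte_write32_relaxed(value, addr);
    }
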
@@ -137,11 +146,17 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
        /* Ring channel doorbell */
        bar = (uint8_t *)bp->bar0 + mb_trigger_offset;
        rte_write32(1, bar);
+       /*
+        * Make sure the channel doorbell ring command completes before
+        * reading the response to avoid getting stale or invalid
+        * responses.
+        */
+       rte_io_mb();
 
        /* Poll for the valid bit */
-       for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
+       for (i = 0; i < timeout; i++) {
                /* Sanity check on the resp->resp_len */
-               rte_rmb();
+               rte_cio_rmb();
                if (resp->resp_len && resp->resp_len <= bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp->resp_len - 1;
@@ -151,15 +166,17 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
                rte_delay_us(1);
        }
 
-       if (i >= HWRM_CMD_TIMEOUT) {
-               PMD_DRV_LOG(ERR, "Error sending msg 0x%04x\n",
-                       req->req_type);
-               goto err_ret;
+       if (i >= timeout) {
+               /* Suppress VER_GET timeout messages during reset recovery */
+               if (bp->flags & BNXT_FLAG_FW_RESET &&
+                   rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
+                       return -ETIMEDOUT;
+
+               PMD_DRV_LOG(ERR, "Error(timeout) sending msg 0x%04x\n",
+                           req->req_type);
+               return -ETIMEDOUT;
        }
        return 0;
-
-err_ret:
-       return -1;
 }
 
 /*
@@ -202,8 +219,14 @@ err_ret:
                rte_spinlock_unlock(&bp->hwrm_lock); \
                if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
                        rc = -EACCES; \
-               else if (rc > 0) \
+               else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
+                       rc = -ENOSPC; \
+               else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
                        rc = -EINVAL; \
+               else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
+                       rc = -ENOTSUP; \
+               else if (rc > 0) \
+                       rc = -EIO; \
                return rc; \
        } \
        if (resp->error_code) { \
@@ -224,8 +247,14 @@ err_ret:
                rte_spinlock_unlock(&bp->hwrm_lock); \
                if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
                        rc = -EACCES; \
-               else if (rc > 0) \
+               else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
+                       rc = -ENOSPC; \
+               else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
                        rc = -EINVAL; \
+               else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
+                       rc = -ENOTSUP; \
+               else if (rc > 0) \
+                       rc = -EIO; \
                return rc; \
        } \
 } while (0)
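
Both copies of the macro now share the same status-to-errno table. As a readability aid only, here is the mapping as a hypothetical standalone helper (not part of this diff; HWRM status codes are positive, so the final arm turns any unlisted non-zero status into -EIO):

    static int bnxt_hwrm_status_to_errno(uint16_t status)
    {
            switch (status) {
            case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
                    return -EACCES;
            case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
                    return -ENOSPC;
            case HWRM_ERR_CODE_INVALID_PARAMS:
                    return -EINVAL;
            case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
                    return -ENOTSUP;
            default:
                    return status ? -EIO : 0;
            }
    }
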
@@ -344,12 +373,25 @@ int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
                           struct bnxt_filter_info *filter)
 {
        int rc = 0;
+       struct bnxt_filter_info *l2_filter = filter;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
 
        if (filter->fw_l2_filter_id == UINT64_MAX)
                return 0;
 
+       if (filter->matching_l2_fltr_ptr)
+               l2_filter = filter->matching_l2_fltr_ptr;
+
+       PMD_DRV_LOG(DEBUG, "filter: %p l2_filter: %p ref_cnt: %d\n",
+                   filter, l2_filter, l2_filter->l2_ref_cnt);
+
+       if (l2_filter->l2_ref_cnt > 0)
+               l2_filter->l2_ref_cnt--;
+
+       if (l2_filter->l2_ref_cnt > 0)
+               return 0;
+
        HWRM_PREP(req, CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB);
 
        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
@@ -396,8 +438,6 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
        HWRM_PREP(req, CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
 
        req.flags = rte_cpu_to_le_32(filter->flags);
-       req.flags |=
-       rte_cpu_to_le_32(HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST);
 
        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
@@ -427,6 +467,11 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
                req.src_id = rte_cpu_to_le_32(filter->src_id);
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
                req.src_type = filter->src_type;
+       if (filter->pri_hint) {
+               req.pri_hint = filter->pri_hint;
+               req.l2_filter_id_hint =
+                       rte_cpu_to_le_64(filter->l2_filter_id_hint);
+       }
 
        req.enables = rte_cpu_to_le_32(enables);
 
@@ -492,31 +537,37 @@ static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
 
        HWRM_CHECK_RESULT();
 
-       if (!(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS))
+       if (!BNXT_CHIP_THOR(bp) &&
+           !(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS))
                return 0;
 
+       if (resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_ONE_STEP_TX_TS)
+               bp->flags |= BNXT_FLAG_FW_CAP_ONE_STEP_TX_TS;
+
        ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
        if (!ptp)
                return -ENOMEM;
 
-       ptp->rx_regs[BNXT_PTP_RX_TS_L] =
-               rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
-       ptp->rx_regs[BNXT_PTP_RX_TS_H] =
-               rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
-       ptp->rx_regs[BNXT_PTP_RX_SEQ] =
-               rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
-       ptp->rx_regs[BNXT_PTP_RX_FIFO] =
-               rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
-       ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
-               rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
-       ptp->tx_regs[BNXT_PTP_TX_TS_L] =
-               rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
-       ptp->tx_regs[BNXT_PTP_TX_TS_H] =
-               rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
-       ptp->tx_regs[BNXT_PTP_TX_SEQ] =
-               rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
-       ptp->tx_regs[BNXT_PTP_TX_FIFO] =
-               rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
+       if (!BNXT_CHIP_THOR(bp)) {
+               ptp->rx_regs[BNXT_PTP_RX_TS_L] =
+                       rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
+               ptp->rx_regs[BNXT_PTP_RX_TS_H] =
+                       rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
+               ptp->rx_regs[BNXT_PTP_RX_SEQ] =
+                       rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
+               ptp->rx_regs[BNXT_PTP_RX_FIFO] =
+                       rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
+               ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
+                       rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
+               ptp->tx_regs[BNXT_PTP_TX_TS_L] =
+                       rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
+               ptp->tx_regs[BNXT_PTP_TX_TS_H] =
+                       rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
+               ptp->tx_regs[BNXT_PTP_TX_SEQ] =
+                       rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
+               ptp->tx_regs[BNXT_PTP_TX_FIFO] =
+                       rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
+       }
 
        ptp->bp = bp;
        bp->ptp_cfg = ptp;
@@ -589,8 +640,11 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
        bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
-       bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
        bp->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
+       bp->max_rx_em_flows = rte_le_to_cpu_16(resp->max_rx_em_flows);
+       bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
+       if (!BNXT_CHIP_THOR(bp))
+               bp->max_l2_ctx += bp->max_rx_em_flows;
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        if (BNXT_PF(bp)) {
                if (bp->pf.max_vfs)
@@ -614,6 +668,18 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_STATS_SUPPORTED)
                bp->flags |= BNXT_FLAG_EXT_STATS_SUPPORTED;
 
+       if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE) {
+               bp->flags |= BNXT_FLAG_FW_CAP_ERROR_RECOVERY;
+               PMD_DRV_LOG(DEBUG, "Adapter Error recovery SUPPORTED\n");
+       } else {
+               bp->flags &= ~BNXT_FLAG_FW_CAP_ERROR_RECOVERY;
+       }
+
+       if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD)
+               bp->flags |= BNXT_FLAG_FW_CAP_ERR_RECOVER_RELOAD;
+       else
+               bp->flags &= ~BNXT_FLAG_FW_CAP_ERR_RECOVER_RELOAD;
+
        HWRM_UNLOCK();
 
        return rc;
@@ -658,12 +724,23 @@ int bnxt_hwrm_func_reset(struct bnxt *bp)
 int bnxt_hwrm_func_driver_register(struct bnxt *bp)
 {
        int rc;
+       uint32_t flags = 0;
        struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
 
        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;
 
+       flags = HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT;
+       if (bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY)
+               flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ERROR_RECOVERY_SUPPORT;
+
+       /* PFs and trusted VFs should indicate support for the Master
+        * capability on non-Stingray platforms.
+        */
+       if ((BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) && !BNXT_STINGRAY(bp))
+               flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_MASTER_SUPPORT;
+
        HWRM_PREP(req, FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB);
        req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
@@ -684,14 +761,20 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
                 * this HWRM sniffer list in FW because DPDK PF driver does
                 * not support this.
                 */
-               req.flags =
-               rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE);
+               flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE;
        }
 
+       req.flags = rte_cpu_to_le_32(flags);
+
        req.async_event_fwd[0] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
-                                ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
+                                ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE |
+                                ASYNC_CMPL_EVENT_ID_LINK_SPEED_CHANGE |
+                                ASYNC_CMPL_EVENT_ID_RESET_NOTIFY);
+       if (bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY)
+               req.async_event_fwd[0] |=
+                       rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ERROR_RECOVERY);
        req.async_event_fwd[1] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
                                 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);
@@ -699,6 +782,11 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
        HWRM_CHECK_RESULT();
+
+       flags = rte_le_to_cpu_32(resp->flags);
+       if (flags & HWRM_FUNC_DRV_RGTR_OUTPUT_FLAGS_IF_CHANGE_SUPPORTED)
+               bp->flags |= BNXT_FLAG_FW_CAP_IF_CHANGE;
+
        HWRM_UNLOCK();
 
        bp->flags |= BNXT_FLAG_REGISTERED;
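
For context, async_event_fwd[] acts as a little-endian bitmap indexed by async event ID; a hedged sketch of the word/bit arithmetic the ASYNC_CMPL_EVENT_ID_* masks are assumed to encode:

    /* Forwarding bit for a given HWRM async event ID (sketch). */
    req.async_event_fwd[event_id / 32] |=
            rte_cpu_to_le_32(1U << (event_id % 32));
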
@@ -738,9 +826,12 @@ int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
        req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
        req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
                                            AGG_RING_MULTIPLIER);
-       req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
+       req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings +
+                                            bp->tx_nr_rings +
+                                            BNXT_NUM_ASYNC_CPR(bp));
        req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
-                                             bp->tx_nr_rings);
+                                             bp->tx_nr_rings +
+                                             BNXT_NUM_ASYNC_CPR(bp));
        req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
        if (bp->vf_resv_strategy ==
            HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
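
The net effect is that the reservation now accounts for the dedicated async completion ring; a hedged summary, assuming BNXT_NUM_ASYNC_CPR(bp) evaluates to 1 when such a ring is in use and 0 otherwise:

    /* num_cmpl_rings = rx_nr_rings + tx_nr_rings + BNXT_NUM_ASYNC_CPR
     * num_stat_ctxs  = rx_nr_rings + tx_nr_rings + BNXT_NUM_ASYNC_CPR
     */
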
@@ -796,7 +887,12 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
                bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
                bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
                bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
+               /* func_resource_qcaps does not return max_rx_em_flows.
+                * So use the value provided by func_qcaps.
+                */
                bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
+               if (!BNXT_CHIP_THOR(bp))
+                       bp->max_l2_ctx += bp->max_rx_em_flows;
                bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
                bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        }
@@ -830,7 +926,10 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
 
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
-       HWRM_CHECK_RESULT();
+       if (bp->flags & BNXT_FLAG_FW_RESET)
+               HWRM_CHECK_RESULT_SILENT();
+       else
+               HWRM_CHECK_RESULT();
 
        PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
                resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
@@ -881,7 +980,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
                rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
                bp->hwrm_cmd_resp_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
-               if (bp->hwrm_cmd_resp_dma_addr == 0) {
+               if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
                        PMD_DRV_LOG(ERR,
                        "Unable to map response buffer to physical memory.\n");
                        rc = -ENOMEM;
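
The old "== 0" tests could never fire: rte_mem_virt2iova() signals failure with RTE_BAD_IOVA, which is ((rte_iova_t)-1), not 0. A minimal sketch of the corrected pattern used throughout this diff:

    rte_iova_t iova = rte_mem_virt2iova(addr);
    if (iova == RTE_BAD_IOVA) {     /* -1 is the documented failure value */
            PMD_DRV_LOG(ERR, "Unable to map buffer to physical memory.\n");
            return -ENOMEM;
    }
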
@@ -918,7 +1017,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
                rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
                bp->hwrm_short_cmd_req_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
-               if (bp->hwrm_short_cmd_req_dma_addr == 0) {
+               if (bp->hwrm_short_cmd_req_dma_addr == RTE_BAD_IOVA) {
                        rte_free(bp->hwrm_short_cmd_req_addr);
                        PMD_DRV_LOG(ERR,
                                "Unable to map buffer to physical memory.\n");
@@ -934,6 +1033,11 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
                PMD_DRV_LOG(DEBUG, "FW supports Trusted VFs\n");
+       if (dev_caps_cfg &
+           HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) {
+               bp->flags |= BNXT_FLAG_ADV_FLOW_MGMT;
+               PMD_DRV_LOG(DEBUG, "FW supports advanced flow management\n");
+       }
 
 error:
        HWRM_UNLOCK();
@@ -957,8 +1061,6 @@ int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();
 
-       bp->flags &= ~BNXT_FLAG_REGISTERED;
-
        return rc;
 }
 
@@ -1169,6 +1271,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                        mb_pool = bp->rx_queues[0]->mb_pool;
                        rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
                                      RTE_PKTMBUF_HEADROOM;
+                       rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
                        req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
                        enables |=
                                HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID;
@@ -1199,6 +1302,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                mb_pool = bp->rx_queues[0]->mb_pool;
                rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
                              RTE_PKTMBUF_HEADROOM;
+               rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
                req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
 
                req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
@@ -1210,7 +1314,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
                        ring_type);
                HWRM_UNLOCK();
-               return -1;
+               return -EINVAL;
        }
        req.enables = rte_cpu_to_le_32(enables);
 
@@ -1490,6 +1594,11 @@ static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
        struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
 
+       if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
+               PMD_DRV_LOG(DEBUG, "Invalid VNIC ID %x\n", vnic->fw_vnic_id);
+               return rc;
+       }
+
        HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
 
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
@@ -1516,8 +1625,8 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
        int rc = 0;
        struct hwrm_vnic_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+       struct bnxt_plcmodes_cfg pmodes = { 0 };
        uint32_t ctx_enable_flag = 0;
-       struct bnxt_plcmodes_cfg pmodes;
        uint32_t enables = 0;
 
        if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
@@ -1534,7 +1643,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
        if (BNXT_CHIP_THOR(bp)) {
                struct bnxt_rx_queue *rxq = bp->eth_dev->data->rx_queues[0];
                struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
-               struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
+               struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
 
                req.default_rx_ring_id =
                        rte_cpu_to_le_16(rxr->rx_ring_struct->fw_ring_id);
@@ -1667,8 +1776,9 @@ int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
        return rc;
 }
 
-int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp,
-                           struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
+static
+int _bnxt_hwrm_vnic_ctx_free(struct bnxt *bp,
+                            struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
 {
        int rc = 0;
        struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
@@ -1691,6 +1801,28 @@ int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp,
        return rc;
 }
 
+int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+       int rc = 0;
+
+       if (BNXT_CHIP_THOR(bp)) {
+               int j;
+
+               for (j = 0; j < vnic->num_lb_ctxts; j++) {
+                       rc = _bnxt_hwrm_vnic_ctx_free(bp,
+                                                     vnic,
+                                                     vnic->fw_grp_ids[j]);
+                       vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
+               }
+               vnic->num_lb_ctxts = 0;
+       } else {
+               rc = _bnxt_hwrm_vnic_ctx_free(bp, vnic, vnic->rss_rule);
+               vnic->rss_rule = INVALID_HW_RING_ID;
+       }
+
+       return rc;
+}
+
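With the Thor/non-Thor branching folded into the wrapper, callers no longer pass a context index; the rollback path later in this diff reduces to a single call:

    bnxt_hwrm_vnic_ctx_free(bp, vnic);
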
 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 {
        int rc = 0;
@@ -1724,23 +1856,20 @@ bnxt_hwrm_vnic_rss_cfg_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 {
        int i;
        int rc = 0;
-       int nr_ctxs = bp->max_ring_grps;
+       int nr_ctxs = vnic->num_lb_ctxts;
        struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
 
-       if (!(vnic->rss_table && vnic->hash_type))
-               return 0;
-
-       HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
+       for (i = 0; i < nr_ctxs; i++) {
+               HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
 
-       req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
-       req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
-       req.hash_mode_flags = vnic->hash_mode;
+               req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+               req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
+               req.hash_mode_flags = vnic->hash_mode;
 
-       req.hash_key_tbl_addr =
-           rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
+               req.hash_key_tbl_addr =
+                       rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
 
-       for (i = 0; i < nr_ctxs; i++) {
                req.ring_grp_tbl_addr =
                        rte_cpu_to_le_64(vnic->rss_table_dma_addr +
                                         i * HW_HASH_INDEX_SIZE);
@@ -1751,12 +1880,9 @@ bnxt_hwrm_vnic_rss_cfg_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
                                            BNXT_USE_CHIMP_MB);
 
                HWRM_CHECK_RESULT();
-               if (rc)
-                       break;
+               HWRM_UNLOCK();
        }
 
-       HWRM_UNLOCK();
-
        return rc;
 }
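
The restructure makes each firmware command a self-contained prep/send/check/unlock cycle, so the HWRM lock is never held across loop iterations and an error in any context returns with the lock already released (HWRM_CHECK_RESULT unlocks before returning). Schematically:

    for (i = 0; i < nr_ctxs; i++) {
            HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
            /* ... fill req for RSS context i ... */
            rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
                                        BNXT_USE_CHIMP_MB);
            HWRM_CHECK_RESULT();    /* unlocks and returns on error */
            HWRM_UNLOCK();
    }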
 
@@ -1767,6 +1893,9 @@ int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
        struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
 
+       if (!vnic->rss_table)
+               return 0;
+
        if (BNXT_CHIP_THOR(bp))
                return bnxt_hwrm_vnic_rss_cfg_thor(bp, vnic);
 
@@ -1813,6 +1942,7 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
 
        size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
        size -= RTE_PKTMBUF_HEADROOM;
+       size = RTE_MIN(BNXT_MAX_PKT_LEN, size);
 
        req.jumbo_thresh = rte_cpu_to_le_16(size);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
@@ -1835,6 +1965,11 @@ int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
        if (BNXT_CHIP_THOR(bp))
                return 0;
 
+       if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
+               PMD_DRV_LOG(DEBUG, "Invalid vNIC ID\n");
+               return 0;
+       }
+
        HWRM_PREP(req, VNIC_TPA_CFG, BNXT_USE_CHIMP_MB);
 
        if (enable) {
@@ -2067,7 +2202,7 @@ int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
        return rc;
 }
 
-static void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
+void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
 {
        struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
 
@@ -2077,9 +2212,10 @@ static void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
        memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
                                     sizeof(*cpr->cp_desc_ring));
        cpr->cp_raw_cons = 0;
+       cpr->valid = 0;
 }
 
-static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
+void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
 {
        struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
 
@@ -2089,6 +2225,7 @@ static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
        memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
                        sizeof(*cpr->cp_desc_ring));
        cpr->cp_raw_cons = 0;
+       cpr->valid = 0;
 }
 
 void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
@@ -2216,7 +2353,7 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp)
                return -ENOMEM;
        bp->hwrm_cmd_resp_dma_addr =
                rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
-       if (bp->hwrm_cmd_resp_dma_addr == 0) {
+       if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
                PMD_DRV_LOG(ERR,
                        "unable to map response address to physical memory\n");
                return -ENOMEM;
@@ -2239,6 +2376,7 @@ int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
                else
                        rc = bnxt_hwrm_clear_l2_filter(bp, filter);
                STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
+               bnxt_free_filter(bp, filter);
                //if (rc)
                        //break;
        }
@@ -2252,9 +2390,10 @@ bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
        struct rte_flow *flow;
        int rc = 0;
 
-       STAILQ_FOREACH(flow, &vnic->flow_list, next) {
+       while (!STAILQ_EMPTY(&vnic->flow_list)) {
+               flow = STAILQ_FIRST(&vnic->flow_list);
                filter = flow->filter;
-               PMD_DRV_LOG(ERR, "filter type %d\n", filter->filter_type);
+               PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
                if (filter->filter_type == HWRM_CFA_EM_FILTER)
                        rc = bnxt_hwrm_clear_em_filter(bp, filter);
                else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
@@ -2305,7 +2444,7 @@ void bnxt_free_tunnel_ports(struct bnxt *bp)
 
 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
 {
-       int i, j;
+       int i;
 
        if (bp->vnic_info == NULL)
                return;
@@ -2314,24 +2453,18 @@ void bnxt_free_all_hwrm_resources(struct bnxt *bp)
         * Cleanup VNICs in reverse order, to make sure the L2 filter
         * from vnic0 is last to be cleaned up.
         */
-       for (i = bp->nr_vnics - 1; i >= 0; i--) {
+       for (i = bp->max_vnics - 1; i >= 0; i--) {
                struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
 
+               /* If the VNIC ID is invalid, the VNIC is not in use; skip it */
+               if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
+                       continue;
+
                bnxt_clear_hwrm_vnic_flows(bp, vnic);
 
                bnxt_clear_hwrm_vnic_filters(bp, vnic);
 
-               if (BNXT_CHIP_THOR(bp)) {
-                       for (j = 0; j < vnic->num_lb_ctxts; j++) {
-                               bnxt_hwrm_vnic_ctx_free(bp, vnic,
-                                                       vnic->fw_grp_ids[j]);
-                               vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
-                       }
-                       vnic->num_lb_ctxts = 0;
-               } else {
-                       bnxt_hwrm_vnic_ctx_free(bp, vnic, vnic->rss_rule);
-                       vnic->rss_rule = INVALID_HW_RING_ID;
-               }
+               bnxt_hwrm_vnic_ctx_free(bp, vnic);
 
                bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
 
@@ -2602,11 +2735,27 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
                goto port_phy_cfg;
 
        autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
+       if (BNXT_CHIP_THOR(bp) &&
+           dev_conf->link_speeds == ETH_LINK_SPEED_40G) {
+               /* 40G is not supported as part of media auto detect.
+                * The speed should be forced and autoneg disabled
+                * to configure 40G speed.
+                */
+               PMD_DRV_LOG(INFO, "Disabling autoneg for 40G\n");
+               autoneg = 0;
+       }
+
        speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
        link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
-       /* Autoneg can be done only when the FW allows */
-       if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
-                               bp->link_info.force_link_speed)) {
+       /* Autoneg can be done only when the FW allows it.
+        * When the user configures a fixed speed of 40G and later changes
+        * to any other speed, auto_link_speed/force_link_speed is still
+        * set to 40G until the link comes up at the new speed.
+        */
+       if (autoneg == 1 &&
+           !(!BNXT_CHIP_THOR(bp) &&
+             (bp->link_info.auto_link_speed ||
+              bp->link_info.force_link_speed))) {
                link_req.phy_flags |=
                                HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
                link_req.auto_link_speed_mask =
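
The reworked autoneg gate above is easier to read after applying De Morgan's law; both forms below are equivalent (sketch, using a hypothetical restart flag):

    /* As written in the diff: */
    restart = autoneg == 1 &&
              !(!BNXT_CHIP_THOR(bp) &&
                (bp->link_info.auto_link_speed ||
                 bp->link_info.force_link_speed));
    /* Equivalently: on Thor, always honor autoneg; on older chips,
     * only when the FW has not latched an auto/forced link speed. */
    restart = autoneg == 1 &&
              (BNXT_CHIP_THOR(bp) ||
               (!bp->link_info.auto_link_speed &&
                !bp->link_info.force_link_speed));
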
@@ -2671,6 +2820,10 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
        if (BNXT_VF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
                bp->flags |= BNXT_FLAG_TRUSTED_VF_EN;
                PMD_DRV_LOG(INFO, "Trusted VF cap enabled\n");
+       } else if (BNXT_VF(bp) &&
+                  !(flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
+               bp->flags &= ~BNXT_FLAG_TRUSTED_VF_EN;
+               PMD_DRV_LOG(INFO, "Trusted VF cap disabled\n");
        }
 
        if (mtu)
@@ -2872,14 +3025,7 @@ int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
        HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
        req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
-       if (rc) {
-               PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc);
-               return -1;
-       } else if (resp->error_code) {
-               rc = rte_le_to_cpu_16(resp->error_code);
-               PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n", rc);
-               return -1;
-       }
+       HWRM_CHECK_RESULT();
        rc = rte_le_to_cpu_16(resp->vlan);
 
        HWRM_UNLOCK();
@@ -2914,7 +3060,7 @@ int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
 
        if (!BNXT_PF(bp)) {
                PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
-               return -1;
+               return -EINVAL;
        }
 
        rc = bnxt_hwrm_func_qcaps(bp);
@@ -2942,7 +3088,7 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
 
        if (!BNXT_PF(bp)) {
                PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
-               return -1;
+               return -EINVAL;
        }
 
        rc = bnxt_hwrm_func_qcaps(bp);
@@ -3161,7 +3307,7 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
        req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
        req.req_buf_page_addr0 =
                rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
-       if (req.req_buf_page_addr0 == 0) {
+       if (req.req_buf_page_addr0 == RTE_BAD_IOVA) {
                PMD_DRV_LOG(ERR,
                        "unable to map buffer address to physical memory\n");
                return -ENOMEM;
@@ -3181,6 +3327,9 @@ int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
        struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
        struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
 
+       if (!(BNXT_PF(bp) && bp->pdev->max_vfs))
+               return 0;
+
        HWRM_PREP(req, FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB);
 
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
@@ -3204,7 +3353,7 @@ int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
        req.enables = rte_cpu_to_le_32(
                        HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
        req.async_event_cr = rte_cpu_to_le_16(
-                       bp->def_cp_ring->cp_ring_struct->fw_ring_id);
+                       bp->async_cp_ring->cp_ring_struct->fw_ring_id);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
        HWRM_CHECK_RESULT();
@@ -3224,7 +3373,7 @@ int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
        req.enables = rte_cpu_to_le_32(
                        HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
        req.async_event_cr = rte_cpu_to_le_16(
-                       bp->def_cp_ring->cp_ring_struct->fw_ring_id);
+                       bp->async_cp_ring->cp_ring_struct->fw_ring_id);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
        HWRM_CHECK_RESULT();
@@ -3558,12 +3707,11 @@ int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
        HWRM_CHECK_RESULT();
-       HWRM_UNLOCK();
 
-       if (!rc) {
-               *entries = rte_le_to_cpu_32(resp->entries);
-               *length = rte_le_to_cpu_32(resp->entry_length);
-       }
+       *entries = rte_le_to_cpu_32(resp->entries);
+       *length = rte_le_to_cpu_32(resp->entry_length);
+
+       HWRM_UNLOCK();
        return rc;
 }
 
@@ -3593,7 +3741,7 @@ int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
        if (buf == NULL)
                return -ENOMEM;
        dma_handle = rte_mem_virt2iova(buf);
-       if (dma_handle == 0) {
+       if (dma_handle == RTE_BAD_IOVA) {
                PMD_DRV_LOG(ERR,
                        "unable to map response address to physical memory\n");
                return -ENOMEM;
@@ -3628,7 +3776,7 @@ int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
                return -ENOMEM;
 
        dma_handle = rte_mem_virt2iova(buf);
-       if (dma_handle == 0) {
+       if (dma_handle == RTE_BAD_IOVA) {
                PMD_DRV_LOG(ERR,
                        "unable to map response address to physical memory\n");
                return -ENOMEM;
@@ -3682,7 +3830,7 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
                return -ENOMEM;
 
        dma_handle = rte_mem_virt2iova(buf);
-       if (dma_handle == 0) {
+       if (dma_handle == RTE_BAD_IOVA) {
                PMD_DRV_LOG(ERR,
                        "unable to map response address to physical memory\n");
                return -ENOMEM;
@@ -3746,23 +3894,14 @@ static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
        req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
        req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
 
-       if (req.vnic_id_tbl_addr == 0) {
+       if (req.vnic_id_tbl_addr == RTE_BAD_IOVA) {
                HWRM_UNLOCK();
                PMD_DRV_LOG(ERR,
                "unable to map VNIC ID table address to physical memory\n");
                return -ENOMEM;
        }
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
-       if (rc) {
-               HWRM_UNLOCK();
-               PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
-               return -1;
-       } else if (resp->error_code) {
-               rc = rte_le_to_cpu_16(resp->error_code);
-               HWRM_UNLOCK();
-               PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query error %d\n", rc);
-               return -1;
-       }
+       HWRM_CHECK_RESULT();
        rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
 
        HWRM_UNLOCK();
@@ -3790,10 +3929,9 @@ int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
        vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
        vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
                        RTE_CACHE_LINE_SIZE);
-       if (vnic_ids == NULL) {
-               rc = -ENOMEM;
-               return rc;
-       }
+       if (vnic_ids == NULL)
+               return -ENOMEM;
+
        for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
                rte_mem_lock_page(((char *)vnic_ids) + sz);
 
@@ -3860,10 +3998,8 @@ int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
        vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
        vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
                        RTE_CACHE_LINE_SIZE);
-       if (vnic_ids == NULL) {
-               rc = -ENOMEM;
-               return rc;
-       }
+       if (vnic_ids == NULL)
+               return -ENOMEM;
 
        for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
                rte_mem_lock_page(((char *)vnic_ids) + sz);
@@ -3894,7 +4030,7 @@ int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
        PMD_DRV_LOG(ERR, "No default VNIC\n");
 exit:
        rte_free(vnic_ids);
-       return -1;
+       return rc;
 }
 
 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
@@ -4114,28 +4250,30 @@ bnxt_vnic_rss_configure_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
        struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
        struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
-       int nr_ctxs = bp->max_ring_grps;
        struct bnxt_rx_queue **rxqs = bp->rx_queues;
        uint16_t *ring_tbl = vnic->rss_table;
+       int nr_ctxs = vnic->num_lb_ctxts;
        int max_rings = bp->rx_nr_rings;
        int i, j, k, cnt;
        int rc = 0;
 
-       HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
-
-       req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
-       req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
-       req.hash_mode_flags = vnic->hash_mode;
-
-       req.ring_grp_tbl_addr =
-           rte_cpu_to_le_64(vnic->rss_table_dma_addr);
-       req.hash_key_tbl_addr =
-           rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
-
        for (i = 0, k = 0; i < nr_ctxs; i++) {
                struct bnxt_rx_ring_info *rxr;
                struct bnxt_cp_ring_info *cpr;
 
+               HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
+
+               req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+               req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
+               req.hash_mode_flags = vnic->hash_mode;
+
+               req.ring_grp_tbl_addr =
+                   rte_cpu_to_le_64(vnic->rss_table_dma_addr +
+                                    i * BNXT_RSS_ENTRIES_PER_CTX_THOR *
+                                    2 * sizeof(*ring_tbl));
+               req.hash_key_tbl_addr =
+                   rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
+
                req.ring_table_pair_index = i;
                req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
 
@@ -4171,12 +4309,9 @@ bnxt_vnic_rss_configure_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
                                            BNXT_USE_CHIMP_MB);
 
                HWRM_CHECK_RESULT();
-               if (rc)
-                       break;
+               HWRM_UNLOCK();
        }
 
-       HWRM_UNLOCK();
-
        return rc;
 }
 
@@ -4190,23 +4325,31 @@ int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
        if (BNXT_CHIP_THOR(bp))
                return bnxt_vnic_rss_configure_thor(bp, vnic);
 
-       /*
-        * Fill the RSS hash & redirection table with
-        * ring group ids for all VNICs
-        */
-       for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
-               rss_idx++, fw_idx++) {
-               for (i = 0; i < bp->rx_cp_nr_rings; i++) {
-                       fw_idx %= bp->rx_cp_nr_rings;
-                       if (vnic->fw_grp_ids[fw_idx] != INVALID_HW_RING_ID)
-                               break;
-                       fw_idx++;
+       if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
+               return 0;
+
+       if (vnic->rss_table && vnic->hash_type) {
+               /*
+                * Fill the RSS hash & redirection table with
+                * ring group ids for all VNICs
+                */
+               for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
+                       rss_idx++, fw_idx++) {
+                       for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+                               fw_idx %= bp->rx_cp_nr_rings;
+                               if (vnic->fw_grp_ids[fw_idx] !=
+                                   INVALID_HW_RING_ID)
+                                       break;
+                               fw_idx++;
+                       }
+                       if (i == bp->rx_cp_nr_rings)
+                               return 0;
+                       vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
                }
-               if (i == bp->rx_cp_nr_rings)
-                       return 0;
-               vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
+               return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
        }
-       return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
+
+       return 0;
 }
 
 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
@@ -4250,8 +4393,7 @@ static int bnxt_hwrm_set_coal_params_thor(struct bnxt *bp,
 
        HWRM_PREP(req, RING_AGGINT_QCAPS, BNXT_USE_CHIMP_MB);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
-       if (rc)
-               goto out;
+       HWRM_CHECK_RESULT();
 
        agg_req->num_cmpl_dma_aggr = resp->num_cmpl_dma_aggr_max;
        agg_req->cmpl_aggr_dma_tmr = resp->cmpl_aggr_dma_tmr_min;
@@ -4264,8 +4406,6 @@ static int bnxt_hwrm_set_coal_params_thor(struct bnxt *bp,
         HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR;
        agg_req->enables = rte_cpu_to_le_32(enables);
 
-out:
-       HWRM_CHECK_RESULT();
        HWRM_UNLOCK();
        return rc;
 }
@@ -4302,7 +4442,10 @@ int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
        struct hwrm_func_backing_store_qcaps_input req = {0};
        struct hwrm_func_backing_store_qcaps_output *resp =
                bp->hwrm_cmd_resp_addr;
-       int rc;
+       struct bnxt_ctx_pg_info *ctx_pg;
+       struct bnxt_ctx_mem_info *ctx;
+       int total_alloc_len;
+       int rc, i;
 
        if (!BNXT_CHIP_THOR(bp) ||
            bp->hwrm_spec_code < HWRM_VERSION_1_9_2 ||
@@ -4314,70 +4457,60 @@ int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
        HWRM_CHECK_RESULT_SILENT();
 
-       if (!rc) {
-               struct bnxt_ctx_pg_info *ctx_pg;
-               struct bnxt_ctx_mem_info *ctx;
-               int total_alloc_len;
-               int i;
-
-               total_alloc_len = sizeof(*ctx);
-               ctx = rte_malloc("bnxt_ctx_mem", total_alloc_len,
-                                RTE_CACHE_LINE_SIZE);
-               if (!ctx) {
-                       rc = -ENOMEM;
-                       goto ctx_err;
-               }
-               memset(ctx, 0, total_alloc_len);
+       total_alloc_len = sizeof(*ctx);
+       ctx = rte_zmalloc("bnxt_ctx_mem", total_alloc_len,
+                         RTE_CACHE_LINE_SIZE);
+       if (!ctx) {
+               rc = -ENOMEM;
+               goto ctx_err;
+       }
 
-               ctx_pg = rte_malloc("bnxt_ctx_pg_mem",
-                                   sizeof(*ctx_pg) * BNXT_MAX_Q,
-                                   RTE_CACHE_LINE_SIZE);
-               if (!ctx_pg) {
-                       rc = -ENOMEM;
-                       goto ctx_err;
-               }
-               for (i = 0; i < BNXT_MAX_Q; i++, ctx_pg++)
-                       ctx->tqm_mem[i] = ctx_pg;
-
-               bp->ctx = ctx;
-               ctx->qp_max_entries = rte_le_to_cpu_32(resp->qp_max_entries);
-               ctx->qp_min_qp1_entries =
-                       rte_le_to_cpu_16(resp->qp_min_qp1_entries);
-               ctx->qp_max_l2_entries =
-                       rte_le_to_cpu_16(resp->qp_max_l2_entries);
-               ctx->qp_entry_size = rte_le_to_cpu_16(resp->qp_entry_size);
-               ctx->srq_max_l2_entries =
-                       rte_le_to_cpu_16(resp->srq_max_l2_entries);
-               ctx->srq_max_entries = rte_le_to_cpu_32(resp->srq_max_entries);
-               ctx->srq_entry_size = rte_le_to_cpu_16(resp->srq_entry_size);
-               ctx->cq_max_l2_entries =
-                       rte_le_to_cpu_16(resp->cq_max_l2_entries);
-               ctx->cq_max_entries = rte_le_to_cpu_32(resp->cq_max_entries);
-               ctx->cq_entry_size = rte_le_to_cpu_16(resp->cq_entry_size);
-               ctx->vnic_max_vnic_entries =
-                       rte_le_to_cpu_16(resp->vnic_max_vnic_entries);
-               ctx->vnic_max_ring_table_entries =
-                       rte_le_to_cpu_16(resp->vnic_max_ring_table_entries);
-               ctx->vnic_entry_size = rte_le_to_cpu_16(resp->vnic_entry_size);
-               ctx->stat_max_entries =
-                       rte_le_to_cpu_32(resp->stat_max_entries);
-               ctx->stat_entry_size = rte_le_to_cpu_16(resp->stat_entry_size);
-               ctx->tqm_entry_size = rte_le_to_cpu_16(resp->tqm_entry_size);
-               ctx->tqm_min_entries_per_ring =
-                       rte_le_to_cpu_32(resp->tqm_min_entries_per_ring);
-               ctx->tqm_max_entries_per_ring =
-                       rte_le_to_cpu_32(resp->tqm_max_entries_per_ring);
-               ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
-               if (!ctx->tqm_entries_multiple)
-                       ctx->tqm_entries_multiple = 1;
-               ctx->mrav_max_entries =
-                       rte_le_to_cpu_32(resp->mrav_max_entries);
-               ctx->mrav_entry_size = rte_le_to_cpu_16(resp->mrav_entry_size);
-               ctx->tim_entry_size = rte_le_to_cpu_16(resp->tim_entry_size);
-               ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries);
-       } else {
-               rc = 0;
+       ctx_pg = rte_malloc("bnxt_ctx_pg_mem",
+                           sizeof(*ctx_pg) * BNXT_MAX_Q,
+                           RTE_CACHE_LINE_SIZE);
+       if (!ctx_pg) {
+               rc = -ENOMEM;
+               goto ctx_err;
        }
+       for (i = 0; i < BNXT_MAX_Q; i++, ctx_pg++)
+               ctx->tqm_mem[i] = ctx_pg;
+
+       bp->ctx = ctx;
+       ctx->qp_max_entries = rte_le_to_cpu_32(resp->qp_max_entries);
+       ctx->qp_min_qp1_entries =
+               rte_le_to_cpu_16(resp->qp_min_qp1_entries);
+       ctx->qp_max_l2_entries =
+               rte_le_to_cpu_16(resp->qp_max_l2_entries);
+       ctx->qp_entry_size = rte_le_to_cpu_16(resp->qp_entry_size);
+       ctx->srq_max_l2_entries =
+               rte_le_to_cpu_16(resp->srq_max_l2_entries);
+       ctx->srq_max_entries = rte_le_to_cpu_32(resp->srq_max_entries);
+       ctx->srq_entry_size = rte_le_to_cpu_16(resp->srq_entry_size);
+       ctx->cq_max_l2_entries =
+               rte_le_to_cpu_16(resp->cq_max_l2_entries);
+       ctx->cq_max_entries = rte_le_to_cpu_32(resp->cq_max_entries);
+       ctx->cq_entry_size = rte_le_to_cpu_16(resp->cq_entry_size);
+       ctx->vnic_max_vnic_entries =
+               rte_le_to_cpu_16(resp->vnic_max_vnic_entries);
+       ctx->vnic_max_ring_table_entries =
+               rte_le_to_cpu_16(resp->vnic_max_ring_table_entries);
+       ctx->vnic_entry_size = rte_le_to_cpu_16(resp->vnic_entry_size);
+       ctx->stat_max_entries =
+               rte_le_to_cpu_32(resp->stat_max_entries);
+       ctx->stat_entry_size = rte_le_to_cpu_16(resp->stat_entry_size);
+       ctx->tqm_entry_size = rte_le_to_cpu_16(resp->tqm_entry_size);
+       ctx->tqm_min_entries_per_ring =
+               rte_le_to_cpu_32(resp->tqm_min_entries_per_ring);
+       ctx->tqm_max_entries_per_ring =
+               rte_le_to_cpu_32(resp->tqm_max_entries_per_ring);
+       ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
+       if (!ctx->tqm_entries_multiple)
+               ctx->tqm_entries_multiple = 1;
+       ctx->mrav_max_entries =
+               rte_le_to_cpu_32(resp->mrav_max_entries);
+       ctx->mrav_entry_size = rte_le_to_cpu_16(resp->mrav_entry_size);
+       ctx->tim_entry_size = rte_le_to_cpu_16(resp->tim_entry_size);
+       ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries);
 ctx_err:
        HWRM_UNLOCK();
        return rc;
@@ -4477,8 +4610,7 @@ int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, uint32_t enables)
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();
-       if (rc)
-               rc = -EIO;
+
        return rc;
 }
 
@@ -4534,10 +4666,10 @@ bnxt_hwrm_tunnel_redirect(struct bnxt *bp, uint8_t type)
                bp->hwrm_cmd_resp_addr;
        int rc = 0;
 
-       HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_ALLOC, BNXT_USE_KONG(bp));
+       HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_ALLOC, BNXT_USE_CHIMP_MB);
        req.tunnel_type = type;
        req.dest_fid = bp->fw_fid;
-       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
        HWRM_CHECK_RESULT();
 
        HWRM_UNLOCK();
@@ -4553,10 +4685,10 @@ bnxt_hwrm_tunnel_redirect_free(struct bnxt *bp, uint8_t type)
                bp->hwrm_cmd_resp_addr;
        int rc = 0;
 
-       HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_FREE, BNXT_USE_KONG(bp));
+       HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_FREE, BNXT_USE_CHIMP_MB);
        req.tunnel_type = type;
        req.dest_fid = bp->fw_fid;
-       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
        HWRM_CHECK_RESULT();
 
        HWRM_UNLOCK();
@@ -4571,13 +4703,13 @@ int bnxt_hwrm_tunnel_redirect_query(struct bnxt *bp, uint32_t *type)
                bp->hwrm_cmd_resp_addr;
        int rc = 0;
 
-       HWRM_PREP(req, CFA_REDIRECT_QUERY_TUNNEL_TYPE, BNXT_USE_KONG(bp));
+       HWRM_PREP(req, CFA_REDIRECT_QUERY_TUNNEL_TYPE, BNXT_USE_CHIMP_MB);
        req.src_fid = bp->fw_fid;
-       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
        HWRM_CHECK_RESULT();
 
        if (type)
-               *type = resp->tunnel_mask;
+               *type = rte_le_to_cpu_32(resp->tunnel_mask);
 
        HWRM_UNLOCK();
 
@@ -4592,14 +4724,14 @@ int bnxt_hwrm_tunnel_redirect_info(struct bnxt *bp, uint8_t tun_type,
                bp->hwrm_cmd_resp_addr;
        int rc = 0;
 
-       HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_INFO, BNXT_USE_KONG(bp));
+       HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_INFO, BNXT_USE_CHIMP_MB);
        req.src_fid = bp->fw_fid;
        req.tunnel_type = tun_type;
-       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
        HWRM_CHECK_RESULT();
 
        if (dst_fid)
-               *dst_fid = resp->dest_fid;
+               *dst_fid = rte_le_to_cpu_16(resp->dest_fid);
 
        PMD_DRV_LOG(DEBUG, "dst_fid: %x\n", resp->dest_fid);
 
@@ -4607,3 +4739,255 @@ int bnxt_hwrm_tunnel_redirect_info(struct bnxt *bp, uint8_t tun_type,
 
        return rc;
 }
+
+int bnxt_hwrm_set_mac(struct bnxt *bp)
+{
+       struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+       struct hwrm_func_vf_cfg_input req = {0};
+       int rc = 0;
+
+       if (!BNXT_VF(bp))
+               return 0;
+
+       HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
+
+       req.enables =
+               rte_cpu_to_le_32(HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
+       memcpy(req.dflt_mac_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);
+
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+
+       HWRM_CHECK_RESULT();
+
+       memcpy(bp->dflt_mac_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);
+       HWRM_UNLOCK();
+
+       return rc;
+}
+
+int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
+{
+       struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
+       struct hwrm_func_drv_if_change_input req = {0};
+       uint32_t flags;
+       int rc;
+
+       if (!(bp->flags & BNXT_FLAG_FW_CAP_IF_CHANGE))
+               return 0;
+
+       /* Do not issue FUNC_DRV_IF_CHANGE during reset recovery.
+        * If FUNC_DRV_IF_CHANGE is issued with the "up" flag cleared
+        * before FUNC_DRV_UNRGTR, the FW resets before FUNC_DRV_UNRGTR
+        * completes.
+        */
+       if (!up && (bp->flags & BNXT_FLAG_FW_RESET))
+               return 0;
+
+       HWRM_PREP(req, FUNC_DRV_IF_CHANGE, BNXT_USE_CHIMP_MB);
+
+       if (up)
+               req.flags =
+               rte_cpu_to_le_32(HWRM_FUNC_DRV_IF_CHANGE_INPUT_FLAGS_UP);
+
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+
+       HWRM_CHECK_RESULT();
+       flags = rte_le_to_cpu_32(resp->flags);
+       HWRM_UNLOCK();
+
+       if (flags & HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_HOT_FW_RESET_DONE) {
+               PMD_DRV_LOG(INFO, "FW reset happened while port was down\n");
+               bp->flags |= BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;
+       }
+
+       return 0;
+}
+
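A hypothetical caller sketch (not part of this diff) showing how the flag set above would typically be consumed on port start:

    /* Hypothetical usage, for illustration only. */
    rc = bnxt_hwrm_if_change(bp, true);
    if (rc == 0 && (bp->flags & BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE)) {
            /* FW was reset while the port was down: HW resources are
             * stale and must be re-initialized before traffic resumes. */
    }
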
+int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
+{
+       struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+       struct bnxt_error_recovery_info *info = bp->recovery_info;
+       struct hwrm_error_recovery_qcfg_input req = {0};
+       uint32_t flags = 0;
+       unsigned int i;
+       int rc;
+
+       /* Older FW does not have error recovery support */
+       if (!(bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY))
+               return 0;
+
+       if (!info) {
+               info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg",
+                                  sizeof(*info), 0);
+               bp->recovery_info = info;
+               if (info == NULL)
+                       return -ENOMEM;
+       } else {
+               memset(info, 0, sizeof(*info));
+       }
+
+       HWRM_PREP(req, ERROR_RECOVERY_QCFG, BNXT_USE_CHIMP_MB);
+
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+
+       HWRM_CHECK_RESULT();
+
+       flags = rte_le_to_cpu_32(resp->flags);
+       if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_HOST)
+               info->flags |= BNXT_FLAG_ERROR_RECOVERY_HOST;
+       else if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU)
+               info->flags |= BNXT_FLAG_ERROR_RECOVERY_CO_CPU;
+
+       if ((info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) &&
+           !(bp->flags & BNXT_FLAG_KONG_MB_EN)) {
+               rc = -EINVAL;
+               goto err;
+       }
+
+       /* FW returned values are in units of 100msec */
+       info->driver_polling_freq =
+               rte_le_to_cpu_32(resp->driver_polling_freq) * 100;
+       info->master_func_wait_period =
+               rte_le_to_cpu_32(resp->master_func_wait_period) * 100;
+       info->normal_func_wait_period =
+               rte_le_to_cpu_32(resp->normal_func_wait_period) * 100;
+       info->master_func_wait_period_after_reset =
+               rte_le_to_cpu_32(resp->master_func_wait_period_after_reset) * 100;
+       info->max_bailout_time_after_reset =
+               rte_le_to_cpu_32(resp->max_bailout_time_after_reset) * 100;
+       info->status_regs[BNXT_FW_STATUS_REG] =
+               rte_le_to_cpu_32(resp->fw_health_status_reg);
+       info->status_regs[BNXT_FW_HEARTBEAT_CNT_REG] =
+               rte_le_to_cpu_32(resp->fw_heartbeat_reg);
+       info->status_regs[BNXT_FW_RECOVERY_CNT_REG] =
+               rte_le_to_cpu_32(resp->fw_reset_cnt_reg);
+       info->status_regs[BNXT_FW_RESET_INPROG_REG] =
+               rte_le_to_cpu_32(resp->reset_inprogress_reg);
+       info->reg_array_cnt =
+               rte_le_to_cpu_32(resp->reg_array_cnt);
+
+       if (info->reg_array_cnt >= BNXT_NUM_RESET_REG) {
+               rc = -EINVAL;
+               goto err;
+       }
+
+       for (i = 0; i < info->reg_array_cnt; i++) {
+               info->reset_reg[i] =
+                       rte_le_to_cpu_32(resp->reset_reg[i]);
+               info->reset_reg_val[i] =
+                       rte_le_to_cpu_32(resp->reset_reg_val[i]);
+               info->delay_after_reset[i] =
+                       resp->delay_after_reset[i];
+       }
+err:
+       HWRM_UNLOCK();
+
+       /* Map the FW status registers */
+       if (!rc)
+               rc = bnxt_map_fw_health_status_regs(bp);
+
+       if (rc) {
+               rte_free(bp->recovery_info);
+               bp->recovery_info = NULL;
+       }
+       return rc;
+}
+
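A worked example of the unit conversion above:

    /* FW reports intervals in 100 ms ticks; the driver stores ms.
     * E.g. resp->driver_polling_freq == 5  ->  5 * 100 = 500 ms. */
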
+int bnxt_hwrm_fw_reset(struct bnxt *bp)
+{
+       struct hwrm_fw_reset_output *resp = bp->hwrm_cmd_resp_addr;
+       struct hwrm_fw_reset_input req = {0};
+       int rc;
+
+       if (!BNXT_PF(bp))
+               return -EOPNOTSUPP;
+
+       HWRM_PREP(req, FW_RESET, BNXT_USE_KONG(bp));
+
+       req.embedded_proc_type =
+               HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_CHIP;
+       req.selfrst_status =
+               HWRM_FW_RESET_INPUT_SELFRST_STATUS_SELFRSTASAP;
+       req.flags = HWRM_FW_RESET_INPUT_FLAGS_RESET_GRACEFUL;
+
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
+                                   BNXT_USE_KONG(bp));
+
+       HWRM_CHECK_RESULT();
+       HWRM_UNLOCK();
+
+       return rc;
+}
+
+int bnxt_hwrm_port_ts_query(struct bnxt *bp, uint8_t path, uint64_t *timestamp)
+{
+       struct hwrm_port_ts_query_output *resp = bp->hwrm_cmd_resp_addr;
+       struct hwrm_port_ts_query_input req = {0};
+       struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+       uint32_t flags = 0;
+       int rc;
+
+       if (!ptp)
+               return 0;
+
+       HWRM_PREP(req, PORT_TS_QUERY, BNXT_USE_CHIMP_MB);
+
+       switch (path) {
+       case BNXT_PTP_FLAGS_PATH_TX:
+               flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_TX;
+               break;
+       case BNXT_PTP_FLAGS_PATH_RX:
+               flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_RX;
+               break;
+       case BNXT_PTP_FLAGS_CURRENT_TIME:
+               flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_CURRENT_TIME;
+               break;
+       }
+
+       req.flags = rte_cpu_to_le_32(flags);
+       req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
+
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+
+       HWRM_CHECK_RESULT();
+
+       if (timestamp) {
+               *timestamp = rte_le_to_cpu_32(resp->ptp_msg_ts[0]);
+               *timestamp |=
+                       (uint64_t)(rte_le_to_cpu_32(resp->ptp_msg_ts[1])) << 32;
+       }
+       HWRM_UNLOCK();
+
+       return rc;
+}
+
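The 64-bit timestamp above is assembled from two little-endian 32-bit words; a worked example:

    /* lo = 0x9ABCDEF0, hi = 0x12345678:
     * ts = lo | ((uint64_t)hi << 32) = 0x123456789ABCDEF0 */
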
+int bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(struct bnxt *bp)
+{
+       struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp =
+                                       bp->hwrm_cmd_resp_addr;
+       struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
+       uint32_t flags = 0;
+       int rc = 0;
+
+       if (!(bp->flags & BNXT_FLAG_ADV_FLOW_MGMT))
+               return rc;
+
+       if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
+               PMD_DRV_LOG(DEBUG,
+                           "Not a PF or trusted VF. Command not supported\n");
+               return 0;
+       }
+
+       HWRM_PREP(req, CFA_ADV_FLOW_MGNT_QCAPS, BNXT_USE_KONG(bp));
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
+
+       HWRM_CHECK_RESULT();
+       flags = rte_le_to_cpu_32(resp->flags);
+       HWRM_UNLOCK();
+
+       if (flags & HWRM_CFA_ADV_FLOW_MGNT_QCAPS_L2_HDR_SRC_FILTER_EN) {
+               bp->flow_flags |= BNXT_FLOW_FLAG_L2_HDR_SRC_FILTER_EN;
+               PMD_DRV_LOG(INFO, "Source L2 header filtering enabled\n");
+       }
+
+       return rc;
+}