HWRM_PREP(&req, HWRM_FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB);
req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
- req.ver_maj = RTE_VER_YEAR;
- req.ver_min = RTE_VER_MONTH;
- req.ver_upd = RTE_VER_MINOR;
+ req.ver_maj_8b = RTE_VER_YEAR;
+ req.ver_min_8b = RTE_VER_MONTH;
+ req.ver_upd_8b = RTE_VER_MINOR;
if (BNXT_PF(bp)) {
req.enables |= rte_cpu_to_le_32(
	HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
return rc;
}
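The _8b suffixes track the regenerated HWRM API, which carries the driver version in 8-bit fields; the values themselves still come from DPDK's own version macros in rte_version.h. A minimal standalone sketch of what firmware ends up seeing (illustrative only; it uses nothing beyond the standard macros):

#include <stdio.h>
#include <rte_version.h>

/* Prints the version reported through ver_maj_8b/ver_min_8b/ver_upd_8b.
 * Each macro fits in 8 bits; e.g. DPDK 21.11.0 prints "21.11.0". */
int main(void)
{
	printf("%u.%u.%u\n", (unsigned)RTE_VER_YEAR,
	       (unsigned)RTE_VER_MONTH, (unsigned)RTE_VER_MINOR);
	return 0;
}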
-int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
+int bnxt_hwrm_func_driver_unregister(struct bnxt *bp)
{
int rc;
struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
if (!(bp->flags & BNXT_FLAG_REGISTERED))
	return 0;
HWRM_PREP(&req, HWRM_FUNC_DRV_UNRGTR, BNXT_USE_CHIMP_MB);
- req.flags = flags;
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
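Dropping the parameter also simplifies every call site: the removal of req.flags = flags; shows the value was never meaningfully consumed (callers presumably passed 0). A hypothetical caller sketch, assuming the driver's own bnxt.h/bnxt_hwrm.h headers; the real teardown path lives in bnxt_ethdev.c:

#include "bnxt.h"	/* struct bnxt */
#include "bnxt_hwrm.h"	/* bnxt_hwrm_func_driver_unregister() */

/* example_close() is a hypothetical stand-in for the real teardown.
 * Before this change the call read
 * bnxt_hwrm_func_driver_unregister(bp, 0). */
static void example_close(struct bnxt *bp)
{
	bnxt_hwrm_func_driver_unregister(bp);
}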
if (BNXT_HAS_RING_GRPS(bp))
bp->grp_info[queue_index].rx_fw_ring_id = INVALID_HW_RING_ID;
+ /* Check the agg ring struct explicitly:
+ * bnxt_need_agg_ring() reflects only the current offload flags, but
+ * the agg ring struct may still need to be freed before those
+ * flags are updated.
+ */
+ if (!bnxt_need_agg_ring(bp->eth_dev) || rxr->ag_ring_struct == NULL)
+ goto no_agg;
+
ring = rxr->ag_ring_struct;
bnxt_hwrm_ring_free(bp, ring,
		    BNXT_CHIP_P5(bp) ?
		    HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG :
		    HWRM_RING_FREE_INPUT_RING_TYPE_RX,
		    cpr->cp_ring_struct->fw_ring_id);
if (BNXT_HAS_RING_GRPS(bp))
bp->grp_info[queue_index].ag_fw_ring_id = INVALID_HW_RING_ID;
+no_agg:
bnxt_hwrm_stat_ctx_free(bp, cpr);
bnxt_free_cp_ring(bp, cpr);
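The net effect: the agg ring is freed only when the offloads still call for one and the struct was actually allocated; otherwise control jumps straight to the completion-ring teardown at no_agg. A sketch of the two independent conditions (assumption: bnxt_need_agg_ring() derives its answer from Rx offloads such as scatter/LRO rather than from allocated ring state, which is precisely why the explicit NULL test is needed):

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>
#include <rte_ethdev.h>

/* Sketch only; the real bnxt_need_agg_ring() lives in the bnxt driver,
 * and the offload mask below is an illustrative assumption. */
static bool example_should_free_agg(uint64_t rx_offloads,
				    const void *ag_ring_struct)
{
	bool need_agg = (rx_offloads & (RTE_ETH_RX_OFFLOAD_SCATTER |
					RTE_ETH_RX_OFFLOAD_TCP_LRO)) != 0;
	/* Both must hold: the offload flags ask for an agg ring, and
	 * the struct exists to be freed. */
	return need_agg && ag_ring_struct != NULL;
}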
/* FW returned values are in units of 100 msec */
info->driver_polling_freq =
rte_le_to_cpu_32(resp->driver_polling_freq) * 100;
- info->master_func_wait_period =
+ info->primary_func_wait_period =
rte_le_to_cpu_32(resp->master_func_wait_period) * 100;
info->normal_func_wait_period =
rte_le_to_cpu_32(resp->normal_func_wait_period) * 100;
- info->master_func_wait_period_after_reset =
+ info->primary_func_wait_period_after_reset =
rte_le_to_cpu_32(resp->master_func_wait_period_after_reset) * 100;
info->max_bailout_time_after_reset =
rte_le_to_cpu_32(resp->max_bailout_time_after_reset) * 100;
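Worked example of the 100 msec scaling: a raw resp->master_func_wait_period of 20 is stored as 20 * 100 = 2000, i.e. the recovery logic should give the primary function up to two seconds. Note that only the driver-side info fields adopt the primary_* naming; the resp->master_* names are fixed by the firmware-generated HWRM structures.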