uint8_t delay_after_reset[BNXT_NUM_RESET_REG];
#define BNXT_FLAG_ERROR_RECOVERY_HOST (1 << 0)
#define BNXT_FLAG_ERROR_RECOVERY_CO_CPU (1 << 1)
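+/* The two flags below are refreshed from the ERROR_RECOVERY async event */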
+#define BNXT_FLAG_MASTER_FUNC (1 << 2)
+#define BNXT_FLAG_RECOVERY_ENABLED (1 << 3)
uint32_t flags;
};
struct hwrm_async_event_cmpl *async_cmp =
(struct hwrm_async_event_cmpl *)cmp;
uint16_t event_id = rte_le_to_cpu_16(async_cmp->event_id);
+ struct bnxt_error_recovery_info *info;
uint32_t event_data;
/* TODO: HWRM async events are not defined yet */
rte_eal_alarm_set(US_PER_MS, bnxt_dev_reset_and_resume,
(void *)bp);
break;
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY:
+ info = bp->recovery_info;
+
+ if (!info)
+ return;
+
+ PMD_DRV_LOG(INFO, "Error recovery async event received\n");
+
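+ /* event_data1 carries the master-function and recovery-enabled flag bits */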
+ event_data = rte_le_to_cpu_32(async_cmp->event_data1) &
+ EVENT_DATA1_FLAGS_MASK;
+
+ if (event_data & EVENT_DATA1_FLAGS_MASTER_FUNC)
+ info->flags |= BNXT_FLAG_MASTER_FUNC;
+ else
+ info->flags &= ~BNXT_FLAG_MASTER_FUNC;
+
+ if (event_data & EVENT_DATA1_FLAGS_RECOVERY_ENABLED)
+ info->flags |= BNXT_FLAG_RECOVERY_ENABLED;
+ else
+ info->flags &= ~BNXT_FLAG_RECOVERY_ENABLED;
+
+ PMD_DRV_LOG(INFO, "recovery enabled(%d), master function(%d)\n",
+ bnxt_is_recovery_enabled(bp),
+ bnxt_is_master_func(bp));
+ break;
default:
PMD_DRV_LOG(INFO, "handle_async_event id = 0x%x\n", event_id);
break;
return evt;
}
+
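+/* True if the FW has designated this function as the error recovery master */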
+bool bnxt_is_master_func(struct bnxt *bp)
+{
+ struct bnxt_error_recovery_info *info;
+
+ info = bp->recovery_info;
+ if (info && (info->flags & BNXT_FLAG_MASTER_FUNC))
+ return true;
+
+ return false;
+}
+
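+/* True if FW-driven error recovery is currently enabled for this device */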
+bool bnxt_is_recovery_enabled(struct bnxt *bp)
+{
+ struct bnxt_error_recovery_info *info;
+
+ info = bp->recovery_info;
+ if (info && (info->flags & BNXT_FLAG_RECOVERY_ENABLED))
+ return true;
+
+ return false;
+}
#define EVENT_DATA1_REASON_CODE_MASK \
HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK
+#define EVENT_DATA1_FLAGS_MASK \
+ HWRM_ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASK
+
+#define EVENT_DATA1_FLAGS_MASTER_FUNC \
+ HWRM_ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC
+
+#define EVENT_DATA1_FLAGS_RECOVERY_ENABLED \
+ HWRM_ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED
+
+bool bnxt_is_recovery_enabled(struct bnxt *bp);
+bool bnxt_is_master_func(struct bnxt *bp);
+
#endif
return 0;
flags = HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT;
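+ /* Advertise error recovery support if the FW capability was reported */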
+ if (bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY)
+ flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ERROR_RECOVERY_SUPPORT;
/* PFs and trusted VFs should indicate the support of the
* Master capability on non-Stingray platforms
ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE |
ASYNC_CMPL_EVENT_ID_RESET_NOTIFY);
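+ /* Also request forwarding of the error recovery async notification */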
+ if (bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY)
+ req.async_event_fwd[0] |=
+ rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ERROR_RECOVERY);
req.async_event_fwd[1] |=
rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);
(1 << HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE)
#define ASYNC_CMPL_EVENT_ID_RESET_NOTIFY \
(1 << HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY)
+#define ASYNC_CMPL_EVENT_ID_ERROR_RECOVERY \
+ (1 << HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY)
#define ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD \
(1 << (HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD - 32))
#define ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE \