struct bnxt_ctx_pg_info *tqm_mem[BNXT_MAX_TC_Q];
};
+/* Maximum time to wait for firmware readiness after a reset, in milliseconds */
+#define BNXT_MAX_FW_RESET_TIMEOUT 6000
+/* Minimum time to wait before polling for firmware readiness, in milliseconds */
+#define BNXT_MIN_FW_READY_TIMEOUT 2000
+/* Interval between firmware readiness polls, in milliseconds */
+#define BNXT_FW_READY_WAIT_INTERVAL 100
+
+#define US_PER_MS 1000
+#define NS_PER_US 1000
+
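These defaults back the wait window carried in the RESET_NOTIFY async event handled below: its timestamp fields are expressed in units of 100 ms, and a zero field falls back to the corresponding constant. A minimal standalone sketch of that decoding, assuming an 8-bit timestamp_lo and a little-endian 16-bit timestamp_hi as implied by the conversions in the event handler:

    #include <stdint.h>
    #include <rte_byteorder.h>

    /* Sketch only: decode the reset-notify wait window.  Fields are in
     * units of 100 ms; zero means "use the driver default".
     */
    static void decode_reset_window(uint16_t ts_hi_le, uint8_t ts_lo,
                                    uint16_t *max_ms, uint16_t *min_ms)
    {
        *max_ms = ts_hi_le ? rte_le_to_cpu_16(ts_hi_le) * 100 :
                             BNXT_MAX_FW_RESET_TIMEOUT;
        *min_ms = ts_lo ? ts_lo * 100 : BNXT_MIN_FW_READY_TIMEOUT;
    }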
#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
struct bnxt {
void *bar0;
struct bnxt_ptp_cfg *ptp_cfg;
uint16_t vf_resv_strategy;
struct bnxt_ctx_mem_info *ctx;
+
+ uint16_t fw_reset_min_msecs;
+ uint16_t fw_reset_max_msecs;
};
int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete);
*/
#include <rte_malloc.h>
+#include <rte_alarm.h>
#include "bnxt.h"
#include "bnxt_cpr.h"
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED:
PMD_DRV_LOG(INFO, "Port conn async event\n");
break;
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY:
+ /* timestamp_lo/hi values are in units of 100ms */
+ bp->fw_reset_max_msecs = async_cmp->timestamp_hi ?
+ rte_le_to_cpu_16(async_cmp->timestamp_hi) * 100 :
+ BNXT_MAX_FW_RESET_TIMEOUT;
+ bp->fw_reset_min_msecs = async_cmp->timestamp_lo ?
+ async_cmp->timestamp_lo * 100 :
+ BNXT_MIN_FW_READY_TIMEOUT;
+ PMD_DRV_LOG(INFO,
+ "Firmware non-fatal reset event received\n");
+
+ bp->flags |= BNXT_FLAG_FW_RESET;
+ rte_eal_alarm_set(US_PER_MS, bnxt_dev_reset_and_resume,
+ (void *)bp);
+ break;
default:
PMD_DRV_LOG(INFO, "handle_async_event id = 0x%x\n", event_id);
break;
void bnxt_handle_async_event(struct bnxt *bp, struct cmpl_base *cmp);
void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmp);
int bnxt_event_hwrm_resp_handler(struct bnxt *bp, struct cmpl_base *cmp);
+void bnxt_dev_reset_and_resume(void *arg);
#endif
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
+#include <rte_alarm.h>
#include "bnxt.h"
#include "bnxt_cpr.h"
static void bnxt_print_link_info(struct rte_eth_dev *eth_dev);
static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu);
static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
+static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev);
+static int bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev);
int is_bnxt_in_error(struct bnxt *bp)
{
return bnxt_rss_ctxts(bp) * BNXT_RSS_ENTRIES_PER_CTX_THOR;
}
-static void bnxt_free_mem(struct bnxt *bp)
+static void bnxt_free_mem(struct bnxt *bp, bool reconfig)
{
bnxt_free_filter_mem(bp);
bnxt_free_vnic_attributes(bp);
bnxt_free_vnic_mem(bp);
- bnxt_free_stats(bp);
- bnxt_free_tx_rings(bp);
- bnxt_free_rx_rings(bp);
+ /* tx/rx rings are configured as part of the *_queue_setup callbacks.
+ * If the number of rings changes across an FW update,
+ * we have no choice but to warn the user.
+ */
+ if (!reconfig) {
+ bnxt_free_stats(bp);
+ bnxt_free_tx_rings(bp);
+ bnxt_free_rx_rings(bp);
+ }
bnxt_free_async_cp_ring(bp);
}
-static int bnxt_alloc_mem(struct bnxt *bp)
+static int bnxt_alloc_mem(struct bnxt *bp, bool reconfig)
{
int rc;
return 0;
alloc_mem_err:
- bnxt_free_mem(bp);
+ bnxt_free_mem(bp, reconfig);
return rc;
}
.timesync_read_tx_timestamp = bnxt_timesync_read_tx_timestamp,
};
+static void bnxt_dev_cleanup(struct bnxt *bp)
+{
+ bnxt_set_hwrm_link_config(bp, false);
+ bp->link_info.link_up = 0;
+ if (bp->dev_stopped == 0)
+ bnxt_dev_stop_op(bp->eth_dev);
+
+ bnxt_uninit_resources(bp, true);
+}
+
+static int bnxt_restore_filters(struct bnxt *bp)
+{
+ struct rte_eth_dev *dev = bp->eth_dev;
+ int ret = 0;
+
+ if (dev->data->all_multicast) {
+ ret = bnxt_allmulticast_enable_op(dev);
+ if (ret)
+ return ret;
+ }
+ if (dev->data->promiscuous)
+ ret = bnxt_promiscuous_enable_op(dev);
+
+ /* TODO restore other filters as well */
+ return ret;
+}
+
+static void bnxt_dev_recover(void *arg)
+{
+ struct bnxt *bp = arg;
+ int timeout = bp->fw_reset_max_msecs;
+ int rc = 0;
+
+ do {
+ rc = bnxt_hwrm_ver_get(bp);
+ if (rc == 0)
+ break;
+ rte_delay_ms(BNXT_FW_READY_WAIT_INTERVAL);
+ timeout -= BNXT_FW_READY_WAIT_INTERVAL;
+ } while (rc && timeout);
+
+ if (rc) {
+ PMD_DRV_LOG(ERR, "FW is not Ready after reset\n");
+ goto err;
+ }
+
+ rc = bnxt_init_resources(bp, true);
+ if (rc) {
+ PMD_DRV_LOG(ERR,
+ "Failed to initialize resources after reset\n");
+ goto err;
+ }
+ /* Clear the reset flag now that the device is initialized */
+ bp->flags &= ~BNXT_FLAG_FW_RESET;
+
+ rc = bnxt_dev_start_op(bp->eth_dev);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Failed to start port after reset\n");
+ goto err;
+ }
+
+ rc = bnxt_restore_filters(bp);
+ if (rc)
+ goto err;
+
+ PMD_DRV_LOG(INFO, "Recovered from FW reset\n");
+ return;
+err:
+ bp->flags |= BNXT_FLAG_FATAL_ERROR;
+ bnxt_uninit_resources(bp, false);
+ PMD_DRV_LOG(ERR, "Failed to recover from FW reset\n");
+}
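A side note, offered as an observation rather than part of this change: bnxt_dev_recover() executes in the EAL alarm thread, so the rte_delay_ms() poll above can block other alarm callbacks for up to fw_reset_max_msecs. A non-blocking variant would re-arm the alarm per poll; a sketch with hypothetical names, assuming the remaining budget can be tracked in fw_reset_max_msecs:

    /* Hypothetical non-blocking variant: re-arm the alarm every
     * BNXT_FW_READY_WAIT_INTERVAL ms instead of sleeping in the callback.
     */
    static void bnxt_dev_recover_poll(void *arg)
    {
        struct bnxt *bp = arg;

        if (bnxt_hwrm_ver_get(bp) == 0) {
            /* FW is ready: continue with bnxt_init_resources() etc. */
            return;
        }

        if (bp->fw_reset_max_msecs < BNXT_FW_READY_WAIT_INTERVAL) {
            PMD_DRV_LOG(ERR, "FW is not ready after reset\n");
            return;
        }
        bp->fw_reset_max_msecs -= BNXT_FW_READY_WAIT_INTERVAL;
        rte_eal_alarm_set(US_PER_MS * BNXT_FW_READY_WAIT_INTERVAL,
                          bnxt_dev_recover_poll, (void *)bp);
    }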
+
+void bnxt_dev_reset_and_resume(void *arg)
+{
+ struct bnxt *bp = arg;
+ int rc;
+
+ bnxt_dev_cleanup(bp);
+
+ rc = rte_eal_alarm_set(US_PER_MS * bp->fw_reset_min_msecs,
+ bnxt_dev_recover, (void *)bp);
+ if (rc)
+ PMD_DRV_LOG(ERR, "Error setting recovery alarm");
+}
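One caveat with the alarm-driven flow, stated here as an assumption rather than something this patch handles: if the port is closed while a reset or recovery callback is still pending, the alarm should be cancelled to avoid touching freed state. A minimal sketch of such a teardown helper (hypothetical):

    #include <rte_alarm.h>

    /* Hypothetical helper: cancel any reset/recovery callbacks still
     * queued for this port.  rte_eal_alarm_cancel() returns the number
     * of callbacks it cancelled.
     */
    static void bnxt_cancel_recovery_alarms(struct bnxt *bp)
    {
        rte_eal_alarm_cancel(bnxt_dev_reset_and_resume, (void *)bp);
        rte_eal_alarm_cancel(bnxt_dev_recover, (void *)bp);
    }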
+
static bool bnxt_vf_pciid(uint16_t id)
{
if (id == BROADCOM_DEV_ID_57304_VF ||
return rc;
}
+static int bnxt_restore_dflt_mac(struct bnxt *bp)
+{
+ int rc = 0;
+
+ /* MAC is already configured in FW */
+ if (!bnxt_check_zero_bytes(bp->dflt_mac_addr, RTE_ETHER_ADDR_LEN))
+ return 0;
+
+ /* Restore the previously configured MAC address */
+ rc = bnxt_hwrm_set_mac(bp);
+ if (rc)
+ PMD_DRV_LOG(ERR, "Failed to restore MAC address\n");
+
+ return rc;
+}
+
static void bnxt_config_vf_req_fwd(struct bnxt *bp)
{
if (!BNXT_PF(bp))
return 0;
}
-static int bnxt_init_resources(struct bnxt *bp)
+static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev)
{
int rc;
if (rc)
return rc;
- rc = bnxt_setup_mac_addr(bp->eth_dev);
- if (rc)
- return rc;
+ if (!reconfig_dev) {
+ rc = bnxt_setup_mac_addr(bp->eth_dev);
+ if (rc)
+ return rc;
+ } else {
+ rc = bnxt_restore_dflt_mac(bp);
+ if (rc)
+ return rc;
+ }
bnxt_config_vf_req_fwd(bp);
}
}
- rc = bnxt_alloc_mem(bp);
+ rc = bnxt_alloc_mem(bp, reconfig_dev);
if (rc)
return rc;
"Failed to allocate hwrm resource rc: %x\n", rc);
goto error_free;
}
- rc = bnxt_init_resources(bp);
+ rc = bnxt_init_resources(bp, false);
if (rc)
goto error_free;
}
static int
-bnxt_uninit_resources(struct bnxt *bp)
+bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev)
{
int rc;
bnxt_disable_int(bp);
bnxt_free_int(bp);
- bnxt_free_mem(bp);
+ bnxt_free_mem(bp, reconfig_dev);
bnxt_hwrm_func_buf_unrgtr(bp);
rc = bnxt_hwrm_func_driver_unregister(bp, 0);
bp->flags &= ~BNXT_FLAG_REGISTERED;
bnxt_free_ctx_mem(bp);
- bnxt_free_hwrm_resources(bp);
+ if (!reconfig_dev)
+ bnxt_free_hwrm_resources(bp);
return rc;
}
PMD_DRV_LOG(DEBUG, "Calling Device uninit\n");
- rc = bnxt_uninit_resources(bp);
+ rc = bnxt_uninit_resources(bp, false);
if (bp->grp_info != NULL) {
rte_free(bp->grp_info);
#include <rte_io.h>
#define HWRM_CMD_TIMEOUT 6000000
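+/* Shorter VER_GET timeout: 50000 one-microsecond polls, i.e. ~50ms */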
+#define HWRM_SHORT_CMD_TIMEOUT 50000
#define HWRM_SPEC_CODE_1_8_3 0x10803
#define HWRM_VERSION_1_9_1 0x10901
#define HWRM_VERSION_1_9_2 0x10903
GRCPF_REG_KONG_CHANNEL_OFFSET : GRCPF_REG_CHIMP_CHANNEL_OFFSET;
uint16_t mb_trigger_offset = use_kong_mb ?
GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER;
+ uint32_t timeout;
+
+ /* Do not send HWRM commands to firmware in error state */
+ if (bp->flags & BNXT_FLAG_FATAL_ERROR)
+ return 0;
+
+ /* For the VER_GET command, use the shorter 50ms timeout */
+ if (rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
+ timeout = HWRM_SHORT_CMD_TIMEOUT;
+ else
+ timeout = HWRM_CMD_TIMEOUT;
if (bp->flags & BNXT_FLAG_SHORT_CMD ||
msg_len > bp->max_req_len) {
rte_write32(1, bar);
/* Poll for the valid bit */
- for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
+ for (i = 0; i < timeout; i++) {
/* Sanity check on the resp->resp_len */
rte_rmb();
if (resp->resp_len && resp->resp_len <= bp->max_resp_len) {
rte_delay_us(1);
}
- if (i >= HWRM_CMD_TIMEOUT) {
+ if (i >= timeout) {
+ /* Suppress VER_GET timeout messages during reset recovery */
+ if (bp->flags & BNXT_FLAG_FW_RESET &&
+ rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
+ return -ETIMEDOUT;
+
PMD_DRV_LOG(ERR, "Error(timeout) sending msg 0x%04x\n",
req->req_type);
return -ETIMEDOUT;
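For context on the units above: the polling loop delays one microsecond per iteration and compares the iteration count against the timeout, so the timeout constants are counts of 1 us polls (HWRM_CMD_TIMEOUT = 6000000 is ~6 s; HWRM_SHORT_CMD_TIMEOUT = 50000 is ~50 ms). A minimal self-contained sketch of the idiom, with hypothetical names:

    #include <errno.h>
    #include <stdint.h>
    #include <rte_cycles.h>

    /* Sketch: busy-poll a completion flag, delaying 1 us per iteration,
     * so 'timeout' is effectively a microsecond count.
     */
    static int poll_valid(volatile uint8_t *valid, uint32_t timeout)
    {
        uint32_t i;

        for (i = 0; i < timeout; i++) {
            if (*valid)
                return 0;
            rte_delay_us(1);
        }
        return -ETIMEDOUT;
    }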
int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
int rc;
+ uint32_t flags = 0;
struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
if (bp->flags & BNXT_FLAG_REGISTERED)
return 0;
+ flags = HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT;
+
+ /* PFs and trusted VFs should indicate support for the Master
+ * capability on non-Stingray platforms.
+ */
+ if ((BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) && !BNXT_STINGRAY(bp))
+ flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_MASTER_SUPPORT;
+
HWRM_PREP(req, FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB);
req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
* this HWRM sniffer list in FW because DPDK PF driver does
* not support this.
*/
- req.flags =
- rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE);
+ flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE;
}
+ req.flags = rte_cpu_to_le_32(flags);
+
req.async_event_fwd[0] |=
rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
- ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
+ ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE |
+ ASYNC_CMPL_EVENT_ID_RESET_NOTIFY);
req.async_event_fwd[1] |=
rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
- HWRM_CHECK_RESULT();
+ if (bp->flags & BNXT_FLAG_FW_RESET)
+ HWRM_CHECK_RESULT_SILENT();
+ else
+ HWRM_CHECK_RESULT();
PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
if (BNXT_VF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
bp->flags |= BNXT_FLAG_TRUSTED_VF_EN;
PMD_DRV_LOG(INFO, "Trusted VF cap enabled\n");
+ } else if (BNXT_VF(bp) &&
+ !(flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
+ bp->flags &= ~BNXT_FLAG_TRUSTED_VF_EN;
+ PMD_DRV_LOG(INFO, "Trusted VF cap disabled\n");
}
if (mtu)
(1 << HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED)
#define ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE \
(1 << HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE)
+#define ASYNC_CMPL_EVENT_ID_RESET_NOTIFY \
+ (1 << HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY)
#define ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD \
(1 << (HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD - 32))
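The async_event_fwd words form a simple bitmap indexed by event ID: IDs below 32 set bits in word 0, and IDs 32..63 set bits in word 1, which is why the PF_DRVR_UNLOAD mask above subtracts 32. A generic helper, hypothetical and not part of this patch, would be:

    #include <stdint.h>

    /* Hypothetical sketch: set the forward bit for an async event ID in
     * a two-word bitmap (32 event IDs per 32-bit word).  The real
     * request stores each word in little-endian order.
     */
    static void async_event_fwd_set(uint32_t fwd[2], uint16_t event_id)
    {
        fwd[event_id / 32] |= UINT32_C(1) << (event_id % 32);
    }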
#define ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE \
*/
#define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ERROR_RECOVERY_SUPPORT \
UINT32_C(0x20)
+ /*
+ * When this bit is 1, the function is indicating its support of the
+ * Master capability. The firmware will use this capability to select
+ * the Master function, which is then used to initiate designated
+ * functionality such as error recovery. If none of the registered
+ * PFs or trusted VFs indicate this support, the firmware will
+ * select the first registered PF as the Master-capable instance.
+ */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_MASTER_SUPPORT \
+ UINT32_C(0x40)
uint32_t enables;
/*
* This bit must be '1' for the os_type field to be