/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <rte_malloc.h>
#include <rte_alarm.h>
#include <rte_cycles.h>

#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "hsi_struct_def_dpdk.h"
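
/*
 * Completion-ring event handling for the bnxt PMD: the firmware shutdown
 * wait helper, the async event handler, VF request forwarding on the PF,
 * and the top-level HWRM completion dispatcher.
 */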
void bnxt_wait_for_device_shutdown(struct bnxt *bp)
{
	uint32_t val, timeout;

	/* If HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD is set in the
	 * HWRM_FUNC_QCAPS response, wait for FW_STATUS to set the SHUTDOWN
	 * bit in the health register.
	 */
	if (!(bp->recovery_info &&
	      (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD)))
		return;

	/* The driver has to wait for fw_reset_max_msecs or the shutdown bit,
	 * whichever comes first, for FW to collect the crash dump.
	 */
	timeout = bp->fw_reset_max_msecs;

	/* The driver polls for the shutdown bit in the fw_status register:
	 * 1. in case of a hot FW upgrade, the bit is set after all function
	 *    drivers have unregistered with FW.
	 * 2. in case of FW-initiated error recovery, the bit is set after FW
	 *    has collected the core dump.
	 */
	do {
		val = bnxt_read_fw_status_reg(bp, BNXT_FW_STATUS_REG);
		if (val & BNXT_FW_STATUS_SHUTDOWN)
			return;

		/* Re-check every 100 ms until the timeout expires */
		rte_delay_ms(100);
		timeout -= 100;
	} while (timeout);
}

/*
 * Async event handling
 */
void bnxt_handle_async_event(struct bnxt *bp,
			     struct cmpl_base *cmp)
{
	struct hwrm_async_event_cmpl *async_cmp =
			(struct hwrm_async_event_cmpl *)cmp;
	uint16_t event_id = rte_le_to_cpu_16(async_cmp->event_id);
	struct bnxt_error_recovery_info *info;
	uint32_t event_data;

	switch (event_id) {
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE:
		/* FALLTHROUGH: all link events refresh the cached link state */
		bnxt_link_update(bp->eth_dev, 0, ETH_LINK_UP);
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
		PMD_DRV_LOG(INFO, "Async event: PF driver unloaded\n");
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
		PMD_DRV_LOG(INFO, "Async event: VF config changed\n");
		/* Re-query the function configuration to pick up the change */
		bnxt_hwrm_func_qcfg(bp, NULL);
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED:
		PMD_DRV_LOG(INFO, "Port conn async event\n");
		break;
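	/* RESET_NOTIFY: firmware announces an impending reset. Record the
	 * reset windows it advertises and schedule recovery
	 * (bnxt_dev_reset_and_resume) from the EAL alarm context.
	 */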
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY:
		/* Ignore reset notify async events when stopping the port */
		if (!bp->eth_dev->data->dev_started) {
			bp->flags |= BNXT_FLAG_FATAL_ERROR;
			return;
		}

		event_data = rte_le_to_cpu_32(async_cmp->event_data1);
		/* timestamp_lo/hi values are in units of 100ms */
		bp->fw_reset_max_msecs = async_cmp->timestamp_hi ?
			rte_le_to_cpu_16(async_cmp->timestamp_hi) * 100 :
			BNXT_MAX_FW_RESET_TIMEOUT;
		bp->fw_reset_min_msecs = async_cmp->timestamp_lo ?
			async_cmp->timestamp_lo * 100 :
			BNXT_MIN_FW_READY_TIMEOUT;
		if ((event_data & EVENT_DATA1_REASON_CODE_MASK) ==
		    EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL) {
			PMD_DRV_LOG(INFO,
				    "Firmware fatal reset event received\n");
			bp->flags |= BNXT_FLAG_FATAL_ERROR;
		} else {
			PMD_DRV_LOG(INFO,
				    "Firmware non-fatal reset event received\n");
		}

		bp->flags |= BNXT_FLAG_FW_RESET;
		rte_eal_alarm_set(US_PER_MS, bnxt_dev_reset_and_resume,
				  (void *)bp);
		break;
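	/* ERROR_RECOVERY: firmware reports whether this function is the
	 * master for recovery and whether recovery is enabled; cache the
	 * flags and start the health-check timer if it is not running yet.
	 */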
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY:
		info = bp->recovery_info;
		if (!info)
			return;

		PMD_DRV_LOG(INFO, "Error recovery async event received\n");

		event_data = rte_le_to_cpu_32(async_cmp->event_data1) &
			     EVENT_DATA1_FLAGS_MASK;

		if (event_data & EVENT_DATA1_FLAGS_MASTER_FUNC)
			info->flags |= BNXT_FLAG_MASTER_FUNC;
		else
			info->flags &= ~BNXT_FLAG_MASTER_FUNC;

		if (event_data & EVENT_DATA1_FLAGS_RECOVERY_ENABLED)
			info->flags |= BNXT_FLAG_RECOVERY_ENABLED;
		else
			info->flags &= ~BNXT_FLAG_RECOVERY_ENABLED;

		PMD_DRV_LOG(INFO, "recovery enabled(%d), master function(%d)\n",
			    bnxt_is_recovery_enabled(bp),
			    bnxt_is_master_func(bp));

		/* Nothing more to do if the health check is already running */
		if (bp->flags & BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED)
			return;

		info->last_heart_beat =
			bnxt_read_fw_status_reg(bp, BNXT_FW_HEARTBEAT_CNT_REG);
		info->last_reset_counter =
			bnxt_read_fw_status_reg(bp, BNXT_FW_RECOVERY_CNT_REG);

		bnxt_schedule_fw_health_check(bp);
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
		PMD_DRV_LOG(INFO, "DNC event: evt_data1 %#x evt_data2 %#x\n",
			    rte_le_to_cpu_32(async_cmp->event_data1),
			    rte_le_to_cpu_32(async_cmp->event_data2));
		break;
	default:
		PMD_DRV_LOG(DEBUG, "handle_async_event id = 0x%x\n", event_id);
		break;
	}
}
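
/*
 * Handle an HWRM request forwarded from a VF. The PF validates the source
 * VF, rewrites commands it does not let the VF control (default MAC
 * address, VLAN filtering), and then either forwards the request to the
 * firmware or rejects it on the VF's behalf.
 */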
void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
{
	struct hwrm_exec_fwd_resp_input *fwreq;
	struct hwrm_fwd_req_cmpl *fwd_cmpl = (struct hwrm_fwd_req_cmpl *)cmpl;
	struct input *fwd_cmd;
	uint16_t fw_vf_id;
	uint16_t vf_id;
	uint16_t req_len;
	int rc;

	if (bp->pf.active_vfs <= 0) {
		PMD_DRV_LOG(ERR, "Forwarded VF with no active VFs\n");
		return;
	}
	/* Qualify the fwd request */
	fw_vf_id = rte_le_to_cpu_16(fwd_cmpl->source_id);
	vf_id = fw_vf_id - bp->pf.first_vf_id;

	req_len = (rte_le_to_cpu_16(fwd_cmpl->req_len_type) &
		   HWRM_FWD_REQ_CMPL_REQ_LEN_MASK) >>
		  HWRM_FWD_REQ_CMPL_REQ_LEN_SFT;
	if (req_len > sizeof(fwreq->encap_request))
		req_len = sizeof(fwreq->encap_request);

	/* Locate VF's forwarded command */
	fwd_cmd = (struct input *)bp->pf.vf_info[vf_id].req_buf;

	if (fw_vf_id < bp->pf.first_vf_id ||
	    fw_vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs) {
		PMD_DRV_LOG(ERR,
			    "FWD req's source_id 0x%x out of range 0x%x - 0x%x (%d %d)\n",
			    fw_vf_id, bp->pf.first_vf_id,
			    bp->pf.first_vf_id + bp->pf.active_vfs - 1,
			    bp->pf.first_vf_id, bp->pf.active_vfs);
		goto reject;
	}
	if (bnxt_rcv_msg_from_vf(bp, vf_id, fwd_cmd) == true) {
		/*
		 * In older firmware versions, the MAC had to be all zeros for
		 * the VF to set its MAC via hwrm_func_vf_cfg. Set to all
		 * zeros if it's being configured and has been ok'd by caller.
		 */
		if (fwd_cmd->req_type == HWRM_FUNC_VF_CFG) {
			struct hwrm_func_vf_cfg_input *vfc = (void *)fwd_cmd;

			if (vfc->enables &
			    HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR) {
				bnxt_hwrm_func_vf_mac(bp, vf_id,
				(const uint8_t *)"\x00\x00\x00\x00\x00\x00");
			}
		}

		if (fwd_cmd->req_type == HWRM_CFA_L2_SET_RX_MASK) {
			struct hwrm_cfa_l2_set_rx_mask_input *srm =
							(void *)fwd_cmd;

			/* Do not let the VF control VLAN filtering */
			srm->vlan_tag_tbl_addr = rte_cpu_to_le_64(0);
			srm->num_vlan_tags = rte_cpu_to_le_32(0);
			srm->mask &= ~rte_cpu_to_le_32(
				HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY |
				HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN |
				HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN);
		}

		/* Forward the sanitized request to the firmware */
		rc = bnxt_hwrm_exec_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len);
		if (rc)
			PMD_DRV_LOG(ERR,
				    "Failed to send FWD req VF 0x%x, type 0x%x.\n",
				    fw_vf_id - bp->pf.first_vf_id,
				    rte_le_to_cpu_16(fwd_cmd->req_type));
		return;
	}

reject:
	rc = bnxt_hwrm_reject_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len);
	if (rc)
		PMD_DRV_LOG(ERR,
			    "Failed to send REJECT req VF 0x%x, type 0x%x.\n",
			    fw_vf_id - bp->pf.first_vf_id,
			    rte_le_to_cpu_16(fwd_cmd->req_type));
}
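
/*
 * Top-level handler for HWRM completion records: dispatch by completion
 * type and report back whether an event was consumed.
 */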
int bnxt_event_hwrm_resp_handler(struct bnxt *bp, struct cmpl_base *cmp)
{
	bool evt = false;

	if (bp == NULL || cmp == NULL) {
		PMD_DRV_LOG(ERR, "invalid NULL argument\n");
		return evt;
	}

	if (unlikely(is_bnxt_in_error(bp)))
		return evt;

	switch (CMP_TYPE(cmp)) {
	case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
		/* Handle any async event */
		bnxt_handle_async_event(bp, cmp);
		evt = true;
		break;
	case CMPL_BASE_TYPE_HWRM_FWD_RESP:
		/* Handle HWRM forwarded responses */
		bnxt_handle_fwd_req(bp, cmp);
		evt = true;
		break;
	default:
		/* Ignore any other events */
		PMD_DRV_LOG(DEBUG, "Ignoring %02x completion\n", CMP_TYPE(cmp));
		break;
	}

	return evt;
}
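
/*
 * Note: this helper dereferences bp->recovery_info unconditionally, so
 * callers must only use it once the error recovery info has been set up.
 */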
bool bnxt_is_master_func(struct bnxt *bp)
{
	if (bp->recovery_info->flags & BNXT_FLAG_MASTER_FUNC)
		return true;

	return false;
}
bool bnxt_is_recovery_enabled(struct bnxt *bp)
{
	struct bnxt_error_recovery_info *info;

	info = bp->recovery_info;
	if (info && (info->flags & BNXT_FLAG_RECOVERY_ENABLED))
		return true;

	return false;
}