1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2018 Broadcom
6 #include <rte_malloc.h>
11 #include "bnxt_hwrm.h"
12 #include "bnxt_ring.h"
13 #include "hsi_struct_def_dpdk.h"
16 * Async event handling
/*
 * Dispatch a single HWRM asynchronous event completion from firmware.
 * NOTE(review): this is a sampled view of the file — the enclosing
 * switch statement, break statements and braces are not visible here.
 */
18 void bnxt_handle_async_event(struct bnxt *bp,
19 struct cmpl_base *cmp)
21 struct hwrm_async_event_cmpl *async_cmp =
22 (struct hwrm_async_event_cmpl *)cmp;
23 uint16_t event_id = rte_le_to_cpu_16(async_cmp->event_id);
24 struct bnxt_error_recovery_info *info;
27 /* TODO: HWRM async events are not defined yet */
28 /* Needs to handle: link events, error events, etc. */
/* Any link state/speed/config change: refresh the cached link status. */
30 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
31 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
32 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE:
34 bnxt_link_update_op(bp->eth_dev, 1);
36 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
37 PMD_DRV_LOG(INFO, "Async event: PF driver unloaded\n");
/* VF configuration changed: re-query this function's configuration. */
39 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
40 PMD_DRV_LOG(INFO, "Async event: VF config changed\n");
41 bnxt_hwrm_func_qcfg(bp, NULL);
43 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED:
44 PMD_DRV_LOG(INFO, "Port conn async event\n");
/*
 * Firmware reset notification: derive the min/max wait windows from
 * the completion's timestamps and schedule the recovery handler.
 */
46 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY:
47 event_data = rte_le_to_cpu_32(async_cmp->event_data1);
48 /* timestamp_lo/hi values are in units of 100ms */
49 bp->fw_reset_max_msecs = async_cmp->timestamp_hi ?
50 rte_le_to_cpu_16(async_cmp->timestamp_hi) * 100 :
51 BNXT_MAX_FW_RESET_TIMEOUT;
/* timestamp_lo has no endian conversion here — presumably it is a
 * single byte in the HSI layout; TODO confirm in hsi_struct_def_dpdk.h */
52 bp->fw_reset_min_msecs = async_cmp->timestamp_lo ?
53 async_cmp->timestamp_lo * 100 :
54 BNXT_MIN_FW_READY_TIMEOUT;
55 if ((event_data & EVENT_DATA1_REASON_CODE_MASK) ==
56 EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL) {
58 "Firmware fatal reset event received\n");
59 bp->flags |= BNXT_FLAG_FATAL_ERROR;
62 "Firmware non-fatal reset event received\n");
/* Mark reset in progress and kick off recovery from alarm context. */
65 bp->flags |= BNXT_FLAG_FW_RESET;
66 rte_eal_alarm_set(US_PER_MS, bnxt_dev_reset_and_resume,
/*
 * Error-recovery capability report: latch the master-function and
 * recovery-enabled bits from event_data1 into bp->recovery_info.
 */
69 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY:
70 info = bp->recovery_info;
75 PMD_DRV_LOG(INFO, "Error recovery async event received\n");
77 event_data = rte_le_to_cpu_32(async_cmp->event_data1) &
78 EVENT_DATA1_FLAGS_MASK;
80 if (event_data & EVENT_DATA1_FLAGS_MASTER_FUNC)
81 info->flags |= BNXT_FLAG_MASTER_FUNC;
83 info->flags &= ~BNXT_FLAG_MASTER_FUNC;
85 if (event_data & EVENT_DATA1_FLAGS_RECOVERY_ENABLED)
86 info->flags |= BNXT_FLAG_RECOVERY_ENABLED;
88 info->flags &= ~BNXT_FLAG_RECOVERY_ENABLED;
90 PMD_DRV_LOG(INFO, "recovery enabled(%d), master function(%d)\n",
91 bnxt_is_recovery_enabled(bp),
92 bnxt_is_master_func(bp));
/* Unknown/unhandled event id: just log and ignore it. */
95 PMD_DRV_LOG(INFO, "handle_async_event id = 0x%x\n", event_id);
/*
 * Qualify and forward (or reject) an HWRM request that a VF sent to the
 * PF for validation — the "forwarded request" channel.
 * NOTE(review): sampled view — some intervening lines (declarations of
 * fw_vf_id/vf_id/req_len/rc, braces, returns) are not visible here.
 */
100 void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
102 struct hwrm_exec_fwd_resp_input *fwreq;
103 struct hwrm_fwd_req_cmpl *fwd_cmpl = (struct hwrm_fwd_req_cmpl *)cmpl;
104 struct input *fwd_cmd;
/* A forwarded request can only originate from an active VF. */
110 if (bp->pf.active_vfs <= 0) {
111 PMD_DRV_LOG(ERR, "Forwarded VF with no active VFs\n");
115 /* Qualify the fwd request */
116 fw_vf_id = rte_le_to_cpu_16(fwd_cmpl->source_id);
117 vf_id = fw_vf_id - bp->pf.first_vf_id;
/* Clamp the encapsulated request length to the forward buffer size. */
119 req_len = (rte_le_to_cpu_16(fwd_cmpl->req_len_type) &
120 HWRM_FWD_REQ_CMPL_REQ_LEN_MASK) >>
121 HWRM_FWD_REQ_CMPL_REQ_LEN_SFT;
122 if (req_len > sizeof(fwreq->encap_request))
123 req_len = sizeof(fwreq->encap_request);
125 /* Locate VF's forwarded command */
/* NOTE(review): vf_id indexes vf_info[] here, but the fw_vf_id range
 * check only happens below — if source_id is out of range this read is
 * out of bounds. Consider moving the range check before this access. */
126 fwd_cmd = (struct input *)bp->pf.vf_info[vf_id].req_buf;
128 if (fw_vf_id < bp->pf.first_vf_id ||
129 fw_vf_id >= (bp->pf.first_vf_id) + bp->pf.active_vfs) {
131 "FWD req's source_id 0x%x out of range 0x%x - 0x%x (%d %d)\n",
132 fw_vf_id, bp->pf.first_vf_id,
133 (bp->pf.first_vf_id) + bp->pf.active_vfs - 1,
134 bp->pf.first_vf_id, bp->pf.active_vfs);
/* Ask the application/PF policy whether this VF request is allowed. */
138 if (bnxt_rcv_msg_from_vf(bp, vf_id, fwd_cmd) == true) {
140 * In older firmware versions, the MAC had to be all zeros for
141 * the VF to set it's MAC via hwrm_func_vf_cfg. Set to all
142 * zeros if it's being configured and has been ok'd by caller.
144 if (fwd_cmd->req_type == HWRM_FUNC_VF_CFG) {
145 struct hwrm_func_vf_cfg_input *vfc = (void *)fwd_cmd;
148 HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR) {
/* NOTE(review): the literal below spells 5 zero bytes; the 6th zero
 * byte of the MAC comes from the string's implicit NUL — confirm. */
149 bnxt_hwrm_func_vf_mac(bp, vf_id,
150 (const uint8_t *)"\x00\x00\x00\x00\x00");
/* Strip VLAN-filtering controls the VF is not allowed to set. */
153 if (fwd_cmd->req_type == HWRM_CFA_L2_SET_RX_MASK) {
154 struct hwrm_cfa_l2_set_rx_mask_input *srm =
157 srm->vlan_tag_tbl_addr = rte_cpu_to_le_64(0);
158 srm->num_vlan_tags = rte_cpu_to_le_32(0);
159 srm->mask &= ~rte_cpu_to_le_32(
160 HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY |
161 HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN |
162 HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN);
/* Approved: forward the encapsulated request to firmware. */
165 rc = bnxt_hwrm_exec_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len);
168 "Failed to send FWD req VF 0x%x, type 0x%x.\n",
169 fw_vf_id - bp->pf.first_vf_id,
170 rte_le_to_cpu_16(fwd_cmd->req_type));
/* Rejected: tell firmware to NAK the VF's request. */
176 rc = bnxt_hwrm_reject_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len);
179 "Failed to send REJECT req VF 0x%x, type 0x%x.\n",
180 fw_vf_id - bp->pf.first_vf_id,
181 rte_le_to_cpu_16(fwd_cmd->req_type));
/*
 * Completion-entry demultiplexer: route one completion ring entry to the
 * async-event or forwarded-request handler based on its CMP_TYPE.
 * Rejects NULL arguments and skips processing when the device is already
 * in an error state.
 */
187 int bnxt_event_hwrm_resp_handler(struct bnxt *bp, struct cmpl_base *cmp)
191 if (bp == NULL || cmp == NULL) {
192 PMD_DRV_LOG(ERR, "invalid NULL argument\n");
/* Do not process completions once the device entered an error state. */
196 if (unlikely(is_bnxt_in_error(bp)))
199 switch (CMP_TYPE(cmp)) {
200 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
201 /* Handle any async event */
202 bnxt_handle_async_event(bp, cmp);
205 case CMPL_BASE_TYPE_HWRM_FWD_RESP:
206 /* Handle HWRM forwarded responses */
207 bnxt_handle_fwd_req(bp, cmp);
211 /* Ignore any other events */
212 PMD_DRV_LOG(INFO, "Ignoring %02x completion\n", CMP_TYPE(cmp));
/*
 * Return true when this function is the error-recovery "master" function.
 * NOTE(review): bp->recovery_info is dereferenced without a NULL check
 * here (unlike bnxt_is_recovery_enabled below) — presumably callers only
 * invoke this once recovery_info is allocated; confirm against callers.
 */
219 bool bnxt_is_master_func(struct bnxt *bp)
221 if (bp->recovery_info->flags & BNXT_FLAG_MASTER_FUNC)
/* Return true when firmware error recovery is enabled for this device. */
227 bool bnxt_is_recovery_enabled(struct bnxt *bp)
229 struct bnxt_error_recovery_info *info;
/* recovery_info may be NULL when the capability was never negotiated. */
231 info = bp->recovery_info;
232 if (info && (info->flags & BNXT_FLAG_RECOVERY_ENABLED))