X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fbnxt%2Fbnxt_cpr.c;h=2c7fd78c3db416f482e8b4f68fbeb6d61e44dca2;hb=46c451d905e8e27787a0471ecc1d31a4cde25a9e;hp=2c3129fe28b7196f76bb384b3eb4d45b284fb2c6;hpb=839dee404325c45f4181712fd63917229e737652;p=dpdk.git

diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c
index 2c3129fe28..2c7fd78c3d 100644
--- a/drivers/net/bnxt/bnxt_cpr.c
+++ b/drivers/net/bnxt/bnxt_cpr.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2018 Broadcom
+ * Copyright(c) 2014-2021 Broadcom
  * All rights reserved.
  */
 
@@ -46,6 +46,51 @@ void bnxt_wait_for_device_shutdown(struct bnxt *bp)
         } while (timeout);
 }
 
+static void
+bnxt_process_default_vnic_change(struct bnxt *bp,
+                                 struct hwrm_async_event_cmpl *async_cmp)
+{
+        uint16_t vnic_state, vf_fid, vf_id;
+        struct bnxt_representor *vf_rep_bp;
+        struct rte_eth_dev *eth_dev;
+        bool vfr_found = false;
+        uint32_t event_data;
+
+        if (!BNXT_TRUFLOW_EN(bp))
+                return;
+
+        PMD_DRV_LOG(INFO, "Default vnic change async event received\n");
+        event_data = rte_le_to_cpu_32(async_cmp->event_data1);
+
+        vnic_state = (event_data & BNXT_DEFAULT_VNIC_STATE_MASK) >>
+                        BNXT_DEFAULT_VNIC_STATE_SFT;
+        if (vnic_state != BNXT_DEFAULT_VNIC_ALLOC)
+                return;
+
+        if (!bp->rep_info)
+                return;
+
+        vf_fid = (event_data & BNXT_DEFAULT_VNIC_CHANGE_VF_ID_MASK) >>
+                        BNXT_DEFAULT_VNIC_CHANGE_VF_ID_SFT;
+        PMD_DRV_LOG(INFO, "async event received vf_id 0x%x\n", vf_fid);
+
+        for (vf_id = 0; vf_id < BNXT_MAX_VF_REPS; vf_id++) {
+                eth_dev = bp->rep_info[vf_id].vfr_eth_dev;
+                if (!eth_dev)
+                        continue;
+                vf_rep_bp = eth_dev->data->dev_private;
+                if (vf_rep_bp &&
+                    vf_rep_bp->fw_fid == vf_fid) {
+                        vfr_found = true;
+                        break;
+                }
+        }
+        if (!vfr_found)
+                return;
+
+        bnxt_rep_dev_start_op(eth_dev);
+}
+
 /*
  * Async event handling
  */
@@ -55,8 +100,10 @@ void bnxt_handle_async_event(struct bnxt *bp,
         struct hwrm_async_event_cmpl *async_cmp =
                                 (struct hwrm_async_event_cmpl *)cmp;
         uint16_t event_id = rte_le_to_cpu_16(async_cmp->event_id);
+        uint16_t port_id = bp->eth_dev->data->port_id;
         struct bnxt_error_recovery_info *info;
         uint32_t event_data;
+        uint32_t echo_req_data1, echo_req_data2;
 
         switch (event_id) {
         case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
@@ -76,6 +123,19 @@ void bnxt_handle_async_event(struct bnxt *bp,
                 PMD_DRV_LOG(INFO, "Port conn async event\n");
                 break;
         case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY:
+                /*
+                 * Avoid any rx/tx packet processing during firmware reset
+                 * operation.
+                 */
+                bnxt_stop_rxtx(bp);
+
+                /* Ignore reset notify async events when stopping the port */
+                if (!bp->eth_dev->data->dev_started) {
+                        bp->flags |= BNXT_FLAG_FATAL_ERROR;
+                        return;
+                }
+
+                pthread_mutex_lock(&bp->err_recovery_lock);
                 event_data = rte_le_to_cpu_32(async_cmp->event_data1);
                 /* timestamp_lo/hi values are in units of 100ms */
                 bp->fw_reset_max_msecs = async_cmp->timestamp_hi ?
@@ -87,14 +147,17 @@ void bnxt_handle_async_event(struct bnxt *bp,
                 if ((event_data & EVENT_DATA1_REASON_CODE_MASK) ==
                     EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL) {
                         PMD_DRV_LOG(INFO,
-                                    "Firmware fatal reset event received\n");
+                                    "Port %u: Firmware fatal reset event received\n",
+                                    port_id);
                         bp->flags |= BNXT_FLAG_FATAL_ERROR;
                 } else {
                         PMD_DRV_LOG(INFO,
-                                    "Firmware non-fatal reset event received\n");
+                                    "Port %u: Firmware non-fatal reset event received\n",
+                                    port_id);
                 }
 
                 bp->flags |= BNXT_FLAG_FW_RESET;
+                pthread_mutex_unlock(&bp->err_recovery_lock);
                 rte_eal_alarm_set(US_PER_MS, bnxt_dev_reset_and_resume,
                                   (void *)bp);
                 break;
@@ -104,7 +167,8 @@ void bnxt_handle_async_event(struct bnxt *bp,
                 if (!info)
                         return;
 
-                PMD_DRV_LOG(INFO, "Error recovery async event received\n");
+                PMD_DRV_LOG(INFO, "Port %u: Error recovery async event received\n",
+                            port_id);
 
                 event_data = rte_le_to_cpu_32(async_cmp->event_data1) &
                                 EVENT_DATA1_FLAGS_MASK;
@@ -119,8 +183,8 @@ void bnxt_handle_async_event(struct bnxt *bp,
                 else
                         info->flags &= ~BNXT_FLAG_RECOVERY_ENABLED;
 
-                PMD_DRV_LOG(INFO, "recovery enabled(%d), master function(%d)\n",
-                            bnxt_is_recovery_enabled(bp),
+                PMD_DRV_LOG(INFO, "Port %u: recovery enabled(%d), master function(%d)\n",
+                            port_id, bnxt_is_recovery_enabled(bp),
                             bnxt_is_master_func(bp));
 
                 if (bp->flags & BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED)
@@ -133,6 +197,24 @@ void bnxt_handle_async_event(struct bnxt *bp,
 
                 bnxt_schedule_fw_health_check(bp);
                 break;
+        case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
+                PMD_DRV_LOG(INFO, "DNC event: evt_data1 %#x evt_data2 %#x\n",
+                            rte_le_to_cpu_32(async_cmp->event_data1),
+                            rte_le_to_cpu_32(async_cmp->event_data2));
+                break;
+        case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE:
+                bnxt_process_default_vnic_change(bp, async_cmp);
+                break;
+        case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST:
+                echo_req_data1 = rte_le_to_cpu_32(async_cmp->event_data1);
+                echo_req_data2 = rte_le_to_cpu_32(async_cmp->event_data2);
+                PMD_DRV_LOG(INFO,
+                            "Port %u: Received fw echo request: data1 %#x data2 %#x\n",
+                            port_id, echo_req_data1, echo_req_data2);
+                if (bp->recovery_info)
+                        bnxt_hwrm_fw_echo_reply(bp, echo_req_data1,
+                                                echo_req_data2);
+                break;
         default:
                 PMD_DRV_LOG(DEBUG, "handle_async_event id = 0x%x\n", event_id);
                 break;
@@ -149,14 +231,14 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
         uint16_t req_len;
         int rc;
 
-        if (bp->pf.active_vfs <= 0) {
+        if (bp->pf->active_vfs <= 0) {
                 PMD_DRV_LOG(ERR, "Forwarded VF with no active VFs\n");
                 return;
         }
 
         /* Qualify the fwd request */
         fw_vf_id = rte_le_to_cpu_16(fwd_cmpl->source_id);
-        vf_id = fw_vf_id - bp->pf.first_vf_id;
+        vf_id = fw_vf_id - bp->pf->first_vf_id;
 
         req_len = (rte_le_to_cpu_16(fwd_cmpl->req_len_type) &
                    HWRM_FWD_REQ_CMPL_REQ_LEN_MASK) >>
@@ -165,19 +247,19 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
                 req_len = sizeof(fwreq->encap_request);
 
         /* Locate VF's forwarded command */
-        fwd_cmd = (struct input *)bp->pf.vf_info[vf_id].req_buf;
+        fwd_cmd = (struct input *)bp->pf->vf_info[vf_id].req_buf;
 
-        if (fw_vf_id < bp->pf.first_vf_id ||
-            fw_vf_id >= (bp->pf.first_vf_id) + bp->pf.active_vfs) {
+        if (fw_vf_id < bp->pf->first_vf_id ||
+            fw_vf_id >= bp->pf->first_vf_id + bp->pf->active_vfs) {
                 PMD_DRV_LOG(ERR,
                             "FWD req's source_id 0x%x out of range 0x%x - 0x%x (%d %d)\n",
-                            fw_vf_id, bp->pf.first_vf_id,
-                            (bp->pf.first_vf_id) + bp->pf.active_vfs - 1,
-                            bp->pf.first_vf_id, bp->pf.active_vfs);
+                            fw_vf_id, bp->pf->first_vf_id,
+                            (bp->pf->first_vf_id) + bp->pf->active_vfs - 1,
+                            bp->pf->first_vf_id, bp->pf->active_vfs);
                 goto reject;
         }
 
-        if (bnxt_rcv_msg_from_vf(bp, vf_id, fwd_cmd) == true) {
+        if (bnxt_rcv_msg_from_vf(bp, vf_id, fwd_cmd)) {
                 /*
                  * In older firmware versions, the MAC had to be all zeros for
                  * the VF to set it's MAC via hwrm_func_vf_cfg. Set to all
@@ -192,6 +274,7 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
                                 (const uint8_t *)"\x00\x00\x00\x00\x00");
                 }
         }
+
         if (fwd_cmd->req_type == HWRM_CFA_L2_SET_RX_MASK) {
                 struct hwrm_cfa_l2_set_rx_mask_input *srm = (void *)fwd_cmd;
 
@@ -203,12 +286,13 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
                         HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN |
                         HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN);
         }
+
         /* Forward */
         rc = bnxt_hwrm_exec_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len);
         if (rc) {
                 PMD_DRV_LOG(ERR,
                             "Failed to send FWD req VF 0x%x, type 0x%x.\n",
-                            fw_vf_id - bp->pf.first_vf_id,
+                            fw_vf_id - bp->pf->first_vf_id,
                             rte_le_to_cpu_16(fwd_cmd->req_type));
         }
         return;
@@ -219,7 +303,7 @@ reject:
         if (rc) {
                 PMD_DRV_LOG(ERR,
                             "Failed to send REJECT req VF 0x%x, type 0x%x.\n",
-                            fw_vf_id - bp->pf.first_vf_id,
+                            fw_vf_id - bp->pf->first_vf_id,
                             rte_le_to_cpu_16(fwd_cmd->req_type));
         }
 
@@ -244,7 +328,7 @@ int bnxt_event_hwrm_resp_handler(struct bnxt *bp, struct cmpl_base *cmp)
                 bnxt_handle_async_event(bp, cmp);
                 evt = 1;
                 break;
-        case CMPL_BASE_TYPE_HWRM_FWD_RESP:
+        case CMPL_BASE_TYPE_HWRM_FWD_REQ:
                 /* Handle HWRM forwarded responses */
                 bnxt_handle_fwd_req(bp, cmp);
                 evt = 1;
@@ -276,3 +360,9 @@ bool bnxt_is_recovery_enabled(struct bnxt *bp)
 
         return false;
 }
+
+void bnxt_stop_rxtx(struct bnxt *bp)
+{
+        bp->eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
+        bp->eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts;
+}