X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fbnxt%2Fbnxt_cpr.c;h=a43b22a8f8b1d7bc860e54aa7334bca279673a55;hb=369ce46248c0605d31bd29ebaa4474309a875176;hp=19c684caa5a21a4d155163d0563bc11ce2bd92ec;hpb=9a82633c27cdfbc33e34876dff41abdbfa71bb97;p=dpdk.git

diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c
index 19c684caa5..a43b22a8f8 100644
--- a/drivers/net/bnxt/bnxt_cpr.c
+++ b/drivers/net/bnxt/bnxt_cpr.c
@@ -1,44 +1,112 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) Broadcom Limited.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Broadcom Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2021 Broadcom
+ * All rights reserved.
  */
 
 #include
+#include
+#include
 
 #include "bnxt.h"
-#include "bnxt_cpr.h"
 #include "bnxt_hwrm.h"
 #include "bnxt_ring.h"
 #include "hsi_struct_def_dpdk.h"
 
+void bnxt_wait_for_device_shutdown(struct bnxt *bp)
+{
+	uint32_t val, timeout;
+
+	/* if HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD is set
+	 * in HWRM_FUNC_QCAPS command, wait for FW_STATUS to set
+	 * the SHUTDOWN bit in health register
+	 */
+	if (!(bp->recovery_info &&
+	      (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD)))
+		return;
+
+	/* Driver has to wait for fw_reset_max_msecs or shutdown bit which comes
+	 * first for FW to collect crash dump.
+	 */
+	timeout = bp->fw_reset_max_msecs;
+
+	/* Driver has to poll for shutdown bit in fw_status register
+	 *
+	 * 1. in case of hot fw upgrade, this bit will be set after all
+	 *    function drivers unregistered with fw.
+	 * 2. in case of fw initiated error recovery, this bit will be
+	 *    set after fw has collected the core dump
+	 */
+	do {
+		val = bnxt_read_fw_status_reg(bp, BNXT_FW_STATUS_REG);
+		if (val & BNXT_FW_STATUS_SHUTDOWN)
+			return;
+
+		rte_delay_ms(100);
+		timeout -= 100;
+	} while (timeout);
+}
+
+static void
+bnxt_process_default_vnic_change(struct bnxt *bp,
+				 struct hwrm_async_event_cmpl *async_cmp)
+{
+	uint16_t vnic_state, vf_fid, vf_id;
+	struct bnxt_representor *vf_rep_bp;
+	struct rte_eth_dev *eth_dev;
+	bool vfr_found = false;
+	uint32_t event_data;
+
+	if (!BNXT_TRUFLOW_EN(bp))
+		return;
+
+	PMD_DRV_LOG(INFO, "Default vnic change async event received\n");
+	event_data = rte_le_to_cpu_32(async_cmp->event_data1);
+
+	vnic_state = (event_data & BNXT_DEFAULT_VNIC_STATE_MASK) >>
+			BNXT_DEFAULT_VNIC_STATE_SFT;
+	if (vnic_state != BNXT_DEFAULT_VNIC_ALLOC)
+		return;
+
+	if (!bp->rep_info)
+		return;
+
+	vf_fid = (event_data & BNXT_DEFAULT_VNIC_CHANGE_VF_ID_MASK) >>
+			BNXT_DEFAULT_VNIC_CHANGE_VF_ID_SFT;
+	PMD_DRV_LOG(INFO, "async event received vf_id 0x%x\n", vf_fid);
+
+	for (vf_id = 0; vf_id < BNXT_MAX_VF_REPS(bp); vf_id++) {
+		eth_dev = bp->rep_info[vf_id].vfr_eth_dev;
+		if (!eth_dev)
+			continue;
+		vf_rep_bp = eth_dev->data->dev_private;
+		if (vf_rep_bp &&
+		    vf_rep_bp->fw_fid == vf_fid) {
+			vfr_found = true;
+			break;
+		}
+	}
+	if (!vfr_found)
+		return;
+
+	bnxt_rep_dev_start_op(eth_dev);
+}
+
+static void bnxt_handle_event_error_report(struct bnxt *bp,
+					    uint32_t data1,
+					    uint32_t data2)
+{
+	switch (BNXT_EVENT_ERROR_REPORT_TYPE(data1)) {
+	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
+		PMD_DRV_LOG(WARNING, "Port:%d Pause Storm detected!\n",
+			    bp->eth_dev->data->port_id);
+		break;
+	default:
+		PMD_DRV_LOG(INFO, "FW reported unknown error type data1 %d"
+			    " data2: %d\n", data1, data2);
+		break;
+	}
+}
+
 /*
  * Async event handling
  */
@@ -48,17 +116,129 @@ void bnxt_handle_async_event(struct bnxt *bp,
 	struct hwrm_async_event_cmpl *async_cmp =
 				(struct hwrm_async_event_cmpl *)cmp;
 	uint16_t event_id = rte_le_to_cpu_16(async_cmp->event_id);
+	uint16_t port_id = bp->eth_dev->data->port_id;
+	struct bnxt_error_recovery_info *info;
+	uint32_t event_data;
+	uint32_t data1, data2;
+	uint32_t status;
+
+	data1 = rte_le_to_cpu_32(async_cmp->event_data1);
+	data2 = rte_le_to_cpu_32(async_cmp->event_data2);
 
-	/* TODO: HWRM async events are not defined yet */
-	/* Needs to handle: link events, error events, etc. */
 	switch (event_id) {
 	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
 	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
 	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE:
-		bnxt_link_update_op(bp->eth_dev, 1);
+		/* FALLTHROUGH */
+		bnxt_link_update_op(bp->eth_dev, 0);
+		rte_eth_dev_callback_process(bp->eth_dev,
+			RTE_ETH_EVENT_INTR_LSC, NULL);
+		break;
+	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
+		PMD_DRV_LOG(INFO, "Async event: PF driver unloaded\n");
+		break;
+	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
+		PMD_DRV_LOG(INFO, "Async event: VF config changed\n");
+		bnxt_hwrm_func_qcfg(bp, NULL);
+		break;
+	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED:
+		PMD_DRV_LOG(INFO, "Port conn async event\n");
+		break;
+	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY:
+		/*
+		 * Avoid any rx/tx packet processing during firmware reset
+		 * operation.
+		 */
+		bnxt_stop_rxtx(bp);
+
+		/* Ignore reset notify async events when stopping the port */
+		if (!bp->eth_dev->data->dev_started) {
+			bp->flags |= BNXT_FLAG_FATAL_ERROR;
+			return;
+		}
+
+		pthread_mutex_lock(&bp->err_recovery_lock);
+		event_data = data1;
+		/* timestamp_lo/hi values are in units of 100ms */
+		bp->fw_reset_max_msecs = async_cmp->timestamp_hi ?
+			rte_le_to_cpu_16(async_cmp->timestamp_hi) * 100 :
+			BNXT_MAX_FW_RESET_TIMEOUT;
+		bp->fw_reset_min_msecs = async_cmp->timestamp_lo ?
+			async_cmp->timestamp_lo * 100 :
+			BNXT_MIN_FW_READY_TIMEOUT;
+		if ((event_data & EVENT_DATA1_REASON_CODE_MASK) ==
+		    EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL) {
+			PMD_DRV_LOG(INFO,
+				    "Port %u: Firmware fatal reset event received\n",
+				    port_id);
+			bp->flags |= BNXT_FLAG_FATAL_ERROR;
+		} else {
+			PMD_DRV_LOG(INFO,
+				    "Port %u: Firmware non-fatal reset event received\n",
+				    port_id);
+		}
+
+		bp->flags |= BNXT_FLAG_FW_RESET;
+		pthread_mutex_unlock(&bp->err_recovery_lock);
+		rte_eal_alarm_set(US_PER_MS, bnxt_dev_reset_and_resume,
+				  (void *)bp);
+		break;
+	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY:
+		info = bp->recovery_info;
+
+		if (!info)
+			return;
+
+		event_data = data1 & EVENT_DATA1_FLAGS_MASK;
+
+		if (event_data & EVENT_DATA1_FLAGS_RECOVERY_ENABLED) {
+			info->flags |= BNXT_FLAG_RECOVERY_ENABLED;
+		} else {
+			info->flags &= ~BNXT_FLAG_RECOVERY_ENABLED;
+			PMD_DRV_LOG(INFO, "Driver recovery watchdog is disabled\n");
+			return;
+		}
+
+		if (event_data & EVENT_DATA1_FLAGS_MASTER_FUNC)
+			info->flags |= BNXT_FLAG_PRIMARY_FUNC;
+		else
+			info->flags &= ~BNXT_FLAG_PRIMARY_FUNC;
+
+		status = bnxt_read_fw_status_reg(bp, BNXT_FW_STATUS_REG);
+		PMD_DRV_LOG(INFO,
+			    "Port: %u Driver recovery watchdog, role: %s, FW status: 0x%x (%s)\n",
+			    port_id, bnxt_is_primary_func(bp) ? "primary" : "backup", status,
+			    (status == BNXT_FW_STATUS_HEALTHY) ? "healthy" : "unhealthy");
+
+		if (bp->flags & BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED)
+			return;
+
+		info->last_heart_beat =
+			bnxt_read_fw_status_reg(bp, BNXT_FW_HEARTBEAT_CNT_REG);
+		info->last_reset_counter =
+			bnxt_read_fw_status_reg(bp, BNXT_FW_RECOVERY_CNT_REG);
+
+		bnxt_schedule_fw_health_check(bp);
+		break;
+	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
+		PMD_DRV_LOG(INFO, "Port: %u DNC event: data1 %#x data2 %#x\n",
+			    port_id, data1, data2);
+		break;
+	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE:
+		bnxt_process_default_vnic_change(bp, async_cmp);
+		break;
+	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST:
+		PMD_DRV_LOG(INFO,
+			    "Port %u: Received fw echo request: data1 %#x data2 %#x\n",
+			    port_id, data1, data2);
+		if (bp->recovery_info)
+			bnxt_hwrm_fw_echo_reply(bp, data1, data2);
+		break;
+	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT:
+		bnxt_handle_event_error_report(bp, data1, data2);
 		break;
 	default:
-		RTE_LOG(DEBUG, PMD, "handle_async_event id = 0x%x\n", event_id);
+		PMD_DRV_LOG(DEBUG, "handle_async_event id = 0x%x\n", event_id);
 		break;
 	}
 }
@@ -73,14 +253,14 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
 	uint16_t req_len;
 	int rc;
 
-	if (bp->pf.active_vfs <= 0) {
-		RTE_LOG(ERR, PMD, "Forwarded VF with no active VFs\n");
+	if (bp->pf->active_vfs <= 0) {
+		PMD_DRV_LOG(ERR, "Forwarded VF with no active VFs\n");
 		return;
 	}
 
 	/* Qualify the fwd request */
 	fw_vf_id = rte_le_to_cpu_16(fwd_cmpl->source_id);
-	vf_id = fw_vf_id - bp->pf.first_vf_id;
+	vf_id = fw_vf_id - bp->pf->first_vf_id;
 
 	req_len = (rte_le_to_cpu_16(fwd_cmpl->req_len_type) &
 		   HWRM_FWD_REQ_CMPL_REQ_LEN_MASK) >>
@@ -89,19 +269,19 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
 		req_len = sizeof(fwreq->encap_request);
 
 	/* Locate VF's forwarded command */
-	fwd_cmd = (struct input *)bp->pf.vf_info[vf_id].req_buf;
+	fwd_cmd = (struct input *)bp->pf->vf_info[vf_id].req_buf;
 
-	if (fw_vf_id < bp->pf.first_vf_id ||
-	    fw_vf_id >= (bp->pf.first_vf_id) + bp->pf.active_vfs) {
-		RTE_LOG(ERR, PMD,
+	if (fw_vf_id < bp->pf->first_vf_id ||
+	    fw_vf_id >= bp->pf->first_vf_id + bp->pf->active_vfs) {
+		PMD_DRV_LOG(ERR,
 			"FWD req's source_id 0x%x out of range 0x%x - 0x%x (%d %d)\n",
-			fw_vf_id, bp->pf.first_vf_id,
-			(bp->pf.first_vf_id) + bp->pf.active_vfs - 1,
-			bp->pf.first_vf_id, bp->pf.active_vfs);
+			fw_vf_id, bp->pf->first_vf_id,
+			(bp->pf->first_vf_id) + bp->pf->active_vfs - 1,
+			bp->pf->first_vf_id, bp->pf->active_vfs);
 		goto reject;
 	}
 
-	if (bnxt_rcv_msg_from_vf(bp, vf_id, fwd_cmd) == true) {
+	if (bnxt_rcv_msg_from_vf(bp, vf_id, fwd_cmd)) {
 		/*
 		 * In older firmware versions, the MAC had to be all zeros for
 		 * the VF to set it's MAC via hwrm_func_vf_cfg. Set to all
@@ -116,6 +296,7 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
 				(const uint8_t *)"\x00\x00\x00\x00\x00");
 		}
 	}
+
 	if (fwd_cmd->req_type == HWRM_CFA_L2_SET_RX_MASK) {
 		struct hwrm_cfa_l2_set_rx_mask_input *srm =
 							(void *)fwd_cmd;
@@ -127,12 +308,13 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
 			HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN |
 			HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN);
 	}
+
 	/* Forward */
 	rc = bnxt_hwrm_exec_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len);
 	if (rc) {
-		RTE_LOG(ERR, PMD,
+		PMD_DRV_LOG(ERR,
 			"Failed to send FWD req VF 0x%x, type 0x%x.\n",
-			fw_vf_id - bp->pf.first_vf_id,
+			fw_vf_id - bp->pf->first_vf_id,
 			rte_le_to_cpu_16(fwd_cmd->req_type));
 	}
 	return;
@@ -141,79 +323,77 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
 reject:
 	rc = bnxt_hwrm_reject_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len);
 	if (rc) {
-		RTE_LOG(ERR, PMD,
+		PMD_DRV_LOG(ERR,
 			"Failed to send REJECT req VF 0x%x, type 0x%x.\n",
-			fw_vf_id - bp->pf.first_vf_id,
+			fw_vf_id - bp->pf->first_vf_id,
 			rte_le_to_cpu_16(fwd_cmd->req_type));
 	}
 	return;
 }
 
-/* For the default completion ring only */
-int bnxt_alloc_def_cp_ring(struct bnxt *bp)
+int bnxt_event_hwrm_resp_handler(struct bnxt *bp, struct cmpl_base *cmp)
 {
-	struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
-	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
-	int rc;
+	bool evt = 0;
+
+	if (bp == NULL || cmp == NULL) {
+		PMD_DRV_LOG(ERR, "invalid NULL argument\n");
+		return evt;
+	}
 
-	rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
-				  HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
-				  0, HWRM_NA_SIGNATURE,
-				  HWRM_NA_SIGNATURE);
-	if (rc)
-		goto err_out;
-	cpr->cp_doorbell = bp->pdev->mem_resource[2].addr;
-	B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
-	bp->grp_info[0].cp_fw_ring_id = cp_ring->fw_ring_id;
-	if (BNXT_PF(bp))
-		rc = bnxt_hwrm_func_cfg_def_cp(bp);
-	else
-		rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
-
-err_out:
-	return rc;
+	if (unlikely(is_bnxt_in_error(bp)))
+		return 0;
+
+	switch (CMP_TYPE(cmp)) {
+	case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
+		/* Handle any async event */
+		bnxt_handle_async_event(bp, cmp);
+		evt = 1;
+		break;
+	case CMPL_BASE_TYPE_HWRM_FWD_REQ:
+		/* Handle HWRM forwarded responses */
+		bnxt_handle_fwd_req(bp, cmp);
+		evt = 1;
+		break;
+	default:
+		/* Ignore any other events */
+		PMD_DRV_LOG(DEBUG, "Ignoring %02x completion\n", CMP_TYPE(cmp));
+		break;
+	}
+
+	return evt;
 }
 
-void bnxt_free_def_cp_ring(struct bnxt *bp)
+bool bnxt_is_primary_func(struct bnxt *bp)
 {
-	struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
+	if (bp->recovery_info->flags & BNXT_FLAG_PRIMARY_FUNC)
+		return true;
 
-	if (cpr == NULL)
-		return;
+	return false;
+}
 
-	bnxt_free_ring(cpr->cp_ring_struct);
-	cpr->cp_ring_struct = NULL;
-	rte_free(cpr->cp_ring_struct);
-	rte_free(cpr);
-	bp->def_cp_ring = NULL;
+bool bnxt_is_recovery_enabled(struct bnxt *bp)
+{
+	struct bnxt_error_recovery_info *info;
+
+	info = bp->recovery_info;
+	if (info && (info->flags & BNXT_FLAG_RECOVERY_ENABLED))
+		return true;
+
+	return false;
 }
 
-/* For the default completion ring only */
-int bnxt_init_def_ring_struct(struct bnxt *bp, unsigned int socket_id)
+void bnxt_stop_rxtx(struct bnxt *bp)
 {
-	struct bnxt_cp_ring_info *cpr;
-	struct bnxt_ring *ring;
-
-	cpr = rte_zmalloc_socket("cpr",
-				 sizeof(struct bnxt_cp_ring_info),
-				 RTE_CACHE_LINE_SIZE, socket_id);
-	if (cpr == NULL)
-		return -ENOMEM;
-	bp->def_cp_ring = cpr;
-
-	ring = rte_zmalloc_socket("bnxt_cp_ring_struct",
-				  sizeof(struct bnxt_ring),
-				  RTE_CACHE_LINE_SIZE, socket_id);
-	if (ring == NULL)
-		return -ENOMEM;
-	cpr->cp_ring_struct = ring;
-	ring->bd = (void *)cpr->cp_desc_ring;
-	ring->bd_dma = cpr->cp_desc_mapping;
-	ring->ring_size = rte_align32pow2(DEFAULT_CP_RING_SIZE);
-	ring->ring_mask = ring->ring_size - 1;
-	ring->vmem_size = 0;
-	ring->vmem = NULL;
-
-	return 0;
+	bp->eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
+	bp->eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts;
+
+	rte_eth_fp_ops[bp->eth_dev->data->port_id].rx_pkt_burst =
+		bp->eth_dev->rx_pkt_burst;
+	rte_eth_fp_ops[bp->eth_dev->data->port_id].tx_pkt_burst =
+		bp->eth_dev->tx_pkt_burst;
+	rte_mb();
+
+	/* Allow time for threads to exit the real burst functions. */
+	rte_delay_ms(100);
 }