X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fqede%2Fbase%2Fecore_int.c;h=4207b1853e317583942b4c524e6640b145f62ea4;hb=6f0a54b74bce90fca6264773db1b0bf13871874f;hp=acf875997e0a464550233582b48005a89c8cecbd;hpb=e4782d308973e5dcca4051df0ca8ebb818beed48;p=dpdk.git

diff --git a/drivers/net/qede/base/ecore_int.c b/drivers/net/qede/base/ecore_int.c
index acf875997e..4207b1853e 100644
--- a/drivers/net/qede/base/ecore_int.c
+++ b/drivers/net/qede/base/ecore_int.c
@@ -1,15 +1,14 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
  * All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
  */
 
+#include <rte_string_fns.h>
+
 #include "bcm_osal.h"
 #include "ecore.h"
 #include "ecore_spq.h"
-#include "reg_addr.h"
 #include "ecore_gtt_reg_addr.h"
 #include "ecore_init_ops.h"
 #include "ecore_rt_defs.h"
@@ -29,8 +28,10 @@ struct ecore_pi_info {
 
 struct ecore_sb_sp_info {
 	struct ecore_sb_info sb_info;
-	/* per protocol index data */
-	struct ecore_pi_info pi_info_arr[PIS_PER_SB];
+
+	/* Per protocol index data */
+	struct ecore_pi_info pi_info_arr[MAX_PIS_PER_SB];
+	osal_size_t pi_info_arr_size;
 };
 
 enum ecore_attention_type {
@@ -59,10 +60,10 @@ struct aeu_invert_reg_bit {
 #define ATTENTION_OFFSET_MASK		(0x000ff000)
 #define ATTENTION_OFFSET_SHIFT		(12)
 
-#define ATTENTION_BB_MASK		(0x00700000)
+#define ATTENTION_BB_MASK		(0xf)
 #define ATTENTION_BB_SHIFT		(20)
 #define ATTENTION_BB(value)		((value) << ATTENTION_BB_SHIFT)
-#define ATTENTION_BB_DIFFERENT		(1 << 23)
+#define ATTENTION_BB_DIFFERENT		(1 << 24)
 
 #define ATTENTION_CLEAR_ENABLE		(1 << 28)
 	unsigned int flags;
@@ -191,7 +192,10 @@ static enum _ecore_status_t ecore_pswhst_attn_cb(struct ecore_hwfn *p_hwfn)
 	return ECORE_SUCCESS;
 }
 
-#define ECORE_GRC_ATTENTION_VALID_BIT		(1 << 0)
+/* Register GRC_REG_TIMEOUT_ATTN_ACCESS_VALID */
+#define ECORE_GRC_ATTENTION_VALID_BIT_MASK	(0x1)
+#define ECORE_GRC_ATTENTION_VALID_BIT_SHIFT	(0)
+
 #define ECORE_GRC_ATTENTION_ADDRESS_MASK	(0x7fffff << 0)
 #define ECORE_GRC_ATTENTION_RDWR_BIT		(1 << 23)
 #define ECORE_GRC_ATTENTION_MASTER_MASK		(0xf << 24)
@@ -232,14 +236,17 @@ static const char *grc_timeout_attn_master_to_str(u8 master)
 
 static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn)
 {
+	enum _ecore_status_t rc = ECORE_SUCCESS;
 	u32 tmp, tmp2;
 
 	/* We've already cleared the timeout interrupt register, so we learn
-	 * of interrupts via the validity register
+	 * of interrupts via the validity register. If it is not a timeout do
+	 * nothing. It is too late at this stage to differentiate a spurious
+	 * interrupt from a fatal GRC attention.
 	 */
 	tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
 		       GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
-	if (!(tmp & ECORE_GRC_ATTENTION_VALID_BIT))
+	if (!(GET_FIELD(tmp, ECORE_GRC_ATTENTION_VALID_BIT)))
 		goto out;
 
 	/* Read the GRC timeout information */
@@ -264,11 +271,11 @@ static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn)
 		  (tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >>
 		  ECORE_GRC_ATTENTION_VF_SHIFT);
 
-out:
-	/* Regardles of anything else, clean the validity bit */
+	/* Clean the validity bit */
 	ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
 		 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);
-	return ECORE_SUCCESS;
+out:
+	return rc;
 }
 
 #define ECORE_PGLUE_ATTENTION_VALID	(1 << 29)
@@ -286,9 +293,11 @@ out:
 #define ECORE_PGLUE_ATTENTION_ILT_VALID (1 << 23)
 
 enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
-						   struct ecore_ptt *p_ptt)
+						   struct ecore_ptt *p_ptt,
+						   bool is_hw_init)
 {
 	u32 tmp;
+	char str[512] = {0};
 
 	tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2);
 	if (tmp & ECORE_PGLUE_ATTENTION_VALID) {
@@ -300,9 +309,8 @@ enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
 				   PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
 		details = ecore_rd(p_hwfn, p_ptt,
 				   PGLUE_B_REG_TX_ERR_WR_DETAILS);
-
-		DP_NOTICE(p_hwfn, false,
-			  "Illegal write by chip to [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
+		OSAL_SNPRINTF(str, 512,
+			      "Illegal write by chip to [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
 			  addr_hi, addr_lo, details,
 			  (u8)((details &
 				ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
@@ -319,6 +327,10 @@ enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
 			       1 : 0),
 			  (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ?
 			       1 : 0));
+		if (is_hw_init)
+			DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "%s", str);
+		else
+			DP_NOTICE(p_hwfn, false, "%s", str);
 	}
 
 	tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2);
@@ -394,7 +406,7 @@ enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
 
 static enum _ecore_status_t ecore_pglueb_rbc_attn_cb(struct ecore_hwfn *p_hwfn)
 {
-	return ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt);
+	return ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt, false);
 }
 
 static enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn)
@@ -414,31 +426,151 @@ ecore_general_attention_35(struct ecore_hwfn *p_hwfn)
 	return ECORE_SUCCESS;
 }
 
-#define ECORE_DORQ_ATTENTION_REASON_MASK	(0xfffff)
-#define ECORE_DORQ_ATTENTION_OPAQUE_MASK	(0xffff)
-#define ECORE_DORQ_ATTENTION_SIZE_MASK		(0x7f0000)
-#define ECORE_DORQ_ATTENTION_SIZE_SHIFT		(16)
+#define ECORE_DORQ_ATTENTION_REASON_MASK	(0xfffff)
+#define ECORE_DORQ_ATTENTION_OPAQUE_MASK	(0xffff)
+#define ECORE_DORQ_ATTENTION_OPAQUE_SHIFT	(0x0)
+#define ECORE_DORQ_ATTENTION_SIZE_MASK		(0x7f)
+#define ECORE_DORQ_ATTENTION_SIZE_SHIFT		(16)
+
+#define ECORE_DB_REC_COUNT			1000
+#define ECORE_DB_REC_INTERVAL			100
+
+static enum _ecore_status_t ecore_db_rec_flush_queue(struct ecore_hwfn *p_hwfn,
+						     struct ecore_ptt *p_ptt)
+{
+	u32 count = ECORE_DB_REC_COUNT;
+	u32 usage = 1;
+
+	/* wait for usage to zero or count to run out. This is necessary since
+	 * EDPM doorbell transactions can take multiple 64b cycles, and as such
+	 * can "split" over the PCI. Possibly, the doorbell drop can happen
+	 * with half an EDPM in the queue and the other half dropped. Another
+	 * EDPM doorbell to the same address (from the doorbell recovery
+	 * mechanism or from the doorbelling entity) could have its first half
+	 * dropped and its second half interpreted as a continuation of the
+	 * first. To prevent such malformed doorbells from reaching the
+	 * device, flush the queue before releasing the overflow sticky
+	 * indication.
+	 */
+	while (count-- && usage) {
+		usage = ecore_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT);
+		OSAL_UDELAY(ECORE_DB_REC_INTERVAL);
+	}
+
+	/* should have been depleted by now */
+	if (usage) {
+		DP_NOTICE(p_hwfn->p_dev, false,
+			  "DB recovery: doorbell usage failed to zero after %d usec. usage was %x\n",
+			  ECORE_DB_REC_INTERVAL * ECORE_DB_REC_COUNT, usage);
+		return ECORE_TIMEOUT;
+	}
+
+	return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_db_rec_handler(struct ecore_hwfn *p_hwfn,
+					  struct ecore_ptt *p_ptt)
+{
+	u32 overflow;
+	enum _ecore_status_t rc;
+
+	overflow = ecore_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
+	DP_NOTICE(p_hwfn, false, "PF Overflow sticky 0x%x\n", overflow);
+	if (!overflow) {
+		ecore_db_recovery_execute(p_hwfn, DB_REC_ONCE);
+		return ECORE_SUCCESS;
+	}
+
+	if (ecore_edpm_enabled(p_hwfn)) {
+		rc = ecore_db_rec_flush_queue(p_hwfn, p_ptt);
+		if (rc != ECORE_SUCCESS)
+			return rc;
+	}
+
+	/* flush any pending (e)dpm as they may never arrive */
+	ecore_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);
+
+	/* release overflow sticky indication (stop silently dropping
+	 * everything)
+	 */
+	ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
+
+	/* repeat all last doorbells (doorbell drop recovery) */
+	ecore_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL);
+
+	return ECORE_SUCCESS;
+}
 
 static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn)
 {
-	u32 reason;
+	u32 int_sts, first_drop_reason, details, address, all_drops_reason;
+	struct ecore_ptt *p_ptt = p_hwfn->p_dpc_ptt;
+	enum _ecore_status_t rc;
 
-	reason = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, DORQ_REG_DB_DROP_REASON) &
-	    ECORE_DORQ_ATTENTION_REASON_MASK;
-	if (reason) {
-		u32 details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
-				       DORQ_REG_DB_DROP_DETAILS);
+	int_sts = ecore_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
+	DP_NOTICE(p_hwfn->p_dev, false, "DORQ attention. int_sts was %x\n",
+		  int_sts);
 
-		DP_INFO(p_hwfn->p_dev,
-			"DORQ db_drop: address 0x%08x Opaque FID 0x%04x"
-			" Size [bytes] 0x%08x Reason: 0x%08x\n",
-			ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
-				 DORQ_REG_DB_DROP_DETAILS_ADDRESS),
-			(u16)(details & ECORE_DORQ_ATTENTION_OPAQUE_MASK),
-			((details & ECORE_DORQ_ATTENTION_SIZE_MASK) >>
-			 ECORE_DORQ_ATTENTION_SIZE_SHIFT) * 4, reason);
+	/* int_sts may be zero since all PFs were interrupted for doorbell
+	 * overflow but another one already handled it. Can abort here. If
+	 * this PF also requires overflow recovery, we will be interrupted
+	 * again.
+	 */
+	if (!int_sts)
+		return ECORE_SUCCESS;
+
+	/* check if db_drop or overflow happened */
+	if (int_sts & (DORQ_REG_INT_STS_DB_DROP |
+		       DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) {
+		/* obtain data about db drop/overflow */
+		first_drop_reason = ecore_rd(p_hwfn, p_ptt,
+					     DORQ_REG_DB_DROP_REASON) &
+				    ECORE_DORQ_ATTENTION_REASON_MASK;
+		details = ecore_rd(p_hwfn, p_ptt,
+				   DORQ_REG_DB_DROP_DETAILS);
+		address = ecore_rd(p_hwfn, p_ptt,
+				   DORQ_REG_DB_DROP_DETAILS_ADDRESS);
+		all_drops_reason = ecore_rd(p_hwfn, p_ptt,
+					    DORQ_REG_DB_DROP_DETAILS_REASON);
+
+		/* log info */
+		DP_NOTICE(p_hwfn->p_dev, false,
+			  "Doorbell drop occurred\n"
+			  "Address\t\t0x%08x\t(second BAR address)\n"
+			  "FID\t\t0x%04x\t\t(Opaque FID)\n"
+			  "Size\t\t0x%04x\t\t(in bytes)\n"
+			  "1st drop reason\t0x%08x\t(details on first drop since last handling)\n"
			  "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n",
+			  address,
+			  GET_FIELD(details, ECORE_DORQ_ATTENTION_OPAQUE),
+			  GET_FIELD(details, ECORE_DORQ_ATTENTION_SIZE) * 4,
+			  first_drop_reason, all_drops_reason);
+
+		rc = ecore_db_rec_handler(p_hwfn, p_ptt);
+		OSAL_DB_REC_OCCURRED(p_hwfn);
+		if (rc != ECORE_SUCCESS)
+			return rc;
+
+		/* clear the doorbell drop details and prepare for next drop */
+		ecore_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0);
+
+		/* mark interrupt as handled (note: even if drop was due to a
+		 * different reason than overflow we mark as handled)
+		 */
+		ecore_wr(p_hwfn, p_ptt, DORQ_REG_INT_STS_WR,
+			 DORQ_REG_INT_STS_DB_DROP |
+			 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR);
+
+		/* if there are no indications other than drop indications,
+		 * success
+		 */
+		if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP |
+				 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR |
+				 DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0)
+			return ECORE_SUCCESS;
 	}
 
+	/* some other indication was present - non-recoverable */
+	DP_INFO(p_hwfn, "DORQ fatal attention\n");
+
 	return ECORE_INVAL;
 }
 
@@ -478,6 +610,8 @@ enum aeu_invert_reg_special_type {
 	AEU_INVERT_REG_SPECIAL_CNIG_1,
 	AEU_INVERT_REG_SPECIAL_CNIG_2,
 	AEU_INVERT_REG_SPECIAL_CNIG_3,
+	AEU_INVERT_REG_SPECIAL_MCP_UMP_TX,
+	AEU_INVERT_REG_SPECIAL_MCP_SCPAD,
 	AEU_INVERT_REG_SPECIAL_MAX,
 };
 
@@ -487,6 +621,8 @@ aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = {
 	{"CNIG port 1", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
 	{"CNIG port 2", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
 	{"CNIG port 3", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
+	{"MCP Latched ump_tx", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+	{"MCP Latched scratchpad", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
 };
 
 /* Notice aeu_invert_reg must be defined in the same order of bits as HW; */
@@ -550,10 +686,15 @@ static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
 	{"AVS stop status ready", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
 	{"MSTAT", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
 	{"MSTAT per-path", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
-	{"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
-	 MAX_BLOCK_ID},
+	{"OPTE", ATTENTION_PAR, OSAL_NULL, BLOCK_OPTE},
+	{"MCP", ATTENTION_PAR, OSAL_NULL, BLOCK_MCP},
+	{"MS", ATTENTION_SINGLE, OSAL_NULL, BLOCK_MS},
+	{"UMAC", ATTENTION_SINGLE, OSAL_NULL, BLOCK_UMAC},
+	{"LED", ATTENTION_SINGLE, OSAL_NULL, BLOCK_LED},
+	{"BMBN", ATTENTION_SINGLE, OSAL_NULL, BLOCK_BMBN},
 	{"NIG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NIG},
 	{"BMB/OPTE/MCP", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BMB},
+	{"BMB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BMB},
 	{"BTB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BTB},
{"BRB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BRB}, {"PRS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRS}, @@ -656,10 +797,17 @@ static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = { {"MCP Latched memory", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, {"MCP Latched scratchpad cache", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, - {"MCP Latched ump_tx", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, - {"MCP Latched scratchpad", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, - {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, - MAX_BLOCK_ID}, + {"AVS", ATTENTION_PAR | ATTENTION_BB_DIFFERENT | + ATTENTION_BB(AEU_INVERT_REG_SPECIAL_MCP_UMP_TX), OSAL_NULL, + BLOCK_AVS_WRAP}, + {"AVS", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT | + ATTENTION_BB(AEU_INVERT_REG_SPECIAL_MCP_SCPAD), OSAL_NULL, + BLOCK_AVS_WRAP}, + {"PCIe core", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS}, + {"PCIe link up", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS}, + {"PCIe hot reset", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS}, + {"Reserved %d", (9 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, + MAX_BLOCK_ID}, } }, @@ -827,14 +975,22 @@ ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn, /* @DPDK */ /* Reach assertion if attention is fatal */ if (b_fatal || (strcmp(p_bit_name, "PGLUE B RBC") == 0)) { +#ifndef ASIC_ONLY + DP_NOTICE(p_hwfn, !CHIP_REV_IS_EMUL(p_hwfn->p_dev), + "`%s': Fatal attention\n", p_bit_name); +#else DP_NOTICE(p_hwfn, true, "`%s': Fatal attention\n", p_bit_name); +#endif ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN); } /* Prevent this Attention from being asserted in the future */ if (p_aeu->flags & ATTENTION_CLEAR_ENABLE || +#ifndef ASIC_ONLY + CHIP_REV_IS_EMUL(p_hwfn->p_dev) || +#endif p_hwfn->p_dev->attn_clr_en) { u32 val; u32 mask = ~bitmask; @@ -885,6 +1041,13 @@ static void ecore_int_deassertion_parity(struct ecore_hwfn *p_hwfn, p_aeu->bit_name); } +#define MISC_REG_AEU_AFTER_INVERT_IGU(n) \ + (MISC_REG_AEU_AFTER_INVERT_1_IGU + (n) * 0x4) + +#define MISC_REG_AEU_ENABLE_IGU_OUT(n, group) \ + (MISC_REG_AEU_ENABLE1_IGU_OUT_0 + (n) * 0x4 + \ + (group) * 0x4 * NUM_ATTN_REGS) + /** * @brief - handles deassertion of previously asserted attentions. 
  *
@@ -904,8 +1067,7 @@ static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
 	/* Read the attention registers in the AEU */
 	for (i = 0; i < NUM_ATTN_REGS; i++) {
 		aeu_inv_arr[i] = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
-					  MISC_REG_AEU_AFTER_INVERT_1_IGU +
-					  i * 0x4);
+					  MISC_REG_AEU_AFTER_INVERT_IGU(i));
 		DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
 			   "Deasserted bits [%d]: %08x\n", i, aeu_inv_arr[i]);
 	}
@@ -915,7 +1077,7 @@ static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
 		struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
 		u32 parities;
 
-		aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32);
+		aeu_en = MISC_REG_AEU_ENABLE_IGU_OUT(i, 0);
 		en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
 		parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
 
@@ -946,9 +1108,7 @@ static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
 			for (i = 0; i < NUM_ATTN_REGS; i++) {
 				u32 bits;
 
-				aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
-					 i * sizeof(u32) +
-					 k * sizeof(u32) * NUM_ATTN_REGS;
+				aeu_en = MISC_REG_AEU_ENABLE_IGU_OUT(i, k);
 				en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
 				bits = aeu_inv_arr[i] & en;
@@ -1000,9 +1160,9 @@ static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
 							p_aeu->bit_name,
 							num);
 					else
-						OSAL_STRNCPY(bit_name,
-							     p_aeu->bit_name,
-							     30);
+						strlcpy(bit_name,
+							p_aeu->bit_name,
+							sizeof(bit_name));
 
 					/* We now need to pass bitmask in its
 					 * correct position.
@@ -1096,8 +1256,9 @@ static enum _ecore_status_t ecore_int_attentions(struct ecore_hwfn *p_hwfn)
 static void ecore_sb_ack_attn(struct ecore_hwfn *p_hwfn,
 			      void OSAL_IOMEM *igu_addr, u32 ack_cons)
 {
-	struct igu_prod_cons_update igu_ack = { 0 };
+	struct igu_prod_cons_update igu_ack;
 
+	OSAL_MEMSET(&igu_ack, 0, sizeof(struct igu_prod_cons_update));
 	igu_ack.sb_id_and_flags =
 	    ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
 	     (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
@@ -1120,7 +1281,6 @@ void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)
 	struct ecore_pi_info *pi_info = OSAL_NULL;
 	struct ecore_sb_attn_info *sb_attn;
 	struct ecore_sb_info *sb_info;
-	int arr_size;
 	u16 rc = 0;
 
 	if (!p_hwfn)
@@ -1132,7 +1292,6 @@ void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)
 	}
 
 	sb_info = &p_hwfn->p_sp_sb->sb_info;
-	arr_size = OSAL_ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
 	if (!sb_info) {
 		DP_ERR(p_hwfn->p_dev,
 		       "Status block is NULL - cannot ack interrupts\n");
@@ -1197,14 +1356,14 @@ void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)
 		ecore_int_attentions(p_hwfn);
 
 	if (rc & ECORE_SB_IDX) {
-		int pi;
+		osal_size_t pi;
 
 		/* Since we only looked at the SB index, it's possible more
 		 * than a single protocol-index on the SB incremented.
 		 * Iterate over all configured protocol indices and check
 		 * whether something happened for each.
 		 */
-		for (pi = 0; pi < arr_size; pi++) {
+		for (pi = 0; pi < p_hwfn->p_sp_sb->pi_info_arr_size; pi++) {
 			pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
 			if (pi_info->comp_cb != OSAL_NULL)
 				pi_info->comp_cb(p_hwfn, pi_info->cookie);
@@ -1302,8 +1461,7 @@ static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn,
 	/* SB struct */
 	p_sb = OSAL_ALLOC(p_dev, GFP_KERNEL, sizeof(*p_sb));
 	if (!p_sb) {
-		DP_NOTICE(p_dev, true,
-			  "Failed to allocate `struct ecore_sb_attn_info'\n");
+		DP_NOTICE(p_dev, false, "Failed to allocate `struct ecore_sb_attn_info'\n");
 		return ECORE_NOMEM;
 	}
 
@@ -1311,8 +1469,7 @@ static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn,
 	p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
 					 SB_ATTN_ALIGNED_SIZE(p_hwfn));
 	if (!p_virt) {
-		DP_NOTICE(p_dev, true,
-			  "Failed to allocate status block (attentions)\n");
+		DP_NOTICE(p_dev, false, "Failed to allocate status block (attentions)\n");
 		OSAL_FREE(p_dev, p_sb);
 		return ECORE_NOMEM;
 	}
@@ -1435,11 +1592,13 @@ void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
 		ecore_dmae_host2grc(p_hwfn, p_ptt,
 				    (u64)(osal_uintptr_t)&phys_addr,
 				    CAU_REG_SB_ADDR_MEMORY +
-				    igu_sb_id * sizeof(u64), 2, 0);
+				    igu_sb_id * sizeof(u64), 2,
+				    OSAL_NULL /* default parameters */);
 		ecore_dmae_host2grc(p_hwfn, p_ptt,
 				    (u64)(osal_uintptr_t)&sb_entry,
 				    CAU_REG_SB_VAR_MEMORY +
-				    igu_sb_id * sizeof(u64), 2, 0);
+				    igu_sb_id * sizeof(u64), 2,
+				    OSAL_NULL /* default parameters */);
 	} else {
 		/* Initialize Status Block Address */
 		STORE_RT_REG_AGG(p_hwfn,
@@ -1494,7 +1653,7 @@ void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
 {
 	/* zero status block and ack counter */
 	sb_info->sb_ack = 0;
-	OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
+	OSAL_MEMSET(sb_info->sb_virt, 0, sb_info->sb_size);
 
 	if (IS_PF(p_hwfn->p_dev))
 		ecore_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
@@ -1577,6 +1736,14 @@ enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
 			       dma_addr_t sb_phy_addr, u16 sb_id)
 {
 	sb_info->sb_virt = sb_virt_addr;
+	struct status_block *sb_virt;
+
+	sb_virt = (struct status_block *)sb_info->sb_virt;
+
+	sb_info->sb_size = sizeof(*sb_virt);
+	sb_info->sb_pi_array = sb_virt->pi_array;
+	sb_info->sb_prod_index = &sb_virt->prod_index;
+
 	sb_info->sb_phys = sb_phy_addr;
 
 	sb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id);
@@ -1608,16 +1775,16 @@ enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
 	/* The igu address will hold the absolute address that needs to be
 	 * written to for a specific status block
 	 */
-	if (IS_PF(p_hwfn->p_dev)) {
+	if (IS_PF(p_hwfn->p_dev))
 		sb_info->igu_addr = (u8 OSAL_IOMEM *)p_hwfn->regview +
-		    GTT_BAR0_MAP_REG_IGU_CMD + (sb_info->igu_sb_id << 3);
+				    GTT_BAR0_MAP_REG_IGU_CMD +
+				    (sb_info->igu_sb_id << 3);
 
-	} else {
-		sb_info->igu_addr =
-		    (u8 OSAL_IOMEM *)p_hwfn->regview +
+	else
+		sb_info->igu_addr = (u8 OSAL_IOMEM *)p_hwfn->regview +
 				    PXP_VF_BAR0_START_IGU +
-		    ((IGU_CMD_INT_ACK_BASE + sb_info->igu_sb_id) << 3);
-	}
+				    ((IGU_CMD_INT_ACK_BASE +
+				      sb_info->igu_sb_id) << 3);
 
 	sb_info->flags |= ECORE_SB_INFO_INIT;
 
@@ -1638,7 +1805,7 @@ enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
 
 	/* zero status block and ack counter */
 	sb_info->sb_ack = 0;
-	OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
+	OSAL_MEMSET(sb_info->sb_virt, 0, sb_info->sb_size);
 
 	if (IS_VF(p_hwfn->p_dev)) {
 		ecore_vf_set_sb_info(p_hwfn, sb_id, OSAL_NULL);
@@ -1687,11 +1854,9 @@ static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
 	void *p_virt;
 
 	/* SB struct */
-	p_sb =
-	    OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
-		       sizeof(*p_sb));
+	p_sb = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sb));
 	if (!p_sb) {
-		DP_NOTICE(p_hwfn, true,
+		DP_NOTICE(p_hwfn, false,
 			  "Failed to allocate `struct ecore_sb_info'\n");
 		return ECORE_NOMEM;
 	}
@@ -1700,7 +1865,7 @@ static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
 	p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys,
 					 SB_ALIGNED_SIZE(p_hwfn));
 	if (!p_virt) {
-		DP_NOTICE(p_hwfn, true, "Failed to allocate status block\n");
+		DP_NOTICE(p_hwfn, false, "Failed to allocate status block\n");
 		OSAL_FREE(p_hwfn->p_dev, p_sb);
 		return ECORE_NOMEM;
 	}
@@ -1710,7 +1875,7 @@ static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
 	ecore_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info,
 			  p_virt, p_phys, ECORE_SP_SB_ID);
 
-	OSAL_MEMSET(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));
+	p_sb->pi_info_arr_size = PIS_PER_SB;
 
 	return ECORE_SUCCESS;
 }
@@ -1725,14 +1890,14 @@ enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn,
 	u8 pi;
 
 	/* Look for a free index */
-	for (pi = 0; pi < OSAL_ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
+	for (pi = 0; pi < p_sp_sb->pi_info_arr_size; pi++) {
 		if (p_sp_sb->pi_info_arr[pi].comp_cb != OSAL_NULL)
 			continue;
 
 		p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
 		p_sp_sb->pi_info_arr[pi].cookie = cookie;
 		*sb_idx = pi;
-		*p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
+		*p_fw_cons = &p_sp_sb->sb_info.sb_pi_array[pi];
 		rc = ECORE_SUCCESS;
 		break;
 	}
@@ -1821,15 +1986,6 @@ ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
 			  enum ecore_int_mode int_mode)
 {
 	enum _ecore_status_t rc = ECORE_SUCCESS;
-	u32 tmp;
-
-	/* @@@tmp - Starting with MFW 8.2.1.0 we've started hitting AVS stop
-	 * attentions. Since we're waiting for BRCM answer regarding this
-	 * attention, in the meanwhile we simply mask it.
-	 */
-	tmp = ecore_rd(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0);
-	tmp &= ~0x800;
-	ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0, tmp);
 
 	ecore_int_igu_enable_attn(p_hwfn, p_ptt);
 
@@ -1869,10 +2025,9 @@ static void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
 				     bool cleanup_set,
 				     u16 opaque_fid)
 {
-	u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
-	u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;
-	u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
-	u8 type = 0; /* FIXME MichalS type??? */
+	u32 data = 0, cmd_ctrl = 0, sb_bit, sb_bit_addr, pxp_addr;
+	u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH, val;
+	u8 type = 0;
 
 	OSAL_BUILD_BUG_ON((IGU_REG_CLEANUP_STATUS_4 -
 			   IGU_REG_CLEANUP_STATUS_0) != 0x200);
@@ -1887,6 +2042,7 @@ static void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
 	SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);
 
 	/* Set the control register */
+	pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;
 	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
 	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
 	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);
@@ -1958,9 +2114,11 @@ void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
 				       igu_sb_id);
 
 	/* Clear the CAU for the SB */
-	for (pi = 0; pi < 12; pi++)
+	for (pi = 0; pi < PIS_PER_SB; pi++)
 		ecore_wr(p_hwfn, p_ptt,
-			 CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0);
+			 CAU_REG_PI_MEMORY +
+			 (igu_sb_id * PIS_PER_SB + pi) * 4,
+			 0);
 }
 
 void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
@@ -2530,7 +2688,8 @@ enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
 	rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
 				 sb_id * sizeof(u64),
-				 (u64)(osal_uintptr_t)&sb_entry, 2, 0);
+				 (u64)(osal_uintptr_t)&sb_entry, 2,
+				 OSAL_NULL /* default parameters */);
 	if (rc != ECORE_SUCCESS) {
 		DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
 		return rc;
 	}
@@ -2543,8 +2702,8 @@ enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
 
 	rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
 				 (u64)(osal_uintptr_t)&sb_entry,
-				 CAU_REG_SB_VAR_MEMORY +
-				 sb_id * sizeof(u64), 2, 0);
+				 CAU_REG_SB_VAR_MEMORY + sb_id * sizeof(u64), 2,
+				 OSAL_NULL /* default parameters */);
 	if (rc != ECORE_SUCCESS) {
 		DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc);
 		return rc;
@@ -2559,12 +2718,12 @@ enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
 					  struct ecore_sb_info_dbg *p_info)
 {
 	u16 sbid = p_sb->igu_sb_id;
-	int i;
+	u32 i;
 
 	if (IS_VF(p_hwfn->p_dev))
 		return ECORE_INVAL;
 
-	if (sbid > NUM_OF_SBS(p_hwfn->p_dev))
+	if (sbid >= NUM_OF_SBS(p_hwfn->p_dev))
 		return ECORE_INVAL;
 
 	p_info->igu_prod = ecore_rd(p_hwfn, p_ptt,
@@ -2575,7 +2734,40 @@ enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
 	for (i = 0; i < PIS_PER_SB; i++)
 		p_info->pi[i] = (u16)ecore_rd(p_hwfn, p_ptt,
 					      CAU_REG_PI_MEMORY +
-					      sbid * 4 * PIS_PER_SB + i * 4);
+					      sbid * 4 * PIS_PER_SB +
+					      i * 4);
 
 	return ECORE_SUCCESS;
 }
+
+void ecore_pf_flr_igu_cleanup(struct ecore_hwfn *p_hwfn)
+{
+	struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
+	struct ecore_ptt *p_dpc_ptt = ecore_get_reserved_ptt(p_hwfn,
+							     RESERVED_PTT_DPC);
+	int i;
+
+	/* Do not reorder the following cleanup sequence */
+	/* Ack all attentions */
+	ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ACK_BITS, 0xfff);
+
+	/* Clear driver attention */
+	ecore_wr(p_hwfn, p_dpc_ptt,
+		 ((p_hwfn->rel_pf_id << 3) + MISC_REG_AEU_GENERAL_ATTN_0), 0);
+
+	/* Clear per-PF IGU registers to restore them as if the IGU
+	 * was reset for this PF
+	 */
+	ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
+	ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
+	ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
+
+	/* Execute IGU clean up */
+	ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_FUNCTIONAL_CLEANUP, 1);
+
+	/* Clear Stats */
+	ecore_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_OF_INTA_ASSERTED, 0);
+
+	for (i = 0; i < IGU_REG_PBA_STS_PF_SIZE; i++)
+		ecore_wr(p_hwfn, p_ptt, IGU_REG_PBA_STS_PF + i * 4, 0);
+}
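
The heart of this diff is the ordered doorbell-overflow recovery sequence spread across ecore_db_rec_flush_queue() and ecore_db_rec_handler(). The standalone C sketch below replays that sequence in isolation; the plain variables standing in for the DORQ registers, the delay_us() helper and the replay_doorbells() stub are illustrative assumptions, not qede/ecore API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DB_REC_COUNT	1000	/* poll attempts, as ECORE_DB_REC_COUNT */
#define DB_REC_INTERVAL	100	/* usec between polls */

/* Illustrative stand-ins for the DORQ registers and OSAL helpers */
static uint32_t usage_cnt;	/* models DORQ_REG_PF_USAGE_CNT */
static uint32_t ovfl_sticky;	/* models DORQ_REG_PF_OVFL_STICKY */
static uint32_t force_abort;	/* models DORQ_REG_DPM_FORCE_ABORT */

static void delay_us(uint32_t us) { (void)us; /* busy-wait elided */ }

static void replay_doorbells(bool real_deal)
{
	printf("replaying doorbells (%s)\n", real_deal ? "real deal" : "once");
}

/* Mirrors ecore_db_rec_flush_queue(): poll the per-PF doorbell usage
 * counter until the queue drains, so a half-transferred EDPM cannot be
 * glued to a replayed doorbell later on.
 */
static int db_rec_flush_queue(void)
{
	uint32_t count = DB_REC_COUNT;
	uint32_t usage = 1;

	while (count-- && usage) {
		usage = usage_cnt;	/* ecore_rd(DORQ_REG_PF_USAGE_CNT) */
		delay_us(DB_REC_INTERVAL);
	}
	return usage ? -1 : 0;		/* -1: queue failed to drain */
}

/* Mirrors ecore_db_rec_handler(): the ordered overflow recovery flow. */
static int db_rec_handler(bool edpm_enabled)
{
	if (!ovfl_sticky) {
		/* No overflow pending on this PF; replay at most once. */
		replay_doorbells(false);
		return 0;
	}

	/* 1. Drain in-flight EDPM doorbells before touching the sticky bit */
	if (edpm_enabled && db_rec_flush_queue())
		return -1;

	/* 2. Abort any pending (E)DPMs - they may never arrive */
	force_abort = 0x1;

	/* 3. Release the sticky bit: stop silently dropping all doorbells */
	ovfl_sticky = 0x0;

	/* 4. Re-send the last known doorbell of every doorbelling entity */
	replay_doorbells(true);
	return 0;
}

int main(void)
{
	ovfl_sticky = 1;	/* pretend the DORQ attention fired */
	return db_rec_handler(true);
}

The ordering is the point: the queue is drained and pending DPMs are force-aborted before the sticky indication is released, so a replayed doorbell can never be interpreted as the continuation of a half-dropped EDPM.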