2 * Copyright (c) 2016 QLogic Corporation.
6 * See LICENSE.qede_pmd for copyright and licensing details.
11 #include "ecore_spq.h"
13 #include "ecore_gtt_reg_addr.h"
14 #include "ecore_init_ops.h"
15 #include "ecore_rt_defs.h"
16 #include "ecore_int.h"
19 #include "ecore_sriov.h"
21 #include "ecore_hw_defs.h"
22 #include "ecore_hsi_common.h"
23 #include "ecore_mcp.h"
24 #include "ecore_attn_values.h"
26 struct ecore_pi_info {
27 ecore_int_comp_cb_t comp_cb;
28 void *cookie; /* Will be sent to the compl cb function */
31 struct ecore_sb_sp_info {
32 struct ecore_sb_info sb_info;
33 /* per protocol index data */
34 struct ecore_pi_info pi_info_arr[PIS_PER_SB];
37 enum ecore_attention_type {
39 ECORE_ATTN_TYPE_PARITY,
42 #define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
43 ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)
45 struct aeu_invert_reg_bit {
48 #define ATTENTION_PARITY (1 << 0)
50 #define ATTENTION_LENGTH_MASK (0x00000ff0)
51 #define ATTENTION_LENGTH_SHIFT (4)
52 #define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \
53 ATTENTION_LENGTH_SHIFT)
54 #define ATTENTION_SINGLE (1 << ATTENTION_LENGTH_SHIFT)
55 #define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY)
56 #define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \
59 /* Multiple bits start with this offset */
60 #define ATTENTION_OFFSET_MASK (0x000ff000)
61 #define ATTENTION_OFFSET_SHIFT (12)
63 #define ATTENTION_CLEAR_ENABLE (1 << 28)
64 #define ATTENTION_FW_DUMP (1 << 29)
65 #define ATTENTION_PANIC_DUMP (1 << 30)
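/* Illustrative note (an editorial sketch, not taken from the HSI): the
 * 'flags' word of an AEU bit descriptor packs a parity flag, a bit-field
 * length and an optional naming offset. E.g. a descriptor covering 8
 * consecutive bits whose printed names start at offset 1 is built as
 * (8 << ATTENTION_LENGTH_SHIFT) | (1 << ATTENTION_OFFSET_SHIFT), as done
 * for one of the multi-bit entries in aeu_descs[] below.
 */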
68 /* Callback to call if attention will be triggered */
69 enum _ecore_status_t (*cb)(struct ecore_hwfn *p_hwfn);
71 enum block_id block_index;
74 struct aeu_invert_reg {
75 struct aeu_invert_reg_bit bits[32];
78 #define MAX_ATTN_GRPS (8)
79 #define NUM_ATTN_REGS (9)
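/* Layout assumed throughout this file: the AEU exposes NUM_ATTN_REGS (9)
 * 32-bit "after invert" registers, and each bit can be routed to one of
 * MAX_ATTN_GRPS (8) output groups through per-group enable registers;
 * aeu_descs[] below mirrors the same 9 x 32-bit arrangement.
 */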
81 static enum _ecore_status_t ecore_mcp_attn_cb(struct ecore_hwfn *p_hwfn)
83 u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);
85 DP_INFO(p_hwfn->p_dev, "MCP_REG_CPU_STATE: %08x - Masking...\n", tmp);
86 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK, 0xffffffff);
91 #define ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK (0x3c000)
92 #define ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT (14)
93 #define ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK (0x03fc0)
94 #define ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT (6)
95 #define ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK (0x00020)
96 #define ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT (5)
97 #define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK (0x0001e)
98 #define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT (1)
99 #define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK (0x1)
#define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_SHIFT (0)
101 #define ECORE_PSWHST_ATTENTION_VF_DISABLED (0x1)
102 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS (0x1)
103 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK (0x1)
104 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT (0)
105 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK (0x1e)
106 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT (1)
107 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK (0x20)
108 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT (5)
109 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK (0x3fc0)
110 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT (6)
111 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK (0x3c000)
112 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT (14)
113 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK (0x3fc0000)
114 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT (18)
115 static enum _ecore_status_t ecore_pswhst_attn_cb(struct ecore_hwfn *p_hwfn)
118 ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
119 PSWHST_REG_VF_DISABLED_ERROR_VALID);
121 /* Disabled VF access */
122 if (tmp & ECORE_PSWHST_ATTENTION_VF_DISABLED) {
125 addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
126 PSWHST_REG_VF_DISABLED_ERROR_ADDRESS);
127 data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
128 PSWHST_REG_VF_DISABLED_ERROR_DATA);
129 DP_INFO(p_hwfn->p_dev,
130 "PF[0x%02x] VF [0x%02x] [Valid 0x%02x] Client [0x%02x]"
131 " Write [0x%02x] Addr [0x%08x]\n",
132 (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK)
133 >> ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT),
134 (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK)
135 >> ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT),
137 ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK) >>
138 ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT),
140 ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK) >>
141 ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT),
143 ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK) >>
			 ECORE_PSWHST_ATTENTION_DISABLED_WRITE_SHIFT),
148 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
149 PSWHST_REG_INCORRECT_ACCESS_VALID);
150 if (tmp & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS) {
151 u32 addr, data, length;
153 addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
154 PSWHST_REG_INCORRECT_ACCESS_ADDRESS);
155 data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
156 PSWHST_REG_INCORRECT_ACCESS_DATA);
157 length = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
158 PSWHST_REG_INCORRECT_ACCESS_LENGTH);
160 DP_INFO(p_hwfn->p_dev,
161 "Incorrect access to %08x of length %08x - PF [%02x]"
162 " VF [%04x] [valid %02x] client [%02x] write [%02x]"
163 " Byte-Enable [%04x] [%08x]\n",
166 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK) >>
167 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT),
169 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK) >>
170 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT),
172 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK) >>
173 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT),
175 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK) >>
176 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT),
178 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK) >>
179 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT),
181 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK) >>
182 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT),
186 /* TODO - We know 'some' of these are legal due to virtualization,
187 * but is it true for all of them?
189 return ECORE_SUCCESS;
192 #define ECORE_GRC_ATTENTION_VALID_BIT (1 << 0)
193 #define ECORE_GRC_ATTENTION_ADDRESS_MASK (0x7fffff << 0)
194 #define ECORE_GRC_ATTENTION_RDWR_BIT (1 << 23)
195 #define ECORE_GRC_ATTENTION_MASTER_MASK (0xf << 24)
196 #define ECORE_GRC_ATTENTION_MASTER_SHIFT (24)
197 #define ECORE_GRC_ATTENTION_PF_MASK (0xf)
198 #define ECORE_GRC_ATTENTION_VF_MASK (0xff << 4)
199 #define ECORE_GRC_ATTENTION_VF_SHIFT (4)
200 #define ECORE_GRC_ATTENTION_PRIV_MASK (0x3 << 14)
201 #define ECORE_GRC_ATTENTION_PRIV_SHIFT (14)
202 #define ECORE_GRC_ATTENTION_PRIV_VF (0)
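/* Interpretation sketch based on the masks above: DATA_0 carries the
 * timed-out dword address (bits 0-22, hence the '<< 2' when printing a byte
 * address), a read/write flag (bit 23) and the master id (bits 24-27);
 * DATA_1 carries the PF id (bits 0-3), VF id (bits 4-11) and the
 * privilege/VF indication (bits 14-15).
 */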
203 static const char *grc_timeout_attn_master_to_str(u8 master)
231 static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn)
235 /* We've already cleared the timeout interrupt register, so we learn
236 * of interrupts via the validity register
238 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
239 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
240 if (!(tmp & ECORE_GRC_ATTENTION_VALID_BIT))
243 /* Read the GRC timeout information */
244 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
245 GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0);
246 tmp2 = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
247 GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);
249 DP_INFO(p_hwfn->p_dev,
250 "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s]"
251 " [PF: %02x %s %02x]\n",
253 (tmp & ECORE_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from",
254 (tmp & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2,
255 grc_timeout_attn_master_to_str((tmp &
256 ECORE_GRC_ATTENTION_MASTER_MASK) >>
257 ECORE_GRC_ATTENTION_MASTER_SHIFT),
258 (tmp2 & ECORE_GRC_ATTENTION_PF_MASK),
259 (((tmp2 & ECORE_GRC_ATTENTION_PRIV_MASK) >>
260 ECORE_GRC_ATTENTION_PRIV_SHIFT) ==
261 ECORE_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant:)",
262 (tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >>
263 ECORE_GRC_ATTENTION_VF_SHIFT);
	/* Regardless of anything else, clear the validity bit */
267 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
268 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);
269 return ECORE_SUCCESS;
272 #define ECORE_PGLUE_ATTENTION_VALID (1 << 29)
273 #define ECORE_PGLUE_ATTENTION_RD_VALID (1 << 26)
274 #define ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf << 20)
275 #define ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20)
276 #define ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID (1 << 19)
277 #define ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff << 24)
278 #define ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24)
279 #define ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR (1 << 21)
280 #define ECORE_PGLUE_ATTENTION_DETAILS2_BME (1 << 22)
281 #define ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN (1 << 23)
282 #define ECORE_PGLUE_ATTENTION_ICPL_VALID (1 << 23)
283 #define ECORE_PGLUE_ATTENTION_ZLR_VALID (1 << 25)
284 #define ECORE_PGLUE_ATTENTION_ILT_VALID (1 << 23)
285 static enum _ecore_status_t ecore_pglub_rbc_attn_cb(struct ecore_hwfn *p_hwfn)
289 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
290 PGLUE_B_REG_TX_ERR_WR_DETAILS2);
291 if (tmp & ECORE_PGLUE_ATTENTION_VALID) {
292 u32 addr_lo, addr_hi, details;
294 addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
295 PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
296 addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
297 PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
298 details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
299 PGLUE_B_REG_TX_ERR_WR_DETAILS);
			"Illegal write by chip to [%08x:%08x] blocked. "
303 "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]"
304 " Details2 %08x [Was_error %02x BME deassert %02x"
305 " FID_enable deassert %02x]\n",
306 addr_hi, addr_lo, details,
308 ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
309 ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
311 ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
312 ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
313 (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID)
315 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1
317 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 :
319 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1
323 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
324 PGLUE_B_REG_TX_ERR_RD_DETAILS2);
325 if (tmp & ECORE_PGLUE_ATTENTION_RD_VALID) {
326 u32 addr_lo, addr_hi, details;
328 addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
329 PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
330 addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
331 PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
332 details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
333 PGLUE_B_REG_TX_ERR_RD_DETAILS);
336 "Illegal read by chip from [%08x:%08x] blocked."
337 " Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]"
338 " Details2 %08x [Was_error %02x BME deassert %02x"
339 " FID_enable deassert %02x]\n",
340 addr_hi, addr_lo, details,
342 ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
343 ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
345 ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
346 ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
347 (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID)
349 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1
351 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 :
353 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1
357 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
358 PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
359 if (tmp & ECORE_PGLUE_ATTENTION_ICPL_VALID)
360 DP_INFO(p_hwfn, "ICPL error - %08x\n", tmp);
362 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
363 PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
364 if (tmp & ECORE_PGLUE_ATTENTION_ZLR_VALID) {
365 u32 addr_hi, addr_lo;
367 addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
368 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
369 addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
370 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);
		DP_INFO(p_hwfn, "ZLR error - %08x [Address %08x:%08x]\n",
373 tmp, addr_hi, addr_lo);
376 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
377 PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
378 if (tmp & ECORE_PGLUE_ATTENTION_ILT_VALID) {
379 u32 addr_hi, addr_lo, details;
381 addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
382 PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
383 addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
384 PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
385 details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
386 PGLUE_B_REG_VF_ILT_ERR_DETAILS);
389 "ILT error - Details %08x Details2 %08x"
390 " [Address %08x:%08x]\n",
391 details, tmp, addr_hi, addr_lo);
394 /* Clear the indications */
395 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
396 PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));
398 return ECORE_SUCCESS;
401 static enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn)
403 DP_NOTICE(p_hwfn, false, "FW assertion!\n");
405 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FW_ASSERT);
410 static enum _ecore_status_t
411 ecore_general_attention_35(struct ecore_hwfn *p_hwfn)
413 DP_INFO(p_hwfn, "General attention 35!\n");
415 return ECORE_SUCCESS;
418 #define ECORE_DORQ_ATTENTION_REASON_MASK (0xfffff)
419 #define ECORE_DORQ_ATTENTION_OPAQUE_MASK (0xffff)
420 #define ECORE_DORQ_ATTENTION_SIZE_MASK (0x7f)
421 #define ECORE_DORQ_ATTENTION_SIZE_SHIFT (16)
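/* Note (deduced from the "Size [bytes]" label below): the drop-details size
 * field is expressed in dwords, so the handler multiplies it by 4 - e.g. a
 * raw size of 0x10 is reported as 0x40 bytes.
 */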
423 static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn)
427 reason = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, DORQ_REG_DB_DROP_REASON) &
428 ECORE_DORQ_ATTENTION_REASON_MASK;
430 u32 details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
431 DORQ_REG_DB_DROP_DETAILS);
433 DP_INFO(p_hwfn->p_dev,
434 "DORQ db_drop: address 0x%08x Opaque FID 0x%04x"
435 " Size [bytes] 0x%08x Reason: 0x%08x\n",
436 ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
437 DORQ_REG_DB_DROP_DETAILS_ADDRESS),
438 (u16)(details & ECORE_DORQ_ATTENTION_OPAQUE_MASK),
439 ((details & ECORE_DORQ_ATTENTION_SIZE_MASK) >>
440 ECORE_DORQ_ATTENTION_SIZE_SHIFT) * 4, reason);
446 static enum _ecore_status_t ecore_tm_attn_cb(struct ecore_hwfn *p_hwfn)
449 if (CHIP_REV_IS_EMUL_B0(p_hwfn->p_dev)) {
450 u32 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
453 if (val & ~(TM_REG_INT_STS_1_PEND_TASK_SCAN |
454 TM_REG_INT_STS_1_PEND_CONN_SCAN))
457 if (val & (TM_REG_INT_STS_1_PEND_TASK_SCAN |
458 TM_REG_INT_STS_1_PEND_CONN_SCAN))
			   "TM attention on emulation - most likely"
			   " the result of clock ratios\n");
462 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1);
463 val |= TM_REG_INT_MASK_1_PEND_CONN_SCAN |
464 TM_REG_INT_MASK_1_PEND_TASK_SCAN;
465 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1, val);
467 return ECORE_SUCCESS;
/* Note that aeu_invert_reg must be defined in the same bit order as the HW */
475 static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
477 { /* After Invert 1 */
478 {"GPIO0 function%d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
484 { /* After Invert 2 */
485 {"PGLUE config_space", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
486 {"PGLUE misc_flr", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
487 {"PGLUE B RBC", ATTENTION_PAR_INT, ecore_pglub_rbc_attn_cb,
489 {"PGLUE misc_mctp", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
490 {"Flash event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
491 {"SMB event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
492 {"Main Power", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
494 (8 << ATTENTION_LENGTH_SHIFT) | (1 << ATTENTION_OFFSET_SHIFT),
495 OSAL_NULL, MAX_BLOCK_ID},
496 {"PCIE glue/PXP VPD %d", (16 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
502 { /* After Invert 3 */
503 {"General Attention %d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
509 { /* After Invert 4 */
510 {"General Attention 32", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE,
511 ecore_fw_assertion, MAX_BLOCK_ID},
512 {"General Attention %d",
513 (2 << ATTENTION_LENGTH_SHIFT) | (33 << ATTENTION_OFFSET_SHIFT),
514 OSAL_NULL, MAX_BLOCK_ID},
515 {"General Attention 35", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE,
516 ecore_general_attention_35, MAX_BLOCK_ID},
517 {"CNIG port %d", (4 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
519 {"MCP CPU", ATTENTION_SINGLE, ecore_mcp_attn_cb, MAX_BLOCK_ID},
520 {"MCP Watchdog timer", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
521 {"MCP M2P", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
522 {"AVS stop status ready", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
523 {"MSTAT", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
524 {"MSTAT per-path", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
525 {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
527 {"NIG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NIG},
528 {"BMB/OPTE/MCP", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BMB},
529 {"BTB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BTB},
530 {"BRB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BRB},
531 {"PRS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRS},
536 { /* After Invert 5 */
537 {"SRC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_SRC},
538 {"PB Client1", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB1},
539 {"PB Client2", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB2},
540 {"RPB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RPB},
541 {"PBF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF},
542 {"QM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_QM},
543 {"TM", ATTENTION_PAR_INT, ecore_tm_attn_cb, BLOCK_TM},
544 {"MCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MCM},
545 {"MSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSDM},
546 {"MSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSEM},
547 {"PCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PCM},
548 {"PSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSDM},
549 {"PSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSEM},
550 {"TCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCM},
551 {"TSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSDM},
552 {"TSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSEM},
557 { /* After Invert 6 */
558 {"UCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_UCM},
559 {"USDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USDM},
560 {"USEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USEM},
561 {"XCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XCM},
562 {"XSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSDM},
563 {"XSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSEM},
564 {"YCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YCM},
565 {"YSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSDM},
566 {"YSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSEM},
567 {"XYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XYLD},
568 {"TMLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TMLD},
569 {"MYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MULD},
570 {"YULD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YULD},
571 {"DORQ", ATTENTION_PAR_INT, ecore_dorq_attn_cb, BLOCK_DORQ},
572 {"DBG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DBG},
573 {"IPC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IPC},
578 { /* After Invert 7 */
579 {"CCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CCFC},
580 {"CDU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CDU},
581 {"DMAE", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DMAE},
582 {"IGU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IGU},
583 {"ATC", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
584 {"CAU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CAU},
585 {"PTU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PTU},
586 {"PRM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRM},
587 {"TCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCFC},
588 {"RDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RDIF},
589 {"TDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TDIF},
590 {"RSS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RSS},
591 {"MISC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISC},
592 {"MISCS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISCS},
593 {"PCIE", ATTENTION_PAR, OSAL_NULL, BLOCK_PCIE},
594 {"Vaux PCI core", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
595 {"PSWRQ", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ},
600 { /* After Invert 8 */
601 {"PSWRQ (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ2},
602 {"PSWWR", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR},
603 {"PSWWR (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR2},
604 {"PSWRD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD},
605 {"PSWRD (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD2},
606 {"PSWHST", ATTENTION_PAR_INT, ecore_pswhst_attn_cb, BLOCK_PSWHST},
607 {"PSWHST (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWHST2},
608 {"GRC", ATTENTION_PAR_INT, ecore_grc_attn_cb, BLOCK_GRC},
609 {"CPMU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CPMU},
610 {"NCSI", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NCSI},
611 {"MSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
612 {"PSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
613 {"TSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
614 {"USEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
615 {"XSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
616 {"YSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
617 {"pxp_misc_mps", ATTENTION_PAR, OSAL_NULL, BLOCK_PGLCS},
618 {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
619 {"PERST_B assertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
620 {"PERST_B deassertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
621 {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
627 { /* After Invert 9 */
628 {"MCP Latched memory", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
629 {"MCP Latched scratchpad cache", ATTENTION_SINGLE, OSAL_NULL,
631 {"MCP Latched ump_tx", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
632 {"MCP Latched scratchpad", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
633 {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
640 #define ATTN_STATE_BITS (0xfff)
641 #define ATTN_BITS_MASKABLE (0x3ff)
642 struct ecore_sb_attn_info {
643 /* Virtual & Physical address of the SB */
644 struct atten_status_block *sb_attn;
647 /* Last seen running index */
650 /* A mask of the AEU bits resulting in a parity error */
651 u32 parity_mask[NUM_ATTN_REGS];
653 /* A pointer to the attention description structure */
654 struct aeu_invert_reg *p_aeu_desc;
	/* Previously asserted attentions that have not yet been deasserted */
659 /* Cleanup address for the link's general hw attention */
663 static u16 ecore_attn_update_idx(struct ecore_hwfn *p_hwfn,
664 struct ecore_sb_attn_info *p_sb_desc)
668 OSAL_MMIOWB(p_hwfn->p_dev);
670 index = OSAL_LE16_TO_CPU(p_sb_desc->sb_attn->sb_index);
671 if (p_sb_desc->index != index) {
672 p_sb_desc->index = index;
673 rc = ECORE_SB_ATT_IDX;
676 OSAL_MMIOWB(p_hwfn->p_dev);
682 * @brief ecore_int_assertion - handles asserted attention bits
685 * @param asserted_bits newly asserted bits
686 * @return enum _ecore_status_t
688 static enum _ecore_status_t ecore_int_assertion(struct ecore_hwfn *p_hwfn,
691 struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
694 /* Mask the source of the attention in the IGU */
695 igu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
696 IGU_REG_ATTENTION_ENABLE);
697 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
698 igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
699 igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
700 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);
702 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
703 "inner known ATTN state: 0x%04x --> 0x%04x\n",
704 sb_attn_sw->known_attn,
705 sb_attn_sw->known_attn | asserted_bits);
706 sb_attn_sw->known_attn |= asserted_bits;
708 /* Handle MCP events */
709 if (asserted_bits & 0x100) {
710 ecore_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
711 /* Clean the MCP attention */
712 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
713 sb_attn_sw->mfw_attn_addr, 0);
	/* FIXME - this will change once we have proper GTT definitions */
717 DIRECT_REG_WR(p_hwfn,
718 (u8 OSAL_IOMEM *) p_hwfn->regview +
719 GTT_BAR0_MAP_REG_IGU_CMD +
720 ((IGU_CMD_ATTN_BIT_SET_UPPER -
721 IGU_CMD_INT_ACK_BASE) << 3), (u32)asserted_bits);
723 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "set cmd IGU: 0x%04x\n",
726 return ECORE_SUCCESS;
729 static void ecore_int_deassertion_print_bit(struct ecore_hwfn *p_hwfn,
730 struct attn_hw_reg *p_reg_desc,
731 struct attn_hw_block *p_block,
732 enum ecore_attention_type type,
737 const char **description;
739 if (type == ECORE_ATTN_TYPE_ATTN)
740 description = p_block->int_desc;
742 description = p_block->prty_desc;
745 for (j = 0; j < p_reg_desc->num_of_bits; j++) {
746 if (val & (1 << j)) {
748 DP_NOTICE(p_hwfn, false,
749 "%s (%s): %s [reg %d [0x%08x], bit %d]%s\n",
751 type == ECORE_ATTN_TYPE_ATTN ? "Interrupt" :
753 description[p_reg_desc->bit_attn_idx[j]],
755 p_reg_desc->sts_addr, j,
756 (mask & (1 << j)) ? " [MASKED]" : "");
758 DP_NOTICE(p_hwfn->p_dev, false,
759 "%s (%s): [reg %d [0x%08x], bit %d]%s\n",
761 type == ECORE_ATTN_TYPE_ATTN ? "Interrupt" :
764 p_reg_desc->sts_addr, j,
765 (mask & (1 << j)) ? " [MASKED]" : "");
772 * @brief ecore_int_deassertion_aeu_bit - handles the effects of a single
773 * cause of the attention
776 * @param p_aeu - descriptor of an AEU bit which caused the attention
777 * @param aeu_en_reg - register offset of the AEU enable reg. which configured
778 * this bit to this group.
779 * @param bit_index - index of this bit in the aeu_en_reg
781 * @return enum _ecore_status_t
783 static enum _ecore_status_t
784 ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn,
785 struct aeu_invert_reg_bit *p_aeu,
786 u32 aeu_en_reg, u32 bitmask)
788 enum _ecore_status_t rc = ECORE_INVAL;
	u32 interrupts[20]; /* TODO - change into an HSI define once supplied */
	OSAL_MEMSET(interrupts, 0, sizeof(u32) * 20); /* FIXME - real size */
797 DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
798 p_aeu->bit_name, bitmask);
800 /* Call callback before clearing the interrupt status */
802 DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
804 rc = p_aeu->cb(p_hwfn);
807 /* Print HW block interrupt registers */
808 if (p_aeu->block_index != MAX_BLOCK_ID)
809 DP_NOTICE(p_hwfn->p_dev, false, "[block_id %d type %d]\n",
810 p_aeu->block_index, ATTN_TYPE_INTERRUPT);
812 /* Reach assertion if attention is fatal */
813 if (rc != ECORE_SUCCESS) {
814 DP_NOTICE(p_hwfn, true, "`%s': Fatal attention\n",
817 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
820 /* Prevent this Attention from being asserted in the future */
821 if (p_aeu->flags & ATTENTION_CLEAR_ENABLE) {
824 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
825 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & mask));
826 DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n",
830 if (p_aeu->flags & (ATTENTION_FW_DUMP | ATTENTION_PANIC_DUMP)) {
831 /* @@@TODO - what to dump? <yuvalmin 04/02/13> */
832 DP_ERR(p_hwfn->p_dev, "`%s' - Dumps aren't implemented yet\n",
834 return ECORE_NOTIMPL;
841 * @brief ecore_int_deassertion_parity - handle a single parity AEU source
844 * @param p_aeu - descriptor of an AEU bit which caused the
848 static void ecore_int_deassertion_parity(struct ecore_hwfn *p_hwfn,
849 struct aeu_invert_reg_bit *p_aeu,
852 u32 block_id = p_aeu->block_index;
854 DP_INFO(p_hwfn->p_dev, "%s[%d] parity attention is set\n",
855 p_aeu->bit_name, bit_index);
857 if (block_id != MAX_BLOCK_ID)
860 /* In A0, there's a single parity bit for several blocks */
861 if (block_id == BLOCK_BTB) {
862 DP_NOTICE(p_hwfn->p_dev, false, "[block_id %d type %d]\n",
863 BLOCK_OPTE, ATTN_TYPE_PARITY);
864 DP_NOTICE(p_hwfn->p_dev, false, "[block_id %d type %d]\n",
865 BLOCK_MCP, ATTN_TYPE_PARITY);
870 * @brief - handles deassertion of previously asserted attentions.
873 * @param deasserted_bits - newly deasserted bits
874 * @return enum _ecore_status_t
877 static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
880 struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
881 u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask;
882 bool b_parity = false;
884 enum _ecore_status_t rc = ECORE_SUCCESS;
886 /* Read the attention registers in the AEU */
887 for (i = 0; i < NUM_ATTN_REGS; i++) {
888 aeu_inv_arr[i] = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
889 MISC_REG_AEU_AFTER_INVERT_1_IGU +
891 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
892 "Deasserted bits [%d]: %08x\n", i, aeu_inv_arr[i]);
895 /* Handle parity attentions first */
896 for (i = 0; i < NUM_ATTN_REGS; i++) {
897 struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
898 u32 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
899 MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
902 u32 parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
904 /* Skip register in which no parity bit is currently set */
908 for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
909 struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
911 if ((p_bit->flags & ATTENTION_PARITY) &&
912 !!(parities & (1 << bit_idx))) {
913 ecore_int_deassertion_parity(p_hwfn, p_bit,
918 bit_idx += ATTENTION_LENGTH(p_bit->flags);
922 /* Find non-parity cause for attention and act */
923 for (k = 0; k < MAX_ATTN_GRPS; k++) {
924 struct aeu_invert_reg_bit *p_aeu;
926 /* Handle only groups whose attention is currently deasserted */
927 if (!(deasserted_bits & (1 << k)))
930 for (i = 0; i < NUM_ATTN_REGS; i++) {
931 u32 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
932 i * sizeof(u32) + k * sizeof(u32) * NUM_ATTN_REGS;
933 u32 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
934 u32 bits = aeu_inv_arr[i] & en;
936 /* Skip if no bit from this group is currently set */
940 /* Find all set bits from current register which belong
941 * to current group, making them responsible for the
942 * previous assertion.
944 for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
948 p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
			/* Parity-only bits were handled above; no need to handle them here */
951 if (p_aeu->flags == ATTENTION_PAR)
955 bit_len = ATTENTION_LENGTH(p_aeu->flags);
956 if (p_aeu->flags & ATTENTION_PAR_INT) {
962 bitmask = bits & (((1 << bit_len) - 1) << bit);
964 /* Handle source of the attention */
965 ecore_int_deassertion_aeu_bit(p_hwfn,
971 bit_idx += ATTENTION_LENGTH(p_aeu->flags);
976 /* Clear IGU indication for the deasserted bits */
	/* FIXME - this will change once we have proper GTT definitions */
978 DIRECT_REG_WR(p_hwfn,
979 (u8 OSAL_IOMEM *) p_hwfn->regview +
980 GTT_BAR0_MAP_REG_IGU_CMD +
981 ((IGU_CMD_ATTN_BIT_CLR_UPPER -
982 IGU_CMD_INT_ACK_BASE) << 3), ~((u32)deasserted_bits));
984 /* Unmask deasserted attentions in IGU */
985 aeu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
986 IGU_REG_ATTENTION_ENABLE);
987 aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
988 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);
990 /* Clear deassertion from inner state */
991 sb_attn_sw->known_attn &= ~deasserted_bits;
996 static enum _ecore_status_t ecore_int_attentions(struct ecore_hwfn *p_hwfn)
998 struct ecore_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
999 struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
1000 u16 index = 0, asserted_bits, deasserted_bits;
1001 enum _ecore_status_t rc = ECORE_SUCCESS;
1002 u32 attn_bits = 0, attn_acks = 0;
	/* Read current attention bits/acks - safeguard against attentions
	 * by guaranteeing work on a synchronized timeframe
1008 index = OSAL_LE16_TO_CPU(p_sb_attn->sb_index);
1009 attn_bits = OSAL_LE32_TO_CPU(p_sb_attn->atten_bits);
1010 attn_acks = OSAL_LE32_TO_CPU(p_sb_attn->atten_ack);
1011 } while (index != OSAL_LE16_TO_CPU(p_sb_attn->sb_index));
1012 p_sb_attn->sb_index = index;
	/* Attention / Deassertion are meaningful (and in correct state)
	 * only when they differ and are consistent with the known state -
	 * deassertion when there is a previous attention & a current ack,
	 * and assertion when there is a current attention with no previous
	 * attention
1019 asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
1020 ~p_sb_attn_sw->known_attn;
1021 deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
1022 p_sb_attn_sw->known_attn;
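	/* Worked example with illustrative values: attn_bits = 0x101,
	 * attn_acks = 0x001, known_attn = 0x001 gives asserted_bits = 0x100
	 * (new MCP attention) and deasserted_bits = 0x000; later, with
	 * attn_bits = 0x001, attn_acks = 0x101 and known_attn = 0x101, the
	 * same math yields asserted_bits = 0x000 and deasserted_bits = 0x100.
	 */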
1024 if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100))
1026 "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
1027 index, attn_bits, attn_acks, asserted_bits,
1028 deasserted_bits, p_sb_attn_sw->known_attn);
1029 else if (asserted_bits == 0x100)
1030 DP_INFO(p_hwfn, "MFW indication via attention\n");
1032 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1033 "MFW indication [deassertion]\n");
1035 if (asserted_bits) {
1036 rc = ecore_int_assertion(p_hwfn, asserted_bits);
1041 if (deasserted_bits)
1042 rc = ecore_int_deassertion(p_hwfn, deasserted_bits);
1047 static void ecore_sb_ack_attn(struct ecore_hwfn *p_hwfn,
1048 void OSAL_IOMEM *igu_addr, u32 ack_cons)
1050 struct igu_prod_cons_update igu_ack = { 0 };
1052 igu_ack.sb_id_and_flags =
1053 ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
1054 (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
1055 (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
1056 (IGU_SEG_ACCESS_ATTN <<
1057 IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
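	/* Rough picture of the command word built above (field positions come
	 * from the IGU_PROD_CONS_UPDATE_* HSI shifts): the attention consumer
	 * index goes into SB_INDEX, UPDATE_FLAG is set, the interrupt state is
	 * left as IGU_INT_NOP and the attention segment is selected through
	 * SEGMENT_ACCESS.
	 */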
1059 DIRECT_REG_WR(p_hwfn, igu_addr, igu_ack.sb_id_and_flags);
	/* Both segments (interrupts & acks) are written to the same address;
	 * need to guarantee all commands will be received (in-order) by HW.
1064 OSAL_MMIOWB(p_hwfn->p_dev);
1065 OSAL_BARRIER(p_hwfn->p_dev);
1068 void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)
1070 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)hwfn_cookie;
1071 struct ecore_pi_info *pi_info = OSAL_NULL;
1072 struct ecore_sb_attn_info *sb_attn;
1073 struct ecore_sb_info *sb_info;
1074 static int arr_size;
1078 DP_ERR(p_hwfn->p_dev, "DPC called - no hwfn!\n");
1082 if (!p_hwfn->p_sp_sb) {
1083 DP_ERR(p_hwfn->p_dev, "DPC called - no p_sp_sb\n");
1087 sb_info = &p_hwfn->p_sp_sb->sb_info;
1088 arr_size = OSAL_ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
1090 DP_ERR(p_hwfn->p_dev,
1091 "Status block is NULL - cannot ack interrupts\n");
1095 if (!p_hwfn->p_sb_attn) {
1096 DP_ERR(p_hwfn->p_dev, "DPC called - no p_sb_attn");
1099 sb_attn = p_hwfn->p_sb_attn;
1101 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
1102 p_hwfn, p_hwfn->my_id);
	/* Disable ack for the default status block. Required both for msix
	 * and for inta in non-mask mode; in inta it does no harm.
1107 ecore_sb_ack(sb_info, IGU_INT_DISABLE, 0);
1109 /* Gather Interrupts/Attentions information */
1110 if (!sb_info->sb_virt) {
1111 DP_ERR(p_hwfn->p_dev,
1112 "Interrupt Status block is NULL -"
1113 " cannot check for new interrupts!\n");
1115 u32 tmp_index = sb_info->sb_ack;
1116 rc = ecore_sb_update_sb_idx(sb_info);
1117 DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
1118 "Interrupt indices: 0x%08x --> 0x%08x\n",
1119 tmp_index, sb_info->sb_ack);
1122 if (!sb_attn || !sb_attn->sb_attn) {
1123 DP_ERR(p_hwfn->p_dev,
1124 "Attentions Status block is NULL -"
1125 " cannot check for new attentions!\n");
1127 u16 tmp_index = sb_attn->index;
1129 rc |= ecore_attn_update_idx(p_hwfn, sb_attn);
1130 DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
1131 "Attention indices: 0x%08x --> 0x%08x\n",
1132 tmp_index, sb_attn->index);
	/* Check if we expect interrupts at this time; if not, just ack them */
1136 if (!(rc & ECORE_SB_EVENT_MASK)) {
1137 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
	/* Check the validity of the DPC ptt. If invalid, ack interrupts and fail */
1142 if (!p_hwfn->p_dpc_ptt) {
1143 DP_NOTICE(p_hwfn->p_dev, true, "Failed to allocate PTT\n");
1144 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1148 if (rc & ECORE_SB_ATT_IDX)
1149 ecore_int_attentions(p_hwfn);
1151 if (rc & ECORE_SB_IDX) {
		/* Since we only looked at the SB index, it's possible that
		 * more than a single protocol-index on the SB has incremented.
1156 * Iterate over all configured protocol indices and check
1157 * whether something happened for each.
1159 for (pi = 0; pi < arr_size; pi++) {
1160 pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
1161 if (pi_info->comp_cb != OSAL_NULL)
1162 pi_info->comp_cb(p_hwfn, pi_info->cookie);
1166 if (sb_attn && (rc & ECORE_SB_ATT_IDX)) {
1167 /* This should be done before the interrupts are enabled,
1168 * since otherwise a new attention will be generated.
1170 ecore_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);
1173 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1176 static void ecore_int_sb_attn_free(struct ecore_hwfn *p_hwfn)
1178 struct ecore_sb_attn_info *p_sb = p_hwfn->p_sb_attn;
1183 if (p_sb->sb_attn) {
1184 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_sb->sb_attn,
1186 SB_ATTN_ALIGNED_SIZE(p_hwfn));
1188 OSAL_FREE(p_hwfn->p_dev, p_sb);
1191 static void ecore_int_sb_attn_setup(struct ecore_hwfn *p_hwfn,
1192 struct ecore_ptt *p_ptt)
1194 struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
1196 OSAL_MEMSET(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));
1199 sb_info->known_attn = 0;
1201 /* Configure Attention Status Block in IGU */
1202 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
1203 DMA_LO(p_hwfn->p_sb_attn->sb_phys));
1204 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
1205 DMA_HI(p_hwfn->p_sb_attn->sb_phys));
1208 static void ecore_int_sb_attn_init(struct ecore_hwfn *p_hwfn,
1209 struct ecore_ptt *p_ptt,
1210 void *sb_virt_addr, dma_addr_t sb_phy_addr)
1212 struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
1215 sb_info->sb_attn = sb_virt_addr;
1216 sb_info->sb_phys = sb_phy_addr;
1218 /* Set the pointer to the AEU descriptors */
1219 sb_info->p_aeu_desc = aeu_descs;
1221 /* Calculate Parity Masks */
1222 OSAL_MEMSET(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
1223 for (i = 0; i < NUM_ATTN_REGS; i++) {
1224 /* j is array index, k is bit index */
1225 for (j = 0, k = 0; k < 32; j++) {
1226 unsigned int flags = aeu_descs[i].bits[j].flags;
1228 if (flags & ATTENTION_PARITY)
1229 sb_info->parity_mask[i] |= 1 << k;
1231 k += ATTENTION_LENGTH(flags);
1233 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1234 "Attn Mask [Reg %d]: 0x%08x\n",
1235 i, sb_info->parity_mask[i]);
1238 /* Set the address of cleanup for the mcp attention */
1239 sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
1240 MISC_REG_AEU_GENERAL_ATTN_0;
1242 ecore_int_sb_attn_setup(p_hwfn, p_ptt);
1245 static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn,
1246 struct ecore_ptt *p_ptt)
1248 struct ecore_dev *p_dev = p_hwfn->p_dev;
1249 struct ecore_sb_attn_info *p_sb;
1250 dma_addr_t p_phys = 0;
1254 p_sb = OSAL_ALLOC(p_dev, GFP_KERNEL, sizeof(struct ecore_sb_attn_info));
1256 DP_NOTICE(p_dev, true,
1257 "Failed to allocate `struct ecore_sb_attn_info'");
1262 p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
1263 SB_ATTN_ALIGNED_SIZE(p_hwfn));
1265 DP_NOTICE(p_dev, true,
1266 "Failed to allocate status block (attentions)");
1267 OSAL_FREE(p_dev, p_sb);
1271 /* Attention setup */
1272 p_hwfn->p_sb_attn = p_sb;
1273 ecore_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);
1275 return ECORE_SUCCESS;
1278 /* coalescing timeout = timeset << (timer_res + 1) */
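/* Illustrative example, assuming the fixed timer resolution of 1 mentioned
 * in ecore_init_cau_sb_entry() below: for the default 24 Rx usecs the driver
 * programs timeset = 24 >> (1 + 1) = 6, and the resulting coalescing period
 * is 6 << (1 + 1) = 24 timer units again.
 */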
1279 #ifdef RTE_LIBRTE_QEDE_RX_COAL_US
1280 #define ECORE_CAU_DEF_RX_USECS RTE_LIBRTE_QEDE_RX_COAL_US
1282 #define ECORE_CAU_DEF_RX_USECS 24
1285 #ifdef RTE_LIBRTE_QEDE_TX_COAL_US
1286 #define ECORE_CAU_DEF_TX_USECS RTE_LIBRTE_QEDE_TX_COAL_US
1288 #define ECORE_CAU_DEF_TX_USECS 48
1291 void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn,
1292 struct cau_sb_entry *p_sb_entry,
1293 u8 pf_id, u16 vf_number, u8 vf_valid)
1295 struct ecore_dev *p_dev = p_hwfn->p_dev;
1298 OSAL_MEMSET(p_sb_entry, 0, sizeof(*p_sb_entry));
1300 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
1301 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
1302 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
1303 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
1304 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
	/* Set the timer resolution to a fixed value (= 1) */
1307 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0,
1308 ECORE_CAU_DEF_RX_TIMER_RES);
1309 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1,
1310 ECORE_CAU_DEF_TX_TIMER_RES);
1312 cau_state = CAU_HC_DISABLE_STATE;
1314 if (p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
1315 cau_state = CAU_HC_ENABLE_STATE;
1316 if (!p_dev->rx_coalesce_usecs) {
1317 p_dev->rx_coalesce_usecs = ECORE_CAU_DEF_RX_USECS;
1318 DP_INFO(p_dev, "Coalesce params rx-usecs=%u\n",
1319 p_dev->rx_coalesce_usecs);
1321 if (!p_dev->tx_coalesce_usecs) {
1322 p_dev->tx_coalesce_usecs = ECORE_CAU_DEF_TX_USECS;
1323 DP_INFO(p_dev, "Coalesce params tx-usecs=%u\n",
1324 p_dev->tx_coalesce_usecs);
1328 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
1329 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
1332 void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
1333 struct ecore_ptt *p_ptt,
1334 dma_addr_t sb_phys, u16 igu_sb_id,
1335 u16 vf_number, u8 vf_valid)
1337 struct cau_sb_entry sb_entry;
1339 ecore_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
1340 vf_number, vf_valid);
1342 if (p_hwfn->hw_init_done) {
1343 /* Wide-bus, initialize via DMAE */
1344 u64 phys_addr = (u64)sb_phys;
1346 ecore_dmae_host2grc(p_hwfn, p_ptt,
1347 (u64)(osal_uintptr_t)&phys_addr,
1348 CAU_REG_SB_ADDR_MEMORY +
1349 igu_sb_id * sizeof(u64), 2, 0);
1350 ecore_dmae_host2grc(p_hwfn, p_ptt,
1351 (u64)(osal_uintptr_t)&sb_entry,
1352 CAU_REG_SB_VAR_MEMORY +
1353 igu_sb_id * sizeof(u64), 2, 0);
1355 /* Initialize Status Block Address */
1356 STORE_RT_REG_AGG(p_hwfn,
1357 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
1358 igu_sb_id * 2, sb_phys);
1360 STORE_RT_REG_AGG(p_hwfn,
1361 CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
1362 igu_sb_id * 2, sb_entry);
1365 /* Configure pi coalescing if set */
1366 if (p_hwfn->p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
1367 u8 num_tc = 1; /* @@@TBD aelior ECORE_MULTI_COS */
1368 u8 timeset = p_hwfn->p_dev->rx_coalesce_usecs >>
1369 (ECORE_CAU_DEF_RX_TIMER_RES + 1);
1372 ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
1373 ECORE_COAL_RX_STATE_MACHINE, timeset);
1375 timeset = p_hwfn->p_dev->tx_coalesce_usecs >>
1376 (ECORE_CAU_DEF_TX_TIMER_RES + 1);
1378 for (i = 0; i < num_tc; i++) {
1379 ecore_int_cau_conf_pi(p_hwfn, p_ptt,
1380 igu_sb_id, TX_PI(i),
1381 ECORE_COAL_TX_STATE_MACHINE,
1387 void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
1388 struct ecore_ptt *p_ptt,
1389 u16 igu_sb_id, u32 pi_index,
1390 enum ecore_coalescing_fsm coalescing_fsm, u8 timeset)
1392 struct cau_pi_entry pi_entry;
1393 u32 sb_offset, pi_offset;
1395 if (IS_VF(p_hwfn->p_dev))
1396 return; /* @@@TBD MichalK- VF CAU... */
1398 sb_offset = igu_sb_id * PIS_PER_SB;
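	/* Addressing sketch: each SB owns PIS_PER_SB consecutive PI entries,
	 * so the entry for (igu_sb_id, pi_index) lives at line
	 * igu_sb_id * PIS_PER_SB + pi_index of the CAU PI memory - e.g.
	 * assuming PIS_PER_SB matches the literal 12 used in
	 * ecore_int_igu_init_pure_rt_single(), SB 3 / PI 2 maps to line 38.
	 */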
1399 OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry));
1401 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
1402 if (coalescing_fsm == ECORE_COAL_RX_STATE_MACHINE)
1403 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
1405 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);
1407 pi_offset = sb_offset + pi_index;
1408 if (p_hwfn->hw_init_done) {
1409 ecore_wr(p_hwfn, p_ptt,
1410 CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
1411 *((u32 *)&(pi_entry)));
1413 STORE_RT_REG(p_hwfn,
1414 CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
1415 *((u32 *)&(pi_entry)));
1419 void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
1420 struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info)
1422 /* zero status block and ack counter */
1423 sb_info->sb_ack = 0;
1424 OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
1426 if (IS_PF(p_hwfn->p_dev))
1427 ecore_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
1428 sb_info->igu_sb_id, 0, 0);
1432 * @brief ecore_get_igu_sb_id - given a sw sb_id return the
1440 static u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
	/* Assuming a continuous set of IGU SBs dedicated to the given PF */
1445 if (sb_id == ECORE_SP_SB_ID)
1446 igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
1447 else if (IS_PF(p_hwfn->p_dev))
1448 igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb;
1450 igu_sb_id = ecore_vf_get_igu_sb_id(p_hwfn, sb_id);
1452 if (sb_id == ECORE_SP_SB_ID)
1453 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1454 "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
1456 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1457 "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);
1462 enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
1463 struct ecore_ptt *p_ptt,
1464 struct ecore_sb_info *sb_info,
1466 dma_addr_t sb_phy_addr, u16 sb_id)
1468 sb_info->sb_virt = sb_virt_addr;
1469 sb_info->sb_phys = sb_phy_addr;
1471 sb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id);
1473 if (sb_id != ECORE_SP_SB_ID) {
1474 p_hwfn->sbs_info[sb_id] = sb_info;
1477 #ifdef ECORE_CONFIG_DIRECT_HWFN
1478 sb_info->p_hwfn = p_hwfn;
1480 sb_info->p_dev = p_hwfn->p_dev;
1482 /* The igu address will hold the absolute address that needs to be
1483 * written to for a specific status block
1485 if (IS_PF(p_hwfn->p_dev)) {
1486 sb_info->igu_addr = (u8 OSAL_IOMEM *)p_hwfn->regview +
1487 GTT_BAR0_MAP_REG_IGU_CMD + (sb_info->igu_sb_id << 3);
1491 (u8 OSAL_IOMEM *)p_hwfn->regview +
1492 PXP_VF_BAR0_START_IGU +
1493 ((IGU_CMD_INT_ACK_BASE + sb_info->igu_sb_id) << 3);
1496 sb_info->flags |= ECORE_SB_INFO_INIT;
1498 ecore_int_sb_setup(p_hwfn, p_ptt, sb_info);
1500 return ECORE_SUCCESS;
1503 enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
1504 struct ecore_sb_info *sb_info,
1507 if (sb_id == ECORE_SP_SB_ID) {
		DP_ERR(p_hwfn, "Do not free the SP SB using this function");
1512 /* zero status block and ack counter */
1513 sb_info->sb_ack = 0;
1514 OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
1516 if (p_hwfn->sbs_info[sb_id] != OSAL_NULL) {
1517 p_hwfn->sbs_info[sb_id] = OSAL_NULL;
1521 return ECORE_SUCCESS;
1524 static void ecore_int_sp_sb_free(struct ecore_hwfn *p_hwfn)
1526 struct ecore_sb_sp_info *p_sb = p_hwfn->p_sp_sb;
1531 if (p_sb->sb_info.sb_virt) {
1532 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
1533 p_sb->sb_info.sb_virt,
1534 p_sb->sb_info.sb_phys,
1535 SB_ALIGNED_SIZE(p_hwfn));
1538 OSAL_FREE(p_hwfn->p_dev, p_sb);
1541 static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
1542 struct ecore_ptt *p_ptt)
1544 struct ecore_sb_sp_info *p_sb;
1545 dma_addr_t p_phys = 0;
1550 OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
1551 sizeof(struct ecore_sb_sp_info));
1553 DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `struct ecore_sb_sp_info'");
1559 p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
1560 &p_phys, SB_ALIGNED_SIZE(p_hwfn));
1562 DP_NOTICE(p_hwfn, true, "Failed to allocate status block");
1563 OSAL_FREE(p_hwfn->p_dev, p_sb);
1567 /* Status Block setup */
1568 p_hwfn->p_sp_sb = p_sb;
1569 ecore_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info,
1570 p_virt, p_phys, ECORE_SP_SB_ID);
1572 OSAL_MEMSET(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));
1574 return ECORE_SUCCESS;
1577 enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn,
1578 ecore_int_comp_cb_t comp_cb,
1580 u8 *sb_idx, __le16 **p_fw_cons)
1582 struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
1583 enum _ecore_status_t rc = ECORE_NOMEM;
1586 /* Look for a free index */
1587 for (pi = 0; pi < OSAL_ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
1588 if (p_sp_sb->pi_info_arr[pi].comp_cb != OSAL_NULL)
1591 p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
1592 p_sp_sb->pi_info_arr[pi].cookie = cookie;
1594 *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
1602 enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn, u8 pi)
1604 struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
1606 if (p_sp_sb->pi_info_arr[pi].comp_cb == OSAL_NULL)
1609 p_sp_sb->pi_info_arr[pi].comp_cb = OSAL_NULL;
1610 p_sp_sb->pi_info_arr[pi].cookie = OSAL_NULL;
1611 return ECORE_SUCCESS;
1614 u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn)
1616 return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
1619 void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn,
1620 struct ecore_ptt *p_ptt,
1621 enum ecore_int_mode int_mode)
1623 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;
1626 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
1627 DP_INFO(p_hwfn, "FPGA - don't enable ATTN generation in IGU\n");
1630 igu_pf_conf |= IGU_PF_CONF_ATTN_BIT_EN;
1632 p_hwfn->p_dev->int_mode = int_mode;
1633 switch (p_hwfn->p_dev->int_mode) {
1634 case ECORE_INT_MODE_INTA:
1635 igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
1636 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
1639 case ECORE_INT_MODE_MSI:
1640 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
1641 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
1644 case ECORE_INT_MODE_MSIX:
1645 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
1647 case ECORE_INT_MODE_POLL:
1651 ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
1654 static void ecore_int_igu_enable_attn(struct ecore_hwfn *p_hwfn,
1655 struct ecore_ptt *p_ptt)
1658 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
1660 "FPGA - Don't enable Attentions in IGU and MISC\n");
1665 /* Configure AEU signal change to produce attentions */
1666 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
1667 ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
1668 ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
1669 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);
1671 OSAL_MMIOWB(p_hwfn->p_dev);
1673 /* Unmask AEU signals toward IGU */
1674 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
1677 enum _ecore_status_t
1678 ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1679 enum ecore_int_mode int_mode)
1681 enum _ecore_status_t rc = ECORE_SUCCESS;
1684 /* @@@tmp - Mask General HW attentions 0-31, Enable 32-36 */
1685 tmp = ecore_rd(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0);
1687 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE3_IGU_OUT_0, 0);
1688 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0, tmp);
	/* @@@tmp - Starting with MFW 8.2.1.0 we've started hitting AVS stop
	 * attentions. Since we're waiting for an answer from BRCM regarding
	 * this attention, we simply mask it in the meanwhile.
1694 tmp = ecore_rd(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0);
1696 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0, tmp);
1698 ecore_int_igu_enable_attn(p_hwfn, p_ptt);
1700 if ((int_mode != ECORE_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
1701 rc = OSAL_SLOWPATH_IRQ_REQ(p_hwfn);
1702 if (rc != ECORE_SUCCESS) {
1703 DP_NOTICE(p_hwfn, true,
1704 "Slowpath IRQ request failed\n");
1705 return ECORE_NORESOURCES;
1707 p_hwfn->b_int_requested = true;
1710 /* Enable interrupt Generation */
1711 ecore_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
1713 p_hwfn->b_int_enabled = 1;
1718 void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
1719 struct ecore_ptt *p_ptt)
1721 p_hwfn->b_int_enabled = 0;
1723 if (IS_VF(p_hwfn->p_dev))
1726 ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
1729 #define IGU_CLEANUP_SLEEP_LENGTH (1000)
1730 void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
1731 struct ecore_ptt *p_ptt,
1732 u32 sb_id, bool cleanup_set, u16 opaque_fid)
1734 u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
1735 u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id;
1736 u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
1737 u8 type = 0; /* FIXME MichalS type??? */
1739 OSAL_BUILD_BUG_ON((IGU_REG_CLEANUP_STATUS_4 -
1740 IGU_REG_CLEANUP_STATUS_0) != 0x200);
	/* Use the Control Command Register to perform the cleanup. There is
	 * an option to do this using the IGU BAR, but then it can't be used
	 * for VFs.
1746 /* Set the data field */
1747 SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
1748 SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, type);
1749 SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);
1751 /* Set the control register */
1752 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
1753 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
1754 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);
1756 ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);
1758 OSAL_BARRIER(p_hwfn->p_dev);
1760 ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);
1762 OSAL_MMIOWB(p_hwfn->p_dev);
1764 /* calculate where to read the status bit from */
1765 sb_bit = 1 << (sb_id % 32);
1766 sb_bit_addr = sb_id / 32 * sizeof(u32);
1768 sb_bit_addr += IGU_REG_CLEANUP_STATUS_0 + (0x80 * type);
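	/* Example with illustrative values: for sb_id 37 and type 0, the
	 * completion is polled at bit (37 % 32) = 5 of the dword at
	 * IGU_REG_CLEANUP_STATUS_0 + (37 / 32) * sizeof(u32), i.e. offset 4
	 * into the status area.
	 */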
1770 /* Now wait for the command to complete */
1771 while (--sleep_cnt) {
1772 val = ecore_rd(p_hwfn, p_ptt, sb_bit_addr);
1773 if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
1779 DP_NOTICE(p_hwfn, true,
1780 "Timeout waiting for clear status 0x%08x [for sb %d]\n",
1784 void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
1785 struct ecore_ptt *p_ptt,
1786 u32 sb_id, u16 opaque, bool b_set)
1792 ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 1, opaque);
1795 ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque);
1797 /* Clear the CAU for the SB */
1798 for (pi = 0; pi < 12; pi++)
1799 ecore_wr(p_hwfn, p_ptt,
1800 CAU_REG_PI_MEMORY + (sb_id * 12 + pi) * 4, 0);
1803 void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
1804 struct ecore_ptt *p_ptt,
1805 bool b_set, bool b_slowpath)
1807 u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb;
1808 u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt;
1809 u32 sb_id = 0, val = 0;
1811 /* @@@TBD MichalK temporary... should be moved to init-tool... */
1812 val = ecore_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
1813 val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
1814 val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
1815 ecore_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
1818 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1819 "IGU cleaning SBs [%d,...,%d]\n",
1820 igu_base_sb, igu_base_sb + igu_sb_cnt - 1);
1822 for (sb_id = igu_base_sb; sb_id < igu_base_sb + igu_sb_cnt; sb_id++)
1823 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
1824 p_hwfn->hw_info.opaque_fid,
1830 sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
1831 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1832 "IGU cleaning slowpath SB [%d]\n", sb_id);
1833 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
1834 p_hwfn->hw_info.opaque_fid, b_set);
1837 static u32 ecore_int_igu_read_cam_block(struct ecore_hwfn *p_hwfn,
1838 struct ecore_ptt *p_ptt, u16 sb_id)
1840 u32 val = ecore_rd(p_hwfn, p_ptt,
1841 IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id);
1842 struct ecore_igu_block *p_block;
1844 p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
	/* Stop scanning when the first invalid PF entry is hit */
1847 if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
1848 GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
1851 /* Fill the block information */
1852 p_block->status = ECORE_IGU_STATUS_VALID;
1853 p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
1854 p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
1855 p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);
1857 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1858 "IGU_BLOCK: [SB 0x%04x, Value in CAM 0x%08x] func_id = %d"
1859 " is_pf = %d vector_num = 0x%x\n",
1860 sb_id, val, p_block->function_id, p_block->is_pf,
1861 p_block->vector_number);
1867 enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
1868 struct ecore_ptt *p_ptt)
1870 struct ecore_igu_info *p_igu_info;
1871 struct ecore_igu_block *p_block;
1872 u16 sb_id, last_iov_sb_id = 0;
1873 u32 min_vf, max_vf, val;
1874 u16 prev_sb_id = 0xFF;
1876 p_hwfn->hw_info.p_igu_info = OSAL_ALLOC(p_hwfn->p_dev,
1878 sizeof(*p_igu_info));
1879 if (!p_hwfn->hw_info.p_igu_info)
1882 OSAL_MEMSET(p_hwfn->hw_info.p_igu_info, 0, sizeof(*p_igu_info));
1884 p_igu_info = p_hwfn->hw_info.p_igu_info;
1886 /* Initialize base sb / sb cnt for PFs and VFs */
1887 p_igu_info->igu_base_sb = 0xffff;
1888 p_igu_info->igu_sb_cnt = 0;
1889 p_igu_info->igu_dsb_id = 0xffff;
1890 p_igu_info->igu_base_sb_iov = 0xffff;
1892 #ifdef CONFIG_ECORE_SRIOV
1893 min_vf = p_hwfn->hw_info.first_vf_in_pf;
1894 max_vf = p_hwfn->hw_info.first_vf_in_pf +
1895 p_hwfn->p_dev->sriov_info.total_vfs;
1901 for (sb_id = 0; sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
1903 p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
1904 val = ecore_int_igu_read_cam_block(p_hwfn, p_ptt, sb_id);
1905 if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
1906 GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
1909 if (p_block->is_pf) {
1910 if (p_block->function_id == p_hwfn->rel_pf_id) {
1911 p_block->status |= ECORE_IGU_STATUS_PF;
1913 if (p_block->vector_number == 0) {
1914 if (p_igu_info->igu_dsb_id == 0xffff)
1915 p_igu_info->igu_dsb_id = sb_id;
1917 if (p_igu_info->igu_base_sb == 0xffff) {
1918 p_igu_info->igu_base_sb = sb_id;
1919 } else if (prev_sb_id != sb_id - 1) {
1920 DP_NOTICE(p_hwfn->p_dev, false,
1928 /* we don't count the default */
1929 (p_igu_info->igu_sb_cnt)++;
1933 if ((p_block->function_id >= min_vf) &&
1934 (p_block->function_id < max_vf)) {
1935 /* Available for VFs of this PF */
1936 if (p_igu_info->igu_base_sb_iov == 0xffff) {
1937 p_igu_info->igu_base_sb_iov = sb_id;
1938 } else if (last_iov_sb_id != sb_id - 1) {
1940 DP_VERBOSE(p_hwfn->p_dev,
1942 "First uninited IGU"
1947 DP_NOTICE(p_hwfn->p_dev, false,
1958 p_block->status |= ECORE_IGU_STATUS_FREE;
1959 p_hwfn->hw_info.p_igu_info->free_blks++;
1960 last_iov_sb_id = sb_id;
1964 p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks;
1966 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1967 "IGU igu_base_sb=0x%x [IOV 0x%x] igu_sb_cnt=%d [IOV 0x%x] "
1968 "igu_dsb_id=0x%x\n",
1969 p_igu_info->igu_base_sb, p_igu_info->igu_base_sb_iov,
1970 p_igu_info->igu_sb_cnt, p_igu_info->igu_sb_cnt_iov,
1971 p_igu_info->igu_dsb_id);
1973 if (p_igu_info->igu_base_sb == 0xffff ||
1974 p_igu_info->igu_dsb_id == 0xffff || p_igu_info->igu_sb_cnt == 0) {
1975 DP_NOTICE(p_hwfn, true,
1976 "IGU CAM returned invalid values igu_base_sb=0x%x "
1977 "igu_sb_cnt=%d igu_dsb_id=0x%x\n",
1978 p_igu_info->igu_base_sb, p_igu_info->igu_sb_cnt,
1979 p_igu_info->igu_dsb_id);
1983 return ECORE_SUCCESS;
1987 * @brief Initialize igu runtime registers
1991 void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn)
1993 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;
1995 STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
1998 #define LSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - \
1999 IGU_CMD_INT_ACK_BASE)
2000 #define MSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - \
2001 IGU_CMD_INT_ACK_BASE)
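/* The SISR mask words are read through the GTT-mapped IGU command window;
 * the '* 8' below is the same 8-byte command stride expressed as '<< 3'
 * elsewhere in this file.
 */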
2002 u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn)
2004 u32 intr_status_hi = 0, intr_status_lo = 0;
2005 u64 intr_status = 0;
2007 intr_status_lo = REG_RD(p_hwfn,
2008 GTT_BAR0_MAP_REG_IGU_CMD +
2009 LSB_IGU_CMD_ADDR * 8);
2010 intr_status_hi = REG_RD(p_hwfn,
2011 GTT_BAR0_MAP_REG_IGU_CMD +
2012 MSB_IGU_CMD_ADDR * 8);
2013 intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;
2018 static void ecore_int_sp_dpc_setup(struct ecore_hwfn *p_hwfn)
2020 OSAL_DPC_INIT(p_hwfn->sp_dpc, p_hwfn);
2021 p_hwfn->b_sp_dpc_enabled = true;
2024 static enum _ecore_status_t ecore_int_sp_dpc_alloc(struct ecore_hwfn *p_hwfn)
2026 p_hwfn->sp_dpc = OSAL_DPC_ALLOC(p_hwfn);
2027 if (!p_hwfn->sp_dpc)
2030 return ECORE_SUCCESS;
2033 static void ecore_int_sp_dpc_free(struct ecore_hwfn *p_hwfn)
2035 OSAL_FREE(p_hwfn->p_dev, p_hwfn->sp_dpc);
2038 enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn,
2039 struct ecore_ptt *p_ptt)
2041 enum _ecore_status_t rc = ECORE_SUCCESS;
2043 rc = ecore_int_sp_dpc_alloc(p_hwfn);
2044 if (rc != ECORE_SUCCESS) {
2045 DP_ERR(p_hwfn->p_dev, "Failed to allocate sp dpc mem\n");
2049 rc = ecore_int_sp_sb_alloc(p_hwfn, p_ptt);
2050 if (rc != ECORE_SUCCESS) {
2051 DP_ERR(p_hwfn->p_dev, "Failed to allocate sp sb mem\n");
2055 rc = ecore_int_sb_attn_alloc(p_hwfn, p_ptt);
2056 if (rc != ECORE_SUCCESS)
2057 DP_ERR(p_hwfn->p_dev, "Failed to allocate sb attn mem\n");
2062 void ecore_int_free(struct ecore_hwfn *p_hwfn)
2064 ecore_int_sp_sb_free(p_hwfn);
2065 ecore_int_sb_attn_free(p_hwfn);
2066 ecore_int_sp_dpc_free(p_hwfn);
2069 void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
2071 if (!p_hwfn || !p_hwfn->p_sp_sb || !p_hwfn->p_sb_attn)
2074 ecore_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
2075 ecore_int_sb_attn_setup(p_hwfn, p_ptt);
2076 ecore_int_sp_dpc_setup(p_hwfn);
2079 void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
2080 struct ecore_sb_cnt_info *p_sb_cnt_info)
2082 struct ecore_igu_info *info = p_hwfn->hw_info.p_igu_info;
2084 if (!info || !p_sb_cnt_info)
2087 p_sb_cnt_info->sb_cnt = info->igu_sb_cnt;
2088 p_sb_cnt_info->sb_iov_cnt = info->igu_sb_cnt_iov;
2089 p_sb_cnt_info->sb_free_blk = info->free_blks;
2092 u16 ecore_int_queue_id_from_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
2094 struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
2096 /* Determine origin of SB id */
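	/* Mapping sketch with illustrative values: if igu_base_sb = 6,
	 * igu_sb_cnt = 8, igu_base_sb_iov = 16 and igu_sb_cnt_iov = 4, then
	 * IGU SBs 6..13 map to queue ids 0..7 and IGU SBs 16..19 map to
	 * queue ids 8..11.
	 */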
2097 if ((sb_id >= p_info->igu_base_sb) &&
2098 (sb_id < p_info->igu_base_sb + p_info->igu_sb_cnt)) {
2099 return sb_id - p_info->igu_base_sb;
2100 } else if ((sb_id >= p_info->igu_base_sb_iov) &&
2101 (sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) {
2102 return sb_id - p_info->igu_base_sb_iov + p_info->igu_sb_cnt;
2105 DP_NOTICE(p_hwfn, true, "SB %d not in range for function\n",
2110 void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev)
2114 for_each_hwfn(p_dev, i)
2115 p_dev->hwfns[i].b_int_requested = false;