2 * Copyright (c) 2016 QLogic Corporation.
6 * See LICENSE.qede_pmd for copyright and licensing details.
11 #include "ecore_spq.h"
13 #include "ecore_gtt_reg_addr.h"
14 #include "ecore_init_ops.h"
15 #include "ecore_rt_defs.h"
16 #include "ecore_int.h"
19 #include "ecore_sriov.h"
21 #include "ecore_hw_defs.h"
22 #include "ecore_hsi_common.h"
23 #include "ecore_mcp.h"
24 #include "ecore_attn_values.h"
26 struct ecore_pi_info {
27 ecore_int_comp_cb_t comp_cb;
28 void *cookie; /* Will be sent to the compl cb function */
31 struct ecore_sb_sp_info {
32 struct ecore_sb_info sb_info;
33 /* per protocol index data */
34 struct ecore_pi_info pi_info_arr[PIS_PER_SB];
37 enum ecore_attention_type {
39 ECORE_ATTN_TYPE_PARITY,
42 #define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
43 ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)
45 struct aeu_invert_reg_bit {
48 #define ATTENTION_PARITY (1 << 0)
50 #define ATTENTION_LENGTH_MASK (0x00000ff0)
51 #define ATTENTION_LENGTH_SHIFT (4)
52 #define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \
53 ATTENTION_LENGTH_SHIFT)
54 #define ATTENTION_SINGLE (1 << ATTENTION_LENGTH_SHIFT)
55 #define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY)
56 #define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \
59 /* Multiple bits start with this offset */
60 #define ATTENTION_OFFSET_MASK (0x000ff000)
61 #define ATTENTION_OFFSET_SHIFT (12)
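/* Illustrative encoding (based on how these flags are used in aeu_descs
 * below): a lone maskable cause is just ATTENTION_SINGLE, while e.g.
 *   (2 << ATTENTION_LENGTH_SHIFT) | (33 << ATTENTION_OFFSET_SHIFT)
 * describes two consecutive AEU bits whose "%d" in the bit name starts
 * counting at 33 ("General Attention 33"/"General Attention 34").
 */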
63 #define ATTENTION_CLEAR_ENABLE (1 << 28)
64 #define ATTENTION_FW_DUMP (1 << 29)
65 #define ATTENTION_PANIC_DUMP (1 << 30)
68 /* Callback to call if attention will be triggered */
69 enum _ecore_status_t (*cb)(struct ecore_hwfn *p_hwfn);
71 enum block_id block_index;
74 struct aeu_invert_reg {
75 struct aeu_invert_reg_bit bits[32];
78 #define MAX_ATTN_GRPS (8)
79 #define NUM_ATTN_REGS (9)
81 static enum _ecore_status_t ecore_mcp_attn_cb(struct ecore_hwfn *p_hwfn)
83 u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);
85 DP_INFO(p_hwfn->p_dev, "MCP_REG_CPU_STATE: %08x - Masking...\n", tmp);
86 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK, 0xffffffff);
91 #define ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK (0x3c000)
92 #define ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT (14)
93 #define ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK (0x03fc0)
94 #define ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT (6)
95 #define ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK (0x00020)
96 #define ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT (5)
97 #define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK (0x0001e)
98 #define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT (1)
99 #define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK (0x1)
100 #define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_SHIFT (0)
101 #define ECORE_PSWHST_ATTENTION_VF_DISABLED (0x1)
102 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS (0x1)
103 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK (0x1)
104 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT (0)
105 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK (0x1e)
106 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT (1)
107 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK (0x20)
108 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT (5)
109 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK (0x3fc0)
110 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT (6)
111 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK (0x3c000)
112 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT (14)
113 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK (0x3fc0000)
114 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT (18)
115 static enum _ecore_status_t ecore_pswhst_attn_cb(struct ecore_hwfn *p_hwfn)
118 ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
119 PSWHST_REG_VF_DISABLED_ERROR_VALID);
121 /* Disabled VF access */
122 if (tmp & ECORE_PSWHST_ATTENTION_VF_DISABLED) {
125 addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
126 PSWHST_REG_VF_DISABLED_ERROR_ADDRESS);
127 data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
128 PSWHST_REG_VF_DISABLED_ERROR_DATA);
129 DP_INFO(p_hwfn->p_dev,
130 "PF[0x%02x] VF [0x%02x] [Valid 0x%02x] Client [0x%02x]"
131 " Write [0x%02x] Addr [0x%08x]\n",
132 (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK)
133 >> ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT),
134 (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK)
135 >> ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT),
137 ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK) >>
138 ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT),
140 ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK) >>
141 ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT),
143 ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK) >>
144 ECORE_PSWHST_ATTENTION_DISABLED_WRITE_SHIFT),
148 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
149 PSWHST_REG_INCORRECT_ACCESS_VALID);
150 if (tmp & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS) {
151 u32 addr, data, length;
153 addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
154 PSWHST_REG_INCORRECT_ACCESS_ADDRESS);
155 data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
156 PSWHST_REG_INCORRECT_ACCESS_DATA);
157 length = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
158 PSWHST_REG_INCORRECT_ACCESS_LENGTH);
160 DP_INFO(p_hwfn->p_dev,
161 "Incorrect access to %08x of length %08x - PF [%02x]"
162 " VF [%04x] [valid %02x] client [%02x] write [%02x]"
163 " Byte-Enable [%04x] [%08x]\n",
166 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK) >>
167 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT),
169 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK) >>
170 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT),
172 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK) >>
173 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT),
175 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK) >>
176 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT),
178 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK) >>
179 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT),
181 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK) >>
182 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT),
186 /* TODO - We know 'some' of these are legal due to virtualization,
187 * but is it true for all of them?
189 return ECORE_SUCCESS;
192 #define ECORE_GRC_ATTENTION_VALID_BIT (1 << 0)
193 #define ECORE_GRC_ATTENTION_ADDRESS_MASK (0x7fffff << 0)
194 #define ECORE_GRC_ATTENTION_RDWR_BIT (1 << 23)
195 #define ECORE_GRC_ATTENTION_MASTER_MASK (0xf << 24)
196 #define ECORE_GRC_ATTENTION_MASTER_SHIFT (24)
197 #define ECORE_GRC_ATTENTION_PF_MASK (0xf)
198 #define ECORE_GRC_ATTENTION_VF_MASK (0xff << 4)
199 #define ECORE_GRC_ATTENTION_VF_SHIFT (4)
200 #define ECORE_GRC_ATTENTION_PRIV_MASK (0x3 << 14)
201 #define ECORE_GRC_ATTENTION_PRIV_SHIFT (14)
202 #define ECORE_GRC_ATTENTION_PRIV_VF (0)
203 static const char *grc_timeout_attn_master_to_str(u8 master)
231 static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn)
235 /* We've already cleared the timeout interrupt register, so we learn
236 * of interrupts via the validity register
238 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
239 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
240 if (!(tmp & ECORE_GRC_ATTENTION_VALID_BIT))
243 /* Read the GRC timeout information */
244 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
245 GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0);
246 tmp2 = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
247 GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);
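/* Presumably the address captured in DATA_0 is a dword address, which is
 * why it is shifted left by 2 below to be reported in bytes.
 */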
249 DP_INFO(p_hwfn->p_dev,
250 "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s]"
251 " [PF: %02x %s %02x]\n",
253 (tmp & ECORE_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from",
254 (tmp & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2,
255 grc_timeout_attn_master_to_str((tmp &
256 ECORE_GRC_ATTENTION_MASTER_MASK) >>
257 ECORE_GRC_ATTENTION_MASTER_SHIFT),
258 (tmp2 & ECORE_GRC_ATTENTION_PF_MASK),
259 (((tmp2 & ECORE_GRC_ATTENTION_PRIV_MASK) >>
260 ECORE_GRC_ATTENTION_PRIV_SHIFT) ==
261 ECORE_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant:)",
262 (tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >>
263 ECORE_GRC_ATTENTION_VF_SHIFT);
266 /* Regardless of anything else, clean the validity bit */
267 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
268 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);
269 return ECORE_SUCCESS;
272 #define ECORE_PGLUE_ATTENTION_VALID (1 << 29)
273 #define ECORE_PGLUE_ATTENTION_RD_VALID (1 << 26)
274 #define ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf << 20)
275 #define ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20)
276 #define ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID (1 << 19)
277 #define ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff << 24)
278 #define ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24)
279 #define ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR (1 << 21)
280 #define ECORE_PGLUE_ATTENTION_DETAILS2_BME (1 << 22)
281 #define ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN (1 << 23)
282 #define ECORE_PGLUE_ATTENTION_ICPL_VALID (1 << 23)
283 #define ECORE_PGLUE_ATTENTION_ZLR_VALID (1 << 25)
284 #define ECORE_PGLUE_ATTENTION_ILT_VALID (1 << 23)
285 static enum _ecore_status_t ecore_pglub_rbc_attn_cb(struct ecore_hwfn *p_hwfn)
290 attn_blocks[BLOCK_PGLUE_B].chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].
291 int_regs[0]->mask_addr;
293 /* Mask unnecessary attentions - @TBD move to MFW */
294 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, reg_addr);
295 tmp |= (1 << 19); /* Was PGL_PCIE_ATTN */
296 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, reg_addr, tmp);
298 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
299 PGLUE_B_REG_TX_ERR_WR_DETAILS2);
300 if (tmp & ECORE_PGLUE_ATTENTION_VALID) {
301 u32 addr_lo, addr_hi, details;
303 addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
304 PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
305 addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
306 PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
307 details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
308 PGLUE_B_REG_TX_ERR_WR_DETAILS);
311 "Illegal write by chip to [%08x:%08x] blocked."
312 "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]"
313 " Details2 %08x [Was_error %02x BME deassert %02x"
314 " FID_enable deassert %02x]\n",
315 addr_hi, addr_lo, details,
317 ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
318 ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
320 ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
321 ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
322 (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID)
324 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1
326 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 :
328 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1
332 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
333 PGLUE_B_REG_TX_ERR_RD_DETAILS2);
334 if (tmp & ECORE_PGLUE_ATTENTION_RD_VALID) {
335 u32 addr_lo, addr_hi, details;
337 addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
338 PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
339 addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
340 PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
341 details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
342 PGLUE_B_REG_TX_ERR_RD_DETAILS);
345 "Illegal read by chip from [%08x:%08x] blocked."
346 " Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]"
347 " Details2 %08x [Was_error %02x BME deassert %02x"
348 " FID_enable deassert %02x]\n",
349 addr_hi, addr_lo, details,
351 ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
352 ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
354 ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
355 ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
356 (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID)
358 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1
360 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 :
362 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1
366 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
367 PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
368 if (tmp & ECORE_PGLUE_ATTENTION_ICPL_VALID)
369 DP_INFO(p_hwfn, "ICPL error - %08x\n", tmp);
371 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
372 PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
373 if (tmp & ECORE_PGLUE_ATTENTION_ZLR_VALID) {
374 u32 addr_hi, addr_lo;
376 addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
377 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
378 addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
379 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);
381 DP_INFO(p_hwfn, "ICPL error - %08x [Address %08x:%08x]\n",
382 tmp, addr_hi, addr_lo);
385 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
386 PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
387 if (tmp & ECORE_PGLUE_ATTENTION_ILT_VALID) {
388 u32 addr_hi, addr_lo, details;
390 addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
391 PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
392 addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
393 PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
394 details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
395 PGLUE_B_REG_VF_ILT_ERR_DETAILS);
398 "ILT error - Details %08x Details2 %08x"
399 " [Address %08x:%08x]\n",
400 details, tmp, addr_hi, addr_lo);
403 /* Clear the indications */
404 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
405 PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));
407 return ECORE_SUCCESS;
410 static enum _ecore_status_t ecore_nig_attn_cb(struct ecore_hwfn *p_hwfn)
414 /* Mask unnecessary attentions - @TBD move to MFW */
416 attn_blocks[BLOCK_NIG].chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].
417 int_regs[3]->mask_addr;
418 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, reg_addr);
419 tmp |= (1 << 0); /* Was 3_P0_TX_PAUSE_TOO_LONG_INT */
420 tmp |= NIG_REG_INT_MASK_3_P0_LB_TC1_PAUSE_TOO_LONG_INT;
421 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, reg_addr, tmp);
424 attn_blocks[BLOCK_NIG].chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].
425 int_regs[5]->mask_addr;
426 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, reg_addr);
427 tmp |= (1 << 0); /* Was 5_P1_TX_PAUSE_TOO_LONG_INT */
428 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, reg_addr, tmp);
430 /* TODO - a bit risky to return success here, but the alternative is to
431 * actually read the multitude of interrupt registers of the block.
433 return ECORE_SUCCESS;
436 static enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn)
438 DP_NOTICE(p_hwfn, false, "FW assertion!\n");
440 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FW_ASSERT);
445 static enum _ecore_status_t
446 ecore_general_attention_35(struct ecore_hwfn *p_hwfn)
448 DP_INFO(p_hwfn, "General attention 35!\n");
450 return ECORE_SUCCESS;
453 #define ECORE_DORQ_ATTENTION_REASON_MASK (0xfffff)
454 #define ECORE_DORQ_ATTENTION_OPAQUE_MASK (0xffff)
455 #define ECORE_DORQ_ATTENTION_SIZE_MASK (0x7f)
456 #define ECORE_DORQ_ATTENTION_SIZE_SHIFT (16)
458 static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn)
462 reason = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, DORQ_REG_DB_DROP_REASON) &
463 ECORE_DORQ_ATTENTION_REASON_MASK;
465 u32 details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
466 DORQ_REG_DB_DROP_DETAILS);
468 DP_INFO(p_hwfn->p_dev,
469 "DORQ db_drop: address 0x%08x Opaque FID 0x%04x"
470 " Size [bytes] 0x%08x Reason: 0x%08x\n",
471 ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
472 DORQ_REG_DB_DROP_DETAILS_ADDRESS),
473 (u16)(details & ECORE_DORQ_ATTENTION_OPAQUE_MASK),
474 ((details & ECORE_DORQ_ATTENTION_SIZE_MASK) >>
475 ECORE_DORQ_ATTENTION_SIZE_SHIFT) * 4, reason);
481 static enum _ecore_status_t ecore_tm_attn_cb(struct ecore_hwfn *p_hwfn)
484 if (CHIP_REV_IS_EMUL_B0(p_hwfn->p_dev)) {
485 u32 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
488 if (val & ~(TM_REG_INT_STS_1_PEND_TASK_SCAN |
489 TM_REG_INT_STS_1_PEND_CONN_SCAN))
492 if (val & (TM_REG_INT_STS_1_PEND_TASK_SCAN |
493 TM_REG_INT_STS_1_PEND_CONN_SCAN))
495 "TM attention on emulation - most likely"
496 " results of clock-ratios\n");
497 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1);
498 val |= TM_REG_INT_MASK_1_PEND_CONN_SCAN |
499 TM_REG_INT_MASK_1_PEND_TASK_SCAN;
500 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1, val);
502 return ECORE_SUCCESS;
509 /* Note that aeu_invert_reg must be defined in the same order of bits as in HW */
510 static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
512 { /* After Invert 1 */
513 {"GPIO0 function%d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
519 { /* After Invert 2 */
520 {"PGLUE config_space", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
521 {"PGLUE misc_flr", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
522 {"PGLUE B RBC", ATTENTION_PAR_INT, ecore_pglub_rbc_attn_cb,
524 {"PGLUE misc_mctp", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
525 {"Flash event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
526 {"SMB event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
527 {"Main Power", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
529 (8 << ATTENTION_LENGTH_SHIFT) | (1 << ATTENTION_OFFSET_SHIFT),
530 OSAL_NULL, MAX_BLOCK_ID},
531 {"PCIE glue/PXP VPD %d", (16 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
537 { /* After Invert 3 */
538 {"General Attention %d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
544 { /* After Invert 4 */
545 {"General Attention 32", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE,
546 ecore_fw_assertion, MAX_BLOCK_ID},
547 {"General Attention %d",
548 (2 << ATTENTION_LENGTH_SHIFT) | (33 << ATTENTION_OFFSET_SHIFT),
549 OSAL_NULL, MAX_BLOCK_ID},
550 {"General Attention 35", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE,
551 ecore_general_attention_35, MAX_BLOCK_ID},
552 {"CNIG port %d", (4 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
554 {"MCP CPU", ATTENTION_SINGLE, ecore_mcp_attn_cb, MAX_BLOCK_ID},
555 {"MCP Watchdog timer", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
556 {"MCP M2P", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
557 {"AVS stop status ready", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
558 {"MSTAT", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
559 {"MSTAT per-path", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
560 {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
562 {"NIG", ATTENTION_PAR_INT, ecore_nig_attn_cb, BLOCK_NIG},
563 {"BMB/OPTE/MCP", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BMB},
564 {"BTB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BTB},
565 {"BRB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BRB},
566 {"PRS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRS},
571 { /* After Invert 5 */
572 {"SRC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_SRC},
573 {"PB Client1", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB1},
574 {"PB Client2", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB2},
575 {"RPB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RPB},
576 {"PBF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF},
577 {"QM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_QM},
578 {"TM", ATTENTION_PAR_INT, ecore_tm_attn_cb, BLOCK_TM},
579 {"MCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MCM},
580 {"MSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSDM},
581 {"MSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSEM},
582 {"PCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PCM},
583 {"PSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSDM},
584 {"PSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSEM},
585 {"TCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCM},
586 {"TSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSDM},
587 {"TSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSEM},
592 { /* After Invert 6 */
593 {"UCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_UCM},
594 {"USDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USDM},
595 {"USEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USEM},
596 {"XCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XCM},
597 {"XSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSDM},
598 {"XSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSEM},
599 {"YCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YCM},
600 {"YSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSDM},
601 {"YSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSEM},
602 {"XYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XYLD},
603 {"TMLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TMLD},
604 {"MYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MULD},
605 {"YULD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YULD},
606 {"DORQ", ATTENTION_PAR_INT, ecore_dorq_attn_cb, BLOCK_DORQ},
607 {"DBG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DBG},
608 {"IPC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IPC},
613 { /* After Invert 7 */
614 {"CCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CCFC},
615 {"CDU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CDU},
616 {"DMAE", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DMAE},
617 {"IGU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IGU},
618 {"ATC", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
619 {"CAU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CAU},
620 {"PTU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PTU},
621 {"PRM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRM},
622 {"TCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCFC},
623 {"RDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RDIF},
624 {"TDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TDIF},
625 {"RSS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RSS},
626 {"MISC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISC},
627 {"MISCS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISCS},
628 {"PCIE", ATTENTION_PAR, OSAL_NULL, BLOCK_PCIE},
629 {"Vaux PCI core", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
630 {"PSWRQ", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ},
635 { /* After Invert 8 */
636 {"PSWRQ (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ2},
637 {"PSWWR", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR},
638 {"PSWWR (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR2},
639 {"PSWRD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD},
640 {"PSWRD (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD2},
641 {"PSWHST", ATTENTION_PAR_INT, ecore_pswhst_attn_cb, BLOCK_PSWHST},
642 {"PSWHST (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWHST2},
643 {"GRC", ATTENTION_PAR_INT, ecore_grc_attn_cb, BLOCK_GRC},
644 {"CPMU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CPMU},
645 {"NCSI", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NCSI},
646 {"MSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
647 {"PSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
648 {"TSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
649 {"USEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
650 {"XSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
651 {"YSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
652 {"pxp_misc_mps", ATTENTION_PAR, OSAL_NULL, BLOCK_PGLCS},
653 {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
654 {"PERST_B assertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
655 {"PERST_B deassertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
656 {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
662 { /* After Invert 9 */
663 {"MCP Latched memory", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
664 {"MCP Latched scratchpad cache", ATTENTION_SINGLE, OSAL_NULL,
666 {"MCP Latched ump_tx", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
667 {"MCP Latched scratchpad", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
668 {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
675 #define ATTN_STATE_BITS (0xfff)
676 #define ATTN_BITS_MASKABLE (0x3ff)
677 struct ecore_sb_attn_info {
678 /* Virtual & Physical address of the SB */
679 struct atten_status_block *sb_attn;
682 /* Last seen running index */
685 /* A mask of the AEU bits resulting in a parity error */
686 u32 parity_mask[NUM_ATTN_REGS];
688 /* A pointer to the attention description structure */
689 struct aeu_invert_reg *p_aeu_desc;
691 /* Previously asserted attentions, which are still unasserted */
694 /* Cleanup address for the link's general hw attention */
698 static u16 ecore_attn_update_idx(struct ecore_hwfn *p_hwfn,
699 struct ecore_sb_attn_info *p_sb_desc)
703 OSAL_MMIOWB(p_hwfn->p_dev);
705 index = OSAL_LE16_TO_CPU(p_sb_desc->sb_attn->sb_index);
706 if (p_sb_desc->index != index) {
707 p_sb_desc->index = index;
708 rc = ECORE_SB_ATT_IDX;
711 OSAL_MMIOWB(p_hwfn->p_dev);
717 * @brief ecore_int_assertion - handles asserted attention bits
720 * @param asserted_bits newly asserted bits
721 * @return enum _ecore_status_t
723 static enum _ecore_status_t ecore_int_assertion(struct ecore_hwfn *p_hwfn,
726 struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
729 /* Mask the source of the attention in the IGU */
730 igu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
731 IGU_REG_ATTENTION_ENABLE);
732 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
733 igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
734 igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
735 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);
737 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
738 "inner known ATTN state: 0x%04x --> 0x%04x\n",
739 sb_attn_sw->known_attn,
740 sb_attn_sw->known_attn | asserted_bits);
741 sb_attn_sw->known_attn |= asserted_bits;
743 /* Handle MCP events */
744 if (asserted_bits & 0x100) {
745 ecore_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
746 /* Clean the MCP attention */
747 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
748 sb_attn_sw->mfw_attn_addr, 0);
751 /* FIXME - this will change once we have good GTT definitions */
752 DIRECT_REG_WR(p_hwfn,
753 (u8 OSAL_IOMEM *) p_hwfn->regview +
754 GTT_BAR0_MAP_REG_IGU_CMD +
755 ((IGU_CMD_ATTN_BIT_SET_UPPER -
756 IGU_CMD_INT_ACK_BASE) << 3), (u32)asserted_bits);
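/* Presumably each IGU command register occupies 8 bytes of BAR0, so the
 * command index (IGU_CMD_ATTN_BIT_SET_UPPER - IGU_CMD_INT_ACK_BASE) is
 * shifted left by 3 to form a byte offset; the SISR read helpers near the
 * end of this file use the equivalent "* 8".
 */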
758 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "set cmd IGU: 0x%04x\n",
761 return ECORE_SUCCESS;
764 static void ecore_int_deassertion_print_bit(struct ecore_hwfn *p_hwfn,
765 struct attn_hw_reg *p_reg_desc,
766 struct attn_hw_block *p_block,
767 enum ecore_attention_type type,
772 const char **description;
774 if (type == ECORE_ATTN_TYPE_ATTN)
775 description = p_block->int_desc;
777 description = p_block->prty_desc;
780 for (j = 0; j < p_reg_desc->num_of_bits; j++) {
781 if (val & (1 << j)) {
783 DP_NOTICE(p_hwfn, false,
784 "%s (%s): %s [reg %d [0x%08x], bit %d]%s\n",
786 type == ECORE_ATTN_TYPE_ATTN ? "Interrupt" :
788 description[p_reg_desc->bit_attn_idx[j]],
790 p_reg_desc->sts_addr, j,
791 (mask & (1 << j)) ? " [MASKED]" : "");
793 DP_NOTICE(p_hwfn->p_dev, false,
794 "%s (%s): [reg %d [0x%08x], bit %d]%s\n",
796 type == ECORE_ATTN_TYPE_ATTN ? "Interrupt" :
799 p_reg_desc->sts_addr, j,
800 (mask & (1 << j)) ? " [MASKED]" : "");
807 * @brief ecore_int_deassertion_aeu_bit - handles the effects of a single
808 * cause of the attention
811 * @param p_aeu - descriptor of an AEU bit which caused the attention
812 * @param aeu_en_reg - register offset of the AEU enable reg. which configured
813 * this bit to this group.
814 * @param bit_index - index of this bit in the aeu_en_reg
816 * @return enum _ecore_status_t
818 static enum _ecore_status_t
819 ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn,
820 struct aeu_invert_reg_bit *p_aeu,
821 u32 aeu_en_reg, u32 bitmask)
823 enum _ecore_status_t rc = ECORE_INVAL;
827 u32 interrupts[20]; /* TODO - change into an HSI define once supplied */
829 OSAL_MEMSET(interrupts, 0, sizeof(u32) * 20); /* FIXME - real size */
832 DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
833 p_aeu->bit_name, bitmask);
835 /* Call callback before clearing the interrupt status */
837 DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
839 rc = p_aeu->cb(p_hwfn);
842 /* Handle HW block interrupt registers */
843 if (p_aeu->block_index != MAX_BLOCK_ID) {
844 u16 chip_type = ECORE_GET_TYPE(p_hwfn->p_dev);
845 struct attn_hw_block *p_block;
848 p_block = &attn_blocks[p_aeu->block_index];
850 /* Handle each interrupt register */
852 i < p_block->chip_regs[chip_type].num_of_int_regs; i++) {
853 struct attn_hw_reg *p_reg_desc;
856 p_reg_desc = p_block->chip_regs[chip_type].int_regs[i];
858 /* In case of a fatal attention, don't clear the status
859 * so that it will appear in the idle check.
861 if (rc == ECORE_SUCCESS)
862 sts_addr = p_reg_desc->sts_clr_addr;
864 sts_addr = p_reg_desc->sts_addr;
866 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, sts_addr);
867 mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
868 p_reg_desc->mask_addr);
869 ecore_int_deassertion_print_bit(p_hwfn, p_reg_desc,
871 ECORE_ATTN_TYPE_ATTN,
880 /* Reach assertion if attention is fatal */
881 if (rc != ECORE_SUCCESS) {
882 DP_NOTICE(p_hwfn, true, "`%s': Fatal attention\n",
885 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
888 /* Prevent this Attention from being asserted in the future */
889 if (p_aeu->flags & ATTENTION_CLEAR_ENABLE) {
892 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
893 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & mask));
894 DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n",
898 if (p_aeu->flags & (ATTENTION_FW_DUMP | ATTENTION_PANIC_DUMP)) {
899 /* @@@TODO - what to dump? <yuvalmin 04/02/13> */
900 DP_ERR(p_hwfn->p_dev, "`%s' - Dumps aren't implemented yet\n",
902 return ECORE_NOTIMPL;
908 static void ecore_int_parity_print(struct ecore_hwfn *p_hwfn,
909 struct aeu_invert_reg_bit *p_aeu,
910 struct attn_hw_block *p_block, u8 bit_index)
912 u16 chip_type = ECORE_GET_TYPE(p_hwfn->p_dev);
915 for (i = 0; i < p_block->chip_regs[chip_type].num_of_prty_regs; i++) {
916 struct attn_hw_reg *p_reg_desc;
919 p_reg_desc = p_block->chip_regs[chip_type].prty_regs[i];
921 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
922 p_reg_desc->sts_clr_addr);
923 mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
924 p_reg_desc->mask_addr);
925 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
926 "%s[%d] - parity register[%d] is %08x [mask is %08x]\n",
927 p_aeu->bit_name, bit_index, i, val, mask);
928 ecore_int_deassertion_print_bit(p_hwfn, p_reg_desc,
930 ECORE_ATTN_TYPE_PARITY,
936 * @brief ecore_int_deassertion_parity - handle a single parity AEU source
939 * @param p_aeu - descriptor of an AEU bit which caused the
943 static void ecore_int_deassertion_parity(struct ecore_hwfn *p_hwfn,
944 struct aeu_invert_reg_bit *p_aeu,
947 u32 block_id = p_aeu->block_index;
949 DP_INFO(p_hwfn->p_dev, "%s[%d] parity attention is set\n",
950 p_aeu->bit_name, bit_index);
952 if (block_id != MAX_BLOCK_ID) {
953 ecore_int_parity_print(p_hwfn, p_aeu, &attn_blocks[block_id],
956 /* In A0, there's a single parity bit for several blocks */
957 if (block_id == BLOCK_BTB) {
958 ecore_int_parity_print(p_hwfn, p_aeu,
959 &attn_blocks[BLOCK_OPTE],
961 ecore_int_parity_print(p_hwfn, p_aeu,
962 &attn_blocks[BLOCK_MCP],
969 * @brief - handles deassertion of previously asserted attentions.
972 * @param deasserted_bits - newly deasserted bits
973 * @return enum _ecore_status_t
976 static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
979 struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
980 u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask;
981 bool b_parity = false;
983 enum _ecore_status_t rc = ECORE_SUCCESS;
985 /* Read the attention registers in the AEU */
986 for (i = 0; i < NUM_ATTN_REGS; i++) {
987 aeu_inv_arr[i] = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
988 MISC_REG_AEU_AFTER_INVERT_1_IGU +
990 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
991 "Deasserted bits [%d]: %08x\n", i, aeu_inv_arr[i]);
994 /* Handle parity attentions first */
995 for (i = 0; i < NUM_ATTN_REGS; i++) {
996 struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
997 u32 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
998 MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
1001 u32 parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
1003 /* Skip register in which no parity bit is currently set */
1007 for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
1008 struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
1010 if ((p_bit->flags & ATTENTION_PARITY) &&
1011 !!(parities & (1 << bit_idx))) {
1012 ecore_int_deassertion_parity(p_hwfn, p_bit,
1017 bit_idx += ATTENTION_LENGTH(p_bit->flags);
1021 /* Find non-parity cause for attention and act */
1022 for (k = 0; k < MAX_ATTN_GRPS; k++) {
1023 struct aeu_invert_reg_bit *p_aeu;
1025 /* Handle only groups whose attention is currently deasserted */
1026 if (!(deasserted_bits & (1 << k)))
1029 for (i = 0; i < NUM_ATTN_REGS; i++) {
1030 u32 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
1031 i * sizeof(u32) + k * sizeof(u32) * NUM_ATTN_REGS;
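/* As computed above, the enable registers are laid out group-major:
 * group k owns NUM_ATTN_REGS consecutive dwords, so e.g. register i=3
 * of group k=2 sits at MISC_REG_AEU_ENABLE1_IGU_OUT_0 + (2 * 9 + 3) * 4.
 */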
1032 u32 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
1033 u32 bits = aeu_inv_arr[i] & en;
1035 /* Skip if no bit from this group is currently set */
1039 /* Find all set bits from current register which belong
1040 * to current group, making them responsible for the
1041 * previous assertion.
1043 for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
1047 p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
1049 /* No need to handle attention-only bits */
1050 if (p_aeu->flags == ATTENTION_PAR)
1054 bit_len = ATTENTION_LENGTH(p_aeu->flags);
1055 if (p_aeu->flags & ATTENTION_PAR_INT) {
1061 bitmask = bits & (((1 << bit_len) - 1) << bit);
1063 /* Handle source of the attention */
1064 ecore_int_deassertion_aeu_bit(p_hwfn,
1070 bit_idx += ATTENTION_LENGTH(p_aeu->flags);
1075 /* Clear IGU indication for the deasserted bits */
1076 /* FIXME - this will change once we have good GTT definitions */
1077 DIRECT_REG_WR(p_hwfn,
1078 (u8 OSAL_IOMEM *) p_hwfn->regview +
1079 GTT_BAR0_MAP_REG_IGU_CMD +
1080 ((IGU_CMD_ATTN_BIT_CLR_UPPER -
1081 IGU_CMD_INT_ACK_BASE) << 3), ~((u32)deasserted_bits));
1083 /* Unmask deasserted attentions in IGU */
1084 aeu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1085 IGU_REG_ATTENTION_ENABLE);
1086 aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
1087 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);
1089 /* Clear deassertion from inner state */
1090 sb_attn_sw->known_attn &= ~deasserted_bits;
1095 static enum _ecore_status_t ecore_int_attentions(struct ecore_hwfn *p_hwfn)
1097 struct ecore_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
1098 struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
1099 u16 index = 0, asserted_bits, deasserted_bits;
1100 enum _ecore_status_t rc = ECORE_SUCCESS;
1101 u32 attn_bits = 0, attn_acks = 0;
1103 /* Read current attention bits/acks - safeguard against attentions
1104 * by guaranteeing work on a synchronized timeframe
1107 index = OSAL_LE16_TO_CPU(p_sb_attn->sb_index);
1108 attn_bits = OSAL_LE32_TO_CPU(p_sb_attn->atten_bits);
1109 attn_acks = OSAL_LE32_TO_CPU(p_sb_attn->atten_ack);
1110 } while (index != OSAL_LE16_TO_CPU(p_sb_attn->sb_index));
1111 p_sb_attn->sb_index = index;
1113 /* Assertion / Deassertion are meaningful (and in the correct state)
1114 * only when they differ and are consistent with the known state -
1115 * deassertion when previous attention & current ack, and assertion
1116 * when current attention with no previous attention
1118 asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
1119 ~p_sb_attn_sw->known_attn;
1120 deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
1121 p_sb_attn_sw->known_attn;
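/* Put differently: a bit is newly asserted when attn=1, ack=0 and it was
 * not previously known; it is deasserted when attn=0, ack=1 and it was
 * previously known.
 */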
1123 if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100))
1125 "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
1126 index, attn_bits, attn_acks, asserted_bits,
1127 deasserted_bits, p_sb_attn_sw->known_attn);
1128 else if (asserted_bits == 0x100)
1129 DP_INFO(p_hwfn, "MFW indication via attention\n");
1131 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1132 "MFW indication [deassertion]\n");
1134 if (asserted_bits) {
1135 rc = ecore_int_assertion(p_hwfn, asserted_bits);
1140 if (deasserted_bits)
1141 rc = ecore_int_deassertion(p_hwfn, deasserted_bits);
1146 static void ecore_sb_ack_attn(struct ecore_hwfn *p_hwfn,
1147 void OSAL_IOMEM *igu_addr, u32 ack_cons)
1149 struct igu_prod_cons_update igu_ack = { 0 };
1151 igu_ack.sb_id_and_flags =
1152 ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
1153 (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
1154 (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
1155 (IGU_SEG_ACCESS_ATTN <<
1156 IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
1158 DIRECT_REG_WR(p_hwfn, igu_addr, igu_ack.sb_id_and_flags);
1160 /* Both segments (interrupts & acks) are written to the same address;
1161 * need to guarantee all commands will be received (in-order) by the HW.
1163 OSAL_MMIOWB(p_hwfn->p_dev);
1164 OSAL_BARRIER(p_hwfn->p_dev);
1167 void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)
1169 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)hwfn_cookie;
1170 struct ecore_pi_info *pi_info = OSAL_NULL;
1171 struct ecore_sb_attn_info *sb_attn;
1172 struct ecore_sb_info *sb_info;
1173 static int arr_size;
1177 DP_ERR(p_hwfn->p_dev, "DPC called - no hwfn!\n");
1181 if (!p_hwfn->p_sp_sb) {
1182 DP_ERR(p_hwfn->p_dev, "DPC called - no p_sp_sb\n");
1186 sb_info = &p_hwfn->p_sp_sb->sb_info;
1187 arr_size = OSAL_ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
1189 DP_ERR(p_hwfn->p_dev,
1190 "Status block is NULL - cannot ack interrupts\n");
1194 if (!p_hwfn->p_sb_attn) {
1195 DP_ERR(p_hwfn->p_dev, "DPC called - no p_sb_attn");
1198 sb_attn = p_hwfn->p_sb_attn;
1200 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
1201 p_hwfn, p_hwfn->my_id);
1203 /* Disable ack for def status block. Required both for msix and
1204 * inta in non-mask mode; in inta it does no harm.
1206 ecore_sb_ack(sb_info, IGU_INT_DISABLE, 0);
1208 /* Gather Interrupts/Attentions information */
1209 if (!sb_info->sb_virt) {
1210 DP_ERR(p_hwfn->p_dev,
1211 "Interrupt Status block is NULL -"
1212 " cannot check for new interrupts!\n");
1214 u32 tmp_index = sb_info->sb_ack;
1215 rc = ecore_sb_update_sb_idx(sb_info);
1216 DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
1217 "Interrupt indices: 0x%08x --> 0x%08x\n",
1218 tmp_index, sb_info->sb_ack);
1221 if (!sb_attn || !sb_attn->sb_attn) {
1222 DP_ERR(p_hwfn->p_dev,
1223 "Attentions Status block is NULL -"
1224 " cannot check for new attentions!\n");
1226 u16 tmp_index = sb_attn->index;
1228 rc |= ecore_attn_update_idx(p_hwfn, sb_attn);
1229 DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
1230 "Attention indices: 0x%08x --> 0x%08x\n",
1231 tmp_index, sb_attn->index);
1234 /* Check if we expect interrupts at this time; if not, just ack them */
1235 if (!(rc & ECORE_SB_EVENT_MASK)) {
1236 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1240 /* Check the validity of the DPC ptt. If not valid, ack interrupts and fail */
1241 if (!p_hwfn->p_dpc_ptt) {
1242 DP_NOTICE(p_hwfn->p_dev, true, "Failed to allocate PTT\n");
1243 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1247 if (rc & ECORE_SB_ATT_IDX)
1248 ecore_int_attentions(p_hwfn);
1250 if (rc & ECORE_SB_IDX) {
1253 /* Since we only looked at the SB index, it's possible that more
1254 * than a single protocol-index on the SB was incremented.
1255 * Iterate over all configured protocol indices and check
1256 * whether something happened for each.
1258 for (pi = 0; pi < arr_size; pi++) {
1259 pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
1260 if (pi_info->comp_cb != OSAL_NULL)
1261 pi_info->comp_cb(p_hwfn, pi_info->cookie);
1265 if (sb_attn && (rc & ECORE_SB_ATT_IDX)) {
1266 /* This should be done before the interrupts are enabled,
1267 * since otherwise a new attention will be generated.
1269 ecore_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);
1272 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1275 static void ecore_int_sb_attn_free(struct ecore_hwfn *p_hwfn)
1277 struct ecore_sb_attn_info *p_sb = p_hwfn->p_sb_attn;
1282 if (p_sb->sb_attn) {
1283 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_sb->sb_attn,
1285 SB_ATTN_ALIGNED_SIZE(p_hwfn));
1287 OSAL_FREE(p_hwfn->p_dev, p_sb);
1290 static void ecore_int_sb_attn_setup(struct ecore_hwfn *p_hwfn,
1291 struct ecore_ptt *p_ptt)
1293 struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
1295 OSAL_MEMSET(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));
1298 sb_info->known_attn = 0;
1300 /* Configure Attention Status Block in IGU */
1301 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
1302 DMA_LO(p_hwfn->p_sb_attn->sb_phys));
1303 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
1304 DMA_HI(p_hwfn->p_sb_attn->sb_phys));
1307 static void ecore_int_sb_attn_init(struct ecore_hwfn *p_hwfn,
1308 struct ecore_ptt *p_ptt,
1309 void *sb_virt_addr, dma_addr_t sb_phy_addr)
1311 struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
1314 sb_info->sb_attn = sb_virt_addr;
1315 sb_info->sb_phys = sb_phy_addr;
1317 /* Set the pointer to the AEU descriptors */
1318 sb_info->p_aeu_desc = aeu_descs;
1320 /* Calculate Parity Masks */
1321 OSAL_MEMSET(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
1322 for (i = 0; i < NUM_ATTN_REGS; i++) {
1323 /* j is array index, k is bit index */
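/* Each aeu_descs entry may span several consecutive AEU bits
 * (ATTENTION_LENGTH(flags)), so k advances by that length while j
 * advances by one; for an entry flagged with ATTENTION_PARITY the bit
 * recorded in the mask is the entry's first bit, k.
 */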
1324 for (j = 0, k = 0; k < 32; j++) {
1325 unsigned int flags = aeu_descs[i].bits[j].flags;
1327 if (flags & ATTENTION_PARITY)
1328 sb_info->parity_mask[i] |= 1 << k;
1330 k += ATTENTION_LENGTH(flags);
1332 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1333 "Attn Mask [Reg %d]: 0x%08x\n",
1334 i, sb_info->parity_mask[i]);
1337 /* Set the address of cleanup for the mcp attention */
1338 sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
1339 MISC_REG_AEU_GENERAL_ATTN_0;
1341 ecore_int_sb_attn_setup(p_hwfn, p_ptt);
1344 static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn,
1345 struct ecore_ptt *p_ptt)
1347 struct ecore_dev *p_dev = p_hwfn->p_dev;
1348 struct ecore_sb_attn_info *p_sb;
1349 dma_addr_t p_phys = 0;
1353 p_sb = OSAL_ALLOC(p_dev, GFP_KERNEL, sizeof(struct ecore_sb_attn_info));
1355 DP_NOTICE(p_dev, true,
1356 "Failed to allocate `struct ecore_sb_attn_info'");
1361 p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
1362 SB_ATTN_ALIGNED_SIZE(p_hwfn));
1364 DP_NOTICE(p_dev, true,
1365 "Failed to allocate status block (attentions)");
1366 OSAL_FREE(p_dev, p_sb);
1370 /* Attention setup */
1371 p_hwfn->p_sb_attn = p_sb;
1372 ecore_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);
1374 return ECORE_SUCCESS;
1377 /* coalescing timeout = timeset << (timer_res + 1) */
1378 #ifdef RTE_LIBRTE_QEDE_RX_COAL_US
1379 #define ECORE_CAU_DEF_RX_USECS RTE_LIBRTE_QEDE_RX_COAL_US
1381 #define ECORE_CAU_DEF_RX_USECS 24
1384 #ifdef RTE_LIBRTE_QEDE_TX_COAL_US
1385 #define ECORE_CAU_DEF_TX_USECS RTE_LIBRTE_QEDE_TX_COAL_US
1387 #define ECORE_CAU_DEF_TX_USECS 48
1390 void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn,
1391 struct cau_sb_entry *p_sb_entry,
1392 u8 pf_id, u16 vf_number, u8 vf_valid)
1394 struct ecore_dev *p_dev = p_hwfn->p_dev;
1397 OSAL_MEMSET(p_sb_entry, 0, sizeof(*p_sb_entry));
1399 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
1400 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
1401 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
1402 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
1403 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
1405 /* setting the time resolution to a fixed value ( = 1) */
1406 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0,
1407 ECORE_CAU_DEF_RX_TIMER_RES);
1408 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1,
1409 ECORE_CAU_DEF_TX_TIMER_RES);
1411 cau_state = CAU_HC_DISABLE_STATE;
1413 if (p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
1414 cau_state = CAU_HC_ENABLE_STATE;
1415 if (!p_dev->rx_coalesce_usecs) {
1416 p_dev->rx_coalesce_usecs = ECORE_CAU_DEF_RX_USECS;
1417 DP_INFO(p_dev, "Coalesce params rx-usecs=%u\n",
1418 p_dev->rx_coalesce_usecs);
1420 if (!p_dev->tx_coalesce_usecs) {
1421 p_dev->tx_coalesce_usecs = ECORE_CAU_DEF_TX_USECS;
1422 DP_INFO(p_dev, "Coalesce params tx-usecs=%u\n",
1423 p_dev->tx_coalesce_usecs);
1427 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
1428 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
1431 void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
1432 struct ecore_ptt *p_ptt,
1433 dma_addr_t sb_phys, u16 igu_sb_id,
1434 u16 vf_number, u8 vf_valid)
1436 struct cau_sb_entry sb_entry;
1438 ecore_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
1439 vf_number, vf_valid);
1441 if (p_hwfn->hw_init_done) {
1442 /* Wide-bus, initialize via DMAE */
1443 u64 phys_addr = (u64)sb_phys;
1445 ecore_dmae_host2grc(p_hwfn, p_ptt,
1446 (u64)(osal_uintptr_t)&phys_addr,
1447 CAU_REG_SB_ADDR_MEMORY +
1448 igu_sb_id * sizeof(u64), 2, 0);
1449 ecore_dmae_host2grc(p_hwfn, p_ptt,
1450 (u64)(osal_uintptr_t)&sb_entry,
1451 CAU_REG_SB_VAR_MEMORY +
1452 igu_sb_id * sizeof(u64), 2, 0);
1454 /* Initialize Status Block Address */
1455 STORE_RT_REG_AGG(p_hwfn,
1456 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
1457 igu_sb_id * 2, sb_phys);
1459 STORE_RT_REG_AGG(p_hwfn,
1460 CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
1461 igu_sb_id * 2, sb_entry);
1464 /* Configure pi coalescing if set */
1465 if (p_hwfn->p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
1466 u8 num_tc = 1; /* @@@TBD aelior ECORE_MULTI_COS */
1467 u8 timeset = p_hwfn->p_dev->rx_coalesce_usecs >>
1468 (ECORE_CAU_DEF_RX_TIMER_RES + 1);
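/* Worked example of the "timeout = timeset << (timer_res + 1)" formula
 * above: with rx_coalesce_usecs = 24 and timer resolution RES, the
 * timeset computed here is 24 >> (RES + 1), so the HW timeout comes back
 * to roughly 24us (rounded down to a multiple of 1 << (RES + 1)).
 */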
1471 ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
1472 ECORE_COAL_RX_STATE_MACHINE, timeset);
1474 timeset = p_hwfn->p_dev->tx_coalesce_usecs >>
1475 (ECORE_CAU_DEF_TX_TIMER_RES + 1);
1477 for (i = 0; i < num_tc; i++) {
1478 ecore_int_cau_conf_pi(p_hwfn, p_ptt,
1479 igu_sb_id, TX_PI(i),
1480 ECORE_COAL_TX_STATE_MACHINE,
1486 void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
1487 struct ecore_ptt *p_ptt,
1488 u16 igu_sb_id, u32 pi_index,
1489 enum ecore_coalescing_fsm coalescing_fsm, u8 timeset)
1491 struct cau_pi_entry pi_entry;
1492 u32 sb_offset, pi_offset;
1494 if (IS_VF(p_hwfn->p_dev))
1495 return; /* @@@TBD MichalK- VF CAU... */
1497 sb_offset = igu_sb_id * PIS_PER_SB;
1498 OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry));
1500 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
1501 if (coalescing_fsm == ECORE_COAL_RX_STATE_MACHINE)
1502 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
1504 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);
1506 pi_offset = sb_offset + pi_index;
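/* The PI memory behaves as a flat array of cau_pi_entry dwords: the
 * entry for (igu_sb_id, pi_index) lives at index
 * igu_sb_id * PIS_PER_SB + pi_index, as computed above.
 */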
1507 if (p_hwfn->hw_init_done) {
1508 ecore_wr(p_hwfn, p_ptt,
1509 CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
1510 *((u32 *)&(pi_entry)));
1512 STORE_RT_REG(p_hwfn,
1513 CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
1514 *((u32 *)&(pi_entry)));
1518 void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
1519 struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info)
1521 /* zero status block and ack counter */
1522 sb_info->sb_ack = 0;
1523 OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
1525 if (IS_PF(p_hwfn->p_dev))
1526 ecore_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
1527 sb_info->igu_sb_id, 0, 0);
1531 * @brief ecore_get_igu_sb_id - given a sw sb_id return the
1539 static u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
1543 /* Assuming a continuous set of IGU SBs dedicated to the given PF */
1544 if (sb_id == ECORE_SP_SB_ID)
1545 igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
1546 else if (IS_PF(p_hwfn->p_dev))
1547 igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb;
1549 igu_sb_id = ecore_vf_get_igu_sb_id(p_hwfn, sb_id);
1551 if (sb_id == ECORE_SP_SB_ID)
1552 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1553 "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
1555 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1556 "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);
1561 enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
1562 struct ecore_ptt *p_ptt,
1563 struct ecore_sb_info *sb_info,
1565 dma_addr_t sb_phy_addr, u16 sb_id)
1567 sb_info->sb_virt = sb_virt_addr;
1568 sb_info->sb_phys = sb_phy_addr;
1570 sb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id);
1572 if (sb_id != ECORE_SP_SB_ID) {
1573 p_hwfn->sbs_info[sb_id] = sb_info;
1576 #ifdef ECORE_CONFIG_DIRECT_HWFN
1577 sb_info->p_hwfn = p_hwfn;
1579 sb_info->p_dev = p_hwfn->p_dev;
1581 /* The igu address will hold the absolute address that needs to be
1582 * written to for a specific status block
1584 if (IS_PF(p_hwfn->p_dev)) {
1585 sb_info->igu_addr = (u8 OSAL_IOMEM *)p_hwfn->regview +
1586 GTT_BAR0_MAP_REG_IGU_CMD + (sb_info->igu_sb_id << 3);
1590 (u8 OSAL_IOMEM *)p_hwfn->regview +
1591 PXP_VF_BAR0_START_IGU +
1592 ((IGU_CMD_INT_ACK_BASE + sb_info->igu_sb_id) << 3);
1595 sb_info->flags |= ECORE_SB_INFO_INIT;
1597 ecore_int_sb_setup(p_hwfn, p_ptt, sb_info);
1599 return ECORE_SUCCESS;
1602 enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
1603 struct ecore_sb_info *sb_info,
1606 if (sb_id == ECORE_SP_SB_ID) {
1607 DP_ERR(p_hwfn, "Do Not free sp sb using this function");
1611 /* zero status block and ack counter */
1612 sb_info->sb_ack = 0;
1613 OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
1615 if (p_hwfn->sbs_info[sb_id] != OSAL_NULL) {
1616 p_hwfn->sbs_info[sb_id] = OSAL_NULL;
1620 return ECORE_SUCCESS;
1623 static void ecore_int_sp_sb_free(struct ecore_hwfn *p_hwfn)
1625 struct ecore_sb_sp_info *p_sb = p_hwfn->p_sp_sb;
1630 if (p_sb->sb_info.sb_virt) {
1631 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
1632 p_sb->sb_info.sb_virt,
1633 p_sb->sb_info.sb_phys,
1634 SB_ALIGNED_SIZE(p_hwfn));
1637 OSAL_FREE(p_hwfn->p_dev, p_sb);
1640 static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
1641 struct ecore_ptt *p_ptt)
1643 struct ecore_sb_sp_info *p_sb;
1644 dma_addr_t p_phys = 0;
1649 OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
1650 sizeof(struct ecore_sb_sp_info));
1652 DP_NOTICE(p_hwfn, true,
1653 "Failed to allocate `struct ecore_sb_info'");
1658 p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
1659 &p_phys, SB_ALIGNED_SIZE(p_hwfn));
1661 DP_NOTICE(p_hwfn, true, "Failed to allocate status block");
1662 OSAL_FREE(p_hwfn->p_dev, p_sb);
1666 /* Status Block setup */
1667 p_hwfn->p_sp_sb = p_sb;
1668 ecore_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info,
1669 p_virt, p_phys, ECORE_SP_SB_ID);
1671 OSAL_MEMSET(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));
1673 return ECORE_SUCCESS;
1676 enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn,
1677 ecore_int_comp_cb_t comp_cb,
1679 u8 *sb_idx, __le16 **p_fw_cons)
1681 struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
1682 enum _ecore_status_t rc = ECORE_NOMEM;
1685 /* Look for a free index */
1686 for (pi = 0; pi < OSAL_ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
1687 if (p_sp_sb->pi_info_arr[pi].comp_cb != OSAL_NULL)
1690 p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
1691 p_sp_sb->pi_info_arr[pi].cookie = cookie;
1693 *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
1701 enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn, u8 pi)
1703 struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
1705 if (p_sp_sb->pi_info_arr[pi].comp_cb == OSAL_NULL)
1708 p_sp_sb->pi_info_arr[pi].comp_cb = OSAL_NULL;
1709 p_sp_sb->pi_info_arr[pi].cookie = OSAL_NULL;
1710 return ECORE_SUCCESS;
1713 u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn)
1715 return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
1718 void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn,
1719 struct ecore_ptt *p_ptt,
1720 enum ecore_int_mode int_mode)
1722 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;
1725 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
1726 DP_INFO(p_hwfn, "FPGA - don't enable ATTN generation in IGU\n");
1729 igu_pf_conf |= IGU_PF_CONF_ATTN_BIT_EN;
1731 p_hwfn->p_dev->int_mode = int_mode;
1732 switch (p_hwfn->p_dev->int_mode) {
1733 case ECORE_INT_MODE_INTA:
1734 igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
1735 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
1738 case ECORE_INT_MODE_MSI:
1739 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
1740 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
1743 case ECORE_INT_MODE_MSIX:
1744 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
1746 case ECORE_INT_MODE_POLL:
1750 ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
1753 static void ecore_int_igu_enable_attn(struct ecore_hwfn *p_hwfn,
1754 struct ecore_ptt *p_ptt)
1757 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
1759 "FPGA - Don't enable Attentions in IGU and MISC\n");
1764 /* Configure AEU signal change to produce attentions */
1765 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
1766 ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
1767 ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
1768 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);
1770 OSAL_MMIOWB(p_hwfn->p_dev);
1772 /* Unmask AEU signals toward IGU */
1773 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
1776 enum _ecore_status_t
1777 ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1778 enum ecore_int_mode int_mode)
1780 enum _ecore_status_t rc = ECORE_SUCCESS;
1783 /* @@@tmp - Mask General HW attentions 0-31, Enable 32-36 */
1784 tmp = ecore_rd(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0);
1786 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE3_IGU_OUT_0, 0);
1787 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0, tmp);
1789 /* @@@tmp - Starting with MFW 8.2.1.0 we've started hitting AVS stop
1790 * attentions. Since we're waiting for a BRCM answer regarding this
1791 * attention, in the meantime we simply mask it.
1793 tmp = ecore_rd(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0);
1795 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0, tmp);
1797 /* @@@tmp - Mask interrupt sources - should move to init tool;
1798 * Also, correct for A0 [might still change in B0].
1801 attn_blocks[BLOCK_BRB].chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].
1802 int_regs[0]->mask_addr;
1803 tmp = ecore_rd(p_hwfn, p_ptt, reg_addr);
1804 tmp |= (1 << 21); /* Was PKT4_LEN_ERROR */
1805 ecore_wr(p_hwfn, p_ptt, reg_addr, tmp);
1807 ecore_int_igu_enable_attn(p_hwfn, p_ptt);
1809 if ((int_mode != ECORE_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
1810 rc = OSAL_SLOWPATH_IRQ_REQ(p_hwfn);
1811 if (rc != ECORE_SUCCESS) {
1812 DP_NOTICE(p_hwfn, true,
1813 "Slowpath IRQ request failed\n");
1814 return ECORE_NORESOURCES;
1816 p_hwfn->b_int_requested = true;
1819 /* Enable interrupt Generation */
1820 ecore_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
1822 p_hwfn->b_int_enabled = 1;
1827 void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
1828 struct ecore_ptt *p_ptt)
1830 p_hwfn->b_int_enabled = 0;
1832 if (IS_VF(p_hwfn->p_dev))
1835 ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
1838 #define IGU_CLEANUP_SLEEP_LENGTH (1000)
1839 void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
1840 struct ecore_ptt *p_ptt,
1841 u32 sb_id, bool cleanup_set, u16 opaque_fid)
1843 u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
1844 u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id;
1845 u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
1846 u8 type = 0; /* FIXME MichalS type??? */
1848 OSAL_BUILD_BUG_ON((IGU_REG_CLEANUP_STATUS_4 -
1849 IGU_REG_CLEANUP_STATUS_0) != 0x200);
1851 /* Use the Control Command Register to perform the cleanup. There is an
1852 * option to do this using the IGU bar, but then it can't be used for VFs.
1855 /* Set the data field */
1856 SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
1857 SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, type);
1858 SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);
1860 /* Set the control register */
1861 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
1862 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
1863 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);
1865 ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);
1867 OSAL_BARRIER(p_hwfn->p_dev);
1869 ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);
1871 OSAL_MMIOWB(p_hwfn->p_dev);
1873 /* calculate where to read the status bit from */
1874 sb_bit = 1 << (sb_id % 32);
1875 sb_bit_addr = sb_id / 32 * sizeof(u32);
1877 sb_bit_addr += IGU_REG_CLEANUP_STATUS_0 + (0x80 * type);
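/* e.g. for sb_id 37 with type 0 this polls bit 5 (37 % 32) of the second
 * dword (37 / 32 == 1) starting from IGU_REG_CLEANUP_STATUS_0.
 */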
1879 /* Now wait for the command to complete */
1880 while (--sleep_cnt) {
1881 val = ecore_rd(p_hwfn, p_ptt, sb_bit_addr);
1882 if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
1888 DP_NOTICE(p_hwfn, true,
1889 "Timeout waiting for clear status 0x%08x [for sb %d]\n",
1893 void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
1894 struct ecore_ptt *p_ptt,
1895 u32 sb_id, u16 opaque, bool b_set)
1901 ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 1, opaque);
1904 ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque);
1906 /* Clear the CAU for the SB */
1907 for (pi = 0; pi < 12; pi++)
1908 ecore_wr(p_hwfn, p_ptt,
1909 CAU_REG_PI_MEMORY + (sb_id * 12 + pi) * 4, 0);
1912 void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
1913 struct ecore_ptt *p_ptt,
1914 bool b_set, bool b_slowpath)
1916 u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb;
1917 u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt;
1918 u32 sb_id = 0, val = 0;
1920 /* @@@TBD MichalK temporary... should be moved to init-tool... */
1921 val = ecore_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
1922 val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
1923 val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
1924 ecore_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
1927 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1928 "IGU cleaning SBs [%d,...,%d]\n",
1929 igu_base_sb, igu_base_sb + igu_sb_cnt - 1);
1931 for (sb_id = igu_base_sb; sb_id < igu_base_sb + igu_sb_cnt; sb_id++)
1932 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
1933 p_hwfn->hw_info.opaque_fid,
1939 sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
1940 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1941 "IGU cleaning slowpath SB [%d]\n", sb_id);
1942 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
1943 p_hwfn->hw_info.opaque_fid, b_set);
1946 static u32 ecore_int_igu_read_cam_block(struct ecore_hwfn *p_hwfn,
1947 struct ecore_ptt *p_ptt, u16 sb_id)
1949 u32 val = ecore_rd(p_hwfn, p_ptt,
1950 IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id);
1951 struct ecore_igu_block *p_block;
1953 p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
1955 /* stop scanning when we hit the first invalid PF entry */
1956 if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
1957 GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
1960 /* Fill the block information */
1961 p_block->status = ECORE_IGU_STATUS_VALID;
1962 p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
1963 p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
1964 p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);
1966 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1967 "IGU_BLOCK: [SB 0x%04x, Value in CAM 0x%08x] func_id = %d"
1968 " is_pf = %d vector_num = 0x%x\n",
1969 sb_id, val, p_block->function_id, p_block->is_pf,
1970 p_block->vector_number);
1976 enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
1977 struct ecore_ptt *p_ptt)
1979 struct ecore_igu_info *p_igu_info;
1980 struct ecore_igu_block *p_block;
1981 u16 sb_id, last_iov_sb_id = 0;
1982 u32 min_vf, max_vf, val;
1983 u16 prev_sb_id = 0xFF;
1985 p_hwfn->hw_info.p_igu_info = OSAL_ALLOC(p_hwfn->p_dev,
1987 sizeof(*p_igu_info));
1988 if (!p_hwfn->hw_info.p_igu_info)
1991 OSAL_MEMSET(p_hwfn->hw_info.p_igu_info, 0, sizeof(*p_igu_info));
1993 p_igu_info = p_hwfn->hw_info.p_igu_info;
1995 /* Initialize base sb / sb cnt for PFs and VFs */
1996 p_igu_info->igu_base_sb = 0xffff;
1997 p_igu_info->igu_sb_cnt = 0;
1998 p_igu_info->igu_dsb_id = 0xffff;
1999 p_igu_info->igu_base_sb_iov = 0xffff;
2001 #ifdef CONFIG_ECORE_SRIOV
2002 min_vf = p_hwfn->hw_info.first_vf_in_pf;
2003 max_vf = p_hwfn->hw_info.first_vf_in_pf +
2004 p_hwfn->p_dev->sriov_info.total_vfs;
2010 for (sb_id = 0; sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
2012 p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
2013 val = ecore_int_igu_read_cam_block(p_hwfn, p_ptt, sb_id);
2014 if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
2015 GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
2018 if (p_block->is_pf) {
2019 if (p_block->function_id == p_hwfn->rel_pf_id) {
2020 p_block->status |= ECORE_IGU_STATUS_PF;
2022 if (p_block->vector_number == 0) {
2023 if (p_igu_info->igu_dsb_id == 0xffff)
2024 p_igu_info->igu_dsb_id = sb_id;
2026 if (p_igu_info->igu_base_sb == 0xffff) {
2027 p_igu_info->igu_base_sb = sb_id;
2028 } else if (prev_sb_id != sb_id - 1) {
2029 DP_NOTICE(p_hwfn->p_dev, false,
2037 /* we don't count the default */
2038 (p_igu_info->igu_sb_cnt)++;
2042 if ((p_block->function_id >= min_vf) &&
2043 (p_block->function_id < max_vf)) {
2044 /* Available for VFs of this PF */
2045 if (p_igu_info->igu_base_sb_iov == 0xffff) {
2046 p_igu_info->igu_base_sb_iov = sb_id;
2047 } else if (last_iov_sb_id != sb_id - 1) {
2049 DP_VERBOSE(p_hwfn->p_dev,
2051 "First uninited IGU"
2056 DP_NOTICE(p_hwfn->p_dev, false,
2067 p_block->status |= ECORE_IGU_STATUS_FREE;
2068 p_hwfn->hw_info.p_igu_info->free_blks++;
2069 last_iov_sb_id = sb_id;
2073 p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks;
2075 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
2076 "IGU igu_base_sb=0x%x [IOV 0x%x] igu_sb_cnt=%d [IOV 0x%x] "
2077 "igu_dsb_id=0x%x\n",
2078 p_igu_info->igu_base_sb, p_igu_info->igu_base_sb_iov,
2079 p_igu_info->igu_sb_cnt, p_igu_info->igu_sb_cnt_iov,
2080 p_igu_info->igu_dsb_id);
2082 if (p_igu_info->igu_base_sb == 0xffff ||
2083 p_igu_info->igu_dsb_id == 0xffff || p_igu_info->igu_sb_cnt == 0) {
2084 DP_NOTICE(p_hwfn, true,
2085 "IGU CAM returned invalid values igu_base_sb=0x%x "
2086 "igu_sb_cnt=%d igu_dsb_id=0x%x\n",
2087 p_igu_info->igu_base_sb, p_igu_info->igu_sb_cnt,
2088 p_igu_info->igu_dsb_id);
2092 return ECORE_SUCCESS;
2096 * @brief Initialize igu runtime registers
2100 void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn)
2102 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;
2104 STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
2107 #define LSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - \
2108 IGU_CMD_INT_ACK_BASE)
2109 #define MSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - \
2110 IGU_CMD_INT_ACK_BASE)
2111 u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn)
2113 u32 intr_status_hi = 0, intr_status_lo = 0;
2114 u64 intr_status = 0;
2116 intr_status_lo = REG_RD(p_hwfn,
2117 GTT_BAR0_MAP_REG_IGU_CMD +
2118 LSB_IGU_CMD_ADDR * 8);
2119 intr_status_hi = REG_RD(p_hwfn,
2120 GTT_BAR0_MAP_REG_IGU_CMD +
2121 MSB_IGU_CMD_ADDR * 8);
2122 intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;
2127 static void ecore_int_sp_dpc_setup(struct ecore_hwfn *p_hwfn)
2129 OSAL_DPC_INIT(p_hwfn->sp_dpc, p_hwfn);
2130 p_hwfn->b_sp_dpc_enabled = true;
2133 static enum _ecore_status_t ecore_int_sp_dpc_alloc(struct ecore_hwfn *p_hwfn)
2135 p_hwfn->sp_dpc = OSAL_DPC_ALLOC(p_hwfn);
2136 if (!p_hwfn->sp_dpc)
2139 return ECORE_SUCCESS;
2142 static void ecore_int_sp_dpc_free(struct ecore_hwfn *p_hwfn)
2144 OSAL_FREE(p_hwfn->p_dev, p_hwfn->sp_dpc);
2147 enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn,
2148 struct ecore_ptt *p_ptt)
2150 enum _ecore_status_t rc = ECORE_SUCCESS;
2152 rc = ecore_int_sp_dpc_alloc(p_hwfn);
2153 if (rc != ECORE_SUCCESS) {
2154 DP_ERR(p_hwfn->p_dev, "Failed to allocate sp dpc mem\n");
2158 rc = ecore_int_sp_sb_alloc(p_hwfn, p_ptt);
2159 if (rc != ECORE_SUCCESS) {
2160 DP_ERR(p_hwfn->p_dev, "Failed to allocate sp sb mem\n");
2164 rc = ecore_int_sb_attn_alloc(p_hwfn, p_ptt);
2165 if (rc != ECORE_SUCCESS)
2166 DP_ERR(p_hwfn->p_dev, "Failed to allocate sb attn mem\n");
2171 void ecore_int_free(struct ecore_hwfn *p_hwfn)
2173 ecore_int_sp_sb_free(p_hwfn);
2174 ecore_int_sb_attn_free(p_hwfn);
2175 ecore_int_sp_dpc_free(p_hwfn);
2178 void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
2180 if (!p_hwfn || !p_hwfn->p_sp_sb || !p_hwfn->p_sb_attn)
2183 ecore_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
2184 ecore_int_sb_attn_setup(p_hwfn, p_ptt);
2185 ecore_int_sp_dpc_setup(p_hwfn);
2188 void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
2189 struct ecore_sb_cnt_info *p_sb_cnt_info)
2191 struct ecore_igu_info *info = p_hwfn->hw_info.p_igu_info;
2193 if (!info || !p_sb_cnt_info)
2196 p_sb_cnt_info->sb_cnt = info->igu_sb_cnt;
2197 p_sb_cnt_info->sb_iov_cnt = info->igu_sb_cnt_iov;
2198 p_sb_cnt_info->sb_free_blk = info->free_blks;
2201 u16 ecore_int_queue_id_from_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
2203 struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
2205 /* Determine origin of SB id */
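/* This PF's own SBs map to queue ids [0, igu_sb_cnt), and its IOV SBs
 * are appended right after them, at [igu_sb_cnt, igu_sb_cnt +
 * igu_sb_cnt_iov).
 */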
2206 if ((sb_id >= p_info->igu_base_sb) &&
2207 (sb_id < p_info->igu_base_sb + p_info->igu_sb_cnt)) {
2208 return sb_id - p_info->igu_base_sb;
2209 } else if ((sb_id >= p_info->igu_base_sb_iov) &&
2210 (sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) {
2211 return sb_id - p_info->igu_base_sb_iov + p_info->igu_sb_cnt;
2214 DP_NOTICE(p_hwfn, true, "SB %d not in range for function\n",
2219 void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev)
2223 for_each_hwfn(p_dev, i)
2224 p_dev->hwfns[i].b_int_requested = false;