2 * Copyright (c) 2016 QLogic Corporation.
6 * See LICENSE.qede_pmd for copyright and licensing details.
11 #include "ecore_spq.h"
13 #include "ecore_gtt_reg_addr.h"
14 #include "ecore_init_ops.h"
15 #include "ecore_rt_defs.h"
16 #include "ecore_int.h"
19 #include "ecore_sriov.h"
21 #include "ecore_hw_defs.h"
22 #include "ecore_hsi_common.h"
23 #include "ecore_mcp.h"
25 struct ecore_pi_info {
26 ecore_int_comp_cb_t comp_cb;
27 void *cookie; /* Will be sent to the compl cb function */
30 struct ecore_sb_sp_info {
31 struct ecore_sb_info sb_info;
32 /* per protocol index data */
33 struct ecore_pi_info pi_info_arr[PIS_PER_SB];
36 enum ecore_attention_type {
38 ECORE_ATTN_TYPE_PARITY,
41 #define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
42 ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)
44 struct aeu_invert_reg_bit {
47 #define ATTENTION_PARITY (1 << 0)
49 #define ATTENTION_LENGTH_MASK (0x00000ff0)
50 #define ATTENTION_LENGTH_SHIFT (4)
51 #define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \
52 ATTENTION_LENGTH_SHIFT)
53 #define ATTENTION_SINGLE (1 << ATTENTION_LENGTH_SHIFT)
54 #define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY)
55 #define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \
58 /* For multi-bit entries, numbering starts at this offset */
59 #define ATTENTION_OFFSET_MASK (0x000ff000)
60 #define ATTENTION_OFFSET_SHIFT (12)
62 #define ATTENTION_BB_MASK (0x00700000)
63 #define ATTENTION_BB_SHIFT (20)
64 #define ATTENTION_BB(value) ((value) << ATTENTION_BB_SHIFT)
65 #define ATTENTION_BB_DIFFERENT (1 << 23)
67 #define ATTENTION_CLEAR_ENABLE (1 << 28)
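/* Worked example (editor's note): the flag word packs an entry's span and
 * meaning. ATTENTION_SINGLE is (1 << ATTENTION_LENGTH_SHIFT), i.e. a span
 * of one bit; ATTENTION_PAR marks that single bit as a parity source; and
 * ATTENTION_PAR_INT encodes a span of 2 - a parity bit plus an interrupt
 * bit - so ATTENTION_LENGTH(ATTENTION_PAR_INT) == 2 while
 * ATTENTION_LENGTH(ATTENTION_SINGLE) == 1.
 */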
70 /* Callback to call if the attention is triggered */
71 enum _ecore_status_t (*cb)(struct ecore_hwfn *p_hwfn);
73 enum block_id block_index;
76 struct aeu_invert_reg {
77 struct aeu_invert_reg_bit bits[32];
80 #define MAX_ATTN_GRPS (8)
81 #define NUM_ATTN_REGS (9)
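/* Editor's note: the AEU exposes NUM_ATTN_REGS (9) 32-bit "after invert"
 * registers - up to 288 attention sources - routed into MAX_ATTN_GRPS (8)
 * output groups; the deassertion handler below walks all nine registers
 * once per deasserted group.
 */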
83 static enum _ecore_status_t ecore_mcp_attn_cb(struct ecore_hwfn *p_hwfn)
85 u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);
87 DP_INFO(p_hwfn->p_dev, "MCP_REG_CPU_STATE: %08x - Masking...\n", tmp);
88 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK, 0xffffffff);
93 #define ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK (0x3c000)
94 #define ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT (14)
95 #define ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK (0x03fc0)
96 #define ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT (6)
97 #define ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK (0x00020)
98 #define ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT (5)
99 #define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK (0x0001e)
100 #define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT (1)
101 #define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK (0x1)
102 #define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_SHIFT (0)
103 #define ECORE_PSWHST_ATTENTION_VF_DISABLED (0x1)
104 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS (0x1)
105 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK (0x1)
106 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT (0)
107 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK (0x1e)
108 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT (1)
109 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK (0x20)
110 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT (5)
111 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK (0x3fc0)
112 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT (6)
113 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK (0x3c000)
114 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT (14)
115 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK (0x3fc0000)
116 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT (18)
117 static enum _ecore_status_t ecore_pswhst_attn_cb(struct ecore_hwfn *p_hwfn)
120 ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
121 PSWHST_REG_VF_DISABLED_ERROR_VALID);
123 /* Disabled VF access */
124 if (tmp & ECORE_PSWHST_ATTENTION_VF_DISABLED) {
127 addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
128 PSWHST_REG_VF_DISABLED_ERROR_ADDRESS);
129 data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
130 PSWHST_REG_VF_DISABLED_ERROR_DATA);
131 DP_INFO(p_hwfn->p_dev,
132 "PF[0x%02x] VF [0x%02x] [Valid 0x%02x] Client [0x%02x]"
133 " Write [0x%02x] Addr [0x%08x]\n",
134 (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK)
135 >> ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT),
136 (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK)
137 >> ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT),
139 ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK) >>
140 ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT),
142 ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK) >>
143 ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT),
145 ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK) >>
146 ECORE_PSWHST_ATTENTION_DISABLED_WRITE_SHIFT),
150 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
151 PSWHST_REG_INCORRECT_ACCESS_VALID);
152 if (tmp & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS) {
153 u32 addr, data, length;
155 addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
156 PSWHST_REG_INCORRECT_ACCESS_ADDRESS);
157 data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
158 PSWHST_REG_INCORRECT_ACCESS_DATA);
159 length = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
160 PSWHST_REG_INCORRECT_ACCESS_LENGTH);
162 DP_INFO(p_hwfn->p_dev,
163 "Incorrect access to %08x of length %08x - PF [%02x]"
164 " VF [%04x] [valid %02x] client [%02x] write [%02x]"
165 " Byte-Enable [%04x] [%08x]\n",
168 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK) >>
169 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT),
171 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK) >>
172 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT),
174 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK) >>
175 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT),
177 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK) >>
178 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT),
180 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK) >>
181 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT),
183 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK) >>
184 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT),
188 /* TODO - We know 'some' of these are legal due to virtualization,
189 * but is it true for all of them?
191 return ECORE_SUCCESS;
194 #define ECORE_GRC_ATTENTION_VALID_BIT (1 << 0)
195 #define ECORE_GRC_ATTENTION_ADDRESS_MASK (0x7fffff << 0)
196 #define ECORE_GRC_ATTENTION_RDWR_BIT (1 << 23)
197 #define ECORE_GRC_ATTENTION_MASTER_MASK (0xf << 24)
198 #define ECORE_GRC_ATTENTION_MASTER_SHIFT (24)
199 #define ECORE_GRC_ATTENTION_PF_MASK (0xf)
200 #define ECORE_GRC_ATTENTION_VF_MASK (0xff << 4)
201 #define ECORE_GRC_ATTENTION_VF_SHIFT (4)
202 #define ECORE_GRC_ATTENTION_PRIV_MASK (0x3 << 14)
203 #define ECORE_GRC_ATTENTION_PRIV_SHIFT (14)
204 #define ECORE_GRC_ATTENTION_PRIV_VF (0)
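/* Worked example with hypothetical values (editor's note): if
 * GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0 reads 0x00801000, the RDWR bit (23)
 * is set ("Write to") and the address field is 0x001000, which the
 * handler below shifts left by 2 to report the byte address 0x4000.
 */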
205 static const char *grc_timeout_attn_master_to_str(u8 master)
233 static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn)
237 /* We've already cleared the timeout interrupt register, so we learn
238 * of interrupts via the validity register
240 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
241 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
242 if (!(tmp & ECORE_GRC_ATTENTION_VALID_BIT))
245 /* Read the GRC timeout information */
246 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
247 GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0);
248 tmp2 = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
249 GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);
251 DP_NOTICE(p_hwfn->p_dev, false,
252 "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n",
254 (tmp & ECORE_GRC_ATTENTION_RDWR_BIT) ? "Write to"
256 (tmp & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2,
257 grc_timeout_attn_master_to_str(
258 (tmp & ECORE_GRC_ATTENTION_MASTER_MASK) >>
259 ECORE_GRC_ATTENTION_MASTER_SHIFT),
260 (tmp2 & ECORE_GRC_ATTENTION_PF_MASK),
261 (((tmp2 & ECORE_GRC_ATTENTION_PRIV_MASK) >>
262 ECORE_GRC_ATTENTION_PRIV_SHIFT) ==
263 ECORE_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant:)",
264 (tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >>
265 ECORE_GRC_ATTENTION_VF_SHIFT);
268 /* Regardless of anything else, clear the validity bit */
269 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
270 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);
271 return ECORE_SUCCESS;
274 #define ECORE_PGLUE_ATTENTION_VALID (1 << 29)
275 #define ECORE_PGLUE_ATTENTION_RD_VALID (1 << 26)
276 #define ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf << 20)
277 #define ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20)
278 #define ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID (1 << 19)
279 #define ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff << 24)
280 #define ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24)
281 #define ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR (1 << 21)
282 #define ECORE_PGLUE_ATTENTION_DETAILS2_BME (1 << 22)
283 #define ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN (1 << 23)
284 #define ECORE_PGLUE_ATTENTION_ICPL_VALID (1 << 23)
285 #define ECORE_PGLUE_ATTENTION_ZLR_VALID (1 << 25)
286 #define ECORE_PGLUE_ATTENTION_ILT_VALID (1 << 23)
288 enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
289 struct ecore_ptt *p_ptt)
293 tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2);
294 if (tmp & ECORE_PGLUE_ATTENTION_VALID) {
295 u32 addr_lo, addr_hi, details;
297 addr_lo = ecore_rd(p_hwfn, p_ptt,
298 PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
299 addr_hi = ecore_rd(p_hwfn, p_ptt,
300 PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
301 details = ecore_rd(p_hwfn, p_ptt,
302 PGLUE_B_REG_TX_ERR_WR_DETAILS);
304 DP_NOTICE(p_hwfn, false,
305 "Illegal write by chip to [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
306 addr_hi, addr_lo, details,
308 ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
309 ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
311 ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
312 ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
314 ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0),
316 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ?
318 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ?
320 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ?
324 tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2);
325 if (tmp & ECORE_PGLUE_ATTENTION_RD_VALID) {
326 u32 addr_lo, addr_hi, details;
328 addr_lo = ecore_rd(p_hwfn, p_ptt,
329 PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
330 addr_hi = ecore_rd(p_hwfn, p_ptt,
331 PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
332 details = ecore_rd(p_hwfn, p_ptt,
333 PGLUE_B_REG_TX_ERR_RD_DETAILS);
335 DP_NOTICE(p_hwfn, false,
336 "Illegal read by chip from [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
337 addr_hi, addr_lo, details,
339 ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
340 ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
342 ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
343 ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
345 ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0),
347 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ?
349 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ?
351 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ?
355 tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
356 if (tmp & ECORE_PGLUE_ATTENTION_ICPL_VALID)
357 DP_NOTICE(p_hwfn, false, "ICPL error - %08x\n", tmp);
359 tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
360 if (tmp & ECORE_PGLUE_ATTENTION_ZLR_VALID) {
361 u32 addr_hi, addr_lo;
363 addr_lo = ecore_rd(p_hwfn, p_ptt,
364 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
365 addr_hi = ecore_rd(p_hwfn, p_ptt,
366 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);
368 DP_NOTICE(p_hwfn, false,
369 "ICPL erorr - %08x [Address %08x:%08x]\n",
370 tmp, addr_hi, addr_lo);
373 tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
374 if (tmp & ECORE_PGLUE_ATTENTION_ILT_VALID) {
375 u32 addr_hi, addr_lo, details;
377 addr_lo = ecore_rd(p_hwfn, p_ptt,
378 PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
379 addr_hi = ecore_rd(p_hwfn, p_ptt,
380 PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
381 details = ecore_rd(p_hwfn, p_ptt,
382 PGLUE_B_REG_VF_ILT_ERR_DETAILS);
384 DP_NOTICE(p_hwfn, false,
385 "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n",
386 details, tmp, addr_hi, addr_lo);
389 /* Clear the indications */
390 ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));
392 return ECORE_SUCCESS;
395 static enum _ecore_status_t ecore_pglueb_rbc_attn_cb(struct ecore_hwfn *p_hwfn)
397 return ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt);
400 static enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn)
402 DP_NOTICE(p_hwfn, false, "FW assertion!\n");
404 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FW_ASSERT);
409 static enum _ecore_status_t
410 ecore_general_attention_35(struct ecore_hwfn *p_hwfn)
412 DP_INFO(p_hwfn, "General attention 35!\n");
414 return ECORE_SUCCESS;
417 #define ECORE_DORQ_ATTENTION_REASON_MASK (0xfffff)
418 #define ECORE_DORQ_ATTENTION_OPAQUE_MASK (0xffff)
419 #define ECORE_DORQ_ATTENTION_SIZE_MASK (0x7f0000)
420 #define ECORE_DORQ_ATTENTION_SIZE_SHIFT (16)
422 static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn)
426 reason = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, DORQ_REG_DB_DROP_REASON) &
427 ECORE_DORQ_ATTENTION_REASON_MASK;
429 u32 details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
430 DORQ_REG_DB_DROP_DETAILS);
432 DP_INFO(p_hwfn->p_dev,
433 "DORQ db_drop: address 0x%08x Opaque FID 0x%04x"
434 " Size [bytes] 0x%08x Reason: 0x%08x\n",
435 ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
436 DORQ_REG_DB_DROP_DETAILS_ADDRESS),
437 (u16)(details & ECORE_DORQ_ATTENTION_OPAQUE_MASK),
438 ((details & ECORE_DORQ_ATTENTION_SIZE_MASK) >>
439 ECORE_DORQ_ATTENTION_SIZE_SHIFT) * 4, reason);
445 static enum _ecore_status_t ecore_tm_attn_cb(struct ecore_hwfn *p_hwfn)
448 if (CHIP_REV_IS_EMUL_B0(p_hwfn->p_dev)) {
449 u32 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
452 if (val & ~(TM_REG_INT_STS_1_PEND_TASK_SCAN |
453 TM_REG_INT_STS_1_PEND_CONN_SCAN))
456 if (val & (TM_REG_INT_STS_1_PEND_TASK_SCAN |
457 TM_REG_INT_STS_1_PEND_CONN_SCAN))
459 "TM attention on emulation - most likely"
460 " results of clock-ratios\n");
461 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1);
462 val |= TM_REG_INT_MASK_1_PEND_CONN_SCAN |
463 TM_REG_INT_MASK_1_PEND_TASK_SCAN;
464 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1, val);
466 return ECORE_SUCCESS;
473 /* Instead of major changes to the data-structure, we have some 'special'
474 * identifiers for sources that changed meaning between adapters.
476 enum aeu_invert_reg_special_type {
477 AEU_INVERT_REG_SPECIAL_CNIG_0,
478 AEU_INVERT_REG_SPECIAL_CNIG_1,
479 AEU_INVERT_REG_SPECIAL_CNIG_2,
480 AEU_INVERT_REG_SPECIAL_CNIG_3,
481 AEU_INVERT_REG_SPECIAL_MAX,
484 static struct aeu_invert_reg_bit
485 aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = {
486 {"CNIG port 0", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
487 {"CNIG port 1", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
488 {"CNIG port 2", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
489 {"CNIG port 3", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
492 /* Notice aeu_invert_reg must be defined in the same bit order as the HW; */
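/* Editor's note: entries whose name embeds a printf-style "%d" (e.g.
 * "GPIO0 function%d" with a 32-bit span) describe a run of sources sharing
 * one template; the deassertion path below expands the template via
 * OSAL_SNPRINTF() with the index of the specific bit that fired.
 */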
493 static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
495 { /* After Invert 1 */
496 {"GPIO0 function%d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
502 { /* After Invert 2 */
503 {"PGLUE config_space", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
504 {"PGLUE misc_flr", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
505 {"PGLUE B RBC", ATTENTION_PAR_INT, ecore_pglueb_rbc_attn_cb,
507 {"PGLUE misc_mctp", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
508 {"Flash event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
509 {"SMB event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
510 {"Main Power", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
512 (8 << ATTENTION_LENGTH_SHIFT) | (1 << ATTENTION_OFFSET_SHIFT),
513 OSAL_NULL, MAX_BLOCK_ID},
514 {"PCIE glue/PXP VPD %d", (16 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
520 { /* After Invert 3 */
521 {"General Attention %d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
527 { /* After Invert 4 */
528 {"General Attention 32", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE,
529 ecore_fw_assertion, MAX_BLOCK_ID},
530 {"General Attention %d",
531 (2 << ATTENTION_LENGTH_SHIFT) | (33 << ATTENTION_OFFSET_SHIFT),
532 OSAL_NULL, MAX_BLOCK_ID},
533 {"General Attention 35", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE,
534 ecore_general_attention_35, MAX_BLOCK_ID},
535 {"NWS Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
536 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0),
537 OSAL_NULL, BLOCK_NWS},
538 {"NWS Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
539 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1),
540 OSAL_NULL, BLOCK_NWS},
541 {"NWM Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
542 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2),
543 OSAL_NULL, BLOCK_NWM},
544 {"NWM Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
545 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3),
546 OSAL_NULL, BLOCK_NWM},
547 {"MCP CPU", ATTENTION_SINGLE, ecore_mcp_attn_cb, MAX_BLOCK_ID},
548 {"MCP Watchdog timer", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
549 {"MCP M2P", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
550 {"AVS stop status ready", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
551 {"MSTAT", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
552 {"MSTAT per-path", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
553 {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
555 {"NIG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NIG},
556 {"BMB/OPTE/MCP", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BMB},
557 {"BTB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BTB},
558 {"BRB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BRB},
559 {"PRS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRS},
564 { /* After Invert 5 */
565 {"SRC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_SRC},
566 {"PB Client1", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB1},
567 {"PB Client2", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB2},
568 {"RPB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RPB},
569 {"PBF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF},
570 {"QM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_QM},
571 {"TM", ATTENTION_PAR_INT, ecore_tm_attn_cb, BLOCK_TM},
572 {"MCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MCM},
573 {"MSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSDM},
574 {"MSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSEM},
575 {"PCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PCM},
576 {"PSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSDM},
577 {"PSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSEM},
578 {"TCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCM},
579 {"TSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSDM},
580 {"TSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSEM},
585 { /* After Invert 6 */
586 {"UCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_UCM},
587 {"USDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USDM},
588 {"USEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USEM},
589 {"XCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XCM},
590 {"XSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSDM},
591 {"XSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSEM},
592 {"YCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YCM},
593 {"YSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSDM},
594 {"YSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSEM},
595 {"XYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XYLD},
596 {"TMLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TMLD},
597 {"MYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MULD},
598 {"YULD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YULD},
599 {"DORQ", ATTENTION_PAR_INT, ecore_dorq_attn_cb, BLOCK_DORQ},
600 {"DBG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DBG},
601 {"IPC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IPC},
606 { /* After Invert 7 */
607 {"CCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CCFC},
608 {"CDU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CDU},
609 {"DMAE", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DMAE},
610 {"IGU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IGU},
611 {"ATC", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
612 {"CAU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CAU},
613 {"PTU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PTU},
614 {"PRM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRM},
615 {"TCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCFC},
616 {"RDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RDIF},
617 {"TDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TDIF},
618 {"RSS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RSS},
619 {"MISC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISC},
620 {"MISCS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISCS},
621 {"PCIE", ATTENTION_PAR, OSAL_NULL, BLOCK_PCIE},
622 {"Vaux PCI core", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
623 {"PSWRQ", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ},
628 { /* After Invert 8 */
629 {"PSWRQ (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ2},
630 {"PSWWR", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR},
631 {"PSWWR (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR2},
632 {"PSWRD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD},
633 {"PSWRD (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD2},
634 {"PSWHST", ATTENTION_PAR_INT, ecore_pswhst_attn_cb, BLOCK_PSWHST},
635 {"PSWHST (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWHST2},
636 {"GRC", ATTENTION_PAR_INT, ecore_grc_attn_cb, BLOCK_GRC},
637 {"CPMU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CPMU},
638 {"NCSI", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NCSI},
639 {"MSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
640 {"PSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
641 {"TSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
642 {"USEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
643 {"XSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
644 {"YSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
645 {"pxp_misc_mps", ATTENTION_PAR, OSAL_NULL, BLOCK_PGLCS},
646 {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
647 {"PERST_B assertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
648 {"PERST_B deassertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
649 {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
655 { /* After Invert 9 */
656 {"MCP Latched memory", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
657 {"MCP Latched scratchpad cache", ATTENTION_SINGLE, OSAL_NULL,
659 {"MCP Latched ump_tx", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
660 {"MCP Latched scratchpad", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
661 {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
668 static struct aeu_invert_reg_bit *
669 ecore_int_aeu_translate(struct ecore_hwfn *p_hwfn,
670 struct aeu_invert_reg_bit *p_bit)
672 if (!ECORE_IS_BB(p_hwfn->p_dev))
675 if (!(p_bit->flags & ATTENTION_BB_DIFFERENT))
678 return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >>
682 static bool ecore_int_is_parity_flag(struct ecore_hwfn *p_hwfn,
683 struct aeu_invert_reg_bit *p_bit)
685 return !!(ecore_int_aeu_translate(p_hwfn, p_bit)->flags &
689 #define ATTN_STATE_BITS (0xfff)
690 #define ATTN_BITS_MASKABLE (0x3ff)
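/* Editor's note: only the low 12 bits (ATTN_STATE_BITS) of the attention
 * status carry state, and only the low 10 (ATTN_BITS_MASKABLE) can be
 * masked in the IGU; bit 8 (0x100) is the MCP/MFW attention, which is why
 * the handlers below special-case asserted_bits & 0x100.
 */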
691 struct ecore_sb_attn_info {
692 /* Virtual & Physical address of the SB */
693 struct atten_status_block *sb_attn;
696 /* Last seen running index */
699 /* A mask of the AEU bits resulting in a parity error */
700 u32 parity_mask[NUM_ATTN_REGS];
702 /* A pointer to the attention description structure */
703 struct aeu_invert_reg *p_aeu_desc;
705 /* Previously asserted attentions, which are still unasserted */
708 /* Cleanup address for the link's general hw attention */
712 static u16 ecore_attn_update_idx(struct ecore_hwfn *p_hwfn,
713 struct ecore_sb_attn_info *p_sb_desc)
717 OSAL_MMIOWB(p_hwfn->p_dev);
719 index = OSAL_LE16_TO_CPU(p_sb_desc->sb_attn->sb_index);
720 if (p_sb_desc->index != index) {
721 p_sb_desc->index = index;
722 rc = ECORE_SB_ATT_IDX;
725 OSAL_MMIOWB(p_hwfn->p_dev);
731 * @brief ecore_int_assertion - handles asserted attention bits
734 * @param asserted_bits newly asserted bits
735 * @return enum _ecore_status_t
737 static enum _ecore_status_t ecore_int_assertion(struct ecore_hwfn *p_hwfn,
740 struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
743 /* Mask the source of the attention in the IGU */
744 igu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
745 IGU_REG_ATTENTION_ENABLE);
746 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
747 igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
748 igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
749 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);
751 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
752 "inner known ATTN state: 0x%04x --> 0x%04x\n",
753 sb_attn_sw->known_attn,
754 sb_attn_sw->known_attn | asserted_bits);
755 sb_attn_sw->known_attn |= asserted_bits;
757 /* Handle MCP events */
758 if (asserted_bits & 0x100) {
759 ecore_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
760 /* Clean the MCP attention */
761 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
762 sb_attn_sw->mfw_attn_addr, 0);
765 /* FIXME - this will change once we have good GTT definitions */
766 DIRECT_REG_WR(p_hwfn,
767 (u8 OSAL_IOMEM *) p_hwfn->regview +
768 GTT_BAR0_MAP_REG_IGU_CMD +
769 ((IGU_CMD_ATTN_BIT_SET_UPPER -
770 IGU_CMD_INT_ACK_BASE) << 3), (u32)asserted_bits);
772 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "set cmd IGU: 0x%04x\n",
775 return ECORE_SUCCESS;
778 static void ecore_int_attn_print(struct ecore_hwfn *p_hwfn,
779 enum block_id id, enum dbg_attn_type type,
783 DP_NOTICE(p_hwfn->p_dev, false, "[block_id %d type %d]\n", id, type);
787 * @brief ecore_int_deassertion_aeu_bit - handles the effects of a single
788 * cause of the attention
791 * @param p_aeu - descriptor of an AEU bit which caused the attention
792 * @param aeu_en_reg - register offset of the AEU enable register which
793 * mapped this bit to this group.
794 * @param bit_index - index of this bit in the aeu_en_reg
796 * @return enum _ecore_status_t
798 static enum _ecore_status_t
799 ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn,
800 struct aeu_invert_reg_bit *p_aeu,
802 const char *p_bit_name,
805 enum _ecore_status_t rc = ECORE_INVAL;
806 bool b_fatal = false;
808 DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
809 p_bit_name, bitmask);
811 /* Call callback before clearing the interrupt status */
813 DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
815 rc = p_aeu->cb(p_hwfn);
818 if (rc != ECORE_SUCCESS)
821 /* Print HW block interrupt registers */
822 if (p_aeu->block_index != MAX_BLOCK_ID) {
823 ecore_int_attn_print(p_hwfn, p_aeu->block_index,
824 ATTN_TYPE_INTERRUPT, !b_fatal);
828 /* Reach assertion if attention is fatal */
829 if (b_fatal || (strcmp(p_bit_name, "PGLUE B RBC") == 0)) {
830 DP_NOTICE(p_hwfn, true, "`%s': Fatal attention\n",
833 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
836 /* Prevent this Attention from being asserted in the future */
837 if (p_aeu->flags & ATTENTION_CLEAR_ENABLE ||
838 p_hwfn->p_dev->attn_clr_en) {
841 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
842 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & mask));
843 DP_ERR(p_hwfn, "`%s' - Disabled future attentions\n",
851 * @brief ecore_int_deassertion_parity - handle a single parity AEU source
854 * @param p_aeu - descriptor of an AEU bit which caused the
858 static void ecore_int_deassertion_parity(struct ecore_hwfn *p_hwfn,
859 struct aeu_invert_reg_bit *p_aeu,
862 u32 block_id = p_aeu->block_index;
864 DP_INFO(p_hwfn->p_dev, "%s[%d] parity attention is set\n",
865 p_aeu->bit_name, bit_index);
867 if (block_id == MAX_BLOCK_ID)
870 ecore_int_attn_print(p_hwfn, block_id,
871 ATTN_TYPE_PARITY, false);
873 /* In A0, there's a single parity bit for several blocks */
874 if (block_id == BLOCK_BTB) {
875 ecore_int_attn_print(p_hwfn, BLOCK_OPTE,
876 ATTN_TYPE_PARITY, false);
877 ecore_int_attn_print(p_hwfn, BLOCK_MCP,
878 ATTN_TYPE_PARITY, false);
883 * @brief - handles deassertion of previously asserted attentions.
886 * @param deasserted_bits - newly deasserted bits
887 * @return enum _ecore_status_t
890 static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
893 struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
894 u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask;
895 bool b_parity = false;
897 enum _ecore_status_t rc = ECORE_SUCCESS;
899 /* Read the attention registers in the AEU */
900 for (i = 0; i < NUM_ATTN_REGS; i++) {
901 aeu_inv_arr[i] = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
902 MISC_REG_AEU_AFTER_INVERT_1_IGU +
904 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
905 "Deasserted bits [%d]: %08x\n", i, aeu_inv_arr[i]);
908 /* Handle parity attentions first */
909 for (i = 0; i < NUM_ATTN_REGS; i++) {
910 struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
911 u32 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
912 MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
915 u32 parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
917 /* Skip register in which no parity bit is currently set */
921 for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
922 struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
924 if (ecore_int_is_parity_flag(p_hwfn, p_bit) &&
925 !!(parities & (1 << bit_idx))) {
926 ecore_int_deassertion_parity(p_hwfn, p_bit,
931 bit_idx += ATTENTION_LENGTH(p_bit->flags);
935 /* Find non-parity cause for attention and act */
936 for (k = 0; k < MAX_ATTN_GRPS; k++) {
937 struct aeu_invert_reg_bit *p_aeu;
939 /* Handle only groups whose attention is currently deasserted */
940 if (!(deasserted_bits & (1 << k)))
943 for (i = 0; i < NUM_ATTN_REGS; i++) {
944 u32 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
945 i * sizeof(u32) + k * sizeof(u32) * NUM_ATTN_REGS;
946 u32 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
947 u32 bits = aeu_inv_arr[i] & en;
949 /* Skip if no bit from this group is currently set */
953 /* Find all set bits from current register which belong
954 * to current group, making them responsible for the
955 * previous assertion.
957 for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
958 unsigned long int bitmask;
961 /* Need to account for bits with changed meaning */
962 p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
965 bit_len = ATTENTION_LENGTH(p_aeu->flags);
966 if (ecore_int_is_parity_flag(p_hwfn, p_aeu)) {
972 /* Find the bits relating to the HW block, then
973 * shift them so they become the LSBs.
975 bitmask = bits & (((1 << bit_len) - 1) << bit);
979 u32 flags = p_aeu->flags;
983 num = (u8)OSAL_FIND_FIRST_BIT(&bitmask,
986 /* Some bits represent more than a
987 * single interrupt. Correctly print
990 if (ATTENTION_LENGTH(flags) > 2 ||
991 ((flags & ATTENTION_PAR_INT) &&
992 ATTENTION_LENGTH(flags) > 1))
993 OSAL_SNPRINTF(bit_name, 30,
997 OSAL_STRNCPY(bit_name,
1001 /* We now need to pass bitmask in its
1006 /* Handle source of the attention */
1007 ecore_int_deassertion_aeu_bit(p_hwfn,
1014 bit_idx += ATTENTION_LENGTH(p_aeu->flags);
1019 /* Clear IGU indication for the deasserted bits */
1020 /* FIXME - this will change once we have good GTT definitions */
1021 DIRECT_REG_WR(p_hwfn,
1022 (u8 OSAL_IOMEM *) p_hwfn->regview +
1023 GTT_BAR0_MAP_REG_IGU_CMD +
1024 ((IGU_CMD_ATTN_BIT_CLR_UPPER -
1025 IGU_CMD_INT_ACK_BASE) << 3), ~((u32)deasserted_bits));
1027 /* Unmask deasserted attentions in IGU */
1028 aeu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1029 IGU_REG_ATTENTION_ENABLE);
1030 aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
1031 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);
1033 /* Clear deassertion from inner state */
1034 sb_attn_sw->known_attn &= ~deasserted_bits;
1039 static enum _ecore_status_t ecore_int_attentions(struct ecore_hwfn *p_hwfn)
1041 struct ecore_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
1042 struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
1043 u16 index = 0, asserted_bits, deasserted_bits;
1044 u32 attn_bits = 0, attn_acks = 0;
1045 enum _ecore_status_t rc = ECORE_SUCCESS;
1047 /* Read current attention bits/acks - safeguard against attentions
1048 * by guaranteeing work on a synchronized timeframe
1051 index = OSAL_LE16_TO_CPU(p_sb_attn->sb_index);
1052 attn_bits = OSAL_LE32_TO_CPU(p_sb_attn->atten_bits);
1053 attn_acks = OSAL_LE32_TO_CPU(p_sb_attn->atten_ack);
1054 } while (index != OSAL_LE16_TO_CPU(p_sb_attn->sb_index));
1055 p_sb_attn->sb_index = index;
1057 /* Attention / Deassertion are meaningful (and in correct state)
1058 * only when they differ and are consistent with the known state - deassertion
1059 * when previous attention & current ack, and assertion when current
1060 * attention with no previous attention
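* Example (editor's note): with known_attn clear, attn_bits 0x100 and
* attn_acks 0x000 yield asserted_bits 0x100; later, attn_bits 0x000 with
* attn_acks 0x100 and known_attn 0x100 yield deasserted_bits 0x100.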
1062 asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
1063 ~p_sb_attn_sw->known_attn;
1064 deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
1065 p_sb_attn_sw->known_attn;
1067 if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100))
1069 "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
1070 index, attn_bits, attn_acks, asserted_bits,
1071 deasserted_bits, p_sb_attn_sw->known_attn);
1072 else if (asserted_bits == 0x100)
1073 DP_INFO(p_hwfn, "MFW indication via attention\n");
1075 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1076 "MFW indication [deassertion]\n");
1078 if (asserted_bits) {
1079 rc = ecore_int_assertion(p_hwfn, asserted_bits);
1084 if (deasserted_bits)
1085 rc = ecore_int_deassertion(p_hwfn, deasserted_bits);
1090 static void ecore_sb_ack_attn(struct ecore_hwfn *p_hwfn,
1091 void OSAL_IOMEM *igu_addr, u32 ack_cons)
1093 struct igu_prod_cons_update igu_ack = { 0 };
1095 igu_ack.sb_id_and_flags =
1096 ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
1097 (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
1098 (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
1099 (IGU_SEG_ACCESS_ATTN <<
1100 IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
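/* Editor's note: the dword built above packs the new consumer (ack_cons)
 * into the SB_INDEX field, sets the update flag, requests no interrupt
 * change (IGU_INT_NOP) and selects the attention segment, so the single
 * register write below acknowledges the attention SB.
 */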
1102 DIRECT_REG_WR(p_hwfn, igu_addr, igu_ack.sb_id_and_flags);
1104 /* Both segments (interrupts & acks) are written to the same address;
1105 * Need to guarantee all commands will be received (in-order) by HW.
1107 OSAL_MMIOWB(p_hwfn->p_dev);
1108 OSAL_BARRIER(p_hwfn->p_dev);
1111 void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)
1113 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)hwfn_cookie;
1114 struct ecore_pi_info *pi_info = OSAL_NULL;
1115 struct ecore_sb_attn_info *sb_attn;
1116 struct ecore_sb_info *sb_info;
1123 if (!p_hwfn->p_sp_sb) {
1124 DP_ERR(p_hwfn->p_dev, "DPC called - no p_sp_sb\n");
1128 sb_info = &p_hwfn->p_sp_sb->sb_info;
1129 arr_size = OSAL_ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
1131 DP_ERR(p_hwfn->p_dev,
1132 "Status block is NULL - cannot ack interrupts\n");
1136 if (!p_hwfn->p_sb_attn) {
1137 DP_ERR(p_hwfn->p_dev, "DPC called - no p_sb_attn\n");
1140 sb_attn = p_hwfn->p_sb_attn;
1142 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
1143 p_hwfn, p_hwfn->my_id);
1145 /* Disable ack for def status block. Required both for msix +
1146 * inta in non-mask mode; in inta it does no harm.
1148 ecore_sb_ack(sb_info, IGU_INT_DISABLE, 0);
1150 /* Gather Interrupts/Attentions information */
1151 if (!sb_info->sb_virt) {
1152 DP_ERR(p_hwfn->p_dev,
1153 "Interrupt Status block is NULL -"
1154 " cannot check for new interrupts!\n");
1156 u32 tmp_index = sb_info->sb_ack;
1157 rc = ecore_sb_update_sb_idx(sb_info);
1158 DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
1159 "Interrupt indices: 0x%08x --> 0x%08x\n",
1160 tmp_index, sb_info->sb_ack);
1163 if (!sb_attn || !sb_attn->sb_attn) {
1164 DP_ERR(p_hwfn->p_dev,
1165 "Attentions Status block is NULL -"
1166 " cannot check for new attentions!\n");
1168 u16 tmp_index = sb_attn->index;
1170 rc |= ecore_attn_update_idx(p_hwfn, sb_attn);
1171 DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
1172 "Attention indices: 0x%08x --> 0x%08x\n",
1173 tmp_index, sb_attn->index);
1176 /* Check if we expect interrupts at this time. If not, just ack them */
1177 if (!(rc & ECORE_SB_EVENT_MASK)) {
1178 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1182 /* Check the validity of the DPC ptt. If invalid, ack interrupts and fail */
1184 if (!p_hwfn->p_dpc_ptt) {
1185 DP_NOTICE(p_hwfn->p_dev, true, "Failed to allocate PTT\n");
1186 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1190 if (rc & ECORE_SB_ATT_IDX)
1191 ecore_int_attentions(p_hwfn);
1193 if (rc & ECORE_SB_IDX) {
1196 /* Since we only looked at the SB index, it's possible more
1197 * than a single protocol-index on the SB was incremented.
1198 * Iterate over all configured protocol indices and check
1199 * whether something happened for each.
1201 for (pi = 0; pi < arr_size; pi++) {
1202 pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
1203 if (pi_info->comp_cb != OSAL_NULL)
1204 pi_info->comp_cb(p_hwfn, pi_info->cookie);
1208 if (sb_attn && (rc & ECORE_SB_ATT_IDX)) {
1209 /* This should be done before the interrupts are enabled,
1210 * since otherwise a new attention will be generated.
1212 ecore_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);
1215 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1218 static void ecore_int_sb_attn_free(struct ecore_hwfn *p_hwfn)
1220 struct ecore_sb_attn_info *p_sb = p_hwfn->p_sb_attn;
1225 if (p_sb->sb_attn) {
1226 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_sb->sb_attn,
1228 SB_ATTN_ALIGNED_SIZE(p_hwfn));
1230 OSAL_FREE(p_hwfn->p_dev, p_sb);
1233 static void ecore_int_sb_attn_setup(struct ecore_hwfn *p_hwfn,
1234 struct ecore_ptt *p_ptt)
1236 struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
1238 OSAL_MEMSET(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));
1241 sb_info->known_attn = 0;
1243 /* Configure Attention Status Block in IGU */
1244 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
1245 DMA_LO(p_hwfn->p_sb_attn->sb_phys));
1246 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
1247 DMA_HI(p_hwfn->p_sb_attn->sb_phys));
1250 static void ecore_int_sb_attn_init(struct ecore_hwfn *p_hwfn,
1251 struct ecore_ptt *p_ptt,
1252 void *sb_virt_addr, dma_addr_t sb_phy_addr)
1254 struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
1257 sb_info->sb_attn = sb_virt_addr;
1258 sb_info->sb_phys = sb_phy_addr;
1260 /* Set the pointer to the AEU descriptors */
1261 sb_info->p_aeu_desc = aeu_descs;
1263 /* Calculate Parity Masks */
1264 OSAL_MEMSET(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
1265 for (i = 0; i < NUM_ATTN_REGS; i++) {
1266 /* j is array index, k is bit index */
1267 for (j = 0, k = 0; k < 32; j++) {
1268 struct aeu_invert_reg_bit *p_aeu;
1270 p_aeu = &aeu_descs[i].bits[j];
1271 if (ecore_int_is_parity_flag(p_hwfn, p_aeu))
1272 sb_info->parity_mask[i] |= 1 << k;
1274 k += ATTENTION_LENGTH(p_aeu->flags);
1276 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1277 "Attn Mask [Reg %d]: 0x%08x\n",
1278 i, sb_info->parity_mask[i]);
1281 /* Set the address of cleanup for the mcp attention */
1282 sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
1283 MISC_REG_AEU_GENERAL_ATTN_0;
1285 ecore_int_sb_attn_setup(p_hwfn, p_ptt);
1288 static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn,
1289 struct ecore_ptt *p_ptt)
1291 struct ecore_dev *p_dev = p_hwfn->p_dev;
1292 struct ecore_sb_attn_info *p_sb;
1293 dma_addr_t p_phys = 0;
1297 p_sb = OSAL_ALLOC(p_dev, GFP_KERNEL, sizeof(*p_sb));
1299 DP_NOTICE(p_dev, true,
1300 "Failed to allocate `struct ecore_sb_attn_info'\n");
1305 p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
1306 SB_ATTN_ALIGNED_SIZE(p_hwfn));
1308 DP_NOTICE(p_dev, true,
1309 "Failed to allocate status block (attentions)\n");
1310 OSAL_FREE(p_dev, p_sb);
1314 /* Attention setup */
1315 p_hwfn->p_sb_attn = p_sb;
1316 ecore_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);
1318 return ECORE_SUCCESS;
1321 /* coalescing timeout = timeset << (timer_res + 1) */
1322 #define ECORE_CAU_DEF_RX_USECS 24
1323 #define ECORE_CAU_DEF_TX_USECS 48
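/* Editor's note, worked example: the default rx_coalesce_usecs of 24 fits
 * in the 7-bit timeset field (24 <= 0x7F), so the first branch below
 * applies and the timeset programmed later is 24 >> timer_res; per the
 * formula above, a larger timer_res trades timeset granularity for range.
 */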
1325 void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn,
1326 struct cau_sb_entry *p_sb_entry,
1327 u8 pf_id, u16 vf_number, u8 vf_valid)
1329 struct ecore_dev *p_dev = p_hwfn->p_dev;
1333 OSAL_MEMSET(p_sb_entry, 0, sizeof(*p_sb_entry));
1335 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
1336 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
1337 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
1338 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
1339 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
1341 cau_state = CAU_HC_DISABLE_STATE;
1343 if (p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
1344 cau_state = CAU_HC_ENABLE_STATE;
1345 if (!p_dev->rx_coalesce_usecs)
1346 p_dev->rx_coalesce_usecs = ECORE_CAU_DEF_RX_USECS;
1347 if (!p_dev->tx_coalesce_usecs)
1348 p_dev->tx_coalesce_usecs = ECORE_CAU_DEF_TX_USECS;
1351 /* Coalesce = (timeset << timer-res), timeset is 7 bits wide */
1352 if (p_dev->rx_coalesce_usecs <= 0x7F)
1354 else if (p_dev->rx_coalesce_usecs <= 0xFF)
1358 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
1360 if (p_dev->tx_coalesce_usecs <= 0x7F)
1362 else if (p_dev->tx_coalesce_usecs <= 0xFF)
1366 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
1368 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
1369 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
1372 static void _ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
1373 struct ecore_ptt *p_ptt,
1374 u16 igu_sb_id, u32 pi_index,
1375 enum ecore_coalescing_fsm coalescing_fsm,
1378 struct cau_pi_entry pi_entry;
1379 u32 sb_offset, pi_offset;
1381 if (IS_VF(p_hwfn->p_dev))
1382 return;/* @@@TBD MichalK- VF CAU... */
1384 sb_offset = igu_sb_id * PIS_PER_SB;
1385 OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry));
1387 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
1388 if (coalescing_fsm == ECORE_COAL_RX_STATE_MACHINE)
1389 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
1391 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);
1393 pi_offset = sb_offset + pi_index;
1394 if (p_hwfn->hw_init_done) {
1395 ecore_wr(p_hwfn, p_ptt,
1396 CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
1397 *((u32 *)&(pi_entry)));
1399 STORE_RT_REG(p_hwfn,
1400 CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
1401 *((u32 *)&(pi_entry)));
1405 void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
1406 struct ecore_ptt *p_ptt,
1407 struct ecore_sb_info *p_sb, u32 pi_index,
1408 enum ecore_coalescing_fsm coalescing_fsm,
1411 _ecore_int_cau_conf_pi(p_hwfn, p_ptt, p_sb->igu_sb_id,
1412 pi_index, coalescing_fsm, timeset);
1415 void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
1416 struct ecore_ptt *p_ptt,
1417 dma_addr_t sb_phys, u16 igu_sb_id,
1418 u16 vf_number, u8 vf_valid)
1420 struct cau_sb_entry sb_entry;
1422 ecore_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
1423 vf_number, vf_valid);
1425 if (p_hwfn->hw_init_done) {
1426 /* Wide-bus, initialize via DMAE */
1427 u64 phys_addr = (u64)sb_phys;
1429 ecore_dmae_host2grc(p_hwfn, p_ptt,
1430 (u64)(osal_uintptr_t)&phys_addr,
1431 CAU_REG_SB_ADDR_MEMORY +
1432 igu_sb_id * sizeof(u64), 2, 0);
1433 ecore_dmae_host2grc(p_hwfn, p_ptt,
1434 (u64)(osal_uintptr_t)&sb_entry,
1435 CAU_REG_SB_VAR_MEMORY +
1436 igu_sb_id * sizeof(u64), 2, 0);
1438 /* Initialize Status Block Address */
1439 STORE_RT_REG_AGG(p_hwfn,
1440 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
1441 igu_sb_id * 2, sb_phys);
1443 STORE_RT_REG_AGG(p_hwfn,
1444 CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
1445 igu_sb_id * 2, sb_entry);
1448 /* Configure pi coalescing if set */
1449 if (p_hwfn->p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
1450 /* eth will open queues for all tcs, so configure all of them
1451 * properly, rather than just the active ones
1453 u8 num_tc = p_hwfn->hw_info.num_hw_tc;
1455 u8 timeset, timer_res;
1458 /* timeset = (coalesce >> timer-res), timeset is 7 bits wide */
1459 if (p_hwfn->p_dev->rx_coalesce_usecs <= 0x7F)
1461 else if (p_hwfn->p_dev->rx_coalesce_usecs <= 0xFF)
1465 timeset = (u8)(p_hwfn->p_dev->rx_coalesce_usecs >> timer_res);
1466 _ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
1467 ECORE_COAL_RX_STATE_MACHINE,
1470 if (p_hwfn->p_dev->tx_coalesce_usecs <= 0x7F)
1472 else if (p_hwfn->p_dev->tx_coalesce_usecs <= 0xFF)
1476 timeset = (u8)(p_hwfn->p_dev->tx_coalesce_usecs >> timer_res);
1477 for (i = 0; i < num_tc; i++) {
1478 _ecore_int_cau_conf_pi(p_hwfn, p_ptt,
1479 igu_sb_id, TX_PI(i),
1480 ECORE_COAL_TX_STATE_MACHINE,
1486 void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
1487 struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info)
1489 /* zero status block and ack counter */
1490 sb_info->sb_ack = 0;
1491 OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
1493 if (IS_PF(p_hwfn->p_dev))
1494 ecore_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
1495 sb_info->igu_sb_id, 0, 0);
1498 struct ecore_igu_block *
1499 ecore_get_igu_free_sb(struct ecore_hwfn *p_hwfn, bool b_is_pf)
1501 struct ecore_igu_block *p_block;
1504 for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
1506 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];
1508 if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
1509 !(p_block->status & ECORE_IGU_STATUS_FREE))
1512 if (!!(p_block->status & ECORE_IGU_STATUS_PF) ==
1520 static u16 ecore_get_pf_igu_sb_id(struct ecore_hwfn *p_hwfn,
1523 struct ecore_igu_block *p_block;
1526 for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
1528 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];
1530 if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
1532 p_block->vector_number != vector_id)
1538 return ECORE_SB_INVALID_IDX;
1541 u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
1545 /* Assuming a contiguous set of IGU SBs dedicated to the given PF */
1546 if (sb_id == ECORE_SP_SB_ID)
1547 igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
1548 else if (IS_PF(p_hwfn->p_dev))
1549 igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
1551 igu_sb_id = ecore_vf_get_igu_sb_id(p_hwfn, sb_id);
1553 if (igu_sb_id == ECORE_SB_INVALID_IDX)
1554 DP_NOTICE(p_hwfn, true,
1555 "Slowpath SB vector %04x doesn't exist\n",
1557 else if (sb_id == ECORE_SP_SB_ID)
1558 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1559 "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
1561 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1562 "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);
1567 enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
1568 struct ecore_ptt *p_ptt,
1569 struct ecore_sb_info *sb_info,
1571 dma_addr_t sb_phy_addr, u16 sb_id)
1573 sb_info->sb_virt = sb_virt_addr;
1574 sb_info->sb_phys = sb_phy_addr;
1576 sb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id);
1578 if (sb_info->igu_sb_id == ECORE_SB_INVALID_IDX)
1581 /* Let the igu info reference the client's SB info */
1582 if (sb_id != ECORE_SP_SB_ID) {
1583 if (IS_PF(p_hwfn->p_dev)) {
1584 struct ecore_igu_info *p_info;
1585 struct ecore_igu_block *p_block;
1587 p_info = p_hwfn->hw_info.p_igu_info;
1588 p_block = &p_info->entry[sb_info->igu_sb_id];
1590 p_block->sb_info = sb_info;
1591 p_block->status &= ~ECORE_IGU_STATUS_FREE;
1592 p_info->usage.free_cnt--;
1594 ecore_vf_set_sb_info(p_hwfn, sb_id, sb_info);
1597 #ifdef ECORE_CONFIG_DIRECT_HWFN
1598 sb_info->p_hwfn = p_hwfn;
1600 sb_info->p_dev = p_hwfn->p_dev;
1602 /* The igu address will hold the absolute address that needs to be
1603 * written to for a specific status block
1605 if (IS_PF(p_hwfn->p_dev)) {
1606 sb_info->igu_addr = (u8 OSAL_IOMEM *)p_hwfn->regview +
1607 GTT_BAR0_MAP_REG_IGU_CMD + (sb_info->igu_sb_id << 3);
1611 (u8 OSAL_IOMEM *)p_hwfn->regview +
1612 PXP_VF_BAR0_START_IGU +
1613 ((IGU_CMD_INT_ACK_BASE + sb_info->igu_sb_id) << 3);
1616 sb_info->flags |= ECORE_SB_INFO_INIT;
1618 ecore_int_sb_setup(p_hwfn, p_ptt, sb_info);
1620 return ECORE_SUCCESS;
1623 enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
1624 struct ecore_sb_info *sb_info,
1627 struct ecore_igu_info *p_info;
1628 struct ecore_igu_block *p_block;
1630 if (sb_info == OSAL_NULL)
1631 return ECORE_SUCCESS;
1633 /* zero status block and ack counter */
1634 sb_info->sb_ack = 0;
1635 OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
1637 if (IS_VF(p_hwfn->p_dev)) {
1638 ecore_vf_set_sb_info(p_hwfn, sb_id, OSAL_NULL);
1639 return ECORE_SUCCESS;
1642 p_info = p_hwfn->hw_info.p_igu_info;
1643 p_block = &p_info->entry[sb_info->igu_sb_id];
1645 /* Vector 0 is reserved for the Default SB */
1646 if (p_block->vector_number == 0) {
1647 DP_ERR(p_hwfn, "Do not free the sp sb using this function\n");
1651 /* Lose reference to client's SB info, and fix counters */
1652 p_block->sb_info = OSAL_NULL;
1653 p_block->status |= ECORE_IGU_STATUS_FREE;
1654 p_info->usage.free_cnt++;
1656 return ECORE_SUCCESS;
1659 static void ecore_int_sp_sb_free(struct ecore_hwfn *p_hwfn)
1661 struct ecore_sb_sp_info *p_sb = p_hwfn->p_sp_sb;
1666 if (p_sb->sb_info.sb_virt) {
1667 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
1668 p_sb->sb_info.sb_virt,
1669 p_sb->sb_info.sb_phys,
1670 SB_ALIGNED_SIZE(p_hwfn));
1673 OSAL_FREE(p_hwfn->p_dev, p_sb);
1676 static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
1677 struct ecore_ptt *p_ptt)
1679 struct ecore_sb_sp_info *p_sb;
1680 dma_addr_t p_phys = 0;
1685 OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
1688 DP_NOTICE(p_hwfn, true,
1689 "Failed to allocate `struct ecore_sb_info'\n");
1694 p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
1695 &p_phys, SB_ALIGNED_SIZE(p_hwfn));
1697 DP_NOTICE(p_hwfn, true, "Failed to allocate status block\n");
1698 OSAL_FREE(p_hwfn->p_dev, p_sb);
1702 /* Status Block setup */
1703 p_hwfn->p_sp_sb = p_sb;
1704 ecore_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info,
1705 p_virt, p_phys, ECORE_SP_SB_ID);
1707 OSAL_MEMSET(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));
1709 return ECORE_SUCCESS;
1712 enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn,
1713 ecore_int_comp_cb_t comp_cb,
1715 u8 *sb_idx, __le16 **p_fw_cons)
1717 struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
1718 enum _ecore_status_t rc = ECORE_NOMEM;
1721 /* Look for a free index */
1722 for (pi = 0; pi < OSAL_ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
1723 if (p_sp_sb->pi_info_arr[pi].comp_cb != OSAL_NULL)
1726 p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
1727 p_sp_sb->pi_info_arr[pi].cookie = cookie;
1729 *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
1737 enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn, u8 pi)
1739 struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
1741 if (p_sp_sb->pi_info_arr[pi].comp_cb == OSAL_NULL)
1744 p_sp_sb->pi_info_arr[pi].comp_cb = OSAL_NULL;
1745 p_sp_sb->pi_info_arr[pi].cookie = OSAL_NULL;
1746 return ECORE_SUCCESS;
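/* Illustrative usage sketch (editor's note; the callback name and cookie
 * are hypothetical, with the callback signature taken from the invocation
 * in ecore_int_sp_dpc()):
 *
 *	static void my_proto_cb(struct ecore_hwfn *p_hwfn, void *cookie);
 *	__le16 *p_fw_cons;
 *	u8 pi;
 *
 *	rc = ecore_int_register_cb(p_hwfn, my_proto_cb, my_cookie,
 *				   &pi, &p_fw_cons);
 *	... poll *p_fw_cons for protocol-index updates ...
 *	ecore_int_unregister_cb(p_hwfn, pi);
 */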
1749 u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn)
1751 return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
1754 void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn,
1755 struct ecore_ptt *p_ptt,
1756 enum ecore_int_mode int_mode)
1758 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;
1761 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
1762 DP_INFO(p_hwfn, "FPGA - don't enable ATTN generation in IGU\n");
1763 igu_pf_conf &= ~IGU_PF_CONF_ATTN_BIT_EN;
1767 p_hwfn->p_dev->int_mode = int_mode;
1768 switch (p_hwfn->p_dev->int_mode) {
1769 case ECORE_INT_MODE_INTA:
1770 igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
1771 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
1774 case ECORE_INT_MODE_MSI:
1775 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
1776 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
1779 case ECORE_INT_MODE_MSIX:
1780 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
1782 case ECORE_INT_MODE_POLL:
1786 ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
1789 static void ecore_int_igu_enable_attn(struct ecore_hwfn *p_hwfn,
1790 struct ecore_ptt *p_ptt)
1793 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
1795 "FPGA - Don't enable Attentions in IGU and MISC\n");
1800 /* Configure AEU signal change to produce attentions */
1801 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
1802 ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
1803 ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
1804 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);
1806 /* Flush the writes to IGU */
1807 OSAL_MMIOWB(p_hwfn->p_dev);
1809 /* Unmask AEU signals toward IGU */
1810 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
1813 enum _ecore_status_t
1814 ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1815 enum ecore_int_mode int_mode)
1817 enum _ecore_status_t rc = ECORE_SUCCESS;
1820 /* @@@tmp - Starting with MFW 8.2.1.0 we've started hitting AVS stop
1821 * attentions. Since we're waiting for a BRCM answer regarding this
1822 * attention, in the meantime we simply mask it.
1824 tmp = ecore_rd(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0);
1826 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0, tmp);
1828 ecore_int_igu_enable_attn(p_hwfn, p_ptt);
1830 if ((int_mode != ECORE_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
1831 rc = OSAL_SLOWPATH_IRQ_REQ(p_hwfn);
1832 if (rc != ECORE_SUCCESS) {
1833 DP_NOTICE(p_hwfn, true,
1834 "Slowpath IRQ request failed\n");
1835 return ECORE_NORESOURCES;
1837 p_hwfn->b_int_requested = true;
1840 /* Enable interrupt Generation */
1841 ecore_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
1843 p_hwfn->b_int_enabled = 1;
1848 void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
1849 struct ecore_ptt *p_ptt)
1851 p_hwfn->b_int_enabled = 0;
1853 if (IS_VF(p_hwfn->p_dev))
1856 ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
1859 #define IGU_CLEANUP_SLEEP_LENGTH (1000)
1860 static void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
1861 struct ecore_ptt *p_ptt,
1866 u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
1867 u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;
1868 u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
1869 u8 type = 0; /* FIXME MichalS type??? */
1871 OSAL_BUILD_BUG_ON((IGU_REG_CLEANUP_STATUS_4 -
1872 IGU_REG_CLEANUP_STATUS_0) != 0x200);
1874 /* Use the Control Command Register to perform the cleanup. There is an
1875 * option to do this using the IGU BAR, but then it can't be used for VFs.
1878 /* Set the data field */
1879 SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
1880 SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, type);
1881 SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);
1883 /* Set the control register */
1884 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
1885 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
1886 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);
1888 ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);
1890 OSAL_BARRIER(p_hwfn->p_dev);
1892 ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);
1894 /* Flush the write to IGU */
1895 OSAL_MMIOWB(p_hwfn->p_dev);
1897 /* calculate where to read the status bit from */
1898 sb_bit = 1 << (igu_sb_id % 32);
1899 sb_bit_addr = igu_sb_id / 32 * sizeof(u32);
1901 sb_bit_addr += IGU_REG_CLEANUP_STATUS_0 + (0x80 * type);
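/* Editor's note, worked example: for igu_sb_id 37 with type 0 this polls
 * bit (37 % 32) == 5 of the dword at IGU_REG_CLEANUP_STATUS_0 +
 * (37 / 32) * sizeof(u32) == IGU_REG_CLEANUP_STATUS_0 + 4.
 */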
1903 /* Now wait for the command to complete */
1904 while (--sleep_cnt) {
1905 val = ecore_rd(p_hwfn, p_ptt, sb_bit_addr);
1906 if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
1912 DP_NOTICE(p_hwfn, true,
1913 "Timeout waiting for clear status 0x%08x [for sb %d]\n",
1917 void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
1918 struct ecore_ptt *p_ptt,
1919 u16 igu_sb_id, u16 opaque, bool b_set)
1921 struct ecore_igu_block *p_block;
1924 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
1925 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1926 "Cleaning SB [%04x]: func_id= %d is_pf = %d vector_num = 0x%0x\n",
1927 igu_sb_id, p_block->function_id, p_block->is_pf,
1928 p_block->vector_number);
1932 ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque);
1935 ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque);
1937 /* Wait for the IGU SB to clean up */
1938 for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
1941 val = ecore_rd(p_hwfn, p_ptt,
1942 IGU_REG_WRITE_DONE_PENDING +
1943 ((igu_sb_id / 32) * 4));
1944 if (val & (1 << (igu_sb_id % 32)))
1949 if (i == IGU_CLEANUP_SLEEP_LENGTH)
1950 DP_NOTICE(p_hwfn, true,
1951 "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n",
1954 /* Clear the CAU for the SB */
1955 for (pi = 0; pi < 12; pi++)
1956 ecore_wr(p_hwfn, p_ptt,
1957 CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0);
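/* Editor's note: each SB owns 12 consecutive 4-byte PI entries, so SB n's
 * block starts at CAU_REG_PI_MEMORY + n * 48 and the loop above zeroes
 * all twelve entries.
 */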
1960 void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
1961 struct ecore_ptt *p_ptt,
1962 bool b_set, bool b_slowpath)
1964 struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
1965 struct ecore_igu_block *p_block;
1969 /* @@@TBD MichalK temporary... should be moved to init-tool... */
1970 val = ecore_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
1971 val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
1972 val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
1973 ecore_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
1977 igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
1979 p_block = &p_info->entry[igu_sb_id];
1981 if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
1983 (p_block->status & ECORE_IGU_STATUS_DSB))
1986 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id,
1987 p_hwfn->hw_info.opaque_fid,
1992 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
1994 p_hwfn->hw_info.opaque_fid,
int ecore_int_igu_reset_cam(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt)
{
	struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	struct ecore_igu_block *p_block;
	int pf_sbs, vf_sbs;
	u16 igu_sb_id;
	u32 val, rval;

	if (!RESC_NUM(p_hwfn, ECORE_SB)) {
		/* We're using an old MFW - have to prevent any switching
		 * of SBs between PF and VFs, as later the driver wouldn't be
		 * able to tell which belongs to which.
		 */
		p_info->b_allow_pf_vf_change = false;
	} else {
		/* Use the numbers the MFW provided -
		 * don't forget the MFW accounts for the default SB as well.
		 */
		p_info->b_allow_pf_vf_change = true;

		if (p_info->usage.cnt != RESC_NUM(p_hwfn, ECORE_SB) - 1) {
			DP_INFO(p_hwfn,
				"MFW notifies of 0x%04x PF SBs; IGU indicates only 0x%04x\n",
				RESC_NUM(p_hwfn, ECORE_SB) - 1,
				p_info->usage.cnt);
			p_info->usage.cnt = RESC_NUM(p_hwfn, ECORE_SB) - 1;
		}

		/* TODO - how do we learn about VF SBs from MFW? */
		if (IS_PF_SRIOV(p_hwfn)) {
			u16 vfs = p_hwfn->p_dev->p_iov_info->total_vfs;

			if (vfs != p_info->usage.iov_cnt)
				DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
					   "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n",
					   p_info->usage.iov_cnt, vfs);

			/* At this point we know how many SBs we have in
			 * total in the IGU + the number of PF SBs, so we
			 * can validate there are enough for the VFs.
			 */
			if (vfs > p_info->usage.free_cnt +
				  p_info->usage.free_cnt_iov -
				  p_info->usage.cnt) {
				DP_NOTICE(p_hwfn, true,
					  "Not enough SBs for VFs - 0x%04x SBs, of which %04x are PF SBs and %04x are required for VFs\n",
					  p_info->usage.free_cnt +
					  p_info->usage.free_cnt_iov,
					  p_info->usage.cnt, vfs);
				return ECORE_INVAL;
			}
		}
	}
	/* Cap the number of VF SBs by the number of VFs */
	if (IS_PF_SRIOV(p_hwfn))
		p_info->usage.iov_cnt = p_hwfn->p_dev->p_iov_info->total_vfs;

	/* Mark all SBs as free, now in the right PF/VF division */
	p_info->usage.free_cnt = p_info->usage.cnt;
	p_info->usage.free_cnt_iov = p_info->usage.iov_cnt;
	p_info->usage.orig = p_info->usage.cnt;
	p_info->usage.iov_orig = p_info->usage.iov_cnt;

	/* We now proceed to re-configure the IGU cam to reflect the initial
	 * configuration. We can start with the Default SB.
	 */
	pf_sbs = p_info->usage.cnt;
	vf_sbs = p_info->usage.iov_cnt;

	for (igu_sb_id = p_info->igu_dsb_id;
	     igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
	     igu_sb_id++) {
		p_block = &p_info->entry[igu_sb_id];
		val = 0;

		if (!(p_block->status & ECORE_IGU_STATUS_VALID))
			continue;

		if (p_block->status & ECORE_IGU_STATUS_DSB) {
			p_block->function_id = p_hwfn->rel_pf_id;
			p_block->is_pf = 1;
			p_block->vector_number = 0;
			p_block->status = ECORE_IGU_STATUS_VALID |
					  ECORE_IGU_STATUS_PF |
					  ECORE_IGU_STATUS_DSB;
		} else if (pf_sbs) {
			pf_sbs--;
			p_block->function_id = p_hwfn->rel_pf_id;
			p_block->is_pf = 1;
			p_block->vector_number = p_info->usage.cnt - pf_sbs;
			p_block->status = ECORE_IGU_STATUS_VALID |
					  ECORE_IGU_STATUS_PF |
					  ECORE_IGU_STATUS_FREE;
		} else if (vf_sbs) {
			p_block->function_id =
				p_hwfn->p_dev->p_iov_info->first_vf_in_pf +
				p_info->usage.iov_cnt - vf_sbs;
			p_block->is_pf = 0;
			p_block->vector_number = 0;
			p_block->status = ECORE_IGU_STATUS_VALID |
					  ECORE_IGU_STATUS_FREE;
			vf_sbs--;
		} else {
			p_block->function_id = 0;
			p_block->is_pf = 0;
			p_block->vector_number = 0;
		}

		SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
			  p_block->function_id);
		SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
		SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
			  p_block->vector_number);

		/* VF entries would be enabled when the VF is initialized */
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);

		rval = ecore_rd(p_hwfn, p_ptt,
				IGU_REG_MAPPING_MEMORY +
				sizeof(u32) * igu_sb_id);

		if (rval != val) {
			ecore_wr(p_hwfn, p_ptt,
				 IGU_REG_MAPPING_MEMORY +
				 sizeof(u32) * igu_sb_id,
				 val);

			DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
				   "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n",
				   igu_sb_id, p_block->function_id,
				   p_block->is_pf, p_block->vector_number,
				   rval, val);
		}
	}

	return 0;
}
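/* Illustrative layout (assumption, derived from the loop above): with
 * usage.cnt = 2 PF SBs and usage.iov_cnt = 2 VF SBs, starting at the
 * default SB the CAM would be rewritten roughly as:
 *
 *	DSB:	func_id = rel_pf_id, is_pf = 1, vector = 0
 *	PF SB:	func_id = rel_pf_id, is_pf = 1, vector = 1
 *	PF SB:	func_id = rel_pf_id, is_pf = 1, vector = 2
 *	VF SB:	func_id = first_vf_in_pf,     is_pf = 0, vector = 0
 *	VF SB:	func_id = first_vf_in_pf + 1, is_pf = 0, vector = 0
 *	rest:	disabled (func_id = 0, line marked invalid)
 */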
int ecore_int_igu_reset_cam_default(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	struct ecore_sb_cnt_info *p_cnt = &p_hwfn->hw_info.p_igu_info->usage;

	/* Return all the usage indications to default prior to the reset;
	 * the reset expects the non-orig fields to reflect the initial
	 * status of the SBs, and re-calculates the originals based on those.
	 */
	p_cnt->cnt = p_cnt->orig;
	p_cnt->free_cnt = p_cnt->orig;
	p_cnt->iov_cnt = p_cnt->iov_orig;
	p_cnt->free_cnt_iov = p_cnt->iov_orig;
	p_cnt->orig = 0;
	p_cnt->iov_orig = 0;

	/* TODO - we probably need to re-configure the CAU as well... */
	return ecore_int_igu_reset_cam(p_hwfn, p_ptt);
}
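/* Illustrative note (assumption): this is the hook to use when the IGU
 * CAM must be restored to its pristine PF/VF split, e.g.:
 *
 *	rc = ecore_int_igu_reset_cam_default(p_hwfn, p_ptt);
 *
 * It rolls the usage counters back to their originals and then delegates
 * to ecore_int_igu_reset_cam() for the actual CAM rewrite.
 */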
static void ecore_int_igu_read_cam_block(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u16 igu_sb_id)
{
	u32 val = ecore_rd(p_hwfn, p_ptt,
			   IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
	struct ecore_igu_block *p_block;

	p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];

	/* Fill the block information */
	p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
	p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
	p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);
	p_block->igu_sb_id = igu_sb_id;
}
enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt)
{
	struct ecore_igu_info *p_igu_info;
	struct ecore_igu_block *p_block;
	u32 min_vf = 0, max_vf = 0;
	u16 igu_sb_id;

	p_hwfn->hw_info.p_igu_info = OSAL_ZALLOC(p_hwfn->p_dev,
						 GFP_KERNEL,
						 sizeof(*p_igu_info));
	if (!p_hwfn->hw_info.p_igu_info)
		return ECORE_NOMEM;
	p_igu_info = p_hwfn->hw_info.p_igu_info;

	/* Distinguish between existent and non-existent default SB */
	p_igu_info->igu_dsb_id = ECORE_SB_INVALID_IDX;

	/* Find the range of VF ids whose SBs belong to this PF */
	if (p_hwfn->p_dev->p_iov_info) {
		struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;

		min_vf = p_iov->first_vf_in_pf;
		max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
	}
	for (igu_sb_id = 0;
	     igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
	     igu_sb_id++) {
		/* Read current entry; notice it might not belong to this PF */
		ecore_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id);
		p_block = &p_igu_info->entry[igu_sb_id];

		if ((p_block->is_pf) &&
		    (p_block->function_id == p_hwfn->rel_pf_id)) {
			p_block->status = ECORE_IGU_STATUS_PF |
					  ECORE_IGU_STATUS_VALID |
					  ECORE_IGU_STATUS_FREE;

			if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX)
				p_igu_info->usage.cnt++;
		} else if (!(p_block->is_pf) &&
			   (p_block->function_id >= min_vf) &&
			   (p_block->function_id < max_vf)) {
			/* Available for VFs of this PF */
			p_block->status = ECORE_IGU_STATUS_VALID |
					  ECORE_IGU_STATUS_FREE;

			if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX)
				p_igu_info->usage.iov_cnt++;
		}

		/* Mark the first entry belonging to the PF or its VFs
		 * as the default SB [we'll reset the IGU prior to first usage].
		 */
		if ((p_block->status & ECORE_IGU_STATUS_VALID) &&
		    (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX)) {
			p_igu_info->igu_dsb_id = igu_sb_id;
			p_block->status |= ECORE_IGU_STATUS_DSB;
		}

		/* While this isn't suitable for all clients, limit the number
		 * of prints by having each PF print only its own entries, with
		 * the exception of PF0 which prints everything.
		 */
		if ((p_block->status & ECORE_IGU_STATUS_VALID) ||
		    (p_hwfn->abs_pf_id == 0))
			DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
				   "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
				   igu_sb_id, p_block->function_id,
				   p_block->is_pf, p_block->vector_number);
	}
	if (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX) {
		DP_NOTICE(p_hwfn, true,
			  "IGU CAM returned invalid values igu_dsb_id=0x%x\n",
			  p_igu_info->igu_dsb_id);
		return ECORE_INVAL;
	}

	/* All non-default SBs are considered free at this point */
	p_igu_info->usage.free_cnt = p_igu_info->usage.cnt;
	p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt;

	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
		   "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n",
		   p_igu_info->igu_dsb_id, p_igu_info->usage.cnt,
		   p_igu_info->usage.iov_cnt);

	return ECORE_SUCCESS;
}
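/* Illustrative note (assumption): the default SB is intentionally kept
 * out of usage.cnt and usage.iov_cnt; both counters are incremented only
 * once igu_dsb_id has already been latched, so "free" here always means
 * a non-default SB that may later be handed out to clients.
 */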
enum _ecore_status_t
ecore_int_igu_relocate_sb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			  u16 sb_id, bool b_to_vf)
{
	struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	struct ecore_igu_block *p_block = OSAL_NULL;
	u16 igu_sb_id = 0, vf_num = 0;
	u32 val = 0;

	if (IS_VF(p_hwfn->p_dev) || !IS_PF_SRIOV(p_hwfn))
		return ECORE_INVAL;

	if (sb_id == ECORE_SP_SB_ID)
		return ECORE_INVAL;

	if (!p_info->b_allow_pf_vf_change) {
		DP_INFO(p_hwfn, "Can't relocate SBs as MFW is too old.\n");
		return ECORE_INVAL;
	}

	/* If we're moving a SB from PF to VF, the client had to specify
	 * which vector it wants to move.
	 */
	if (b_to_vf) {
		igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
		if (igu_sb_id == ECORE_SB_INVALID_IDX)
			return ECORE_INVAL;
	}

	/* If we're moving a SB from VF to PF, need to validate there isn't
	 * already a line configured for that vector.
	 */
	if (!b_to_vf) {
		if (ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1) !=
		    ECORE_SB_INVALID_IDX)
			return ECORE_INVAL;
	}
	/* We need to validate that the SB can actually be relocated.
	 * This would also handle the previous case where we've explicitly
	 * stated which IGU SB needs to move.
	 */
	for (; igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
	     igu_sb_id++) {
		p_block = &p_info->entry[igu_sb_id];

		if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
		    !(p_block->status & ECORE_IGU_STATUS_FREE) ||
		    (!!(p_block->status & ECORE_IGU_STATUS_PF) != b_to_vf)) {
			if (b_to_vf)
				return ECORE_INVAL;
			else
				continue;
		}

		break;
	}

	if (igu_sb_id == ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev)) {
		DP_VERBOSE(p_hwfn, (ECORE_MSG_INTR | ECORE_MSG_IOV),
			   "Failed to find a free SB to move\n");
		return ECORE_INVAL;
	}
	/* At this point, p_block points to the SB we want to relocate */
	if (b_to_vf) {
		p_block->status &= ~ECORE_IGU_STATUS_PF;

		/* It doesn't matter which VF number we choose, since we're
		 * going to disable the line; but let's keep it in range.
		 */
		vf_num = (u16)p_hwfn->p_dev->p_iov_info->first_vf_in_pf;

		p_block->function_id = (u8)vf_num;
		p_block->is_pf = 0;
		p_block->vector_number = 0;

		p_info->usage.cnt--;
		p_info->usage.free_cnt--;
		p_info->usage.iov_cnt++;
		p_info->usage.free_cnt_iov++;

		/* TODO - if SBs aren't really the limiting factor,
		 * then this might not be accurate [in the sense that
		 * we might not need to decrement the feature].
		 */
		p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]--;
		p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]++;
	} else {
		p_block->status |= ECORE_IGU_STATUS_PF;
		p_block->function_id = p_hwfn->rel_pf_id;
		p_block->is_pf = 1;
		p_block->vector_number = sb_id + 1;

		p_info->usage.cnt++;
		p_info->usage.free_cnt++;
		p_info->usage.iov_cnt--;
		p_info->usage.free_cnt_iov--;

		p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]++;
		p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]--;
	}
	/* Update the IGU and CAU with the new configuration */
	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
		  p_block->function_id);
	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
	SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);
	SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
		  p_block->vector_number);

	ecore_wr(p_hwfn, p_ptt,
		 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id,
		 val);

	ecore_int_cau_conf_sb(p_hwfn, p_ptt, 0,
			      igu_sb_id, vf_num,
			      p_block->is_pf ? 0 : 1);

	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
		   "Relocation: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
		   igu_sb_id, p_block->function_id,
		   p_block->is_pf, p_block->vector_number);

	return ECORE_SUCCESS;
}
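/* Illustrative usage (hypothetical vector number): donating the PF's
 * MSI-X vector 3 to the VF SB pool, then later reclaiming it:
 *
 *	rc = ecore_int_igu_relocate_sb(p_hwfn, p_ptt, 3, true);
 *	...
 *	rc = ecore_int_igu_relocate_sb(p_hwfn, p_ptt, 3, false);
 *
 * Both directions require an MFW new enough to set b_allow_pf_vf_change,
 * and the affected line is rewritten in both the IGU CAM and the CAU.
 */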
/**
 * @brief Initialize IGU runtime registers
 *
 * @param p_hwfn
 */
void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn)
{
	u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;

	STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
}
#define LSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - \
			  IGU_CMD_INT_ACK_BASE)
#define MSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - \
			  IGU_CMD_INT_ACK_BASE)
u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn)
{
	u32 intr_status_hi = 0, intr_status_lo = 0;
	u64 intr_status = 0;

	intr_status_lo = REG_RD(p_hwfn,
				GTT_BAR0_MAP_REG_IGU_CMD +
				LSB_IGU_CMD_ADDR * 8);
	intr_status_hi = REG_RD(p_hwfn,
				GTT_BAR0_MAP_REG_IGU_CMD +
				MSB_IGU_CMD_ADDR * 8);
	intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;

	return intr_status;
}
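/* Illustrative note (hypothetical bit index): the two 32-bit halves are
 * combined into one 64-bit SISR image, so a caller can test a single
 * interrupt line with plain bit arithmetic:
 *
 *	u64 sisr = ecore_int_igu_read_sisr_reg(p_hwfn);
 *
 *	if (sisr & (1ULL << 40))
 *		... line 40 is asserted ...
 */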
static void ecore_int_sp_dpc_setup(struct ecore_hwfn *p_hwfn)
{
	OSAL_DPC_INIT(p_hwfn->sp_dpc, p_hwfn);
	p_hwfn->b_sp_dpc_enabled = true;
}

static enum _ecore_status_t ecore_int_sp_dpc_alloc(struct ecore_hwfn *p_hwfn)
{
	p_hwfn->sp_dpc = OSAL_DPC_ALLOC(p_hwfn);
	if (!p_hwfn->sp_dpc)
		return ECORE_NOMEM;

	return ECORE_SUCCESS;
}

static void ecore_int_sp_dpc_free(struct ecore_hwfn *p_hwfn)
{
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->sp_dpc);
}
enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	rc = ecore_int_sp_dpc_alloc(p_hwfn);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn->p_dev, "Failed to allocate sp dpc mem\n");
		return rc;
	}

	rc = ecore_int_sp_sb_alloc(p_hwfn, p_ptt);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn->p_dev, "Failed to allocate sp sb mem\n");
		return rc;
	}

	rc = ecore_int_sb_attn_alloc(p_hwfn, p_ptt);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn->p_dev, "Failed to allocate sb attn mem\n");

	return rc;
}
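/* Illustrative note (assumption about the surrounding driver flow):
 * ecore_int_alloc(), ecore_int_setup() and ecore_int_free() below are
 * expected to be called in the usual alloc -> setup -> free order, e.g.:
 *
 *	rc = ecore_int_alloc(p_hwfn, p_ptt);
 *	...
 *	ecore_int_setup(p_hwfn, p_ptt);
 *	...
 *	ecore_int_free(p_hwfn);
 *
 * ecore_int_free() releases the slowpath SB, the attention SB and the
 * slowpath DPC allocated here.
 */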
void ecore_int_free(struct ecore_hwfn *p_hwfn)
{
	ecore_int_sp_sb_free(p_hwfn);
	ecore_int_sb_attn_free(p_hwfn);
	ecore_int_sp_dpc_free(p_hwfn);
}
void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	if (!p_hwfn || !p_hwfn->p_sp_sb || !p_hwfn->p_sb_attn)
		return;

	ecore_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
	ecore_int_sb_attn_setup(p_hwfn, p_ptt);
	ecore_int_sp_dpc_setup(p_hwfn);
}
void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
			   struct ecore_sb_cnt_info *p_sb_cnt_info)
{
	struct ecore_igu_info *p_igu_info = p_hwfn->hw_info.p_igu_info;

	if (!p_igu_info || !p_sb_cnt_info)
		return;

	OSAL_MEMCPY(p_sb_cnt_info, &p_igu_info->usage,
		    sizeof(*p_sb_cnt_info));
}
void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev)
{
	int i;

	for_each_hwfn(p_dev, i)
		p_dev->hwfns[i].b_int_requested = false;
}

void ecore_int_attn_clr_enable(struct ecore_dev *p_dev, bool clr_enable)
{
	p_dev->attn_clr_en = clr_enable;
}
enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     u8 timer_res, u16 sb_id, bool tx)
{
	struct cau_sb_entry sb_entry;
	enum _ecore_status_t rc;

	if (!p_hwfn->hw_init_done) {
		DP_ERR(p_hwfn, "hardware not initialized yet\n");
		return ECORE_INVAL;
	}

	rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
				 sb_id * sizeof(u64),
				 (u64)(osal_uintptr_t)&sb_entry, 2, 0);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
		return rc;
	}

	if (tx)
		SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
	else
		SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res);

	rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
				 (u64)(osal_uintptr_t)&sb_entry,
				 CAU_REG_SB_VAR_MEMORY +
				 sb_id * sizeof(u64), 2, 0);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc);
		return rc;
	}

	return rc;
}
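/* Illustrative usage (hypothetical values): coarsening the Rx-side
 * interrupt coalescing timer of SB 0 by selecting timer resolution 2:
 *
 *	rc = ecore_int_set_timer_res(p_hwfn, p_ptt, 2, 0, false);
 *
 * The CAU entry is read back via DMAE, patched in host memory
 * (TIMER_RES0 for Rx, TIMER_RES1 for Tx) and written back, so the call
 * is only legal once hw_init_done is set.
 */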
enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  struct ecore_sb_info *p_sb,
					  struct ecore_sb_info_dbg *p_info)
{
	u16 sbid = p_sb->igu_sb_id;
	int i;

	if (IS_VF(p_hwfn->p_dev))
		return ECORE_INVAL;

	if (sbid > NUM_OF_SBS(p_hwfn->p_dev))
		return ECORE_INVAL;

	p_info->igu_prod = ecore_rd(p_hwfn, p_ptt,
				    IGU_REG_PRODUCER_MEMORY + sbid * 4);
	p_info->igu_cons = ecore_rd(p_hwfn, p_ptt,
				    IGU_REG_CONSUMER_MEM + sbid * 4);

	for (i = 0; i < PIS_PER_SB; i++)
		p_info->pi[i] = (u16)ecore_rd(p_hwfn, p_ptt,
					      CAU_REG_PI_MEMORY +
					      sbid * 4 * PIS_PER_SB + i * 4);

	return ECORE_SUCCESS;
}
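/* Illustrative usage (hypothetical debug dump): snapshotting a SB's IGU
 * producer/consumer and CAU PI values for diagnostics:
 *
 *	struct ecore_sb_info_dbg dbg;
 *
 *	if (ecore_int_get_sb_dbg(p_hwfn, p_ptt, p_sb, &dbg) ==
 *	    ECORE_SUCCESS)
 *		DP_INFO(p_hwfn, "SB prod %08x cons %08x\n",
 *			dbg.igu_prod, dbg.igu_cons);
 */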