net/qede: replace strncpy by strlcpy
[dpdk.git] / drivers / net / qede / base / ecore_int.c
1 /*
2  * Copyright (c) 2016 QLogic Corporation.
3  * All rights reserved.
4  * www.qlogic.com
5  *
6  * See LICENSE.qede_pmd for copyright and licensing details.
7  */
8
9 #include <rte_string_fns.h>
10
11 #include "bcm_osal.h"
12 #include "ecore.h"
13 #include "ecore_spq.h"
14 #include "ecore_gtt_reg_addr.h"
15 #include "ecore_init_ops.h"
16 #include "ecore_rt_defs.h"
17 #include "ecore_int.h"
18 #include "reg_addr.h"
19 #include "ecore_hw.h"
20 #include "ecore_sriov.h"
21 #include "ecore_vf.h"
22 #include "ecore_hw_defs.h"
23 #include "ecore_hsi_common.h"
24 #include "ecore_mcp.h"
25
26 struct ecore_pi_info {
27         ecore_int_comp_cb_t comp_cb;
28         void *cookie;           /* Will be sent to the compl cb function */
29 };
30
31 struct ecore_sb_sp_info {
32         struct ecore_sb_info sb_info;
33         /* per protocol index data */
34         struct ecore_pi_info pi_info_arr[PIS_PER_SB_E4];
35 };
36
37 enum ecore_attention_type {
38         ECORE_ATTN_TYPE_ATTN,
39         ECORE_ATTN_TYPE_PARITY,
40 };
41
42 #define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
43         ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)
44
45 struct aeu_invert_reg_bit {
46         char bit_name[30];
47
48 #define ATTENTION_PARITY                (1 << 0)
49
50 #define ATTENTION_LENGTH_MASK           (0x00000ff0)
51 #define ATTENTION_LENGTH_SHIFT          (4)
52 #define ATTENTION_LENGTH(flags)         (((flags) & ATTENTION_LENGTH_MASK) >> \
53                                          ATTENTION_LENGTH_SHIFT)
54 #define ATTENTION_SINGLE                (1 << ATTENTION_LENGTH_SHIFT)
55 #define ATTENTION_PAR                   (ATTENTION_SINGLE | ATTENTION_PARITY)
56 #define ATTENTION_PAR_INT               ((2 << ATTENTION_LENGTH_SHIFT) | \
57                                          ATTENTION_PARITY)
58
59 /* Multiple bits start with this offset */
60 #define ATTENTION_OFFSET_MASK           (0x000ff000)
61 #define ATTENTION_OFFSET_SHIFT          (12)
62
63 #define ATTENTION_BB_MASK               (0x00700000)
64 #define ATTENTION_BB_SHIFT              (20)
65 #define ATTENTION_BB(value)             ((value) << ATTENTION_BB_SHIFT)
66 #define ATTENTION_BB_DIFFERENT          (1 << 23)
67
68 #define ATTENTION_CLEAR_ENABLE          (1 << 28)
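        /* A sketch of the 'flags' layout implied by the masks above:
         * bit 0 marks a parity source, bits [4:11] hold the number of
         * consecutive AEU bits the entry spans, bits [12:19] an optional
         * starting offset, bits [20:22] an index into aeu_descs_special
         * for BB-specific sources (valid when bit 23 is set), and bit 28
         * requests masking the source after its first occurrence.
         */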
69         unsigned int flags;
70
71         /* Callback to call if attention will be triggered */
72         enum _ecore_status_t (*cb)(struct ecore_hwfn *p_hwfn);
73
74         enum block_id block_index;
75 };
76
77 struct aeu_invert_reg {
78         struct aeu_invert_reg_bit bits[32];
79 };
80
81 #define MAX_ATTN_GRPS           (8)
82 #define NUM_ATTN_REGS           (9)
83
84 static enum _ecore_status_t ecore_mcp_attn_cb(struct ecore_hwfn *p_hwfn)
85 {
86         u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);
87
88         DP_INFO(p_hwfn->p_dev, "MCP_REG_CPU_STATE: %08x - Masking...\n", tmp);
89         ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK, 0xffffffff);
90
91         return ECORE_SUCCESS;
92 }
93
94 #define ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK         (0x3c000)
95 #define ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT        (14)
96 #define ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK         (0x03fc0)
97 #define ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT        (6)
98 #define ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK      (0x00020)
99 #define ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT     (5)
100 #define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK     (0x0001e)
101 #define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT    (1)
102 #define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK      (0x1)
103 #define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_SHIFT     (0)
104 #define ECORE_PSWHST_ATTENTION_VF_DISABLED              (0x1)
105 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS         (0x1)
106 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK         (0x1)
107 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT        (0)
108 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK     (0x1e)
109 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT    (1)
110 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK   (0x20)
111 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT  (5)
112 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK      (0x3fc0)
113 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT     (6)
114 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK      (0x3c000)
115 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT     (14)
116 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK    (0x3fc0000)
117 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT   (18)
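/* The DISABLED_* fields above decode PSWHST_REG_VF_DISABLED_ERROR_DATA,
 * while the INCORRECT_ACCESS_* fields decode
 * PSWHST_REG_INCORRECT_ACCESS_DATA, as used by the callback below.
 */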
118 static enum _ecore_status_t ecore_pswhst_attn_cb(struct ecore_hwfn *p_hwfn)
119 {
120         u32 tmp =
121             ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
122                      PSWHST_REG_VF_DISABLED_ERROR_VALID);
123
124         /* Disabled VF access */
125         if (tmp & ECORE_PSWHST_ATTENTION_VF_DISABLED) {
126                 u32 addr, data;
127
128                 addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
129                                 PSWHST_REG_VF_DISABLED_ERROR_ADDRESS);
130                 data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
131                                 PSWHST_REG_VF_DISABLED_ERROR_DATA);
132                 DP_INFO(p_hwfn->p_dev,
133                         "PF[0x%02x] VF [0x%02x] [Valid 0x%02x] Client [0x%02x]"
134                         " Write [0x%02x] Addr [0x%08x]\n",
135                         (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK)
136                              >> ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT),
137                         (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK)
138                              >> ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT),
139                         (u8)((data &
140                               ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK) >>
141                               ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT),
142                         (u8)((data &
143                               ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK) >>
144                               ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT),
145                         (u8)((data &
146                               ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK) >>
147                               ECORE_PSWHST_ATTENTION_DISABLED_WRITE_SHIFT),
148                         addr);
149         }
150
151         tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
152                        PSWHST_REG_INCORRECT_ACCESS_VALID);
153         if (tmp & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS) {
154                 u32 addr, data, length;
155
156                 addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
157                                 PSWHST_REG_INCORRECT_ACCESS_ADDRESS);
158                 data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
159                                 PSWHST_REG_INCORRECT_ACCESS_DATA);
160                 length = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
161                                   PSWHST_REG_INCORRECT_ACCESS_LENGTH);
162
163                 DP_INFO(p_hwfn->p_dev,
164                         "Incorrect access to %08x of length %08x - PF [%02x]"
165                         " VF [%04x] [valid %02x] client [%02x] write [%02x]"
166                         " Byte-Enable [%04x] [%08x]\n",
167                         addr, length,
168                         (u8)((data &
169                       ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK) >>
170                       ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT),
171                         (u8)((data &
172                       ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK) >>
173                       ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT),
174                         (u8)((data &
175                       ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK) >>
176                       ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT),
177                         (u8)((data &
178                       ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK) >>
179                       ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT),
180                         (u8)((data &
181                       ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK) >>
182                       ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT),
183                         (u8)((data &
184                       ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK) >>
185                       ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT),
186                         data);
187         }
188
189         /* TODO - We know 'some' of these are legal due to virtualization,
190          * but is it true for all of them?
191          */
192         return ECORE_SUCCESS;
193 }
194
195 #define ECORE_GRC_ATTENTION_VALID_BIT           (1 << 0)
196 #define ECORE_GRC_ATTENTION_ADDRESS_MASK        (0x7fffff << 0)
197 #define ECORE_GRC_ATTENTION_RDWR_BIT            (1 << 23)
198 #define ECORE_GRC_ATTENTION_MASTER_MASK         (0xf << 24)
199 #define ECORE_GRC_ATTENTION_MASTER_SHIFT        (24)
200 #define ECORE_GRC_ATTENTION_PF_MASK             (0xf)
201 #define ECORE_GRC_ATTENTION_VF_MASK             (0xff << 4)
202 #define ECORE_GRC_ATTENTION_VF_SHIFT            (4)
203 #define ECORE_GRC_ATTENTION_PRIV_MASK           (0x3 << 14)
204 #define ECORE_GRC_ATTENTION_PRIV_SHIFT          (14)
205 #define ECORE_GRC_ATTENTION_PRIV_VF             (0)
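/* The captured GRC address is in dwords; the handler below shifts it
 * left by 2 to log a byte address.
 */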
206 static const char *grc_timeout_attn_master_to_str(u8 master)
207 {
208         switch (master) {
209         case 1:
210                 return "PXP";
211         case 2:
212                 return "MCP";
213         case 3:
214                 return "MSDM";
215         case 4:
216                 return "PSDM";
217         case 5:
218                 return "YSDM";
219         case 6:
220                 return "USDM";
221         case 7:
222                 return "TSDM";
223         case 8:
224                 return "XSDM";
225         case 9:
226                 return "DBU";
227         case 10:
228                 return "DMAE";
229         default:
230                 return "Unknown";
231         }
232 }
233
234 static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn)
235 {
236         u32 tmp, tmp2;
237
238         /* We've already cleared the timeout interrupt register, so we learn
239          * of interrupts via the validity register
240          */
241         tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
242                        GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
243         if (!(tmp & ECORE_GRC_ATTENTION_VALID_BIT))
244                 goto out;
245
246         /* Read the GRC timeout information */
247         tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
248                        GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0);
249         tmp2 = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
250                         GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);
251
252         DP_NOTICE(p_hwfn->p_dev, false,
253                   "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n",
254                   tmp2, tmp,
255                   (tmp & ECORE_GRC_ATTENTION_RDWR_BIT) ? "Write to"
256                                                        : "Read from",
257                   (tmp & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2,
258                   grc_timeout_attn_master_to_str(
259                         (tmp & ECORE_GRC_ATTENTION_MASTER_MASK) >>
260                          ECORE_GRC_ATTENTION_MASTER_SHIFT),
261                   (tmp2 & ECORE_GRC_ATTENTION_PF_MASK),
262                   (((tmp2 & ECORE_GRC_ATTENTION_PRIV_MASK) >>
263                   ECORE_GRC_ATTENTION_PRIV_SHIFT) ==
264                   ECORE_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant:)",
265                   (tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >>
266                   ECORE_GRC_ATTENTION_VF_SHIFT);
267
268 out:
269         /* Regardless of anything else, clear the validity bit */
270         ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
271                  GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);
272         return ECORE_SUCCESS;
273 }
274
275 #define ECORE_PGLUE_ATTENTION_VALID (1 << 29)
276 #define ECORE_PGLUE_ATTENTION_RD_VALID (1 << 26)
277 #define ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf << 20)
278 #define ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20)
279 #define ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID (1 << 19)
280 #define ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff << 24)
281 #define ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24)
282 #define ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR (1 << 21)
283 #define ECORE_PGLUE_ATTENTION_DETAILS2_BME      (1 << 22)
284 #define ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN (1 << 23)
285 #define ECORE_PGLUE_ATTENTION_ICPL_VALID (1 << 23)
286 #define ECORE_PGLUE_ATTENTION_ZLR_VALID (1 << 25)
287 #define ECORE_PGLUE_ATTENTION_ILT_VALID (1 << 23)
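/* Note that DETAILS2_FID_EN, ICPL_VALID and ILT_VALID all occupy bit 23;
 * each is simply tested against a different DETAILS register.
 */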
288
289 enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
290                                                    struct ecore_ptt *p_ptt)
291 {
292         u32 tmp;
293
294         tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2);
295         if (tmp & ECORE_PGLUE_ATTENTION_VALID) {
296                 u32 addr_lo, addr_hi, details;
297
298                 addr_lo = ecore_rd(p_hwfn, p_ptt,
299                                    PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
300                 addr_hi = ecore_rd(p_hwfn, p_ptt,
301                                    PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
302                 details = ecore_rd(p_hwfn, p_ptt,
303                                    PGLUE_B_REG_TX_ERR_WR_DETAILS);
304
305                 DP_NOTICE(p_hwfn, false,
306                           "Illegal write by chip to [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
307                           addr_hi, addr_lo, details,
308                           (u8)((details &
309                                 ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
310                                ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
311                           (u8)((details &
312                                 ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
313                                ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
314                           (u8)((details &
315                                ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0),
316                           tmp,
317                           (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ?
318                                 1 : 0),
319                           (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ?
320                                 1 : 0),
321                           (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ?
322                                 1 : 0));
323         }
324
325         tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2);
326         if (tmp & ECORE_PGLUE_ATTENTION_RD_VALID) {
327                 u32 addr_lo, addr_hi, details;
328
329                 addr_lo = ecore_rd(p_hwfn, p_ptt,
330                                    PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
331                 addr_hi = ecore_rd(p_hwfn, p_ptt,
332                                    PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
333                 details = ecore_rd(p_hwfn, p_ptt,
334                                    PGLUE_B_REG_TX_ERR_RD_DETAILS);
335
336                 DP_NOTICE(p_hwfn, false,
337                           "Illegal read by chip from [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
338                           addr_hi, addr_lo, details,
339                           (u8)((details &
340                                 ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
341                                ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
342                           (u8)((details &
343                                 ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
344                                ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
345                           (u8)((details &
346                                ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0),
347                           tmp,
348                           (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ?
349                                 1 : 0),
350                           (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ?
351                                 1 : 0),
352                           (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ?
353                                 1 : 0));
354         }
355
356         tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
357         if (tmp & ECORE_PGLUE_ATTENTION_ICPL_VALID)
358                 DP_NOTICE(p_hwfn, false, "ICPL error - %08x\n", tmp);
359
360         tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
361         if (tmp & ECORE_PGLUE_ATTENTION_ZLR_VALID) {
362                 u32 addr_hi, addr_lo;
363
364                 addr_lo = ecore_rd(p_hwfn, p_ptt,
365                                    PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
366                 addr_hi = ecore_rd(p_hwfn, p_ptt,
367                                    PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);
368
369                 DP_NOTICE(p_hwfn, false,
370                           "ZLR error - %08x [Address %08x:%08x]\n",
371                           tmp, addr_hi, addr_lo);
372         }
373
374         tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
375         if (tmp & ECORE_PGLUE_ATTENTION_ILT_VALID) {
376                 u32 addr_hi, addr_lo, details;
377
378                 addr_lo = ecore_rd(p_hwfn, p_ptt,
379                                    PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
380                 addr_hi = ecore_rd(p_hwfn, p_ptt,
381                                    PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
382                 details = ecore_rd(p_hwfn, p_ptt,
383                                    PGLUE_B_REG_VF_ILT_ERR_DETAILS);
384
385                 DP_NOTICE(p_hwfn, false,
386                           "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n",
387                           details, tmp, addr_hi, addr_lo);
388         }
389
390         /* Clear the indications */
391         ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));
392
393         return ECORE_SUCCESS;
394 }
395
396 static enum _ecore_status_t ecore_pglueb_rbc_attn_cb(struct ecore_hwfn *p_hwfn)
397 {
398         return ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt);
399 }
400
401 static enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn)
402 {
403         DP_NOTICE(p_hwfn, false, "FW assertion!\n");
404
405         ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FW_ASSERT);
406
407         return ECORE_INVAL;
408 }
409
410 static enum _ecore_status_t
411 ecore_general_attention_35(struct ecore_hwfn *p_hwfn)
412 {
413         DP_INFO(p_hwfn, "General attention 35!\n");
414
415         return ECORE_SUCCESS;
416 }
417
418 #define ECORE_DORQ_ATTENTION_REASON_MASK        (0xfffff)
419 #define ECORE_DORQ_ATTENTION_OPAQUE_MASK        (0xffff)
420 #define ECORE_DORQ_ATTENTION_OPAQUE_SHIFT       (0x0)
421 #define ECORE_DORQ_ATTENTION_SIZE_MASK          (0x7f)
422 #define ECORE_DORQ_ATTENTION_SIZE_SHIFT         (16)
423
424 #define ECORE_DB_REC_COUNT                      10
425 #define ECORE_DB_REC_INTERVAL                   100
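/* Together these bound the flush wait below to roughly
 * ECORE_DB_REC_COUNT * ECORE_DB_REC_INTERVAL = 10 * 100us = 1ms.
 */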
426
427 /* assumes sticky overflow indication was set for this PF */
428 static enum _ecore_status_t ecore_db_rec_attn(struct ecore_hwfn *p_hwfn,
429                                               struct ecore_ptt *p_ptt)
430 {
431         u8 count = ECORE_DB_REC_COUNT;
432         u32 usage = 1;
433
434         /* Wait for usage to zero or count to run out. This is necessary since
435          * EDPM doorbell transactions can take multiple 64b cycles, and as such
436          * can "split" over the PCI. Possibly, the doorbell drop can happen with
437          * half an EDPM in the queue and the other half dropped. Another EDPM
438          * doorbell to the same address (from the doorbell recovery mechanism or
439          * from the doorbelling entity) could have its first half dropped and the
440          * second half interpreted as a continuation of the first. To prevent such
441          * malformed doorbells from reaching the device, flush the queue before
442          * releasing the overflow sticky indication.
443          */
444         while (count-- && usage) {
445                 usage = ecore_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT);
446                 OSAL_UDELAY(ECORE_DB_REC_INTERVAL);
447         }
448
449         /* should have been depleted by now */
450         if (usage) {
451                 DP_NOTICE(p_hwfn->p_dev, false,
452                           "DB recovery: doorbell usage failed to zero after %d usec. usage was %x\n",
453                           ECORE_DB_REC_INTERVAL * ECORE_DB_REC_COUNT, usage);
454                 return ECORE_TIMEOUT;
455         }
456
457         /* flush any pending (e)dpm as they may never arrive */
458         ecore_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);
459
460         /* release overflow sticky indication (stop silently dropping
461          * everything)
462          */
463         ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
464
465         /* repeat all last doorbells (doorbell drop recovery) */
466         ecore_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL);
467
468         return ECORE_SUCCESS;
469 }
470
471 static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn)
472 {
473         u32 int_sts, first_drop_reason, details, address, overflow,
474                 all_drops_reason;
475         struct ecore_ptt *p_ptt = p_hwfn->p_dpc_ptt;
476         enum _ecore_status_t rc;
477
478         int_sts = ecore_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
479         DP_NOTICE(p_hwfn->p_dev, false, "DORQ attention. int_sts was %x\n",
480                   int_sts);
481
482         /* int_sts may be zero since all PFs were interrupted for doorbell
483          * overflow but another one already handled it. We can abort here; if
484          * this PF also requires overflow recovery we will be interrupted again.
485          */
486         if (!int_sts)
487                 return ECORE_SUCCESS;
488
489         /* check if db_drop or overflow happened */
490         if (int_sts & (DORQ_REG_INT_STS_DB_DROP |
491                        DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) {
492                 /* obtain data about db drop/overflow */
493                 first_drop_reason = ecore_rd(p_hwfn, p_ptt,
494                                   DORQ_REG_DB_DROP_REASON) &
495                                   ECORE_DORQ_ATTENTION_REASON_MASK;
496                 details = ecore_rd(p_hwfn, p_ptt,
497                                    DORQ_REG_DB_DROP_DETAILS);
498                 address = ecore_rd(p_hwfn, p_ptt,
499                                    DORQ_REG_DB_DROP_DETAILS_ADDRESS);
500                 overflow = ecore_rd(p_hwfn, p_ptt,
501                                     DORQ_REG_PF_OVFL_STICKY);
502                 all_drops_reason = ecore_rd(p_hwfn, p_ptt,
503                                             DORQ_REG_DB_DROP_DETAILS_REASON);
504
505                 /* log info */
506                 DP_NOTICE(p_hwfn->p_dev, false,
507                           "Doorbell drop occurred\n"
508                           "Address\t\t0x%08x\t(second BAR address)\n"
509                           "FID\t\t0x%04x\t\t(Opaque FID)\n"
510                           "Size\t\t0x%04x\t\t(in bytes)\n"
511                           "1st drop reason\t0x%08x\t(details on first drop since last handling)\n"
512                           "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n"
513                           "Overflow\t0x%x\t\t(a per PF indication)\n",
514                           address,
515                           GET_FIELD(details, ECORE_DORQ_ATTENTION_OPAQUE),
516                           GET_FIELD(details, ECORE_DORQ_ATTENTION_SIZE) * 4,
517                           first_drop_reason, all_drops_reason, overflow);
518
519                 /* if this PF caused overflow, initiate recovery */
520                 if (overflow) {
521                         rc = ecore_db_rec_attn(p_hwfn, p_ptt);
522                         if (rc != ECORE_SUCCESS)
523                                 return rc;
524                 }
525
526                 /* clear the doorbell drop details and prepare for next drop */
527                 ecore_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0);
528
529                 /* mark the interrupt as handled (note: even if the drop was
530                  * due to a different reason than overflow we mark it as handled)
531                  */
532                 ecore_wr(p_hwfn, p_ptt, DORQ_REG_INT_STS_WR,
533                          DORQ_REG_INT_STS_DB_DROP |
534                          DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR);
535
536                 /* if there are no indications other than drop indications,
537                  * success
538                  */
539                 if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP |
540                                  DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR |
541                                  DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0)
542                         return ECORE_SUCCESS;
543         }
544
545         /* some other indication was present - non recoverable */
546         DP_INFO(p_hwfn, "DORQ fatal attention\n");
547
548         return ECORE_INVAL;
549 }
550
551 static enum _ecore_status_t ecore_tm_attn_cb(struct ecore_hwfn *p_hwfn)
552 {
553 #ifndef ASIC_ONLY
554         if (CHIP_REV_IS_EMUL_B0(p_hwfn->p_dev)) {
555                 u32 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
556                                    TM_REG_INT_STS_1);
557
558                 if (val & ~(TM_REG_INT_STS_1_PEND_TASK_SCAN |
559                             TM_REG_INT_STS_1_PEND_CONN_SCAN))
560                         return ECORE_INVAL;
561
562                 if (val & (TM_REG_INT_STS_1_PEND_TASK_SCAN |
563                            TM_REG_INT_STS_1_PEND_CONN_SCAN))
564                         DP_INFO(p_hwfn,
565                                 "TM attention on emulation - most likely"
566                                 " a result of clock ratios\n");
567                 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1);
568                 val |= TM_REG_INT_MASK_1_PEND_CONN_SCAN |
569                     TM_REG_INT_MASK_1_PEND_TASK_SCAN;
570                 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1, val);
571
572                 return ECORE_SUCCESS;
573         }
574 #endif
575
576         return ECORE_INVAL;
577 }
578
579 /* Instead of major changes to the data-structure, we have some 'special'
580  * identifiers for sources that changed meaning between adapters.
581  */
582 enum aeu_invert_reg_special_type {
583         AEU_INVERT_REG_SPECIAL_CNIG_0,
584         AEU_INVERT_REG_SPECIAL_CNIG_1,
585         AEU_INVERT_REG_SPECIAL_CNIG_2,
586         AEU_INVERT_REG_SPECIAL_CNIG_3,
587         AEU_INVERT_REG_SPECIAL_MAX,
588 };
589
590 static struct aeu_invert_reg_bit
591 aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = {
592         {"CNIG port 0", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
593         {"CNIG port 1", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
594         {"CNIG port 2", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
595         {"CNIG port 3", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
596 };
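/* On BB, NWS/NWM sources in aeu_descs carry ATTENTION_BB_DIFFERENT and an
 * ATTENTION_BB() index selecting one of the CNIG entries above; see
 * ecore_int_aeu_translate().
 */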
597
598 /* Notice aeu_invert_reg must be defined in the same bit order as the HW */
599 static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
600         {
601          {                      /* After Invert 1 */
602           {"GPIO0 function%d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
603            MAX_BLOCK_ID},
604           }
605          },
606
607         {
608          {                      /* After Invert 2 */
609           {"PGLUE config_space", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
610           {"PGLUE misc_flr", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
611           {"PGLUE B RBC", ATTENTION_PAR_INT, ecore_pglueb_rbc_attn_cb,
612            BLOCK_PGLUE_B},
613           {"PGLUE misc_mctp", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
614           {"Flash event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
615           {"SMB event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
616           {"Main Power", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
617           {"SW timers #%d",
618            (8 << ATTENTION_LENGTH_SHIFT) | (1 << ATTENTION_OFFSET_SHIFT),
619            OSAL_NULL, MAX_BLOCK_ID},
620           {"PCIE glue/PXP VPD %d", (16 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
621            BLOCK_PGLCS},
622           }
623          },
624
625         {
626          {                      /* After Invert 3 */
627           {"General Attention %d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
628            MAX_BLOCK_ID},
629           }
630          },
631
632         {
633          {                      /* After Invert 4 */
634           {"General Attention 32", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE,
635            ecore_fw_assertion, MAX_BLOCK_ID},
636           {"General Attention %d",
637            (2 << ATTENTION_LENGTH_SHIFT) | (33 << ATTENTION_OFFSET_SHIFT),
638            OSAL_NULL, MAX_BLOCK_ID},
639           {"General Attention 35", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE,
640            ecore_general_attention_35, MAX_BLOCK_ID},
641           {"NWS Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
642                          ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0),
643                          OSAL_NULL, BLOCK_NWS},
644           {"NWS Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
645                             ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1),
646                             OSAL_NULL, BLOCK_NWS},
647           {"NWM Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
648                          ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2),
649                          OSAL_NULL, BLOCK_NWM},
650           {"NWM Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
651                             ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3),
652                             OSAL_NULL, BLOCK_NWM},
653           {"MCP CPU", ATTENTION_SINGLE, ecore_mcp_attn_cb, MAX_BLOCK_ID},
654           {"MCP Watchdog timer", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
655           {"MCP M2P", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
656           {"AVS stop status ready", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
657           {"MSTAT", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
658           {"MSTAT per-path", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
659           {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
660            MAX_BLOCK_ID},
661           {"NIG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NIG},
662           {"BMB/OPTE/MCP", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BMB},
663           {"BTB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BTB},
664           {"BRB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BRB},
665           {"PRS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRS},
666           }
667          },
668
669         {
670          {                      /* After Invert 5 */
671           {"SRC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_SRC},
672           {"PB Client1", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB1},
673           {"PB Client2", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB2},
674           {"RPB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RPB},
675           {"PBF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF},
676           {"QM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_QM},
677           {"TM", ATTENTION_PAR_INT, ecore_tm_attn_cb, BLOCK_TM},
678           {"MCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MCM},
679           {"MSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSDM},
680           {"MSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSEM},
681           {"PCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PCM},
682           {"PSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSDM},
683           {"PSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSEM},
684           {"TCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCM},
685           {"TSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSDM},
686           {"TSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSEM},
687           }
688          },
689
690         {
691          {                      /* After Invert 6 */
692           {"UCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_UCM},
693           {"USDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USDM},
694           {"USEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USEM},
695           {"XCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XCM},
696           {"XSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSDM},
697           {"XSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSEM},
698           {"YCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YCM},
699           {"YSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSDM},
700           {"YSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSEM},
701           {"XYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XYLD},
702           {"TMLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TMLD},
703           {"MULD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MULD},
704           {"YULD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YULD},
705           {"DORQ", ATTENTION_PAR_INT, ecore_dorq_attn_cb, BLOCK_DORQ},
706           {"DBG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DBG},
707           {"IPC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IPC},
708           }
709          },
710
711         {
712          {                      /* After Invert 7 */
713           {"CCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CCFC},
714           {"CDU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CDU},
715           {"DMAE", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DMAE},
716           {"IGU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IGU},
717           {"ATC", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
718           {"CAU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CAU},
719           {"PTU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PTU},
720           {"PRM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRM},
721           {"TCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCFC},
722           {"RDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RDIF},
723           {"TDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TDIF},
724           {"RSS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RSS},
725           {"MISC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISC},
726           {"MISCS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISCS},
727           {"PCIE", ATTENTION_PAR, OSAL_NULL, BLOCK_PCIE},
728           {"Vaux PCI core", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
729           {"PSWRQ", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ},
730           }
731          },
732
733         {
734          {                      /* After Invert 8 */
735           {"PSWRQ (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ2},
736           {"PSWWR", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR},
737           {"PSWWR (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR2},
738           {"PSWRD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD},
739           {"PSWRD (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD2},
740           {"PSWHST", ATTENTION_PAR_INT, ecore_pswhst_attn_cb, BLOCK_PSWHST},
741           {"PSWHST (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWHST2},
742           {"GRC", ATTENTION_PAR_INT, ecore_grc_attn_cb, BLOCK_GRC},
743           {"CPMU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CPMU},
744           {"NCSI", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NCSI},
745           {"MSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
746           {"PSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
747           {"TSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
748           {"USEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
749           {"XSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
750           {"YSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
751           {"pxp_misc_mps", ATTENTION_PAR, OSAL_NULL, BLOCK_PGLCS},
752           {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
753           {"PERST_B assertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
754           {"PERST_B deassertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
755           {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
756            MAX_BLOCK_ID},
757           }
758          },
759
760         {
761          {                      /* After Invert 9 */
762           {"MCP Latched memory", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
763           {"MCP Latched scratchpad cache", ATTENTION_SINGLE, OSAL_NULL,
764            MAX_BLOCK_ID},
765           {"MCP Latched ump_tx", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
766           {"MCP Latched scratchpad", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
767           {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
768            MAX_BLOCK_ID},
769           }
770          },
771
772 };
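/* An entry with ATTENTION_LENGTH > 1 covers that many consecutive AEU bits,
 * and a '%d' in its name is formatted with the index of the set bit during
 * deassertion handling (see ecore_int_deassertion()).
 */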
773
774 static struct aeu_invert_reg_bit *
775 ecore_int_aeu_translate(struct ecore_hwfn *p_hwfn,
776                         struct aeu_invert_reg_bit *p_bit)
777 {
778         if (!ECORE_IS_BB(p_hwfn->p_dev))
779                 return p_bit;
780
781         if (!(p_bit->flags & ATTENTION_BB_DIFFERENT))
782                 return p_bit;
783
784         return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >>
785                                   ATTENTION_BB_SHIFT];
786 }
787
788 static bool ecore_int_is_parity_flag(struct ecore_hwfn *p_hwfn,
789                                      struct aeu_invert_reg_bit *p_bit)
790 {
791         return !!(ecore_int_aeu_translate(p_hwfn, p_bit)->flags &
792                   ATTENTION_PARITY);
793 }
794
795 #define ATTN_STATE_BITS         (0xfff)
796 #define ATTN_BITS_MASKABLE      (0x3ff)
797 struct ecore_sb_attn_info {
798         /* Virtual & Physical address of the SB */
799         struct atten_status_block *sb_attn;
800         dma_addr_t sb_phys;
801
802         /* Last seen running index */
803         u16 index;
804
805         /* A mask of the AEU bits resulting in a parity error */
806         u32 parity_mask[NUM_ATTN_REGS];
807
808         /* A pointer to the attention description structure */
809         struct aeu_invert_reg *p_aeu_desc;
810
811         /* Previously asserted attentions, which are still unasserted */
812         u16 known_attn;
813
814         /* Cleanup address for the link's general hw attention */
815         u32 mfw_attn_addr;
816 };
817
818 static u16 ecore_attn_update_idx(struct ecore_hwfn *p_hwfn,
819                                  struct ecore_sb_attn_info *p_sb_desc)
820 {
821         u16 rc = 0, index;
822
823         OSAL_MMIOWB(p_hwfn->p_dev);
824
825         index = OSAL_LE16_TO_CPU(p_sb_desc->sb_attn->sb_index);
826         if (p_sb_desc->index != index) {
827                 p_sb_desc->index = index;
828                 rc = ECORE_SB_ATT_IDX;
829         }
830
831         OSAL_MMIOWB(p_hwfn->p_dev);
832
833         return rc;
834 }
835
836 /**
837  * @brief ecore_int_assertion - handles asserted attention bits
838  *
839  * @param p_hwfn
840  * @param asserted_bits newly asserted bits
841  * @return enum _ecore_status_t
842  */
843 static enum _ecore_status_t ecore_int_assertion(struct ecore_hwfn *p_hwfn,
844                                                 u16 asserted_bits)
845 {
846         struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
847         u32 igu_mask;
848
849         /* Mask the source of the attention in the IGU */
850         igu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
851                             IGU_REG_ATTENTION_ENABLE);
852         DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
853                    igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
854         igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
855         ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);
856
857         DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
858                    "inner known ATTN state: 0x%04x --> 0x%04x\n",
859                    sb_attn_sw->known_attn,
860                    sb_attn_sw->known_attn | asserted_bits);
861         sb_attn_sw->known_attn |= asserted_bits;
862
863         /* Handle MCP events */
864         if (asserted_bits & 0x100) {
865                 ecore_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
866                 /* Clean the MCP attention */
867                 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
868                          sb_attn_sw->mfw_attn_addr, 0);
869         }
870
871         /* FIXME - this will change once we'll have GOOD gtt definitions */
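        /* Presumably each IGU command occupies 8 bytes in the BAR0 GTT
         * window, hence the '<< 3' converting a command index to a byte
         * offset (an assumption; the GTT layout is defined elsewhere).
         */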
872         DIRECT_REG_WR(p_hwfn,
873                       (u8 OSAL_IOMEM *) p_hwfn->regview +
874                       GTT_BAR0_MAP_REG_IGU_CMD +
875                       ((IGU_CMD_ATTN_BIT_SET_UPPER -
876                         IGU_CMD_INT_ACK_BASE) << 3), (u32)asserted_bits);
877
878         DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "set cmd IGU: 0x%04x\n",
879                    asserted_bits);
880
881         return ECORE_SUCCESS;
882 }
883
884 static void ecore_int_attn_print(struct ecore_hwfn *p_hwfn,
885                                  enum block_id id, enum dbg_attn_type type,
886                                  bool b_clear)
887 {
888         /* @DPDK */
889         DP_NOTICE(p_hwfn->p_dev, false, "[block_id %d type %d]\n", id, type);
890 }
891
892 /**
893  * @brief ecore_int_deassertion_aeu_bit - handles the effects of a single
894  * cause of the attention
895  *
896  * @param p_hwfn
897  * @param p_aeu - descriptor of an AEU bit which caused the attention
898  * @param aeu_en_reg - register offset of the AEU enable reg. which
899  *  configured this bit to this group.
900  * @param p_bit_name - printable name of the attention source
901  * @param bitmask - mask of this source's bit(s) in the AEU register
902  * @return enum _ecore_status_t
903  */
904 static enum _ecore_status_t
905 ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn,
906                               struct aeu_invert_reg_bit *p_aeu,
907                               u32 aeu_en_reg,
908                               const char *p_bit_name,
909                               u32 bitmask)
910 {
911         enum _ecore_status_t rc = ECORE_INVAL;
912         bool b_fatal = false;
913
914         DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
915                 p_bit_name, bitmask);
916
917         /* Call callback before clearing the interrupt status */
918         if (p_aeu->cb) {
919                 DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
920                         p_bit_name);
921                 rc = p_aeu->cb(p_hwfn);
922         }
923
924         if (rc != ECORE_SUCCESS)
925                 b_fatal = true;
926
927         /* Print HW block interrupt registers */
928         if (p_aeu->block_index != MAX_BLOCK_ID) {
929                 ecore_int_attn_print(p_hwfn, p_aeu->block_index,
930                                      ATTN_TYPE_INTERRUPT, !b_fatal);
931         }
932
933         /* @DPDK */
934         /* Report the HW error if the attention is fatal */
935         if (b_fatal || (strcmp(p_bit_name, "PGLUE B RBC") == 0)) {
936                 DP_NOTICE(p_hwfn, true, "`%s': Fatal attention\n",
937                           p_bit_name);
938
939                 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
940         }
941
942         /* Prevent this Attention from being asserted in the future */
943         if (p_aeu->flags & ATTENTION_CLEAR_ENABLE ||
944             p_hwfn->p_dev->attn_clr_en) {
945                 u32 val;
946                 u32 mask = ~bitmask;
947                 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
948                 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & mask));
949                 DP_ERR(p_hwfn, "`%s' - Disabled future attentions\n",
950                         p_bit_name);
951         }
952
953         return rc;
954 }
955
956 /**
957  * @brief ecore_int_deassertion_parity - handle a single parity AEU source
958  *
959  * @param p_hwfn
960  * @param p_aeu - descriptor of an AEU bit which caused the parity
961  * @param aeu_en_reg - address of the AEU enable register
962  * @param bit_index
963  */
964 static void ecore_int_deassertion_parity(struct ecore_hwfn *p_hwfn,
965                                          struct aeu_invert_reg_bit *p_aeu,
966                                          u32 aeu_en_reg, u8 bit_index)
967 {
968         u32 block_id = p_aeu->block_index, mask, val;
969
970         DP_NOTICE(p_hwfn->p_dev, false,
971                   "%s parity attention is set [address 0x%08x, bit %d]\n",
972                   p_aeu->bit_name, aeu_en_reg, bit_index);
973
974         if (block_id != MAX_BLOCK_ID) {
975                 ecore_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, false);
976
977                 /* In A0, there's a single parity bit for several blocks */
978                 if (block_id == BLOCK_BTB) {
979                         ecore_int_attn_print(p_hwfn, BLOCK_OPTE,
980                                              ATTN_TYPE_PARITY, false);
981                         ecore_int_attn_print(p_hwfn, BLOCK_MCP,
982                                              ATTN_TYPE_PARITY, false);
983                 }
984         }
985
986         /* Prevent this parity error from being re-asserted */
987         mask = ~(0x1 << bit_index);
988         val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
989         ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask);
990         DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n",
991                 p_aeu->bit_name);
992 }
993
994 /**
995  * @brief - handles deassertion of previously asserted attentions.
996  *
997  * @param p_hwfn
998  * @param deasserted_bits - newly deasserted bits
999  * @return enum _ecore_status_t
1000  *
1001  */
1002 static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
1003                                                   u16 deasserted_bits)
1004 {
1005         struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
1006         u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en;
1007         u8 i, j, k, bit_idx;
1008         enum _ecore_status_t rc = ECORE_SUCCESS;
1009
1010         /* Read the attention registers in the AEU */
1011         for (i = 0; i < NUM_ATTN_REGS; i++) {
1012                 aeu_inv_arr[i] = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1013                                           MISC_REG_AEU_AFTER_INVERT_1_IGU +
1014                                           i * 0x4);
1015                 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1016                            "Deasserted bits [%d]: %08x\n", i, aeu_inv_arr[i]);
1017         }
1018
1019         /* Handle parity attentions first */
1020         for (i = 0; i < NUM_ATTN_REGS; i++) {
1021                 struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
1022                 u32 parities;
1023
1024                 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32);
1025                 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
1026                 parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
1027
1028                 /* Skip register in which no parity bit is currently set */
1029                 if (!parities)
1030                         continue;
1031
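                /* 'j' walks the descriptor entries while 'bit_idx' tracks the
                 * AEU bit position; a single entry may span several bits
                 * (ATTENTION_LENGTH), so the two advance at different rates.
                 */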
1032                 for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
1033                         struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
1034
1035                         if (ecore_int_is_parity_flag(p_hwfn, p_bit) &&
1036                             !!(parities & (1 << bit_idx)))
1037                                 ecore_int_deassertion_parity(p_hwfn, p_bit,
1038                                                              aeu_en, bit_idx);
1039
1040                         bit_idx += ATTENTION_LENGTH(p_bit->flags);
1041                 }
1042         }
1043
1044         /* Find non-parity cause for attention and act */
1045         for (k = 0; k < MAX_ATTN_GRPS; k++) {
1046                 struct aeu_invert_reg_bit *p_aeu;
1047
1048                 /* Handle only groups whose attention is currently deasserted */
1049                 if (!(deasserted_bits & (1 << k)))
1050                         continue;
1051
1052                 for (i = 0; i < NUM_ATTN_REGS; i++) {
1053                         u32 bits;
1054
1055                         aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
1056                                  i * sizeof(u32) +
1057                                  k * sizeof(u32) * NUM_ATTN_REGS;
1058                         en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
1059                         bits = aeu_inv_arr[i] & en;
1060
1061                         /* Skip if no bit from this group is currently set */
1062                         if (!bits)
1063                                 continue;
1064
1065                         /* Find all set bits from current register which belong
1066                          * to current group, making them responsible for the
1067                          * previous assertion.
1068                          */
1069                         for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
1070                                 unsigned long int bitmask;
1071                                 u8 bit, bit_len;
1072
1073                                 /* Need to account for bits with changed meaning */
1074                                 p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
1075
1076                                 bit = bit_idx;
1077                                 bit_len = ATTENTION_LENGTH(p_aeu->flags);
1078                                 if (ecore_int_is_parity_flag(p_hwfn, p_aeu)) {
1079                                         /* Skip Parity */
1080                                         bit++;
1081                                         bit_len--;
1082                                 }
1083
1084                                 /* Find the bits relating to the HW block,
1085                                  * then shift so they become the LSBs.
1086                                  */
1087                                 bitmask = bits & (((1 << bit_len) - 1) << bit);
1088                                 bitmask >>= bit;
1089
1090                                 if (bitmask) {
1091                                         u32 flags = p_aeu->flags;
1092                                         char bit_name[30];
1093                                         u8 num;
1094
1095                                         num = (u8)OSAL_FIND_FIRST_BIT(&bitmask,
1096                                                                 bit_len);
1097
1098                                         /* Some bits represent more than
1099                                          * a single interrupt. Correctly print
1100                                          * their name.
1101                                          */
1102                                         if (ATTENTION_LENGTH(flags) > 2 ||
1103                                             ((flags & ATTENTION_PAR_INT) &&
1104                                             ATTENTION_LENGTH(flags) > 1))
1105                                                 OSAL_SNPRINTF(bit_name, 30,
1106                                                               p_aeu->bit_name,
1107                                                               num);
1108                                         else
1109                                                 strlcpy(bit_name,
1110                                                         p_aeu->bit_name,
1111                                                         sizeof(bit_name));
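                                        /* strlcpy (unlike the strncpy it
                                         * replaces) guarantees NUL-termination
                                         * even if bit_name fills up; hence the
                                         * rte_string_fns.h include above.
                                         */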
1112
1113                                         /* We now need to pass bitmask in its
1114                                          * correct position.
1115                                          */
1116                                         bitmask <<= bit;
1117
1118                                         /* Handle source of the attention */
1119                                         ecore_int_deassertion_aeu_bit(p_hwfn,
1120                                                                       p_aeu,
1121                                                                       aeu_en,
1122                                                                       bit_name,
1123                                                                       bitmask);
1124                                 }
1125
1126                                 bit_idx += ATTENTION_LENGTH(p_aeu->flags);
1127                         }
1128                 }
1129         }
1130
1131         /* Clear IGU indication for the deasserted bits */
1132         /* FIXME - this will change once we'll have GOOD gtt definitions */
1133         DIRECT_REG_WR(p_hwfn,
1134                       (u8 OSAL_IOMEM *) p_hwfn->regview +
1135                       GTT_BAR0_MAP_REG_IGU_CMD +
1136                       ((IGU_CMD_ATTN_BIT_CLR_UPPER -
1137                         IGU_CMD_INT_ACK_BASE) << 3), ~((u32)deasserted_bits));
1138
1139         /* Unmask deasserted attentions in IGU */
1140         aeu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1141                             IGU_REG_ATTENTION_ENABLE);
1142         aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
1143         ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);
1144
1145         /* Clear deassertion from inner state */
1146         sb_attn_sw->known_attn &= ~deasserted_bits;
1147
1148         return rc;
1149 }
1150
1151 static enum _ecore_status_t ecore_int_attentions(struct ecore_hwfn *p_hwfn)
1152 {
1153         struct ecore_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
1154         struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
1155         u16 index = 0, asserted_bits, deasserted_bits;
1156         u32 attn_bits = 0, attn_acks = 0;
1157         enum _ecore_status_t rc = ECORE_SUCCESS;
1158
1159         /* Read current attention bits/acks - safeguard against attentions
1160          * by guaranteeing work on a synchronized timeframe
1161          */
1162         do {
1163                 index = OSAL_LE16_TO_CPU(p_sb_attn->sb_index);
1164                 attn_bits = OSAL_LE32_TO_CPU(p_sb_attn->atten_bits);
1165                 attn_acks = OSAL_LE32_TO_CPU(p_sb_attn->atten_ack);
1166         } while (index != OSAL_LE16_TO_CPU(p_sb_attn->sb_index));
1167         p_sb_attn->sb_index = index;
1168
1169         /* Attention / Deassertion are meaningful (and in correct state)
1170          * only when they differ and consistent with known state - deassertion
1171          * when previous attention & current ack, and assertion when current
1172          * attention with no previous attention
1173          */
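        /* For example: a bit with attention set, ack clear and not yet in
         * known_attn is a new assertion; a bit with attention clear, ack set
         * and present in known_attn is a deassertion.
         */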
1174         asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
1175             ~p_sb_attn_sw->known_attn;
1176         deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
1177             p_sb_attn_sw->known_attn;
1178
1179         if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100))
1180                 DP_INFO(p_hwfn,
1181                         "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
1182                         index, attn_bits, attn_acks, asserted_bits,
1183                         deasserted_bits, p_sb_attn_sw->known_attn);
1184         else if (asserted_bits == 0x100)
1185                 DP_INFO(p_hwfn, "MFW indication via attention\n");
1186         else
1187                 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1188                            "MFW indication [deassertion]\n");
1189
1190         if (asserted_bits) {
1191                 rc = ecore_int_assertion(p_hwfn, asserted_bits);
1192                 if (rc)
1193                         return rc;
1194         }
1195
1196         if (deasserted_bits)
1197                 rc = ecore_int_deassertion(p_hwfn, deasserted_bits);
1198
1199         return rc;
1200 }
1201
1202 static void ecore_sb_ack_attn(struct ecore_hwfn *p_hwfn,
1203                               void OSAL_IOMEM *igu_addr, u32 ack_cons)
1204 {
1205         struct igu_prod_cons_update igu_ack = { 0 };
1206
1207         igu_ack.sb_id_and_flags =
1208             ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
1209              (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
1210              (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
1211              (IGU_SEG_ACCESS_ATTN <<
1212               IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
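             /* The ack carries the attention-segment consumer index with the
              * update flag set; IGU_INT_NOP leaves the interrupt enable state
              * as-is, since the DPC re-enables interrupts separately via
              * ecore_sb_ack(..., IGU_INT_ENABLE, 1).
              */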
1213
1214         DIRECT_REG_WR(p_hwfn, igu_addr, igu_ack.sb_id_and_flags);
1215
1216         /* Both segments (interrupts & acks) are written to the same address;
1217          * need to guarantee all commands will be received (in-order) by the HW.
1218          */
1219         OSAL_MMIOWB(p_hwfn->p_dev);
1220         OSAL_BARRIER(p_hwfn->p_dev);
1221 }
1222
1223 void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)
1224 {
1225         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)hwfn_cookie;
1226         struct ecore_pi_info *pi_info = OSAL_NULL;
1227         struct ecore_sb_attn_info *sb_attn;
1228         struct ecore_sb_info *sb_info;
1229         int arr_size;
1230         u16 rc = 0;
1231
1232         if (!p_hwfn)
1233                 return;
1234
1235         if (!p_hwfn->p_sp_sb) {
1236                 DP_ERR(p_hwfn->p_dev, "DPC called - no p_sp_sb\n");
1237                 return;
1238         }
1239
1240         sb_info = &p_hwfn->p_sp_sb->sb_info;
1241         arr_size = OSAL_ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
1242         if (!sb_info) {
1243                 DP_ERR(p_hwfn->p_dev,
1244                        "Status block is NULL - cannot ack interrupts\n");
1245                 return;
1246         }
1247
1248         if (!p_hwfn->p_sb_attn) {
1249                 DP_ERR(p_hwfn->p_dev, "DPC called - no p_sb_attn\n");
1250                 return;
1251         }
1252         sb_attn = p_hwfn->p_sb_attn;
1253
1254         DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
1255                    p_hwfn, p_hwfn->my_id);
1256
1257         /* Disable ack for the default status block. Required both for MSI-X
1258          * and for INTA in non-mask mode; in INTA mode it does no harm.
1259          */
1260         ecore_sb_ack(sb_info, IGU_INT_DISABLE, 0);
1261
1262         /* Gather Interrupts/Attentions information */
1263         if (!sb_info->sb_virt) {
1264                 DP_ERR(p_hwfn->p_dev,
1265                        "Interrupt Status block is NULL - cannot check for new interrupts!\n");
1267         } else {
1268                 u32 tmp_index = sb_info->sb_ack;
1269                 rc = ecore_sb_update_sb_idx(sb_info);
1270                 DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
1271                            "Interrupt indices: 0x%08x --> 0x%08x\n",
1272                            tmp_index, sb_info->sb_ack);
1273         }
1274
1275         if (!sb_attn || !sb_attn->sb_attn) {
1276                 DP_ERR(p_hwfn->p_dev,
1277                        "Attentions Status block is NULL - cannot check for new attentions!\n");
1279         } else {
1280                 u16 tmp_index = sb_attn->index;
1281
1282                 rc |= ecore_attn_update_idx(p_hwfn, sb_attn);
1283                 DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
1284                            "Attention indices: 0x%08x --> 0x%08x\n",
1285                            tmp_index, sb_attn->index);
1286         }
1287
1288         /* Check if we expect interrupts at this time; if not, just ack them */
1289         if (!(rc & ECORE_SB_EVENT_MASK)) {
1290                 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1291                 return;
1292         }
1293
1294         /* Check the validity of the DPC ptt; if not valid, ack interrupts and fail */
1295
1296         if (!p_hwfn->p_dpc_ptt) {
1297                 DP_NOTICE(p_hwfn->p_dev, true, "Failed to allocate PTT\n");
1298                 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1299                 return;
1300         }
1301
1302         if (rc & ECORE_SB_ATT_IDX)
1303                 ecore_int_attentions(p_hwfn);
1304
1305         if (rc & ECORE_SB_IDX) {
1306                 int pi;
1307
1308                 /* Since we only looked at the SB index, it's possible that
1309                  * more than a single protocol index on the SB has incremented.
1310                  * Iterate over all configured protocol indices and check
1311                  * whether something happened for each.
1312                  */
1313                 for (pi = 0; pi < arr_size; pi++) {
1314                         pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
1315                         if (pi_info->comp_cb != OSAL_NULL)
1316                                 pi_info->comp_cb(p_hwfn, pi_info->cookie);
1317                 }
1318         }
1319
1320         if (sb_attn && (rc & ECORE_SB_ATT_IDX)) {
1321                 /* This should be done before the interrupts are enabled,
1322                  * since otherwise a new attention will be generated.
1323                  */
1324                 ecore_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);
1325         }
1326
1327         ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1328 }
1329
1330 static void ecore_int_sb_attn_free(struct ecore_hwfn *p_hwfn)
1331 {
1332         struct ecore_sb_attn_info *p_sb = p_hwfn->p_sb_attn;
1333
1334         if (!p_sb)
1335                 return;
1336
1337         if (p_sb->sb_attn) {
1338                 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_sb->sb_attn,
1339                                        p_sb->sb_phys,
1340                                        SB_ATTN_ALIGNED_SIZE(p_hwfn));
1341         }
1342         OSAL_FREE(p_hwfn->p_dev, p_sb);
1343 }
1344
1345 static void ecore_int_sb_attn_setup(struct ecore_hwfn *p_hwfn,
1346                                     struct ecore_ptt *p_ptt)
1347 {
1348         struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
1349
1350         OSAL_MEMSET(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));
1351
1352         sb_info->index = 0;
1353         sb_info->known_attn = 0;
1354
1355         /* Configure Attention Status Block in IGU */
1356         ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
1357                  DMA_LO(p_hwfn->p_sb_attn->sb_phys));
1358         ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
1359                  DMA_HI(p_hwfn->p_sb_attn->sb_phys));
1360 }
1361
1362 static void ecore_int_sb_attn_init(struct ecore_hwfn *p_hwfn,
1363                                    struct ecore_ptt *p_ptt,
1364                                    void *sb_virt_addr, dma_addr_t sb_phy_addr)
1365 {
1366         struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
1367         int i, j, k;
1368
1369         sb_info->sb_attn = sb_virt_addr;
1370         sb_info->sb_phys = sb_phy_addr;
1371
1372         /* Set the pointer to the AEU descriptors */
1373         sb_info->p_aeu_desc = aeu_descs;
1374
1375         /* Calculate Parity Masks */
1376         OSAL_MEMSET(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
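             /* A single AEU descriptor may cover several consecutive bits, so
              * k advances by ATTENTION_LENGTH(flags) per descriptor while j
              * walks the descriptor array; a parity-capable descriptor sets
              * its first bit in the register's parity mask.
              */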
1377         for (i = 0; i < NUM_ATTN_REGS; i++) {
1378                 /* j is array index, k is bit index */
1379                 for (j = 0, k = 0; k < 32; j++) {
1380                         struct aeu_invert_reg_bit *p_aeu;
1381
1382                         p_aeu = &aeu_descs[i].bits[j];
1383                         if (ecore_int_is_parity_flag(p_hwfn, p_aeu))
1384                                 sb_info->parity_mask[i] |= 1 << k;
1385
1386                         k += ATTENTION_LENGTH(p_aeu->flags);
1387                 }
1388                 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1389                            "Attn Mask [Reg %d]: 0x%08x\n",
1390                            i, sb_info->parity_mask[i]);
1391         }
1392
1393         /* Set the address of cleanup for the mcp attention */
1394         sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
1395             MISC_REG_AEU_GENERAL_ATTN_0;
1396
1397         ecore_int_sb_attn_setup(p_hwfn, p_ptt);
1398 }
1399
1400 static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn,
1401                                                     struct ecore_ptt *p_ptt)
1402 {
1403         struct ecore_dev *p_dev = p_hwfn->p_dev;
1404         struct ecore_sb_attn_info *p_sb;
1405         dma_addr_t p_phys = 0;
1406         void *p_virt;
1407
1408         /* SB struct */
1409         p_sb = OSAL_ALLOC(p_dev, GFP_KERNEL, sizeof(*p_sb));
1410         if (!p_sb) {
1411                 DP_NOTICE(p_dev, false, "Failed to allocate `struct ecore_sb_attn_info'\n");
1412                 return ECORE_NOMEM;
1413         }
1414
1415         /* SB ring  */
1416         p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
1417                                          SB_ATTN_ALIGNED_SIZE(p_hwfn));
1418         if (!p_virt) {
1419                 DP_NOTICE(p_dev, false, "Failed to allocate status block (attentions)\n");
1420                 OSAL_FREE(p_dev, p_sb);
1421                 return ECORE_NOMEM;
1422         }
1423
1424         /* Attention setup */
1425         p_hwfn->p_sb_attn = p_sb;
1426         ecore_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);
1427
1428         return ECORE_SUCCESS;
1429 }
1430
1431 /* coalescing timeout = timeset << (timer_res + 1) */
1432 #define ECORE_CAU_DEF_RX_USECS 24
1433 #define ECORE_CAU_DEF_TX_USECS 48
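     /* Worked example (illustrative values only): the default Rx setting of
      * 24 usecs is <= 0x7F, so timer_res = 0 and the 7-bit timeset holds the
      * value as-is; a hypothetical 300-usec setting exceeds 0xFF, so
      * timer_res = 2 and the stored timeset would be 300 >> 2 = 75.
      */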
1434
1435 void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn,
1436                              struct cau_sb_entry *p_sb_entry,
1437                              u8 pf_id, u16 vf_number, u8 vf_valid)
1438 {
1439         struct ecore_dev *p_dev = p_hwfn->p_dev;
1440         u32 cau_state;
1441         u8 timer_res;
1442
1443         OSAL_MEMSET(p_sb_entry, 0, sizeof(*p_sb_entry));
1444
1445         SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
1446         SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
1447         SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
1448         SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
1449         SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
1450
1451         cau_state = CAU_HC_DISABLE_STATE;
1452
1453         if (p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
1454                 cau_state = CAU_HC_ENABLE_STATE;
1455                 if (!p_dev->rx_coalesce_usecs)
1456                         p_dev->rx_coalesce_usecs = ECORE_CAU_DEF_RX_USECS;
1457                 if (!p_dev->tx_coalesce_usecs)
1458                         p_dev->tx_coalesce_usecs = ECORE_CAU_DEF_TX_USECS;
1459         }
1460
1461         /* Coalesce = (timeset << timer_res), timeset is 7 bits wide */
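             /* Rationale: the stored timeset is the usec value >> timer_res
              * and must fit in 7 bits, so values up to 0x7F need no scaling,
              * values up to 0xFF need timer_res = 1, and larger values use
              * timer_res = 2.
              */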
1462         if (p_dev->rx_coalesce_usecs <= 0x7F)
1463                 timer_res = 0;
1464         else if (p_dev->rx_coalesce_usecs <= 0xFF)
1465                 timer_res = 1;
1466         else
1467                 timer_res = 2;
1468         SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
1469
1470         if (p_dev->tx_coalesce_usecs <= 0x7F)
1471                 timer_res = 0;
1472         else if (p_dev->tx_coalesce_usecs <= 0xFF)
1473                 timer_res = 1;
1474         else
1475                 timer_res = 2;
1476         SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
1477
1478         SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
1479         SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
1480 }
1481
1482 static void _ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
1483                                    struct ecore_ptt *p_ptt,
1484                                    u16 igu_sb_id, u32 pi_index,
1485                                    enum ecore_coalescing_fsm coalescing_fsm,
1486                                    u8 timeset)
1487 {
1488         struct cau_pi_entry pi_entry;
1489         u32 sb_offset, pi_offset;
1490
1491         if (IS_VF(p_hwfn->p_dev))
1492                 return; /* @@@TBD MichalK - VF CAU... */
1493
1494         sb_offset = igu_sb_id * PIS_PER_SB_E4;
1495         OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry));
1496
1497         SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
1498         if (coalescing_fsm == ECORE_COAL_RX_STATE_MACHINE)
1499                 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
1500         else
1501                 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);
1502
1503         pi_offset = sb_offset + pi_index;
1504         if (p_hwfn->hw_init_done) {
1505                 ecore_wr(p_hwfn, p_ptt,
1506                          CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
1507                          *((u32 *)&(pi_entry)));
1508         } else {
1509                 STORE_RT_REG(p_hwfn,
1510                              CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
1511                              *((u32 *)&(pi_entry)));
1512         }
1513 }
1514
1515 void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
1516                            struct ecore_ptt *p_ptt,
1517                            struct ecore_sb_info *p_sb, u32 pi_index,
1518                            enum ecore_coalescing_fsm coalescing_fsm,
1519                            u8 timeset)
1520 {
1521         _ecore_int_cau_conf_pi(p_hwfn, p_ptt, p_sb->igu_sb_id,
1522                                pi_index, coalescing_fsm, timeset);
1523 }
1524
1525 void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
1526                            struct ecore_ptt *p_ptt,
1527                            dma_addr_t sb_phys, u16 igu_sb_id,
1528                            u16 vf_number, u8 vf_valid)
1529 {
1530         struct cau_sb_entry sb_entry;
1531
1532         ecore_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
1533                                 vf_number, vf_valid);
1534
1535         if (p_hwfn->hw_init_done) {
1536                 /* Wide-bus, initialize via DMAE */
1537                 u64 phys_addr = (u64)sb_phys;
1538
1539                 ecore_dmae_host2grc(p_hwfn, p_ptt,
1540                                     (u64)(osal_uintptr_t)&phys_addr,
1541                                     CAU_REG_SB_ADDR_MEMORY +
1542                                     igu_sb_id * sizeof(u64), 2, 0);
1543                 ecore_dmae_host2grc(p_hwfn, p_ptt,
1544                                     (u64)(osal_uintptr_t)&sb_entry,
1545                                     CAU_REG_SB_VAR_MEMORY +
1546                                     igu_sb_id * sizeof(u64), 2, 0);
1547         } else {
1548                 /* Initialize Status Block Address */
1549                 STORE_RT_REG_AGG(p_hwfn,
1550                                  CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
1551                                  igu_sb_id * 2, sb_phys);
1552
1553                 STORE_RT_REG_AGG(p_hwfn,
1554                                  CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
1555                                  igu_sb_id * 2, sb_entry);
1556         }
1557
1558         /* Configure pi coalescing if set */
1559         if (p_hwfn->p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
1560                 /* eth will open queues for all tcs, so configure all of them
1561                  * properly, rather than just the active ones
1562                  */
1563                 u8 num_tc = p_hwfn->hw_info.num_hw_tc;
1564
1565                 u8 timeset, timer_res;
1566                 u8 i;
1567
1568                 /* timeset = (coalesce >> timer_res), timeset is 7 bits wide */
1569                 if (p_hwfn->p_dev->rx_coalesce_usecs <= 0x7F)
1570                         timer_res = 0;
1571                 else if (p_hwfn->p_dev->rx_coalesce_usecs <= 0xFF)
1572                         timer_res = 1;
1573                 else
1574                         timer_res = 2;
1575                 timeset = (u8)(p_hwfn->p_dev->rx_coalesce_usecs >> timer_res);
1576                 _ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
1577                                        ECORE_COAL_RX_STATE_MACHINE,
1578                                        timeset);
1579
1580                 if (p_hwfn->p_dev->tx_coalesce_usecs <= 0x7F)
1581                         timer_res = 0;
1582                 else if (p_hwfn->p_dev->tx_coalesce_usecs <= 0xFF)
1583                         timer_res = 1;
1584                 else
1585                         timer_res = 2;
1586                 timeset = (u8)(p_hwfn->p_dev->tx_coalesce_usecs >> timer_res);
1587                 for (i = 0; i < num_tc; i++) {
1588                         _ecore_int_cau_conf_pi(p_hwfn, p_ptt,
1589                                                igu_sb_id, TX_PI(i),
1590                                                ECORE_COAL_TX_STATE_MACHINE,
1591                                                timeset);
1592                 }
1593         }
1594 }
1595
1596 void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
1597                         struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info)
1598 {
1599         /* zero status block and ack counter */
1600         sb_info->sb_ack = 0;
1601         OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
1602
1603         if (IS_PF(p_hwfn->p_dev))
1604                 ecore_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
1605                                       sb_info->igu_sb_id, 0, 0);
1606 }
1607
1608 struct ecore_igu_block *
1609 ecore_get_igu_free_sb(struct ecore_hwfn *p_hwfn, bool b_is_pf)
1610 {
1611         struct ecore_igu_block *p_block;
1612         u16 igu_id;
1613
1614         for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
1615              igu_id++) {
1616                 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];
1617
1618                 if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
1619                     !(p_block->status & ECORE_IGU_STATUS_FREE))
1620                         continue;
1621
1622                 if (!!(p_block->status & ECORE_IGU_STATUS_PF) ==
1623                     b_is_pf)
1624                         return p_block;
1625         }
1626
1627         return OSAL_NULL;
1628 }
1629
1630 static u16 ecore_get_pf_igu_sb_id(struct ecore_hwfn *p_hwfn,
1631                                   u16 vector_id)
1632 {
1633         struct ecore_igu_block *p_block;
1634         u16 igu_id;
1635
1636         for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
1637              igu_id++) {
1638                 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];
1639
1640                 if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
1641                     !p_block->is_pf ||
1642                     p_block->vector_number != vector_id)
1643                         continue;
1644
1645                 return igu_id;
1646         }
1647
1648         return ECORE_SB_INVALID_IDX;
1649 }
1650
1651 u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
1652 {
1653         u16 igu_sb_id;
1654
1655         /* Assuming a continuous set of IGU SBs dedicated to the given PF */
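             /* Client SB vectors start at 1, as vector 0 is reserved for the
              * default (slowpath) SB - hence the sb_id + 1 lookup below.
              */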
1656         if (sb_id == ECORE_SP_SB_ID)
1657                 igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
1658         else if (IS_PF(p_hwfn->p_dev))
1659                 igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
1660         else
1661                 igu_sb_id = ecore_vf_get_igu_sb_id(p_hwfn, sb_id);
1662
1663         if (igu_sb_id == ECORE_SB_INVALID_IDX)
1664                 DP_NOTICE(p_hwfn, true,
1665                           "Slowpath SB vector %04x doesn't exist\n",
1666                           sb_id);
1667         else if (sb_id == ECORE_SP_SB_ID)
1668                 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1669                            "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
1670         else
1671                 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1672                            "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);
1673
1674         return igu_sb_id;
1675 }
1676
1677 enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
1678                                        struct ecore_ptt *p_ptt,
1679                                        struct ecore_sb_info *sb_info,
1680                                        void *sb_virt_addr,
1681                                        dma_addr_t sb_phy_addr, u16 sb_id)
1682 {
1683         sb_info->sb_virt = sb_virt_addr;
1684         sb_info->sb_phys = sb_phy_addr;
1685
1686         sb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id);
1687
1688         if (sb_info->igu_sb_id == ECORE_SB_INVALID_IDX)
1689                 return ECORE_INVAL;
1690
1691         /* Let the igu info reference the client's SB info */
1692         if (sb_id != ECORE_SP_SB_ID) {
1693                 if (IS_PF(p_hwfn->p_dev)) {
1694                         struct ecore_igu_info *p_info;
1695                         struct ecore_igu_block *p_block;
1696
1697                         p_info = p_hwfn->hw_info.p_igu_info;
1698                         p_block = &p_info->entry[sb_info->igu_sb_id];
1699
1700                         p_block->sb_info = sb_info;
1701                         p_block->status &= ~ECORE_IGU_STATUS_FREE;
1702                         p_info->usage.free_cnt--;
1703                 } else {
1704                         ecore_vf_set_sb_info(p_hwfn, sb_id, sb_info);
1705                 }
1706         }
1707 #ifdef ECORE_CONFIG_DIRECT_HWFN
1708         sb_info->p_hwfn = p_hwfn;
1709 #endif
1710         sb_info->p_dev = p_hwfn->p_dev;
1711
1712         /* The igu address will hold the absolute address that needs to be
1713          * written to for a specific status block
1714          */
1715         if (IS_PF(p_hwfn->p_dev)) {
1716                 sb_info->igu_addr = (u8 OSAL_IOMEM *)p_hwfn->regview +
1717                     GTT_BAR0_MAP_REG_IGU_CMD + (sb_info->igu_sb_id << 3);
1718
1719         } else {
1720                 sb_info->igu_addr =
1721                     (u8 OSAL_IOMEM *)p_hwfn->regview +
1722                     PXP_VF_BAR0_START_IGU +
1723                     ((IGU_CMD_INT_ACK_BASE + sb_info->igu_sb_id) << 3);
1724         }
1725
1726         sb_info->flags |= ECORE_SB_INFO_INIT;
1727
1728         ecore_int_sb_setup(p_hwfn, p_ptt, sb_info);
1729
1730         return ECORE_SUCCESS;
1731 }
1732
1733 enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
1734                                           struct ecore_sb_info *sb_info,
1735                                           u16 sb_id)
1736 {
1737         struct ecore_igu_info *p_info;
1738         struct ecore_igu_block *p_block;
1739
1740         if (sb_info == OSAL_NULL)
1741                 return ECORE_SUCCESS;
1742
1743         /* zero status block and ack counter */
1744         sb_info->sb_ack = 0;
1745         OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
1746
1747         if (IS_VF(p_hwfn->p_dev)) {
1748                 ecore_vf_set_sb_info(p_hwfn, sb_id, OSAL_NULL);
1749                 return ECORE_SUCCESS;
1750         }
1751
1752         p_info = p_hwfn->hw_info.p_igu_info;
1753         p_block = &p_info->entry[sb_info->igu_sb_id];
1754
1755         /* Vector 0 is reserved for the Default SB */
1756         if (p_block->vector_number == 0) {
1757                 DP_ERR(p_hwfn, "Do not free the sp sb using this function\n");
1758                 return ECORE_INVAL;
1759         }
1760
1761         /* Lose reference to client's SB info, and fix counters */
1762         p_block->sb_info = OSAL_NULL;
1763         p_block->status |= ECORE_IGU_STATUS_FREE;
1764         p_info->usage.free_cnt++;
1765
1766         return ECORE_SUCCESS;
1767 }
1768
1769 static void ecore_int_sp_sb_free(struct ecore_hwfn *p_hwfn)
1770 {
1771         struct ecore_sb_sp_info *p_sb = p_hwfn->p_sp_sb;
1772
1773         if (!p_sb)
1774                 return;
1775
1776         if (p_sb->sb_info.sb_virt) {
1777                 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
1778                                        p_sb->sb_info.sb_virt,
1779                                        p_sb->sb_info.sb_phys,
1780                                        SB_ALIGNED_SIZE(p_hwfn));
1781         }
1782
1783         OSAL_FREE(p_hwfn->p_dev, p_sb);
1784 }
1785
1786 static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
1787                                                   struct ecore_ptt *p_ptt)
1788 {
1789         struct ecore_sb_sp_info *p_sb;
1790         dma_addr_t p_phys = 0;
1791         void *p_virt;
1792
1793         /* SB struct */
1794         p_sb = OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sb));
1797         if (!p_sb) {
1798                 DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_sb_info'\n");
1799                 return ECORE_NOMEM;
1800         }
1801
1802         /* SB ring  */
1803         p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
1804                                          &p_phys, SB_ALIGNED_SIZE(p_hwfn));
1805         if (!p_virt) {
1806                 DP_NOTICE(p_hwfn, false, "Failed to allocate status block\n");
1807                 OSAL_FREE(p_hwfn->p_dev, p_sb);
1808                 return ECORE_NOMEM;
1809         }
1810
1811         /* Status Block setup */
1812         p_hwfn->p_sp_sb = p_sb;
1813         ecore_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info,
1814                           p_virt, p_phys, ECORE_SP_SB_ID);
1815
1816         OSAL_MEMSET(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));
1817
1818         return ECORE_SUCCESS;
1819 }
1820
1821 enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn,
1822                                            ecore_int_comp_cb_t comp_cb,
1823                                            void *cookie,
1824                                            u8 *sb_idx, __le16 **p_fw_cons)
1825 {
1826         struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
1827         enum _ecore_status_t rc = ECORE_NOMEM;
1828         u8 pi;
1829
1830         /* Look for a free index */
1831         for (pi = 0; pi < OSAL_ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
1832                 if (p_sp_sb->pi_info_arr[pi].comp_cb != OSAL_NULL)
1833                         continue;
1834
1835                 p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
1836                 p_sp_sb->pi_info_arr[pi].cookie = cookie;
1837                 *sb_idx = pi;
1838                 *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
1839                 rc = ECORE_SUCCESS;
1840                 break;
1841         }
1842
1843         return rc;
1844 }
1845
1846 enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn, u8 pi)
1847 {
1848         struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
1849
1850         if (p_sp_sb->pi_info_arr[pi].comp_cb == OSAL_NULL)
1851                 return ECORE_NOMEM;
1852
1853         p_sp_sb->pi_info_arr[pi].comp_cb = OSAL_NULL;
1854         p_sp_sb->pi_info_arr[pi].cookie = OSAL_NULL;
1855         return ECORE_SUCCESS;
1856 }
1857
1858 u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn)
1859 {
1860         return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
1861 }
1862
1863 void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn,
1864                               struct ecore_ptt *p_ptt,
1865                               enum ecore_int_mode int_mode)
1866 {
1867         u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;
1868
1869 #ifndef ASIC_ONLY
1870         if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
1871                 DP_INFO(p_hwfn, "FPGA - don't enable ATTN generation in IGU\n");
1872                 igu_pf_conf &= ~IGU_PF_CONF_ATTN_BIT_EN;
1873         }
1874 #endif
1875
1876         p_hwfn->p_dev->int_mode = int_mode;
1877         switch (p_hwfn->p_dev->int_mode) {
1878         case ECORE_INT_MODE_INTA:
1879                 igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
1880                 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
1881                 break;
1882
1883         case ECORE_INT_MODE_MSI:
1884                 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
1885                 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
1886                 break;
1887
1888         case ECORE_INT_MODE_MSIX:
1889                 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
1890                 break;
1891         case ECORE_INT_MODE_POLL:
1892                 break;
1893         }
1894
1895         ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
1896 }
1897
1898 static void ecore_int_igu_enable_attn(struct ecore_hwfn *p_hwfn,
1899                                       struct ecore_ptt *p_ptt)
1900 {
1901 #ifndef ASIC_ONLY
1902         if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
1903                 DP_INFO(p_hwfn,
1904                         "FPGA - Don't enable Attentions in IGU and MISC\n");
1905                 return;
1906         }
1907 #endif
1908
1909         /* Configure AEU signal change to produce attentions */
1910         ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
1911         ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
1912         ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
1913         ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);
1914
1915         /* Flush the writes to IGU */
1916         OSAL_MMIOWB(p_hwfn->p_dev);
1917
1918         /* Unmask AEU signals toward IGU */
1919         ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
1920 }
1921
1922 enum _ecore_status_t
1923 ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1924                      enum ecore_int_mode int_mode)
1925 {
1926         enum _ecore_status_t rc = ECORE_SUCCESS;
1927
1928         ecore_int_igu_enable_attn(p_hwfn, p_ptt);
1929
1930         if ((int_mode != ECORE_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
1931                 rc = OSAL_SLOWPATH_IRQ_REQ(p_hwfn);
1932                 if (rc != ECORE_SUCCESS) {
1933                         DP_NOTICE(p_hwfn, true,
1934                                   "Slowpath IRQ request failed\n");
1935                         return ECORE_NORESOURCES;
1936                 }
1937                 p_hwfn->b_int_requested = true;
1938         }
1939
1940         /* Enable interrupt Generation */
1941         ecore_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
1942
1943         p_hwfn->b_int_enabled = 1;
1944
1945         return rc;
1946 }
1947
1948 void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
1949                                struct ecore_ptt *p_ptt)
1950 {
1951         p_hwfn->b_int_enabled = 0;
1952
1953         if (IS_VF(p_hwfn->p_dev))
1954                 return;
1955
1956         ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
1957 }
1958
1959 #define IGU_CLEANUP_SLEEP_LENGTH                (1000)
1960 static void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
1961                                      struct ecore_ptt *p_ptt,
1962                                      u32 igu_sb_id,
1963                                      bool cleanup_set,
1964                                      u16 opaque_fid)
1965 {
1966         u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
1967         u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;
1968         u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
1969         u8 type = 0;            /* FIXME MichalS type??? */
1970
1971         OSAL_BUILD_BUG_ON((IGU_REG_CLEANUP_STATUS_4 -
1972                            IGU_REG_CLEANUP_STATUS_0) != 0x200);
1973
1974         /* Use the Control Command Register to perform the cleanup. There is
1975          * an option to do this using the IGU BAR, but then it can't be used for VFs.
1976          */
1977
1978         /* Set the data field */
1979         SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
1980         SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, type);
1981         SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);
1982
1983         /* Set the control register */
1984         SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
1985         SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
1986         SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);
1987
1988         ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);
1989
1990         OSAL_BARRIER(p_hwfn->p_dev);
1991
1992         ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);
1993
1994         /* Flush the write to IGU */
1995         OSAL_MMIOWB(p_hwfn->p_dev);
1996
1997         /* calculate where to read the status bit from */
1998         sb_bit = 1 << (igu_sb_id % 32);
1999         sb_bit_addr = igu_sb_id / 32 * sizeof(u32);
2000
2001         sb_bit_addr += IGU_REG_CLEANUP_STATUS_0 + (0x80 * type);
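             /* Worked example (illustrative): for igu_sb_id = 37 the status
              * bit is bit 37 % 32 = 5 of dword 37 / 32 = 1, i.e. bit 5 at
              * IGU_REG_CLEANUP_STATUS_0 + 4 (plus the per-type 0x80 offset).
              */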
2002
2003         /* Now wait for the command to complete */
2004         while (--sleep_cnt) {
2005                 val = ecore_rd(p_hwfn, p_ptt, sb_bit_addr);
2006                 if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
2007                         break;
2008                 OSAL_MSLEEP(5);
2009         }
2010
2011         if (!sleep_cnt)
2012                 DP_NOTICE(p_hwfn, true,
2013                           "Timeout waiting for clear status 0x%08x [for sb %d]\n",
2014                           val, igu_sb_id);
2015 }
2016
2017 void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
2018                                        struct ecore_ptt *p_ptt,
2019                                        u16 igu_sb_id, u16 opaque, bool b_set)
2020 {
2021         struct ecore_igu_block *p_block;
2022         int pi, i;
2023
2024         p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
2025         DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
2026                    "Cleaning SB [%04x]: func_id = %d is_pf = %d vector_num = 0x%x\n",
2027                    igu_sb_id, p_block->function_id, p_block->is_pf,
2028                    p_block->vector_number);
2029
2030         /* Set */
2031         if (b_set)
2032                 ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque);
2033
2034         /* Clear */
2035         ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque);
2036
2037         /* Wait for the IGU SB to cleanup */
2038         for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
2039                 u32 val;
2040
2041                 val = ecore_rd(p_hwfn, p_ptt,
2042                                IGU_REG_WRITE_DONE_PENDING +
2043                                ((igu_sb_id / 32) * 4));
2044                 if (val & (1 << (igu_sb_id % 32)))
2045                         OSAL_UDELAY(10);
2046                 else
2047                         break;
2048         }
2049         if (i == IGU_CLEANUP_SLEEP_LENGTH)
2050                 DP_NOTICE(p_hwfn, true,
2051                           "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n",
2052                           igu_sb_id);
2053
2054         /* Clear the CAU for the SB */
2055         for (pi = 0; pi < 12; pi++)
2056                 ecore_wr(p_hwfn, p_ptt,
2057                          CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0);
2058 }
2059
2060 void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
2061                                 struct ecore_ptt *p_ptt,
2062                                 bool b_set, bool b_slowpath)
2063 {
2064         struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
2065         struct ecore_igu_block *p_block;
2066         u16 igu_sb_id = 0;
2067         u32 val = 0;
2068
2069         /* @@@TBD MichalK temporary... should be moved to init-tool... */
2070         val = ecore_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
2071         val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
2072         val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
2073         ecore_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
2074         /* end temporary */
2075
2076         for (igu_sb_id = 0;
2077              igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
2078              igu_sb_id++) {
2079                 p_block = &p_info->entry[igu_sb_id];
2080
2081                 if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
2082                     !p_block->is_pf ||
2083                     (p_block->status & ECORE_IGU_STATUS_DSB))
2084                         continue;
2085
2086                 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id,
2087                                                   p_hwfn->hw_info.opaque_fid,
2088                                                   b_set);
2089         }
2090
2091         if (b_slowpath)
2092                 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
2093                                                   p_info->igu_dsb_id,
2094                                                   p_hwfn->hw_info.opaque_fid,
2095                                                   b_set);
2096 }
2097
2098 int ecore_int_igu_reset_cam(struct ecore_hwfn *p_hwfn,
2099                             struct ecore_ptt *p_ptt)
2100 {
2101         struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
2102         struct ecore_igu_block *p_block;
2103         int pf_sbs, vf_sbs;
2104         u16 igu_sb_id;
2105         u32 val, rval;
2106
2107         if (!RESC_NUM(p_hwfn, ECORE_SB)) {
2108                 /* We're using an old MFW - have to prevent any switching
2109                  * of SBs between PF and VFs, as later the driver wouldn't be
2110                  * able to tell which belongs to which.
2111                  */
2112                 p_info->b_allow_pf_vf_change = false;
2113         } else {
2114                 /* Use the numbers the MFW has provided -
2115                  * don't forget MFW accounts for the default SB as well.
2116                  */
2117                 p_info->b_allow_pf_vf_change = true;
2118
2119                 if (p_info->usage.cnt != RESC_NUM(p_hwfn, ECORE_SB) - 1) {
2120                         DP_INFO(p_hwfn,
2121                                 "MFW notifies of 0x%04x PF SBs; IGU indicates only 0x%04x\n",
2122                                 RESC_NUM(p_hwfn, ECORE_SB) - 1,
2123                                 p_info->usage.cnt);
2124                         p_info->usage.cnt = RESC_NUM(p_hwfn, ECORE_SB) - 1;
2125                 }
2126
2127                 /* TODO - how do we learn about VF SBs from MFW? */
2128                 if (IS_PF_SRIOV(p_hwfn)) {
2129                         u16 vfs = p_hwfn->p_dev->p_iov_info->total_vfs;
2130
2131                         if (vfs != p_info->usage.iov_cnt)
2132                                 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
2133                                            "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n",
2134                                            p_info->usage.iov_cnt, vfs);
2135
2136                         /* At this point we know how many SBs we have totally
2137                          * in IGU + number of PF SBs. So we can validate that
2138                          * we'd have sufficient for VF.
2139                          */
2140                         if (vfs > p_info->usage.free_cnt +
2141                                   p_info->usage.free_cnt_iov -
2142                                   p_info->usage.cnt) {
2143                                 DP_NOTICE(p_hwfn, true,
2144                                           "Not enough SBs for VFs - 0x%04x SBs, of which %04x are required for the PF and %04x for the VFs\n",
2145                                           p_info->usage.free_cnt +
2146                                           p_info->usage.free_cnt_iov,
2147                                           p_info->usage.cnt, vfs);
2148                                 return ECORE_INVAL;
2149                         }
2150                 }
2151         }
2152
2153         /* Cap the number of VF SBs by the number of VFs */
2154         if (IS_PF_SRIOV(p_hwfn))
2155                 p_info->usage.iov_cnt = p_hwfn->p_dev->p_iov_info->total_vfs;
2156
2157         /* Mark all SBs as free, now in the right PF/VFs division */
2158         p_info->usage.free_cnt = p_info->usage.cnt;
2159         p_info->usage.free_cnt_iov = p_info->usage.iov_cnt;
2160         p_info->usage.orig = p_info->usage.cnt;
2161         p_info->usage.iov_orig = p_info->usage.iov_cnt;
2162
2163         /* We now proceed to re-configure the IGU cam to reflect the initial
2164          * configuration. We can start with the Default SB.
2165          */
2166         pf_sbs = p_info->usage.cnt;
2167         vf_sbs = p_info->usage.iov_cnt;
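             /* The loop below hands out the valid CAM lines in order: the
              * default SB keeps vector 0, the next pf_sbs lines become PF SBs
              * with vector numbers 1..usage.cnt, the following vf_sbs lines
              * are assigned to consecutive VFs, and any lines left over are
              * disabled.
              */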
2168
2169         for (igu_sb_id = p_info->igu_dsb_id;
2170              igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
2171              igu_sb_id++) {
2172                 p_block = &p_info->entry[igu_sb_id];
2173                 val = 0;
2174
2175                 if (!(p_block->status & ECORE_IGU_STATUS_VALID))
2176                         continue;
2177
2178                 if (p_block->status & ECORE_IGU_STATUS_DSB) {
2179                         p_block->function_id = p_hwfn->rel_pf_id;
2180                         p_block->is_pf = 1;
2181                         p_block->vector_number = 0;
2182                         p_block->status = ECORE_IGU_STATUS_VALID |
2183                                           ECORE_IGU_STATUS_PF |
2184                                           ECORE_IGU_STATUS_DSB;
2185                 } else if (pf_sbs) {
2186                         pf_sbs--;
2187                         p_block->function_id = p_hwfn->rel_pf_id;
2188                         p_block->is_pf = 1;
2189                         p_block->vector_number = p_info->usage.cnt - pf_sbs;
2190                         p_block->status = ECORE_IGU_STATUS_VALID |
2191                                           ECORE_IGU_STATUS_PF |
2192                                           ECORE_IGU_STATUS_FREE;
2193                 } else if (vf_sbs) {
2194                         p_block->function_id =
2195                                 p_hwfn->p_dev->p_iov_info->first_vf_in_pf +
2196                                 p_info->usage.iov_cnt - vf_sbs;
2197                         p_block->is_pf = 0;
2198                         p_block->vector_number = 0;
2199                         p_block->status = ECORE_IGU_STATUS_VALID |
2200                                           ECORE_IGU_STATUS_FREE;
2201                         vf_sbs--;
2202                 } else {
2203                         p_block->function_id = 0;
2204                         p_block->is_pf = 0;
2205                         p_block->vector_number = 0;
2206                 }
2207
2208                 SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
2209                           p_block->function_id);
2210                 SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
2211                 SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
2212                           p_block->vector_number);
2213
2214                 /* VF entries will be enabled when the VF is initialized */
2215                 SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);
2216
2217                 rval = ecore_rd(p_hwfn, p_ptt,
2218                                 IGU_REG_MAPPING_MEMORY +
2219                                 sizeof(u32) * igu_sb_id);
2220
2221                 if (rval != val) {
2222                         ecore_wr(p_hwfn, p_ptt,
2223                                  IGU_REG_MAPPING_MEMORY +
2224                                  sizeof(u32) * igu_sb_id,
2225                                  val);
2226
2227                         DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
2228                                    "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n",
2229                                    igu_sb_id, p_block->function_id,
2230                                    p_block->is_pf, p_block->vector_number,
2231                                    rval, val);
2232                 }
2233         }
2234
2235         return 0;
2236 }
2237
2238 int ecore_int_igu_reset_cam_default(struct ecore_hwfn *p_hwfn,
2239                                     struct ecore_ptt *p_ptt)
2240 {
2241         struct ecore_sb_cnt_info *p_cnt = &p_hwfn->hw_info.p_igu_info->usage;
2242
2243         /* Return all the usage indications to their defaults prior to the
2244          * reset; the reset expects the non-orig fields to reflect the initial
2245          * status of the SBs, and re-calculates the originals based on those.
2246          */
2247         p_cnt->cnt = p_cnt->orig;
2248         p_cnt->free_cnt = p_cnt->orig;
2249         p_cnt->iov_cnt = p_cnt->iov_orig;
2250         p_cnt->free_cnt_iov = p_cnt->iov_orig;
2251         p_cnt->orig = 0;
2252         p_cnt->iov_orig = 0;
2253
2254         /* TODO - we probably need to re-configure the CAU as well... */
2255         return ecore_int_igu_reset_cam(p_hwfn, p_ptt);
2256 }
2257
2258 static void ecore_int_igu_read_cam_block(struct ecore_hwfn *p_hwfn,
2259                                          struct ecore_ptt *p_ptt,
2260                                          u16 igu_sb_id)
2261 {
2262         u32 val = ecore_rd(p_hwfn, p_ptt,
2263                            IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
2264         struct ecore_igu_block *p_block;
2265
2266         p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
2267
2268         /* Fill the block information */
2269         p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
2270         p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
2271         p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);
2272
2273         p_block->igu_sb_id = igu_sb_id;
2274 }
2275
2276 enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
2277                                             struct ecore_ptt *p_ptt)
2278 {
2279         struct ecore_igu_info *p_igu_info;
2280         struct ecore_igu_block *p_block;
2281         u32 min_vf = 0, max_vf = 0;
2282         u16 igu_sb_id;
2283
2284         p_hwfn->hw_info.p_igu_info = OSAL_ZALLOC(p_hwfn->p_dev,
2285                                                  GFP_KERNEL,
2286                                                  sizeof(*p_igu_info));
2287         if (!p_hwfn->hw_info.p_igu_info)
2288                 return ECORE_NOMEM;
2289         p_igu_info = p_hwfn->hw_info.p_igu_info;
2290
2291         /* Distinguish between an existent and a non-existent default SB */
2292         p_igu_info->igu_dsb_id = ECORE_SB_INVALID_IDX;
2293
2294         /* Find the range of VF ids whose SBs belong to this PF */
2295         if (p_hwfn->p_dev->p_iov_info) {
2296                 struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
2297
2298                 min_vf = p_iov->first_vf_in_pf;
2299                 max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
2300         }
2301
2302         for (igu_sb_id = 0;
2303              igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
2304              igu_sb_id++) {
2305                 /* Read current entry; Notice it might not belong to this PF */
2306                 ecore_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id);
2307                 p_block = &p_igu_info->entry[igu_sb_id];
2308
2309                 if ((p_block->is_pf) &&
2310                     (p_block->function_id == p_hwfn->rel_pf_id)) {
2311                         p_block->status = ECORE_IGU_STATUS_PF |
2312                                           ECORE_IGU_STATUS_VALID |
2313                                           ECORE_IGU_STATUS_FREE;
2314
2315                         if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX)
2316                                 p_igu_info->usage.cnt++;
2317                 } else if (!(p_block->is_pf) &&
2318                            (p_block->function_id >= min_vf) &&
2319                            (p_block->function_id < max_vf)) {
2320                         /* Available for VFs of this PF */
2321                         p_block->status = ECORE_IGU_STATUS_VALID |
2322                                           ECORE_IGU_STATUS_FREE;
2323
2324                         if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX)
2325                                 p_igu_info->usage.iov_cnt++;
2326                 }
2327
2328                 /* Mark the first entry belonging to the PF or its VFs
2329                  * as the default SB [we'll reset IGU prior to first usage].
2330                  */
2331                 if ((p_block->status & ECORE_IGU_STATUS_VALID) &&
2332                     (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX)) {
2333                         p_igu_info->igu_dsb_id = igu_sb_id;
2334                         p_block->status |= ECORE_IGU_STATUS_DSB;
2335                 }
2336
2337                 /* While this isn't suitable for all clients, limit the number
2338                  * of prints by having each PF print only its own entries, with
2339                  * the exception of PF0, which prints everything.
2340                  */
2341                 if ((p_block->status & ECORE_IGU_STATUS_VALID) ||
2342                     (p_hwfn->abs_pf_id == 0))
2343                         DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
2344                                    "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
2345                                    igu_sb_id, p_block->function_id,
2346                                    p_block->is_pf, p_block->vector_number);
2347         }
2348
2349         if (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX) {
2350                 DP_NOTICE(p_hwfn, true,
2351                           "IGU CAM returned invalid values igu_dsb_id=0x%x\n",
2352                           p_igu_info->igu_dsb_id);
2353                 return ECORE_INVAL;
2354         }
2355
2356         /* All non default SB are considered free at this point */
2357         p_igu_info->usage.free_cnt = p_igu_info->usage.cnt;
2358         p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt;
2359
2360         DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
2361                    "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n",
2362                    p_igu_info->igu_dsb_id, p_igu_info->usage.cnt,
2363                    p_igu_info->usage.iov_cnt);
2364
2365         return ECORE_SUCCESS;
2366 }
2367
2368 enum _ecore_status_t
2369 ecore_int_igu_relocate_sb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2370                           u16 sb_id, bool b_to_vf)
2371 {
2372         struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
2373         struct ecore_igu_block *p_block = OSAL_NULL;
2374         u16 igu_sb_id = 0, vf_num = 0;
2375         u32 val = 0;
2376
2377         if (IS_VF(p_hwfn->p_dev) || !IS_PF_SRIOV(p_hwfn))
2378                 return ECORE_INVAL;
2379
2380         if (sb_id == ECORE_SP_SB_ID)
2381                 return ECORE_INVAL;
2382
2383         if (!p_info->b_allow_pf_vf_change) {
2384                 DP_INFO(p_hwfn, "Can't relocate SBs as MFW is too old.\n");
2385                 return ECORE_INVAL;
2386         }
2387
2388         /* If we're moving an SB from PF to VF, the client has to specify
2389          * which vector it wants to move.
2390          */
2391         if (b_to_vf) {
2392                 igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
2393                 if (igu_sb_id == ECORE_SB_INVALID_IDX)
2394                         return ECORE_INVAL;
2395         }
2396
2397         /* If we're moving an SB from VF to PF, we need to validate that there isn't
2398          * already a line configured for that vector.
2399          */
2400         if (!b_to_vf) {
2401                 if (ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1) !=
2402                     ECORE_SB_INVALID_IDX)
2403                         return ECORE_INVAL;
2404         }
2405
2406         /* We need to validate that the SB can actually be relocated.
2407          * This would also handle the previous case where we've explicitly
2408          * stated which IGU SB needs to move.
2409          */
2410         for (; igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
2411              igu_sb_id++) {
2412                 p_block = &p_info->entry[igu_sb_id];
2413
2414                 if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
2415                     !(p_block->status & ECORE_IGU_STATUS_FREE) ||
2416                     (!!(p_block->status & ECORE_IGU_STATUS_PF) != b_to_vf)) {
2417                         if (b_to_vf)
2418                                 return ECORE_INVAL;
2419                         else
2420                                 continue;
2421                 }
2422
2423                 break;
2424         }
2425
2426         if (igu_sb_id == ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev)) {
2427                 DP_VERBOSE(p_hwfn, (ECORE_MSG_INTR | ECORE_MSG_IOV),
2428                            "Failed to find a free SB to move\n");
2429                 return ECORE_INVAL;
2430         }
2431
2432         /* At this point, p_block points to the SB we want to relocate */
2433         if (b_to_vf) {
2434                 p_block->status &= ~ECORE_IGU_STATUS_PF;
2435
2436                 /* It doesn't matter which VF number we choose, since we're
2437                  * going to disable the line; but let's keep it in range.
2438                  */
2439                 vf_num = (u16)p_hwfn->p_dev->p_iov_info->first_vf_in_pf;
2440
2441                 p_block->function_id = (u8)vf_num;
2442                 p_block->is_pf = 0;
2443                 p_block->vector_number = 0;
2444
2445                 p_info->usage.cnt--;
2446                 p_info->usage.free_cnt--;
2447                 p_info->usage.iov_cnt++;
2448                 p_info->usage.free_cnt_iov++;
2449
2450                 /* TODO - if SBs aren't really the limiting factor,
2451                  * then this might not be accurate [in the sense that
2452                  * we might not need to decrement the feature].
2453                  */
2454                 p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]--;
2455                 p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]++;
2456         } else {
2457                 p_block->status |= ECORE_IGU_STATUS_PF;
2458                 p_block->function_id = p_hwfn->rel_pf_id;
2459                 p_block->is_pf = 1;
2460                 p_block->vector_number = sb_id + 1;
2461
2462                 p_info->usage.cnt++;
2463                 p_info->usage.free_cnt++;
2464                 p_info->usage.iov_cnt--;
2465                 p_info->usage.free_cnt_iov--;
2466
2467                 p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]++;
2468                 p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]--;
2469         }
2470
2471         /* Update the IGU and CAU with the new configuration */
2472         SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
2473                   p_block->function_id);
2474         SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
2475         SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);
2476         SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
2477                   p_block->vector_number);
2478
2479         ecore_wr(p_hwfn, p_ptt,
2480                  IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id,
2481                  val);
2482
2483         ecore_int_cau_conf_sb(p_hwfn, p_ptt, 0,
2484                               igu_sb_id, vf_num,
2485                               p_block->is_pf ? 0 : 1);
2486
2487         DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
2488                    "Relocation: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
2489                    igu_sb_id, p_block->function_id,
2490                    p_block->is_pf, p_block->vector_number);
2491
2492         return ECORE_SUCCESS;
2493 }
2494
2495 /**
2496  * @brief Initialize igu runtime registers
2497  *
2498  * @param p_hwfn
2499  */
2500 void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn)
2501 {
2502         u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;
2503
2504         STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
2505 }
2506
2507 #define LSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - \
2508                           IGU_CMD_INT_ACK_BASE)
2509 #define MSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - \
2510                           IGU_CMD_INT_ACK_BASE)
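     /* Each IGU command line occupies 8 bytes in the BAR0 GTT window, hence
      * the "* 8" below when turning the LSB/MSB command indices into byte
      * offsets; the two 32-bit halves are then combined into a single 64-bit
      * status word.
      */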
2511 u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn)
2512 {
2513         u32 intr_status_hi = 0, intr_status_lo = 0;
2514         u64 intr_status = 0;
2515
2516         intr_status_lo = REG_RD(p_hwfn,
2517                                 GTT_BAR0_MAP_REG_IGU_CMD +
2518                                 LSB_IGU_CMD_ADDR * 8);
2519         intr_status_hi = REG_RD(p_hwfn,
2520                                 GTT_BAR0_MAP_REG_IGU_CMD +
2521                                 MSB_IGU_CMD_ADDR * 8);
2522         intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;
2523
2524         return intr_status;
2525 }
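
/* Illustrative sketch (not compiled; kept under "#if 0"): the SISR value is
 * read above as two 32-bit halves and combined into one u64 bitmap, so a
 * caller can test individual status bits directly. The helper name below is
 * a made-up example, not driver API.
 */
#if 0
static bool example_sisr_bit_is_set(struct ecore_hwfn *p_hwfn, u8 bit)
{
        u64 sisr = ecore_int_igu_read_sisr_reg(p_hwfn);

        /* Bits 0-31 come from the LSB register, bits 32-63 from the MSB */
        return (sisr >> bit) & 0x1;
}
#endif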

static void ecore_int_sp_dpc_setup(struct ecore_hwfn *p_hwfn)
{
        OSAL_DPC_INIT(p_hwfn->sp_dpc, p_hwfn);
        p_hwfn->b_sp_dpc_enabled = true;
}

static enum _ecore_status_t ecore_int_sp_dpc_alloc(struct ecore_hwfn *p_hwfn)
{
        p_hwfn->sp_dpc = OSAL_DPC_ALLOC(p_hwfn);
        if (!p_hwfn->sp_dpc)
                return ECORE_NOMEM;

        return ECORE_SUCCESS;
}

static void ecore_int_sp_dpc_free(struct ecore_hwfn *p_hwfn)
{
        OSAL_FREE(p_hwfn->p_dev, p_hwfn->sp_dpc);
}

enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn,
                                     struct ecore_ptt *p_ptt)
{
        enum _ecore_status_t rc = ECORE_SUCCESS;

        rc = ecore_int_sp_dpc_alloc(p_hwfn);
        if (rc != ECORE_SUCCESS) {
                DP_ERR(p_hwfn->p_dev, "Failed to allocate sp dpc mem\n");
                return rc;
        }

        rc = ecore_int_sp_sb_alloc(p_hwfn, p_ptt);
        if (rc != ECORE_SUCCESS) {
                DP_ERR(p_hwfn->p_dev, "Failed to allocate sp sb mem\n");
                return rc;
        }

        rc = ecore_int_sb_attn_alloc(p_hwfn, p_ptt);
        if (rc != ECORE_SUCCESS)
                DP_ERR(p_hwfn->p_dev, "Failed to allocate sb attn mem\n");

        return rc;
}

void ecore_int_free(struct ecore_hwfn *p_hwfn)
{
        ecore_int_sp_sb_free(p_hwfn);
        ecore_int_sb_attn_free(p_hwfn);
        ecore_int_sp_dpc_free(p_hwfn);
}

void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
        if (!p_hwfn || !p_hwfn->p_sp_sb || !p_hwfn->p_sb_attn)
                return;

        ecore_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
        ecore_int_sb_attn_setup(p_hwfn, p_ptt);
        ecore_int_sp_dpc_setup(p_hwfn);
}
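
/* Illustrative sketch (not compiled; kept under "#if 0"): the expected
 * lifecycle pairs ecore_int_alloc() at load time with ecore_int_setup()
 * once a PTT is available, and ecore_int_free() at unload. The helper name
 * is a made-up example; only the three calls are real driver entry points.
 */
#if 0
static enum _ecore_status_t example_int_init(struct ecore_hwfn *p_hwfn,
                                             struct ecore_ptt *p_ptt)
{
        enum _ecore_status_t rc;

        /* Allocate the slowpath DPC, slowpath SB and attention SB */
        rc = ecore_int_alloc(p_hwfn, p_ptt);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Program the allocated structures into the IGU/CAU */
        ecore_int_setup(p_hwfn, p_ptt);

        /* ... on teardown, release with ecore_int_free(p_hwfn) ... */
        return ECORE_SUCCESS;
}
#endif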

void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
                           struct ecore_sb_cnt_info *p_sb_cnt_info)
{
        struct ecore_igu_info *p_igu_info = p_hwfn->hw_info.p_igu_info;

        if (!p_igu_info || !p_sb_cnt_info)
                return;

        OSAL_MEMCPY(p_sb_cnt_info, &p_igu_info->usage,
                    sizeof(*p_sb_cnt_info));
}
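
/* Illustrative sketch (not compiled; kept under "#if 0"): snapshotting the
 * SB usage counters that the relocation path above adjusts. The counter
 * field names mirror the p_info->usage accesses in this file; the helper
 * name and the log format are made-up examples.
 */
#if 0
static void example_dump_sb_usage(struct ecore_hwfn *p_hwfn)
{
        struct ecore_sb_cnt_info sb_cnt;

        OSAL_MEM_ZERO(&sb_cnt, sizeof(sb_cnt));
        ecore_int_get_num_sbs(p_hwfn, &sb_cnt);

        DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
                   "SBs: PF %d/%d free, VF %d/%d free\n",
                   sb_cnt.free_cnt, sb_cnt.cnt,
                   sb_cnt.free_cnt_iov, sb_cnt.iov_cnt);
}
#endif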

void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev)
{
        int i;

        for_each_hwfn(p_dev, i)
                p_dev->hwfns[i].b_int_requested = false;
}

void ecore_int_attn_clr_enable(struct ecore_dev *p_dev, bool clr_enable)
{
        p_dev->attn_clr_en = clr_enable;
}

enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
                                             struct ecore_ptt *p_ptt,
                                             u8 timer_res, u16 sb_id, bool tx)
{
        struct cau_sb_entry sb_entry;
        enum _ecore_status_t rc;

        if (!p_hwfn->hw_init_done) {
                DP_ERR(p_hwfn, "hardware not initialized yet\n");
                return ECORE_INVAL;
        }

        rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
                                 sb_id * sizeof(u64),
                                 (u64)(osal_uintptr_t)&sb_entry, 2, 0);
        if (rc != ECORE_SUCCESS) {
                DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
                return rc;
        }

        if (tx)
                SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
        else
                SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res);

        rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
                                 (u64)(osal_uintptr_t)&sb_entry,
                                 CAU_REG_SB_VAR_MEMORY +
                                 sb_id * sizeof(u64), 2, 0);
        if (rc != ECORE_SUCCESS) {
                DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc);
                return rc;
        }

        return rc;
}
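
/* Illustrative sketch (not compiled; kept under "#if 0"): the function above
 * is a DMAE read-modify-write of one CAU SB entry, where the tx flag selects
 * TIMER_RES1 and otherwise TIMER_RES0. The helper name and the resolution
 * value below are made-up examples, not recommended settings.
 */
#if 0
static enum _ecore_status_t example_set_coalescing_res(struct ecore_hwfn *p_hwfn,
                                                       struct ecore_ptt *p_ptt,
                                                       u16 sb_id)
{
        u8 timer_res = 1; /* example resolution value */
        enum _ecore_status_t rc;

        /* tx == false selects TIMER_RES0 */
        rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, false);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* tx == true selects TIMER_RES1 */
        return ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, true);
}
#endif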

enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
                                          struct ecore_ptt *p_ptt,
                                          struct ecore_sb_info *p_sb,
                                          struct ecore_sb_info_dbg *p_info)
{
        u16 sbid = p_sb->igu_sb_id;
        int i;

        if (IS_VF(p_hwfn->p_dev))
                return ECORE_INVAL;

        if (sbid >= NUM_OF_SBS(p_hwfn->p_dev))
                return ECORE_INVAL;

        p_info->igu_prod = ecore_rd(p_hwfn, p_ptt,
                                    IGU_REG_PRODUCER_MEMORY + sbid * 4);
        p_info->igu_cons = ecore_rd(p_hwfn, p_ptt,
                                    IGU_REG_CONSUMER_MEM + sbid * 4);

        for (i = 0; i < PIS_PER_SB_E4; i++)
                p_info->pi[i] = (u16)ecore_rd(p_hwfn, p_ptt,
                                              CAU_REG_PI_MEMORY +
                                              sbid * 4 * PIS_PER_SB_E4 +
                                              i * 4);

        return ECORE_SUCCESS;
}
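
/* Illustrative sketch (not compiled; kept under "#if 0"): dumping the
 * producer/consumer pair and the per-protocol indices collected by
 * ecore_int_get_sb_dbg(). The helper name and the log format are made-up
 * examples.
 */
#if 0
static void example_dump_sb_state(struct ecore_hwfn *p_hwfn,
                                  struct ecore_ptt *p_ptt,
                                  struct ecore_sb_info *p_sb)
{
        struct ecore_sb_info_dbg dbg;
        int i;

        if (ecore_int_get_sb_dbg(p_hwfn, p_ptt, p_sb, &dbg) != ECORE_SUCCESS)
                return;

        DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
                   "SB 0x%04x: prod 0x%08x cons 0x%08x\n",
                   p_sb->igu_sb_id, dbg.igu_prod, dbg.igu_cons);

        for (i = 0; i < PIS_PER_SB_E4; i++)
                DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
                           "  PI[%d] = 0x%04x\n", i, dbg.pi[i]);
}
#endif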