73d7fb5328ef3efb153cfe357203faeabaf27fcd
[dpdk.git] / drivers / net / qede / base / ecore_int.c
1 /*
2  * Copyright (c) 2016 QLogic Corporation.
3  * All rights reserved.
4  * www.qlogic.com
5  *
6  * See LICENSE.qede_pmd for copyright and licensing details.
7  */
8
9 #include "bcm_osal.h"
10 #include "ecore.h"
11 #include "ecore_spq.h"
12 #include "reg_addr.h"
13 #include "ecore_gtt_reg_addr.h"
14 #include "ecore_init_ops.h"
15 #include "ecore_rt_defs.h"
16 #include "ecore_int.h"
17 #include "reg_addr.h"
18 #include "ecore_hw.h"
19 #include "ecore_hw_defs.h"
20 #include "ecore_hsi_common.h"
21 #include "ecore_mcp.h"
22
/* Registration record for one slowpath protocol-index completion callback */
struct ecore_pi_info {
	ecore_int_comp_cb_t comp_cb;	/* OSAL_NULL marks a free slot */
	void *cookie;		/* Will be sent to the compl cb function */
};
27
/* Slowpath status block bundled with its per-protocol-index callbacks */
struct ecore_sb_sp_info {
	struct ecore_sb_info sb_info;
	/* per protocol index data */
	struct ecore_pi_info pi_info_arr[PIS_PER_SB];
};
33
/* Classification of an AEU bit: regular attention vs. parity error */
enum ecore_attention_type {
	ECORE_ATTN_TYPE_ATTN,
	ECORE_ATTN_TYPE_PARITY,
};
38
/* Allocation size of the attention status block, platform-aligned */
#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
	ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)
41
/* Description of one bit (or run of bits) in an AEU invert register:
 * printable name, ATTENTION_* flags, optional handler and owning HW block.
 */
struct aeu_invert_reg_bit {
	char bit_name[30];

/* Set when the bit reports a parity error */
#define ATTENTION_PARITY		(1 << 0)

/* Number of consecutive AEU bits covered by this entry */
#define ATTENTION_LENGTH_MASK		(0x00000ff0)
#define ATTENTION_LENGTH_SHIFT		(4)
#define ATTENTION_LENGTH(flags)		(((flags) & ATTENTION_LENGTH_MASK) >> \
					 ATTENTION_LENGTH_SHIFT)
#define ATTENTION_SINGLE		(1 << ATTENTION_LENGTH_SHIFT)
#define ATTENTION_PAR			(ATTENTION_SINGLE | ATTENTION_PARITY)
#define ATTENTION_PAR_INT		((2 << ATTENTION_LENGTH_SHIFT) | \
					 ATTENTION_PARITY)

/* Multiple bits start with this offset */
#define ATTENTION_OFFSET_MASK		(0x000ff000)
#define ATTENTION_OFFSET_SHIFT		(12)

#define ATTENTION_CLEAR_ENABLE		(1 << 28)
#define ATTENTION_FW_DUMP		(1 << 29)
#define ATTENTION_PANIC_DUMP		(1 << 30)
	unsigned int flags;	/* combination of the ATTENTION_* values above */

	/* Callback to call if attention will be triggered */
	enum _ecore_status_t (*cb)(struct ecore_hwfn *p_hwfn);

	enum block_id block_index;
};
70
/* Descriptions for all 32 bits of a single AEU invert register */
struct aeu_invert_reg {
	struct aeu_invert_reg_bit bits[32];
};
74
/* Number of AEU invert registers scanned for attentions */
#define NUM_ATTN_REGS		(9)

#define ATTN_STATE_BITS		(0xfff)
#define ATTN_BITS_MASKABLE	(0x3ff)
/* Book-keeping state for the attention status block */
struct ecore_sb_attn_info {
	/* Virtual & Physical address of the SB */
	struct atten_status_block *sb_attn;
	dma_addr_t sb_phys;

	/* Last seen running index */
	u16 index;

	/* A mask of the AEU bits resulting in a parity error */
	u32 parity_mask[NUM_ATTN_REGS];

	/* A pointer to the attention description structure */
	struct aeu_invert_reg *p_aeu_desc;

	/* Previously asserted attentions, which are still unasserted */
	u16 known_attn;

	/* Cleanup address for the link's general hw attention */
	u32 mfw_attn_addr;
};
99
100 static u16 ecore_attn_update_idx(struct ecore_hwfn *p_hwfn,
101                                  struct ecore_sb_attn_info *p_sb_desc)
102 {
103         u16 rc = 0, index;
104
105         OSAL_MMIOWB(p_hwfn->p_dev);
106
107         index = OSAL_LE16_TO_CPU(p_sb_desc->sb_attn->sb_index);
108         if (p_sb_desc->index != index) {
109                 p_sb_desc->index = index;
110                 rc = ECORE_SB_ATT_IDX;
111         }
112
113         OSAL_MMIOWB(p_hwfn->p_dev);
114
115         return rc;
116 }
117
118 static void ecore_sb_ack_attn(struct ecore_hwfn *p_hwfn,
119                               void OSAL_IOMEM *igu_addr, u32 ack_cons)
120 {
121         struct igu_prod_cons_update igu_ack = { 0 };
122
123         igu_ack.sb_id_and_flags =
124             ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
125              (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
126              (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
127              (IGU_SEG_ACCESS_ATTN <<
128               IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
129
130         DIRECT_REG_WR(p_hwfn, igu_addr, igu_ack.sb_id_and_flags);
131
132         /* Both segments (interrupts & acks) are written to same place address;
133          * Need to guarantee all commands will be received (in-order) by HW.
134          */
135         OSAL_MMIOWB(p_hwfn->p_dev);
136         OSAL_BARRIER(p_hwfn->p_dev);
137 }
138
139 void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)
140 {
141         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)hwfn_cookie;
142         struct ecore_pi_info *pi_info = OSAL_NULL;
143         struct ecore_sb_attn_info *sb_attn;
144         struct ecore_sb_info *sb_info;
145         static int arr_size;
146         u16 rc = 0;
147
148         if (!p_hwfn) {
149                 DP_ERR(p_hwfn->p_dev, "DPC called - no hwfn!\n");
150                 return;
151         }
152
153         if (!p_hwfn->p_sp_sb) {
154                 DP_ERR(p_hwfn->p_dev, "DPC called - no p_sp_sb\n");
155                 return;
156         }
157
158         sb_info = &p_hwfn->p_sp_sb->sb_info;
159         arr_size = OSAL_ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
160         if (!sb_info) {
161                 DP_ERR(p_hwfn->p_dev,
162                        "Status block is NULL - cannot ack interrupts\n");
163                 return;
164         }
165
166         if (!p_hwfn->p_sb_attn) {
167                 DP_ERR(p_hwfn->p_dev, "DPC called - no p_sb_attn");
168                 return;
169         }
170         sb_attn = p_hwfn->p_sb_attn;
171
172         DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
173                    p_hwfn, p_hwfn->my_id);
174
175         /* Disable ack for def status block. Required both for msix +
176          * inta in non-mask mode, in inta does no harm.
177          */
178         ecore_sb_ack(sb_info, IGU_INT_DISABLE, 0);
179
180         /* Gather Interrupts/Attentions information */
181         if (!sb_info->sb_virt) {
182                 DP_ERR(p_hwfn->p_dev,
183                        "Interrupt Status block is NULL -"
184                        " cannot check for new interrupts!\n");
185         } else {
186                 u32 tmp_index = sb_info->sb_ack;
187                 rc = ecore_sb_update_sb_idx(sb_info);
188                 DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
189                            "Interrupt indices: 0x%08x --> 0x%08x\n",
190                            tmp_index, sb_info->sb_ack);
191         }
192
193         if (!sb_attn || !sb_attn->sb_attn) {
194                 DP_ERR(p_hwfn->p_dev,
195                        "Attentions Status block is NULL -"
196                        " cannot check for new attentions!\n");
197         } else {
198                 u16 tmp_index = sb_attn->index;
199
200                 rc |= ecore_attn_update_idx(p_hwfn, sb_attn);
201                 DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
202                            "Attention indices: 0x%08x --> 0x%08x\n",
203                            tmp_index, sb_attn->index);
204         }
205
206         /* Check if we expect interrupts at this time. if not just ack them */
207         if (!(rc & ECORE_SB_EVENT_MASK)) {
208                 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
209                 return;
210         }
211
212         /* Check the validity of the DPC ptt. If not ack interrupts and fail */
213         if (!p_hwfn->p_dpc_ptt) {
214                 DP_NOTICE(p_hwfn->p_dev, true, "Failed to allocate PTT\n");
215                 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
216                 return;
217         }
218
219         if (rc & ECORE_SB_IDX) {
220                 int pi;
221
222                 /* Since we only looked at the SB index, it's possible more
223                  * than a single protocol-index on the SB incremented.
224                  * Iterate over all configured protocol indices and check
225                  * whether something happened for each.
226                  */
227                 for (pi = 0; pi < arr_size; pi++) {
228                         pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
229                         if (pi_info->comp_cb != OSAL_NULL)
230                                 pi_info->comp_cb(p_hwfn, pi_info->cookie);
231                 }
232         }
233
234         if (sb_attn && (rc & ECORE_SB_ATT_IDX)) {
235                 /* This should be done before the interrupts are enabled,
236                  * since otherwise a new attention will be generated.
237                  */
238                 ecore_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);
239         }
240
241         ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
242 }
243
244 static void ecore_int_sb_attn_free(struct ecore_hwfn *p_hwfn)
245 {
246         struct ecore_sb_attn_info *p_sb = p_hwfn->p_sb_attn;
247
248         if (!p_sb)
249                 return;
250
251         if (p_sb->sb_attn) {
252                 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_sb->sb_attn,
253                                        p_sb->sb_phys,
254                                        SB_ATTN_ALIGNED_SIZE(p_hwfn));
255         }
256         OSAL_FREE(p_hwfn->p_dev, p_sb);
257 }
258
/* coalescing timeout = timeset << (timer_res + 1) */
/* Default RX/TX interrupt coalescing periods in usecs; each may be
 * overridden at build time via the corresponding RTE_LIBRTE_QEDE_* option.
 */
#ifdef RTE_LIBRTE_QEDE_RX_COAL_US
#define ECORE_CAU_DEF_RX_USECS RTE_LIBRTE_QEDE_RX_COAL_US
#else
#define ECORE_CAU_DEF_RX_USECS 24
#endif

#ifdef RTE_LIBRTE_QEDE_TX_COAL_US
#define ECORE_CAU_DEF_TX_USECS RTE_LIBRTE_QEDE_TX_COAL_US
#else
#define ECORE_CAU_DEF_TX_USECS 48
#endif
271
/**
 * @brief Prepare a CAU status-block entry (owner, timers and coalescing
 *        state) for the given PF/VF.
 *
 * @param p_hwfn
 * @param p_sb_entry - entry to fill; fully overwritten
 * @param pf_id - owning PF number
 * @param vf_number - VF index, meaningful only when vf_valid is set
 * @param vf_valid - non-zero when the SB belongs to a VF
 */
void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn,
			     struct cau_sb_entry *p_sb_entry,
			     u8 pf_id, u16 vf_number, u8 vf_valid)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u32 cau_state;

	OSAL_MEMSET(p_sb_entry, 0, sizeof(*p_sb_entry));

	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);

	/* setting the time resolution to a fixed value ( = 1) */
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0,
		  ECORE_CAU_DEF_RX_TIMER_RES);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1,
		  ECORE_CAU_DEF_TX_TIMER_RES);

	cau_state = CAU_HC_DISABLE_STATE;

	/* When coalescing is enabled device-wide, make sure the usec
	 * parameters carry sane (non-zero) defaults.
	 */
	if (p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
		cau_state = CAU_HC_ENABLE_STATE;
		if (!p_dev->rx_coalesce_usecs) {
			p_dev->rx_coalesce_usecs = ECORE_CAU_DEF_RX_USECS;
			DP_INFO(p_dev, "Coalesce params rx-usecs=%u\n",
				p_dev->rx_coalesce_usecs);
		}
		if (!p_dev->tx_coalesce_usecs) {
			p_dev->tx_coalesce_usecs = ECORE_CAU_DEF_TX_USECS;
			DP_INFO(p_dev, "Coalesce params tx-usecs=%u\n",
				p_dev->tx_coalesce_usecs);
		}
	}

	SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
	SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
}
312
/**
 * @brief Program the CAU for one status block: its host address, its
 *        variable data and (optionally) per-PI coalescing.
 *
 * If HW init is already done the values are written immediately via DMAE;
 * otherwise they are staged in the runtime array for the init tool.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param sb_phys - host DMA address of the status block
 * @param igu_sb_id - IGU index of the status block
 * @param vf_number - VF index, meaningful only when vf_valid is set
 * @param vf_valid - non-zero when the SB belongs to a VF
 */
void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
			   struct ecore_ptt *p_ptt,
			   dma_addr_t sb_phys, u16 igu_sb_id,
			   u16 vf_number, u8 vf_valid)
{
	struct cau_sb_entry sb_entry;

	ecore_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
				vf_number, vf_valid);

	if (p_hwfn->hw_init_done) {
		/* Wide-bus, initialize via DMAE */
		u64 phys_addr = (u64)sb_phys;

		ecore_dmae_host2grc(p_hwfn, p_ptt,
				    (u64)(osal_uintptr_t)&phys_addr,
				    CAU_REG_SB_ADDR_MEMORY +
				    igu_sb_id * sizeof(u64), 2, 0);
		ecore_dmae_host2grc(p_hwfn, p_ptt,
				    (u64)(osal_uintptr_t)&sb_entry,
				    CAU_REG_SB_VAR_MEMORY +
				    igu_sb_id * sizeof(u64), 2, 0);
	} else {
		/* Initialize Status Block Address */
		STORE_RT_REG_AGG(p_hwfn,
				 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
				 igu_sb_id * 2, sb_phys);

		STORE_RT_REG_AGG(p_hwfn,
				 CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
				 igu_sb_id * 2, sb_entry);
	}

	/* Configure pi coalescing if set */
	if (p_hwfn->p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
		u8 num_tc = 1;	/* @@@TBD aelior ECORE_MULTI_COS */
		/* timeset is derived so that timeset << (timer_res + 1)
		 * yields the configured usec period (see macro comment above).
		 */
		u8 timeset = p_hwfn->p_dev->rx_coalesce_usecs >>
		    (ECORE_CAU_DEF_RX_TIMER_RES + 1);
		u8 i;

		ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
				      ECORE_COAL_RX_STATE_MACHINE, timeset);

		timeset = p_hwfn->p_dev->tx_coalesce_usecs >>
		    (ECORE_CAU_DEF_TX_TIMER_RES + 1);

		/* One TX PI per traffic class */
		for (i = 0; i < num_tc; i++) {
			ecore_int_cau_conf_pi(p_hwfn, p_ptt,
					      igu_sb_id, TX_PI(i),
					      ECORE_COAL_TX_STATE_MACHINE,
					      timeset);
		}
	}
}
367
/**
 * @brief Program a single CAU protocol-index entry (timeset + FSM select).
 *
 * @param p_hwfn
 * @param p_ptt
 * @param igu_sb_id - owning status block's IGU index
 * @param pi_index - protocol index within the SB
 * @param coalescing_fsm - RX or TX coalescing state machine selector
 * @param timeset - coalescing timeset value
 */
void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
			   struct ecore_ptt *p_ptt,
			   u16 igu_sb_id, u32 pi_index,
			   enum ecore_coalescing_fsm coalescing_fsm, u8 timeset)
{
	struct cau_pi_entry pi_entry;
	u32 sb_offset, pi_offset;

	/* Each SB owns PIS_PER_SB consecutive PI entries */
	sb_offset = igu_sb_id * PIS_PER_SB;
	OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry));

	SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
	/* FSM_SEL 0 selects the RX state machine, 1 the TX one */
	if (coalescing_fsm == ECORE_COAL_RX_STATE_MACHINE)
		SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
	else
		SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);

	pi_offset = sb_offset + pi_index;
	/* NOTE(review): both branches reinterpret pi_entry as a u32 -
	 * assumes struct cau_pi_entry is exactly 32 bits wide; confirm
	 * against the HSI definition.
	 */
	if (p_hwfn->hw_init_done) {
		ecore_wr(p_hwfn, p_ptt,
			 CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
			 *((u32 *)&(pi_entry)));
	} else {
		STORE_RT_REG(p_hwfn,
			     CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
			     *((u32 *)&(pi_entry)));
	}
}
396
397 void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
398                         struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info)
399 {
400         /* zero status block and ack counter */
401         sb_info->sb_ack = 0;
402         OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
403
404         ecore_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
405                                       sb_info->igu_sb_id, 0, 0);
406 }
407
408 /**
409  * @brief ecore_get_igu_sb_id - given a sw sb_id return the
410  *        igu_sb_id
411  *
412  * @param p_hwfn
413  * @param sb_id
414  *
415  * @return u16
416  */
417 static u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
418 {
419         u16 igu_sb_id;
420
421         /* Assuming continuous set of IGU SBs dedicated for given PF */
422         if (sb_id == ECORE_SP_SB_ID)
423                 igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
424         else
425                 igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb;
426
427         if (sb_id == ECORE_SP_SB_ID)
428                 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
429                            "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
430         else
431                 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
432                            "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);
433
434         return igu_sb_id;
435 }
436
437 enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
438                                        struct ecore_ptt *p_ptt,
439                                        struct ecore_sb_info *sb_info,
440                                        void *sb_virt_addr,
441                                        dma_addr_t sb_phy_addr, u16 sb_id)
442 {
443         sb_info->sb_virt = sb_virt_addr;
444         sb_info->sb_phys = sb_phy_addr;
445
446         sb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id);
447
448         if (sb_id != ECORE_SP_SB_ID) {
449                 p_hwfn->sbs_info[sb_id] = sb_info;
450                 p_hwfn->num_sbs++;
451         }
452 #ifdef ECORE_CONFIG_DIRECT_HWFN
453         sb_info->p_hwfn = p_hwfn;
454 #endif
455         sb_info->p_dev = p_hwfn->p_dev;
456
457         /* The igu address will hold the absolute address that needs to be
458          * written to for a specific status block
459          */
460         sb_info->igu_addr = (u8 OSAL_IOMEM *)p_hwfn->regview +
461                     GTT_BAR0_MAP_REG_IGU_CMD + (sb_info->igu_sb_id << 3);
462
463         sb_info->flags |= ECORE_SB_INFO_INIT;
464
465         ecore_int_sb_setup(p_hwfn, p_ptt, sb_info);
466
467         return ECORE_SUCCESS;
468 }
469
470 enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
471                                           struct ecore_sb_info *sb_info,
472                                           u16 sb_id)
473 {
474         if (sb_id == ECORE_SP_SB_ID) {
475                 DP_ERR(p_hwfn, "Do Not free sp sb using this function");
476                 return ECORE_INVAL;
477         }
478
479         /* zero status block and ack counter */
480         sb_info->sb_ack = 0;
481         OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
482
483         if (p_hwfn->sbs_info[sb_id] != OSAL_NULL) {
484                 p_hwfn->sbs_info[sb_id] = OSAL_NULL;
485                 p_hwfn->num_sbs--;
486         }
487
488         return ECORE_SUCCESS;
489 }
490
491 static void ecore_int_sp_sb_free(struct ecore_hwfn *p_hwfn)
492 {
493         struct ecore_sb_sp_info *p_sb = p_hwfn->p_sp_sb;
494
495         if (!p_sb)
496                 return;
497
498         if (p_sb->sb_info.sb_virt) {
499                 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
500                                        p_sb->sb_info.sb_virt,
501                                        p_sb->sb_info.sb_phys,
502                                        SB_ALIGNED_SIZE(p_hwfn));
503         }
504
505         OSAL_FREE(p_hwfn->p_dev, p_sb);
506 }
507
508 static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
509                                                   struct ecore_ptt *p_ptt)
510 {
511         struct ecore_sb_sp_info *p_sb;
512         dma_addr_t p_phys = 0;
513         void *p_virt;
514
515         /* SB struct */
516         p_sb =
517             OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
518                        sizeof(struct ecore_sb_sp_info));
519         if (!p_sb) {
520                 DP_NOTICE(p_hwfn, true,
521                           "Failed to allocate `struct ecore_sb_info'");
522                 return ECORE_NOMEM;
523         }
524
525         /* SB ring  */
526         p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
527                                          &p_phys, SB_ALIGNED_SIZE(p_hwfn));
528         if (!p_virt) {
529                 DP_NOTICE(p_hwfn, true, "Failed to allocate status block");
530                 OSAL_FREE(p_hwfn->p_dev, p_sb);
531                 return ECORE_NOMEM;
532         }
533
534         /* Status Block setup */
535         p_hwfn->p_sp_sb = p_sb;
536         ecore_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info,
537                           p_virt, p_phys, ECORE_SP_SB_ID);
538
539         OSAL_MEMSET(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));
540
541         return ECORE_SUCCESS;
542 }
543
544 enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn,
545                                            ecore_int_comp_cb_t comp_cb,
546                                            void *cookie,
547                                            u8 *sb_idx, __le16 **p_fw_cons)
548 {
549         struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
550         enum _ecore_status_t rc = ECORE_NOMEM;
551         u8 pi;
552
553         /* Look for a free index */
554         for (pi = 0; pi < OSAL_ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
555                 if (p_sp_sb->pi_info_arr[pi].comp_cb != OSAL_NULL)
556                         continue;
557
558                 p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
559                 p_sp_sb->pi_info_arr[pi].cookie = cookie;
560                 *sb_idx = pi;
561                 *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
562                 rc = ECORE_SUCCESS;
563                 break;
564         }
565
566         return rc;
567 }
568
569 enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn, u8 pi)
570 {
571         struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
572
573         if (p_sp_sb->pi_info_arr[pi].comp_cb == OSAL_NULL)
574                 return ECORE_NOMEM;
575
576         p_sp_sb->pi_info_arr[pi].comp_cb = OSAL_NULL;
577         p_sp_sb->pi_info_arr[pi].cookie = OSAL_NULL;
578         return ECORE_SUCCESS;
579 }
580
/* Return the IGU id of the hwfn's slowpath status block */
u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn)
{
	return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
}
585
586 void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn,
587                               struct ecore_ptt *p_ptt,
588                               enum ecore_int_mode int_mode)
589 {
590         u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;
591
592 #ifndef ASIC_ONLY
593         if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
594                 DP_INFO(p_hwfn, "FPGA - don't enable ATTN generation in IGU\n");
595         else
596 #endif
597                 igu_pf_conf |= IGU_PF_CONF_ATTN_BIT_EN;
598
599         p_hwfn->p_dev->int_mode = int_mode;
600         switch (p_hwfn->p_dev->int_mode) {
601         case ECORE_INT_MODE_INTA:
602                 igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
603                 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
604                 break;
605
606         case ECORE_INT_MODE_MSI:
607                 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
608                 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
609                 break;
610
611         case ECORE_INT_MODE_MSIX:
612                 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
613                 break;
614         case ECORE_INT_MODE_POLL:
615                 break;
616         }
617
618         ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
619 }
620
/* Enable HW attention generation: latch configuration in the IGU, then
 * unmask the AEU signals toward it. Skipped entirely on FPGA.
 */
static void ecore_int_igu_enable_attn(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt)
{
#ifndef ASIC_ONLY
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
		DP_INFO(p_hwfn,
			"FPGA - Don't enable Attentions in IGU and MISC\n");
		return;
	}
#endif

	/* Configure AEU signal change to produce attentions */
	ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
	ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
	ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
	ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);

	/* Flush the IGU configuration before unmasking the AEU */
	OSAL_MMIOWB(p_hwfn->p_dev);

	/* Unmask AEU signals toward IGU */
	ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
}
643
644 enum _ecore_status_t
645 ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
646                      enum ecore_int_mode int_mode)
647 {
648         enum _ecore_status_t rc = ECORE_SUCCESS;
649         u32 tmp, reg_addr;
650
651         /* @@@tmp - Mask General HW attentions 0-31, Enable 32-36 */
652         tmp = ecore_rd(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0);
653         tmp |= 0xf;
654         ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE3_IGU_OUT_0, 0);
655         ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0, tmp);
656
657         /* @@@tmp - Starting with MFW 8.2.1.0 we've started hitting AVS stop
658          * attentions. Since we're waiting for BRCM answer regarding this
659          * attention, in the meanwhile we simply mask it.
660          */
661         tmp = ecore_rd(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0);
662         tmp &= ~0x800;
663         ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0, tmp);
664
665         ecore_int_igu_enable_attn(p_hwfn, p_ptt);
666
667         if ((int_mode != ECORE_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
668                 rc = OSAL_SLOWPATH_IRQ_REQ(p_hwfn);
669                 if (rc != ECORE_SUCCESS) {
670                         DP_NOTICE(p_hwfn, true,
671                                   "Slowpath IRQ request failed\n");
672                         return ECORE_NORESOURCES;
673                 }
674                 p_hwfn->b_int_requested = true;
675         }
676
677         /* Enable interrupt Generation */
678         ecore_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
679
680         p_hwfn->b_int_enabled = 1;
681
682         return rc;
683 }
684
/* Disable interrupt generation: clear the software flag first, then
 * zero the IGU PF configuration register.
 */
void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt)
{
	p_hwfn->b_int_enabled = 0;

	ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
}
692
/* Maximum number of 5ms polling iterations while waiting for cleanup */
#define IGU_CLEANUP_SLEEP_LENGTH		(1000)
/**
 * @brief Issue an IGU cleanup command for a status block through the
 *        control command register and poll until the matching status bit
 *        reaches the requested value (or the poll budget is exhausted).
 *
 * @param p_hwfn
 * @param p_ptt
 * @param sb_id - IGU status block to clean
 * @param cleanup_set - target value of the cleanup bit
 * @param opaque_fid - FID stamped on the command
 */
void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      u32 sb_id, bool cleanup_set, u16 opaque_fid)
{
	u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
	u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id;
	u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
	u8 type = 0;		/* FIXME MichalS type??? */

	OSAL_BUILD_BUG_ON((IGU_REG_CLEANUP_STATUS_4 -
			   IGU_REG_CLEANUP_STATUS_0) != 0x200);

	/* USE Control Command Register to perform cleanup. There is an
	 * option to do this using IGU bar, but then it can't be used for VFs.
	 */

	/* Set the data field */
	SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
	SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, type);
	SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);

	/* Set the control register */
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);

	/* Data must land before the control write that triggers the command */
	ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);

	OSAL_BARRIER(p_hwfn->p_dev);

	ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);

	OSAL_MMIOWB(p_hwfn->p_dev);

	/* calculate where to read the status bit from */
	sb_bit = 1 << (sb_id % 32);
	sb_bit_addr = sb_id / 32 * sizeof(u32);

	sb_bit_addr += IGU_REG_CLEANUP_STATUS_0 + (0x80 * type);

	/* Now wait for the command to complete */
	while (--sleep_cnt) {
		val = ecore_rd(p_hwfn, p_ptt, sb_bit_addr);
		if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
			break;
		OSAL_MSLEEP(5);
	}

	if (!sleep_cnt)
		DP_NOTICE(p_hwfn, true,
			  "Timeout waiting for clear status 0x%08x [for sb %d]\n",
			  val, sb_id);
}
747
/* Run the IGU set/clear cleanup sequence for one SB and zero its CAU
 * protocol-index entries.
 */
void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       u32 sb_id, u16 opaque, bool b_set)
{
	int pi;

	/* Set */
	if (b_set)
		ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 1, opaque);

	/* Clear */
	ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque);

	/* Clear the CAU for the SB - 12 PI entries per SB here;
	 * presumably this equals PIS_PER_SB - TODO confirm against HSI.
	 */
	for (pi = 0; pi < 12; pi++)
		ecore_wr(p_hwfn, p_ptt,
			 CAU_REG_PI_MEMORY + (sb_id * 12 + pi) * 4, 0);
}
766
/* Clean all of this PF's IGU status blocks (and optionally the slowpath
 * SB) via ecore_int_igu_init_pure_rt_single().
 */
void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt,
				bool b_set, bool b_slowpath)
{
	u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb;
	u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt;
	u32 sb_id = 0, val = 0;

	/* @@@TBD MichalK temporary... should be moved to init-tool... */
	val = ecore_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
	val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
	val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
	ecore_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
	/* end temporary */

	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
		   "IGU cleaning SBs [%d,...,%d]\n",
		   igu_base_sb, igu_base_sb + igu_sb_cnt - 1);

	/* Fastpath SBs occupy a continuous range for this PF */
	for (sb_id = igu_base_sb; sb_id < igu_base_sb + igu_sb_cnt; sb_id++)
		ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
						  p_hwfn->hw_info.opaque_fid,
						  b_set);

	if (!b_slowpath)
		return;

	sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
		   "IGU cleaning slowpath SB [%d]\n", sb_id);
	ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
					  p_hwfn->hw_info.opaque_fid, b_set);
}
800
/* Read one IGU CAM mapping line and, if it describes a valid entry,
 * populate the matching igu_blocks[] descriptor. Returns the raw CAM
 * value so callers can continue parsing it.
 */
static u32 ecore_int_igu_read_cam_block(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt, u16 sb_id)
{
	u32 val = ecore_rd(p_hwfn, p_ptt,
			   IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id);
	struct ecore_igu_block *p_block;

	p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];

	/* stop scanning when hit first invalid PF entry */
	if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
	    GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
		goto out;

	/* Fill the block information */
	p_block->status = ECORE_IGU_STATUS_VALID;
	p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
	p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
	p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);

	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
		   "IGU_BLOCK: [SB 0x%04x, Value in CAM 0x%08x] func_id = %d"
		   " is_pf = %d vector_num = 0x%x\n",
		   sb_id, val, p_block->function_id, p_block->is_pf,
		   p_block->vector_number);

out:
	return val;
}
830
/* Walk the IGU CAM and build the per-hwfn IGU shadow (p_igu_info):
 * the default (slowpath) SB id, the PF's contiguous non-default vector
 * range [igu_base_sb, igu_base_sb + igu_sb_cnt), and the range of SBs
 * usable by this PF's VFs.
 *
 * Returns ECORE_NOMEM on allocation failure, ECORE_INVAL when the CAM
 * contents don't yield a sane PF configuration, ECORE_SUCCESS otherwise.
 */
enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
                                            struct ecore_ptt *p_ptt)
{
        struct ecore_igu_info *p_igu_info;
        struct ecore_igu_block *p_block;
        u16 sb_id, last_iov_sb_id = 0;
        u32 min_vf, max_vf, val;
        u16 prev_sb_id = 0xFF;

        p_hwfn->hw_info.p_igu_info = OSAL_ALLOC(p_hwfn->p_dev,
                                                GFP_KERNEL,
                                                sizeof(*p_igu_info));
        if (!p_hwfn->hw_info.p_igu_info)
                return ECORE_NOMEM;

        OSAL_MEMSET(p_hwfn->hw_info.p_igu_info, 0, sizeof(*p_igu_info));

        p_igu_info = p_hwfn->hw_info.p_igu_info;

        /* Initialize base sb / sb cnt for PFs and VFs */
        /* 0xffff == "not yet discovered" sentinel for each field */
        p_igu_info->igu_base_sb = 0xffff;
        p_igu_info->igu_sb_cnt = 0;
        p_igu_info->igu_dsb_id = 0xffff;
        p_igu_info->igu_base_sb_iov = 0xffff;

        /* NOTE(review): with min_vf == max_vf == 0 the VF branch below is
         * dead code; presumably an SRIOV-enabled build sets a real VF
         * range here - confirm against the full driver sources.
         */
        min_vf = 0;
        max_vf = 0;

        for (sb_id = 0; sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
             sb_id++) {
                p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
                val = ecore_int_igu_read_cam_block(p_hwfn, p_ptt, sb_id);
                /* First invalid PF entry terminates the scan */
                if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
                    GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
                        break;

                if (p_block->is_pf) {
                        if (p_block->function_id == p_hwfn->rel_pf_id) {
                                p_block->status |= ECORE_IGU_STATUS_PF;

                                if (p_block->vector_number == 0) {
                                        /* Vector 0 is the default SB */
                                        if (p_igu_info->igu_dsb_id == 0xffff)
                                                p_igu_info->igu_dsb_id = sb_id;
                                } else {
                                        /* Non-default vectors must occupy a
                                         * single contiguous run of SB ids;
                                         * bail out on a gap.
                                         */
                                        if (p_igu_info->igu_base_sb == 0xffff) {
                                                p_igu_info->igu_base_sb = sb_id;
                                        } else if (prev_sb_id != sb_id - 1) {
                                                DP_NOTICE(p_hwfn->p_dev, false,
                                                          "consecutive igu"
                                                          " vectors for HWFN"
                                                          " %x broken",
                                                          p_hwfn->rel_pf_id);
                                                break;
                                        }
                                        prev_sb_id = sb_id;
                                        /* we don't count the default */
                                        (p_igu_info->igu_sb_cnt)++;
                                }
                        }
                } else {
                        if ((p_block->function_id >= min_vf) &&
                            (p_block->function_id < max_vf)) {
                                /* Available for VFs of this PF */
                                if (p_igu_info->igu_base_sb_iov == 0xffff) {
                                        p_igu_info->igu_base_sb_iov = sb_id;
                                } else if (last_iov_sb_id != sb_id - 1) {
                                        /* Gap in the VF SB range: either an
                                         * uninitialized CAM entry (val == 0)
                                         * or a genuinely broken layout.
                                         */
                                        if (!val)
                                                DP_VERBOSE(p_hwfn->p_dev,
                                                           ECORE_MSG_INTR,
                                                           "First uninited IGU"
                                                           " CAM entry at"
                                                           " index 0x%04x\n",
                                                           sb_id);
                                        else
                                                DP_NOTICE(p_hwfn->p_dev, false,
                                                          "Consecutive igu"
                                                          " vectors for HWFN"
                                                          " %x vfs is broken"
                                                          " [jumps from %04x"
                                                          " to %04x]\n",
                                                          p_hwfn->rel_pf_id,
                                                          last_iov_sb_id,
                                                          sb_id);
                                        break;
                                }
                                p_block->status |= ECORE_IGU_STATUS_FREE;
                                p_hwfn->hw_info.p_igu_info->free_blks++;
                                last_iov_sb_id = sb_id;
                        }
                }
        }
        p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks;

        DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
                   "IGU igu_base_sb=0x%x [IOV 0x%x] igu_sb_cnt=%d [IOV 0x%x] "
                   "igu_dsb_id=0x%x\n",
                   p_igu_info->igu_base_sb, p_igu_info->igu_base_sb_iov,
                   p_igu_info->igu_sb_cnt, p_igu_info->igu_sb_cnt_iov,
                   p_igu_info->igu_dsb_id);

        /* Sanity: a PF must have found a default SB and at least one
         * non-default vector.
         */
        if (p_igu_info->igu_base_sb == 0xffff ||
            p_igu_info->igu_dsb_id == 0xffff || p_igu_info->igu_sb_cnt == 0) {
                DP_NOTICE(p_hwfn, true,
                          "IGU CAM returned invalid values igu_base_sb=0x%x "
                          "igu_sb_cnt=%d igu_dsb_id=0x%x\n",
                          p_igu_info->igu_base_sb, p_igu_info->igu_sb_cnt,
                          p_igu_info->igu_dsb_id);
                return ECORE_INVAL;
        }

        return ECORE_SUCCESS;
}
943
944 /**
945  * @brief Initialize igu runtime registers
946  *
947  * @param p_hwfn
948  */
949 void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn)
950 {
951         u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;
952
953         STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
954 }
955
956 #define LSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - \
957                           IGU_CMD_INT_ACK_BASE)
958 #define MSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - \
959                           IGU_CMD_INT_ACK_BASE)
960 u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn)
961 {
962         u32 intr_status_hi = 0, intr_status_lo = 0;
963         u64 intr_status = 0;
964
965         intr_status_lo = REG_RD(p_hwfn,
966                                 GTT_BAR0_MAP_REG_IGU_CMD +
967                                 LSB_IGU_CMD_ADDR * 8);
968         intr_status_hi = REG_RD(p_hwfn,
969                                 GTT_BAR0_MAP_REG_IGU_CMD +
970                                 MSB_IGU_CMD_ADDR * 8);
971         intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;
972
973         return intr_status;
974 }
975
/* Bind the slowpath DPC to this hw-function and mark it enabled. */
static void ecore_int_sp_dpc_setup(struct ecore_hwfn *p_hwfn)
{
        OSAL_DPC_INIT(p_hwfn->sp_dpc, p_hwfn);
        /* Flag set only after the DPC is initialized */
        p_hwfn->b_sp_dpc_enabled = true;
}
981
982 static enum _ecore_status_t ecore_int_sp_dpc_alloc(struct ecore_hwfn *p_hwfn)
983 {
984         p_hwfn->sp_dpc = OSAL_DPC_ALLOC(p_hwfn);
985         if (!p_hwfn->sp_dpc)
986                 return ECORE_NOMEM;
987
988         return ECORE_SUCCESS;
989 }
990
/* Release the slowpath DPC allocated by ecore_int_sp_dpc_alloc(). */
static void ecore_int_sp_dpc_free(struct ecore_hwfn *p_hwfn)
{
        OSAL_FREE(p_hwfn->p_dev, p_hwfn->sp_dpc);
}
995
996 enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn,
997                                      struct ecore_ptt *p_ptt)
998 {
999         enum _ecore_status_t rc = ECORE_SUCCESS;
1000
1001         rc = ecore_int_sp_dpc_alloc(p_hwfn);
1002         if (rc != ECORE_SUCCESS) {
1003                 DP_ERR(p_hwfn->p_dev, "Failed to allocate sp dpc mem\n");
1004                 return rc;
1005         }
1006
1007         rc = ecore_int_sp_sb_alloc(p_hwfn, p_ptt);
1008         if (rc != ECORE_SUCCESS) {
1009                 DP_ERR(p_hwfn->p_dev, "Failed to allocate sp sb mem\n");
1010                 return rc;
1011         }
1012
1013         return rc;
1014 }
1015
/* Tear down the hw-function's interrupt resources: slowpath SB,
 * attention SB and slowpath DPC.
 */
void ecore_int_free(struct ecore_hwfn *p_hwfn)
{
        ecore_int_sp_sb_free(p_hwfn);
        ecore_int_sb_attn_free(p_hwfn);
        ecore_int_sp_dpc_free(p_hwfn);
}
1022
1023 void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
1024 {
1025         if (!p_hwfn || !p_hwfn->p_sp_sb || !p_hwfn->p_sb_attn)
1026                 return;
1027
1028         ecore_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
1029         ecore_int_sp_dpc_setup(p_hwfn);
1030 }
1031
1032 void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
1033                            struct ecore_sb_cnt_info *p_sb_cnt_info)
1034 {
1035         struct ecore_igu_info *info = p_hwfn->hw_info.p_igu_info;
1036
1037         if (!info || !p_sb_cnt_info)
1038                 return;
1039
1040         p_sb_cnt_info->sb_cnt = info->igu_sb_cnt;
1041         p_sb_cnt_info->sb_iov_cnt = info->igu_sb_cnt_iov;
1042         p_sb_cnt_info->sb_free_blk = info->free_blks;
1043 }
1044
1045 u16 ecore_int_queue_id_from_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
1046 {
1047         struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
1048
1049         /* Determine origin of SB id */
1050         if ((sb_id >= p_info->igu_base_sb) &&
1051             (sb_id < p_info->igu_base_sb + p_info->igu_sb_cnt)) {
1052                 return sb_id - p_info->igu_base_sb;
1053         } else if ((sb_id >= p_info->igu_base_sb_iov) &&
1054                    (sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) {
1055                 return sb_id - p_info->igu_base_sb_iov + p_info->igu_sb_cnt;
1056         }
1057
1058         DP_NOTICE(p_hwfn, true, "SB %d not in range for function\n",
1059                   sb_id);
1060         return 0;
1061 }
1062
/* Clear the "interrupts requested" flag on every hw-function of the
 * device; called after interrupts are released post-ISR.
 */
void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev)
{
        int i;

        for_each_hwfn(p_dev, i)
                p_dev->hwfns[i].b_int_requested = false;
}