qede: add SRIOV support
[dpdk.git] drivers/net/qede/base/ecore_int.c
/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_spq.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_init_ops.h"
#include "ecore_rt_defs.h"
#include "ecore_int.h"
#include "ecore_hw.h"
#include "ecore_sriov.h"
#include "ecore_vf.h"
#include "ecore_hw_defs.h"
#include "ecore_hsi_common.h"
#include "ecore_mcp.h"

struct ecore_pi_info {
	ecore_int_comp_cb_t comp_cb;
	void *cookie;		/* Will be sent to the compl cb function */
};

struct ecore_sb_sp_info {
	struct ecore_sb_info sb_info;
	/* per protocol index data */
	struct ecore_pi_info pi_info_arr[PIS_PER_SB];
};

enum ecore_attention_type {
	ECORE_ATTN_TYPE_ATTN,
	ECORE_ATTN_TYPE_PARITY,
};

#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
	ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)

struct aeu_invert_reg_bit {
	char bit_name[30];

#define ATTENTION_PARITY		(1 << 0)

#define ATTENTION_LENGTH_MASK		(0x00000ff0)
#define ATTENTION_LENGTH_SHIFT		(4)
#define ATTENTION_LENGTH(flags)		(((flags) & ATTENTION_LENGTH_MASK) >> \
					 ATTENTION_LENGTH_SHIFT)
#define ATTENTION_SINGLE		(1 << ATTENTION_LENGTH_SHIFT)
#define ATTENTION_PAR			(ATTENTION_SINGLE | ATTENTION_PARITY)
#define ATTENTION_PAR_INT		((2 << ATTENTION_LENGTH_SHIFT) | \
					 ATTENTION_PARITY)

/* Multiple bits start with this offset */
#define ATTENTION_OFFSET_MASK		(0x000ff000)
#define ATTENTION_OFFSET_SHIFT		(12)

#define ATTENTION_CLEAR_ENABLE		(1 << 28)
#define ATTENTION_FW_DUMP		(1 << 29)
#define ATTENTION_PANIC_DUMP		(1 << 30)
	unsigned int flags;

	/* Callback to call if attention will be triggered */
	enum _ecore_status_t (*cb)(struct ecore_hwfn *p_hwfn);

	enum block_id block_index;
};

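/* Encoding note (for illustration, derived from the macros above):
 * ATTENTION_SINGLE describes a single non-parity bit (length 1 in the
 * [11:4] length field of flags), ATTENTION_PAR marks that single bit as
 * a parity source, and ATTENTION_PAR_INT covers an adjacent pair of bits
 * (length 2) that includes a parity indication.
 */
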
struct aeu_invert_reg {
	struct aeu_invert_reg_bit bits[32];
};

#define NUM_ATTN_REGS		(9)

#define ATTN_STATE_BITS		(0xfff)
#define ATTN_BITS_MASKABLE	(0x3ff)
struct ecore_sb_attn_info {
	/* Virtual & Physical address of the SB */
	struct atten_status_block *sb_attn;
	dma_addr_t sb_phys;

	/* Last seen running index */
	u16 index;

	/* A mask of the AEU bits resulting in a parity error */
	u32 parity_mask[NUM_ATTN_REGS];

	/* A pointer to the attention description structure */
	struct aeu_invert_reg *p_aeu_desc;

	/* Previously asserted attentions, which have not yet been
	 * de-asserted
	 */
	u16 known_attn;

	/* Cleanup address for the link's general hw attention */
	u32 mfw_attn_addr;
};

static u16 ecore_attn_update_idx(struct ecore_hwfn *p_hwfn,
				 struct ecore_sb_attn_info *p_sb_desc)
{
	u16 rc = 0, index;

	OSAL_MMIOWB(p_hwfn->p_dev);

	index = OSAL_LE16_TO_CPU(p_sb_desc->sb_attn->sb_index);
	if (p_sb_desc->index != index) {
		p_sb_desc->index = index;
		rc = ECORE_SB_ATT_IDX;
	}

	OSAL_MMIOWB(p_hwfn->p_dev);

	return rc;
}

static void ecore_sb_ack_attn(struct ecore_hwfn *p_hwfn,
			      void OSAL_IOMEM *igu_addr, u32 ack_cons)
{
	struct igu_prod_cons_update igu_ack = { 0 };

	igu_ack.sb_id_and_flags =
	    ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
	     (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
	     (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
	     (IGU_SEG_ACCESS_ATTN <<
	      IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));

	DIRECT_REG_WR(p_hwfn, igu_addr, igu_ack.sb_id_and_flags);

	/* Both segments (interrupts & acks) are written to the same address;
	 * need to guarantee that all commands are received in-order by
	 * the HW.
	 */
	OSAL_MMIOWB(p_hwfn->p_dev);
	OSAL_BARRIER(p_hwfn->p_dev);
}

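/**
 * @brief ecore_int_sp_dpc - slowpath DPC for the default status block.
 *        Samples the SB and attention running indices, invokes any
 *        registered protocol-index callbacks, acks attentions and
 *        re-enables interrupt generation in the IGU.
 *
 * @param hwfn_cookie - opaque pointer to the owning struct ecore_hwfn
 */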
void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)hwfn_cookie;
	struct ecore_pi_info *pi_info = OSAL_NULL;
	struct ecore_sb_attn_info *sb_attn;
	struct ecore_sb_info *sb_info;
	int arr_size;
	u16 rc = 0;

	/* Without a valid hwfn there is no p_dev to log against */
	if (!p_hwfn)
		return;

	if (!p_hwfn->p_sp_sb) {
		DP_ERR(p_hwfn->p_dev, "DPC called - no p_sp_sb\n");
		return;
	}

	sb_info = &p_hwfn->p_sp_sb->sb_info;
	arr_size = OSAL_ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);

	if (!p_hwfn->p_sb_attn) {
		DP_ERR(p_hwfn->p_dev, "DPC called - no p_sb_attn\n");
		return;
	}
	sb_attn = p_hwfn->p_sb_attn;

	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
		   p_hwfn, p_hwfn->my_id);

	/* Disable ack for def status block. Required both for msix and
	 * inta in non-mask mode; in inta it does no harm.
	 */
	ecore_sb_ack(sb_info, IGU_INT_DISABLE, 0);

	/* Gather Interrupts/Attentions information */
	if (!sb_info->sb_virt) {
		DP_ERR(p_hwfn->p_dev,
		       "Interrupt Status block is NULL -"
		       " cannot check for new interrupts!\n");
	} else {
		u32 tmp_index = sb_info->sb_ack;
		rc = ecore_sb_update_sb_idx(sb_info);
		DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
			   "Interrupt indices: 0x%08x --> 0x%08x\n",
			   tmp_index, sb_info->sb_ack);
	}

	if (!sb_attn->sb_attn) {
		DP_ERR(p_hwfn->p_dev,
		       "Attentions Status block is NULL -"
		       " cannot check for new attentions!\n");
	} else {
		u16 tmp_index = sb_attn->index;

		rc |= ecore_attn_update_idx(p_hwfn, sb_attn);
		DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
			   "Attention indices: 0x%08x --> 0x%08x\n",
			   tmp_index, sb_attn->index);
	}

	/* Check if we expect interrupts at this time. If not, just ack them */
	if (!(rc & ECORE_SB_EVENT_MASK)) {
		ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
		return;
	}

	/* Check the validity of the DPC ptt. If not valid, ack interrupts
	 * and fail.
	 */
	if (!p_hwfn->p_dpc_ptt) {
		DP_NOTICE(p_hwfn->p_dev, true, "Failed to allocate PTT\n");
		ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
		return;
	}

	if (rc & ECORE_SB_IDX) {
		int pi;

		/* Since we only looked at the SB index, it's possible more
		 * than a single protocol-index on the SB incremented.
		 * Iterate over all configured protocol indices and check
		 * whether something happened for each.
		 */
		for (pi = 0; pi < arr_size; pi++) {
			pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
			if (pi_info->comp_cb != OSAL_NULL)
				pi_info->comp_cb(p_hwfn, pi_info->cookie);
		}
	}

	if (rc & ECORE_SB_ATT_IDX) {
		/* This should be done before the interrupts are enabled,
		 * since otherwise a new attention will be generated.
		 */
		ecore_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);
	}

	ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
}

static void ecore_int_sb_attn_free(struct ecore_hwfn *p_hwfn)
{
	struct ecore_sb_attn_info *p_sb = p_hwfn->p_sb_attn;

	if (!p_sb)
		return;

	if (p_sb->sb_attn) {
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_sb->sb_attn,
				       p_sb->sb_phys,
				       SB_ATTN_ALIGNED_SIZE(p_hwfn));
	}
	OSAL_FREE(p_hwfn->p_dev, p_sb);
}

/* coalescing timeout = timeset << (timer_res + 1) */
#ifdef RTE_LIBRTE_QEDE_RX_COAL_US
#define ECORE_CAU_DEF_RX_USECS RTE_LIBRTE_QEDE_RX_COAL_US
#else
#define ECORE_CAU_DEF_RX_USECS 24
#endif

#ifdef RTE_LIBRTE_QEDE_TX_COAL_US
#define ECORE_CAU_DEF_TX_USECS RTE_LIBRTE_QEDE_TX_COAL_US
#else
#define ECORE_CAU_DEF_TX_USECS 48
#endif

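/* Worked example (illustrative): the timer resolution is fixed to 1 (see
 * ecore_init_cau_sb_entry() below), so for the default rx-usecs of 24,
 * ecore_int_cau_conf_sb() derives timeset = 24 >> (1 + 1) = 6 and the
 * effective coalescing timeout is timeset << (timer_res + 1) =
 * 6 << 2 = 24 usecs; requested values are in effect rounded down to a
 * multiple of 4 usecs.
 */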
void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn,
			     struct cau_sb_entry *p_sb_entry,
			     u8 pf_id, u16 vf_number, u8 vf_valid)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u32 cau_state;

	OSAL_MEMSET(p_sb_entry, 0, sizeof(*p_sb_entry));

	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);

	/* setting the time resolution to a fixed value ( = 1) */
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0,
		  ECORE_CAU_DEF_RX_TIMER_RES);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1,
		  ECORE_CAU_DEF_TX_TIMER_RES);

	cau_state = CAU_HC_DISABLE_STATE;

	if (p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
		cau_state = CAU_HC_ENABLE_STATE;
		if (!p_dev->rx_coalesce_usecs) {
			p_dev->rx_coalesce_usecs = ECORE_CAU_DEF_RX_USECS;
			DP_INFO(p_dev, "Coalesce params rx-usecs=%u\n",
				p_dev->rx_coalesce_usecs);
		}
		if (!p_dev->tx_coalesce_usecs) {
			p_dev->tx_coalesce_usecs = ECORE_CAU_DEF_TX_USECS;
			DP_INFO(p_dev, "Coalesce params tx-usecs=%u\n",
				p_dev->tx_coalesce_usecs);
		}
	}

	SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
	SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
}

void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
			   struct ecore_ptt *p_ptt,
			   dma_addr_t sb_phys, u16 igu_sb_id,
			   u16 vf_number, u8 vf_valid)
{
	struct cau_sb_entry sb_entry;

	ecore_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
				vf_number, vf_valid);

	if (p_hwfn->hw_init_done) {
		/* Wide-bus, initialize via DMAE */
		u64 phys_addr = (u64)sb_phys;

		ecore_dmae_host2grc(p_hwfn, p_ptt,
				    (u64)(osal_uintptr_t)&phys_addr,
				    CAU_REG_SB_ADDR_MEMORY +
				    igu_sb_id * sizeof(u64), 2, 0);
		ecore_dmae_host2grc(p_hwfn, p_ptt,
				    (u64)(osal_uintptr_t)&sb_entry,
				    CAU_REG_SB_VAR_MEMORY +
				    igu_sb_id * sizeof(u64), 2, 0);
	} else {
		/* Initialize Status Block Address */
		STORE_RT_REG_AGG(p_hwfn,
				 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
				 igu_sb_id * 2, sb_phys);

		STORE_RT_REG_AGG(p_hwfn,
				 CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
				 igu_sb_id * 2, sb_entry);
	}

	/* Configure pi coalescing if set */
	if (p_hwfn->p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
		u8 num_tc = 1;	/* @@@TBD aelior ECORE_MULTI_COS */
		u8 timeset = p_hwfn->p_dev->rx_coalesce_usecs >>
		    (ECORE_CAU_DEF_RX_TIMER_RES + 1);
		u8 i;

		ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
				      ECORE_COAL_RX_STATE_MACHINE, timeset);

		timeset = p_hwfn->p_dev->tx_coalesce_usecs >>
		    (ECORE_CAU_DEF_TX_TIMER_RES + 1);

		for (i = 0; i < num_tc; i++) {
			ecore_int_cau_conf_pi(p_hwfn, p_ptt,
					      igu_sb_id, TX_PI(i),
					      ECORE_COAL_TX_STATE_MACHINE,
					      timeset);
		}
	}
}

void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
			   struct ecore_ptt *p_ptt,
			   u16 igu_sb_id, u32 pi_index,
			   enum ecore_coalescing_fsm coalescing_fsm, u8 timeset)
{
	struct cau_pi_entry pi_entry;
	u32 sb_offset, pi_offset;

	if (IS_VF(p_hwfn->p_dev))
		return;		/* @@@TBD MichalK- VF CAU... */

	sb_offset = igu_sb_id * PIS_PER_SB;
	OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry));

	SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
	if (coalescing_fsm == ECORE_COAL_RX_STATE_MACHINE)
		SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
	else
		SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);

	pi_offset = sb_offset + pi_index;
	if (p_hwfn->hw_init_done) {
		ecore_wr(p_hwfn, p_ptt,
			 CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
			 *((u32 *)&(pi_entry)));
	} else {
		STORE_RT_REG(p_hwfn,
			     CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
			     *((u32 *)&(pi_entry)));
	}
}

void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info)
{
	/* zero status block and ack counter */
	sb_info->sb_ack = 0;
	OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));

	if (IS_PF(p_hwfn->p_dev))
		ecore_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
				      sb_info->igu_sb_id, 0, 0);
}

/**
 * @brief ecore_get_igu_sb_id - given a sw sb_id return the
 *        igu_sb_id
 *
 * @param p_hwfn
 * @param sb_id
 *
 * @return u16
 */
static u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
{
	u16 igu_sb_id;

	/* Assuming continuous set of IGU SBs dedicated for given PF */
	if (sb_id == ECORE_SP_SB_ID)
		igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
	else if (IS_PF(p_hwfn->p_dev))
		igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb;
	else
		igu_sb_id = ecore_vf_get_igu_sb_id(p_hwfn, sb_id);

	if (sb_id == ECORE_SP_SB_ID)
		DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
			   "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
			   "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);

	return igu_sb_id;
}

enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       struct ecore_sb_info *sb_info,
				       void *sb_virt_addr,
				       dma_addr_t sb_phy_addr, u16 sb_id)
{
	sb_info->sb_virt = sb_virt_addr;
	sb_info->sb_phys = sb_phy_addr;

	sb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id);

	if (sb_id != ECORE_SP_SB_ID) {
		p_hwfn->sbs_info[sb_id] = sb_info;
		p_hwfn->num_sbs++;
	}
#ifdef ECORE_CONFIG_DIRECT_HWFN
	sb_info->p_hwfn = p_hwfn;
#endif
	sb_info->p_dev = p_hwfn->p_dev;

	/* The igu address will hold the absolute address that needs to be
	 * written to for a specific status block
	 */
	if (IS_PF(p_hwfn->p_dev)) {
		sb_info->igu_addr = (u8 OSAL_IOMEM *)p_hwfn->regview +
		    GTT_BAR0_MAP_REG_IGU_CMD + (sb_info->igu_sb_id << 3);

	} else {
		sb_info->igu_addr =
		    (u8 OSAL_IOMEM *)p_hwfn->regview +
		    PXP_VF_BAR0_START_IGU +
		    ((IGU_CMD_INT_ACK_BASE + sb_info->igu_sb_id) << 3);
	}

	sb_info->flags |= ECORE_SB_INFO_INIT;

	ecore_int_sb_setup(p_hwfn, p_ptt, sb_info);

	return ECORE_SUCCESS;
}

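/* Illustrative usage (sketch, not part of this file): a client allocates
 * coherent memory for the status block and then binds it to its IGU entry:
 *
 *	p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys,
 *					 SB_ALIGNED_SIZE(p_hwfn));
 *	if (p_virt)
 *		ecore_int_sb_init(p_hwfn, p_ptt, &sb_info,
 *				  p_virt, p_phys, sb_id);
 *
 * ecore_int_sp_sb_alloc() below follows exactly this pattern for the
 * slowpath SB (sb_id == ECORE_SP_SB_ID).
 */
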
enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
					  struct ecore_sb_info *sb_info,
					  u16 sb_id)
{
	if (sb_id == ECORE_SP_SB_ID) {
		DP_ERR(p_hwfn, "Do not free the sp SB using this function\n");
		return ECORE_INVAL;
	}

	/* zero status block and ack counter */
	sb_info->sb_ack = 0;
	OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));

	if (p_hwfn->sbs_info[sb_id] != OSAL_NULL) {
		p_hwfn->sbs_info[sb_id] = OSAL_NULL;
		p_hwfn->num_sbs--;
	}

	return ECORE_SUCCESS;
}

static void ecore_int_sp_sb_free(struct ecore_hwfn *p_hwfn)
{
	struct ecore_sb_sp_info *p_sb = p_hwfn->p_sp_sb;

	if (!p_sb)
		return;

	if (p_sb->sb_info.sb_virt) {
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_sb->sb_info.sb_virt,
				       p_sb->sb_info.sb_phys,
				       SB_ALIGNED_SIZE(p_hwfn));
	}

	OSAL_FREE(p_hwfn->p_dev, p_sb);
}

static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
						  struct ecore_ptt *p_ptt)
{
	struct ecore_sb_sp_info *p_sb;
	dma_addr_t p_phys = 0;
	void *p_virt;

	/* SB struct */
	p_sb =
	    OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
		       sizeof(struct ecore_sb_sp_info));
	if (!p_sb) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `struct ecore_sb_sp_info'\n");
		return ECORE_NOMEM;
	}

	/* SB ring  */
	p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					 &p_phys, SB_ALIGNED_SIZE(p_hwfn));
	if (!p_virt) {
		DP_NOTICE(p_hwfn, true, "Failed to allocate status block\n");
		OSAL_FREE(p_hwfn->p_dev, p_sb);
		return ECORE_NOMEM;
	}

	/* Status Block setup */
	p_hwfn->p_sp_sb = p_sb;
	ecore_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info,
			  p_virt, p_phys, ECORE_SP_SB_ID);

	OSAL_MEMSET(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn,
					   ecore_int_comp_cb_t comp_cb,
					   void *cookie,
					   u8 *sb_idx, __le16 **p_fw_cons)
{
	struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
	enum _ecore_status_t rc = ECORE_NOMEM;
	u8 pi;

	/* Look for a free index */
	for (pi = 0; pi < OSAL_ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
		if (p_sp_sb->pi_info_arr[pi].comp_cb != OSAL_NULL)
			continue;

		p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
		p_sp_sb->pi_info_arr[pi].cookie = cookie;
		*sb_idx = pi;
		*p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
		rc = ECORE_SUCCESS;
		break;
	}

	return rc;
}

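/* Illustrative usage (hypothetical names): a slowpath client registers its
 * completion handler against a free PI slot and gets back both the slot
 * index and a pointer to the firmware consumer it should track:
 *
 *	static void my_comp_cb(struct ecore_hwfn *p_hwfn, void *cookie);
 *
 *	u8 sb_idx;
 *	__le16 *p_fw_cons;
 *	rc = ecore_int_register_cb(p_hwfn, my_comp_cb, my_cookie,
 *				   &sb_idx, &p_fw_cons);
 *
 * The callback is then invoked from ecore_int_sp_dpc() whenever the SB
 * index advances; ecore_int_unregister_cb(p_hwfn, sb_idx) frees the slot.
 */
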
enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn, u8 pi)
{
	struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;

	if (p_sp_sb->pi_info_arr[pi].comp_cb == OSAL_NULL)
		return ECORE_NOMEM;

	p_sp_sb->pi_info_arr[pi].comp_cb = OSAL_NULL;
	p_sp_sb->pi_info_arr[pi].cookie = OSAL_NULL;
	return ECORE_SUCCESS;
}

u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn)
{
	return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
}

void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      enum ecore_int_mode int_mode)
{
	u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
		DP_INFO(p_hwfn, "FPGA - don't enable ATTN generation in IGU\n");
	else
#endif
		igu_pf_conf |= IGU_PF_CONF_ATTN_BIT_EN;

	p_hwfn->p_dev->int_mode = int_mode;
	switch (p_hwfn->p_dev->int_mode) {
	case ECORE_INT_MODE_INTA:
		igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
		igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		break;

	case ECORE_INT_MODE_MSI:
		igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
		igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		break;

	case ECORE_INT_MODE_MSIX:
		igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
		break;
	case ECORE_INT_MODE_POLL:
		break;
	}

	ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
}

static void ecore_int_igu_enable_attn(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt)
{
#ifndef ASIC_ONLY
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
		DP_INFO(p_hwfn,
			"FPGA - Don't enable Attentions in IGU and MISC\n");
		return;
	}
#endif

	/* Configure AEU signal change to produce attentions */
	ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
	ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
	ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
	ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);

	OSAL_MMIOWB(p_hwfn->p_dev);

	/* Unmask AEU signals toward IGU */
	ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
}

enum _ecore_status_t
ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		     enum ecore_int_mode int_mode)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 tmp;

	/* @@@tmp - Mask General HW attentions 0-31, Enable 32-36 */
	tmp = ecore_rd(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0);
	tmp |= 0xf;
	ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE3_IGU_OUT_0, 0);
	ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0, tmp);

	/* @@@tmp - Starting with MFW 8.2.1.0 we've started hitting AVS stop
	 * attentions. Since we're waiting for an answer from BRCM regarding
	 * this attention, we simply mask it in the meantime.
	 */
	tmp = ecore_rd(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0);
	tmp &= ~0x800;
	ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0, tmp);

	ecore_int_igu_enable_attn(p_hwfn, p_ptt);

	if ((int_mode != ECORE_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
		rc = OSAL_SLOWPATH_IRQ_REQ(p_hwfn);
		if (rc != ECORE_SUCCESS) {
			DP_NOTICE(p_hwfn, true,
				  "Slowpath IRQ request failed\n");
			return ECORE_NORESOURCES;
		}
		p_hwfn->b_int_requested = true;
	}

	/* Enable interrupt Generation */
	ecore_int_igu_enable_int(p_hwfn, p_ptt, int_mode);

	p_hwfn->b_int_enabled = 1;

	return rc;
}

void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt)
{
	p_hwfn->b_int_enabled = 0;

	if (IS_VF(p_hwfn->p_dev))
		return;

	ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
}

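/**
 * @brief ecore_int_igu_cleanup_sb - set or clear the cleanup bit of a given
 *        SB through the IGU command register interface: write the data
 *        register, then the control register, and poll the matching bit in
 *        the cleanup status registers until the HW acknowledges.
 */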
#define IGU_CLEANUP_SLEEP_LENGTH		(1000)
void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      u32 sb_id, bool cleanup_set, u16 opaque_fid)
{
	u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
	u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id;
	u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
	u8 type = 0;		/* FIXME MichalS type??? */

	OSAL_BUILD_BUG_ON((IGU_REG_CLEANUP_STATUS_4 -
			   IGU_REG_CLEANUP_STATUS_0) != 0x200);

	/* Use the Control Command Register to perform the cleanup. There is
	 * an option to do this using the IGU BAR, but then it can't be used
	 * for VFs.
	 */

	/* Set the data field */
	SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
	SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, type);
	SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);

	/* Set the control register */
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);

	ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);

	OSAL_BARRIER(p_hwfn->p_dev);

	ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);

	OSAL_MMIOWB(p_hwfn->p_dev);

	/* calculate where to read the status bit from */
	sb_bit = 1 << (sb_id % 32);
	sb_bit_addr = sb_id / 32 * sizeof(u32);

	sb_bit_addr += IGU_REG_CLEANUP_STATUS_0 + (0x80 * type);

	/* Now wait for the command to complete */
	while (--sleep_cnt) {
		val = ecore_rd(p_hwfn, p_ptt, sb_bit_addr);
		if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
			break;
		OSAL_MSLEEP(5);
	}

	if (!sleep_cnt)
		DP_NOTICE(p_hwfn, true,
			  "Timeout waiting for clear status 0x%08x [for sb %d]\n",
			  val, sb_id);
}

void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       u32 sb_id, u16 opaque, bool b_set)
{
	int pi;

	/* Set */
	if (b_set)
		ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 1, opaque);

	/* Clear */
	ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque);

	/* Clear the CAU for the SB */
	for (pi = 0; pi < 12; pi++)
		ecore_wr(p_hwfn, p_ptt,
			 CAU_REG_PI_MEMORY + (sb_id * 12 + pi) * 4, 0);
}

void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt,
				bool b_set, bool b_slowpath)
{
	u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb;
	u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt;
	u32 sb_id = 0, val = 0;

	/* @@@TBD MichalK temporary... should be moved to init-tool... */
	val = ecore_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
	val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
	val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
	ecore_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
	/* end temporary */

	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
		   "IGU cleaning SBs [%d,...,%d]\n",
		   igu_base_sb, igu_base_sb + igu_sb_cnt - 1);

	for (sb_id = igu_base_sb; sb_id < igu_base_sb + igu_sb_cnt; sb_id++)
		ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
						  p_hwfn->hw_info.opaque_fid,
						  b_set);

	if (!b_slowpath)
		return;

	sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
		   "IGU cleaning slowpath SB [%d]\n", sb_id);
	ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
					  p_hwfn->hw_info.opaque_fid, b_set);
}

static u32 ecore_int_igu_read_cam_block(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt, u16 sb_id)
{
	u32 val = ecore_rd(p_hwfn, p_ptt,
			   IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id);
	struct ecore_igu_block *p_block;

	p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];

	/* stop scanning when we hit the first invalid PF entry */
	if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
	    GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
		goto out;

	/* Fill the block information */
	p_block->status = ECORE_IGU_STATUS_VALID;
	p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
	p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
	p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);

	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
		   "IGU_BLOCK: [SB 0x%04x, Value in CAM 0x%08x] func_id = %d"
		   " is_pf = %d vector_num = 0x%x\n",
		   sb_id, val, p_block->function_id, p_block->is_pf,
		   p_block->vector_number);

out:
	return val;
}

enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt)
{
	struct ecore_igu_info *p_igu_info;
	struct ecore_igu_block *p_block;
	u16 sb_id, last_iov_sb_id = 0;
	u32 min_vf, max_vf, val;
	u16 prev_sb_id = 0xFF;

	p_hwfn->hw_info.p_igu_info = OSAL_ALLOC(p_hwfn->p_dev,
						GFP_KERNEL,
						sizeof(*p_igu_info));
	if (!p_hwfn->hw_info.p_igu_info)
		return ECORE_NOMEM;

	OSAL_MEMSET(p_hwfn->hw_info.p_igu_info, 0, sizeof(*p_igu_info));

	p_igu_info = p_hwfn->hw_info.p_igu_info;

	/* Initialize base sb / sb cnt for PFs and VFs */
	p_igu_info->igu_base_sb = 0xffff;
	p_igu_info->igu_sb_cnt = 0;
	p_igu_info->igu_dsb_id = 0xffff;
	p_igu_info->igu_base_sb_iov = 0xffff;

#ifdef CONFIG_ECORE_SRIOV
	min_vf = p_hwfn->hw_info.first_vf_in_pf;
	max_vf = p_hwfn->hw_info.first_vf_in_pf +
	    p_hwfn->p_dev->sriov_info.total_vfs;
#else
	min_vf = 0;
	max_vf = 0;
#endif

	for (sb_id = 0; sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
	     sb_id++) {
		p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
		val = ecore_int_igu_read_cam_block(p_hwfn, p_ptt, sb_id);
		if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
		    GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
			break;

		if (p_block->is_pf) {
			if (p_block->function_id == p_hwfn->rel_pf_id) {
				p_block->status |= ECORE_IGU_STATUS_PF;

				if (p_block->vector_number == 0) {
					if (p_igu_info->igu_dsb_id == 0xffff)
						p_igu_info->igu_dsb_id = sb_id;
				} else {
					if (p_igu_info->igu_base_sb == 0xffff) {
						p_igu_info->igu_base_sb = sb_id;
					} else if (prev_sb_id != sb_id - 1) {
						DP_NOTICE(p_hwfn->p_dev, false,
							  "Consecutive IGU"
							  " vectors for HWFN"
							  " %x broken\n",
							  p_hwfn->rel_pf_id);
						break;
					}
					prev_sb_id = sb_id;
					/* we don't count the default */
					(p_igu_info->igu_sb_cnt)++;
				}
			}
		} else {
			if ((p_block->function_id >= min_vf) &&
			    (p_block->function_id < max_vf)) {
				/* Available for VFs of this PF */
				if (p_igu_info->igu_base_sb_iov == 0xffff) {
					p_igu_info->igu_base_sb_iov = sb_id;
				} else if (last_iov_sb_id != sb_id - 1) {
					if (!val)
						DP_VERBOSE(p_hwfn->p_dev,
							   ECORE_MSG_INTR,
							   "First uninitialized"
							   " IGU CAM entry at"
							   " index 0x%04x\n",
							   sb_id);
					else
						DP_NOTICE(p_hwfn->p_dev, false,
							  "Consecutive IGU"
							  " vectors for HWFN"
							  " %x VFs broken"
							  " [jumps from %04x"
							  " to %04x]\n",
							  p_hwfn->rel_pf_id,
							  last_iov_sb_id,
							  sb_id);
					break;
				}
				p_block->status |= ECORE_IGU_STATUS_FREE;
				p_hwfn->hw_info.p_igu_info->free_blks++;
				last_iov_sb_id = sb_id;
			}
		}
	}
	p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks;

	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
		   "IGU igu_base_sb=0x%x [IOV 0x%x] igu_sb_cnt=%d [IOV 0x%x] "
		   "igu_dsb_id=0x%x\n",
		   p_igu_info->igu_base_sb, p_igu_info->igu_base_sb_iov,
		   p_igu_info->igu_sb_cnt, p_igu_info->igu_sb_cnt_iov,
		   p_igu_info->igu_dsb_id);

	if (p_igu_info->igu_base_sb == 0xffff ||
	    p_igu_info->igu_dsb_id == 0xffff || p_igu_info->igu_sb_cnt == 0) {
		DP_NOTICE(p_hwfn, true,
			  "IGU CAM returned invalid values igu_base_sb=0x%x "
			  "igu_sb_cnt=%d igu_dsb_id=0x%x\n",
			  p_igu_info->igu_base_sb, p_igu_info->igu_sb_cnt,
			  p_igu_info->igu_dsb_id);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

/**
 * @brief Initialize igu runtime registers
 *
 * @param p_hwfn
 */
void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn)
{
	u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;

	STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
}

#define LSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - \
			  IGU_CMD_INT_ACK_BASE)
#define MSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - \
			  IGU_CMD_INT_ACK_BASE)
u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn)
{
	u32 intr_status_hi = 0, intr_status_lo = 0;
	u64 intr_status = 0;

	intr_status_lo = REG_RD(p_hwfn,
				GTT_BAR0_MAP_REG_IGU_CMD +
				LSB_IGU_CMD_ADDR * 8);
	intr_status_hi = REG_RD(p_hwfn,
				GTT_BAR0_MAP_REG_IGU_CMD +
				MSB_IGU_CMD_ADDR * 8);
	intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;

	return intr_status;
}

static void ecore_int_sp_dpc_setup(struct ecore_hwfn *p_hwfn)
{
	OSAL_DPC_INIT(p_hwfn->sp_dpc, p_hwfn);
	p_hwfn->b_sp_dpc_enabled = true;
}

static enum _ecore_status_t ecore_int_sp_dpc_alloc(struct ecore_hwfn *p_hwfn)
{
	p_hwfn->sp_dpc = OSAL_DPC_ALLOC(p_hwfn);
	if (!p_hwfn->sp_dpc)
		return ECORE_NOMEM;

	return ECORE_SUCCESS;
}

static void ecore_int_sp_dpc_free(struct ecore_hwfn *p_hwfn)
{
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->sp_dpc);
}

enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	rc = ecore_int_sp_dpc_alloc(p_hwfn);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn->p_dev, "Failed to allocate sp dpc mem\n");
		return rc;
	}

	rc = ecore_int_sp_sb_alloc(p_hwfn, p_ptt);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn->p_dev, "Failed to allocate sp sb mem\n");
		return rc;
	}

	return rc;
}

void ecore_int_free(struct ecore_hwfn *p_hwfn)
{
	ecore_int_sp_sb_free(p_hwfn);
	ecore_int_sb_attn_free(p_hwfn);
	ecore_int_sp_dpc_free(p_hwfn);
}

void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	if (!p_hwfn || !p_hwfn->p_sp_sb || !p_hwfn->p_sb_attn)
		return;

	ecore_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
	ecore_int_sp_dpc_setup(p_hwfn);
}

void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
			   struct ecore_sb_cnt_info *p_sb_cnt_info)
{
	struct ecore_igu_info *info = p_hwfn->hw_info.p_igu_info;

	if (!info || !p_sb_cnt_info)
		return;

	p_sb_cnt_info->sb_cnt = info->igu_sb_cnt;
	p_sb_cnt_info->sb_iov_cnt = info->igu_sb_cnt_iov;
	p_sb_cnt_info->sb_free_blk = info->free_blks;
}

u16 ecore_int_queue_id_from_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
{
	struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;

	/* Determine origin of SB id */
	if ((sb_id >= p_info->igu_base_sb) &&
	    (sb_id < p_info->igu_base_sb + p_info->igu_sb_cnt)) {
		return sb_id - p_info->igu_base_sb;
	} else if ((sb_id >= p_info->igu_base_sb_iov) &&
		   (sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) {
		return sb_id - p_info->igu_base_sb_iov + p_info->igu_sb_cnt;
	}

	DP_NOTICE(p_hwfn, true, "SB %d not in range for function\n",
		  sb_id);
	return 0;
}

void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev)
{
	int i;

	for_each_hwfn(p_dev, i)
		p_dev->hwfns[i].b_int_requested = false;
}