/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */
#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_spq.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_init_ops.h"
#include "ecore_rt_defs.h"
#include "ecore_int.h"
#include "ecore_hw.h"
#include "ecore_hw_defs.h"
#include "ecore_hsi_common.h"
#include "ecore_mcp.h"
23 struct ecore_pi_info {
24 ecore_int_comp_cb_t comp_cb;
25 void *cookie; /* Will be sent to the compl cb function */
28 struct ecore_sb_sp_info {
29 struct ecore_sb_info sb_info;
30 /* per protocol index data */
31 struct ecore_pi_info pi_info_arr[PIS_PER_SB];
34 enum ecore_attention_type {
36 ECORE_ATTN_TYPE_PARITY,
39 #define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
40 ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)
42 struct aeu_invert_reg_bit {
45 #define ATTENTION_PARITY (1 << 0)
47 #define ATTENTION_LENGTH_MASK (0x00000ff0)
48 #define ATTENTION_LENGTH_SHIFT (4)
49 #define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \
50 ATTENTION_LENGTH_SHIFT)
51 #define ATTENTION_SINGLE (1 << ATTENTION_LENGTH_SHIFT)
52 #define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY)
53 #define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \
56 /* Multiple bits start with this offset */
57 #define ATTENTION_OFFSET_MASK (0x000ff000)
58 #define ATTENTION_OFFSET_SHIFT (12)
60 #define ATTENTION_CLEAR_ENABLE (1 << 28)
61 #define ATTENTION_FW_DUMP (1 << 29)
62 #define ATTENTION_PANIC_DUMP (1 << 30)
65 /* Callback to call if attention will be triggered */
66 enum _ecore_status_t (*cb)(struct ecore_hwfn *p_hwfn);
68 enum block_id block_index;
71 struct aeu_invert_reg {
72 struct aeu_invert_reg_bit bits[32];
75 #define NUM_ATTN_REGS (9)
77 #define ATTN_STATE_BITS (0xfff)
78 #define ATTN_BITS_MASKABLE (0x3ff)
79 struct ecore_sb_attn_info {
80 /* Virtual & Physical address of the SB */
81 struct atten_status_block *sb_attn;
84 /* Last seen running index */
87 /* A mask of the AEU bits resulting in a parity error */
88 u32 parity_mask[NUM_ATTN_REGS];
90 /* A pointer to the attention description structure */
91 struct aeu_invert_reg *p_aeu_desc;
93 /* Previously asserted attentions, which are still unasserted */
96 /* Cleanup address for the link's general hw attention */
100 static u16 ecore_attn_update_idx(struct ecore_hwfn *p_hwfn,
101 struct ecore_sb_attn_info *p_sb_desc)
105 OSAL_MMIOWB(p_hwfn->p_dev);
107 index = OSAL_LE16_TO_CPU(p_sb_desc->sb_attn->sb_index);
108 if (p_sb_desc->index != index) {
109 p_sb_desc->index = index;
110 rc = ECORE_SB_ATT_IDX;
113 OSAL_MMIOWB(p_hwfn->p_dev);
118 static void ecore_sb_ack_attn(struct ecore_hwfn *p_hwfn,
119 void OSAL_IOMEM *igu_addr, u32 ack_cons)
121 struct igu_prod_cons_update igu_ack = { 0 };
123 igu_ack.sb_id_and_flags =
124 ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
125 (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
126 (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
127 (IGU_SEG_ACCESS_ATTN <<
128 IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
130 DIRECT_REG_WR(p_hwfn, igu_addr, igu_ack.sb_id_and_flags);
132 /* Both segments (interrupts & acks) are written to same place address;
133 * Need to guarantee all commands will be received (in-order) by HW.
135 OSAL_MMIOWB(p_hwfn->p_dev);
136 OSAL_BARRIER(p_hwfn->p_dev);
139 void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)
141 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)hwfn_cookie;
142 struct ecore_pi_info *pi_info = OSAL_NULL;
143 struct ecore_sb_attn_info *sb_attn;
144 struct ecore_sb_info *sb_info;
149 DP_ERR(p_hwfn->p_dev, "DPC called - no hwfn!\n");
153 if (!p_hwfn->p_sp_sb) {
154 DP_ERR(p_hwfn->p_dev, "DPC called - no p_sp_sb\n");
158 sb_info = &p_hwfn->p_sp_sb->sb_info;
159 arr_size = OSAL_ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
161 DP_ERR(p_hwfn->p_dev,
162 "Status block is NULL - cannot ack interrupts\n");
166 if (!p_hwfn->p_sb_attn) {
167 DP_ERR(p_hwfn->p_dev, "DPC called - no p_sb_attn");
170 sb_attn = p_hwfn->p_sb_attn;
172 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
173 p_hwfn, p_hwfn->my_id);
175 /* Disable ack for def status block. Required both for msix +
176 * inta in non-mask mode, in inta does no harm.
178 ecore_sb_ack(sb_info, IGU_INT_DISABLE, 0);
180 /* Gather Interrupts/Attentions information */
181 if (!sb_info->sb_virt) {
182 DP_ERR(p_hwfn->p_dev,
183 "Interrupt Status block is NULL -"
184 " cannot check for new interrupts!\n");
186 u32 tmp_index = sb_info->sb_ack;
187 rc = ecore_sb_update_sb_idx(sb_info);
188 DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
189 "Interrupt indices: 0x%08x --> 0x%08x\n",
190 tmp_index, sb_info->sb_ack);
193 if (!sb_attn || !sb_attn->sb_attn) {
194 DP_ERR(p_hwfn->p_dev,
195 "Attentions Status block is NULL -"
196 " cannot check for new attentions!\n");
198 u16 tmp_index = sb_attn->index;
200 rc |= ecore_attn_update_idx(p_hwfn, sb_attn);
201 DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
202 "Attention indices: 0x%08x --> 0x%08x\n",
203 tmp_index, sb_attn->index);
206 /* Check if we expect interrupts at this time. if not just ack them */
207 if (!(rc & ECORE_SB_EVENT_MASK)) {
208 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
212 /* Check the validity of the DPC ptt. If not ack interrupts and fail */
213 if (!p_hwfn->p_dpc_ptt) {
214 DP_NOTICE(p_hwfn->p_dev, true, "Failed to allocate PTT\n");
215 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
219 if (rc & ECORE_SB_IDX) {
222 /* Since we only looked at the SB index, it's possible more
223 * than a single protocol-index on the SB incremented.
224 * Iterate over all configured protocol indices and check
225 * whether something happened for each.
227 for (pi = 0; pi < arr_size; pi++) {
228 pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
229 if (pi_info->comp_cb != OSAL_NULL)
230 pi_info->comp_cb(p_hwfn, pi_info->cookie);
234 if (sb_attn && (rc & ECORE_SB_ATT_IDX)) {
235 /* This should be done before the interrupts are enabled,
236 * since otherwise a new attention will be generated.
238 ecore_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);
241 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
244 static void ecore_int_sb_attn_free(struct ecore_hwfn *p_hwfn)
246 struct ecore_sb_attn_info *p_sb = p_hwfn->p_sb_attn;
252 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_sb->sb_attn,
254 SB_ATTN_ALIGNED_SIZE(p_hwfn));
256 OSAL_FREE(p_hwfn->p_dev, p_sb);
/* coalescing timeout = timeset << (timer_res + 1) */
#ifdef RTE_LIBRTE_QEDE_RX_COAL_US
#define ECORE_CAU_DEF_RX_USECS RTE_LIBRTE_QEDE_RX_COAL_US
#else
#define ECORE_CAU_DEF_RX_USECS 24
#endif
#ifdef RTE_LIBRTE_QEDE_TX_COAL_US
#define ECORE_CAU_DEF_TX_USECS RTE_LIBRTE_QEDE_TX_COAL_US
#else
#define ECORE_CAU_DEF_TX_USECS 48
#endif
272 void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn,
273 struct cau_sb_entry *p_sb_entry,
274 u8 pf_id, u16 vf_number, u8 vf_valid)
276 struct ecore_dev *p_dev = p_hwfn->p_dev;
279 OSAL_MEMSET(p_sb_entry, 0, sizeof(*p_sb_entry));
281 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
282 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
283 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
284 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
285 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
287 /* setting the time resultion to a fixed value ( = 1) */
288 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0,
289 ECORE_CAU_DEF_RX_TIMER_RES);
290 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1,
291 ECORE_CAU_DEF_TX_TIMER_RES);
293 cau_state = CAU_HC_DISABLE_STATE;
295 if (p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
296 cau_state = CAU_HC_ENABLE_STATE;
297 if (!p_dev->rx_coalesce_usecs) {
298 p_dev->rx_coalesce_usecs = ECORE_CAU_DEF_RX_USECS;
299 DP_INFO(p_dev, "Coalesce params rx-usecs=%u\n",
300 p_dev->rx_coalesce_usecs);
302 if (!p_dev->tx_coalesce_usecs) {
303 p_dev->tx_coalesce_usecs = ECORE_CAU_DEF_TX_USECS;
304 DP_INFO(p_dev, "Coalesce params tx-usecs=%u\n",
305 p_dev->tx_coalesce_usecs);
309 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
310 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
313 void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
314 struct ecore_ptt *p_ptt,
315 dma_addr_t sb_phys, u16 igu_sb_id,
316 u16 vf_number, u8 vf_valid)
318 struct cau_sb_entry sb_entry;
320 ecore_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
321 vf_number, vf_valid);
323 if (p_hwfn->hw_init_done) {
324 /* Wide-bus, initialize via DMAE */
325 u64 phys_addr = (u64)sb_phys;
327 ecore_dmae_host2grc(p_hwfn, p_ptt,
328 (u64)(osal_uintptr_t)&phys_addr,
329 CAU_REG_SB_ADDR_MEMORY +
330 igu_sb_id * sizeof(u64), 2, 0);
331 ecore_dmae_host2grc(p_hwfn, p_ptt,
332 (u64)(osal_uintptr_t)&sb_entry,
333 CAU_REG_SB_VAR_MEMORY +
334 igu_sb_id * sizeof(u64), 2, 0);
336 /* Initialize Status Block Address */
337 STORE_RT_REG_AGG(p_hwfn,
338 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
339 igu_sb_id * 2, sb_phys);
341 STORE_RT_REG_AGG(p_hwfn,
342 CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
343 igu_sb_id * 2, sb_entry);
346 /* Configure pi coalescing if set */
347 if (p_hwfn->p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
348 u8 num_tc = 1; /* @@@TBD aelior ECORE_MULTI_COS */
349 u8 timeset = p_hwfn->p_dev->rx_coalesce_usecs >>
350 (ECORE_CAU_DEF_RX_TIMER_RES + 1);
353 ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
354 ECORE_COAL_RX_STATE_MACHINE, timeset);
356 timeset = p_hwfn->p_dev->tx_coalesce_usecs >>
357 (ECORE_CAU_DEF_TX_TIMER_RES + 1);
359 for (i = 0; i < num_tc; i++) {
360 ecore_int_cau_conf_pi(p_hwfn, p_ptt,
362 ECORE_COAL_TX_STATE_MACHINE,
368 void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
369 struct ecore_ptt *p_ptt,
370 u16 igu_sb_id, u32 pi_index,
371 enum ecore_coalescing_fsm coalescing_fsm, u8 timeset)
373 struct cau_pi_entry pi_entry;
374 u32 sb_offset, pi_offset;
376 sb_offset = igu_sb_id * PIS_PER_SB;
377 OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry));
379 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
380 if (coalescing_fsm == ECORE_COAL_RX_STATE_MACHINE)
381 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
383 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);
385 pi_offset = sb_offset + pi_index;
386 if (p_hwfn->hw_init_done) {
387 ecore_wr(p_hwfn, p_ptt,
388 CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
389 *((u32 *)&(pi_entry)));
392 CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
393 *((u32 *)&(pi_entry)));
397 void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
398 struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info)
400 /* zero status block and ack counter */
402 OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
404 ecore_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
405 sb_info->igu_sb_id, 0, 0);
409 * @brief ecore_get_igu_sb_id - given a sw sb_id return the
417 static u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
421 /* Assuming continuous set of IGU SBs dedicated for given PF */
422 if (sb_id == ECORE_SP_SB_ID)
423 igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
425 igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb;
427 if (sb_id == ECORE_SP_SB_ID)
428 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
429 "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
431 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
432 "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);
437 enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
438 struct ecore_ptt *p_ptt,
439 struct ecore_sb_info *sb_info,
441 dma_addr_t sb_phy_addr, u16 sb_id)
443 sb_info->sb_virt = sb_virt_addr;
444 sb_info->sb_phys = sb_phy_addr;
446 sb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id);
448 if (sb_id != ECORE_SP_SB_ID) {
449 p_hwfn->sbs_info[sb_id] = sb_info;
452 #ifdef ECORE_CONFIG_DIRECT_HWFN
453 sb_info->p_hwfn = p_hwfn;
455 sb_info->p_dev = p_hwfn->p_dev;
457 /* The igu address will hold the absolute address that needs to be
458 * written to for a specific status block
460 sb_info->igu_addr = (u8 OSAL_IOMEM *)p_hwfn->regview +
461 GTT_BAR0_MAP_REG_IGU_CMD + (sb_info->igu_sb_id << 3);
463 sb_info->flags |= ECORE_SB_INFO_INIT;
465 ecore_int_sb_setup(p_hwfn, p_ptt, sb_info);
467 return ECORE_SUCCESS;
470 enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
471 struct ecore_sb_info *sb_info,
474 if (sb_id == ECORE_SP_SB_ID) {
475 DP_ERR(p_hwfn, "Do Not free sp sb using this function");
479 /* zero status block and ack counter */
481 OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
483 if (p_hwfn->sbs_info[sb_id] != OSAL_NULL) {
484 p_hwfn->sbs_info[sb_id] = OSAL_NULL;
488 return ECORE_SUCCESS;
491 static void ecore_int_sp_sb_free(struct ecore_hwfn *p_hwfn)
493 struct ecore_sb_sp_info *p_sb = p_hwfn->p_sp_sb;
498 if (p_sb->sb_info.sb_virt) {
499 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
500 p_sb->sb_info.sb_virt,
501 p_sb->sb_info.sb_phys,
502 SB_ALIGNED_SIZE(p_hwfn));
505 OSAL_FREE(p_hwfn->p_dev, p_sb);
508 static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
509 struct ecore_ptt *p_ptt)
511 struct ecore_sb_sp_info *p_sb;
512 dma_addr_t p_phys = 0;
517 OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
518 sizeof(struct ecore_sb_sp_info));
520 DP_NOTICE(p_hwfn, true,
521 "Failed to allocate `struct ecore_sb_info'");
526 p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
527 &p_phys, SB_ALIGNED_SIZE(p_hwfn));
529 DP_NOTICE(p_hwfn, true, "Failed to allocate status block");
530 OSAL_FREE(p_hwfn->p_dev, p_sb);
534 /* Status Block setup */
535 p_hwfn->p_sp_sb = p_sb;
536 ecore_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info,
537 p_virt, p_phys, ECORE_SP_SB_ID);
539 OSAL_MEMSET(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));
541 return ECORE_SUCCESS;
544 enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn,
545 ecore_int_comp_cb_t comp_cb,
547 u8 *sb_idx, __le16 **p_fw_cons)
549 struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
550 enum _ecore_status_t rc = ECORE_NOMEM;
553 /* Look for a free index */
554 for (pi = 0; pi < OSAL_ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
555 if (p_sp_sb->pi_info_arr[pi].comp_cb != OSAL_NULL)
558 p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
559 p_sp_sb->pi_info_arr[pi].cookie = cookie;
561 *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
569 enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn, u8 pi)
571 struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
573 if (p_sp_sb->pi_info_arr[pi].comp_cb == OSAL_NULL)
576 p_sp_sb->pi_info_arr[pi].comp_cb = OSAL_NULL;
577 p_sp_sb->pi_info_arr[pi].cookie = OSAL_NULL;
578 return ECORE_SUCCESS;
581 u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn)
583 return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
586 void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn,
587 struct ecore_ptt *p_ptt,
588 enum ecore_int_mode int_mode)
590 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;
593 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
594 DP_INFO(p_hwfn, "FPGA - don't enable ATTN generation in IGU\n");
597 igu_pf_conf |= IGU_PF_CONF_ATTN_BIT_EN;
599 p_hwfn->p_dev->int_mode = int_mode;
600 switch (p_hwfn->p_dev->int_mode) {
601 case ECORE_INT_MODE_INTA:
602 igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
603 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
606 case ECORE_INT_MODE_MSI:
607 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
608 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
611 case ECORE_INT_MODE_MSIX:
612 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
614 case ECORE_INT_MODE_POLL:
618 ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
621 static void ecore_int_igu_enable_attn(struct ecore_hwfn *p_hwfn,
622 struct ecore_ptt *p_ptt)
625 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
627 "FPGA - Don't enable Attentions in IGU and MISC\n");
632 /* Configure AEU signal change to produce attentions */
633 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
634 ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
635 ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
636 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);
638 OSAL_MMIOWB(p_hwfn->p_dev);
640 /* Unmask AEU signals toward IGU */
641 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
645 ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
646 enum ecore_int_mode int_mode)
648 enum _ecore_status_t rc = ECORE_SUCCESS;
651 /* @@@tmp - Mask General HW attentions 0-31, Enable 32-36 */
652 tmp = ecore_rd(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0);
654 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE3_IGU_OUT_0, 0);
655 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0, tmp);
657 /* @@@tmp - Starting with MFW 8.2.1.0 we've started hitting AVS stop
658 * attentions. Since we're waiting for BRCM answer regarding this
659 * attention, in the meanwhile we simply mask it.
661 tmp = ecore_rd(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0);
663 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0, tmp);
665 ecore_int_igu_enable_attn(p_hwfn, p_ptt);
667 if ((int_mode != ECORE_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
668 rc = OSAL_SLOWPATH_IRQ_REQ(p_hwfn);
669 if (rc != ECORE_SUCCESS) {
670 DP_NOTICE(p_hwfn, true,
671 "Slowpath IRQ request failed\n");
672 return ECORE_NORESOURCES;
674 p_hwfn->b_int_requested = true;
677 /* Enable interrupt Generation */
678 ecore_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
680 p_hwfn->b_int_enabled = 1;
685 void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
686 struct ecore_ptt *p_ptt)
688 p_hwfn->b_int_enabled = 0;
690 ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
693 #define IGU_CLEANUP_SLEEP_LENGTH (1000)
694 void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
695 struct ecore_ptt *p_ptt,
696 u32 sb_id, bool cleanup_set, u16 opaque_fid)
698 u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
699 u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id;
700 u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
701 u8 type = 0; /* FIXME MichalS type??? */
703 OSAL_BUILD_BUG_ON((IGU_REG_CLEANUP_STATUS_4 -
704 IGU_REG_CLEANUP_STATUS_0) != 0x200);
706 /* USE Control Command Register to perform cleanup. There is an
707 * option to do this using IGU bar, but then it can't be used for VFs.
710 /* Set the data field */
711 SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
712 SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, type);
713 SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);
715 /* Set the control register */
716 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
717 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
718 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);
720 ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);
722 OSAL_BARRIER(p_hwfn->p_dev);
724 ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);
726 OSAL_MMIOWB(p_hwfn->p_dev);
728 /* calculate where to read the status bit from */
729 sb_bit = 1 << (sb_id % 32);
730 sb_bit_addr = sb_id / 32 * sizeof(u32);
732 sb_bit_addr += IGU_REG_CLEANUP_STATUS_0 + (0x80 * type);
734 /* Now wait for the command to complete */
735 while (--sleep_cnt) {
736 val = ecore_rd(p_hwfn, p_ptt, sb_bit_addr);
737 if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
743 DP_NOTICE(p_hwfn, true,
744 "Timeout waiting for clear status 0x%08x [for sb %d]\n",
748 void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
749 struct ecore_ptt *p_ptt,
750 u32 sb_id, u16 opaque, bool b_set)
756 ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 1, opaque);
759 ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque);
761 /* Clear the CAU for the SB */
762 for (pi = 0; pi < 12; pi++)
763 ecore_wr(p_hwfn, p_ptt,
764 CAU_REG_PI_MEMORY + (sb_id * 12 + pi) * 4, 0);
767 void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
768 struct ecore_ptt *p_ptt,
769 bool b_set, bool b_slowpath)
771 u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb;
772 u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt;
773 u32 sb_id = 0, val = 0;
775 /* @@@TBD MichalK temporary... should be moved to init-tool... */
776 val = ecore_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
777 val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
778 val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
779 ecore_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
782 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
783 "IGU cleaning SBs [%d,...,%d]\n",
784 igu_base_sb, igu_base_sb + igu_sb_cnt - 1);
786 for (sb_id = igu_base_sb; sb_id < igu_base_sb + igu_sb_cnt; sb_id++)
787 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
788 p_hwfn->hw_info.opaque_fid,
794 sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
795 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
796 "IGU cleaning slowpath SB [%d]\n", sb_id);
797 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
798 p_hwfn->hw_info.opaque_fid, b_set);
801 static u32 ecore_int_igu_read_cam_block(struct ecore_hwfn *p_hwfn,
802 struct ecore_ptt *p_ptt, u16 sb_id)
804 u32 val = ecore_rd(p_hwfn, p_ptt,
805 IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id);
806 struct ecore_igu_block *p_block;
808 p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
810 /* stop scanning when hit first invalid PF entry */
811 if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
812 GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
815 /* Fill the block information */
816 p_block->status = ECORE_IGU_STATUS_VALID;
817 p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
818 p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
819 p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);
821 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
822 "IGU_BLOCK: [SB 0x%04x, Value in CAM 0x%08x] func_id = %d"
823 " is_pf = %d vector_num = 0x%x\n",
824 sb_id, val, p_block->function_id, p_block->is_pf,
825 p_block->vector_number);
831 enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
832 struct ecore_ptt *p_ptt)
834 struct ecore_igu_info *p_igu_info;
835 struct ecore_igu_block *p_block;
836 u16 sb_id, last_iov_sb_id = 0;
837 u32 min_vf, max_vf, val;
838 u16 prev_sb_id = 0xFF;
840 p_hwfn->hw_info.p_igu_info = OSAL_ALLOC(p_hwfn->p_dev,
842 sizeof(*p_igu_info));
843 if (!p_hwfn->hw_info.p_igu_info)
846 OSAL_MEMSET(p_hwfn->hw_info.p_igu_info, 0, sizeof(*p_igu_info));
848 p_igu_info = p_hwfn->hw_info.p_igu_info;
850 /* Initialize base sb / sb cnt for PFs and VFs */
851 p_igu_info->igu_base_sb = 0xffff;
852 p_igu_info->igu_sb_cnt = 0;
853 p_igu_info->igu_dsb_id = 0xffff;
854 p_igu_info->igu_base_sb_iov = 0xffff;
859 for (sb_id = 0; sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
861 p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
862 val = ecore_int_igu_read_cam_block(p_hwfn, p_ptt, sb_id);
863 if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
864 GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
867 if (p_block->is_pf) {
868 if (p_block->function_id == p_hwfn->rel_pf_id) {
869 p_block->status |= ECORE_IGU_STATUS_PF;
871 if (p_block->vector_number == 0) {
872 if (p_igu_info->igu_dsb_id == 0xffff)
873 p_igu_info->igu_dsb_id = sb_id;
875 if (p_igu_info->igu_base_sb == 0xffff) {
876 p_igu_info->igu_base_sb = sb_id;
877 } else if (prev_sb_id != sb_id - 1) {
878 DP_NOTICE(p_hwfn->p_dev, false,
886 /* we don't count the default */
887 (p_igu_info->igu_sb_cnt)++;
891 if ((p_block->function_id >= min_vf) &&
892 (p_block->function_id < max_vf)) {
893 /* Available for VFs of this PF */
894 if (p_igu_info->igu_base_sb_iov == 0xffff) {
895 p_igu_info->igu_base_sb_iov = sb_id;
896 } else if (last_iov_sb_id != sb_id - 1) {
898 DP_VERBOSE(p_hwfn->p_dev,
905 DP_NOTICE(p_hwfn->p_dev, false,
916 p_block->status |= ECORE_IGU_STATUS_FREE;
917 p_hwfn->hw_info.p_igu_info->free_blks++;
918 last_iov_sb_id = sb_id;
922 p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks;
924 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
925 "IGU igu_base_sb=0x%x [IOV 0x%x] igu_sb_cnt=%d [IOV 0x%x] "
927 p_igu_info->igu_base_sb, p_igu_info->igu_base_sb_iov,
928 p_igu_info->igu_sb_cnt, p_igu_info->igu_sb_cnt_iov,
929 p_igu_info->igu_dsb_id);
931 if (p_igu_info->igu_base_sb == 0xffff ||
932 p_igu_info->igu_dsb_id == 0xffff || p_igu_info->igu_sb_cnt == 0) {
933 DP_NOTICE(p_hwfn, true,
934 "IGU CAM returned invalid values igu_base_sb=0x%x "
935 "igu_sb_cnt=%d igu_dsb_id=0x%x\n",
936 p_igu_info->igu_base_sb, p_igu_info->igu_sb_cnt,
937 p_igu_info->igu_dsb_id);
941 return ECORE_SUCCESS;
945 * @brief Initialize igu runtime registers
949 void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn)
951 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;
953 STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
956 #define LSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - \
957 IGU_CMD_INT_ACK_BASE)
958 #define MSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - \
959 IGU_CMD_INT_ACK_BASE)
960 u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn)
962 u32 intr_status_hi = 0, intr_status_lo = 0;
965 intr_status_lo = REG_RD(p_hwfn,
966 GTT_BAR0_MAP_REG_IGU_CMD +
967 LSB_IGU_CMD_ADDR * 8);
968 intr_status_hi = REG_RD(p_hwfn,
969 GTT_BAR0_MAP_REG_IGU_CMD +
970 MSB_IGU_CMD_ADDR * 8);
971 intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;
976 static void ecore_int_sp_dpc_setup(struct ecore_hwfn *p_hwfn)
978 OSAL_DPC_INIT(p_hwfn->sp_dpc, p_hwfn);
979 p_hwfn->b_sp_dpc_enabled = true;
982 static enum _ecore_status_t ecore_int_sp_dpc_alloc(struct ecore_hwfn *p_hwfn)
984 p_hwfn->sp_dpc = OSAL_DPC_ALLOC(p_hwfn);
988 return ECORE_SUCCESS;
991 static void ecore_int_sp_dpc_free(struct ecore_hwfn *p_hwfn)
993 OSAL_FREE(p_hwfn->p_dev, p_hwfn->sp_dpc);
996 enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn,
997 struct ecore_ptt *p_ptt)
999 enum _ecore_status_t rc = ECORE_SUCCESS;
1001 rc = ecore_int_sp_dpc_alloc(p_hwfn);
1002 if (rc != ECORE_SUCCESS) {
1003 DP_ERR(p_hwfn->p_dev, "Failed to allocate sp dpc mem\n");
1007 rc = ecore_int_sp_sb_alloc(p_hwfn, p_ptt);
1008 if (rc != ECORE_SUCCESS) {
1009 DP_ERR(p_hwfn->p_dev, "Failed to allocate sp sb mem\n");
/* Free all interrupt-related resources (slowpath SB, attention SB, DPC). */
void ecore_int_free(struct ecore_hwfn *p_hwfn)
{
	ecore_int_sp_sb_free(p_hwfn);
	ecore_int_sb_attn_free(p_hwfn);
	ecore_int_sp_dpc_free(p_hwfn);
}
1023 void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
1025 if (!p_hwfn || !p_hwfn->p_sp_sb || !p_hwfn->p_sb_attn)
1028 ecore_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
1029 ecore_int_sp_dpc_setup(p_hwfn);
1032 void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
1033 struct ecore_sb_cnt_info *p_sb_cnt_info)
1035 struct ecore_igu_info *info = p_hwfn->hw_info.p_igu_info;
1037 if (!info || !p_sb_cnt_info)
1040 p_sb_cnt_info->sb_cnt = info->igu_sb_cnt;
1041 p_sb_cnt_info->sb_iov_cnt = info->igu_sb_cnt_iov;
1042 p_sb_cnt_info->sb_free_blk = info->free_blks;
1045 u16 ecore_int_queue_id_from_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
1047 struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
1049 /* Determine origin of SB id */
1050 if ((sb_id >= p_info->igu_base_sb) &&
1051 (sb_id < p_info->igu_base_sb + p_info->igu_sb_cnt)) {
1052 return sb_id - p_info->igu_base_sb;
1053 } else if ((sb_id >= p_info->igu_base_sb_iov) &&
1054 (sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) {
1055 return sb_id - p_info->igu_base_sb_iov + p_info->igu_sb_cnt;
1058 DP_NOTICE(p_hwfn, true, "SB %d not in range for function\n",
1063 void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev)
1067 for_each_hwfn(p_dev, i)
1068 p_dev->hwfns[i].b_int_requested = false;