2 * Copyright (c) 2016 QLogic Corporation.
6 * See LICENSE.qede_pmd for copyright and licensing details.
11 #include "ecore_spq.h"
13 #include "ecore_gtt_reg_addr.h"
14 #include "ecore_init_ops.h"
15 #include "ecore_rt_defs.h"
16 #include "ecore_int.h"
19 #include "ecore_sriov.h"
21 #include "ecore_hw_defs.h"
22 #include "ecore_hsi_common.h"
23 #include "ecore_mcp.h"
/* File-scope declarations for the ecore interrupt layer: per-PI callback
 * bookkeeping, the slowpath status-block wrapper, and the AEU attention
 * description tables.
 * NOTE(review): this listing has elided lines (embedded numbering skips);
 * several closing braces/members are not visible here. Code kept verbatim.
 */
/* One entry per protocol index (PI) on the slowpath SB. */
25 struct ecore_pi_info {
26 ecore_int_comp_cb_t comp_cb;
27 void *cookie; /* Will be sent to the compl cb function */
/* Slowpath status block: the generic SB plus a PI callback array. */
30 struct ecore_sb_sp_info {
31 struct ecore_sb_info sb_info;
32 /* per protocol index data */
33 struct ecore_pi_info pi_info_arr[PIS_PER_SB];
36 enum ecore_attention_type {
38 ECORE_ATTN_TYPE_PARITY,
/* Aligned allocation size for the attention status block. */
41 #define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
42 ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)
/* Describes a single AEU invert-register bit (or bit group). */
44 struct aeu_invert_reg_bit {
47 #define ATTENTION_PARITY (1 << 0)
/* flags[11:4] encode how many consecutive AEU bits this entry covers. */
49 #define ATTENTION_LENGTH_MASK (0x00000ff0)
50 #define ATTENTION_LENGTH_SHIFT (4)
51 #define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \
52 ATTENTION_LENGTH_SHIFT)
53 #define ATTENTION_SINGLE (1 << ATTENTION_LENGTH_SHIFT)
54 #define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY)
55 #define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \
58 /* Multiple bits start with this offset */
59 #define ATTENTION_OFFSET_MASK (0x000ff000)
60 #define ATTENTION_OFFSET_SHIFT (12)
62 #define ATTENTION_CLEAR_ENABLE (1 << 28)
63 #define ATTENTION_FW_DUMP (1 << 29)
64 #define ATTENTION_PANIC_DUMP (1 << 30)
67 /* Callback to call if attention will be triggered */
68 enum _ecore_status_t (*cb)(struct ecore_hwfn *p_hwfn);
70 enum block_id block_index;
/* One AEU invert register is described by up to 32 bit entries. */
73 struct aeu_invert_reg {
74 struct aeu_invert_reg_bit bits[32];
77 #define NUM_ATTN_REGS (9)
79 #define ATTN_STATE_BITS (0xfff)
80 #define ATTN_BITS_MASKABLE (0x3ff)
/* Book-keeping for the attention status block (DMA memory + masks). */
81 struct ecore_sb_attn_info {
82 /* Virtual & Physical address of the SB */
83 struct atten_status_block *sb_attn;
86 /* Last seen running index */
89 /* A mask of the AEU bits resulting in a parity error */
90 u32 parity_mask[NUM_ATTN_REGS];
92 /* A pointer to the attention description structure */
93 struct aeu_invert_reg *p_aeu_desc;
95 /* Previously asserted attentions, which are still unasserted */
98 /* Cleanup address for the link's general hw attention */
/* Compare the attention SB's running index against the last value we saw;
 * if it advanced, record it and flag ECORE_SB_ATT_IDX so the DPC processes
 * attentions. MMIOWB barriers bracket the read to order it against prior
 * and subsequent MMIO. Returns the rc flags (declaration of rc/index is in
 * lines elided from this listing).
 */
102 static u16 ecore_attn_update_idx(struct ecore_hwfn *p_hwfn,
103 struct ecore_sb_attn_info *p_sb_desc)
107 OSAL_MMIOWB(p_hwfn->p_dev);
109 index = OSAL_LE16_TO_CPU(p_sb_desc->sb_attn->sb_index);
110 if (p_sb_desc->index != index) {
111 p_sb_desc->index = index;
112 rc = ECORE_SB_ATT_IDX;
115 OSAL_MMIOWB(p_hwfn->p_dev);
/* Acknowledge attentions to the IGU: build a prod/cons update command with
 * the consumer index, the update flag, no interrupt state change
 * (IGU_INT_NOP) and the attention segment selector, then write it to the
 * SB's IGU doorbell address.
 */
120 static void ecore_sb_ack_attn(struct ecore_hwfn *p_hwfn,
121 void OSAL_IOMEM *igu_addr, u32 ack_cons)
123 struct igu_prod_cons_update igu_ack = { 0 };
125 igu_ack.sb_id_and_flags =
126 ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
127 (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
128 (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
129 (IGU_SEG_ACCESS_ATTN <<
130 IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
132 DIRECT_REG_WR(p_hwfn, igu_addr, igu_ack.sb_id_and_flags);
134 /* Both segments (interrupts & acks) are written to same place address;
135 * Need to guarantee all commands will be received (in-order) by HW.
137 OSAL_MMIOWB(p_hwfn->p_dev);
138 OSAL_BARRIER(p_hwfn->p_dev);
/* Slowpath deferred-procedure-call: the bottom half of the slowpath ISR.
 * Flow visible in this listing:
 *   1. Validate hwfn / p_sp_sb / sb_virt / p_sb_attn pointers (bail-out
 *      paths; the early returns are on lines elided from this listing).
 *   2. Disable SB interrupt generation while processing.
 *   3. Poll both the regular SB index and the attention SB index.
 *   4. If nothing new, re-enable and ack; otherwise dispatch each
 *      registered protocol-index completion callback, ack attentions
 *      (before re-enabling, to avoid spurious re-assertion), and finally
 *      re-enable interrupts with an updated consumer.
 */
141 void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)
143 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)hwfn_cookie;
144 struct ecore_pi_info *pi_info = OSAL_NULL;
145 struct ecore_sb_attn_info *sb_attn;
146 struct ecore_sb_info *sb_info;
151 DP_ERR(p_hwfn->p_dev, "DPC called - no hwfn!\n");
155 if (!p_hwfn->p_sp_sb) {
156 DP_ERR(p_hwfn->p_dev, "DPC called - no p_sp_sb\n");
160 sb_info = &p_hwfn->p_sp_sb->sb_info;
161 arr_size = OSAL_ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
163 DP_ERR(p_hwfn->p_dev,
164 "Status block is NULL - cannot ack interrupts\n");
168 if (!p_hwfn->p_sb_attn) {
169 DP_ERR(p_hwfn->p_dev, "DPC called - no p_sb_attn");
172 sb_attn = p_hwfn->p_sb_attn;
174 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
175 p_hwfn, p_hwfn->my_id);
177 /* Disable ack for def status block. Required both for msix +
178 * inta in non-mask mode, in inta does no harm.
180 ecore_sb_ack(sb_info, IGU_INT_DISABLE, 0);
182 /* Gather Interrupts/Attentions information */
183 if (!sb_info->sb_virt) {
184 DP_ERR(p_hwfn->p_dev,
185 "Interrupt Status block is NULL -"
186 " cannot check for new interrupts!\n");
188 u32 tmp_index = sb_info->sb_ack;
189 rc = ecore_sb_update_sb_idx(sb_info);
190 DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
191 "Interrupt indices: 0x%08x --> 0x%08x\n",
192 tmp_index, sb_info->sb_ack);
195 if (!sb_attn || !sb_attn->sb_attn) {
196 DP_ERR(p_hwfn->p_dev,
197 "Attentions Status block is NULL -"
198 " cannot check for new attentions!\n");
200 u16 tmp_index = sb_attn->index;
202 rc |= ecore_attn_update_idx(p_hwfn, sb_attn);
203 DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
204 "Attention indices: 0x%08x --> 0x%08x\n",
205 tmp_index, sb_attn->index);
208 /* Check if we expect interrupts at this time. if not just ack them */
209 if (!(rc & ECORE_SB_EVENT_MASK)) {
210 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
214 /* Check the validity of the DPC ptt. If not ack interrupts and fail */
215 if (!p_hwfn->p_dpc_ptt) {
216 DP_NOTICE(p_hwfn->p_dev, true, "Failed to allocate PTT\n");
217 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
221 if (rc & ECORE_SB_IDX) {
224 /* Since we only looked at the SB index, it's possible more
225 * than a single protocol-index on the SB incremented.
226 * Iterate over all configured protocol indices and check
227 * whether something happened for each.
229 for (pi = 0; pi < arr_size; pi++) {
230 pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
231 if (pi_info->comp_cb != OSAL_NULL)
232 pi_info->comp_cb(p_hwfn, pi_info->cookie);
236 if (sb_attn && (rc & ECORE_SB_ATT_IDX)) {
237 /* This should be done before the interrupts are enabled,
238 * since otherwise a new attention will be generated.
240 ecore_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);
243 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
/* Free the attention status block: release the DMA-coherent SB memory and
 * then the wrapper struct itself. (The NULL-check guarding these frees is
 * on lines elided from this listing.)
 */
246 static void ecore_int_sb_attn_free(struct ecore_hwfn *p_hwfn)
248 struct ecore_sb_attn_info *p_sb = p_hwfn->p_sb_attn;
254 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_sb->sb_attn,
256 SB_ATTN_ALIGNED_SIZE(p_hwfn));
258 OSAL_FREE(p_hwfn->p_dev, p_sb);
261 /* coalescing timeout = timeset << (timer_res + 1) */
/* Default RX/TX coalescing in usecs; overridable at build time via
 * RTE_LIBRTE_QEDE_{RX,TX}_COAL_US (the #else/#endif lines are elided from
 * this listing).
 */
262 #ifdef RTE_LIBRTE_QEDE_RX_COAL_US
263 #define ECORE_CAU_DEF_RX_USECS RTE_LIBRTE_QEDE_RX_COAL_US
265 #define ECORE_CAU_DEF_RX_USECS 24
268 #ifdef RTE_LIBRTE_QEDE_TX_COAL_US
269 #define ECORE_CAU_DEF_TX_USECS RTE_LIBRTE_QEDE_TX_COAL_US
271 #define ECORE_CAU_DEF_TX_USECS 48
/* Populate a CAU SB entry: owner PF/VF identity, both timesets, fixed
 * timer resolutions, and the HC enable/disable state. When coalescing is
 * enabled, also backfill device-level rx/tx usecs defaults if unset.
 */
274 void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn,
275 struct cau_sb_entry *p_sb_entry,
276 u8 pf_id, u16 vf_number, u8 vf_valid)
278 struct ecore_dev *p_dev = p_hwfn->p_dev;
281 OSAL_MEMSET(p_sb_entry, 0, sizeof(*p_sb_entry));
283 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
284 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
285 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
286 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
287 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
289 /* setting the time resultion to a fixed value ( = 1) */
290 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0,
291 ECORE_CAU_DEF_RX_TIMER_RES);
292 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1,
293 ECORE_CAU_DEF_TX_TIMER_RES);
295 cau_state = CAU_HC_DISABLE_STATE;
297 if (p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
298 cau_state = CAU_HC_ENABLE_STATE;
299 if (!p_dev->rx_coalesce_usecs) {
300 p_dev->rx_coalesce_usecs = ECORE_CAU_DEF_RX_USECS;
301 DP_INFO(p_dev, "Coalesce params rx-usecs=%u\n",
302 p_dev->rx_coalesce_usecs);
304 if (!p_dev->tx_coalesce_usecs) {
305 p_dev->tx_coalesce_usecs = ECORE_CAU_DEF_TX_USECS;
306 DP_INFO(p_dev, "Coalesce params tx-usecs=%u\n",
307 p_dev->tx_coalesce_usecs);
311 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
312 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
/* Program one SB's CAU configuration. Builds the entry with
 * ecore_init_cau_sb_entry(), then writes address+entry to CAU memory:
 * via DMAE when HW is already initialized (wide-bus), otherwise by staging
 * into runtime (RT) registers for the init tool. Finally, if coalescing is
 * enabled, configures the RX PI and per-TC TX PIs with timesets derived
 * from the device usecs and the fixed timer resolution.
 */
315 void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
316 struct ecore_ptt *p_ptt,
317 dma_addr_t sb_phys, u16 igu_sb_id,
318 u16 vf_number, u8 vf_valid)
320 struct cau_sb_entry sb_entry;
322 ecore_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
323 vf_number, vf_valid);
325 if (p_hwfn->hw_init_done) {
326 /* Wide-bus, initialize via DMAE */
327 u64 phys_addr = (u64)sb_phys;
329 ecore_dmae_host2grc(p_hwfn, p_ptt,
330 (u64)(osal_uintptr_t)&phys_addr,
331 CAU_REG_SB_ADDR_MEMORY +
332 igu_sb_id * sizeof(u64), 2, 0);
333 ecore_dmae_host2grc(p_hwfn, p_ptt,
334 (u64)(osal_uintptr_t)&sb_entry,
335 CAU_REG_SB_VAR_MEMORY +
336 igu_sb_id * sizeof(u64), 2, 0);
338 /* Initialize Status Block Address */
339 STORE_RT_REG_AGG(p_hwfn,
340 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
341 igu_sb_id * 2, sb_phys);
343 STORE_RT_REG_AGG(p_hwfn,
344 CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
345 igu_sb_id * 2, sb_entry);
348 /* Configure pi coalescing if set */
349 if (p_hwfn->p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
350 u8 num_tc = 1; /* @@@TBD aelior ECORE_MULTI_COS */
351 u8 timeset = p_hwfn->p_dev->rx_coalesce_usecs >>
352 (ECORE_CAU_DEF_RX_TIMER_RES + 1);
355 ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
356 ECORE_COAL_RX_STATE_MACHINE, timeset);
358 timeset = p_hwfn->p_dev->tx_coalesce_usecs >>
359 (ECORE_CAU_DEF_TX_TIMER_RES + 1);
361 for (i = 0; i < num_tc; i++) {
362 ecore_int_cau_conf_pi(p_hwfn, p_ptt,
/* NOTE(review): the TX PI-index argument line is elided in this listing. */
364 ECORE_COAL_TX_STATE_MACHINE,
/* Program a single protocol index (PI) in CAU: set the timeset and select
 * the RX (0) or TX (1) coalescing state machine. VFs skip CAU config
 * entirely. Post-init the PI is written directly via GRC; pre-init it is
 * staged in the RT array (the STORE_RT_REG call line is elided in this
 * listing, only its arguments remain).
 */
370 void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
371 struct ecore_ptt *p_ptt,
372 u16 igu_sb_id, u32 pi_index,
373 enum ecore_coalescing_fsm coalescing_fsm, u8 timeset)
375 struct cau_pi_entry pi_entry;
376 u32 sb_offset, pi_offset;
378 if (IS_VF(p_hwfn->p_dev))
379 return; /* @@@TBD MichalK- VF CAU... */
381 sb_offset = igu_sb_id * PIS_PER_SB;
382 OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry));
384 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
385 if (coalescing_fsm == ECORE_COAL_RX_STATE_MACHINE)
386 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
388 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);
390 pi_offset = sb_offset + pi_index;
391 if (p_hwfn->hw_init_done) {
392 ecore_wr(p_hwfn, p_ptt,
393 CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
394 *((u32 *)&(pi_entry)));
397 CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
398 *((u32 *)&(pi_entry)));
/* (Re)initialize a status block: zero its DMA memory, and on a PF also
 * program its CAU entry (VFs cannot access CAU, so they skip it).
 */
402 void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
403 struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info)
405 /* zero status block and ack counter */
407 OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
409 if (IS_PF(p_hwfn->p_dev))
410 ecore_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
411 sb_info->igu_sb_id, 0, 0);
415 * @brief ecore_get_igu_sb_id - given a sw sb_id return the
/* Map a software SB id to its IGU SB id: the special ECORE_SP_SB_ID maps
 * to the default SB, PFs add their contiguous IGU base, and VFs query via
 * ecore_vf_get_igu_sb_id(). (The return statement is on a line elided
 * from this listing.)
 */
423 static u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
427 /* Assuming continuous set of IGU SBs dedicated for given PF */
428 if (sb_id == ECORE_SP_SB_ID)
429 igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
430 else if (IS_PF(p_hwfn->p_dev))
431 igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb;
433 igu_sb_id = ecore_vf_get_igu_sb_id(p_hwfn, sb_id);
435 if (sb_id == ECORE_SP_SB_ID)
436 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
437 "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
439 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
440 "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);
/* Initialize an SB info structure: record virtual/physical addresses,
 * resolve the IGU SB id, register non-slowpath SBs in p_hwfn->sbs_info[],
 * compute the absolute IGU doorbell address (PF via GTT BAR0 window, VF
 * via the PXP VF BAR0 IGU region), mark the SB initialized and program the
 * hardware via ecore_int_sb_setup(). Always returns ECORE_SUCCESS.
 */
445 enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
446 struct ecore_ptt *p_ptt,
447 struct ecore_sb_info *sb_info,
449 dma_addr_t sb_phy_addr, u16 sb_id)
451 sb_info->sb_virt = sb_virt_addr;
452 sb_info->sb_phys = sb_phy_addr;
454 sb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id);
456 if (sb_id != ECORE_SP_SB_ID) {
457 p_hwfn->sbs_info[sb_id] = sb_info;
460 #ifdef ECORE_CONFIG_DIRECT_HWFN
461 sb_info->p_hwfn = p_hwfn;
463 sb_info->p_dev = p_hwfn->p_dev;
465 /* The igu address will hold the absolute address that needs to be
466 * written to for a specific status block
468 if (IS_PF(p_hwfn->p_dev)) {
469 sb_info->igu_addr = (u8 OSAL_IOMEM *)p_hwfn->regview +
470 GTT_BAR0_MAP_REG_IGU_CMD + (sb_info->igu_sb_id << 3);
474 (u8 OSAL_IOMEM *)p_hwfn->regview +
475 PXP_VF_BAR0_START_IGU +
476 ((IGU_CMD_INT_ACK_BASE + sb_info->igu_sb_id) << 3);
479 sb_info->flags |= ECORE_SB_INFO_INIT;
481 ecore_int_sb_setup(p_hwfn, p_ptt, sb_info);
483 return ECORE_SUCCESS;
/* Release a non-slowpath SB: refuses the slowpath SB id, zeroes the SB
 * memory and drops the sbs_info[] registration. (The error return for the
 * slowpath case is on a line elided from this listing.)
 */
486 enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
487 struct ecore_sb_info *sb_info,
490 if (sb_id == ECORE_SP_SB_ID) {
491 DP_ERR(p_hwfn, "Do Not free sp sb using this function");
495 /* zero status block and ack counter */
497 OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
499 if (p_hwfn->sbs_info[sb_id] != OSAL_NULL) {
500 p_hwfn->sbs_info[sb_id] = OSAL_NULL;
504 return ECORE_SUCCESS;
/* Free the slowpath SB: release its DMA-coherent memory (if allocated) and
 * then the wrapper struct. Counterpart of ecore_int_sp_sb_alloc().
 */
507 static void ecore_int_sp_sb_free(struct ecore_hwfn *p_hwfn)
509 struct ecore_sb_sp_info *p_sb = p_hwfn->p_sp_sb;
514 if (p_sb->sb_info.sb_virt) {
515 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
516 p_sb->sb_info.sb_virt,
517 p_sb->sb_info.sb_phys,
518 SB_ALIGNED_SIZE(p_hwfn));
521 OSAL_FREE(p_hwfn->p_dev, p_sb);
/* Allocate the slowpath SB: the wrapper struct plus DMA-coherent SB memory,
 * then initialize it as ECORE_SP_SB_ID and clear the PI callback array.
 * On DMA allocation failure the wrapper is freed before returning (the
 * ECORE_NOMEM returns are on lines elided from this listing).
 */
524 static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
525 struct ecore_ptt *p_ptt)
527 struct ecore_sb_sp_info *p_sb;
528 dma_addr_t p_phys = 0;
533 OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
534 sizeof(struct ecore_sb_sp_info));
536 DP_NOTICE(p_hwfn, true,
537 "Failed to allocate `struct ecore_sb_info'");
542 p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
543 &p_phys, SB_ALIGNED_SIZE(p_hwfn));
545 DP_NOTICE(p_hwfn, true, "Failed to allocate status block");
546 OSAL_FREE(p_hwfn->p_dev, p_sb);
550 /* Status Block setup */
551 p_hwfn->p_sp_sb = p_sb;
552 ecore_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info,
553 p_virt, p_phys, ECORE_SP_SB_ID);
555 OSAL_MEMSET(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));
557 return ECORE_SUCCESS;
/* Register a completion callback on the first free PI slot of the slowpath
 * SB. Outputs the chosen slot via *sb_idx and a pointer to its firmware
 * consumer via *p_fw_cons. rc starts as ECORE_NOMEM; the success
 * assignment/return and the *sb_idx write are on lines elided from this
 * listing.
 */
560 enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn,
561 ecore_int_comp_cb_t comp_cb,
563 u8 *sb_idx, __le16 **p_fw_cons)
565 struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
566 enum _ecore_status_t rc = ECORE_NOMEM;
569 /* Look for a free index */
570 for (pi = 0; pi < OSAL_ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
571 if (p_sp_sb->pi_info_arr[pi].comp_cb != OSAL_NULL)
574 p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
575 p_sp_sb->pi_info_arr[pi].cookie = cookie;
577 *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
/* Unregister the callback on PI slot `pi`. If no callback is installed the
 * function bails out early (that return's value is on a line elided from
 * this listing); otherwise the slot is cleared and ECORE_SUCCESS returned.
 */
585 enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn, u8 pi)
587 struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
589 if (p_sp_sb->pi_info_arr[pi].comp_cb == OSAL_NULL)
592 p_sp_sb->pi_info_arr[pi].comp_cb = OSAL_NULL;
593 p_sp_sb->pi_info_arr[pi].cookie = OSAL_NULL;
594 return ECORE_SUCCESS;
/* Return the IGU SB id of the slowpath status block. */
597 u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn)
599 return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
/* Write the PF's IGU configuration for the requested interrupt mode:
 * always enables the function; enables attention generation except on FPGA;
 * then sets mode bits — INTA: int-line + single-ISR; MSI: msi/msix +
 * single-ISR; MSIX: msi/msix only; POLL: nothing extra. (break statements
 * are on lines elided from this listing.)
 */
602 void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn,
603 struct ecore_ptt *p_ptt,
604 enum ecore_int_mode int_mode)
606 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;
609 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
610 DP_INFO(p_hwfn, "FPGA - don't enable ATTN generation in IGU\n");
613 igu_pf_conf |= IGU_PF_CONF_ATTN_BIT_EN;
615 p_hwfn->p_dev->int_mode = int_mode;
616 switch (p_hwfn->p_dev->int_mode) {
617 case ECORE_INT_MODE_INTA:
618 igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
619 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
622 case ECORE_INT_MODE_MSI:
623 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
624 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
627 case ECORE_INT_MODE_MSIX:
628 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
630 case ECORE_INT_MODE_POLL:
634 ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
/* Arm attention generation: skip on FPGA; otherwise reset and re-enable
 * the IGU attention-enable register around programming the leading/
 * trailing edge latches, flush with MMIOWB, and unmask AEU signals toward
 * the IGU in MISC.
 */
637 static void ecore_int_igu_enable_attn(struct ecore_hwfn *p_hwfn,
638 struct ecore_ptt *p_ptt)
641 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
643 "FPGA - Don't enable Attentions in IGU and MISC\n");
648 /* Configure AEU signal change to produce attentions */
649 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
650 ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
651 ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
652 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);
654 OSAL_MMIOWB(p_hwfn->p_dev);
656 /* Unmask AEU signals toward IGU */
657 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
/* Full interrupt bring-up: temporarily mask some general HW attentions in
 * AEU enable registers (workarounds noted inline), arm attentions, request
 * the slowpath IRQ (for non-INTA modes or the lead hwfn), then enable
 * interrupt generation in the chosen mode. Returns ECORE_NORESOURCES if
 * the IRQ request fails; the bit-manipulation of `tmp` between the
 * read/write pairs is on lines elided from this listing.
 */
661 ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
662 enum ecore_int_mode int_mode)
664 enum _ecore_status_t rc = ECORE_SUCCESS;
667 /* @@@tmp - Mask General HW attentions 0-31, Enable 32-36 */
668 tmp = ecore_rd(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0);
670 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE3_IGU_OUT_0, 0);
671 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0, tmp);
673 /* @@@tmp - Starting with MFW 8.2.1.0 we've started hitting AVS stop
674 * attentions. Since we're waiting for BRCM answer regarding this
675 * attention, in the meanwhile we simply mask it.
677 tmp = ecore_rd(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0);
679 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0, tmp);
681 ecore_int_igu_enable_attn(p_hwfn, p_ptt);
683 if ((int_mode != ECORE_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
684 rc = OSAL_SLOWPATH_IRQ_REQ(p_hwfn);
685 if (rc != ECORE_SUCCESS) {
686 DP_NOTICE(p_hwfn, true,
687 "Slowpath IRQ request failed\n");
688 return ECORE_NORESOURCES;
690 p_hwfn->b_int_requested = true;
693 /* Enable interrupt Generation */
694 ecore_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
696 p_hwfn->b_int_enabled = 1;
/* Disable interrupt generation: clear the enabled flag; on a PF also zero
 * the IGU PF configuration register (VFs return early — that return is on
 * a line elided from this listing).
 */
701 void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
702 struct ecore_ptt *p_ptt)
704 p_hwfn->b_int_enabled = 0;
706 if (IS_VF(p_hwfn->p_dev))
709 ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
712 #define IGU_CLEANUP_SLEEP_LENGTH (1000)
/* Set/clear an SB's cleanup bit through the IGU command-control registers
 * (usable for VFs, unlike the IGU BAR path): build the data and control
 * words, write data then control (barrier between, MMIOWB after), then
 * poll the matching IGU_REG_CLEANUP_STATUS_* bit up to
 * IGU_CLEANUP_SLEEP_LENGTH iterations; logs a timeout notice otherwise.
 * (The per-iteration delay and the status polarity check's loop exit are
 * partially on lines elided from this listing.)
 */
713 void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
714 struct ecore_ptt *p_ptt,
715 u32 sb_id, bool cleanup_set, u16 opaque_fid)
717 u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
718 u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id;
719 u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
720 u8 type = 0; /* FIXME MichalS type??? */
722 OSAL_BUILD_BUG_ON((IGU_REG_CLEANUP_STATUS_4 -
723 IGU_REG_CLEANUP_STATUS_0) != 0x200);
725 /* USE Control Command Register to perform cleanup. There is an
726 * option to do this using IGU bar, but then it can't be used for VFs.
729 /* Set the data field */
730 SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
731 SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, type);
732 SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);
734 /* Set the control register */
735 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
736 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
737 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);
739 ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);
741 OSAL_BARRIER(p_hwfn->p_dev);
743 ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);
745 OSAL_MMIOWB(p_hwfn->p_dev);
747 /* calculate where to read the status bit from */
748 sb_bit = 1 << (sb_id % 32);
749 sb_bit_addr = sb_id / 32 * sizeof(u32);
751 sb_bit_addr += IGU_REG_CLEANUP_STATUS_0 + (0x80 * type);
753 /* Now wait for the command to complete */
754 while (--sleep_cnt) {
755 val = ecore_rd(p_hwfn, p_ptt, sb_bit_addr);
756 if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
762 DP_NOTICE(p_hwfn, true,
763 "Timeout waiting for clear status 0x%08x [for sb %d]\n",
/* Cleanup one SB via the IGU command interface — set then clear the
 * cleanup bit per b_set (the conditional wrapping these two calls is on
 * lines elided from this listing) — and zero all 12 of the SB's CAU PI
 * entries.
 */
767 void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
768 struct ecore_ptt *p_ptt,
769 u32 sb_id, u16 opaque, bool b_set)
775 ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 1, opaque);
778 ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque);
780 /* Clear the CAU for the SB */
781 for (pi = 0; pi < 12; pi++)
782 ecore_wr(p_hwfn, p_ptt,
783 CAU_REG_PI_MEMORY + (sb_id * 12 + pi) * 4, 0);
/* Clean all of this PF's IGU SBs (and optionally the slowpath SB when
 * b_slowpath — its guarding conditional is on a line elided from this
 * listing). Also enables VF cleanup and disables the PXP TPH interface in
 * the IGU block configuration (flagged @@@TBD as init-tool work).
 */
786 void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
787 struct ecore_ptt *p_ptt,
788 bool b_set, bool b_slowpath)
790 u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb;
791 u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt;
792 u32 sb_id = 0, val = 0;
794 /* @@@TBD MichalK temporary... should be moved to init-tool... */
795 val = ecore_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
796 val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
797 val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
798 ecore_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
801 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
802 "IGU cleaning SBs [%d,...,%d]\n",
803 igu_base_sb, igu_base_sb + igu_sb_cnt - 1);
805 for (sb_id = igu_base_sb; sb_id < igu_base_sb + igu_sb_cnt; sb_id++)
806 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
807 p_hwfn->hw_info.opaque_fid,
813 sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
814 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
815 "IGU cleaning slowpath SB [%d]\n", sb_id);
816 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
817 p_hwfn->hw_info.opaque_fid, b_set);
/* Read one IGU CAM mapping line for sb_id and, when the entry is not an
 * invalid-PF terminator, fill the corresponding igu_blocks[] entry
 * (status/function/is_pf/vector). Returns the raw CAM value (the return
 * statements are on lines elided from this listing).
 */
820 static u32 ecore_int_igu_read_cam_block(struct ecore_hwfn *p_hwfn,
821 struct ecore_ptt *p_ptt, u16 sb_id)
823 u32 val = ecore_rd(p_hwfn, p_ptt,
824 IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id);
825 struct ecore_igu_block *p_block;
827 p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
829 /* stop scanning when hit first invalid PF entry */
830 if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
831 GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
834 /* Fill the block information */
835 p_block->status = ECORE_IGU_STATUS_VALID;
836 p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
837 p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
838 p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);
840 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
841 "IGU_BLOCK: [SB 0x%04x, Value in CAM 0x%08x] func_id = %d"
842 " is_pf = %d vector_num = 0x%x\n",
843 sb_id, val, p_block->function_id, p_block->is_pf,
844 p_block->vector_number);
/* Scan the entire IGU CAM and build p_hwfn->hw_info.p_igu_info:
 *   - allocates and zeroes the igu_info struct, seeding base/cnt/dsb
 *     sentinels (0xffff);
 *   - walks every mapping line, classifying blocks owned by this PF
 *     (default SB = vector 0, then the contiguous fastpath range, with a
 *     notice on non-contiguity), blocks belonging to this PF's VF range
 *     (min_vf/max_vf come from sriov_info under CONFIG_ECORE_SRIOV; the
 *     non-SRIOV #else branch is elided from this listing), and free
 *     blocks counted in free_blks;
 *   - validates that a base SB, default SB and a non-zero SB count were
 *     found, logging and failing otherwise (that error return is on a
 *     line elided from this listing).
 * Returns ECORE_SUCCESS on a sane CAM.
 */
850 enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
851 struct ecore_ptt *p_ptt)
853 struct ecore_igu_info *p_igu_info;
854 struct ecore_igu_block *p_block;
855 u16 sb_id, last_iov_sb_id = 0;
856 u32 min_vf, max_vf, val;
857 u16 prev_sb_id = 0xFF;
859 p_hwfn->hw_info.p_igu_info = OSAL_ALLOC(p_hwfn->p_dev,
861 sizeof(*p_igu_info));
862 if (!p_hwfn->hw_info.p_igu_info)
865 OSAL_MEMSET(p_hwfn->hw_info.p_igu_info, 0, sizeof(*p_igu_info));
867 p_igu_info = p_hwfn->hw_info.p_igu_info;
869 /* Initialize base sb / sb cnt for PFs and VFs */
870 p_igu_info->igu_base_sb = 0xffff;
871 p_igu_info->igu_sb_cnt = 0;
872 p_igu_info->igu_dsb_id = 0xffff;
873 p_igu_info->igu_base_sb_iov = 0xffff;
875 #ifdef CONFIG_ECORE_SRIOV
876 min_vf = p_hwfn->hw_info.first_vf_in_pf;
877 max_vf = p_hwfn->hw_info.first_vf_in_pf +
878 p_hwfn->p_dev->sriov_info.total_vfs;
884 for (sb_id = 0; sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
886 p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
887 val = ecore_int_igu_read_cam_block(p_hwfn, p_ptt, sb_id);
888 if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
889 GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
892 if (p_block->is_pf) {
893 if (p_block->function_id == p_hwfn->rel_pf_id) {
894 p_block->status |= ECORE_IGU_STATUS_PF;
896 if (p_block->vector_number == 0) {
897 if (p_igu_info->igu_dsb_id == 0xffff)
898 p_igu_info->igu_dsb_id = sb_id;
900 if (p_igu_info->igu_base_sb == 0xffff) {
901 p_igu_info->igu_base_sb = sb_id;
902 } else if (prev_sb_id != sb_id - 1) {
903 DP_NOTICE(p_hwfn->p_dev, false,
911 /* we don't count the default */
912 (p_igu_info->igu_sb_cnt)++;
916 if ((p_block->function_id >= min_vf) &&
917 (p_block->function_id < max_vf)) {
918 /* Available for VFs of this PF */
919 if (p_igu_info->igu_base_sb_iov == 0xffff) {
920 p_igu_info->igu_base_sb_iov = sb_id;
921 } else if (last_iov_sb_id != sb_id - 1) {
923 DP_VERBOSE(p_hwfn->p_dev,
930 DP_NOTICE(p_hwfn->p_dev, false,
941 p_block->status |= ECORE_IGU_STATUS_FREE;
942 p_hwfn->hw_info.p_igu_info->free_blks++;
943 last_iov_sb_id = sb_id;
947 p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks;
949 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
950 "IGU igu_base_sb=0x%x [IOV 0x%x] igu_sb_cnt=%d [IOV 0x%x] "
952 p_igu_info->igu_base_sb, p_igu_info->igu_base_sb_iov,
953 p_igu_info->igu_sb_cnt, p_igu_info->igu_sb_cnt_iov,
954 p_igu_info->igu_dsb_id);
956 if (p_igu_info->igu_base_sb == 0xffff ||
957 p_igu_info->igu_dsb_id == 0xffff || p_igu_info->igu_sb_cnt == 0) {
958 DP_NOTICE(p_hwfn, true,
959 "IGU CAM returned invalid values igu_base_sb=0x%x "
960 "igu_sb_cnt=%d igu_dsb_id=0x%x\n",
961 p_igu_info->igu_base_sb, p_igu_info->igu_sb_cnt,
962 p_igu_info->igu_dsb_id);
966 return ECORE_SUCCESS;
970 * @brief Initialize igu runtime registers
974 void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn)
976 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;
/* Stage the function-enable bit into the RT array for the init tool. */
978 STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
/* Offsets (in IGU command units) of the SISR masked-pending LSB/MSB words
 * relative to the interrupt-ack base; each command slot is 8 bytes wide.
 */
981 #define LSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - \
982 IGU_CMD_INT_ACK_BASE)
983 #define MSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - \
984 IGU_CMD_INT_ACK_BASE)
/* Read the 64-bit SISR interrupt status via the GTT-mapped IGU command
 * window: two 32-bit reads combined into one u64 (the declaration of
 * intr_status and the return are on lines elided from this listing).
 */
985 u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn)
987 u32 intr_status_hi = 0, intr_status_lo = 0;
990 intr_status_lo = REG_RD(p_hwfn,
991 GTT_BAR0_MAP_REG_IGU_CMD +
992 LSB_IGU_CMD_ADDR * 8);
993 intr_status_hi = REG_RD(p_hwfn,
994 GTT_BAR0_MAP_REG_IGU_CMD +
995 MSB_IGU_CMD_ADDR * 8);
996 intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;
/* Bind the slowpath DPC to this hwfn and mark it enabled. */
1001 static void ecore_int_sp_dpc_setup(struct ecore_hwfn *p_hwfn)
1003 OSAL_DPC_INIT(p_hwfn->sp_dpc, p_hwfn);
1004 p_hwfn->b_sp_dpc_enabled = true;
/* Allocate the slowpath DPC object; the failure return's value is on a
 * line elided from this listing (presumably ECORE_NOMEM — verify against
 * upstream).
 */
1007 static enum _ecore_status_t ecore_int_sp_dpc_alloc(struct ecore_hwfn *p_hwfn)
1009 p_hwfn->sp_dpc = OSAL_DPC_ALLOC(p_hwfn);
1010 if (!p_hwfn->sp_dpc)
1013 return ECORE_SUCCESS;
/* Free the slowpath DPC object allocated by ecore_int_sp_dpc_alloc(). */
1016 static void ecore_int_sp_dpc_free(struct ecore_hwfn *p_hwfn)
1018 OSAL_FREE(p_hwfn->p_dev, p_hwfn->sp_dpc);
/* Allocate interrupt resources: the slowpath DPC, then the slowpath SB.
 * Error paths log and propagate rc (the return statements are on lines
 * elided from this listing).
 */
1021 enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn,
1022 struct ecore_ptt *p_ptt)
1024 enum _ecore_status_t rc = ECORE_SUCCESS;
1026 rc = ecore_int_sp_dpc_alloc(p_hwfn);
1027 if (rc != ECORE_SUCCESS) {
1028 DP_ERR(p_hwfn->p_dev, "Failed to allocate sp dpc mem\n");
1032 rc = ecore_int_sp_sb_alloc(p_hwfn, p_ptt);
1033 if (rc != ECORE_SUCCESS) {
1034 DP_ERR(p_hwfn->p_dev, "Failed to allocate sp sb mem\n");
/* Release all interrupt resources: slowpath SB, attention SB, DPC.
 * Counterpart of ecore_int_alloc() (plus the attention-SB allocation).
 */
1041 void ecore_int_free(struct ecore_hwfn *p_hwfn)
1043 ecore_int_sp_sb_free(p_hwfn);
1044 ecore_int_sb_attn_free(p_hwfn);
1045 ecore_int_sp_dpc_free(p_hwfn);
/* (Re)program the slowpath SB in hardware and arm the DPC. No-op if any
 * of the required structures is missing.
 */
1048 void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
1050 if (!p_hwfn || !p_hwfn->p_sp_sb || !p_hwfn->p_sb_attn)
1053 ecore_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
1054 ecore_int_sp_dpc_setup(p_hwfn);
/* Report SB counts discovered by the IGU CAM scan: PF SBs, SBs reserved
 * for this PF's VFs, and currently free blocks. No-op on NULL inputs.
 */
1057 void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
1058 struct ecore_sb_cnt_info *p_sb_cnt_info)
1060 struct ecore_igu_info *info = p_hwfn->hw_info.p_igu_info;
1062 if (!info || !p_sb_cnt_info)
1065 p_sb_cnt_info->sb_cnt = info->igu_sb_cnt;
1066 p_sb_cnt_info->sb_iov_cnt = info->igu_sb_cnt_iov;
1067 p_sb_cnt_info->sb_free_blk = info->free_blks;
/* Translate an IGU SB id back into a queue id: PF-range SBs map to
 * [0, igu_sb_cnt); IOV-range SBs map after them. Out-of-range ids log a
 * notice (the value returned in that case is on a line elided from this
 * listing).
 */
1070 u16 ecore_int_queue_id_from_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
1072 struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
1074 /* Determine origin of SB id */
1075 if ((sb_id >= p_info->igu_base_sb) &&
1076 (sb_id < p_info->igu_base_sb + p_info->igu_sb_cnt)) {
1077 return sb_id - p_info->igu_base_sb;
1078 } else if ((sb_id >= p_info->igu_base_sb_iov) &&
1079 (sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) {
1080 return sb_id - p_info->igu_base_sb_iov + p_info->igu_sb_cnt;
1083 DP_NOTICE(p_hwfn, true, "SB %d not in range for function\n",
/* After ISR release, clear b_int_requested on every hwfn of the device so
 * interrupts won't be re-requested until re-enabled.
 */
1088 void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev)
1092 for_each_hwfn(p_dev, i)
1093 p_dev->hwfns[i].b_int_requested = false;