2 * Copyright (c) 2016 - 2018 Cavium Inc.
6 * See LICENSE.qede_pmd for copyright and licensing details.
10 #include "ecore_hsi_common.h"
11 #include "ecore_status.h"
15 #include "ecore_utils.h"
16 #include "ecore_iov_api.h"
19 #define ECORE_EMUL_FACTOR 2000
20 #define ECORE_FPGA_FACTOR 200
23 #define ECORE_BAR_ACQUIRE_TIMEOUT 1000
26 #define ECORE_BAR_INVALID_OFFSET (OSAL_CPU_TO_LE32(-1))
29 osal_list_entry_t list_entry;
31 struct pxp_ptt_entry pxp;
35 struct ecore_ptt_pool {
36 osal_list_t free_list;
37 osal_spinlock_t lock; /* ptt synchronized access */
38 struct ecore_ptt ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
41 void __ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
43 OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ptt_pool);
44 p_hwfn->p_ptt_pool = OSAL_NULL;
47 enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn)
49 struct ecore_ptt_pool *p_pool = OSAL_ALLOC(p_hwfn->p_dev,
57 OSAL_LIST_INIT(&p_pool->free_list);
58 for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
59 p_pool->ptts[i].idx = i;
60 p_pool->ptts[i].pxp.offset = ECORE_BAR_INVALID_OFFSET;
61 p_pool->ptts[i].pxp.pretend.control = 0;
62 p_pool->ptts[i].hwfn_id = p_hwfn->my_id;
64 /* There are special PTT entries that are reserved by design.
65 * The rest are added to the list for general usage.
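* For example, entries with idx < RESERVED_PTT_MAX never enter the
* free list and are handed out only via ecore_get_reserved_ptt().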
67 if (i >= RESERVED_PTT_MAX)
68 OSAL_LIST_PUSH_HEAD(&p_pool->ptts[i].list_entry,
72 p_hwfn->p_ptt_pool = p_pool;
73 #ifdef CONFIG_ECORE_LOCK_ALLOC
74 if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_pool->lock)) {
75 __ecore_ptt_pool_free(p_hwfn);
79 OSAL_SPIN_LOCK_INIT(&p_pool->lock);
83 void ecore_ptt_invalidate(struct ecore_hwfn *p_hwfn)
85 struct ecore_ptt *p_ptt;
88 for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
89 p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
90 p_ptt->pxp.offset = ECORE_BAR_INVALID_OFFSET;
94 void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
96 #ifdef CONFIG_ECORE_LOCK_ALLOC
97 if (p_hwfn->p_ptt_pool)
98 OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->p_ptt_pool->lock);
100 __ecore_ptt_pool_free(p_hwfn);
103 struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn)
105 struct ecore_ptt *p_ptt;
108 /* Take the free PTT from the list */
109 for (i = 0; i < ECORE_BAR_ACQUIRE_TIMEOUT; i++) {
110 OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock);
111 if (!OSAL_LIST_IS_EMPTY(&p_hwfn->p_ptt_pool->free_list)) {
112 p_ptt = OSAL_LIST_FIRST_ENTRY(
113 &p_hwfn->p_ptt_pool->free_list,
114 struct ecore_ptt, list_entry);
115 OSAL_LIST_REMOVE_ENTRY(&p_ptt->list_entry,
116 &p_hwfn->p_ptt_pool->free_list);
118 OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
120 DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
121 "allocated ptt %d\n", p_ptt->idx);
126 OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
130 DP_NOTICE(p_hwfn, true,
131 "PTT acquire timeout - failed to allocate PTT\n");
135 void ecore_ptt_release(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
137 /* This PTT should not be set to pretend if it is being released */
138 /* TODO - add some pretend sanity checks, to make sure pretend
139 * isn't set on this ptt
142 OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock);
143 OSAL_LIST_PUSH_HEAD(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
144 OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
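/* A minimal usage sketch (illustrative only): callers bracket register
* accesses with acquire/release, e.g.
*
*	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
*
*	if (p_ptt) {
*		u32 val = ecore_rd(p_hwfn, p_ptt, hw_addr);
*		ecore_ptt_release(p_hwfn, p_ptt);
*	}
*/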
147 static u32 ecore_ptt_get_hw_addr(struct ecore_ptt *p_ptt)
149 /* The HW is using DWORDS and we need to translate it to Bytes */
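/* e.g., a stored pxp.offset of 0x10 (dwords) translates to byte address 0x40 */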
150 return OSAL_LE32_TO_CPU(p_ptt->pxp.offset) << 2;
153 static u32 ecore_ptt_config_addr(struct ecore_ptt *p_ptt)
155 return PXP_PF_WINDOW_ADMIN_PER_PF_START +
156 p_ptt->idx * sizeof(struct pxp_ptt_entry);
159 u32 ecore_ptt_get_bar_addr(struct ecore_ptt *p_ptt)
161 return PXP_EXTERNAL_BAR_PF_WINDOW_START +
162 p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE;
165 void ecore_ptt_set_win(struct ecore_hwfn *p_hwfn,
166 struct ecore_ptt *p_ptt, u32 new_hw_addr)
170 prev_hw_addr = ecore_ptt_get_hw_addr(p_ptt);
172 if (new_hw_addr == prev_hw_addr)
175 /* Update the PTT entry in the admin window */
176 DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
177 "Updating PTT entry %d to offset 0x%x\n",
178 p_ptt->idx, new_hw_addr);
180 /* The HW is using DWORDS and the address is in Bytes */
181 p_ptt->pxp.offset = OSAL_CPU_TO_LE32(new_hw_addr >> 2);
184 ecore_ptt_config_addr(p_ptt) +
185 OFFSETOF(struct pxp_ptt_entry, offset),
186 OSAL_LE32_TO_CPU(p_ptt->pxp.offset));
189 static u32 ecore_set_ptt(struct ecore_hwfn *p_hwfn,
190 struct ecore_ptt *p_ptt, u32 hw_addr)
192 u32 win_hw_addr = ecore_ptt_get_hw_addr(p_ptt);
195 offset = hw_addr - win_hw_addr;
197 if (p_ptt->hwfn_id != p_hwfn->my_id)
198 DP_NOTICE(p_hwfn, true,
199 "ptt[%d] of hwfn[%02x] is used by hwfn[%02x]!\n",
200 p_ptt->idx, p_ptt->hwfn_id, p_hwfn->my_id);
202 /* Verify the address is within the window */
203 if (hw_addr < win_hw_addr ||
204 offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
205 ecore_ptt_set_win(p_hwfn, p_ptt, hw_addr);
209 return ecore_ptt_get_bar_addr(p_ptt) + offset;
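/* For example, if the window currently starts at GRC address 0x50000, an
* access to 0x50010 stays inside it and only the BAR offset is adjusted;
* an access below 0x50000 or beyond the window size re-points the window
* via ecore_ptt_set_win() first.
*/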
212 struct ecore_ptt *ecore_get_reserved_ptt(struct ecore_hwfn *p_hwfn,
213 enum reserved_ptts ptt_idx)
215 if (ptt_idx >= RESERVED_PTT_MAX) {
216 DP_NOTICE(p_hwfn, true,
217 "Requested PTT %d is out of range\n", ptt_idx);
221 return &p_hwfn->p_ptt_pool->ptts[ptt_idx];
224 static bool ecore_is_reg_fifo_empty(struct ecore_hwfn *p_hwfn,
225 struct ecore_ptt *p_ptt)
227 bool is_empty = true;
230 if (!p_hwfn->p_dev->chk_reg_fifo)
233 /* ecore_rd() cannot be used here since it calls this function */
234 bar_addr = ecore_set_ptt(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO_VALID_DATA);
235 is_empty = REG_RD(p_hwfn, bar_addr) == 0;
238 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
246 void ecore_wr(struct ecore_hwfn *p_hwfn,
247 struct ecore_ptt *p_ptt, u32 hw_addr, u32 val)
252 prev_fifo_err = !ecore_is_reg_fifo_empty(p_hwfn, p_ptt);
254 bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr);
255 REG_WR(p_hwfn, bar_addr, val);
256 DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
257 "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
258 bar_addr, hw_addr, val);
261 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
265 OSAL_WARN(!prev_fifo_err && !ecore_is_reg_fifo_empty(p_hwfn, p_ptt),
266 "reg_fifo err was caused by a call to ecore_wr(0x%x, 0x%x)\n",
270 u32 ecore_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 hw_addr)
275 prev_fifo_err = !ecore_is_reg_fifo_empty(p_hwfn, p_ptt);
277 bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr);
278 val = REG_RD(p_hwfn, bar_addr);
280 DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
281 "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
282 bar_addr, hw_addr, val);
285 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
289 OSAL_WARN(!prev_fifo_err && !ecore_is_reg_fifo_empty(p_hwfn, p_ptt),
290 "reg_fifo error was caused by a call to ecore_rd(0x%x)\n",
296 static void ecore_memcpy_hw(struct ecore_hwfn *p_hwfn,
297 struct ecore_ptt *p_ptt,
299 u32 hw_addr, osal_size_t n, bool to_device)
301 u32 dw_count, *host_addr, hw_offset;
302 osal_size_t quota, done = 0;
303 u32 OSAL_IOMEM *reg_addr;
306 quota = OSAL_MIN_T(osal_size_t, n - done,
307 PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);
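/* Illustration (assuming, for the sake of example, a 4KB window): copying
* 9KB proceeds as three chunks of 4KB, 4KB and 1KB, re-pointing the PF
* window before each chunk on the PF path below.
*/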
309 if (IS_PF(p_hwfn->p_dev)) {
310 ecore_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
311 hw_offset = ecore_ptt_get_bar_addr(p_ptt);
313 hw_offset = hw_addr + done;
316 dw_count = quota / 4;
317 host_addr = (u32 *)((u8 *)addr + done);
318 reg_addr = (u32 OSAL_IOMEM *)OSAL_REG_ADDR(p_hwfn, hw_offset);
322 DIRECT_REG_WR(p_hwfn, reg_addr++, *host_addr++);
325 *host_addr++ = DIRECT_REG_RD(p_hwfn,
332 void ecore_memcpy_from(struct ecore_hwfn *p_hwfn,
333 struct ecore_ptt *p_ptt,
334 void *dest, u32 hw_addr, osal_size_t n)
336 DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
337 "hw_addr 0x%x, dest %p hw_addr 0x%x, size %lu\n",
338 hw_addr, dest, hw_addr, (unsigned long)n);
340 ecore_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false);
343 void ecore_memcpy_to(struct ecore_hwfn *p_hwfn,
344 struct ecore_ptt *p_ptt,
345 u32 hw_addr, void *src, osal_size_t n)
347 DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
348 "hw_addr 0x%x, hw_addr 0x%x, src %p size %lu\n",
349 hw_addr, hw_addr, src, (unsigned long)n);
351 ecore_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
354 void ecore_fid_pretend(struct ecore_hwfn *p_hwfn,
355 struct ecore_ptt *p_ptt, u16 fid)
359 SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
360 SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);
362 /* Every pretend undoes previous pretends, including a previous port pretend */
364 SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
365 SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
366 SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
368 if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
369 fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);
371 p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);
372 p_ptt->pxp.pretend.fid.concrete_fid.fid = OSAL_CPU_TO_LE16(fid);
375 ecore_ptt_config_addr(p_ptt) +
376 OFFSETOF(struct pxp_ptt_entry, pretend),
377 *(u32 *)&p_ptt->pxp.pretend);
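/* Sketch of the usual pairing (assuming the common ecore flow): after a
* pretend to a VF's concrete FID, the PF restores its own view with
* ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id).
*/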
380 void ecore_port_pretend(struct ecore_hwfn *p_hwfn,
381 struct ecore_ptt *p_ptt, u8 port_id)
385 SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
386 SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
387 SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
388 p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);
391 ecore_ptt_config_addr(p_ptt) +
392 OFFSETOF(struct pxp_ptt_entry, pretend),
393 *(u32 *)&p_ptt->pxp.pretend);
396 void ecore_port_unpretend(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
400 SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
401 SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
402 SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
404 p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);
407 ecore_ptt_config_addr(p_ptt) +
408 OFFSETOF(struct pxp_ptt_entry, pretend),
409 *(u32 *)&p_ptt->pxp.pretend);
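/* Typical pairing (sketch): ecore_port_pretend(p_hwfn, p_ptt, port_id)
* before touching another port's registers, then ecore_port_unpretend()
* to restore the default port view.
*/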
412 u32 ecore_vfid_to_concrete(struct ecore_hwfn *p_hwfn, u8 vfid)
414 u32 concrete_fid = 0;
416 SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id);
417 SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid);
418 SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1);
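/* The value built here can be fed (truncated to 16 bits by the caller)
* into ecore_fid_pretend() so subsequent accesses through the PTT are
* issued on behalf of the given VF - a sketch of the intended use.
*/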
426 * Although the implementation is ready, today we don't have any flow that
427 * utilizes said locks - and we want to keep it this way.
428 * If this changes, this needs to be revisited.
434 static void ecore_dmae_opcode(struct ecore_hwfn *p_hwfn,
435 const u8 is_src_type_grc,
436 const u8 is_dst_type_grc,
437 struct ecore_dmae_params *p_params)
442 /* Whether the source is the PCIe or the GRC.
443 * 0- The source is the PCIe
444 * 1- The source is the GRC.
446 opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
447 : DMAE_CMD_SRC_MASK_PCIE) << DMAE_CMD_SRC_SHIFT;
448 opcode |= (p_hwfn->rel_pf_id & DMAE_CMD_SRC_PF_ID_MASK) <<
449 DMAE_CMD_SRC_PF_ID_SHIFT;
451 /* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
452 opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
453 : DMAE_CMD_DST_MASK_PCIE) << DMAE_CMD_DST_SHIFT;
454 opcode |= (p_hwfn->rel_pf_id & DMAE_CMD_DST_PF_ID_MASK) <<
455 DMAE_CMD_DST_PF_ID_SHIFT;
457 /* DMAE_E4_TODO need to check which value to specify here. */
458 /* opcode |= (!b_complete_to_host)<< DMAE_CMD_C_DST_SHIFT; */
460 /* Whether to write a completion word to the completion destination:
461 * 0-Do not write a completion word
462 * 1-Write the completion word
464 opcode |= DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT;
465 opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK << DMAE_CMD_SRC_ADDR_RESET_SHIFT;
467 if (p_params->flags & ECORE_DMAE_FLAG_COMPLETION_DST)
468 opcode |= 1 << DMAE_CMD_COMP_FUNC_SHIFT;
470 /* Swapping mode 3 - big endian. There should be a define ifdef'd in
471 * the HSI somewhere; since there currently is not, the value is hard-coded here.
473 opcode |= DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT;
475 opcode |= p_hwfn->port_id << DMAE_CMD_PORT_ID_SHIFT;
477 /* reset source address in next go */
478 opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK << DMAE_CMD_SRC_ADDR_RESET_SHIFT;
480 /* reset dest address in next go */
481 opcode |= DMAE_CMD_DST_ADDR_RESET_MASK << DMAE_CMD_DST_ADDR_RESET_SHIFT;
483 /* SRC/DST VFID: all 1's means PF, otherwise the VF id */
484 if (p_params->flags & ECORE_DMAE_FLAG_VF_SRC) {
485 opcode |= (1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT);
486 opcode_b |= (p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT);
488 opcode_b |= (DMAE_CMD_SRC_VF_ID_MASK <<
489 DMAE_CMD_SRC_VF_ID_SHIFT);
491 if (p_params->flags & ECORE_DMAE_FLAG_VF_DST) {
492 opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
493 opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
495 opcode_b |= DMAE_CMD_DST_VF_ID_MASK << DMAE_CMD_DST_VF_ID_SHIFT;
498 p_hwfn->dmae_info.p_dmae_cmd->opcode = OSAL_CPU_TO_LE32(opcode);
499 p_hwfn->dmae_info.p_dmae_cmd->opcode_b = OSAL_CPU_TO_LE16(opcode_b);
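/* At this point 'opcode' carries the src/dst types, PF ids, completion,
* address-reset, endianness and port fields, and 'opcode_b' the VF id
* fields; both are latched into the shared command used by
* ecore_dmae_post_command().
*/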
502 static u32 ecore_dmae_idx_to_go_cmd(u8 idx)
504 OSAL_BUILD_BUG_ON((DMAE_REG_GO_C31 - DMAE_REG_GO_C0) != 31 * 4);
506 /* All the DMAE 'go' registers form an array in internal memory */
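/* e.g., idx 3 yields DMAE_REG_GO_C0 + 12, i.e. DMAE_REG_GO_C3 */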
507 return DMAE_REG_GO_C0 + (idx << 2);
510 static enum _ecore_status_t ecore_dmae_post_command(struct ecore_hwfn *p_hwfn,
511 struct ecore_ptt *p_ptt)
513 struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd;
514 u8 idx_cmd = p_hwfn->dmae_info.channel, i;
515 enum _ecore_status_t ecore_status = ECORE_SUCCESS;
517 /* verify address is not OSAL_NULL */
518 if ((((!p_command->dst_addr_lo) && (!p_command->dst_addr_hi)) ||
519 ((!p_command->src_addr_lo) && (!p_command->src_addr_hi)))) {
520 DP_NOTICE(p_hwfn, true,
521 "source or destination address 0 idx_cmd=%d\n"
522 "opcode = [0x%08x,0x%04x] len=0x%x"
523 " src=0x%x:%x dst=0x%x:%x\n",
525 OSAL_LE32_TO_CPU(p_command->opcode),
526 OSAL_LE16_TO_CPU(p_command->opcode_b),
527 OSAL_LE16_TO_CPU(p_command->length_dw),
528 OSAL_LE32_TO_CPU(p_command->src_addr_hi),
529 OSAL_LE32_TO_CPU(p_command->src_addr_lo),
530 OSAL_LE32_TO_CPU(p_command->dst_addr_hi),
531 OSAL_LE32_TO_CPU(p_command->dst_addr_lo));
536 DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
537 "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x]"
538 "len=0x%x src=0x%x:%x dst=0x%x:%x\n",
540 OSAL_LE32_TO_CPU(p_command->opcode),
541 OSAL_LE16_TO_CPU(p_command->opcode_b),
542 OSAL_LE16_TO_CPU(p_command->length_dw),
543 OSAL_LE32_TO_CPU(p_command->src_addr_hi),
544 OSAL_LE32_TO_CPU(p_command->src_addr_lo),
545 OSAL_LE32_TO_CPU(p_command->dst_addr_hi),
546 OSAL_LE32_TO_CPU(p_command->dst_addr_lo));
548 /* Copy the command to the DMAE - this needs to be done before every call
549 * because of the source/dest address no-reset behavior.
550 * The number of commands has been increased to 16 (previously 14).
551 * The first 9 DWs are the command registers, the 10th DW is the
553 * the rest are result registers (which are read only by the client).
555 for (i = 0; i < DMAE_CMD_SIZE; i++) {
556 u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
557 *(((u32 *)p_command) + i) : 0;
559 ecore_wr(p_hwfn, p_ptt,
561 (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) +
562 (i * sizeof(u32)), data);
565 ecore_wr(p_hwfn, p_ptt,
566 ecore_dmae_idx_to_go_cmd(idx_cmd), DMAE_GO_VALUE);
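/* Writing DMAE_GO_VALUE to the per-channel GO register starts the engine;
* completion is later detected by polling the completion word in
* ecore_dmae_operation_wait().
*/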
571 enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn)
573 dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr;
574 struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd;
575 u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer;
576 u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;
578 *p_comp = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr, sizeof(u32));
579 if (*p_comp == OSAL_NULL) {
580 DP_NOTICE(p_hwfn, false,
581 "Failed to allocate `p_completion_word'\n");
585 p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
586 *p_cmd = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr,
587 sizeof(struct dmae_cmd));
588 if (*p_cmd == OSAL_NULL) {
589 DP_NOTICE(p_hwfn, false,
590 "Failed to allocate `struct dmae_cmd'\n");
594 p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
595 *p_buff = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr,
596 sizeof(u32) * DMAE_MAX_RW_SIZE);
597 if (*p_buff == OSAL_NULL) {
598 DP_NOTICE(p_hwfn, false,
599 "Failed to allocate `intermediate_buffer'\n");
603 p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;
604 p_hwfn->dmae_info.b_mem_ready = true;
606 return ECORE_SUCCESS;
608 ecore_dmae_info_free(p_hwfn);
612 void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn)
616 OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);
617 p_hwfn->dmae_info.b_mem_ready = false;
618 OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);
620 if (p_hwfn->dmae_info.p_completion_word != OSAL_NULL) {
621 p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
622 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
623 p_hwfn->dmae_info.p_completion_word,
624 p_phys, sizeof(u32));
625 p_hwfn->dmae_info.p_completion_word = OSAL_NULL;
628 if (p_hwfn->dmae_info.p_dmae_cmd != OSAL_NULL) {
629 p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
630 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
631 p_hwfn->dmae_info.p_dmae_cmd,
632 p_phys, sizeof(struct dmae_cmd));
633 p_hwfn->dmae_info.p_dmae_cmd = OSAL_NULL;
636 if (p_hwfn->dmae_info.p_intermediate_buffer != OSAL_NULL) {
637 p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
638 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
639 p_hwfn->dmae_info.p_intermediate_buffer,
640 p_phys, sizeof(u32) * DMAE_MAX_RW_SIZE);
641 p_hwfn->dmae_info.p_intermediate_buffer = OSAL_NULL;
645 static enum _ecore_status_t ecore_dmae_operation_wait(struct ecore_hwfn *p_hwfn)
647 u32 wait_cnt_limit = 10000, wait_cnt = 0;
648 enum _ecore_status_t ecore_status = ECORE_SUCCESS;
651 u32 factor = (CHIP_REV_IS_EMUL(p_hwfn->p_dev) ?
653 (CHIP_REV_IS_FPGA(p_hwfn->p_dev) ?
654 ECORE_FPGA_FACTOR : 1));
656 wait_cnt_limit *= factor;
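/* e.g., on emulation the 10000-iteration limit is scaled by
* ECORE_EMUL_FACTOR (2000), on FPGA by ECORE_FPGA_FACTOR (200).
*/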
659 /* DMAE_E4_TODO : check if we have to call any other function
660 * other than BARRIER to sync the completion_word since we are not
661 * using the volatile keyword for this
663 OSAL_BARRIER(p_hwfn->p_dev);
664 while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) {
665 OSAL_UDELAY(DMAE_MIN_WAIT_TIME);
666 if (++wait_cnt > wait_cnt_limit) {
667 DP_NOTICE(p_hwfn->p_dev, ECORE_MSG_HW,
668 "Timed-out waiting for operation to"
669 " complete. Completion word is 0x%08x"
670 " expected 0x%08x.\n",
671 *p_hwfn->dmae_info.p_completion_word,
672 DMAE_COMPLETION_VAL);
673 ecore_status = ECORE_TIMEOUT;
676 /* to sync the completion_word since we are not
677 * using the volatile keyword for p_completion_word
679 OSAL_BARRIER(p_hwfn->p_dev);
682 if (ecore_status == ECORE_SUCCESS)
683 *p_hwfn->dmae_info.p_completion_word = 0;
688 static enum _ecore_status_t
689 ecore_dmae_execute_sub_operation(struct ecore_hwfn *p_hwfn,
690 struct ecore_ptt *p_ptt,
693 u8 src_type, u8 dst_type, u32 length_dw)
695 dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
696 struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
697 enum _ecore_status_t ecore_status = ECORE_SUCCESS;
700 case ECORE_DMAE_ADDRESS_GRC:
701 case ECORE_DMAE_ADDRESS_HOST_PHYS:
702 cmd->src_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(src_addr));
703 cmd->src_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(src_addr));
705 /* for virt source addresses we use the intermediate buffer. */
706 case ECORE_DMAE_ADDRESS_HOST_VIRT:
707 cmd->src_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys));
708 cmd->src_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys));
709 OSAL_MEMCPY(&p_hwfn->dmae_info.p_intermediate_buffer[0],
710 (void *)(osal_uintptr_t)src_addr,
711 length_dw * sizeof(u32));
718 case ECORE_DMAE_ADDRESS_GRC:
719 case ECORE_DMAE_ADDRESS_HOST_PHYS:
720 cmd->dst_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(dst_addr));
721 cmd->dst_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(dst_addr));
723 /* for virt destination address we use the intermediate buff. */
724 case ECORE_DMAE_ADDRESS_HOST_VIRT:
725 cmd->dst_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys));
726 cmd->dst_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys));
732 cmd->length_dw = OSAL_CPU_TO_LE16((u16)length_dw);
734 if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT ||
735 src_type == ECORE_DMAE_ADDRESS_HOST_PHYS)
736 OSAL_DMA_SYNC(p_hwfn->p_dev,
737 (void *)HILO_U64(cmd->src_addr_hi,
739 length_dw * sizeof(u32), false);
741 ecore_dmae_post_command(p_hwfn, p_ptt);
743 ecore_status = ecore_dmae_operation_wait(p_hwfn);
745 /* TODO - is it true ? */
746 if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT ||
747 src_type == ECORE_DMAE_ADDRESS_HOST_PHYS)
748 OSAL_DMA_SYNC(p_hwfn->p_dev,
749 (void *)HILO_U64(cmd->src_addr_hi,
751 length_dw * sizeof(u32), true);
753 if (ecore_status != ECORE_SUCCESS) {
754 DP_NOTICE(p_hwfn, ECORE_MSG_HW,
755 "Wait Failed. source_addr 0x%lx, grc_addr 0x%lx, size_in_dwords 0x%x, intermediate buffer 0x%lx.\n",
756 (unsigned long)src_addr, (unsigned long)dst_addr,
758 (unsigned long)p_hwfn->dmae_info.intermediate_buffer_phys_addr);
762 if (dst_type == ECORE_DMAE_ADDRESS_HOST_VIRT)
763 OSAL_MEMCPY((void *)(osal_uintptr_t)(dst_addr),
764 &p_hwfn->dmae_info.p_intermediate_buffer[0],
765 length_dw * sizeof(u32));
767 return ECORE_SUCCESS;
770 static enum _ecore_status_t
771 ecore_dmae_execute_command(struct ecore_hwfn *p_hwfn,
772 struct ecore_ptt *p_ptt,
778 struct ecore_dmae_params *p_params)
780 dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr;
781 u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0;
782 struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
783 u64 src_addr_split = 0, dst_addr_split = 0;
784 u16 length_limit = DMAE_MAX_RW_SIZE;
785 enum _ecore_status_t ecore_status = ECORE_SUCCESS;
788 if (!p_hwfn->dmae_info.b_mem_ready) {
789 DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
790 "No buffers allocated. Avoid DMAE transaction [{src: addr 0x%lx, type %d}, {dst: addr 0x%lx, type %d}, size %d].\n",
791 (unsigned long)src_addr, src_type,
792 (unsigned long)dst_addr, dst_type,
797 if (p_hwfn->p_dev->recov_in_prog) {
798 DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
799 "Recovery is in progress. Avoid DMAE transaction [{src: addr 0x%lx, type %d}, {dst: addr 0x%lx, type %d}, size %d].\n",
800 (unsigned long)src_addr, src_type,
801 (unsigned long)dst_addr, dst_type,
803 /* Return success to let the flow complete successfully
804 * without any error handling.
806 return ECORE_SUCCESS;
810 DP_NOTICE(p_hwfn, true,
811 "ecore_dmae_execute_sub_operation failed. Invalid state. source_addr 0x%lx, destination addr 0x%lx, size_in_dwords 0x%x\n",
812 (unsigned long)src_addr,
813 (unsigned long)dst_addr,
818 ecore_dmae_opcode(p_hwfn,
819 (src_type == ECORE_DMAE_ADDRESS_GRC),
820 (dst_type == ECORE_DMAE_ADDRESS_GRC), p_params);
822 cmd->comp_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys));
823 cmd->comp_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys));
824 cmd->comp_val = OSAL_CPU_TO_LE32(DMAE_COMPLETION_VAL);
826 /* Check that the grc_addr is valid, i.e., below MAX_GRC_OFFSET */
827 cnt_split = size_in_dwords / length_limit;
828 length_mod = size_in_dwords % length_limit;
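/* Worked example (assuming, for illustration, a limit of 0x2000 dwords):
* size_in_dwords = 0x4800 gives cnt_split = 2 and length_mod = 0x800, so
* the loop below issues two full-size sub-operations plus one of 0x800
* dwords.
*/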
830 src_addr_split = src_addr;
831 dst_addr_split = dst_addr;
833 for (i = 0; i <= cnt_split; i++) {
834 offset = length_limit * i;
836 if (!(p_params->flags & ECORE_DMAE_FLAG_RW_REPL_SRC)) {
837 if (src_type == ECORE_DMAE_ADDRESS_GRC)
838 src_addr_split = src_addr + offset;
840 src_addr_split = src_addr + (offset * 4);
843 if (dst_type == ECORE_DMAE_ADDRESS_GRC)
844 dst_addr_split = dst_addr + offset;
846 dst_addr_split = dst_addr + (offset * 4);
848 length_cur = (cnt_split == i) ? length_mod : length_limit;
850 /* might be zero on last iteration */
854 ecore_status = ecore_dmae_execute_sub_operation(p_hwfn,
861 if (ecore_status != ECORE_SUCCESS) {
862 DP_NOTICE(p_hwfn, false,
863 "ecore_dmae_execute_sub_operation Failed"
864 " with error 0x%x. source_addr 0x%lx,"
865 " dest addr 0x%lx, size_in_dwords 0x%x\n",
866 ecore_status, (unsigned long)src_addr,
867 (unsigned long)dst_addr, length_cur);
869 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_DMAE_FAIL);
878 ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
879 struct ecore_ptt *p_ptt,
881 u32 grc_addr, u32 size_in_dwords, u32 flags)
883 u32 grc_addr_in_dw = grc_addr / sizeof(u32);
884 struct ecore_dmae_params params;
885 enum _ecore_status_t rc;
887 OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
888 params.flags = flags;
890 OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);
892 rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
894 ECORE_DMAE_ADDRESS_HOST_VIRT,
895 ECORE_DMAE_ADDRESS_GRC,
896 size_in_dwords, &params);
898 OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);
904 ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
905 struct ecore_ptt *p_ptt,
907 dma_addr_t dest_addr, u32 size_in_dwords, u32 flags)
909 u32 grc_addr_in_dw = grc_addr / sizeof(u32);
910 struct ecore_dmae_params params;
911 enum _ecore_status_t rc;
913 OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
914 params.flags = flags;
916 OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);
918 rc = ecore_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw,
919 dest_addr, ECORE_DMAE_ADDRESS_GRC,
920 ECORE_DMAE_ADDRESS_HOST_VIRT,
921 size_in_dwords, &params);
923 OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);
929 ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
930 struct ecore_ptt *p_ptt,
931 dma_addr_t source_addr,
932 dma_addr_t dest_addr,
933 u32 size_in_dwords, struct ecore_dmae_params *p_params)
935 enum _ecore_status_t rc;
937 OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);
939 rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
941 ECORE_DMAE_ADDRESS_HOST_PHYS,
942 ECORE_DMAE_ADDRESS_HOST_PHYS,
943 size_in_dwords, p_params);
945 OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);
950 void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn,
951 enum ecore_hw_err_type err_type)
953 /* Fan failure cannot be masked by handling of another HW error */
954 if (p_hwfn->p_dev->recov_in_prog && err_type != ECORE_HW_ERR_FAN_FAIL) {
955 DP_VERBOSE(p_hwfn, ECORE_MSG_DRV,
956 "Recovery is in progress."
957 "Avoid notifying about HW error %d.\n",
962 OSAL_HW_ERROR_OCCURRED(p_hwfn, err_type);
965 enum _ecore_status_t ecore_dmae_sanity(struct ecore_hwfn *p_hwfn,
966 struct ecore_ptt *p_ptt,
969 u32 size = OSAL_PAGE_SIZE / 2, val;
970 struct ecore_dmae_params params;
971 enum _ecore_status_t rc = ECORE_SUCCESS;
976 p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys, 2 * size);
978 DP_NOTICE(p_hwfn, false,
979 "DMAE sanity [%s]: failed to allocate memory\n",
984 /* Fill the bottom half of the allocated memory with a known pattern */
985 for (p_tmp = (u32 *)p_virt;
986 p_tmp < (u32 *)((u8 *)p_virt + size);
988 /* Save the address itself as the value */
989 val = (u32)(osal_uintptr_t)p_tmp;
993 /* Zero the top half of the allocated memory */
994 OSAL_MEM_ZERO((u8 *)p_virt + size, size);
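/* The DMAE copy below moves the patterned bottom half over the zeroed top
* half; the verification loop then expects every dword in the top half to
* equal the address of its counterpart in the bottom half.
*/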
996 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
997 "DMAE sanity [%s]: src_addr={phys 0x%lx, virt %p}, dst_addr={phys 0x%lx, virt %p}, size 0x%x\n",
998 phase, (unsigned long)p_phys, p_virt,
999 (unsigned long)(p_phys + size),
1000 (u8 *)p_virt + size, size);
1002 OSAL_MEMSET(&params, 0, sizeof(params));
1003 rc = ecore_dmae_host2host(p_hwfn, p_ptt, p_phys, p_phys + size,
1004 size / 4 /* size_in_dwords */, &params);
1005 if (rc != ECORE_SUCCESS) {
1006 DP_NOTICE(p_hwfn, false,
1007 "DMAE sanity [%s]: ecore_dmae_host2host() failed. rc = %d.\n",
1012 /* Verify that the top half of the allocated memory has the pattern */
1013 for (p_tmp = (u32 *)((u8 *)p_virt + size);
1014 p_tmp < (u32 *)((u8 *)p_virt + (2 * size));
1016 /* The corresponding address in the bottom half */
1017 val = (u32)(osal_uintptr_t)p_tmp - size;
1019 if (*p_tmp != val) {
1020 DP_NOTICE(p_hwfn, false,
1021 "DMAE sanity [%s]: addr={phys 0x%lx, virt %p}, read_val 0x%08x, expected_val 0x%08x\n",
1023 (unsigned long)p_phys +
1024 ((u8 *)p_tmp - (u8 *)p_virt),
1025 p_tmp, *p_tmp, val);
1026 rc = ECORE_UNKNOWN_ERROR;
1032 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_virt, p_phys, 2 * size);