1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2016 - 2018 Cavium Inc.
8 #include "ecore_hsi_common.h"
9 #include "ecore_status.h"
13 #include "ecore_utils.h"
14 #include "ecore_iov_api.h"
17 #define ECORE_EMUL_FACTOR 2000
18 #define ECORE_FPGA_FACTOR 200
21 #define ECORE_BAR_ACQUIRE_TIMEOUT 1000
24 #define ECORE_BAR_INVALID_OFFSET (OSAL_CPU_TO_LE32(-1))
27 osal_list_entry_t list_entry;
29 struct pxp_ptt_entry pxp;
33 struct ecore_ptt_pool {
34 osal_list_t free_list;
35 osal_spinlock_t lock; /* ptt synchronized access */
36 struct ecore_ptt ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
39 void __ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
41 OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ptt_pool);
42 p_hwfn->p_ptt_pool = OSAL_NULL;
45 enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn)
47 struct ecore_ptt_pool *p_pool = OSAL_ALLOC(p_hwfn->p_dev,
55 OSAL_LIST_INIT(&p_pool->free_list);
56 for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
57 p_pool->ptts[i].idx = i;
58 p_pool->ptts[i].pxp.offset = ECORE_BAR_INVALID_OFFSET;
59 p_pool->ptts[i].pxp.pretend.control = 0;
60 p_pool->ptts[i].hwfn_id = p_hwfn->my_id;
62 /* There are special PTT entries that are taken only by design.
63 * The rest are added to the list for general usage.
65 if (i >= RESERVED_PTT_MAX)
66 OSAL_LIST_PUSH_HEAD(&p_pool->ptts[i].list_entry,
70 p_hwfn->p_ptt_pool = p_pool;
71 #ifdef CONFIG_ECORE_LOCK_ALLOC
72 if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_pool->lock)) {
73 __ecore_ptt_pool_free(p_hwfn);
77 OSAL_SPIN_LOCK_INIT(&p_pool->lock);
81 void ecore_ptt_invalidate(struct ecore_hwfn *p_hwfn)
83 struct ecore_ptt *p_ptt;
86 for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
87 p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
88 p_ptt->pxp.offset = ECORE_BAR_INVALID_OFFSET;
92 void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
94 #ifdef CONFIG_ECORE_LOCK_ALLOC
95 if (p_hwfn->p_ptt_pool)
96 OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->p_ptt_pool->lock);
98 __ecore_ptt_pool_free(p_hwfn);
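/* Acquire a free PTT window from the pool, retrying up to
 * ECORE_BAR_ACQUIRE_TIMEOUT times before reporting an acquire timeout.
 */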
101 struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn)
103 struct ecore_ptt *p_ptt;
106 /* Take the free PTT from the list */
107 for (i = 0; i < ECORE_BAR_ACQUIRE_TIMEOUT; i++) {
108 OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock);
109 if (!OSAL_LIST_IS_EMPTY(&p_hwfn->p_ptt_pool->free_list)) {
110 p_ptt = OSAL_LIST_FIRST_ENTRY(
111 &p_hwfn->p_ptt_pool->free_list,
112 struct ecore_ptt, list_entry);
113 OSAL_LIST_REMOVE_ENTRY(&p_ptt->list_entry,
114 &p_hwfn->p_ptt_pool->free_list);
116 OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
118 DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
119 "allocated ptt %d\n", p_ptt->idx);
124 OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
128 DP_NOTICE(p_hwfn, true,
129 "PTT acquire timeout - failed to allocate PTT\n");
133 void ecore_ptt_release(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
135 /* This PTT should not be set to pretend if it is being released */
136 /* TODO - add some pretend sanity checks, to make sure pretend
137 * isn't set on this ptt
140 OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock);
141 OSAL_LIST_PUSH_HEAD(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
142 OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
145 static u32 ecore_ptt_get_hw_addr(struct ecore_ptt *p_ptt)
147 /* The HW is using DWORDs and we need to translate the offset to bytes */
148 return OSAL_LE32_TO_CPU(p_ptt->pxp.offset) << 2;
151 static u32 ecore_ptt_config_addr(struct ecore_ptt *p_ptt)
153 return PXP_PF_WINDOW_ADMIN_PER_PF_START +
154 p_ptt->idx * sizeof(struct pxp_ptt_entry);
157 u32 ecore_ptt_get_bar_addr(struct ecore_ptt *p_ptt)
159 return PXP_EXTERNAL_BAR_PF_WINDOW_START +
160 p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE;
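/* Point this PTT's external BAR window at new_hw_addr (a byte address in the
 * GRC space). Does nothing if the window base is already new_hw_addr.
 */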
163 void ecore_ptt_set_win(struct ecore_hwfn *p_hwfn,
164 struct ecore_ptt *p_ptt, u32 new_hw_addr)
168 prev_hw_addr = ecore_ptt_get_hw_addr(p_ptt);
170 if (new_hw_addr == prev_hw_addr)
173 /* Update PTT entry in admin window */
174 DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
175 "Updating PTT entry %d to offset 0x%x\n",
176 p_ptt->idx, new_hw_addr);
178 /* The HW is using DWORDS and the address is in Bytes */
179 p_ptt->pxp.offset = OSAL_CPU_TO_LE32(new_hw_addr >> 2);
182 ecore_ptt_config_addr(p_ptt) +
183 OFFSETOF(struct pxp_ptt_entry, offset),
184 OSAL_LE32_TO_CPU(p_ptt->pxp.offset));
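/* Translate a GRC address into an offset within this PTT's BAR window,
 * moving the window first if the address falls outside of it.
 */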
187 static u32 ecore_set_ptt(struct ecore_hwfn *p_hwfn,
188 struct ecore_ptt *p_ptt, u32 hw_addr)
190 u32 win_hw_addr = ecore_ptt_get_hw_addr(p_ptt);
193 offset = hw_addr - win_hw_addr;
195 if (p_ptt->hwfn_id != p_hwfn->my_id)
196 DP_NOTICE(p_hwfn, true,
197 "ptt[%d] of hwfn[%02x] is used by hwfn[%02x]!\n",
198 p_ptt->idx, p_ptt->hwfn_id, p_hwfn->my_id);
200 /* Verify the address is within the window */
201 if (hw_addr < win_hw_addr ||
202 offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
203 ecore_ptt_set_win(p_hwfn, p_ptt, hw_addr);
207 return ecore_ptt_get_bar_addr(p_ptt) + offset;
210 struct ecore_ptt *ecore_get_reserved_ptt(struct ecore_hwfn *p_hwfn,
211 enum reserved_ptts ptt_idx)
213 if (ptt_idx >= RESERVED_PTT_MAX) {
214 DP_NOTICE(p_hwfn, true,
215 "Requested PTT %d is out of range\n", ptt_idx);
219 return &p_hwfn->p_ptt_pool->ptts[ptt_idx];
222 static bool ecore_is_reg_fifo_empty(struct ecore_hwfn *p_hwfn,
223 struct ecore_ptt *p_ptt)
225 bool is_empty = true;
228 if (!p_hwfn->p_dev->chk_reg_fifo)
231 /* ecore_rd() cannot be used here since it calls this function */
232 bar_addr = ecore_set_ptt(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO_VALID_DATA);
233 is_empty = REG_RD(p_hwfn, bar_addr) == 0;
236 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
244 void ecore_wr(struct ecore_hwfn *p_hwfn,
245 struct ecore_ptt *p_ptt, u32 hw_addr, u32 val)
250 prev_fifo_err = !ecore_is_reg_fifo_empty(p_hwfn, p_ptt);
252 bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr);
253 REG_WR(p_hwfn, bar_addr, val);
254 DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
255 "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
256 bar_addr, hw_addr, val);
259 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
263 OSAL_WARN(!prev_fifo_err && !ecore_is_reg_fifo_empty(p_hwfn, p_ptt),
264 "reg_fifo err was caused by a call to ecore_wr(0x%x, 0x%x)\n",
268 u32 ecore_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 hw_addr)
273 prev_fifo_err = !ecore_is_reg_fifo_empty(p_hwfn, p_ptt);
275 bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr);
276 val = REG_RD(p_hwfn, bar_addr);
278 DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
279 "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
280 bar_addr, hw_addr, val);
283 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
287 OSAL_WARN(!prev_fifo_err && !ecore_is_reg_fifo_empty(p_hwfn, p_ptt),
288 "reg_fifo error was caused by a call to ecore_rd(0x%x)\n",
294 static void ecore_memcpy_hw(struct ecore_hwfn *p_hwfn,
295 struct ecore_ptt *p_ptt,
297 u32 hw_addr, osal_size_t n, bool to_device)
299 u32 dw_count, *host_addr, hw_offset;
300 osal_size_t quota, done = 0;
301 u32 OSAL_IOMEM *reg_addr;
304 quota = OSAL_MIN_T(osal_size_t, n - done,
305 PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);
307 if (IS_PF(p_hwfn->p_dev)) {
308 ecore_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
309 hw_offset = ecore_ptt_get_bar_addr(p_ptt);
311 hw_offset = hw_addr + done;
314 dw_count = quota / 4;
315 host_addr = (u32 *)((u8 *)addr + done);
316 reg_addr = (u32 OSAL_IOMEM *)OSAL_REG_ADDR(p_hwfn, hw_offset);
320 DIRECT_REG_WR(p_hwfn, reg_addr++, *host_addr++);
323 *host_addr++ = DIRECT_REG_RD(p_hwfn,
330 void ecore_memcpy_from(struct ecore_hwfn *p_hwfn,
331 struct ecore_ptt *p_ptt,
332 void *dest, u32 hw_addr, osal_size_t n)
334 DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
335 "hw_addr 0x%x, dest %p hw_addr 0x%x, size %lu\n",
336 hw_addr, dest, hw_addr, (unsigned long)n);
338 ecore_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false);
341 void ecore_memcpy_to(struct ecore_hwfn *p_hwfn,
342 struct ecore_ptt *p_ptt,
343 u32 hw_addr, void *src, osal_size_t n)
345 DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
346 "hw_addr 0x%x, hw_addr 0x%x, src %p size %lu\n",
347 hw_addr, hw_addr, src, (unsigned long)n);
349 ecore_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
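/* Program this PTT's pretend register so that subsequent GRC accesses made
 * through it are issued on behalf of the given concrete FID instead of the
 * current function.
 */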
352 void ecore_fid_pretend(struct ecore_hwfn *p_hwfn,
353 struct ecore_ptt *p_ptt, u16 fid)
357 SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
358 SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);
360 /* Every pretend undoes previous pretends, including a previous port pretend */
362 SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
363 SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
364 SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
366 if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
367 fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);
369 p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);
370 p_ptt->pxp.pretend.fid.concrete_fid.fid = OSAL_CPU_TO_LE16(fid);
373 ecore_ptt_config_addr(p_ptt) +
374 OFFSETOF(struct pxp_ptt_entry, pretend),
375 *(u32 *)&p_ptt->pxp.pretend);
378 void ecore_port_pretend(struct ecore_hwfn *p_hwfn,
379 struct ecore_ptt *p_ptt, u8 port_id)
383 SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
384 SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
385 SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
386 p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);
389 ecore_ptt_config_addr(p_ptt) +
390 OFFSETOF(struct pxp_ptt_entry, pretend),
391 *(u32 *)&p_ptt->pxp.pretend);
394 void ecore_port_unpretend(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
398 SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
399 SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
400 SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
402 p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);
405 ecore_ptt_config_addr(p_ptt) +
406 OFFSETOF(struct pxp_ptt_entry, pretend),
407 *(u32 *)&p_ptt->pxp.pretend);
410 void ecore_port_fid_pretend(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
415 SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
416 SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
417 SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
419 SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
420 SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);
422 if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
423 fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);
425 p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);
426 p_ptt->pxp.pretend.fid.concrete_fid.fid = OSAL_CPU_TO_LE16(fid);
429 ecore_ptt_config_addr(p_ptt) +
430 OFFSETOF(struct pxp_ptt_entry, pretend),
431 *(u32 *)&p_ptt->pxp.pretend);
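/* Build the concrete FID for the given relative VF id on the current PF */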
434 u32 ecore_vfid_to_concrete(struct ecore_hwfn *p_hwfn, u8 vfid)
436 u32 concrete_fid = 0;
438 SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id);
439 SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid);
440 SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1);
448 * Although the implementation is ready, today we don't have any flow that
449 * utilizes said locks - and we want to keep it this way.
450 * If this changes, this needs to be revisited.
456 static void ecore_dmae_opcode(struct ecore_hwfn *p_hwfn,
457 const u8 is_src_type_grc,
458 const u8 is_dst_type_grc,
459 struct ecore_dmae_params *p_params)
464 /* Whether the source is the PCIe or the GRC.
465 * 0- The source is the PCIe
466 * 1- The source is the GRC.
468 opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
469 : DMAE_CMD_SRC_MASK_PCIE) << DMAE_CMD_SRC_SHIFT;
470 opcode |= (p_hwfn->rel_pf_id & DMAE_CMD_SRC_PF_ID_MASK) <<
471 DMAE_CMD_SRC_PF_ID_SHIFT;
473 /* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
474 opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
475 : DMAE_CMD_DST_MASK_PCIE) << DMAE_CMD_DST_SHIFT;
476 opcode |= (p_hwfn->rel_pf_id & DMAE_CMD_DST_PF_ID_MASK) <<
477 DMAE_CMD_DST_PF_ID_SHIFT;
479 /* DMAE_E4_TODO need to check which value to specify here. */
480 /* opcode |= (!b_complete_to_host)<< DMAE_CMD_C_DST_SHIFT; */
482 /* Whether to write a completion word to the completion destination:
483 * 0-Do not write a completion word
484 * 1-Write the completion word
486 opcode |= DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT;
487 opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK << DMAE_CMD_SRC_ADDR_RESET_SHIFT;
489 if (p_params->flags & ECORE_DMAE_FLAG_COMPLETION_DST)
490 opcode |= 1 << DMAE_CMD_COMP_FUNC_SHIFT;
492 /* swapping mode 3 - big endian. There should be a define for this,
493 * ifdef'ed in the HSI somewhere; currently a local define is used.
495 opcode |= DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT;
497 opcode |= p_hwfn->port_id << DMAE_CMD_PORT_ID_SHIFT;
499 /* reset source address in next go */
500 opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK << DMAE_CMD_SRC_ADDR_RESET_SHIFT;
502 /* reset dest address in next go */
503 opcode |= DMAE_CMD_DST_ADDR_RESET_MASK << DMAE_CMD_DST_ADDR_RESET_SHIFT;
505 /* SRC/DST VFID: all 1's - pf, otherwise VF id */
506 if (p_params->flags & ECORE_DMAE_FLAG_VF_SRC) {
507 opcode |= (1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT);
508 opcode_b |= (p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT);
510 opcode_b |= (DMAE_CMD_SRC_VF_ID_MASK <<
511 DMAE_CMD_SRC_VF_ID_SHIFT);
513 if (p_params->flags & ECORE_DMAE_FLAG_VF_DST) {
514 opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
515 opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
517 opcode_b |= DMAE_CMD_DST_VF_ID_MASK << DMAE_CMD_DST_VF_ID_SHIFT;
520 p_hwfn->dmae_info.p_dmae_cmd->opcode = OSAL_CPU_TO_LE32(opcode);
521 p_hwfn->dmae_info.p_dmae_cmd->opcode_b = OSAL_CPU_TO_LE16(opcode_b);
524 static u32 ecore_dmae_idx_to_go_cmd(u8 idx)
526 OSAL_BUILD_BUG_ON((DMAE_REG_GO_C31 - DMAE_REG_GO_C0) != 31 * 4);
528 /* All the DMAE 'go' registers form an array in internal memory */
529 return DMAE_REG_GO_C0 + (idx << 2);
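/* Copy the prepared DMAE command into the engine's command registers and
 * ring the per-channel GO register to kick off the transaction.
 */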
532 static enum _ecore_status_t ecore_dmae_post_command(struct ecore_hwfn *p_hwfn,
533 struct ecore_ptt *p_ptt)
535 struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd;
536 u8 idx_cmd = p_hwfn->dmae_info.channel, i;
537 enum _ecore_status_t ecore_status = ECORE_SUCCESS;
539 /* verify that the source and destination addresses are not zero */
540 if ((((!p_command->dst_addr_lo) && (!p_command->dst_addr_hi)) ||
541 ((!p_command->src_addr_lo) && (!p_command->src_addr_hi)))) {
542 DP_NOTICE(p_hwfn, true,
543 "source or destination address 0 idx_cmd=%d\n"
544 "opcode = [0x%08x,0x%04x] len=0x%x"
545 " src=0x%x:%x dst=0x%x:%x\n",
547 OSAL_LE32_TO_CPU(p_command->opcode),
548 OSAL_LE16_TO_CPU(p_command->opcode_b),
549 OSAL_LE16_TO_CPU(p_command->length_dw),
550 OSAL_LE32_TO_CPU(p_command->src_addr_hi),
551 OSAL_LE32_TO_CPU(p_command->src_addr_lo),
552 OSAL_LE32_TO_CPU(p_command->dst_addr_hi),
553 OSAL_LE32_TO_CPU(p_command->dst_addr_lo));
558 DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
559 "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x]"
560 "len=0x%x src=0x%x:%x dst=0x%x:%x\n",
562 OSAL_LE32_TO_CPU(p_command->opcode),
563 OSAL_LE16_TO_CPU(p_command->opcode_b),
564 OSAL_LE16_TO_CPU(p_command->length_dw),
565 OSAL_LE32_TO_CPU(p_command->src_addr_hi),
566 OSAL_LE32_TO_CPU(p_command->src_addr_lo),
567 OSAL_LE32_TO_CPU(p_command->dst_addr_hi),
568 OSAL_LE32_TO_CPU(p_command->dst_addr_lo));
570 /* Copy the command to DMAE - need to do it before every call
571 * for source/dest address no reset.
572 * The number of commands has been increased to 16 (previously 14).
573 * The first 9 DWs are the command registers, the 10th DW is the
575 * GO register, and the rest are result registers (read only by the client).
577 for (i = 0; i < DMAE_CMD_SIZE; i++) {
578 u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
579 *(((u32 *)p_command) + i) : 0;
581 ecore_wr(p_hwfn, p_ptt,
583 (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) +
584 (i * sizeof(u32)), data);
587 ecore_wr(p_hwfn, p_ptt,
588 ecore_dmae_idx_to_go_cmd(idx_cmd), DMAE_GO_VALUE);
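/* Allocate the DMA-coherent buffers used by the DMAE engine: the completion
 * word, the command structure and the intermediate data buffer.
 */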
593 enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn)
595 dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr;
596 struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd;
597 u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer;
598 u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;
600 *p_comp = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr, sizeof(u32));
601 if (*p_comp == OSAL_NULL) {
602 DP_NOTICE(p_hwfn, false,
603 "Failed to allocate `p_completion_word'\n");
607 p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
608 *p_cmd = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr,
609 sizeof(struct dmae_cmd));
610 if (*p_cmd == OSAL_NULL) {
611 DP_NOTICE(p_hwfn, false,
612 "Failed to allocate `struct dmae_cmd'\n");
616 p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
617 *p_buff = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr,
618 sizeof(u32) * DMAE_MAX_RW_SIZE);
619 if (*p_buff == OSAL_NULL) {
620 DP_NOTICE(p_hwfn, false,
621 "Failed to allocate `intermediate_buffer'\n");
625 p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;
626 p_hwfn->dmae_info.b_mem_ready = true;
628 return ECORE_SUCCESS;
630 ecore_dmae_info_free(p_hwfn);
634 void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn)
638 OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);
639 p_hwfn->dmae_info.b_mem_ready = false;
640 OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);
642 if (p_hwfn->dmae_info.p_completion_word != OSAL_NULL) {
643 p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
644 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
645 p_hwfn->dmae_info.p_completion_word,
646 p_phys, sizeof(u32));
647 p_hwfn->dmae_info.p_completion_word = OSAL_NULL;
650 if (p_hwfn->dmae_info.p_dmae_cmd != OSAL_NULL) {
651 p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
652 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
653 p_hwfn->dmae_info.p_dmae_cmd,
654 p_phys, sizeof(struct dmae_cmd));
655 p_hwfn->dmae_info.p_dmae_cmd = OSAL_NULL;
658 if (p_hwfn->dmae_info.p_intermediate_buffer != OSAL_NULL) {
659 p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
660 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
661 p_hwfn->dmae_info.p_intermediate_buffer,
662 p_phys, sizeof(u32) * DMAE_MAX_RW_SIZE);
663 p_hwfn->dmae_info.p_intermediate_buffer = OSAL_NULL;
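/* Poll the completion word until the DMAE engine writes DMAE_COMPLETION_VAL,
 * scaling the wait limit on emulation/FPGA platforms; returns ECORE_TIMEOUT
 * if the command does not complete in time.
 */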
667 static enum _ecore_status_t ecore_dmae_operation_wait(struct ecore_hwfn *p_hwfn)
669 u32 wait_cnt_limit = 10000, wait_cnt = 0;
670 enum _ecore_status_t ecore_status = ECORE_SUCCESS;
673 u32 factor = (CHIP_REV_IS_EMUL(p_hwfn->p_dev) ?
675 (CHIP_REV_IS_FPGA(p_hwfn->p_dev) ?
676 ECORE_FPGA_FACTOR : 1));
678 wait_cnt_limit *= factor;
681 /* DMAE_E4_TODO : TODO check if we have to call any other function
682 * other than BARRIER to sync the completion_word since we are not
683 * using the volatile keyword for this
685 OSAL_BARRIER(p_hwfn->p_dev);
686 while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) {
687 OSAL_UDELAY(DMAE_MIN_WAIT_TIME);
688 if (++wait_cnt > wait_cnt_limit) {
689 DP_NOTICE(p_hwfn->p_dev, ECORE_MSG_HW,
690 "Timed-out waiting for operation to"
691 " complete. Completion word is 0x%08x"
692 " expected 0x%08x.\n",
693 *p_hwfn->dmae_info.p_completion_word,
694 DMAE_COMPLETION_VAL);
695 ecore_status = ECORE_TIMEOUT;
698 /* to sync the completion_word since we are not
699 * using the volatile keyword for p_completion_word
701 OSAL_BARRIER(p_hwfn->p_dev);
704 if (ecore_status == ECORE_SUCCESS)
705 *p_hwfn->dmae_info.p_completion_word = 0;
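/* Issue one DMAE command for the given chunk. Host-virtual addresses are
 * bounced through the intermediate buffer; GRC and host-physical addresses
 * are programmed directly.
 */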
710 static enum _ecore_status_t
711 ecore_dmae_execute_sub_operation(struct ecore_hwfn *p_hwfn,
712 struct ecore_ptt *p_ptt,
715 u8 src_type, u8 dst_type, u32 length_dw)
717 dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
718 struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
719 enum _ecore_status_t ecore_status = ECORE_SUCCESS;
722 case ECORE_DMAE_ADDRESS_GRC:
723 case ECORE_DMAE_ADDRESS_HOST_PHYS:
724 cmd->src_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(src_addr));
725 cmd->src_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(src_addr));
727 /* for virt source addresses we use the intermediate buffer. */
728 case ECORE_DMAE_ADDRESS_HOST_VIRT:
729 cmd->src_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys));
730 cmd->src_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys));
731 OSAL_MEMCPY(&p_hwfn->dmae_info.p_intermediate_buffer[0],
732 (void *)(osal_uintptr_t)src_addr,
733 length_dw * sizeof(u32));
740 case ECORE_DMAE_ADDRESS_GRC:
741 case ECORE_DMAE_ADDRESS_HOST_PHYS:
742 cmd->dst_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(dst_addr));
743 cmd->dst_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(dst_addr));
745 /* for virt destination address we use the intermediate buff. */
746 case ECORE_DMAE_ADDRESS_HOST_VIRT:
747 cmd->dst_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys));
748 cmd->dst_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys));
754 cmd->length_dw = OSAL_CPU_TO_LE16((u16)length_dw);
756 if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT ||
757 src_type == ECORE_DMAE_ADDRESS_HOST_PHYS)
758 OSAL_DMA_SYNC(p_hwfn->p_dev,
759 (void *)HILO_U64(cmd->src_addr_hi,
761 length_dw * sizeof(u32), false);
763 ecore_dmae_post_command(p_hwfn, p_ptt);
765 ecore_status = ecore_dmae_operation_wait(p_hwfn);
767 /* TODO - is it true ? */
768 if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT ||
769 src_type == ECORE_DMAE_ADDRESS_HOST_PHYS)
770 OSAL_DMA_SYNC(p_hwfn->p_dev,
771 (void *)HILO_U64(cmd->src_addr_hi,
773 length_dw * sizeof(u32), true);
775 if (ecore_status != ECORE_SUCCESS) {
776 DP_NOTICE(p_hwfn, ECORE_MSG_HW,
777 "Wait Failed. source_addr 0x%lx, grc_addr 0x%lx, size_in_dwords 0x%x, intermediate buffer 0x%lx.\n",
778 (unsigned long)src_addr, (unsigned long)dst_addr,
780 (unsigned long)p_hwfn->dmae_info.intermediate_buffer_phys_addr);
784 if (dst_type == ECORE_DMAE_ADDRESS_HOST_VIRT)
785 OSAL_MEMCPY((void *)(osal_uintptr_t)(dst_addr),
786 &p_hwfn->dmae_info.p_intermediate_buffer[0],
787 length_dw * sizeof(u32));
789 return ECORE_SUCCESS;
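/* Split a DMAE transfer into chunks of at most DMAE_MAX_RW_SIZE dwords and
 * execute them one sub-operation at a time, bailing out (and notifying a
 * DMAE failure) if any chunk fails.
 */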
792 static enum _ecore_status_t
793 ecore_dmae_execute_command(struct ecore_hwfn *p_hwfn,
794 struct ecore_ptt *p_ptt,
800 struct ecore_dmae_params *p_params)
802 dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr;
803 u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0;
804 struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
805 u64 src_addr_split = 0, dst_addr_split = 0;
806 u16 length_limit = DMAE_MAX_RW_SIZE;
807 enum _ecore_status_t ecore_status = ECORE_SUCCESS;
810 if (!p_hwfn->dmae_info.b_mem_ready) {
811 DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
812 "No buffers allocated. Avoid DMAE transaction [{src: addr 0x%lx, type %d}, {dst: addr 0x%lx, type %d}, size %d].\n",
813 (unsigned long)src_addr, src_type,
814 (unsigned long)dst_addr, dst_type,
819 if (p_hwfn->p_dev->recov_in_prog) {
820 DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
821 "Recovery is in progress. Avoid DMAE transaction [{src: addr 0x%lx, type %d}, {dst: addr 0x%lx, type %d}, size %d].\n",
822 (unsigned long)src_addr, src_type,
823 (unsigned long)dst_addr, dst_type,
825 /* Return success to let the flow complete successfully
826 * w/o any error handling.
828 return ECORE_SUCCESS;
832 DP_NOTICE(p_hwfn, true,
833 "ecore_dmae_execute_sub_operation failed. Invalid state. source_addr 0x%lx, destination addr 0x%lx, size_in_dwords 0x%x\n",
834 (unsigned long)src_addr,
835 (unsigned long)dst_addr,
840 ecore_dmae_opcode(p_hwfn,
841 (src_type == ECORE_DMAE_ADDRESS_GRC),
842 (dst_type == ECORE_DMAE_ADDRESS_GRC), p_params);
844 cmd->comp_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys));
845 cmd->comp_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys));
846 cmd->comp_val = OSAL_CPU_TO_LE32(DMAE_COMPLETION_VAL);
848 /* Check that the grc_addr is valid, i.e. < MAX_GRC_OFFSET */
849 cnt_split = size_in_dwords / length_limit;
850 length_mod = size_in_dwords % length_limit;
852 src_addr_split = src_addr;
853 dst_addr_split = dst_addr;
855 for (i = 0; i <= cnt_split; i++) {
856 offset = length_limit * i;
858 if (!(p_params->flags & ECORE_DMAE_FLAG_RW_REPL_SRC)) {
859 if (src_type == ECORE_DMAE_ADDRESS_GRC)
860 src_addr_split = src_addr + offset;
862 src_addr_split = src_addr + (offset * 4);
865 if (dst_type == ECORE_DMAE_ADDRESS_GRC)
866 dst_addr_split = dst_addr + offset;
868 dst_addr_split = dst_addr + (offset * 4);
870 length_cur = (cnt_split == i) ? length_mod : length_limit;
872 /* might be zero on last iteration */
876 ecore_status = ecore_dmae_execute_sub_operation(p_hwfn,
883 if (ecore_status != ECORE_SUCCESS) {
884 DP_NOTICE(p_hwfn, false,
885 "ecore_dmae_execute_sub_operation Failed"
886 " with error 0x%x. source_addr 0x%lx,"
887 " dest addr 0x%lx, size_in_dwords 0x%x\n",
888 ecore_status, (unsigned long)src_addr,
889 (unsigned long)dst_addr, length_cur);
891 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_DMAE_FAIL);
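/* The host2grc/grc2host/host2host wrappers below serialize on dmae_info.lock,
 * convert byte GRC addresses to dword addresses where needed, and delegate to
 * ecore_dmae_execute_command().
 */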
900 ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
901 struct ecore_ptt *p_ptt,
903 u32 grc_addr, u32 size_in_dwords, u32 flags)
905 u32 grc_addr_in_dw = grc_addr / sizeof(u32);
906 struct ecore_dmae_params params;
907 enum _ecore_status_t rc;
909 OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
910 params.flags = flags;
912 OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);
914 rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
916 ECORE_DMAE_ADDRESS_HOST_VIRT,
917 ECORE_DMAE_ADDRESS_GRC,
918 size_in_dwords, &params);
920 OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);
926 ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
927 struct ecore_ptt *p_ptt,
929 dma_addr_t dest_addr, u32 size_in_dwords, u32 flags)
931 u32 grc_addr_in_dw = grc_addr / sizeof(u32);
932 struct ecore_dmae_params params;
933 enum _ecore_status_t rc;
935 OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
936 params.flags = flags;
938 OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);
940 rc = ecore_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw,
941 dest_addr, ECORE_DMAE_ADDRESS_GRC,
942 ECORE_DMAE_ADDRESS_HOST_VIRT,
943 size_in_dwords, &params);
945 OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);
951 ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
952 struct ecore_ptt *p_ptt,
953 dma_addr_t source_addr,
954 dma_addr_t dest_addr,
955 u32 size_in_dwords, struct ecore_dmae_params *p_params)
957 enum _ecore_status_t rc;
959 OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);
961 rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
963 ECORE_DMAE_ADDRESS_HOST_PHYS,
964 ECORE_DMAE_ADDRESS_HOST_PHYS,
965 size_in_dwords, p_params);
967 OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);
972 void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn,
973 enum ecore_hw_err_type err_type)
975 /* Fan failure cannot be masked by handling of another HW error */
976 if (p_hwfn->p_dev->recov_in_prog && err_type != ECORE_HW_ERR_FAN_FAIL) {
977 DP_VERBOSE(p_hwfn, ECORE_MSG_DRV,
978 "Recovery is in progress."
979 "Avoid notifying about HW error %d.\n",
984 OSAL_HW_ERROR_OCCURRED(p_hwfn, err_type);
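/* DMAE sanity test: fill the bottom half of a DMA buffer with a known
 * pattern, copy it to the top half via host2host DMAE, and verify that the
 * pattern arrived intact.
 */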
987 enum _ecore_status_t ecore_dmae_sanity(struct ecore_hwfn *p_hwfn,
988 struct ecore_ptt *p_ptt,
991 u32 size = OSAL_PAGE_SIZE / 2, val;
992 struct ecore_dmae_params params;
993 enum _ecore_status_t rc = ECORE_SUCCESS;
998 p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys, 2 * size);
1000 DP_NOTICE(p_hwfn, false,
1001 "DMAE sanity [%s]: failed to allocate memory\n",
1006 /* Fill the bottom half of the allocated memory with a known pattern */
1007 for (p_tmp = (u32 *)p_virt;
1008 p_tmp < (u32 *)((u8 *)p_virt + size);
1010 /* Save the address itself as the value */
1011 val = (u32)(osal_uintptr_t)p_tmp;
1015 /* Zero the top half of the allocated memory */
1016 OSAL_MEM_ZERO((u8 *)p_virt + size, size);
1018 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1019 "DMAE sanity [%s]: src_addr={phys 0x%lx, virt %p}, dst_addr={phys 0x%lx, virt %p}, size 0x%x\n",
1020 phase, (unsigned long)p_phys, p_virt,
1021 (unsigned long)(p_phys + size),
1022 (u8 *)p_virt + size, size);
1024 OSAL_MEMSET(&params, 0, sizeof(params));
1025 rc = ecore_dmae_host2host(p_hwfn, p_ptt, p_phys, p_phys + size,
1026 size / 4 /* size_in_dwords */, &params);
1027 if (rc != ECORE_SUCCESS) {
1028 DP_NOTICE(p_hwfn, false,
1029 "DMAE sanity [%s]: ecore_dmae_host2host() failed. rc = %d.\n",
1034 /* Verify that the top half of the allocated memory has the pattern */
1035 for (p_tmp = (u32 *)((u8 *)p_virt + size);
1036 p_tmp < (u32 *)((u8 *)p_virt + (2 * size));
1038 /* The corresponding address in the bottom half */
1039 val = (u32)(osal_uintptr_t)p_tmp - size;
1041 if (*p_tmp != val) {
1042 DP_NOTICE(p_hwfn, false,
1043 "DMAE sanity [%s]: addr={phys 0x%lx, virt %p}, read_val 0x%08x, expected_val 0x%08x\n",
1045 (unsigned long)p_phys +
1046 ((u8 *)p_tmp - (u8 *)p_virt),
1047 p_tmp, *p_tmp, val);
1048 rc = ECORE_UNKNOWN_ERROR;
1054 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_virt, p_phys, 2 * size);