/* Copyright (c) 2016 QLogic Corporation.
 * See LICENSE.qede_pmd for copyright and licensing details.
 */
#include "ecore_hsi_common.h"
#include "ecore_status.h"
#include "ecore_utils.h"
#include "ecore_iov_api.h"
#define ECORE_EMUL_FACTOR 2000
#define ECORE_FPGA_FACTOR 200

#define ECORE_BAR_ACQUIRE_TIMEOUT 1000

#define ECORE_BAR_INVALID_OFFSET -1
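
/* Background note (summary added for clarity, not part of the original
 * sources): each PF owns a set of PTT (PF translation table) entries.  A PTT
 * maps a small, fixed-size window in the BAR onto an arbitrary GRC address,
 * letting a large register space be reached through a modest BAR.  Window
 * bases are stored in dwords, e.g. a GRC byte address of 0x10000 is
 * programmed as offset 0x4000 (0x10000 >> 2) and translated back with a
 * << 2, as ecore_ptt_set_win() / ecore_ptt_get_hw_addr() below do.
 */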

struct ecore_ptt {
	osal_list_entry_t list_entry;
	unsigned int idx;
	struct pxp_ptt_entry pxp;
};

struct ecore_ptt_pool {
	osal_list_t free_list;
	osal_spinlock_t lock;
	struct ecore_ptt ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
};

enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_ptt_pool *p_pool;
	int i;

	p_pool = OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
			    sizeof(struct ecore_ptt_pool));
	if (!p_pool)
		return ECORE_NOMEM;

	OSAL_LIST_INIT(&p_pool->free_list);
	for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
		p_pool->ptts[i].idx = i;
		p_pool->ptts[i].pxp.offset = ECORE_BAR_INVALID_OFFSET;
		p_pool->ptts[i].pxp.pretend.control = 0;

		/* There are special PTT entries that are taken only by design.
		 * The rest are added to the list for general usage.
		 */
		if (i >= RESERVED_PTT_MAX)
			OSAL_LIST_PUSH_HEAD(&p_pool->ptts[i].list_entry,
					    &p_pool->free_list);
	}

	p_hwfn->p_ptt_pool = p_pool;
	OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_pool->lock);
	OSAL_SPIN_LOCK_INIT(&p_pool->lock);

	return ECORE_SUCCESS;
}

void ecore_ptt_invalidate(struct ecore_hwfn *p_hwfn)
{
	struct ecore_ptt *p_ptt;
	int i;

	for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
		p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
		p_ptt->pxp.offset = ECORE_BAR_INVALID_OFFSET;
	}
}

void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
{
	if (p_hwfn->p_ptt_pool)
		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->p_ptt_pool->lock);
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ptt_pool);
	p_hwfn->p_ptt_pool = OSAL_NULL;
}

struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn)
{
	struct ecore_ptt *p_ptt;
	int i;

	/* Take a free PTT from the list */
	for (i = 0; i < ECORE_BAR_ACQUIRE_TIMEOUT; i++) {
		OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock);
		if (!OSAL_LIST_IS_EMPTY(&p_hwfn->p_ptt_pool->free_list))
			break;
		OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
	}

	/* We should not time out, but it can happen... at this point the
	 * lock is no longer held.
	 */
	if (i == ECORE_BAR_ACQUIRE_TIMEOUT) {
		DP_NOTICE(p_hwfn, true, "Failed to allocate PTT\n");
		return OSAL_NULL;
	}

	p_ptt = OSAL_LIST_FIRST_ENTRY(&p_hwfn->p_ptt_pool->free_list,
				      struct ecore_ptt, list_entry);
	OSAL_LIST_REMOVE_ENTRY(&p_ptt->list_entry,
			       &p_hwfn->p_ptt_pool->free_list);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);

	DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "allocated ptt %d\n", p_ptt->idx);

	return p_ptt;
}

void ecore_ptt_release(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	/* This PTT should not be set to pretend if it is being released */

	OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock);
	OSAL_LIST_PUSH_HEAD(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
}
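
/* A minimal usage sketch (illustrative, not from the original file); the
 * register address `hw_addr' and the bit value are hypothetical:
 *
 *	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
 *
 *	if (p_ptt != OSAL_NULL) {
 *		u32 val = ecore_rd(p_hwfn, p_ptt, hw_addr);
 *
 *		ecore_wr(p_hwfn, p_ptt, hw_addr, val | 0x1);
 *		ecore_ptt_release(p_hwfn, p_ptt);
 *	}
 */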

u32 ecore_ptt_get_hw_addr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	/* The HW is using DWORDS and we need to translate it to Bytes */
	return p_ptt->pxp.offset << 2;
}

static u32 ecore_ptt_config_addr(struct ecore_ptt *p_ptt)
{
	return PXP_PF_WINDOW_ADMIN_PER_PF_START +
	       p_ptt->idx * sizeof(struct pxp_ptt_entry);
}

u32 ecore_ptt_get_bar_addr(struct ecore_ptt *p_ptt)
{
	return PXP_EXTERNAL_BAR_PF_WINDOW_START +
	       p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE;
}

void ecore_ptt_set_win(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt, u32 new_hw_addr)
{
	u32 prev_hw_addr;

	prev_hw_addr = ecore_ptt_get_hw_addr(p_hwfn, p_ptt);

	if (new_hw_addr == prev_hw_addr)
		return;

	/* Update PTT entry in admin window */
	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "Updating PTT entry %d to offset 0x%x\n",
		   p_ptt->idx, new_hw_addr);

	/* The HW is using DWORDS and the address is in Bytes */
	p_ptt->pxp.offset = new_hw_addr >> 2;

	REG_WR(p_hwfn,
	       ecore_ptt_config_addr(p_ptt) +
	       OFFSETOF(struct pxp_ptt_entry, offset), p_ptt->pxp.offset);
}

static u32 ecore_set_ptt(struct ecore_hwfn *p_hwfn,
			 struct ecore_ptt *p_ptt, u32 hw_addr)
{
	u32 win_hw_addr = ecore_ptt_get_hw_addr(p_hwfn, p_ptt);
	u32 offset;

	offset = hw_addr - win_hw_addr;

	/* Verify the address is within the window */
	if (hw_addr < win_hw_addr ||
	    offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
		ecore_ptt_set_win(p_hwfn, p_ptt, hw_addr);
		offset = 0;
	}

	return ecore_ptt_get_bar_addr(p_ptt) + offset;
}
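
/* Worked example with hypothetical numbers: if the window currently starts
 * at GRC address 0x10000 and hw_addr is 0x10010, offset 0x10 is inside the
 * window, so the function simply returns the window's BAR address + 0x10.
 * If hw_addr were below 0x10000 or beyond the window size, the window is
 * first moved to start exactly at hw_addr and the offset becomes 0.
 */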

struct ecore_ptt *ecore_get_reserved_ptt(struct ecore_hwfn *p_hwfn,
					 enum reserved_ptts ptt_idx)
{
	if (ptt_idx >= RESERVED_PTT_MAX) {
		DP_NOTICE(p_hwfn, true,
			  "Requested PTT %d is out of range\n", ptt_idx);
		return OSAL_NULL;
	}

	return &p_hwfn->p_ptt_pool->ptts[ptt_idx];
}

void ecore_wr(struct ecore_hwfn *p_hwfn,
	      struct ecore_ptt *p_ptt, u32 hw_addr, u32 val)
{
	u32 bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr);

	REG_WR(p_hwfn, bar_addr, val);
	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
		   bar_addr, hw_addr, val);

	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))

u32 ecore_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 hw_addr)
{
	u32 bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr);
	u32 val = REG_RD(p_hwfn, bar_addr);

	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
		   bar_addr, hw_addr, val);

	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))

static void ecore_memcpy_hw(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    void *addr,
			    u32 hw_addr, osal_size_t n, bool to_device)
{
	u32 dw_count, *host_addr, hw_offset;
	osal_size_t quota, done = 0;
	u32 OSAL_IOMEM *reg_addr;

	while (done < n) {
		quota = OSAL_MIN_T(osal_size_t, n - done,
				   PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);

		if (IS_PF(p_hwfn->p_dev)) {
			ecore_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
			hw_offset = ecore_ptt_get_bar_addr(p_ptt);
		} else {
			hw_offset = hw_addr + done;
		}

		dw_count = quota / 4;
		host_addr = (u32 *)((u8 *)addr + done);
		reg_addr = (u32 OSAL_IOMEM *)OSAL_REG_ADDR(p_hwfn, hw_offset);

		if (to_device)
			while (dw_count--)
				DIRECT_REG_WR(p_hwfn, reg_addr++, *host_addr++);
		else
			while (dw_count--)
				*host_addr++ = DIRECT_REG_RD(p_hwfn,
							     reg_addr++);

		done += quota;
	}
}

void ecore_memcpy_from(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt,
		       void *dest, u32 hw_addr, osal_size_t n)
{
	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "hw_addr 0x%x, dest %p, size %lu\n",
		   hw_addr, dest, (unsigned long)n);

	ecore_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false);
}

void ecore_memcpy_to(struct ecore_hwfn *p_hwfn,
		     struct ecore_ptt *p_ptt,
		     u32 hw_addr, void *src, osal_size_t n)
{
	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "hw_addr 0x%x, src %p, size %lu\n",
		   hw_addr, src, (unsigned long)n);

	ecore_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
}

void ecore_fid_pretend(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt, u16 fid)
{
	struct pxp_pretend_cmd *p_pretend;
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);

	/* Every pretend undoes previous pretends, including a previous
	 * port pretend.
	 */
	SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
	p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);

	if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
		fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);

	p_ptt->pxp.pretend.fid.concrete_fid.fid = OSAL_CPU_TO_LE16(fid);

	p_pretend = &p_ptt->pxp.pretend;
	REG_WR(p_hwfn,
	       ecore_ptt_config_addr(p_ptt) +
	       OFFSETOF(struct pxp_ptt_entry, pretend), *(u32 *)p_pretend);
}

void ecore_port_pretend(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, u8 port_id)
{
	struct pxp_pretend_cmd *p_pretend;
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
	p_ptt->pxp.pretend.control = control;

	p_pretend = &p_ptt->pxp.pretend;
	REG_WR(p_hwfn,
	       ecore_ptt_config_addr(p_ptt) +
	       OFFSETOF(struct pxp_ptt_entry, pretend), *(u32 *)p_pretend);
}

void ecore_port_unpretend(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct pxp_pretend_cmd *p_pretend;
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
	p_ptt->pxp.pretend.control = control;

	p_pretend = &p_ptt->pxp.pretend;
	REG_WR(p_hwfn,
	       ecore_ptt_config_addr(p_ptt) +
	       OFFSETOF(struct pxp_ptt_entry, pretend), *(u32 *)p_pretend);
}

u32 ecore_vfid_to_concrete(struct ecore_hwfn *p_hwfn, u8 vfid)
{
	u32 concrete_fid = 0;

	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id);
	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid);
	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1);

	return concrete_fid;
}
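
/* Illustrative example (the PXP_CONCRETE_FID_* field layout itself is
 * defined outside this excerpt): for rel_pf_id 0 and vfid 5, the returned
 * value carries PFID = 0, VFID = 5 and VFVALID = 1, i.e. a concrete FID
 * that HW blocks interpret as "VF 5 of this PF".
 */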

/* Although the implementation is ready, today we don't have any flow that
 * utilizes said locks - and we want to keep it this way.
 * If this changes, this needs to be revisited.
 */

static void ecore_dmae_opcode(struct ecore_hwfn *p_hwfn,
			      const u8 is_src_type_grc,
			      const u8 is_dst_type_grc,
			      struct ecore_dmae_params *p_params)
{
	u32 opcode = 0;
	u16 opcode_b = 0;

	/* Whether the source is the PCIe or the GRC.
	 * 0- The source is the PCIe
	 * 1- The source is the GRC.
	 */
	opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
		   : DMAE_CMD_SRC_MASK_PCIE) << DMAE_CMD_SRC_SHIFT;
	opcode |= (p_hwfn->rel_pf_id & DMAE_CMD_SRC_PF_ID_MASK) <<
	    DMAE_CMD_SRC_PF_ID_SHIFT;

	/* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
	opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
		   : DMAE_CMD_DST_MASK_PCIE) << DMAE_CMD_DST_SHIFT;
	opcode |= (p_hwfn->rel_pf_id & DMAE_CMD_DST_PF_ID_MASK) <<
	    DMAE_CMD_DST_PF_ID_SHIFT;

	/* DMAE_E4_TODO: need to check which value to specify here. */
	/* opcode |= (!b_complete_to_host) << DMAE_CMD_C_DST_SHIFT; */

	/* Whether to write a completion word to the completion destination:
	 * 0-Do not write a completion word
	 * 1-Write the completion word
	 */
	opcode |= DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT;
	opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK << DMAE_CMD_SRC_ADDR_RESET_SHIFT;

	if (p_params->flags & ECORE_DMAE_FLAG_COMPLETION_DST)
		opcode |= 1 << DMAE_CMD_COMP_FUNC_SHIFT;

	/* swapping mode 3 - big endian.  There should be an ifdef'd define
	 * for this in the HSI somewhere; since there currently isn't one,
	 * the DMAE_CMD_ENDIANITY value is used directly.
	 */
	opcode |= DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT;

	opcode |= p_hwfn->port_id << DMAE_CMD_PORT_ID_SHIFT;

	/* reset source address in next go */
	opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK << DMAE_CMD_SRC_ADDR_RESET_SHIFT;

	/* reset dest address in next go */
	opcode |= DMAE_CMD_DST_ADDR_RESET_MASK << DMAE_CMD_DST_ADDR_RESET_SHIFT;

	/* SRC/DST VFID: all 1's - pf, otherwise VF id */
	if (p_params->flags & ECORE_DMAE_FLAG_VF_SRC) {
		opcode |= (1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT);
		opcode_b |= (p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT);
	} else {
		opcode_b |= (DMAE_CMD_SRC_VF_ID_MASK <<
			     DMAE_CMD_SRC_VF_ID_SHIFT);
	}
	if (p_params->flags & ECORE_DMAE_FLAG_VF_DST) {
		opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
		opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
	} else {
		opcode_b |= DMAE_CMD_DST_VF_ID_MASK << DMAE_CMD_DST_VF_ID_SHIFT;
	}

	p_hwfn->dmae_info.p_dmae_cmd->opcode = OSAL_CPU_TO_LE32(opcode);
	p_hwfn->dmae_info.p_dmae_cmd->opcode_b = OSAL_CPU_TO_LE16(opcode_b);
}
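
/* Example (illustrative): for a host-to-GRC write with no VF flags set,
 * the source is PCIe, the destination is GRC, both VF-ID fields in
 * opcode_b keep their all-ones "PF" value, and the completion-word and
 * address-reset bits are always set by the code above.
 */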

static u32 ecore_dmae_idx_to_go_cmd(u8 idx)
{
	OSAL_BUILD_BUG_ON((DMAE_REG_GO_C31 - DMAE_REG_GO_C0) != 31 * 4);

	return DMAE_REG_GO_C0 + idx * 4;
}
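
/* Example: channel idx 3 maps to DMAE_REG_GO_C0 + 12, i.e. the GO register
 * of DMAE channel 3; the GO registers are consecutive dwords, which the
 * OSAL_BUILD_BUG_ON above verifies across all 32 channels.
 */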

static enum _ecore_status_t
ecore_dmae_post_command(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd;
	enum _ecore_status_t ecore_status = ECORE_SUCCESS;
	u8 idx_cmd = p_hwfn->dmae_info.channel, i;

	/* verify the source/destination addresses are not zero (NULL) */
	if ((((!p_command->dst_addr_lo) && (!p_command->dst_addr_hi)) ||
	     ((!p_command->src_addr_lo) && (!p_command->src_addr_hi)))) {
		DP_NOTICE(p_hwfn, true,
			  "source or destination address 0 idx_cmd=%d\n"
			  "opcode = [0x%08x,0x%04x] len=0x%x"
			  " src=0x%x:%x dst=0x%x:%x\n",
			  idx_cmd, (u32)p_command->opcode,
			  (u16)p_command->opcode_b,
			  (int)p_command->length,
			  (int)p_command->src_addr_hi,
			  (int)p_command->src_addr_lo,
			  (int)p_command->dst_addr_hi,
			  (int)p_command->dst_addr_lo);

		return ECORE_INVAL;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x] "
		   "len=0x%x src=0x%x:%x dst=0x%x:%x\n",
		   idx_cmd, (u32)p_command->opcode,
		   (u16)p_command->opcode_b,
		   (int)p_command->length,
		   (int)p_command->src_addr_hi,
		   (int)p_command->src_addr_lo,
		   (int)p_command->dst_addr_hi, (int)p_command->dst_addr_lo);

	/* Copy the command to DMAE - need to do it before every call
	 * for source/dest address no reset.
	 * The number of commands has been increased to 16 (previously 14);
	 * the first 9 DWs are the command registers, the 10th DW is the
	 * GO register, and the rest are result registers
	 * (which are read-only by the client).
	 */
	for (i = 0; i < DMAE_CMD_SIZE; i++) {
		u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
		    *(((u32 *)p_command) + i) : 0;

		ecore_wr(p_hwfn, p_ptt,
			 DMAE_REG_CMD_MEM +
			 (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) +
			 (i * sizeof(u32)), data);
	}

	ecore_wr(p_hwfn, p_ptt,
		 ecore_dmae_idx_to_go_cmd(idx_cmd), DMAE_GO_VALUE);

	return ecore_status;
}

enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn)
{
	dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr;
	struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd;
	u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer;
	u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;

	*p_comp = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr, sizeof(u32));
	if (*p_comp == OSAL_NULL) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `p_completion_word'\n");
		ecore_dmae_info_free(p_hwfn);
		return ECORE_NOMEM;
	}

	p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
	*p_cmd = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr,
					 sizeof(struct dmae_cmd));
	if (*p_cmd == OSAL_NULL) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `struct dmae_cmd'\n");
		ecore_dmae_info_free(p_hwfn);
		return ECORE_NOMEM;
	}

	p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
	*p_buff = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr,
					  sizeof(u32) * DMAE_MAX_RW_SIZE);
	if (*p_buff == OSAL_NULL) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `intermediate_buffer'\n");
		ecore_dmae_info_free(p_hwfn);
		return ECORE_NOMEM;
	}

	/* DMAE_E4_TODO : Need to change this to reflect proper channel */
	p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;

	return ECORE_SUCCESS;
}

void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn)
{
	dma_addr_t p_phys;

	/* Just make sure no one is in the middle of using the DMAE
	 * resources while they are being freed.
	 */
	OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);

	if (p_hwfn->dmae_info.p_completion_word != OSAL_NULL) {
		p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_hwfn->dmae_info.p_completion_word,
				       p_phys, sizeof(u32));
		p_hwfn->dmae_info.p_completion_word = OSAL_NULL;
	}

	if (p_hwfn->dmae_info.p_dmae_cmd != OSAL_NULL) {
		p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_hwfn->dmae_info.p_dmae_cmd,
				       p_phys, sizeof(struct dmae_cmd));
		p_hwfn->dmae_info.p_dmae_cmd = OSAL_NULL;
	}

	if (p_hwfn->dmae_info.p_intermediate_buffer != OSAL_NULL) {
		p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_hwfn->dmae_info.p_intermediate_buffer,
				       p_phys, sizeof(u32) * DMAE_MAX_RW_SIZE);
		p_hwfn->dmae_info.p_intermediate_buffer = OSAL_NULL;
	}

	OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
}

static enum _ecore_status_t ecore_dmae_operation_wait(struct ecore_hwfn *p_hwfn)
{
	enum _ecore_status_t ecore_status = ECORE_SUCCESS;
	u32 wait_cnt_limit = 10000, wait_cnt = 0;

	u32 factor = (CHIP_REV_IS_EMUL(p_hwfn->p_dev) ?
		      ECORE_EMUL_FACTOR :
		      (CHIP_REV_IS_FPGA(p_hwfn->p_dev) ?
		       ECORE_FPGA_FACTOR : 1));

	wait_cnt_limit *= factor;
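
	/* Note (illustrative): the base limit of 10000 polls is scaled by
	 * ECORE_EMUL_FACTOR (2000) on emulation and by ECORE_FPGA_FACTOR
	 * (200) on FPGA, since those platforms run much slower than silicon.
	 */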

	/* DMAE_E4_TODO: check whether anything other than OSAL_BARRIER is
	 * needed to sync the completion_word, since the volatile keyword
	 * is not used for it.
	 */
	OSAL_BARRIER(p_hwfn->p_dev);
	while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) {
		/* DMAE_E4_TODO: using OSAL_UDELAY instead of mm_wait since
		 * the mm functions are being deprecated; review in the
		 * future.
		 */
		OSAL_UDELAY(DMAE_MIN_WAIT_TIME);
		if (++wait_cnt > wait_cnt_limit) {
			DP_NOTICE(p_hwfn->p_dev, false,
				  "Timed-out waiting for operation to"
				  " complete. Completion word is 0x%08x"
				  " expected 0x%08x.\n",
				  *p_hwfn->dmae_info.p_completion_word,
				  DMAE_COMPLETION_VAL);
			ecore_status = ECORE_TIMEOUT;
			break;
		}

		/* Barrier again to re-sample the completion_word, since the
		 * volatile keyword is not used for p_completion_word.
		 */
		OSAL_BARRIER(p_hwfn->p_dev);
	}

	if (ecore_status == ECORE_SUCCESS)
		*p_hwfn->dmae_info.p_completion_word = 0;

	return ecore_status;
}

static enum _ecore_status_t
ecore_dmae_execute_sub_operation(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt,
				 u64 src_addr, u64 dst_addr,
				 u8 src_type, u8 dst_type, u32 length)
{
	dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
	struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
	enum _ecore_status_t ecore_status = ECORE_SUCCESS;

	switch (src_type) {
	case ECORE_DMAE_ADDRESS_GRC:
	case ECORE_DMAE_ADDRESS_HOST_PHYS:
		cmd->src_addr_hi = DMA_HI(src_addr);
		cmd->src_addr_lo = DMA_LO(src_addr);
		break;
	/* for virt source addresses we use the intermediate buffer. */
	case ECORE_DMAE_ADDRESS_HOST_VIRT:
		cmd->src_addr_hi = DMA_HI(phys);
		cmd->src_addr_lo = DMA_LO(phys);
		OSAL_MEMCPY(&p_hwfn->dmae_info.p_intermediate_buffer[0],
			    (void *)(osal_uintptr_t)src_addr,
			    length * sizeof(u32));
		break;
	}

	switch (dst_type) {
	case ECORE_DMAE_ADDRESS_GRC:
	case ECORE_DMAE_ADDRESS_HOST_PHYS:
		cmd->dst_addr_hi = DMA_HI(dst_addr);
		cmd->dst_addr_lo = DMA_LO(dst_addr);
		break;
	/* for virt destination address we use the intermediate buff. */
	case ECORE_DMAE_ADDRESS_HOST_VIRT:
		cmd->dst_addr_hi = DMA_HI(phys);
		cmd->dst_addr_lo = DMA_LO(phys);
		break;
	}

	cmd->length = (u16)length;

	if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT ||
	    src_type == ECORE_DMAE_ADDRESS_HOST_PHYS)
		OSAL_DMA_SYNC(p_hwfn->p_dev,
			      (void *)HILO_U64(cmd->src_addr_hi,
					       cmd->src_addr_lo),
			      length * sizeof(u32), false);

	ecore_dmae_post_command(p_hwfn, p_ptt);

	ecore_status = ecore_dmae_operation_wait(p_hwfn);

	/* TODO - is it true ? */
	if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT ||
	    src_type == ECORE_DMAE_ADDRESS_HOST_PHYS)
		OSAL_DMA_SYNC(p_hwfn->p_dev,
			      (void *)HILO_U64(cmd->src_addr_hi,
					       cmd->src_addr_lo),
			      length * sizeof(u32), true);

	if (ecore_status != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "ecore_dmae_execute_sub_operation: wait failed."
			  " source_addr 0x%lx, dest_addr 0x%lx,"
			  " size_in_dwords 0x%x\n",
			  (unsigned long)src_addr, (unsigned long)dst_addr,
			  length);
		return ecore_status;
	}

	if (dst_type == ECORE_DMAE_ADDRESS_HOST_VIRT)
		OSAL_MEMCPY((void *)(osal_uintptr_t)(dst_addr),
			    &p_hwfn->dmae_info.p_intermediate_buffer[0],
			    length * sizeof(u32));

	return ECORE_SUCCESS;
}

static enum _ecore_status_t
ecore_dmae_execute_command(struct ecore_hwfn *p_hwfn,
			   struct ecore_ptt *p_ptt,
			   u64 src_addr, u64 dst_addr,
			   u8 src_type, u8 dst_type,
			   u32 size_in_dwords,
			   struct ecore_dmae_params *p_params)
{
	dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr;
	u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0;
	struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
	enum _ecore_status_t ecore_status = ECORE_SUCCESS;
	u64 src_addr_split = 0, dst_addr_split = 0;
	u16 length_limit = DMAE_MAX_RW_SIZE;
	u32 offset = 0;

	ecore_dmae_opcode(p_hwfn,
			  (src_type == ECORE_DMAE_ADDRESS_GRC),
			  (dst_type == ECORE_DMAE_ADDRESS_GRC), p_params);

	cmd->comp_addr_lo = DMA_LO(phys);
	cmd->comp_addr_hi = DMA_HI(phys);
	cmd->comp_val = DMAE_COMPLETION_VAL;

	/* Check that the grc_addr is valid (i.e. < MAX_GRC_OFFSET) */
	cnt_split = size_in_dwords / length_limit;
	length_mod = size_in_dwords % length_limit;
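
	/* Worked example (hypothetical limit): with DMAE_MAX_RW_SIZE of
	 * 0x2000, a request of size_in_dwords 0x4100 gives cnt_split 2 and
	 * length_mod 0x100, so the loop below issues chunks of 0x2000,
	 * 0x2000 and 0x100 dwords (cnt_split + 1 iterations in total).
	 */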

	src_addr_split = src_addr;
	dst_addr_split = dst_addr;

	for (i = 0; i <= cnt_split; i++) {
		offset = length_limit * i;

		if (!(p_params->flags & ECORE_DMAE_FLAG_RW_REPL_SRC)) {
			if (src_type == ECORE_DMAE_ADDRESS_GRC)
				src_addr_split = src_addr + offset;
			else
				src_addr_split = src_addr + (offset * 4);
		}

		if (dst_type == ECORE_DMAE_ADDRESS_GRC)
			dst_addr_split = dst_addr + offset;
		else
			dst_addr_split = dst_addr + (offset * 4);

		length_cur = (cnt_split == i) ? length_mod : length_limit;

		/* might be zero on last iteration */
		if (!length_cur)
			continue;

		ecore_status = ecore_dmae_execute_sub_operation(p_hwfn,
								p_ptt,
								src_addr_split,
								dst_addr_split,
								src_type,
								dst_type,
								length_cur);
		if (ecore_status != ECORE_SUCCESS) {
			DP_NOTICE(p_hwfn, false,
				  "ecore_dmae_execute_sub_operation failed"
				  " with error 0x%x. source_addr 0x%lx,"
				  " dest addr 0x%lx, size_in_dwords 0x%x\n",
				  ecore_status, (unsigned long)src_addr,
				  (unsigned long)dst_addr, length_cur);

			ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_DMAE_FAIL);
			break;
		}
	}

	return ecore_status;
}

enum _ecore_status_t
ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
		    struct ecore_ptt *p_ptt,
		    u64 source_addr,
		    u32 grc_addr, u32 size_in_dwords, u32 flags)
{
	u32 grc_addr_in_dw = grc_addr / sizeof(u32);
	struct ecore_dmae_params params;
	enum _ecore_status_t rc;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
	params.flags = flags;

	OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);

	rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
					grc_addr_in_dw,
					ECORE_DMAE_ADDRESS_HOST_VIRT,
					ECORE_DMAE_ADDRESS_GRC,
					size_in_dwords, &params);

	OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);

	return rc;
}

enum _ecore_status_t
ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
		    struct ecore_ptt *p_ptt,
		    u32 grc_addr,
		    dma_addr_t dest_addr, u32 size_in_dwords, u32 flags)
{
	u32 grc_addr_in_dw = grc_addr / sizeof(u32);
	struct ecore_dmae_params params;
	enum _ecore_status_t rc;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
	params.flags = flags;

	OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);

	rc = ecore_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw,
					dest_addr, ECORE_DMAE_ADDRESS_GRC,
					ECORE_DMAE_ADDRESS_HOST_VIRT,
					size_in_dwords, &params);

	OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);

	return rc;
}

enum _ecore_status_t
ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
		     struct ecore_ptt *p_ptt,
		     dma_addr_t source_addr,
		     dma_addr_t dest_addr,
		     u32 size_in_dwords, struct ecore_dmae_params *p_params)
{
	enum _ecore_status_t rc;

	OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);

	rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
					dest_addr,
					ECORE_DMAE_ADDRESS_HOST_PHYS,
					ECORE_DMAE_ADDRESS_HOST_PHYS,
					size_in_dwords, p_params);

	OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);

	return rc;
}
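
/* A minimal usage sketch (illustrative): copying a host buffer to GRC space
 * with the wrapper above; `my_buf', `buf_len' and `grc_dest' are
 * hypothetical.
 *
 *	rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
 *				 (u64)(osal_uintptr_t)my_buf,
 *				 grc_dest, buf_len / 4, 0);
 *	if (rc != ECORE_SUCCESS)
 *		DP_NOTICE(p_hwfn, false, "DMAE copy failed\n");
 */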

u16 ecore_get_qm_pq(struct ecore_hwfn *p_hwfn,
		    enum protocol_type proto,
		    union ecore_qm_pq_params *p_params)
{
	u16 pq_id = 0;

	if ((proto == PROTOCOLID_CORE ||
	     proto == PROTOCOLID_ETH) && !p_params) {
		DP_NOTICE(p_hwfn, true,
			  "Protocol %d received NULL PQ params\n", proto);
		return 0;
	}

	switch (proto) {
	case PROTOCOLID_CORE:
		if (p_params->core.tc == LB_TC)
			pq_id = p_hwfn->qm_info.pure_lb_pq;
		else if (p_params->core.tc == OOO_LB_TC)
			pq_id = p_hwfn->qm_info.ooo_pq;
		else
			pq_id = p_hwfn->qm_info.offload_pq;
		break;
	case PROTOCOLID_ETH:
		pq_id = p_params->eth.tc;
		/* TODO - multi-CoS for VFs? */
		if (p_params->eth.is_vf)
			pq_id += p_hwfn->qm_info.vf_queues_offset +
			    p_params->eth.vf_id;
		break;
	default:
		break;
	}

	pq_id = CM_TX_PQ_BASE + pq_id + RESC_START(p_hwfn, ECORE_PQ);

	return pq_id;
}
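
/* Example (illustrative): for PROTOCOLID_ETH with p_params->eth.tc == 1 on
 * a PF (is_vf == 0), pq_id starts as 1 and the returned queue is
 * CM_TX_PQ_BASE + 1 + RESC_START(p_hwfn, ECORE_PQ), i.e. TC-relative
 * within this hwfn's allotted PQ range.
 */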

void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn,
			 enum ecore_hw_err_type err_type)
{
	/* Fan failure cannot be masked by handling of another HW error */
	if (p_hwfn->p_dev->recov_in_prog && err_type != ECORE_HW_ERR_FAN_FAIL) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_DRV,
			   "Recovery is in progress. "
			   "Avoid notifying about HW error %d.\n",
			   err_type);
		return;
	}

	OSAL_HW_ERROR_OCCURRED(p_hwfn, err_type);
}