/* Copyright (c) 2016 QLogic Corporation.
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

/* include the precompiled configuration values - only once */
#include "bcm_osal.h"
#include "ecore_hsi_common.h"
#include "ecore.h"
#include "ecore_hw.h"
#include "ecore_status.h"
#include "ecore_rt_defs.h"
#include "ecore_init_fw_funcs.h"

#include "ecore_iro_values.h"
#include "ecore_sriov.h"
#include "ecore_gtt_values.h"
#include "reg_addr.h"
#include "ecore_init_ops.h"

#define ECORE_INIT_MAX_POLL_COUNT	100
#define ECORE_INIT_POLL_PERIOD_US	500

void ecore_init_iro_array(struct ecore_dev *p_dev)
{
	p_dev->iro_arr = iro_arr;
}

/* Runtime configuration helpers */
void ecore_init_clear_rt_data(struct ecore_hwfn *p_hwfn)
{
	int i;

	for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
		p_hwfn->rt_data.b_valid[i] = false;
}

void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn, u32 rt_offset, u32 val)
{
	if (rt_offset >= RUNTIME_ARRAY_SIZE) {
		DP_ERR(p_hwfn,
		       "Avoid storing %u in rt_data at index %u since RUNTIME_ARRAY_SIZE is %u!\n",
		       val, rt_offset, RUNTIME_ARRAY_SIZE);
		return;
	}

	p_hwfn->rt_data.init_val[rt_offset] = val;
	p_hwfn->rt_data.b_valid[rt_offset] = true;
}

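/* Illustrative caller sketch (hypothetical offset constant): the value is only
 * staged in host memory here; it reaches the chip when an INIT_SRC_RUNTIME
 * init_ops command covering that offset is executed, e.g.:
 *
 *	ecore_init_store_rt_reg(p_hwfn, SOME_BLOCK_RT_OFFSET + idx, reg_val);
 */
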
void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
			     u32 rt_offset, u32 *p_val, osal_size_t size)
{
	osal_size_t i;

	if ((rt_offset + size - 1) >= RUNTIME_ARRAY_SIZE) {
		DP_ERR(p_hwfn,
		       "Avoid storing values in rt_data at indices %u-%u since RUNTIME_ARRAY_SIZE is %u!\n",
		       rt_offset, (u32)(rt_offset + size - 1), RUNTIME_ARRAY_SIZE);
		return;
	}

	for (i = 0; i < size / sizeof(u32); i++) {
		p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
		p_hwfn->rt_data.b_valid[rt_offset + i] = true;
	}
}

static enum _ecore_status_t ecore_init_rt(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 addr, u16 rt_offset,
					  u16 size, bool b_must_dmae)
{
	u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
	bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
	u16 i, segment;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Since not all RT entries are initialized, go over the RT and
	 * for each segment of initialized values use DMA.
	 */
	for (i = 0; i < size; i++) {
		if (!p_valid[i])
			continue;

		/* In case there isn't any wide-bus configuration here,
		 * simply write the data instead of using dmae.
		 */
		if (!b_must_dmae) {
			ecore_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]);
			continue;
		}
		/* Start of a new segment */
		for (segment = 1; i + segment < size; segment++)
			if (!p_valid[i + segment])
				break;

		rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
					 (osal_uintptr_t)(p_init_val + i),
					 addr + (i << 2), segment, 0);
		if (rc != ECORE_SUCCESS)
			return rc;

		/* Jump over the entire segment, including invalid entry */
		i += segment;
	}
	return rc;
}

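/* Example of the batching above (illustrative values): with a validity map of
 * { 1, 1, 1, 0, 1 } and b_must_dmae set, the loop issues one DMAE of three
 * dwords starting at entry 0, skips the invalid entry, and then a DMAE of a
 * single dword for entry 4.
 */
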
enum _ecore_status_t ecore_init_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_rt_data *rt_data = &p_hwfn->rt_data;

	if (IS_VF(p_hwfn->p_dev))
		return ECORE_SUCCESS;

	rt_data->b_valid = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
				       sizeof(bool) * RUNTIME_ARRAY_SIZE);
	if (!rt_data->b_valid)
		return ECORE_NOMEM;

	rt_data->init_val = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
					sizeof(u32) * RUNTIME_ARRAY_SIZE);
	if (!rt_data->init_val) {
		OSAL_FREE(p_hwfn->p_dev, rt_data->b_valid);
		return ECORE_NOMEM;
	}

	return ECORE_SUCCESS;
}

void ecore_init_free(struct ecore_hwfn *p_hwfn)
{
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.init_val);
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.b_valid);
}

static enum _ecore_status_t ecore_init_array_dmae(struct ecore_hwfn *p_hwfn,
						  struct ecore_ptt *p_ptt,
						  u32 addr, u32 dmae_data_offset,
						  u32 size, const u32 *p_buf,
						  bool b_must_dmae, bool b_can_dmae)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Perform DMAE only for lengthy enough sections or for wide-bus */
#ifndef ASIC_ONLY
	if ((CHIP_REV_IS_SLOW(p_hwfn->p_dev) && (size < 16)) ||
	    !b_can_dmae || (!b_must_dmae && (size < 16))) {
#else
	if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
#endif
		const u32 *data = p_buf + dmae_data_offset;
		u32 i;

		for (i = 0; i < size; i++)
			ecore_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
	} else {
		rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
					 (osal_uintptr_t)(p_buf + dmae_data_offset),
					 addr, size, 0);
	}

	return rc;
}

static enum _ecore_status_t ecore_init_fill_dmae(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 u32 addr, u32 fill_count)
{
	static u32 zero_buffer[DMAE_MAX_RW_SIZE];

	OSAL_MEMSET(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);

	return ecore_dmae_host2grc(p_hwfn, p_ptt,
				   (osal_uintptr_t)&zero_buffer[0],
				   addr, fill_count,
				   ECORE_DMAE_FLAG_RW_REPL_SRC);
}

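/* Note: ECORE_DMAE_FLAG_RW_REPL_SRC is understood here to make the DMAE engine
 * reuse (replicate) the same zeroed source buffer, so fill_count may exceed
 * DMAE_MAX_RW_SIZE; this helper is effectively a GRC memset-to-zero.
 */
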
static void ecore_init_fill(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    u32 addr, u32 fill, u32 fill_count)
{
	u32 i;

	for (i = 0; i < fill_count; i++, addr += sizeof(u32))
		ecore_wr(p_hwfn, p_ptt, addr, fill);
}

static enum _ecore_status_t ecore_init_cmd_array(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 struct init_write_op *cmd,
						 bool b_must_dmae,
						 bool b_can_dmae)
{
	u32 dmae_array_offset = OSAL_LE32_TO_CPU(cmd->args.array_offset);
	u32 data = OSAL_LE32_TO_CPU(cmd->data);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
#ifdef CONFIG_ECORE_ZIPPED_FW
	u32 offset, output_len, input_len, max_size;
#endif
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	union init_array_hdr *hdr;
	const u32 *array_data;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 size;

	array_data = p_dev->fw_data->arr_data;

	hdr = (union init_array_hdr *)
		(uintptr_t)(array_data + dmae_array_offset);
	data = OSAL_LE32_TO_CPU(hdr->raw.data);
	switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
	case INIT_ARR_ZIPPED:
#ifdef CONFIG_ECORE_ZIPPED_FW
		offset = dmae_array_offset + 1;
		input_len = GET_FIELD(data, INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
		max_size = MAX_ZIPPED_SIZE * 4;
		OSAL_MEMSET(p_hwfn->unzip_buf, 0, max_size);

		output_len = OSAL_UNZIP_DATA(p_hwfn, input_len,
					     (u8 *)(uintptr_t)&array_data[offset],
					     max_size,
					     (u8 *)p_hwfn->unzip_buf);
		if (output_len) {
			rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr, 0,
						   output_len,
						   p_hwfn->unzip_buf,
						   b_must_dmae, b_can_dmae);
		} else {
			DP_NOTICE(p_hwfn, true, "Failed to unzip dmae data\n");
			rc = ECORE_INVAL;
		}
#else
		DP_NOTICE(p_hwfn, true,
			  "Using zipped firmware without config enabled\n");
		rc = ECORE_INVAL;
#endif
		break;
	case INIT_ARR_PATTERN:
	{
		u32 repeats = GET_FIELD(data,
					INIT_ARRAY_PATTERN_HDR_REPETITIONS);
		u32 i;

		size = GET_FIELD(data,
				 INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);

		for (i = 0; i < repeats; i++, addr += size << 2) {
			rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
						   dmae_array_offset + 1,
						   size, array_data,
						   b_must_dmae, b_can_dmae);
			if (rc)
				break;
		}
		break;
	}
	case INIT_ARR_STANDARD:
		size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE);
		rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
					   dmae_array_offset + 1,
					   size, array_data,
					   b_must_dmae, b_can_dmae);
		break;
	}

	return rc;
}

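/* Summary of the array sources handled above: a zipped blob decompressed into
 * unzip_buf before the DMAE, a pattern repeated 'repeats' times at consecutive
 * addresses, and a standard flat array copied once.
 */
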
/* init_ops write command */
static enum _ecore_status_t ecore_init_cmd_wr(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      struct init_write_op *p_cmd,
					      bool b_can_dmae)
{
	u32 data = OSAL_LE32_TO_CPU(p_cmd->data);
	bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Sanitize */
	if (b_must_dmae && !b_can_dmae) {
		DP_NOTICE(p_hwfn, true,
			  "Need to write to %08x for Wide-bus but DMAE isn't allowed\n",
			  addr);
		return ECORE_INVAL;
	}

	switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
	case INIT_SRC_INLINE:
		data = OSAL_LE32_TO_CPU(p_cmd->args.inline_val);
		ecore_wr(p_hwfn, p_ptt, addr, data);
		break;
	case INIT_SRC_ZEROS:
		data = OSAL_LE32_TO_CPU(p_cmd->args.zeros_count);
		if (b_must_dmae || (b_can_dmae && (data >= 64)))
			rc = ecore_init_fill_dmae(p_hwfn, p_ptt, addr, data);
		else
			ecore_init_fill(p_hwfn, p_ptt, addr, 0, data);
		break;
	case INIT_SRC_ARRAY:
		rc = ecore_init_cmd_array(p_hwfn, p_ptt, p_cmd,
					  b_must_dmae, b_can_dmae);
		break;
	case INIT_SRC_RUNTIME:
		rc = ecore_init_rt(p_hwfn, p_ptt, addr,
				   OSAL_LE16_TO_CPU(p_cmd->args.runtime.offset),
				   OSAL_LE16_TO_CPU(p_cmd->args.runtime.size),
				   b_must_dmae);
		break;
	}

	return rc;
}

static OSAL_INLINE bool comp_eq(u32 val, u32 expected_val)
{
	return (val == expected_val);
}

static OSAL_INLINE bool comp_and(u32 val, u32 expected_val)
{
	return (val & expected_val) == expected_val;
}

static OSAL_INLINE bool comp_or(u32 val, u32 expected_val)
{
	return (val | expected_val) > 0;
}

/* init_ops read/poll commands */
static void ecore_init_cmd_rd(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt, struct init_read_op *cmd)
{
	bool (*comp_check)(u32 val, u32 expected_val);
	u32 delay = ECORE_INIT_POLL_PERIOD_US, val;
	u32 data, addr, poll;
	int i;

	data = OSAL_LE32_TO_CPU(cmd->op_data);
	addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
	poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);
#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay *= 100;
#endif

	val = ecore_rd(p_hwfn, p_ptt, addr);
	if (poll == INIT_POLL_NONE)
		return;

	switch (poll) {
	case INIT_POLL_EQ:
		comp_check = comp_eq;
		break;
	case INIT_POLL_OR:
		comp_check = comp_or;
		break;
	case INIT_POLL_AND:
		comp_check = comp_and;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
		       cmd->op_data);
		return;
	}

	data = OSAL_LE32_TO_CPU(cmd->expected_val);
	for (i = 0;
	     i < ECORE_INIT_MAX_POLL_COUNT && !comp_check(val, data); i++) {
		OSAL_UDELAY(delay);
		val = ecore_rd(p_hwfn, p_ptt, addr);
	}

	if (i == ECORE_INIT_MAX_POLL_COUNT)
		DP_ERR(p_hwfn, "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
		       addr, OSAL_LE32_TO_CPU(cmd->expected_val), val,
		       OSAL_LE32_TO_CPU(cmd->op_data));
}

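/* With the defaults above, a poll command reads the register at most
 * ECORE_INIT_MAX_POLL_COUNT (100) times with ECORE_INIT_POLL_PERIOD_US (500us)
 * between reads, i.e. roughly a 50ms budget (100x longer on emulation).
 */
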
/* init_ops callbacks entry point */
static enum _ecore_status_t ecore_init_cmd_cb(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      struct init_callback_op *p_cmd)
{
	enum _ecore_status_t rc;

	switch (p_cmd->callback_id) {
	case DMAE_READY_CB:
		rc = ecore_dmae_sanity(p_hwfn, p_ptt, "engine_phase");
		break;
	default:
		DP_NOTICE(p_hwfn, false, "Unexpected init op callback ID %d\n",
			  p_cmd->callback_id);
		return ECORE_INVAL;
	}
	return rc;
}

static u8 ecore_init_cmd_mode_match(struct ecore_hwfn *p_hwfn,
				    u16 *p_offset, int modes)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	const u8 *modes_tree_buf;
	u8 arg1, arg2, tree_val;

	modes_tree_buf = p_dev->fw_data->modes_tree_buf;
	tree_val = modes_tree_buf[(*p_offset)++];
	switch (tree_val) {
	case INIT_MODE_OP_NOT:
		return ecore_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1;
	case INIT_MODE_OP_OR:
		arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		return arg1 | arg2;
	case INIT_MODE_OP_AND:
		arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		return arg1 & arg2;
	default:
		tree_val -= MAX_INIT_MODE_OPS;
		return (modes & (1 << tree_val)) ? 1 : 0;
	}
}

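/* Illustrative walk (made-up buffer contents): a modes-tree buffer of
 * { INIT_MODE_OP_AND, MAX_INIT_MODE_OPS + 2, MAX_INIT_MODE_OPS + 5 } is
 * evaluated recursively as "mode bit 2 AND mode bit 5", i.e. it returns 1 only
 * when both bits are set in 'modes'.
 */
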
static u32 ecore_init_cmd_mode(struct ecore_hwfn *p_hwfn,
			       struct init_if_mode_op *p_cmd, int modes)
{
	u16 offset = OSAL_LE16_TO_CPU(p_cmd->modes_buf_offset);

	if (ecore_init_cmd_mode_match(p_hwfn, &offset, modes))
		return 0;
	else
		return GET_FIELD(OSAL_LE32_TO_CPU(p_cmd->op_data),
				 INIT_IF_MODE_OP_CMD_OFFSET);
}

static u32 ecore_init_cmd_phase(struct init_if_phase_op *p_cmd,
				u32 phase, u32 phase_id)
{
	u32 data = OSAL_LE32_TO_CPU(p_cmd->phase_data);
	u32 op_data = OSAL_LE32_TO_CPU(p_cmd->op_data);

	if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
	      (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
	       GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
		return GET_FIELD(op_data, INIT_IF_PHASE_OP_CMD_OFFSET);
	else
		return 0;
}

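/* Both IF helpers above return the number of subsequent commands to skip:
 * 0 when the condition matches (keep executing), or the command-offset field
 * when it does not, which ecore_init_run() adds to cmd_num.
 */
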
enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    int phase, int phase_id, int modes)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u32 cmd_num, num_init_ops;
	union init_op *init_ops;
	bool b_dmae = false;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	num_init_ops = p_dev->fw_data->init_ops_size;
	init_ops = p_dev->fw_data->init_ops;

#ifdef CONFIG_ECORE_ZIPPED_FW
	p_hwfn->unzip_buf = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
					MAX_ZIPPED_SIZE * 4);
	if (!p_hwfn->unzip_buf) {
		DP_NOTICE(p_hwfn, true, "Failed to allocate unzip buffer\n");
		return ECORE_NOMEM;
	}
#endif

	for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
		union init_op *cmd = &init_ops[cmd_num];
		u32 data = OSAL_LE32_TO_CPU(cmd->raw.op_data);

		switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
		case INIT_OP_WRITE:
			rc = ecore_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
					       b_dmae);
			break;
		case INIT_OP_READ:
			ecore_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
			break;
		case INIT_OP_IF_MODE:
			cmd_num += ecore_init_cmd_mode(p_hwfn, &cmd->if_mode,
						       modes);
			break;
		case INIT_OP_IF_PHASE:
			cmd_num += ecore_init_cmd_phase(&cmd->if_phase, phase,
							phase_id);
			b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
			break;
		case INIT_OP_DELAY:
			/* ecore_init_run is always invoked from
			 * sleep-able context
			 */
			OSAL_UDELAY(cmd->delay.delay);
			break;
		case INIT_OP_CALLBACK:
			rc = ecore_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
			break;
		}

		if (rc)
			break;
	}

#ifdef CONFIG_ECORE_ZIPPED_FW
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->unzip_buf);
#endif

	return rc;
}

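/* Typical invocation sketch, simplified from the hw-init flow (phase macros
 * come from the FW interface headers):
 *
 *	rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
 *	if (rc == ECORE_SUCCESS)
 *		rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PORT,
 *				    p_hwfn->port_id, hw_mode);
 */
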
void ecore_gtt_init(struct ecore_hwfn *p_hwfn,
		    struct ecore_ptt *p_ptt)
{
	u32 gtt_base;
	u32 i;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
		/* This is done by MFW on ASIC; regardless, this should only
		 * be done once per chip [i.e., common]. Implementation is
		 * not too bright, but it should work on the simple FPGA/EMUL
		 * scenarios.
		 */
		static bool initialized;
		int poll_cnt = 500;
		u32 val;

		/* initialize PTT/GTT (poll for completion) */
		if (!initialized) {
			ecore_wr(p_hwfn, p_ptt,
				 PGLUE_B_REG_START_INIT_PTT_GTT, 1);
			initialized = true;
		}

		do {
			/* ptt might be overrided by HW until this is done */
			OSAL_UDELAY(10);
			ecore_ptt_invalidate(p_hwfn);
			val = ecore_rd(p_hwfn, p_ptt,
				       PGLUE_B_REG_INIT_DONE_PTT_GTT);
		} while ((val != 1) && --poll_cnt);

		if (!poll_cnt)
			DP_ERR(p_hwfn,
			       "PGLUE_B_REG_INIT_DONE didn't complete\n");
	}
#endif

	/* Set the global windows */
	gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;

	for (i = 0; i < OSAL_ARRAY_SIZE(pxp_global_win); i++)
		if (pxp_global_win[i])
			REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
			       pxp_global_win[i]);
}

enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
#ifdef CONFIG_ECORE_BINARY_FW
					const u8 *fw_data)
#else
					const u8 OSAL_UNUSED * fw_data)
#endif
{
	struct ecore_fw_data *fw = p_dev->fw_data;

#ifdef CONFIG_ECORE_BINARY_FW
	struct bin_buffer_hdr *buf_hdr;
	u32 offset, len;

	if (!fw_data) {
		DP_NOTICE(p_dev, true, "Invalid fw data\n");
		return ECORE_INVAL;
	}

	buf_hdr = (struct bin_buffer_hdr *)(uintptr_t)fw_data;

	offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
	fw->fw_ver_info = (struct fw_ver_info *)((uintptr_t)(fw_data + offset));

	offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
	fw->init_ops = (union init_op *)((uintptr_t)(fw_data + offset));

	offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
	fw->arr_data = (u32 *)((uintptr_t)(fw_data + offset));

	offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
	fw->modes_tree_buf = (u8 *)((uintptr_t)(fw_data + offset));
	len = buf_hdr[BIN_BUF_INIT_CMD].length;
	fw->init_ops_size = len / sizeof(struct init_raw_op);
#else
	fw->init_ops = (union init_op *)init_ops;
	fw->arr_data = (u32 *)init_val;
	fw->modes_tree_buf = (u8 *)modes_tree_buf;
	fw->init_ops_size = init_ops_size;
#endif

	return ECORE_SUCCESS;
}

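/* With CONFIG_ECORE_BINARY_FW the init tables are located through the
 * bin_buffer_hdr index at the start of the firmware image; otherwise they come
 * from the statically compiled-in arrays (init_ops, init_val, modes_tree_buf
 * and init_ops_size).
 */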