/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

/* include the precompiled configuration values - only once */
#include "bcm_osal.h"
#include "ecore_hsi_common.h"
#include "ecore.h"
#include "ecore_hw.h"
#include "ecore_status.h"
#include "ecore_rt_defs.h"
#include "ecore_init_fw_funcs.h"

#include "ecore_iro_values.h"
#include "ecore_sriov.h"
#include "ecore_gtt_values.h"
#include "reg_addr.h"
#include "ecore_init_ops.h"

#define ECORE_INIT_MAX_POLL_COUNT	100
#define ECORE_INIT_POLL_PERIOD_US	500

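/* Point the device at the IRO array compiled in via ecore_iro_values.h */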
void ecore_init_iro_array(struct ecore_dev *p_dev)
{
	p_dev->iro_arr = iro_arr;
}

/* Runtime configuration helpers */
void ecore_init_clear_rt_data(struct ecore_hwfn *p_hwfn)
{
	int i;

	for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
		p_hwfn->rt_data.b_valid[i] = false;
}

void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn, u32 rt_offset, u32 val)
{
	if (rt_offset >= RUNTIME_ARRAY_SIZE) {
		DP_ERR(p_hwfn,
		       "Avoid storing %u in rt_data at index %u since RUNTIME_ARRAY_SIZE is %u!\n",
		       val, rt_offset, RUNTIME_ARRAY_SIZE);
		return;
	}

	p_hwfn->rt_data.init_val[rt_offset] = val;
	p_hwfn->rt_data.b_valid[rt_offset] = true;
}

void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
			     u32 rt_offset, u32 *p_val, osal_size_t size)
{
	osal_size_t i;

	if ((rt_offset + size - 1) >= RUNTIME_ARRAY_SIZE) {
		DP_ERR(p_hwfn,
		       "Avoid storing values in rt_data at indices %u-%u since RUNTIME_ARRAY_SIZE is %u!\n",
		       rt_offset, (u32)(rt_offset + size - 1),
		       RUNTIME_ARRAY_SIZE);
		return;
	}

	for (i = 0; i < size / sizeof(u32); i++) {
		p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
		p_hwfn->rt_data.b_valid[rt_offset + i] = true;
	}
}

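/* Program runtime (RT) registers from the values stored in rt_data.
 * Only entries marked valid are written; when wide-bus access forces DMAE,
 * each contiguous run of valid entries is sent as one DMAE transaction.
 */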
static enum _ecore_status_t ecore_init_rt(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 addr, u16 rt_offset,
					  u16 size, bool b_must_dmae)
{
	u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
	bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
	u16 i, segment;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Since not all RT entries are initialized, go over the RT and
	 * for each segment of initialized values use DMA.
	 */
	for (i = 0; i < size; i++) {
		if (!p_valid[i])
			continue;

		/* In case there isn't any wide-bus configuration here,
		 * simply write the data instead of using dmae.
		 */
		if (!b_must_dmae) {
			ecore_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]);
			continue;
		}

		/* Start of a new segment */
		for (segment = 1; i + segment < size; segment++)
			if (!p_valid[i + segment])
				break;

		rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
					 (osal_uintptr_t)(p_init_val + i),
					 addr + (i << 2), segment,
					 OSAL_NULL /* default parameters */);
		if (rc != ECORE_SUCCESS)
			return rc;

		/* Jump over the entire segment, including invalid entry */
		i += segment;
	}

	return rc;
}

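/* Allocate the runtime-array shadow (init values + validity flags) for a PF */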
enum _ecore_status_t ecore_init_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_rt_data *rt_data = &p_hwfn->rt_data;

	if (IS_VF(p_hwfn->p_dev))
		return ECORE_SUCCESS;

	rt_data->b_valid = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
				       sizeof(bool) * RUNTIME_ARRAY_SIZE);
	if (!rt_data->b_valid)
		return ECORE_NOMEM;

	rt_data->init_val = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
					sizeof(u32) * RUNTIME_ARRAY_SIZE);
	if (!rt_data->init_val) {
		OSAL_FREE(p_hwfn->p_dev, rt_data->b_valid);
		return ECORE_NOMEM;
	}

	return ECORE_SUCCESS;
}

void ecore_init_free(struct ecore_hwfn *p_hwfn)
{
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.init_val);
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.b_valid);
}

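/* Write an array of dwords to consecutive registers, either with direct
 * register writes or with a single DMAE transaction.
 */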
static enum _ecore_status_t ecore_init_array_dmae(struct ecore_hwfn *p_hwfn,
						  struct ecore_ptt *p_ptt,
						  u32 addr,
						  u32 dmae_data_offset,
						  u32 size, const u32 *p_buf,
						  bool b_must_dmae,
						  bool b_can_dmae)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Perform DMAE only for lengthy enough sections or for wide-bus */
#ifndef ASIC_ONLY
	if ((CHIP_REV_IS_SLOW(p_hwfn->p_dev) && (size < 16)) ||
	    !b_can_dmae || (!b_must_dmae && (size < 16))) {
#else
	if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
#endif
		const u32 *data = p_buf + dmae_data_offset;
		u32 i;

		for (i = 0; i < size; i++)
			ecore_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
	} else {
		rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
					 (osal_uintptr_t)(p_buf +
							  dmae_data_offset),
					 addr, size,
					 OSAL_NULL /* default parameters */);
	}

	return rc;
}

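/* Zero a block of registers with one DMAE transaction that replicates a
 * zeroed source dword.
 */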
static enum _ecore_status_t ecore_init_fill_dmae(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 u32 addr, u32 fill_count)
{
	static u32 zero_buffer[DMAE_MAX_RW_SIZE];
	struct dmae_params params;

	OSAL_MEMSET(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);

	OSAL_MEMSET(&params, 0, sizeof(params));
	SET_FIELD(params.flags, DMAE_PARAMS_RW_REPL_SRC, 0x1);
	return ecore_dmae_host2grc(p_hwfn, p_ptt,
				   (osal_uintptr_t)&zero_buffer[0],
				   addr, fill_count, &params);
}

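/* Fill a block of registers with a given value using direct writes */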
static void ecore_init_fill(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    u32 addr, u32 fill, u32 fill_count)
{
	u32 i;

	for (i = 0; i < fill_count; i++, addr += sizeof(u32))
		ecore_wr(p_hwfn, p_ptt, addr, fill);
}

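/* init_ops array write command - handles zipped, pattern and standard arrays */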
static enum _ecore_status_t ecore_init_cmd_array(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 struct init_write_op *cmd,
						 bool b_must_dmae,
						 bool b_can_dmae)
{
	u32 dmae_array_offset = OSAL_LE32_TO_CPU(cmd->args.array_offset);
	u32 data = OSAL_LE32_TO_CPU(cmd->data);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
#ifdef CONFIG_ECORE_ZIPPED_FW
	u32 offset, output_len, input_len, max_size;
#endif
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	union init_array_hdr *hdr;
	const u32 *array_data;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 size;

	array_data = p_dev->fw_data->arr_data;

	hdr = (union init_array_hdr *)
		(uintptr_t)(array_data + dmae_array_offset);
	data = OSAL_LE32_TO_CPU(hdr->raw.data);
	switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
	case INIT_ARR_ZIPPED:
#ifdef CONFIG_ECORE_ZIPPED_FW
		offset = dmae_array_offset + 1;
		input_len = GET_FIELD(data, INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
		max_size = MAX_ZIPPED_SIZE * 4;
		OSAL_MEMSET(p_hwfn->unzip_buf, 0, max_size);

		output_len = OSAL_UNZIP_DATA(p_hwfn, input_len,
					     (u8 *)(uintptr_t)&array_data[offset],
					     max_size,
					     (u8 *)p_hwfn->unzip_buf);
		if (output_len) {
			rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr, 0,
						   output_len,
						   p_hwfn->unzip_buf,
						   b_must_dmae, b_can_dmae);
		} else {
			DP_NOTICE(p_hwfn, true, "Failed to unzip dmae data\n");
			rc = ECORE_INVAL;
		}
#else
		DP_NOTICE(p_hwfn, true,
			  "Using zipped firmware without config enabled\n");
		rc = ECORE_INVAL;
#endif
		break;
	case INIT_ARR_PATTERN:
	{
		u32 repeats = GET_FIELD(data,
					INIT_ARRAY_PATTERN_HDR_REPETITIONS);
		u32 i;

		size = GET_FIELD(data,
				 INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);

		for (i = 0; i < repeats; i++, addr += size << 2) {
			rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
						   dmae_array_offset + 1,
						   size, array_data,
						   b_must_dmae, b_can_dmae);
			if (rc)
				break;
		}
		break;
	}
	case INIT_ARR_STANDARD:
		size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE);
		rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
					   dmae_array_offset + 1,
					   size, array_data,
					   b_must_dmae, b_can_dmae);
		break;
	}

	return rc;
}

/* init_ops write command */
static enum _ecore_status_t ecore_init_cmd_wr(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      struct init_write_op *p_cmd,
					      bool b_can_dmae)
{
	u32 data = OSAL_LE32_TO_CPU(p_cmd->data);
	bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Sanitize */
	if (b_must_dmae && !b_can_dmae) {
		DP_NOTICE(p_hwfn, true,
			  "Need to write to %08x for Wide-bus but DMAE isn't allowed\n",
			  addr);
		return ECORE_INVAL;
	}

	switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
	case INIT_SRC_INLINE:
		data = OSAL_LE32_TO_CPU(p_cmd->args.inline_val);
		ecore_wr(p_hwfn, p_ptt, addr, data);
		break;
	case INIT_SRC_ZEROS:
		data = OSAL_LE32_TO_CPU(p_cmd->args.zeros_count);
		if (b_must_dmae || (b_can_dmae && (data >= 64)))
			rc = ecore_init_fill_dmae(p_hwfn, p_ptt, addr, data);
		else
			ecore_init_fill(p_hwfn, p_ptt, addr, 0, data);
		break;
	case INIT_SRC_ARRAY:
		rc = ecore_init_cmd_array(p_hwfn, p_ptt, p_cmd,
					  b_must_dmae, b_can_dmae);
		break;
	case INIT_SRC_RUNTIME:
		rc = ecore_init_rt(p_hwfn, p_ptt, addr,
				   OSAL_LE16_TO_CPU(p_cmd->args.runtime.offset),
				   OSAL_LE16_TO_CPU(p_cmd->args.runtime.size),
				   b_must_dmae);
		break;
	}

	return rc;
}

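/* Comparison helpers used by the init_ops poll commands */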
static OSAL_INLINE bool comp_eq(u32 val, u32 expected_val)
{
	return (val == expected_val);
}

static OSAL_INLINE bool comp_and(u32 val, u32 expected_val)
{
	return (val & expected_val) == expected_val;
}

static OSAL_INLINE bool comp_or(u32 val, u32 expected_val)
{
	return (val | expected_val) > 0;
}

/* init_ops read/poll commands */
static void ecore_init_cmd_rd(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt, struct init_read_op *cmd)
{
	bool (*comp_check)(u32 val, u32 expected_val);
	u32 delay = ECORE_INIT_POLL_PERIOD_US, val;
	u32 data, addr, poll;
	int i;

	data = OSAL_LE32_TO_CPU(cmd->op_data);
	addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
	poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay *= 100;
#endif

	val = ecore_rd(p_hwfn, p_ptt, addr);
	if (poll == INIT_POLL_NONE)
		return;

	switch (poll) {
	case INIT_POLL_EQ:
		comp_check = comp_eq;
		break;
	case INIT_POLL_OR:
		comp_check = comp_or;
		break;
	case INIT_POLL_AND:
		comp_check = comp_and;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
		       cmd->op_data);
		return;
	}

	data = OSAL_LE32_TO_CPU(cmd->expected_val);
	for (i = 0;
	     i < ECORE_INIT_MAX_POLL_COUNT && !comp_check(val, data); i++) {
		OSAL_UDELAY(delay);
		val = ecore_rd(p_hwfn, p_ptt, addr);
	}

	if (i == ECORE_INIT_MAX_POLL_COUNT)
		DP_ERR(p_hwfn, "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
		       addr, OSAL_LE32_TO_CPU(cmd->expected_val), val,
		       OSAL_LE32_TO_CPU(cmd->op_data));
}

/* init_ops callbacks entry point */
static enum _ecore_status_t ecore_init_cmd_cb(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      struct init_callback_op *p_cmd)
{
	enum _ecore_status_t rc;

	switch (p_cmd->callback_id) {
	case DMAE_READY_CB:
		rc = ecore_dmae_sanity(p_hwfn, p_ptt, "engine_phase");
		break;
	default:
		DP_NOTICE(p_hwfn, false, "Unexpected init op callback ID %d\n",
			  p_cmd->callback_id);
		return ECORE_INVAL;
	}

	return rc;
}

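/* Recursively evaluate the modes-tree expression at *p_offset against the
 * 'modes' bitmap; returns 1 on a match, 0 otherwise.
 */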
static u8 ecore_init_cmd_mode_match(struct ecore_hwfn *p_hwfn,
				    u16 *p_offset, int modes)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u8 arg1, arg2, tree_val;
	const u8 *modes_tree;

	modes_tree = p_dev->fw_data->modes_tree_buf;
	tree_val = modes_tree[(*p_offset)++];

	switch (tree_val) {
	case INIT_MODE_OP_NOT:
		return ecore_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1;
	case INIT_MODE_OP_OR:
		arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		return arg1 | arg2;
	case INIT_MODE_OP_AND:
		arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		return arg1 & arg2;
	default:
		tree_val -= MAX_INIT_MODE_OPS;
		return (modes & (1 << tree_val)) ? 1 : 0;
	}
}

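/* init_ops if_mode command - returns the number of commands to skip when the
 * mode expression does not match, 0 otherwise.
 */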
static u32 ecore_init_cmd_mode(struct ecore_hwfn *p_hwfn,
			       struct init_if_mode_op *p_cmd, int modes)
{
	u16 offset = OSAL_LE16_TO_CPU(p_cmd->modes_buf_offset);

	if (ecore_init_cmd_mode_match(p_hwfn, &offset, modes))
		return 0;
	else
		return GET_FIELD(OSAL_LE32_TO_CPU(p_cmd->op_data),
				 INIT_IF_MODE_OP_CMD_OFFSET);
}

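/* init_ops if_phase command - returns the number of commands to skip when the
 * phase/phase-id does not match, 0 otherwise.
 */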
static u32 ecore_init_cmd_phase(struct init_if_phase_op *p_cmd,
				u32 phase, u32 phase_id)
{
	u32 data = OSAL_LE32_TO_CPU(p_cmd->phase_data);
	u32 op_data = OSAL_LE32_TO_CPU(p_cmd->op_data);

	if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
	      (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
	       GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
		return GET_FIELD(op_data, INIT_IF_PHASE_OP_CMD_OFFSET);
	else
		return 0;
}

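/* Run the firmware init-ops list for the given phase, phase-id and modes */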
enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    int phase, int phase_id, int modes)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u32 cmd_num, num_init_ops;
	union init_op *init;
	bool b_dmae = false;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	num_init_ops = p_dev->fw_data->init_ops_size;
	init = p_dev->fw_data->init_ops;

#ifdef CONFIG_ECORE_ZIPPED_FW
	p_hwfn->unzip_buf = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
					MAX_ZIPPED_SIZE * 4);
	if (!p_hwfn->unzip_buf) {
		DP_NOTICE(p_hwfn, true, "Failed to allocate unzip buffer\n");
		return ECORE_NOMEM;
	}
#endif

	for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
		union init_op *cmd = &init[cmd_num];
		u32 data = OSAL_LE32_TO_CPU(cmd->raw.op_data);

		switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
		case INIT_OP_WRITE:
			rc = ecore_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
					       b_dmae);
			break;
		case INIT_OP_READ:
			ecore_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
			break;
		case INIT_OP_IF_MODE:
			cmd_num += ecore_init_cmd_mode(p_hwfn, &cmd->if_mode,
						       modes);
			break;
		case INIT_OP_IF_PHASE:
			cmd_num += ecore_init_cmd_phase(&cmd->if_phase, phase,
							phase_id);
			b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
			break;
		case INIT_OP_DELAY:
			/* ecore_init_run is always invoked from
			 * sleep-able context
			 */
			OSAL_UDELAY(cmd->delay.delay);
			break;
		case INIT_OP_CALLBACK:
			rc = ecore_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
			break;
		}

		if (rc)
			break;
	}
#ifdef CONFIG_ECORE_ZIPPED_FW
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->unzip_buf);
#endif
	return rc;
}

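/* Populate the fw_data pointers either from the firmware binary buffers or
 * from the statically linked init arrays.
 */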
enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
#ifdef CONFIG_ECORE_BINARY_FW
					const u8 *fw_data)
#else
					const u8 OSAL_UNUSED * fw_data)
#endif
{
	struct ecore_fw_data *fw = p_dev->fw_data;

#ifdef CONFIG_ECORE_BINARY_FW
	struct bin_buffer_hdr *buf_hdr;
	u32 offset, len;

	if (!fw_data) {
		DP_NOTICE(p_dev, true, "Invalid fw data\n");
		return ECORE_INVAL;
	}

	buf_hdr = (struct bin_buffer_hdr *)(uintptr_t)fw_data;

	offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
	fw->fw_ver_info = (struct fw_ver_info *)((uintptr_t)(fw_data + offset));

	offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
	fw->init_ops = (union init_op *)((uintptr_t)(fw_data + offset));

	offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
	fw->arr_data = (u32 *)((uintptr_t)(fw_data + offset));

	offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
	fw->modes_tree_buf = (u8 *)((uintptr_t)(fw_data + offset));
	len = buf_hdr[BIN_BUF_INIT_CMD].length;
	fw->init_ops_size = len / sizeof(struct init_raw_op);
#else
	fw->init_ops = (union init_op *)init_ops;
	fw->arr_data = (u32 *)init_val;
	fw->modes_tree_buf = (u8 *)modes_tree_buf;
	fw->init_ops_size = init_ops_size;
#endif

	return ECORE_SUCCESS;
}