/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2007-2013 Broadcom Corporation.
 *
 * Eric Davis        <edavis@broadcom.com>
 * David Christensen <davidch@broadcom.com>
 * Gary Zambrano     <zambrano@broadcom.com>
 *
 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
 * Copyright (c) 2015-2018 Cavium Inc.
 * All rights reserved.
 */

#ifndef ECORE_INIT_OPS_H
#define ECORE_INIT_OPS_H
static int ecore_gunzip(struct bnx2x_softc *sc, const uint8_t *zbuf, int len);
static void ecore_write_dmae_phys_len(struct bnx2x_softc *sc,
				      ecore_dma_addr_t phys_addr, uint32_t addr,
				      uint32_t len);
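
/* Straight PIO helper: writes an array of DWORDs to the device, one register
 * write per DWORD. Used both for OP_SW init ops and as the fallback path
 * when DMAE is not ready.
 */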
static void ecore_init_str_wr(struct bnx2x_softc *sc, uint32_t addr,
			      const uint32_t *data, uint32_t len)
{
	uint32_t i;

	for (i = 0; i < len; i++)
		REG_WR(sc, addr + i*4, data[i]);
}

static void ecore_write_big_buf(struct bnx2x_softc *sc, uint32_t addr,
				uint32_t len, uint8_t wb __rte_unused)
{
	if (DMAE_READY(sc))
		ecore_write_dmae_phys_len(sc, GUNZIP_PHYS(sc), addr, len);

	/* in later chips PXP root complex handles BIOS ZLR w/o interrupting */
	else
		ecore_init_str_wr(sc, addr, GUNZIP_BUF(sc), len);
}

static void ecore_init_fill(struct bnx2x_softc *sc, uint32_t addr, int fill,
			    uint32_t len, uint8_t wb)
{
	uint32_t buf_len = (((len*4) > FW_BUF_SIZE) ? FW_BUF_SIZE : (len*4));
	uint32_t buf_len32 = buf_len/4;
	uint32_t i;

	ECORE_MEMSET(GUNZIP_BUF(sc), (uint8_t)fill, buf_len);

	for (i = 0; i < len; i += buf_len32) {
		uint32_t cur_len = min(buf_len32, len - i);

		ecore_write_big_buf(sc, addr + i*4, cur_len, wb);
	}
}

static void ecore_write_big_buf_wb(struct bnx2x_softc *sc, uint32_t addr, uint32_t len)
{
	if (DMAE_READY(sc))
		ecore_write_dmae_phys_len(sc, GUNZIP_PHYS(sc), addr, len);

	/* in later chips PXP root complex handles BIOS ZLR w/o interrupting */
	else
		ecore_init_str_wr(sc, addr, GUNZIP_BUF(sc), len);
}

static void ecore_init_wr_64(struct bnx2x_softc *sc, uint32_t addr,
			     const uint32_t *data, uint32_t len64)
{
	uint32_t buf_len32 = FW_BUF_SIZE/4;
	uint32_t len = len64*2;
	uint64_t data64 = 0;
	uint32_t i;

	/* 64 bit value is in a blob: first low DWORD, then high DWORD */
	data64 = HILO_U64((*(data + 1)), (*data));

	len64 = min((uint32_t)(FW_BUF_SIZE/8), len64);
	for (i = 0; i < len64; i++) {
		uint64_t *pdata = ((uint64_t *)(GUNZIP_BUF(sc))) + i;

		*pdata = data64;
	}

	for (i = 0; i < len; i += buf_len32) {
		uint32_t cur_len = min(buf_len32, len - i);

		ecore_write_big_buf_wb(sc, addr + i*4, cur_len);
	}
}
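
/* Note: ecore_init_wr_64() replicates the single 64-bit pattern taken from
 * data[0]/data[1] across the GUNZIP scratch buffer, then streams it to the
 * target address in FW_BUF_SIZE-sized chunks.
 */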

/*********************************************************
   There are different blobs for each PRAM section.
   In addition, each blob write operation is divided into a few operations
   in order to decrease the amount of phys. contiguous buffer needed.
   Thus, when we select a blob, the address may be offset
   from the beginning of the PRAM section.
   The same holds for the INT_TABLE sections.
**********************************************************/
#define IF_IS_INT_TABLE_ADDR(base, addr) \
	if (((base) <= (addr)) && ((base) + 0x400 >= (addr)))

#define IF_IS_PRAM_ADDR(base, addr) \
	if (((base) <= (addr)) && ((base) + 0x40000 >= (addr)))
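
/* Note: these macros deliberately expand to a bare "if" header, so each call
 * site below supplies the statement body and the tests can be chained with
 * "else". A sketch of the expansion:
 *
 *	IF_IS_PRAM_ADDR(TSEM_REG_PRAM, addr)
 *		data = INIT_TSEM_PRAM_DATA(sc);
 *
 * becomes
 *
 *	if ((TSEM_REG_PRAM <= addr) && (TSEM_REG_PRAM + 0x40000 >= addr))
 *		data = INIT_TSEM_PRAM_DATA(sc);
 */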

static const uint8_t *ecore_sel_blob(struct bnx2x_softc *sc, uint32_t addr,
				     const uint8_t *data)
{
	IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr)
		data = INIT_TSEM_INT_TABLE_DATA(sc);
	else
	IF_IS_INT_TABLE_ADDR(CSEM_REG_INT_TABLE, addr)
		data = INIT_CSEM_INT_TABLE_DATA(sc);
	else
	IF_IS_INT_TABLE_ADDR(USEM_REG_INT_TABLE, addr)
		data = INIT_USEM_INT_TABLE_DATA(sc);
	else
	IF_IS_INT_TABLE_ADDR(XSEM_REG_INT_TABLE, addr)
		data = INIT_XSEM_INT_TABLE_DATA(sc);
	else
	IF_IS_PRAM_ADDR(TSEM_REG_PRAM, addr)
		data = INIT_TSEM_PRAM_DATA(sc);
	else
	IF_IS_PRAM_ADDR(CSEM_REG_PRAM, addr)
		data = INIT_CSEM_PRAM_DATA(sc);
	else
	IF_IS_PRAM_ADDR(USEM_REG_PRAM, addr)
		data = INIT_USEM_PRAM_DATA(sc);
	else
	IF_IS_PRAM_ADDR(XSEM_REG_PRAM, addr)
		data = INIT_XSEM_PRAM_DATA(sc);

	return data;
}

static void ecore_init_wr_wb(struct bnx2x_softc *sc, uint32_t addr,
			     const uint32_t *data, uint32_t len)
{
	if (DMAE_READY(sc))
		VIRT_WR_DMAE_LEN(sc, data, addr, len, 0);

	/* in later chips PXP root complex handles BIOS ZLR w/o interrupting */
	else
		ecore_init_str_wr(sc, addr, data, len);
}

static void ecore_wr_64(struct bnx2x_softc *sc, uint32_t reg, uint32_t val_lo,
			uint32_t val_hi)
{
	uint32_t wb_write[2];

	wb_write[0] = val_lo;
	wb_write[1] = val_hi;
	REG_WR_DMAE_LEN(sc, reg, wb_write, 2);
}

static void ecore_init_wr_zp(struct bnx2x_softc *sc, uint32_t addr, uint32_t len,
			     uint32_t blob_off)
{
	const uint8_t *data = NULL;
	int rc;
	uint32_t i;

	data = ecore_sel_blob(sc, addr, data) + blob_off*4;

	rc = ecore_gunzip(sc, data, len);
	if (rc)
		return;

	/* gunzip_outlen is in dwords */
	len = GUNZIP_OUTLEN(sc);
	for (i = 0; i < len; i++)
		((uint32_t *)GUNZIP_BUF(sc))[i] = (uint32_t)
			ECORE_CPU_TO_LE32(((uint32_t *)GUNZIP_BUF(sc))[i]);

	ecore_write_big_buf_wb(sc, addr, len);
}
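
/* Note: ecore_init_wr_zp() inflates a compressed blob into the GUNZIP buffer,
 * converts each DWORD to little-endian in place and writes the result out
 * through the DMAE/PIO path above. 'len' is reused: compressed bytes on
 * input, decompressed DWORDs on output.
 */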

static void ecore_init_block(struct bnx2x_softc *sc, uint32_t block, uint32_t stage)
{
	uint16_t op_start =
		INIT_OPS_OFFSETS(sc)[BLOCK_OPS_IDX(block, stage,
						   STAGE_START)];
	uint16_t op_end =
		INIT_OPS_OFFSETS(sc)[BLOCK_OPS_IDX(block, stage,
						   STAGE_END)];
	const union init_op *op;
	uint32_t op_idx, op_type, addr, len;
	const uint32_t *data, *data_base;

	/* If empty block */
	if (op_start == op_end)
		return;

	data_base = INIT_DATA(sc);

	for (op_idx = op_start; op_idx < op_end; op_idx++) {

		op = (const union init_op *)&(INIT_OPS(sc)[op_idx]);
		/* Get generic data */
		op_type = op->raw.op;
		addr = op->raw.offset;
		/* Get data that's used for OP_SW, OP_WB, OP_FW, OP_ZP and
		 * OP_WR64 (we assume that op_arr_write and op_write have the
		 * same structure).
		 */
		len = op->arr_wr.data_len;
		data = data_base + op->arr_wr.data_off;

		switch (op_type) {
		case OP_RD:
			REG_RD(sc, addr);
			break;
		case OP_WR:
			REG_WR(sc, addr, op->write.val);
			break;
		case OP_SW:
			ecore_init_str_wr(sc, addr, data, len);
			break;
		case OP_WB:
			ecore_init_wr_wb(sc, addr, data, len);
			break;
		case OP_ZR:
			ecore_init_fill(sc, addr, 0, op->zero.len, 0);
			break;
		case OP_WB_ZR:
			ecore_init_fill(sc, addr, 0, op->zero.len, 1);
			break;
		case OP_ZP:
			ecore_init_wr_zp(sc, addr, len,
					 op->arr_wr.data_off);
			break;
		case OP_WR_64:
			ecore_init_wr_64(sc, addr, data, len);
			break;
		case OP_IF_MODE_AND:
			/* if any of the flags doesn't match, skip the
			 * conditional block.
			 */
			if ((INIT_MODE_FLAGS(sc) &
			     op->if_mode.mode_bit_map) !=
			    op->if_mode.mode_bit_map)
				op_idx += op->if_mode.cmd_offset;
			break;
		case OP_IF_MODE_OR:
			/* if all the flags don't match, skip the conditional
			 * block.
			 */
			if ((INIT_MODE_FLAGS(sc) &
			     op->if_mode.mode_bit_map) == 0)
				op_idx += op->if_mode.cmd_offset;
			break;
		default:
			/* Should never get here! */
			break;
		}
	}
}
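
/* Each init "op" is a small instruction interpreted above; for example, an
 * OP_ZR op with zero.len == 0x100 clears 0x100 DWORDs starting at 'addr'
 * via ecore_init_fill(), while the OP_IF_MODE_* ops conditionally skip the
 * following cmd_offset ops depending on INIT_MODE_FLAGS(sc).
 */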

/****************************************************************************
 * PXP Arbiter
 ****************************************************************************/

/*
 * This code configures the PCI read/write arbiter
 * which implements a weighted round robin
 * between the virtual queues in the chip.
 *
 * The values were derived for each PCI max payload and max request size.
 * Since max payload and max request size are only known at run time,
 * this is done as a separate init stage.
 */

#define NUM_RD_Q	29
#define NUM_WR_Q	13
#define MAX_RD_ORD	3
#define MAX_WR_ORD	2

/* configuration for one arbiter queue */
struct arb_line {
	int l;
	int add;
	int ubound;
};
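
/* In the two tables below, rows are indexed by virtual queue number and
 * columns by the read/write order (0..MAX_RD_ORD resp. 0..MAX_WR_ORD),
 * i.e. the PCI max request/payload size known only at run time.
 */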

/* derived configuration for each read queue for each max request size */
static const struct arb_line read_arb_data[NUM_RD_Q][MAX_RD_ORD + 1] = {
/* 1 */	{ {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
	{ {4, 8, 4},  {4, 8, 4},   {4, 8, 4},   {4, 8, 4} },
	{ {4, 3, 3},  {4, 3, 3},   {4, 3, 3},   {4, 3, 3} },
	{ {8, 3, 6},  {16, 3, 11}, {16, 3, 11}, {16, 3, 11} },
	{ {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
	{ {8, 3, 6},  {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
	{ {8, 3, 6},  {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
	{ {8, 3, 6},  {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
	{ {8, 3, 6},  {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
/* 10 */{ {8, 3, 6},  {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6},  {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6},  {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6},  {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6},  {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6},  {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6},  {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 64, 6}, {16, 64, 11}, {32, 64, 21}, {32, 64, 21} },
	{ {8, 3, 6},  {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6},  {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
/* 20 */{ {8, 3, 6},  {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6},  {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6},  {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6},  {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6},  {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6},  {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6},  {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6},  {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6},  {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 64, 25}, {16, 64, 41}, {32, 64, 81}, {64, 64, 120} }
};

/* derived configuration for each write queue for each max request size */
static const struct arb_line write_arb_data[NUM_WR_Q][MAX_WR_ORD + 1] = {
/* 1 */	{ {4, 6, 3},  {4, 6, 3},   {4, 6, 3} },
	{ {4, 2, 3},  {4, 2, 3},   {4, 2, 3} },
	{ {8, 2, 6},  {16, 2, 11}, {16, 2, 11} },
	{ {8, 2, 6},  {16, 2, 11}, {32, 2, 21} },
	{ {8, 2, 6},  {16, 2, 11}, {32, 2, 21} },
	{ {8, 2, 6},  {16, 2, 11}, {32, 2, 21} },
	{ {8, 64, 25}, {16, 64, 25}, {32, 64, 25} },
	{ {8, 2, 6},  {16, 2, 11}, {16, 2, 11} },
	{ {8, 2, 6},  {16, 2, 11}, {16, 2, 11} },
/* 10 */{ {8, 9, 6},  {16, 9, 11}, {32, 9, 21} },
	{ {8, 47, 19}, {16, 47, 19}, {32, 47, 21} },
	{ {8, 9, 6},  {16, 9, 11}, {16, 9, 11} },
	{ {8, 64, 25}, {16, 64, 41}, {32, 64, 81} }
};

/* register addresses for read queues */
static const struct arb_line read_arb_addr[NUM_RD_Q-1] = {
/* 1 */	{PXP2_REG_RQ_BW_RD_L0, PXP2_REG_RQ_BW_RD_ADD0,
		PXP2_REG_RQ_BW_RD_UBOUND0},
	{PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
		PXP2_REG_PSWRQ_BW_UB1},
	{PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
		PXP2_REG_PSWRQ_BW_UB2},
	{PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
		PXP2_REG_PSWRQ_BW_UB3},
	{PXP2_REG_RQ_BW_RD_L4, PXP2_REG_RQ_BW_RD_ADD4,
		PXP2_REG_RQ_BW_RD_UBOUND4},
	{PXP2_REG_RQ_BW_RD_L5, PXP2_REG_RQ_BW_RD_ADD5,
		PXP2_REG_RQ_BW_RD_UBOUND5},
	{PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
		PXP2_REG_PSWRQ_BW_UB6},
	{PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
		PXP2_REG_PSWRQ_BW_UB7},
	{PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
		PXP2_REG_PSWRQ_BW_UB8},
/* 10 */{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
		PXP2_REG_PSWRQ_BW_UB9},
	{PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
		PXP2_REG_PSWRQ_BW_UB10},
	{PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
		PXP2_REG_PSWRQ_BW_UB11},
	{PXP2_REG_RQ_BW_RD_L12, PXP2_REG_RQ_BW_RD_ADD12,
		PXP2_REG_RQ_BW_RD_UBOUND12},
	{PXP2_REG_RQ_BW_RD_L13, PXP2_REG_RQ_BW_RD_ADD13,
		PXP2_REG_RQ_BW_RD_UBOUND13},
	{PXP2_REG_RQ_BW_RD_L14, PXP2_REG_RQ_BW_RD_ADD14,
		PXP2_REG_RQ_BW_RD_UBOUND14},
	{PXP2_REG_RQ_BW_RD_L15, PXP2_REG_RQ_BW_RD_ADD15,
		PXP2_REG_RQ_BW_RD_UBOUND15},
	{PXP2_REG_RQ_BW_RD_L16, PXP2_REG_RQ_BW_RD_ADD16,
		PXP2_REG_RQ_BW_RD_UBOUND16},
	{PXP2_REG_RQ_BW_RD_L17, PXP2_REG_RQ_BW_RD_ADD17,
		PXP2_REG_RQ_BW_RD_UBOUND17},
	{PXP2_REG_RQ_BW_RD_L18, PXP2_REG_RQ_BW_RD_ADD18,
		PXP2_REG_RQ_BW_RD_UBOUND18},
/* 20 */{PXP2_REG_RQ_BW_RD_L19, PXP2_REG_RQ_BW_RD_ADD19,
		PXP2_REG_RQ_BW_RD_UBOUND19},
	{PXP2_REG_RQ_BW_RD_L20, PXP2_REG_RQ_BW_RD_ADD20,
		PXP2_REG_RQ_BW_RD_UBOUND20},
	{PXP2_REG_RQ_BW_RD_L22, PXP2_REG_RQ_BW_RD_ADD22,
		PXP2_REG_RQ_BW_RD_UBOUND22},
	{PXP2_REG_RQ_BW_RD_L23, PXP2_REG_RQ_BW_RD_ADD23,
		PXP2_REG_RQ_BW_RD_UBOUND23},
	{PXP2_REG_RQ_BW_RD_L24, PXP2_REG_RQ_BW_RD_ADD24,
		PXP2_REG_RQ_BW_RD_UBOUND24},
	{PXP2_REG_RQ_BW_RD_L25, PXP2_REG_RQ_BW_RD_ADD25,
		PXP2_REG_RQ_BW_RD_UBOUND25},
	{PXP2_REG_RQ_BW_RD_L26, PXP2_REG_RQ_BW_RD_ADD26,
		PXP2_REG_RQ_BW_RD_UBOUND26},
	{PXP2_REG_RQ_BW_RD_L27, PXP2_REG_RQ_BW_RD_ADD27,
		PXP2_REG_RQ_BW_RD_UBOUND27},
	{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
		PXP2_REG_PSWRQ_BW_UB28}
};

/* register addresses for write queues */
static const struct arb_line write_arb_addr[NUM_WR_Q-1] = {
/* 1 */	{PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
		PXP2_REG_PSWRQ_BW_UB1},
	{PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
		PXP2_REG_PSWRQ_BW_UB2},
	{PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
		PXP2_REG_PSWRQ_BW_UB3},
	{PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
		PXP2_REG_PSWRQ_BW_UB6},
	{PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
		PXP2_REG_PSWRQ_BW_UB7},
	{PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
		PXP2_REG_PSWRQ_BW_UB8},
	{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
		PXP2_REG_PSWRQ_BW_UB9},
	{PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
		PXP2_REG_PSWRQ_BW_UB10},
	{PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
		PXP2_REG_PSWRQ_BW_UB11},
/* 10 */{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
		PXP2_REG_PSWRQ_BW_UB28},
	{PXP2_REG_RQ_BW_WR_L29, PXP2_REG_RQ_BW_WR_ADD29,
		PXP2_REG_RQ_BW_WR_UBOUND29},
	{PXP2_REG_RQ_BW_WR_L30, PXP2_REG_RQ_BW_WR_ADD30,
		PXP2_REG_RQ_BW_WR_UBOUND30}
};

static void ecore_init_pxp_arb(struct bnx2x_softc *sc, int r_order,
			       int w_order)
{
	uint32_t val, i;

	if (r_order > MAX_RD_ORD) {
		ECORE_MSG(sc, "read order of %d adjusted to %d",
			  r_order, MAX_RD_ORD);
		r_order = MAX_RD_ORD;
	}
	if (w_order > MAX_WR_ORD) {
		ECORE_MSG(sc, "write order of %d adjusted to %d",
			  w_order, MAX_WR_ORD);
		w_order = MAX_WR_ORD;
	}
	if (CHIP_REV_IS_FPGA(sc)) {
		ECORE_MSG(sc, "write order adjusted to 1 for FPGA");
		w_order = 0;
	}
	ECORE_MSG(sc, "read order %d write order %d", r_order, w_order);

	for (i = 0; i < NUM_RD_Q-1; i++) {
		REG_WR(sc, read_arb_addr[i].l, read_arb_data[i][r_order].l);
		REG_WR(sc, read_arb_addr[i].add,
		       read_arb_data[i][r_order].add);
		REG_WR(sc, read_arb_addr[i].ubound,
		       read_arb_data[i][r_order].ubound);
	}
456 for (i = 0; i < NUM_WR_Q-1; i++) {
457 if ((write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L29) ||
458 (write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L30)) {
460 REG_WR(sc, write_arb_addr[i].l,
461 write_arb_data[i][w_order].l);
463 REG_WR(sc, write_arb_addr[i].add,
464 write_arb_data[i][w_order].add);
466 REG_WR(sc, write_arb_addr[i].ubound,
467 write_arb_data[i][w_order].ubound);
470 val = REG_RD(sc, write_arb_addr[i].l);
471 REG_WR(sc, write_arb_addr[i].l,
472 val | (write_arb_data[i][w_order].l << 10));
474 val = REG_RD(sc, write_arb_addr[i].add);
475 REG_WR(sc, write_arb_addr[i].add,
476 val | (write_arb_data[i][w_order].add << 10));
478 val = REG_RD(sc, write_arb_addr[i].ubound);
479 REG_WR(sc, write_arb_addr[i].ubound,
480 val | (write_arb_data[i][w_order].ubound << 7));

	val  = write_arb_data[NUM_WR_Q-1][w_order].add;
	val += write_arb_data[NUM_WR_Q-1][w_order].ubound << 10;
	val += write_arb_data[NUM_WR_Q-1][w_order].l << 17;
	REG_WR(sc, PXP2_REG_PSWRQ_BW_RD, val);

	val  = read_arb_data[NUM_RD_Q-1][r_order].add;
	val += read_arb_data[NUM_RD_Q-1][r_order].ubound << 10;
	val += read_arb_data[NUM_RD_Q-1][r_order].l << 17;
	REG_WR(sc, PXP2_REG_PSWRQ_BW_WR, val);

	REG_WR(sc, PXP2_REG_RQ_WR_MBS0, w_order);
	REG_WR(sc, PXP2_REG_RQ_WR_MBS1, w_order);
	REG_WR(sc, PXP2_REG_RQ_RD_MBS0, r_order);
	REG_WR(sc, PXP2_REG_RQ_RD_MBS1, r_order);

	if ((CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) && (r_order == MAX_RD_ORD))
		REG_WR(sc, PXP2_REG_RQ_PDR_LIMIT, 0xe00);

	if (CHIP_IS_E3(sc))
		REG_WR(sc, PXP2_REG_WR_USDMDP_TH, (0x4 << w_order));
	else if (CHIP_IS_E2(sc))
		REG_WR(sc, PXP2_REG_WR_USDMDP_TH, (0x8 << w_order));
	else
		REG_WR(sc, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));

	if (!CHIP_IS_E1(sc)) {
		/*    MPS      w_order     optimal TH      presently TH
		 *    128         0             0               2
		 *    256         1             1               3
		 *    >=512       2             2               3
		 */
		/* DMAE is special */
		if (!CHIP_IS_E1H(sc)) {
			/* E2 can use optimal TH */
			val = w_order;
			REG_WR(sc, PXP2_REG_WR_DMAE_MPS, val);
		} else {
			val = ((w_order == 0) ? 2 : 3);
			REG_WR(sc, PXP2_REG_WR_DMAE_MPS, 2);
		}

		REG_WR(sc, PXP2_REG_WR_HC_MPS, val);
		REG_WR(sc, PXP2_REG_WR_USDM_MPS, val);
		REG_WR(sc, PXP2_REG_WR_CSDM_MPS, val);
		REG_WR(sc, PXP2_REG_WR_TSDM_MPS, val);
		REG_WR(sc, PXP2_REG_WR_XSDM_MPS, val);
		REG_WR(sc, PXP2_REG_WR_QM_MPS, val);
		REG_WR(sc, PXP2_REG_WR_TM_MPS, val);
		REG_WR(sc, PXP2_REG_WR_SRC_MPS, val);
		REG_WR(sc, PXP2_REG_WR_DBG_MPS, val);
		REG_WR(sc, PXP2_REG_WR_CDU_MPS, val);
	}

	/* Validate number of tags supported by device */
#define PCIE_REG_PCIER_TL_HDR_FC_ST	0x2980
	val = REG_RD(sc, PCIE_REG_PCIER_TL_HDR_FC_ST);
	val &= 0xFF;
	if (val == 0x20)
		REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x20);
}
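
/* Usage sketch (an assumption based on the bnx2x init flow, see
 * bnx2x_init_pxp()): r_order and w_order are the MaxReadReq and MaxPayload
 * fields decoded from the PCIe Device Control register, so e.g. a 512 byte
 * MaxReadReq (field value 2) selects column 2 of read_arb_data.
 */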

/****************************************************************************
 * ILT management
 ****************************************************************************/

/*
 * This code hides the low level HW interaction for ILT management and
 * configuration. The API consists of a shadow ILT table which is set by the
 * driver and a set of routines to use it to configure the HW.
 */

/* ILT HW init operations */

/* ILT memory management operations */
#define ILT_MEMOP_ALLOC		0
#define ILT_MEMOP_FREE		1

/* the phys address is shifted right 12 bits and a 1=valid bit is added
 * at bit 52 (the 53rd bit); then, since this is a wide register(TM),
 * we split it into two 32 bit writes
 */
#define ILT_ADDR1(x)		((uint32_t)(((uint64_t)x >> 12) & 0xFFFFFFFF))
#define ILT_ADDR2(x)		((uint32_t)((1 << 20) | ((uint64_t)x >> 44)))
#define ILT_RANGE(f, l)		(((l) << 10) | f)
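
/* Worked example (hypothetical page mapping): for a page at physical
 * address 0x123456000,
 *	ILT_ADDR1(0x123456000ULL) == 0x00123456  (addr >> 12)
 *	ILT_ADDR2(0x123456000ULL) == 0x00100000  (valid bit 20 set, addr >> 44 == 0)
 */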

static int ecore_ilt_line_mem_op(struct bnx2x_softc *sc __rte_unused,
				 struct ilt_line *line, uint32_t size,
				 uint8_t memop)
{
	if (memop == ILT_MEMOP_FREE) {
		ECORE_ILT_FREE(line->page, line->page_mapping, line->size);
		return 0;
	}
	ECORE_ILT_ZALLOC(line->page, &line->page_mapping, size);
	if (!line->page)
		return -1;
	line->size = size;
	return 0;
}

static int ecore_ilt_client_mem_op(struct bnx2x_softc *sc, int cli_num,
				   uint8_t memop)
{
	int i, rc;
	struct ecore_ilt *ilt = SC_ILT(sc);
	struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];

	if (!ilt || !ilt->lines)
		return -1;

	if (ilt_cli->flags & (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM))
		return 0;

	for (rc = 0, i = ilt_cli->start; i <= ilt_cli->end && !rc; i++) {
		rc = ecore_ilt_line_mem_op(sc, &ilt->lines[i],
					   ilt_cli->page_size, memop);
	}
	return rc;
}

static int ecore_ilt_mem_op(struct bnx2x_softc *sc, uint8_t memop)
{
	int rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_CDU, memop);

	if (!rc)
		rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_QM, memop);
	if (!rc && CNIC_SUPPORT(sc) && !CONFIGURE_NIC_MODE(sc))
		rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_SRC, memop);

	return rc;
}

static void ecore_ilt_line_wr(struct bnx2x_softc *sc, int abs_idx,
			      ecore_dma_addr_t page_mapping)
{
	uint32_t reg;

	if (CHIP_IS_E1(sc))
		reg = PXP2_REG_RQ_ONCHIP_AT + abs_idx * 8;
	else
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + abs_idx * 8;

	ecore_wr_64(sc, reg, ILT_ADDR1(page_mapping), ILT_ADDR2(page_mapping));
}

static void ecore_ilt_line_init_op(struct bnx2x_softc *sc,
				   struct ecore_ilt *ilt, int idx, uint8_t initop)
{
	ecore_dma_addr_t null_mapping;
	int abs_idx = ilt->start_line + idx;

	switch (initop) {
	case INITOP_INIT:
		/* set in the init-value array */
	case INITOP_SET:
		ecore_ilt_line_wr(sc, abs_idx, ilt->lines[idx].page_mapping);
		break;
	case INITOP_CLEAR:
		null_mapping = 0;
		ecore_ilt_line_wr(sc, abs_idx, null_mapping);
		break;
	}
}

static void ecore_ilt_boundary_init_op(struct bnx2x_softc *sc,
				       struct ilt_client_info *ilt_cli,
				       uint32_t ilt_start,
				       uint8_t initop __rte_unused)
{
	uint32_t start_reg = 0;
	uint32_t end_reg = 0;

	/* The boundary is either SET or INIT,
	   CLEAR => SET and for now SET ~~ INIT */

	/* find the appropriate regs */
	if (CHIP_IS_E1(sc)) {
		switch (ilt_cli->client_num) {
		case ILT_CLIENT_CDU:
			start_reg = PXP2_REG_PSWRQ_CDU0_L2P;
			break;
		case ILT_CLIENT_QM:
			start_reg = PXP2_REG_PSWRQ_QM0_L2P;
			break;
		case ILT_CLIENT_SRC:
			start_reg = PXP2_REG_PSWRQ_SRC0_L2P;
			break;
		case ILT_CLIENT_TM:
			start_reg = PXP2_REG_PSWRQ_TM0_L2P;
			break;
		}
		REG_WR(sc, start_reg + SC_FUNC(sc) * 4,
		       ILT_RANGE((ilt_start + ilt_cli->start),
				 (ilt_start + ilt_cli->end)));
	} else {
		switch (ilt_cli->client_num) {
		case ILT_CLIENT_CDU:
			start_reg = PXP2_REG_RQ_CDU_FIRST_ILT;
			end_reg = PXP2_REG_RQ_CDU_LAST_ILT;
			break;
		case ILT_CLIENT_QM:
			start_reg = PXP2_REG_RQ_QM_FIRST_ILT;
			end_reg = PXP2_REG_RQ_QM_LAST_ILT;
			break;
		case ILT_CLIENT_SRC:
			start_reg = PXP2_REG_RQ_SRC_FIRST_ILT;
			end_reg = PXP2_REG_RQ_SRC_LAST_ILT;
			break;
		case ILT_CLIENT_TM:
			start_reg = PXP2_REG_RQ_TM_FIRST_ILT;
			end_reg = PXP2_REG_RQ_TM_LAST_ILT;
			break;
		}
		REG_WR(sc, start_reg, (ilt_start + ilt_cli->start));
		REG_WR(sc, end_reg, (ilt_start + ilt_cli->end));
	}
}

static void ecore_ilt_client_init_op_ilt(struct bnx2x_softc *sc,
					 struct ecore_ilt *ilt,
					 struct ilt_client_info *ilt_cli,
					 uint8_t initop)
{
	int i;

	if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT)
		return;

	for (i = ilt_cli->start; i <= ilt_cli->end; i++)
		ecore_ilt_line_init_op(sc, ilt, i, initop);

	/* init/clear the ILT boundaries */
	ecore_ilt_boundary_init_op(sc, ilt_cli, ilt->start_line, initop);
}

static void ecore_ilt_client_init_op(struct bnx2x_softc *sc,
				     struct ilt_client_info *ilt_cli, uint8_t initop)
{
	struct ecore_ilt *ilt = SC_ILT(sc);

	ecore_ilt_client_init_op_ilt(sc, ilt, ilt_cli, initop);
}

static void ecore_ilt_client_id_init_op(struct bnx2x_softc *sc,
					int cli_num, uint8_t initop)
{
	struct ecore_ilt *ilt = SC_ILT(sc);
	struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];

	ecore_ilt_client_init_op(sc, ilt_cli, initop);
}

static void ecore_ilt_init_op(struct bnx2x_softc *sc, uint8_t initop)
{
	ecore_ilt_client_id_init_op(sc, ILT_CLIENT_CDU, initop);
	ecore_ilt_client_id_init_op(sc, ILT_CLIENT_QM, initop);
	if (CNIC_SUPPORT(sc) && !CONFIGURE_NIC_MODE(sc))
		ecore_ilt_client_id_init_op(sc, ILT_CLIENT_SRC, initop);
}

static void ecore_ilt_init_client_psz(struct bnx2x_softc *sc, int cli_num,
				      uint32_t psz_reg, uint8_t initop)
{
	struct ecore_ilt *ilt = SC_ILT(sc);
	struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];

	if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT)
		return;

	switch (initop) {
	case INITOP_INIT:
		/* set in the init-value array */
	case INITOP_SET:
		REG_WR(sc, psz_reg, ILOG2(ilt_cli->page_size >> 12));
		break;
	case INITOP_CLEAR:
		break;
	}
}

/*
 * called during init common stage, ILT clients should be initialized
 * prior to calling this function
 */
static void ecore_ilt_init_page_size(struct bnx2x_softc *sc, uint8_t initop)
{
	ecore_ilt_init_client_psz(sc, ILT_CLIENT_CDU,
				  PXP2_REG_RQ_CDU_P_SIZE, initop);
	ecore_ilt_init_client_psz(sc, ILT_CLIENT_QM,
				  PXP2_REG_RQ_QM_P_SIZE, initop);
	ecore_ilt_init_client_psz(sc, ILT_CLIENT_SRC,
				  PXP2_REG_RQ_SRC_P_SIZE, initop);
	ecore_ilt_init_client_psz(sc, ILT_CLIENT_TM,
				  PXP2_REG_RQ_TM_P_SIZE, initop);
}

/****************************************************************************
 * QM initializations
 ****************************************************************************/

#define QM_QUEUES_PER_FUNC	16 /* E1 has 32, but only 16 are used */
#define QM_INIT_MIN_CID_COUNT	31
#define QM_INIT(cid_cnt)	(cid_cnt > QM_INIT_MIN_CID_COUNT)
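
/* QM_INIT() gates all QM programming on a function owning more than 31 CIDs;
 * e.g. qm_cid_count == 64 programs QM_REG_CONNNUM_0 below with 64/16 - 1 = 3.
 */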

/* called during init port stage */
static void ecore_qm_init_cid_count(struct bnx2x_softc *sc, int qm_cid_count,
				    uint8_t initop)
{
	int port = SC_PORT(sc);

	if (QM_INIT(qm_cid_count)) {
		switch (initop) {
		case INITOP_INIT:
			/* set in the init-value array */
		case INITOP_SET:
			REG_WR(sc, QM_REG_CONNNUM_0 + port*4,
			       qm_cid_count/16 - 1);
			break;
		case INITOP_CLEAR:
			break;
		}
	}
}

static void ecore_qm_set_ptr_table(struct bnx2x_softc *sc, int qm_cid_count,
				   uint32_t base_reg, uint32_t reg)
{
	int i;
	uint32_t wb_data[2] = {0, 0};

	for (i = 0; i < 4 * QM_QUEUES_PER_FUNC; i++) {
		REG_WR(sc, base_reg + i*4,
		       qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC));
		ecore_init_wr_wb(sc, reg + i*8,
				 wb_data, 2);
	}
}
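
/* Note: each of the 4 * QM_QUEUES_PER_FUNC entries gets a base offset of
 * qm_cid_count * 4 per queue (wrapping every QM_QUEUES_PER_FUNC entries),
 * while the pointer-table entries themselves are zeroed via 64-bit wide
 * writes of wb_data.
 */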

/* called during init common stage */
static void ecore_qm_init_ptr_table(struct bnx2x_softc *sc, int qm_cid_count,
				    uint8_t initop)
{
	if (!QM_INIT(qm_cid_count))
		return;

	switch (initop) {
	case INITOP_INIT:
		/* set in the init-value array */
	case INITOP_SET:
		ecore_qm_set_ptr_table(sc, qm_cid_count,
				       QM_REG_BASEADDR, QM_REG_PTRTBL);
		if (CHIP_IS_E1H(sc))
			ecore_qm_set_ptr_table(sc, qm_cid_count,
					       QM_REG_BASEADDR_EXT_A,
					       QM_REG_PTRTBL_EXT_A);
		break;
	case INITOP_CLEAR:
		break;
	}
}

#endif /* ECORE_INIT_OPS_H */