/*
 * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
 *
 * Eric Davis        <edavis@broadcom.com>
 * David Christensen <davidch@broadcom.com>
 * Gary Zambrano     <zambrano@broadcom.com>
 *
 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written consent.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef ECORE_INIT_OPS_H
#define ECORE_INIT_OPS_H

static int ecore_gunzip(struct bnx2x_softc *sc, const uint8_t *zbuf, int len);
static void ecore_write_dmae_phys_len(struct bnx2x_softc *sc,
				      ecore_dma_addr_t phys_addr, uint32_t addr,
				      uint32_t len);

static void ecore_init_str_wr(struct bnx2x_softc *sc, uint32_t addr,
			      const uint32_t *data, uint32_t len)
{
	uint32_t i;

	for (i = 0; i < len; i++)
		REG_WR(sc, addr + i*4, data[i]);
}

static void ecore_write_big_buf(struct bnx2x_softc *sc, uint32_t addr, uint32_t len)
{
	if (DMAE_READY(sc))
		ecore_write_dmae_phys_len(sc, GUNZIP_PHYS(sc), addr, len);

	else ecore_init_str_wr(sc, addr, GUNZIP_BUF(sc), len);
}

static void ecore_init_fill(struct bnx2x_softc *sc, uint32_t addr, int fill,
			    uint32_t len)
{
	uint32_t buf_len = (((len*4) > FW_BUF_SIZE) ? FW_BUF_SIZE : (len*4));
	uint32_t buf_len32 = buf_len/4;
	uint32_t i;

	ECORE_MEMSET(GUNZIP_BUF(sc), (uint8_t)fill, buf_len);

	for (i = 0; i < len; i += buf_len32) {
		uint32_t cur_len = min(buf_len32, len - i);

		ecore_write_big_buf(sc, addr + i*4, cur_len);
	}
}

static void ecore_write_big_buf_wb(struct bnx2x_softc *sc, uint32_t addr, uint32_t len)
{
	if (DMAE_READY(sc))
		ecore_write_dmae_phys_len(sc, GUNZIP_PHYS(sc), addr, len);

	else ecore_init_str_wr(sc, addr, GUNZIP_BUF(sc), len);
}

static void ecore_init_wr_64(struct bnx2x_softc *sc, uint32_t addr,
			     const uint32_t *data, uint32_t len64)
{
	uint32_t buf_len32 = FW_BUF_SIZE/4;
	uint32_t len = len64*2;
	uint64_t data64 = 0;
	uint32_t i;

	/* 64 bit value is in a blob: first low DWORD, then high DWORD */
	data64 = HILO_U64((*(data + 1)), (*data));

	len64 = min((uint32_t)(FW_BUF_SIZE/8), len64);
	for (i = 0; i < len64; i++) {
		uint64_t *pdata = ((uint64_t *)(GUNZIP_BUF(sc))) + i;

		*pdata = data64;
	}

	for (i = 0; i < len; i += buf_len32) {
		uint32_t cur_len = min(buf_len32, len - i);

		ecore_write_big_buf_wb(sc, addr + i*4, cur_len);
	}
}

/*********************************************************
	There are different blobs for each PRAM section.
	In addition, each blob write operation is divided into a few operations
	in order to decrease the amount of phys. contiguous buffer needed.
	Thus, when we select a blob the address may be with some offset
	from the beginning of PRAM section.
	The same holds for the INT_TABLE sections.
**********************************************************/
#define IF_IS_INT_TABLE_ADDR(base, addr) \
	if (((base) <= (addr)) && ((base) + 0x400 >= (addr)))

#define IF_IS_PRAM_ADDR(base, addr) \
	if (((base) <= (addr)) && ((base) + 0x40000 >= (addr)))

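/*
 * Illustrative expansion: the IF_* helpers above expand to a bare "if"
 * statement, so a use such as
 *
 *	IF_IS_PRAM_ADDR(TSEM_REG_PRAM, addr)
 *		data = INIT_TSEM_PRAM_DATA(sc);
 *
 * is equivalent to
 *
 *	if ((TSEM_REG_PRAM <= (addr)) && (TSEM_REG_PRAM + 0x40000 >= (addr)))
 *		data = INIT_TSEM_PRAM_DATA(sc);
 *
 * which is how ecore_sel_blob() below walks the PRAM/INT_TABLE windows.
 */
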
static const uint8_t *ecore_sel_blob(struct bnx2x_softc *sc, uint32_t addr,
				     const uint8_t *data)
{
	IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr)
		data = INIT_TSEM_INT_TABLE_DATA(sc);
	else
		IF_IS_INT_TABLE_ADDR(CSEM_REG_INT_TABLE, addr)
			data = INIT_CSEM_INT_TABLE_DATA(sc);
	else
		IF_IS_INT_TABLE_ADDR(USEM_REG_INT_TABLE, addr)
			data = INIT_USEM_INT_TABLE_DATA(sc);
	else
		IF_IS_INT_TABLE_ADDR(XSEM_REG_INT_TABLE, addr)
			data = INIT_XSEM_INT_TABLE_DATA(sc);
	else
		IF_IS_PRAM_ADDR(TSEM_REG_PRAM, addr)
			data = INIT_TSEM_PRAM_DATA(sc);
	else
		IF_IS_PRAM_ADDR(CSEM_REG_PRAM, addr)
			data = INIT_CSEM_PRAM_DATA(sc);
	else
		IF_IS_PRAM_ADDR(USEM_REG_PRAM, addr)
			data = INIT_USEM_PRAM_DATA(sc);
	else
		IF_IS_PRAM_ADDR(XSEM_REG_PRAM, addr)
			data = INIT_XSEM_PRAM_DATA(sc);

	return data;
}

static void ecore_init_wr_wb(struct bnx2x_softc *sc, uint32_t addr,
			     const uint32_t *data, uint32_t len)
{
	if (DMAE_READY(sc))
		VIRT_WR_DMAE_LEN(sc, data, addr, len, 0);

	else ecore_init_str_wr(sc, addr, data, len);
}

static void ecore_wr_64(struct bnx2x_softc *sc, uint32_t reg, uint32_t val_lo,
			uint32_t val_hi)
{
	uint32_t wb_write[2];

	wb_write[0] = val_lo;
	wb_write[1] = val_hi;
	REG_WR_DMAE_LEN(sc, reg, wb_write, 2);
}

static void ecore_init_wr_zp(struct bnx2x_softc *sc, uint32_t addr, uint32_t len,
			     uint32_t blob_off)
{
	const uint8_t *data = NULL;
	int rc;
	uint32_t i;

	data = ecore_sel_blob(sc, addr, data) + blob_off*4;

	rc = ecore_gunzip(sc, data, len);
	if (rc)
		return;

	/* gunzip_outlen is in dwords */
	len = GUNZIP_OUTLEN(sc);
	for (i = 0; i < len; i++)
		((uint32_t *)GUNZIP_BUF(sc))[i] = (uint32_t)
				ECORE_CPU_TO_LE32(((uint32_t *)GUNZIP_BUF(sc))[i]);

	ecore_write_big_buf_wb(sc, addr, len);
}

static void ecore_init_block(struct bnx2x_softc *sc, uint32_t block, uint32_t stage)
{
	uint16_t op_start =
		INIT_OPS_OFFSETS(sc)[BLOCK_OPS_IDX(block, stage,
						    STAGE_START)];
	uint16_t op_end =
		INIT_OPS_OFFSETS(sc)[BLOCK_OPS_IDX(block, stage,
						    STAGE_END)];
	const union init_op *op;
	uint32_t op_idx, op_type, addr, len;
	const uint32_t *data, *data_base;

	/* If empty block */
	if (op_start == op_end)
		return;

	data_base = INIT_DATA(sc);

	for (op_idx = op_start; op_idx < op_end; op_idx++) {

		op = (const union init_op *)&(INIT_OPS(sc)[op_idx]);
		/* Get generic data */
		op_type = op->raw.op;
		addr = op->raw.offset;
		/* Get data that's used for OP_SW, OP_WB, OP_FW, OP_ZP and
		 * OP_WR64 (we assume that op_arr_write and op_write have the
		 * same structure).
		 */
		len = op->arr_wr.data_len;
		data = data_base + op->arr_wr.data_off;

		switch (op_type) {
		case OP_RD:
			REG_RD(sc, addr);
			break;
		case OP_WR:
			REG_WR(sc, addr, op->write.val);
			break;
		case OP_SW:
			ecore_init_str_wr(sc, addr, data, len);
			break;
		case OP_WB:
			ecore_init_wr_wb(sc, addr, data, len);
			break;
		case OP_ZR:
		case OP_WB_ZR:
			ecore_init_fill(sc, addr, 0, op->zero.len);
			break;
		case OP_ZP:
			ecore_init_wr_zp(sc, addr, len, op->arr_wr.data_off);
			break;
		case OP_WR_64:
			ecore_init_wr_64(sc, addr, data, len);
			break;
		case OP_IF_MODE_AND:
			/* if any of the flags doesn't match, skip the
			 * conditional block.
			 */
			if ((INIT_MODE_FLAGS(sc) &
				op->if_mode.mode_bit_map) !=
				op->if_mode.mode_bit_map)
				op_idx += op->if_mode.cmd_offset;
			break;
		case OP_IF_MODE_OR:
			/* if all the flags don't match, skip the conditional
			 * block.
			 */
			if ((INIT_MODE_FLAGS(sc) &
				op->if_mode.mode_bit_map) == 0)
				op_idx += op->if_mode.cmd_offset;
			break;
		/* the following opcodes are unused at the moment. */
		case OP_IF_PHASE:
		case OP_RT:
		case OP_DELAY:
		case OP_VERIFY:
		default:
			/* Should never get here! */
			break;
		}
	}
}

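/*
 * Illustrative usage: the driver init path invokes one block/stage pair at a
 * time, e.g. (block and phase identifiers as used by the wider bnx2x code):
 *
 *	ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON);
 *	ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON);
 */
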
/****************************************************************************
 * PXP Arbiter
 ****************************************************************************/

/*
 * This code configures the PCI read/write arbiter
 * which implements a weighted round robin
 * between the virtual queues in the chip.
 *
 * The values were derived for each PCI max payload and max request size.
 * Since max payload and max request size are only known at run time,
 * this is done as a separate init stage.
 */

/* configuration for one arbiter queue */
struct arb_line {
	int l;
	int add;
	int ubound;
};

/* derived configuration for each read queue for each max request size */
static const struct arb_line read_arb_data[NUM_RD_Q][MAX_RD_ORD + 1] = {
/* 1 */	{ {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
	{ {4, 8, 4}, {4, 8, 4}, {4, 8, 4}, {4, 8, 4} },
	{ {4, 3, 3}, {4, 3, 3}, {4, 3, 3}, {4, 3, 3} },
	{ {8, 3, 6}, {16, 3, 11}, {16, 3, 11}, {16, 3, 11} },
	{ {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
/* 10 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 64, 6}, {16, 64, 11}, {32, 64, 21}, {32, 64, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
/* 20 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 64, 25}, {16, 64, 41}, {32, 64, 81}, {64, 64, 120} }
};

/* derived configuration for each write queue for each max request size */
static const struct arb_line write_arb_data[NUM_WR_Q][MAX_WR_ORD + 1] = {
/* 1 */	{ {4, 6, 3}, {4, 6, 3}, {4, 6, 3} },
	{ {4, 2, 3}, {4, 2, 3}, {4, 2, 3} },
	{ {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
	{ {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
	{ {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
	{ {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
	{ {8, 64, 25}, {16, 64, 25}, {32, 64, 25} },
	{ {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
	{ {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
/* 10 */{ {8, 9, 6}, {16, 9, 11}, {32, 9, 21} },
	{ {8, 47, 19}, {16, 47, 19}, {32, 47, 21} },
	{ {8, 9, 6}, {16, 9, 11}, {16, 9, 11} },
	{ {8, 64, 25}, {16, 64, 41}, {32, 64, 81} }
};

/* register addresses for read queues */
static const struct arb_line read_arb_addr[NUM_RD_Q-1] = {
/* 1 */	{PXP2_REG_RQ_BW_RD_L0, PXP2_REG_RQ_BW_RD_ADD0,
		PXP2_REG_RQ_BW_RD_UBOUND0},
	{PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
		PXP2_REG_PSWRQ_BW_UB1},
	{PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
		PXP2_REG_PSWRQ_BW_UB2},
	{PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
		PXP2_REG_PSWRQ_BW_UB3},
	{PXP2_REG_RQ_BW_RD_L4, PXP2_REG_RQ_BW_RD_ADD4,
		PXP2_REG_RQ_BW_RD_UBOUND4},
	{PXP2_REG_RQ_BW_RD_L5, PXP2_REG_RQ_BW_RD_ADD5,
		PXP2_REG_RQ_BW_RD_UBOUND5},
	{PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
		PXP2_REG_PSWRQ_BW_UB6},
	{PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
		PXP2_REG_PSWRQ_BW_UB7},
	{PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
		PXP2_REG_PSWRQ_BW_UB8},
/* 10 */{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
		PXP2_REG_PSWRQ_BW_UB9},
	{PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
		PXP2_REG_PSWRQ_BW_UB10},
	{PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
		PXP2_REG_PSWRQ_BW_UB11},
	{PXP2_REG_RQ_BW_RD_L12, PXP2_REG_RQ_BW_RD_ADD12,
		PXP2_REG_RQ_BW_RD_UBOUND12},
	{PXP2_REG_RQ_BW_RD_L13, PXP2_REG_RQ_BW_RD_ADD13,
		PXP2_REG_RQ_BW_RD_UBOUND13},
	{PXP2_REG_RQ_BW_RD_L14, PXP2_REG_RQ_BW_RD_ADD14,
		PXP2_REG_RQ_BW_RD_UBOUND14},
	{PXP2_REG_RQ_BW_RD_L15, PXP2_REG_RQ_BW_RD_ADD15,
		PXP2_REG_RQ_BW_RD_UBOUND15},
	{PXP2_REG_RQ_BW_RD_L16, PXP2_REG_RQ_BW_RD_ADD16,
		PXP2_REG_RQ_BW_RD_UBOUND16},
	{PXP2_REG_RQ_BW_RD_L17, PXP2_REG_RQ_BW_RD_ADD17,
		PXP2_REG_RQ_BW_RD_UBOUND17},
	{PXP2_REG_RQ_BW_RD_L18, PXP2_REG_RQ_BW_RD_ADD18,
		PXP2_REG_RQ_BW_RD_UBOUND18},
/* 20 */{PXP2_REG_RQ_BW_RD_L19, PXP2_REG_RQ_BW_RD_ADD19,
		PXP2_REG_RQ_BW_RD_UBOUND19},
	{PXP2_REG_RQ_BW_RD_L20, PXP2_REG_RQ_BW_RD_ADD20,
		PXP2_REG_RQ_BW_RD_UBOUND20},
	{PXP2_REG_RQ_BW_RD_L22, PXP2_REG_RQ_BW_RD_ADD22,
		PXP2_REG_RQ_BW_RD_UBOUND22},
	{PXP2_REG_RQ_BW_RD_L23, PXP2_REG_RQ_BW_RD_ADD23,
		PXP2_REG_RQ_BW_RD_UBOUND23},
	{PXP2_REG_RQ_BW_RD_L24, PXP2_REG_RQ_BW_RD_ADD24,
		PXP2_REG_RQ_BW_RD_UBOUND24},
	{PXP2_REG_RQ_BW_RD_L25, PXP2_REG_RQ_BW_RD_ADD25,
		PXP2_REG_RQ_BW_RD_UBOUND25},
	{PXP2_REG_RQ_BW_RD_L26, PXP2_REG_RQ_BW_RD_ADD26,
		PXP2_REG_RQ_BW_RD_UBOUND26},
	{PXP2_REG_RQ_BW_RD_L27, PXP2_REG_RQ_BW_RD_ADD27,
		PXP2_REG_RQ_BW_RD_UBOUND27},
	{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
		PXP2_REG_PSWRQ_BW_UB28}
};

/* register addresses for write queues */
static const struct arb_line write_arb_addr[NUM_WR_Q-1] = {
/* 1 */	{PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
		PXP2_REG_PSWRQ_BW_UB1},
	{PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
		PXP2_REG_PSWRQ_BW_UB2},
	{PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
		PXP2_REG_PSWRQ_BW_UB3},
	{PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
		PXP2_REG_PSWRQ_BW_UB6},
	{PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
		PXP2_REG_PSWRQ_BW_UB7},
	{PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
		PXP2_REG_PSWRQ_BW_UB8},
	{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
		PXP2_REG_PSWRQ_BW_UB9},
	{PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
		PXP2_REG_PSWRQ_BW_UB10},
	{PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
		PXP2_REG_PSWRQ_BW_UB11},
/* 10 */{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
		PXP2_REG_PSWRQ_BW_UB28},
	{PXP2_REG_RQ_BW_WR_L29, PXP2_REG_RQ_BW_WR_ADD29,
		PXP2_REG_RQ_BW_WR_UBOUND29},
	{PXP2_REG_RQ_BW_WR_L30, PXP2_REG_RQ_BW_WR_ADD30,
		PXP2_REG_RQ_BW_WR_UBOUND30}
};

static void ecore_init_pxp_arb(struct bnx2x_softc *sc, int r_order,
			       int w_order)
{
	uint32_t val, i;

	if (r_order > MAX_RD_ORD) {
		ECORE_MSG("read order of %d order adjusted to %d",
			  r_order, MAX_RD_ORD);
		r_order = MAX_RD_ORD;
	}
	if (w_order > MAX_WR_ORD) {
		ECORE_MSG("write order of %d order adjusted to %d",
			  w_order, MAX_WR_ORD);
		w_order = MAX_WR_ORD;
	}
	if (CHIP_REV_IS_FPGA(sc)) {
		ECORE_MSG("write order adjusted to 1 for FPGA");
		w_order = 0;
	}
	ECORE_MSG("read order %d write order %d", r_order, w_order);

	for (i = 0; i < NUM_RD_Q-1; i++) {
		REG_WR(sc, read_arb_addr[i].l, read_arb_data[i][r_order].l);
		REG_WR(sc, read_arb_addr[i].add,
		       read_arb_data[i][r_order].add);
		REG_WR(sc, read_arb_addr[i].ubound,
		       read_arb_data[i][r_order].ubound);
	}

	for (i = 0; i < NUM_WR_Q-1; i++) {
		if ((write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L29) ||
		    (write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L30)) {

			REG_WR(sc, write_arb_addr[i].l,
			       write_arb_data[i][w_order].l);

			REG_WR(sc, write_arb_addr[i].add,
			       write_arb_data[i][w_order].add);

			REG_WR(sc, write_arb_addr[i].ubound,
			       write_arb_data[i][w_order].ubound);
		} else {

			val = REG_RD(sc, write_arb_addr[i].l);
			REG_WR(sc, write_arb_addr[i].l,
			       val | (write_arb_data[i][w_order].l << 10));

			val = REG_RD(sc, write_arb_addr[i].add);
			REG_WR(sc, write_arb_addr[i].add,
			       val | (write_arb_data[i][w_order].add << 10));

			val = REG_RD(sc, write_arb_addr[i].ubound);
			REG_WR(sc, write_arb_addr[i].ubound,
			       val | (write_arb_data[i][w_order].ubound << 7));
		}
	}

	val =  write_arb_data[NUM_WR_Q-1][w_order].add;
	val += write_arb_data[NUM_WR_Q-1][w_order].ubound << 10;
	val += write_arb_data[NUM_WR_Q-1][w_order].l << 17;
	REG_WR(sc, PXP2_REG_PSWRQ_BW_RD, val);

	val =  read_arb_data[NUM_RD_Q-1][r_order].add;
	val += read_arb_data[NUM_RD_Q-1][r_order].ubound << 10;
	val += read_arb_data[NUM_RD_Q-1][r_order].l << 17;
	REG_WR(sc, PXP2_REG_PSWRQ_BW_WR, val);

	REG_WR(sc, PXP2_REG_RQ_WR_MBS0, w_order);
	REG_WR(sc, PXP2_REG_RQ_WR_MBS1, w_order);
	REG_WR(sc, PXP2_REG_RQ_RD_MBS0, r_order);
	REG_WR(sc, PXP2_REG_RQ_RD_MBS1, r_order);

	if (CHIP_IS_E1H(sc) && (r_order == MAX_RD_ORD))
		REG_WR(sc, PXP2_REG_RQ_PDR_LIMIT, 0xe00);

	if (CHIP_IS_E3(sc))
		REG_WR(sc, PXP2_REG_WR_USDMDP_TH, (0x4 << w_order));
	else if (CHIP_IS_E2(sc))
		REG_WR(sc, PXP2_REG_WR_USDMDP_TH, (0x8 << w_order));
	else
		REG_WR(sc, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));

	/*	MPS	w_order	optimal TH	presently TH
	 *	128	0	0		2
	 *	256	1	1		3
	 *	>=512	2	2		3
	 */
	/* DMAE is special */
	if (!CHIP_IS_E1H(sc)) {
		/* E2 can use optimal TH */
		val = w_order;
		REG_WR(sc, PXP2_REG_WR_DMAE_MPS, val);
	} else {
		val = ((w_order == 0) ? 2 : 3);
		REG_WR(sc, PXP2_REG_WR_DMAE_MPS, 2);
	}

	REG_WR(sc, PXP2_REG_WR_HC_MPS, val);
	REG_WR(sc, PXP2_REG_WR_USDM_MPS, val);
	REG_WR(sc, PXP2_REG_WR_CSDM_MPS, val);
	REG_WR(sc, PXP2_REG_WR_TSDM_MPS, val);
	REG_WR(sc, PXP2_REG_WR_XSDM_MPS, val);
	REG_WR(sc, PXP2_REG_WR_QM_MPS, val);
	REG_WR(sc, PXP2_REG_WR_TM_MPS, val);
	REG_WR(sc, PXP2_REG_WR_SRC_MPS, val);
	REG_WR(sc, PXP2_REG_WR_DBG_MPS, val);
	REG_WR(sc, PXP2_REG_WR_CDU_MPS, val);

	/* Validate number of tags supported by device */
#define PCIE_REG_PCIER_TL_HDR_FC_ST	0x2980
	val = REG_RD(sc, PCIE_REG_PCIER_TL_HDR_FC_ST);
	val &= 0xFF;
	if (val <= 0x20)
		REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x20);
}

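/*
 * Illustrative sketch of a caller (hypothetical helper, shown only to make
 * the r_order/w_order encoding concrete): both orders follow the PCIe
 * Device Control encoding where size = 128 << order, so MAX_WR_ORD (2)
 * corresponds to a 512 byte max payload and MAX_RD_ORD (3) to a 1024 byte
 * max read request.
 */
#if 0	/* example only, not compiled */
static void example_setup_pxp(struct bnx2x_softc *sc, uint16_t devctl)
{
	int w_order = (devctl & 0x00e0) >> 5;	/* Max_Payload_Size field */
	int r_order = (devctl & 0x7000) >> 12;	/* Max_Read_Request_Size field */

	ecore_init_pxp_arb(sc, r_order, w_order);
}
#endif
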
/****************************************************************************
 * ILT management
 ****************************************************************************/

/*
 * This code hides the low level HW interaction for ILT management and
 * configuration. The API consists of a shadow ILT table which is set by the
 * driver and a set of routines to use it to configure the HW.
 */

/* ILT HW init operations */

/* ILT memory management operations */
#define ILT_MEMOP_ALLOC		0
#define ILT_MEMOP_FREE		1

/* the phys address is shifted right 12 bits and a 1=valid bit is added
 * as the 53rd bit; then, since this is a wide register(TM),
 * we split it into two 32 bit writes
 */
#define ILT_ADDR1(x)		((uint32_t)(((uint64_t)x >> 12) & 0xFFFFFFFF))
#define ILT_ADDR2(x)		((uint32_t)((1 << 20) | ((uint64_t)x >> 44)))
#define ILT_RANGE(f, l)		(((l) << 10) | f)

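/*
 * Worked example (illustrative): for page_mapping == 0x0001234567891000,
 * ILT_ADDR1() yields 0x34567891 (address bits 43:12) and ILT_ADDR2() yields
 * 0x00100012 (the valid bit at bit 20 OR'ed with address bits 63:44); these
 * are the two 32-bit halves written by ecore_ilt_line_wr() below.
 */
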
static int ecore_ilt_line_mem_op(struct bnx2x_softc *sc,
				 struct ilt_line *line, uint32_t size, uint8_t memop, int cli_num, int i)
{
#define ECORE_ILT_NAMESIZE 10
	char str[ECORE_ILT_NAMESIZE];

	if (memop == ILT_MEMOP_FREE) {
		ECORE_ILT_FREE(line->page, line->page_mapping, line->size);
		return 0;
	}
	snprintf(str, ECORE_ILT_NAMESIZE, "ILT_%d_%d", cli_num, i);
	ECORE_ILT_ZALLOC(line->page, &line->page_mapping, size, str);
	if (!line->page)
		return -1;

	line->size = size;
	return 0;
}

static int ecore_ilt_client_mem_op(struct bnx2x_softc *sc, int cli_num,
				   uint8_t memop)
{
	int i, rc = 0;
	struct ecore_ilt *ilt = SC_ILT(sc);
	struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];

	if (!ilt || !ilt->lines)
		return -1;

	if (ilt_cli->flags & (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM))
		return 0;

	for (i = ilt_cli->start; i <= ilt_cli->end && !rc; i++) {
		rc = ecore_ilt_line_mem_op(sc, &ilt->lines[i],
					   ilt_cli->page_size, memop, cli_num, i);
	}
	return rc;
}

static inline int ecore_ilt_mem_op_cnic(struct bnx2x_softc *sc, uint8_t memop)
{
	int rc = 0;

	if (CONFIGURE_NIC_MODE(sc))
		rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_SRC, memop);
	if (!rc)
		rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_TM, memop);

	return rc;
}

static int ecore_ilt_mem_op(struct bnx2x_softc *sc, uint8_t memop)
{
	int rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_CDU, memop);
	if (!rc)
		rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_QM, memop);
	if (!rc && CNIC_SUPPORT(sc) && !CONFIGURE_NIC_MODE(sc))
		rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_SRC, memop);

	return rc;
}

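/*
 * Illustrative usage: the shadow ILT backing pages are typically allocated
 * once when the device is set up and released on teardown, e.g.
 * (error handling sketched only):
 *
 *	if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC))
 *		return -1;
 *	...
 *	ecore_ilt_mem_op(sc, ILT_MEMOP_FREE);
 */
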
static void ecore_ilt_line_wr(struct bnx2x_softc *sc, int abs_idx,
			      ecore_dma_addr_t page_mapping)
{
	uint32_t reg;

	reg = PXP2_REG_RQ_ONCHIP_AT_B0 + abs_idx*8;

	ecore_wr_64(sc, reg, ILT_ADDR1(page_mapping), ILT_ADDR2(page_mapping));
}

static void ecore_ilt_line_init_op(struct bnx2x_softc *sc,
				   struct ecore_ilt *ilt, int idx, uint8_t initop)
{
	ecore_dma_addr_t null_mapping;
	int abs_idx = ilt->start_line + idx;

	switch (initop) {
	case INITOP_INIT:
		/* set in the init-value array */
	case INITOP_SET:
		ecore_ilt_line_wr(sc, abs_idx, ilt->lines[idx].page_mapping);
		break;
	case INITOP_CLEAR:
		null_mapping = 0;
		ecore_ilt_line_wr(sc, abs_idx, null_mapping);
		break;
	}
}

static void ecore_ilt_boundry_init_op(struct bnx2x_softc *sc,
				      struct ilt_client_info *ilt_cli,
				      uint32_t ilt_start)
{
	uint32_t start_reg = 0;
	uint32_t end_reg = 0;

	/* The boundary is either SET or INIT,
	   CLEAR => SET and for now SET ~~ INIT */

	/* find the appropriate regs */
	switch (ilt_cli->client_num) {
	case ILT_CLIENT_CDU:
		start_reg = PXP2_REG_RQ_CDU_FIRST_ILT;
		end_reg = PXP2_REG_RQ_CDU_LAST_ILT;
		break;
	case ILT_CLIENT_QM:
		start_reg = PXP2_REG_RQ_QM_FIRST_ILT;
		end_reg = PXP2_REG_RQ_QM_LAST_ILT;
		break;
	case ILT_CLIENT_SRC:
		start_reg = PXP2_REG_RQ_SRC_FIRST_ILT;
		end_reg = PXP2_REG_RQ_SRC_LAST_ILT;
		break;
	case ILT_CLIENT_TM:
		start_reg = PXP2_REG_RQ_TM_FIRST_ILT;
		end_reg = PXP2_REG_RQ_TM_LAST_ILT;
		break;
	}

	REG_WR(sc, start_reg, (ilt_start + ilt_cli->start));
	REG_WR(sc, end_reg, (ilt_start + ilt_cli->end));
}

static void ecore_ilt_client_init_op_ilt(struct bnx2x_softc *sc,
					 struct ecore_ilt *ilt,
					 struct ilt_client_info *ilt_cli,
					 uint8_t initop)
{
	int i;

	if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT)
		return;

	for (i = ilt_cli->start; i <= ilt_cli->end; i++)
		ecore_ilt_line_init_op(sc, ilt, i, initop);

	/* init/clear the ILT boundaries */
	ecore_ilt_boundry_init_op(sc, ilt_cli, ilt->start_line);
}

static void ecore_ilt_client_init_op(struct bnx2x_softc *sc,
				     struct ilt_client_info *ilt_cli, uint8_t initop)
{
	struct ecore_ilt *ilt = SC_ILT(sc);

	ecore_ilt_client_init_op_ilt(sc, ilt, ilt_cli, initop);
}

static void ecore_ilt_client_id_init_op(struct bnx2x_softc *sc,
					int cli_num, uint8_t initop)
{
	struct ecore_ilt *ilt = SC_ILT(sc);
	struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];

	ecore_ilt_client_init_op(sc, ilt_cli, initop);
}

static inline void ecore_ilt_init_op_cnic(struct bnx2x_softc *sc, uint8_t initop)
{
	if (CONFIGURE_NIC_MODE(sc))
		ecore_ilt_client_id_init_op(sc, ILT_CLIENT_SRC, initop);
	ecore_ilt_client_id_init_op(sc, ILT_CLIENT_TM, initop);
}

static void ecore_ilt_init_op(struct bnx2x_softc *sc, uint8_t initop)
{
	ecore_ilt_client_id_init_op(sc, ILT_CLIENT_CDU, initop);
	ecore_ilt_client_id_init_op(sc, ILT_CLIENT_QM, initop);
	if (CNIC_SUPPORT(sc) && !CONFIGURE_NIC_MODE(sc))
		ecore_ilt_client_id_init_op(sc, ILT_CLIENT_SRC, initop);
}

static void ecore_ilt_init_client_psz(struct bnx2x_softc *sc, int cli_num,
				      uint32_t psz_reg, uint8_t initop)
{
	struct ecore_ilt *ilt = SC_ILT(sc);
	struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];

	if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT)
		return;

	switch (initop) {
	case INITOP_INIT:
		/* set in the init-value array */
	case INITOP_SET:
		REG_WR(sc, psz_reg, ILOG2(ilt_cli->page_size >> 12));
		break;
	case INITOP_CLEAR:
		break;
	}
}

/*
 * called during init common stage, ilt clients should be initialized
 * prior to calling this function
 */
static void ecore_ilt_init_page_size(struct bnx2x_softc *sc, uint8_t initop)
{
	ecore_ilt_init_client_psz(sc, ILT_CLIENT_CDU,
				  PXP2_REG_RQ_CDU_P_SIZE, initop);
	ecore_ilt_init_client_psz(sc, ILT_CLIENT_QM,
				  PXP2_REG_RQ_QM_P_SIZE, initop);
	ecore_ilt_init_client_psz(sc, ILT_CLIENT_SRC,
				  PXP2_REG_RQ_SRC_P_SIZE, initop);
	ecore_ilt_init_client_psz(sc, ILT_CLIENT_TM,
				  PXP2_REG_RQ_TM_P_SIZE, initop);
}

/****************************************************************************
 * QM initializations
 ****************************************************************************/
#define QM_QUEUES_PER_FUNC	16
#define QM_INIT_MIN_CID_COUNT	31
#define QM_INIT(cid_cnt)	(cid_cnt > QM_INIT_MIN_CID_COUNT)

/* called during init port stage */
static void ecore_qm_init_cid_count(struct bnx2x_softc *sc, int qm_cid_count,
				    uint8_t initop)
{
	int port = SC_PORT(sc);

	if (QM_INIT(qm_cid_count)) {
		switch (initop) {
		case INITOP_INIT:
			/* set in the init-value array */
		case INITOP_SET:
			REG_WR(sc, QM_REG_CONNNUM_0 + port*4,
			       qm_cid_count/16 - 1);
			break;
		case INITOP_CLEAR:
			break;
		}
	}
}

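/*
 * Illustrative numbers: with, say, qm_cid_count == 64, QM_INIT() holds
 * (64 > 31) and the write above programs QM_REG_CONNNUM_0 with
 * 64/16 - 1 = 3.
 */
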
static void ecore_qm_set_ptr_table(struct bnx2x_softc *sc, int qm_cid_count,
				   uint32_t base_reg, uint32_t reg)
{
	int i;
	uint32_t wb_data[2] = {0, 0};
	for (i = 0; i < 4 * QM_QUEUES_PER_FUNC; i++) {
		REG_WR(sc, base_reg + i*4,
		       qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC));
		ecore_init_wr_wb(sc, reg + i*8,
				 wb_data, 2);
	}
}

/* called during init common stage */
static void ecore_qm_init_ptr_table(struct bnx2x_softc *sc, int qm_cid_count,
				    uint8_t initop)
{
	if (!QM_INIT(qm_cid_count))
		return;

	switch (initop) {
	case INITOP_INIT:
		/* set in the init-value array */
	case INITOP_SET:
		ecore_qm_set_ptr_table(sc, qm_cid_count,
				       QM_REG_BASEADDR, QM_REG_PTRTBL);
		if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc))
			ecore_qm_set_ptr_table(sc, qm_cid_count,
					       QM_REG_BASEADDR_EXT_A,
					       QM_REG_PTRTBL_EXT_A);
		break;
	case INITOP_CLEAR:
		break;
	}
}

/****************************************************************************
 * SRC initializations
 ****************************************************************************/

/* called during init func stage */
static void ecore_src_init_t2(struct bnx2x_softc *sc, struct src_ent *t2,
			      ecore_dma_addr_t t2_mapping, int src_cid_count)
{
	int i;
	int port = SC_PORT(sc);

	/* Initialize T2 */
	for (i = 0; i < src_cid_count-1; i++)
		t2[i].next = (uint64_t)(t2_mapping +
			     (i+1)*sizeof(struct src_ent));

	/* tell the searcher where the T2 table is */
	REG_WR(sc, SRC_REG_COUNTFREE0 + port*4, src_cid_count);

	ecore_wr_64(sc, SRC_REG_FIRSTFREE0 + port*16,
		    U64_LO(t2_mapping), U64_HI(t2_mapping));

	ecore_wr_64(sc, SRC_REG_LASTFREE0 + port*16,
		    U64_LO((uint64_t)t2_mapping +
			   (src_cid_count-1) * sizeof(struct src_ent)),
		    U64_HI((uint64_t)t2_mapping +
			   (src_cid_count-1) * sizeof(struct src_ent)));
}

#endif /* ECORE_INIT_OPS_H */