/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 */

#include "ecore_init_ops.h"
#include "ecore_rt_defs.h"
#include "ecore_hsi_common.h"
#include "ecore_hsi_init_func.h"
#include "ecore_hsi_eth.h"
#include "ecore_hsi_init_tool.h"
#include "ecore_iro.h"
#include "ecore_init_fw_funcs.h"

#define CDU_VALIDATION_DEFAULT_CFG 61
static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES_E4] = {
    { 400, 336, 352, 304, 304, 384, 416, 352}, /* region 3 offsets */
    { 528, 496, 416, 448, 448, 512, 544, 480}, /* region 4 offsets */
    { 608, 544, 496, 512, 576, 592, 624, 560}  /* region 5 offsets */
};

static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
    { 240, 240, 112, 0, 0, 0, 0, 96}  /* region 1 offsets */
};
/* General constants */
#define QM_PQ_MEM_4KB(pq_size) \
    (pq_size ? DIV_ROUND_UP((pq_size + 1) * QM_PQ_ELEMENT_SIZE, 0x1000) : 0)
#define QM_PQ_SIZE_256B(pq_size) \
    (pq_size ? DIV_ROUND_UP(pq_size, 0x100) - 1 : 0)
#define QM_INVALID_PQ_ID 0xffff
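
/* Worked example (illustrative, assuming QM_PQ_ELEMENT_SIZE == 4 bytes): a PQ
 * sized for 64 CIDs needs (64 + 1) * 4 = 260 bytes of PQ memory, so
 * QM_PQ_MEM_4KB(64) = DIV_ROUND_UP(260, 0x1000) = 1 page of 4KB, while
 * QM_PQ_SIZE_256B(64) = DIV_ROUND_UP(64, 0x100) - 1 = 0, since the size is
 * encoded in 256B units minus one.
 */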
#define QM_BYPASS_EN 1
#define QM_BYTE_CRD_EN 1

/* Other PQ constants */
#define QM_OTHER_PQS_PER_PF 4

#define QM_E5_NUM_EXT_VOQ (MAX_NUM_PORTS_E5 * NUM_OF_TCS)

/* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */
#define QM_WFQ_UPPER_BOUND 62500000

/* Bit of VOQ in WFQ VP PQ map */
#define QM_WFQ_VP_PQ_VOQ_SHIFT 0

/* Bit of PF in WFQ VP PQ map */
#define QM_WFQ_VP_PQ_PF_E4_SHIFT 5
#define QM_WFQ_VP_PQ_PF_E5_SHIFT 6

/* 0x9000 = 4 * 9 * 1024 */
#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
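
/* Illustrative arithmetic for QM_WFQ_INC_VAL(): a weight of 1 yields an
 * increment of 0x9000 = 36864 credit units per scheduling round, so the
 * 0.7 * QM_WFQ_UPPER_BOUND cap in QM_WFQ_MAX_INC_VAL below limits usable
 * weights to roughly 43750000 / 36864 ~= 1186.
 */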
/* Max WFQ increment value is 0.7 * upper bound */
#define QM_WFQ_MAX_INC_VAL ((QM_WFQ_UPPER_BOUND * 7) / 10)

/* Number of VOQs in E5 QmWfqCrd register */
#define QM_WFQ_CRD_E5_NUM_VOQS 16

/* RL period in us */
#define QM_RL_PERIOD 5

/* Period in 25MHz cycles */
#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
/* RL increment value - rate is specified in mbps. The factor of 1.01 was
 * added after seeing only 99% factor reached in a 25Gbps port with DPDK RFC
 * 2544 test. In this scenario the PF RL was reducing the line rate to 99%
 * although the credit increment value was the correct one and FW calculated
 * correct packet sizes. The reason for the inaccuracy of the RL is unknown at
 * this point.
 */
#define QM_RL_INC_VAL(rate) \
    OSAL_MAX_T(u32, (u32)(((rate ? rate : 100000) * QM_RL_PERIOD * 101) / \
    (8 * 100)), 1)
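
/* Units check (illustrative): rate is in Mbps and QM_RL_PERIOD is in us, so
 * rate * QM_RL_PERIOD / 8 is bytes per RL period, and 101/100 is the 1.01
 * fudge factor described above. E.g. for a 25000 Mbps rate:
 * (25000 * 5 * 101) / 800 = 15781 bytes of credit per 5us period.
 */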
/* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
#define QM_PF_RL_UPPER_BOUND 62500000

/* Max PF RL increment value is 0.7 * upper bound */
#define QM_PF_RL_MAX_INC_VAL ((QM_PF_RL_UPPER_BOUND * 7) / 10)

/* Vport RL Upper bound, link speed is in Mbps */
#define QM_VP_RL_UPPER_BOUND(speed) \
    ((u32)OSAL_MAX_T(u32, QM_RL_INC_VAL(speed), 9700 + 1000))

/* Max Vport RL increment value is the Vport RL upper bound */
#define QM_VP_RL_MAX_INC_VAL(speed) QM_VP_RL_UPPER_BOUND(speed)

/* Vport RL credit threshold in case of QM bypass */
#define QM_VP_RL_BYPASS_THRESH_SPEED (QM_VP_RL_UPPER_BOUND(10000) - 1)

/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF 1
#define QM_OPPOR_FW_STOP_DEF 0
#define QM_OPPOR_PQ_EMPTY_DEF 1
/* Command Queue constants: */

/* Pure LB CmdQ lines (+spare) */
#define PBF_CMDQ_PURE_LB_LINES 150

#define PBF_CMDQ_LINES_E5_RSVD_RATIO 8

#define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
    (PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
     (ext_voq) * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
                  PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))

#define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \
    (PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
     (ext_voq) * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
                  PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))

#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
    ((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
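
/* Example (illustrative): for the pure LB queue, QM_VOQ_LINE_CRD(150) =
 * ((150 - 4) * 2) = 292 line credits, OR'd with QM_LINE_CRD_REG_SIGN_BIT to
 * mark the credit register value as signed.
 */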
/* BTB: blocks constants (block size = 256B) */

/* 256B blocks in 9700B packet */
#define BTB_JUMBO_PKT_BLOCKS 38

/* Headroom per-port */
#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
#define BTB_PURE_LB_FACTOR 10

/* Factored (hence really 0.7) */
#define BTB_PURE_LB_RATIO 7

/* QM stop command constants */
#define QM_STOP_PQ_MASK_WIDTH 32
#define QM_STOP_CMD_ADDR 2
#define QM_STOP_CMD_STRUCT_SIZE 2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0
#define QM_STOP_CMD_PAUSE_MASK_MASK 0xffffffff /* @DPDK */
#define QM_STOP_CMD_GROUP_ID_OFFSET 1
#define QM_STOP_CMD_GROUP_ID_SHIFT 16
#define QM_STOP_CMD_GROUP_ID_MASK 15
#define QM_STOP_CMD_PQ_TYPE_OFFSET 1
#define QM_STOP_CMD_PQ_TYPE_SHIFT 24
#define QM_STOP_CMD_PQ_TYPE_MASK 1
#define QM_STOP_CMD_MAX_POLL_COUNT 100
#define QM_STOP_CMD_POLL_PERIOD_US 500

/* QM command macros */
#define QM_CMD_STRUCT_SIZE(cmd) cmd##_STRUCT_SIZE
#define QM_CMD_SET_FIELD(var, cmd, field, value) \
    SET_FIELD(var[cmd##_##field##_OFFSET], cmd##_##field, value)
#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, rl_valid, \
                          vp_pq_id, rl_id, ext_voq, wrr) \
    do { \
        OSAL_MEMSET(&map, 0, sizeof(map)); \
        SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_PQ_VALID, 1); \
        SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_RL_VALID, rl_valid); \
        SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_VP_PQ_ID, vp_pq_id); \
        SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_RL_ID, rl_id); \
        SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_VOQ, ext_voq); \
        SET_FIELD(map.reg, \
                  QM_RF_PQ_MAP_##chip##_WRR_WEIGHT_GROUP, wrr); \
        STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id, \
                     *((u32 *)&map)); \
    } while (0)
#define WRITE_PQ_INFO_TO_RAM 1
#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
    (((vp) << 0) | ((pf) << 12) | ((tc) << 16) | \
     ((port) << 20) | ((rl_valid) << 22) | ((rl) << 24))
#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
    (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21776 + (pq_id) * 4)
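
/* PQ info layout implied by the shifts above: bits [11:0] = vp, [15:12] = pf,
 * [19:16] = tc, [21:20] = port, [22] = rl_valid, [31:24] = rl. E.g.
 * PQ_INFO_ELEMENT(5, 1, 2, 0, 1, 0) = 0x421005.
 */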
/******************** INTERNAL IMPLEMENTATION *********************/

/* Returns the external VOQ number */
static u8 ecore_get_ext_voq(struct ecore_hwfn *p_hwfn,
                            u8 port_id,
                            u8 tc,
                            u8 max_phys_tcs_per_port)
{
    if (tc == PURE_LB_TC)
        return NUM_OF_PHYS_TCS * (MAX_NUM_PORTS_BB) + port_id;
    else
        return port_id * (max_phys_tcs_per_port) + tc;
}
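
/* Example (illustrative, assuming NUM_OF_PHYS_TCS == 8): with 4 physical TCs
 * per port, port 1 / TC 2 maps to VOQ 1 * 4 + 2 = 6, while the pure LB TC of
 * port 1 maps past all physical VOQs, to VOQ 8 * MAX_NUM_PORTS_BB + 1.
 */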
/* Prepare PF RL enable/disable runtime init values */
static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn, bool pf_rl_en)
{
    STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
    if (pf_rl_en) {
        u8 num_ext_voqs = MAX_NUM_VOQS_E4;
        u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;

        /* Enable RLs for all VOQs */
        STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
                     (u32)voq_bit_mask);
#ifdef QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET
        if (num_ext_voqs >= 32)
            STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET,
                         (u32)(voq_bit_mask >> 32));
#endif

        /* Write RL period */
        STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET,
                     QM_RL_PERIOD_CLK_25M);
        STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIODTIMER_RT_OFFSET,
                     QM_RL_PERIOD_CLK_25M);

        /* Set credit threshold for QM bypass flow */
        if (QM_BYPASS_EN)
            STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
                         QM_PF_RL_UPPER_BOUND);
    }
}
/* Prepare PF WFQ enable/disable runtime init values */
static void ecore_enable_pf_wfq(struct ecore_hwfn *p_hwfn, bool pf_wfq_en)
{
    STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);

    /* Set credit threshold for QM bypass flow */
    if (pf_wfq_en && QM_BYPASS_EN)
        STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
                     QM_WFQ_UPPER_BOUND);
}
/* Prepare VPORT RL enable/disable runtime init values */
static void ecore_enable_vport_rl(struct ecore_hwfn *p_hwfn, bool vport_rl_en)
{
    STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
                 vport_rl_en ? 1 : 0);
    if (vport_rl_en) {
        /* Write RL period (use timer 0 only) */
        STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
                     QM_RL_PERIOD_CLK_25M);
        STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
                     QM_RL_PERIOD_CLK_25M);

        /* Set credit threshold for QM bypass flow */
        if (QM_BYPASS_EN)
            STORE_RT_REG(p_hwfn,
                         QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
                         QM_VP_RL_BYPASS_THRESH_SPEED);
    }
}
/* Prepare VPORT WFQ enable/disable runtime init values */
static void ecore_enable_vport_wfq(struct ecore_hwfn *p_hwfn, bool vport_wfq_en)
{
    STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
                 vport_wfq_en ? 1 : 0);

    /* Set credit threshold for QM bypass flow */
    if (vport_wfq_en && QM_BYPASS_EN)
        STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
                     QM_WFQ_UPPER_BOUND);
}
/* Prepare runtime init values to allocate PBF command queue lines for
 * the specified VOQ.
 */
static void ecore_cmdq_lines_voq_rt_init(struct ecore_hwfn *p_hwfn,
                                         u8 ext_voq,
                                         u16 cmdq_lines)
{
    u32 qm_line_crd;

    qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);

    OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq),
                     (u32)cmdq_lines);
    STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq,
                 qm_line_crd);
    STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq,
                 qm_line_crd);
}
/* Prepare runtime init values to allocate PBF command queue lines. */
static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,
                                     u8 max_ports_per_engine,
                                     u8 max_phys_tcs_per_port,
                                     struct init_qm_port_params
                                     port_params[MAX_NUM_PORTS])
{
    u8 tc, ext_voq, port_id, num_tcs_in_port;
    u8 num_ext_voqs = MAX_NUM_VOQS_E4;

    /* Clear PBF lines of all VOQs */
    for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
        STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0);

    for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
        u16 phys_lines, phys_lines_per_tc;

        if (!port_params[port_id].active)
            continue;

        /* Find number of command queue lines to divide between the
         * active physical TCs. In E5, 1/8 of the lines are reserved.
         * The lines for the pure LB TC are subtracted.
         */
        phys_lines = port_params[port_id].num_pbf_cmd_lines;
        phys_lines -= PBF_CMDQ_PURE_LB_LINES;

        /* Find #lines per active physical TC */
        num_tcs_in_port = 0;
        for (tc = 0; tc < max_phys_tcs_per_port; tc++)
            if (((port_params[port_id].active_phys_tcs >> tc) &
                  0x1) == 1)
                num_tcs_in_port++;
        phys_lines_per_tc = phys_lines / num_tcs_in_port;

        /* Init registers per active TC */
        for (tc = 0; tc < max_phys_tcs_per_port; tc++) {
            ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc,
                                        max_phys_tcs_per_port);
            if (((port_params[port_id].active_phys_tcs >> tc) &
                 0x1) == 1)
                ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
                                             phys_lines_per_tc);
        }

        /* Init registers for pure LB TC */
        ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC,
                                    max_phys_tcs_per_port);
        ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
                                     PBF_CMDQ_PURE_LB_LINES);
    }
}
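
/* Numeric sketch (illustrative): a port with 3440 PBF command lines first
 * loses the 150 pure LB lines, leaving 3290; with 2 active physical TCs each
 * TC is then granted 3290 / 2 = 1645 command queue lines.
 */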
/*
 * Prepare runtime init values to allocate guaranteed BTB blocks for the
 * specified port. The guaranteed BTB space is divided between the TCs as
 * follows (shared space is currently not used):
 * 1. Parameters:
 *    B - BTB blocks for this port
 *    C - Number of physical TCs for this port
 * 2. Calculation:
 *    a. 38 blocks (9700B jumbo frame) are allocated for global per port
 *       headroom.
 *    b. B = B - 38 (remainder after global headroom allocation).
 *    c. MAX(38, B / (C + 0.7)) blocks are allocated for the pure LB VOQ.
 *    d. B = B - MAX(38, B / (C + 0.7)) (remainder after pure LB allocation).
 *    e. B / C blocks are allocated for each physical TC.
 * Assumptions:
 * - MTU is up to 9700 bytes (38 blocks)
 * - All TCs are considered symmetrical (same rate and packet size)
 * - No optimization for lossy TC (all are considered lossless). Shared space
 *   is not enabled and allocated for each TC.
 */
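/* Numeric sketch following the integer arithmetic in the code below: with
 * usable_blocks = 2362 and 4 active TCs, pure_lb_blocks = (2362 * 10) /
 * (4 * 10 + 7) = 502, then MAX(38, 502 / 10) = 50 blocks go to pure LB, and
 * (2362 - 50) / 4 = 578 guaranteed blocks go to each physical TC.
 */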
static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
                                     u8 max_ports_per_engine,
                                     u8 max_phys_tcs_per_port,
                                     struct init_qm_port_params
                                     port_params[MAX_NUM_PORTS])
{
    u32 usable_blocks, pure_lb_blocks, phys_blocks;
    u8 tc, ext_voq, port_id, num_tcs_in_port;

    for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
        if (!port_params[port_id].active)
            continue;

        /* Subtract headroom blocks */
        usable_blocks = port_params[port_id].num_btb_blocks -
                        BTB_HEADROOM_BLOCKS;

        /* Find blocks per physical TC. Use a factor to avoid
         * floating point arithmetic.
         */
        num_tcs_in_port = 0;
        for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
            if (((port_params[port_id].active_phys_tcs >> tc) &
                  0x1) == 1)
                num_tcs_in_port++;

        pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
                         (num_tcs_in_port * BTB_PURE_LB_FACTOR +
                          BTB_PURE_LB_RATIO);
        pure_lb_blocks = OSAL_MAX_T(u32, BTB_JUMBO_PKT_BLOCKS,
                                    pure_lb_blocks /
                                    BTB_PURE_LB_FACTOR);
        phys_blocks = (usable_blocks - pure_lb_blocks) /
                      num_tcs_in_port;

        /* Init physical TCs */
        for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
            if (((port_params[port_id].active_phys_tcs >> tc) &
                 0x1) == 1) {
                ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc,
                                            max_phys_tcs_per_port);
                STORE_RT_REG(p_hwfn,
                             PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
                             phys_blocks);
            }
        }

        /* Init pure LB TC */
        ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC,
                                    max_phys_tcs_per_port);
        STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
                     pure_lb_blocks);
    }
}
/* Prepare Tx PQ mapping runtime init values for the specified PF */
static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
                                    struct ecore_ptt *p_ptt,
                                    u8 pf_id,
                                    u8 max_phys_tcs_per_port,
                                    bool is_pf_loading,
                                    u32 num_pf_cids,
                                    u32 num_vf_cids,
                                    u16 start_pq,
                                    u16 num_pf_pqs,
                                    u16 num_vf_pqs,
                                    u8 start_vport,
                                    u32 base_mem_addr_4kb,
                                    struct init_qm_pq_params *pq_params,
                                    struct init_qm_vport_params *vport_params)
{
    /* A bit per Tx PQ indicating if the PQ is associated with a VF */
    u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
    u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
    u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group;
    u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;

    num_pqs = num_pf_pqs + num_vf_pqs;

    first_pq_group = start_pq / QM_PF_QUEUE_GROUP_SIZE;
    last_pq_group = (start_pq + num_pqs - 1) / QM_PF_QUEUE_GROUP_SIZE;

    pq_mem_4kb = QM_PQ_MEM_4KB(num_pf_cids);
    vport_pq_mem_4kb = QM_PQ_MEM_4KB(num_vf_cids);
    mem_addr_4kb = base_mem_addr_4kb;

    /* Set mapping from PQ group to PF */
    for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
        STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
                     (u32)(pf_id));

    /* Set PQ sizes */
    STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
                 QM_PQ_SIZE_256B(num_pf_cids));
    STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
                 QM_PQ_SIZE_256B(num_vf_cids));

    /* Go over all Tx PQs */
    for (i = 0, pq_id = start_pq; i < num_pqs; i++, pq_id++) {
        u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
        u8 ext_voq, vport_id_in_pf;
        bool is_vf_pq, rl_valid;
        u16 first_tx_pq_id;

        ext_voq = ecore_get_ext_voq(p_hwfn, pq_params[i].port_id,
                                    pq_params[i].tc_id,
                                    max_phys_tcs_per_port);
        is_vf_pq = (i >= num_pf_pqs);
        rl_valid = pq_params[i].rl_valid > 0;

        /* Update first Tx PQ of VPORT/TC */
        vport_id_in_pf = pq_params[i].vport_id - start_vport;
        first_tx_pq_id =
        vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].tc_id];
        if (first_tx_pq_id == QM_INVALID_PQ_ID) {
            u32 map_val = (ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
                          (pf_id << (QM_WFQ_VP_PQ_PF_E4_SHIFT));

            /* Create new VP PQ */
            vport_params[vport_id_in_pf].
                first_tx_pq_id[pq_params[i].tc_id] = pq_id;
            first_tx_pq_id = pq_id;

            /* Map VP PQ to VOQ and PF */
            STORE_RT_REG(p_hwfn, QM_REG_WFQVPMAP_RT_OFFSET +
                         first_tx_pq_id, map_val);
        }

        /* Check RL ID */
        if (rl_valid && pq_params[i].vport_id >= max_qm_global_rls) {
            DP_NOTICE(p_hwfn, true,
                      "Invalid VPORT ID for rate limiter config\n");
            rl_valid = false;
        }

        /* Prepare PQ map entry */
        struct qm_rf_pq_map_e4 tx_pq_map;

        QM_INIT_TX_PQ_MAP(p_hwfn, tx_pq_map, E4, pq_id, rl_valid ?
                          1 : 0,
                          first_tx_pq_id, rl_valid ?
                          pq_params[i].vport_id : 0,
                          ext_voq, pq_params[i].wrr_group);

        /* Set PQ base address */
        STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
                     mem_addr_4kb);

        /* Clear PQ pointer table entry (64 bit) */
        if (is_pf_loading)
            for (j = 0; j < 2; j++)
                STORE_RT_REG(p_hwfn, QM_REG_PTRTBLTX_RT_OFFSET +
                             (pq_id * 2) + j, 0);

        /* Write PQ info to RAM */
        if (WRITE_PQ_INFO_TO_RAM != 0) {
            u32 pq_info = 0;

            pq_info = PQ_INFO_ELEMENT(first_tx_pq_id, pf_id,
                                      pq_params[i].tc_id,
                                      pq_params[i].port_id,
                                      rl_valid ? 1 : 0, rl_valid ?
                                      pq_params[i].vport_id : 0);
            ecore_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),
                     pq_info);
        }

        /* If VF PQ, add indication to PQ VF mask */
        if (is_vf_pq) {
            tx_pq_vf_mask[pq_id / QM_PF_QUEUE_GROUP_SIZE] |=
                (1 << (pq_id % QM_PF_QUEUE_GROUP_SIZE));
            mem_addr_4kb += vport_pq_mem_4kb;
        } else {
            mem_addr_4kb += pq_mem_4kb;
        }
    }

    /* Store Tx PQ VF mask to size select register */
    for (i = 0; i < num_tx_pq_vf_masks; i++)
        if (tx_pq_vf_mask[i])
            STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +
                         i, tx_pq_vf_mask[i]);
}
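
/* The VP PQ map value built above packs the VOQ in bits [4:0] and the PF in
 * bits [9:5] (QM_WFQ_VP_PQ_PF_E4_SHIFT == 5); e.g. VOQ 6 on PF 2 yields
 * map_val = 6 | (2 << 5) = 70.
 */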
/* Prepare Other PQ mapping runtime init values for the specified PF */
static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
                                       u8 pf_id,
                                       bool is_pf_loading,
                                       u32 num_pf_cids,
                                       u32 num_tids,
                                       u32 base_mem_addr_4kb)
{
    u32 pq_size, pq_mem_4kb, mem_addr_4kb;
    u16 i, j, pq_id, pq_group;

    /* A single other PQ group is used in each PF, where PQ group i is used
     * in PF i.
     */
    pq_group = pf_id;
    pq_size = num_pf_cids + num_tids;
    pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
    mem_addr_4kb = base_mem_addr_4kb;

    /* Map PQ group to PF */
    STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
                 (u32)(pf_id));

    /* Set PQ sizes */
    STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
                 QM_PQ_SIZE_256B(pq_size));

    for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
         i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
        /* Set PQ base address */
        STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
                     mem_addr_4kb);

        /* Clear PQ pointer table entry */
        if (is_pf_loading)
            for (j = 0; j < 2; j++)
                STORE_RT_REG(p_hwfn,
                             QM_REG_PTRTBLOTHER_RT_OFFSET +
                             (pq_id * 2) + j, 0);

        mem_addr_4kb += pq_mem_4kb;
    }
}
/* Prepare PF WFQ runtime init values for the specified PF.
 * Return -1 on error.
 */
static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
                                u8 pf_id,
                                u16 pf_wfq,
                                u8 max_phys_tcs_per_port,
                                u16 num_tx_pqs,
                                struct init_qm_pq_params *pq_params)
{
    u32 inc_val, crd_reg_offset;
    u8 ext_voq;
    u16 i;

    inc_val = QM_WFQ_INC_VAL(pf_wfq);
    if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
        DP_NOTICE(p_hwfn, true,
                  "Invalid PF WFQ weight configuration\n");
        return -1;
    }

    for (i = 0; i < num_tx_pqs; i++) {
        ext_voq = ecore_get_ext_voq(p_hwfn, pq_params[i].port_id,
                                    pq_params[i].tc_id,
                                    max_phys_tcs_per_port);
        crd_reg_offset = (pf_id < MAX_NUM_PFS_BB ?
                          QM_REG_WFQPFCRD_RT_OFFSET :
                          QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
                         ext_voq * MAX_NUM_PFS_BB +
                         (pf_id % MAX_NUM_PFS_BB);
        OVERWRITE_RT_REG(p_hwfn, crd_reg_offset,
                         (u32)QM_WFQ_CRD_REG_SIGN_BIT);
    }

    STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET +
                 pf_id, QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
    STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val);

    return 0;
}
/* Prepare PF RL runtime init values for the specified PF.
 * Return -1 on error.
 */
static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
{
    u32 inc_val;

    inc_val = QM_RL_INC_VAL(pf_rl);
    if (inc_val > QM_PF_RL_MAX_INC_VAL) {
        DP_NOTICE(p_hwfn, true,
                  "Invalid PF rate limit configuration\n");
        return -1;
    }

    STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
                 (u32)QM_RL_CRD_REG_SIGN_BIT);
    STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
                 QM_PF_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
    STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);

    return 0;
}
/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,
                                u8 num_vports,
                                struct init_qm_vport_params *vport_params)
{
    u16 vport_pq_id;
    u32 inc_val;
    u8 tc, i;

    /* Go over all PF VPORTs */
    for (i = 0; i < num_vports; i++) {
        if (!vport_params[i].vport_wfq)
            continue;

        inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
        if (inc_val > QM_WFQ_MAX_INC_VAL) {
            DP_NOTICE(p_hwfn, true,
                      "Invalid VPORT WFQ weight configuration\n");
            return -1;
        }

        /* Each VPORT can have several VPORT PQ IDs for various TCs */
        for (tc = 0; tc < NUM_OF_TCS; tc++) {
            vport_pq_id = vport_params[i].first_tx_pq_id[tc];
            if (vport_pq_id != QM_INVALID_PQ_ID) {
                STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET +
                             vport_pq_id,
                             (u32)QM_WFQ_CRD_REG_SIGN_BIT);
                STORE_RT_REG(p_hwfn,
                             QM_REG_WFQVPWEIGHT_RT_OFFSET +
                             vport_pq_id, inc_val);
            }
        }
    }

    return 0;
}
/* Prepare VPORT RL runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
                                  u8 start_vport,
                                  u8 num_vports,
                                  u32 link_speed,
                                  struct init_qm_vport_params *vport_params)
{
    u8 i, vport_id;
    u32 inc_val;

    if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
        DP_NOTICE(p_hwfn, true,
                  "Invalid VPORT ID for rate limiter configuration\n");
        return -1;
    }

    /* Go over all PF VPORTs */
    for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
        inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl ?
                  vport_params[i].vport_rl : link_speed);
        if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
            DP_NOTICE(p_hwfn, true,
                      "Invalid VPORT rate-limit configuration\n");
            return -1;
        }

        STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
                     (u32)QM_RL_CRD_REG_SIGN_BIT);
        STORE_RT_REG(p_hwfn,
                     QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
                     QM_VP_RL_UPPER_BOUND(link_speed) |
                     (u32)QM_RL_CRD_REG_SIGN_BIT);
        STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
                     inc_val);
    }

    return 0;
}
static bool ecore_poll_on_qm_cmd_ready(struct ecore_hwfn *p_hwfn,
                                       struct ecore_ptt *p_ptt)
{
    u32 reg_val, i;

    for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val;
         i++) {
        OSAL_UDELAY(QM_STOP_CMD_POLL_PERIOD_US);
        reg_val = ecore_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
    }

    /* Check if timeout while waiting for SDM command ready */
    if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
        DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG,
                   "Timeout waiting for QM SDM cmd ready signal\n");
        return false;
    }

    return true;
}
static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
                              struct ecore_ptt *p_ptt,
                              u32 cmd_addr,
                              u32 cmd_data_lsb,
                              u32 cmd_data_msb)
{
    if (!ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
        return false;

    ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
    ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
    ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
    ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
    ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);

    return ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
}
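
/* Worst-case wait per poll loop: QM_STOP_CMD_MAX_POLL_COUNT *
 * QM_STOP_CMD_POLL_PERIOD_US = 100 * 500us = 50ms, and ecore_send_qm_cmd()
 * polls both before and after issuing the command.
 */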
/******************** INTERFACE IMPLEMENTATION *********************/

u32 ecore_qm_pf_mem_size(u32 num_pf_cids,
                         u32 num_vf_cids,
                         u32 num_tids,
                         u16 num_pf_pqs,
                         u16 num_vf_pqs)
{
    return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
           QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
           QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
}
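
/* Sizing sketch (illustrative, assuming QM_PQ_ELEMENT_SIZE == 4): with
 * num_pf_cids = 1023 each PF PQ needs QM_PQ_MEM_4KB(1023) = 1 page, so a PF
 * with 16 PF PQs, no VF PQs and no extra TIDs needs 16 + 0 + 4 (the other
 * PQs) pages of 4KB.
 */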
int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
                            u8 max_ports_per_engine,
                            u8 max_phys_tcs_per_port,
                            bool pf_rl_en,
                            bool pf_wfq_en,
                            bool vport_rl_en,
                            bool vport_wfq_en,
                            struct init_qm_port_params
                            port_params[MAX_NUM_PORTS])
{
    u32 mask;

    /* Init AFullOprtnstcCrdMask */
    mask = (QM_OPPOR_LINE_VOQ_DEF <<
        QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
        (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
        (pf_wfq_en << QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
        (vport_wfq_en << QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
        (pf_rl_en << QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
        (vport_rl_en << QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
        (QM_OPPOR_FW_STOP_DEF <<
         QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
        (QM_OPPOR_PQ_EMPTY_DEF <<
         QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
    STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);

    /* Enable/disable PF RL */
    ecore_enable_pf_rl(p_hwfn, pf_rl_en);

    /* Enable/disable PF WFQ */
    ecore_enable_pf_wfq(p_hwfn, pf_wfq_en);

    /* Enable/disable VPORT RL */
    ecore_enable_vport_rl(p_hwfn, vport_rl_en);

    /* Enable/disable VPORT WFQ */
    ecore_enable_vport_wfq(p_hwfn, vport_wfq_en);

    /* Init PBF CMDQ line credit */
    ecore_cmdq_lines_rt_init(p_hwfn, max_ports_per_engine,
                             max_phys_tcs_per_port, port_params);

    /* Init BTB blocks in PBF */
    ecore_btb_blocks_rt_init(p_hwfn, max_ports_per_engine,
                             max_phys_tcs_per_port, port_params);

    return 0;
}
int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
                        struct ecore_ptt *p_ptt,
                        u8 pf_id,
                        u8 max_phys_tcs_per_port,
                        bool is_pf_loading,
                        u32 num_pf_cids,
                        u32 num_vf_cids,
                        u32 num_tids,
                        u16 start_pq,
                        u16 num_pf_pqs,
                        u16 num_vf_pqs,
                        u8 start_vport,
                        u8 num_vports,
                        u16 pf_wfq,
                        u32 pf_rl,
                        u32 link_speed,
                        struct init_qm_pq_params *pq_params,
                        struct init_qm_vport_params *vport_params)
{
    u32 other_mem_size_4kb;
    u8 tc, i;

    other_mem_size_4kb = QM_PQ_MEM_4KB(num_pf_cids + num_tids) *
                         QM_OTHER_PQS_PER_PF;

    /* Clear first Tx PQ ID array for each VPORT */
    for (i = 0; i < num_vports; i++)
        for (tc = 0; tc < NUM_OF_TCS; tc++)
            vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;

    /* Map Other PQs (if any) */
#if QM_OTHER_PQS_PER_PF > 0
    ecore_other_pq_map_rt_init(p_hwfn, pf_id, is_pf_loading, num_pf_cids,
                               num_tids, 0);
#endif

    /* Map Tx PQs */
    ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, pf_id, max_phys_tcs_per_port,
                            is_pf_loading, num_pf_cids, num_vf_cids,
                            start_pq, num_pf_pqs, num_vf_pqs, start_vport,
                            other_mem_size_4kb, pq_params, vport_params);

    /* Init PF WFQ */
    if (pf_wfq)
        if (ecore_pf_wfq_rt_init(p_hwfn, pf_id, pf_wfq,
                                 max_phys_tcs_per_port,
                                 num_pf_pqs + num_vf_pqs, pq_params))
            return -1;

    /* Init PF RL */
    if (ecore_pf_rl_rt_init(p_hwfn, pf_id, pf_rl))
        return -1;

    /* Set VPORT WFQ */
    if (ecore_vp_wfq_rt_init(p_hwfn, num_vports, vport_params))
        return -1;

    /* Set VPORT RL */
    if (ecore_vport_rl_rt_init
        (p_hwfn, start_vport, num_vports, link_speed, vport_params))
        return -1;

    return 0;
}
int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
                      struct ecore_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
{
    u32 inc_val;

    inc_val = QM_WFQ_INC_VAL(pf_wfq);
    if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
        DP_NOTICE(p_hwfn, true,
                  "Invalid PF WFQ weight configuration\n");
        return -1;
    }

    ecore_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);

    return 0;
}
int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
                     struct ecore_ptt *p_ptt, u8 pf_id, u32 pf_rl)
{
    u32 inc_val;

    inc_val = QM_RL_INC_VAL(pf_rl);
    if (inc_val > QM_PF_RL_MAX_INC_VAL) {
        DP_NOTICE(p_hwfn, true,
                  "Invalid PF rate limit configuration\n");
        return -1;
    }

    ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFCRD + pf_id * 4,
             (u32)QM_RL_CRD_REG_SIGN_BIT);
    ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);

    return 0;
}
int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
                         struct ecore_ptt *p_ptt,
                         u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
{
    u16 vport_pq_id;
    u32 inc_val;
    u8 tc;

    inc_val = QM_WFQ_INC_VAL(vport_wfq);
    if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
        DP_NOTICE(p_hwfn, true,
                  "Invalid VPORT WFQ weight configuration\n");
        return -1;
    }

    for (tc = 0; tc < NUM_OF_TCS; tc++) {
        vport_pq_id = first_tx_pq_id[tc];
        if (vport_pq_id != QM_INVALID_PQ_ID) {
            ecore_wr(p_hwfn, p_ptt,
                     QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
        }
    }

    return 0;
}
int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
                        struct ecore_ptt *p_ptt, u8 vport_id,
                        u32 vport_rl,
                        u32 link_speed)
{
    u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;

    if (vport_id >= max_qm_global_rls) {
        DP_NOTICE(p_hwfn, true,
                  "Invalid VPORT ID for rate limiter configuration\n");
        return -1;
    }

    inc_val = QM_RL_INC_VAL(vport_rl ? vport_rl : link_speed);
    if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
        DP_NOTICE(p_hwfn, true,
                  "Invalid VPORT rate-limit configuration\n");
        return -1;
    }

    ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + vport_id * 4,
             (u32)QM_RL_CRD_REG_SIGN_BIT);
    ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);

    return 0;
}
bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
                            struct ecore_ptt *p_ptt,
                            bool is_release_cmd,
                            bool is_tx_pq, u16 start_pq, u16 num_pqs)
{
    u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
    u32 pq_mask = 0, last_pq, pq_id;

    last_pq = start_pq + num_pqs - 1;

    /* Set command's PQ type */
    QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);

    /* Go over requested PQs */
    for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
        /* Set PQ bit in mask (stop command only) */
        if (!is_release_cmd)
            pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));

        /* If last PQ or end of PQ mask, write command */
        if ((pq_id == last_pq) ||
            (pq_id % QM_STOP_PQ_MASK_WIDTH ==
            (QM_STOP_PQ_MASK_WIDTH - 1))) {
            QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PAUSE_MASK,
                             pq_mask);
            QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, GROUP_ID,
                             pq_id / QM_STOP_PQ_MASK_WIDTH);
            if (!ecore_send_qm_cmd
                (p_hwfn, p_ptt, QM_STOP_CMD_ADDR, cmd_arr[0],
                 cmd_arr[1]))
                return false;
            pq_mask = 0;
        }
    }

    return true;
}
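
/* Typical usage (sketch): pause a range of Tx PQs before draining them, then
 * release. Both calls use the same PQ range; only is_release_cmd differs:
 *
 *   ecore_send_qm_stop_cmd(p_hwfn, p_ptt, false, true, start_pq, num_pqs);
 *   ... drain / reconfigure ...
 *   ecore_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, start_pq, num_pqs);
 */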
/* NIG: ETS configuration constants */
#define NIG_TX_ETS_CLIENT_OFFSET 4
#define NIG_LB_ETS_CLIENT_OFFSET 1
#define NIG_ETS_MIN_WFQ_BYTES 1600

/* NIG: ETS constants */
#define NIG_ETS_UP_BOUND(weight, mtu) \
    (2 * ((weight) > (mtu) ? (weight) : (mtu)))

/* NIG: RL constants */

/* Byte base type value */
#define NIG_RL_BASE_TYPE 1

/* RL period in us */
#define NIG_RL_PERIOD 1

/* Period in 25MHz cycles */
#define NIG_RL_PERIOD_CLK_25M (25 * NIG_RL_PERIOD)

/* Rate is specified in mbps */
#define NIG_RL_INC_VAL(rate) (((rate) * NIG_RL_PERIOD) / 8)

#define NIG_RL_MAX_VAL(inc_val, mtu) \
    (2 * ((inc_val) > (mtu) ? (inc_val) : (mtu)))

/* NIG: packet priority configuration constants */
#define NIG_PRIORITY_MAP_TC_BITS 4
void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
                        struct ecore_ptt *p_ptt,
                        struct init_ets_req *req, bool is_lb)
{
    u32 min_weight, tc_weight_base_addr, tc_weight_addr_diff;
    u32 tc_bound_base_addr, tc_bound_addr_diff;
    u8 sp_tc_map = 0, wfq_tc_map = 0;
    u8 tc, num_tc, tc_client_offset;

    num_tc = is_lb ? NUM_OF_TCS : NUM_OF_PHYS_TCS;
    tc_client_offset = is_lb ? NIG_LB_ETS_CLIENT_OFFSET :
                               NIG_TX_ETS_CLIENT_OFFSET;
    min_weight = 0xffffffff;
    tc_weight_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
                                  NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
    tc_weight_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_1 -
                                  NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
                                  NIG_REG_TX_ARB_CREDIT_WEIGHT_1 -
                                  NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
    tc_bound_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
                                 NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
    tc_bound_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 -
                                 NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
                                 NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 -
                                 NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;

    for (tc = 0; tc < num_tc; tc++) {
        struct init_ets_tc_req *tc_req = &req->tc_req[tc];

        /* Update SP map */
        if (tc_req->use_sp)
            sp_tc_map |= (1 << tc);

        if (!tc_req->use_wfq)
            continue;

        /* Update WFQ map */
        wfq_tc_map |= (1 << tc);

        /* Find minimal weight */
        if (tc_req->weight < min_weight)
            min_weight = tc_req->weight;
    }

    /* Write SP map */
    ecore_wr(p_hwfn, p_ptt,
             is_lb ? NIG_REG_LB_ARB_CLIENT_IS_STRICT :
                     NIG_REG_TX_ARB_CLIENT_IS_STRICT,
             (sp_tc_map << tc_client_offset));

    /* Write WFQ map */
    ecore_wr(p_hwfn, p_ptt,
             is_lb ? NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ :
                     NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
             (wfq_tc_map << tc_client_offset));

    /* Write WFQ weights */
    for (tc = 0; tc < num_tc; tc++, tc_client_offset++) {
        struct init_ets_tc_req *tc_req = &req->tc_req[tc];
        u32 byte_weight;

        if (!tc_req->use_wfq)
            continue;

        /* Translate weight to bytes */
        byte_weight = (NIG_ETS_MIN_WFQ_BYTES * tc_req->weight) /
                      min_weight;

        /* Write WFQ weight */
        ecore_wr(p_hwfn, p_ptt, tc_weight_base_addr +
                 tc_weight_addr_diff * tc_client_offset, byte_weight);

        /* Write WFQ upper bound */
        ecore_wr(p_hwfn, p_ptt, tc_bound_base_addr +
                 tc_bound_addr_diff * tc_client_offset,
                 NIG_ETS_UP_BOUND(byte_weight, req->mtu));
    }
}
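
/* WFQ weight translation example (illustrative): with TC weights {1, 2} the
 * minimal weight is 1, so the byte weights become 1600 and 3200 bytes, and
 * each TC's upper bound is NIG_ETS_UP_BOUND(byte_weight, mtu) =
 * 2 * MAX(byte_weight, mtu).
 */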
void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
                          struct ecore_ptt *p_ptt,
                          struct init_nig_lb_rl_req *req)
{
    u32 ctrl, inc_val, reg_offset;
    u8 tc;

    /* Disable global MAC+LB RL */
    ctrl = NIG_RL_BASE_TYPE <<
    NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT;
    ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);

    /* Configure and enable global MAC+LB RL */
    if (req->lb_mac_rate) {
        /* Configure */
        ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD,
                 NIG_RL_PERIOD_CLK_25M);
        inc_val = NIG_RL_INC_VAL(req->lb_mac_rate);
        ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_VALUE,
                 inc_val);
        ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_MAX_VALUE,
                 NIG_RL_MAX_VAL(inc_val, req->mtu));

        /* Enable */
        ctrl |= 1 <<
        NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT;
        ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
    }

    /* Disable global LB-only RL */
    ctrl = NIG_RL_BASE_TYPE <<
    NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT;
    ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);

    /* Configure and enable global LB-only RL */
    if (req->lb_rate) {
        /* Configure */
        ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_PERIOD,
                 NIG_RL_PERIOD_CLK_25M);
        inc_val = NIG_RL_INC_VAL(req->lb_rate);
        ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_VALUE,
                 inc_val);
        ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_MAX_VALUE,
                 NIG_RL_MAX_VAL(inc_val, req->mtu));

        /* Enable */
        ctrl |=
            1 << NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT;
        ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
    }

    /* Per-TC RLs */
    for (tc = 0, reg_offset = 0; tc < NUM_OF_PHYS_TCS;
         tc++, reg_offset += 4) {
        /* Disable TC RL */
        ctrl = NIG_RL_BASE_TYPE <<
        NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT;
        ecore_wr(p_hwfn, p_ptt,
                 NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl);

        /* Configure and enable TC RL */
        if (!req->tc_rate[tc])
            continue;

        /* Configure */
        ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 +
                 reg_offset, NIG_RL_PERIOD_CLK_25M);
        inc_val = NIG_RL_INC_VAL(req->tc_rate[tc]);
        ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 +
                 reg_offset, inc_val);
        ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 +
                 reg_offset, NIG_RL_MAX_VAL(inc_val, req->mtu));

        /* Enable */
        ctrl |= 1 <<
            NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT;
        ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_CTRL_0 +
                 reg_offset, ctrl);
    }
}
void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
                               struct ecore_ptt *p_ptt,
                               struct init_nig_pri_tc_map_req *req)
{
    u8 tc_pri_mask[NUM_OF_PHYS_TCS] = { 0 };
    u32 pri_tc_mask = 0;
    u8 pri, tc;

    for (pri = 0; pri < NUM_OF_VLAN_PRIORITIES; pri++) {
        if (!req->pri[pri].valid)
            continue;

        pri_tc_mask |= (req->pri[pri].tc_id <<
                        (pri * NIG_PRIORITY_MAP_TC_BITS));
        tc_pri_mask[req->pri[pri].tc_id] |= (1 << pri);
    }

    /* Write priority -> TC mask */
    ecore_wr(p_hwfn, p_ptt, NIG_REG_PKT_PRIORITY_TO_TC, pri_tc_mask);

    /* Write TC -> priority mask */
    for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
        ecore_wr(p_hwfn, p_ptt, NIG_REG_PRIORITY_FOR_TC_0 + tc * 4,
                 tc_pri_mask[tc]);
        ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_TC0_PRIORITY_MASK + tc * 4,
                 tc_pri_mask[tc]);
    }
}
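
/* Mapping example (illustrative): with priority 0 -> TC 2 and priority 1 ->
 * TC 0 valid, pri_tc_mask = 2 | (0 << 4) = 0x02 (4 bits per priority), while
 * on the reverse path tc_pri_mask[2] = 0x1 and tc_pri_mask[0] = 0x2.
 */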
/* PRS: ETS configuration constants */
#define PRS_ETS_MIN_WFQ_BYTES 1600
#define PRS_ETS_UP_BOUND(weight, mtu) \
    (2 * ((weight) > (mtu) ? (weight) : (mtu)))

void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
                        struct ecore_ptt *p_ptt, struct init_ets_req *req)
{
    u32 tc_weight_addr_diff, tc_bound_addr_diff, min_weight = 0xffffffff;
    u8 tc, sp_tc_map = 0, wfq_tc_map = 0;

    tc_weight_addr_diff = PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 -
                          PRS_REG_ETS_ARB_CREDIT_WEIGHT_0;
    tc_bound_addr_diff = PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 -
                         PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0;

    for (tc = 0; tc < NUM_OF_TCS; tc++) {
        struct init_ets_tc_req *tc_req = &req->tc_req[tc];

        /* Update SP map */
        if (tc_req->use_sp)
            sp_tc_map |= (1 << tc);

        if (!tc_req->use_wfq)
            continue;

        /* Update WFQ map */
        wfq_tc_map |= (1 << tc);

        /* Find minimal weight */
        if (tc_req->weight < min_weight)
            min_weight = tc_req->weight;
    }

    /* Write SP map */
    ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_STRICT, sp_tc_map);

    /* Write WFQ map */
    ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ,
             wfq_tc_map);

    /* Write WFQ weights */
    for (tc = 0; tc < NUM_OF_TCS; tc++) {
        struct init_ets_tc_req *tc_req = &req->tc_req[tc];
        u32 byte_weight;

        if (!tc_req->use_wfq)
            continue;

        /* Translate weight to bytes */
        byte_weight = (PRS_ETS_MIN_WFQ_BYTES * tc_req->weight) /
                      min_weight;

        /* Write WFQ weight */
        ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 + tc *
                 tc_weight_addr_diff, byte_weight);

        /* Write WFQ upper bound */
        ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 +
                 tc * tc_bound_addr_diff, PRS_ETS_UP_BOUND(byte_weight,
                                                           req->mtu));
    }
}
/* BRB: RAM configuration constants */
#define BRB_TOTAL_RAM_BLOCKS_BB 4800
#define BRB_TOTAL_RAM_BLOCKS_K2 5632
#define BRB_BLOCK_SIZE 128
#define BRB_MIN_BLOCKS_PER_TC 9
#define BRB_HYST_BYTES 10240
#define BRB_HYST_BLOCKS (BRB_HYST_BYTES / BRB_BLOCK_SIZE)
/* Temporary big RAM allocation - should be updated */
void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
                        struct ecore_ptt *p_ptt, struct init_brb_ram_req *req)
{
    u32 tc_headroom_blocks, min_pkt_size_blocks, total_blocks;
    u32 active_port_blocks, reg_offset = 0;
    u8 port, active_ports = 0;

    tc_headroom_blocks = (u32)DIV_ROUND_UP(req->headroom_per_tc,
                                           BRB_BLOCK_SIZE);
    min_pkt_size_blocks = (u32)DIV_ROUND_UP(req->min_pkt_size,
                                            BRB_BLOCK_SIZE);
    total_blocks = ECORE_IS_K2(p_hwfn->p_dev) ? BRB_TOTAL_RAM_BLOCKS_K2 :
                                                BRB_TOTAL_RAM_BLOCKS_BB;

    /* Find number of active ports */
    for (port = 0; port < MAX_NUM_PORTS; port++)
        if (req->num_active_tcs[port])
            active_ports++;

    active_port_blocks = (u32)(total_blocks / active_ports);

    for (port = 0; port < req->max_ports_per_engine; port++) {
        u32 port_blocks, port_shared_blocks, port_guaranteed_blocks;
        u32 full_xoff_th, full_xon_th, pause_xoff_th, pause_xon_th;
        u32 tc_guaranteed_blocks;
        u8 tc;

        /* Calculate per-port sizes */
        tc_guaranteed_blocks = (u32)DIV_ROUND_UP(req->guranteed_per_tc,
                                                 BRB_BLOCK_SIZE);
        port_blocks = req->num_active_tcs[port] ? active_port_blocks :
                                                  0;
        port_guaranteed_blocks = req->num_active_tcs[port] *
                                 tc_guaranteed_blocks;
        port_shared_blocks = port_blocks - port_guaranteed_blocks;
        full_xoff_th = req->num_active_tcs[port] *
                       BRB_MIN_BLOCKS_PER_TC;
        full_xon_th = full_xoff_th + min_pkt_size_blocks;
        pause_xoff_th = tc_headroom_blocks;
        pause_xon_th = pause_xoff_th + min_pkt_size_blocks;

        /* Init total size per port */
        ecore_wr(p_hwfn, p_ptt, BRB_REG_TOTAL_MAC_SIZE + port * 4,
                 port_blocks);

        /* Init shared size per port */
        ecore_wr(p_hwfn, p_ptt, BRB_REG_SHARED_HR_AREA + port * 4,
                 port_shared_blocks);

        for (tc = 0; tc < NUM_OF_TCS; tc++, reg_offset += 4) {
            /* Clear init values for non-active TCs */
            if (tc == req->num_active_tcs[port]) {
                tc_guaranteed_blocks = 0;
                full_xoff_th = 0;
                full_xon_th = 0;
                pause_xoff_th = 0;
                pause_xon_th = 0;
            }

            /* Init guaranteed size per TC */
            ecore_wr(p_hwfn, p_ptt,
                     BRB_REG_TC_GUARANTIED_0 + reg_offset,
                     tc_guaranteed_blocks);
            ecore_wr(p_hwfn, p_ptt,
                     BRB_REG_MAIN_TC_GUARANTIED_HYST_0 + reg_offset,
                     BRB_HYST_BLOCKS);

            /* Init pause/full thresholds per physical TC - for
             * loopback traffic.
             */
            ecore_wr(p_hwfn, p_ptt,
                     BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 +
                     reg_offset, full_xoff_th);
            ecore_wr(p_hwfn, p_ptt,
                     BRB_REG_LB_TC_FULL_XON_THRESHOLD_0 +
                     reg_offset, full_xon_th);
            ecore_wr(p_hwfn, p_ptt,
                     BRB_REG_LB_TC_PAUSE_XOFF_THRESHOLD_0 +
                     reg_offset, pause_xoff_th);
            ecore_wr(p_hwfn, p_ptt,
                     BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 +
                     reg_offset, pause_xon_th);

            /* Init pause/full thresholds per physical TC - for
             * main traffic.
             */
            ecore_wr(p_hwfn, p_ptt,
                     BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 +
                     reg_offset, full_xoff_th);
            ecore_wr(p_hwfn, p_ptt,
                     BRB_REG_MAIN_TC_FULL_XON_THRESHOLD_0 +
                     reg_offset, full_xon_th);
            ecore_wr(p_hwfn, p_ptt,
                     BRB_REG_MAIN_TC_PAUSE_XOFF_THRESHOLD_0 +
                     reg_offset, pause_xoff_th);
            ecore_wr(p_hwfn, p_ptt,
                     BRB_REG_MAIN_TC_PAUSE_XON_THRESHOLD_0 +
                     reg_offset, pause_xon_th);
        }
    }
}
/* In MF, should be called once per port to set EtherType of OuterTag */
void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType)
{
    /* Update DORQ register */
    STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, ethType);
}

#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
    (var = ((var) & ~(1 << (offset))) | ((enable) ? (1 << (offset)) : 0))
#define PRS_ETH_TUNN_OUTPUT_FORMAT -188897008
#define PRS_ETH_OUTPUT_FORMAT -46832
void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
                               struct ecore_ptt *p_ptt, u16 dest_port)
{
    /* Update PRS register */
    ecore_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);

    /* Update NIG register */
    ecore_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);

    /* Update PBF register */
    ecore_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
}
void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
                            struct ecore_ptt *p_ptt, bool vxlan_enable)
{
    u32 reg_val;

    /* Update PRS register */
    reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
    SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
        PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT,
        vxlan_enable);
    ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
    if (reg_val) { /* TODO: handle E5 init */
        reg_val = ecore_rd(p_hwfn, p_ptt,
                           PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);

        /* Update output only if tunnel blocks not included. */
        if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
            ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
                     (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
    }

    /* Update NIG register */
    reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
    SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
        NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT,
        vxlan_enable);
    ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

    /* Update DORQ register */
    ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
             vxlan_enable ? 1 : 0);
}
void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
                          struct ecore_ptt *p_ptt,
                          bool eth_gre_enable, bool ip_gre_enable)
{
    u32 reg_val;

    /* Update PRS register */
    reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
    SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
        PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT,
        eth_gre_enable);
    SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
        PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT,
        ip_gre_enable);
    ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
    if (reg_val) { /* TODO: handle E5 init */
        reg_val = ecore_rd(p_hwfn, p_ptt,
                           PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);

        /* Update output only if tunnel blocks not included. */
        if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
            ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
                     (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
    }

    /* Update NIG register */
    reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
    SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
        NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT,
        eth_gre_enable);
    SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
        NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT,
        ip_gre_enable);
    ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

    /* Update DORQ registers */
    ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
             eth_gre_enable ? 1 : 0);
    ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
             ip_gre_enable ? 1 : 0);
}
void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
                                struct ecore_ptt *p_ptt, u16 dest_port)
{
    /* Update PRS register */
    ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);

    /* Update NIG register */
    ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);

    /* Update PBF register */
    ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
}
void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
                             struct ecore_ptt *p_ptt,
                             bool eth_geneve_enable, bool ip_geneve_enable)
{
    u32 reg_val;

    /* Update PRS register */
    reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
    SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
        PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT,
        eth_geneve_enable);
    SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
        PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT,
        ip_geneve_enable);
    ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
    if (reg_val) { /* TODO: handle E5 init */
        reg_val = ecore_rd(p_hwfn, p_ptt,
                           PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);

        /* Update output only if tunnel blocks not included. */
        if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
            ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
                     (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
    }

    /* Update NIG register */
    ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
             eth_geneve_enable ? 1 : 0);
    ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE,
             ip_geneve_enable ? 1 : 0);

    /* EDPM with geneve tunnel not supported in BB */
    if (ECORE_IS_BB_B0(p_hwfn->p_dev))
        return;

    /* Update DORQ registers */
    ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5,
             eth_geneve_enable ? 1 : 0);
    ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5,
             ip_geneve_enable ? 1 : 0);
}
#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 4
#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -927094512

void ecore_set_vxlan_no_l2_enable(struct ecore_hwfn *p_hwfn,
                                  struct ecore_ptt *p_ptt,
                                  bool enable)
{
    u32 reg_val, cfg_mask;

    /* read PRS config register */
    reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_MSG_INFO);

    /* set VXLAN_NO_L2_ENABLE mask */
    cfg_mask = (1 << PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET);

    if (enable) {
        /* set VXLAN_NO_L2_ENABLE flag */
        reg_val |= cfg_mask;

        /* update PRS FIC register */
        ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
                 (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT);
    } else {
        /* clear VXLAN_NO_L2_ENABLE flag */
        reg_val &= ~cfg_mask;
    }

    /* write PRS config register */
    ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, reg_val);
}
#define T_ETH_PACKET_ACTION_GFT_EVENTID 23
#define PARSER_ETH_CONN_GFT_ACTION_CM_HDR 272
#define T_ETH_PACKET_MATCH_RFS_EVENTID 25
#define PARSER_ETH_CONN_CM_HDR 0
#define CAM_LINE_SIZE sizeof(u32)
#define RAM_LINE_SIZE sizeof(u64)
#define REG_SIZE sizeof(u32)
void ecore_gft_disable(struct ecore_hwfn *p_hwfn,
                       struct ecore_ptt *p_ptt,
                       u16 pf_id)
{
    /* disable gft search for PF */
    ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);

    /* Clean ram & cam for next gft session */

    /* Zero camline */
    ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);

    /* Zero ramline */
    ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
             RAM_LINE_SIZE * pf_id, 0);
    ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
             RAM_LINE_SIZE * pf_id + REG_SIZE, 0);
}
void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
                                   struct ecore_ptt *p_ptt)
{
    u32 rfs_cm_hdr_event_id;

    /* Set RFS event ID to be awakened in Tstorm by PRS */
    rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
    rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID <<
                           PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
    rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR <<
                           PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
    ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
}
void ecore_gft_config(struct ecore_hwfn *p_hwfn,
                      struct ecore_ptt *p_ptt,
                      u16 pf_id,
                      bool tcp,
                      bool udp,
                      bool ipv4,
                      bool ipv6,
                      enum gft_profile_type profile_type)
{
    u32 reg_val, cam_line, ram_line_lo, ram_line_hi, search_non_ip_as_gft;

    if (!ipv6 && !ipv4)
        DP_NOTICE(p_hwfn, true, "gft_config: must accept at least one of - ipv4 or ipv6\n");
    if (!tcp && !udp)
        DP_NOTICE(p_hwfn, true, "gft_config: must accept at least one of - udp or tcp\n");
    if (profile_type >= MAX_GFT_PROFILE_TYPE)
        DP_NOTICE(p_hwfn, true, "gft_config: unsupported gft_profile_type\n");

    /* Set RFS event ID to be awakened in Tstorm by PRS */
    reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID <<
              PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
    reg_val |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
    ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, reg_val);

    /* Do not load context only cid in PRS on match. */
    ecore_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);

    /* Do not use tenant ID exist bit for gft search */
    ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TENANT_ID, 0);

    /* Set Cam */
    cam_line = 0;
    SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_VALID, 1);

    /* Filters are per PF!! */
    SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID_MASK,
              GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
    SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID, pf_id);

    if (!(tcp && udp)) {
        SET_FIELD(cam_line,
                  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
                  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
        if (tcp)
            SET_FIELD(cam_line,
                      GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
                      GFT_PROFILE_TCP_PROTOCOL);
        else
            SET_FIELD(cam_line,
                      GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
                      GFT_PROFILE_UDP_PROTOCOL);
    }

    if (!(ipv4 && ipv6)) {
        SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
        if (ipv4)
            SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION,
                      GFT_PROFILE_IPV4);
        else
            SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION,
                      GFT_PROFILE_IPV6);
    }

    /* Write characteristics to cam */
    ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
             cam_line);
    cam_line = ecore_rd(p_hwfn, p_ptt,
                        PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);

    /* Write line to RAM - compare to filter 4 tuple */
    ram_line_lo = 0;
    ram_line_hi = 0;

    /* Search no IP as GFT */
    search_non_ip_as_gft = 0;

    /* Tunnel type */
    SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
    SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);

    if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
        SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
        SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
        SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
        SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
        SET_FIELD(ram_line_lo, GFT_RAM_LINE_SRC_PORT, 1);
        SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
    } else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) {
        SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
        SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
        SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
    } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) {
        SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
        SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
    } else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) {
        SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
        SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
    } else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) {
        SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);

        /* Allow tunneled traffic without inner IP */
        search_non_ip_as_gft = 1;
    }

    ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT,
             search_non_ip_as_gft);
    ecore_wr(p_hwfn, p_ptt,
             PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
             ram_line_lo);
    ecore_wr(p_hwfn, p_ptt,
             PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id +
             REG_SIZE, ram_line_hi);

    /* Set default profile so that no filter match will happen */
    ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
             PRS_GFT_CAM_LINES_NO_MATCH, 0xffffffff);
    ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
             PRS_GFT_CAM_LINES_NO_MATCH + REG_SIZE, 0x3ff);

    /* Enable gft search */
    ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
}
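
/* Usage sketch (illustrative): to steer TCP/IPv4 flows by 4-tuple on PF 0:
 *
 *   ecore_gft_config(p_hwfn, p_ptt, 0, true, false, true, false,
 *                    GFT_PROFILE_TYPE_4_TUPLE);
 */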
/* Configure VF zone size mode */
void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn,
                                    struct ecore_ptt *p_ptt, u16 mode,
                                    bool runtime_init)
{
    u32 msdm_vf_size_log = MSTORM_VF_ZONE_DEFAULT_SIZE_LOG;
    u32 msdm_vf_offset_mask;

    if (mode == VF_ZONE_SIZE_MODE_DOUBLE)
        msdm_vf_size_log += 1;
    else if (mode == VF_ZONE_SIZE_MODE_QUAD)
        msdm_vf_size_log += 2;

    msdm_vf_offset_mask = (1 << msdm_vf_size_log) - 1;

    if (runtime_init) {
        STORE_RT_REG(p_hwfn,
                     PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET,
                     msdm_vf_size_log);
        STORE_RT_REG(p_hwfn,
                     PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET,
                     msdm_vf_offset_mask);
    } else {
        ecore_wr(p_hwfn, p_ptt,
                 PGLUE_B_REG_MSDM_VF_SHIFT_B, msdm_vf_size_log);
        ecore_wr(p_hwfn, p_ptt,
                 PGLUE_B_REG_MSDM_OFFSET_MASK_B, msdm_vf_offset_mask);
    }
}
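
/* Illustrative mask math: if MSTORM_VF_ZONE_DEFAULT_SIZE_LOG were 7 (an
 * assumed value for this example only), VF_ZONE_SIZE_MODE_DOUBLE gives
 * msdm_vf_size_log = 8 and msdm_vf_offset_mask = (1 << 8) - 1 = 0xff.
 */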
/* Get mstorm statistics offset by VF zone size mode */
u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn,
                                       u16 stat_cnt_id,
                                       u16 vf_zone_size_mode)
{
    u32 offset = MSTORM_QUEUE_STAT_OFFSET(stat_cnt_id);

    if ((vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) &&
        (stat_cnt_id > MAX_NUM_PFS)) {
        if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
            offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
                      (stat_cnt_id - MAX_NUM_PFS);
        else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
            offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
                      (stat_cnt_id - MAX_NUM_PFS);
    }

    return offset;
}
/* Get mstorm VF producer offset by VF zone size mode */
u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn,
                                         u8 vf_id,
                                         u8 vf_queue_id,
                                         u16 vf_zone_size_mode)
{
    u32 offset = MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id);

    if (vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) {
        if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
            offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
                      vf_id;
        else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
            offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
                      vf_id;
    }

    return offset;
}
#ifndef LINUX_REMOVE
#define CRC8_INIT_VALUE 0xFF
#endif
static u8 cdu_crc8_table[CRC8_TABLE_SIZE];

/* Calculate and return CDU validation byte per connection type / region /
 * cid.
 */
static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
{
    const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;

    static u8 crc8_table_valid; /* automatically initialized to 0 */
    u8 crc, validation_byte = 0;
    u32 validation_string = 0;
    u32 data_to_crc;

    if (crc8_table_valid == 0) {
        OSAL_CRC8_POPULATE(cdu_crc8_table, 0x07);
        crc8_table_valid = 1;
    }

    /* The CRC is calculated on the String-to-compress:
     * [31:8] = {CID[31:20], CID[11:0]}
     * [7:4]  = Region
     * [3:0]  = Type
     */
    if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
        validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);

    if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
        validation_string |= ((region & 0xF) << 4);

    if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
        validation_string |= (conn_type & 0xF);

    /* Convert to big-endian and calculate CRC8 */
    data_to_crc = OSAL_BE32_TO_CPU(validation_string);

    crc = OSAL_CRC8(cdu_crc8_table, (u8 *)&data_to_crc,
                    sizeof(data_to_crc), CRC8_INIT_VALUE);

    /* The validation byte [7:0] is composed:
     * for type A validation
     * [7]   = active configuration bit
     * [6:0] = crc[6:0]
     *
     * for type B validation
     * [7]   = active configuration bit
     * [6:3] = connection_type[3:0]
     * [2:0] = crc[2:0]
     */
    validation_byte |= ((validation_cfg >>
                         CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;

    if ((validation_cfg >>
         CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
        validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
    else
        validation_byte |= crc & 0x7F;

    return validation_byte;
}
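
/* Worked example (illustrative, all USE_* config bits assumed set): for
 * cid = 0x12345, region 3, type 1 the string-to-compress is
 * (0x12345 & 0xFFF00000) | (0x345 << 8) | (3 << 4) | 1 = 0x00034531; the
 * CRC8 (polynomial 0x07) of its big-endian bytes then supplies the low bits
 * of the validation byte.
 */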
/* Calculate and set validation bytes for session context */
void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
                                       u8 ctx_type, u32 cid)
{
    u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;

    p_ctx = (u8 *)p_ctx_mem;
    x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
    t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
    u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];

    OSAL_MEMSET(p_ctx, 0, ctx_size);

    *x_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 3, cid);
    *t_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 4, cid);
    *u_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 5, cid);
}
/* Calculate and set validation bytes for task context */
void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type,
                                    u32 tid)
{
    u8 *p_ctx, *region1_val_ptr;

    p_ctx = (u8 *)p_ctx_mem;
    region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];

    OSAL_MEMSET(p_ctx, 0, ctx_size);

    *region1_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 1, tid);
}
/* Memset session context to 0 while preserving validation bytes */
void ecore_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
{
    u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
    u8 x_val, t_val, u_val;

    p_ctx = (u8 *)p_ctx_mem;
    x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
    t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
    u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];

    x_val = *x_val_ptr;
    t_val = *t_val_ptr;
    u_val = *u_val_ptr;

    OSAL_MEMSET(p_ctx, 0, ctx_size);

    *x_val_ptr = x_val;
    *t_val_ptr = t_val;
    *u_val_ptr = u_val;
}
/* Memset task context to 0 while preserving validation bytes */
void ecore_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
{
    u8 *p_ctx, *region1_val_ptr;
    u8 region1_val;

    p_ctx = (u8 *)p_ctx_mem;
    region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];

    region1_val = *region1_val_ptr;

    OSAL_MEMSET(p_ctx, 0, ctx_size);

    *region1_val_ptr = region1_val;
}
/* Enable and configure context validation */
void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,
                                     struct ecore_ptt *p_ptt)
{
    u32 ctx_validation;

    /* Enable validation for connection region 3 - bits [31:24] */
    ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24;
    ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation);

    /* Enable validation for connection region 5 - bits [15:8] */
    ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
    ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation);

    /* Enable validation for connection region 1 - bits [15:8] */
    ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
    ecore_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
}
/*******************************************************************************
 * File name : rdma_init.c
 * Author    : Michael Shteinbok
 *******************************************************************************
 *******************************************************************************
 * Description:
 * RDMA HSI functions
 *
 *******************************************************************************
 * Notes: This is the input to the auto generated file drv_init_fw_funcs.c
 *
 *******************************************************************************
 */
static u32 ecore_get_rdma_assert_ram_addr(struct ecore_hwfn *p_hwfn,
                                          u8 storm_id)
{
    switch (storm_id) {
    case 0: return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
                   TSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
    case 1: return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
                   MSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
    case 2: return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
                   USTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
    case 3: return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
                   XSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
    case 4: return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
                   YSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
    case 5: return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
                   PSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);

    default: return 0;
    }
}
void ecore_set_rdma_error_level(struct ecore_hwfn *p_hwfn,
                                struct ecore_ptt *p_ptt,
                                u8 assert_level[NUM_STORMS])
{
    u8 storm_id;

    for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
        u32 ram_addr = ecore_get_rdma_assert_ram_addr(p_hwfn, storm_id);

        ecore_wr(p_hwfn, p_ptt, ram_addr, assert_level[storm_id]);
    }
}
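
/* Usage sketch (illustrative): raise the RDMA assert level of every storm
 * (the six storms handled by the switch above) to 1:
 *
 *   u8 levels[NUM_STORMS] = { 1, 1, 1, 1, 1, 1 };
 *   ecore_set_rdma_error_level(p_hwfn, p_ptt, levels);
 */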