/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 */

#include "bcm_osal.h"
#include "ecore_hw.h"
#include "ecore_init_ops.h"
#include "ecore_rt_defs.h"
#include "ecore_hsi_common.h"
#include "ecore_hsi_init_func.h"
#include "ecore_hsi_eth.h"
#include "ecore_hsi_init_tool.h"
#include "ecore_iro.h"
#include "ecore_init_fw_funcs.h"

#define CDU_VALIDATION_DEFAULT_CFG 61

static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES] = {
	{ 400, 336, 352, 304, 304, 384, 416, 352}, /* region 3 offsets */
	{ 528, 496, 416, 448, 448, 512, 544, 480}, /* region 4 offsets */
	{ 608, 544, 496, 512, 576, 592, 624, 560}  /* region 5 offsets */
};

static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES] = {
	{ 240, 240, 112, 0, 0, 0, 0, 96}  /* region 1 offsets */
};

/* General constants */
#define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
	QM_PQ_ELEMENT_SIZE, 0x1000) : 0)
#define QM_PQ_SIZE_256B(pq_size) (pq_size ? DIV_ROUND_UP(pq_size, 0x100) - 1 : \
	0)
#define QM_INVALID_PQ_ID 0xffff

/* Feature enable */
#define QM_BYPASS_EN 1
#define QM_BYTE_CRD_EN 1

/* Other PQ constants */
#define QM_OTHER_PQS_PER_PF 4

/* VOQ constants */
#define QM_E5_NUM_EXT_VOQ (MAX_NUM_PORTS_E5 * NUM_OF_TCS)

/* WFQ constants: */

/* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */
#define QM_WFQ_UPPER_BOUND 62500000

/* Bit of VOQ in WFQ VP PQ map */
#define QM_WFQ_VP_PQ_VOQ_SHIFT 0

/* Bit of PF in WFQ VP PQ map */
#define QM_WFQ_VP_PQ_PF_E4_SHIFT 5
#define QM_WFQ_VP_PQ_PF_E5_SHIFT 6

/* 0x9000 = 4 * 9 * 1024 */
#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)

/* Max WFQ increment value is 0.7 * upper bound */
#define QM_WFQ_MAX_INC_VAL ((QM_WFQ_UPPER_BOUND * 7) / 10)

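/* Worked example (illustrative, not part of the original source): a WFQ
 * weight of 100 yields an increment of 100 * 0x9000 = 3,686,400 credit bytes
 * per period. With QM_WFQ_MAX_INC_VAL = 0.7 * 62,500,000 = 43,750,000, the
 * largest weight that passes the validation below is roughly
 * 43,750,000 / 36,864 ~= 1186.
 */
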
/* Number of VOQs in E5 QmWfqCrd register */
#define QM_WFQ_CRD_E5_NUM_VOQS 16

/* RL constants: */

/* Period in us */
#define QM_RL_PERIOD 5

/* Period in 25MHz cycles */
#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)

/* RL increment value - rate is specified in mbps. The factor of 1.01 was
 * added after seeing only 99% factor reached in a 25Gbps port with DPDK RFC
 * 2544 test. In this scenario the PF RL was reducing the line rate to 99%
 * although the credit increment value was the correct one and FW calculated
 * correct packet sizes. The reason for the inaccuracy of the RL is unknown at
 * this point.
 */
#define QM_RL_INC_VAL(rate) \
	OSAL_MAX_T(u32, (u32)(((rate ? rate : 100000) * QM_RL_PERIOD * 101) / \
	(8 * 100)), 1)

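/* Worked example (illustrative): for a 25Gbps limit, rate = 25000 Mbps and
 * QM_RL_INC_VAL(25000) = (25000 * 5 * 101) / (8 * 100) = 15781, i.e. the QM
 * adds ~15.8KB of credit per 5us period - 25Gbps plus the 1% compensation
 * factor described above. A rate of 0 falls back to 100000 Mbps rather than
 * disabling the rate limiter.
 */
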
/* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
#define QM_PF_RL_UPPER_BOUND 62500000

/* Max PF RL increment value is 0.7 * upper bound */
#define QM_PF_RL_MAX_INC_VAL ((QM_PF_RL_UPPER_BOUND * 7) / 10)

/* Vport RL Upper bound, link speed is in Mbps */
#define QM_VP_RL_UPPER_BOUND(speed) \
	((u32)OSAL_MAX_T(u32, QM_RL_INC_VAL(speed), 9700 + 1000))

/* Max Vport RL increment value is the Vport RL upper bound */
#define QM_VP_RL_MAX_INC_VAL(speed) QM_VP_RL_UPPER_BOUND(speed)

/* Vport RL credit threshold in case of QM bypass */
#define QM_VP_RL_BYPASS_THRESH_SPEED (QM_VP_RL_UPPER_BOUND(10000) - 1)

/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF 1
#define QM_OPPOR_FW_STOP_DEF 0
#define QM_OPPOR_PQ_EMPTY_DEF 1

/* Command Queue constants: */

/* Pure LB CmdQ lines (+spare) */
#define PBF_CMDQ_PURE_LB_LINES 150

#define PBF_CMDQ_LINES_E5_RSVD_RATIO 8

#define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
	(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
	 (ext_voq) * \
	 (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
	  PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))

#define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \
	(PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
	 (ext_voq) * \
	 (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
	  PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))

#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
	((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)

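/* Worked example (illustrative): QM_VOQ_LINE_CRD(PBF_CMDQ_PURE_LB_LINES)
 * evaluates to ((150 - 4) * 2) = 292 line credits, OR'd with the sign bit
 * required by the QM credit registers.
 */
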
/* BTB: blocks constants (block size = 256B) */

/* 256B blocks in 9700B packet */
#define BTB_JUMBO_PKT_BLOCKS 38

/* Headroom per-port */
#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
#define BTB_PURE_LB_FACTOR 10

/* Factored (hence really 0.7) */
#define BTB_PURE_LB_RATIO 7

/* QM stop command constants */
#define QM_STOP_PQ_MASK_WIDTH 32
#define QM_STOP_CMD_ADDR 2
#define QM_STOP_CMD_STRUCT_SIZE 2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0
#define QM_STOP_CMD_PAUSE_MASK_MASK 0xffffffff /* @DPDK */
#define QM_STOP_CMD_GROUP_ID_OFFSET 1
#define QM_STOP_CMD_GROUP_ID_SHIFT 16
#define QM_STOP_CMD_GROUP_ID_MASK 15
#define QM_STOP_CMD_PQ_TYPE_OFFSET 1
#define QM_STOP_CMD_PQ_TYPE_SHIFT 24
#define QM_STOP_CMD_PQ_TYPE_MASK 1
#define QM_STOP_CMD_MAX_POLL_COUNT 100
#define QM_STOP_CMD_POLL_PERIOD_US 500

/* QM command macros */
#define QM_CMD_STRUCT_SIZE(cmd) cmd##_STRUCT_SIZE
#define QM_CMD_SET_FIELD(var, cmd, field, value) \
	SET_FIELD(var[cmd##_##field##_OFFSET], cmd##_##field, value)

#define QM_INIT_TX_PQ_MAP(p_hwfn, map, pq_id, vp_pq_id, \
			  rl_valid, rl_id, voq, wrr) \
	do { \
		OSAL_MEMSET(&(map), 0, sizeof(map)); \
		SET_FIELD(map.reg, QM_RF_PQ_MAP_PQ_VALID, 1); \
		SET_FIELD(map.reg, QM_RF_PQ_MAP_RL_VALID, rl_valid ? 1 : 0); \
		SET_FIELD(map.reg, QM_RF_PQ_MAP_RL_ID, rl_id); \
		SET_FIELD(map.reg, QM_RF_PQ_MAP_VP_PQ_ID, vp_pq_id); \
		SET_FIELD(map.reg, QM_RF_PQ_MAP_VOQ, voq); \
		SET_FIELD(map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP, wrr); \
		STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + (pq_id), \
			     *((u32 *)&(map))); \
	} while (0)

#define WRITE_PQ_INFO_TO_RAM 1
#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
	(((vp) << 0) | ((pf) << 12) | ((tc) << 16) | \
	 ((port) << 20) | ((rl_valid) << 22) | ((rl) << 24))
#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
	(XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21776 + (pq_id) * 4)

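/* Field layout implied by PQ_INFO_ELEMENT (illustrative summary):
 * bits [11:0] = vp, [15:12] = pf, [19:16] = tc, [21:20] = port,
 * bit [22] = rl_valid, bits [31:24] = rl. Each PQ gets one such 32-bit word
 * in XSEM internal RAM, at offset 21776 + pq_id * 4.
 */
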
/******************** INTERNAL IMPLEMENTATION *********************/

/* Returns the external VOQ number */
static u8 ecore_get_ext_voq(struct ecore_hwfn *p_hwfn,
			    u8 port_id,
			    u8 tc,
			    u8 max_phys_tcs_per_port)
{
	if (tc == PURE_LB_TC)
		return NUM_OF_PHYS_TCS * (MAX_NUM_PORTS_BB) + port_id;

	return port_id * (max_phys_tcs_per_port) + tc;
}

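/* Example mapping (illustrative): with max_phys_tcs_per_port = 4, port 1 /
 * TC 2 maps to external VOQ 1 * 4 + 2 = 6, while the pure LB TC of any port
 * maps past all physical VOQs, to NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB +
 * port_id.
 */
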
/* Prepare PF RL enable/disable runtime init values */
static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn, bool pf_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
	if (pf_rl_en) {
		u8 num_ext_voqs = MAX_NUM_VOQS_E4;
		u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;

		/* Enable RLs for all VOQs */
		STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
			     (u32)voq_bit_mask);
#ifdef QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET
		if (num_ext_voqs >= 32)
			STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET,
				     (u32)(voq_bit_mask >> 32));
#endif

		/* Write RL period */
		STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIODTIMER_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);

		/* Set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
				     QM_PF_RL_UPPER_BOUND);
	}
}

/* Prepare PF WFQ enable/disable runtime init values */
static void ecore_enable_pf_wfq(struct ecore_hwfn *p_hwfn, bool pf_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);

	/* Set credit threshold for QM bypass flow */
	if (pf_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
			     QM_WFQ_UPPER_BOUND);
}

/* Prepare VPORT RL enable/disable runtime init values */
static void ecore_enable_vport_rl(struct ecore_hwfn *p_hwfn, bool vport_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
		     vport_rl_en ? 1 : 0);
	if (vport_rl_en) {
		/* Write RL period (use timer 0 only) */
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);

		/* Set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
				     QM_VP_RL_BYPASS_THRESH_SPEED);
	}
}

/* Prepare VPORT WFQ enable/disable runtime init values */
static void ecore_enable_vport_wfq(struct ecore_hwfn *p_hwfn, bool vport_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
		     vport_wfq_en ? 1 : 0);

	/* Set credit threshold for QM bypass flow */
	if (vport_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
			     QM_WFQ_UPPER_BOUND);
}

/* Prepare runtime init values to allocate PBF command queue lines for
 * the specified VOQ.
 */
static void ecore_cmdq_lines_voq_rt_init(struct ecore_hwfn *p_hwfn,
					 u8 ext_voq,
					 u16 cmdq_lines)
{
	u32 qm_line_crd;

	qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);

	OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq),
			 (u32)cmdq_lines);
	STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq,
		     qm_line_crd);
	STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq,
		     qm_line_crd);
}

/* Prepare runtime init values to allocate PBF command queue lines. */
static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,
				     u8 max_ports_per_engine,
				     u8 max_phys_tcs_per_port,
				     struct init_qm_port_params
				     port_params[MAX_NUM_PORTS])
{
	u8 tc, ext_voq, port_id, num_tcs_in_port;
	u8 num_ext_voqs = MAX_NUM_VOQS_E4;

	/* Clear PBF lines of all VOQs */
	for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
		STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0);

	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		u16 phys_lines, phys_lines_per_tc;

		if (!port_params[port_id].active)
			continue;

		/* Find number of command queue lines to divide between the
		 * active physical TCs. In E5, 1/8 of the lines are reserved,
		 * and the lines for the pure LB TC are subtracted.
		 */
		phys_lines = port_params[port_id].num_pbf_cmd_lines;
		phys_lines -= PBF_CMDQ_PURE_LB_LINES;

		/* Find #lines per active physical TC */
		num_tcs_in_port = 0;
		for (tc = 0; tc < max_phys_tcs_per_port; tc++)
			if (((port_params[port_id].active_phys_tcs >> tc) &
			      0x1) == 1)
				num_tcs_in_port++;
		phys_lines_per_tc = phys_lines / num_tcs_in_port;

		/* Init registers per active TC */
		for (tc = 0; tc < max_phys_tcs_per_port; tc++) {
			ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc,
						    max_phys_tcs_per_port);
			if (((port_params[port_id].active_phys_tcs >> tc) &
			     0x1) == 1)
				ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
							     phys_lines_per_tc);
		}

		/* Init registers for pure LB TC */
		ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC,
					    max_phys_tcs_per_port);
		ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
					     PBF_CMDQ_PURE_LB_LINES);
	}
}

/*
 * Prepare runtime init values to allocate guaranteed BTB blocks for the
 * specified port. The guaranteed BTB space is divided between the TCs as
 * follows (shared space is currently not used):
 * 1. Parameters:
 *    B - BTB blocks for this port
 *    C - Number of physical TCs for this port
 * 2. Calculation:
 *    a. 38 blocks (9700B jumbo frame) are allocated for global per-port
 *       headroom.
 *    b. B = B - 38 (remainder after global headroom allocation).
 *    c. MAX(38, B/(C+0.7)) blocks are allocated for the pure LB VOQ.
 *    d. B = B - MAX(38, B/(C+0.7)) (remainder after pure LB allocation).
 *    e. B/C blocks are allocated for each physical TC.
 * Assumptions:
 * - MTU is up to 9700 bytes (38 blocks)
 * - All TCs are considered symmetrical (same rate and packet size)
 * - No optimization for lossy TC (all are considered lossless). Shared space
 *   is not enabled and allocated for each TC.
 */
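/* Numeric walk-through (illustrative, assuming B = 1000 blocks, C = 4):
 * after the 38-block headroom, 962 usable blocks remain. Using the x10
 * factor to avoid floating point, pure LB gets
 * MAX(38, ((962 * 10) / (4 * 10 + 7)) / 10) = MAX(38, 20) = 38 blocks, and
 * each physical TC then gets (962 - 38) / 4 = 231 guaranteed blocks.
 */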
static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
				     u8 max_ports_per_engine,
				     u8 max_phys_tcs_per_port,
				     struct init_qm_port_params
				     port_params[MAX_NUM_PORTS])
{
	u32 usable_blocks, pure_lb_blocks, phys_blocks;
	u8 tc, ext_voq, port_id, num_tcs_in_port;

	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		if (!port_params[port_id].active)
			continue;

		/* Subtract headroom blocks */
		usable_blocks = port_params[port_id].num_btb_blocks -
				BTB_HEADROOM_BLOCKS;

		/* Find blocks per physical TC. Use factor to avoid floating
		 * arithmetic.
		 */
		num_tcs_in_port = 0;
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
			if (((port_params[port_id].active_phys_tcs >> tc) &
			      0x1) == 1)
				num_tcs_in_port++;

		pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
				  (num_tcs_in_port * BTB_PURE_LB_FACTOR +
				   BTB_PURE_LB_RATIO);
		pure_lb_blocks = OSAL_MAX_T(u32, BTB_JUMBO_PKT_BLOCKS,
					    pure_lb_blocks /
					    BTB_PURE_LB_FACTOR);
		phys_blocks = (usable_blocks - pure_lb_blocks) /
			      num_tcs_in_port;

		/* Init physical TCs */
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
			if (((port_params[port_id].active_phys_tcs >> tc) &
			     0x1) == 1) {
				ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc,
							max_phys_tcs_per_port);
				STORE_RT_REG(p_hwfn,
					PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
					phys_blocks);
			}
		}

		/* Init pure LB TC */
		ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC,
					    max_phys_tcs_per_port);
		STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
			     pure_lb_blocks);
	}
}

/* Prepare Tx PQ mapping runtime init values for the specified PF */
static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    u8 pf_id,
				    u8 max_phys_tcs_per_port,
				    bool is_pf_loading,
				    u32 num_pf_cids,
				    u32 num_vf_cids,
				    u16 start_pq,
				    u16 num_pf_pqs,
				    u16 num_vf_pqs,
				    u8 start_vport,
				    u32 base_mem_addr_4kb,
				    struct init_qm_pq_params *pq_params,
				    struct init_qm_vport_params *vport_params)
{
	/* A bit per Tx PQ indicating if the PQ is associated with a VF */
	u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
	u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
	u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group;
	u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;

	num_pqs = num_pf_pqs + num_vf_pqs;

	first_pq_group = start_pq / QM_PF_QUEUE_GROUP_SIZE;
	last_pq_group = (start_pq + num_pqs - 1) / QM_PF_QUEUE_GROUP_SIZE;

	pq_mem_4kb = QM_PQ_MEM_4KB(num_pf_cids);
	vport_pq_mem_4kb = QM_PQ_MEM_4KB(num_vf_cids);
	mem_addr_4kb = base_mem_addr_4kb;

	/* Set mapping from PQ group to PF */
	for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
		STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
			     (u32)(pf_id));

	/* Set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
		     QM_PQ_SIZE_256B(num_pf_cids));
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
		     QM_PQ_SIZE_256B(num_vf_cids));

	/* Go over all Tx PQs */
	for (i = 0, pq_id = start_pq; i < num_pqs; i++, pq_id++) {
		u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
		u16 first_tx_pq_id;
		u8 ext_voq, vport_id_in_pf;
		bool is_vf_pq, rl_valid;

		ext_voq = ecore_get_ext_voq(p_hwfn, pq_params[i].port_id,
					    pq_params[i].tc_id,
					    max_phys_tcs_per_port);
		is_vf_pq = (i >= num_pf_pqs);
		rl_valid = pq_params[i].rl_valid > 0;

		/* Update first Tx PQ of VPORT/TC */
		vport_id_in_pf = pq_params[i].vport_id - start_vport;
		first_tx_pq_id =
		vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].tc_id];
		if (first_tx_pq_id == QM_INVALID_PQ_ID) {
			u32 map_val = (ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
				      (pf_id << (QM_WFQ_VP_PQ_PF_E4_SHIFT));

			/* Create new VP PQ */
			vport_params[vport_id_in_pf].
			    first_tx_pq_id[pq_params[i].tc_id] = pq_id;
			first_tx_pq_id = pq_id;

			/* Map VP PQ to VOQ and PF */
			STORE_RT_REG(p_hwfn, QM_REG_WFQVPMAP_RT_OFFSET +
				     first_tx_pq_id, map_val);
		}

		/* Check RL ID */
		if (rl_valid && pq_params[i].vport_id >= max_qm_global_rls) {
			DP_NOTICE(p_hwfn, true,
				  "Invalid VPORT ID for rate limiter config\n");
			rl_valid = false;
		}

		/* Prepare PQ map entry */
		struct qm_rf_pq_map tx_pq_map;

		QM_INIT_TX_PQ_MAP(p_hwfn, tx_pq_map, pq_id, first_tx_pq_id,
				  rl_valid ? 1 : 0,
				  rl_valid ? pq_params[i].vport_id : 0,
				  ext_voq, pq_params[i].wrr_group);

		/* Set PQ base address */
		STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);

		/* Clear PQ pointer table entry (64 bit) */
		if (is_pf_loading)
			for (j = 0; j < 2; j++)
				STORE_RT_REG(p_hwfn, QM_REG_PTRTBLTX_RT_OFFSET +
					     (pq_id * 2) + j, 0);

		/* Write PQ info to RAM */
		if (WRITE_PQ_INFO_TO_RAM != 0) {
			u32 pq_info = 0;

			pq_info = PQ_INFO_ELEMENT(first_tx_pq_id, pf_id,
						  pq_params[i].tc_id,
						  pq_params[i].port_id,
						  rl_valid ? 1 : 0, rl_valid ?
						  pq_params[i].vport_id : 0);
			ecore_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),
				 pq_info);
		}

		/* If VF PQ, add indication to PQ VF mask */
		if (is_vf_pq) {
			tx_pq_vf_mask[pq_id / QM_PF_QUEUE_GROUP_SIZE] |=
				(1 << (pq_id % QM_PF_QUEUE_GROUP_SIZE));
			mem_addr_4kb += vport_pq_mem_4kb;
		} else {
			mem_addr_4kb += pq_mem_4kb;
		}
	}

	/* Store Tx PQ VF mask to size select register */
	for (i = 0; i < num_tx_pq_vf_masks; i++)
		if (tx_pq_vf_mask[i])
			STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +
				     i, tx_pq_vf_mask[i]);
}

/* Prepare Other PQ mapping runtime init values for the specified PF */
static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
				       u8 pf_id,
				       bool is_pf_loading,
				       u32 num_pf_cids,
				       u32 num_tids,
				       u32 base_mem_addr_4kb)
{
	u32 pq_size, pq_mem_4kb, mem_addr_4kb;
	u16 i, j, pq_id, pq_group;

	/* A single other PQ group is used in each PF, where PQ group i is used
	 * in PF i.
	 */
	pq_group = pf_id;
	pq_size = num_pf_cids + num_tids;
	pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
	mem_addr_4kb = base_mem_addr_4kb;

	/* Map PQ group to PF */
	STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
		     (u32)(pf_id));

	/* Set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
		     QM_PQ_SIZE_256B(pq_size));

	for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
	     i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
		/* Set PQ base address */
		STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);

		/* Clear PQ pointer table entry */
		if (is_pf_loading)
			for (j = 0; j < 2; j++)
				STORE_RT_REG(p_hwfn,
					     QM_REG_PTRTBLOTHER_RT_OFFSET +
					     (pq_id * 2) + j, 0);

		mem_addr_4kb += pq_mem_4kb;
	}
}

/* Prepare PF WFQ runtime init values for the specified PF.
 * Return -1 on error.
 */
static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
				u8 pf_id,
				u16 pf_wfq,
				u8 max_phys_tcs_per_port,
				u16 num_tx_pqs,
				struct init_qm_pq_params *pq_params)
{
	u32 inc_val, crd_reg_offset;
	u8 ext_voq;
	u16 i;

	inc_val = QM_WFQ_INC_VAL(pf_wfq);
	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid PF WFQ weight configuration\n");
		return -1;
	}

	for (i = 0; i < num_tx_pqs; i++) {
		ext_voq = ecore_get_ext_voq(p_hwfn, pq_params[i].port_id,
					    pq_params[i].tc_id,
					    max_phys_tcs_per_port);
		crd_reg_offset = (pf_id < MAX_NUM_PFS_BB ?
				  QM_REG_WFQPFCRD_RT_OFFSET :
				  QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
				 ext_voq * MAX_NUM_PFS_BB +
				 (pf_id % MAX_NUM_PFS_BB);
		OVERWRITE_RT_REG(p_hwfn, crd_reg_offset,
				 (u32)QM_WFQ_CRD_REG_SIGN_BIT);
	}

	STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET +
		     pf_id, QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val);

	return 0;
}

/* Prepare PF RL runtime init values for the specified PF.
 * Return -1 on error.
 */
static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
{
	u32 inc_val;

	inc_val = QM_RL_INC_VAL(pf_rl);
	if (inc_val > QM_PF_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid PF rate limit configuration\n");
		return -1;
	}

	STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
		     (u32)QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
		     QM_PF_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);

	return 0;
}

/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,
				u8 num_vports,
				struct init_qm_vport_params *vport_params)
{
	u16 vport_pq_id;
	u32 inc_val;
	u8 tc, i;

	/* Go over all PF VPORTs */
	for (i = 0; i < num_vports; i++) {
		if (!vport_params[i].wfq)
			continue;

		inc_val = QM_WFQ_INC_VAL(vport_params[i].wfq);
		if (inc_val > QM_WFQ_MAX_INC_VAL) {
			DP_NOTICE(p_hwfn, true,
				  "Invalid VPORT WFQ weight configuration\n");
			return -1;
		}

		/* Each VPORT can have several VPORT PQ IDs for various TCs */
		for (tc = 0; tc < NUM_OF_TCS; tc++) {
			vport_pq_id = vport_params[i].first_tx_pq_id[tc];
			if (vport_pq_id != QM_INVALID_PQ_ID) {
				STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET +
					     vport_pq_id,
					     (u32)QM_WFQ_CRD_REG_SIGN_BIT);
				STORE_RT_REG(p_hwfn,
					     QM_REG_WFQVPWEIGHT_RT_OFFSET +
					     vport_pq_id, inc_val);
			}
		}
	}

	return 0;
}

/* Prepare VPORT RL runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
				  u8 start_vport,
				  u8 num_vports,
				  u32 link_speed,
				  struct init_qm_vport_params *vport_params)
{
	u8 i, vport_id;
	u32 inc_val;

	if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid VPORT ID for rate limiter configuration\n");
		return -1;
	}

	/* Go over all PF VPORTs */
	for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
		inc_val = QM_RL_INC_VAL(link_speed);
		if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
			DP_NOTICE(p_hwfn, true,
				  "Invalid VPORT rate-limit configuration\n");
			return -1;
		}

		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
			     (u32)QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
			     QM_VP_RL_UPPER_BOUND(link_speed) |
			     (u32)QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
			     inc_val);
	}

	return 0;
}

static bool ecore_poll_on_qm_cmd_ready(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt)
{
	u32 reg_val, i;

	for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val;
	     i++) {
		OSAL_UDELAY(QM_STOP_CMD_POLL_PERIOD_US);
		reg_val = ecore_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
	}

	/* Check if timeout while waiting for SDM command ready */
	if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG,
			   "Timeout waiting for QM SDM cmd ready signal\n");
		return false;
	}

	return true;
}

static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      u32 cmd_addr,
			      u32 cmd_data_lsb,
			      u32 cmd_data_msb)
{
	if (!ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
		return false;

	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);

	return ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
}

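/* The SDM command flow above is: wait until QM_REG_SDMCMDREADY reads
 * non-zero, write the command address and 64 bits of command data, pulse
 * QM_REG_SDMCMDGO (1 then 0), and poll for ready again to confirm
 * completion. Illustrative caller sketch (hypothetical values):
 *
 *	if (!ecore_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR,
 *			       cmd_arr[0], cmd_arr[1]))
 *		return false;
 */
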
/******************** INTERFACE IMPLEMENTATION *********************/

u32 ecore_qm_pf_mem_size(u32 num_pf_cids,
			 u32 num_vf_cids,
			 u32 num_tids,
			 u16 num_pf_pqs,
			 u16 num_vf_pqs)
{
	return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
	       QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
	       QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
}

int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
			    u8 max_ports_per_engine,
			    u8 max_phys_tcs_per_port,
			    bool pf_rl_en,
			    bool pf_wfq_en,
			    bool vport_rl_en,
			    bool vport_wfq_en,
			    struct init_qm_port_params
			    port_params[MAX_NUM_PORTS])
{
	u32 mask;

	/* Init AFullOprtnstcCrdMask */
	mask = (QM_OPPOR_LINE_VOQ_DEF <<
		QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
	       (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
	       (pf_wfq_en << QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
	       (vport_wfq_en << QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
	       (pf_rl_en << QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
	       (vport_rl_en << QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
	       (QM_OPPOR_FW_STOP_DEF <<
		QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
	       (QM_OPPOR_PQ_EMPTY_DEF <<
		QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
	STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);

	/* Enable/disable PF RL */
	ecore_enable_pf_rl(p_hwfn, pf_rl_en);

	/* Enable/disable PF WFQ */
	ecore_enable_pf_wfq(p_hwfn, pf_wfq_en);

	/* Enable/disable VPORT RL */
	ecore_enable_vport_rl(p_hwfn, vport_rl_en);

	/* Enable/disable VPORT WFQ */
	ecore_enable_vport_wfq(p_hwfn, vport_wfq_en);

	/* Init PBF CMDQ line credit */
	ecore_cmdq_lines_rt_init(p_hwfn, max_ports_per_engine,
				 max_phys_tcs_per_port, port_params);

	/* Init BTB blocks in PBF */
	ecore_btb_blocks_rt_init(p_hwfn, max_ports_per_engine,
				 max_phys_tcs_per_port, port_params);

	return 0;
}

int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt,
			u8 pf_id,
			u8 max_phys_tcs_per_port,
			bool is_pf_loading,
			u32 num_pf_cids,
			u32 num_vf_cids,
			u32 num_tids,
			u16 start_pq,
			u16 num_pf_pqs,
			u16 num_vf_pqs,
			u8 start_vport,
			u8 num_vports,
			u16 pf_wfq,
			u32 pf_rl,
			u32 link_speed,
			struct init_qm_pq_params *pq_params,
			struct init_qm_vport_params *vport_params)
{
	u32 other_mem_size_4kb;
	u8 tc, i;

	other_mem_size_4kb = QM_PQ_MEM_4KB(num_pf_cids + num_tids) *
			     QM_OTHER_PQS_PER_PF;

	/* Clear first Tx PQ ID array for each VPORT */
	for (i = 0; i < num_vports; i++)
		for (tc = 0; tc < NUM_OF_TCS; tc++)
			vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;

	/* Map Other PQs (if any) */
#if QM_OTHER_PQS_PER_PF > 0
	ecore_other_pq_map_rt_init(p_hwfn, pf_id, is_pf_loading, num_pf_cids,
				   num_tids, 0);
#endif

	/* Map Tx PQs */
	ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, pf_id, max_phys_tcs_per_port,
				is_pf_loading, num_pf_cids, num_vf_cids,
				start_pq, num_pf_pqs, num_vf_pqs, start_vport,
				other_mem_size_4kb, pq_params, vport_params);

	/* Init PF WFQ */
	if (pf_wfq)
		if (ecore_pf_wfq_rt_init(p_hwfn, pf_id, pf_wfq,
					 max_phys_tcs_per_port,
					 num_pf_pqs + num_vf_pqs, pq_params))
			return -1;

	/* Init PF RL */
	if (ecore_pf_rl_rt_init(p_hwfn, pf_id, pf_rl))
		return -1;

	/* Set VPORT WFQ */
	if (ecore_vp_wfq_rt_init(p_hwfn, num_vports, vport_params))
		return -1;

	/* Set VPORT RL */
	if (ecore_vport_rl_rt_init
	    (p_hwfn, start_vport, num_vports, link_speed, vport_params))
		return -1;

	return 0;
}

int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
		      struct ecore_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
{
	u32 inc_val;

	inc_val = QM_WFQ_INC_VAL(pf_wfq);
	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid PF WFQ weight configuration\n");
		return -1;
	}

	ecore_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);

	return 0;
}

int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
		     struct ecore_ptt *p_ptt, u8 pf_id, u32 pf_rl)
{
	u32 inc_val;

	inc_val = QM_RL_INC_VAL(pf_rl);
	if (inc_val > QM_PF_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid PF rate limit configuration\n");
		return -1;
	}

	ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFCRD + pf_id * 4,
		 (u32)QM_RL_CRD_REG_SIGN_BIT);
	ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);

	return 0;
}

int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
			 struct ecore_ptt *p_ptt,
			 u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
{
	u16 vport_pq_id;
	u32 inc_val;
	u8 tc;

	inc_val = QM_WFQ_INC_VAL(vport_wfq);
	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid VPORT WFQ weight configuration\n");
		return -1;
	}

	for (tc = 0; tc < NUM_OF_TCS; tc++) {
		vport_pq_id = first_tx_pq_id[tc];
		if (vport_pq_id != QM_INVALID_PQ_ID) {
			ecore_wr(p_hwfn, p_ptt,
				 QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
		}
	}

	return 0;
}

int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, u8 vport_id,
			u32 vport_rl, u32 link_speed)
{
	u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;

	if (vport_id >= max_qm_global_rls) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid VPORT ID for rate limiter configuration\n");
		return -1;
	}

	inc_val = QM_RL_INC_VAL(vport_rl ? vport_rl : link_speed);
	if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid VPORT rate-limit configuration\n");
		return -1;
	}

	ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + vport_id * 4,
		 (u32)QM_RL_CRD_REG_SIGN_BIT);
	ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);

	return 0;
}

bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    bool is_release_cmd,
			    bool is_tx_pq, u16 start_pq, u16 num_pqs)
{
	u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
	u32 pq_mask = 0, last_pq, pq_id;

	last_pq = start_pq + num_pqs - 1;

	/* Set command's PQ type */
	QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);

	/* Go over requested PQs */
	for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
		/* Set PQ bit in mask (stop command only) */
		if (!is_release_cmd)
			pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));

		/* If last PQ or end of PQ mask, write command */
		if ((pq_id == last_pq) ||
		    (pq_id % QM_STOP_PQ_MASK_WIDTH ==
		     (QM_STOP_PQ_MASK_WIDTH - 1))) {
			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PAUSE_MASK,
					 pq_mask);
			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, GROUP_ID,
					 pq_id / QM_STOP_PQ_MASK_WIDTH);
			if (!ecore_send_qm_cmd
			    (p_hwfn, p_ptt, QM_STOP_CMD_ADDR, cmd_arr[0],
			     cmd_arr[1]))
				return false;
			pq_mask = 0;
		}
	}

	return true;
}

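/* Worked example (illustrative): stopping PQs 30..40 spans two 32-bit PQ
 * mask groups. The loop above first sends a command with GROUP_ID 0 and a
 * pq_mask covering bits 30-31 (when pq_id reaches the end of the mask
 * width at 31), then a second command with GROUP_ID 1 and bits 0-8 for PQs
 * 32..40 (when pq_id reaches last_pq).
 */
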
/* NIG: ETS configuration constants */
#define NIG_TX_ETS_CLIENT_OFFSET 4
#define NIG_LB_ETS_CLIENT_OFFSET 1
#define NIG_ETS_MIN_WFQ_BYTES 1600

/* NIG: ETS constants */
#define NIG_ETS_UP_BOUND(weight, mtu) \
	(2 * ((weight) > (mtu) ? (weight) : (mtu)))

/* NIG: RL constants */

/* Byte base type value */
#define NIG_RL_BASE_TYPE 1

/* Period in us */
#define NIG_RL_PERIOD 1

/* Period in 25MHz cycles */
#define NIG_RL_PERIOD_CLK_25M (25 * NIG_RL_PERIOD)

/* Rate in mbps */
#define NIG_RL_INC_VAL(rate) (((rate) * NIG_RL_PERIOD) / 8)

#define NIG_RL_MAX_VAL(inc_val, mtu) \
	(2 * ((inc_val) > (mtu) ? (inc_val) : (mtu)))

/* NIG: packet priority configuration constants */
#define NIG_PRIORITY_MAP_TC_BITS 4

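/* Worked example (illustrative): NIG_RL_INC_VAL(10000) = (10000 * 1) / 8
 * = 1250, i.e. a 10Gbps limit adds 1250 credit bytes per 1us period
 * (10000 Mbps / 8 bits-per-byte = 1250 MB/s). NIG_RL_MAX_VAL then caps the
 * credit bucket at twice the larger of the increment and the MTU.
 */
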
void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt,
			struct init_ets_req *req, bool is_lb)
{
	u32 min_weight, tc_weight_base_addr, tc_weight_addr_diff;
	u32 tc_bound_base_addr, tc_bound_addr_diff;
	u8 sp_tc_map = 0, wfq_tc_map = 0;
	u8 tc, num_tc, tc_client_offset;

	num_tc = is_lb ? NUM_OF_TCS : NUM_OF_PHYS_TCS;
	tc_client_offset = is_lb ? NIG_LB_ETS_CLIENT_OFFSET :
				   NIG_TX_ETS_CLIENT_OFFSET;
	min_weight = 0xffffffff;
	tc_weight_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
				      NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
	tc_weight_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_1 -
				      NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
				      NIG_REG_TX_ARB_CREDIT_WEIGHT_1 -
				      NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
	tc_bound_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
				     NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
	tc_bound_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 -
				     NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
				     NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 -
				     NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;

	for (tc = 0; tc < num_tc; tc++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];

		/* Update SP map */
		if (tc_req->use_sp)
			sp_tc_map |= (1 << tc);

		if (!tc_req->use_wfq)
			continue;

		/* Update WFQ map */
		wfq_tc_map |= (1 << tc);

		/* Find minimal weight */
		if (tc_req->weight < min_weight)
			min_weight = tc_req->weight;
	}

	/* Write SP map */
	ecore_wr(p_hwfn, p_ptt,
		 is_lb ? NIG_REG_LB_ARB_CLIENT_IS_STRICT :
		 NIG_REG_TX_ARB_CLIENT_IS_STRICT,
		 (sp_tc_map << tc_client_offset));

	/* Write WFQ map */
	ecore_wr(p_hwfn, p_ptt,
		 is_lb ? NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ :
		 NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
		 (wfq_tc_map << tc_client_offset));

	/* Write WFQ weights */
	for (tc = 0; tc < num_tc; tc++, tc_client_offset++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];
		u32 byte_weight;

		if (!tc_req->use_wfq)
			continue;

		/* Translate weight to bytes */
		byte_weight = (NIG_ETS_MIN_WFQ_BYTES * tc_req->weight) /
			      min_weight;

		/* Write WFQ weight */
		ecore_wr(p_hwfn, p_ptt, tc_weight_base_addr +
			 tc_weight_addr_diff * tc_client_offset, byte_weight);

		/* Write WFQ upper bound */
		ecore_wr(p_hwfn, p_ptt, tc_bound_base_addr +
			 tc_bound_addr_diff * tc_client_offset,
			 NIG_ETS_UP_BOUND(byte_weight, req->mtu));
	}
}

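/* Worked example (illustrative): with two WFQ TCs of weights 1 and 2,
 * min_weight = 1, so the TCs are programmed with byte weights of
 * 1600 * 1 / 1 = 1600 and 1600 * 2 / 1 = 3200 respectively; the smallest
 * weight thus always maps to NIG_ETS_MIN_WFQ_BYTES, and each upper bound
 * is 2 * MAX(byte_weight, mtu).
 */
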
void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
			  struct ecore_ptt *p_ptt,
			  struct init_nig_lb_rl_req *req)
{
	u32 ctrl, inc_val, reg_offset;
	u8 tc;

	/* Disable global MAC+LB RL */
	ctrl = NIG_RL_BASE_TYPE <<
	    NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT;
	ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);

	/* Configure and enable global MAC+LB RL */
	if (req->lb_mac_rate) {
		/* Configure */
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD,
			 NIG_RL_PERIOD_CLK_25M);
		inc_val = NIG_RL_INC_VAL(req->lb_mac_rate);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_VALUE,
			 inc_val);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_MAX_VALUE,
			 NIG_RL_MAX_VAL(inc_val, req->mtu));

		/* Enable */
		ctrl |= 1 <<
		   NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT;
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
	}

	/* Disable global LB-only RL */
	ctrl = NIG_RL_BASE_TYPE <<
	    NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT;
	ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);

	/* Configure and enable global LB-only RL */
	if (req->lb_rate) {
		/* Configure */
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_PERIOD,
			 NIG_RL_PERIOD_CLK_25M);
		inc_val = NIG_RL_INC_VAL(req->lb_rate);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_VALUE,
			 inc_val);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_MAX_VALUE,
			 NIG_RL_MAX_VAL(inc_val, req->mtu));

		/* Enable */
		ctrl |=
		    1 << NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT;
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
	}

	/* Go over all physical TCs */
	for (tc = 0, reg_offset = 0; tc < NUM_OF_PHYS_TCS;
	     tc++, reg_offset += 4) {
		/* Disable TC RL */
		ctrl = NIG_RL_BASE_TYPE <<
		 NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT;
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl);

		/* Configure and enable TC RL */
		if (!req->tc_rate[tc])
			continue;

		/* Configure */
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 +
			 reg_offset, NIG_RL_PERIOD_CLK_25M);
		inc_val = NIG_RL_INC_VAL(req->tc_rate[tc]);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 +
			 reg_offset, inc_val);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 +
			 reg_offset, NIG_RL_MAX_VAL(inc_val, req->mtu));

		/* Enable */
		ctrl |= 1 <<
		    NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT;
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_CTRL_0 +
			 reg_offset, ctrl);
	}
}

void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,
			       struct init_nig_pri_tc_map_req *req)
{
	u8 tc_pri_mask[NUM_OF_PHYS_TCS] = { 0 };
	u32 pri_tc_mask = 0;
	u8 pri, tc;

	for (pri = 0; pri < NUM_OF_VLAN_PRIORITIES; pri++) {
		if (!req->pri[pri].valid)
			continue;

		pri_tc_mask |= (req->pri[pri].tc_id <<
				(pri * NIG_PRIORITY_MAP_TC_BITS));
		tc_pri_mask[req->pri[pri].tc_id] |= (1 << pri);
	}

	/* Write priority -> TC mask */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_PKT_PRIORITY_TO_TC, pri_tc_mask);

	/* Write TC -> priority mask */
	for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
		ecore_wr(p_hwfn, p_ptt, NIG_REG_PRIORITY_FOR_TC_0 + tc * 4,
			 tc_pri_mask[tc]);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_TC0_PRIORITY_MASK + tc * 4,
			 tc_pri_mask[tc]);
	}
}

/* PRS: ETS configuration constants */
#define PRS_ETS_MIN_WFQ_BYTES 1600
#define PRS_ETS_UP_BOUND(weight, mtu) \
	(2 * ((weight) > (mtu) ? (weight) : (mtu)))

void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, struct init_ets_req *req)
{
	u32 tc_weight_addr_diff, tc_bound_addr_diff, min_weight = 0xffffffff;
	u8 tc, sp_tc_map = 0, wfq_tc_map = 0;

	tc_weight_addr_diff = PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 -
			      PRS_REG_ETS_ARB_CREDIT_WEIGHT_0;
	tc_bound_addr_diff = PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 -
			     PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0;

	for (tc = 0; tc < NUM_OF_TCS; tc++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];

		/* Update SP map */
		if (tc_req->use_sp)
			sp_tc_map |= (1 << tc);

		if (!tc_req->use_wfq)
			continue;

		/* Update WFQ map */
		wfq_tc_map |= (1 << tc);

		/* Find minimal weight */
		if (tc_req->weight < min_weight)
			min_weight = tc_req->weight;
	}

	/* Write SP map */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_STRICT, sp_tc_map);

	/* Write WFQ map */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ,
		 wfq_tc_map);

	/* Write WFQ weights */
	for (tc = 0; tc < NUM_OF_TCS; tc++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];
		u32 byte_weight;

		if (!tc_req->use_wfq)
			continue;

		/* Translate weight to bytes */
		byte_weight = (PRS_ETS_MIN_WFQ_BYTES * tc_req->weight) /
			      min_weight;

		/* Write WFQ weight */
		ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 + tc *
			 tc_weight_addr_diff, byte_weight);

		/* Write WFQ upper bound */
		ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 +
			 tc * tc_bound_addr_diff, PRS_ETS_UP_BOUND(byte_weight,
								   req->mtu));
	}
}

/* BRB: RAM configuration constants */
#define BRB_TOTAL_RAM_BLOCKS_BB 4800
#define BRB_TOTAL_RAM_BLOCKS_K2 5632
#define BRB_BLOCK_SIZE 128
#define BRB_MIN_BLOCKS_PER_TC 9
#define BRB_HYST_BYTES 10240
#define BRB_HYST_BLOCKS (BRB_HYST_BYTES / BRB_BLOCK_SIZE)

/* Temporary big RAM allocation - should be updated */
void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, struct init_brb_ram_req *req)
{
	u32 tc_headroom_blocks, min_pkt_size_blocks, total_blocks;
	u32 active_port_blocks, reg_offset = 0;
	u8 port, active_ports = 0;

	tc_headroom_blocks = (u32)DIV_ROUND_UP(req->headroom_per_tc,
					       BRB_BLOCK_SIZE);
	min_pkt_size_blocks = (u32)DIV_ROUND_UP(req->min_pkt_size,
						BRB_BLOCK_SIZE);
	total_blocks = ECORE_IS_K2(p_hwfn->p_dev) ? BRB_TOTAL_RAM_BLOCKS_K2 :
						    BRB_TOTAL_RAM_BLOCKS_BB;

	/* Find number of active ports */
	for (port = 0; port < MAX_NUM_PORTS; port++)
		if (req->num_active_tcs[port])
			active_ports++;

	active_port_blocks = (u32)(total_blocks / active_ports);

	for (port = 0; port < req->max_ports_per_engine; port++) {
		u32 port_blocks, port_shared_blocks, port_guaranteed_blocks;
		u32 full_xoff_th, full_xon_th, pause_xoff_th, pause_xon_th;
		u32 tc_guaranteed_blocks;
		u8 tc;

		/* Calculate per-port sizes */
		tc_guaranteed_blocks = (u32)DIV_ROUND_UP(req->guranteed_per_tc,
							 BRB_BLOCK_SIZE);
		port_blocks = req->num_active_tcs[port] ? active_port_blocks :
							  0;
		port_guaranteed_blocks = req->num_active_tcs[port] *
					 tc_guaranteed_blocks;
		port_shared_blocks = port_blocks - port_guaranteed_blocks;
		full_xoff_th = req->num_active_tcs[port] *
			       BRB_MIN_BLOCKS_PER_TC;
		full_xon_th = full_xoff_th + min_pkt_size_blocks;
		pause_xoff_th = tc_headroom_blocks;
		pause_xon_th = pause_xoff_th + min_pkt_size_blocks;

		/* Init total size per port */
		ecore_wr(p_hwfn, p_ptt, BRB_REG_TOTAL_MAC_SIZE + port * 4,
			 port_blocks);

		/* Init shared size per port */
		ecore_wr(p_hwfn, p_ptt, BRB_REG_SHARED_HR_AREA + port * 4,
			 port_shared_blocks);

		for (tc = 0; tc < NUM_OF_TCS; tc++, reg_offset += 4) {
			/* Clear init values for non-active TCs */
			if (tc == req->num_active_tcs[port]) {
				tc_guaranteed_blocks = 0;
				full_xoff_th = 0;
				full_xon_th = 0;
				pause_xoff_th = 0;
				pause_xon_th = 0;
			}

			/* Init guaranteed size per TC */
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_TC_GUARANTIED_0 + reg_offset,
				 tc_guaranteed_blocks);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_MAIN_TC_GUARANTIED_HYST_0 + reg_offset,
				 BRB_HYST_BLOCKS);

			/* Init pause/full thresholds per physical TC - for
			 * loopback traffic.
			 */
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 +
				 reg_offset, full_xoff_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_LB_TC_FULL_XON_THRESHOLD_0 +
				 reg_offset, full_xon_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_LB_TC_PAUSE_XOFF_THRESHOLD_0 +
				 reg_offset, pause_xoff_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 +
				 reg_offset, pause_xon_th);

			/* Init pause/full thresholds per physical TC - for
			 * main traffic.
			 */
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 +
				 reg_offset, full_xoff_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_MAIN_TC_FULL_XON_THRESHOLD_0 +
				 reg_offset, full_xon_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_MAIN_TC_PAUSE_XOFF_THRESHOLD_0 +
				 reg_offset, pause_xoff_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_MAIN_TC_PAUSE_XON_THRESHOLD_0 +
				 reg_offset, pause_xon_th);
		}
	}
}

/* In MF mode, should be called once per port to set the EtherType of
 * OuterTag.
 */
void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType)
{
	/* Update DORQ register */
	STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, ethType);
}

#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
	(var = ((var) & ~(1 << (offset))) | ((enable) ? (1 << (offset)) : 0))
#define PRS_ETH_TUNN_OUTPUT_FORMAT -188897008
#define PRS_ETH_OUTPUT_FORMAT -46832

void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt, u16 dest_port)
{
	/* Update PRS register */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);

	/* Update NIG register */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);

	/* Update PBF register */
	ecore_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
}

void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt, bool vxlan_enable)
{
	u32 reg_val;

	/* Update PRS register */
	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT,
		vxlan_enable);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) { /* TODO: handle E5 init */
		reg_val = ecore_rd(p_hwfn, p_ptt,
				   PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);

		/* Update output only if tunnel blocks not included. */
		if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
			ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
				 (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
	}

	/* Update NIG register */
	reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT,
		vxlan_enable);
	ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

	/* Update DORQ register */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
		 vxlan_enable ? 1 : 0);
}

void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
			  struct ecore_ptt *p_ptt,
			  bool eth_gre_enable, bool ip_gre_enable)
{
	u32 reg_val;

	/* Update PRS register */
	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT,
		eth_gre_enable);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT,
		ip_gre_enable);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) { /* TODO: handle E5 init */
		reg_val = ecore_rd(p_hwfn, p_ptt,
				   PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);

		/* Update output only if tunnel blocks not included. */
		if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
			ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
				 (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
	}

	/* Update NIG register */
	reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT,
		eth_gre_enable);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT,
		ip_gre_enable);
	ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

	/* Update DORQ registers */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
		 eth_gre_enable ? 1 : 0);
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
		 ip_gre_enable ? 1 : 0);
}

void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt, u16 dest_port)
{
	/* Update PRS register */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);

	/* Update NIG register */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);

	/* Update PBF register */
	ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
}

void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt,
			     bool eth_geneve_enable, bool ip_geneve_enable)
{
	u32 reg_val;

	/* Update PRS register */
	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT,
		eth_geneve_enable);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT,
		ip_geneve_enable);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) { /* TODO: handle E5 init */
		reg_val = ecore_rd(p_hwfn, p_ptt,
				   PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);

		/* Update output only if tunnel blocks not included. */
		if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
			ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
				 (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
	}

	/* Update NIG register */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
		 eth_geneve_enable ? 1 : 0);
	ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE,
		 ip_geneve_enable ? 1 : 0);

	/* EDPM with geneve tunnel not supported in BB */
	if (ECORE_IS_BB_B0(p_hwfn->p_dev))
		return;

	/* Update DORQ registers */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2,
		 eth_geneve_enable ? 1 : 0);
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2,
		 ip_geneve_enable ? 1 : 0);
}

#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 4
#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -927094512

void ecore_set_vxlan_no_l2_enable(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt,
				  bool enable)
{
	u32 reg_val, cfg_mask;

	/* Read PRS config register */
	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_MSG_INFO);

	/* Set VXLAN_NO_L2_ENABLE mask */
	cfg_mask = (1 << PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET);

	if (enable) {
		/* Set VXLAN_NO_L2_ENABLE flag */
		reg_val |= cfg_mask;

		/* Update PRS FIC register */
		ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
			 (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT);
	} else {
		/* Clear VXLAN_NO_L2_ENABLE flag */
		reg_val &= ~cfg_mask;
	}

	/* Write PRS config register */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, reg_val);
}

#define T_ETH_PACKET_ACTION_GFT_EVENTID 23
#define PARSER_ETH_CONN_GFT_ACTION_CM_HDR 272
#define T_ETH_PACKET_MATCH_RFS_EVENTID 25
#define PARSER_ETH_CONN_CM_HDR 0
#define CAM_LINE_SIZE sizeof(u32)
#define RAM_LINE_SIZE sizeof(u64)
#define REG_SIZE sizeof(u32)

void ecore_gft_disable(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt,
		       u16 pf_id)
{
	/* Disable gft search for PF */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);

	/* Clean ram & cam for next gft session */

	/* Zero camline */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);

	/* Zero ramline */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
		 RAM_LINE_SIZE * pf_id, 0);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
		 RAM_LINE_SIZE * pf_id + REG_SIZE, 0);
}

void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt)
{
	u32 rfs_cm_hdr_event_id;

	/* Set RFS event ID to be awakened in Tstorm by PRS */
	rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
	rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID <<
			       PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
	rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR <<
			       PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
	ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
}

void ecore_gft_config(struct ecore_hwfn *p_hwfn,
		      struct ecore_ptt *p_ptt,
		      u16 pf_id,
		      bool tcp,
		      bool udp,
		      bool ipv4,
		      bool ipv6,
		      enum gft_profile_type profile_type)
{
	u32 reg_val, cam_line, ram_line_lo, ram_line_hi, search_non_ip_as_gft;

	if (!ipv6 && !ipv4)
		DP_NOTICE(p_hwfn, true,
			  "gft_config: must accept at least one of - ipv4 or ipv6\n");
	if (!tcp && !udp)
		DP_NOTICE(p_hwfn, true,
			  "gft_config: must accept at least one of - udp or tcp\n");
	if (profile_type >= MAX_GFT_PROFILE_TYPE)
		DP_NOTICE(p_hwfn, true,
			  "gft_config: unsupported gft_profile_type\n");

	/* Set RFS event ID to be awakened in Tstorm by PRS */
	reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID <<
		  PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
	reg_val |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
	ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, reg_val);

	/* Do not load context only cid in PRS on match. */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);

	/* Do not use tenant ID exist bit for gft search */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TENANT_ID, 0);

	/* Set Cam */
	cam_line = 0;
	SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_VALID, 1);

	/* Filters are per PF!! */
	SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID_MASK,
		  GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
	SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID, pf_id);

	if (!(tcp && udp)) {
		SET_FIELD(cam_line,
			  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
			  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
		if (tcp)
			SET_FIELD(cam_line,
				  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
				  GFT_PROFILE_TCP_PROTOCOL);
		else
			SET_FIELD(cam_line,
				  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
				  GFT_PROFILE_UDP_PROTOCOL);
	}

	if (!(ipv4 && ipv6)) {
		SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
		if (ipv4)
			SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION,
				  GFT_PROFILE_IPV4);
		else
			SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION,
				  GFT_PROFILE_IPV6);
	}

	/* Write characteristics to cam */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
		 cam_line);
	cam_line = ecore_rd(p_hwfn, p_ptt,
			    PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);

	/* Write line to RAM - compare to filter 4 tuple */
	ram_line_lo = 0;
	ram_line_hi = 0;

	/* Search no IP as GFT */
	search_non_ip_as_gft = 0;

	/* Tunnel type */
	SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
	SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);

	if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
		SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
		SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
		SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
		SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
		SET_FIELD(ram_line_lo, GFT_RAM_LINE_SRC_PORT, 1);
		SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) {
		SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
		SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
		SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) {
		SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
		SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) {
		SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
		SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) {
		SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);

		/* Allow tunneled traffic without inner IP */
		search_non_ip_as_gft = 1;
	}

	ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT,
		 search_non_ip_as_gft);
	ecore_wr(p_hwfn, p_ptt,
		 PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
		 ram_line_lo);
	ecore_wr(p_hwfn, p_ptt,
		 PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id +
		 REG_SIZE, ram_line_hi);

	/* Set default profile so that no filter match will happen */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
		 PRS_GFT_CAM_LINES_NO_MATCH, 0xffffffff);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
		 PRS_GFT_CAM_LINES_NO_MATCH + REG_SIZE, 0x3ff);

	/* Enable gft search */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
}

/* Configure VF zone size mode */
void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt, u16 mode,
				    bool runtime_init)
{
	u32 msdm_vf_size_log = MSTORM_VF_ZONE_DEFAULT_SIZE_LOG;
	u32 msdm_vf_offset_mask;

	if (mode == VF_ZONE_SIZE_MODE_DOUBLE)
		msdm_vf_size_log += 1;
	else if (mode == VF_ZONE_SIZE_MODE_QUAD)
		msdm_vf_size_log += 2;

	msdm_vf_offset_mask = (1 << msdm_vf_size_log) - 1;

	if (runtime_init) {
		STORE_RT_REG(p_hwfn,
			     PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET,
			     msdm_vf_size_log);
		STORE_RT_REG(p_hwfn,
			     PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET,
			     msdm_vf_offset_mask);
	} else {
		ecore_wr(p_hwfn, p_ptt,
			 PGLUE_B_REG_MSDM_VF_SHIFT_B, msdm_vf_size_log);
		ecore_wr(p_hwfn, p_ptt,
			 PGLUE_B_REG_MSDM_OFFSET_MASK_B, msdm_vf_offset_mask);
	}
}

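/* Worked example (illustrative): with mode == VF_ZONE_SIZE_MODE_QUAD the
 * zone size log grows by 2, so each VF zone becomes 4x the default size,
 * and msdm_vf_offset_mask = (1 << msdm_vf_size_log) - 1 covers the offset
 * bits within one (enlarged) VF zone.
 */
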
/* Get mstorm statistics offset by VF zone size mode */
u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn,
				       u16 stat_cnt_id,
				       u16 vf_zone_size_mode)
{
	u32 offset = MSTORM_QUEUE_STAT_OFFSET(stat_cnt_id);

	if ((vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) &&
	    (stat_cnt_id > MAX_NUM_PFS)) {
		if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
			offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
				  (stat_cnt_id - MAX_NUM_PFS);
		else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
			offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
				  (stat_cnt_id - MAX_NUM_PFS);
	}

	return offset;
}

/* Get mstorm VF producer offset by VF zone size mode */
u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn,
					 u8 vf_id,
					 u8 vf_queue_id,
					 u16 vf_zone_size_mode)
{
	u32 offset = MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id);

	if (vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) {
		if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
			offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
				  vf_id;
		else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
			offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
				  vf_id;
	}

	return offset;
}

#ifndef LINUX_REMOVE
#define CRC8_INIT_VALUE 0xFF
#endif
static u8 cdu_crc8_table[CRC8_TABLE_SIZE];

/* Calculate and return CDU validation byte per connection type / region /
 * cid.
 */
static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
{
	const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;

	static u8 crc8_table_valid; /* automatically initialized to 0 */
	u8 crc, validation_byte = 0;
	u32 validation_string = 0;
	u32 data_to_crc;

	if (crc8_table_valid == 0) {
		OSAL_CRC8_POPULATE(cdu_crc8_table, 0x07);
		crc8_table_valid = 1;
	}

	/* The CRC is calculated on the String-to-compress:
	 * [31:8]  = {CID[31:20], CID[11:0]}
	 * [7:4]   = Region
	 * [3:0]   = Type
	 */
	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
		validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);

	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
		validation_string |= ((region & 0xF) << 4);

	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
		validation_string |= (conn_type & 0xF);

	/* Convert to big-endian and calculate CRC8 */
	data_to_crc = OSAL_BE32_TO_CPU(validation_string);

	crc = OSAL_CRC8(cdu_crc8_table, (u8 *)&data_to_crc, sizeof(data_to_crc),
			CRC8_INIT_VALUE);

	/* The validation byte [7:0] is composed:
	 * for type A validation
	 * [7]   = active configuration bit
	 * [6:0] = crc[6:0]
	 *
	 * for type B validation
	 * [7]   = active configuration bit
	 * [6:3] = connection_type[3:0]
	 * [2:0] = crc[2:0]
	 */
	validation_byte |= ((validation_cfg >>
			     CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;

	if ((validation_cfg >>
	     CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
		validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
	else
		validation_byte |= crc & 0x7F;

	return validation_byte;
}

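/* Worked example (illustrative, assuming all USE_* bits are set in the
 * default config): for cid = 0x12345678, region = 3, type = 0, the
 * string-to-compress is (0x12345678 & 0xFFF00000) |
 * ((0x12345678 & 0xFFF) << 8) | (3 << 4) | 0 = 0x12367830; it is then
 * byte-swapped and run through CRC8 (polynomial 0x07) to form the low bits
 * of the validation byte.
 */
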
/* Calculate and set validation bytes for session context */
void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
				       u8 ctx_type, u32 cid)
{
	u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;

	p_ctx = (u8 *)p_ctx_mem;
	x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
	t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
	u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];

	OSAL_MEMSET(p_ctx, 0, ctx_size);

	*x_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 3, cid);
	*t_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 4, cid);
	*u_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 5, cid);
}

/* Calculate and set validation bytes for task context */
void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type,
				    u32 tid)
{
	u8 *p_ctx, *region1_val_ptr;

	p_ctx = (u8 *)p_ctx_mem;
	region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];

	OSAL_MEMSET(p_ctx, 0, ctx_size);

	*region1_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 1, tid);
}

/* Memset session context to 0 while preserving validation bytes */
void ecore_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
{
	u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
	u8 x_val, t_val, u_val;

	p_ctx = (u8 *)p_ctx_mem;
	x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
	t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
	u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];

	/* Save validation bytes */
	x_val = *x_val_ptr;
	t_val = *t_val_ptr;
	u_val = *u_val_ptr;

	OSAL_MEMSET(p_ctx, 0, ctx_size);

	/* Restore validation bytes */
	*x_val_ptr = x_val;
	*t_val_ptr = t_val;
	*u_val_ptr = u_val;
}

/* Memset task context to 0 while preserving validation bytes */
void ecore_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
{
	u8 *p_ctx, *region1_val_ptr;
	u8 region1_val;

	p_ctx = (u8 *)p_ctx_mem;
	region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];

	/* Save validation byte */
	region1_val = *region1_val_ptr;

	OSAL_MEMSET(p_ctx, 0, ctx_size);

	/* Restore validation byte */
	*region1_val_ptr = region1_val;
}

/* Enable and configure context validation */
void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	u32 ctx_validation;

	/* Enable validation for connection region 3 - bits [31:24] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24;
	ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation);

	/* Enable validation for connection region 5 - bits [15:8] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
	ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation);

	/* Enable validation for connection region 1 - bits [15:8] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
	ecore_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
}

/*******************************************************************************
 * File name : rdma_init.c
 * Author : Michael Shteinbok
 *******************************************************************************
 *******************************************************************************
 * Description:
 * RDMA HSI functions
 *
 *******************************************************************************
 * Notes: This is the input to the auto generated file drv_init_fw_funcs.c
 *
 *******************************************************************************
 */
static u32 ecore_get_rdma_assert_ram_addr(struct ecore_hwfn *p_hwfn,
					  u8 storm_id)
{
	switch (storm_id) {
	case 0: return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       TSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
	case 1: return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       MSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
	case 2: return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       USTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
	case 3: return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       XSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
	case 4: return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       YSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
	case 5: return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       PSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);

	default: return 0;
	}
}

void ecore_set_rdma_error_level(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt,
				u8 assert_level[NUM_STORMS])
{
	u8 storm_id;

	for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
		u32 ram_addr = ecore_get_rdma_assert_ram_addr(p_hwfn, storm_id);

		ecore_wr(p_hwfn, p_ptt, ram_addr, assert_level[storm_id]);
	}
}