/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */
9 #include "ecore_init_ops.h"
11 #include "ecore_rt_defs.h"
12 #include "ecore_hsi_common.h"
13 #include "ecore_hsi_init_func.h"
14 #include "ecore_hsi_eth.h"
15 #include "ecore_hsi_init_tool.h"
16 #include "ecore_iro.h"
17 #include "ecore_init_fw_funcs.h"
#define CDU_VALIDATION_DEFAULT_CFG 61

static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES] = {
	{ 400, 336, 352, 304, 304, 384, 416, 352}, /* region 3 offsets */
	{ 528, 496, 416, 448, 448, 512, 544, 480}, /* region 4 offsets */
	{ 608, 544, 496, 512, 576, 592, 624, 560}  /* region 5 offsets */
};

static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES] = {
	{ 240, 240, 112, 0, 0, 0, 0, 96}  /* region 1 offsets */
};
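
/* Both tables are indexed as [region - first region][connection type];
 * e.g. con_region_offsets[0][conn_type] holds the region 3 offset used for
 * the X validation byte in ecore_calc_session_ctx_validation() below.
 */
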
/* General constants */
#define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
				QM_PQ_ELEMENT_SIZE, 0x1000) : 0)
#define QM_PQ_SIZE_256B(pq_size) (pq_size ? DIV_ROUND_UP(pq_size, 0x100) - 1 : \
				  0)
#define QM_INVALID_PQ_ID 0xffff

/* Feature enable */
#define QM_BYPASS_EN 1
#define QM_BYTE_CRD_EN 1

/* Other PQ constants */
#define QM_OTHER_PQS_PER_PF 4

#define QM_E5_NUM_EXT_VOQ (MAX_NUM_PORTS_E5 * NUM_OF_TCS)
/* WFQ constants: */

/* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */
#define QM_WFQ_UPPER_BOUND 62500000

/* Bit of VOQ in WFQ VP PQ map */
#define QM_WFQ_VP_PQ_VOQ_SHIFT 0

/* Bit of PF in WFQ VP PQ map */
#define QM_WFQ_VP_PQ_PF_E4_SHIFT 5
#define QM_WFQ_VP_PQ_PF_E5_SHIFT 6

/* 0x9000 = 4*9*1024 */
#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)

/* Max WFQ increment value is 0.7 * upper bound */
#define QM_WFQ_MAX_INC_VAL ((QM_WFQ_UPPER_BOUND * 7) / 10)

/* WFQ credit register sign bit */
#define QM_WFQ_CRD_REG_SIGN_BIT (1 << 31)

/* Number of VOQs in E5 QmWfqCrd register */
#define QM_WFQ_CRD_E5_NUM_VOQS 16
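
/* Worked example (illustrative only): a WFQ weight of 1 yields an increment
 * of 0x9000 = 36864 byte credits per round; since the increment must not
 * exceed QM_WFQ_MAX_INC_VAL = 62500000 * 7 / 10 = 43750000, the largest
 * usable weight is 43750000 / 36864 ~= 1186.
 */
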
/* RL constants: */

/* Period in us */
#define QM_RL_PERIOD 5

/* Period in 25MHz cycles */
#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)

/* RL increment value - rate is specified in mbps. The factor of 1.01 was
 * added after seeing only a 99% factor reached in a 25Gbps port with DPDK RFC
 * 2544 test. In this scenario the PF RL was reducing the line rate to 99%
 * although the credit increment value was the correct one and FW calculated
 * correct packet sizes. The reason for the inaccuracy of the RL is unknown at
 * this point.
 */
#define QM_RL_INC_VAL(rate) \
	OSAL_MAX_T(u32, (u32)(((rate ? rate : 100000) * QM_RL_PERIOD * 101) / \
	(8 * 100)), 1)

/* RL credit register sign bit */
#define QM_RL_CRD_REG_SIGN_BIT (1 << 31)
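
/* Worked example (illustrative only): for a 25Gbps port (rate = 25000 Mbps),
 * QM_RL_INC_VAL(25000) = max((25000 * 5 * 101) / (8 * 100), 1)
 *                      = 12625000 / 800 = 15781 byte credits per period;
 * a rate of 0 falls back to the 100000 Mbps default.
 */
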
/* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
#define QM_PF_RL_UPPER_BOUND 62500000

/* Max PF RL increment value is 0.7 * upper bound */
#define QM_PF_RL_MAX_INC_VAL ((QM_PF_RL_UPPER_BOUND * 7) / 10)

/* Vport RL Upper bound, link speed is in Mbps */
#define QM_VP_RL_UPPER_BOUND(speed) \
	((u32)OSAL_MAX_T(u32, QM_RL_INC_VAL(speed), 9700 + 1000))

/* Max Vport RL increment value is the Vport RL upper bound */
#define QM_VP_RL_MAX_INC_VAL(speed) QM_VP_RL_UPPER_BOUND(speed)

/* Vport RL credit threshold in case of QM bypass */
#define QM_VP_RL_BYPASS_THRESH_SPEED (QM_VP_RL_UPPER_BOUND(10000) - 1)
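
/* Worked example (illustrative only): QM_VP_RL_UPPER_BOUND(10000) =
 * max(QM_RL_INC_VAL(10000), 9700 + 1000) = max(6312, 10700) = 10700, so
 * QM_VP_RL_BYPASS_THRESH_SPEED is 10699; at higher link speeds the
 * increment term dominates the jumbo-frame term.
 */
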
/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF 1
#define QM_OPPOR_FW_STOP_DEF 0
#define QM_OPPOR_PQ_EMPTY_DEF 1

/* Command Queue constants: */

/* Pure LB CmdQ lines (+spare) */
#define PBF_CMDQ_PURE_LB_LINES 150

#define PBF_CMDQ_LINES_E5_RSVD_RATIO 8
#define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
	(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
	 ((ext_voq) * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
		       PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET)))

#define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \
	(PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
	 ((ext_voq) * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
		       PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET)))

/* Line credit register sign bit */
#define QM_LINE_CRD_REG_SIGN_BIT (1 << 31)

#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
	((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
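
/* Worked example (illustrative only): for the pure LB VOQ with
 * PBF_CMDQ_PURE_LB_LINES = 150, QM_VOQ_LINE_CRD(150) = ((150 - 4) * 2) with
 * the sign bit set, i.e. 292 line credits in the register's signed format.
 */
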
/* BTB: blocks constants (block size = 256B) */

/* 256B blocks in 9700B packet */
#define BTB_JUMBO_PKT_BLOCKS 38

/* Headroom per-port */
#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
#define BTB_PURE_LB_FACTOR 10

/* Factored (hence really 0.7) */
#define BTB_PURE_LB_RATIO 7
/* QM stop command constants */
#define QM_STOP_PQ_MASK_WIDTH 32
#define QM_STOP_CMD_ADDR 2
#define QM_STOP_CMD_STRUCT_SIZE 2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0
#define QM_STOP_CMD_PAUSE_MASK_MASK 0xffffffff /* @DPDK */
#define QM_STOP_CMD_GROUP_ID_OFFSET 1
#define QM_STOP_CMD_GROUP_ID_SHIFT 16
#define QM_STOP_CMD_GROUP_ID_MASK 15
#define QM_STOP_CMD_PQ_TYPE_OFFSET 1
#define QM_STOP_CMD_PQ_TYPE_SHIFT 24
#define QM_STOP_CMD_PQ_TYPE_MASK 1
#define QM_STOP_CMD_MAX_POLL_COUNT 100
#define QM_STOP_CMD_POLL_PERIOD_US 500
/* QM command macros */
#define QM_CMD_STRUCT_SIZE(cmd) cmd##_STRUCT_SIZE
#define QM_CMD_SET_FIELD(var, cmd, field, value) \
	SET_FIELD(var[cmd##_##field##_OFFSET], cmd##_##field, value)

#define QM_INIT_TX_PQ_MAP(p_hwfn, map, pq_id, vp_pq_id, \
			  rl_valid, rl_id, voq, wrr) \
	do { \
		OSAL_MEMSET(&(map), 0, sizeof(map)); \
		SET_FIELD(map.reg, QM_RF_PQ_MAP_PQ_VALID, 1); \
		SET_FIELD(map.reg, QM_RF_PQ_MAP_RL_VALID, rl_valid ? 1 : 0); \
		SET_FIELD(map.reg, QM_RF_PQ_MAP_RL_ID, rl_id); \
		SET_FIELD(map.reg, QM_RF_PQ_MAP_VP_PQ_ID, vp_pq_id); \
		SET_FIELD(map.reg, QM_RF_PQ_MAP_VOQ, voq); \
		SET_FIELD(map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP, wrr); \
		STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + (pq_id), \
			     *((u32 *)&(map))); \
	} while (0)
#define WRITE_PQ_INFO_TO_RAM 1
#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
	(((vp) << 0) | ((pf) << 12) | ((tc) << 16) | \
	 ((port) << 20) | ((rl_valid) << 22) | ((rl) << 24))
#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
	(XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21776 + (pq_id) * 4)
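
/* Worked example (illustrative only): PQ_INFO_ELEMENT(5, 2, 1, 0, 1, 3)
 * packs vp = 5 into bits [11:0], pf = 2 into [15:12], tc = 1 into [19:16],
 * port = 0 into [21:20], rl_valid = 1 into bit 22 and rl = 3 into [31:24],
 * producing 0x03412005.
 */
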
/******************** INTERNAL IMPLEMENTATION *********************/

/* Returns the external VOQ number */
static u8 ecore_get_ext_voq(struct ecore_hwfn *p_hwfn,
			    u8 port_id,
			    u8 tc,
			    u8 max_phys_tcs_per_port)
{
	if (tc == PURE_LB_TC)
		return NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + port_id;

	return port_id * max_phys_tcs_per_port + tc;
}
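
/* Worked example (illustrative only): with max_phys_tcs_per_port = 4,
 * port 1 / TC 2 maps to external VOQ 1 * 4 + 2 = 6, while the pure LB TC of
 * any port maps past all physical-TC VOQs, to
 * NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + port_id.
 */
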
/* Prepare PF RL enable/disable runtime init values */
static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn, bool pf_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
	if (pf_rl_en) {
		u8 num_ext_voqs = MAX_NUM_VOQS_E4;
		u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;

		/* Enable RLs for all VOQs */
		STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
			     (u32)voq_bit_mask);
#ifdef QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET
		if (num_ext_voqs >= 32)
			STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET,
				     (u32)(voq_bit_mask >> 32));
#endif

		/* Write RL period */
		STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIODTIMER_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);

		/* Set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
				     QM_PF_RL_UPPER_BOUND);
	}
}
/* Prepare PF WFQ enable/disable runtime init values */
static void ecore_enable_pf_wfq(struct ecore_hwfn *p_hwfn, bool pf_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);

	/* Set credit threshold for QM bypass flow */
	if (pf_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
			     QM_WFQ_UPPER_BOUND);
}
/* Prepare VPORT RL enable/disable runtime init values */
static void ecore_enable_vport_rl(struct ecore_hwfn *p_hwfn, bool vport_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
		     vport_rl_en ? 1 : 0);
	if (vport_rl_en) {
		/* Write RL period (use timer 0 only) */
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);

		/* Set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
				     QM_VP_RL_BYPASS_THRESH_SPEED);
	}
}
/* Prepare VPORT WFQ enable/disable runtime init values */
static void ecore_enable_vport_wfq(struct ecore_hwfn *p_hwfn, bool vport_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
		     vport_wfq_en ? 1 : 0);

	/* Set credit threshold for QM bypass flow */
	if (vport_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
			     QM_WFQ_UPPER_BOUND);
}
/* Prepare runtime init values to allocate PBF command queue lines for
 * the specified VOQ.
 */
static void ecore_cmdq_lines_voq_rt_init(struct ecore_hwfn *p_hwfn,
					 u8 ext_voq, u16 cmdq_lines)
{
	u32 qm_line_crd;

	qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);

	OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq),
			 (u32)cmdq_lines);
	STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq,
		     qm_line_crd);
	STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq,
		     qm_line_crd);
}
/* Prepare runtime init values to allocate PBF command queue lines. */
static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,
				     u8 max_ports_per_engine,
				     u8 max_phys_tcs_per_port,
				     struct init_qm_port_params
				     port_params[MAX_NUM_PORTS])
{
	u8 tc, ext_voq, port_id, num_tcs_in_port;
	u8 num_ext_voqs = MAX_NUM_VOQS_E4;

	/* Clear PBF lines of all VOQs */
	for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
		STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0);

	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		u16 phys_lines, phys_lines_per_tc;

		if (!port_params[port_id].active)
			continue;

		/* Find number of command queue lines to divide between the
		 * active physical TCs. In E5, 1/8 of the lines are reserved;
		 * the lines for the pure LB TC are subtracted.
		 */
		phys_lines = port_params[port_id].num_pbf_cmd_lines;
		phys_lines -= PBF_CMDQ_PURE_LB_LINES;

		/* Find #lines per active physical TC */
		num_tcs_in_port = 0;
		for (tc = 0; tc < max_phys_tcs_per_port; tc++)
			if (((port_params[port_id].active_phys_tcs >> tc) &
			      0x1) == 1)
				num_tcs_in_port++;
		phys_lines_per_tc = phys_lines / num_tcs_in_port;

		/* Init registers per active TC */
		for (tc = 0; tc < max_phys_tcs_per_port; tc++) {
			ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc,
						    max_phys_tcs_per_port);
			if (((port_params[port_id].active_phys_tcs >> tc) &
			     0x1) == 1)
				ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
							     phys_lines_per_tc);
		}

		/* Init registers for pure LB TC */
		ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC,
					    max_phys_tcs_per_port);
		ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
					     PBF_CMDQ_PURE_LB_LINES);
	}
}
/*
 * Prepare runtime init values to allocate guaranteed BTB blocks for the
 * specified port. The guaranteed BTB space is divided between the TCs as
 * follows (shared space is currently not used):
 * 1. Parameters:
 *    B - BTB blocks for this port
 *    C - Number of physical TCs for this port
 * 2. Calculation:
 *    a. 38 blocks (9700B jumbo frame) are allocated for global per-port
 *       headroom.
 *    b. B = B - 38 (remainder after global headroom allocation).
 *    c. MAX(38, B / (C + 0.7)) blocks are allocated for the pure LB VOQ.
 *    d. B = B - MAX(38, B / (C + 0.7)) (remainder after pure LB allocation).
 *    e. B / C blocks are allocated for each physical TC.
 * Assumptions:
 * - MTU is up to 9700 bytes (38 blocks)
 * - All TCs are considered symmetrical (same rate and packet size)
 * - No optimization for lossy TC (all are considered lossless). Shared space
 *   is not enabled and allocated for each TC.
 */
static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
				     u8 max_ports_per_engine,
				     u8 max_phys_tcs_per_port,
				     struct init_qm_port_params
				     port_params[MAX_NUM_PORTS])
{
	u32 usable_blocks, pure_lb_blocks, phys_blocks;
	u8 tc, ext_voq, port_id, num_tcs_in_port;

	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		if (!port_params[port_id].active)
			continue;

		/* Subtract headroom blocks */
		usable_blocks = port_params[port_id].num_btb_blocks -
				BTB_HEADROOM_BLOCKS;

		/* Find blocks per physical TC. use factor to avoid floating
		 * arithmetic.
		 */
		num_tcs_in_port = 0;
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
			if (((port_params[port_id].active_phys_tcs >> tc) &
			      0x1) == 1)
				num_tcs_in_port++;

		pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
				  (num_tcs_in_port * BTB_PURE_LB_FACTOR +
				   BTB_PURE_LB_RATIO);
		pure_lb_blocks = OSAL_MAX_T(u32, BTB_JUMBO_PKT_BLOCKS,
					    pure_lb_blocks /
					    BTB_PURE_LB_FACTOR);
		phys_blocks = (usable_blocks - pure_lb_blocks) /
			      num_tcs_in_port;

		/* Init physical TCs */
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
			if (((port_params[port_id].active_phys_tcs >> tc) &
			     0x1) == 1) {
				ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc,
							    max_phys_tcs_per_port);
				STORE_RT_REG(p_hwfn,
					     PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
					     phys_blocks);
			}
		}

		/* Init pure LB TC */
		ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC,
					    max_phys_tcs_per_port);
		STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
			     pure_lb_blocks);
	}
}
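
/* Worked example for the scheme above (illustrative only): with
 * num_btb_blocks = 1000 and 4 active physical TCs:
 *   usable blocks = 1000 - 38 (headroom)             = 962
 *   pure LB       = max(38, 962 * 10 / (4 * 10 + 7)) = max(38, 204) = 204
 *   per TC        = (962 - 204) / 4                  = 189 blocks
 * The factor of 10 (BTB_PURE_LB_FACTOR) keeps the C + 0.7 divisor in integer
 * arithmetic.
 */
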
/* Prepare Tx PQ mapping runtime init values for the specified PF */
static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    u8 pf_id,
				    u8 max_phys_tcs_per_port,
				    bool is_pf_loading,
				    u32 num_pf_cids,
				    u32 num_vf_cids,
				    u16 start_pq,
				    u16 num_pf_pqs,
				    u16 num_vf_pqs,
				    u8 start_vport,
				    u32 base_mem_addr_4kb,
				    struct init_qm_pq_params *pq_params,
				    struct init_qm_vport_params *vport_params)
{
	/* A bit per Tx PQ indicating if the PQ is associated with a VF */
	u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
	u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
	u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group;
	u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;

	num_pqs = num_pf_pqs + num_vf_pqs;

	first_pq_group = start_pq / QM_PF_QUEUE_GROUP_SIZE;
	last_pq_group = (start_pq + num_pqs - 1) / QM_PF_QUEUE_GROUP_SIZE;

	pq_mem_4kb = QM_PQ_MEM_4KB(num_pf_cids);
	vport_pq_mem_4kb = QM_PQ_MEM_4KB(num_vf_cids);
	mem_addr_4kb = base_mem_addr_4kb;

	/* Set mapping from PQ group to PF */
	for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
		STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
			     (u32)(pf_id));

	/* Set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
		     QM_PQ_SIZE_256B(num_pf_cids));
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
		     QM_PQ_SIZE_256B(num_vf_cids));

	/* Go over all Tx PQs */
	for (i = 0, pq_id = start_pq; i < num_pqs; i++, pq_id++) {
		u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
		u16 first_tx_pq_id;
		u8 ext_voq, vport_id_in_pf;
		bool is_vf_pq, rl_valid;

		ext_voq = ecore_get_ext_voq(p_hwfn, pq_params[i].port_id,
					    pq_params[i].tc_id,
					    max_phys_tcs_per_port);
		is_vf_pq = (i >= num_pf_pqs);
		rl_valid = pq_params[i].rl_valid > 0;

		/* Update first Tx PQ of VPORT/TC */
		vport_id_in_pf = pq_params[i].vport_id - start_vport;
		first_tx_pq_id =
		vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].tc_id];
		if (first_tx_pq_id == QM_INVALID_PQ_ID) {
			u32 map_val = (ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
				      (pf_id << QM_WFQ_VP_PQ_PF_E4_SHIFT);

			/* Create new VP PQ */
			vport_params[vport_id_in_pf].
			    first_tx_pq_id[pq_params[i].tc_id] = pq_id;
			first_tx_pq_id = pq_id;

			/* Map VP PQ to VOQ and PF */
			STORE_RT_REG(p_hwfn, QM_REG_WFQVPMAP_RT_OFFSET +
				     first_tx_pq_id, map_val);
		}

		/* Check RL ID */
		if (rl_valid && pq_params[i].vport_id >= max_qm_global_rls) {
			DP_NOTICE(p_hwfn, true,
				  "Invalid VPORT ID for rate limiter config\n");
			rl_valid = false;
		}

		/* Prepare PQ map entry */
		struct qm_rf_pq_map tx_pq_map;

		QM_INIT_TX_PQ_MAP(p_hwfn, tx_pq_map, pq_id, first_tx_pq_id,
				  rl_valid ? 1 : 0,
				  rl_valid ? pq_params[i].vport_id : 0,
				  ext_voq, pq_params[i].wrr_group);

		/* Set PQ base address */
		STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);

		/* Clear PQ pointer table entry (64 bit) */
		if (is_pf_loading)
			for (j = 0; j < 2; j++)
				STORE_RT_REG(p_hwfn, QM_REG_PTRTBLTX_RT_OFFSET +
					     (pq_id * 2) + j, 0);

		/* Write PQ info to RAM */
		if (WRITE_PQ_INFO_TO_RAM != 0) {
			u32 pq_info = 0;

			pq_info = PQ_INFO_ELEMENT(first_tx_pq_id, pf_id,
						  pq_params[i].tc_id,
						  pq_params[i].port_id,
						  rl_valid ? 1 : 0, rl_valid ?
						  pq_params[i].vport_id : 0);
			ecore_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),
				 pq_info);
		}

		/* If VF PQ, add indication to PQ VF mask */
		if (is_vf_pq) {
			tx_pq_vf_mask[pq_id / QM_PF_QUEUE_GROUP_SIZE] |=
				(1 << (pq_id % QM_PF_QUEUE_GROUP_SIZE));
			mem_addr_4kb += vport_pq_mem_4kb;
		} else {
			mem_addr_4kb += pq_mem_4kb;
		}
	}

	/* Store Tx PQ VF mask to size select register */
	for (i = 0; i < num_tx_pq_vf_masks; i++)
		if (tx_pq_vf_mask[i])
			STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +
				     i, tx_pq_vf_mask[i]);
}
/* Prepare Other PQ mapping runtime init values for the specified PF */
static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
				       u8 pf_id,
				       bool is_pf_loading,
				       u32 num_pf_cids,
				       u32 num_tids,
				       u32 base_mem_addr_4kb)
{
	u32 pq_size, pq_mem_4kb, mem_addr_4kb;
	u16 i, j, pq_id, pq_group;

	/* A single other PQ group is used in each PF, where PQ group i is used
	 * in PF i.
	 */
	pq_group = pf_id;
	pq_size = num_pf_cids + num_tids;
	pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
	mem_addr_4kb = base_mem_addr_4kb;

	/* Map PQ group to PF */
	STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
		     (u32)(pf_id));

	/* Set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
		     QM_PQ_SIZE_256B(pq_size));

	for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
	     i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
		/* Set PQ base address */
		STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);

		/* Clear PQ pointer table entry */
		if (is_pf_loading)
			for (j = 0; j < 2; j++)
				STORE_RT_REG(p_hwfn,
					     QM_REG_PTRTBLOTHER_RT_OFFSET +
					     (pq_id * 2) + j, 0);

		mem_addr_4kb += pq_mem_4kb;
	}
}
/* Prepare PF WFQ runtime init values for the specified PF.
 * Return -1 on error.
 */
static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
				u8 pf_id,
				u16 pf_wfq,
				u8 max_phys_tcs_per_port,
				u16 num_tx_pqs,
				struct init_qm_pq_params *pq_params)
{
	u32 inc_val, crd_reg_offset;
	u8 ext_voq;
	u16 i;

	inc_val = QM_WFQ_INC_VAL(pf_wfq);
	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid PF WFQ weight configuration\n");
		return -1;
	}

	for (i = 0; i < num_tx_pqs; i++) {
		ext_voq = ecore_get_ext_voq(p_hwfn, pq_params[i].port_id,
					    pq_params[i].tc_id,
					    max_phys_tcs_per_port);
		crd_reg_offset = (pf_id < MAX_NUM_PFS_BB ?
				  QM_REG_WFQPFCRD_RT_OFFSET :
				  QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
				 ext_voq * MAX_NUM_PFS_BB +
				 (pf_id % MAX_NUM_PFS_BB);
		OVERWRITE_RT_REG(p_hwfn, crd_reg_offset,
				 (u32)QM_WFQ_CRD_REG_SIGN_BIT);
	}

	STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET +
		     pf_id, QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val);

	return 0;
}
/* Prepare PF RL runtime init values for the specified PF.
 * Return -1 on error.
 */
static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
{
	u32 inc_val;

	inc_val = QM_RL_INC_VAL(pf_rl);
	if (inc_val > QM_PF_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid PF rate limit configuration\n");
		return -1;
	}

	STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
		     (u32)QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
		     QM_PF_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);

	return 0;
}
/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,
				u8 num_vports,
				struct init_qm_vport_params *vport_params)
{
	u16 vport_pq_id;
	u32 inc_val;
	u8 tc, i;

	/* Go over all PF VPORTs */
	for (i = 0; i < num_vports; i++) {
		if (!vport_params[i].vport_wfq)
			continue;

		inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
		if (inc_val > QM_WFQ_MAX_INC_VAL) {
			DP_NOTICE(p_hwfn, true,
				  "Invalid VPORT WFQ weight configuration\n");
			return -1;
		}

		/* Each VPORT can have several VPORT PQ IDs for various TCs */
		for (tc = 0; tc < NUM_OF_TCS; tc++) {
			vport_pq_id = vport_params[i].first_tx_pq_id[tc];
			if (vport_pq_id != QM_INVALID_PQ_ID) {
				STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET +
					     vport_pq_id,
					     (u32)QM_WFQ_CRD_REG_SIGN_BIT);
				STORE_RT_REG(p_hwfn,
					     QM_REG_WFQVPWEIGHT_RT_OFFSET +
					     vport_pq_id, inc_val);
			}
		}
	}

	return 0;
}
/* Prepare VPORT RL runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
				  u8 start_vport,
				  u8 num_vports,
				  u32 link_speed,
				  struct init_qm_vport_params *vport_params)
{
	u8 i, vport_id;
	u32 inc_val;

	if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid VPORT ID for rate limiter configuration\n");
		return -1;
	}

	/* Go over all PF VPORTs */
	for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
		inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl ?
					vport_params[i].vport_rl : link_speed);
		if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
			DP_NOTICE(p_hwfn, true,
				  "Invalid VPORT rate-limit configuration\n");
			return -1;
		}

		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
			     (u32)QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
			     QM_VP_RL_UPPER_BOUND(link_speed) |
			     (u32)QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
			     inc_val);
	}

	return 0;
}
static bool ecore_poll_on_qm_cmd_ready(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt)
{
	u32 reg_val, i;

	for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val;
	     i++) {
		OSAL_UDELAY(QM_STOP_CMD_POLL_PERIOD_US);
		reg_val = ecore_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
	}

	/* Check if timeout while waiting for SDM command ready */
	if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG,
			   "Timeout waiting for QM SDM cmd ready signal\n");
		return false;
	}

	return true;
}
static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      u32 cmd_addr,
			      u32 cmd_data_lsb,
			      u32 cmd_data_msb)
{
	if (!ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
		return false;

	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);

	return ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
}
/******************** INTERFACE IMPLEMENTATION *********************/

u32 ecore_qm_pf_mem_size(u32 num_pf_cids,
			 u32 num_vf_cids,
			 u32 num_tids,
			 u16 num_pf_pqs,
			 u16 num_vf_pqs)
{
	return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
	       QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
	       QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
}
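
/* Worked example (illustrative only, assuming QM_PQ_ELEMENT_SIZE is 4 bytes;
 * that constant is not defined in this file): with num_pf_cids = 2048,
 * QM_PQ_MEM_4KB(2048) = DIV_ROUND_UP(2049 * 4, 4096) = 3 pages per PF PQ.
 * The total above then scales linearly with the PQ counts, plus
 * QM_OTHER_PQS_PER_PF = 4 "other" PQs.
 */
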
int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
			    u8 max_ports_per_engine,
			    u8 max_phys_tcs_per_port,
			    bool pf_rl_en,
			    bool pf_wfq_en,
			    bool vport_rl_en,
			    bool vport_wfq_en,
			    struct init_qm_port_params
			    port_params[MAX_NUM_PORTS])
{
	u32 mask;

	/* Init AFullOprtnstcCrdMask */
	mask = (QM_OPPOR_LINE_VOQ_DEF <<
		QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
		(QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
		(pf_wfq_en << QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
		(vport_wfq_en << QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
		(pf_rl_en << QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
		(vport_rl_en << QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
		(QM_OPPOR_FW_STOP_DEF <<
		 QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
		(QM_OPPOR_PQ_EMPTY_DEF <<
		 QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
	STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);

	/* Enable/disable PF RL */
	ecore_enable_pf_rl(p_hwfn, pf_rl_en);

	/* Enable/disable PF WFQ */
	ecore_enable_pf_wfq(p_hwfn, pf_wfq_en);

	/* Enable/disable VPORT RL */
	ecore_enable_vport_rl(p_hwfn, vport_rl_en);

	/* Enable/disable VPORT WFQ */
	ecore_enable_vport_wfq(p_hwfn, vport_wfq_en);

	/* Init PBF CMDQ line credit */
	ecore_cmdq_lines_rt_init(p_hwfn, max_ports_per_engine,
				 max_phys_tcs_per_port, port_params);

	/* Init BTB blocks in PBF */
	ecore_btb_blocks_rt_init(p_hwfn, max_ports_per_engine,
				 max_phys_tcs_per_port, port_params);

	return 0;
}
int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt,
			u8 pf_id,
			u8 max_phys_tcs_per_port,
			bool is_pf_loading,
			u32 num_pf_cids,
			u32 num_vf_cids,
			u32 num_tids,
			u16 start_pq,
			u16 num_pf_pqs,
			u16 num_vf_pqs,
			u8 start_vport,
			u8 num_vports,
			u16 pf_wfq,
			u32 pf_rl,
			u32 link_speed,
			struct init_qm_pq_params *pq_params,
			struct init_qm_vport_params *vport_params)
{
	u32 other_mem_size_4kb;
	u8 tc, i;

	other_mem_size_4kb = QM_PQ_MEM_4KB(num_pf_cids + num_tids) *
			     QM_OTHER_PQS_PER_PF;

	/* Clear first Tx PQ ID array for each VPORT */
	for (i = 0; i < num_vports; i++)
		for (tc = 0; tc < NUM_OF_TCS; tc++)
			vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;

	/* Map Other PQs (if any) */
#if QM_OTHER_PQS_PER_PF > 0
	ecore_other_pq_map_rt_init(p_hwfn, pf_id, is_pf_loading, num_pf_cids,
				   num_tids, 0);
#endif

	/* Map Tx PQs */
	ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, pf_id, max_phys_tcs_per_port,
				is_pf_loading, num_pf_cids, num_vf_cids,
				start_pq, num_pf_pqs, num_vf_pqs, start_vport,
				other_mem_size_4kb, pq_params, vport_params);

	/* Init PF WFQ */
	if (pf_wfq)
		if (ecore_pf_wfq_rt_init(p_hwfn, pf_id, pf_wfq,
					 max_phys_tcs_per_port,
					 num_pf_pqs + num_vf_pqs, pq_params))
			return -1;

	/* Init PF RL */
	if (ecore_pf_rl_rt_init(p_hwfn, pf_id, pf_rl))
		return -1;

	/* Set VPORT WFQ */
	if (ecore_vp_wfq_rt_init(p_hwfn, num_vports, vport_params))
		return -1;

	/* Set VPORT RL */
	if (ecore_vport_rl_rt_init
	    (p_hwfn, start_vport, num_vports, link_speed, vport_params))
		return -1;

	return 0;
}
int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
		      struct ecore_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
{
	u32 inc_val;

	inc_val = QM_WFQ_INC_VAL(pf_wfq);
	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid PF WFQ weight configuration\n");
		return -1;
	}

	ecore_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);

	return 0;
}
int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
		     struct ecore_ptt *p_ptt, u8 pf_id, u32 pf_rl)
{
	u32 inc_val;

	inc_val = QM_RL_INC_VAL(pf_rl);
	if (inc_val > QM_PF_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid PF rate limit configuration\n");
		return -1;
	}

	ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFCRD + pf_id * 4,
		 (u32)QM_RL_CRD_REG_SIGN_BIT);
	ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);

	return 0;
}
int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
			 struct ecore_ptt *p_ptt,
			 u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
{
	u16 vport_pq_id;
	u32 inc_val;
	u8 tc;

	inc_val = QM_WFQ_INC_VAL(vport_wfq);
	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid VPORT WFQ weight configuration\n");
		return -1;
	}

	for (tc = 0; tc < NUM_OF_TCS; tc++) {
		vport_pq_id = first_tx_pq_id[tc];
		if (vport_pq_id != QM_INVALID_PQ_ID) {
			ecore_wr(p_hwfn, p_ptt,
				 QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
		}
	}

	return 0;
}
int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, u8 vport_id,
			u32 vport_rl, u32 link_speed)
{
	u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;

	if (vport_id >= max_qm_global_rls) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid VPORT ID for rate limiter configuration\n");
		return -1;
	}

	inc_val = QM_RL_INC_VAL(vport_rl ? vport_rl : link_speed);
	if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid VPORT rate-limit configuration\n");
		return -1;
	}

	ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + vport_id * 4,
		 (u32)QM_RL_CRD_REG_SIGN_BIT);
	ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);

	return 0;
}
bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    bool is_release_cmd,
			    bool is_tx_pq, u16 start_pq, u16 num_pqs)
{
	u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
	u32 pq_mask = 0, last_pq, pq_id;

	last_pq = start_pq + num_pqs - 1;

	/* Set command's PQ type */
	QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);

	/* Go over requested PQs */
	for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
		/* Set PQ bit in mask (stop command only) */
		if (!is_release_cmd)
			pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));

		/* If last PQ or end of PQ mask, write command */
		if ((pq_id == last_pq) ||
		    (pq_id % QM_STOP_PQ_MASK_WIDTH ==
		     (QM_STOP_PQ_MASK_WIDTH - 1))) {
			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PAUSE_MASK,
					 pq_mask);
			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, GROUP_ID,
					 pq_id / QM_STOP_PQ_MASK_WIDTH);
			if (!ecore_send_qm_cmd
			    (p_hwfn, p_ptt, QM_STOP_CMD_ADDR, cmd_arr[0],
			     cmd_arr[1]))
				return false;
			pq_mask = 0;
		}
	}

	return true;
}
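
/* Usage sketch (illustrative only, not from the original sources): to
 * quiesce 64 Tx PQs starting at PQ 32, a caller would first send the stop
 * command, flush in-flight traffic, then release:
 *
 *   if (!ecore_send_qm_stop_cmd(p_hwfn, p_ptt, false, true, 32, 64))
 *           (handle polling timeout)
 *   ecore_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, 32, 64);
 *
 * Each 32-PQ chunk of the range is written as a separate SDM command.
 */
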
/* NIG: ETS configuration constants */
#define NIG_TX_ETS_CLIENT_OFFSET 4
#define NIG_LB_ETS_CLIENT_OFFSET 1
#define NIG_ETS_MIN_WFQ_BYTES 1600

/* NIG: ETS constants */
#define NIG_ETS_UP_BOUND(weight, mtu) \
	(2 * ((weight) > (mtu) ? (weight) : (mtu)))

/* NIG: RL constants */

/* Byte base type value */
#define NIG_RL_BASE_TYPE 1

/* Period in us */
#define NIG_RL_PERIOD 1

/* Period in 25MHz cycles */
#define NIG_RL_PERIOD_CLK_25M (25 * NIG_RL_PERIOD)

/* Rate in mbps */
#define NIG_RL_INC_VAL(rate) (((rate) * NIG_RL_PERIOD) / 8)

#define NIG_RL_MAX_VAL(inc_val, mtu) \
	(2 * ((inc_val) > (mtu) ? (inc_val) : (mtu)))

/* NIG: packet priority configuration constants */
#define NIG_PRIORITY_MAP_TC_BITS 4
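
/* Worked example (illustrative only): for a 10000 Mbps TC rate,
 * NIG_RL_INC_VAL(10000) = (10000 * 1) / 8 = 1250 bytes per 1us period,
 * matching 10Gbps = 1.25 GB/s = 1250 bytes/us.
 */
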
void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt,
			struct init_ets_req *req, bool is_lb)
{
	u32 min_weight, tc_weight_base_addr, tc_weight_addr_diff;
	u32 tc_bound_base_addr, tc_bound_addr_diff;
	u8 sp_tc_map = 0, wfq_tc_map = 0;
	u8 tc, num_tc, tc_client_offset;

	num_tc = is_lb ? NUM_OF_TCS : NUM_OF_PHYS_TCS;
	tc_client_offset = is_lb ? NIG_LB_ETS_CLIENT_OFFSET :
				   NIG_TX_ETS_CLIENT_OFFSET;
	min_weight = 0xffffffff;
	tc_weight_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
				      NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
	tc_weight_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_1 -
				      NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
				      NIG_REG_TX_ARB_CREDIT_WEIGHT_1 -
				      NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
	tc_bound_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
				     NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
	tc_bound_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 -
				     NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
				     NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 -
				     NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;

	for (tc = 0; tc < num_tc; tc++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];

		/* Update SP map */
		if (tc_req->use_sp)
			sp_tc_map |= (1 << tc);

		if (!tc_req->use_wfq)
			continue;

		/* Update WFQ map */
		wfq_tc_map |= (1 << tc);

		/* Find minimal weight */
		if (tc_req->weight < min_weight)
			min_weight = tc_req->weight;
	}

	/* Write SP map */
	ecore_wr(p_hwfn, p_ptt,
		 is_lb ? NIG_REG_LB_ARB_CLIENT_IS_STRICT :
			 NIG_REG_TX_ARB_CLIENT_IS_STRICT,
		 (sp_tc_map << tc_client_offset));

	/* Write WFQ map */
	ecore_wr(p_hwfn, p_ptt,
		 is_lb ? NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ :
			 NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
		 (wfq_tc_map << tc_client_offset));

	/* Write WFQ weights */
	for (tc = 0; tc < num_tc; tc++, tc_client_offset++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];
		u32 byte_weight;

		if (!tc_req->use_wfq)
			continue;

		/* Translate weight to bytes */
		byte_weight = (NIG_ETS_MIN_WFQ_BYTES * tc_req->weight) /
			      min_weight;

		/* Write WFQ weight */
		ecore_wr(p_hwfn, p_ptt, tc_weight_base_addr +
			 tc_weight_addr_diff * tc_client_offset, byte_weight);

		/* Write WFQ upper bound */
		ecore_wr(p_hwfn, p_ptt, tc_bound_base_addr +
			 tc_bound_addr_diff * tc_client_offset,
			 NIG_ETS_UP_BOUND(byte_weight, req->mtu));
	}
}
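
/* Worked example (illustrative only): with WFQ weights {1, 2} the minimal
 * weight is 1, so byte weights of {1600, 3200} are programmed
 * (NIG_ETS_MIN_WFQ_BYTES * weight / min_weight), each with an upper bound of
 * 2 * max(byte_weight, mtu).
 */
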
void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
			  struct ecore_ptt *p_ptt,
			  struct init_nig_lb_rl_req *req)
{
	u32 ctrl, inc_val, reg_offset;
	u8 tc;

	/* Disable global MAC+LB RL */
	ctrl = NIG_RL_BASE_TYPE <<
	       NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT;
	ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);

	/* Configure and enable global MAC+LB RL */
	if (req->lb_mac_rate) {
		/* Configure */
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD,
			 NIG_RL_PERIOD_CLK_25M);
		inc_val = NIG_RL_INC_VAL(req->lb_mac_rate);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_VALUE,
			 inc_val);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_MAX_VALUE,
			 NIG_RL_MAX_VAL(inc_val, req->mtu));

		/* Enable */
		ctrl |= 1 <<
			NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT;
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
	}

	/* Disable global LB-only RL */
	ctrl = NIG_RL_BASE_TYPE <<
	       NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT;
	ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);

	/* Configure and enable global LB-only RL */
	if (req->lb_rate) {
		/* Configure */
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_PERIOD,
			 NIG_RL_PERIOD_CLK_25M);
		inc_val = NIG_RL_INC_VAL(req->lb_rate);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_VALUE,
			 inc_val);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_MAX_VALUE,
			 NIG_RL_MAX_VAL(inc_val, req->mtu));

		/* Enable */
		ctrl |= 1 <<
			NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT;
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
	}

	/* Per-TC RLs */
	for (tc = 0, reg_offset = 0; tc < NUM_OF_PHYS_TCS;
	     tc++, reg_offset += 4) {
		/* Disable TC RL */
		ctrl = NIG_RL_BASE_TYPE <<
		       NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT;
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl);

		/* Configure and enable TC RL */
		if (!req->tc_rate[tc])
			continue;

		/* Configure */
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 +
			 reg_offset, NIG_RL_PERIOD_CLK_25M);
		inc_val = NIG_RL_INC_VAL(req->tc_rate[tc]);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 +
			 reg_offset, inc_val);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 +
			 reg_offset, NIG_RL_MAX_VAL(inc_val, req->mtu));

		/* Enable */
		ctrl |= 1 <<
			NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT;
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_CTRL_0 +
			 reg_offset, ctrl);
	}
}
void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,
			       struct init_nig_pri_tc_map_req *req)
{
	u8 tc_pri_mask[NUM_OF_PHYS_TCS] = { 0 };
	u32 pri_tc_mask = 0;
	u8 pri, tc;

	for (pri = 0; pri < NUM_OF_VLAN_PRIORITIES; pri++) {
		if (!req->pri[pri].valid)
			continue;

		pri_tc_mask |= (req->pri[pri].tc_id <<
				(pri * NIG_PRIORITY_MAP_TC_BITS));
		tc_pri_mask[req->pri[pri].tc_id] |= (1 << pri);
	}

	/* Write priority -> TC mask */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_PKT_PRIORITY_TO_TC, pri_tc_mask);

	/* Write TC -> priority mask */
	for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
		ecore_wr(p_hwfn, p_ptt, NIG_REG_PRIORITY_FOR_TC_0 + tc * 4,
			 tc_pri_mask[tc]);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_TC0_PRIORITY_MASK + tc * 4,
			 tc_pri_mask[tc]);
	}
}
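
/* Worked example (illustrative only): mapping priority 0 -> TC2 and
 * priority 1 -> TC0 gives pri_tc_mask = (2 << 0) | (0 << 4) = 0x2, while
 * tc_pri_mask[2] = 0x1 and tc_pri_mask[0] = 0x2; each priority takes
 * NIG_PRIORITY_MAP_TC_BITS = 4 bits of the priority -> TC register.
 */
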
/* PRS: ETS configuration constants */
#define PRS_ETS_MIN_WFQ_BYTES 1600
#define PRS_ETS_UP_BOUND(weight, mtu) \
	(2 * ((weight) > (mtu) ? (weight) : (mtu)))
void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, struct init_ets_req *req)
{
	u32 tc_weight_addr_diff, tc_bound_addr_diff, min_weight = 0xffffffff;
	u8 tc, sp_tc_map = 0, wfq_tc_map = 0;

	tc_weight_addr_diff = PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 -
			      PRS_REG_ETS_ARB_CREDIT_WEIGHT_0;
	tc_bound_addr_diff = PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 -
			     PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0;

	for (tc = 0; tc < NUM_OF_TCS; tc++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];

		/* Update SP map */
		if (tc_req->use_sp)
			sp_tc_map |= (1 << tc);

		if (!tc_req->use_wfq)
			continue;

		/* Update WFQ map */
		wfq_tc_map |= (1 << tc);

		/* Find minimal weight */
		if (tc_req->weight < min_weight)
			min_weight = tc_req->weight;
	}

	/* Write SP map */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_STRICT, sp_tc_map);

	/* Write WFQ map */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ,
		 wfq_tc_map);

	/* Write WFQ weights */
	for (tc = 0; tc < NUM_OF_TCS; tc++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];
		u32 byte_weight;

		if (!tc_req->use_wfq)
			continue;

		/* Translate weight to bytes */
		byte_weight = (PRS_ETS_MIN_WFQ_BYTES * tc_req->weight) /
			      min_weight;

		/* Write WFQ weight */
		ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 + tc *
			 tc_weight_addr_diff, byte_weight);

		/* Write WFQ upper bound */
		ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 +
			 tc * tc_bound_addr_diff,
			 PRS_ETS_UP_BOUND(byte_weight, req->mtu));
	}
}
/* BRB: RAM configuration constants */
#define BRB_TOTAL_RAM_BLOCKS_BB 4800
#define BRB_TOTAL_RAM_BLOCKS_K2 5632
#define BRB_BLOCK_SIZE 128
#define BRB_MIN_BLOCKS_PER_TC 9
#define BRB_HYST_BYTES 10240
#define BRB_HYST_BLOCKS (BRB_HYST_BYTES / BRB_BLOCK_SIZE)
/* Temporary big RAM allocation - should be updated */
void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, struct init_brb_ram_req *req)
{
	u32 tc_headroom_blocks, min_pkt_size_blocks, total_blocks;
	u32 active_port_blocks, reg_offset = 0;
	u8 port, active_ports = 0;

	tc_headroom_blocks = (u32)DIV_ROUND_UP(req->headroom_per_tc,
					       BRB_BLOCK_SIZE);
	min_pkt_size_blocks = (u32)DIV_ROUND_UP(req->min_pkt_size,
						BRB_BLOCK_SIZE);
	total_blocks = ECORE_IS_K2(p_hwfn->p_dev) ? BRB_TOTAL_RAM_BLOCKS_K2 :
						    BRB_TOTAL_RAM_BLOCKS_BB;

	/* Find number of active ports */
	for (port = 0; port < MAX_NUM_PORTS; port++)
		if (req->num_active_tcs[port])
			active_ports++;

	active_port_blocks = (u32)(total_blocks / active_ports);

	for (port = 0; port < req->max_ports_per_engine; port++) {
		u32 port_blocks, port_shared_blocks, port_guaranteed_blocks;
		u32 full_xoff_th, full_xon_th, pause_xoff_th, pause_xon_th;
		u32 tc_guaranteed_blocks;
		u8 tc;

		/* Calculate per-port sizes */
		tc_guaranteed_blocks = (u32)DIV_ROUND_UP(req->guranteed_per_tc,
							 BRB_BLOCK_SIZE);
		port_blocks = req->num_active_tcs[port] ? active_port_blocks :
							  0;
		port_guaranteed_blocks = req->num_active_tcs[port] *
					 tc_guaranteed_blocks;
		port_shared_blocks = port_blocks - port_guaranteed_blocks;
		full_xoff_th = req->num_active_tcs[port] *
			       BRB_MIN_BLOCKS_PER_TC;
		full_xon_th = full_xoff_th + min_pkt_size_blocks;
		pause_xoff_th = tc_headroom_blocks;
		pause_xon_th = pause_xoff_th + min_pkt_size_blocks;

		/* Init total size per port */
		ecore_wr(p_hwfn, p_ptt, BRB_REG_TOTAL_MAC_SIZE + port * 4,
			 port_blocks);

		/* Init shared size per port */
		ecore_wr(p_hwfn, p_ptt, BRB_REG_SHARED_HR_AREA + port * 4,
			 port_shared_blocks);

		for (tc = 0; tc < NUM_OF_TCS; tc++, reg_offset += 4) {
			/* Clear init values for non-active TCs */
			if (tc == req->num_active_tcs[port]) {
				tc_guaranteed_blocks = 0;
				full_xoff_th = 0;
				full_xon_th = 0;
				pause_xoff_th = 0;
				pause_xon_th = 0;
			}

			/* Init guaranteed size per TC */
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_TC_GUARANTIED_0 + reg_offset,
				 tc_guaranteed_blocks);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_MAIN_TC_GUARANTIED_HYST_0 + reg_offset,
				 BRB_HYST_BLOCKS);

			/* Init pause/full thresholds per physical TC - for
			 * loopback traffic.
			 */
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 +
				 reg_offset, full_xoff_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_LB_TC_FULL_XON_THRESHOLD_0 +
				 reg_offset, full_xon_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_LB_TC_PAUSE_XOFF_THRESHOLD_0 +
				 reg_offset, pause_xoff_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 +
				 reg_offset, pause_xon_th);

			/* Init pause/full thresholds per physical TC - for
			 * main traffic.
			 */
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 +
				 reg_offset, full_xoff_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_MAIN_TC_FULL_XON_THRESHOLD_0 +
				 reg_offset, full_xon_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_MAIN_TC_PAUSE_XOFF_THRESHOLD_0 +
				 reg_offset, pause_xoff_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_MAIN_TC_PAUSE_XON_THRESHOLD_0 +
				 reg_offset, pause_xon_th);
		}
	}
}
/* In MF, should be called once per port to set EtherType of OuterTag */
void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType)
{
	/* Update DORQ register */
	STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, ethType);
}
#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
	(var = ((var) & ~(1 << (offset))) | ((enable) ? (1 << (offset)) : 0))
#define PRS_ETH_TUNN_OUTPUT_FORMAT -188897008
#define PRS_ETH_OUTPUT_FORMAT -46832

void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt, u16 dest_port)
{
	/* Update PRS register */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);

	/* Update NIG register */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);

	/* Update PBF register */
	ecore_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
}
void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt, bool vxlan_enable)
{
	u32 reg_val;

	/* Update PRS register */
	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT,
		vxlan_enable);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) { /* TODO: handle E5 init */
		reg_val = ecore_rd(p_hwfn, p_ptt,
				   PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);

		/* Update output only if tunnel blocks not included. */
		if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
			ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
				 (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
	}

	/* Update NIG register */
	reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT,
		vxlan_enable);
	ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

	/* Update DORQ register */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
		 vxlan_enable ? 1 : 0);
}
void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
			  struct ecore_ptt *p_ptt,
			  bool eth_gre_enable, bool ip_gre_enable)
{
	u32 reg_val;

	/* Update PRS register */
	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT,
		eth_gre_enable);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT,
		ip_gre_enable);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) { /* TODO: handle E5 init */
		reg_val = ecore_rd(p_hwfn, p_ptt,
				   PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);

		/* Update output only if tunnel blocks not included. */
		if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
			ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
				 (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
	}

	/* Update NIG register */
	reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT,
		eth_gre_enable);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT,
		ip_gre_enable);
	ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

	/* Update DORQ registers */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
		 eth_gre_enable ? 1 : 0);
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
		 ip_gre_enable ? 1 : 0);
}
void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt, u16 dest_port)
{
	/* Update PRS register */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);

	/* Update NIG register */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);

	/* Update PBF register */
	ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
}
void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt,
			     bool eth_geneve_enable, bool ip_geneve_enable)
{
	u32 reg_val;

	/* Update PRS register */
	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT,
		eth_geneve_enable);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT,
		ip_geneve_enable);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) { /* TODO: handle E5 init */
		reg_val = ecore_rd(p_hwfn, p_ptt,
				   PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);

		/* Update output only if tunnel blocks not included. */
		if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
			ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
				 (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
	}

	/* Update NIG register */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
		 eth_geneve_enable ? 1 : 0);
	ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE,
		 ip_geneve_enable ? 1 : 0);

	/* EDPM with geneve tunnel not supported in BB */
	if (ECORE_IS_BB_B0(p_hwfn->p_dev))
		return;

	/* Update DORQ registers */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2,
		 eth_geneve_enable ? 1 : 0);
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2,
		 ip_geneve_enable ? 1 : 0);
}
#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 4
#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -927094512

void ecore_set_vxlan_no_l2_enable(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt,
				  bool enable)
{
	u32 reg_val, cfg_mask;

	/* read PRS config register */
	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_MSG_INFO);

	/* set VXLAN_NO_L2_ENABLE mask */
	cfg_mask = (1 << PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET);

	if (enable) {
		/* set VXLAN_NO_L2_ENABLE flag */
		reg_val |= cfg_mask;

		/* update PRS FIC register */
		ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
			 (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT);
	} else {
		/* clear VXLAN_NO_L2_ENABLE flag */
		reg_val &= ~cfg_mask;
	}

	/* write PRS config register */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, reg_val);
}
#define T_ETH_PACKET_ACTION_GFT_EVENTID 23
#define PARSER_ETH_CONN_GFT_ACTION_CM_HDR 272
#define T_ETH_PACKET_MATCH_RFS_EVENTID 25
#define PARSER_ETH_CONN_CM_HDR 0
#define CAM_LINE_SIZE sizeof(u32)
#define RAM_LINE_SIZE sizeof(u64)
#define REG_SIZE sizeof(u32)
void ecore_gft_disable(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt,
		       u16 pf_id)
{
	/* disable gft search for PF */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);

	/* Clean ram & cam for next gft session */

	/* Zero camline */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);

	/* Zero ramline */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
		 RAM_LINE_SIZE * pf_id, 0);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
		 RAM_LINE_SIZE * pf_id + REG_SIZE, 0);
}
void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt)
{
	u32 rfs_cm_hdr_event_id;

	/* Set RFS event ID to be awakened in Tstorm by PRS */
	rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
	rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID <<
			       PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
	rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR <<
			       PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
	ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
}
void ecore_gft_config(struct ecore_hwfn *p_hwfn,
		      struct ecore_ptt *p_ptt,
		      u16 pf_id,
		      bool tcp,
		      bool udp,
		      bool ipv4,
		      bool ipv6,
		      enum gft_profile_type profile_type)
{
	u32 reg_val, cam_line, ram_line_lo, ram_line_hi, search_non_ip_as_gft;

	if (!ipv6 && !ipv4)
		DP_NOTICE(p_hwfn, true,
			  "gft_config: must accept at least one of - ipv4 or ipv6\n");
	if (!tcp && !udp)
		DP_NOTICE(p_hwfn, true,
			  "gft_config: must accept at least one of - udp or tcp\n");
	if (profile_type >= MAX_GFT_PROFILE_TYPE)
		DP_NOTICE(p_hwfn, true,
			  "gft_config: unsupported gft_profile_type\n");

	/* Set RFS event ID to be awakened in Tstorm by PRS */
	reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID <<
		  PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
	reg_val |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
	ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, reg_val);

	/* Do not load context only cid in PRS on match. */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);

	/* Do not use tenant ID exist bit for gft search */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TENANT_ID, 0);

	/* Set Cam */
	cam_line = 0;
	SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_VALID, 1);

	/* Filters are per PF!! */
	SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID_MASK,
		  GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
	SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID, pf_id);

	if (!(tcp && udp)) {
		SET_FIELD(cam_line,
			  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
			  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
		if (tcp)
			SET_FIELD(cam_line,
				  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
				  GFT_PROFILE_TCP_PROTOCOL);
		else
			SET_FIELD(cam_line,
				  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
				  GFT_PROFILE_UDP_PROTOCOL);
	}

	if (!(ipv4 && ipv6)) {
		SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
		if (ipv4)
			SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION,
				  GFT_PROFILE_IPV4);
		else
			SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION,
				  GFT_PROFILE_IPV6);
	}

	/* Write characteristics to cam */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
		 cam_line);
	cam_line = ecore_rd(p_hwfn, p_ptt,
			    PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);

	/* Write line to RAM - compare to filter 4 tuple */
	ram_line_lo = 0;
	ram_line_hi = 0;

	/* Search no IP as GFT */
	search_non_ip_as_gft = 0;

	/* Tunnel type */
	SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
	SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);

	if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
		SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
		SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
		SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
		SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
		SET_FIELD(ram_line_lo, GFT_RAM_LINE_SRC_PORT, 1);
		SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) {
		SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
		SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
		SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) {
		SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
		SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) {
		SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
		SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) {
		SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);

		/* Allow tunneled traffic without inner IP */
		search_non_ip_as_gft = 1;
	}

	ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT,
		 search_non_ip_as_gft);
	ecore_wr(p_hwfn, p_ptt,
		 PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
		 ram_line_lo);
	ecore_wr(p_hwfn, p_ptt,
		 PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id +
		 REG_SIZE, ram_line_hi);

	/* Set default profile so that no filter match will happen */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
		 PRS_GFT_CAM_LINES_NO_MATCH, 0xffffffff);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
		 PRS_GFT_CAM_LINES_NO_MATCH + REG_SIZE, 0x3ff);

	/* Enable gft search */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
}
/* Configure VF zone size mode */
void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt, u16 mode,
				    bool runtime_init)
{
	u32 msdm_vf_size_log = MSTORM_VF_ZONE_DEFAULT_SIZE_LOG;
	u32 msdm_vf_offset_mask;

	if (mode == VF_ZONE_SIZE_MODE_DOUBLE)
		msdm_vf_size_log += 1;
	else if (mode == VF_ZONE_SIZE_MODE_QUAD)
		msdm_vf_size_log += 2;

	msdm_vf_offset_mask = (1 << msdm_vf_size_log) - 1;

	if (runtime_init) {
		STORE_RT_REG(p_hwfn,
			     PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET,
			     msdm_vf_size_log);
		STORE_RT_REG(p_hwfn,
			     PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET,
			     msdm_vf_offset_mask);
	} else {
		ecore_wr(p_hwfn, p_ptt,
			 PGLUE_B_REG_MSDM_VF_SHIFT_B, msdm_vf_size_log);
		ecore_wr(p_hwfn, p_ptt,
			 PGLUE_B_REG_MSDM_OFFSET_MASK_B, msdm_vf_offset_mask);
	}
}
/* Get mstorm statistics offset by VF zone size mode */
u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn,
				       u16 stat_cnt_id,
				       u16 vf_zone_size_mode)
{
	u32 offset = MSTORM_QUEUE_STAT_OFFSET(stat_cnt_id);

	if ((vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) &&
	    (stat_cnt_id > MAX_NUM_PFS)) {
		if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
			offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
				  (stat_cnt_id - MAX_NUM_PFS);
		else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
			offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
				  (stat_cnt_id - MAX_NUM_PFS);
	}

	return offset;
}
/* Get mstorm VF producer offset by VF zone size mode */
u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn,
					 u8 vf_id,
					 u8 vf_queue_id,
					 u16 vf_zone_size_mode)
{
	u32 offset = MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id);

	if (vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) {
		if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
			offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
				  vf_id;
		else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
			offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
				  vf_id;
	}

	return offset;
}
#ifndef LINUX_REMOVE
#define CRC8_INIT_VALUE 0xFF
#endif
static u8 cdu_crc8_table[CRC8_TABLE_SIZE];

/* Calculate and return CDU validation byte per connection type / region /
 * cid.
 */
static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
{
	const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;

	static u8 crc8_table_valid; /* automatically initialized to 0 */
	u8 crc, validation_byte = 0;
	u32 validation_string = 0;
	u32 data_to_crc;

	if (crc8_table_valid == 0) {
		OSAL_CRC8_POPULATE(cdu_crc8_table, 0x07);
		crc8_table_valid = 1;
	}

	/* The CRC is calculated on the String-to-compress:
	 * [31:8]  = {CID[31:20],CID[11:0]}
	 * [7:4]   = Region
	 * [3:0]   = Type
	 */
	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
		validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);

	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
		validation_string |= ((region & 0xF) << 4);

	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
		validation_string |= (conn_type & 0xF);

	/* Convert to big-endian and calculate CRC8 */
	data_to_crc = OSAL_BE32_TO_CPU(validation_string);

	crc = OSAL_CRC8(cdu_crc8_table, (u8 *)&data_to_crc, sizeof(data_to_crc),
			CRC8_INIT_VALUE);

	/* The validation byte [7:0] is composed:
	 * for type A validation
	 * [7]   = active configuration bit
	 * [6:0] = crc[6:0]
	 *
	 * for type B validation
	 * [7]   = active configuration bit
	 * [6:3] = connection_type[3:0]
	 * [2:0] = crc[2:0]
	 */
	validation_byte |= ((validation_cfg >>
			     CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;

	if ((validation_cfg >>
	     CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
		validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
	else
		validation_byte |= crc & 0x7F;

	return validation_byte;
}
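
/* Worked example (illustrative only): for conn_type = 1, region = 3 and
 * cid = 0x1234, the string-to-compress is
 * (cid & 0xFFF00000) | ((cid & 0xFFF) << 8) | (region << 4) | conn_type
 * = 0x00023431; the CRC8 runs over its big-endian form and the result is
 * folded into the validation byte per the type A/B layout above.
 */
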
/* Calculate and set validation bytes for session context */
void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
				       u8 ctx_type, u32 cid)
{
	u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;

	p_ctx = (u8 *)p_ctx_mem;
	x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
	t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
	u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];

	OSAL_MEMSET(p_ctx, 0, ctx_size);

	*x_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 3, cid);
	*t_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 4, cid);
	*u_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 5, cid);
}

/* Calculate and set validation bytes for task context */
void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type,
				    u32 tid)
{
	u8 *p_ctx, *region1_val_ptr;

	p_ctx = (u8 *)p_ctx_mem;
	region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];

	OSAL_MEMSET(p_ctx, 0, ctx_size);

	*region1_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 1, tid);
}
/* Memset session context to 0 while preserving validation bytes */
void ecore_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
{
	u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
	u8 x_val, t_val, u_val;

	p_ctx = (u8 *)p_ctx_mem;
	x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
	t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
	u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];

	x_val = *x_val_ptr;
	t_val = *t_val_ptr;
	u_val = *u_val_ptr;

	OSAL_MEMSET(p_ctx, 0, ctx_size);

	*x_val_ptr = x_val;
	*t_val_ptr = t_val;
	*u_val_ptr = u_val;
}

/* Memset task context to 0 while preserving validation bytes */
void ecore_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
{
	u8 *p_ctx, *region1_val_ptr;
	u8 region1_val;

	p_ctx = (u8 *)p_ctx_mem;
	region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];

	region1_val = *region1_val_ptr;

	OSAL_MEMSET(p_ctx, 0, ctx_size);

	*region1_val_ptr = region1_val;
}
/* Enable and configure context validation */
void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	u32 ctx_validation;

	/* Enable validation for connection region 3 - bits [31:24] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24;
	ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation);

	/* Enable validation for connection region 5 - bits [15: 8] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
	ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation);

	/* Enable validation for connection region 1 - bits [15: 8] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
	ecore_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
}
/*******************************************************************************
 * File name : rdma_init.c
 * Author    : Michael Shteinbok
 *******************************************************************************
 *******************************************************************************
 * Description:
 * RDMA HSI functions
 *
 *******************************************************************************
 * Notes: This is the input to the auto generated file drv_init_fw_funcs.c
 *
 *******************************************************************************
 */
static u32 ecore_get_rdma_assert_ram_addr(struct ecore_hwfn *p_hwfn,
					  u8 storm_id)
{
	switch (storm_id) {
	case 0: return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       TSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
	case 1: return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       MSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
	case 2: return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       USTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
	case 3: return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       XSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
	case 4: return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       YSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
	case 5: return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       PSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);

	default: return 0;
	}
}
void ecore_set_rdma_error_level(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt,
				u8 assert_level[NUM_STORMS])
{
	u8 storm_id;

	for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
		u32 ram_addr = ecore_get_rdma_assert_ram_addr(p_hwfn, storm_id);

		ecore_wr(p_hwfn, p_ptt, ram_addr, assert_level[storm_id]);
	}
}